
This is a test model. The script below loads it and wires it up to a simple DuckDuckGo web-search tool (via the webscout package) so the model can answer queries using fresh search results.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
from webscout import WEBS

# Load the HelpingAI-function model
model = AutoModelForCausalLM.from_pretrained("Abhaykoul/HelpingAI-function", trust_remote_code=True).to("cuda")

# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained("Abhaykoul/HelpingAI-function", trust_remote_code=True)

# Initialize TextStreamer to print tokens to stdout as they are generated
streamer = TextStreamer(tokenizer)

# Define the prompt template
prompt_template = """
<|im_start|>system: {system}
<|im_end|>
<|im_start|>user: {insaan}
<|im_end|>
<|im_start|>assistant:
"""
user_query = "tell me about SearchGPT by openai"

# Web Search Tool Class
class WebSearchTool:
    """A web search tool using DuckDuckGo."""

    def __init__(self):
        self.webs = WEBS()

    def search(self, query: str) -> str:
        """Performs a web search and returns a formatted summary of the results."""
        results = self.webs.text(query, max_results=5)
        summary = "\n".join(
            f"**{result['title']}**\n{result['body']}\n{result['href']}"
            for result in results
        )
        return summary

# Initialize the web search tool
web_search_tool = WebSearchTool()

def use_tools(tools, user_query):
    """Use tools to handle specific user queries."""
    # Dispatch to the web search tool if it is listed
    if any(tool["name"] == "web_search" for tool in tools):
        search_summary = web_search_tool.search(user_query)

        # Fill the prompt template, injecting the search results as context
        detailed_prompt = prompt_template.format(
            system="You are a helpful assistant. Answer using the provided web search results.",
            insaan=f"User query: {user_query}\n\nWeb search results:\n{search_summary}\n\nProvide a comprehensive answer based on this information.",
        )

        # Tokenize the prompt
        inputs = tokenizer(detailed_prompt, return_tensors="pt", return_attention_mask=False).to("cuda")

        # Generate the response; the streamer prints tokens as they arrive
        generated_text = model.generate(**inputs, max_length=3084, top_p=0.95, do_sample=True, temperature=0.6, use_cache=True, streamer=streamer)

        # Decode the full response (already streamed above) and return it
        response = tokenizer.decode(generated_text[0], skip_special_tokens=True)
        return response

# Example tools
tools = [
    {
        "name": "web_search",
        "description": "Search the web for information.",
        "parameters": {
            "type": "object",
            "properties": {
                "query": {
                    "type": "string",
                    "description": "The search query.",
                },
            },
            "required": ["query"],
        }
    }
]

# Example usage of the tools
use_tools(tools, user_query)
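
Given that the model is oriented toward function calling, a natural next step is to let the model itself emit a tool call and dispatch on it. The sketch below is a minimal, hypothetical dispatcher: the JSON call format it parses is an assumption for illustration, not a documented contract of this model.

import json

def dispatch_tool_call(raw_output: str):
    """Parse a JSON tool call emitted by the model and route it.

    Assumed (hypothetical) call format:
        {"name": "web_search", "arguments": {"query": "..."}}
    """
    try:
        call = json.loads(raw_output)
    except json.JSONDecodeError:
        return None  # not a tool call; treat as a plain-text answer

    if call.get("name") == "web_search":
        query = call.get("arguments", {}).get("query", "")
        return web_search_tool.search(query)
    return None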
Model size: 2.8B params · FP16 · Safetensors
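
Since the checkpoint is stored in FP16, you can load it at half the memory of the default fp32 upcast (roughly 5.6 GB of weights for 2.8B params instead of ~11 GB) by passing the dtype explicitly; a minimal sketch:

import torch
from transformers import AutoModelForCausalLM

# Load the FP16 weights directly instead of upcasting to fp32
model = AutoModelForCausalLM.from_pretrained(
    "Abhaykoul/HelpingAI-function",
    trust_remote_code=True,
    torch_dtype=torch.float16,
).to("cuda")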