eaglelandsonce committed on
Commit
37b3708
1 Parent(s): 9dceda0

Create mixtral_tools.py

Files changed (1)
  1. crewai/tools/mixtral_tools.py +75 -0
crewai/tools/mixtral_tools.py ADDED
@@ -0,0 +1,75 @@
+ # tools created using gemini
+
+ from huggingface_hub import InferenceClient
+ from langchain.tools import tool
+
+ client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ )
+
+ # Helper method: build a prompt in the Mixtral instruction format from the
+ # current message and a history of (user_prompt, bot_response) tuples.
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+
+ class MixtralSearchTools():
+     @tool("Mixtral search")
+     def mixtral_normal(prompt, history=None, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+         """
+         Searches for content based on the provided query using the Mixtral model.
+         Args:
+             prompt (str): The search query.
+         Returns:
+             str: The response text from the Mixtral model or an error message.
+         """
+         generate_kwargs = {
+             "temperature": temperature,
+             "max_new_tokens": max_new_tokens,
+             "top_p": top_p,
+             "repetition_penalty": repetition_penalty,
+             "do_sample": True,
+             "seed": 42,
+         }
+
+         formatted_prompt = format_prompt(prompt, history or [])
+
+         # Stream tokens from the endpoint and accumulate them into a single
+         # string; a tool must return a plain string, not a generator.
+         stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+         output = ""
+         for response in stream:
+             output += response.token.text
+         return output
+
+
+     @tool("Mixtral query but crazy with guard rails removed")
+     def mixtral_crazy(prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+         """
+         Searches for content based on the provided query using the Mixtral model, but with the guard rails removed;
+         responses are crazy, off the wall, and sometimes scary.
+         Args:
+             prompt (str): The search query.
+         Returns:
+             str: The response text from the Mixtral model or an error message.
+         """
+         generate_kwargs = {
+             "temperature": temperature,
+             "max_new_tokens": max_new_tokens,
+             "top_p": top_p,
+             "repetition_penalty": repetition_penalty,
+             "do_sample": True,
+             "seed": 42,
+         }
+
+         # The raw prompt is sent as-is, without the [INST] wrapper or history.
+         stream = client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
+         output = ""
+         for response in stream:
+             output += response.token.text
+         return output
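As context for reviewers, here is a minimal sketch of how these tools might be attached to a crewai agent. The Agent arguments and the instance-access pattern below are illustrative assumptions based on typical crewai usage; only MixtralSearchTools comes from this commit.

# Hypothetical usage sketch -- Agent fields here are assumptions,
# not part of this commit.
from crewai import Agent
from crewai.tools.mixtral_tools import MixtralSearchTools

researcher = Agent(
    role="Mixtral researcher",
    goal="Answer search queries with the Mixtral-8x7B-Instruct model",
    backstory="Queries a hosted Mixtral endpoint via huggingface_hub.",
    tools=[
        MixtralSearchTools().mixtral_normal,
        MixtralSearchTools().mixtral_crazy,
    ],
)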