Update app.py
app.py CHANGED

@@ -1,12 +1,16 @@
 import os
+import random
 import gradio as gr
 from groq import Groq

-def generate_response(prompt, history, model, temperature, max_tokens, top_p):
+def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
     client = Groq(
         api_key = os.environ.get("Groq_Api_Key")
     )

+    if seed == 0:
+        seed = random.randint(1, 100000)
+
     stream = client.chat.completions.create(
         messages=[
             {"role": "system", "content": "you are a helpful assistant."},
@@ -16,6 +20,7 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p):
         temperature=temperature,
         max_tokens=max_tokens,
         top_p=top_p,
+        seed=seed
         stop=None,
         stream=True,
     )
@@ -23,17 +28,18 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p):
     response = ""
     for chunk in stream:
         delta_content = chunk.choices[0].delta.content
-
-
+        response += delta_content
+        yield output

     return response

 # Define the Gradio chat interface
 additional_inputs = [
     gr.Dropdown(choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"], value="llama3-70b-8192", label="LLM Model"),
-    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature"),
-    gr.Slider(minimum=1, maximum=4096, step=1, value=4096, label="Max Tokens"),
-    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature", info="Controls randomness of responses"),
+    gr.Slider(minimum=1, maximum=4096, step=1, value=4096, label="Max Tokens", info="The maximum number of tokens that the model can process in a single response"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P", info="A method of text generation where a model will only consider the most probable next tokens that make up the probability p."),
+    gr.Number(precision=0, value=42, label="Seed", info="A starting point to initiate generation, use 0 for random")
 ]

 gr.ChatInterface(
@@ -41,5 +47,5 @@ gr.ChatInterface(
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     additional_inputs=additional_inputs,
     title="Groq API LLMs AI Models",
-    description="Using https://groq.com/ api, ofc as its free it will have some limitations so its better if you duplicate this space with your own api key<br>Hugging Face Space by [Nick088](https://linktr.ee/Nick088)",
+    description="Using https://groq.com/ api, ofc as its free it will have some limitations of requests per minute, so its better if you duplicate this space with your own api key<br>Hugging Face Space by [Nick088](https://linktr.ee/Nick088)",
 ).launch()
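Two details of the new hunks are worth flagging for anyone pulling this revision: the added `seed=seed` line has no trailing comma before `stop=None,`, which Python rejects as a SyntaxError, and the streaming loop yields `output`, a name that is never defined (the accumulator is `response`). Below is a corrected, self-contained sketch of what the updated handler appears to be aiming for; it is a reference sketch, not the committed code. The user message, the `model=` argument, and any use of `history` sit in context lines this diff collapses, so those parts are assumptions.

import os
import random

from groq import Groq


def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
    # Corrected sketch of the handler in this commit, not the committed code.
    client = Groq(api_key=os.environ.get("Groq_Api_Key"))

    # 0 acts as "randomize", matching the new Seed input's description.
    if seed == 0:
        seed = random.randint(1, 100000)

    stream = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "you are a helpful assistant."},
            # Assumed user turn; the real message list (including any use of
            # `history`) is in lines this diff does not show.
            {"role": "user", "content": prompt},
        ],
        model=model,          # assumed, also hidden in collapsed context
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
        seed=seed,            # trailing comma added
        stop=None,
        stream=True,
    )

    response = ""
    for chunk in stream:
        delta_content = chunk.choices[0].delta.content
        if delta_content is not None:  # the final chunk's delta is typically empty
            response += delta_content
            yield response             # yield the accumulated text so Gradio streams it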
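On the Gradio side, the order of `additional_inputs` has to line up with the handler's extra parameters, because `gr.ChatInterface` calls the function as `fn(message, history, *additional_inputs)`; appending the new `gr.Number` Seed control last therefore matches the new trailing `seed` parameter. A hypothetical wiring is shown below (the actual `fn=` line sits in context this diff does not show). When duplicating the Space as the description suggests, the key is read from the `Groq_Api_Key` environment variable, typically set as a Space secret.

gr.ChatInterface(
    fn=generate_response,                  # assumed name of the handler above
    additional_inputs=additional_inputs,   # passed positionally: model, temperature, max_tokens, top_p, seed
).launch()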