import gradio as gr
import json, os, time

from openai import OpenAI


def show_json(label, obj):
    # Debug helper: print an OpenAI API object as JSON under a short label.
    print(f"### {label}")
    print(json.loads(obj.model_dump_json()))
def init_assistant():
    # Create an OpenAI client, an Assistants API assistant, and a fresh thread.
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Answer questions briefly, in a sentence or less.",
        model="gpt-4-1106-preview",
    )
    show_json("assistant", assistant)
    thread = client.beta.threads.create()
    show_json("thread", thread)
    return client, assistant, thread
def wait_on_run(client, run, thread):
    # Poll until the run leaves the "queued"/"in_progress" states.
    while run.status == "queued" or run.status == "in_progress":
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.25)
    return run
def extract_content_value(data):
    content_values = []
    for item in data.data:
        for content in item.content:
            if content.type == "text":
                content_values.append(content.text.value)
    return content_values
def chat(message, history):
    client, assistant, thread = init_assistant()
    # Reshape the Gradio history into OpenAI chat format (printed for debugging only).
    history_openai_format = []
    for user_msg, assistant_msg in history:
        history_openai_format.append({"role": "user", "content": user_msg})
        history_openai_format.append({"role": "assistant", "content": assistant_msg})
    history_openai_format.append({"role": "user", "content": message})
    print("### history")
    print(history_openai_format)
    # Add the user's message to the thread and run the assistant on it.
    messages = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=message,
    )
    show_json("messages", messages)
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )
    show_json("run", run)
    run = wait_on_run(client, run, thread)
    show_json("run", run)
    # messages.list returns newest first, so index 0 is the assistant's reply.
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    show_json("messages", messages)
    return extract_content_value(messages)[0]
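
# Wire the chat handler into a Gradio ChatInterface and launch the app.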
gr.ChatInterface(
    chat,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask Math Tutor any question", container=False, scale=7),
    title="Math Tutor",
    description="Question",
    theme="soft",
    examples=["I need to solve the equation `3x + 12 = 14`. Can you help me?"],
    cache_examples=False,
    retry_btn=None,
    undo_btn=None,
    clear_btn="Clear",
    # multimodal=True,
    # additional_inputs=[
    #     gr.Textbox("You are a personal math tutor. Answer questions briefly, in a sentence or less.", label="System Prompt"),
    # ],
).launch()
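
# To run locally (assuming this file is saved as app.py and an OpenAI API key is available):
#   export OPENAI_API_KEY=sk-...
#   python app.py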