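"""Minimal Gradio chat demo for the OpenAI Assistants API (beta).

Each chat turn creates an assistant, a thread, a hard-coded user message, and a
run, prints every returned object to the console, and always answers "Done" in
the UI. Requires the OPENAI_API_KEY environment variable.
"""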
import gradio as gr
import json, openai, os, time
from openai import OpenAI

def show_json(obj):
    # Pretty-print an API object by round-tripping its JSON representation.
    print("###")
    print(json.loads(obj.model_dump_json()))
    print("###")

def wait_on_run(run, thread, client):
    # Poll until the run finishes; `client` is passed in explicitly because it is
    # created inside chat() and would otherwise not be in scope here.
    while run.status == "queued" or run.status == "in_progress":
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.5)
    return run

def chat(message, history):
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
    # Create a fresh assistant and thread for every chat turn.
    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Answer questions briefly, in a sentence or less.",
        model="gpt-4-1106-preview",
    )
    show_json(assistant)
    thread = client.beta.threads.create()
    show_json(thread)
    # The user-typed `message` from Gradio is replaced here by a fixed demo prompt.
    message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content="I need to solve the equation `3x + 11 = 14`. Can you help me?",
    )
    show_json(message)
    run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )
    show_json(run)
    run = wait_on_run(run, thread, client)
    show_json(run)
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    show_json(messages)
    ###
    # for i in range(len(message)):
    #     time.sleep(0.5)
    #     yield "You typed: " + message[: i+1]
    return "Done"

# Launch the Gradio chat UI.
gr.ChatInterface(chat).launch()