File size: 2,895 Bytes
aa0eed8
12338d7
92fb7b8
031211a
aa0eed8
53ed856
 
 
ee66ad7
29455b5
53ed856
12c2b66
 
92fb7b8
12c2b66
68cb77a
53ed856
5295650
29455b5
53ed856
68cb77a
53ed856
aaf4e3a
 
 
 
 
 
 
 
 
881c209
 
 
 
 
 
 
 
 
53ed856
453ee12
53ed856
 
9ea93d2
453ee12
d06678c
881c209
453ee12
 
cee6a57
 
 
 
 
 
 
12c2b66
24755bb
38dfd80
 
 
 
24755bb
3017744
cee6a57
 
 
d42fd6b
df144ba
7fd13ba
53ed856
29455b5
53ed856
 
29455b5
53ed856
cee6a57
 
 
881c209
53ed856
a030fa6
cee6a57
24755bb
12c2b66
805ff2f
19797f3
 
8186c74
431abc0
c4a7aa8
bd63387
19797f3
8fae4d3
19797f3
1ba7dce
bd63387
d42fd6b
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
import gradio as gr
import json, openai, os, time

from openai import OpenAI

# Shared OpenAI client; reads the API key from the environment at import time.
_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

# Lazily-created singletons: one assistant and one conversation thread,
# populated on the first chat() call and reused for the process lifetime.
_assistant, _thread = None, None

def create_assistant(client):
    """Create the "Math Tutor" assistant, log its JSON, and return it."""
    tutor = client.beta.assistants.create(
        model="gpt-4-1106-preview",
        name="Math Tutor",
        instructions="You are a personal math tutor. Answer questions briefly, in a sentence or less.",
    )
    show_json("assistant", tutor)
    return tutor
    
def create_thread(client):
    """Start a fresh conversation thread, log it, and return it."""
    new_thread = client.beta.threads.create()
    show_json("thread", new_thread)
    return new_thread

def create_message(client, thread, msg):
    """Append *msg* as a user message to *thread*; log and return it."""
    user_message = client.beta.threads.messages.create(
        thread_id=thread.id,
        role="user",
        content=msg,
    )
    show_json("message", user_message)
    return user_message

def create_run(client, assistant, thread):
    """Kick off a run of *assistant* against *thread*; log and return it."""
    new_run = client.beta.threads.runs.create(
        thread_id=thread.id,
        assistant_id=assistant.id,
    )
    show_json("run", new_run)
    return new_run

def wait_on_run(client, thread, run):
    """Block until *run* leaves a non-terminal state, then log and return it.

    Polls the Assistants API every 0.25 s while the run is "queued" or
    "in_progress". Returns the final run object (whatever terminal status
    it reached — completion, failure, etc. is the caller's concern).
    """
    # Membership test replaces the chained `== ... or == ...` comparisons.
    while run.status in ("queued", "in_progress"):
        # Sleep first: the status we already have is fresh, so an immediate
        # retrieve would be a redundant API call.
        time.sleep(0.25)
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
    show_json("run", run)
    return run

def list_messages(client, thread):
    """Fetch the message list for *thread*; log and return it."""
    history = client.beta.threads.messages.list(thread_id=thread.id)
    show_json("messages", history)
    return history
    
def extract_content_values(data):
    """Return the text of every 'text' content part across all messages.

    *data* is a messages page object: ``data.data`` is the message list,
    each message carries ``content`` parts, and only parts whose ``type``
    is 'text' contribute their ``text.value``.
    """
    return [
        part.text.value
        for message in data.data
        for part in message.content
        if part.type == 'text'
    ]

def show_json(label, obj):
    """Print *obj* as a labeled dict for debugging.

    *obj* must expose ``model_dump_json()`` (pydantic models, as returned by
    the OpenAI SDK). The original parameter was named ``str``, shadowing the
    builtin; renamed to ``label`` — every call site in this file passes it
    positionally, so callers are unaffected.
    """
    print(f"=> {label}\n{json.loads(obj.model_dump_json())}")

def chat(message, history, openai_api_key):
    """Gradio chat handler: send *message* to the assistant, return its reply.

    *history* and *openai_api_key* are part of the Gradio interface but are
    unused here — the module-level ``_client`` already carries the API key
    from the environment. The assistant and thread are created lazily on
    the first call and reused afterwards.
    """
    global _assistant, _thread

    if _assistant is None:
        _assistant = create_assistant(_client)

    if _thread is None:
        _thread = create_thread(_client)

    create_message(_client, _thread, message)

    # Runs are asynchronous: create one, then block until it finishes.
    run = create_run(_client, _assistant, _thread)
    wait_on_run(_client, _thread, run)

    # BUG FIX: the original referenced undefined names `client`/`thread`
    # here (NameError at runtime) instead of the module globals.
    messages = list_messages(_client, _thread)

    # The messages list is newest-first, so index 0 is the latest reply.
    return extract_content_values(messages)[0]
        
# Build and launch the Gradio chat UI at import time. `chat` is invoked once
# per user message; the additional password textbox is wired to chat()'s
# `openai_api_key` parameter (currently unused by chat() — the module-level
# client already reads the key from the environment).
# NOTE(review): the title/description advertise multiple assistants, but only
# the single "Math Tutor" assistant is created — confirm intent.
gr.ChatInterface(
    chat,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Question", container=False, scale=7),
    title="Multi-Assistant Demo",
    description="Ask AAA Assistant, BBB Assistant, and CCC Assistant any question",
    retry_btn=None,
    undo_btn=None,
    clear_btn="Clear",
    # Example row supplies both the chat message and the extra API-key input.
    examples=[["I need to solve the equation '2x + 10 = 7.5'. Can you help me?", "sk-<BringYourOwn>"]],
    cache_examples=False,
    additional_inputs=[
        gr.Textbox("sk-", label="OpenAI API Key", type = "password"),
    ],
).launch()