# TODO:
#
# 1. Multi-user thread (see the hedged sketch after create_thread() below)
# 2. Tools: Code interpreter - https://platform.openai.com/docs/assistants/tools/code-interpreter - Matplotlib chart (see the hedged sketch after extract_content_values() below)
# 3. Tools: Function calling - https://platform.openai.com/docs/assistants/tools/function-calling (see the hedged sketch after create_assistant() below)

# Reference:
#
# https://vimeo.com/990334325/56b552bc7a
# https://platform.openai.com/playground/assistants
# https://cookbook.openai.com/examples/assistants_api_overview_python
# https://platform.openai.com/docs/api-reference/assistants/createAssistant
# https://platform.openai.com/docs/assistants/tools

import gradio as gr
import os, time

from openai import OpenAI
from utils import show_json

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

assistant, thread = None, None

def create_assistant(client):
    assistant = client.beta.assistants.create(
        name="Python Code Generator",
        instructions=(
            "You are a Python programming language expert that "
            "generates Pylint-compliant code and explains it. "
            "Only execute code when explicitly asked to."
        ),
        model="gpt-4o",
        tools=[
            {"type": "code_interpreter"},
        ],
    )
    
    show_json("assistant", assistant)
    
    return assistant
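
# Hedged sketch for TODO item 3 (function calling), not yet wired into chat()
# below. The get_stock_price tool schema and the hard-coded "123.45" output are
# illustrative assumptions; only the API calls themselves follow the Assistants
# function-calling flow (a run that stops with status "requires_action" resumes
# once submit_tool_outputs() is called).
def create_assistant_with_function_tool(client):
    assistant = client.beta.assistants.create(
        name="Python Code Generator",
        instructions="You are a Python programming language expert.",
        model="gpt-4o",
        tools=[
            {"type": "code_interpreter"},
            {
                "type": "function",
                "function": {
                    "name": "get_stock_price",  # hypothetical function
                    "description": "Get the latest price for a stock ticker.",
                    "parameters": {
                        "type": "object",
                        "properties": {"ticker": {"type": "string"}},
                        "required": ["ticker"],
                    },
                },
            },
        ],
    )

    show_json("assistant", assistant)

    return assistant

def submit_function_outputs(client, thread, run):
    # One output per requested tool call; the placeholder value stands in for
    # a real get_stock_price() implementation.
    tool_outputs = [
        {"tool_call_id": tool_call.id, "output": "123.45"}
        for tool_call in run.required_action.submit_tool_outputs.tool_calls
    ]

    run = client.beta.threads.runs.submit_tool_outputs(
        thread_id=thread.id,
        run_id=run.id,
        tool_outputs=tool_outputs,
    )

    show_json("run", run)

    return run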

def load_assistant(client):
    assistant = client.beta.assistants.retrieve("asst_kjO8BRHMREWBlY0LQ7WECfeD")

    show_json("assistant", assistant)
    
    return assistant

def create_thread(client):
    thread = client.beta.threads.create()
    
    show_json("thread", thread)
    
    return thread
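
# Hedged sketch for TODO item 1 (multi-user threads), not used by chat() below.
# It assumes one OpenAI thread per Gradio session, keyed by an opaque session id
# such as gr.Request.session_hash; the dict name and key choice are assumptions.
session_threads = {}

def get_or_create_thread(client, session_id):
    if session_id not in session_threads:
        session_threads[session_id] = create_thread(client)

    return session_threads[session_id]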

def create_message(client, thread, msg):        
    message = client.beta.threads.messages.create(
        role="user",
        thread_id=thread.id,
        content=msg["text"],
    )
    
    show_json("message", message)
    
    return message

def create_run(client, assistant, thread):
    run = client.beta.threads.runs.create(
        assistant_id=assistant.id,
        thread_id=thread.id,
    )
    
    show_json("run", run)
    
    return run

def wait_on_run(client, thread, run):
    while run.status in ("queued", "in_progress"):
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
    
        time.sleep(0.25)
    
    show_json("run", run)
    
    return run

def get_run_steps(client, thread, run):
    run_steps = client.beta.threads.runs.steps.list(
        thread_id=thread.id,
        run_id=run.id,
        order="asc",
    )

    show_json("run_steps", run_steps)
    
    return run_steps

def get_run_step_details(run_steps):
    for step in run_steps.data:
        step_details = step.step_details
        
        show_json("step_details", step_details)

def get_messages(client, thread):
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )
    
    show_json("messages", messages)
    
    return messages
    
def extract_content_values(data):
    content_values = []
    
    for item in data.data:
        for content in item.content:
            if content.type == "text":
                content_values.append(content.text.value)
    
    return content_values
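
# Hedged sketch for TODO item 2 (code interpreter chart output), not called by
# chat() below. When the code interpreter renders a Matplotlib chart, the
# assistant message carries an "image_file" content part; saving it under
# "<file_id>.png" is an assumption made for illustration.
def extract_image_files(client, data):
    image_paths = []

    for item in data.data:
        for content in item.content:
            if content.type == "image_file":
                file_id = content.image_file.file_id

                path = f"{file_id}.png"

                with open(path, "wb") as file:
                    file.write(client.files.content(file_id).read())

                image_paths.append(path)

    return image_paths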

def chat(message, history):
    if not message["text"]:
        raise gr.Error("Message is required.")
    
    global client, assistant, thread     
    
    if assistant is None:
        assistant = load_assistant(client)
    
    if thread is None or len(history) == 0:
        thread = create_thread(client)
        
    create_message(client, thread, message)

    run = create_run(client, assistant, thread)
    run = wait_on_run(client, thread, run)

    run_steps = get_run_steps(client, thread, run)

    get_run_step_details(run_steps)
    
    messages = get_messages(client, thread)

    content_values = extract_content_values(messages)

    print("###")
    print(content_values[0])
    print("###")
    
    return content_values[0]

gr.ChatInterface(
        fn=chat,
        chatbot=gr.Chatbot(height=350),
        textbox=gr.MultimodalTextbox(placeholder="Ask anything", container=False, scale=7),
        title="Python Code Generator",
        description="The assistant can generate, explain, fix, optimize, document, and test code, and generally help with coding tasks. It can also execute code.",
        clear_btn="Clear",
        retry_btn=None,
        undo_btn=None,
        examples=[
                  [{"text": "Generate: Python code to fine-tune model meta-llama/Meta-Llama-3.1-8B on dataset gretelai/synthetic_text_to_sql using QLoRA", "files": []}],
                  [{"text": "Explain: r\"^(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])(?=.*[\\W]).{8,}$\"", "files": []}],
                  [{"text": "Fix: x = [5, 2, 1, 3, 4]; print(x.sort())", "files": []}],
                  [{"text": "Optimize: x = []; for i in range(0, 10000): x.append(i)", "files": []}],
                  [{"text": "Execute: First 25 Fibbonaci numbers", "files": []}],
                  [{"text": "Execute: Chart showing stock gain YTD for NVDA, MSFT, AAPL, and GOOG, x-axis is 'Day' and y-axis is 'YTD Gain %'", "files": []}],
                 ],
        cache_examples=False,
        multimodal=True,
    ).launch()