File size: 3,066 Bytes
cfa124c
 
 
5eae7c2
 
cfa124c
 
78a42dd
9064b67
a31fc9a
f2a5223
9064b67
764e0ce
 
 
 
 
 
 
 
 
 
d1f4761
766f004
764e0ce
 
 
5befa9c
50ddfc1
 
 
536b36d
37a1f88
 
 
 
5da2b8f
b12653c
1c935f0
b12653c
da3de4b
b12653c
188583e
37a1f88
5d62e53
 
9064b67
f2a5223
 
 
 
f422c9f
f2a5223
29d58d0
29028c0
7ddca6e
188583e
 
a2df0ee
c15560f
2de5e80
ac5b26b
 
1057e6a
5e0acb9
c40c2ce
d92a321
edd99e4
70bf8f7
18eed82
7fe3430
 
 
3f12f24
d0298be
1292850
9247b68
1292850
a39c626
 
8f10262
3f12f24
d8b592a
952a213
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
# Reference:
#
# https://vimeo.com/990334325/56b552bc7a
# https://platform.openai.com/playground/assistants
# https://cookbook.openai.com/examples/assistants_api_overview_python
# https://platform.openai.com/docs/api-reference/assistants/createAssistant
# https://platform.openai.com/docs/assistants/tools

import gradio as gr

import os

from assistants import (
    assistant,
    thread,
    create_assistant,
    load_assistant,
    create_thread,
    create_message,
    create_run,
    wait_on_run,
    get_run_steps,
    recurse_execute_tool_calls,
    get_messages,
    extract_content_values,
)

def chat(message, history):
    """Handle one chat turn for the Gradio ChatInterface.

    Parameters
    ----------
    message : str
        The user's new message; must be non-empty.
    history : list
        Gradio-managed conversation history; an empty history signals a
        fresh conversation, which triggers creation of a new thread.

    Returns
    -------
    str
        The assistant's latest text response, optionally followed by a
        download link when the run produced an image/file.

    Raises
    ------
    gr.Error
        If the message is empty, or the assistant cannot be loaded
        (typically missing/invalid API credentials).
    """
    if not message:
        raise gr.Error("Message is required.")

    global assistant, thread

    # On first run, create assistant and update assistant_id,
    # see https://platform.openai.com/playground/assistants.
    # On subsequent runs, load assistant.
    if assistant is None:
        try:
            #assistant = create_assistant()
            assistant = load_assistant()
        except Exception as e:
            # Chain the original error so the real cause (e.g. a missing
            # API key) is preserved in logs instead of being swallowed.
            raise gr.Error("Please clone and bring your own credentials.") from e

    # TODO: Use Gradio session to support multiple users
    if thread is None or not history:
        thread = create_thread()

    # Run the full Assistants API turn: post the message, start a run,
    # poll until done, execute any tool calls, then fetch the messages.
    create_message(thread, message)
    run = create_run(assistant, thread)
    run = wait_on_run(thread, run)
    run_steps = get_run_steps(thread, run)
    recurse_execute_tool_calls(thread, run, run_steps, 0)
    messages = get_messages(thread)
    text_values, image_values = extract_content_values(messages)

    download_link = ""

    # TODO: Handle multiple images and other file types
    if image_values:
        download_link = f"<hr>[Download](https://platform.openai.com/storage/files/{image_values[0]})"

    # text_values[0] is the most recent assistant message.
    return f"{text_values[0]}{download_link}"

# Build and launch the chat UI. `fn=chat` is invoked per user turn;
# examples are cached so they render instantly without hitting the API
# on every page load.
gr.ChatInterface(
        fn=chat,
        chatbot=gr.Chatbot(height=350),
        textbox=gr.Textbox(placeholder="Ask anything", container=False, scale=7),
        title="Python Coding Assistant",
        # Description is injected via env var so the deployment, not the
        # code, controls the user-facing blurb.
        description=os.environ.get("DESCRIPTION"),
        clear_btn="Clear",
        retry_btn=None,
        undo_btn=None,
        examples=[
                  ["Generate: Code to fine-tune model meta-llama/Meta-Llama-3.1-8B on dataset gretelai/synthetic_text_to_sql using QLoRA"],
                  ["Explain: r\"^(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])(?=.*[\\W]).{8,}$\""],
                  ["Fix: x = [5, 2, 1, 3, 4]; print(x.sort())"],
                  ["Optimize: x = []; for i in range(0, 10000): x.append(i)"],
                  ["1. Execute: First 25 Fibonacci numbers. 2. Show the code."],
                  ["1. Execute with tools: Create a plot showing stock gain QTD for NVDA and AMD, x-axis is \"Day\" and y-axis is \"Gain %\". 2. Show the code."],
                  ["1. Execute with tools: Get key announcements from latest OpenAI Dev Day. 2. Show the web references."]
                 ],
        cache_examples=True,
    ).launch()