"""Gradio chat front end for a "Python Code Generator" built on the OpenAI Assistants API."""

import os
import time

import gradio as gr
from openai import OpenAI

from utils import show_json

_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
_assistant, _thread = None, None


def create_assistant(client):
    """Create the assistant once; it is reused across all chat turns."""
    assistant = client.beta.assistants.create(
        name="Python Code Generator",
        instructions=(
            "You are a Python programming language expert that "
            "generates Pylint-compliant code and explains it. "
            "Only execute code when explicitly asked to."
        ),
        model="gpt-4o",
        tools=[
            {"type": "code_interpreter"},
            {"type": "file_search"},  # was {"type": "retrieval"} in Assistants API v1; renamed in v2
        ],
    )
    show_json("assistant", assistant)
    return assistant


def create_thread(client):
    thread = client.beta.threads.create()
    show_json("thread", thread)
    return thread


def create_message(client, thread, msg):
    message = client.beta.threads.messages.create(
        role="user",
        thread_id=thread.id,
        content=msg,
    )
    show_json("message", message)
    return message


def create_run(client, assistant, thread):
    run = client.beta.threads.runs.create(
        assistant_id=assistant.id,
        thread_id=thread.id,
    )
    show_json("run", run)
    return run


def wait_on_run(client, thread, run):
    """Poll the run until it leaves the queued/in_progress states."""
    while run.status in ("queued", "in_progress"):
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.25)
    show_json("run", run)
    return run


def list_run_steps(client, thread, run):
    run_steps = client.beta.threads.runs.steps.list(
        thread_id=thread.id,
        run_id=run.id,
        order="asc",
    )
    for step in run_steps.data:
        show_json("step_details", step.step_details)
    return run_steps


def list_messages(client, thread):
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    show_json("messages", messages)
    return messages


def extract_content_values(data):
    """Collect the text parts of every message; the newest message comes first."""
    content_values = []
    for item in data.data:
        for content in item.content:
            if content.type == "text":
                content_values.append(content.text.value)
    return content_values


def chat(message, history, openai_api_key):
    global _client, _assistant, _thread
    # With multimodal=True Gradio passes the message as a dict
    # {"text": ..., "files": [...]}; only the text is forwarded to the assistant.
    if isinstance(message, dict):
        message = message["text"]
    # Prefer a key typed into the UI over OPENAI_API_KEY; the "sk-" default is
    # treated as "no key provided".
    if openai_api_key and openai_api_key != "sk-":
        _client = OpenAI(api_key=openai_api_key)
    if _assistant is None:
        _assistant = create_assistant(_client)
    if _thread is None:
        _thread = create_thread(_client)
    create_message(_client, _thread, message)
    run = create_run(_client, _assistant, _thread)
    run = wait_on_run(_client, _thread, run)
    list_run_steps(_client, _thread, run)
    messages = list_messages(_client, _thread)
    content_values = extract_content_values(messages)
    return content_values[0]  # latest (assistant) reply


def vote(data: gr.LikeData):
    """Placeholder like/dislike handler; attach it with chatbot.like(vote) inside a gr.Blocks context."""
    print("voted")


gr.ChatInterface(
    fn=chat,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Ask anything", container=False, scale=7),
    title="Python Code Generator",
    description="Generate, explain, fix, optimize, document, test, help, ... Can execute code when asked to.",
    clear_btn="Clear",
    retry_btn="Retry",
    undo_btn="Undo",
    multimodal=True,
    examples=[
        ["Generate: NumPy/Pandas/Matplotlib & yfinance trading app", "sk-"],
        ["Explain: r'^(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])(?=.*[\\W]).{8,}$'", "sk-"],
        ["Fix: x = [5, 2, 1, 3, 4]; print(x.sort())", "sk-"],
        ["Optimize: x = []; for i in range(0, 10000): x.append(i)", "sk-"],
        ["Execute: Code to generate the first 20 Fibonacci numbers", "sk-"],
    ],
    cache_examples=False,
    additional_inputs=[
        gr.Textbox("sk-", label="OpenAI API Key", type="password"),
    ],
).launch()
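
# Usage sketch (assumptions, not part of the original script): the file is saved as
# app.py and the sibling `utils` module provides show_json(label, obj) for debug
# logging. An API key must be available either in the environment or via the UI field.
#
#   export OPENAI_API_KEY="sk-..."   # read once when the module-level client is built
#   python app.py                    # Gradio serves the chat UI, by default at http://127.0.0.1:7860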