import os
import time

import gradio as gr
from openai import OpenAI

from agents import create_triage_agent, create_sales_agent, create_issues_repairs_agent
from utils import show_json

###

current_agent, current_thread = None, None

###

def set_current_agent(agent):
    global current_agent
    current_agent = agent

def set_current_thread(thread):
    global current_thread
    current_thread = thread

def get_current_agent():
    return current_agent

def get_current_thread():
    return current_thread

###

def transfer_to_sales_agent():
    """Use for anything sales or buying related."""
    set_current_agent(sales_agent)

def transfer_to_issues_repairs_agent():
    """Use for issues, repairs, or refunds."""
    set_current_agent(issues_repairs_agent)

def transfer_to_triage_agent():
    """Call this if the user brings up a topic outside of your purview, including escalating to human."""
    set_current_agent(triage_agent)

###

def escalate_to_human(summary):
    """Only call this if explicitly asked to."""
    print("Escalating to human agent...")
    print("\n=== Escalation Report ===")
    print(f"Summary: {summary}")
    print("=========================\n")
    exit()

###

def execute_order(product, price: int):
    """Price should be in USD."""
    print("\n\n=== Order Summary ===")
    print(f"Product: {product}")
    print(f"Price: ${price}")
    print("=================\n")
    confirm = input("Confirm order? y/n: ").strip().lower()
    if confirm == "y":
        print("Order execution successful!")
        return "Success"
    else:
        print("Order cancelled!")
        return "User cancelled order."

def execute_refund(item_id, reason="not provided"):
    print("\n\n=== Refund Summary ===")
    print(f"Item ID: {item_id}")
    print(f"Reason: {reason}")
    print("=================\n")
    print("Refund execution successful!")
    return "Success"

def look_up_item(search_query):
    """Use to find item ID. Search query can be a description or keywords."""
    item_id = "item_132612938"
    print("Found item:", item_id)
    return item_id
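
###

# Hedged sketch (an assumption, not wired into the chat loop below): a simple
# name -> callable dispatch table for the tool functions defined above. It
# assumes the assistants created in agents.py declare function tools with these
# exact names, so a run that stops with status "requires_action" could be
# resolved by looking up the requested function here (see the commented
# handle_tool_calls sketch at the end of the file).
TOOL_MAP = {
    "transfer_to_sales_agent": transfer_to_sales_agent,
    "transfer_to_issues_repairs_agent": transfer_to_issues_repairs_agent,
    "transfer_to_triage_agent": transfer_to_triage_agent,
    "escalate_to_human": escalate_to_human,
    "execute_order": execute_order,
    "execute_refund": execute_refund,
    "look_up_item": look_up_item,
}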

###

#def create_assistant(client):
#    assistant = client.beta.assistants.create(
#        name="Math Tutor",
#        instructions="You are a personal math tutor. Answer questions briefly, in a sentence or less.",
#        model="gpt-4-1106-preview",
#        tools=[{"type": "code_interpreter"}],
#    )
#    show_json("assistant", assistant)
#    return assistant

def create_thread(client):
    thread = client.beta.threads.create()
    #show_json("thread", thread)
    return thread

def create_message(client, thread, msg):
    message = client.beta.threads.messages.create(
        role="user",
        thread_id=thread.id,
        content=msg,
    )
    #show_json("message", message)
    return message

def create_run(client, assistant, thread):
    run = client.beta.threads.runs.create(
        assistant_id=assistant.id,
        thread_id=thread.id,
    )
    #show_json("run", run)
    return run

def wait_on_run(client, thread, run):
    # Poll until the run leaves the queued/in_progress states.
    while run.status == "queued" or run.status == "in_progress":
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.25)
    #show_json("run", run)
    return run

def list_run_steps(client, thread, run):
    run_steps = client.beta.threads.runs.steps.list(
        thread_id=thread.id,
        run_id=run.id,
        order="asc",
    )
    for step in run_steps.data:
        step_details = step.step_details
        show_json("step_details", step_details)
    return run_steps

def list_messages(client, thread):
    messages = client.beta.threads.messages.list(
        thread_id=thread.id,
    )
    #show_json("messages", messages)
    return messages

def extract_content_values(data):
    content_values = []
    for item in data.data:
        for content in item.content:
            if content.type == "text":
                content_values.append(content.text.value)
    return content_values

_client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

###

triage_agent = create_triage_agent(_client)
sales_agent = create_sales_agent(_client)
issues_repairs_agent = create_issues_repairs_agent(_client)

set_current_agent(triage_agent)

triage_thread = create_thread(_client)
sales_thread = create_thread(_client)
issues_repairs_thread = create_thread(_client)

set_current_thread(triage_thread)

###

def chat(message, history, openai_api_key):
    # Run the user's message against whichever agent and thread are currently active.
    agent = get_current_agent()
    thread = get_current_thread()
    create_message(_client, thread, message)
    run = create_run(_client, agent, thread)
    run = wait_on_run(_client, thread, run)
    list_run_steps(_client, thread, run)
    messages = list_messages(_client, thread)
    # Messages are returned newest first, so index 0 is the latest reply.
    return extract_content_values(messages)[0]

gr.ChatInterface(
    chat,
    chatbot=gr.Chatbot(height=300),
    textbox=gr.Textbox(placeholder="Question", container=False, scale=7),
    title="Multi-Agent Orchestration",
    description="Demo using hand-off pattern: triage agent, sales agent, and issues & repairs agent",
    retry_btn=None,
    undo_btn=None,
    clear_btn="Clear",
    #examples=[["Generate the first 10 Fibonacci numbers with code.", "sk-"]],
    #cache_examples=False,
    additional_inputs=[
        gr.Textbox("sk-", label="OpenAI API Key", type="password"),
    ],
).launch()
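
###

# Hedged sketch (an assumption, commented out and not called anywhere above):
# chat() returns as soon as a run stops being queued/in_progress, so a run that
# ends in status "requires_action" (i.e. the model requested one of the function
# tools) is never resolved. One way to close that loop with the Assistants API
# is to dispatch each requested tool call through TOOL_MAP and post the results
# back with submit_tool_outputs, then keep polling:
#
# import json
#
# def handle_tool_calls(client, thread, run):
#     while run.status == "requires_action":
#         tool_outputs = []
#         for tool_call in run.required_action.submit_tool_outputs.tool_calls:
#             fn = TOOL_MAP[tool_call.function.name]
#             args = json.loads(tool_call.function.arguments or "{}")
#             result = fn(**args)
#             tool_outputs.append({
#                 "tool_call_id": tool_call.id,
#                 "output": str(result),
#             })
#         run = client.beta.threads.runs.submit_tool_outputs(
#             thread_id=thread.id,
#             run_id=run.id,
#             tool_outputs=tool_outputs,
#         )
#         run = wait_on_run(client, thread, run)
#     return run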