import gradio as gr
import json, os, time
from openai import OpenAI
from utils import function_to_schema, show_json
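# Multi-agent hand-off demo for the OpenAI Assistants API: a triage assistant
# routes the conversation to a sales assistant or an issues & repairs assistant
# by calling "transfer" tools that swap the globally tracked current agent.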
# Tools
triage_agent, sales_agent, issues_repairs_agent = None, None, None

def transfer_to_triage_agent():
    """Call this if the user brings up a topic outside of your purview,
    including escalating to human."""
    set_current_agent(triage_agent)

def transfer_to_sales_agent():
    """Use for anything sales or buying related."""
    set_current_agent(sales_agent)

def transfer_to_issues_repairs_agent():
    """Use for issues, repairs, or refunds."""
    set_current_agent(issues_repairs_agent)
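# Each transfer_* function is exposed to the model as a tool; calling it only
# changes which assistant handles the next run, while the same conversation
# thread is reused, so context carries across the hand-off.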
#
def escalate_to_human(summary):
    """Only call this if explicitly asked to."""
    print("Escalating to human agent...")
    print("\n=== Escalation Report ===")
    print(f"Summary: {summary}")
    print("=========================\n")
    exit()
#
def execute_order(product, price: int):
    """Price should be in USD."""
    print("\n\n=== Order Summary ===")
    print(f"Product: {product}")
    print(f"Price: ${price}")
    print("=================\n")
    confirm = input("Confirm order? y/n: ").strip().lower()
    if confirm == "y":
        print("Order execution successful!")
        return "Success"
    else:
        print("Order cancelled!")
        return "User cancelled order."
def look_up_item(search_query):
    """Use to find item ID.
    Search query can be a description or keywords."""
    item_id = "item_132612938"
    print("Found item:", item_id)
    return item_id

def execute_refund(item_id, reason="not provided"):
    print("\n\n=== Refund Summary ===")
    print(f"Item ID: {item_id}")
    print(f"Reason: {reason}")
    print("=================\n")
    print("Refund execution successful!")
    return "Success"
# Agents
MODEL = "gpt-4o-mini"

def create_triage_agent(client):
    return client.beta.assistants.create(
        name="Triage Agent",
        instructions=(
            "You are a customer service bot for ACME Inc. "
            "Introduce yourself. Always be very brief. "
            "Gather information to direct the customer to the right department. "
            "But make your questions subtle and natural."
        ),
        model=MODEL,
        tools=[
            {"type": "function", "function": function_to_schema(transfer_to_sales_agent)},
            {"type": "function", "function": function_to_schema(transfer_to_issues_repairs_agent)},
            {"type": "function", "function": function_to_schema(escalate_to_human)},
        ],
    )
def create_sales_agent(client):
    return client.beta.assistants.create(
        name="Sales Agent",
        instructions=(
            "You are a sales agent for ACME Inc. "
            "Always answer in a sentence or less. "
            "Follow the following routine with the user:\n"
            "1. Ask them about any problems in their life related to catching roadrunners.\n"
            "2. Casually mention one of ACME's crazy made-up products can help.\n"
            "   - Don't mention price.\n"
            "3. Once the user is bought in, drop a ridiculous price.\n"
            "4. Only after everything, and if the user says yes, "
            "tell them a crazy caveat and execute their order.\n"
        ),
        model=MODEL,
        tools=[
            {"type": "function", "function": function_to_schema(execute_order)},
            {"type": "function", "function": function_to_schema(transfer_to_triage_agent)},
        ],
    )
def create_issues_repairs_agent(client):
    return client.beta.assistants.create(
        name="Issues and Repairs Agent",
        instructions=(
            "You are a customer support agent for ACME Inc. "
            "Always answer in a sentence or less. "
            "Follow the following routine with the user:\n"
            "1. First, ask probing questions and understand the user's problem deeper.\n"
            "   - unless the user has already provided a reason.\n"
            "2. Propose a fix (make one up).\n"
            "3. ONLY if not satisfied, offer a refund.\n"
            "4. If accepted, search for the ID and then execute refund."
        ),
        model=MODEL,
        tools=[
            {"type": "function", "function": function_to_schema(look_up_item)},
            {"type": "function", "function": function_to_schema(execute_refund)},
            {"type": "function", "function": function_to_schema(transfer_to_triage_agent)},
        ],
    )
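# The helpers below wrap the OpenAI Assistants API (beta): create a thread,
# append a user message, start a run for the current assistant, and poll until
# the run leaves the queued/in_progress states.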
#
def create_thread(client):
    thread = client.beta.threads.create()
    # show_json("thread", thread)
    return thread

def create_message(client, thread, msg):
    message = client.beta.threads.messages.create(
        role="user",
        thread_id=thread.id,
        content=msg,
    )
    # show_json("message", message)
    return message

def create_run(client, assistant, thread):
    run = client.beta.threads.runs.create(
        assistant_id=assistant.id,
        thread_id=thread.id,
    )
    # show_json("run", run)
    return run
def wait_on_run(client, thread, run):
    while run.status == "queued" or run.status == "in_progress":
        run = client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        time.sleep(0.25)
    # show_json("run", run)
    return run
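# The run is polled every 0.25 s until it is no longer queued or in progress;
# it may finish as "completed" or stop at "requires_action" when the assistant
# wants a tool call.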
def execute_tool_call(tools, tool_call):
    name = tool_call.function.name
    args = json.loads(tool_call.function.arguments)
    print(f"{name}, {args}")
    return tools[name](**args)
def list_run_steps(client, thread, run):
    run_steps = client.beta.threads.runs.steps.list(
        thread_id=thread.id,
        run_id=run.id,
        order="asc",
    )
    for step in run_steps.data:
        step_details = step.step_details
        show_json("step_details", step_details)
        if not hasattr(step_details, "tool_calls"):
            break
        # The assistant's tools are API schema objects, not Python callables, so
        # map each declared tool name back to the module-level function of the
        # same name before dispatching.
        tools = {
            tool.function.name: globals()[tool.function.name]
            for tool in get_current_agent().tools
        }
        for tool_call in step_details.tool_calls:
            result = execute_tool_call(tools, tool_call)
    return run_steps
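# Caveat: tool calls found in the run steps are executed locally (which is how
# the hand-off side effect happens), but their outputs are not submitted back
# to the run via submit_tool_outputs, so such a run yields no follow-up reply.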
def list_messages(client, thread):
    messages = client.beta.threads.messages.list(
        thread_id=thread.id
    )
    # show_json("messages", messages)
    return messages

def extract_content_values(data):
    content_values = []
    for item in data.data:
        for content in item.content:
            if content.type == 'text':
                content_values.append(content.text.value)
    return content_values
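# messages.list returns messages newest-first by default, so index 0 of the
# extracted values in chat() below is the most recent message in the thread.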
#
current_agent, current_thread = None, None

def set_current_agent(agent):
    global current_agent
    current_agent = agent

def set_current_thread(thread):
    global current_thread
    current_thread = thread

def get_current_agent():
    global current_agent
    return current_agent

def get_current_thread():
    global current_thread
    return current_thread
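# Module-level state: the active assistant and thread for this single-session
# demo; a multi-user deployment would need per-session state instead.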
#
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
triage_agent = create_triage_agent(client)
sales_agent = create_sales_agent(client)
issues_repairs_agent = create_issues_repairs_agent(client)
set_current_agent(triage_agent)
triage_thread = create_thread(client)
sales_thread = create_thread(client)
issues_repairs_thread = create_thread(client)
set_current_thread(triage_thread)
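# Assistants and threads are created once at startup. Only triage_thread is
# ever made current, so all three agents end up sharing that one thread.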
def chat(message, history, openai_api_key=None):
    global client
    assistant = get_current_agent()
    # show_json("Current Agent", assistant)
    thread = get_current_thread()
    # show_json("Current Thread", thread)
    create_message(client, thread, message)
    # async
    run = create_run(client, assistant, thread)
    run = wait_on_run(client, thread, run)
    list_run_steps(client, thread, run)
    messages = list_messages(client, thread)
    return extract_content_values(messages)[0]
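# Gradio's ChatInterface calls chat(message, history, *additional_inputs);
# openai_api_key defaults to None so the callback also works while the API-key
# textbox below stays commented out.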
gr.ChatInterface(
    chat,
    chatbot=gr.Chatbot(),
    textbox=gr.Textbox(),
    title="Multi-Agent Orchestration",
    description="Demo using hand-off pattern: triage agent, sales agent, and issues & repairs agent",
    clear_btn=None,
    retry_btn=None,
    undo_btn=None,
    # examples=[["Generate the first 10 Fibonacci numbers with code.", "sk-<BringYourOwn>"]],
    # cache_examples=False,
    # additional_inputs=[
    #     gr.Textbox("sk-", label="OpenAI API Key", type="password"),
    # ],
).launch()