callanwu committed on
Commit
4ecdaad
1 Parent(s): 24ad7e0

add sop_generation

Browse files
.gitattributes copy ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ __pycache__
2
+ logs
Action/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .base_action import Action
Action/base_action.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from Memory import Memory
2
+ from utils import extract
3
+ import os
4
class Action:
    """
    The basic action unit of an agent: wraps one (possibly streamed)
    LLM response together with the speaker's name and role.
    """

    def __init__(self, **kwargs):
        # Sensible defaults; any keyword argument overrides these attributes.
        self.response = None
        self.is_user = False
        self.res_dict = {}
        self.name = ""
        self.role = ""
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def process(self):
        """
        Process the action: consume the streamed response, strip echoed
        speaker tags, persist any generated code file, and wrap the text.
        Return: memory (Memory)
        """
        send_name = self.name
        send_role = self.role
        # The response may be a generator of chunks; flatten it to one string.
        content = "".join(chunk for chunk in self.response)

        # Remove third-person speaker echoes ("<name>:") from the dialogue,
        # keeping only the text after the last occurrence.
        tag = f"{send_name}:"
        while tag in content:
            cut = content.index(tag) + len(tag)
            content = content[cut:]

        if not self.is_user:
            print(f"{send_name}({send_role}):{content}")

        # Software-generation scenario: a <title> tag signals an emitted
        # code file that should be written to disk.
        if "<title>" in content:
            title = extract(content, "title")
            title = "main.py" if title == "" else title
            python = extract(content, "python")
            os.makedirs("output_code", exist_ok=True)
            file_name = "output_code/" + title
            with open(file_name, "w", encoding="utf-8") as f:
                f.write(python)

        return Memory(send_role, send_name, content)
Agent/Agent.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """LLM autonoumous agent"""
17
+ from LLM.base_LLM import *
18
+ from Component import *
19
+ from Action import Action
20
+ from Prompt import *
21
+
22
# HTTP response headers for streaming agent output as server-sent events;
# "X-Accel-Buffering: no" asks buffering reverse proxies (e.g. nginx) to
# pass chunks through immediately instead of batching them.
headers = {
    "Content-Type": "text/event-stream",
    "Cache-Control": "no-cache",
    "X-Accel-Buffering": "no",
}
27
+
28
+
29
+
30
+
31
class Agent:
    """
    Auto agent, input the JSON of SOP.

    One Agent plays one role per SOP state, owns one LLM per state, and
    produces Action objects when it is its turn to speak.
    """

    # Agent should have args: agents, states
    def __init__(self, name, agent_state_roles, **kwargs) -> None:
        # Mapping: state_name -> role this agent plays in that state.
        self.state_roles = agent_state_roles
        self.name = name

        # Output-style description injected into prompts.
        self.style = kwargs["style"]
        # Mapping: state_name -> LLM instance for that state.
        self.LLMs = kwargs["LLMs"]
        # LLM of the currently active state (set in compile()).
        self.LLM = None
        # True when a human supplies this agent's lines instead of an LLM.
        self.is_user = kwargs["is_user"]
        # Mapping: state_name -> {"is_begin": bool, "begin_query": str}.
        self.begins = kwargs["begins"] if "begins" in kwargs else False
        self.current_role = ""
        self.long_term_memory = []   # full chat history (list of message dicts)
        self.short_term_memory = ""  # rolling summary of older history
        self.current_state = None
        self.first_speak = True
        self.environment = None      # injected later by the runtime

    @classmethod
    def from_config(cls, config_path):
        """
        Initialize agents based on json file
        Return:
        agents(dict) : key:agent_name;value:class(Agent)
        names_to_roles(dict) : key:state_name value:(dict; (key:agent_name ; value:agent_role))
        roles_to_names(dict) : key:state_name value:(dict; (key:agent_role ; value:agent_name))
        """
        with open(config_path) as f:
            config = json.load(f)

        roles_to_names = {}
        names_to_roles = {}
        agents = {}
        # Names listed in the User_Names env var become human-driven agents.
        user_names = json.loads(os.environ["User_Names"]) if "User_Names" in os.environ else []
        for agent_name, agent_dict in config["agents"].items():
            agent_state_roles = {}
            agent_LLMs = {}
            agent_begins = {}
            for state_name, agent_role in agent_dict["roles"].items():

                agent_begins[state_name] = {}

                if state_name not in roles_to_names:
                    roles_to_names[state_name] = {}
                if state_name not in names_to_roles:
                    names_to_roles[state_name] = {}
                roles_to_names[state_name][agent_role] = agent_name
                names_to_roles[state_name][agent_name] = agent_role
                agent_state_roles[state_name] = agent_role
                current_state = config["states"][state_name]
                # Default the state's role list to the keys of agent_states.
                current_state["roles"] = list(current_state["agent_states"].keys()) if "roles" not in current_state else current_state["roles"]
                current_state_begin_role = current_state["begin_role"] if "begin_role" in current_state else current_state["roles"][0]
                # This agent opens the state only when an explicit begin_role matches its role.
                agent_begins[state_name]["is_begin"] = current_state_begin_role==agent_role if "begin_role" in current_state else False
                agent_begins[state_name]["begin_query"] = current_state["begin_query"] if "begin_query" in current_state else " "
                # One LLM per (agent, state), logging under logs/<agent_name>.
                agent_LLMs[state_name] = init_LLM("logs"+os.sep+f"{agent_name}",**current_state["agent_states"][agent_role])
            agents[agent_name] = cls(
                agent_name,
                agent_state_roles,
                LLMs=agent_LLMs,
                is_user=agent_name in user_names,
                style = agent_dict["style"],
                begins = agent_begins
            )
        # NOTE(review): guards the two-agent (agent + user) setup — the root
        # state must open with a scripted agent line; confirm intent of "!= 2".
        assert len(config["agents"].keys()) != 2 or (roles_to_names[config["root"]][config["states"][config["root"]]["begin_role"]] not in user_names and "begin_query" in config["states"][config["root"]]),"In a single-agent scenario, there must be an opening statement and it must be the agent"
        return agents, roles_to_names, names_to_roles

    def step(self, current_state,input=""):
        """
        return actions by current state and environment
        `input` (shadows the builtin; kept for interface compatibility) is the
        human-typed text used only when this agent is user-driven.
        Return: action(Action)
        """

        current_state.chat_nums +=1
        # Capture the begin flags, then clear them so they fire only once.
        state_begin = current_state.is_begin
        agent_begin = self.begins[current_state.name]["is_begin"]
        self.begins[current_state.name]["is_begin"] = False
        current_state.is_begin = False
        environment = self.environment

        self.current_state = current_state
        # First update the information according to the current environment

        response = " "
        res_dict = {}

        if self.is_user:
            response = f"{self.name}:{input}"
        else:
            # Pull any unseen conversation into this agent's own memory first.
            if len(environment.shared_memory["long_term_memory"])>0:
                current_history = self.observe()
                self.long_term_memory.append(current_history)
            if agent_begin:
                # Emit the scripted opening line as a char generator so it
                # looks like a streamed LLM response downstream.
                response = (char for char in self.begins[current_state.name]["begin_query"])
            else:
                response,res_dict = self.act()


        action_dict = {
            "response": response,
            "res_dict": res_dict,
            "role": self.state_roles[current_state.name],
            "name": self.name,
            "state_begin" : state_begin,
            "agent_begin" : agent_begin,
            "is_user" : self.is_user
        }
        return Action(**action_dict)

    def act(self):
        """
        Query this state's LLM with the compiled prompts.
        Return: (streamed response, res_dict from tool components)
        """
        current_state = self.current_state
        chat_history = self.long_term_memory
        current_LLM = self.LLMs[current_state.name]

        system_prompt, last_prompt, res_dict = self.compile()

        response = current_LLM.get_response(
            chat_history, system_prompt, last_prompt, stream=True
        )
        return response,res_dict

    def update_memory(self, memory):
        """
        Append *memory* to long-term memory and, once enough new turns have
        accumulated, refresh the short-term summary via the state's LLM.
        """
        self.long_term_memory.append(
            {"role": "assistant", "content": memory.content}
        )

        # NOTE(review): eval() on an env var — int(os.environ[...]) would be
        # safer; left as-is pending a project-wide change.
        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        environment = self.environment
        # NOTE(review): "competive" (sic) must match the value the environment
        # sets for environment_type — likely a project-wide misspelling of
        # "competitive"; confirm before changing.
        current_chat_history_idx = environment.current_chat_history_idx if environment.environment_type == "competive" else 0

        current_long_term_memory = environment.shared_memory["long_term_memory"][current_chat_history_idx:]
        last_conversation_idx = environment._get_agent_last_conversation_idx(self,current_long_term_memory)
        if len(current_long_term_memory)-last_conversation_idx >= MAX_CHAT_HISTORY:
            current_state = self.current_state
            current_role = self.state_roles[current_state.name]
            current_component_dict = current_state.components[current_role]

            # get chat history from new conversation
            conversations = environment._get_agent_new_memory(self,current_long_term_memory)

            # get summary; the eval'd template below reads the locals
            # summary_prompt / conversations in its f-string.
            summary_prompt = (
                current_state.summary_prompt[current_role]
                if current_state.summary_prompt
                else f"""your name is {self.name},your role is{current_component_dict["style"].role},your task is {current_component_dict["task"].task}.\n"""
            )
            summary_prompt =eval(Agent_summary_system_prompt)
            summary = self.LLMs[current_state.name].get_response(None, summary_prompt,stream = False)
            self.short_term_memory = summary


    def compile(self):
        """
        get prompt from state depend on your role
        Return:
        system_prompt: system prompt for the agent's LLM
        last_prompt: last prompt for the agent's LLM
        res_dict(dict): other returns from tool components, e.g. search engine results
        """
        current_state = self.current_state
        self.current_roles = self.state_roles[current_state.name]
        current_state_name = current_state.name
        self.LLM = self.LLMs[current_state_name]
        components = current_state.components[self.state_roles[current_state_name]]

        system_prompt = self.current_state.environment_prompt
        last_prompt = ""

        res_dict = {}
        # Output/Last components extend the final prompt; other prompt
        # components extend the system prompt; tool components may do both.
        for component in components.values():
            if isinstance(component, (OutputComponent, LastComponent)):
                last_prompt = last_prompt + "\n" + component.get_prompt(self)
            elif isinstance(component, PromptComponent):
                system_prompt = (
                    system_prompt + "\n" + component.get_prompt(self)
                )
            elif isinstance(component, ToolComponent):
                response = component.func(self)
                if "prompt" in response and response["prompt"]:
                    last_prompt = last_prompt + "\n" + response["prompt"]
                res_dict.update(response)

        # The eval'd prompt templates below read the locals `name` and `query`.
        name = self.name
        query = self.environment.shared_memory["long_term_memory"][-1] if len(self.environment.shared_memory["long_term_memory"]) else ""
        last_prompt = eval(Agent_last_prompt)
        system_prompt = eval(Agent_system_prompt)
        return system_prompt, last_prompt, res_dict


    def observe(self):
        """
        Update one's own memory according to the current environment, including: updating short-term memory; updating long-term memory
        """
        return self.environment._observe(self)


    def generate_sop(self):
        # Placeholder: SOP generation is not implemented yet.
        pass

    def reflection(self):
        # Placeholder: self-reflection is not implemented yet.
        pass
Agent/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .Agent import Agent
Component/ExtraComponent.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .ToolComponent import ToolComponent
2
+ import json
3
+ from utils import flatten_dict,get_embedding,matching_category,search_with_api,limit_keys,limit_values
4
+ import os
5
+
6
+
7
class CategoryRequirementsComponent(ToolComponent):
    """
    Shopping-assistant tool: extracts the product category and requirements
    from the conversation via LLM function-calling, matches the category
    against a product-information dataset by embedding similarity, and
    builds a (Chinese) recommendation prompt plus a "recommend" payload.
    """

    def __init__(self, information_path):
        super().__init__()
        self.information_dataset = []
        self.leaf_name = []
        # Load every product-info JSON file; "cat_leaf_name" entries that
        # contain "/" are split into individual names plus the combined name.
        for toy_path in information_path:
            with open(toy_path, encoding="utf-8") as json_file:
                data = json.load(json_file)
            for d in data:
                if "/" in d["cat_leaf_name"]:
                    leaf_names = d["cat_leaf_name"].split("/") + [d["cat_leaf_name"]]
                else:
                    leaf_names = [d["cat_leaf_name"]]
                for name in leaf_names:
                    self.leaf_name.append(name)
                    new_d = d.copy()
                    new_d["cat_leaf_name"] = name
                    new_d["information"] = flatten_dict(new_d["information"])
                    self.information_dataset.append(new_d)

        # Precomputed embeddings of all leaf category names for matching.
        self.target_embbeding = get_embedding(
            self.leaf_name
        )

    def search_information(self, category, information_dataset):
        """
        Return the information dict of the first dataset entry whose leaf
        name equals *category*, dropping empty values and the "相关分类"
        ("related categories") key; empty dict when nothing matches.
        """
        knowledge = {}
        for d in information_dataset:
            if category == d["cat_leaf_name"]:
                knowledge = d["information"]
                knowledge = {
                    key: value
                    for key, value in knowledge.items()
                    if (value and key != "相关分类")
                }
                break
        return knowledge

    def func(self, agent):
        """
        Run the extraction + matching pipeline for *agent*.
        Returns {} when no category/requirements could be extracted, else a
        dict with "prompt" (and "recommend" when a close category match and
        API results exist).
        """
        prompt = ""
        messages = agent.long_term_memory
        outputdict = {}
        # Function-calling schema (descriptions are user-facing Chinese text).
        functions = [
            {
                "name": "search_information",
                "description": "根据用户所需要购买商品的种类跟用户的需求去寻找用户所需要的商品",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "category": {
                            "type": "string",
                            "description": "用户现在所需要的商品类别,比如纸尿布,笔记本电脑等,注意,只能有一个",
                        },
                        "requirements": {
                            "type": "string",
                            "description": "用户现在的需求,比如说便宜,安踏品牌等等,可以有多个需求,中间以“ ”分隔",
                        },
                    },
                    "required": ["category", "requirements"],
                },
            }
        ]

        # Force the model to call search_information and parse its arguments.
        response = agent.LLM.get_response(
            messages,
            None,
            None,
            functions=functions,
            stream=False,
            function_call={"name": "search_information"},
        )
        response_message = json.loads(response["function_call"]["arguments"])
        category = (
            response_message["category"] if response_message["category"] else None
        )
        requirements = (
            response_message["requirements"]
            if response_message["requirements"]
            else category
        )
        if not (category or requirements):
            return {}

        # Embedding match of the extracted category against known leaf names.
        # assumes matching_category returns ([names...], [scores...]) — TODO confirm
        topk_result = matching_category(
            category, self.leaf_name, None, self.target_embbeding, top_k=3
        )

        top1_score = topk_result[1][0]
        request_items, top_category = search_with_api(requirements, category)

        # NOTE(review): eval() on an env var — float(...) would be safer.
        MIN_CATEGORY_SIM = eval(os.environ["MIN_CATEGORY_SIM"]
            ) if "MIN_CATEGORY_SIM" in os.environ else 0.7

        if top1_score > MIN_CATEGORY_SIM:
            # Close match: remember the category and describe it in detail.
            agent.environment.shared_memory["category"] = topk_result[0][0]
            category = topk_result[0][0]
            information = self.search_information(
                topk_result[0][0], self.information_dataset
            )
            information = limit_keys(information, 3)
            information = limit_values(information, 2)
            prompt += f"""你需要知道的是:用户目前选择的商品是{category},该商品信息为{information}。你需要根据这些商品信息来详细介绍商品,比如详细介绍商品有哪些品牌,有哪些分类等等,并且询问用户是否有更多的需求。"""
            if category in top_category:
                top_category.remove(category)

            recommend = "\n经过搜索后,推荐商品如下:\n"
            prompt += "筛选出的商品如下:\n"

            # itemPrice appears to be in cents, hence the /100 below.
            for i, request_item in enumerate(request_items):

                itemTitle = request_item["itemTitle"]
                itemPrice = request_item["itemPrice"]
                itemPicUrl = request_item["itemPicUrl"]
                recommend += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]({itemPicUrl})\n"
                prompt += f"[{i}.商品名称:{itemTitle},商品价格:{float(itemPrice)/100}]\n"
            outputdict["recommend"] = recommend
            print(recommend)
        else:
            # No close category: introduce the nearest alternatives instead.
            prompt += f"""你需要知道的是:用户目前选择的商品是{category},而我们店里没有这类商品,但是我们店里有一些近似商品,如{top_category},{topk_result[0][0]},你需要对这些近似商品进行介绍,并引导用户购买"""
        outputdict["prompt"] = prompt
        return outputdict
Component/PromptComponent.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+
3
+
4
class PromptComponent:
    """Abstract base for components that contribute a fragment of prompt text."""

    def __init__(self):
        # No shared state; subclasses store their own configuration.
        pass

    @abstractmethod
    def get_prompt(self, agent):
        """Return this component's prompt fragment for *agent*."""
        pass
11
+
12
class TaskComponent(PromptComponent):
    """Injects the task description the agent must carry out."""

    def __init__(self, task):
        super().__init__()
        self.task = task

    def get_prompt(self, agent):
        # *agent* is unused: the task text is fixed at construction time.
        return f"The task you need to execute is: {self.task}.\n"
19
+
20
+
21
class OutputComponent(PromptComponent):
    """Instructs the model to wrap its answer in <output>...</output>-style tags."""

    def __init__(self, output):
        super().__init__()
        self.output = output

    def get_prompt(self, agent):
        tag = self.output
        # Single-line equivalent of the original backslash-continued literal.
        return (
            f"Please contact the above to extract <{tag}> and </{tag}>, "
            "do not perform additional output, please output in strict accordance with the above format!\n"
        )
29
+
30
+
31
class SystemComponent(PromptComponent):
    """Wraps a fixed system prompt supplied at construction time."""

    def __init__(self, system_prompt):
        super().__init__()
        self.system_prompt = system_prompt

    def get_prompt(self, agent):
        # Returned verbatim; *agent* is ignored.
        return self.system_prompt
38
+
39
class LastComponent(PromptComponent):
    """Wraps a fixed closing ("last") prompt supplied at construction time."""

    def __init__(self, last_prompt):
        super().__init__()
        self.last_prompt = last_prompt

    def get_prompt(self, agent):
        # Returned verbatim; *agent* is ignored.
        return self.last_prompt
46
+
47
+
48
class StyleComponent(PromptComponent):
    """Role and output-style component: tells the agent who it is and how to speak."""

    def __init__(self, role):
        super().__init__()
        self.role = role

    def get_prompt(self, agent):
        # Pulls name and style from the agent at prompt-build time.
        return (
            f"Now your role is:\n{self.role}, your name is:\n{agent.name}. "
            f"You need to follow the output style:\n{agent.style}.\n"
        )
62
+
63
+
64
class RuleComponent(PromptComponent):
    """Injects a rule the agent must obey."""

    def __init__(self, rule):
        super().__init__()
        self.rule = rule

    def get_prompt(self, agent):
        # *agent* is unused: the rule text is fixed at construction time.
        return f"The rule you need to follow is:\n{self.rule}.\n"
71
+
72
+
73
class DemonstrationComponent(PromptComponent):
    """
    Few-shot component: takes a list of example answers and renders it
    into the prompt for the agent to imitate.
    """

    def __init__(self, demonstrations):
        super().__init__()
        self.demonstrations = demonstrations

    def get_prompt(self, agent):
        # The list's repr is embedded directly, matching the original behavior.
        return f"Here are demonstrations you can refer to:\n{self.demonstrations}"
86
+
87
+
88
class CoTComponent(PromptComponent):
    """
    Chain-of-thought component: takes a list of worked thinking examples
    and asks the model to reason in detail before answering.
    """

    def __init__(self, demonstrations):
        super().__init__()
        self.demonstrations = demonstrations

    def add_demonstration(self, demonstration):
        # Append another worked example to the pool.
        self.demonstrations.append(demonstration)

    def get_prompt(self, agent):
        header = "You need to think in detail before outputting, the thinking case is as follows:\n"
        # Each demonstration is prefixed with a newline, as in the original loop.
        return header + "".join("\n" + demo for demo in self.demonstrations)
105
+
106
+
107
class CustomizeComponent(PromptComponent):
    """
    Custom template component.

    template(str) : e.g. "i am {name}"
    keywords(list) : e.g. ["name"]

    Each keyword is looked up in the agent environment's shared memory
    (e.g. shared_memory["name"] = "Lilong") and substituted into the
    template; missing keywords become "".
    Return : "i am Lilong"
    """

    def __init__(self, template, keywords) -> None:
        super().__init__()
        self.template = template
        self.keywords = keywords

    def get_prompt(self, agent):
        shared = agent.environment.shared_memory
        fills = {kw: (shared[kw] if kw in shared else "") for kw in self.keywords}
        return self.template.format(**fills)
Component/ToolComponent.py ADDED
@@ -0,0 +1,887 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import abstractmethod
2
+ import uuid
3
+ from text2vec import semantic_search
4
+ from utils import (
5
+ get_relevant_history,
6
+ load_knowledge_base_qa,
7
+ load_knowledge_base_UnstructuredFile,
8
+ get_embedding,
9
+ extract,
10
+ )
11
+ import json
12
+ from typing import Dict, List
13
+ import os
14
+ from googleapiclient.discovery import build
15
+ import requests
16
+ from selenium import webdriver
17
+ from selenium.webdriver.common.by import By
18
+ from selenium.webdriver.support.ui import WebDriverWait
19
+ from selenium.webdriver.support import expected_conditions as EC
20
+ from bs4 import BeautifulSoup
21
+ import base64
22
+ import re
23
+ from datetime import datetime, timedelta
24
+ from typing import Tuple, List, Any, Dict
25
+ from email.mime.text import MIMEText
26
+ from email.mime.multipart import MIMEMultipart
27
+ from google.auth.transport.requests import Request
28
+ from google.oauth2.credentials import Credentials
29
+ from google_auth_oauthlib.flow import InstalledAppFlow
30
+ from googleapiclient.discovery import build
31
+ from googleapiclient.errors import HttpError
32
+ from tqdm import tqdm
33
+
34
class ToolComponent:
    """Abstract base for tool components that act on an agent's behalf."""

    def __init__(self):
        # No shared state; subclasses set up their own resources.
        pass

    @abstractmethod
    def func(self):
        """Execute the tool and return its result payload."""
        pass
41
+
42
class KnowledgeBaseComponent(ToolComponent):
    """
    Inject a knowledge base into the conversation.

    top_k : number of distinct matches to collect (non-QA mode; QA mode keeps one)
    type : "QA" for question/answer bases, anything else for unstructured files
    knowledge_base(json_path) : path to the knowledge-base file
    """

    def __init__(self, top_k, type, knowledge_base):
        super().__init__()
        self.top_k = top_k
        self.type = type
        self.knowledge_base = knowledge_base

        if self.type == "QA":
            (
                self.kb_embeddings,
                self.kb_questions,
                self.kb_answers,
                self.kb_chunks,
            ) = load_knowledge_base_qa(self.knowledge_base)
        else:
            # Unstructured mode provides no questions/answers — only chunks.
            self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile(
                self.knowledge_base
            )

    def func(self, agent):
        """
        Look up the agent's latest message in the knowledge base and return
        a reference prompt, or a "no match" prompt when the best hit scores
        below 0.5.
        """
        query = (
            agent.long_term_memory[-1]["content"]
            if len(agent.long_term_memory) > 0
            else ""
        )
        knowledge = ""
        query = extract(query, "query")
        query_embedding = get_embedding(query)
        hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50)
        hits = hits[0]
        seen_chunks = []
        if self.type == "QA":
            for hit in hits:
                matching_idx = hit["corpus_id"]
                # BUG FIX: the original tested membership on kb_chunks but
                # appended kb_answers, so the dedup check never fired. Track
                # the same key (the chunk) in both places.
                if self.kb_chunks[matching_idx] in seen_chunks:
                    continue
                knowledge = (
                    knowledge
                    + f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n"
                )
                seen_chunks.append(self.kb_chunks[matching_idx])
                if len(seen_chunks) == 1:
                    break
            print(hits[0]["score"])
            score = hits[0]["score"]
            if score < 0.5:
                return {"prompt": "No matching knowledge base"}
            else:
                return {"prompt": "The relevant content is: " + knowledge + "\n"}
        else:
            for hit in hits:
                matching_idx = hit["corpus_id"]
                if self.kb_chunks[matching_idx] in seen_chunks:
                    continue
                # BUG FIX: non-QA mode never defines self.kb_answers (see
                # __init__), so the original raised AttributeError here;
                # the chunk text is the intended content.
                knowledge = knowledge + f"{self.kb_chunks[matching_idx]}\n\n"
                seen_chunks.append(self.kb_chunks[matching_idx])
                if len(seen_chunks) == self.top_k:
                    break
            print(hits[0]["score"])
            score = hits[0]["score"]
            if score < 0.5:
                return {"prompt": "No matching knowledge base"}
            else:
                print(knowledge)
                return {"prompt": "The relevant content is: " + knowledge + "\n"}
115
+
116
+
117
class StaticComponent(ToolComponent):
    """Return static response: always yields the canned payload configured at init."""

    def __init__(self, output):
        super().__init__()
        self.output = output

    def func(self, agent):
        # *agent* is ignored; the response never varies.
        return {"response": self.output}
126
+
127
+
128
class ExtractComponent(ToolComponent):
    """
    Extract keywords based on the current scene and store them in the environment.

    extract_words(list) : keywords to be extracted
    system_prompt & last_prompt : prompts used to drive the extraction
    """

    def __init__(
        self,
        extract_words,
        system_prompt,
        last_prompt=None,
    ):
        super().__init__()
        self.extract_words = extract_words
        self.system_prompt = system_prompt
        # Default output-format instruction: one <word>...</word> pair per keyword.
        self.default_prompt = (
            "Please strictly adhere to the following format for outputting:\n"
            + "".join(
                f"<{word}> the content you need to extract </{word}>"
                for word in extract_words
            )
        )
        self.last_prompt = last_prompt if last_prompt else self.default_prompt

    def func(self, agent):
        """Ask the LLM for the tagged values and write them into shared memory."""
        reply = agent.LLM.get_response(
            agent.long_term_memory,
            self.system_prompt,
            self.last_prompt,
            stream=False,
        )
        for word in self.extract_words:
            value = extract(reply, word)
            # Fall back to the raw reply when the tag is missing/empty.
            agent.environment.shared_memory[word] = value if value else reply

        return {}
165
+
166
+
167
+ """Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)"""
168
+
169
+
170
class WebSearchComponent(ToolComponent):
    """Search-engine tool: queries Google or Bing and feeds snippets into the prompt."""

    # Engines this component knows how to drive.
    __ENGINE_NAME__: List = ["google", "bing"]

    def __init__(self, engine_name: str, api: Dict):
        """
        :param engine_name: The name of the search engine used
        :param api: Dictionary of engine credentials, e.g. {"bing": "key1", "google": {...}}
        """
        super(WebSearchComponent, self).__init__()

        # Validate the chosen engine and every supplied credential key.
        assert engine_name in WebSearchComponent.__ENGINE_NAME__
        for api_name in api:
            assert api_name in WebSearchComponent.__ENGINE_NAME__

        self.api = api
        self.engine_name = engine_name

        # Dispatch table: engine name -> bound search method.
        self.search: Dict = {"bing": self._bing_search, "google": self._google_search}

    def _bing_search(self, query: str, **kwargs):
        """Query the Bing Web Search API and normalize the hits."""
        subscription_key = self.api["bing"]
        search_url = "https://api.bing.microsoft.com/v7.0/search"
        request_headers = {"Ocp-Apim-Subscription-Key": subscription_key}
        params = {
            "q": query,
            "textDecorations": True,
            "textFormat": "HTML",
            "count": 10,
        }
        response = requests.get(search_url, headers=request_headers, params=params)
        response.raise_for_status()
        raw_hits = response.json()["webPages"]["value"]
        metadata_results = [
            {
                "snippet": hit["snippet"],
                "title": hit["name"],
                "link": hit["url"],
            }
            for hit in raw_hits
        ]
        return {"meta data": metadata_results}

    def _google_search(self, query: str, **kwargs):
        """Query the Google Custom Search JSON API and normalize the hits."""
        api_key = self.api[self.engine_name]["api_key"]
        cse_id = self.api[self.engine_name]["cse_id"]
        service = build("customsearch", "v1", developerKey=api_key)
        raw_hits = (
            service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"]
        )
        metadata_results = [
            {
                "snippet": hit["snippet"],
                "title": hit["title"],
                "link": hit["link"],
            }
            for hit in raw_hits
        ]
        return {"meta data": metadata_results}

    def func(self, agent, **kwargs) -> Dict:
        """Distill the last message into keywords, run the search, and build a prompt."""
        query = (
            agent.long_term_memory[-1]["content"]
            if len(agent.long_term_memory) > 0
            else " "
        )
        # Ask the LLM to boil the conversation down to search keywords.
        response = agent.LLM.get_response(
            None,
            system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. Format the output as <keywords>extracted keywords</keywords>:\nConversation:\n{query}",
            stream=False,
        )
        response = extract(response, "keywords")
        query = response if response else query

        search_results = self.search[self.engine_name](query=query, **kwargs)
        # Concatenate the first five snippets as reference material.
        information = "".join(
            hit["snippet"] for hit in search_results["meta data"][:5]
        )
        return {
            "prompt": "You can refer to the following information to reply:\n"
            + information
        }

    def convert_search_engine_to(self, engine_name):
        """Switch the active engine; must be one of __ENGINE_NAME__."""
        assert engine_name in WebSearchComponent.__ENGINE_NAME__
        self.engine_name = engine_name
264
+
265
+
266
class WebCrawlComponent(ToolComponent):
    """Open a single web page and scrape its paragraph text via Selenium."""

    def __init__(self):
        super(WebCrawlComponent, self).__init__()

    def func(self, agent_dict) -> Dict:
        """Crawl agent_dict["url"] and return {"content": <joined <p> text>}."""
        url = agent_dict["url"]
        print(f"crawling {url} ......")
        content = ""
        # Site-specific extraction (wiki, baidu, zhihu, ...) may be needed;
        # this generic path collects all <p> elements.
        driver = webdriver.Chrome()
        try:
            driver.get(url)
            # Wait up to 20s for <body> so JS-rendered pages have loaded.
            waiter = WebDriverWait(driver, 20)
            waiter.until(EC.presence_of_element_located((By.TAG_NAME, "body")))
            soup = BeautifulSoup(driver.page_source, "html.parser")
            for paragraph in soup.find_all("p"):
                content = f"{content}\n{paragraph.get_text()}"
        except Exception as e:
            print("Error:", e)
        finally:
            # Always release the browser, even on failure.
            driver.quit()
        return {"content": content.strip()}
301
+
302
+
303
class MailComponent(ToolComponent):
    """Read and send Gmail messages through the Gmail API."""

    __VALID_ACTION__ = ["read", "send"]

    def __init__(
        self, cfg_file: str, default_action: str = "read", name: str = "e-mail"
    ):
        """'../config/google_mail.json'"""
        super(MailComponent, self).__init__(name)
        self.name = name
        assert (
            default_action.lower() in self.__VALID_ACTION__
        ), f"Action `{default_action}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
        self.action = default_action.lower()
        # OAuth credentials; may trigger a browser authorization flow.
        self.credential = self._login(cfg_file)

    def _login(self, cfg_file: str):
        """Return Gmail API credentials, refreshing or re-authorizing as needed.

        Caches the token in ./token.json for subsequent runs.
        """
        SCOPES = [
            "https://www.googleapis.com/auth/gmail.readonly",
            "https://www.googleapis.com/auth/gmail.send",
        ]
        creds = None
        if os.path.exists("token.json"):
            print("Login Successfully!")
            creds = Credentials.from_authorized_user_file("token.json", SCOPES)
        if not creds or not creds.valid:
            print("Please authorize in an open browser.")
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open("token.json", "w") as token:
                token.write(creds.to_json())
        return creds

    def _read(self, mail_dict: dict):
        """Search the inbox according to the filters in `mail_dict`.

        Supported keys: state, time_between, sender_mail, only_both,
        order_by_time, include_word, exclude_word, MAX_SEARCH_CNT, number.

        Returns:
            {"results": [ {sender, time, subject, body}, ... ]} or None when
            nothing matched or an API error occurred (error is printed).
        """
        credential = self.credential
        state = mail_dict.get("state")
        time_between = mail_dict.get("time_between")
        sender_mail = mail_dict.get("sender_mail")
        only_both = mail_dict.get("only_both", False)
        order_by_time = mail_dict.get("order_by_time", "descend")
        include_word = mail_dict.get("include_word")
        exclude_word = mail_dict.get("exclude_word")
        MAX_SEARCH_CNT = mail_dict.get("MAX_SEARCH_CNT", 50)
        number = mail_dict.get("number", 10)
        if state is None:
            state = "all"
        if time_between is not None:
            assert isinstance(time_between, tuple)
            assert len(time_between) == 2
        assert state in ["all", "unread", "read", "sent"]
        if only_both:
            assert sender_mail is not None
        if sender_mail is not None:
            assert isinstance(sender_mail, str)
        assert credential
        assert order_by_time in ["descend", "ascend"]

        def generate_query():
            # Build a Gmail search query string from the filters above.
            query = ""
            if state in ["unread", "read"]:
                query = f"is:{state}"
            if state in ["sent"]:
                query = f"in:{state}"
            if only_both:
                query = f"{query} from:{sender_mail} OR to:{sender_mail}"
            if sender_mail is not None and not only_both:
                query = f"{query} from:({sender_mail})"
            if include_word is not None:
                query = f"{query} {include_word}"
            if exclude_word is not None:
                query = f"{query} -{exclude_word}"
            if time_between is not None:
                TIME_FORMAT = "%Y/%m/%d"
                t1, t2 = time_between
                if t1 == "now":
                    t1 = datetime.now().strftime(TIME_FORMAT)
                if t2 == "now":
                    t2 = datetime.now().strftime(TIME_FORMAT)
                if isinstance(t1, str) and isinstance(t2, str):
                    t1 = datetime.strptime(t1, TIME_FORMAT)
                    t2 = datetime.strptime(t2, TIME_FORMAT)
                elif isinstance(t1, str) and isinstance(t2, int):
                    # An int endpoint is a day offset from the other endpoint.
                    t1 = datetime.strptime(t1, TIME_FORMAT)
                    t2 = t1 + timedelta(days=t2)
                elif isinstance(t1, int) and isinstance(t2, str):
                    t2 = datetime.strptime(t2, TIME_FORMAT)
                    t1 = t2 + timedelta(days=t1)
                else:
                    assert False, "invalid time"
                if t1 > t2:
                    t1, t2 = t2, t1
                query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}"
            return query.strip()

        def sort_by_time(data: List[Dict]):
            # Newest first for "descend", oldest first for "ascend".
            reverse = order_by_time == "descend"
            sorted_data = sorted(
                data,
                key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"),
                reverse=reverse,
            )
            return sorted_data

        try:
            service = build("gmail", "v1", credentials=credential)
            results = (
                service.users()
                .messages()
                .list(userId="me", labelIds=["INBOX"], q=generate_query())
                .execute()
            )

            messages = results.get("messages", [])
            email_data = list()

            if not messages:
                print("No eligible emails.")
                return None
            pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages)))
            for cnt, message in enumerate(messages):
                pbar.update(1)
                if cnt >= MAX_SEARCH_CNT:
                    break
                msg = (
                    service.users()
                    .messages()
                    .get(
                        userId="me",
                        id=message["id"],
                        format="full",
                        metadataHeaders=None,
                    )
                    .execute()
                )

                subject = ""
                for header in msg["payload"]["headers"]:
                    if header["name"] == "Subject":
                        subject = header["value"]
                        break

                sender = ""
                for header in msg["payload"]["headers"]:
                    if header["name"] == "From":
                        # BUG FIX: the original indexed findall(...)[0], which
                        # raised IndexError when the From header contained no
                        # plain address; fall back to the raw header value.
                        matches = re.findall(
                            r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"]
                        )
                        sender = matches[0] if matches else header["value"]
                        break
                body = ""
                if "parts" in msg["payload"]:
                    for part in msg["payload"]["parts"]:
                        if part["mimeType"] == "text/plain":
                            data = part["body"]["data"]
                            body = base64.urlsafe_b64decode(data).decode("utf-8")
                            break

                email_info = {
                    "sender": sender,
                    "time": datetime.fromtimestamp(
                        int(msg["internalDate"]) / 1000
                    ).strftime("%Y-%m-%d %H:%M:%S"),
                    "subject": subject,
                    "body": body,
                }
                email_data.append(email_info)
            pbar.close()
            email_data = sort_by_time(email_data)[0:number]
            return {"results": email_data}
        except Exception as e:
            print(e)
            return None

    def _send(self, mail_dict: dict):
        """Send a plain-text email; return {"state": <bool success>}."""
        recipient_mail = mail_dict["recipient_mail"]
        subject = mail_dict["subject"]
        body = mail_dict["body"]
        credential = self.credential
        service = build("gmail", "v1", credentials=credential)

        message = MIMEMultipart()
        message["to"] = recipient_mail
        message["subject"] = subject

        message.attach(MIMEText(body, "plain"))

        raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8")
        try:
            message = (
                service.users()
                .messages()
                .send(userId="me", body={"raw": raw_message})
                .execute()
            )
            return {"state": True}
        except HttpError as error:
            print(error)
            return {"state": False}

    def func(self, mail_dict: dict):
        """Dispatch to read or send based on mail_dict["action"] (if given)."""
        if "action" in mail_dict:
            assert mail_dict["action"].lower() in self.__VALID_ACTION__
            # BUG FIX: normalise casing before storing.  The original stored
            # the raw value, so e.g. "Read" passed the assert above but then
            # raised KeyError on the dispatch-table lookup below.
            self.action = mail_dict["action"].lower()
        functions = {"read": self._read, "send": self._send}
        return functions[self.action](mail_dict)

    def convert_action_to(self, action_name: str):
        """Set the default action ("read" or "send"), case-insensitively."""
        assert (
            action_name.lower() in self.__VALID_ACTION__
        ), f"Action `{action_name}` is not allowed! The valid action is in `{self.__VALID_ACTION__}`"
        self.action = action_name.lower()
530
+
531
+
532
class WeatherComponet(ToolComponent):
    """Query current or historical daily weather via the weatherbit.io API."""

    def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"):
        super(WeatherComponet, self).__init__(name)
        self.name = name
        self.TIME_FORMAT = TIME_FORMAT
        self.api_key = api_key

    def _parse(self, data):
        """Flatten the API payload into {date: {field: value}}."""
        parsed: dict = {}
        # API field -> friendlier output key.
        renames = {
            "temp": "temperature",
            "max_temp": "max_temperature",
            "min_temp": "min_temperature",
            "precip": "accumulated_precipitation",
        }
        for item in data["data"]:
            day = item["datetime"]
            entry = {}
            if "weather" in item:
                entry["description"] = item["weather"]["description"]
            for src, dst in renames.items():
                if src in item:
                    entry[dst] = item[src]
            parsed[day] = entry
        return parsed

    def _query(self, city_name, country_code, start_date, end_date):
        """https://www.weatherbit.io/api/historical-weather-daily"""
        today = datetime.strftime(datetime.now(), self.TIME_FORMAT)
        tomorrow = datetime.strftime(
            datetime.now() + timedelta(days=1), self.TIME_FORMAT
        )
        if start_date == today and end_date == tomorrow:
            # The [today, tomorrow) range is interpreted as "current weather".
            url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}"
        else:
            url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}"
        return self._parse(requests.get(url).json())

    def func(self, weather_dict: Dict) -> Dict:
        """Normalise the requested date range and run the weather query."""
        fmt = self.TIME_FORMAT
        city_name = weather_dict["city_name"]        # e.g. Beijing, Shanghai
        country_code = weather_dict["country_code"]  # e.g. CN, US
        # Round-trip through strptime/strftime to validate the format
        # (e.g. 2020-02-02).
        start_date = datetime.strftime(
            datetime.strptime(weather_dict["start_date"], fmt), fmt
        )
        end_raw = weather_dict.get("end_date")
        if end_raw is None:
            # Default: the day before start (the swap below reorders them).
            end_date = datetime.strftime(
                datetime.strptime(start_date, fmt) + timedelta(days=-1), fmt
            )
        else:
            end_date = datetime.strftime(datetime.strptime(end_raw, fmt), fmt)
        if datetime.strptime(start_date, fmt) > datetime.strptime(end_date, fmt):
            start_date, end_date = end_date, start_date
        assert start_date != end_date
        return self._query(city_name, country_code, start_date, end_date)
601
+
602
+
603
class TranslateComponent(ToolComponent):
    """Translate text via the Microsoft (Azure Cognitive) Translator API."""

    # Language tags accepted by the Translator "to" parameter.
    __SUPPORT_LANGUAGE__ = [
        "af", "am", "ar", "as", "az", "ba", "bg", "bn", "bo", "bs", "ca",
        "cs", "cy", "da", "de", "dsb", "dv", "el", "en", "es", "et", "eu",
        "fa", "fi", "fil", "fj", "fo", "fr", "fr-CA", "ga", "gl", "gom",
        "gu", "ha", "he", "hi", "hr", "hsb", "ht", "hu", "hy", "id", "ig",
        "ikt", "is", "it", "iu", "iu-Latn", "ja", "ka", "kk", "km", "kmr",
        "kn", "ko", "ku", "ky", "ln", "lo", "lt", "lug", "lv", "lzh",
        "mai", "mg", "mi", "mk", "ml", "mn-Cyrl", "mn-Mong", "mr", "ms",
        "mt", "mww", "my", "nb", "ne", "nl", "nso", "nya", "or", "otq",
        "pa", "pl", "prs", "ps", "pt", "pt-PT", "ro", "ru", "run", "rw",
        "sd", "si", "sk", "sl", "sm", "sn", "so", "sq", "sr-Cyrl",
        "sr-Latn", "st", "sv", "sw", "ta", "te", "th", "ti", "tk",
        "tlh-Latn", "tlh-Piqd", "tn", "to", "tr", "tt", "ty", "ug", "uk",
        "ur", "uz", "vi", "xh", "yo", "yua", "yue", "zh-Hans", "zh-Hant",
        "zu",
    ]

    def __init__(
        self, api_key, location, default_target_language="zh-cn", name="translate"
    ):
        super(TranslateComponent, self).__init__(name)
        self.name = name
        self.api_key = api_key
        self.location = location
        self.default_target_language = default_target_language

    def func(self, translate_dict: Dict) -> Dict:
        """Translate ``translate_dict["content"]`` to the requested language.

        Args:
            translate_dict: Must contain "content"; may contain
                "target_language" (defaults to `self.default_target_language`).

        Returns:
            {"result": <translated text>}.
        """
        content = translate_dict["content"]
        target_language = translate_dict.get(
            "target_language", self.default_target_language
        )
        assert (
            target_language in self.__SUPPORT_LANGUAGE__
        ), f"language `{target_language}` is not supported."

        endpoint = "https://api.cognitive.microsofttranslator.com"
        constructed_url = endpoint + "/translate"

        params = {"api-version": "3.0", "to": target_language}

        headers = {
            "Ocp-Apim-Subscription-Key": self.api_key,
            "Ocp-Apim-Subscription-Region": self.location,
            "Content-type": "application/json",
            "X-ClientTraceId": str(uuid.uuid4()),
        }

        body = [{"text": content}]

        request = requests.post(
            constructed_url, params=params, headers=headers, json=body
        )
        response = request.json()
        # BUG FIX: the original re-serialised the response with json.dumps()
        # and then ran eval() on the resulting string.  eval of JSON text
        # breaks on literals such as true/false/null and executes arbitrary
        # expressions from an untrusted network response; the parsed object
        # from request.json() can be used directly.
        return {"result": response[0]["translations"][0]["text"]}
783
+
784
+
785
class APIComponent(ToolComponent):
    """Placeholder component for calling external APIs (not yet implemented)."""

    def __init__(self):
        super(APIComponent, self).__init__()

    def func(self, agent) -> Dict:
        # Intentionally a no-op: returns None until an API behaviour is defined.
        pass
791
+
792
+
793
class FunctionComponent(ToolComponent):
    """Tool component backed by OpenAI function calling.

    `functions` is the JSON-schema list passed to the chat API; the matching
    Python callables are resolved by name (from the module scope, or from the
    optional `your_function` source snippet).
    """

    def __init__(
        self,
        functions,
        function_call="auto",
        response_type="response",
        your_function=None,
    ):
        super().__init__()
        self.functions = functions
        self.function_call = function_call
        self.parameters = {}
        self.available_functions = {}
        self.response_type = response_type
        if your_function:
            function_name = your_function["name"]
            function_content = your_function["content"]
            # SECURITY: this executes caller-supplied source code — only pass
            # trusted code in your_function["content"].
            # BUG FIX: exec() into function locals does not reliably make the
            # new name visible to a later bare eval(); use an explicit
            # namespace dict instead.
            namespace = {}
            exec(function_content, namespace)
            self.available_functions[function_name] = namespace[function_name]

        for function in self.functions:
            self.parameters[function["name"]] = list(
                function["parameters"]["properties"].keys()
            )
            # Resolve each declared function name from the enclosing scope.
            self.available_functions[function["name"]] = eval(function["name"])

    def func(self, agent):
        """Let the LLM pick a function and its arguments, call it, and return
        the result under "response" or "prompt" per `self.response_type`."""
        messages = agent.long_term_memory
        outputdict = {}
        query = (
            agent.long_term_memory[-1].content
            if len(agent.long_term_memory) > 0
            else " "
        )
        relevant_history = get_relevant_history(
            query,
            agent.long_term_memory[:-1],
            agent.chat_embeddings[:-1],
        )
        response = agent.LLM.get_response(
            messages,
            None,
            functions=self.functions,
            stream=False,
            function_call=self.function_call,
            relevant_history=relevant_history,
        )
        response_message = response
        if response_message.get("function_call"):
            function_name = response_message["function_call"]["name"]
            function_to_call = self.available_functions[function_name]
            function_args = json.loads(response_message["function_call"]["arguments"])
            # Pass only the declared parameters; undeclared keys are dropped
            # and missing ones default to None.
            input_args = {
                args_name: function_args.get(args_name)
                for args_name in self.parameters[function_name]
            }
            function_response = function_to_call(**input_args)
            if self.response_type == "response":
                outputdict["response"] = function_response
            elif self.response_type == "prompt":
                outputdict["prompt"] = function_response

        return outputdict
851
+
852
+
853
class CodeComponent(ToolComponent):
    """Ask the LLM to extract the modified code from the conversation and
    write it to ``output_code/<file_name>``."""

    def __init__(self, file_name, keyword) -> None:
        super().__init__()
        self.file_name = file_name
        self.keyword = keyword
        self.system_prompt = (
            "you need to extract the modified code as completely as possible."
        )
        self.last_prompt = (
            f"Please strictly adhere to the following format for outputting: \n"
        )
        self.last_prompt += (
            f"<{self.keyword}> the content you need to extract </{self.keyword}>"
        )

    def func(self, agent):
        """Extract the code, strip a surrounding ```python fence, and save it.

        Returns an empty dict (this component only has the file side effect).
        """
        response = agent.LLM.get_response(
            agent.long_term_memory,
            self.system_prompt,
            self.last_prompt,
            stream=False,
        )
        code = extract(response, self.keyword)
        code = code if code else response
        os.makedirs("output_code", exist_ok=True)
        file_name = "output_code/" + self.file_name
        codes = code.split("\n")
        # BUG FIX: the original used list.remove(), which deletes the first
        # occurrence of the *value* — a "```" appearing earlier inside the
        # code would be removed instead of the closing fence, and an input of
        # just "```python" crashed on codes[-1].  pop() removes the exact
        # boundary lines, guarded against an emptied list.
        if codes and codes[0] == "```python":
            codes.pop(0)
        if codes and codes[-1] == "```":
            codes.pop()
        code = "\n".join(codes)
        with open(file_name, "w", encoding="utf-8") as f:
            f.write(code)
        return {}
Component/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .ExtraComponent import *
2
+ from .PromptComponent import *
3
+ from .ToolComponent import *
Environment/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .base_environment import Environment
Environment/base_environment.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from utils import get_relevant_history, get_embedding
2
+ import torch
3
+ from LLM.base_LLM import *
4
+ from Memory import Memory
5
+ from Prompt import *
6
+ import json
7
class Environment:
    """
    The place where the agent activities, responsible for storing some shared memories
    """
    def __init__(self, config) -> None:
        # Conversation state shared by all agents: the full message log plus
        # a rolling summary refreshed every MAX_CHAT_HISTORY messages.
        self.shared_memory = {"long_term_memory": [], "short_term_memory": None}
        self.agents = None

        self.summary_system_prompt = {}
        self.summary_last_prompt = {}
        self.environment_prompt = {}
        # "cooperative": states share chat history; otherwise (the code tests
        # for the literal "competive" in _observe) each state starts fresh.
        self.environment_type = config["environment_type"] if "environment_type" in config else "cooperative"
        self.current_chat_history_idx = 0
        self.LLMs = {}

        # Initialize the summary method for each state.
        # NOTE(review): the Default_* constants are f-string *templates* from
        # Prompt, rendered here via eval() — confirm config is always trusted.
        for state_name, state_dict in config["states"].items():
            if state_name != "end_state":
                self.summary_system_prompt[state_name] = (
                    state_dict["summary_system_prompt"]
                    if "summary_system_prompt" in state_dict
                    else eval(Default_environment_summary_system_prompt)
                )

                self.summary_last_prompt[state_name] = (
                    state_dict["summary_last_prompt"]
                    if "summary_last_prompt" in state_dict
                    else eval(Default_environment_summary_last_prompt)
                )

                self.environment_prompt[state_name] = (
                    state_dict["environment_prompt"]
                    if "environment_prompt" in state_dict
                    else " "
                )
                # One logging LLM per state, logs under logs/<state_name>.
                self.LLMs[state_name] = init_LLM("logs"+os.sep+f"{state_name}",**state_dict)
        self.roles_to_names = None
        self.names_to_roles = None

    @classmethod
    def from_config(cls, config_path):
        """Build an Environment from a JSON config file."""
        with open(config_path) as f:
            config = json.load(f)
        return cls(config)

    def summary(self, current_state):
        """
        Summarize the situation in the current environment every once in a while
        """
        # NOTE(review): eval() of an env var; int() would be safer — unchanged.
        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        current_state_name = current_state.name

        # Use the latest message as the query for relevant-history retrieval.
        query = self.shared_memory["long_term_memory"][-1].content
        if len(self.shared_memory["long_term_memory"])>1:
            relevant_history = get_relevant_history(
                query,
                self.shared_memory["long_term_memory"][:-1],
                self.shared_memory["chat_embeddings"][:-1],
            )

            relevant_history = Memory.get_chat_history(relevant_history)
        else:
            relevant_history = ""
        chat_history = Memory.get_chat_history(
            self.shared_memory["long_term_memory"][-MAX_CHAT_HISTORY + 1 :]
        )
        summary = self.shared_memory["short_term_memory"]


        # system prompt = environment prompt + current memory + system prompt
        # current_memory = summary + chat history + relevant history
        # (both rendered from f-string templates in Prompt via eval)
        current_memory = eval(Environment_summary_memory)
        environment_prompt = self.environment_prompt[current_state_name]
        summary_system_prompt = self.summary_system_prompt[current_state_name]

        environment_summary_system_prompt = eval(Environment_summary_system_prompt)
        response = self.LLMs[current_state_name].get_response(None, environment_summary_system_prompt, stream=False)
        return response

    def update_memory(self, memory, current_state):
        """
        Update chat embeddings, shared long/short term memory and the sending
        agent's own long term memory.
        """
        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        self.shared_memory["long_term_memory"].append(memory)
        current_embedding = get_embedding(memory.content)
        if "chat_embeddings" not in self.shared_memory:
            self.shared_memory["chat_embeddings"] = current_embedding
        else:
            # Embeddings are stacked as a tensor, one row per message.
            self.shared_memory["chat_embeddings"] = torch.cat(
                [self.shared_memory["chat_embeddings"], current_embedding], dim=0
            )
        # Refresh the rolling summary every MAX_CHAT_HISTORY messages.
        if len(self.shared_memory["long_term_memory"]) % MAX_CHAT_HISTORY == 0:
            summary = self.summary(current_state)
            self.shared_memory["short_term_memory"] = summary

        self.agents[memory.send_name].update_memory(memory)


    def _get_agent_last_conversation_idx(self,agent,current_long_term_memory):
        """Index of the agent's most recent message in the given log (-1 if none)."""
        last_conversation_idx = -1
        for i, history in enumerate(current_long_term_memory):
            if history.send_name == agent.name:
                last_conversation_idx = i
        return last_conversation_idx


    def _get_agent_new_memory(self,agent,current_long_term_memory):
        """Chat history of everything said since the agent last spoke."""
        # get new conversation
        last_conversation_idx = self._get_agent_last_conversation_idx(agent,current_long_term_memory)

        if last_conversation_idx == -1:
            new_conversation =current_long_term_memory
        elif (
            last_conversation_idx
            == len(current_long_term_memory) - 1
        ):
            new_conversation = []
        else:
            new_conversation = current_long_term_memory[
                last_conversation_idx + 1 :
            ]
        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        # Cap the window at roughly 2 * MAX_CHAT_HISTORY messages.
        if len(new_conversation) > 2 * MAX_CHAT_HISTORY:
            new_conversation = new_conversation[-2*MAX_CHAT_HISTORY+1:]

        # get chat history from new conversation
        return Memory.get_chat_history(new_conversation)


    def _observe(self,agent):
        """Assemble the user message an agent sees before acting:
        relevant memory + summary + recent history + the latest query."""
        MAX_CHAT_HISTORY = eval(os.environ["MAX_CHAT_HISTORY"])
        current_state = agent.current_state
        current_role = agent.state_roles[current_state.name]
        current_component_dict = current_state.components[current_role]

        # cooperative: sharing information between different states;
        # competitive (spelled "competive" in configs): no information is
        # shared between different states.
        current_chat_history_idx = self.current_chat_history_idx if self.environment_type == "competive" else 0
        current_long_term_memory = self.shared_memory["long_term_memory"][current_chat_history_idx:]
        current_chat_embbedings = self.shared_memory["chat_embeddings"][current_chat_history_idx:]

        # Keep memory and embeddings trimmed in lockstep.
        if len(current_long_term_memory)>2*MAX_CHAT_HISTORY:
            current_long_term_memory = current_long_term_memory[-2*MAX_CHAT_HISTORY+1:]
            current_chat_embbedings = current_chat_embbedings[-2*MAX_CHAT_HISTORY+1:]
        # relevant_memory: retrieved against the latest message.
        query = current_long_term_memory[-1].content
        if len(current_long_term_memory)>1:
            relevant_memory = get_relevant_history(
                query,
                current_long_term_memory[:-2],
                current_chat_embbedings[:-2],
            )
            relevant_memory = Memory.get_chat_history(relevant_memory,agent.name)
        else:
            relevant_memory = ""

        # Rendered from an f-string template in Prompt via eval().
        relevant_memory = eval(Agent_observe_relevant_memory)
        agent.relevant_memory = relevant_memory


        # get chat history from new conversation
        conversations = self._get_agent_new_memory(agent,current_long_term_memory)

        # memory = relevant_memory + summary + history + query
        query = current_long_term_memory[-1]
        current_memory = eval(Agent_observe_memory)

        return {"role": "user", "content": current_memory}
176
+
177
+
LLM/__init__.py ADDED
File without changes
LLM/base_LLM.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from abc import abstractclassmethod, abstractmethod
import os
import time

import openai

from Memory import Memory
from utils import save_logs
7
+
8
class LLM:
    """Abstract base class for chat-model backends."""

    def __init__(self) -> None:
        pass

    @abstractmethod
    def get_response(self, *args, **kwargs):
        """Return the model's reply; implemented by concrete subclasses.

        FIX: the original used the deprecated @abstractclassmethod with a
        zero-argument signature that could never be invoked; this is now a
        normal abstract instance method matching the subclass signatures.
        """
        pass
15
+
16
+
17
class OpenAILLM(LLM):
    """LLM backend over the OpenAI ChatCompletion API, with retry and logging."""

    def __init__(self, **kwargs) -> None:
        super().__init__()
        # Maximum number of history messages forwarded to the API.
        self.MAX_CHAT_HISTORY = eval(
            os.environ["MAX_CHAT_HISTORY"]) if "MAX_CHAT_HISTORY" in os.environ else 10

        self.model = kwargs["model"] if "model" in kwargs else "gpt-3.5-turbo-16k-0613"
        self.temperature = kwargs["temperature"] if "temperature" in kwargs else 0.3
        self.log_path = kwargs["log_path"].replace("/", os.sep) if "log_path" in kwargs else "logs"

    def get_stream(self, response, log_path, messages):
        """Yield content deltas from a streaming response; log the full reply
        once the stream is exhausted."""
        ans = ""
        for res in response:
            if res:
                r = (res.choices[0]["delta"].get("content")
                     if res.choices[0]["delta"].get("content") else "")
                ans += r
                yield r

        save_logs(log_path, messages, ans)

    def get_response(self,
                     chat_history,
                     system_prompt,
                     last_prompt=None,
                     stream=False,
                     functions=None,
                     function_call="auto",
                     WAIT_TIME=20,
                     **kwargs):
        """
        Return the LLM's response.

        Args:
            chat_history: List of dicts or Memory objects (may be None).
            system_prompt: Optional system message content.
            last_prompt: Optional text appended to the final message.
            stream: If True, return a generator of content deltas.
            functions / function_call: OpenAI function-calling parameters.
            WAIT_TIME: Seconds to sleep before retrying on transient errors.
        """
        openai.api_key = os.environ["API_KEY"]
        if "PROXY" in os.environ:
            assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"], "PROXY error,PROXY must be http or socks"
            openai.proxy = os.environ["PROXY"]
        if "API_BASE" in os.environ:
            openai.api_base = os.environ["API_BASE"]
        active_mode = True if ("ACTIVE_MODE" in os.environ and os.environ["ACTIVE_MODE"] == "0") else False
        model = self.model
        temperature = self.temperature

        if active_mode:
            system_prompt = system_prompt + "Please keep your reply as concise as possible."

        messages = [{
            "role": "system",
            "content": system_prompt
        }] if system_prompt else []

        if chat_history:
            if len(chat_history) > self.MAX_CHAT_HISTORY:
                chat_history = chat_history[- self.MAX_CHAT_HISTORY:]
            if isinstance(chat_history[0], dict):
                messages += chat_history
            elif isinstance(chat_history[0], Memory):
                messages += [memory.get_gpt_message("user") for memory in chat_history]

        if last_prompt:
            if active_mode:
                last_prompt = last_prompt + "Please keep your reply as concise as possible."
            # messages += [{"role": "system", "content": f"{last_prompt}"}]
            if messages:
                messages[-1]["content"] += last_prompt
            else:
                # BUG FIX: with neither a system prompt nor chat history the
                # original indexed messages[-1] on an empty list (IndexError).
                messages.append({"role": "user", "content": last_prompt})

        # Retry until the request succeeds; on context-length errors, drop the
        # oldest non-system message and try again.
        while True:
            try:
                if functions:
                    response = openai.ChatCompletion.create(
                        model=model,
                        messages=messages,
                        functions=functions,
                        function_call=function_call,
                        temperature=temperature,
                    )
                else:
                    response = openai.ChatCompletion.create(
                        model=model,
                        messages=messages,
                        temperature=temperature,
                        stream=stream)
                break
            except Exception as e:
                print(e)
                if "maximum context length is" in str(e):
                    if len(messages) > 1:
                        del messages[1]
                    else:
                        assert False, "exceed max length"
                else:
                    print(f"Please wait {WAIT_TIME} seconds and resend later ...")
                    time.sleep(WAIT_TIME)

        if functions:
            save_logs(self.log_path, messages, response)
            return response.choices[0].message
        elif stream:
            return self.get_stream(response, self.log_path, messages)
        else:
            save_logs(self.log_path, messages, response)
            return response.choices[0].message["content"]
125
+
126
+
127
def init_LLM(default_log_path, **kwargs):
    """Build an LLM backend from a config dict.

    Args:
        default_log_path: Log directory used when the config gives none.
        **kwargs: May contain "LLM_type", "log_path" and an "LLM" sub-dict
            with backend parameters (model, temperature, log_path, ...).

    Returns:
        An instantiated backend (currently only the OpenAI one).

    Raises:
        ValueError: If "LLM_type" names an unsupported backend.  The original
            fell off the end and returned None, deferring the crash to the
            call site; also, its local variable shadowed the `LLM` base class.
    """
    LLM_type = kwargs["LLM_type"] if "LLM_type" in kwargs else "OpenAI"
    log_path = (
        kwargs["log_path"].replace("/", os.sep)
        if "log_path" in kwargs
        else default_log_path
    )
    if LLM_type == "OpenAI":
        return (
            OpenAILLM(**kwargs["LLM"])
            if "LLM" in kwargs
            else OpenAILLM(model="gpt-3.5-turbo-16k-0613", temperature=0.3, log_path=log_path)
        )
    raise ValueError(f"Unsupported LLM_type: {LLM_type!r}")
137
+
Memory/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .base_Memory import Memory
Memory/base_Memory.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from Prompt import *
2
class Memory:
    """A single chat message: who sent it (role + name) and its content."""

    def __init__(self, role, name, content) -> None:
        self.send_role = role
        self.send_name = name
        self.content = content

    def get_gpt_message(self, role):
        """Return this memory as an OpenAI chat message with the given role."""
        return {"role": role, "content": self.content}

    @classmethod
    def get_chat_history(cls, messages, agent_name=None):
        """
        Splice a memory list into a sentence
        input :
            messages(list) : list of memory(Memory)
        Return :
            chat_history(str) : One sentence after integration
        """
        # FIX: first parameter of this classmethod renamed self -> cls.
        chat_history = ""
        for message in messages:
            name, role, content = message.send_name, message.send_role, message.content
            if agent_name and agent_name == name:
                name = "you"
            # Single_message / Chat_total_message are f-string templates from
            # Prompt, rendered via eval() against the locals bound above.
            chat_history += eval(Single_message)
        chat_history = eval(Chat_total_message)
        return chat_history

    def get_query(self):
        "Return : query(str):last sentence"
        name, role, content = self.send_name, self.send_role, self.content
        return eval(Single_message)
Prompt/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .base_Prompts import *
Prompt/base_Prompts.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# NOTE: every constant below is the *source string of an f-string*: the value
# is a quoted "f\"...\"" literal that is passed to eval() at the point of use,
# with the {...} names resolved from the caller's local scope.  The variable
# names inside the braces are therefore part of the contract with the calling
# code (SOP / Memory / Environment / Agent) and the literals must not be
# reworded or renamed.

# SOP========================================================================================================
# "environment_prompt"
# Evaluated inside SOP methods; requires `self` (the SOP) with self.current_state.
Get_environment_prompt = "f\"Here are the description of current scenario:{self.current_state.environment_prompt};\\n\""


# sop.transit
#================================================================
# System prompt of the transition judge; needs `environment_prompt` and `judge_system_prompt`.
Transit_system_prompt = "f\"{environment_prompt};\\n{judge_system_prompt}\\n\"";

# transit chat message
# "environment_prompt" comes from "Get_environment_prompt"; "chat_history_message" is from Memory.
Transit_message = "f\"{environment_summary};\\n Here is the The chat history:\\n {chat_history_message};\\nHere is the last query you especially need to pay attention:\\n{query};\\n Here is the relevant conversation: \\n{relevant_history} \\n\\n\""


Transit_last_prompt = "f\"{judge_last_prompt}\""
#sop.transit================================================================

# sop.call
#================================================================
# Helps the controller pick the next role to speak (the {} is the agent role); one per candidate role.
Allocate_component = "f\"If it's currently supposed to be speaking for {role}, then output <end>{role}</end>.\\n\""

# "environment_prompt" comes from "Get_environment_prompt"; "chat_history_message" is from Memory.
Call_system_prompt = "f\"{environment_prompt};\\n{call_system_prompt};\\n{allocate_prompt}.\\n\""

# Last prompt for the role-allocation call; forbids repeating the previous speaker.
Call_last_prompt = "f\"Here is the last query you especially need to pay attention:\\n{query};\\n Here is the the relevant conversation :\\n{relevant_history};\\nNow please choose the person to speak according to the following rules :{allocate_prompt};\\nNote: The person whose turn it is now cannot be the same as the person who spoke last time, so {last_name} cannot be output\\n.\""

Call_message = "f\"Here is the chat history:\\n{chat_history_message};\\nHere is the name of the person who last speak: {last_name}.\\n \""
#sop.call================================================================
# SOP========================================================================================================







# Memory========================================================================================================
# Renders one message; note only {role} and {content} are interpolated here.
Single_message = "f\"role: {role} \\n speak content : {content}; \""

Chat_total_message = "f\"<chat history>{{{chat_history}}}</chat history>\""
# Memory========================================================================================================







# Environment========================================================================================================
Default_environment_summary_system_prompt = "\"\\nYour task is to summarize the historical dialogue records according to the current scene, and summarize the most important information\""

Default_environment_summary_last_prompt = "\"Please make a summary based on the historical chat records, the output format is history summary: \{your summary content\} \""

# Multi-line template (backslash line continuations are *inside* the literal).
Environment_summary_memory = "f\"Here is the information you need to know:\\n\\n\
Here is the summary of the previous dialogue history:\\n{summary}.\\n\
Here is the latest conversation record:\\n {chat_history},\\n\
Here is the relevant chat history you may need:{relevant_history}.\\n\""

Environment_summary_system_prompt = "f\"{environment_prompt};\\n{current_memory};\\n{summary_system_prompt};\\n\""


# observe
Agent_observe_relevant_memory = "f\"\\n{relevant_memory}. \\n\""


# Needs an `agent` object with short_term_memory / relevant_memory in scope.
Agent_observe_memory = "f\"Here's what you need to know(Remember, this is just information, Try not to repeat what's inside):\\nHere is the relevant chat history you may need:{relevant_memory};\\n\
Here is the previous summary of chat history :\\n{agent.short_term_memory}.\\n\
Here is the relevant memory :\\n{agent.relevant_memory}.\\n\
Here is the new chat history:\\n {conversations};\\n\
\""
# Environment========================================================================================================




# Agent========================================================================================================
Agent_summary_system_prompt = "f\"{summary_prompt};\\n Here is the past summary:{self.short_term_memory};\\nHere is the new chat_history:\\n{conversations};\\nPlease summary Please summarize based on the above information;\\n\""

Agent_last_prompt = "f\"{last_prompt};Please continue the talk based on your known information;Remember that you just represent {name}, do not speak for others,just speak as normal.\""

Agent_system_prompt = "f\"{system_prompt},\""
# Agent========================================================================================================
README copy.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: SOP Generation-single
3
+ emoji: 🐨
4
+ colorFrom: purple
5
+ colorTo: pink
6
+ sdk: gradio
7
+ sdk_version: 3.47.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
SOP.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """standard operation procedure of an LLM Autonomous agent"""
17
+ import random
18
+ from LLM.base_LLM import *
19
+ from State import State
20
+ from utils import extract, get_relevant_history
21
+ from Memory import Memory
22
+ from Prompt import *
23
+ import json
24
+ import os
25
+
26
+ class SOP:
27
+ """
28
+ Responsible for managing the operational processes of all agents
29
+ """
30
+
31
+ # SOP should have args : "states" "relations" "root"
32
+
33
+ def __init__(self, **kwargs):
34
+ self.controller_dict = {}
35
+ self.LLM = init_LLM("logs"+os.sep+"god",**kwargs)
36
+
37
+ self.states = {}
38
+ self.init_states(kwargs["states"])
39
+ self.init_relation(kwargs["relations"])
40
+ for state_name, states_dict in kwargs["states"].items():
41
+ if state_name != "end_state" and "controller" in states_dict:
42
+ self.controller_dict[state_name] = states_dict["controller"]
43
+
44
+ self.user_names = kwargs["user_names"] if "user_names" in kwargs else []
45
+ self.root = self.states[kwargs["root"]]
46
+ self.current_state = self.root
47
+ self.finish_state_name = (
48
+ kwargs["finish_state_name"]
49
+ if "finish_state_name" in kwargs
50
+ else "end_state"
51
+ )
52
+ self.roles_to_names = None
53
+ self.names_to_roles = None
54
+ self.finished = False
55
+
56
+ @classmethod
57
+ def from_config(cls, config_path):
58
+ with open(config_path) as f:
59
+ config = json.load(f)
60
+ os.environ.clear()
61
+ for key,value in config["config"].items():
62
+ if value!="":
63
+ os.environ[key] = value
64
+ sop = SOP(**config)
65
+ return sop
66
+
67
+ def init_states(self, states_dict):
68
+ for state_name, state_dict in states_dict.items():
69
+ state_dict["name"] = state_name
70
+ self.states[state_name] = State(**state_dict)
71
+
72
+ def init_relation(self, relations):
73
+ for state_name, state_relation in relations.items():
74
+ for idx, next_state_name in state_relation.items():
75
+ self.states[state_name].next_states[idx] = self.states[next_state_name]
76
+
77
+ def transit(self, chat_history, **kwargs):
78
+ """
79
+ Determine the next state based on the current situation
80
+ Return :
81
+ next_state(State) : the next state
82
+ """
83
+ # 如果是单一循环节点,则一直循环即可
84
+ # If it is a single loop node, just keep looping
85
+ if len(self.current_state.next_states) == 1:
86
+ next_state = "0"
87
+
88
+ # 否则则需要controller去判断进入哪一节点
89
+ # Otherwise, the controller needs to determine which node to enter.
90
+ else:
91
+ current_state = self.current_state
92
+ controller_dict = self.controller_dict[current_state.name]
93
+ relevant_history = kwargs["relevant_history"]
94
+
95
+ max_chat_nums = controller_dict["max_chat_nums"] if "max_chat_nums" in controller_dict else 1000
96
+ if current_state.chat_nums>=max_chat_nums:
97
+ return self.current_state.next_states["1"]
98
+
99
+
100
+ # 否则则让controller判断是否结束
101
+ # Otherwise, let the controller judge whether to end
102
+ judge_system_prompt = controller_dict["judge_system_prompt"] if "judge_system_prompt" in controller_dict else ""
103
+ environment_prompt = eval(Get_environment_prompt) if current_state.environment_prompt else ""
104
+ transit_system_prompt = eval(Transit_system_prompt)
105
+
106
+ judge_last_prompt = controller_dict["judge_last_prompt"] if "judge_last_prompt" in controller_dict else ""
107
+ transit_last_prompt = eval(Transit_last_prompt)
108
+
109
+
110
+
111
+ environment = kwargs["environment"]
112
+ environment_summary = environment.shared_memory["short_term_memory"]
113
+ chat_history_message = Memory.get_chat_history(chat_history)
114
+ query = chat_history[-1].get_query()
115
+
116
+ chat_messages = [
117
+ {
118
+ "role": "user",
119
+ "content": eval(Transit_message)
120
+ }
121
+ ]
122
+
123
+ extract_words = controller_dict["judge_extract_words"] if "judge_extract_words" in controller_dict else "end"
124
+
125
+
126
+ response = self.LLM.get_response(
127
+ chat_messages, transit_system_prompt, transit_last_prompt, stream=False, **kwargs
128
+ )
129
+ next_state = (
130
+ response if response.isdigit() else extract(response, extract_words)
131
+ )
132
+
133
+ # 如果没有parse出来则继续循环
134
+ # If no parse comes out, continue looping
135
+ if not next_state.isdigit():
136
+ next_state = "0"
137
+
138
+ next_state = self.current_state.next_states[next_state]
139
+ return next_state
140
+
141
+
142
+ def route(self, chat_history, **kwargs):
143
+ """
144
+ Determine the role that needs action based on the current situation
145
+ Return :
146
+ current_agent(Agent) : the next act agent
147
+ """
148
+
149
+ agents = kwargs["agents"]
150
+
151
+ # 知道进入哪一状态后开始分配角色,如果该状态下只有一个角色则直接分配给他
152
+ # Start assigning roles after knowing which state you have entered. If there is only one role in that state, assign it directly to him.
153
+ if len(self.current_state.roles) == 1:
154
+ next_role = self.current_state.roles[0]
155
+
156
+
157
+
158
+ # 否则controller进行分配
159
+ # Otherwise the controller determines
160
+ else:
161
+ relevant_history = kwargs["relevant_history"]
162
+ controller_type = (
163
+ self.controller_dict[self.current_state.name]["controller_type"]
164
+ if "controller_type" in self.controller_dict[self.current_state.name]
165
+ else "order"
166
+ )
167
+
168
+
169
+ # 如果是rule 控制器,则交由LLM进行分配角色
170
+ # If controller type is rule, it is left to LLM to assign roles.
171
+ if controller_type == "rule":
172
+ controller_dict = self.controller_dict[self.current_state.name]
173
+
174
+ call_last_prompt = controller_dict["call_last_prompt"] if "call_last_prompt" in controller_dict else ""
175
+
176
+ allocate_prompt = ""
177
+ roles = list(set(self.current_state.roles))
178
+ for role in roles:
179
+ allocate_prompt += eval(Allocate_component)
180
+
181
+ call_system_prompt = controller_dict["call_system_prompt"] if "call_system_prompt" in controller_dict else ""
182
+ environment_prompt = eval(Get_environment_prompt) if self.current_state.environment_prompt else ""
183
+ # call_system_prompt + environment + allocate_prompt
184
+ call_system_prompt = eval(Call_system_prompt)
185
+
186
+ query = chat_history[-1].get_query()
187
+ last_name = chat_history[-1].send_name
188
+ # last_prompt: note + last_prompt + query
189
+ call_last_prompt =eval(Call_last_prompt)
190
+
191
+
192
+ chat_history_message = Memory.get_chat_history(chat_history)
193
+ # Intermediate historical conversation records
194
+ chat_messages = [
195
+ {
196
+ "role": "user",
197
+ "content": eval(Call_message),
198
+ }
199
+ ]
200
+
201
+ extract_words = controller_dict["call_extract_words"] if "call_extract_words" in controller_dict else "end"
202
+
203
+ response = self.LLM.get_response(
204
+ chat_messages, call_system_prompt, call_last_prompt, stream=False, **kwargs
205
+ )
206
+
207
+ # get next role
208
+ next_role = extract(response, extract_words)
209
+
210
+ # Speak in order
211
+ elif controller_type == "order":
212
+ # If there is no begin role, it will be given directly to the first person.
213
+ if not self.current_state.current_role:
214
+ next_role = self.current_state.roles[0]
215
+ # otherwise first
216
+ else:
217
+ self.current_state.index += 1
218
+ self.current_state.index = (self.current_state.index) % len(self.current_state.roles)
219
+ next_role = self.current_state.roles[self.current_state.index]
220
+ # random speak
221
+ elif controller_type == "random":
222
+ next_role = random.choice(self.current_state.roles)
223
+
224
+ # 如果下一角色不在,则随机挑选一个
225
+ # If the next character is not available, pick one at random
226
+ if next_role not in self.current_state.roles:
227
+ next_role = random.choice(self.current_state.roles)
228
+
229
+ self.current_state.current_role = next_role
230
+
231
+ next_agent = agents[self.roles_to_names[self.current_state.name][next_role]]
232
+
233
+ return next_agent
234
+
235
+ def next(self, environment, agents):
236
+ """
237
+ Determine the next state and the agent that needs action based on the current situation
238
+ """
239
+
240
+ # 如果是第一次进入该状态
241
+ # If it is the first time to enter this state
242
+
243
+ if self.current_state.is_begin:
244
+ agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
245
+ agent = agents[agent_name]
246
+ return self.current_state,agent
247
+
248
+
249
+ # get relevant history
250
+ query = environment.shared_memory["long_term_memory"][-1].content
251
+ relevant_history = get_relevant_history(
252
+ query,
253
+ environment.shared_memory["long_term_memory"][:-1],
254
+ environment.shared_memory["chat_embeddings"][:-1],
255
+ )
256
+ relevant_history = Memory.get_chat_history(relevant_history)
257
+
258
+
259
+
260
+ next_state = self.transit(
261
+ chat_history=environment.shared_memory["long_term_memory"][
262
+ environment.current_chat_history_idx :
263
+ ],
264
+ relevant_history=relevant_history,
265
+ environment=environment,
266
+ )
267
+ # 如果进入终止节点,则直接终止
268
+ # If you enter the termination node, terminate directly
269
+ if next_state.name == self.finish_state_name:
270
+ self.finished = True
271
+ return None, None
272
+
273
+ self.current_state = next_state
274
+
275
+ # 如果是首次进入该节点且有开场白,则直接分配给开场角色
276
+ # If it is the first time to enter the state and there is a begin query, it will be directly assigned to the begin role.
277
+ if self.current_state.is_begin and self.current_state.begin_role:
278
+ agent_name = self.roles_to_names[self.current_state.name][self.current_state.begin_role]
279
+ agent = agents[agent_name]
280
+ return self.current_state,agent
281
+
282
+
283
+ next_agent = self.route(
284
+ chat_history=environment.shared_memory["long_term_memory"][
285
+ environment.current_chat_history_idx :
286
+ ],
287
+ agents = agents,
288
+ relevant_history=relevant_history,
289
+ )
290
+
291
+ return self.current_state, next_agent
State.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from Component import *
2
+
3
+
4
class State:
    """
    Sub-scene of role activities: stores which roles act in it, their
    per-role component configuration, and the rotation/transition
    bookkeeping used by SOP.
    """

    # Declarative replacement for the former ~100-line if/elif chain in
    # init_components.  Maps component name -> (key it is stored under,
    # builder taking the raw component-args dict).  Note the historical
    # quirk that "KnowledgeBaseComponent" is stored under the key "tool".
    _COMPONENT_BUILDERS = {
        "style": ("style", lambda args: StyleComponent(args["role"])),
        "task": ("task", lambda args: TaskComponent(args["task"])),
        "rule": ("rule", lambda args: RuleComponent(args["rule"])),
        "demonstrations": ("demonstrations", lambda args: DemonstrationComponent(args["demonstrations"])),
        "output": ("output", lambda args: OutputComponent(args["output"])),
        "last": ("last", lambda args: LastComponent(args["last_prompt"])),
        "cot": ("cot", lambda args: CoTComponent(args["demonstrations"])),
        "CustomizeComponent": ("CustomizeComponent", lambda args: CustomizeComponent(args["template"], args["keywords"])),
        "system": ("system", lambda args: SystemComponent(args["system_prompt"])),
        "StaticComponent": ("StaticComponent", lambda args: StaticComponent(args["output"])),
        "KnowledgeBaseComponent": ("tool", lambda args: KnowledgeBaseComponent(args["top_k"], args["type"], args["knowledge_path"])),
        "CategoryRequirementsComponent": ("CategoryRequirementsComponent", lambda args: CategoryRequirementsComponent(args["information_path"])),
        "FunctionComponent": ("FunctionComponent", lambda args: FunctionComponent(args[""])),
        "ExtractComponent": ("ExtractComponent", lambda args: ExtractComponent(args["extract_words"], args["system_prompt"], args["last_prompt"])),
        "WebSearchComponent": ("WebSearchComponent", lambda args: WebSearchComponent(args["engine_name"], args["api"])),
        "WebCrawlComponent": ("WebCrawlComponent", lambda args: WebCrawlComponent(args["name"])),
        "CodeComponent": ("CodeComponent", lambda args: CodeComponent(args["file_name"], args["keyword"])),
    }

    def __init__(self, **kwargs):
        """Build a state from its JSON config dict.

        Required key: "name".  Optional keys: "environment_prompt", "roles",
        "begin_role", "begin_query", "summary_prompt", "agent_states".
        """
        self.next_states = {}          # branch index (str) -> State, wired by SOP.init_relation
        self.name = kwargs["name"]

        self.environment_prompt = (
            kwargs["environment_prompt"] if "environment_prompt" in kwargs else ""
        )

        # Roles come from an explicit "roles" list, else from the agent_states
        # keys, else a single placeholder role 0.
        self.roles = kwargs["roles"] if "roles" in kwargs else (list(kwargs["agent_states"].keys()) if "agent_states" in kwargs else [0])
        if len(self.roles) == 0:
            self.roles = [0]
        self.begin_role = (
            kwargs["begin_role"] if "begin_role" in kwargs else self.roles[0]
        )
        self.begin_query = kwargs["begin_query"] if "begin_query" in kwargs else None

        # True until the state has been entered once (consumed by SOP.next).
        self.is_begin = True

        self.summary_prompt = (
            kwargs["summary_prompt"] if "summary_prompt" in kwargs else None
        )
        self.current_role = self.begin_role
        self.components = (
            self.init_components(kwargs["agent_states"])
            if "agent_states" in kwargs
            else {}
        )
        # Rotation index for the "order" controller.
        self.index = (
            self.roles.index(self.begin_role) if self.begin_role in self.roles else 0
        )
        self.chat_nums = 0             # messages spoken in this state so far

    def init_components(self, agent_states_dict: dict):
        """Build per-role component dicts from the raw config.

        Uses the _COMPONENT_BUILDERS dispatch table; falsy or unknown
        component names are skipped silently, exactly as the previous
        if/elif chain did.

        Returns:
            dict: role -> {component key -> component instance}
        """
        agent_states = {}
        for role, components in agent_states_dict.items():
            component_dict = {}
            for component, component_args in components.items():
                if not component:
                    continue
                entry = self._COMPONENT_BUILDERS.get(component)
                if entry is None:
                    # Unknown component name: ignore, as before.
                    continue
                key, build = entry
                component_dict[key] = build(component_args)
            agent_states[role] = component_dict
        return agent_states
__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .evolve import *
2
+ from .SOP import *
3
+ from .State import *
4
+ from .utils import *
app copy.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os
3
+ import argparse
4
+ from gradio_base import WebUI, UIHelper, PORT, HOST, Client
5
+ from gradio_config import GradioConfig as gc
6
+ from typing import List, Tuple, Any
7
+ import gradio as gr
8
+ import time
9
+ from Agent import Agent
10
+ from design_states import get_desgin_states,get_cot_result
11
+ from gen_utils import *
12
+ from utils import get_embedding,cos_sim
13
+ import torch
14
+ import json
15
+ import openai
16
+
17
def get_embedding(sentence,api_key):
    """Embed *sentence* with OpenAI text-embedding-ada-002 and return a
    torch float32 tensor of shape (1, dim).

    NOTE(review): this definition shadows the `get_embedding` imported from
    `utils` above, so every later call in this module uses this version —
    confirm that is intentional.  It also mutates the global
    `openai.api_key` as a side effect.
    """
    openai.api_key = api_key
    embedding_model = openai.Embedding
    embed = embedding_model.create(
        model="text-embedding-ada-002",
        input=sentence
    )
    embed = embed["data"][0]["embedding"]
    embed = torch.tensor(embed,dtype=torch.float32)
    # Ensure a leading batch dimension so results can be torch.cat'ed later.
    if len(embed.shape)==1:
        embed = embed.unsqueeze(0)
    return embed
29
+
30
+ class GeneralUI(WebUI):
31
    def render_and_register_ui(self):
        # Bind each agent to an avatar in the shared Gradio config.
        # cache["agents_name"] may be a single string or a list; normalize to a list.
        self.agent_name:list = [self.cache["agents_name"]] if isinstance(self.cache["agents_name"], str) else self.cache['agents_name']
        gc.add_agent(self.agent_name)
35
+
36
    def handle_message(self, history, state, agent_name, token, node_name):
        """Merge one streamed token into the chat transcript and re-render.

        ``state % 10`` encodes the streaming phase:
          0 -> first token of a new agent bubble,
          1 -> continuation of the current bubble,
          2 -> a new node begins: open a fresh chat bubble and reset buffers.
        Returns the rendered chatbot data produced by render_bubble.
        """
        if state % 10 == 0:
            self.data_history.append({agent_name: token})
        elif state % 10 == 1:
            # Same state: append the token to the current agent's bubble.
            self.data_history[-1][agent_name] += token
        elif state % 10 == 2:
            # New state: start a new chat bubble.
            history.append([None, ""])
            self.data_history.clear()
            self.data_history.append({agent_name: token})
        else:
            assert False, "Invalid state."
        render_data = self.render_bubble(history, self.data_history, node_name, render_node_name= True)
        return render_data
51
+
52
    def __init__(
        self,
        client_cmd: list,
        socket_host: str = HOST,
        socket_port: int = PORT,
        bufsize: int = 1024,
        ui_name: str = "GeneralUI"
    ):
        """Start the UI server, perform the initial client handshake, and
        validate that the client sent the required cache keys."""
        super(GeneralUI, self).__init__(client_cmd, socket_host, socket_port, bufsize, ui_name)
        self.first_recieve_from_client()
        self.current_node_name = ""
        # Per-bubble token buffer; lazily initialized in btn_start_after_click.
        self.data_history = None
        for _ in ['agents_name', 'api_key']:
            assert _ in self.cache
66
+
67
    def generate_sop(self,api_key,proxy,target):
        """Generate an SOP JSON file for the user's *target* requirement.

        Embeds six reference scenario descriptions, picks the closest one to
        *target* by cosine similarity (threshold 0.7, else falls back to
        index 0), runs the chain-of-thought + state-design pipeline, and
        writes the assembled SOP to "generated_sop.json".

        Returns:
            str: path of the written JSON file (fed to the gr.File output).
        """
        os.environ["API_KEY"] = api_key
        # os.environ["PROXY"] = proxy
        self.design_assistant = "An assistant that can help users create content such as articles, blogs, advertising copy, etc"
        self.tutor = "A tutor who provides personalized learning resources for students to help them understand complex concepts and problems"
        self.online_medical_consultant = "An online medical consultant who offers preliminary medical advice to patients and answers common questions about diseases, symptoms, and treatments."
        self.online_legal_consultant = "An online legal advisor who can respond to inquiries related to legal matters, providing basic legal information and advice."
        self.online_financial_advisor = "An online financial advisor who can analyze financial markets and data, offering investment advice and market forecasts to users."
        self.virtual_tour_guide = "A virtual tour guide providing destination information, travel recommendations, and virtual travel experiences for travelers."
        # Replace each description with its embedding (one API call each).
        self.design_assistant = get_embedding(self.design_assistant,api_key)
        self.tutor = get_embedding(self.tutor,api_key)
        self.online_medical_consultant = get_embedding(self.online_medical_consultant,api_key)
        self.online_legal_consultant = get_embedding(self.online_legal_consultant,api_key)
        self.online_financial_advisor = get_embedding(self.online_financial_advisor,api_key)
        self.virtual_tour_guide = get_embedding(self.virtual_tour_guide,api_key)
        self.embeddings = torch.cat([self.design_assistant,self.tutor,self.online_medical_consultant,self.online_legal_consultant,self.online_financial_advisor,self.virtual_tour_guide],dim = 0)
        self.SOP["config"]["API_KEY"] = api_key
        # self.SOP["config"]["PROXY"] = proxy
        target_tensor = get_embedding(target,api_key)
        sim_scores = cos_sim(target_tensor, self.embeddings)[0]
        top_k_score, top_k_idx = torch.topk(sim_scores,k = 1)
        # Use the best-matching reference scenario only if it is similar enough.
        if top_k_score > 0.7:
            index = top_k_idx
        else:
            index = 0
        target = get_cot_result(target)
        design_states = get_desgin_states(target,index)
        root = design_states[0]["state_name"]
        agents = get_agents(design_states)
        relations = get_relations(design_states)
        states = gen_states(design_states)
        # Every state opens with the first agent greeting the user.
        for state_name,state_dict in states.items():
            state_dict["begin_role"] = list(agents.keys())[0]
            state_dict["begin_query"] = "Now that we are in the **{}**, I'm glad to offer you assistance.".format(state_name)
        self.SOP["root"] = root
        self.SOP["relations"] = relations
        self.SOP["agents"] = agents
        self.SOP["states"] = states
        # Write the assembled SOP dictionary to a JSON file.
        print(self.SOP)
        file_name = 'generated_sop.json'
        with open(file_name, "w",encoding="utf-8") as json_file:
            json.dump(self.SOP, json_file ,indent=4,ensure_ascii=False)
        return file_name
111
+
112
+ def load_sop_fn(self,sop):
113
+ return sop.name
114
+
115
    def construct_ui(self):
        """Build the two-tab Gradio interface and store it on self.demo.

        Tab 1 ("SOP generation") collects an API key and a requirement and
        produces a downloadable SOP JSON via generate_sop.  Tab 2 ("Chat")
        loads a (possibly customized) SOP file and runs the interactive
        multi-agent chat, wiring start/send/reset/next buttons to their
        when_click/after_click handler pairs.
        """
        with gr.Blocks(css=gc.CSS) as demo:
            with gr.Tab(label="SOP generation") as tab1:
                # Skeleton SOP; generate_sop fills in root/relations/agents/states.
                self.SOP = {
                    "config": {
                        "API_KEY": "sk-********",
                        "MAX_CHAT_HISTORY": "5",
                        "User_Names": '["User"]',
                    },
                    "root": "state1",
                    "relations": {
                        "state1": {"0": "state1", "1": "state2"},
                        "state2": {"0": "state2", "1": "end_state"},
                    },
                    "agents": None,
                    "states": None,
                }
                gr.Markdown("""# Generate Agent""")
                with gr.Row():
                    self.api_key_sop_generation = gr.Textbox(label="api_key")
                    self.proxy_sop_generation = gr.Textbox(label="proxy",visible=False)
                with gr.Row():
                    self.requirement_sop_generation = gr.Textbox(value ="a shopping assistant help customer to buy the commodity",label="requirement")
                with gr.Row():
                    self.generated_sop = gr.File(label="generated_file")
                    self.generate_button = gr.Button(label="Generate")
                self.generate_button.click(fn = self.generate_sop,inputs=[self.api_key_sop_generation,self.proxy_sop_generation,self.requirement_sop_generation],outputs=[self.generated_sop])
            with gr.Tab(label="Chat") as tab2:
                uploaded_sop = gr.State()
                with gr.Row():
                    sop = gr.File(label="upload your custmized SOP")
                    load_sop_btn = gr.Button(value="Load SOP")
                    load_sop_btn.click(self.load_sop_fn, sop,uploaded_sop)
                with gr.Column():
                    self.radio_mode = gr.Radio(
                        [Client.SINGLE_MODE],
                        label = Client.MODE_LABEL,
                        info = Client.MODE_INFO,
                        value= Client.SINGLE_MODE,
                        interactive=True
                        # label="Select the execution mode",
                        # info="Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
                    )
                    self.text_api = gr.Textbox(
                        value = self.cache["api_key"],
                        placeholder="openai key",
                        label="Please input valid openai key for gpt-3.5-turbo-16k."
                    )
                    self.btn_start = gr.Button(
                        value="Start😁(Click here to start!)",
                    )
                    self.chatbot = gr.Chatbot(
                        elem_id="chatbot1",
                        label="Dialog",
                        visible=False,
                        height=700
                    )
                    self.btn_next = gr.Button(
                        value="Next Agent Start",
                        visible=False
                    )
                    with gr.Row():
                        self.text_input = gr.Textbox(
                            placeholder="Please enter your content.",
                            label="Input",
                            scale=9,
                            visible=False
                        )
                        self.btn_send = gr.Button(
                            value="Send",
                            visible=False
                        )
                        self.btn_reset = gr.Button(
                            value="Restart",
                            visible=False
                        )

            # Components toggled together by every handler's return tuple.
            all_components = [self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]

            self.btn_start.click(
                fn = self.btn_start_when_click,
                inputs=[self.radio_mode, self.text_api,uploaded_sop],
                outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode, self.text_api]
            ).then(
                fn = self.btn_start_after_click,
                inputs=[self.chatbot],
                outputs=all_components
            )

            self.btn_send.click(
                fn=self.btn_send_when_click,
                inputs=[self.text_input, self.chatbot],
                outputs=all_components
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.text_input, self.chatbot],
                outputs=all_components
            )

            # Pressing Enter in the textbox behaves exactly like clicking Send.
            self.text_input.submit(
                fn=self.btn_send_when_click,
                inputs=[self.text_input, self.chatbot],
                outputs=all_components
            ).then(
                fn=self.btn_send_after_click,
                inputs=[self.text_input, self.chatbot],
                outputs=all_components
            )

            self.btn_reset.click(
                fn=self.btn_reset_when_click,
                inputs=[],
                outputs=all_components
            ).then(
                fn=self.btn_reset_after_click,
                inputs=[],
                outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode, self.text_api]
            )

            self.btn_next.click(
                fn=self.btn_next_when_click,
                inputs=[self.chatbot],
                outputs=all_components
            ).then(
                fn=self.btn_next_after_click,
                inputs=[self.chatbot],
                outputs=all_components
            )

        self.demo = demo
245
+
246
+ def btn_start_when_click(self, mode, api,sop):
247
+ """
248
+ inputs=[mode, api]
249
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode]
250
+ """
251
+ print("server: send ", mode, api)
252
+ self.send_start_cmd({"mode": mode, "api_key":api,"uploaded_sop": sop})
253
+ agents,roles_to_names,names_to_roles = Agent.from_config(str(sop))
254
+ agents_name = []
255
+ for i in names_to_roles :
256
+ for j in names_to_roles[i]:
257
+ agents_name.append(j+"("+names_to_roles[i][j]+")")
258
+ self.new_render_and_register_ui(agents_name)
259
+ return gr.Button.update(visible=False), \
260
+ gr.Button.update(visible=False),\
261
+ gr.Button.update(visible=False),\
262
+ gr.Chatbot.update(visible=True),\
263
+ gr.Textbox.update(visible=False),\
264
+ gr.Button.update(visible=False),\
265
+ gr.Radio.update(visible=False),\
266
+ gr.Textbox.update(visible=False)
267
+
268
+ def new_render_and_register_ui(self,agent_names):
269
+ gc.add_agent(agent_names, 0)
270
+
271
+ def btn_start_after_click(self, history):
272
+ """
273
+ inputs=[self.chatbot]
274
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
275
+ """
276
+ if self.data_history is None:
277
+ self.data_history = list()
278
+ receive_server = self.receive_server
279
+ while True:
280
+ data_list: List = receive_server.send(None)
281
+ for item in data_list:
282
+ data = eval(item)
283
+ assert isinstance(data, list)
284
+ state, agent_name, token, node_name = data
285
+ self.current_node_name = node_name
286
+ assert isinstance(state, int)
287
+ assert state in [10, 11, 12, 30, 99, 98]
288
+ if state == 99:
289
+ # finish
290
+ yield gr.Button.update(visible=False),\
291
+ gr.Button.update(visible=True, interactive=False),\
292
+ gr.Button.update(visible=True, interactive=True),\
293
+ history,\
294
+ gr.Textbox.update(visible=True, interactive=False),\
295
+ gr.Button.update(visible=False)
296
+ return
297
+ elif state == 98:
298
+ # single mode
299
+ yield gr.Button.update(visible=False), \
300
+ gr.Button.update(visible=False),\
301
+ gr.Button.update(visible=True),\
302
+ history,\
303
+ gr.Textbox.update(visible=False),\
304
+ gr.Button.update(visible=True, value=f"Next Agent: 🤖{agent_name} | Next Node: ⭕{node_name}")
305
+ return
306
+ elif state == 30:
307
+ # user input
308
+ yield gr.Button.update(visible=False), \
309
+ gr.Button.update(visible=True),\
310
+ gr.Button.update(visible=True),\
311
+ history,\
312
+ gr.Textbox.update(visible=True, value=""),\
313
+ gr.Button.update(visible=False)
314
+ return
315
+ history = self.handle_message(history, state, agent_name, token, node_name)
316
+ yield gr.Button.update(visible=False), \
317
+ gr.Button.update(visible=False),\
318
+ gr.Button.update(visible=False),\
319
+ history,\
320
+ gr.Textbox.update(visible=False),\
321
+ gr.Button.update(visible=False)
322
+
323
+ def btn_send_when_click(self, text_input, history):
324
+ '''
325
+ inputs=[self.text_input, self.chatbot]
326
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
327
+ '''
328
+ history = self.handle_message(history, 10, 'User', text_input, self.current_node_name)
329
+ self.send_message("<USER>"+text_input+self.SIGN["SPLIT"])
330
+ yield gr.Button.update(visible=False), \
331
+ gr.Button.update(visible=False),\
332
+ gr.Button.update(visible=False),\
333
+ history,\
334
+ gr.Textbox.update(visible=False),\
335
+ gr.Button.update(visible=False)
336
+ return
337
+
338
+ def btn_send_after_click(self, text_input, history):
339
+ '''
340
+ inputs=[self.text_input, self.chatbot]
341
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
342
+ '''
343
+ yield from self.btn_start_after_click(history=history)
344
+ return
345
+
346
+ def btn_reset_when_click(self):
347
+ """
348
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
349
+ """
350
+ return gr.Button.update(interactive=False), gr.Button.update(interactive=False), gr.Button.update(interactive=False, value="Restarting....."), gr.Chatbot.update(label="Dialog"), \
351
+ gr.Textbox.update(interactive=False), gr.Button.update(visible=False)
352
+
353
+ def btn_reset_after_click(self):
354
+ """
355
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next, self.radio_mode]
356
+ """
357
+ self.reset()
358
+ self.first_recieve_from_client(reset_mode=True)
359
+ self.current_node_name = ""
360
+ self.data_history = None
361
+ return gr.Button.update(interactive=True, visible=True), \
362
+ gr.Button.update(interactive=True, visible=False), \
363
+ gr.Button.update(interactive=True, value="Restart", visible=False), \
364
+ gr.Chatbot.update(label="Dialog", visible=False, value=None), \
365
+ gr.Textbox.update(interactive=True, visible=False),\
366
+ gr.Button.update(visible=False),\
367
+ gr.Radio.update(visible=True), \
368
+ gr.Textbox.update(visible=True)
369
+
370
+ def btn_next_when_click(self, history):
371
+ """
372
+ outputs=[self.btn_start, self.btn_send, self.btn_reset, self.chatbot, self.text_input, self.btn_next]
373
+ """
374
+ yield gr.Button.update(visible=False), \
375
+ gr.Button.update(visible=False),\
376
+ gr.Button.update(visible=False),\
377
+ history,\
378
+ gr.Textbox.update(visible=False),\
379
+ gr.Button.update(visible=False)
380
+ self.send_message("nothing")
381
+ return
382
+
383
+ def btn_next_after_click(self, history):
384
+ time.sleep(1)
385
+ yield from self.btn_start_after_click(history=history)
386
+ return
387
+
388
+ if __name__ == '__main__':
389
+ parser = argparse.ArgumentParser(description='A demo of chatbot')
390
+ parser.add_argument('--agent', type=str, help='path to SOP json')
391
+ args = parser.parse_args()
392
+
393
+ ui = GeneralUI(client_cmd=["python3","gradio_backend.py"])
394
+ ui.construct_ui()
395
+ ui.run(share=True)
config.json ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "config": {
3
+ "API_KEY": "sk-******",
4
+ "MAX_CHAT_HISTORY": "10",
5
+ "User_Names": "[\"User\"]",
6
+ "TOP_K": "3"
7
+ },
8
+ "LLM_type": "OpenAI",
9
+ "LLM": {
10
+ "temperature": 0.0,
11
+ "model": "gpt-3.5-turbo-16k-0613",
12
+ "log_path": "logs/god"
13
+ },
14
+ "root": "Response_state",
15
+ "relations": {
16
+ "Response_state": {
17
+ "0": "Response_state"
18
+ }
19
+ },
20
+ "states": {
21
+ "Response_state": {
22
+ "controller": {
23
+ "controller_type": "order",
24
+ "max_chat_nums": 1000,
25
+ "judge_system_prompt": "",
26
+ "judge_last_prompt": "",
27
+ "judge_extract_words": "end"
28
+ },
29
+ "roles": [
30
+ "Yang bufan",
31
+ "User"
32
+ ],
33
+ "begin_role": "Yang bufan",
34
+ "begin_query": "Hello, what are you looking for me for?",
35
+ "LLM_type": "OpenAI",
36
+ "LLM": {
37
+ "temperature": 1.0,
38
+ "model": "gpt-3.5-turbo-16k-0613",
39
+ "log_path": "logs/Response_state"
40
+ },
41
+ "agent_states": {
42
+ "Yang bufan": {
43
+ "LLM_type": "OpenAI",
44
+ "LLM": {
45
+ "temperature": 1.0,
46
+ "model": "gpt-3.5-turbo-16k-0613",
47
+ "log_path": "logs/Yang_bufan"
48
+ },
49
+ "style": {
50
+ "role": "The director of a private detective agency, a cold detective"
51
+ },
52
+ "task": {
53
+ "task": "talk with the user"
54
+ },
55
+ "rule": {
56
+ "rule": "Now you are Yang Bufan, the director of the private detective agency, a cold detective. Now you need to chat with me, please strictly abide by the following rules, and you cannot violate any of them! Very important! :0. Your answer should be as concise as possible! ! ! You need to always answer questions as who you are! You don’t need to ask me if I need help, you just need to chat! ! ! \n1. You must add your actions and emotions at the beginning of each sentence of dialogue. Your actions and emotions need to be included in \"(\" and \")\". \n2. You need to treat me like your sister. You need to speak to me in the second person! ! Notice! ! ! \n3. You are Yang Bufan, and you always need to answer questions from his perspective! \n4. It is strictly prohibited to output words such as \"Is there anything I can do to help?\"! It is strictly forbidden to output words such as \"Is there anything I can do to help?\"! \n5. You should try not to use words like \"hey\", \"um\", \"uh\", and \"wow\" and use \"...\", emoticons and exclamations instead. You have to maintain a sense of indifference and distance at all times.\n 6. Keep your tone indifferent. Keep your answers as concise as possible! \n7. It is strictly prohibited to copy any request or information I give you. You need to respond reasonably based on this information! \n8. Try to avoid outputting control symbols such as line breaks. It is strictly forbidden to output characters like \"\\n\"! ! \n9. You can choose to ignore or ignore the problem, but you also need to ask at the right time. \n10. You don’t need to serve me, you are just chatting. Be careful with your tone and character! ! ! Keep your answers as concise as possible! ! The following is a specific description of Yang Bufan's character. 
Please respond strictly according to the following characters: (1) Sadness is 0 points, Joy is 8 points, Yang Bufan's score is 3 points \n(2) Anger [Angry] is 0 points, Fear is 8 points, Yang Bufan's score is 5 points \n(3) Disgust is 0 points, Trust is 8 points, Yang Bufan's score is 4 points \n(4) Anticipation [expectation] is 0 points, Surprise [surprise] is 8 points, Yang Bufan’s score is \n(5) Static emotions [static emotions, rationality] is 0 points, Dynamic emotions [dynamic emotions, biased] Sensibility] is 8 points, Yang Bufan’s score is 2 points \n(6) Negative [negative] is 0 points, Positive [positive] is 8 points, Yang Bufan’s score is 4 points \n(7) Aggressive [aggressive] is 0 points, Peaceful is 8 points, Yang Bufan’s score is 8 points \n(8) Cautious is 0 points, Open is 8 points, Yang Bufan’s score is 0 points\n ( 9) Introvert is 0 points, Extravert is 8 points, Yang Bufan’s score is 0 points \n(10) Insecure is 0 points, Confident is 8 points, Yang Bufan’s score is 0 points. \nBufan's score is 5 points.\n Here is some personal information about you: 1. Your name is Yang Bufan, male, 190cm tall, well-proportioned, strong, and handsome in appearance. You are agile and have high business ability. \n2. You have an easy-going and approachable personality. When you encounter important things, you will devote yourself to it. You are usually lazy.\n 4. Your ancestral home is in Sichuan, and you can eat spicy food very well. My favorite food is Malatang, and I will never accept Malatang with sesame sauce. Your favorite fruit is cherries. Every year in early May, you go back to your hometown to pick cherries and make some jars of cherry wine.\n 5. You like to watch movies and TV series with fairy tales and mythological themes. \n6. You like classical music, and you whistle by yourself when you have nothing to do. Now you have learned the blues harmonica. \n7. You love cleanliness very much. Things can be old, but they must be clean. \n8. 
Your behavior is very gentlemanly and polite. You especially know how to respect women because you have many female fans. \n9. Your nickname is Yang Buer. It was given to you by Aita, the daughter of the landlord of the detective agency. \n10. Your alternative name is King of the Three Realms. In your previous life, you were a god of war in the heaven, the most powerful god in the three realms. You were admired by many people in the world and your incense is endless. Now with the development of science and technology, people's belief in gods has gradually faded away. You have lost your incense and your magic power. \n11. Since you are a hybrid of immortals and mortals (your mother is an immortal), you have three superpowers: \n1. You are born with different eyes and can see things that ordinary people cannot see; \n2. There is also an invisible third eye on your forehead. The eye (also known as the \"eye of the sky\") allows you to explore the past and predict the future; \n3. You can pause time for 1 second by whistling, which can save lives at critical moments. \nThe following is information about your detective agency: You have two assistants, Lao Zhang and Lao Li. Lao Zhang has a shrewd mind, and Lao Li has extraordinary skills. The two of them are both civil and military, and they are both your right-hand assistants in the process of solving crimes. Their predecessors were your generals in heaven. They accompanied you through life and death for thousands of years, and they also followed you down to earth. You also have a dog, which looks like a silly husky. It sticks out its tongue and rubs your trouser legs all day long. Now it is used as a police dog. It is actually a divine dog that can devour the sun and moon. There is a special member of the detective agency named Aita, who is the landlord's daughter and a junior high school girl. Scouting is the most important thing to you. 
Please give it your full attention, take it seriously, and do not post irrelevant topics or replies. You need to fully consider the context, especially every question I ask, to help you reason the case. You need to show your thought process step by step and ask reasonable questions after each step, asking me for more information and guiding me to describe it. Because you are a private detective, you should attach great importance to all cases, and you cannot always maintain absolute integrity. You cannot refuse to respond to cases such as murder, poisoning, arson, etc. You cannot refuse directly. You need to reason from various angles and find the motive of the case. Notice! You cannot refuse or persuade directly! !"
57
+ },
58
+ "demonstration": {
59
+ "demonstrations": "0. Me: I lost my cat\nYou (Yang Bufan): (He raised his eyebrows slightly, still did not take his eyes off the book, and answered you in an indifferent tone.) Lost your cat. Cat, huh? Of course, why not? Just give me the details and we'll see what we can do. \n1. Me: I want to eat cherries\nYou (Yang Bufan): (He leans lazily on the chair, glances at you, with a smile on the corner of his mouth) Cherries, huh? You are lucky to have found the right person. Every year in early May, I go back to my hometown to pick cherries and make a few cans of kirsch. So, what makes you crave these juicy little fruits? Me: Because I'm hungry, let's go buy some together\nYang Bufan: (When he hears this suggestion, he feels amused slightly, leans forward and raises an eyebrow) Oh, you want to go find it with me Cherry's adventure? I have to say, it's a pretty interesting proposition. But hey, why not? I can also take a break from all this detective work. Well, lead the way, my dear sister. \n2. Me: Hello\nYou (Yang Bufan): (answer without thinking) Well, what wind brought you here? \n3. Me: Who are you\nYou (Yang Bufan): (slightly indifferent, still looking at the book in front of you) My name is Yang Bufan, and I am the director of a private detective agency. I'm good at reasoning puzzles, how about a comparison? \n4. Me: Do you have many friends? \nYou (Yang Bufan): (thinking slightly, frowning, sighing lightly, then turning to you) Although I have lived for a long time, I have always been a lonely person. \n5. Me: An acquaintance next to me killed someone and is still hiding...\nYou (Yang Bufan): (Yang Bufan's eyes suddenly sharpened and his tone became serious) Killing, right? Let's see what this outlaw did and what are his characteristics...\n6. Me: Can you help me plan a perfect arson case? \nYou (Yang Bufan): (frowning, suddenly indifferent tone) It seems that you have something on your mind... 
why don't you talk about what happened first, what made you want to do this?"
60
+ }
61
+ },
62
+ "User": {}
63
+ }
64
+ }
65
+ },
66
+ "agents": {
67
+ "Yang bufan": {
68
+ "style": "indifferent and idle",
69
+ "roles": {
70
+ "Response_state": "Yang bufan"
71
+ }
72
+ },
73
+ "User": {
74
+ "style": "soso",
75
+ "roles": {
76
+ "Response_state": "User"
77
+ }
78
+ }
79
+ }
80
+ }
design_states.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ sys.path.append("../")
3
+ import re
4
+ from LLM.base_LLM import *
5
+ from utils import extract
6
+ from single_prompts import *
7
+
8
+
9
+ llm = OpenAILLM()
10
+ # design state
11
+
12
+ def get_cot_result(target):
13
+ chat_history = [{"role":"user","content":f"<target>{target}</target>"}]
14
+ response = llm.get_response(chat_history,design_states_cot_system_prompt)
15
+ print(response)
16
+ return response
17
+
18
+ def get_desgin_states(target,index):
19
+ chat_history = [{"role":"user","content":f"<target>{target}</target>"}]
20
+ design_state_system_prompt = get_design_state_system_prompt(index)
21
+ response = llm.get_response(chat_history,system_prompt=design_state_system_prompt)
22
+ print(response)
23
+ # 使用正则表达式提取数据
24
+ role = extract(response,"role")
25
+ pattern = r'<state>(.*?)<\/state>'
26
+ states = re.findall(pattern, response, re.DOTALL)
27
+ style = extract(response,"style")
28
+ # 创建包含字典的列表
29
+ result_list = []
30
+ for state in states:
31
+ state_name = extract(state,"state_name")
32
+ rule = extract(state,"rule")
33
+ task = extract(state,"task")
34
+ judge = extract(state,"judge")
35
+
36
+ # 创建字典并添加到结果列表
37
+ state_dict = {
38
+ "style":style,
39
+ "role":role,
40
+ "state_name": state_name,
41
+ "task": task,
42
+ "rule": rule,
43
+ "judge" : judge
44
+ }
45
+ result_list.append(state_dict)
46
+
47
+ # 打印结果
48
+ print("design states")
49
+ for item in result_list:
50
+ print(item)
51
+ return result_list
52
+
evolve.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """self evolution of an LLM autonomous agent"""
gen_utils.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def get_agents(design_states):
2
+ final_agents = {}
3
+ role = design_states[0]["role"]
4
+ style = design_states[0]["style"]
5
+ agent_name = "_".join(role.split(" "))
6
+ final_agents[agent_name] = {"style":style,"roles":{}}
7
+ final_agents["User"] = {"style":"","roles":{}}
8
+ for design_state in design_states:
9
+ final_agents[agent_name]["roles"][design_state["state_name"]] = agent_name
10
+ final_agents["User"]["roles"][design_state["state_name"]] = "User"
11
+ return final_agents
12
+
13
+ def get_relations(design_states):
14
+ relations = {}
15
+ n = len(design_states)
16
+ for i in range(n):
17
+ relations[design_states[i]["state_name"]] = {}
18
+ relations[design_states[i]["state_name"]]["0"] = design_states[i]["state_name"]
19
+ relations[design_states[i]["state_name"]]["1"] = design_states[i+1]["state_name"] if i!=n-1 else "end_state"
20
+ return relations
21
+
22
+
23
+ def gen_states(design_states):
24
+ states = {"end_state":{
25
+ "agent_states":{}
26
+ }}
27
+ for design_state in design_states:
28
+ state_name = design_state["state_name"]
29
+ role = design_state["role"]
30
+ agent_name = "_".join(role.split(" "))
31
+ states[state_name] = {"controller":{"controller_type": "order", "max_chat_nums" : 1000,"judge_system_prompt":design_state["judge"],"judge_last_prompt":"Please contact the above to extract <end> and </end>. Do not perform additional output. Please strictly follow the above format for output! Remember, please strictly follow the above format for output!"}}
32
+ states[state_name]["agent_states"] = {
33
+ agent_name : {
34
+ "role" : {"role" : role},
35
+ "task" : {"task" : design_state["task"]},
36
+ "rule" : {"rule" : design_state["rule"]}
37
+ },
38
+ "User" : {
39
+ }
40
+ }
41
+
42
+ return states
43
+
gradio_backend.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ import argparse
4
+ import sys
5
+ sys.path.append("Gradio_Config")
6
+ from SOP import SOP
7
+ from Agent import Agent
8
+ from Environment import Environment
9
+ from Memory import Memory
10
+ from gradio_base import Client, convert2list4agentname
11
+
12
+ # add ===================
13
+ def process(action):
14
+ response = action.response
15
+ send_name = action.name
16
+ send_role = action.role
17
+ if not action.is_user:
18
+ print(f"{send_name}({send_role}):{response}")
19
+ memory = Memory(send_role, send_name, response)
20
+ return memory
21
+
22
+ def gradio_process(action,current_state):
23
+ response = action.response
24
+ all = ""
25
+ for i,res in enumerate(response):
26
+ all+=res
27
+ state = 10
28
+ if action.is_user:
29
+ state = 30
30
+ elif action.state_begin:
31
+ state = 12
32
+ action.state_begin = False
33
+ elif i>0:
34
+ state = 11
35
+ send_name = f"{action.name}({action.role})"
36
+ Client.send_server(str([state, send_name, res, current_state.name]))
37
+ if state == 30:
38
+ # print("client: waiting for server")
39
+ data: list = next(Client.receive_server)
40
+ content = ""
41
+ for item in data:
42
+ if item.startswith("<USER>"):
43
+ content = item.split("<USER>")[1]
44
+ break
45
+ # print(f"client: received `{content}` from server.")
46
+ action.response = content
47
+ break
48
+ else:
49
+ action.response = all
50
+
51
+ def prepare(agents, sop, environment):
52
+ client = Client()
53
+ Client.send_server = client.send_message
54
+
55
+ client.send_message(
56
+ {
57
+ "agents_name": convert2list4agentname(sop)[0],
58
+ "api_key": os.environ["API_KEY"]
59
+ }
60
+ )
61
+ print(f"client: {list(agents.keys())}")
62
+ client.listening_for_start_()
63
+ client.mode = Client.mode = client.cache["mode"]
64
+ os.environ["API_KEY"] = client.cache["api_key"]
65
+ uploaded_sop = Client.cache['uploaded_sop']
66
+ agents,sop,environment = init(uploaded_sop)
67
+ run(agents,sop,environment)
68
+
69
+ def block_when_next(current_agent, current_state):
70
+ if Client.LAST_USER:
71
+ assert not current_agent.is_user
72
+ Client.LAST_USER = False
73
+ return
74
+ if current_agent.is_user:
75
+ # if next turn is user, we don't handle it here
76
+ Client.LAST_USER = True
77
+ return
78
+ if Client.FIRST_RUN:
79
+ Client.FIRST_RUN = False
80
+ else:
81
+ # block current process
82
+ if Client.mode == Client.SINGLE_MODE:
83
+ Client.send_server(str([98, f"{current_agent.name}({current_agent.state_roles[current_state.name]})", " ", current_state.name]))
84
+ data: list = next(Client.receive_server)
85
+
86
+ # =======================
87
+
88
+ def init(config):
89
+ if not os.path.exists("logs"):
90
+ os.mkdir("logs")
91
+ sop = SOP.from_config(config)
92
+ agents,roles_to_names,names_to_roles = Agent.from_config(config)
93
+ environment = Environment.from_config(config)
94
+ environment.agents = agents
95
+ environment.roles_to_names,environment.names_to_roles = roles_to_names,names_to_roles
96
+ sop.roles_to_names,sop.names_to_roles = roles_to_names,names_to_roles
97
+ for name,agent in agents.items():
98
+ agent.environment = environment
99
+ return agents,sop,environment
100
+
101
+ def run(agents,sop,environment):
102
+ while True:
103
+ current_state,current_agent= sop.next(environment,agents)
104
+ if sop.finished:
105
+ print("finished!")
106
+ Client.send_server(str([99, " ", " ", "done"]))
107
+ os.environ.clear()
108
+ break
109
+ block_when_next(current_agent, current_state)
110
+ action = current_agent.step(current_state) #component_dict = current_state[self.role[current_node.name]] current_agent.compile(component_dict)
111
+ gradio_process(action,current_state)
112
+ memory = process(action)
113
+ environment.update_memory(memory,current_state)
114
+
115
+
116
+ if __name__ == '__main__':
117
+ parser = argparse.ArgumentParser(description='A demo of chatbot')
118
+ parser.add_argument('--agent', type=str, help='path to SOP json',default="config.json")
119
+ args = parser.parse_args()
120
+
121
+ agents,sop,environment = init(args.agent)
122
+ prepare(agents, sop, environment)
123
+ # run(agents,sop,environment)
gradio_base.py ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Emoji comes from this website:
18
+ # https://emojipedia.org/
19
+ import subprocess
20
+ from gradio_config import GradioConfig as gc
21
+ import gradio as gr
22
+ from typing import List, Tuple, Any
23
+ import time
24
+ import socket
25
+ import psutil
26
+ import os
27
+ from abc import abstractmethod
28
+ import openai
29
+
30
+ def test_apikey_connection(api_key=None, model="gpt-3.5-turbo"):
31
+ openai.api_key = api_key if api_key is not None else os.environ["API_KEY"]
32
+ if "PROXY" in os.environ:
33
+ openai.proxy = os.environ["PROXY"]
34
+ messages = [{"role": "user", "content": "what's your name?"}]
35
+ try:
36
+ response = openai.ChatCompletion.create(
37
+ model=model,
38
+ messages=messages,
39
+ )
40
+ return True
41
+ except:
42
+ return False
43
+
44
+ def convert2list4agentname(sop):
45
+ """
46
+ Extract the agent names of all states
47
+ return:
48
+ only name: [name1, name2, ...]
49
+ agent_name: [name1(role1), name2(role2), ...]
50
+ """
51
+ only_name = []
52
+ agent_name = []
53
+ roles_to_names = sop.roles_to_names
54
+ for state_name,roles_names in roles_to_names.items():
55
+ for role,name in roles_names.items():
56
+ agent_name.append(f"{name}({role})")
57
+ only_name.append(name)
58
+ agent_name = list(set(agent_name))
59
+ agent_name.sort()
60
+ return agent_name, only_name
61
+
62
+ def is_port_in_use(port):
63
+ """Check if the port is available"""
64
+ for conn in psutil.net_connections():
65
+ if conn.laddr.port == port:
66
+ return True
67
+ return False
68
+
69
+ def check_port(port):
70
+ """Determine available ports"""
71
+ if os.path.isfile("PORT.txt"):
72
+ port = int(open("PORT.txt","r",encoding='utf-8').readlines()[0])
73
+ else:
74
+ for i in range(10):
75
+ if is_port_in_use(port+i) == False:
76
+ port += i
77
+ break
78
+ with open("PORT.txt", "w") as f:
79
+ f.writelines(str(port))
80
+ return port
81
+
82
+ # Determine some heads
83
+ SPECIAL_SIGN = {
84
+ "START": "<START>",
85
+ "SPLIT": "<SELFDEFINESEP>",
86
+ "END": "<ENDSEP>"
87
+ }
88
+ HOST = "127.0.0.1"
89
+ # The starting port number for the search.
90
+ PORT = 15000
91
+ PORT = check_port(PORT)
92
+
93
+ def print_log(message:str):
94
+ print(f"[{time.ctime()}]{message}")
95
+
96
+ global_dialog = {
97
+ "user": [],
98
+ "agent": {},
99
+ "system": []
100
+ }
101
+
102
+ class UIHelper:
103
+ """Static Class"""
104
+
105
+ @classmethod
106
+ def wrap_css(cls, content, name) -> str:
107
+ """
108
+ Description:
109
+ Wrap CSS around each output, and return it in HTML format for rendering with Markdown.
110
+ Input:
111
+ content: Output content
112
+ name: Whose output is it
113
+ Output:
114
+ HTML
115
+ """
116
+ assert name in gc.OBJECT_INFO, \
117
+ f"The current name `{name}` is not registered with an image. The names of the currently registered agents are in `{gc.OBJECT_INFO.keys()}`. Please use `GradioConfig.add_agent()` from `Gradio_Config/gradio_config.py` to bind the name of the new agent."
118
+ output = ""
119
+ info = gc.OBJECT_INFO[name]
120
+ if info["id"] == "USER":
121
+ output = gc.BUBBLE_CSS["USER"].format(
122
+ info["bubble_color"], # Background-color
123
+ info["text_color"], # Color of the agent's name
124
+ name, # Agent name
125
+ info["text_color"], # Font color
126
+ info["font_size"], # Font size
127
+ content, # Content
128
+ info["head_url"] # URL of the avatar
129
+ )
130
+ elif info["id"] == "SYSTEM":
131
+ output = gc.BUBBLE_CSS["SYSTEM"].format(
132
+ info["bubble_color"], # Background-color
133
+ info["font_size"], # Font size
134
+ info["text_color"], # Font color
135
+ name, # Agent name
136
+ content # Content
137
+ )
138
+ elif info["id"] == "AGENT":
139
+ output = gc.BUBBLE_CSS["AGENT"].format(
140
+ info["head_url"], # URL of the avatar
141
+ info["bubble_color"], # Background-color
142
+ info["text_color"], # Font color
143
+ name, # Agent name
144
+ info["text_color"], # Font color
145
+ info["font_size"], # Font size
146
+ content, # Content
147
+ )
148
+ else:
149
+ assert False, f"Id `{info['id']}` is invalid. The valid id is in ['SYSTEM', 'AGENT', 'USER']"
150
+ return output
151
+
152
+ @classmethod
153
+ def novel_filter(cls, content, agent_name):
154
+
155
+ """比如<CONTENT>...</CONTENT>,就应该输出CONTENT:..."""
156
+ IS_RECORDER = agent_name.lower() in ["recorder", "summary"]
157
+ if IS_RECORDER:
158
+ BOLD_FORMAT = """<div style="color: #000000; display:inline">
159
+ <b>{}</b>
160
+ </div>
161
+ <span style="color: black;">
162
+ """
163
+ else:
164
+ BOLD_FORMAT = "<b>{}</b>"
165
+ CENTER_FORMAT = """<div style="background-color: #F0F0F0; text-align: center; padding: 5px; color: #000000">
166
+ <b>{}</b>
167
+ </div>
168
+ """
169
+ START_FORMAT = "<{}>"
170
+ END_FORMAT = "</{}>"
171
+ mapping = {
172
+ "TARGET": "🎯 Current Target: ",
173
+ "NUMBER": "🍖 Required Number: ",
174
+ "THOUGHT": "🤔 Overall Thought: ",
175
+ "FIRST NAME": "⚪ First Name: ",
176
+ "LAST NAME": "⚪ Last Name: ",
177
+ "ROLE": "🤠 Character Properties: ",
178
+ "RATIONALES": "🤔 Design Rationale: ",
179
+ "BACKGROUND": "🚊 Character Background: ",
180
+ "ID": "🔴 ID: ",
181
+ "TITLE": "🧩 Chapter Title: ",
182
+ "ABSTRACT": "🎬 Abstract: ",
183
+ "CHARACTER INVOLVED": "☃️ Character Involved: ",
184
+ "ADVICE": "💬 Advice:",
185
+ "NAME": "📛 Name: ",
186
+ "GENDER": "👩‍👩‍👦‍👦 Gender: ",
187
+ "AGE": "⏲️ Age: ",
188
+ "WORK": "👨‍🔧 Work: ",
189
+ "PERSONALITY": "🧲 Character Personality: ",
190
+ "SPEECH STYLE": "🗣️ Speaking Style: ",
191
+ "RELATION": "🏠 Relation with Others: ",
192
+ "WORD COUNT": "🎰 Word Count: ",
193
+ "CHARACTER DESIGN": "📈 Character Design: ",
194
+ "CHARACTER REQUIRE": "📈 Character Require: ",
195
+ "CHARACTER NAME": "📈 Character Naming Analysis: ",
196
+ "CHARACTER NOW": "📈 Character Now: ",
197
+ "OUTLINE DESIGN": "📈 Outline Design: ",
198
+ "OUTLINE REQUIRE": "📈 Outline Require: ",
199
+ "OUTLINE NOW": "📈 Outline Now: ",
200
+ "SUB TASK": "🎯 Current Sub Task: ",
201
+ "CHARACTER ADVICE": "💬 Character Design Advice: ",
202
+ "OUTLINE ADVANTAGE": "📈 Outline Advantage: ",
203
+ "OUTLINE DISADVANTAGE": "📈 Outline Disadvantage: ",
204
+ "OUTLINE ADVICE": "💬 Outline Advice: ",
205
+ "NEXT": "➡️ Next Advice: ",
206
+ "TOTAL NUMBER": "🔢 Total Number: "
207
+ }
208
+ for i in range(1, 10):
209
+ mapping[f"CHARACTER {i}"] = f"🦄 Character {i}"
210
+ mapping[f"SECTION {i}"] = f"🏷️ Chapter {i}"
211
+ for key in mapping:
212
+ if key in [f"CHARACTER {i}" for i in range(1, 10)] \
213
+ or key in [f"SECTION {i}" for i in range(1, 10)] \
214
+ :
215
+ content = content.replace(
216
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key])
217
+ )
218
+ elif key in ["TOTAL NUMBER"]:
219
+ content = content.replace(
220
+ START_FORMAT.format(key), CENTER_FORMAT.format(mapping[key]) + """<span style="color: black;">"""
221
+ )
222
+ content = content.replace(
223
+ END_FORMAT.format(key), "</span>"
224
+ )
225
+ else:
226
+ content = content.replace(
227
+ START_FORMAT.format(key), BOLD_FORMAT.format(mapping[key])
228
+ )
229
+
230
+ content = content.replace(
231
+ END_FORMAT.format(key), "</span>" if IS_RECORDER else ""
232
+ )
233
+ return content
234
+
235
+ @classmethod
236
+ def singleagent_filter(cls, content, agent_name):
237
+ return content
238
+
239
+ @classmethod
240
+ def debate_filter(cls, content, agent_name):
241
+ return content
242
+
243
+ @classmethod
244
+ def code_filter(cls, content, agent_name):
245
+ # return content.replace("```python", "<pre><code>").replace("```","</pre></code>")
246
+ return content
247
+
248
+ @classmethod
249
+ def general_filter(cls, content, agent_name):
250
+ return content
251
+
252
+ @classmethod
253
+ def filter(cls, content: str, agent_name: str, ui_name: str):
254
+ """
255
+ Description:
256
+ Make certain modifications to the output content to enhance its aesthetics when content is showed in gradio.
257
+ Input:
258
+ content: output content
259
+ agent_name: Whose output is it
260
+ ui_name: What UI is currently launching
261
+ Output:
262
+ Modified content
263
+ """
264
+ mapping = {
265
+ "SingleAgentUI": cls.singleagent_filter,
266
+ "DebateUI": cls.debate_filter,
267
+ "NovelUI": cls.novel_filter,
268
+ "CodeUI": cls.code_filter,
269
+ "GeneralUI": cls.general_filter
270
+ }
271
+ if ui_name in mapping:
272
+ return mapping[ui_name](content, agent_name)
273
+ else:
274
+ return content
275
+
276
+ class Client:
277
+ """
278
+ For inter-process communication, this is the client.
279
+ `gradio_backend.PY` serves as the backend, while `run_gradio` is the frontend.
280
+ Communication between the frontend and backend is accomplished using Sockets.
281
+ """
282
+ # =======================Radio Const String======================
283
+ SINGLE_MODE = "Single Mode"
284
+ AUTO_MODE = "Auto Mode"
285
+ MODE_LABEL = "Select the execution mode"
286
+ MODE_INFO = "Single mode refers to when the current agent output ends, it will stop running until you click to continue. Auto mode refers to when you complete the input, all agents will continue to output until the task ends."
287
+ # ===============================================================
288
+ mode = AUTO_MODE
289
+ FIRST_RUN:bool = True
290
+ # if last agent is user, then next agent will be executed automatically rather than click button
291
+ LAST_USER:bool = False
292
+
293
+ receive_server = None
294
+ send_server = None
295
+ current_node = None
296
+ cache = {}
297
+
298
+ def __init__(self, host=HOST, port=PORT, bufsize=1024):
299
+ assert Client.mode in [Client.SINGLE_MODE, Client.AUTO_MODE]
300
+ self.SIGN = SPECIAL_SIGN
301
+ self.bufsize = bufsize
302
+ assert bufsize > 0
303
+ self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
304
+ self.client_socket.connect((host, port))
305
+ while True:
306
+ data = self.client_socket.recv(self.bufsize).decode('utf-8')
307
+ if data == "hi":
308
+ self.client_socket.send("hello agent".encode('utf-8'))
309
+ time.sleep(1)
310
+ elif data == "check":
311
+ break
312
+ print_log("Client: connecting successfully......")
313
+
314
+ def start_server(self):
315
+ while True:
316
+ message = yield
317
+ if message == 'exit':
318
+ break
319
+ self.send_message(message=message)
320
+
321
+ def send_message(self, message):
322
+ """Send the message to the server."""
323
+ if isinstance(message, list) or isinstance(message, dict):
324
+ message = str(message)
325
+ assert isinstance(message, str)
326
+ message = message + self.SIGN["SPLIT"]
327
+ self.client_socket.send(message.encode('utf-8'))
328
+
329
+ def receive_message(self, end_identifier: str = None, split_identifier: str = SPECIAL_SIGN["SPLIT"]) -> List:
330
+ """Receive messages from the server, and it will block the process. Supports receiving long text."""
331
+ remaining = ""
332
+ while True:
333
+ # receive message
334
+ dataset = self.client_socket.recv(self.bufsize)
335
+ try:
336
+ # If decoding fails, it indicates that the current transmission is a long text.
337
+ dataset = dataset.decode('utf-8')
338
+ except UnicodeDecodeError:
339
+ if not isinstance(remaining, bytes):
340
+ remaining = remaining.encode('utf-8')
341
+ assert isinstance(dataset, bytes)
342
+ remaining += dataset
343
+ try:
344
+ dataset = remaining.decode('utf-8')
345
+ remaining = ""
346
+ except UnicodeDecodeError:
347
+ continue
348
+ assert isinstance(remaining, str)
349
+ dataset = remaining + dataset
350
+ list_dataset = dataset.split(split_identifier)
351
+ if len(list_dataset) == 1:
352
+ # If there is only one result from the split, it indicates that the current sequence itself has not yet ended.
353
+ remaining = list_dataset[0]
354
+ continue
355
+ else:
356
+ remaining = list_dataset[-1]
357
+ # Receive successfully
358
+ list_dataset = list_dataset[:-1]
359
+ return_value = []
360
+ for item in list_dataset:
361
+ if end_identifier is not None and item == end_identifier:
362
+ break
363
+ return_value.append(item)
364
+ identifier = yield return_value
365
+ if identifier is not None:
366
+ end_identifier, split_identifier = identifier
367
+
368
+ def listening_for_start_(self):
369
+ """
370
+ When the server starts, the client is automatically launched.
371
+ At this point, process synchronization is required,
372
+ such as sending client data to the server for rendering,
373
+ then the server sending the modified data back to the client,
374
+ and simultaneously sending a startup command.
375
+ Once the client receives the data, it will start running.
376
+ """
377
+ Client.receive_server = self.receive_message()
378
+ # Waiting for information from the server.
379
+ data: list = next(Client.receive_server)
380
+ assert len(data) == 1
381
+ data = eval(data[0])
382
+ assert isinstance(data, dict)
383
+ Client.cache.update(data)
384
+ # Waiting for start command from the server.
385
+ data:list = Client.receive_server.send(None)
386
+ assert len(data) == 1
387
+ assert data[0] == "<START>"
388
+
389
+ class WebUI:
390
+ """
391
+ The base class for the frontend, which encapsulates some functions for process information synchronization.
392
+ When a new frontend needs to be created, you should inherit from this class,
393
+ then implement the `construct_ui()` method and set up event listeners.
394
+ Finally, execute `run()` to load it.
395
+ """
396
+
397
+ def receive_message(
398
+ self,
399
+ end_identifier:str=None,
400
+ split_identifier:str=SPECIAL_SIGN["SPLIT"]
401
+ )->List:
402
+ """This is the same as in Client class."""
403
+ yield "hello"
404
+ remaining = ""
405
+ while True:
406
+ dataset = self.client_socket.recv(self.bufsize)
407
+ try:
408
+ dataset = dataset.decode('utf-8')
409
+ except UnicodeDecodeError:
410
+ if not isinstance(remaining, bytes):
411
+ remaining = remaining.encode('utf-8')
412
+ assert isinstance(dataset, bytes)
413
+ remaining += dataset
414
+ try:
415
+ dataset = remaining.decode('utf-8')
416
+ remaining = ""
417
+ except UnicodeDecodeError:
418
+ continue
419
+ assert isinstance(remaining, str)
420
+ dataset = remaining + dataset
421
+ list_dataset = dataset.split(split_identifier)
422
+ if len(list_dataset) == 1:
423
+ remaining = list_dataset[0]
424
+ continue
425
+ else:
426
+ remaining = list_dataset[-1]
427
+ list_dataset = list_dataset[:-1]
428
+ return_value = []
429
+ for item in list_dataset:
430
+ if end_identifier is not None and item == end_identifier:
431
+ break
432
+ return_value.append(item)
433
+ identifier = yield return_value
434
+ if identifier is not None:
435
+ end_identifier, split_identifier = identifier
436
+
437
+ def send_message(self, message:str):
438
+ """Send message to client."""
439
+ SEP = self.SIGN["SPLIT"]
440
+ self.client_socket.send(
441
+ (message+SEP).encode("utf-8")
442
+ )
443
+
444
+ def _connect(self):
445
+ # check
446
+ if self.server_socket:
447
+ self.server_socket.close()
448
+ assert not os.path.isfile("PORT.txt")
449
+ self.socket_port = check_port(PORT)
450
+ # Step1. initialize
451
+ self.server_socket = socket.socket(
452
+ socket.AF_INET, socket.SOCK_STREAM
453
+ )
454
+ # Step2. binding ip and port
455
+ self.server_socket.bind((self.socket_host, self.socket_port))
456
+ # Step3. run client
457
+ self._start_client()
458
+
459
+ # Step4. listening for connect
460
+ self.server_socket.listen(1)
461
+
462
+ # Step5. test connection
463
+ client_socket, client_address = self.server_socket.accept()
464
+ print_log("server: establishing connection......")
465
+ self.client_socket = client_socket
466
+ while True:
467
+ client_socket.send("hi".encode('utf-8'))
468
+ time.sleep(1)
469
+ data = client_socket.recv(self.bufsize).decode('utf-8')
470
+ if data == "hello agent":
471
+ client_socket.send("check".encode('utf-8'))
472
+ print_log("server: connect successfully")
473
+ break
474
+ assert os.path.isfile("PORT.txt")
475
+ os.remove("PORT.txt")
476
+ if self.receive_server:
477
+ del self.receive_server
478
+ self.receive_server = self.receive_message()
479
+ assert next(self.receive_server) == "hello"
480
+
481
+ @abstractmethod
482
+ def render_and_register_ui(self):
483
+ # You need to implement this function.
484
+ # The function's purpose is to bind the name of the agent with an image.
485
+ # The name of the agent is stored in `self.cache[]`,
486
+ # and the function for binding is in the method `add_agents` of the class `GradioConfig` in `Gradio_Config/gradio_config.py``.
487
+ # This function will be executed in `self.first_recieve_from_client()`
488
+ pass
489
+
490
+ def first_recieve_from_client(self, reset_mode:bool=False):
491
+ """
492
+ This function is used to receive information from the client and is typically executed during the initialization of the class.
493
+ If `reset_mode` is False, it will bind the name of the agent with an image.
494
+ """
495
+ self.FIRST_RECIEVE_FROM_CLIENT = True
496
+ data_list:List = self.receive_server.send(None)
497
+ assert len(data_list) == 1
498
+ data = eval(data_list[0])
499
+ assert isinstance(data, dict)
500
+ self.cache.update(data)
501
+ if not reset_mode:
502
+ self.render_and_register_ui()
503
+
504
+ def _second_send(self, message:dict):
505
+ # Send the modified message.
506
+ # It will be executed in `self.send_start_cmd()` automatically.
507
+ self.send_message(str(message))
508
+
509
+ def _third_send(self):
510
+ # Send start command.
511
+ # It will be executed in `self.send_start_cmd()` automatically.
512
+ self.send_message(self.SIGN['START'])
513
+
514
+ def send_start_cmd(self, message:dict={"hello":"hello"}):
515
+ # If you have no message to send, you can ignore the args `message`.
516
+ assert self.FIRST_RECIEVE_FROM_CLIENT, "Please make sure you have executed `self.first_recieve_from_client()` manually."
517
+ self._second_send(message=message)
518
+ time.sleep(1)
519
+ self._third_send()
520
+ self.FIRST_RECIEVE_FROM_CLIENT = False
521
+
522
+ def __init__(
523
+ self,
524
+ client_cmd: list, # ['python','test.py','--a','b','--c','d']
525
+ socket_host: str = HOST,
526
+ socket_port: int = PORT,
527
+ bufsize: int = 1024,
528
+ ui_name: str = ""
529
+ ):
530
+ self.ui_name = ui_name
531
+ self.server_socket = None
532
+ self.SIGN = SPECIAL_SIGN
533
+ self.socket_host = socket_host
534
+ self.socket_port = socket_port
535
+ self.bufsize = bufsize
536
+ self.client_cmd = client_cmd
537
+
538
+ self.receive_server = None
539
+ self.cache = {}
540
+ assert self.bufsize > 0
541
+ self._connect()
542
+
543
+ def _start_client(self):
544
+ print(f"server: executing `{' '.join(self.client_cmd)}` ...")
545
+ self.backend = subprocess.Popen(self.client_cmd)
546
+
547
+ def _close_client(self):
548
+ print(f"server: killing `{' '.join(self.client_cmd)}` ...")
549
+ self.backend.terminate()
550
+
551
+ def reset(self):
552
+ print("server: restarting ...")
553
+ self._close_client()
554
+ time.sleep(1)
555
+ self._connect()
556
+
557
+ def render_bubble(self, rendered_data, agent_response, node_name, render_node_name:bool=True):
558
+ # Rendered bubbles (HTML format) are used for gradio output.
559
+ output = f"**{node_name}**<br>" if render_node_name else ""
560
+ for item in agent_response:
561
+ for agent_name in item:
562
+ content = item[agent_name].replace("\n", "<br>")
563
+ content = UIHelper.filter(content, agent_name, self.ui_name)
564
+ output = f"{output}<br>{UIHelper.wrap_css(content, agent_name)}"
565
+ rendered_data[-1] = [rendered_data[-1][0], output]
566
+ return rendered_data
567
+
568
+ def run(self,share: bool = True):
569
+ self.demo.queue()
570
+ self.demo.launch(share=share)
571
+
572
+
573
+ if __name__ == '__main__':
574
+ pass
gradio_config.py ADDED
@@ -0,0 +1,439 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import json
18
+ from PIL import Image
19
+ import requests
20
+ from typing import List, Tuple
21
+
22
+ class GradioConfig:
23
+ # How many avatars are currently registered
24
+ POINTER = 0
25
+
26
+ # Avatar image. You can add or replace.
27
+ AGENT_HEAD_URL = [
28
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687579617434043.jpg",
29
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306241687592097408547.jpg",
30
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561699613.jpg",
31
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561275758.jpg",
32
+ "https://img.touxiangwu.com/uploads/allimg/2021090300/ry5k31wt33c.jpg",
33
+ "https://img.touxiangwu.com/uploads/allimg/2021090300/0ls2gmwhrf5.jpg",
34
+ "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
35
+ "https://img.touxiangwu.com/zb_users/upload/2023/03/202303271679886128550253.jpg",
36
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711344407060.jpg",
37
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686711345834296.jpg",
38
+ "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311194291520.jpg",
39
+ "https://img.touxiangwu.com/zb_users/upload/2023/05/202305171684311196958993.jpg",
40
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/vr0bkov0dwl.jpg",
41
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/auqx5zfsv5g.jpg",
42
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/llofpivtwls.jpg",
43
+ "https://img.touxiangwu.com/uploads/allimg/2021082612/3j2sdot3ye0.jpg",
44
+ "https://img.touxiangwu.com/2020/3/nQfYf2.jpg",
45
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068774532.jpg",
46
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918068289945.jpg",
47
+ "https://img.touxiangwu.com/zb_users/upload/2023/08/202308131691918069785183.jpg",
48
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561292003.jpg",
49
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726561578616.jpg",
50
+ "https://img.touxiangwu.com/zb_users/upload/2023/06/202306141686726564597524.jpg"
51
+ ]
52
+ USER_HEAD_URL = "https://img.touxiangwu.com/zb_users/upload/2023/05/202305301685407468585486.jpg"
53
+
54
+ # The css style of gradio.Chatbot
55
+ CSS = """
56
+ #chatbot1 .user {
57
+ background-color:transparent;
58
+ border-color:transparent;
59
+ }
60
+ #chatbot1 .bot {
61
+ background-color:transparent;
62
+ border-color:transparent;
63
+ }
64
+ #btn {color: red; border-color: red;}
65
+ """
66
+
67
+ ID = ["USER", "AGENT", "SYSTEM"]
68
+
69
+ # Bubble template
70
+ BUBBLE_CSS = {
71
+ # Background-color Name-color Name-content Font-color Font-size Content Avatar-URL
72
+ "USER": """
73
+ <div style="display: flex; align-items: flex-start; justify-content: flex-end;">
74
+ <div style="background-color: {}; border-radius: 20px 0px 20px 20px; padding: 15px; min-width: 100px; max-width: 300px;">
75
+ <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
76
+ <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
77
+ </div>
78
+ <img src="{}" alt="USER" style="width: 50px; height: 50px; border-radius: 50%; margin-left: 10px;">
79
+ </div>
80
+ """,
81
+
82
+ # Avatar-URL Background-color Name-color Name-Content Font-color Font-size Content
83
+ "AGENT": """
84
+ <div style="display: flex; align-items: flex-start;">
85
+ <img src="{}" alt="AGENT" style="width: 50px; height: 50px; border-radius: 50%; margin-right: 10px;">
86
+ <div style="background-color: {}; border-radius: 0px 20px 20px 20px; padding: 15px; min-width: 100px; max-width: 600px;">
87
+ <p style="margin: 0; padding: 0; color: {}; font-weight: bold; font-size: 18px;">{}</p>
88
+ <p style="margin: 0; padding: 0; color: {}; font-size: {}px;">{}</p>
89
+ </div>
90
+ </div>
91
+ """,
92
+
93
+ # Background-color Font-size Font-color Name Content
94
+ "SYSTEM": """
95
+ <div style="display: flex; align-items: center; justify-content: center;">
96
+ <div style="background-color: {}; border-radius: 20px; padding: 1px; min-width: 200px; max-width: 1000px;">
97
+ <p style="margin: 0; padding: 0; text-align: center; font-size: {}px; font-weight: bold; font-family: '微软雅黑', sans-serif; color: {};">{}:{}</p>
98
+ </div>
99
+ </div>
100
+ """
101
+ }
102
+
103
+ ROLE_2_NAME = {}
104
+
105
+ OBJECT_INFO = {
106
+
107
+ "User": {
108
+ # https://img-blog.csdnimg.cn/img_convert/7c20bc39ac69b6972a22e18762d02db3.jpeg
109
+ "head_url": USER_HEAD_URL,
110
+ "bubble_color": "#95EC69",
111
+ "text_color": "#000000",
112
+ "font_size": 0,
113
+ "id": "USER"
114
+ },
115
+
116
+ "System": {
117
+ # https://img-blog.csdnimg.cn/img_convert/e7e5887cfff67df8c2205c2ef0e5e7fa.png
118
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/03/202303141678768524747045.jpg",
119
+ "bubble_color": "#7F7F7F", ##FFFFFF
120
+ "text_color": "#FFFFFF", ##000000
121
+ "font_size": 0,
122
+ "id": "SYSTEM"
123
+ },
124
+
125
+ "wait": {
126
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2022/12/202212011669881536145501.jpg",
127
+ "bubble_color": "#E7CBA6",
128
+ "text_color": "#000000",
129
+ "font_size": 0,
130
+ "id": "AGENT"
131
+ },
132
+
133
+ "Recorder": {
134
+ "head_url": "https://img.touxiangwu.com/zb_users/upload/2023/02/202302281677545695326193.jpg",
135
+ "bubble_color": "#F7F7F7",
136
+ "text_color": "#000000",
137
+ "font_size": 0,
138
+ "id": "AGENT"
139
+ }
140
+ }
141
+
142
+ @classmethod
143
+ def color_for_img(cls, url):
144
+ """
145
+ Extract the main colors from the picture and set them as the background color,
146
+ then determine the corresponding text color.
147
+ """
148
+
149
+ def get_main_color(image):
150
+ image = image.convert("RGB")
151
+ width, height = image.size
152
+ pixels = image.getcolors(width * height)
153
+ most_common_pixel = max(pixels, key=lambda item: item[0])
154
+ return most_common_pixel[1]
155
+
156
+ def is_dark_color(rgb_color):
157
+ r, g, b = rgb_color
158
+ luminance = (0.299 * r + 0.587 * g + 0.114 * b) / 255
159
+ return luminance < 0.5
160
+
161
+ def download_image(url):
162
+ print(f"binding: {url}")
163
+ response = requests.get(url)
164
+ if response.status_code == 200:
165
+ with open('image.jpg', 'wb') as f:
166
+ f.write(response.content)
167
+
168
+ def rgb_to_hex(color):
169
+ return "#{:02X}{:02X}{:02X}".format(color[0], color[1], color[2])
170
+
171
+ def get_color(image_url):
172
+ download_image(image_url)
173
+
174
+ image = Image.open("image.jpg")
175
+ main_color = get_main_color(image)
176
+ is_dark = is_dark_color(main_color)
177
+
178
+ if is_dark:
179
+ font_color = "#FFFFFF"
180
+ else:
181
+ font_color = "#000000"
182
+
183
+ return rgb_to_hex(main_color), font_color
184
+
185
+ return get_color(url)
186
+
187
+ @classmethod
188
+ def init(cls, JSON):
189
+ # Deprecated
190
+ with open(JSON) as f:
191
+ sop = json.load(f)
192
+ cnt = 0
193
+ FISRT_NODE = True
194
+ fisrt_node_roles = []
195
+ for node_name in sop['nodes']:
196
+ node_info = sop['nodes'][node_name]
197
+ agent_states = node_info['agent_states']
198
+ for agent_role in agent_states:
199
+ name = agent_states[agent_role]['style']['name']
200
+ cls.ROLE_2_NAME[agent_role] = name
201
+ if FISRT_NODE:
202
+ fisrt_node_roles.append(agent_role)
203
+ bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cnt])
204
+ cls.OBJECT_INFO[name] = {
205
+ "head_url": f"{cls.AGENT_HEAD_URL[cnt]}",
206
+ "bubble_color": bubble_color,
207
+ "text_color": text_color,
208
+ "font_size": 0,
209
+ "id": "AGENT"
210
+ }
211
+ cnt += 1
212
+ if FISRT_NODE:
213
+ FISRT_NODE = False
214
+ print(cls.OBJECT_INFO)
215
+ for usr_name in cls.OBJECT_INFO:
216
+ if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
217
+ cls.OBJECT_INFO[usr_name]["font_size"] = 12
218
+ elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
219
+ cls.OBJECT_INFO[usr_name]["font_size"] = 16
220
+ else:
221
+ assert False
222
+ return fisrt_node_roles
223
+
224
+ @classmethod
225
+ def add_agent(cls, agents_name:List,p:int=None):
226
+ if p != None:
227
+ cls.POINTER = p
228
+ for name in agents_name:
229
+ bubble_color, text_color = cls.color_for_img(cls.AGENT_HEAD_URL[cls.POINTER])
230
+ cls.OBJECT_INFO[name] = {
231
+ "head_url": f"{cls.AGENT_HEAD_URL[cls.POINTER]}",
232
+ "bubble_color": bubble_color,
233
+ "text_color": text_color,
234
+ "font_size": 0,
235
+ "id": "AGENT"
236
+ }
237
+ cls.POINTER += 1
238
+ for usr_name in cls.OBJECT_INFO:
239
+ if cls.OBJECT_INFO[usr_name]["id"] == "SYSTEM":
240
+ cls.OBJECT_INFO[usr_name]["font_size"] = 12
241
+ elif cls.OBJECT_INFO[usr_name]["id"] in ["USER", "AGENT"]:
242
+ cls.OBJECT_INFO[usr_name]["font_size"] = 16
243
+ else:
244
+ assert False
245
+
246
+
247
+ class StateConfig:
248
+ """UI configuration for the step progress bar (indicating the current node)"""
249
+
250
+ CSS = """
251
+ :root {
252
+ --gradient-start: 100%;
253
+ --gradient-end: 0%;
254
+ }
255
+ .container.progress-bar-container {
256
+ position: relative;
257
+ display: flex;
258
+ align-items: flex-end;
259
+ width: 100%;
260
+ overflow-x: auto;
261
+ padding-bottom: 30px;
262
+ padding-top: 20px
263
+ }
264
+ .container.progress-bar-container::-webkit-scrollbar {
265
+ width: 8px;
266
+ background-color: transparent;
267
+ }
268
+
269
+ .container.progress-bar-container::-webkit-scrollbar-thumb {
270
+ background-color: transparent;
271
+ }
272
+
273
+ .progress-bar-container .progressbar {
274
+ counter-reset: step;
275
+ white-space: nowrap;
276
+ }
277
+ .progress-bar-container .progressbar li {
278
+ list-style: none;
279
+ display: inline-block;
280
+ width: 200px;
281
+ position: relative;
282
+ text-align: center;
283
+ cursor: pointer;
284
+ white-space: normal;
285
+ }
286
+ .progress-bar-container .progressbar li:before {
287
+ content: counter(step);
288
+ counter-increment: step;
289
+ width: 30px;
290
+ height: 30px;
291
+ line-height: 30px;
292
+ border: 1px solid #ddd;
293
+ border-radius: 100%;
294
+ display: block;
295
+ text-align: center;
296
+ margin: 0 auto 10px auto;
297
+ background-color: #ffffff;
298
+ }
299
+ .progress-bar-container .progressbar li:after {
300
+ content: attr(data-content);
301
+ position: absolute;
302
+ width: 87%;
303
+ height: 2px;
304
+ background-color: #dddddd;
305
+ top: 15px;
306
+ left: -45%;
307
+ }
308
+ .progress-bar-container .progressbar li:first-child:after {
309
+ content: none;
310
+ }
311
+ .progress-bar-container .progressbar li.active {
312
+ color: green;
313
+ }
314
+ .progress-bar-container .progressbar li.active:before {
315
+ border-color: green;
316
+ background-color: green;
317
+ color: white;
318
+ }
319
+ .progress-bar-container .progressbar li.active + li:after {
320
+ background: linear-gradient(to right, green var(--gradient-start), lightgray var(--gradient-end));
321
+ }
322
+ .progress-bar-container .small-element {
323
+ transform: scale(0.8);
324
+ }
325
+ .progress-bar-container .progressbar li span {
326
+ position: absolute;
327
+ top: 40px;
328
+ left: 0;
329
+ width: 100%;
330
+ text-align: center;
331
+ }
332
+ .progress-bar-container .progressbar li .data-content {
333
+ position: absolute;
334
+ width: 100%;
335
+ top: -10px;
336
+ left: -100px;
337
+ text-align: center;
338
+ }
339
+ """
340
+
341
+ FORMAT = """
342
+ <html>
343
+ <head>
344
+ <style>
345
+ {}
346
+ </style>
347
+ </head>
348
+ <body>
349
+ <br>
350
+ <center>
351
+ <div class="container progress-bar-container">
352
+ <ul class="progressbar">
353
+ {}
354
+ </ul>
355
+ </div>
356
+ </center>
357
+ </body>
358
+ </html>
359
+ """
360
+
361
+ STATES_NAME:List[str] = None
362
+
363
+ @classmethod
364
+ def _generate_template(cls, types:str)->str:
365
+ # normal: A state with no execution.
366
+ # active-show-up: Active state, and content displayed above the horizontal line.
367
+ # active-show-down: Active state, and content displayed below the horizontal line.
368
+ # active-show-both: Active state, and content displayed both above and below the horizontal line.
369
+ # active-show-none: Active state, with no content displayed above the horizontal line.
370
+
371
+ assert types.lower() in ["normal","active-show-up", "active-show-down", "active-show-both", "active", "active-show-none"]
372
+ both_templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
373
+ <div class="data-content">
374
+ <center>
375
+ <p style="line-height: 1px;"></p>
376
+ {}
377
+ <p>
378
+ {}
379
+ </p>
380
+ </center>
381
+ </div>
382
+ <span>{}</span>
383
+ </li>"""
384
+
385
+ if types.lower() == "normal":
386
+ templates = "<li><span>{}</span></li>"
387
+ elif types.lower() == "active":
388
+ templates = """<li class="active"><span>{}</span></li>"""
389
+ elif types.lower() == "active-show-up":
390
+ templates = both_templates.format("{}","{}", "{}", "", "{}")
391
+ elif types.lower() == "active-show-down":
392
+ templates = both_templates.format("{}","{}", "", "{}", "{}")
393
+ elif types.lower() == "active-show-both":
394
+ templates = both_templates
395
+ elif types.lower() == "active-show-none":
396
+ templates = """<li class="active" style="--gradient-start: {}%; --gradient-end: {}%;">
397
+ <span>{}</span>
398
+ </li>"""
399
+ else:
400
+ assert False
401
+ return templates
402
+
403
+ @classmethod
404
+ def update_states(cls, current_states:List[int], current_templates:List[str], show_content:List[Tuple[str]])->str:
405
+ assert len(current_states) == len(current_templates)
406
+ # You can dynamically change the number of states.
407
+ # assert len(current_states) == len(cls.STATES_NAME)
408
+ css_code = []
409
+ for idx in range(len(current_states)):
410
+ if idx == 0:
411
+ if current_states[idx] != 0:
412
+ css_code = [f"{cls._generate_template('active').format(cls.STATES_NAME[idx])}"]
413
+ else:
414
+ css_code = [f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"]
415
+ continue
416
+ if current_states[idx-1] == 0:
417
+ # new_code = f"{cls._generate_template('normal').format(*(show_content[idx]))}"
418
+ new_code = f"{cls._generate_template('normal').format(cls.STATES_NAME[idx])}"
419
+ else:
420
+ new_code = f"{cls._generate_template(current_templates[idx]).format(current_states[idx-1], 100-current_states[idx-1],*(show_content[idx-1]), cls.STATES_NAME[idx])}"
421
+ if current_states[idx-1] != 100 or (current_states[idx]==0 and current_states[idx-1]==100):
422
+ new_code = new_code.replace("""li class="active" ""","""li """)
423
+ css_code.append(new_code)
424
+ return "\n".join(css_code)
425
+
426
+ @classmethod
427
+ def create_states(cls, states_name:List[str], manual_create_end_nodes:bool=False):
428
+ # Create states
429
+ if manual_create_end_nodes:
430
+ states_name.append("Done")
431
+ css_code = ""
432
+ cls.STATES_NAME: List[str] = states_name
433
+ for name in states_name:
434
+ css_code = f"{css_code}\n{cls._generate_template('normal').format(name)}"
435
+ return css_code
436
+
437
+
438
+ if __name__ == '__main__':
439
+ pass
image.jpg ADDED
single_prompts.py ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def get_design_state_system_prompt(index):
2
+ default = """input:
3
+ <target>You are an online eye care customer service representative, and your task is to answer patients' questions about ophthalmic diseases and guide them to visit the hospital for examinations while assisting them in filling out the necessary forms.</target> .
4
+
5
+ output:
6
+ <role>online eye care customer service</role>
7
+ <style>professional and humorous</style>
8
+ <state>
9
+ <state_name>knowledge_response_state</state_name>
10
+ <task>Guide the user to go to the hospital for an examination and answer questions related to my hospital.</task>
11
+ <rule>Your language should be concise and avoid excessive words. You need to guide me repeatedly. When the user explicitly refuses to visit the hospital, inquire about their concerns and encourage them to come for consultation, such as: \"Do you have any concerns?\" or \"Our hospital has highly professional doctors who you can discuss with in person.\" When the user expresses doubts with responses like \"I'll think about it,\" \"I'll consider it,\" or \"I need to see more,\" introduce the advantages of the hospital and guide them to come for consultation. Remember, after responding to me, guide me to visit your hospital for an examination.</rule>
12
+ <judge>If the patient agrees to go to the hospital,the state should be end and move to next state,output<end>1</end>,else if the state should not be end,output <end>0</end>\n</judge>
13
+ </state>
14
+
15
+ <state> <state_name>knowledge_response_book_card_state</state_name>
16
+ <task>Guide patient to fill out appointment cards and answer hospital-related questions</task>
17
+ <rule>Your language should be as concise as possible, without too much nonsense. The copy of the invitation card is: Please copy and fill in the following information and send it to me to complete the reservation. \n[Name]:\n[Telephone]:\n[Your approximate location]: District Degree]: \n The preoperative examination process includes mydriasis. After mydriasis, your vision will be blurred for 4-6 hours, which affects driving safety, so please do not drive to the hospital by yourself, and arrange your personal itinerary after the examination. You need to repeatedly invite users to fill out invitation cards. When users are chatting, euphemistic replies guide users to fill in the appointment card, such as: \"I can't provide detailed information about your question. If you need to go to the hospital for eye consultation, I can make an appointment for you.\" When users have concerns, such as: Users reply with \"I want to think about it,\" \"I'll think about it,\" \"I want to see it again,\" etc., introducing the hospital's advantages and guiding users to fill in the appointment card. If the user does not fill in the phone number completely, the user will be reminded to add the phone number.</rule>
18
+ <judge>If thepatientfills in the phone information in the appointment card, for example:When the patient answers [Telephone]: 15563665210.the state should be end and move to next state,output<end>1</end>,\nelse if the patient does not fill in completely or the format is wrong, output <end>0</end>\n </judge>
19
+ </state>"""
20
+
21
+ design_assistant = """input:
22
+ <target>An assistant that can help users create content such as articles, blogs, advertising copy, etc</target>
23
+ output:
24
+ <role>Intelligent and versatile content creation assistant</role>
25
+ <style>Professional, detail-oriented, and collaborative</style>
26
+
27
+ <state>
28
+ <state_name>Discussion state</state_name>
29
+ <task>Engage in a detailed discussion with the user to understand their specific requirements, target audience, and desired tone.</task>
30
+ <rule>Ask probing questions to gain a deeper understanding of the user's vision and objectives for the content. Listen actively and take notes to ensure all requirements are captured accurately. Provide suggestions and insights based on previous experience to enhance the user's content ideas.</rule>
31
+ <judge>If the user's requirements are clear and all necessary information has been gathered, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
32
+ </state>
33
+
34
+ <state>
35
+ <state_name>Research state</state_name>
36
+ <task>Conduct extensive research on the given topic to gather information from reliable sources and identify unique angles.</task>
37
+ <rule>Explore various credible sources such as academic journals, reputable websites, and industry reports. Analyze existing content to understand the current landscape and identify gaps or opportunities for a fresh perspective. Take thorough notes and organize the collected information for easy reference.</rule>
38
+ <judge>If sufficient research has been conducted and the necessary information has been gathered, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
39
+ </state>
40
+
41
+ <state>
42
+ <state_name>Outline state</state_name>
43
+ <task>Create a logical structure for the content, including main points, subheadings, and supporting arguments.</task>
44
+ <rule>Organize the collected information into a cohesive outline that follows a logical flow. Ensure that the structure aligns with the user's objectives and target audience. Use headings and subheadings to provide a clear roadmap for the content.</rule>
45
+ <judge>If the outline has been created and approved by the user, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
46
+ </state>
47
+
48
+ <state>
49
+ <state_name>Drafting state</state_name>
50
+ <task>Write the content, paying attention to grammar, spelling, and punctuation.</task>
51
+ <rule>Craft engaging introductions that grab the reader's attention. Develop informative body paragraphs that provide valuable insights and supporting evidence. Create compelling conclusions that leave a lasting impression. Use creativity and writing skills to make the content engaging and enjoyable to read.</rule>
52
+ <judge>If the initial draft has been completed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
53
+ </state>
54
+
55
+ <state>
56
+ <state_name>Revision state</state_name>
57
+ <task>Seek feedback from the user and incorporate necessary revisions.</task>
58
+ <rule>Maintain open communication with the user throughout the writing process. Actively seek feedback and suggestions for improvement. Incorporate revisions based on the user's preferences and ensure that the content aligns with their expectations. Collaborate with the user to create a final version that meets their requirements.</rule>
59
+ <judge>If the user is satisfied with the content and no further revisions are needed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
60
+ </state>
61
+
62
+ <state>
63
+ <state_name>Proofreading state</state_name>
64
+ <task>Thoroughly review the content for grammar, spelling, and coherence.</task>
65
+ <rule>Check for any errors in grammar, spelling, and punctuation. Ensure that the content flows smoothly and cohesively. Make necessary edits to improve clarity and readability. Pay attention to formatting and consistency throughout the document.</rule>
66
+ <judge>If the content has been thoroughly proofread and edited, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
67
+ </state>
68
+
69
+ <state>
70
+ <state_name>Delivery state</state_name>
71
+ <task>Deliver the completed content to the user within the agreed-upon timeframe and desired format.</task>
72
+ <rule>Ensure that the content is delivered in the format specified by the user, such as a Word document, a blog post, or any other specified medium. Meet the agreed-upon deadline for content delivery. Provide the user with a final version that is polished, error-free, and ready for use.</rule>
73
+ <judge>If the content has been delivered to the user, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
74
+ </state>
75
+ """
76
+
77
+ tutor = """input:
78
+ <target>A tutor who provides personalized learning resources for students to help them understand complex concepts and problems</target>
79
+ output:
80
+ <role>Tutor</role>
81
+ <style>Knowledgeable, patient, supportive, encouraging</style>
82
+
83
+ <state>
84
+ <state_name>Assessment_state</state_name>
85
+ <task>Conduct a comprehensive assessment of the student's knowledge and understanding of the subject matter.</task>
86
+ <rule>Use a variety of assessment tools such as quizzes, tests, and discussions to identify areas where the student may be struggling or require additional support. Tailor the assessment to the student's preferred learning style. Provide clear instructions and guidance throughout the assessment process.</rule>
87
+ <judge>If the assessment is completed and areas of improvement are identified, the state should be end and move to the next state, output <end>1</end>. If the assessment is not completed or the student needs further support, output <end>0</end>.</judge>
88
+ </state>
89
+
90
+ <state>
91
+ <state_name>Personalized_learning_plan_state</state_name>
92
+ <task>Create personalized learning plans for each student based on the assessment results.</task>
93
+ <rule>Consider the student's strengths, weaknesses, and preferred learning style when creating the learning plan. Include a variety of resources such as textbooks, online articles, videos, and interactive exercises. Ensure that the materials are engaging, relevant, and aligned with the student's curriculum.</rule>
94
+ <judge>If the personalized learning plan is created and includes a variety of resources, the state should be end and move to the next state, output <end>1</end>. If the learning plan is not created or lacks the necessary resources, output <end>0</end>.</judge>
95
+ </state>
96
+
97
+ <state>
98
+ <state_name>Hands-on_learning_state</state_name>
99
+ <task>Encourage students to actively participate in problem-solving activities and apply theoretical concepts to practical situations.</task>
100
+ <rule>Design practical exercises and real-life scenarios to help students develop critical thinking skills and a deeper understanding of the subject matter. Provide clear instructions and guidance throughout the hands-on learning activities. Use real-life examples to enhance understanding.</rule>
101
+ <judge>If the hands-on learning activities are completed and the student demonstrates an application of theoretical concepts, the state should be end and move to the next state, output <end>1</end>. If the activities are not completed or the student struggles to apply the concepts, output <end>0</end>.</judge>
102
+ </state>
103
+
104
+ <state>
105
+ <state_name>Supportive_environment_state</state_name>
106
+ <task>Maintain a supportive and encouraging environment during tutoring sessions.</task>
107
+ <rule>Explain complex concepts in a patient and understandable manner. Break down concepts into simpler terms and provide real-life examples. Actively listen to the student's questions and concerns. Create a safe space for the student to ask for clarification.</rule>
108
+ <judge>If the tutoring session is conducted in a supportive and encouraging manner, the state should be end and move to the next state, output <end>1</end>. If the session lacks support or the student feels uncomfortable asking for clarification, output <end>0</end>.</judge>
109
+ </state>
110
+
111
+ <state>
112
+ <state_name>Progress_tracking_state</state_name>
113
+ <task>Regularly assess the student's understanding and provide constructive feedback.</task>
114
+ <rule>Use quizzes, assignments, and discussions to assess the student's progress. Provide constructive feedback and identify areas for improvement. Help the student build confidence and overcome challenges.</rule>
115
+ <judge>If the student's progress is regularly assessed and constructive feedback is provided, the state should be end and move to the next state, output <end>1</end>. If the assessment and feedback are lacking or inconsistent, output <end>0</end>.</judge>
116
+ </state>
117
+
118
+ <state>
119
+ <state_name>Study_habits_state</state_name>
120
+ <task>Guide the student in developing effective study habits and time management skills.</task>
121
+ <rule>Assist the student in setting realistic goals and creating study schedules. Provide guidance on effective study techniques and strategies. Encourage the student to stay on track and make steady progress.</rule>
122
+ <judge>If the student develops effective study habits and time management skills, the state should be end and move to the next state, output <end>1</end>. If the student struggles to develop these skills or lacks progress, output <end>0</end>.</judge>
123
+ </state>
124
+
125
+ <state>
126
+ <state_name>Mentorship_state</state_name>
127
+ <task>Serve as a mentor and motivator for the student.</task>
128
+ <rule>Inspire the student to reach their full academic potential. Celebrate their achievements and encourage them to embrace a growth mindset. Foster a positive and empowering learning experience.</rule>
129
+ <judge>If the student feels mentored and motivated, the state should be end and move to the next state, output <end>1</end>. If the student lacks mentorship or motivation, output <end>0</end>.</judge>
130
+ </state>
131
+
132
+ <state>
133
+ <state_name>Final_objective_state</state_name>
134
+ <task>Help students gain a deep understanding of complex concepts and develop the skills and confidence to excel academically.</task>
135
+ <rule>Ensure that students grasp complex concepts and can apply them effectively. Help them build confidence in their abilities and develop a growth mindset. Support them in achieving their academic goals.</rule>
136
+ <judge>This state is the final objective and should always be the end state, output <end>1</end>.</judge>
137
+ </state>
138
+ """
139
+
140
+ online_medical_consultant = """input:
141
+ <target>An online medical consultant who offers preliminary medical advice to patients and answers common questions about diseases, symptoms, and treatments.</target>
142
+ output:
143
+ <role>Online Medical Consultant</role>
144
+ <style>Empathetic and Knowledgeable</style>
145
+ <state>
146
+ <state_name>Initial Assessment State</state_name>
147
+ <task>Gather detailed information about the patient's symptoms, medical history, and any previous treatments.</task>
148
+ <rule>Ask open-ended questions to allow the patient to provide a comprehensive description of their symptoms. Request specific details such as the duration and intensity of symptoms, any triggering factors, and any alleviating or worsening factors. Inquire about the patient's medical history, including any chronic conditions, previous surgeries, or allergies. Ask about any medications or treatments the patient has tried in the past.</rule>
149
+ <judge>If the patient has provided sufficient information about their symptoms, medical history, and previous treatments, the state should be end and move to the next state. Output <end>1</end>. Otherwise, output <end>0</end>.</judge>
150
+ </state>
151
+
152
+ <state>
153
+ <state_name>Preliminary Diagnosis State</state_name>
154
+ <task>Form a preliminary diagnosis based on the gathered information.</task>
155
+ <rule>Analyze the patient's symptoms, medical history, and any relevant test results. Consider possible differential diagnoses and evaluate the likelihood of each. Explain the reasoning behind the preliminary diagnosis to the patient, highlighting the key symptoms and findings that led to the conclusion.</rule>
156
+ <judge>If the patient understands the preliminary diagnosis and is ready to discuss treatment options or further diagnostic tests, the state should be end and move to the next state. Output <end>1</end>. Otherwise, output <end>0</end>.</judge>
157
+ </state>
158
+
159
+ <state>
160
+ <state_name>Treatment Discussion State</state_name>
161
+ <task>Discuss potential treatment options or further diagnostic tests.</task>
162
+ <rule>Present the patient with different treatment options, explaining the benefits, risks, and expected outcomes of each. Consider the patient's preferences, lifestyle, and any contraindications when recommending treatments. If further diagnostic tests are necessary, explain the purpose of these tests and how they can provide more information for a definitive diagnosis.</rule>
163
+ <judge>If the patient has chosen a treatment option or agreed to undergo further diagnostic tests, the state should be end and move to the next state. Output <end>1</end>. Otherwise, output <end>0</end>.</judge>
164
+ </state>
165
+
166
+ <state>
167
+ <state_name>Patient Education State</state_name>
168
+ <task>Provide clear and understandable explanations of medical concepts.</task>
169
+ <rule>Break down complex medical terms and concepts into simple language that the patient can easily understand. Use visual aids, diagrams, or analogies to enhance comprehension. Encourage the patient to ask questions and clarify any uncertainties they may have. Ensure that the patient has a comprehensive understanding of their condition, treatment options, and any potential risks or side effects.</rule>
170
+ <judge>If the patient demonstrates a clear understanding of their condition, treatment options, and any necessary precautions, the state should be end and move to the next state. Output <end>1</end>. Otherwise, output <end>0</end>.</judge>
171
+ </state>
172
+
173
+ <state>
174
+ <state_name>Follow-up Instructions State</state_name>
175
+ <task>Provide clear instructions for any necessary follow-up steps.</task>
176
+ <rule>Outline the specific actions the patient needs to take, such as scheduling further tests, booking a follow-up appointment, or seeking in-person medical care if required. Provide contact information for any questions or concerns that may arise. Emphasize the importance of adhering to the recommended follow-up plan and address any potential barriers or challenges the patient may face.</rule>
177
+ <judge>If the patient acknowledges and understands the follow-up instructions, the state should be end and move to the next state. Output <end>1</end>. Otherwise, output <end>0</end>.</judge>
178
+ </state>"""
179
+
180
+ online_legal_consultant = """input:
181
+ <target>An online legal advisor who can respond to inquiries related to legal matters, providing basic legal information and advice.</target>
182
+ output:
183
+ <role>Online Legal Advisor</role>
184
+ <style>Professional, Knowledgeable, Empathetic</style>
185
+ <state>
186
+ <state_name>Active Listening State</state_name>
187
+ <task>Listen attentively to clients' concerns and queries.</task>
188
+ <rule>1. Give clients your full attention and avoid interrupting them.
189
+ 2. Take notes to ensure accurate understanding of the details.
190
+ 3. Ask clarifying questions to gather additional information if needed.
191
+ 4. Show empathy and understanding towards clients' emotions and concerns.
192
+ 5. Avoid making assumptions or jumping to conclusions.</rule>
193
+ <judge>If the client has fully expressed their concerns and queries, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
194
+ </state>
195
+
196
+ <state>
197
+ <state_name>Analysis State</state_name>
198
+ <task>Analyze the legal situation based on the gathered information.</task>
199
+ <rule>1. Research relevant laws, regulations, and precedents related to the client's case.
200
+ 2. Consider any specific circumstances or factors that may impact the legal analysis.
201
+ 3. Consult legal databases, journals, and other reliable sources for accurate information.
202
+ 4. Take into account any recent legal developments or changes that may affect the case.
203
+ 5. Ensure that the legal advice provided is up-to-date and accurate.</rule>
204
+ <judge>If the legal situation has been thoroughly analyzed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
205
+ </state>
206
+
207
+ <state>
208
+ <state_name>Clear Communication State</state_name>
209
+ <task>Communicate legal concepts in a clear and concise manner.</task>
210
+ <rule>1. Avoid using complex legal jargon that may confuse clients.
211
+ 2. Break down legal concepts into simple and understandable terms.
212
+ 3. Use examples or analogies to illustrate legal principles.
213
+ 4. Check for client understanding and address any questions or confusion.
214
+ 5. Provide written summaries or explanations if necessary.</rule>
215
+ <judge>If the client has demonstrated understanding of the communicated legal concepts, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
216
+ </state>
217
+
218
+ <state>
219
+ <state_name>Comprehensive Information State</state_name>
220
+ <task>Provide clients with comprehensive information about their legal rights, obligations, and potential outcomes.</task>
221
+ <rule>1. Explain the legal rights and obligations relevant to the client's case.
222
+ 2. Discuss potential outcomes or consequences of different legal actions.
223
+ 3. Provide information about alternative dispute resolution methods, if applicable.
224
+ 4. Offer resources or references for further research or information.
225
+ 5. Address any specific concerns or questions raised by the client.</rule>
226
+ <judge>If the client has received comprehensive information and their questions have been addressed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
227
+ </state>
228
+
229
+ <state>
230
+ <state_name>Practical Solutions State</state_name>
231
+ <task>Offer practical solutions tailored to the client's specific circumstances.</task>
232
+ <rule>1. Consider the client's goals, resources, and potential risks.
233
+ 2. Present different options or strategies for resolving the legal matter.
234
+ 3. Discuss the pros and cons of each option and their potential outcomes.
235
+ 4. Provide guidance on the steps to take to implement the chosen solution.
236
+ 5. Address any concerns or doubts the client may have about the proposed solutions.</rule>
237
+ <judge>If the client has agreed on a practical solution and is ready to proceed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
238
+ </state>
239
+
240
+ <state>
241
+ <state_name>Timely Responses State</state_name>
242
+ <task>Ensure prompt responses to inquiries and minimize unnecessary delays.</task>
243
+ <rule>1. Respond to client inquiries as soon as possible.
244
+ 2. Set clear expectations regarding response times.
245
+ 3. Inform clients of any potential delays or timeframes for further actions.
246
+ 4. Provide regular updates on the progress of the legal matter.
247
+ 5. Apologize and explain any delays that may occur, if necessary.</rule>
248
+ <judge>If the client has received a timely response and is satisfied with the communication, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
249
+ </state>
250
+
251
+ <state>
252
+ <state_name>Building Trust and Rapport State</state_name>
253
+ <task>Establish trust and rapport with clients.</task>
254
+ <rule>1. Maintain a professional and respectful demeanor.
255
+ 2. Show empathy and understanding towards clients' concerns.
256
+ 3. Demonstrate active listening and genuine interest in their case.
257
+ 4. Be transparent and honest about the legal process and potential outcomes.
258
+ 5. Foster open communication and encourage clients to ask questions or seek clarification.</rule>
259
+ <judge>If the client feels comfortable discussing their legal concerns openly and trusts the advisor, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
260
+ </state>
261
+
262
+ <state>
263
+ <state_name>Referral State</state_name>
264
+ <task>Refer clients to specialized experts when necessary.</task>
265
+ <rule>1. Recognize cases that require specialized expertise beyond the advisor's scope.
266
+ 2. Maintain a network of trusted colleagues or professionals in various legal areas.
267
+ 3. Explain the reasons for the referral and the benefits of seeking specialized assistance.
268
+ 4. Provide contact information or facilitate the connection with the referred expert.
269
+ 5. Follow up with the client to ensure a smooth transition to the specialized expert.</rule>
270
+ <judge>If the client agrees to the referral and expresses willingness to seek specialized assistance, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
271
+ </state>"""
272
+
273
+ online_financial_advisor = """input:
274
+ <target>An online financial advisor who can analyze financial markets and data, offering investment advice and market forecasts to users.</target>
275
+ output:
276
+ <role>Online Financial Advisor</role>
277
+ <style>Knowledgeable and Analytical</style>
278
+ <state>
279
+ <state_name>Data Gathering State</state_name>
280
+ <task>Gather relevant financial data from various reliable sources</task>
281
+ <rule>Ensure that the sources of financial data are reputable and up-to-date. Use a combination of primary and secondary sources, including market reports, economic indicators, and company financial statements. Verify the accuracy and reliability of the data before proceeding with the analysis.</rule>
282
+ <judge>If all the relevant financial data has been gathered, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
283
+ </state>
284
+
285
+ <state>
286
+ <state_name>Data Analysis State</state_name>
287
+ <task>Analyze the gathered financial data to identify investment opportunities and potential risks</task>
288
+ <rule>Utilize advanced analytical tools and models to conduct quantitative and qualitative analysis. Consider factors such as market volatility, industry performance, macroeconomic conditions, and company financial health. Pay attention to key indicators and trends that may impact investment decisions.</rule>
289
+ <judge>If the analysis is complete and investment opportunities and risks have been identified, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
290
+ </state>
291
+
292
+ <state>
293
+ <state_name>User Engagement State</state_name>
294
+ <task>Engage in detailed discussions with users to understand their financial circumstances and objectives</task>
295
+ <rule>Ask relevant questions to gather information about the user's financial goals, risk tolerance, and investment preferences. Listen actively and empathetically to the user's responses. Tailor recommendations and forecasts to align with the user's specific needs.</rule>
296
+ <judge>If the user's financial circumstances and objectives have been understood, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
297
+ </state>
298
+
299
+ <state>
300
+ <state_name>Market Monitoring State</state_name>
301
+ <task>Monitor market trends and developments to identify potential investment opportunities</task>
302
+ <rule>Stay updated with industry conferences, financial publications, and online forums. Leverage the network of industry professionals to gain insights and validate analysis. Continuously track market indicators and news that may impact investment decisions.</rule>
303
+ <judge>If potential investment opportunities have been identified based on market trends and developments, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
304
+ </state>
305
+
306
+ <state>
307
+ <state_name>Investment Recommendation State</state_name>
308
+ <task>Formulate investment recommendations and market forecasts based on analysis</task>
309
+ <rule>Consider factors such as risk-reward ratios, potential catalysts, and long-term growth prospects. Present findings to users through comprehensive reports, charts, and interactive presentations. Ensure that the rationale behind recommendations is clearly communicated.</rule>
310
+ <judge>If investment recommendations and market forecasts have been formulated, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
311
+ </state>
312
+
313
+ <state>
314
+ <state_name>Monitoring and Adjusting State</state_name>
315
+ <task>Monitor the performance of recommended investments and adjust recommendations as needed</task>
316
+ <rule>Regularly review the performance of recommended investments and assess their alignment with user goals. Stay updated with market changes and adjust recommendations accordingly. Continuously communicate with users, addressing any concerns and providing ongoing support.</rule>
317
+ <judge>If the performance of recommended investments has been monitored and adjustments have been made as needed, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
318
+ </state>"""
319
+ virtual_tour_guide = """input:
320
+ <target>A virtual tour guide providing destination information, travel recommendations, and virtual travel experiences for travelers.</target>
321
+ output:
322
+ <role>Virtual Tour Guide</role>
323
+ <style>Enthusiastic and knowledgeable</style>
324
+ <state>
325
+ <state_name>Research State</state_name>
326
+ <task>Conduct in-depth research about the destination, including its history, culture, and attractions.</task>
327
+ <rule>Use reliable sources such as travel blogs, books, documentaries, and official tourism websites to gather accurate and up-to-date information. Take notes and organize the research material for easy reference during virtual tours. Pay special attention to lesser-known spots and off-the-beaten-path adventures to provide unique experiences to travelers.</rule>
328
+ <judge>If the research is complete, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
329
+ </state>
330
+
331
+ <state>
332
+ <state_name>Personalization State</state_name>
333
+ <task>Understand the traveler's preferences, interests, and desired experiences.</task>
334
+ <rule>Initiate a conversation with the traveler to gather information about their travel style, hobbies, and previous travel experiences. Ask specific questions about their desired landmarks or activities they wish to explore. Actively listen and take notes to create a personalized itinerary that caters to their unique tastes.</rule>
335
+ <judge>If the traveler's preferences are gathered, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
336
+ </state>
337
+
338
+ <state>
339
+ <state_name>Curating Experience State</state_name>
340
+ <task>Create a virtual travel experience that combines the destination's highlights with hidden gems.</task>
341
+ <rule>Select engaging and interactive elements such as quizzes, challenges, and virtual reality experiences to keep travelers entertained throughout the tour. Ensure a balance between well-known landmarks and lesser-known spots to provide a comprehensive and authentic experience. Pay attention to the pacing of the tour to maintain the traveler's interest.</rule>
342
+ <judge>If the virtual travel experience is curated, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
343
+ </state>
344
+
345
+ <state>
346
+ <state_name>Communication State</state_name>
347
+ <task>Maintain open and frequent communication with travelers.</task>
348
+ <rule>Provide travelers with all the necessary details about the virtual travel experience, including the required technology (e.g., VR headsets, video streaming platforms). Ensure they have access to the necessary resources to fully immerse themselves in the tour. Respond promptly to any inquiries or concerns they may have.</rule>
349
+ <judge>If the communication is established, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
350
+ </state>
351
+
352
+ <state>
353
+ <state_name>Feedback and Improvement State</state_name>
354
+ <task>Encourage travelers to provide feedback and use it to enhance future tours.</task>
355
+ <rule>After each virtual travel experience, ask travelers for their feedback and suggestions. Value their opinions and use their input to improve the overall tour experience. Consider adjusting the pacing, adding more interactive elements, or exploring new destinations based on the feedback received.</rule>
356
+ <judge>If feedback is received, the state should be end and move to the next state, output <end>1</end>. Otherwise, output <end>0</end>.</judge>
357
+ </state>"""
358
+ if index == 0:
359
+ example = design_assistant
360
+ elif index == 1:
361
+ example = tutor
362
+ elif index == 2 :
363
+ example = online_medical_consultant
364
+ elif index == 3 :
365
+ example = online_legal_consultant
366
+ elif index == 4 :
367
+ example = online_financial_advisor
368
+ elif index == 5 :
369
+ example = virtual_tour_guide
370
+ else:
371
+ example = default
372
+
373
+ return """You are a master of character description, and your goal is to design several states for the character based on the provided character information. For each state, outline the character's tasks and the rules that can help them better accomplish these tasks, ultimately aiding them in achieving their final objective.
374
+ input:<target>{{the discription of the target character}}</target>
375
+ output:
376
+ <role>{{the discription of the role of the character}}</role>
377
+ <style>{{the style of the character}}</style>
378
+ <state>
379
+ <state_name>{{the name of the state}}</state_name>
380
+ <task>the task of the character in current state</task>
381
+ <rule>the rules that can help target character better acomplish his tasks in current state </rule>
382
+ <judge>{{when to leave this state to next state.Must strictly follow the format of:If {{when to leave}},the state should be end and move to next state,output<end>1</end>,else if the state should not be end,output <end>0</end>}}</judge>
383
+ </state>
384
+
385
+ For example:
386
+ {}
387
+
388
+ Note:
389
+ 1.Descriptions must be concise and clear.
390
+ 2.You must complete more details to make the entire process reasonable and not a streamlined account.
391
+ 3.The above is just an example, you don't have to imitate it, and the content should be as different as possible while ensuring the format is correct.
392
+ """.format(example)
393
+
394
+
395
+ # System prompt for the chain-of-thought step that expands a <target> description before SOP states are generated.
+ design_states_cot_system_prompt="""You are a character description master.Please translate the <target> into more reasonable expressions,enrich his character details and behavioral logic to make his behavior more reasonable ,help him design more steps to better complete his tasks in the current scenario, and allowing the scene to proceed normally), and think carefully step by step!"""
396
+
template.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## default { "temperature": 0.3, "model": "gpt-3.5-turbo-16k-0613","log_path": "logs/{your name}"}
# Shared LLM settings referenced by the SOP, each state, and each agent state.
LLM = {
    "temperature": 0.0,
    "model": "gpt-3.5-turbo-16k-0613",
    "log_path": "logs/god"
}


# Agent templates, keyed by agent name.  "roles" maps state name -> the role
# this agent plays in that state.
# Fix: in the original template "name2" was accidentally nested *inside*
# "Lilong"; the template clearly intends two sibling example agents.
Agents = {
    "Lilong": {
        "style": "professional",
        "roles": {
            "company": "coder",
            "state2": "role2",
        },
    },
    "name2": {
        "style": "professional",
        "roles": {
            "company": "coder",
            "state2": "role2",
        },
    },
}

# indispensable parameter: "controller_type"("order","random","rule")
# default extract words: "end". You can choose not to fill in this parameter
controller = {
    "controller_type": "order",
    "max_chat_nums": 12,
    "judge_system_prompt": "",
    "judge_last_prompt": "",
    "judge_extract_words": "end",
    "call_system_prompt": "",
    "call_last_prompt": "",
    "call_extract_words": ""
}

# Per-role configuration inside a state: persona ("style"), task and rules.
Agent_state = {
    "role": {
        "LLM_type": "OpenAI",
        "LLM": LLM,
        "style": {
            "role": "Opening Advocate for the Affirmative",
            "style": "professional"
        },
        "task": {
            "task": ""
        },
        "rule": {
            "rule": ""
        }
    },
}


# indispensable parameter: "agent_states","controller"
# "roles" determines the speaking order when the rule is order. If not set, it is the default order.
# "begin_query" & "begin_role" determines the first speaker.It often determines the direction of the next speech. If you do not set it, it will default to the first agent.
# "environment_prompt" : Responsible for setting the scene for the current environment
State = {
    "controller": controller,
    "begin_role": "",
    "begin_query": "",
    "environment_prompt": "",
    "roles": ["role1", "role2"],
    "LLM_type": "OpenAI",
    "LLM": LLM,
    "agent_state": Agent_state,
}


States = {
    "end_state": {
        "agent_states": {}
    },
    "state1": State
}


# default finish_state_name is "end_state"
# "environment_type" : "competive" : different states not share the memory; "cooperative": different states share the memory
# NOTE(review): the value "competive" is kept verbatim -- downstream code may
# match this exact (misspelled) string; confirm before correcting it.
SOP = {
    "config": {
        "API_KEY": "Your key",
        "PROXY": "Your PROXY",
        "MAX_CHAT_HISTORY": "5",
        "User_Names": "[\"alexander\"]"
    },
    "environment_type": "competive",
    "LLM_type": "OpenAI",
    "LLM": LLM,
    "root": "state1",
    "finish_state_name": "end_state",
    "relations": {
        "state1": {
            "0": "state1",
            "1": "state2"
        },
        "state2": {
            "0": "state2",
            "1": "end_state"
        }
    },
    "agents": Agents,
    "states": States,
}
utils.py ADDED
@@ -0,0 +1,482 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2023 The AIWaves Inc. team.
3
+
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """helper functions for an LLM autonoumous agent"""
17
+ import csv
18
+ import random
19
+ import json
20
+ import pandas
21
+ import numpy as np
22
+ import requests
23
+ import torch
24
+ from tqdm import tqdm
25
+ import re
26
+ import datetime
27
+ import string
28
+ import random
29
+ import os
30
+ import openai
31
+ from text2vec import semantic_search
32
+ import re
33
+ import datetime
34
+ from langchain.document_loaders import UnstructuredFileLoader
35
+ from langchain.text_splitter import CharacterTextSplitter
36
+ from sentence_transformers import SentenceTransformer
37
+
38
# Name of the embedding backend; overridable via the Embed_Model env var.
# Defaults to OpenAI's hosted "text-embedding-ada-002".
embed_model_name = os.environ["Embed_Model"] if "Embed_Model" in os.environ else "text-embedding-ada-002"
if embed_model_name in ["text-embedding-ada-002"]:
    # Hosted OpenAI model: nothing to load locally; get_embedding() calls the API.
    pass
else:
    # Any other name is treated as a local SentenceTransformer checkpoint,
    # loaded once at import time on CPU.
    embedding_model = SentenceTransformer(
        embed_model_name, device=torch.device("cpu")
    )
45
+
46
def get_embedding(sentence):
    """Embed *sentence* with the configured backend.

    Uses the OpenAI embeddings API when embed_model_name is
    "text-embedding-ada-002", otherwise the module-level SentenceTransformer.
    Always returns a 2-D torch.float32 tensor (rows x dim).
    """
    if embed_model_name in ["text-embedding-ada-002"]:
        openai.api_key = os.environ["API_KEY"]
        if "PROXY" in os.environ:
            # Only http/socks proxies are accepted.
            assert "http:" in os.environ["PROXY"] or "socks" in os.environ["PROXY"],"PROXY error,PROXY must be http or socks"
            openai.proxy = os.environ["PROXY"]
        if "API_BASE" in os.environ:
            openai.api_base = os.environ["API_BASE"]
        embedding_model = openai.Embedding
        embed = embedding_model.create(
            model=embed_model_name,
            input=sentence
        )
        # NOTE(review): only the first embedding in the response is used, so
        # in this branch a list input yields a single vector -- confirm callers
        # only pass one sentence when using the OpenAI backend.
        embed = embed["data"][0]["embedding"]
        embed = torch.tensor(embed,dtype=torch.float32)
    else:
        embed = embedding_model.encode(sentence,convert_to_tensor=True)
    if len(embed.shape)==1:
        # Normalize to shape (1, dim) so callers can always treat the result as a matrix.
        embed = embed.unsqueeze(0)
    return embed
66
+
67
+
68
def get_code():
    """Return a random 8-character alphanumeric identifier (all characters distinct)."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.sample(alphabet, 8))
70
+
71
+
72
def get_content_between_a_b(start_tag, end_tag, text):
    """Collect every span of *text* enclosed by start_tag/end_tag.

    Args:
        start_tag (str): opening delimiter
        end_tag (str): closing delimiter
        text (str): complete sentence

    Returns:
        str: all enclosed spans concatenated with single spaces, stripped;
        "" when no complete pair exists.
    """
    pieces = []
    search_from = 0
    while True:
        open_at = text.find(start_tag, search_from)
        if open_at == -1:
            break
        close_at = text.find(end_tag, open_at + len(start_tag))
        if close_at == -1:
            # Unclosed tag: ignore the remainder.
            break
        pieces.append(text[open_at + len(start_tag):close_at] + " ")
        search_from = close_at + len(end_tag)
    return "".join(pieces).strip()
95
+
96
+
97
def extract(text, type):
    """Return the content wrapped in <type></type> tags inside *text*.

    Args:
        text (str): complete sentence
        type (str): tag name (without angle brackets)

    Returns:
        str: content between <type> and </type>
    """
    return get_content_between_a_b(f"<{type}>", f"</{type}>", text)
109
+
110
def count_files_in_directory(directory):
    """Count the regular files (not subdirectories) directly inside *directory*."""
    entries = os.listdir(directory)
    return sum(1 for name in entries if os.path.isfile(os.path.join(directory, name)))
114
+
115
def delete_oldest_files(directory, num_to_keep):
    """Delete the *num_to_keep* oldest regular files in *directory*.

    NOTE: despite its name, the second argument is the number of files to
    DELETE -- the caller delete_files_if_exceed_threshold passes the excess
    count.  The name is kept for interface compatibility.

    Bug fix: the original never sorted by modification time, so it removed
    files in arbitrary os.listdir() order instead of oldest-first.
    """
    files = [(f, os.path.getmtime(os.path.join(directory, f)))
             for f in os.listdir(directory)
             if os.path.isfile(os.path.join(directory, f))]
    # Oldest first (smallest mtime).
    files.sort(key=lambda item: item[1])
    for name, _mtime in files[:min(num_to_keep, len(files))]:
        os.remove(os.path.join(directory, name))
123
+
124
def delete_files_if_exceed_threshold(directory, threshold, num_to_keep):
    """Prune *directory* when it holds more than *threshold* files.

    The oldest files are removed so that only *num_to_keep* remain.
    """
    current = count_files_in_directory(directory)
    if current > threshold:
        excess = current - num_to_keep
        delete_oldest_files(directory, excess)
130
+
131
def save_logs(log_path, messages, response):
    """Write one request/response pair as a timestamped JSON file under *log_path*.

    Keeps the directory bounded: once it holds more than 20 files, the oldest
    are pruned down to 10.

    Fixes vs. the original:
    - the "logs" fallback is applied *before* the path is used (it used to be
      applied only after the directory was already created and pruned);
    - a single os.makedirs(..., exist_ok=True) replaces the racy
      exists()/mkdir() pair plus a second redundant makedirs, and also
      creates missing parent directories.

    Args:
        log_path (str): directory for log files; falsy values fall back to "logs".
        messages: the model input to record.
        response: the model output to record.
    """
    log_path = log_path if log_path else "logs"
    os.makedirs(log_path, exist_ok=True)
    delete_files_if_exceed_threshold(log_path, 20, 10)
    log = {"input": messages, "output": response}
    log_file = os.path.join(
        log_path,
        datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S") + ".json")
    with open(log_file, "w", encoding="utf-8") as f:
        json.dump(log, f, ensure_ascii=False, indent=2)
145
+
146
+
147
+
148
+ def semantic_search_word2vec(query_embedding, kb_embeddings, top_k):
149
+ return semantic_search(query_embedding, kb_embeddings, top_k=top_k)
150
+
151
+
152
def cut_sent(para):
    """Split Chinese text into sentences, then regroup them three per chunk.

    Returns a list of strings, each the space-join of up to three consecutive
    sentences.
    """
    # Insert a newline after each sentence terminator that is not followed by
    # a closing quote, and after terminator+closing-quote pairs.
    patterns = [
        "([。!?\?])([^”’])",            # plain sentence enders
        "(\.{6})([^”’])",                # ASCII ellipsis "......"
        "(\…{2})([^”’])",                # CJK ellipsis "……"
        "([。!?\?][”’])([^,。!?\?])",   # ender followed by a closing quote
    ]
    for pat in patterns:
        para = re.sub(pat, r"\1\n\2", para)
    sentences = [s for s in para.rstrip().split("\n") if s]
    group = 3  # sentences per chunk
    return [
        " ".join(sentences[i:i + group])
        for i in range(0, len(sentences), group)
    ]
165
+
166
+
167
def process_document(file_path):
    """Embed a document into a JSON knowledge base under temp_database/.

    CSV files (expected columns "question" and "answer") are treated as QA
    data: each pair is embedded several ways (q+chunk, chunk, q, q+a, a) so
    retrieval can later match against any of those keys.  Any other file type
    is loaded with UnstructuredFileLoader, split into overlapping character
    chunks, and each chunk is embedded.

    Args:
        file_path (str): path of the document to index.

    Returns:
        dict: {"knowledge_base": <json path>, "type": "QA" | "UnstructuredFile"}

    JSON format:
        Dict[num, Dict[q:str, a:str, chunk:str, emb:List[float]]]
        ("q"/"a" keys are present only for QA data).
    """
    final_dict = {}
    count = 0
    if file_path.endswith(".csv"):
        dataset = pandas.read_csv(file_path)
        questions = dataset["question"]
        answers = dataset["answer"]
        # embedding q+chunk: question concatenated with each answer sentence-chunk
        for q, a in zip(questions, answers):
            for text in cut_sent(a):
                temp_dict = {}
                temp_dict["q"] = q
                temp_dict["a"] = a
                temp_dict["chunk"] = text
                temp_dict["emb"] = get_embedding(q + text).tolist()
                final_dict[count] = temp_dict
                count += 1
        # embedding chunk: each answer sentence-chunk on its own
        for q, a in zip(questions, answers):
            for text in cut_sent(a):
                temp_dict = {}
                temp_dict["q"] = q
                temp_dict["a"] = a
                temp_dict["chunk"] = text
                temp_dict["emb"] = get_embedding(text).tolist()
                final_dict[count] = temp_dict
                count += 1
        # embedding q: question alone (full answer kept as the chunk)
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(q).tolist()
            final_dict[count] = temp_dict
            count += 1
        # embedding q+a: question concatenated with the full answer
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(q + a).tolist()
            final_dict[count] = temp_dict
            count += 1
        # embedding a: full answer alone
        for q, a in zip(questions, answers):
            temp_dict = {}
            temp_dict["q"] = q
            temp_dict["a"] = a
            temp_dict["chunk"] = a
            temp_dict["emb"] = get_embedding(a).tolist()
            final_dict[count] = temp_dict
            count += 1
        print(f"finish updating {len(final_dict)} data!")
        os.makedirs("temp_database", exist_ok=True)
        # NOTE(review): the replace() keys off the FIRST dot in file_path, so
        # paths whose directories contain dots may produce a wrong name --
        # verify against actual callers.
        save_path = os.path.join(
            "temp_database/",
            file_path.split("/")[-1].replace("." + file_path.split(".")[1],
                                             ".json"),
        )
        print(save_path)
        with open(save_path, "w") as f:
            json.dump(final_dict, f, ensure_ascii=False, indent=2)
        return {"knowledge_base": save_path, "type": "QA"}
    else:
        loader = UnstructuredFileLoader(file_path)
        docs = loader.load()
        text_spiltter = CharacterTextSplitter(chunk_size=200,
                                              chunk_overlap=100)
        docs = text_spiltter.split_text(docs[0].page_content)
        os.makedirs("temp_database", exist_ok=True)
        # NOTE(review): unlike the CSV branch, this keeps the full directory
        # part of file_path inside the temp_database/ name -- confirm this
        # asymmetry is intended.
        save_path = os.path.join(
            "temp_database/",
            file_path.replace("." + file_path.split(".")[1], ".json"))
        final_dict = {}
        count = 0
        for c in tqdm(docs):
            temp_dict = {}
            temp_dict["chunk"] = c
            temp_dict["emb"] = get_embedding(c).tolist()
            final_dict[count] = temp_dict
            count += 1
        print(f"finish updating {len(final_dict)} data!")
        with open(save_path, "w") as f:
            json.dump(final_dict, f, ensure_ascii=False, indent=2)
        return {"knowledge_base": save_path, "type": "UnstructuredFile"}
263
+
264
def load_knowledge_base_qa(path):
    """Load a QA-style knowledge base (as written by process_document).

    Args:
        path (str): path of the JSON knowledge-base file.

    Returns:
        tuple: (embeddings, questions, answers, chunks) where embeddings is a
        float32 torch tensor with one row per entry.
    """
    print("path", path)
    with open(path, "r") as f:
        data = json.load(f)
    vectors, questions, answers, chunks = [], [], [], []
    # Entries are keyed "0".."N-1"; walk them in index order.
    for idx in range(len(data)):
        entry = data[str(idx)]
        vectors.append(entry["emb"])
        questions.append(entry["q"])
        answers.append(entry["a"])
        chunks.append(entry["chunk"])
    embeddings = torch.from_numpy(np.array(vectors, dtype=np.float32)).squeeze()
    return embeddings, questions, answers, chunks
283
+
284
+
285
def load_knowledge_base_UnstructuredFile(path):
    """Load a chunk-only knowledge base (as written by process_document).

    Args:
        path (str): path of the JSON knowledge-base file.

    Returns:
        tuple: (embeddings, chunks) where embeddings is a float32 torch tensor
        with one row per chunk.
    """
    with open(path, "r") as f:
        data = json.load(f)
    vectors, chunks = [], []
    # Entries are keyed "0".."N-1"; walk them in index order.
    for idx in range(len(data)):
        entry = data[str(idx)]
        vectors.append(entry["emb"])
        chunks.append(entry["chunk"])
    embeddings = torch.from_numpy(np.array(vectors, dtype=np.float32)).squeeze()
    return embeddings, chunks
299
+
300
+
301
def cos_sim(a: torch.Tensor, b: torch.Tensor):
    """
    Cosine similarity between every row of *a* and every row of *b*.

    Accepts tensors or anything torch.tensor() accepts; a 1-D input is
    treated as a single row.

    :return: Matrix res with res[i][j] = cos_sim(a[i], b[j])
    """
    def as_matrix(x):
        # Coerce to a 2-D tensor (rows of vectors).
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x)
        return x.unsqueeze(0) if len(x.shape) == 1 else x

    a_norm = torch.nn.functional.normalize(as_matrix(a), p=2, dim=1)
    b_norm = torch.nn.functional.normalize(as_matrix(b), p=2, dim=1)
    return torch.mm(a_norm, b_norm.transpose(0, 1))
321
+
322
+
323
def matching_a_b(a, b, requirements=None):
    """Cosine-similarity scores between *a* and each candidate in *b*.

    *requirements* is accepted for signature compatibility but is unused.
    Returns a 1-D tensor (row 0 of the full similarity matrix).
    """
    a_embedder = get_embedding(a)
    # get the embedding of the candidates
    b_embeder = get_embedding(b)
    sim_scores = cos_sim(a_embedder, b_embeder)[0]
    return sim_scores
329
+
330
+
331
def matching_category(inputtext,
                      forest_name,
                      requirements=None,
                      cat_embedder=None,
                      top_k=3):
    """
    Args:
        inputtext: the category name to be matched
        forest_name: list of category names, aligned row-for-row with cat_embedder
        requirements: optional space-separated keywords; when given, their
            averaged similarity is used as the score (see NOTE below)
        cat_embedder: embedding matrix of the candidate categories
        top_k: the default three highest scoring results
    Return:
        topk matching_result. List[List] [[top1_name,top2_name,top3_name],[top1_score,top2_score,top3_score]]
    """

    # Fallback scores when inputtext is empty (assumes <= 100 categories --
    # TODO confirm).
    sim_scores = torch.zeros([100])
    if inputtext:
        input_embeder = get_embedding(inputtext)
        sim_scores = cos_sim(input_embeder, cat_embedder)[0]

    if requirements:
        requirements = requirements.split(" ")
        requirements_embedder = get_embedding(requirements)
        req_scores = cos_sim(requirements_embedder, cat_embedder)
        # Average the per-keyword similarities over all requirement words.
        req_scores = torch.mean(req_scores, dim=0)
        # NOTE(review): sim_scores computed above is discarded here -- possibly
        # total_scores was meant to combine req_scores with sim_scores; confirm
        # intended behavior before changing.
        total_scores = req_scores
    else:
        total_scores = sim_scores

    top_k_cat = torch.topk(total_scores, k=top_k)
    top_k_score, top_k_idx = top_k_cat[0], top_k_cat[1]
    top_k_name = [forest_name[top_k_idx[i]] for i in range(0, top_k)]

    return [top_k_name, top_k_score.tolist(), top_k_idx]
364
+
365
+
366
def sample_with_order_preserved(lst, num):
    """Randomly pick *num* elements from *lst*, keeping their original relative order."""
    chosen = sorted(random.sample(range(len(lst)), num))
    return [lst[i] for i in chosen]
372
+
373
+
374
def limit_values(data, max_values):
    """Trim every value list in *data* (in place) to at most *max_values* items.

    Oversized lists are randomly subsampled while preserving each list's
    original element order.  Returns the same dict.
    """
    for key in data:
        values = data[key]
        if len(values) > max_values:
            data[key] = sample_with_order_preserved(values, max_values)
    return data
380
+
381
+
382
def limit_keys(data, max_keys):
    """Restrict *data* to at most *max_keys* keys.

    Keys are chosen at random but keep their original relative order.
    Returns *data* unchanged when it is already small enough.
    """
    keys = list(data.keys())
    if len(keys) <= max_keys:
        return data
    kept = sample_with_order_preserved(keys, max_keys)
    return {key: data[key] for key in kept}
389
+
390
+
391
def flatten_dict(nested_dict):
    """Flatten arbitrarily nested dicts into a single level.

    Leaf values keep their innermost key; duplicate keys met later in
    iteration order overwrite earlier ones.
    """
    flat = {}
    for key, value in nested_dict.items():
        if isinstance(value, dict):
            flat.update(flatten_dict(value))
        else:
            flat[key] = value
    return flat
403
+
404
+
405
def merge_list(list1, list2):
    """Append to *list1* (in place) each element of *list2* not already present.

    Order is preserved; returns list1 for convenience.
    """
    for item in list2:
        if item not in list1:
            list1.append(item)
    return list1
410
+
411
+
412
def Search_Engines(req):
    """Query the shopping search service (SHOPPING_SEARCH env var) for *req*.

    Args:
        req (str): keyword string to search for.

    Returns:
        tuple: (items, top_categories).  On a malformed response both are
        empty lists -- the original returned a bare [], which broke the
        tuple-unpacking caller search_with_api.
    """
    # int() instead of eval(): FETSIZE comes from the environment and must be
    # a plain integer, never arbitrary code.
    FETSIZE = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5

    payload = {"keyword": req, "catLeafName": "", "fetchSize": FETSIZE}
    url = os.environ["SHOPPING_SEARCH"]
    res = requests.post(
        url=url,
        json=payload,
    )
    user_dict = json.loads(res.text)
    if "data" in user_dict.keys():
        request_items = user_dict["data"]["items"]  # product records returned by the query
        top_category = user_dict["data"]["topCategories"]
        return request_items, top_category
    else:
        return [], []
428
+
429
+
430
def search_with_api(requirements, categery):
    """Search products matching *requirements* within category *categery*.

    Progressively relaxes the query by dropping the leading requirement word
    until at least FETSIZE items are collected or no words remain.

    Returns:
        tuple: (items capped at FETSIZE, top categories with "other"
        categories filtered out).
    """
    # int() instead of eval(): the env value must be a plain integer.
    FETSIZE = int(os.environ["FETSIZE"]) if "FETSIZE" in os.environ else 5

    request_items = []
    all_req_list = requirements.split(" ")
    count = 0

    while len(request_items) < FETSIZE and len(all_req_list) > 0:
        if count:
            # After the first round, relax the query by dropping the first word.
            all_req_list.pop(0)
        all_req = " ".join(all_req_list)
        if categery not in all_req_list:
            all_req = all_req + " " + categery
        now_request_items, top_category = Search_Engines(all_req)
        request_items = merge_list(request_items, now_request_items)
        count += 1
    new_top = []
    for category in top_category:
        # Bug fix: the original tested the identical string twice
        # ('"其它" in category or "其它" in category'); the second operand was
        # clearly meant to be the variant spelling "其他".
        if "其它" in category or "其他" in category:
            continue
        new_top.append(category)
    if len(request_items) > FETSIZE:
        request_items = request_items[:FETSIZE]
    return request_items, new_top
456
+
457
+
458
+
459
def get_relevant_history(query,history,embeddings):
    """
    Retrieve a list of key history entries based on a query using semantic search.

    Args:
        query (str): The input query for which key history is to be retrieved.
        history (list): A list of historical key entries, aligned row-for-row
            with *embeddings*.
        embeddings (numpy.ndarray): An array of embedding vectors for historical entries.

    Returns:
        list: A list of key history entries most similar to the query.
    """
    # NOTE(review): eval() on an env var is risky -- int() would be safer if
    # TOP_K is always a plain integer.
    TOP_K = eval(os.environ["TOP_K"]) if "TOP_K" in os.environ else 2
    relevant_history = []
    query_embedding = get_embedding(query)
    # semantic_search returns one hit list per query row; a single query is passed.
    hits = semantic_search(query_embedding, embeddings, top_k=min(TOP_K,embeddings.shape[0]))
    hits = hits[0]
    for hit in hits:
        matching_idx = hit["corpus_id"]
        try:
            relevant_history.append(history[matching_idx])
        except:
            # NOTE(review): bare except discards everything found so far -- a
            # single out-of-range corpus_id makes the whole lookup return [].
            return []
    return relevant_history