Spaces:
Runtime error
Runtime error
init
Browse files- .DS_Store +0 -0
- app.py +60 -0
- custom_tools.py +272 -0
- db_utils.py +239 -0
- img/crewai_logo.png +0 -0
- img/crews.png +0 -0
- img/favicon.ico +0 -0
- img/kickoff.png +0 -0
- llms.py +106 -0
- my_agent.py +124 -0
- my_crew.py +262 -0
- my_task.py +90 -0
- my_tools.py +368 -0
- pg_agents.py +80 -0
- pg_crew_run.py +175 -0
- pg_crews.py +31 -0
- pg_export_crew.py +528 -0
- pg_tasks.py +81 -0
- pg_tools.py +69 -0
- utils.py +24 -0
.DS_Store
ADDED
Binary file (6.15 kB). View file
|
|
app.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit import session_state as ss
|
3 |
+
import db_utils
|
4 |
+
from pg_agents import PageAgents
|
5 |
+
from pg_tasks import PageTasks
|
6 |
+
from pg_crews import PageCrews
|
7 |
+
from pg_tools import PageTools
|
8 |
+
from pg_crew_run import PageCrewRun
|
9 |
+
from pg_export_crew import PageExportCrew
|
10 |
+
from dotenv import load_dotenv
|
11 |
+
import os
|
12 |
+
def pages():
    """Return the page-name -> page-object mapping, in sidebar display order."""
    page_entries = [
        ('Crews', PageCrews()),
        ('Tools', PageTools()),
        ('Agents', PageAgents()),
        ('Tasks', PageTasks()),
        ('Kickoff!', PageCrewRun()),
        ('Import/export', PageExportCrew()),
    ]
    return dict(page_entries)
|
21 |
+
|
22 |
+
def load_data():
    """Load all persisted entities from the database into Streamlit session state."""
    ss.agents = db_utils.load_agents()
    ss.tasks = db_utils.load_tasks()
    ss.crews = db_utils.load_crews()
    ss.tools = db_utils.load_tools()
    # Mapping of tool enablement flags persisted separately from the tools themselves.
    ss.enabled_tools = db_utils.load_tools_state()
|
28 |
+
|
29 |
+
|
30 |
+
def draw_sidebar():
    """Render the sidebar logo and page selector; rerun the app on page change."""
    with st.sidebar:
        st.image("img/crewai_logo.png")

        if 'page' not in ss:
            ss.page = 'Crews'

        # Compute the page-name list once. The original called pages() twice
        # in the st.radio() line, instantiating every page object twice per rerun.
        page_names = list(pages().keys())
        selected_page = st.radio('Page', page_names, index=page_names.index(ss.page), label_visibility="collapsed")
        if selected_page != ss.page:
            ss.page = selected_page
            st.rerun()
|
41 |
+
|
42 |
+
def main():
    """Streamlit entry point: configure the app, init optional telemetry and DB, draw the UI."""
    st.set_page_config(page_title="CrewAI Studio", page_icon="img/favicon.ico", layout="wide")
    load_dotenv()
    # Optional AgentOps telemetry. A failed import is remembered in session
    # state so the import is not retried on every Streamlit rerun.
    if (str(os.getenv('AGENTOPS_ENABLED')).lower() in ['true', '1']) and not ss.get('agentops_failed', False):
        try:
            import agentops
            agentops.init(api_key=os.getenv('AGENTOPS_API_KEY'),auto_start_session=False)
        except ModuleNotFoundError as e:
            ss.agentops_failed = True
            print(f"Error initializing AgentOps: {str(e)}")

    db_utils.initialize_db()
    load_data()
    draw_sidebar()
    PageCrewRun.maintain_session_state() #this will persist the session state for the crew run page so crew run can be run in a separate thread
    # Dispatch rendering to whichever page is currently selected.
    pages()[ss.page].draw()
|
58 |
+
|
59 |
+
if __name__ == '__main__':
    # Streamlit executes this module as a script on every rerun.
    main()
|
custom_tools.py
ADDED
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from typing import Optional, Dict, Any, List, Type
|
3 |
+
from crewai_tools import BaseTool
|
4 |
+
import requests
|
5 |
+
import importlib.util
|
6 |
+
from pydantic.v1 import BaseModel, Field,root_validator, ValidationError
|
7 |
+
import docker
|
8 |
+
import base64
|
9 |
+
|
10 |
+
class FixedCustomFileWriteToolInputSchema(BaseModel):
    """Arguments for CustomFileWriteTool when the target filename is fixed at
    construction time, so the agent supplies only content and mode."""
    content: str = Field(..., description="The content to write or append to the file")
    mode: str = Field(..., description="Mode to open the file in, either 'w' or 'a'")
|
13 |
+
|
14 |
+
class CustomFileWriteToolInputSchema(FixedCustomFileWriteToolInputSchema):
    """Arguments for CustomFileWriteTool when the agent also chooses the filename.

    `content` and `mode` are inherited from FixedCustomFileWriteToolInputSchema;
    the original re-declared both with identical definitions, which was redundant.
    """
    filename: str = Field(..., description="The name of the file to write to or append")
|
18 |
+
|
19 |
+
class CustomFileWriteTool(BaseTool):
    """Tool that writes or appends text content to a file under a fixed base folder."""
    name: str = "Write File"
    description: str = "Tool to write or append to files"
    args_schema = CustomFileWriteToolInputSchema
    filename: Optional[str] = None

    def __init__(self, base_folder: str, filename: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        # With a fixed filename the agent must not pick one, so switch to the
        # schema that has no `filename` field.
        if filename is not None and len(filename) > 0:
            self.args_schema = FixedCustomFileWriteToolInputSchema
        self._base_folder = base_folder
        self.filename = filename or None
        self._ensure_base_folder_exists()
        self._generate_description()

    def _ensure_base_folder_exists(self):
        """Create the base folder (and parents) if it does not exist yet."""
        os.makedirs(self._base_folder, exist_ok=True)

    def _get_full_path(self, filename: Optional[str]) -> str:
        """Resolve `filename` (or the configured default) inside the base folder.

        Raises:
            ValueError: if no filename is available, or if the resolved path
                escapes the base folder (path traversal such as '../..').
        """
        if filename is None and self.filename is None:
            raise ValueError("No filename specified and no default file set.")

        chosen_file = filename or self.filename
        base = os.path.abspath(self._base_folder)
        full_path = os.path.abspath(os.path.join(base, chosen_file))

        # The original used startswith(), which wrongly accepts sibling
        # directories (e.g. '/base-evil' passes for base '/base').
        # commonpath() compares whole path components.
        if os.path.commonpath([full_path, base]) != base:
            raise ValueError("Access outside the base directory is not allowed.")

        return full_path

    def _run(self, content: str, mode: str, filename: Optional[str] = None) -> Dict[str, Any]:
        """Write (`mode='w'`) or append (`mode='a'`) content; never raises.

        Returns a {'status': 'success'|'error', 'message': str} dict so the
        agent can react to failures instead of crashing.
        """
        full_path = self._get_full_path(filename)
        try:
            # Any mode other than 'a' is treated as overwrite.
            with open(full_path, 'a' if mode == 'a' else 'w') as file:
                file.write(content)
            return {
                "status": "success",
                "message": f"Content successfully {'appended to' if mode == 'a' else 'written to'} {full_path}"
            }
        except Exception as e:
            return {
                "status": "error",
                "message": str(e)
            }

    def run(self, input_data: CustomFileWriteToolInputSchema) -> Any:
        """Entry point taking a validated schema object; delegates to _run()."""
        response_data = self._run(
            content=input_data.content,
            mode=input_data.mode,
            filename=input_data.filename
        )
        return response_data
|
72 |
+
|
73 |
+
class CustomApiToolInputSchema(BaseModel):
    """Per-call arguments for CustomApiTool; defaults configured on the tool
    (base URL, headers, query params) are merged in by the tool itself."""
    endpoint: str = Field(..., description="The specific endpoint for the API call")
    method: str = Field(..., description="HTTP method to use (GET, POST, PUT, DELETE)")
    headers: Optional[Dict[str, str]] = Field(None, description="HTTP headers to include in the request")
    query_params: Optional[Dict[str, Any]] = Field(None, description="Query parameters for the request")
    body: Optional[Dict[str, Any]] = Field(None, description="Body of the request for POST/PUT methods")
|
79 |
+
|
80 |
+
class CustomApiTool(BaseTool):
    """Tool that performs an HTTP request built from a base URL plus per-call parameters."""
    name: str = "Call Api"
    description: str = "Tool to make API calls with customizable parameters"
    args_schema = CustomApiToolInputSchema
    base_url: Optional[str] = None
    default_headers: Optional[Dict[str, str]] = None
    default_query_params: Optional[Dict[str, Any]] = None

    def __init__(self, base_url: Optional[str] = None, headers: Optional[Dict[str, str]] = None, query_params: Optional[Dict[str, Any]] = None, **kwargs):
        super().__init__(**kwargs)
        self.base_url = base_url
        self.default_headers = headers or {}
        self.default_query_params = query_params or {}
        self._generate_description()

    def _run(self, endpoint: str, method: str, headers: Optional[Dict[str, str]] = None, query_params: Optional[Dict[str, Any]] = None, body: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Perform the request; returns {'status_code': int, 'response': parsed body}.

        Never raises: any exception is reported as status_code 500 with the
        exception text, so the calling agent can react to it.
        """
        # Join base URL and endpoint without producing '//' — the original
        # f"{base}/{endpoint}" double-slashed when endpoint began with '/',
        # and interpolated the literal string 'None' when base_url was unset.
        url = f"{(self.base_url or '').rstrip('/')}/{endpoint.lstrip('/')}".rstrip("/")
        # Per-call values override the tool-level defaults.
        headers = {**self.default_headers, **(headers or {})}
        query_params = {**self.default_query_params, **(query_params or {})}

        try:
            response = requests.request(
                method=method.upper(),
                url=url,
                headers=headers,
                params=query_params,
                json=body,
                verify=False  # SECURITY: SSL verification disabled; TODO make this configurable
            )
            # startswith() also matches 'application/json; charset=utf-8',
            # which the original exact == comparison missed (it fell back to text).
            content_type = response.headers.get("Content-Type", "")
            return {
                "status_code": response.status_code,
                "response": response.json() if content_type.startswith("application/json") else response.text
            }
        except Exception as e:
            return {
                "status_code": 500,
                "response": str(e)
            }

    def run(self, input_data: CustomApiToolInputSchema) -> Any:
        """Entry point taking a validated schema object; delegates to _run()."""
        response_data = self._run(
            endpoint=input_data.endpoint,
            method=input_data.method,
            headers=input_data.headers,
            query_params=input_data.query_params,
            body=input_data.body
        )
        return response_data
|
130 |
+
|
131 |
+
class CustomCodeInterpreterSchema(BaseModel):
    """Input for CustomCodeInterpreterTool."""
    # Exactly one of `code` or `run_script` must be provided; enforced by the
    # root validator below.
    code: Optional[str] = Field(
        None,
        description="Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code",
    )

    run_script: Optional[str] = Field(
        None,
        description="Relative path to the script to run in the Docker container. The script should contain the code to be executed.",
    )

    libraries_used: str = Field(
        ...,
        description="List of libraries used in the code with proper installing names separated by commas. Example: numpy,pandas,beautifulsoup4",
    )

    @root_validator
    def check_code_or_run_script(cls, values):
        # Mutually exclusive but one required: reject none, and reject both.
        code = values.get('code')
        run_script = values.get('run_script')
        if not code and not run_script:
            raise ValueError('Either code or run_script must be provided')
        if code and run_script:
            raise ValueError('Only one of code or run_script should be provided')
        return values
|
157 |
+
|
158 |
+
class CustomCodeInterpreterTool(BaseTool):
    """Tool that executes Python code inside a dedicated Docker container.

    The container mounts `workspace_dir` at /workspace (read-write) and is
    reused across calls when one named 'custom-code-interpreter' is already
    running. Code is shipped to the container base64-encoded to avoid shell
    quoting issues.
    """
    name: str = "Code Interpreter"
    description: str = "Interprets Python3 code strings with a final print statement. Requires eighter code or run_script to be provided."
    args_schema: Type[BaseModel] = CustomCodeInterpreterSchema
    code: Optional[str] = None
    run_script: Optional[str] = None
    workspace_dir: Optional[str] = None

    def __init__(self, workspace_dir: Optional[str] = None, **kwargs):
        super().__init__(**kwargs)
        if workspace_dir is not None and len(workspace_dir) > 0:
            self.workspace_dir = os.path.abspath(workspace_dir)
            os.makedirs(self.workspace_dir, exist_ok=True)
        self._generate_description()

    @staticmethod
    def _get_installed_package_path():
        # Locate the installed crewai_tools package on disk; its bundled
        # Dockerfile is used to build the interpreter image if missing.
        spec = importlib.util.find_spec('crewai_tools')
        return os.path.dirname(spec.origin)

    def _verify_docker_image(self) -> None:
        """
        Verify if the Docker image is available
        """
        image_tag = "code-interpreter:latest"
        client = docker.from_env()

        try:
            client.images.get(image_tag)

        except docker.errors.ImageNotFound:
            # Build the image from the Dockerfile shipped with crewai_tools.
            package_path = self._get_installed_package_path()
            dockerfile_path = os.path.join(package_path, "tools/code_interpreter_tool")
            if not os.path.exists(dockerfile_path):
                raise FileNotFoundError(f"Dockerfile not found in {dockerfile_path}")

            client.images.build(
                path=dockerfile_path,
                tag=image_tag,
                rm=True,
            )

    def _install_libraries(
        self, container: docker.models.containers.Container, libraries: str
    ) -> None:
        """
        Install missing libraries in the Docker container
        """
        # `libraries` is a comma-separated string (see the schema); install
        # failures are logged but do not abort the run.
        if libraries and len(libraries) > 0:
            for library in libraries.split(","):
                print(f"Installing library: {library}")
                install_result = container.exec_run(f"pip install {library}")
                if install_result.exit_code != 0:
                    print(f"Something went wrong while installing the library: {library}")
                    print(install_result.output.decode("utf-8"))

    def _get_existing_container(self, container_name: str) -> Optional[docker.models.containers.Container]:
        # Reuse a running container of that name; remove an exited leftover so
        # a fresh one can be started under the same name.
        client = docker.from_env()
        try:
            existing_container = client.containers.get(container_name)
            if existing_container.status == 'running':
                return existing_container
            if existing_container.status == 'exited':
                existing_container.remove()
        except docker.errors.NotFound:
            pass
        return None

    def _init_docker_container(self) -> docker.models.containers.Container:
        """Return a running interpreter container, reusing an existing one if possible."""
        client = docker.from_env()
        volumes = {}
        if self.workspace_dir:
            volumes[self.workspace_dir] = {"bind": "/workspace", "mode": "rw"}
        container_name = "custom-code-interpreter"
        existing_container = self._get_existing_container(container_name)
        if existing_container:
            return existing_container
        return client.containers.run(
            "code-interpreter", detach=True, tty=True, working_dir="/workspace", name=container_name, volumes=volumes
        )

    def run_code_in_docker(self, code: str, libraries_used: str) -> str:
        """Execute `code` in the container and return its combined output.

        On a non-zero exit code the error output is returned as the result
        string (prefixed with an explanation) rather than raised.
        """
        self._verify_docker_image()
        container = self._init_docker_container()
        self._install_libraries(container, libraries_used)

        # Encode the code to base64
        encoded_code = base64.b64encode(code.encode('utf-8')).decode('utf-8')

        # Create a command to decode the base64 string and run the Python code
        cmd_to_run = f'python3 -c "import base64; exec(base64.b64decode(\'{encoded_code}\').decode(\'utf-8\'))"'

        print(f"Running code in container: \n{code}")

        exec_result = container.exec_run(cmd_to_run)

        if exec_result.exit_code != 0:
            print(f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}")
            return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}"
        print(f"Code run output: \n{exec_result.output.decode('utf-8')}")
        return exec_result.output.decode("utf-8")

    def _run_script(self, run_script: str,libraries_used: str) -> str:
        # Read the script from the host workspace and run its contents like
        # inline code; `run_script` is relative to workspace_dir.
        with open(f"{self.workspace_dir}/{run_script}", "r") as file:
            code = file.read()
        return self.run_code_in_docker(code, libraries_used)

    def _run(self, **kwargs) -> str:
        """Dispatch to script execution or inline-code execution."""
        code = kwargs.get("code", self.code)
        run_script = kwargs.get("run_script", self.run_script)
        libraries_used = kwargs.get("libraries_used", [])
        if run_script:
            return self._run_script(run_script, libraries_used)
        return self.run_code_in_docker(code, libraries_used)
|
db_utils.py
ADDED
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sqlite3
|
2 |
+
import os
|
3 |
+
import json
|
4 |
+
from my_tools import TOOL_CLASSES
|
5 |
+
|
6 |
+
DB_NAME = 'crewai.db'
|
7 |
+
|
8 |
+
def get_db_connection():
    """Open a connection to the app database with dict-like row access."""
    connection = sqlite3.connect(DB_NAME)
    connection.row_factory = sqlite3.Row
    return connection
|
12 |
+
|
13 |
+
def create_tables():
    """Create the single `entities` key/value table if it is missing.

    All persisted objects (agents, tasks, crews, tools, tools state) are
    stored in this one table as JSON blobs keyed by (id, entity_type).
    """
    conn = get_db_connection()
    try:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS entities (
                id TEXT PRIMARY KEY,
                entity_type TEXT,
                data TEXT
            )
        ''')
        conn.commit()
    finally:
        # Close even if the DDL fails — the original leaked the connection on error.
        conn.close()
|
27 |
+
|
28 |
+
def initialize_db():
    """Ensure the database file and the `entities` table exist.

    CREATE TABLE IF NOT EXISTS is idempotent, so the original's separate
    "file exists -> probe sqlite_master" branch was redundant; a single
    unconditional create covers both the fresh and the existing database.
    """
    create_tables()
|
39 |
+
|
40 |
+
def save_entity(entity_type, entity_id, data):
    """Insert or overwrite one entity row; `data` is stored as a JSON string."""
    payload = json.dumps(data)
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute(
        '''
        INSERT OR REPLACE INTO entities (id, entity_type, data)
        VALUES (?, ?, ?)
    ''',
        (entity_id, entity_type, payload),
    )
    conn.commit()
    conn.close()
|
49 |
+
|
50 |
+
def load_entities(entity_type):
    """Return [(id, decoded_data), ...] for every row of the given type."""
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM entities WHERE entity_type = ?', (entity_type,))
    records = cursor.fetchall()
    conn.close()

    entities = []
    for record in records:
        entities.append((record['id'], json.loads(record['data'])))
    return entities
|
57 |
+
|
58 |
+
def delete_entity(entity_type, entity_id):
    """Remove the row matching both id and type; no-op when absent."""
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute(
        '''
        DELETE FROM entities WHERE id = ? AND entity_type = ?
    ''',
        (entity_id, entity_type),
    )
    conn.commit()
    conn.close()
|
66 |
+
|
67 |
+
def save_tools_state(enabled_tools):
    """Persist the tool-enablement mapping under the fixed id 'enabled_tools'."""
    save_entity('tools_state', 'enabled_tools', {'enabled_tools': enabled_tools})
|
72 |
+
|
73 |
+
def load_tools_state():
    """Load the persisted tool-enablement mapping; {} when never saved."""
    entries = load_entities('tools_state')
    if not entries:
        return {}
    _, state = entries[0]
    return state.get('enabled_tools', {})
|
78 |
+
|
79 |
+
def save_agent(agent):
    """Serialize a MyAgent to the entities table; tools are stored as ids only."""
    record = {
        'created_at': agent.created_at,
        'role': agent.role,
        'backstory': agent.backstory,
        'goal': agent.goal,
        'allow_delegation': agent.allow_delegation,
        'verbose': agent.verbose,
        'cache': agent.cache,
        'llm_provider_model': agent.llm_provider_model,
        'temperature': agent.temperature,
        'max_iter': agent.max_iter,
        # Tools are persisted by id and re-linked in load_agents().
        'tool_ids': [t.tool_id for t in agent.tools],
    }
    save_entity('agent', agent.id, record)
|
94 |
+
|
95 |
+
def load_agents():
    """Load all agents, re-linking their tools by id; sorted by creation time."""
    from my_agent import MyAgent  # local import avoids a circular dependency
    tools_by_id = {tool.tool_id: tool for tool in load_tools()}
    agents = []
    for agent_id, data in load_entities('agent'):
        # tool_ids is not a MyAgent constructor argument, so pop it first.
        tool_ids = data.pop('tool_ids', [])
        agent = MyAgent(id=agent_id, **data)
        # Tools deleted since the agent was saved are silently dropped.
        agent.tools = [tools_by_id[tid] for tid in tool_ids if tid in tools_by_id]
        agents.append(agent)
    agents.sort(key=lambda a: a.created_at)
    return agents
|
107 |
+
|
108 |
+
def delete_agent(agent_id):
    """Delete the agent row with the given id from the database."""
    delete_entity('agent', agent_id)
|
110 |
+
|
111 |
+
def save_task(task):
    """Serialize a MyTask; the owning agent is stored by id only."""
    record = {
        'description': task.description,
        'expected_output': task.expected_output,
        'async_execution': task.async_execution,
        # None when the task has no assigned agent.
        'agent_id': task.agent.id if task.agent else None,
        'context_from_async_tasks_ids': task.context_from_async_tasks_ids,
        'context_from_sync_tasks_ids': task.context_from_sync_tasks_ids,
        'created_at': task.created_at,
    }
    save_entity('task', task.id, record)
|
122 |
+
|
123 |
+
def load_tasks():
    """Load all tasks, resolving each task's agent by id; sorted by creation time."""
    from my_task import MyTask  # local import avoids a circular dependency
    agents_by_id = {agent.id: agent for agent in load_agents()}
    tasks = []
    for task_id, data in load_entities('task'):
        # A missing/deleted agent resolves to None.
        owner = agents_by_id.get(data.pop('agent_id', None))
        tasks.append(MyTask(id=task_id, agent=owner, **data))
    tasks.sort(key=lambda t: t.created_at)
    return tasks
|
134 |
+
|
135 |
+
def delete_task(task_id):
    """Delete the task row with the given id from the database."""
    delete_entity('task', task_id)
|
137 |
+
|
138 |
+
def save_crew(crew):
    """Serialize a MyCrew; member agents/tasks and the manager agent are stored by id."""
    record = {
        'name': crew.name,
        'process': crew.process,
        'verbose': crew.verbose,
        'agent_ids': [member.id for member in crew.agents],
        'task_ids': [item.id for item in crew.tasks],
        'memory': crew.memory,
        'cache': crew.cache,
        'planning': crew.planning,
        'max_rpm': crew.max_rpm,
        'manager_llm': crew.manager_llm,
        'manager_agent_id': crew.manager_agent.id if crew.manager_agent else None,
        'created_at': crew.created_at,
    }
    save_entity('crew', crew.id, record)
|
154 |
+
|
155 |
+
def load_crews():
    """Load all crews, re-linking agents, tasks and the manager agent by id."""
    from my_crew import MyCrew  # local import avoids a circular dependency
    agents_by_id = {agent.id: agent for agent in load_agents()}
    tasks_by_id = {task.id: task for task in load_tasks()}
    crews = []
    for crew_id, data in load_entities('crew'):
        crew = MyCrew(
            id=crew_id,
            name=data['name'],
            process=data['process'],
            verbose=data['verbose'],
            created_at=data['created_at'],
            # .get() keeps rows saved before these fields existed loadable.
            memory=data.get('memory'),
            cache=data.get('cache'),
            planning=data.get('planning'),
            max_rpm=data.get('max_rpm'),
            manager_llm=data.get('manager_llm'),
            manager_agent=agents_by_id.get(data.get('manager_agent_id')),
        )
        # Members deleted since the crew was saved are silently dropped.
        crew.agents = [agents_by_id[aid] for aid in data['agent_ids'] if aid in agents_by_id]
        crew.tasks = [tasks_by_id[tid] for tid in data['task_ids'] if tid in tasks_by_id]
        crews.append(crew)
    crews.sort(key=lambda c: c.created_at)
    return crews
|
180 |
+
|
181 |
+
def delete_crew(crew_id):
    """Delete the crew row with the given id from the database."""
    delete_entity('crew', crew_id)
|
183 |
+
|
184 |
+
def save_tool(tool):
    """Serialize a tool instance; parameters come from tool.get_parameters()."""
    record = {
        'name': tool.name,
        'description': tool.description,
        'parameters': tool.get_parameters(),
    }
    save_entity('tool', tool.tool_id, record)
|
191 |
+
|
192 |
+
def load_tools():
    """Instantiate all persisted tools via TOOL_CLASSES and restore their parameters."""
    tools = []
    for tool_id, data in load_entities('tool'):
        # The stored name selects the concrete tool class to instantiate.
        tool = TOOL_CLASSES[data['name']](tool_id=tool_id)
        tool.set_parameters(**data['parameters'])
        tools.append(tool)
    return tools
|
202 |
+
|
203 |
+
def delete_tool(tool_id):
    """Delete the tool row with the given id from the database."""
    delete_entity('tool', tool_id)
|
205 |
+
|
206 |
+
def export_to_json(file_path):
    """Dump every entity row to a JSON file, with each row's data decoded
    (so the export is readable JSON rather than double-encoded strings)."""
    conn = get_db_connection()
    cursor = conn.cursor()
    cursor.execute('SELECT * FROM entities')
    rows = cursor.fetchall()
    conn.close()

    payload = [
        {
            'id': row['id'],
            'entity_type': row['entity_type'],
            'data': json.loads(row['data']),
        }
        for row in rows
    ]

    with open(file_path, 'w') as f:
        json.dump(payload, f, indent=4)
|
224 |
+
|
225 |
+
def import_from_json(file_path):
    """Load entities from a JSON export, overwriting rows with matching ids."""
    with open(file_path, 'r') as f:
        entities = json.load(f)

    conn = get_db_connection()
    cursor = conn.cursor()
    # One batched statement instead of a per-entity execute loop.
    cursor.executemany(
        '''
        INSERT OR REPLACE INTO entities (id, entity_type, data)
        VALUES (?, ?, ?)
    ''',
        [(e['id'], e['entity_type'], json.dumps(e['data'])) for e in entities],
    )
    conn.commit()
    conn.close()
|
img/crewai_logo.png
ADDED
img/crews.png
ADDED
img/favicon.ico
ADDED
img/kickoff.png
ADDED
llms.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from langchain_openai import ChatOpenAI
|
3 |
+
from langchain_groq import ChatGroq
|
4 |
+
from langchain_anthropic import ChatAnthropic
|
5 |
+
|
6 |
+
#from langchain_google_genai import ChatGoogleGenerativeAI
|
7 |
+
# from langchain_huggingface import ChatHuggingFace
|
8 |
+
from dotenv import load_dotenv
|
9 |
+
|
10 |
+
def create_openai_llm(model, temperature):
    """Build a ChatOpenAI client from .env settings.

    Raises ValueError when OPENAI_API_KEY is not set after reloading .env.
    """
    # Drop any stale values so load_dotenv(override=True) is authoritative.
    safe_pop_env_var('OPENAI_API_KEY')
    safe_pop_env_var('OPENAI_API_BASE')
    load_dotenv(override=True)
    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        raise ValueError("OpenAI API key not set in .env file")
    api_base = os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1/')
    # gpt-4o-mini gets a larger completion budget than the other models.
    max_tokens = 16383 if model == "gpt-4o-mini" else 4095
    return ChatOpenAI(openai_api_key=api_key, openai_api_base=api_base, model_name=model, temperature=temperature, max_tokens=max_tokens)
|
25 |
+
|
26 |
+
def create_anthropic_llm(model, temperature):
    """Build a ChatAnthropic client; raises ValueError if ANTHROPIC_API_KEY is unset."""
    api_key = os.getenv('ANTHROPIC_API_KEY')
    if not api_key:
        raise ValueError("Anthropic API key not set in .env file")
    return ChatAnthropic(anthropic_api_key=api_key, model_name=model, temperature=temperature,max_tokens=4095)
|
32 |
+
|
33 |
+
def create_groq_llm(model, temperature):
    """Build a ChatGroq client; raises ValueError if GROQ_API_KEY is unset."""
    api_key = os.getenv('GROQ_API_KEY')
    if not api_key:
        raise ValueError("Groq API key not set in .env file")
    return ChatGroq(groq_api_key=api_key, model_name=model, temperature=temperature, max_tokens=4095)
|
39 |
+
|
40 |
+
# def create_googleai_llm(model, temperature):
|
41 |
+
# api_key = os.getenv('GOOGLE_API_KEY')
|
42 |
+
# if api_key:
|
43 |
+
# return ChatGoogleGenerativeAI(model=model, temperature=temperature)
|
44 |
+
# else:
|
45 |
+
# raise ValueError("Google AI API key not set in .env file")
|
46 |
+
|
47 |
+
# def create_huggingfacehub_llm(model, temperature):
|
48 |
+
# api_key = os.getenv('HUGGINGFACEHUB_API_KEY')
|
49 |
+
# if api_key:
|
50 |
+
# return ChatHuggingFace(repo_id=model, huggingfacehub_api_token=api_key, model_kwargs={"temperature":temperature, "max_tokens": 4096})
|
51 |
+
# else:
|
52 |
+
# raise ValueError("HuggingFace API key not set in .env file")
|
53 |
+
|
54 |
+
def create_lmstudio_llm(model, temperature):
    """Build a ChatOpenAI client pointed at a local LM Studio server.

    Raises:
        ValueError: if LMSTUDIO_API_BASE is not set in the environment/.env.
    """
    api_base = os.getenv('LMSTUDIO_API_BASE')
    if not api_base:
        # The original assigned os.environ["OPENAI_API_BASE"] = api_base
        # BEFORE this check, so a missing LMSTUDIO_API_BASE raised a confusing
        # TypeError (environ values must be str) instead of this ValueError.
        raise ValueError("LM Studio API base not set in .env file")
    os.environ["OPENAI_API_KEY"] = "lm-studio"
    os.environ["OPENAI_API_BASE"] = api_base
    return ChatOpenAI(openai_api_key='lm-studio', openai_api_base=api_base, temperature=temperature, max_tokens=4095)
|
62 |
+
|
63 |
+
# Registry of supported LLM providers: display name -> available model ids and
# the factory used by create_llm(). Commented-out entries are scaffolding for
# providers whose langchain imports are disabled at the top of this file.
LLM_CONFIG = {
    "OpenAI": {
        "models": ["gpt-4o","gpt-4o-mini","gpt-3.5-turbo", "gpt-4-turbo"],
        "create_llm": create_openai_llm
    },
    "Groq": {
        "models": ["llama3-8b-8192","llama3-70b-8192", "mixtral-8x7b-32768"],
        "create_llm": create_groq_llm
    },
    # "GoogleAI": {
    #     "models": ["gemini-1.5-pro","gemini-1.5-flash", "gemini-1.0-pro"],
    #     "create_llm": create_googleai_llm
    # },
    # "HuggingFaceHub": {
    #     "models": ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Codestral-22B-v0.1", "EleutherAI/gpt-neo-2.7B"],
    #     "create_llm": create_huggingfacehub_llm
    # },
    "Anthropic": {
        "models": ["claude-3-5-sonnet-20240620"],
        "create_llm": create_anthropic_llm
    },
    "LM Studio": {
        "models": ["lms-default"],
        "create_llm": create_lmstudio_llm
    }

}
|
90 |
+
|
91 |
+
def llm_providers_and_models():
    """Return 'Provider: model' strings for every configured provider/model pair."""
    options = []
    for provider, config in LLM_CONFIG.items():
        for model in config["models"]:
            options.append(f"{provider}: {model}")
    return options
|
93 |
+
|
94 |
+
def create_llm(provider_and_model, temperature=0.1):
    """Instantiate an LLM from a 'Provider: model' string.

    Args:
        provider_and_model: e.g. "OpenAI: gpt-4o". Split on the FIRST ': '
            only (maxsplit=1) so a model id containing ': ' cannot break the
            unpacking, which the original unbounded split did.
        temperature: sampling temperature forwarded to the provider factory.

    Raises:
        ValueError: if the provider is not present in LLM_CONFIG.
    """
    provider, model = provider_and_model.split(": ", 1)
    create_llm_func = LLM_CONFIG.get(provider, {}).get("create_llm")
    if create_llm_func is None:
        raise ValueError(f"LLM provider {provider} is not recognized or not supported")
    return create_llm_func(model, temperature)
|
101 |
+
|
102 |
+
def safe_pop_env_var(key):
    """Remove `key` from the process environment if present; no-op otherwise."""
    # pop() with a default never raises KeyError, matching the original
    # try/except behavior.
    os.environ.pop(key, None)
|
my_agent.py
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from crewai import Agent
|
2 |
+
import streamlit as st
|
3 |
+
from utils import rnd_id, fix_columns_width
|
4 |
+
from streamlit import session_state as ss
|
5 |
+
from db_utils import save_agent, delete_agent
|
6 |
+
from llms import llm_providers_and_models, create_llm
|
7 |
+
from datetime import datetime
|
8 |
+
|
9 |
+
class MyAgent:
    """UI-side model of a CrewAI agent.

    Holds the editable agent configuration, persists it via db_utils
    (save_agent/delete_agent), renders a Streamlit editor/summary, and
    converts itself into a crewai.Agent on demand. The edit-mode flag lives
    in Streamlit session state so it survives reruns.
    """

    def __init__(self, id=None, role=None, backstory=None, goal=None, temperature=None, allow_delegation=False, verbose=False, cache= None, llm_provider_model=None, max_iter=None, created_at=None, tools=None):
        # Every argument is optional; missing values fall back to demo defaults.
        self.id = id or "A_" + rnd_id()
        self.role = role or "Senior Researcher"
        self.backstory = backstory or "Driven by curiosity, you're at the forefront of innovation, eager to explore and share knowledge that could change the world."
        self.goal = goal or "Uncover groundbreaking technologies in AI"
        self.temperature = temperature or 0.1
        self.allow_delegation = allow_delegation if allow_delegation is not None else False
        # NOTE(review): the parameter default is False, so the `else True`
        # branch only fires when a caller explicitly passes verbose=None.
        self.verbose = verbose if verbose is not None else True
        # Default to the first configured provider/model combination.
        self.llm_provider_model = llm_providers_and_models()[0] if llm_provider_model is None else llm_provider_model
        self.created_at = created_at or datetime.now().isoformat()
        self.tools = tools or []
        self.max_iter = max_iter or 25
        self.cache = cache if cache is not None else True
        # Per-agent edit flag stored in session state, keyed by agent id.
        self.edit_key = f'edit_{self.id}'
        if self.edit_key not in ss:
            ss[self.edit_key] = False

    @property
    def edit(self):
        # Edit mode is session-state backed so it persists across reruns.
        return ss[self.edit_key]

    @edit.setter
    def edit(self, value):
        ss[self.edit_key] = value

    def get_crewai_agent(self) -> Agent:
        """Build and return a crewai.Agent from this configuration."""
        llm = create_llm(self.llm_provider_model, temperature=self.temperature)
        tools = [tool.create_tool() for tool in self.tools]
        return Agent(
            role=self.role,
            backstory=self.backstory,
            goal=self.goal,
            allow_delegation=self.allow_delegation,
            verbose=self.verbose,
            max_iter=self.max_iter,
            cache=self.cache,
            tools=tools,
            llm=llm
        )

    def delete(self):
        """Remove this agent from session state and from the database."""
        ss.agents = [agent for agent in ss.agents if agent.id != self.id]
        delete_agent(self.id)

    def get_tool_display_name(self, tool):
        """Return a human-readable label: tool name plus its first parameter
        value (or the tool id when that parameter is unset)."""
        first_param_name = tool.get_parameter_names()[0] if tool.get_parameter_names() else None
        first_param_value = tool.parameters.get(first_param_name, '') if first_param_name else ''
        return f"{tool.name} ({first_param_value if first_param_value else tool.tool_id})"

    def is_valid(self, show_warning=False):
        """An agent is valid when all of its tools are valid."""
        for tool in self.tools:
            if not tool.is_valid(show_warning=show_warning):
                if show_warning:
                    st.warning(f"Tool {tool.name} is not valid")
                return False
        return True

    def validate_llm_provider_model(self):
        """Reset the stored model to the first available one if it is no
        longer offered (e.g. provider config changed)."""
        available_models = llm_providers_and_models()
        if self.llm_provider_model not in available_models:
            self.llm_provider_model = available_models[0]

    def draw(self, key=None):
        """Render this agent: an edit form when in edit mode, otherwise a
        read-only summary expander with Edit/Delete buttons."""
        self.validate_llm_provider_model()
        # split(':')[1] shows just the model part of "provider: model".
        expander_title = f"{self.role[:60]} -{self.llm_provider_model.split(':')[1]}" if self.is_valid() else f"❗ {self.role[:20]} -{self.llm_provider_model.split(':')[1]}"
        if self.edit:
            with st.expander(f"Agent: {self.role}", expanded=True):
                with st.form(key=f'form_{self.id}' if key is None else key):
                    self.role = st.text_input("Role", value=self.role)
                    self.backstory = st.text_area("Backstory", value=self.backstory)
                    self.goal = st.text_area("Goal", value=self.goal)
                    self.allow_delegation = st.checkbox("Allow delegation", value=self.allow_delegation)
                    self.verbose = st.checkbox("Verbose", value=self.verbose)
                    self.cache = st.checkbox("Cache", value=self.cache)
                    self.llm_provider_model = st.selectbox("LLM Provider and Model", options=llm_providers_and_models(), index=llm_providers_and_models().index(self.llm_provider_model))
                    self.temperature = st.slider("Temperature", value=self.temperature, min_value=0.0, max_value=1.0)
                    self.max_iter = st.number_input("Max Iterations", value=self.max_iter, min_value=1, max_value=100)
                    enabled_tools = [tool for tool in ss.tools]
                    selected_tools = st.multiselect(
                        "Select Tools",
                        [self.get_tool_display_name(tool) for tool in enabled_tools],
                        default=[self.get_tool_display_name(tool) for tool in self.tools],
                        key=f"{self.id}_tools{key}"
                    )
                    submitted = st.form_submit_button("Save")
                    if submitted:
                        # Map the selected display names back to tool objects.
                        self.tools = [tool for tool in enabled_tools if self.get_tool_display_name(tool) in selected_tools]
                        # Leaving edit mode also saves and triggers a rerun.
                        self.set_editable(False)
        else:
            fix_columns_width()
            with st.expander(expander_title, expanded=False):
                st.markdown(f"**Role:** {self.role}")
                st.markdown(f"**Backstory:** {self.backstory}")
                st.markdown(f"**Goal:** {self.goal}")
                st.markdown(f"**Allow delegation:** {self.allow_delegation}")
                st.markdown(f"**Verbose:** {self.verbose}")
                st.markdown(f"**Cache:** {self.cache}")
                st.markdown(f"**LLM Provider and Model:** {self.llm_provider_model}")
                st.markdown(f"**Temperature:** {self.temperature}")
                st.markdown(f"**Max Iterations:** {self.max_iter}")
                st.markdown(f"**Tools:** {[self.get_tool_display_name(tool) for tool in self.tools]}")

                self.is_valid(show_warning=True)

            col1, col2 = st.columns(2)
            with col1:
                st.button("Edit", on_click=self.set_editable, args=(True,), key=rnd_id())
            with col2:
                st.button("Delete", on_click=self.delete, key=rnd_id())

    def set_editable(self, edit):
        """Toggle edit mode, persist the agent, and rerun the app when
        leaving edit mode so the summary view refreshes."""
        self.edit = edit
        save_agent(self)
        if not edit:
            st.rerun()
|
my_crew.py
ADDED
@@ -0,0 +1,262 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from crewai import Crew, Process
|
2 |
+
import streamlit as st
|
3 |
+
from utils import rnd_id, fix_columns_width
|
4 |
+
from streamlit import session_state as ss
|
5 |
+
from datetime import datetime
|
6 |
+
from llms import llm_providers_and_models, create_llm
|
7 |
+
import db_utils
|
8 |
+
|
9 |
+
class MyCrew:
    """UI-side model of a CrewAI crew.

    Aggregates agents and tasks, stores crew-level options (process, memory,
    cache, planning, rate limit, manager), persists changes through db_utils,
    renders a Streamlit editor/summary, and converts itself into a
    crewai.Crew. Edit mode and the task ordering live in session state.
    """

    def __init__(self, id=None, name=None, agents=None, tasks=None, process=None, cache=None,max_rpm=None, verbose=None, manager_llm=None, manager_agent=None, created_at=None, memory=None, planning=None):
        self.id = id or "C_" + rnd_id()
        self.name = name or "Crew 1"
        self.agents = agents or []
        self.tasks = tasks or []
        self.process = process or Process.sequential
        self.verbose = bool(verbose) if verbose is not None else True
        # Only one of manager_llm / manager_agent is meaningful at a time;
        # the update_* callbacks below enforce that mutual exclusion.
        self.manager_llm = manager_llm
        self.manager_agent = manager_agent
        self.memory = memory if memory is not None else False
        self.cache = cache if cache is not None else True
        self.max_rpm = max_rpm or 1000
        self.planning = planning if planning is not None else False
        self.created_at = created_at or datetime.now().isoformat()
        # Per-crew edit flag kept in session state so it survives reruns.
        self.edit_key = f'edit_{self.id}'
        if self.edit_key not in ss:
            ss[self.edit_key] = False
        # Remembered task ordering (list of task ids) for the multiselect.
        self.tasks_order_key = f'tasks_order_{self.id}'
        if self.tasks_order_key not in ss:
            ss[self.tasks_order_key] = [task.id for task in self.tasks]

    @property
    def edit(self):
        return ss[self.edit_key]

    @edit.setter
    def edit(self, value):
        ss[self.edit_key] = value

    def get_crewai_crew(self, *args, **kwargs) -> Crew:
        """Build a crewai.Crew, resolving inter-task context dependencies.

        Tasks may name other tasks (sync or async) as context; those are
        created first, recursively, and memoized in task_objects so each
        MyTask maps to exactly one crewai.Task instance.
        """
        crewai_agents = [agent.get_crewai_agent() for agent in self.agents]

        # Create a dictionary to hold the Task objects
        task_objects = {}

        def create_task(task):
            # Memoized: each task id is converted at most once.
            if task.id in task_objects:
                return task_objects[task.id]

            context_tasks = []
            if task.async_execution or task.context_from_async_tasks_ids or task.context_from_sync_tasks_ids:
                for context_task_id in (task.context_from_async_tasks_ids or []) + (task.context_from_sync_tasks_ids or []):
                    if context_task_id not in task_objects:
                        # Depth-first: build the dependency before this task.
                        context_task = next((t for t in self.tasks if t.id == context_task_id), None)
                        if context_task:
                            context_tasks.append(create_task(context_task))
                        else:
                            print(f"Warning: Context task with id {context_task_id} not found for task {task.id}")
                    else:
                        context_tasks.append(task_objects[context_task_id])

            # Only pass context if it's an async task or if specific context is defined
            if task.async_execution or context_tasks:
                crewai_task = task.get_crewai_task(context_from_async_tasks=context_tasks)
            else:
                crewai_task = task.get_crewai_task()

            task_objects[task.id] = crewai_task
            return crewai_task

        # Create all tasks, resolving dependencies recursively
        for task in self.tasks:
            create_task(task)

        # Collect the final list of tasks in the original order
        crewai_tasks = [task_objects[task.id] for task in self.tasks]

        if self.manager_llm:
            return Crew(
                agents=crewai_agents,
                tasks=crewai_tasks,
                cache=self.cache,
                process=self.process,
                max_rpm=self.max_rpm,
                verbose=self.verbose,
                manager_llm=create_llm(self.manager_llm),
                memory=self.memory,
                planning=self.planning,
                *args, **kwargs
            )
        elif self.manager_agent:
            return Crew(
                agents=crewai_agents,
                tasks=crewai_tasks,
                cache=self.cache,
                process=self.process,
                max_rpm=self.max_rpm,
                verbose=self.verbose,
                manager_agent=self.manager_agent.get_crewai_agent(),
                memory=self.memory,
                planning=self.planning,
                *args, **kwargs
            )
        # No manager configured: plain crew.
        cr = Crew(
            agents=crewai_agents,
            tasks=crewai_tasks,
            cache=self.cache,
            process=self.process,
            max_rpm=self.max_rpm,
            verbose=self.verbose,
            memory=self.memory,
            planning=self.planning,
            *args, **kwargs
        )
        return cr

    def delete(self):
        """Remove this crew from session state and from the database."""
        ss.crews = [crew for crew in ss.crews if crew.id != self.id]
        db_utils.delete_crew(self.id)

    # The update_* methods below are Streamlit on_change callbacks: each
    # reads its widget's value from session state (keyed by crew id) and
    # persists the crew immediately.

    def update_name(self):
        self.name = ss[f'name_{self.id}']
        db_utils.save_crew(self)

    def update_process(self):
        self.process = ss[f'process_{self.id}']
        db_utils.save_crew(self)

    def update_tasks(self):
        selected_tasks_ids = ss[f'tasks_{self.id}']
        # Keep only tasks whose agent is part of this crew, then restore the
        # user's selection order.
        self.tasks = [task for task in ss.tasks if task.id in selected_tasks_ids and task.agent.id in [agent.id for agent in self.agents]]
        self.tasks = sorted(self.tasks, key=lambda task: selected_tasks_ids.index(task.id))
        ss[self.tasks_order_key] = selected_tasks_ids
        db_utils.save_crew(self)

    def update_verbose(self):
        self.verbose = ss[f'verbose_{self.id}']
        db_utils.save_crew(self)

    def update_agents(self):
        # Agents are matched by role string (the multiselect shows roles).
        selected_agents = ss[f'agents_{self.id}']
        self.agents = [agent for agent in ss.agents if agent.role in selected_agents]
        db_utils.save_crew(self)

    def update_manager_llm(self):
        selected_llm = ss[f'manager_llm_{self.id}']
        self.manager_llm = selected_llm if selected_llm != "None" else None
        if self.manager_llm:
            # Manager LLM and manager agent are mutually exclusive.
            self.manager_agent = None
        db_utils.save_crew(self)

    def update_manager_agent(self):
        selected_agent_role = ss[f'manager_agent_{self.id}']
        self.manager_agent = next((agent for agent in ss.agents if agent.role == selected_agent_role), None) if selected_agent_role != "None" else None
        if self.manager_agent:
            # Manager agent and manager LLM are mutually exclusive.
            self.manager_llm = None
        db_utils.save_crew(self)

    def update_memory(self):
        self.memory = ss[f'memory_{self.id}']
        db_utils.save_crew(self)

    def update_max_rpm(self):
        self.max_rpm = ss[f'max_rpm_{self.id}']
        db_utils.save_crew(self)

    def update_cache(self):
        self.cache = ss[f'cache_{self.id}']
        db_utils.save_crew(self)

    def update_planning(self):
        self.planning = ss[f'planning_{self.id}']
        db_utils.save_crew(self)

    def is_valid(self, show_warning=False):
        """A crew is valid when it has agents and tasks, all of them are
        valid, and a hierarchical process has a manager configured."""
        if len(self.agents) == 0:
            if show_warning:
                st.warning(f"Crew {self.name} has no agents")
            return False
        if len(self.tasks) == 0:
            if show_warning:
                st.warning(f"Crew {self.name} has no tasks")
            return False
        if any([not agent.is_valid(show_warning=show_warning) for agent in self.agents]):
            return False
        if any([not task.is_valid(show_warning=show_warning) for task in self.tasks]):
            return False
        if self.process == Process.hierarchical and not (self.manager_llm or self.manager_agent):
            if show_warning:
                st.warning(f"Crew {self.name} has no manager agent or manager llm set for hierarchical process")
            return False
        return True

    def validate_manager_llm(self):
        """Drop the manager LLM if it is no longer among available models."""
        available_models = llm_providers_and_models()
        if self.manager_llm and self.manager_llm not in available_models:
            self.manager_llm = None

    def draw(self,expanded=False, buttons=True):
        """Render this crew: a live-updating editor in edit mode, otherwise
        a read-only summary expander with optional Edit/Delete buttons."""
        self.validate_manager_llm()
        # Widget keys are namespaced by crew id so multiple crews can render
        # on one page without key collisions.
        name_key = f"name_{self.id}"
        process_key = f"process_{self.id}"
        verbose_key = f"verbose_{self.id}"
        agents_key = f"agents_{self.id}"
        tasks_key = f"tasks_{self.id}"
        manager_llm_key = f"manager_llm_{self.id}"
        manager_agent_key = f"manager_agent_{self.id}"
        memory_key = f"memory_{self.id}"
        planning_key = f"planning_{self.id}"
        cache_key = f"cache_{self.id}"
        max_rpm_key = f"max_rpm_{self.id}"

        if self.edit:
            with st.container(border=True):
                st.text_input("Name (just id, it doesn't affect anything)", value=self.name, key=name_key, on_change=self.update_name)
                st.selectbox("Process", options=[Process.sequential, Process.hierarchical], index=[Process.sequential, Process.hierarchical].index(self.process), key=process_key, on_change=self.update_process)
                st.multiselect("Agents", options=[agent.role for agent in ss.agents], default=[agent.role for agent in self.agents], key=agents_key, on_change=self.update_agents)
                # Filter tasks by selected agents
                available_tasks = [task for task in ss.tasks if task.agent and task.agent.id in [agent.id for agent in self.agents]]
                available_task_ids = [task.id for task in available_tasks]
                default_task_ids = [task.id for task in self.tasks if task.id in available_task_ids]
                st.multiselect("Tasks", options=available_task_ids, default=default_task_ids, format_func=lambda x: next(task.description for task in ss.tasks if task.id == x), key=tasks_key, on_change=self.update_tasks)
                # Manager widgets are only enabled for the hierarchical process.
                st.selectbox("Manager LLM", options=["None"] + llm_providers_and_models(), index=0 if self.manager_llm is None else llm_providers_and_models().index(self.manager_llm) + 1, key=manager_llm_key, on_change=self.update_manager_llm, disabled=(self.process != Process.hierarchical))
                st.selectbox("Manager Agent", options=["None"] + [agent.role for agent in ss.agents], index=0 if self.manager_agent is None else [agent.role for agent in ss.agents].index(self.manager_agent.role) + 1, key=manager_agent_key, on_change=self.update_manager_agent, disabled=(self.process != Process.hierarchical))
                st.checkbox("Verbose", value=self.verbose, key=verbose_key, on_change=self.update_verbose)
                st.checkbox("Memory", value=self.memory, key=memory_key, on_change=self.update_memory)
                st.checkbox("Cache", value=self.cache, key=cache_key, on_change=self.update_cache)
                st.checkbox("Planning", value=self.planning, key=planning_key, on_change=self.update_planning)
                st.number_input("Max req/min", value=self.max_rpm, key=max_rpm_key, on_change=self.update_max_rpm)
                st.button("Save", on_click=self.set_editable, args=(False,), key=rnd_id())
        else:
            fix_columns_width()
            expander_title = f"Crew: {self.name}" if self.is_valid() else f"❗ Crew: {self.name}"
            with st.expander(expander_title, expanded=expanded):
                st.markdown(f"**Process:** {self.process}")
                if self.process == Process.hierarchical:
                    st.markdown(f"**Manager LLM:** {self.manager_llm}")
                    st.markdown(f"**Manager Agent:** {self.manager_agent.role if self.manager_agent else 'None'}")
                st.markdown(f"**Verbose:** {self.verbose}")
                st.markdown(f"**Memory:** {self.memory}")
                st.markdown(f"**Cache:** {self.cache}")
                st.markdown(f"**Planning:** {self.planning}")
                st.markdown(f"**Max req/min:** {self.max_rpm}")
                st.markdown("**Tasks:**")
                # Only list tasks whose agent actually belongs to this crew.
                for i, task in enumerate([task for task in self.tasks if task.agent and task.agent.id in [agent.id for agent in self.agents]], 1):
                    with st.container(border=True):
                        async_tag = "(async)" if task.async_execution else ""
                        st.markdown(f"**{i}.{async_tag} {task.description}**")
                        st.markdown(f"**Agent:** {task.agent.role if task.agent else 'None'}")
                        tools_list = ", ".join([tool.name for tool in task.agent.tools]) if task.agent else "None"
                        st.markdown(f"  **Tools:** {tools_list}")
                        st.markdown(f"  **LLM:** {task.agent.llm_provider_model}")
                if buttons:
                    col1, col2 = st.columns(2)
                    with col1:
                        st.button("Edit", on_click=self.set_editable, key=rnd_id(), args=(True,))
                    with col2:
                        st.button("Delete", on_click=self.delete, key=rnd_id())
                self.is_valid(show_warning=True)

    def set_editable(self, edit):
        """Toggle edit mode and persist the crew."""
        self.edit = edit
        db_utils.save_crew(self)
|
my_task.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from crewai import Task
|
2 |
+
import streamlit as st
|
3 |
+
from utils import rnd_id, fix_columns_width
|
4 |
+
from streamlit import session_state as ss
|
5 |
+
from db_utils import save_task, delete_task
|
6 |
+
from datetime import datetime
|
7 |
+
|
8 |
+
class MyTask:
    """UI-side model of a CrewAI task.

    Holds the editable task configuration (description, expected output,
    assigned agent, async flag, context-task references), persists changes
    via db_utils (save_task/delete_task), renders a Streamlit editor/summary,
    and converts itself into a crewai.Task on demand.
    """

    def __init__(self, id=None, description=None, expected_output=None, agent=None, async_execution=None, created_at=None, context_from_async_tasks_ids=None, context_from_sync_tasks_ids=None, **kwargs):
        self.id = id or "T_" + rnd_id()
        self.description = description or "Identify the next big trend in AI. Focus on identifying pros and cons and the overall narrative."
        self.expected_output = expected_output or "A comprehensive 3 paragraphs long report on the latest AI trends."
        # BUG FIX: this was `agent or ss.agents[0] if ss.agents else None`,
        # which parses as `(agent or ss.agents[0]) if ss.agents else None`
        # and silently discarded an explicitly supplied agent whenever
        # ss.agents was empty. Parenthesize so the fallback only applies
        # when no agent is given.
        self.agent = agent or (ss.agents[0] if ss.agents else None)
        self.async_execution = async_execution or False
        self.context_from_async_tasks_ids = context_from_async_tasks_ids or None
        self.context_from_sync_tasks_ids = context_from_sync_tasks_ids or None
        self.created_at = created_at or datetime.now().isoformat()
        # Per-task edit flag kept in session state so it survives reruns.
        self.edit_key = f'edit_{self.id}'
        if self.edit_key not in ss:
            ss[self.edit_key] = False

    @property
    def edit(self):
        return ss[self.edit_key]

    @edit.setter
    def edit(self, value):
        ss[self.edit_key] = value

    def get_crewai_task(self, context_from_async_tasks=None, context_from_sync_tasks=None) -> Task:
        """Build a crewai.Task; context lists (already-built crewai.Task
        objects) are merged and passed only when non-empty."""
        context = []
        if context_from_async_tasks:
            context.extend(context_from_async_tasks)
        if context_from_sync_tasks:
            context.extend(context_from_sync_tasks)

        if context:
            return Task(description=self.description, expected_output=self.expected_output, async_execution=self.async_execution, agent=self.agent.get_crewai_agent(), context=context)
        else:
            return Task(description=self.description, expected_output=self.expected_output, async_execution=self.async_execution, agent=self.agent.get_crewai_agent())

    def delete(self):
        """Remove this task from session state and from the database."""
        ss.tasks = [task for task in ss.tasks if task.id != self.id]
        delete_task(self.id)

    def is_valid(self, show_warning=False):
        """A task is valid when it has an agent and that agent is valid."""
        if not self.agent:
            if show_warning:
                st.warning(f"Task {self.description} has no agent")
            return False
        if not self.agent.is_valid(show_warning):
            return False
        return True

    def draw(self, key=None):
        """Render this task: an edit form when in edit mode, otherwise a
        read-only summary expander with Edit/Delete buttons."""
        agent_options = [agent.role for agent in ss.agents]
        expander_title = f"({self.agent.role if self.agent else 'unassigned'}) - {self.description}" if self.is_valid() else f"❗ ({self.agent.role if self.agent else 'unassigned'}) - {self.description}"
        if self.edit:
            with st.expander(expander_title, expanded=True):
                with st.form(key=f'form_{self.id}' if key is None else key):
                    self.description = st.text_area("Description", value=self.description)
                    self.expected_output = st.text_area("Expected output", value=self.expected_output)
                    self.agent = st.selectbox("Agent", options=ss.agents, format_func=lambda x: x.role, index=0 if self.agent is None else agent_options.index(self.agent.role))
                    self.async_execution = st.checkbox("Async execution", value=self.async_execution)
                    # Context pickers show task descriptions but store ids.
                    self.context_from_async_tasks_ids = st.multiselect("Context from async tasks", options=[task.id for task in ss.tasks if task.async_execution], default=self.context_from_async_tasks_ids, format_func=lambda x: [task.description[:120] for task in ss.tasks if task.id == x][0])
                    self.context_from_sync_tasks_ids = st.multiselect("Context from sync tasks", options=[task.id for task in ss.tasks if not task.async_execution], default=self.context_from_sync_tasks_ids, format_func=lambda x: [task.description[:120] for task in ss.tasks if task.id == x][0])
                    submitted = st.form_submit_button("Save")
                    if submitted:
                        # Leaving edit mode also saves and triggers a rerun.
                        self.set_editable(False)
        else:
            fix_columns_width()
            with st.expander(expander_title):
                st.markdown(f"**Description:** {self.description}")
                st.markdown(f"**Expected output:** {self.expected_output}")
                st.markdown(f"**Agent:** {self.agent.role if self.agent else 'None'}")
                st.markdown(f"**Async execution:** {self.async_execution}")
                st.markdown(f"**Context from async tasks:** {', '.join([task.description[:120] for task in ss.tasks if task.id in self.context_from_async_tasks_ids]) if self.context_from_async_tasks_ids else 'None'}")
                st.markdown(f"**Context from sync tasks:** {', '.join([task.description[:120] for task in ss.tasks if task.id in self.context_from_sync_tasks_ids]) if self.context_from_sync_tasks_ids else 'None'}")
                col1, col2 = st.columns(2)
                with col1:
                    st.button("Edit", on_click=self.set_editable, args=(True,), key=rnd_id())
                with col2:
                    st.button("Delete", on_click=self.delete, key=rnd_id())
                self.is_valid(show_warning=True)

    def set_editable(self, edit):
        """Toggle edit mode, persist the task, and rerun the app when
        leaving edit mode so the summary view refreshes."""
        self.edit = edit
        save_task(self)
        if not edit:
            st.rerun()
|
my_tools.py
ADDED
@@ -0,0 +1,368 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import os
|
3 |
+
from utils import rnd_id
|
4 |
+
from crewai_tools import CodeInterpreterTool,ScrapeElementFromWebsiteTool,TXTSearchTool,SeleniumScrapingTool,PGSearchTool,PDFSearchTool,MDXSearchTool,JSONSearchTool,GithubSearchTool,EXASearchTool,DOCXSearchTool,CSVSearchTool,ScrapeWebsiteTool, FileReadTool, DirectorySearchTool, DirectoryReadTool, CodeDocsSearchTool, YoutubeVideoSearchTool,SerperDevTool,YoutubeChannelSearchTool,WebsiteSearchTool
|
5 |
+
from custom_tools import CustomApiTool,CustomFileWriteTool,CustomCodeInterpreterTool
|
6 |
+
from langchain_community.tools import YahooFinanceNewsTool
|
7 |
+
|
8 |
+
class MyTool:
    """Base wrapper that pairs a concrete tool with its UI metadata.

    Subclasses declare which parameters exist (and whether each is
    mandatory) and implement create_tool() to produce the real tool object.
    `parameters` holds the current values (from **kwargs), while
    `parameters_metadata` holds the per-parameter declarations.
    """

    def __init__(self, tool_id, name, description, parameters, **kwargs):
        # A falsy tool_id gets a freshly generated one.
        self.tool_id = tool_id or rnd_id()
        self.name = name
        self.description = description
        self.parameters = kwargs
        self.parameters_metadata = parameters

    def create_tool(self):
        # Subclasses override this to build the underlying tool instance.
        pass

    def get_parameters(self):
        """Return the current parameter values (name -> value)."""
        return self.parameters

    def set_parameters(self, **kwargs):
        """Merge the given values into the current parameters."""
        self.parameters.update(kwargs)

    def get_parameter_names(self):
        """Return the declared parameter names, in declaration order."""
        return [*self.parameters_metadata]

    def is_parameter_mandatory(self, param_name):
        """True when the named parameter is declared mandatory."""
        return self.parameters_metadata.get(param_name, {}).get('mandatory', False)

    def is_valid(self,show_warning=False):
        """Return True when every mandatory parameter has a truthy value;
        optionally surface a Streamlit warning for the first missing one."""
        for pname, meta in self.parameters_metadata.items():
            if not meta['mandatory']:
                continue
            if self.parameters.get(pname):
                continue
            if show_warning:
                st.warning(f"Parameter '{pname}' is mandatory for tool '{self.name}'")
            return False
        return True
|
38 |
+
|
39 |
+
class MyScrapeWebsiteTool(MyTool):
    """Wrapper for crewai's ScrapeWebsiteTool; the URL is optional."""

    def __init__(self, tool_id=None, website_url=None):
        metadata = {
            'website_url': {'mandatory': False}
        }
        super().__init__(tool_id, 'ScrapeWebsiteTool', "A tool that can be used to read website content.", metadata, website_url=website_url)

    def create_tool(self) -> ScrapeWebsiteTool:
        # Empty string is normalized to None (x if x else None == x or None).
        url = self.parameters.get('website_url') or None
        return ScrapeWebsiteTool(url)
|
48 |
+
|
49 |
+
class MyFileReadTool(MyTool):
    """Wrapper for crewai's FileReadTool; the file path is optional."""

    def __init__(self, tool_id=None, file_path=None):
        metadata = {
            'file_path': {'mandatory': False}
        }
        super().__init__(tool_id, 'FileReadTool', "A tool that can be used to read a file's content.", metadata, file_path=file_path)

    def create_tool(self) -> FileReadTool:
        # Empty string is normalized to None.
        path = self.parameters.get('file_path') or None
        return FileReadTool(path)
|
58 |
+
|
59 |
+
class MyDirectorySearchTool(MyTool):
    """Wrapper for crewai's DirectorySearchTool; the directory is optional."""

    def __init__(self, tool_id=None, directory=None):
        parameters = {
            'directory': {'mandatory': False}
        }
        # BUG FIX: the value was previously forwarded as `directory_path=directory`,
        # but both the metadata above and create_tool() read the key
        # 'directory' — so a configured directory was stored under the wrong
        # key and silently ignored.
        super().__init__(tool_id, 'DirectorySearchTool', "A tool that can be used to semantic search a query from a directory's content.", parameters, directory=directory)

    def create_tool(self) -> DirectorySearchTool:
        return DirectorySearchTool(self.parameters.get('directory') if self.parameters.get('directory') else None)
|
68 |
+
|
69 |
+
class MyDirectoryReadTool(MyTool):
    """Wrapper for crewai's DirectoryReadTool; the directory is mandatory."""

    def __init__(self, tool_id=None, directory_contents=None):
        metadata = {
            'directory_contents': {'mandatory': True}
        }
        super().__init__(tool_id, 'DirectoryReadTool', "Use the tool to list the contents of the specified directory", metadata, directory_contents=directory_contents)

    def create_tool(self) -> DirectoryReadTool:
        directory = self.parameters.get('directory_contents')
        return DirectoryReadTool(directory)
|
78 |
+
|
79 |
+
class MyCodeDocsSearchTool(MyTool):
    """Wrapper for crewai's CodeDocsSearchTool; the docs URL is optional."""

    def __init__(self, tool_id=None, code_docs=None):
        metadata = {
            'code_docs': {'mandatory': False}
        }
        super().__init__(tool_id, 'CodeDocsSearchTool', "A tool that can be used to search through code documentation.", metadata, code_docs=code_docs)

    def create_tool(self) -> CodeDocsSearchTool:
        # Empty string is normalized to None.
        docs = self.parameters.get('code_docs') or None
        return CodeDocsSearchTool(docs)
|
88 |
+
|
89 |
+
class MyYoutubeVideoSearchTool(MyTool):
    """Wrapper for crewai's YoutubeVideoSearchTool; the video URL is optional."""

    def __init__(self, tool_id=None, youtube_video_url=None):
        metadata = {
            'youtube_video_url': {'mandatory': False}
        }
        super().__init__(tool_id, 'YoutubeVideoSearchTool', "A tool that can be used to semantic search a query from a Youtube Video content.", metadata, youtube_video_url=youtube_video_url)

    def create_tool(self) -> YoutubeVideoSearchTool:
        # Empty string is normalized to None.
        video_url = self.parameters.get('youtube_video_url') or None
        return YoutubeVideoSearchTool(video_url)
|
98 |
+
|
99 |
+
class MySerperDevTool(MyTool):
    """Wrapper for crewai's SerperDevTool; requires a Serper API key."""

    def __init__(self, tool_id=None, SERPER_API_KEY=None):
        parameters = {
            'SERPER_API_KEY': {'mandatory': True}
        }
        # BUG FIX: unlike the sibling tool classes, the constructor argument
        # was not forwarded to super().__init__, so a key supplied at
        # construction was dropped and is_valid() failed until the key was
        # re-set via set_parameters().
        super().__init__(tool_id, 'SerperDevTool', "A tool that can be used to search the internet with a search_query", parameters, SERPER_API_KEY=SERPER_API_KEY)

    def create_tool(self) -> SerperDevTool:
        # SerperDevTool reads the key from the environment, not a parameter.
        # NOTE(review): assigning None here raises TypeError; callers are
        # expected to check is_valid() first since the key is mandatory.
        os.environ['SERPER_API_KEY'] = self.parameters.get('SERPER_API_KEY')
        return SerperDevTool()
|
110 |
+
|
111 |
+
class MyYoutubeChannelSearchTool(MyTool):
    """Wrapper exposing crewAI's YoutubeChannelSearchTool, optionally pinned to a channel."""

    def __init__(self, tool_id=None, youtube_channel_handle=None):
        super().__init__(
            tool_id,
            'YoutubeChannelSearchTool',
            "A tool that can be used to semantic search a query from a Youtube Channels content. Channel can be added as @channel",
            {'youtube_channel_handle': {'mandatory': False}},
            youtube_channel_handle=youtube_channel_handle,
        )

    def create_tool(self) -> YoutubeChannelSearchTool:
        # Falsy handle -> unrestricted search, matching the original ternary.
        handle = self.parameters.get('youtube_channel_handle') or None
        return YoutubeChannelSearchTool(handle)
|
120 |
+
|
121 |
+
class MyWebsiteSearchTool(MyTool):
    """Wrapper exposing crewAI's WebsiteSearchTool, optionally pinned to one URL."""

    def __init__(self, tool_id=None, website=None):
        super().__init__(
            tool_id,
            'WebsiteSearchTool',
            "A tool that can be used to semantic search a query from a specific URL content.",
            {'website': {'mandatory': False}},
            website=website,
        )

    def create_tool(self) -> WebsiteSearchTool:
        # Falsy parameter -> tool is created without a fixed site.
        site = self.parameters.get('website') or None
        return WebsiteSearchTool(site)
|
130 |
+
|
131 |
+
class MyCSVSearchTool(MyTool):
    """Wrapper exposing crewAI's CSVSearchTool, optionally pinned to one CSV file."""

    def __init__(self, tool_id=None, csv=None):
        super().__init__(
            tool_id,
            'CSVSearchTool',
            "A tool that can be used to semantic search a query from a CSV's content.",
            {'csv': {'mandatory': False}},
            csv=csv,
        )

    def create_tool(self) -> CSVSearchTool:
        # Keyword arg preserved; falsy parameter collapses to None.
        csv_path = self.parameters.get('csv') or None
        return CSVSearchTool(csv=csv_path)
|
140 |
+
|
141 |
+
class MyDocxSearchTool(MyTool):
    """Wrapper exposing crewAI's DOCXSearchTool, optionally pinned to one DOCX file."""

    def __init__(self, tool_id=None, docx=None):
        super().__init__(
            tool_id,
            'DOCXSearchTool',
            "A tool that can be used to semantic search a query from a DOCX's content.",
            {'docx': {'mandatory': False}},
            docx=docx,
        )

    def create_tool(self) -> DOCXSearchTool:
        # Keyword arg preserved; falsy parameter collapses to None.
        docx_path = self.parameters.get('docx') or None
        return DOCXSearchTool(docx=docx_path)
|
150 |
+
|
151 |
+
class MyEXASearchTool(MyTool):
    """Wrapper exposing crewAI's EXASearchTool; the EXA key is a tool parameter."""

    def __init__(self, tool_id=None, EXA_API_KEY=None):
        super().__init__(
            tool_id,
            'EXASearchTool',
            "A tool that can be used to search the internet from a search_query",
            {'EXA_API_KEY': {'mandatory': True}},
            EXA_API_KEY=EXA_API_KEY,
        )

    def create_tool(self) -> EXASearchTool:
        # The underlying tool picks the key up from the environment.
        os.environ['EXA_API_KEY'] = self.parameters.get('EXA_API_KEY')
        return EXASearchTool()
|
161 |
+
|
162 |
+
class MyGithubSearchTool(MyTool):
    """Wrapper exposing crewAI's GithubSearchTool.

    Fix: ``create_tool`` previously read the non-existent ``'search_query'``
    parameter when building ``content_types``, so the user's configured
    content types were always ignored and the default list was used.
    """

    def __init__(self, tool_id=None, github_repo=None, gh_token=None, content_types=None):
        parameters = {
            'github_repo': {'mandatory': False},
            'gh_token': {'mandatory': True},
            'content_types': {'mandatory': False}
        }
        super().__init__(tool_id, 'GithubSearchTool', "A tool that can be used to semantic search a query from a Github repository's content. Valid content_types: code,repo,pr,issue (comma sepparated)", parameters, github_repo=github_repo, gh_token=gh_token, content_types=content_types)

    def create_tool(self) -> GithubSearchTool:
        # Use the 'content_types' parameter (was mistakenly 'search_query').
        raw_types = self.parameters.get('content_types')
        content_types = raw_types.split(",") if raw_types else ["code", "repo", "pr", "issue"]
        return GithubSearchTool(
            github_repo=self.parameters.get('github_repo') or None,
            gh_token=self.parameters.get('gh_token'),
            content_types=content_types
        )
|
177 |
+
|
178 |
+
class MyJSONSearchTool(MyTool):
    """Wrapper exposing crewAI's JSONSearchTool, optionally pinned to one JSON file."""

    def __init__(self, tool_id=None, json_path=None):
        super().__init__(
            tool_id,
            'JSONSearchTool',
            "A tool that can be used to semantic search a query from a JSON's content.",
            {'json_path': {'mandatory': False}},
            json_path=json_path,
        )

    def create_tool(self) -> JSONSearchTool:
        # Keyword arg preserved; falsy parameter collapses to None.
        path = self.parameters.get('json_path') or None
        return JSONSearchTool(json_path=path)
|
187 |
+
|
188 |
+
class MyMDXSearchTool(MyTool):
    """Wrapper exposing crewAI's MDXSearchTool, optionally pinned to one MDX file."""

    def __init__(self, tool_id=None, mdx=None):
        super().__init__(
            tool_id,
            'MDXSearchTool',
            "A tool that can be used to semantic search a query from a MDX's content.",
            {'mdx': {'mandatory': False}},
            mdx=mdx,
        )

    def create_tool(self) -> MDXSearchTool:
        # Keyword arg preserved; falsy parameter collapses to None.
        mdx_path = self.parameters.get('mdx') or None
        return MDXSearchTool(mdx=mdx_path)
|
197 |
+
|
198 |
+
class MyPDFSearchTool(MyTool):
    """Wrapper exposing crewAI's PDFSearchTool, optionally pinned to one PDF file."""

    def __init__(self, tool_id=None, pdf=None):
        super().__init__(
            tool_id,
            'PDFSearchTool',
            "A tool that can be used to semantic search a query from a PDF's content.",
            {'pdf': {'mandatory': False}},
            pdf=pdf,
        )

    def create_tool(self) -> PDFSearchTool:
        # Positional arg preserved; falsy parameter collapses to None.
        pdf_path = self.parameters.get('pdf') or None
        return PDFSearchTool(pdf_path)
|
207 |
+
|
208 |
+
class MyPGSearchTool(MyTool):
    """Wrapper exposing crewAI's PGSearchTool for a mandatory Postgres URI."""

    def __init__(self, tool_id=None, db_uri=None):
        super().__init__(
            tool_id,
            'PGSearchTool',
            "A tool that can be used to semantic search a query from a database table's content.",
            {'db_uri': {'mandatory': True}},
            db_uri=db_uri,
        )

    def create_tool(self) -> PGSearchTool:
        # db_uri is mandatory, so it is handed over unmodified.
        uri = self.parameters.get('db_uri')
        return PGSearchTool(uri)
|
217 |
+
|
218 |
+
class MySeleniumScrapingTool(MyTool):
    """Wrapper exposing crewAI's SeleniumScrapingTool.

    Fix: cookie parsing used an unbounded ``split(':')``, so any cookie value
    containing a colon (e.g. base64 payloads, URLs) raised ``ValueError`` on
    tuple unpacking. Splitting on the first colon only is backward-compatible
    for well-formed ``{key:value}`` pairs and tolerant of colons in values.
    """

    def __init__(self, tool_id=None, website_url=None, css_element=None, cookie=None, wait_time=None):
        parameters = {
            'website_url': {'mandatory': False},
            'css_element': {'mandatory': False},
            'cookie': {'mandatory': False},
            'wait_time': {'mandatory': False}
        }
        super().__init__(
            tool_id,
            'SeleniumScrapingTool',
            "A tool that can be used to read a specific part of website content. CSS elements are separated by comma, cookies are in format {key1\:value1},{key2\:value2}",
            parameters,
            website_url=website_url,
            css_element=css_element,
            cookie=cookie,
            wait_time=wait_time
        )

    def create_tool(self) -> SeleniumScrapingTool:
        # Parse "{k1:v1},{k2:v2}" into [{'k1': 'v1'}, {'k2': 'v2'}].
        # split(':', 1) keeps colons inside cookie values intact.
        raw_cookie = self.parameters.get('cookie')
        cookie_arrayofdicts = (
            [{k: v} for k, v in (item.strip('{}').split(':', 1) for item in raw_cookie.split(','))]
            if raw_cookie else None
        )

        return SeleniumScrapingTool(
            website_url=self.parameters.get('website_url') or None,
            css_element=self.parameters.get('css_element').split(',') if self.parameters.get('css_element') else None,
            cookie=cookie_arrayofdicts,
            # Default wait matches the original behavior (10s).
            wait_time=self.parameters.get('wait_time') or 10
        )
|
245 |
+
|
246 |
+
class MyTXTSearchTool(MyTool):
    """Wrapper exposing crewAI's TXTSearchTool, optionally pinned to one text file."""

    def __init__(self, tool_id=None, txt=None):
        super().__init__(
            tool_id,
            'TXTSearchTool',
            "A tool that can be used to semantic search a query from a TXT's content.",
            {'txt': {'mandatory': False}},
            txt=txt,
        )

    def create_tool(self) -> TXTSearchTool:
        # Passed through verbatim (the original did not normalize falsy values here).
        txt_path = self.parameters.get('txt')
        return TXTSearchTool(txt_path)
|
255 |
+
|
256 |
+
class MyScrapeElementFromWebsiteTool(MyTool):
    """Wrapper exposing crewAI's ScrapeElementFromWebsiteTool.

    Fix: cookie parsing used an unbounded ``split(':')``, so any cookie value
    containing a colon raised ``ValueError`` during tuple unpacking. Splitting
    on the first colon only is backward-compatible and tolerant of colons in
    values (same fix as MySeleniumScrapingTool).
    """

    def __init__(self, tool_id=None, website_url=None, css_element=None, cookie=None):
        parameters = {
            'website_url': {'mandatory': False},
            'css_element': {'mandatory': False},
            'cookie': {'mandatory': False}
        }
        super().__init__(
            tool_id,
            'ScrapeElementFromWebsiteTool',
            "A tool that can be used to read a specific part of website content. CSS elements are separated by comma, cookies are in format {key1\:value1},{key2\:value2}",
            parameters,
            website_url=website_url,
            css_element=css_element,
            cookie=cookie
        )

    def create_tool(self) -> ScrapeElementFromWebsiteTool:
        # Parse "{k1:v1},{k2:v2}" into [{'k1': 'v1'}, {'k2': 'v2'}];
        # split(':', 1) keeps colons inside cookie values intact.
        raw_cookie = self.parameters.get('cookie')
        cookie_arrayofdicts = (
            [{k: v} for k, v in (item.strip('{}').split(':', 1) for item in raw_cookie.split(','))]
            if raw_cookie else None
        )
        return ScrapeElementFromWebsiteTool(
            website_url=self.parameters.get('website_url') or None,
            css_element=self.parameters.get('css_element').split(",") if self.parameters.get('css_element') else None,
            cookie=cookie_arrayofdicts
        )
|
280 |
+
|
281 |
+
class MyYahooFinanceNewsTool(MyTool):
    """Parameter-less wrapper around the YahooFinanceNewsTool."""

    def __init__(self, tool_id=None):
        super().__init__(
            tool_id,
            'YahooFinanceNewsTool',
            "A tool that can be used to search Yahoo Finance News.",
            {},
        )

    def create_tool(self) -> YahooFinanceNewsTool:
        # Nothing to configure — construct directly.
        return YahooFinanceNewsTool()
|
288 |
+
|
289 |
+
class MyCustomApiTool(MyTool):
    """Wrapper exposing the project-local CustomApiTool.

    Security fix: the ``headers`` parameter is user-supplied text and was
    previously parsed with ``eval()``, allowing arbitrary code execution.
    ``ast.literal_eval`` accepts the same dict-literal syntax but evaluates
    only Python literals, never code.
    """

    def __init__(self, tool_id=None, base_url=None, headers=None, query_params=None):
        parameters = {
            'base_url': {'mandatory': False},
            'headers': {'mandatory': False},
            'query_params': {'mandatory': False}
        }
        super().__init__(tool_id, 'CustomApiTool', "A tool that can be used to make API calls with customizable parameters.", parameters, base_url=base_url, headers=headers, query_params=query_params)

    def create_tool(self) -> CustomApiTool:
        import ast  # stdlib; local import keeps this edit self-contained

        raw_headers = self.parameters.get('headers')
        # SECURITY: literal_eval instead of eval — headers come from user input.
        headers = ast.literal_eval(raw_headers) if raw_headers else None
        return CustomApiTool(
            base_url=self.parameters.get('base_url') or None,
            headers=headers,
            query_params=self.parameters.get('query_params') or None
        )
|
304 |
+
|
305 |
+
class MyCustomFileWriteTool(MyTool):
    """Wrapper exposing the project-local CustomFileWriteTool."""

    def __init__(self, tool_id=None, base_folder=None, filename=None):
        super().__init__(
            tool_id,
            'CustomFileWriteTool',
            "A tool that can be used to write a file to a specific folder.",
            {
                'base_folder': {'mandatory': True},
                'filename': {'mandatory': False},
            },
            base_folder=base_folder,
            filename=filename,
        )

    def create_tool(self) -> CustomFileWriteTool:
        # "workspace" is the fallback folder when the parameter is unset/empty.
        folder = self.parameters.get('base_folder') or "workspace"
        target_name = self.parameters.get('filename') or None
        return CustomFileWriteTool(base_folder=folder, filename=target_name)
|
318 |
+
|
319 |
+
|
320 |
+
class MyCodeInterpreterTool(MyTool):
    """Parameter-less wrapper around crewAI's CodeInterpreterTool (needs Docker)."""

    def __init__(self, tool_id=None):
        super().__init__(
            tool_id,
            'CodeInterpreterTool',
            "This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a sandboxed environment, so it is safe to run any code. Docker required.",
            {},
        )

    def create_tool(self) -> CodeInterpreterTool:
        # No configuration — construct directly.
        return CodeInterpreterTool()
|
327 |
+
|
328 |
+
|
329 |
+
class MyCustomCodeInterpreterTool(MyTool):
    """Wrapper around the project-local CustomCodeInterpreterTool (needs Docker)."""

    def __init__(self, tool_id=None, workspace_dir=None):
        super().__init__(
            tool_id,
            'CustomCodeInterpreterTool',
            "This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a sandboxed environment, so it is safe to run any code. Worskpace folder is shared. Docker required.",
            {'workspace_dir': {'mandatory': False}},
            workspace_dir=workspace_dir,
        )

    def create_tool(self) -> CustomCodeInterpreterTool:
        # "workspace" is the shared default folder when none is configured.
        workdir = self.parameters.get('workspace_dir') or "workspace"
        return CustomCodeInterpreterTool(workspace_dir=workdir)
|
338 |
+
|
339 |
+
# Register all tools here
# Maps the tool's display/name key (as stored in the DB and shown in the UI)
# to the My*Tool wrapper class that knows how to construct it.
TOOL_CLASSES = {
    # Web search / scraping / code execution / file access
    'SerperDevTool': MySerperDevTool,
    'WebsiteSearchTool': MyWebsiteSearchTool,
    'ScrapeWebsiteTool': MyScrapeWebsiteTool,
    'SeleniumScrapingTool': MySeleniumScrapingTool,
    'ScrapeElementFromWebsiteTool': MyScrapeElementFromWebsiteTool,
    'CustomApiTool': MyCustomApiTool,
    'CodeInterpreterTool': MyCodeInterpreterTool,
    'CustomCodeInterpreterTool': MyCustomCodeInterpreterTool,
    'FileReadTool': MyFileReadTool,
    'CustomFileWriteTool': MyCustomFileWriteTool,
    'DirectorySearchTool': MyDirectorySearchTool,
    'DirectoryReadTool': MyDirectoryReadTool,

    # External services (YouTube, GitHub, docs, finance)
    'YoutubeVideoSearchTool': MyYoutubeVideoSearchTool,
    'YoutubeChannelSearchTool' :MyYoutubeChannelSearchTool,
    'GithubSearchTool': MyGithubSearchTool,
    'CodeDocsSearchTool': MyCodeDocsSearchTool,
    'YahooFinanceNewsTool': MyYahooFinanceNewsTool,

    # Document-format semantic search
    'TXTSearchTool': MyTXTSearchTool,
    'CSVSearchTool': MyCSVSearchTool,
    'DOCXSearchTool': MyDocxSearchTool,
    'EXASearchTool': MyEXASearchTool,
    'JSONSearchTool': MyJSONSearchTool,
    'MDXSearchTool': MyMDXSearchTool,
    'PDFSearchTool': MyPDFSearchTool,
    'PGSearchTool': MyPGSearchTool
}
|
pg_agents.py
ADDED
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit import session_state as ss
|
3 |
+
from my_agent import MyAgent
|
4 |
+
import db_utils
|
5 |
+
|
6 |
+
class PageAgents:
    """Streamlit page listing agents, grouped by crew, with create/edit support.

    Fix: ``create_agent`` previously seeded the session list with
    ``[MyAgent]`` — the *class object*, not an instance — which would later
    crash when the UI called ``.draw()`` on it. The list now starts empty.
    """

    def __init__(self):
        self.name = "Agents"

    def create_agent(self, crew=None):
        """Create a new editable agent, persist it, and optionally attach it to a crew."""
        agent = MyAgent()
        if 'agents' not in ss:
            ss.agents = []  # was [MyAgent]: the class object itself, not an agent
        ss.agents.append(agent)
        agent.edit = True
        db_utils.save_agent(agent)  # Save agent to database

        if crew:
            crew.agents.append(agent)
            db_utils.save_crew(crew)

        return agent

    def draw(self):
        """Render the Agents page: tabs for all / unassigned / per-crew agents."""
        with st.container():
            st.subheader(self.name)
            editing = False
            if 'agents' not in ss:
                ss.agents = db_utils.load_agents()  # Load agents from database
            if 'crews' not in ss:
                ss.crews = db_utils.load_crews()  # Load crews from database

            # Map agent id -> list of crew names it belongs to.
            agent_assignment = {agent.id: [] for agent in ss.agents}
            for crew in ss.crews:
                for agent in crew.agents:
                    agent_assignment[agent.id].append(crew.name)

            # One tab for everything, one for unassigned, one per crew.
            tabs = ["All Agents"] + ["Unassigned Agents"] + [crew.name for crew in ss.crews]
            tab_objects = st.tabs(tabs)

            # Tab 0: every agent.
            with tab_objects[0]:
                st.markdown("#### All Agents")
                for agent in ss.agents:
                    agent.draw()
                    if agent.edit:
                        editing = True
                st.button('Create agent', on_click=self.create_agent, disabled=editing, key="create_agent_all")

            # Tab 1: agents that belong to no crew.
            with tab_objects[1]:
                st.markdown("#### Unassigned Agents")
                unassigned_agents = [agent for agent in ss.agents if not agent_assignment[agent.id]]
                for agent in unassigned_agents:
                    # Unique widget key: the same agent may render in several tabs.
                    unique_key = f"{agent.id}_unassigned"
                    agent.draw(key=unique_key)
                    if agent.edit:
                        editing = True
                st.button('Create agent', on_click=self.create_agent, disabled=editing, key="create_agent_unassigned")

            # Remaining tabs: agents per crew (tab index offset by the two fixed tabs).
            for i, crew in enumerate(ss.crews, 2):
                with tab_objects[i]:
                    st.markdown(f"#### {crew.name}")
                    assigned_agents = [agent for agent in crew.agents]
                    for agent in assigned_agents:
                        unique_key = f"{agent.id}_{crew.name}"
                        agent.draw(key=unique_key)
                        if agent.edit:
                            editing = True
                    st.button('Create agent', on_click=self.create_agent, disabled=editing, kwargs={'crew': crew}, key=f"create_agent_{crew.name}")

            if len(ss.agents) == 0:
                st.write("No agents defined yet.")
                st.button('Create agent', on_click=self.create_agent, disabled=editing)
|
80 |
+
|
pg_crew_run.py
ADDED
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
import streamlit as st
|
3 |
+
from streamlit import session_state as ss
|
4 |
+
import threading
|
5 |
+
import ctypes
|
6 |
+
import queue
|
7 |
+
import time
|
8 |
+
import traceback
|
9 |
+
import os
|
10 |
+
|
11 |
+
class PageCrewRun:
    """Streamlit page that runs a selected crew in a background thread.

    Fix: placeholder input keys have the form ``placeholder_<name>``;
    ``key.split('_')[1]`` truncated any placeholder name containing an
    underscore (e.g. ``placeholder_user_name`` -> ``user``). Splitting on the
    first underscore only preserves full names. Also removed duplicated
    ``ss.result``/``ss.running`` assignments around thread start.
    """

    def __init__(self):
        self.name = "Kickoff!"
        self.maintain_session_state()

    @staticmethod
    def maintain_session_state():
        """Ensure every session-state key this page relies on exists."""
        defaults = {
            'crew_thread': None,
            'result': None,
            'running': False,
            'message_queue': queue.Queue(),
            'selected_crew_name': None,
            'placeholders': {}
        }
        for key, value in defaults.items():
            if key not in ss:
                ss[key] = value

    @staticmethod
    def extract_placeholders(text):
        """Return all {placeholder} names found in *text*."""
        return re.findall(r'\{(.*?)\}', text)

    def get_placeholders_from_crew(self, crew):
        """Collect placeholder names from task and agent text fields of *crew*."""
        placeholders = set()
        attributes = ['description', 'expected_output', 'role', 'backstory', 'goal']

        for task in crew.tasks:
            placeholders.update(self.extract_placeholders(task.description))
            placeholders.update(self.extract_placeholders(task.expected_output))

        for agent in crew.agents:
            # Only role/backstory/goal apply to agents.
            for attr in attributes[2:]:
                placeholders.update(self.extract_placeholders(getattr(agent, attr)))

        return placeholders

    def run_crew(self, crewai_crew, inputs, message_queue):
        """Thread target: kick off the crew and post the result (or error) to *message_queue*."""
        agentops_enabled = (str(os.getenv('AGENTOPS_ENABLED')).lower() in ['true', '1']) and not ss.get('agentops_failed', False)
        if agentops_enabled:
            import agentops
            agentops.start_session()
        try:
            result = crewai_crew.kickoff(inputs=inputs)
            message_queue.put({"result": result})
        except Exception as e:
            if agentops_enabled:
                agentops.end_session()
            stack_trace = traceback.format_exc()
            # The UI thread picks this up from the queue and shows it as an error.
            message_queue.put({"result": f"Error running crew: {str(e)}", "stack_trace": stack_trace})

    def get_mycrew_by_name(self, crewname):
        """Return the MyCrew whose name matches, or None."""
        return next((crew for crew in ss.crews if crew.name == crewname), None)

    def draw_placeholders(self, crew):
        """Render a text input per placeholder found in *crew*."""
        placeholders = self.get_placeholders_from_crew(crew)
        if placeholders:
            st.write('Placeholders to fill in:')
            for placeholder in placeholders:
                placeholder_key = f'placeholder_{placeholder}'
                ss.placeholders[placeholder_key] = st.text_input(
                    label=placeholder,
                    key=placeholder_key,
                    value=ss.placeholders.get(placeholder_key, ''),
                    disabled=ss.running
                )

    def draw_crews(self):
        """Render the crew selector, the selected crew, and the run/stop controls."""
        if 'crews' not in ss or not ss.crews:
            st.write("No crews defined yet.")
            ss.selected_crew_name = None  # Reset selected crew name if there are no crews
            return

        crew_names = [crew.name for crew in ss.crews]
        # Drop a stale selection (e.g. the crew was deleted).
        if ss.selected_crew_name not in crew_names:
            ss.selected_crew_name = None

        selected_crew_name = st.selectbox(
            label="Select crew to run",
            options=crew_names,
            index=crew_names.index(ss.selected_crew_name) if ss.selected_crew_name in crew_names else 0,
            disabled=ss.running
        )

        if selected_crew_name != ss.selected_crew_name:
            ss.selected_crew_name = selected_crew_name
            st.rerun()

        selected_crew = self.get_mycrew_by_name(ss.selected_crew_name)

        if selected_crew:
            selected_crew.draw(expanded=False, buttons=False)
            self.draw_placeholders(selected_crew)

            if not selected_crew.is_valid(show_warning=True):
                st.error("Selected crew is not valid. Please fix the issues.")
            self.control_buttons(selected_crew)

    def control_buttons(self, selected_crew):
        """Render Run/Stop buttons and manage the background crew thread."""
        if st.button('Run crew!', disabled=not selected_crew.is_valid() or ss.running):
            # Keys look like 'placeholder_<name>'; split on the FIRST '_' only
            # so placeholder names containing underscores are preserved.
            inputs = {key.split('_', 1)[1]: value for key, value in ss.placeholders.items()}
            ss.result = None
            try:
                crew = selected_crew.get_crewai_crew(full_output=True)
            except Exception as e:
                st.exception(e)
                traceback.print_exc()
                return

            ss.running = True
            ss.crew_thread = threading.Thread(
                target=self.run_crew,
                kwargs={
                    "crewai_crew": crew,
                    "inputs": inputs,
                    "message_queue": ss.message_queue
                }
            )
            ss.crew_thread.start()
            st.rerun()

        if st.button('Stop crew!', disabled=not ss.running):
            self.force_stop_thread(ss.crew_thread)
            ss.message_queue.queue.clear()
            ss.running = False
            ss.crew_thread = None
            ss.result = None
            st.success("Crew stopped successfully.")
            st.rerun()

    def display_result(self):
        """Show the finished result, or poll the queue while the crew is running."""
        if ss.result is not None:
            if isinstance(ss.result, dict):
                if 'final_output' in ss.result["result"]:  # old version of crewai
                    st.expander("Final output", expanded=True).write(ss.result["result"]['final_output'])
                elif hasattr(ss.result["result"], 'raw'):  # new version of crewai
                    st.expander("Final output", expanded=True).write(ss.result['result'].raw)
                st.expander("Full output", expanded=False).write(ss.result)
            else:
                st.error(ss.result)
        elif ss.running and ss.crew_thread is not None:
            with st.spinner("Running crew..."):
                # Busy-wait (1s ticks) until the worker posts to the queue.
                while ss.running:
                    time.sleep(1)
                    if not ss.message_queue.empty():
                        ss.result = ss.message_queue.get()
                        ss.running = False
                        st.rerun()

    @staticmethod
    def force_stop_thread(thread):
        """Forcefully raise SystemExit inside *thread* via the CPython C-API.

        NOTE(review): PyThreadState_SetAsyncExc only interrupts Python
        bytecode; a thread blocked in a C call won't stop immediately.
        """
        if thread:
            tid = ctypes.c_long(thread.ident)
            if tid:
                res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(SystemExit))
                if res == 0:
                    st.error("Nonexistent thread id")
                else:
                    st.success("Thread stopped successfully.")

    def draw(self):
        """Render the whole Kickoff page."""
        st.subheader(self.name)
        self.draw_crews()
        self.display_result()
|
pg_crews.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit import session_state as ss
|
3 |
+
from my_crew import MyCrew
|
4 |
+
import db_utils
|
5 |
+
|
6 |
+
class PageCrews:
    """Streamlit page listing crews with create/edit support.

    Fix: ``create_crew`` previously seeded the session list with ``[MyCrew]``
    — the *class object*, not an instance — which would later crash when the
    UI called ``.draw()`` on it. The list now starts empty.
    """

    def __init__(self):
        self.name = "Crews"

    def create_crew(self):
        """Create a new editable crew, persist it, and return it."""
        crew = MyCrew()
        if 'crews' not in ss:
            ss.crews = []  # was [MyCrew]: the class object itself, not a crew
        ss.crews.append(crew)
        crew.edit = True
        db_utils.save_crew(crew)  # Save crew to database
        return crew

    def draw(self):
        """Render the Crews page."""
        with st.container():
            st.subheader(self.name)
            editing = False
            if 'crews' not in ss:
                ss.crews = db_utils.load_crews()  # Load crews from database
            for crew in ss.crews:
                crew.draw()
                if crew.edit:
                    editing = True
            if len(ss.crews) == 0:
                st.write("No crews defined yet.")
            # Disable creation while any crew is being edited.
            st.button('Create crew', on_click=self.create_crew, disabled=editing)
|
pg_export_crew.py
ADDED
@@ -0,0 +1,528 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit import session_state as ss
|
3 |
+
import zipfile
|
4 |
+
import os
|
5 |
+
import re
|
6 |
+
import json
|
7 |
+
import shutil
|
8 |
+
import db_utils
|
9 |
+
from utils import escape_quotes
|
10 |
+
from my_tools import TOOL_CLASSES
|
11 |
+
from crewai import Process
|
12 |
+
from my_crew import MyCrew
|
13 |
+
from my_agent import MyAgent
|
14 |
+
from my_task import MyTask
|
15 |
+
from datetime import datetime
|
16 |
+
|
17 |
+
class PageExportCrew:
|
18 |
+
def __init__(self):
|
19 |
+
self.name = "Import/export"
|
20 |
+
|
21 |
+
def extract_placeholders(self, text):
|
22 |
+
return re.findall(r'\{(.*?)\}', text)
|
23 |
+
|
24 |
+
def get_placeholders_from_crew(self, crew):
|
25 |
+
placeholders = set()
|
26 |
+
for task in crew.tasks:
|
27 |
+
placeholders.update(self.extract_placeholders(task.description))
|
28 |
+
placeholders.update(self.extract_placeholders(task.expected_output))
|
29 |
+
return list(placeholders)
|
30 |
+
|
31 |
+
def generate_streamlit_app(self, crew, output_dir):
|
32 |
+
agents = crew.agents
|
33 |
+
tasks = crew.tasks
|
34 |
+
|
35 |
+
# Check if any custom tools are used
|
36 |
+
custom_tools_used = any(tool.name in ["CustomApiTool", "CustomFileWriteTool", "CustomCodeInterpreterTool"]
|
37 |
+
for agent in agents for tool in agent.tools)
|
38 |
+
|
39 |
+
def json_dumps_python(obj):
|
40 |
+
if isinstance(obj, bool):
|
41 |
+
return str(obj)
|
42 |
+
return json.dumps(obj)
|
43 |
+
|
44 |
+
def format_tool_instance(tool):
|
45 |
+
tool_class = TOOL_CLASSES.get(tool.name)
|
46 |
+
if tool_class:
|
47 |
+
params = ', '.join([f'{key}={json_dumps_python(value)}' for key, value in tool.parameters.items() if value is not None])
|
48 |
+
return f'{tool.name}({params})' if params else f'{tool.name}()'
|
49 |
+
return None
|
50 |
+
|
51 |
+
agent_definitions = ",\n ".join([
|
52 |
+
f"""
|
53 |
+
Agent(
|
54 |
+
role={json_dumps_python(agent.role)},
|
55 |
+
backstory={json_dumps_python(agent.backstory)},
|
56 |
+
goal={json_dumps_python(agent.goal)},
|
57 |
+
allow_delegation={json_dumps_python(agent.allow_delegation)},
|
58 |
+
verbose={json_dumps_python(agent.verbose)},
|
59 |
+
tools=[{', '.join([format_tool_instance(tool) for tool in agent.tools])}],
|
60 |
+
llm=create_llm({json_dumps_python(agent.llm_provider_model)}, {json_dumps_python(agent.temperature)})
|
61 |
+
)
|
62 |
+
"""
|
63 |
+
for agent in agents
|
64 |
+
])
|
65 |
+
|
66 |
+
task_definitions = ",\n ".join([
|
67 |
+
f"""
|
68 |
+
Task(
|
69 |
+
description={json_dumps_python(task.description)},
|
70 |
+
expected_output={json_dumps_python(task.expected_output)},
|
71 |
+
agent=next(agent for agent in agents if agent.role == {json_dumps_python(task.agent.role)}),
|
72 |
+
async_execution={json_dumps_python(task.async_execution)}
|
73 |
+
)
|
74 |
+
"""
|
75 |
+
for task in tasks
|
76 |
+
])
|
77 |
+
|
78 |
+
placeholders = self.get_placeholders_from_crew(crew)
|
79 |
+
placeholder_inputs = "\n ".join([
|
80 |
+
f'{placeholder} = st.text_input({json_dumps_python(placeholder.capitalize())})'
|
81 |
+
for placeholder in placeholders
|
82 |
+
])
|
83 |
+
placeholders_dict = ", ".join([f'{json_dumps_python(placeholder)}: {placeholder}' for placeholder in placeholders])
|
84 |
+
|
85 |
+
manager_llm_definition = ""
|
86 |
+
if crew.process == Process.hierarchical and crew.manager_llm:
|
87 |
+
manager_llm_definition = f'manager_llm=create_llm({json_dumps_python(crew.manager_llm)})'
|
88 |
+
elif crew.process == Process.hierarchical and crew.manager_agent:
|
89 |
+
manager_llm_definition = f'manager_agent=next(agent for agent in agents if agent.role == {json_dumps_python(crew.manager_agent.role)})'
|
90 |
+
|
91 |
+
app_content = f"""
|
92 |
+
import streamlit as st
|
93 |
+
from crewai import Agent, Task, Crew, Process
|
94 |
+
from langchain_openai import ChatOpenAI
|
95 |
+
from langchain_groq import ChatGroq
|
96 |
+
from langchain_anthropic import ChatAnthropic
|
97 |
+
from dotenv import load_dotenv
|
98 |
+
import os
|
99 |
+
from crewai_tools import *
|
100 |
+
{'''from custom_tools import CustomApiTool, CustomFileWriteTool, CustomCodeInterpreterTool''' if custom_tools_used else ''}
|
101 |
+
|
102 |
+
load_dotenv()
|
103 |
+
|
104 |
+
def create_lmstudio_llm(model, temperature):
|
105 |
+
api_base = os.getenv('LMSTUDIO_API_BASE')
|
106 |
+
os.environ["OPENAI_API_KEY"] = "lm-studio"
|
107 |
+
os.environ["OPENAI_API_BASE"] = api_base
|
108 |
+
if api_base:
|
109 |
+
return ChatOpenAI(openai_api_key='lm-studio', openai_api_base=api_base, temperature=temperature)
|
110 |
+
else:
|
111 |
+
raise ValueError("LM Studio API base not set in .env file")
|
112 |
+
|
113 |
+
def create_openai_llm(model, temperature):
|
114 |
+
safe_pop_env_var('OPENAI_API_KEY')
|
115 |
+
safe_pop_env_var('OPENAI_API_BASE')
|
116 |
+
load_dotenv(override=True)
|
117 |
+
api_key = os.getenv('OPENAI_API_KEY')
|
118 |
+
api_base = os.getenv('OPENAI_API_BASE', 'https://api.openai.com/v1/')
|
119 |
+
if api_key:
|
120 |
+
return ChatOpenAI(openai_api_key=api_key, openai_api_base=api_base, model_name=model, temperature=temperature)
|
121 |
+
else:
|
122 |
+
raise ValueError("OpenAI API key not set in .env file")
|
123 |
+
|
124 |
+
def create_groq_llm(model, temperature):
|
125 |
+
api_key = os.getenv('GROQ_API_KEY')
|
126 |
+
if api_key:
|
127 |
+
return ChatGroq(groq_api_key=api_key, model_name=model, temperature=temperature)
|
128 |
+
else:
|
129 |
+
raise ValueError("Groq API key not set in .env file")
|
130 |
+
|
131 |
+
def create_anthropic_llm(model, temperature):
|
132 |
+
api_key = os.getenv('ANTHROPIC_API_KEY')
|
133 |
+
if api_key:
|
134 |
+
return ChatAnthropic(anthropic_api_key=api_key, model_name=model, temperature=temperature)
|
135 |
+
else:
|
136 |
+
raise ValueError("Anthropic API key not set in .env file")
|
137 |
+
|
138 |
+
def safe_pop_env_var(key):
|
139 |
+
try:
|
140 |
+
os.environ.pop(key)
|
141 |
+
except KeyError:
|
142 |
+
pass
|
143 |
+
|
144 |
+
LLM_CONFIG = {{
|
145 |
+
"OpenAI": {{
|
146 |
+
"create_llm": create_openai_llm
|
147 |
+
}},
|
148 |
+
"Groq": {{
|
149 |
+
"create_llm": create_groq_llm
|
150 |
+
}},
|
151 |
+
"LM Studio": {{
|
152 |
+
"create_llm": create_lmstudio_llm
|
153 |
+
}},
|
154 |
+
"Anthropic": {{
|
155 |
+
"create_llm": create_anthropic_llm
|
156 |
+
}}
|
157 |
+
}}
|
158 |
+
|
159 |
+
def create_llm(provider_and_model, temperature=0.1):
|
160 |
+
provider, model = provider_and_model.split(": ")
|
161 |
+
create_llm_func = LLM_CONFIG.get(provider, {{}}).get("create_llm")
|
162 |
+
if create_llm_func:
|
163 |
+
return create_llm_func(model, temperature)
|
164 |
+
else:
|
165 |
+
raise ValueError(f"LLM provider {{provider}} is not recognized or not supported")
|
166 |
+
|
167 |
+
def load_agents():
|
168 |
+
agents = [
|
169 |
+
{agent_definitions}
|
170 |
+
]
|
171 |
+
return agents
|
172 |
+
|
173 |
+
def load_tasks(agents):
|
174 |
+
tasks = [
|
175 |
+
{task_definitions}
|
176 |
+
]
|
177 |
+
return tasks
|
178 |
+
|
179 |
+
def main():
|
180 |
+
st.title({json_dumps_python(crew.name)})
|
181 |
+
|
182 |
+
agents = load_agents()
|
183 |
+
tasks = load_tasks(agents)
|
184 |
+
crew = Crew(
|
185 |
+
agents=agents,
|
186 |
+
tasks=tasks,
|
187 |
+
process={json_dumps_python(crew.process)},
|
188 |
+
verbose={json_dumps_python(crew.verbose)},
|
189 |
+
memory={json_dumps_python(crew.memory)},
|
190 |
+
cache={json_dumps_python(crew.cache)},
|
191 |
+
max_rpm={json_dumps_python(crew.max_rpm)},
|
192 |
+
{manager_llm_definition}
|
193 |
+
)
|
194 |
+
|
195 |
+
{placeholder_inputs}
|
196 |
+
|
197 |
+
placeholders = {{
|
198 |
+
{placeholders_dict}
|
199 |
+
}}
|
200 |
+
|
201 |
+
if st.button("Run Crew"):
|
202 |
+
with st.spinner("Running crew..."):
|
203 |
+
try:
|
204 |
+
result = crew.kickoff(inputs=placeholders)
|
205 |
+
if isinstance(result, dict):
|
206 |
+
with st.expander("Final output", expanded=True):
|
207 |
+
st.write(result.get('final_output', 'No final output available'))
|
208 |
+
with st.expander("Full output", expanded=False):
|
209 |
+
st.write(result)
|
210 |
+
else:
|
211 |
+
st.write("Result:")
|
212 |
+
st.write(result)
|
213 |
+
except Exception as e:
|
214 |
+
st.error(f"An error occurred: {{str(e)}}")
|
215 |
+
|
216 |
+
if __name__ == '__main__':
|
217 |
+
main()
|
218 |
+
"""
|
219 |
+
with open(os.path.join(output_dir, 'app.py'), 'w') as f:
|
220 |
+
f.write(app_content)
|
221 |
+
|
222 |
+
# If custom tools are used, copy the custom_tools.py file
|
223 |
+
if custom_tools_used:
|
224 |
+
source_path = os.path.join(os.path.dirname(__file__), 'custom_tools.py')
|
225 |
+
dest_path = os.path.join(output_dir, 'custom_tools.py')
|
226 |
+
shutil.copy2(source_path, dest_path)
|
227 |
+
|
228 |
+
def create_env_file(self, output_dir):
    """Write a template .env file into *output_dir*.

    Every line is a commented-out placeholder; the user uncomments and
    fills in the API keys for the providers they actually use.
    """
    env_content = """
# OPENAI_API_KEY="FILL-IN-YOUR-OPENAI-API-KEY"
# OPENAI_API_BASE="OPTIONAL-FILL-IN-YOUR-OPENAI-API-BASE"
# GROQ_API_KEY="FILL-IN-YOUR-GROQ-API-KEY"
# ANTHROPIC_API_KEY="FILL-IN-YOUR-ANTHROPIC-API-KEY"
# LMSTUDIO_API_BASE="http://localhost:1234/v1"
"""
    env_path = os.path.join(output_dir, '.env')
    with open(env_path, 'w') as env_file:
        env_file.write(env_content)
|
238 |
+
|
239 |
+
def create_shell_scripts(self, output_dir):
    """Write install/run helper scripts (bash + Windows batch) into *output_dir*
    and copy the main project's requirements.txt next to them.

    Bug fix: the script literals previously started with a blank line, so
    '#!/bin/bash' was not the first bytes of the file. The kernel only honors
    a shebang at offset 0, so direct execution (./run.sh) after chmod 0o755
    silently fell back to the caller's shell. The literals now begin at the
    shebang.
    """
    def write_script(filename, content, executable=False):
        # Helper: write one script file, optionally marking it executable.
        path = os.path.join(output_dir, filename)
        with open(path, 'w') as f:
            f.write(content)
        if executable:
            os.chmod(path, 0o755)

    install_sh_content = """#!/bin/bash

# Create a virtual environment
python -m venv venv || { echo "Failed to create venv"; exit 1; }

# Activate the virtual environment
source venv/bin/activate || { echo "Failed to activate venv"; exit 1; }

# Install requirements
pip install -r requirements.txt || { echo "Failed to install requirements"; exit 1; }

echo "Installation completed successfully."
"""
    write_script('install.sh', install_sh_content, executable=True)

    run_sh_content = """#!/bin/bash

# Get the directory where the script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"

# Activate the virtual environment
source "$SCRIPT_DIR/venv/bin/activate" || { echo "Failed to activate venv"; exit 1; }

cd "$SCRIPT_DIR"

streamlit run app.py --server.headless True
"""
    write_script('run.sh', run_sh_content, executable=True)

    install_bat_content = """@echo off

:: Create a virtual environment
python -m venv venv || (
    echo Failed to create venv
    exit /b 1
)

:: Activate the virtual environment
call venv\\Scripts\\activate || (
    echo Failed to activate venv
    exit /b 1
)

:: Install requirements
pip install -r requirements.txt || (
    echo Failed to install requirements
    exit /b 1
)

echo Installation completed successfully.
"""
    write_script('install.bat', install_bat_content)

    run_bat_content = """@echo off

:: Activate the virtual environment
call venv\\Scripts\\activate || (
    echo Failed to activate venv
    exit /b 1
)

:: Run the Streamlit app
streamlit run app.py --server.headless true
"""
    write_script('run.bat', run_bat_content)

    # Copy the main project's requirements.txt so the exported app's venv
    # install matches this project's pinned dependencies.
    # NOTE(review): raises FileNotFoundError if ../requirements.txt is absent,
    # same as the original behavior.
    source_requirements = os.path.join(os.path.dirname(__file__), '..', 'requirements.txt')
    dest_requirements = os.path.join(output_dir, 'requirements.txt')
    shutil.copy2(source_requirements, dest_requirements)
|
320 |
+
|
321 |
+
def zip_directory(self, folder_path, output_path):
    """Archive every file under *folder_path* into a zip at *output_path*.

    Entries are stored with paths relative to *folder_path*, so the zip
    root contains the folder's contents rather than the folder itself.
    """
    with zipfile.ZipFile(output_path, 'w') as archive:
        for root, _dirs, files in os.walk(folder_path):
            for name in files:
                absolute = os.path.join(root, name)
                # Relative arcname keeps the archive layout clean.
                archive.write(absolute, os.path.relpath(absolute, folder_path))
|
328 |
+
|
329 |
+
def create_export(self, crew_name):
    """Build a standalone single-page app folder for *crew_name*, zip it,
    and return the path of the resulting zip archive."""
    output_dir = f"{crew_name}_app"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    target = next((c for c in ss.crews if c.name == crew_name), None)
    if target:
        self.generate_streamlit_app(target, output_dir)
        self.create_env_file(output_dir)
        self.create_shell_scripts(output_dir)

    # The (possibly empty) folder is zipped even when the crew was not
    # found, mirroring the best-effort flow of the page.
    zip_path = f"{crew_name}_app.zip"
    self.zip_directory(output_dir, zip_path)
    return zip_path
|
343 |
+
|
344 |
+
def export_crew_to_json(self, crew):
    """Serialize *crew* — its settings, agents, tasks, and every tool the
    agents reference — into a pretty-printed JSON string.

    Agents and the manager are referenced by id; tool definitions are
    looked up in the session tool list (ss.tools).
    """
    crew_data = {
        'id': crew.id,
        'name': crew.name,
        'process': crew.process,
        'verbose': crew.verbose,
        'memory': crew.memory,
        'cache': crew.cache,
        'max_rpm': crew.max_rpm,
        'manager_llm': crew.manager_llm,
        'manager_agent': crew.manager_agent.id if crew.manager_agent else None,
        'created_at': crew.created_at,
        'agents': [],
        'tasks': [],
        'tools': [],
    }

    # Collect every tool id referenced by any agent; deduplicated via set.
    referenced_tool_ids = set()

    for agent in crew.agents:
        agent_tool_ids = [tool.tool_id for tool in agent.tools]
        crew_data['agents'].append({
            'id': agent.id,
            'role': agent.role,
            'backstory': agent.backstory,
            'goal': agent.goal,
            'allow_delegation': agent.allow_delegation,
            'verbose': agent.verbose,
            'cache': agent.cache,
            'llm_provider_model': agent.llm_provider_model,
            'temperature': agent.temperature,
            'max_iter': agent.max_iter,
            'tool_ids': agent_tool_ids,
        })
        referenced_tool_ids.update(agent_tool_ids)

    for task in crew.tasks:
        crew_data['tasks'].append({
            'id': task.id,
            'description': task.description,
            'expected_output': task.expected_output,
            'async_execution': task.async_execution,
            'agent_id': task.agent.id if task.agent else None,
            'context_from_async_tasks_ids': task.context_from_async_tasks_ids,
            'created_at': task.created_at,
        })

    # Resolve referenced tools against the session tool list; unknown ids
    # are silently skipped (same as the original behavior).
    for tool_id in referenced_tool_ids:
        match = next((t for t in ss.tools if t.tool_id == tool_id), None)
        if match:
            crew_data['tools'].append({
                'tool_id': match.tool_id,
                'name': match.name,
                'description': match.description,
                'parameters': match.get_parameters(),
            })

    return json.dumps(crew_data, indent=2)
|
404 |
+
|
405 |
+
def import_crew_from_json(self, crew_data):
    """Reconstruct a crew from an exported JSON dict.

    Builds objects in dependency order (tools -> agents -> tasks -> crew),
    persisting each to the database as it is created, and registers the
    crew in session state. Returns the new MyCrew instance.
    """
    # Create tools first: agents below resolve their tools by tool_id.
    for tool_data in crew_data['tools']:
        # NOTE(review): assumes tool_data['name'] is a key of TOOL_CLASSES;
        # an unknown tool name raises KeyError here — confirm that is intended.
        tool_class = TOOL_CLASSES[tool_data['name']]
        tool = tool_class(tool_id=tool_data['tool_id'])
        tool.set_parameters(**tool_data['parameters'])
        if tool not in ss.tools:
            ss.tools.append(tool)
        db_utils.save_tool(tool)

    # Create agents
    agents = []
    for agent_data in crew_data['agents']:
        agent = MyAgent(
            id=agent_data['id'],
            role=agent_data['role'],
            backstory=agent_data['backstory'],
            goal=agent_data['goal'],
            allow_delegation=agent_data['allow_delegation'],
            verbose=agent_data['verbose'],
            cache=agent_data.get('cache', True),  # older exports may lack 'cache'
            llm_provider_model=agent_data['llm_provider_model'],
            temperature=agent_data['temperature'],
            max_iter=agent_data['max_iter'],
            created_at=agent_data.get('created_at')
        )
        # Resolve tool ids against the session tool list populated above;
        # next() without a default raises StopIteration on a missing id.
        agent.tools = [next(tool for tool in ss.tools if tool.tool_id == tool_id) for tool_id in agent_data['tool_ids']]
        agents.append(agent)
        db_utils.save_agent(agent)

    # Create tasks
    tasks = []
    for task_data in crew_data['tasks']:
        task = MyTask(
            id=task_data['id'],
            description=task_data['description'],
            expected_output=task_data['expected_output'],
            async_execution=task_data['async_execution'],
            # A task whose agent id is unknown imports with agent=None.
            agent=next((agent for agent in agents if agent.id == task_data['agent_id']), None),
            context_from_async_tasks_ids=task_data['context_from_async_tasks_ids'],
            created_at=task_data['created_at']
        )
        tasks.append(task)
        db_utils.save_task(task)

    # Create crew
    crew = MyCrew(
        id=crew_data['id'],
        name=crew_data['name'],
        process=crew_data['process'],
        verbose=crew_data['verbose'],
        memory=crew_data['memory'],
        cache=crew_data['cache'],
        max_rpm=crew_data['max_rpm'],
        manager_llm=crew_data['manager_llm'],
        manager_agent=next((agent for agent in agents if agent.id == crew_data['manager_agent']), None),
        created_at=crew_data['created_at']
    )
    crew.agents = agents
    crew.tasks = tasks
    db_utils.save_crew(crew)

    if crew not in ss.crews:
        ss.crews.append(crew)

    return crew
|
471 |
+
|
472 |
+
def draw(self):
    """Render the Import/export page.

    Offers: full-database JSON export, JSON import (full database or a
    single crew, auto-detected by shape), single-crew standalone app
    export (zip), and single-crew JSON export.
    """
    st.subheader(self.name)

    # Full JSON Export Button
    if st.button("Export everything to json"):
        current_datetime = datetime.now().strftime("%Y%m%d_%H%M%S")
        file_path = f"all_crews_{current_datetime}.json"
        db_utils.export_to_json(file_path)
        with open(file_path, "rb") as fp:
            st.download_button(
                label="Download All Crews JSON",
                data=fp,
                file_name=file_path,
                mime="application/json"
            )

    # JSON Import Button
    uploaded_file = st.file_uploader("Import JSON file", type="json")
    if uploaded_file is not None:
        json_data = json.load(uploaded_file)

        # A top-level list is a full-database dump; a dict with 'id' is a
        # single-crew export; anything else is rejected.
        if isinstance(json_data, list): # Full database export
            # db_utils.import_from_json reads from a path, so the upload is
            # written to a scratch file first.
            with open("uploaded_file.json", "w") as f:
                json.dump(json_data, f)
            db_utils.import_from_json("uploaded_file.json")
            st.success("Full database JSON file imported successfully!")
        elif isinstance(json_data, dict) and 'id' in json_data: # Single crew export
            imported_crew = self.import_crew_from_json(json_data)
            st.success(f"Crew '{imported_crew.name}' imported successfully!")
        else:
            st.error("Invalid JSON format. Please upload a valid crew or full database export file.")

    if 'crews' not in ss or len(ss.crews) == 0:
        st.write("No crews defined yet.")
    else:
        crew_names = [crew.name for crew in ss.crews]
        selected_crew_name = st.selectbox("Select crew to export", crew_names)

        if st.button("Export singlepage app"):
            zip_path = self.create_export(selected_crew_name)
            with open(zip_path, "rb") as fp:
                st.download_button(
                    label="Download Exported App",
                    data=fp,
                    file_name=f"{selected_crew_name}_app.zip",
                    mime="application/zip"
                )
        if st.button("Export crew to JSON"):
            selected_crew = next((crew for crew in ss.crews if crew.name == selected_crew_name), None)
            if selected_crew:
                crew_json = self.export_crew_to_json(selected_crew)
                st.download_button(
                    label="Download Crew JSON",
                    data=crew_json,
                    file_name=f"{selected_crew_name}_export.json",
                    mime="application/json"
                )
|
pg_tasks.py
ADDED
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from streamlit import session_state as ss
|
3 |
+
from my_task import MyTask
|
4 |
+
import db_utils
|
5 |
+
|
6 |
+
class PageTasks:
    """Streamlit page for creating and browsing tasks, grouped by crew."""

    def __init__(self):
        self.name = "Tasks"

    def create_task(self, crew=None):
        """Create a new task in edit mode, persist it, and optionally
        attach it to *crew*. Returns the new task."""
        task = MyTask()
        if 'tasks' not in ss:
            # Bug fix: start from an empty list. Previously this was
            # initialized to [MyTask] — the class object itself, not an
            # instance — which corrupted the task list on first use.
            ss.tasks = []
        ss.tasks.append(task)
        task.edit = True
        db_utils.save_task(task)  # Save task to database

        if crew:
            crew.tasks.append(task)
            db_utils.save_crew(crew)

        return task

    def draw(self):
        """Render the page: an 'All Tasks' tab, an 'Unassigned Tasks' tab,
        and one tab per crew."""
        with st.container():
            st.subheader(self.name)
            editing = False
            if 'tasks' not in ss:
                ss.tasks = db_utils.load_tasks()  # Load tasks from database
            if 'crews' not in ss:
                ss.crews = db_utils.load_crews()  # Load crews from database

            # Map task id -> list of crew names the task is assigned to.
            task_assignment = {task.id: [] for task in ss.tasks}

            for crew in ss.crews:
                for task in crew.tasks:
                    # setdefault guards against crew tasks that are missing
                    # from ss.tasks (previously this raised KeyError).
                    task_assignment.setdefault(task.id, []).append(crew.name)

            # Display tasks grouped by crew in tabs
            tabs = ["All Tasks"] + ["Unassigned Tasks"] + [crew.name for crew in ss.crews]
            tab_objects = st.tabs(tabs)

            # Display all tasks
            with tab_objects[0]:
                st.markdown("#### All Tasks")
                for task in ss.tasks:
                    task.draw()
                    if task.edit:
                        editing = True
                st.button('Create task', on_click=self.create_task, disabled=editing, key="create_task_all")

            # Display unassigned tasks
            with tab_objects[1]:
                st.markdown("#### Unassigned Tasks")
                unassigned_tasks = [task for task in ss.tasks if not task_assignment[task.id]]
                for task in unassigned_tasks:
                    unique_key = f"{task.id}_unasigned"
                    task.draw(key=unique_key)
                    if task.edit:
                        editing = True
                st.button('Create task', on_click=self.create_task, disabled=editing, key="create_task_unassigned")

            # Display tasks grouped by crew
            for i, crew in enumerate(ss.crews, 2):
                with tab_objects[i]:
                    st.markdown(f"#### {crew.name}")
                    assigned_tasks = [task for task in crew.tasks]
                    for task in assigned_tasks:
                        # Key combines task id and crew name: the same task
                        # can appear under several crew tabs.
                        unique_key = f"{task.id}_{crew.name}"
                        task.draw(key=unique_key)
                        if task.edit:
                            editing = True
                    st.button('Create task', on_click=self.create_task, disabled=editing, kwargs={'crew': crew}, key=f"create_task_{crew.name}")

            if len(ss.tasks) == 0:
                st.write("No tasks defined yet.")
                st.button('Create task', on_click=self.create_task, disabled=editing)
|
81 |
+
|
pg_tools.py
ADDED
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
from utils import rnd_id
|
3 |
+
from my_tools import TOOL_CLASSES
|
4 |
+
from streamlit import session_state as ss
|
5 |
+
import db_utils
|
6 |
+
|
7 |
+
class PageTools:
    """Streamlit page for enabling tools and editing their parameters."""

    def __init__(self):
        self.name = "Tools"
        self.available_tools = TOOL_CLASSES

    def create_tool(self, tool_name):
        """Instantiate the named tool with a fresh random id and persist it."""
        tool_cls = self.available_tools[tool_name]
        instance = tool_cls(rnd_id())
        if 'tools' not in ss:
            ss.tools = []
        ss.tools.append(instance)
        db_utils.save_tool(instance)  # Save tool to database

    def remove_tool(self, tool_id):
        """Drop the tool from session state and the database, then rerun."""
        ss.tools = [t for t in ss.tools if t.tool_id != tool_id]
        db_utils.delete_tool(tool_id)
        st.rerun()

    def set_tool_parameter(self, tool_id, param_name, value):
        """Update one parameter on the matching tool; '' is stored as None."""
        if value == "":
            value = None
        for candidate in ss.tools:
            if candidate.tool_id == tool_id:
                candidate.set_parameters(**{param_name: value})
                db_utils.save_tool(candidate)
                break

    def get_tool_display_name(self, tool):
        """Label a tool by its first parameter's value, falling back to its id."""
        param_names = tool.get_parameter_names()
        lead_name = param_names[0] if param_names else None
        lead_value = tool.parameters.get(lead_name, '') if lead_name else ''
        return f"{tool.name} ({lead_value if lead_value else tool.tool_id})"

    def draw_tools(self):
        """Two-column layout: available tools on the left, enabled on the right."""
        c1, c2 = st.columns([1, 3])
        with c1:
            for tool_name, tool_cls in self.available_tools.items():
                # A throwaway instance supplies the hover-help description.
                description = tool_cls().description
                if st.button(f"{tool_name}", key=f"enable_{tool_name}", help=description):
                    self.create_tool(tool_name)
        with c2:
            if 'tools' in ss:
                st.write("##### Enabled Tools")
                for tool in ss.tools:
                    display_name = self.get_tool_display_name(tool)
                    # Flag tools whose mandatory parameters are still missing.
                    title = display_name if tool.is_valid() else f"❗ {display_name}"
                    with st.expander(title):
                        st.write(tool.description)
                        for param_name in tool.get_parameter_names():
                            current = tool.parameters.get(param_name, "")
                            hint = "Required" if tool.is_parameter_mandatory(param_name) else "Optional"
                            entered = st.text_input(f"{param_name}", value=current, key=f"{tool.tool_id}_{param_name}", placeholder=hint)
                            if entered != current:
                                self.set_tool_parameter(tool.tool_id, param_name, entered)
                        if st.button(f"Remove", key=f"remove_{tool.tool_id}"):
                            self.remove_tool(tool.tool_id)

    def draw(self):
        """Render the Tools page."""
        st.subheader(self.name)
        self.draw_tools()
|
utils.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import random
|
2 |
+
import string
|
3 |
+
from streamlit import markdown
|
4 |
+
|
5 |
+
def rnd_id(length=8):
    """Return a random alphanumeric identifier of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choices(alphabet, k=length))
|
9 |
+
|
10 |
+
def escape_quotes(s):
    """Return *s* with double and single quotes backslash-escaped."""
    escaped = s.replace('"', '\\"')
    return escaped.replace("'", "\\'")
|
12 |
+
|
13 |
+
def fix_columns_width():
    """Inject CSS so Streamlit columns shrink to fit their content.

    Targets ``div[data-testid="column"]`` and everything inside it,
    overriding Streamlit's default flex sizing. Rendered via
    st.markdown with unsafe_allow_html so the <style> block is applied.
    """
    markdown("""
        <style>
            div[data-testid="column"] {
                width: fit-content !important;
                flex: unset;
            }
            div[data-testid="column"] * {
                width: fit-content !important;
            }
        </style>
        """, unsafe_allow_html=True)
|