kakumusic committed on
Commit
b225a21
1 Parent(s): db636c1

Upload folder using huggingface_hub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +38 -0
  2. .flake8 +12 -0
  3. .gitattributes +20 -35
  4. .github/CODEOWNERS +7 -0
  5. .github/ISSUE_TEMPLATE/1.bug.yml +173 -0
  6. .github/ISSUE_TEMPLATE/2.feature.yml +28 -0
  7. .github/PULL_REQUEST_TEMPLATE.md +31 -0
  8. .github/labeler.yml +27 -0
  9. .github/workflows/autogpt-builder-ci.yml +41 -0
  10. .github/workflows/autogpt-ci.yml +138 -0
  11. .github/workflows/autogpt-docker-cache-clean.yml +59 -0
  12. .github/workflows/autogpt-docker-ci.yml +162 -0
  13. .github/workflows/autogpt-docker-release.yml +86 -0
  14. .github/workflows/autogpt-infra-ci.yml +56 -0
  15. .github/workflows/autogpt-server-ci.yml +160 -0
  16. .github/workflows/autogpts-benchmark.yml +97 -0
  17. .github/workflows/autogpts-ci.yml +71 -0
  18. .github/workflows/benchmark-ci.yml +169 -0
  19. .github/workflows/benchmark_publish_package.yml +55 -0
  20. .github/workflows/close-stale-issues.yml +34 -0
  21. .github/workflows/forge-ci.yml +236 -0
  22. .github/workflows/frontend-ci.yml +60 -0
  23. .github/workflows/hackathon.yml +133 -0
  24. .github/workflows/pr-label.yml +66 -0
  25. .github/workflows/python-checks.yml +151 -0
  26. .github/workflows/repo-stats.yml +20 -0
  27. .github/workflows/scripts/check_actions_status.py +55 -0
  28. .github/workflows/scripts/docker-ci-summary.sh +98 -0
  29. .github/workflows/scripts/docker-release-summary.sh +85 -0
  30. .github/workflows/workflow-checker.yml +51 -0
  31. .gitignore +173 -0
  32. .gitmodules +3 -0
  33. .pr_agent.toml +6 -0
  34. .pre-commit-config.yaml +127 -0
  35. CITATION.cff +21 -0
  36. CLI-USAGE.md +182 -0
  37. CODE_OF_CONDUCT.md +40 -0
  38. CONTRIBUTING.md +38 -0
  39. Dockerfile.autogpt +61 -0
  40. FORGE-QUICKSTART.md +173 -0
  41. LICENSE +21 -0
  42. README.md +130 -9
  43. SECURITY.md +66 -0
  44. TROUBLESHOOTING.md +23 -0
  45. assets/gpt_dark_RGB.icns +0 -0
  46. assets/gpt_dark_RGB.ico +3 -0
  47. assets/gpt_dark_RGB.png +0 -0
  48. autogpt/.coveragerc +2 -0
  49. autogpt/.devcontainer/Dockerfile +13 -0
  50. autogpt/.devcontainer/devcontainer.json +56 -0
.dockerignore ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ignore everything by default, selectively add things to context
2
+ *
3
+
4
+ # AutoGPT
5
+ !autogpt/autogpt/
6
+ !autogpt/pyproject.toml
7
+ !autogpt/poetry.lock
8
+ !autogpt/README.md
9
+ !autogpt/tests/
10
+
11
+ # Benchmark
12
+ !benchmark/agbenchmark/
13
+ !benchmark/pyproject.toml
14
+ !benchmark/poetry.lock
15
+ !benchmark/README.md
16
+
17
+ # Forge
18
+ !forge/forge/
19
+ !forge/pyproject.toml
20
+ !forge/poetry.lock
21
+ !forge/README.md
22
+
23
+ # Frontend
24
+ !frontend/build/web/
25
+
26
+ # rnd
27
+ !rnd/
28
+
29
+ # Explicitly re-ignore some folders
30
+ .*
31
+ **/__pycache__
32
+ # rnd
33
+ rnd/autogpt_builder/.next/
34
+ rnd/autogpt_builder/node_modules
35
+ rnd/autogpt_builder/.env.example
36
+ rnd/autogpt_builder/.env.local
37
+
38
+
.flake8 ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [flake8]
2
+ max-line-length = 88
3
+ exclude =
4
+ .tox,
5
+ __pycache__,
6
+ *.pyc,
7
+ .env
8
+ venv*/*,
9
+ .venv/*,
10
+ reports/*,
11
+ dist/*,
12
+ data/*,
.gitattributes CHANGED
@@ -1,35 +1,20 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ frontend/build/** linguist-generated
2
+
3
+ **/poetry.lock linguist-generated
4
+
5
+ docs/_javascript/** linguist-vendored
6
+
7
+ # Exclude VCR cassettes from stats
8
+ forge/tests/vcr_cassettes/**/**.y*ml linguist-generated
9
+
10
+ * text=auto
+ assets/gpt_dark_RGB.ico filter=lfs diff=lfs merge=lfs -text
11
+ docs/content/imgs/quickstart/000_header_img.png filter=lfs diff=lfs merge=lfs -text
12
+ docs/content/imgs/quickstart/t2_01.png filter=lfs diff=lfs merge=lfs -text
13
+ docs/content/imgs/quickstart/t2_02.png filter=lfs diff=lfs merge=lfs -text
14
+ docs/content/imgs/quickstart/t2_diagram.png filter=lfs diff=lfs merge=lfs -text
15
+ docs/content/imgs/quickstart/t3_01.png filter=lfs diff=lfs merge=lfs -text
16
+ docs/content/imgs/quickstart/t3_03.png filter=lfs diff=lfs merge=lfs -text
17
+ frontend/build/web/assets/AssetManifest.bin filter=lfs diff=lfs merge=lfs -text
18
+ frontend/build/web/canvaskit/canvaskit.wasm filter=lfs diff=lfs merge=lfs -text
19
+ frontend/build/web/canvaskit/chromium/canvaskit.wasm filter=lfs diff=lfs merge=lfs -text
20
+ frontend/build/web/canvaskit/skwasm.wasm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.github/CODEOWNERS ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ * @Significant-Gravitas/maintainers
2
+ .github/workflows/ @Significant-Gravitas/devops
3
+ forge/ @Significant-Gravitas/forge-maintainers
4
+ benchmark/ @Significant-Gravitas/benchmark-maintainers
5
+ frontend/ @Significant-Gravitas/frontend-maintainers
6
+ rnd/infra @Significant-Gravitas/devops
7
+ .github/CODEOWNERS @Significant-Gravitas/admins
.github/ISSUE_TEMPLATE/1.bug.yml ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug report 🐛
2
+ description: Create a bug report for AutoGPT.
3
+ labels: ['status: needs triage']
4
+ body:
5
+ - type: markdown
6
+ attributes:
7
+ value: |
8
+ ### ⚠️ Before you continue
9
+ * Check out our [backlog], [roadmap] and join our [discord] to discuss what's going on
10
+ * If you need help, you can ask in the [discussions] section or in [#tech-support]
11
+ * **Thoroughly search the [existing issues] before creating a new one**
12
+ * Read our [wiki page on Contributing]
13
+ [backlog]: https://github.com/orgs/Significant-Gravitas/projects/1
14
+ [roadmap]: https://github.com/orgs/Significant-Gravitas/projects/2
15
+ [discord]: https://discord.gg/autogpt
16
+ [discussions]: https://github.com/Significant-Gravitas/AutoGPT/discussions
17
+ [#tech-support]: https://discord.com/channels/1092243196446249134/1092275629602394184
18
+ [existing issues]: https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue
19
+ [wiki page on Contributing]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
20
+
21
+ - type: checkboxes
22
+ attributes:
23
+ label: ⚠️ Search for existing issues first ⚠️
24
+ description: >
25
+ Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues)
26
+ to see if an issue already exists for the same problem.
27
+ options:
28
+ - label: I have searched the existing issues, and there is no existing issue for my problem
29
+ required: true
30
+
31
+ - type: markdown
32
+ attributes:
33
+ value: |
34
+ Please confirm that the issue you have is described well and precise in the title above ⬆️.
35
+ A good rule of thumb: What would you type if you were searching for the issue?
36
+
37
+ For example:
38
+ BAD - my AutoGPT keeps looping
39
+ GOOD - After performing execute_python_file, AutoGPT goes into a loop where it keeps trying to execute the file.
40
+
41
+ ⚠️ SUPER-busy repo, please help the volunteer maintainers.
42
+ The less time we spend here, the more time we can spend building AutoGPT.
43
+
44
+ Please help us help you by following these steps:
45
+ - Search for existing issues, adding a comment when you have the same or similar issue is tidier than "new issue" and
46
+ newer issues will not be reviewed earlier, this is dependent on the current priorities set by our wonderful team
47
+ - Ask on our Discord if your issue is known when you are unsure (https://discord.gg/autogpt)
48
+ - Provide relevant info:
49
+ - Provide commit-hash (`git rev-parse HEAD` gets it) if possible
50
+ - If it's a pip/packages issue, mention this in the title and provide pip version, python version
51
+ - If it's a crash, provide traceback and describe the error you got as precise as possible in the title.
52
+
53
+ - type: dropdown
54
+ attributes:
55
+ label: Which Operating System are you using?
56
+ description: >
57
+ Please select the operating system you were using to run AutoGPT when this problem occurred.
58
+ options:
59
+ - Windows
60
+ - Linux
61
+ - MacOS
62
+ - Docker
63
+ - Devcontainer / Codespace
64
+ - Windows Subsystem for Linux (WSL)
65
+ - Other
66
+ validations:
67
+ required: true
68
+ nested_fields:
69
+ - type: text
70
+ attributes:
71
+ label: Specify the system
72
+ description: Please specify the system you are working on.
73
+
74
+ - type: dropdown
75
+ attributes:
76
+ label: Which version of AutoGPT are you using?
77
+ description: |
78
+ Please select which version of AutoGPT you were using when this issue occurred.
79
+ If you downloaded the code from the [releases page](https://github.com/Significant-Gravitas/AutoGPT/releases/) make sure you were using the latest code.
80
+ **If you weren't please try with the [latest code](https://github.com/Significant-Gravitas/AutoGPT/releases/)**.
81
+ If installed with git you can run `git branch` to see which version of AutoGPT you are running.
82
+ options:
83
+ - Latest Release
84
+ - Stable (branch)
85
+ - Master (branch)
86
+ validations:
87
+ required: true
88
+
89
+ - type: dropdown
90
+ attributes:
91
+ label: What LLM Provider do you use?
92
+ description: >
93
+ If you are using AutoGPT with `SMART_LLM=gpt-3.5-turbo`, your problems may be caused by
94
+ the [limitations](https://github.com/Significant-Gravitas/AutoGPT/issues?q=is%3Aissue+label%3A%22AI+model+limitation%22) of GPT-3.5.
95
+ options:
96
+ - Azure
97
+ - Groq
98
+ - Anthropic
99
+ - Llamafile
100
+ - Other (detail in issue)
101
+ validations:
102
+ required: true
103
+
104
+ - type: dropdown
105
+ attributes:
106
+ label: Which area covers your issue best?
107
+ description: >
108
+ Select the area related to the issue you are reporting.
109
+ options:
110
+ - Installation and setup
111
+ - Memory
112
+ - Performance
113
+ - Prompt
114
+ - Commands
115
+ - Plugins
116
+ - AI Model Limitations
117
+ - Challenges
118
+ - Documentation
119
+ - Logging
120
+ - Agents
121
+ - Other
122
+ validations:
123
+ required: true
124
+ autolabels: true
125
+ nested_fields:
126
+ - type: text
127
+ attributes:
128
+ label: Specify the area
129
+ description: Please specify the area you think is best related to the issue.
130
+
131
+ - type: input
132
+ attributes:
133
+ label: What commit or version are you using?
134
+ description: It is helpful for us to reproduce to know what version of the software you were using when this happened. Please run `git log -n 1 --pretty=format:"%H"` to output the full commit hash.
135
+ validations:
136
+ required: true
137
+
138
+ - type: textarea
139
+ attributes:
140
+ label: Describe your issue.
141
+ description: Describe the problem you are experiencing. Try to describe only the issue and phrase it short but clear. ⚠️ Provide NO other data in this field
142
+ validations:
143
+ required: true
144
+
145
+ #Following are optional file content uploads
146
+ - type: markdown
147
+ attributes:
148
+ value: |
149
+ ⚠️The following is OPTIONAL, please keep in mind that the log files may contain personal information such as credentials.⚠️
150
+
151
+ "The log files are located in the folder 'logs' inside the main AutoGPT folder."
152
+
153
+ - type: textarea
154
+ attributes:
155
+ label: Upload Activity Log Content
156
+ description: |
157
+ Upload the activity log content, this can help us understand the issue better.
158
+ To do this, go to the folder logs in your main AutoGPT folder, open activity.log and copy/paste the contents to this field.
159
+ ⚠️ The activity log may contain personal data given to AutoGPT by you in prompt or input as well as
160
+ any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
161
+ validations:
162
+ required: false
163
+
164
+ - type: textarea
165
+ attributes:
166
+ label: Upload Error Log Content
167
+ description: |
168
+ Upload the error log content, this will help us understand the issue better.
169
+ To do this, go to the folder logs in your main AutoGPT folder, open error.log and copy/paste the contents to this field.
170
+ ⚠️ The error log may contain personal data given to AutoGPT by you in prompt or input as well as
171
+ any personal information that AutoGPT collected out of files during last run. Do not add the activity log if you are not comfortable with sharing it. ⚠️
172
+ validations:
173
+ required: false
.github/ISSUE_TEMPLATE/2.feature.yml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Feature request 🚀
2
+ description: Suggest a new idea for AutoGPT!
3
+ labels: ['status: needs triage']
4
+ body:
5
+ - type: markdown
6
+ attributes:
7
+ value: |
8
+ First, check out our [wiki page on Contributing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing)
9
+ Please provide a searchable summary of the issue in the title above ⬆️.
10
+ - type: checkboxes
11
+ attributes:
12
+ label: Duplicates
13
+ description: Please [search the history](https://github.com/Significant-Gravitas/AutoGPT/issues) to see if an issue already exists for the same problem.
14
+ options:
15
+ - label: I have searched the existing issues
16
+ required: true
17
+ - type: textarea
18
+ attributes:
19
+ label: Summary 💡
20
+ description: Describe how it should work.
21
+ - type: textarea
22
+ attributes:
23
+ label: Examples 🌈
24
+ description: Provide a link to other implementations, or screenshots of the expected behavior.
25
+ - type: textarea
26
+ attributes:
27
+ label: Motivation 🔦
28
+ description: What are you trying to accomplish? How has the lack of this feature affected you? Providing context helps us come up with a solution that is more useful in the real world.
.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Background
2
+
3
+ <!-- Clearly explain the need for these changes: -->
4
+
5
+ ### Changes 🏗️
6
+
7
+ <!-- Concisely describe all of the changes made in this pull request: -->
8
+
9
+ ### PR Quality Scorecard ✨
10
+
11
+ <!--
12
+ Check out our contribution guide:
13
+ https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
14
+
15
+ 1. Avoid duplicate work, issues, PRs etc.
16
+ 2. Also consider contributing something other than code; see the [contribution guide]
17
+ for options.
18
+ 3. Clearly explain your changes.
19
+ 4. Avoid making unnecessary changes, especially if they're purely based on personal
20
+ preferences. Doing so is the maintainers' job. ;-)
21
+ -->
22
+
23
+ - [x] Have you used the PR description template? &ensp; `+2 pts`
24
+ - [ ] Is your pull request atomic, focusing on a single change? &ensp; `+5 pts`
25
+ - [ ] Have you linked the GitHub issue(s) that this PR addresses? &ensp; `+5 pts`
26
+ - [ ] Have you documented your changes clearly and comprehensively? &ensp; `+5 pts`
27
+ - [ ] Have you changed or added a feature? &ensp; `-4 pts`
28
+ - [ ] Have you added/updated corresponding documentation? &ensp; `+4 pts`
29
+ - [ ] Have you added/updated corresponding integration tests? &ensp; `+5 pts`
30
+ - [ ] Have you changed the behavior of AutoGPT? &ensp; `-5 pts`
31
+ - [ ] Have you also run `agbenchmark` to verify that these changes do not regress performance? &ensp; `+10 pts`
.github/labeler.yml ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ AutoGPT Agent:
2
+ - changed-files:
3
+ - any-glob-to-any-file: autogpt/**
4
+
5
+ Forge:
6
+ - changed-files:
7
+ - any-glob-to-any-file: forge/**
8
+
9
+ Benchmark:
10
+ - changed-files:
11
+ - any-glob-to-any-file: benchmark/**
12
+
13
+ Frontend:
14
+ - changed-files:
15
+ - any-glob-to-any-file: frontend/**
16
+
17
+ documentation:
18
+ - changed-files:
19
+ - any-glob-to-any-file: docs/**
20
+
21
+ Builder:
22
+ - changed-files:
23
+ - any-glob-to-any-file: rnd/autogpt_builder/**
24
+
25
+ Server:
26
+ - changed-files:
27
+ - any-glob-to-any-file: rnd/autogpt_server/**
.github/workflows/autogpt-builder-ci.yml ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT Builder CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ master ]
6
+ paths:
7
+ - '.github/workflows/autogpt-builder-ci.yml'
8
+ - 'rnd/autogpt_builder/**'
9
+ pull_request:
10
+ paths:
11
+ - '.github/workflows/autogpt-builder-ci.yml'
12
+ - 'rnd/autogpt_builder/**'
13
+
14
+ defaults:
15
+ run:
16
+ shell: bash
17
+ working-directory: rnd/autogpt_builder
18
+
19
+ jobs:
20
+
21
+ lint:
22
+ runs-on: ubuntu-latest
23
+
24
+ steps:
25
+ - uses: actions/checkout@v4
26
+ - name: Set up Node.js
27
+ uses: actions/setup-node@v4
28
+ with:
29
+ node-version: '21'
30
+
31
+ - name: Install dependencies
32
+ run: |
33
+ npm install
34
+
35
+ - name: Check formatting with Prettier
36
+ run: |
37
+ npx prettier --check .
38
+
39
+ - name: Run lint
40
+ run: |
41
+ npm run lint
.github/workflows/autogpt-ci.yml ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ master, development, ci-test* ]
6
+ paths:
7
+ - '.github/workflows/autogpt-ci.yml'
8
+ - 'autogpt/**'
9
+ pull_request:
10
+ branches: [ master, development, release-* ]
11
+ paths:
12
+ - '.github/workflows/autogpt-ci.yml'
13
+ - 'autogpt/**'
14
+
15
+ concurrency:
16
+ group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
17
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
18
+
19
+ defaults:
20
+ run:
21
+ shell: bash
22
+ working-directory: autogpt
23
+
24
+ jobs:
25
+ test:
26
+ permissions:
27
+ contents: read
28
+ timeout-minutes: 30
29
+ strategy:
30
+ fail-fast: false
31
+ matrix:
32
+ python-version: ["3.10"]
33
+ platform-os: [ubuntu, macos, macos-arm64, windows]
34
+ runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
35
+
36
+ steps:
37
+ # Quite slow on macOS (2~4 minutes to set up Docker)
38
+ # - name: Set up Docker (macOS)
39
+ # if: runner.os == 'macOS'
40
+ # uses: crazy-max/ghaction-setup-docker@v3
41
+
42
+ - name: Start MinIO service (Linux)
43
+ if: runner.os == 'Linux'
44
+ working-directory: '.'
45
+ run: |
46
+ docker pull minio/minio:edge-cicd
47
+ docker run -d -p 9000:9000 minio/minio:edge-cicd
48
+
49
+ - name: Start MinIO service (macOS)
50
+ if: runner.os == 'macOS'
51
+ working-directory: ${{ runner.temp }}
52
+ run: |
53
+ brew install minio/stable/minio
54
+ mkdir data
55
+ minio server ./data &
56
+
57
+ # No MinIO on Windows:
58
+ # - Windows doesn't support running Linux Docker containers
59
+ # - It doesn't seem possible to start background processes on Windows. They are
60
+ # killed after the step returns.
61
+ # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
62
+
63
+ - name: Checkout repository
64
+ uses: actions/checkout@v4
65
+ with:
66
+ fetch-depth: 0
67
+ submodules: true
68
+
69
+ - name: Configure git user Auto-GPT-Bot
70
+ run: |
71
+ git config --global user.name "Auto-GPT-Bot"
72
+ git config --global user.email "[email protected]"
73
+
74
+ - name: Set up Python ${{ matrix.python-version }}
75
+ uses: actions/setup-python@v5
76
+ with:
77
+ python-version: ${{ matrix.python-version }}
78
+
79
+ - id: get_date
80
+ name: Get date
81
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
82
+
83
+ - name: Set up Python dependency cache
84
+ # On Windows, unpacking cached dependencies takes longer than just installing them
85
+ if: runner.os != 'Windows'
86
+ uses: actions/cache@v4
87
+ with:
88
+ path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
89
+ key: poetry-${{ runner.os }}-${{ hashFiles('autogpt/poetry.lock') }}
90
+
91
+ - name: Install Poetry (Unix)
92
+ if: runner.os != 'Windows'
93
+ run: |
94
+ curl -sSL https://install.python-poetry.org | python3 -
95
+
96
+ if [ "${{ runner.os }}" = "macOS" ]; then
97
+ PATH="$HOME/.local/bin:$PATH"
98
+ echo "$HOME/.local/bin" >> $GITHUB_PATH
99
+ fi
100
+
101
+ - name: Install Poetry (Windows)
102
+ if: runner.os == 'Windows'
103
+ shell: pwsh
104
+ run: |
105
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
106
+
107
+ $env:PATH += ";$env:APPDATA\Python\Scripts"
108
+ echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
109
+
110
+ - name: Install Python dependencies
111
+ run: poetry install
112
+
113
+ - name: Run pytest with coverage
114
+ run: |
115
+ poetry run pytest -vv \
116
+ --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
117
+ --numprocesses=logical --durations=10 \
118
+ tests/unit tests/integration
119
+ env:
120
+ CI: true
121
+ PLAIN_OUTPUT: True
122
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
123
+ S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
124
+ AWS_ACCESS_KEY_ID: minioadmin
125
+ AWS_SECRET_ACCESS_KEY: minioadmin
126
+
127
+ - name: Upload coverage reports to Codecov
128
+ uses: codecov/codecov-action@v4
129
+ with:
130
+ token: ${{ secrets.CODECOV_TOKEN }}
131
+ flags: autogpt-agent,${{ runner.os }}
132
+
133
+ - name: Upload logs to artifact
134
+ if: always()
135
+ uses: actions/upload-artifact@v4
136
+ with:
137
+ name: test-logs
138
+ path: autogpt/logs/
.github/workflows/autogpt-docker-cache-clean.yml ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Purge Auto-GPT Docker CI cache
2
+
3
+ on:
4
+ schedule:
5
+ - cron: 20 4 * * 1,4
6
+
7
+ env:
8
+ BASE_BRANCH: development
9
+ IMAGE_NAME: auto-gpt
10
+
11
+ jobs:
12
+ build:
13
+ runs-on: ubuntu-latest
14
+ strategy:
15
+ matrix:
16
+ build-type: [release, dev]
17
+ steps:
18
+ - name: Checkout repository
19
+ uses: actions/checkout@v4
20
+
21
+ - name: Set up Docker Buildx
22
+ uses: docker/setup-buildx-action@v3
23
+
24
+ - id: build
25
+ name: Build image
26
+ uses: docker/build-push-action@v5
27
+ with:
28
+ file: Dockerfile.autogpt
29
+ build-args: BUILD_TYPE=${{ matrix.build-type }}
30
+ load: true # save to docker images
31
+ # use GHA cache as read-only
32
+ cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
33
+
34
+ - name: Generate build report
35
+ env:
36
+ event_name: ${{ github.event_name }}
37
+ event_ref: ${{ github.event.schedule }}
38
+
39
+ build_type: ${{ matrix.build-type }}
40
+
41
+ prod_branch: master
42
+ dev_branch: development
43
+ repository: ${{ github.repository }}
44
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
45
+
46
+ current_ref: ${{ github.ref_name }}
47
+ commit_hash: ${{ github.sha }}
48
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.sha) }}
49
+ push_forced_label:
50
+
51
+ new_commits_json: ${{ null }}
52
+ compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
53
+
54
+ github_context_json: ${{ toJSON(github) }}
55
+ job_env_json: ${{ toJSON(env) }}
56
+ vars_json: ${{ toJSON(vars) }}
57
+
58
+ run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
59
+ continue-on-error: true
.github/workflows/autogpt-docker-ci.yml ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT Docker CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ master, development ]
6
+ paths:
7
+ - '.github/workflows/autogpt-docker-ci.yml'
8
+ - 'autogpt/**'
9
+ pull_request:
10
+ branches: [ master, development, release-* ]
11
+ paths:
12
+ - '.github/workflows/autogpt-docker-ci.yml'
13
+ - 'autogpt/**'
14
+
15
+ concurrency:
16
+ group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
17
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
18
+
19
+ defaults:
20
+ run:
21
+ working-directory: autogpt
22
+
23
+ env:
24
+ IMAGE_NAME: auto-gpt
25
+ DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER && format('{0}/', secrets.DOCKER_USER) || '' }}auto-gpt
26
+ DEV_IMAGE_TAG: latest-dev
27
+
28
+ jobs:
29
+ build:
30
+ runs-on: ubuntu-latest
31
+ strategy:
32
+ matrix:
33
+ build-type: [release, dev]
34
+ steps:
35
+ - name: Checkout repository
36
+ uses: actions/checkout@v4
37
+
38
+ - name: Set up Docker Buildx
39
+ uses: docker/setup-buildx-action@v3
40
+
41
+ - if: runner.debug
42
+ run: |
43
+ ls -al
44
+ du -hs *
45
+
46
+ - id: build
47
+ name: Build image
48
+ uses: docker/build-push-action@v5
49
+ with:
50
+ file: Dockerfile.autogpt
51
+ build-args: BUILD_TYPE=${{ matrix.build-type }}
52
+ tags: ${{ env.IMAGE_NAME }}
53
+ labels: GIT_REVISION=${{ github.sha }}
54
+ load: true # save to docker images
55
+ # cache layers in GitHub Actions cache to speed up builds
56
+ cache-from: type=gha,scope=autogpt-docker-${{ matrix.build-type }}
57
+ cache-to: type=gha,scope=autogpt-docker-${{ matrix.build-type }},mode=max
58
+
59
+ - name: Generate build report
60
+ env:
61
+ event_name: ${{ github.event_name }}
62
+ event_ref: ${{ github.event.ref }}
63
+ event_ref_type: ${{ github.event.ref}}
64
+
65
+ build_type: ${{ matrix.build-type }}
66
+
67
+ prod_branch: master
68
+ dev_branch: development
69
+ repository: ${{ github.repository }}
70
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
71
+
72
+ current_ref: ${{ github.ref_name }}
73
+ commit_hash: ${{ github.event.after }}
74
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
75
+ push_forced_label: ${{ github.event.forced && '☢️ forced' || '' }}
76
+
77
+ new_commits_json: ${{ toJSON(github.event.commits) }}
78
+ compare_url_template: ${{ format('/{0}/compare/{{base}}...{{head}}', github.repository) }}
79
+
80
+ github_context_json: ${{ toJSON(github) }}
81
+ job_env_json: ${{ toJSON(env) }}
82
+ vars_json: ${{ toJSON(vars) }}
83
+
84
+ run: .github/workflows/scripts/docker-ci-summary.sh >> $GITHUB_STEP_SUMMARY
85
+ continue-on-error: true
86
+
87
+ test:
88
+ runs-on: ubuntu-latest
89
+ timeout-minutes: 10
90
+
91
+ services:
92
+ minio:
93
+ image: minio/minio:edge-cicd
94
+ options: >
95
+ --name=minio
96
+ --health-interval=10s --health-timeout=5s --health-retries=3
97
+ --health-cmd="curl -f http://localhost:9000/minio/health/live"
98
+
99
+ steps:
100
+ - name: Check out repository
101
+ uses: actions/checkout@v4
102
+ with:
103
+ submodules: true
104
+
105
+ - if: github.event_name == 'push'
106
+ name: Log in to Docker hub
107
+ uses: docker/login-action@v3
108
+ with:
109
+ username: ${{ secrets.DOCKER_USER }}
110
+ password: ${{ secrets.DOCKER_PASSWORD }}
111
+
112
+ - name: Set up Docker Buildx
113
+ uses: docker/setup-buildx-action@v3
114
+
115
+ - id: build
116
+ name: Build image
117
+ uses: docker/build-push-action@v5
118
+ with:
119
+ file: Dockerfile.autogpt
120
+ build-args: BUILD_TYPE=dev # include pytest
121
+ tags: >
122
+ ${{ env.IMAGE_NAME }},
123
+ ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
124
+ labels: GIT_REVISION=${{ github.sha }}
125
+ load: true # save to docker images
126
+ # cache layers in GitHub Actions cache to speed up builds
127
+ cache-from: type=gha,scope=autogpt-docker-dev
128
+ cache-to: type=gha,scope=autogpt-docker-dev,mode=max
129
+
130
+ - id: test
131
+ name: Run tests
132
+ env:
133
+ CI: true
134
+ PLAIN_OUTPUT: True
135
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
136
+ S3_ENDPOINT_URL: http://minio:9000
137
+ AWS_ACCESS_KEY_ID: minioadmin
138
+ AWS_SECRET_ACCESS_KEY: minioadmin
139
+ run: |
140
+ set +e
141
+ docker run --env CI --env OPENAI_API_KEY \
142
+ --network container:minio \
143
+ --env S3_ENDPOINT_URL --env AWS_ACCESS_KEY_ID --env AWS_SECRET_ACCESS_KEY \
144
+ --entrypoint poetry ${{ env.IMAGE_NAME }} run \
145
+ pytest -v --cov=autogpt --cov-branch --cov-report term-missing \
146
+ --numprocesses=4 --durations=10 \
147
+ tests/unit tests/integration 2>&1 | tee test_output.txt
148
+
149
+ test_failure=${PIPESTATUS[0]}
150
+
151
+ cat << $EOF >> $GITHUB_STEP_SUMMARY
152
+ # Tests $([ $test_failure = 0 ] && echo '✅' || echo '❌')
153
+ \`\`\`
154
+ $(cat test_output.txt)
155
+ \`\`\`
156
+ $EOF
157
+
158
+ exit $test_failure
159
+
160
+ - if: github.event_name == 'push' && github.ref_name == 'master'
161
+ name: Push image to Docker Hub
162
+ run: docker push ${{ env.DEPLOY_IMAGE_NAME }}:${{ env.DEV_IMAGE_TAG }}
.github/workflows/autogpt-docker-release.yml ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT Docker Release
2
+
3
+ on:
4
+ release:
5
+ types: [ published, edited ]
6
+
7
+ workflow_dispatch:
8
+ inputs:
9
+ no_cache:
10
+ type: boolean
11
+ description: 'Build from scratch, without using cached layers'
12
+
13
+ env:
14
+ IMAGE_NAME: auto-gpt
15
+ DEPLOY_IMAGE_NAME: ${{ secrets.DOCKER_USER }}/auto-gpt
16
+
17
+ jobs:
18
+ build:
19
+ if: startsWith(github.ref, 'refs/tags/autogpt-')
20
+ runs-on: ubuntu-latest
21
+ steps:
22
+ - name: Checkout repository
23
+ uses: actions/checkout@v4
24
+
25
+ - name: Log in to Docker hub
26
+ uses: docker/login-action@v3
27
+ with:
28
+ username: ${{ secrets.DOCKER_USER }}
29
+ password: ${{ secrets.DOCKER_PASSWORD }}
30
+
31
+ - name: Set up Docker Buildx
32
+ uses: docker/setup-buildx-action@v3
33
+
34
+ # slashes are not allowed in image tags, but can appear in git branch or tag names
35
+ - id: sanitize_tag
36
+ name: Sanitize image tag
37
+ run: |
38
+ tag=${raw_tag//\//-}
39
+ echo tag=${tag#autogpt-} >> $GITHUB_OUTPUT
40
+ env:
41
+ raw_tag: ${{ github.ref_name }}
42
+
43
+ - id: build
44
+ name: Build image
45
+ uses: docker/build-push-action@v5
46
+ with:
47
+ file: Dockerfile.autogpt
48
+ build-args: BUILD_TYPE=release
49
+ load: true # save to docker images
50
+ # push: true # TODO: uncomment when this issue is fixed: https://github.com/moby/buildkit/issues/1555
51
+ tags: >
52
+ ${{ env.IMAGE_NAME }},
53
+ ${{ env.DEPLOY_IMAGE_NAME }}:latest,
54
+ ${{ env.DEPLOY_IMAGE_NAME }}:${{ steps.sanitize_tag.outputs.tag }}
55
+ labels: GIT_REVISION=${{ github.sha }}
56
+
57
+ # cache layers in GitHub Actions cache to speed up builds
58
+ cache-from: ${{ !inputs.no_cache && 'type=gha' || '' }},scope=autogpt-docker-release
59
+ cache-to: type=gha,scope=autogpt-docker-release,mode=max
60
+
61
+ - name: Push image to Docker Hub
62
+ run: docker push --all-tags ${{ env.DEPLOY_IMAGE_NAME }}
63
+
64
+ - name: Generate build report
65
+ env:
66
+ event_name: ${{ github.event_name }}
67
+ event_ref: ${{ github.event.ref }}
68
+ event_ref_type: ${{ github.event.ref}}
69
+ inputs_no_cache: ${{ inputs.no_cache }}
70
+
71
+ prod_branch: master
72
+ dev_branch: development
73
+ repository: ${{ github.repository }}
74
+ base_branch: ${{ github.ref_name != 'master' && github.ref_name != 'development' && 'development' || 'master' }}
75
+
76
+ ref_type: ${{ github.ref_type }}
77
+ current_ref: ${{ github.ref_name }}
78
+ commit_hash: ${{ github.sha }}
79
+ source_url: ${{ format('{0}/tree/{1}', github.event.repository.url, github.event.release && github.event.release.tag_name || github.sha) }}
80
+
81
+ github_context_json: ${{ toJSON(github) }}
82
+ job_env_json: ${{ toJSON(env) }}
83
+ vars_json: ${{ toJSON(vars) }}
84
+
85
+ run: .github/workflows/scripts/docker-release-summary.sh >> $GITHUB_STEP_SUMMARY
86
+ continue-on-error: true
.github/workflows/autogpt-infra-ci.yml ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT Builder Infra
2
+
3
+ on:
4
+ push:
5
+ branches: [ master ]
6
+ paths:
7
+ - '.github/workflows/autogpt-infra-ci.yml'
8
+ - 'rnd/infra/**'
9
+ pull_request:
10
+ paths:
11
+ - '.github/workflows/autogpt-infra-ci.yml'
12
+ - 'rnd/infra/**'
13
+
14
+ defaults:
15
+ run:
16
+ shell: bash
17
+ working-directory: rnd/infra
18
+
19
+ jobs:
20
+ lint:
21
+ runs-on: ubuntu-latest
22
+
23
+ steps:
24
+ - name: Checkout
25
+ uses: actions/checkout@v4 # v2 runs on deprecated Node 12; v4 matches all other workflows in this repo
26
+ with:
27
+ fetch-depth: 0
28
+
29
+ - name: TFLint
30
+ uses: pauloconnor/[email protected]
31
+ env:
32
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
33
+ with:
34
+ tflint_path: terraform/
35
+ tflint_recurse: true
36
+ tflint_changed_only: false
37
+
38
+ - name: Set up Helm
39
+ uses: azure/[email protected]
40
+ with:
41
+ version: v3.14.4
42
+
43
+ - name: Set up chart-testing
44
+ uses: helm/[email protected]
45
+
46
+ - name: Run chart-testing (list-changed)
47
+ id: list-changed
48
+ run: |
49
+ changed=$(ct list-changed --target-branch ${{ github.event.repository.default_branch }})
50
+ if [[ -n "$changed" ]]; then
51
+ echo "changed=true" >> "$GITHUB_OUTPUT"
52
+ fi
53
+
54
+ - name: Run chart-testing (lint)
55
+ if: steps.list-changed.outputs.changed == 'true'
56
+ run: ct lint --target-branch ${{ github.event.repository.default_branch }}
.github/workflows/autogpt-server-ci.yml ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPT Server CI
2
+
3
+ on:
4
+ push:
5
+ branches: [master, development, ci-test*]
6
+ paths:
7
+ - ".github/workflows/autogpt-server-ci.yml"
8
+ - "rnd/autogpt_server/**"
9
+ pull_request:
10
+ branches: [master, development, release-*]
11
+ paths:
12
+ - ".github/workflows/autogpt-server-ci.yml"
13
+ - "rnd/autogpt_server/**"
14
+
15
+ concurrency:
16
+ group: ${{ format('autogpt-server-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
17
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
18
+
19
+ defaults:
20
+ run:
21
+ shell: bash
22
+ working-directory: rnd/autogpt_server
23
+
24
+ jobs:
25
+ test:
26
+ permissions:
27
+ contents: read
28
+ timeout-minutes: 30
29
+ strategy:
30
+ fail-fast: false
31
+ matrix:
32
+ python-version: ["3.10"]
33
+ platform-os: [ubuntu, macos, macos-arm64, windows]
34
+ db-platform: [postgres, sqlite]
35
+ runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
36
+
37
+ steps:
38
+ - name: Setup PostgreSQL
39
+ if: matrix.db-platform == 'postgres'
40
+ uses: ikalnytskyi/action-setup-postgres@v6
41
+ with:
42
+ username: ${{ secrets.DB_USER || 'postgres' }}
43
+ password: ${{ secrets.DB_PASS || 'postgres' }}
44
+ database: postgres
45
+ port: 5432
46
+ id: postgres
47
+
48
+ # Quite slow on macOS (2~4 minutes to set up Docker)
49
+ # - name: Set up Docker (macOS)
50
+ # if: runner.os == 'macOS'
51
+ # uses: crazy-max/ghaction-setup-docker@v3
52
+
53
+ - name: Start MinIO service (Linux)
54
+ if: runner.os == 'Linux'
55
+ working-directory: "."
56
+ run: |
57
+ docker pull minio/minio:edge-cicd
58
+ docker run -d -p 9000:9000 minio/minio:edge-cicd
59
+
60
+ - name: Start MinIO service (macOS)
61
+ if: runner.os == 'macOS'
62
+ working-directory: ${{ runner.temp }}
63
+ run: |
64
+ brew install minio/stable/minio
65
+ mkdir data
66
+ minio server ./data &
67
+
68
+ # No MinIO on Windows:
69
+ # - Windows doesn't support running Linux Docker containers
70
+ # - It doesn't seem possible to start background processes on Windows. They are
71
+ # killed after the step returns.
72
+ # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
73
+
74
+ - name: Checkout repository
75
+ uses: actions/checkout@v4
76
+ with:
77
+ fetch-depth: 0
78
+ submodules: true
79
+
80
+ - name: Set up Python ${{ matrix.python-version }}
81
+ uses: actions/setup-python@v5
82
+ with:
83
+ python-version: ${{ matrix.python-version }}
84
+
85
+ - id: get_date
86
+ name: Get date
87
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
88
+
89
+ - name: Set up Python dependency cache
90
+ # On Windows, unpacking cached dependencies takes longer than just installing them
91
+ if: runner.os != 'Windows'
92
+ uses: actions/cache@v4
93
+ with:
94
+ path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
95
+ key: poetry-${{ runner.os }}-${{ hashFiles('rnd/autogpt_server/poetry.lock') }}
96
+
97
+ - name: Install Poetry (Unix)
98
+ if: runner.os != 'Windows'
99
+ run: |
100
+ curl -sSL https://install.python-poetry.org | python3 -
101
+
102
+ if [ "${{ runner.os }}" = "macOS" ]; then
103
+ PATH="$HOME/.local/bin:$PATH"
104
+ echo "$HOME/.local/bin" >> $GITHUB_PATH
105
+ fi
106
+
107
+ - name: Install Poetry (Windows)
108
+ if: runner.os == 'Windows'
109
+ shell: pwsh
110
+ run: |
111
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
112
+
113
+ $env:PATH += ";$env:APPDATA\Python\Scripts"
114
+ echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
115
+
116
+ - name: Install Python dependencies
117
+ run: poetry install
118
+
119
+ - name: Generate Prisma Client (Postgres)
120
+ if: matrix.db-platform == 'postgres'
121
+ run: poetry run prisma generate --schema postgres/schema.prisma
122
+
123
+ - name: Run Database Migrations (Postgres)
124
+ if: matrix.db-platform == 'postgres'
125
+ run: poetry run prisma migrate dev --schema postgres/schema.prisma --name updates
126
+ env:
127
+ CONNECTION_STR: ${{ steps.postgres.outputs.connection-uri }}
128
+
129
+ - name: Generate Prisma Client (SQLite)
130
+ if: matrix.db-platform == 'sqlite'
131
+ run: poetry run prisma generate
132
+
133
+ - name: Run Database Migrations (SQLite)
134
+ if: matrix.db-platform == 'sqlite'
135
+ run: poetry run prisma migrate dev --name updates
136
+
137
+ - name: Run Linter
138
+ run: poetry run lint
139
+
140
+ - name: Run pytest with coverage
141
+ run: |
142
+ poetry run pytest -vv \
143
+ test
144
+ env:
145
+ CI: true
146
+ PLAIN_OUTPUT: True
147
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
148
+ DB_USER: ${{ secrets.DB_USER || 'postgres' }}
149
+ DB_PASS: ${{ secrets.DB_PASS || 'postgres' }}
150
+ DB_NAME: postgres
151
+ DB_PORT: 5432
152
+ RUN_ENV: local
153
+ PORT: 8080
154
+ DATABASE_URL: postgresql://${{ secrets.DB_USER || 'postgres' }}:${{ secrets.DB_PASS || 'postgres' }}@localhost:5432/${{ secrets.DB_NAME || 'postgres'}}
155
+
156
+ # - name: Upload coverage reports to Codecov
157
+ # uses: codecov/codecov-action@v4
158
+ # with:
159
+ # token: ${{ secrets.CODECOV_TOKEN }}
160
+ # flags: autogpt-server,${{ runner.os }}
.github/workflows/autogpts-benchmark.yml ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AutoGPTs Nightly Benchmark
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ schedule:
6
+ - cron: '0 2 * * *'
7
+
8
+ jobs:
9
+ benchmark:
10
+ permissions:
11
+ contents: write
12
+ runs-on: ubuntu-latest
13
+ strategy:
14
+ matrix:
15
+ agent-name: [ autogpt ]
16
+ fail-fast: false
17
+ timeout-minutes: 120
18
+ env:
19
+ min-python-version: '3.10'
20
+ REPORTS_BRANCH: data/benchmark-reports
21
+ REPORTS_FOLDER: ${{ format('benchmark/reports/{0}', matrix.agent-name) }}
22
+ steps:
23
+ - name: Checkout repository
24
+ uses: actions/checkout@v4
25
+ with:
26
+ fetch-depth: 0
27
+ submodules: true
28
+
29
+ - name: Set up Python ${{ env.min-python-version }}
30
+ uses: actions/setup-python@v5
31
+ with:
32
+ python-version: ${{ env.min-python-version }}
33
+
34
+ - name: Install Poetry
35
+ run: curl -sSL https://install.python-poetry.org | python -
36
+
37
+ - name: Prepare reports folder
38
+ run: mkdir -p ${{ env.REPORTS_FOLDER }}
39
+
40
+ - run: poetry -C benchmark install
41
+
42
+ - name: Benchmark ${{ matrix.agent-name }}
43
+ run: |
44
+ ./run agent start ${{ matrix.agent-name }}
45
+ cd ${{ matrix.agent-name }}
46
+
47
+ set +e # Do not quit on non-zero exit codes
48
+ poetry run agbenchmark run -N 3 \
49
+ --test=ReadFile \
50
+ --test=BasicRetrieval --test=RevenueRetrieval2 \
51
+ --test=CombineCsv --test=LabelCsv --test=AnswerQuestionCombineCsv \
52
+ --test=UrlShortener --test=TicTacToe --test=Battleship \
53
+ --test=WebArenaTask_0 --test=WebArenaTask_21 --test=WebArenaTask_124 \
54
+ --test=WebArenaTask_134 --test=WebArenaTask_163
55
+
56
+ # Capture the benchmark's exit code immediately: the [ ] tests below
+ # overwrite $?, so re-reading $? after the first test compares against
+ # the test's own status, not agbenchmark's.
+ rc=$?
+ # Convert exit code 1 (some challenges failed) to exit code 0
58
+ if [ $rc -eq 0 ] || [ $rc -eq 1 ]; then
59
+ exit 0
60
+ else
61
+ exit $rc
62
+ fi
62
+ env:
63
+ AGENT_NAME: ${{ matrix.agent-name }}
64
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
65
+ REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
66
+ REPORTS_FOLDER: ${{ format('../../{0}', env.REPORTS_FOLDER) }} # account for changed workdir
67
+
68
+ TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
69
+ TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
70
+
71
+ - name: Push reports to data branch
72
+ run: |
73
+ # BODGE: Remove success_rate.json and regression_tests.json to avoid conflicts on checkout
74
+ rm ${{ env.REPORTS_FOLDER }}/*.json
75
+
76
+ # Find folder with newest (untracked) report in it
77
+ report_subfolder=$(find ${{ env.REPORTS_FOLDER }} -type f -name 'report.json' \
78
+ | xargs -I {} dirname {} \
79
+ | xargs -I {} git ls-files --others --exclude-standard {} \
80
+ | xargs -I {} dirname {} \
81
+ | sort -u)
82
+ json_report_file="$report_subfolder/report.json"
83
+
84
+ # Convert JSON report to Markdown
85
+ markdown_report_file="$report_subfolder/report.md"
86
+ poetry -C benchmark run benchmark/reports/format.py "$json_report_file" > "$markdown_report_file"
87
+ cat "$markdown_report_file" >> $GITHUB_STEP_SUMMARY
88
+
89
+ git config --global user.name 'GitHub Actions'
90
+ git config --global user.email '[email protected]'
91
+ git fetch origin ${{ env.REPORTS_BRANCH }}:${{ env.REPORTS_BRANCH }} \
92
+ && git checkout ${{ env.REPORTS_BRANCH }} \
93
+ || git checkout --orphan ${{ env.REPORTS_BRANCH }}
94
+ git reset --hard
95
+ git add ${{ env.REPORTS_FOLDER }}
96
+ git commit -m "Benchmark report for ${{ matrix.agent-name }} @ $(date +'%Y-%m-%d')" \
97
+ && git push origin ${{ env.REPORTS_BRANCH }}
.github/workflows/autogpts-ci.yml ADDED
@@ -0,0 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Agent smoke tests
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ schedule:
6
+ - cron: '0 8 * * *'
7
+ push:
8
+ branches: [ master, development, ci-test* ]
9
+ paths:
10
+ - '.github/workflows/autogpts-ci.yml'
11
+ - 'autogpt/**'
12
+ - 'forge/**'
13
+ - 'benchmark/**'
14
+ - 'run'
15
+ - 'cli.py'
16
+ - 'setup.py'
17
+ - '!**/*.md'
18
+ pull_request:
19
+ branches: [ master, development, release-* ]
20
+ paths:
21
+ - '.github/workflows/autogpts-ci.yml'
22
+ - 'autogpt/**'
23
+ - 'forge/**'
24
+ - 'benchmark/**'
25
+ - 'run'
26
+ - 'cli.py'
27
+ - 'setup.py'
28
+ - '!**/*.md'
29
+
30
+ jobs:
31
+ serve-agent-protocol:
32
+ runs-on: ubuntu-latest
33
+ strategy:
34
+ matrix:
35
+ agent-name: [ autogpt ]
36
+ fail-fast: false
37
+ timeout-minutes: 20
38
+ env:
39
+ min-python-version: '3.10'
40
+ steps:
41
+ - name: Checkout repository
42
+ uses: actions/checkout@v4
43
+ with:
44
+ fetch-depth: 0
45
+ submodules: true
46
+
47
+ - name: Set up Python ${{ env.min-python-version }}
48
+ uses: actions/setup-python@v5
49
+ with:
50
+ python-version: ${{ env.min-python-version }}
51
+
52
+ - name: Install Poetry
53
+ working-directory: ./${{ matrix.agent-name }}/
54
+ run: |
55
+ curl -sSL https://install.python-poetry.org | python -
56
+
57
+ - name: Run regression tests
58
+ run: |
59
+ ./run agent start ${{ matrix.agent-name }}
60
+ cd ${{ matrix.agent-name }}
61
+ poetry run agbenchmark --mock --test=BasicRetrieval --test=Battleship --test=WebArenaTask_0
62
+ poetry run agbenchmark --test=WriteFile
63
+ env:
64
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
65
+ AGENT_NAME: ${{ matrix.agent-name }}
66
+ REQUESTS_CA_BUNDLE: /etc/ssl/certs/ca-certificates.crt
67
+ HELICONE_CACHE_ENABLED: false
68
+ HELICONE_PROPERTY_AGENT: ${{ matrix.agent-name }}
69
+ REPORTS_FOLDER: ${{ format('../../reports/{0}', matrix.agent-name) }}
70
+ TELEMETRY_ENVIRONMENT: autogpt-ci
71
+ TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
.github/workflows/benchmark-ci.yml ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: AGBenchmark CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ master, development, ci-test* ]
6
+ paths:
7
+ - 'benchmark/**'
8
+ - .github/workflows/benchmark-ci.yml
9
+ - '!benchmark/reports/**'
10
+ pull_request:
11
+ branches: [ master, development, release-* ]
12
+ paths:
13
+ - 'benchmark/**'
14
+ - '!benchmark/reports/**'
15
+ - .github/workflows/benchmark-ci.yml
16
+
17
+ concurrency:
18
+ group: ${{ format('benchmark-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
19
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
20
+
21
+ defaults:
22
+ run:
23
+ shell: bash
24
+
25
+ env:
26
+ min-python-version: '3.10'
27
+
28
+ jobs:
29
+ test:
30
+ permissions:
31
+ contents: read
32
+ timeout-minutes: 30
33
+ strategy:
34
+ fail-fast: false
35
+ matrix:
36
+ python-version: ["3.10"]
37
+ platform-os: [ubuntu, macos, macos-arm64, windows]
38
+ runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
39
+ defaults:
40
+ run:
41
+ shell: bash
42
+ working-directory: benchmark
43
+ steps:
44
+ - name: Checkout repository
45
+ uses: actions/checkout@v4
46
+ with:
47
+ fetch-depth: 0
48
+ submodules: true
49
+
50
+ - name: Set up Python ${{ matrix.python-version }}
51
+ uses: actions/setup-python@v5
52
+ with:
53
+ python-version: ${{ matrix.python-version }}
54
+
55
+ - name: Set up Python dependency cache
56
+ # On Windows, unpacking cached dependencies takes longer than just installing them
57
+ if: runner.os != 'Windows'
58
+ uses: actions/cache@v4
59
+ with:
60
+ path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
61
+ key: poetry-${{ runner.os }}-${{ hashFiles('benchmark/poetry.lock') }}
62
+
63
+ - name: Install Poetry (Unix)
64
+ if: runner.os != 'Windows'
65
+ run: |
66
+ curl -sSL https://install.python-poetry.org | python3 -
67
+
68
+ if [ "${{ runner.os }}" = "macOS" ]; then
69
+ PATH="$HOME/.local/bin:$PATH"
70
+ echo "$HOME/.local/bin" >> $GITHUB_PATH
71
+ fi
72
+
73
+ - name: Install Poetry (Windows)
74
+ if: runner.os == 'Windows'
75
+ shell: pwsh
76
+ run: |
77
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
78
+
79
+ $env:PATH += ";$env:APPDATA\Python\Scripts"
80
+ echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
81
+
82
+ - name: Install Python dependencies
83
+ run: poetry install
84
+
85
+ - name: Run pytest with coverage
86
+ run: |
87
+ poetry run pytest -vv \
88
+ --cov=agbenchmark --cov-branch --cov-report term-missing --cov-report xml \
89
+ --durations=10 \
90
+ tests
91
+ env:
92
+ CI: true
93
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
94
+
95
+ - name: Upload coverage reports to Codecov
96
+ uses: codecov/codecov-action@v4
97
+ with:
98
+ token: ${{ secrets.CODECOV_TOKEN }}
99
+ flags: agbenchmark,${{ runner.os }}
100
+
101
+ self-test-with-agent:
102
+ runs-on: ubuntu-latest
103
+ strategy:
104
+ matrix:
105
+ agent-name: [ forge ]
106
+ fail-fast: false
107
+ timeout-minutes: 20
108
+ steps:
109
+ - name: Checkout repository
110
+ uses: actions/checkout@v4
111
+ with:
112
+ fetch-depth: 0
113
+ submodules: true
114
+
115
+ - name: Set up Python ${{ env.min-python-version }}
116
+ uses: actions/setup-python@v5
117
+ with:
118
+ python-version: ${{ env.min-python-version }}
119
+
120
+ - name: Install Poetry
121
+ run: |
122
+ curl -sSL https://install.python-poetry.org | python -
123
+
124
+ - name: Run regression tests
125
+ working-directory: .
126
+ run: |
127
+ ./run agent start ${{ matrix.agent-name }}
128
+ cd ${{ matrix.agent-name }}
129
+
130
+ set +e # Ignore non-zero exit codes and continue execution
131
+ echo "Running the following command: poetry run agbenchmark --maintain --mock"
132
+ poetry run agbenchmark --maintain --mock
133
+ EXIT_CODE=$?
134
+ set -e # Stop ignoring non-zero exit codes
135
+ # Check if the exit code was 5, and if so, exit with 0 instead
136
+ if [ $EXIT_CODE -eq 5 ]; then
137
+ echo "regression_tests.json is empty."
138
+ fi
139
+
140
+ echo "Running the following command: poetry run agbenchmark --mock"
141
+ poetry run agbenchmark --mock
142
+
143
+ echo "Running the following command: poetry run agbenchmark --mock --category=data"
144
+ poetry run agbenchmark --mock --category=data
145
+
146
+ echo "Running the following command: poetry run agbenchmark --mock --category=coding"
147
+ poetry run agbenchmark --mock --category=coding
148
+
149
+ echo "Running the following command: poetry run agbenchmark --test=WriteFile"
150
+ poetry run agbenchmark --test=WriteFile
151
+ cd ../benchmark
152
+ poetry install
153
+ echo "Adding the BUILD_SKILL_TREE environment variable. This will attempt to add new elements in the skill tree. If new elements are added, the CI fails because they should have been pushed"
154
+ export BUILD_SKILL_TREE=true
155
+
156
+ poetry run agbenchmark --mock
157
+
158
+ CHANGED=$(git diff --name-only | grep -E '(agbenchmark/challenges)|(../frontend/assets)') || echo "No diffs"
159
+ if [ ! -z "$CHANGED" ]; then
160
+ echo "There are unstaged changes please run agbenchmark and commit those changes since they are needed."
161
+ echo "$CHANGED"
162
+ exit 1
163
+ else
164
+ echo "No unstaged changes."
165
+ fi
166
+ env:
167
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
168
+ TELEMETRY_ENVIRONMENT: autogpt-benchmark-ci
169
+ TELEMETRY_OPT_IN: ${{ github.ref_name == 'master' }}
.github/workflows/benchmark_publish_package.yml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ deploy:
8
+ runs-on: ubuntu-latest
9
+ permissions:
10
+ contents: write
11
+ steps:
12
+ - name: Checkout repository
13
+ uses: actions/checkout@v4
14
+ with:
15
+ submodules: true
16
+ fetch-depth: 0
17
+
18
+ - name: Set up Python
19
+ uses: actions/setup-python@v5
20
+ with:
21
+ python-version: 3.8
22
+
23
+ - name: Install Poetry
24
+ working-directory: ./benchmark/
25
+ run: |
26
+ curl -sSL https://install.python-poetry.org | python3 -
27
+ echo "$HOME/.local/bin" >> $GITHUB_PATH # install.python-poetry.org installs here; ~/.poetry/bin is the legacy get-poetry path
28
+
29
+ - name: Build project for distribution
30
+ working-directory: ./benchmark/
31
+ run: poetry build
32
+
33
+ - name: Install dependencies
34
+ working-directory: ./benchmark/
35
+ run: poetry install
36
+
37
+ - name: Check Version
38
+ working-directory: ./benchmark/
39
+ id: check-version
40
+ run: |
41
+ echo version=$(poetry version --short) >> $GITHUB_OUTPUT
42
+
43
+ - name: Create Release
44
+ uses: ncipollo/release-action@v1
45
+ with:
46
+ artifacts: "benchmark/dist/*"
47
+ token: ${{ secrets.GITHUB_TOKEN }}
48
+ draft: false
49
+ generateReleaseNotes: false
50
+ tag: agbenchmark-v${{ steps.check-version.outputs.version }}
51
+ commit: master
52
+
53
+ - name: Build and publish
54
+ working-directory: ./benchmark/
55
+ run: poetry publish -u __token__ -p ${{ secrets.PYPI_API_TOKEN }}
.github/workflows/close-stale-issues.yml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: 'Close stale issues'
2
+ on:
3
+ schedule:
4
+ - cron: '30 1 * * *'
5
+ workflow_dispatch:
6
+
7
+ permissions:
8
+ issues: write
9
+
10
+ jobs:
11
+ stale:
12
+ runs-on: ubuntu-latest
13
+ steps:
14
+ - uses: actions/stale@v9
15
+ with:
16
+ # operations-per-run: 5000
17
+ stale-issue-message: >
18
+ This issue has automatically been marked as _stale_ because it has not had
19
+ any activity in the last 50 days. You can _unstale_ it by commenting or
20
+ removing the label. Otherwise, this issue will be closed in 10 days.
21
+ stale-pr-message: >
22
+ This pull request has automatically been marked as _stale_ because it has
23
+ not had any activity in the last 50 days. You can _unstale_ it by commenting
24
+ or removing the label.
25
+ close-issue-message: >
26
+ This issue was closed automatically because it has been stale for 10 days
27
+ with no activity.
28
+ days-before-stale: 50
29
+ days-before-close: 10
30
+ # Do not touch meta issues:
31
+ exempt-issue-labels: meta,fridge,project management
32
+ # Do not affect pull requests:
33
+ days-before-pr-stale: -1
34
+ days-before-pr-close: -1
.github/workflows/forge-ci.yml ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Forge CI
2
+
3
+ on:
4
+ push:
5
+ branches: [ master, development, ci-test* ]
6
+ paths:
7
+ - '.github/workflows/forge-ci.yml'
8
+ - 'forge/**'
9
+ - '!forge/tests/vcr_cassettes'
10
+ pull_request:
11
+ branches: [ master, development, release-* ]
12
+ paths:
13
+ - '.github/workflows/forge-ci.yml'
14
+ - 'forge/**'
15
+ - '!forge/tests/vcr_cassettes'
16
+
17
+ concurrency:
18
+ group: ${{ format('forge-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
19
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
20
+
21
+ defaults:
22
+ run:
23
+ shell: bash
24
+ working-directory: forge
25
+
26
+ jobs:
27
+ test:
28
+ permissions:
29
+ contents: read
30
+ timeout-minutes: 30
31
+ strategy:
32
+ fail-fast: false
33
+ matrix:
34
+ python-version: ["3.10"]
35
+ platform-os: [ubuntu, macos, macos-arm64, windows]
36
+ runs-on: ${{ matrix.platform-os != 'macos-arm64' && format('{0}-latest', matrix.platform-os) || 'macos-14' }}
37
+
38
+ steps:
39
+ # Quite slow on macOS (2~4 minutes to set up Docker)
40
+ # - name: Set up Docker (macOS)
41
+ # if: runner.os == 'macOS'
42
+ # uses: crazy-max/ghaction-setup-docker@v3
43
+
44
+ - name: Start MinIO service (Linux)
45
+ if: runner.os == 'Linux'
46
+ working-directory: '.'
47
+ run: |
48
+ docker pull minio/minio:edge-cicd
49
+ docker run -d -p 9000:9000 minio/minio:edge-cicd
50
+
51
+ - name: Start MinIO service (macOS)
52
+ if: runner.os == 'macOS'
53
+ working-directory: ${{ runner.temp }}
54
+ run: |
55
+ brew install minio/stable/minio
56
+ mkdir data
57
+ minio server ./data &
58
+
59
+ # No MinIO on Windows:
60
+ # - Windows doesn't support running Linux Docker containers
61
+ # - It doesn't seem possible to start background processes on Windows. They are
62
+ # killed after the step returns.
63
+ # See: https://github.com/actions/runner/issues/598#issuecomment-2011890429
64
+
65
+ - name: Checkout repository
66
+ uses: actions/checkout@v4
67
+ with:
68
+ fetch-depth: 0
69
+ submodules: true
70
+
71
+ - name: Checkout cassettes
72
+ if: ${{ startsWith(github.event_name, 'pull_request') }}
73
+ env:
74
+ PR_BASE: ${{ github.event.pull_request.base.ref }}
75
+ PR_BRANCH: ${{ github.event.pull_request.head.ref }}
76
+ PR_AUTHOR: ${{ github.event.pull_request.user.login }}
77
+ run: |
78
+ cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
79
+ cassette_base_branch="${PR_BASE}"
80
+ cd tests/vcr_cassettes
81
+
82
+ if ! git ls-remote --exit-code --heads origin $cassette_base_branch ; then
83
+ cassette_base_branch="master"
84
+ fi
85
+
86
+ if git ls-remote --exit-code --heads origin $cassette_branch ; then
87
+ git fetch origin $cassette_branch
88
+ git fetch origin $cassette_base_branch
89
+
90
+ git checkout $cassette_branch
91
+
92
+ # Pick non-conflicting cassette updates from the base branch
93
+ git merge --no-commit --strategy-option=ours origin/$cassette_base_branch
94
+ echo "Using cassettes from mirror branch '$cassette_branch'," \
95
+ "synced to upstream branch '$cassette_base_branch'."
96
+ else
97
+ git checkout -b $cassette_branch
98
+ echo "Branch '$cassette_branch' does not exist in cassette submodule." \
99
+ "Using cassettes from '$cassette_base_branch'."
100
+ fi
101
+
102
+ - name: Set up Python ${{ matrix.python-version }}
103
+ uses: actions/setup-python@v5
104
+ with:
105
+ python-version: ${{ matrix.python-version }}
106
+
107
+ - name: Set up Python dependency cache
108
+ # On Windows, unpacking cached dependencies takes longer than just installing them
109
+ if: runner.os != 'Windows'
110
+ uses: actions/cache@v4
111
+ with:
112
+ path: ${{ runner.os == 'macOS' && '~/Library/Caches/pypoetry' || '~/.cache/pypoetry' }}
113
+ key: poetry-${{ runner.os }}-${{ hashFiles('forge/poetry.lock') }}
114
+
115
+ - name: Install Poetry (Unix)
116
+ if: runner.os != 'Windows'
117
+ run: |
118
+ curl -sSL https://install.python-poetry.org | python3 -
119
+
120
+ if [ "${{ runner.os }}" = "macOS" ]; then
121
+ PATH="$HOME/.local/bin:$PATH"
122
+ echo "$HOME/.local/bin" >> $GITHUB_PATH
123
+ fi
124
+
125
+ - name: Install Poetry (Windows)
126
+ if: runner.os == 'Windows'
127
+ shell: pwsh
128
+ run: |
129
+ (Invoke-WebRequest -Uri https://install.python-poetry.org -UseBasicParsing).Content | python -
130
+
131
+ $env:PATH += ";$env:APPDATA\Python\Scripts"
132
+ echo "$env:APPDATA\Python\Scripts" >> $env:GITHUB_PATH
133
+
134
+ - name: Install Python dependencies
135
+ run: poetry install
136
+
137
+ - name: Run pytest with coverage
138
+ run: |
139
+ poetry run pytest -vv \
140
+ --cov=forge --cov-branch --cov-report term-missing --cov-report xml \
141
+ --durations=10 \
142
+ forge
143
+ env:
144
+ CI: true
145
+ PLAIN_OUTPUT: True
146
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
147
+ S3_ENDPOINT_URL: ${{ runner.os != 'Windows' && 'http://127.0.0.1:9000' || '' }}
148
+ AWS_ACCESS_KEY_ID: minioadmin
149
+ AWS_SECRET_ACCESS_KEY: minioadmin
150
+
151
+ - name: Upload coverage reports to Codecov
152
+ uses: codecov/codecov-action@v4
153
+ with:
154
+ token: ${{ secrets.CODECOV_TOKEN }}
155
+ flags: forge,${{ runner.os }}
156
+
157
+ - id: setup_git_auth
158
+ name: Set up git token authentication
159
+ # Cassettes may be pushed even when tests fail
160
+ if: success() || failure()
161
+ run: |
162
+ config_key="http.${{ github.server_url }}/.extraheader"
163
+ if [ "${{ runner.os }}" = 'macOS' ]; then
164
+ base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64)
165
+ else
166
+ base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
167
+ fi
168
+
169
+ git config "$config_key" \
170
+ "Authorization: Basic $base64_pat"
171
+
172
+ cd tests/vcr_cassettes
173
+ git config "$config_key" \
174
+ "Authorization: Basic $base64_pat"
175
+
176
+ echo "config_key=$config_key" >> $GITHUB_OUTPUT
177
+
178
+ - id: push_cassettes
179
+ name: Push updated cassettes
180
+ # For pull requests, push updated cassettes even when tests fail
181
+ if: github.event_name == 'push' || (! github.event.pull_request.head.repo.fork && (success() || failure()))
182
+ env:
183
+ PR_BRANCH: ${{ github.event.pull_request.head.ref }}
184
+ PR_AUTHOR: ${{ github.event.pull_request.user.login }}
185
+ run: |
186
+ if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
187
+ is_pull_request=true
188
+ cassette_branch="${PR_AUTHOR}-${PR_BRANCH}"
189
+ else
190
+ cassette_branch="${{ github.ref_name }}"
191
+ fi
192
+
193
+ cd tests/vcr_cassettes
194
+ # Commit & push changes to cassettes if any
195
+ if ! git diff --quiet; then
196
+ git add .
197
+ git commit -m "Auto-update cassettes"
198
+ git push origin HEAD:$cassette_branch
199
+ if [ ! $is_pull_request ]; then
200
+ cd ../..
201
+ git add tests/vcr_cassettes
202
+ git commit -m "Update cassette submodule"
203
+ git push origin HEAD:$cassette_branch
204
+ fi
205
+ echo "updated=true" >> $GITHUB_OUTPUT
206
+ else
207
+ echo "updated=false" >> $GITHUB_OUTPUT
208
+ echo "No cassette changes to commit"
209
+ fi
210
+
211
+ - name: Post Set up git token auth
212
+ if: steps.setup_git_auth.outcome == 'success'
213
+ run: |
214
+ git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
215
+ git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
216
+
217
+ - name: Apply "behaviour change" label and comment on PR
218
+ if: ${{ startsWith(github.event_name, 'pull_request') }}
219
+ run: |
220
+ PR_NUMBER="${{ github.event.pull_request.number }}"
221
+ TOKEN="${{ secrets.PAT_REVIEW }}"
222
+ REPO="${{ github.repository }}"
223
+
224
+ if [[ "${{ steps.push_cassettes.outputs.updated }}" == "true" ]]; then
225
+ echo "Adding label and comment..."
226
+ echo $TOKEN | gh auth login --with-token
227
+ gh issue edit $PR_NUMBER --add-label "behaviour change"
228
+ gh issue comment $PR_NUMBER --body "You changed AutoGPT's behaviour on ${{ runner.os }}. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
229
+ fi
230
+
231
+ - name: Upload logs to artifact
232
+ if: always()
233
+ uses: actions/upload-artifact@v4
234
+ with:
235
+ name: test-logs
236
+ path: forge/logs/
.github/workflows/frontend-ci.yml ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Frontend CI/CD
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - master
7
+ - development
8
+ - 'ci-test*' # This will match any branch that starts with "ci-test"
9
+ paths:
10
+ - 'frontend/**'
11
+ - '.github/workflows/frontend-ci.yml'
12
+ pull_request:
13
+ paths:
14
+ - 'frontend/**'
15
+ - '.github/workflows/frontend-ci.yml'
16
+
17
+ jobs:
18
+ build:
19
+ permissions:
20
+ contents: write
21
+ pull-requests: write
22
+ runs-on: ubuntu-latest
23
+ env:
24
+ BUILD_BRANCH: ${{ format('frontend-build/{0}', github.ref_name) }}
25
+
26
+ steps:
27
+ - name: Checkout Repo
28
+ uses: actions/checkout@v4
29
+
30
+ - name: Setup Flutter
31
+ uses: subosito/flutter-action@v2
32
+ with:
33
+ flutter-version: '3.13.2'
34
+
35
+ - name: Build Flutter to Web
36
+ run: |
37
+ cd frontend
38
+ flutter build web --base-href /app/
39
+
40
+ # - name: Commit and Push to ${{ env.BUILD_BRANCH }}
41
+ # if: github.event_name == 'push'
42
+ # run: |
43
+ # git config --local user.email "[email protected]"
44
+ # git config --local user.name "GitHub Action"
45
+ # git add frontend/build/web
46
+ # git checkout -B ${{ env.BUILD_BRANCH }}
47
+ # git commit -m "Update frontend build to ${GITHUB_SHA:0:7}" -a
48
+ # git push -f origin ${{ env.BUILD_BRANCH }}
49
+
50
+ - name: Create PR ${{ env.BUILD_BRANCH }} -> ${{ github.ref_name }}
51
+ if: github.event_name == 'push'
52
+ uses: peter-evans/create-pull-request@v6
53
+ with:
54
+ add-paths: frontend/build/web
55
+ base: ${{ github.ref_name }}
56
+ branch: ${{ env.BUILD_BRANCH }}
57
+ delete-branch: true
58
+ title: "Update frontend build in `${{ github.ref_name }}`"
59
+ body: "This PR updates the frontend build based on commit ${{ github.sha }}."
60
+ commit-message: "Update frontend build based on commit ${{ github.sha }}"
.github/workflows/hackathon.yml ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Hackathon
2
+
3
+ on:
4
+ workflow_dispatch:
5
+ inputs:
6
+ agents:
7
+ description: "Agents to run (comma-separated)"
8
+ required: false
9
+ default: "autogpt" # Default agents if none are specified
10
+
11
+ jobs:
12
+ matrix-setup:
13
+ runs-on: ubuntu-latest
14
+ # Service containers to run with `matrix-setup`
15
+ services:
16
+ # Label used to access the service container
17
+ postgres:
18
+ # Docker Hub image
19
+ image: postgres
20
+ # Provide the password for postgres
21
+ env:
22
+ POSTGRES_PASSWORD: postgres
23
+ # Set health checks to wait until postgres has started
24
+ options: >-
25
+ --health-cmd pg_isready
26
+ --health-interval 10s
27
+ --health-timeout 5s
28
+ --health-retries 5
29
+ ports:
30
+ # Maps tcp port 5432 on service container to the host
31
+ - 5432:5432
32
+ outputs:
33
+ matrix: ${{ steps.set-matrix.outputs.matrix }}
34
+ env-name: ${{ steps.set-matrix.outputs.env-name }}
35
+ steps:
36
+ - id: set-matrix
37
+ run: |
38
+ if [ "${{ github.event_name }}" == "schedule" ]; then
39
+ echo "::set-output name=env-name::production"
40
+ echo "::set-output name=matrix::[ 'irrelevant']"
41
+ elif [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
42
+ IFS=',' read -ra matrix_array <<< "${{ github.event.inputs.agents }}"
43
+ matrix_string="[ \"$(echo "${matrix_array[@]}" | sed 's/ /", "/g')\" ]"
44
+ echo "::set-output name=env-name::production"
45
+ echo "::set-output name=matrix::$matrix_string"
46
+ else
47
+ echo "::set-output name=env-name::testing"
48
+ echo "::set-output name=matrix::[ 'irrelevant' ]"
49
+ fi
50
+
51
+ tests:
52
+ environment:
53
+ name: "${{ needs.matrix-setup.outputs.env-name }}"
54
+ needs: matrix-setup
55
+ env:
56
+ min-python-version: "3.10"
57
+ name: "${{ matrix.agent-name }}"
58
+ runs-on: ubuntu-latest
59
+ services:
60
+ # Label used to access the service container
61
+ postgres:
62
+ # Docker Hub image
63
+ image: postgres
64
+ # Provide the password for postgres
65
+ env:
66
+ POSTGRES_PASSWORD: postgres
67
+ # Set health checks to wait until postgres has started
68
+ options: >-
69
+ --health-cmd pg_isready
70
+ --health-interval 10s
71
+ --health-timeout 5s
72
+ --health-retries 5
73
+ ports:
74
+ # Maps tcp port 5432 on service container to the host
75
+ - 5432:5432
76
+ timeout-minutes: 50
77
+ strategy:
78
+ fail-fast: false
79
+ matrix:
80
+ agent-name: ${{fromJson(needs.matrix-setup.outputs.matrix)}}
81
+ steps:
82
+ - name: Print Environment Name
83
+ run: |
84
+ echo "Matrix Setup Environment Name: ${{ needs.matrix-setup.outputs.env-name }}"
85
+
86
+ - name: Check Docker Container
87
+ id: check
88
+ run: docker ps
89
+
90
+ - name: Checkout repository
91
+ uses: actions/checkout@v4
92
+ with:
93
+ fetch-depth: 0
94
+ submodules: true
95
+
96
+ - name: Set up Python ${{ env.min-python-version }}
97
+ uses: actions/setup-python@v5
98
+ with:
99
+ python-version: ${{ env.min-python-version }}
100
+
101
+ - id: get_date
102
+ name: Get date
103
+ run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
104
+
105
+ - name: Install Poetry
106
+ run: |
107
+ curl -sSL https://install.python-poetry.org | python -
108
+
109
+ - name: Install Node.js
110
+ uses: actions/setup-node@v4
111
+ with:
112
+ node-version: v18.15
113
+
114
+ - name: Run benchmark
115
+ run: |
116
+ link=$(jq -r '.["github_repo_url"]' arena/$AGENT_NAME.json)
117
+ branch=$(jq -r '.["branch_to_benchmark"]' arena/$AGENT_NAME.json)
118
+ git clone "$link" -b "$branch" "$AGENT_NAME"
119
+ cd $AGENT_NAME
120
+ cp ./$AGENT_NAME/.env.example ./$AGENT_NAME/.env || echo "file not found"
121
+ ./run agent start $AGENT_NAME
122
+ cd ../benchmark
123
+ poetry install
124
+ poetry run agbenchmark --no-dep
125
+ env:
126
+ OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
127
+ SERP_API_KEY: ${{ secrets.SERP_API_KEY }}
128
+ SERPAPI_API_KEY: ${{ secrets.SERP_API_KEY }}
129
+ WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }}
130
+ WEAVIATE_URL: ${{ secrets.WEAVIATE_URL }}
131
+ GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
132
+ GOOGLE_CUSTOM_SEARCH_ENGINE_ID: ${{ secrets.GOOGLE_CUSTOM_SEARCH_ENGINE_ID }}
133
+ AGENT_NAME: ${{ matrix.agent-name }}
.github/workflows/pr-label.yml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: "Pull Request auto-label"
2
+
3
+ on:
4
+ # So that PRs touching the same files as the push are updated
5
+ push:
6
+ branches: [ master, development, release-* ]
7
+ paths-ignore:
8
+ - 'forge/tests/vcr_cassettes'
9
+ - 'benchmark/reports/**'
10
+ # So that the `dirtyLabel` is removed if conflicts are resolve
11
+ # We recommend `pull_request_target` so that github secrets are available.
12
+ # In `pull_request` we wouldn't be able to change labels of fork PRs
13
+ pull_request_target:
14
+ types: [ opened, synchronize ]
15
+
16
+ concurrency:
17
+ group: ${{ format('pr-label-{0}', github.event.pull_request.number || github.sha) }}
18
+ cancel-in-progress: true
19
+
20
+ jobs:
21
+ conflicts:
22
+ runs-on: ubuntu-latest
23
+ permissions:
24
+ contents: read
25
+ pull-requests: write
26
+ steps:
27
+ - name: Update PRs with conflict labels
28
+ uses: eps1lon/actions-label-merge-conflict@releases/2.x
29
+ with:
30
+ dirtyLabel: "conflicts"
31
+ #removeOnDirtyLabel: "PR: ready to ship"
32
+ repoToken: "${{ secrets.GITHUB_TOKEN }}"
33
+ commentOnDirty: "This pull request has conflicts with the base branch, please resolve those so we can evaluate the pull request."
34
+ commentOnClean: "Conflicts have been resolved! 🎉 A maintainer will review the pull request shortly."
35
+
36
+ size:
37
+ if: ${{ github.event_name == 'pull_request_target' }}
38
+ permissions:
39
+ issues: write
40
+ pull-requests: write
41
+ runs-on: ubuntu-latest
42
+ steps:
43
+ - uses: codelytv/pr-size-labeler@v1
44
+ with:
45
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
46
+ xs_label: 'size/xs'
47
+ xs_max_size: 2
48
+ s_label: 'size/s'
49
+ s_max_size: 10
50
+ m_label: 'size/m'
51
+ m_max_size: 100
52
+ l_label: 'size/l'
53
+ l_max_size: 500
54
+ xl_label: 'size/xl'
55
+ message_if_xl:
56
+
57
+ scope:
58
+ if: ${{ github.event_name == 'pull_request_target' }}
59
+ permissions:
60
+ contents: read
61
+ pull-requests: write
62
+ runs-on: ubuntu-latest
63
+ steps:
64
+ - uses: actions/labeler@v5
65
+ with:
66
+ sync-labels: true
.github/workflows/python-checks.yml ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Python checks
2
+
3
+ on:
4
+ push:
5
+ branches: [ master, development, ci-test* ]
6
+ paths:
7
+ - '.github/workflows/lint-ci.yml'
8
+ - 'autogpt/**'
9
+ - 'forge/**'
10
+ - 'benchmark/**'
11
+ - '**.py'
12
+ - '!forge/tests/vcr_cassettes'
13
+ pull_request:
14
+ branches: [ master, development, release-* ]
15
+ paths:
16
+ - '.github/workflows/lint-ci.yml'
17
+ - 'autogpt/**'
18
+ - 'forge/**'
19
+ - 'benchmark/**'
20
+ - '**.py'
21
+ - '!forge/tests/vcr_cassettes'
22
+
23
+ concurrency:
24
+ group: ${{ format('lint-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
25
+ cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
26
+
27
+ defaults:
28
+ run:
29
+ shell: bash
30
+
31
+ jobs:
32
+ get-changed-parts:
33
+ runs-on: ubuntu-latest
34
+ steps:
35
+ - name: Checkout repository
36
+ uses: actions/checkout@v4
37
+
38
+ - id: changes-in
39
+ name: Determine affected subprojects
40
+ uses: dorny/paths-filter@v3
41
+ with:
42
+ filters: |
43
+ autogpt:
44
+ - autogpt/autogpt/**
45
+ - autogpt/tests/**
46
+ - autogpt/poetry.lock
47
+ forge:
48
+ - forge/forge/**
49
+ - forge/tests/**
50
+ - forge/poetry.lock
51
+ benchmark:
52
+ - benchmark/agbenchmark/**
53
+ - benchmark/tests/**
54
+ - benchmark/poetry.lock
55
+ outputs:
56
+ changed-parts: ${{ steps.changes-in.outputs.changes }}
57
+
58
+ lint:
59
+ needs: get-changed-parts
60
+ runs-on: ubuntu-latest
61
+ env:
62
+ min-python-version: "3.10"
63
+
64
+ strategy:
65
+ matrix:
66
+ sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
67
+ fail-fast: false
68
+
69
+ steps:
70
+ - name: Checkout repository
71
+ uses: actions/checkout@v4
72
+ with:
73
+ fetch-depth: 0
74
+
75
+ - name: Set up Python ${{ env.min-python-version }}
76
+ uses: actions/setup-python@v5
77
+ with:
78
+ python-version: ${{ env.min-python-version }}
79
+
80
+ - name: Set up Python dependency cache
81
+ uses: actions/cache@v4
82
+ with:
83
+ path: ~/.cache/pypoetry
84
+ key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
85
+
86
+ - name: Install Poetry
87
+ run: curl -sSL https://install.python-poetry.org | python3 -
88
+
89
+ # Install dependencies
90
+
91
+ - name: Install Python dependencies
92
+ run: poetry -C ${{ matrix.sub-package }} install
93
+
94
+ # Lint
95
+
96
+ - name: Lint (isort)
97
+ run: poetry run isort --check .
98
+ working-directory: ${{ matrix.sub-package }}
99
+
100
+ - name: Lint (Black)
101
+ if: success() || failure()
102
+ run: poetry run black --check .
103
+ working-directory: ${{ matrix.sub-package }}
104
+
105
+ - name: Lint (Flake8)
106
+ if: success() || failure()
107
+ run: poetry run flake8 .
108
+ working-directory: ${{ matrix.sub-package }}
109
+
110
+ types:
111
+ needs: get-changed-parts
112
+ runs-on: ubuntu-latest
113
+ env:
114
+ min-python-version: "3.10"
115
+
116
+ strategy:
117
+ matrix:
118
+ sub-package: ${{ fromJson(needs.get-changed-parts.outputs.changed-parts) }}
119
+ fail-fast: false
120
+
121
+ steps:
122
+ - name: Checkout repository
123
+ uses: actions/checkout@v4
124
+ with:
125
+ fetch-depth: 0
126
+
127
+ - name: Set up Python ${{ env.min-python-version }}
128
+ uses: actions/setup-python@v5
129
+ with:
130
+ python-version: ${{ env.min-python-version }}
131
+
132
+ - name: Set up Python dependency cache
133
+ uses: actions/cache@v4
134
+ with:
135
+ path: ~/.cache/pypoetry
136
+ key: ${{ runner.os }}-poetry-${{ hashFiles(format('{0}/poetry.lock', matrix.sub-package)) }}
137
+
138
+ - name: Install Poetry
139
+ run: curl -sSL https://install.python-poetry.org | python3 -
140
+
141
+ # Install dependencies
142
+
143
+ - name: Install Python dependencies
144
+ run: poetry -C ${{ matrix.sub-package }} install
145
+
146
+ # Typecheck
147
+
148
+ - name: Typecheck
149
+ if: success() || failure()
150
+ run: poetry run pyright
151
+ working-directory: ${{ matrix.sub-package }}
.github/workflows/repo-stats.yml ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: github-repo-stats
2
+
3
+ on:
4
+ schedule:
5
+ # Run this once per day, towards the end of the day for keeping the most
6
+ # recent data point most meaningful (hours are interpreted in UTC).
7
+ - cron: "0 23 * * *"
8
+ workflow_dispatch: # Allow for running this manually.
9
+
10
+ jobs:
11
+ j1:
12
+ name: github-repo-stats
13
+ runs-on: ubuntu-latest
14
+ steps:
15
+ - name: run-ghrs
16
+ # Use latest release.
17
+ uses: jgehrcke/github-repo-stats@HEAD
18
+ with:
19
+ ghtoken: ${{ secrets.ghrs_github_api_token }}
20
+
.github/workflows/scripts/check_actions_status.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import sys
4
+
5
+ # GitHub API endpoint
6
+ api_url = os.environ["GITHUB_API_URL"]
7
+ repo = os.environ["GITHUB_REPOSITORY"]
8
+ sha = os.environ["GITHUB_SHA"]
9
+
10
+ # GitHub token for authentication
11
+ github_token = os.environ["GITHUB_TOKEN"]
12
+
13
+ # API endpoint for check runs for the specific SHA
14
+ endpoint = f"{api_url}/repos/{repo}/commits/{sha}/check-runs"
15
+
16
+ # Set up headers for authentication
17
+ headers = {
18
+ "Authorization": f"token {github_token}",
19
+ "Accept": "application/vnd.github.v3+json"
20
+ }
21
+
22
+ # Make the API request
23
+ response = requests.get(endpoint, headers=headers)
24
+
25
+ if response.status_code != 200:
26
+ print(f"Error: Unable to fetch check runs data. Status code: {response.status_code}")
27
+ sys.exit(1)
28
+
29
+ check_runs = response.json()["check_runs"]
30
+
31
+ # Flag to track if all other check runs have passed
32
+ all_others_passed = True
33
+
34
+ # Current run id
35
+ current_run_id = os.environ["GITHUB_RUN_ID"]
36
+
37
+ for run in check_runs:
38
+ if str(run["id"]) != current_run_id:
39
+ status = run["status"]
40
+ conclusion = run["conclusion"]
41
+
42
+ if status == "completed":
43
+ if conclusion not in ["success", "skipped", "neutral"]:
44
+ all_others_passed = False
45
+ print(f"Check run {run['name']} (ID: {run['id']}) has conclusion: {conclusion}")
46
+ else:
47
+ print(f"Check run {run['name']} (ID: {run['id']}) is still {status}.")
48
+ all_others_passed = False
49
+
50
+ if all_others_passed:
51
+ print("All other completed check runs have passed. This check passes.")
52
+ sys.exit(0)
53
+ else:
54
+ print("Some check runs have failed or have not completed. This check fails.")
55
+ sys.exit(1)
.github/workflows/scripts/docker-ci-summary.sh ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
3
+ head_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$current_ref/" <<< $compare_url_template)
4
+ ref_compare_url=$(sed "s/{base}/$base_branch/; s/{head}/$commit_hash/" <<< $compare_url_template)
5
+
6
+ EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
7
+
8
+ cat << $EOF
9
+ # Docker Build summary 🔨
10
+
11
+ **Source:** branch \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
12
+
13
+ **Build type:** \`$build_type\`
14
+
15
+ **Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
16
+
17
+ ## Image details
18
+
19
+ **Tags:**
20
+ $(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
21
+
22
+ <details>
23
+ <summary><h3>Layers</h3></summary>
24
+
25
+ | Age | Size | Created by instruction |
26
+ | --------- | ------ | ---------------------- |
27
+ $(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
28
+ | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
29
+ | cut -f-3 `# yeet Comment column`\
30
+ | sed 's/ ago//' `# fix Layer age`\
31
+ | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
32
+ | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
33
+ | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
34
+ | column -t -s$'\t' -o' | ' `# align columns and add separator`\
35
+ | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
36
+ </details>
37
+
38
+ <details>
39
+ <summary><h3>ENV</h3></summary>
40
+
41
+ | Variable | Value |
42
+ | -------- | -------- |
43
+ $(jq -r \
44
+ '.Config.Env
45
+ | map(
46
+ split("=")
47
+ | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
48
+ )
49
+ | map("| \(.) |")
50
+ | .[]' <<< $meta
51
+ )
52
+ </details>
53
+
54
+ <details>
55
+ <summary>Raw metadata</summary>
56
+
57
+ \`\`\`JSON
58
+ $meta
59
+ \`\`\`
60
+ </details>
61
+
62
+ ## Build details
63
+ **Build trigger:** $push_forced_label $event_name \`$event_ref\`
64
+
65
+ <details>
66
+ <summary><code>github</code> context</summary>
67
+
68
+ \`\`\`JSON
69
+ $github_context_json
70
+ \`\`\`
71
+ </details>
72
+
73
+ ### Source
74
+ **HEAD:** [$repository@\`${commit_hash:0:7}\`]($source_url) on branch [$current_ref]($ref_compare_url)
75
+
76
+ **Diff with previous HEAD:** $head_compare_url
77
+
78
+ #### New commits
79
+ $(jq -r 'map([
80
+ "**Commit [`\(.id[0:7])`](\(.url)) by \(if .author.username then "@"+.author.username else .author.name end):**",
81
+ .message,
82
+ (if .committer.name != .author.name then "\n> <sub>**Committer:** \(.committer.name) <\(.committer.email)></sub>" else "" end),
83
+ "<sub>**Timestamp:** \(.timestamp)</sub>"
84
+ ] | map("> \(.)\n") | join("")) | join("\n")' <<< $new_commits_json)
85
+
86
+ ### Job environment
87
+
88
+ #### \`vars\` context:
89
+ \`\`\`JSON
90
+ $vars_json
91
+ \`\`\`
92
+
93
+ #### \`env\` context:
94
+ \`\`\`JSON
95
+ $job_env_json
96
+ \`\`\`
97
+
98
+ $EOF
.github/workflows/scripts/docker-release-summary.sh ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+ meta=$(docker image inspect "$IMAGE_NAME" | jq '.[0]')
3
+
4
+ EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
5
+
6
+ cat << $EOF
7
+ # Docker Release Build summary 🚀🔨
8
+
9
+ **Source:** $ref_type \`$current_ref\` -> [$repository@\`${commit_hash:0:7}\`]($source_url)
10
+
11
+ **Image size:** $((`jq -r .Size <<< $meta` / 10**6))MB
12
+
13
+ ## Image details
14
+
15
+ **Tags:**
16
+ $(jq -r '.RepoTags | map("* `\(.)`") | join("\n")' <<< $meta)
17
+
18
+ <details>
19
+ <summary><h3>Layers</h3></summary>
20
+
21
+ | Age | Size | Created by instruction |
22
+ | --------- | ------ | ---------------------- |
23
+ $(docker history --no-trunc --format "{{.CreatedSince}}\t{{.Size}}\t\`{{.CreatedBy}}\`\t{{.Comment}}" $IMAGE_NAME \
24
+ | grep 'buildkit.dockerfile' `# filter for layers created in this build process`\
25
+ | cut -f-3 `# yeet Comment column`\
26
+ | sed 's/ ago//' `# fix Layer age`\
27
+ | sed 's/ # buildkit//' `# remove buildkit comment from instructions`\
28
+ | sed 's/\$/\\$/g' `# escape variable and shell expansions`\
29
+ | sed 's/|/\\|/g' `# escape pipes so they don't interfere with column separators`\
30
+ | column -t -s$'\t' -o' | ' `# align columns and add separator`\
31
+ | sed 's/^/| /; s/$/ |/' `# add table row start and end pipes`)
32
+ </details>
33
+
34
+ <details>
35
+ <summary><h3>ENV</h3></summary>
36
+
37
+ | Variable | Value |
38
+ | -------- | -------- |
39
+ $(jq -r \
40
+ '.Config.Env
41
+ | map(
42
+ split("=")
43
+ | "\(.[0]) | `\(.[1] | gsub("\\s+"; " "))`"
44
+ )
45
+ | map("| \(.) |")
46
+ | .[]' <<< $meta
47
+ )
48
+ </details>
49
+
50
+ <details>
51
+ <summary>Raw metadata</summary>
52
+
53
+ \`\`\`JSON
54
+ $meta
55
+ \`\`\`
56
+ </details>
57
+
58
+ ## Build details
59
+ **Build trigger:** $event_name \`$current_ref\`
60
+
61
+ | Parameter | Value |
62
+ | -------------- | ------------ |
63
+ | \`no_cache\` | \`$inputs_no_cache\` |
64
+
65
+ <details>
66
+ <summary><code>github</code> context</summary>
67
+
68
+ \`\`\`JSON
69
+ $github_context_json
70
+ \`\`\`
71
+ </details>
72
+
73
+ ### Job environment
74
+
75
+ #### \`vars\` context:
76
+ \`\`\`JSON
77
+ $vars_json
78
+ \`\`\`
79
+
80
+ #### \`env\` context:
81
+ \`\`\`JSON
82
+ $job_env_json
83
+ \`\`\`
84
+
85
+ $EOF
.github/workflows/workflow-checker.yml ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: PR Status Checker
2
+ on:
3
+ workflow_run:
4
+ workflows: ["*"]
5
+ types:
6
+ - completed
7
+
8
+ jobs:
9
+ status-check:
10
+ name: Check Actions Status
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v4
14
+ - name: Set up Python
15
+ uses: actions/setup-python@v5
16
+ with:
17
+ python-version: "3.10"
18
+ - name: Install dependencies
19
+ run: |
20
+ python -m pip install --upgrade pip
21
+ pip install requests
22
+ - name: Debug Information
23
+ run: |
24
+ echo "Event name: ${{ github.event_name }}"
25
+ echo "Workflow: ${{ github.workflow }}"
26
+ echo "Action: ${{ github.action }}"
27
+ echo "Actor: ${{ github.actor }}"
28
+ echo "Repository: ${{ github.repository }}"
29
+ echo "Ref: ${{ github.ref }}"
30
+ echo "Head ref: ${{ github.head_ref }}"
31
+ echo "Base ref: ${{ github.base_ref }}"
32
+ echo "Event payload:"
33
+ cat $GITHUB_EVENT_PATH
34
+ - name: Debug File Structure
35
+ run: |
36
+ echo "Current directory:"
37
+ pwd
38
+ echo "Directory contents:"
39
+ ls -R
40
+ echo "GitHub workspace:"
41
+ echo $GITHUB_WORKSPACE
42
+ echo "GitHub workspace contents:"
43
+ ls -R $GITHUB_WORKSPACE
44
+ - name: Check Actions Status
45
+ run: |
46
+ echo "Current directory before running Python script:"
47
+ pwd
48
+ echo "Attempting to run Python script:"
49
+ python .github/scripts/check_actions_status.py
50
+ env:
51
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.gitignore ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Original ignores
2
+ .github_access_token
3
+ autogpt/keys.py
4
+ autogpt/*.json
5
+ auto_gpt_workspace/*
6
+ *.mpeg
7
+ .env
8
+ azure.yaml
9
+ .vscode
10
+ .idea/*
11
+ auto-gpt.json
12
+ log.txt
13
+ log-ingestion.txt
14
+ /logs
15
+ *.log
16
+ *.mp3
17
+ mem.sqlite3
18
+ venvAutoGPT
19
+
20
+ # Byte-compiled / optimized / DLL files
21
+ __pycache__/
22
+ *.py[cod]
23
+ *$py.class
24
+
25
+ # C extensions
26
+ *.so
27
+
28
+ # Distribution / packaging
29
+ .Python
30
+ develop-eggs/
31
+ dist/
32
+ downloads/
33
+ eggs/
34
+ .eggs/
35
+ lib64/
36
+ parts/
37
+ sdist/
38
+ var/
39
+ wheels/
40
+ pip-wheel-metadata/
41
+ share/python-wheels/
42
+ *.egg-info/
43
+ .installed.cfg
44
+ *.egg
45
+ MANIFEST
46
+
47
+ # PyInstaller
48
+ # Usually these files are written by a python script from a template
49
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
50
+ *.manifest
51
+ *.spec
52
+
53
+ # Installer logs
54
+ pip-log.txt
55
+ pip-delete-this-directory.txt
56
+
57
+ # Unit test / coverage reports
58
+ htmlcov/
59
+ .tox/
60
+ .nox/
61
+ .coverage
62
+ .coverage.*
63
+ .cache
64
+ nosetests.xml
65
+ coverage.xml
66
+ *.cover
67
+ *.py,cover
68
+ .hypothesis/
69
+ .pytest_cache/
70
+
71
+ # Translations
72
+ *.mo
73
+ *.pot
74
+
75
+ # Django stuff:
76
+ *.log
77
+ local_settings.py
78
+ db.sqlite3
79
+ db.sqlite3-journal
80
+
81
+ # Flask stuff:
82
+ instance/
83
+ .webassets-cache
84
+
85
+ # Scrapy stuff:
86
+ .scrapy
87
+
88
+ # Sphinx documentation
89
+ docs/_build/
90
+ site/
91
+
92
+ # PyBuilder
93
+ target/
94
+
95
+ # Jupyter Notebook
96
+ .ipynb_checkpoints
97
+
98
+ # IPython
99
+ profile_default/
100
+ ipython_config.py
101
+
102
+ # pyenv
103
+ .python-version
104
+
105
+ # pipenv
106
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
107
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
108
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
109
+ # install all needed dependencies.
110
+ #Pipfile.lock
111
+
112
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
113
+ __pypackages__/
114
+
115
+ # Celery stuff
116
+ celerybeat-schedule
117
+ celerybeat.pid
118
+
119
+ # SageMath parsed files
120
+ *.sage.py
121
+
122
+ # Environments
123
+ .direnv/
124
+ .env
125
+ .venv
126
+ env/
127
+ venv*/
128
+ ENV/
129
+ env.bak/
130
+
131
+ # Spyder project settings
132
+ .spyderproject
133
+ .spyproject
134
+
135
+ # Rope project settings
136
+ .ropeproject
137
+
138
+ # mkdocs documentation
139
+ /site
140
+
141
+ # mypy
142
+ .mypy_cache/
143
+ .dmypy.json
144
+ dmypy.json
145
+
146
+ # Pyre type checker
147
+ .pyre/
148
+ llama-*
149
+ vicuna-*
150
+
151
+ # mac
152
+ .DS_Store
153
+
154
+ openai/
155
+
156
+ # news
157
+ CURRENT_BULLETIN.md
158
+
159
+ # AgBenchmark
160
+ agbenchmark/reports/
161
+
162
+ # Nodejs
163
+ package-lock.json
164
+
165
+
166
+ # Allow for locally private items
167
+ # private
168
+ pri*
169
+ # ignore
170
+ ig*
171
+ .github_access_token
172
+ LICENSE.rtf
173
+ rnd/autogpt_server/settings.py
.gitmodules ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ [submodule "forge/tests/vcr_cassettes"]
2
+ path = forge/tests/vcr_cassettes
3
+ url = https://github.com/Significant-Gravitas/Auto-GPT-test-cassettes
.pr_agent.toml ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ [pr_reviewer]
2
+ num_code_suggestions=0
3
+
4
+ [pr_code_suggestions]
5
+ commitable_code_suggestions=false
6
+ num_code_suggestions=0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.4.0
4
+ hooks:
5
+ - id: check-added-large-files
6
+ args: ["--maxkb=500"]
7
+ - id: fix-byte-order-marker
8
+ - id: check-case-conflict
9
+ - id: check-merge-conflict
10
+ - id: check-symlinks
11
+ - id: debug-statements
12
+
13
+ - repo: local
14
+ # isort needs the context of which packages are installed to function, so we
15
+ # can't use a vendored isort pre-commit hook (which runs in its own isolated venv).
16
+ hooks:
17
+ - id: isort-autogpt
18
+ name: Lint (isort) - AutoGPT
19
+ entry: poetry -C autogpt run isort
20
+ files: ^autogpt/
21
+ types: [file, python]
22
+ language: system
23
+
24
+ - id: isort-forge
25
+ name: Lint (isort) - Forge
26
+ entry: poetry -C forge run isort
27
+ files: ^forge/
28
+ types: [file, python]
29
+ language: system
30
+
31
+ - id: isort-benchmark
32
+ name: Lint (isort) - Benchmark
33
+ entry: poetry -C benchmark run isort
34
+ files: ^benchmark/
35
+ types: [file, python]
36
+ language: system
37
+
38
+ - repo: https://github.com/psf/black
39
+ rev: 23.12.1
40
+ # Black has sensible defaults, doesn't need package context, and ignores
41
+ # everything in .gitignore, so it works fine without any config or arguments.
42
+ hooks:
43
+ - id: black
44
+ name: Lint (Black)
45
+ language_version: python3.10
46
+
47
+ - repo: https://github.com/PyCQA/flake8
48
+ rev: 7.0.0
49
+ # To have flake8 load the config of the individual subprojects, we have to call
50
+ # them separately.
51
+ hooks:
52
+ - id: flake8
53
+ name: Lint (Flake8) - AutoGPT
54
+ alias: flake8-autogpt
55
+ files: ^autogpt/(autogpt|scripts|tests)/
56
+ args: [--config=autogpt/.flake8]
57
+
58
+ - id: flake8
59
+ name: Lint (Flake8) - Forge
60
+ alias: flake8-forge
61
+ files: ^forge/(forge|tests)/
62
+ args: [--config=forge/.flake8]
63
+
64
+ - id: flake8
65
+ name: Lint (Flake8) - Benchmark
66
+ alias: flake8-benchmark
67
+ files: ^benchmark/(agbenchmark|tests)/((?!reports).)*[/.]
68
+ args: [--config=benchmark/.flake8]
69
+
70
+ - repo: local
71
+ # To have watertight type checking, we check *all* the files in an affected
72
+ # project. To trigger on poetry.lock we also reset the file `types` filter.
73
+ hooks:
74
+ - id: pyright
75
+ name: Typecheck - AutoGPT
76
+ alias: pyright-autogpt
77
+ entry: poetry -C autogpt run pyright
78
+ args: [-p, autogpt, autogpt]
79
+ # include forge source (since it's a path dependency) but exclude *_test.py files:
80
+ files: ^(autogpt/((autogpt|scripts|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
81
+ types: [file]
82
+ language: system
83
+ pass_filenames: false
84
+
85
+ - id: pyright
86
+ name: Typecheck - Forge
87
+ alias: pyright-forge
88
+ entry: poetry -C forge run pyright
89
+ args: [-p, forge, forge]
90
+ files: ^forge/(forge/|poetry\.lock$)
91
+ types: [file]
92
+ language: system
93
+ pass_filenames: false
94
+
95
+ - id: pyright
96
+ name: Typecheck - Benchmark
97
+ alias: pyright-benchmark
98
+ entry: poetry -C benchmark run pyright
99
+ args: [-p, benchmark, benchmark]
100
+ files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
101
+ types: [file]
102
+ language: system
103
+ pass_filenames: false
104
+
105
+ - repo: local
106
+ hooks:
107
+ - id: pytest-autogpt
108
+ name: Run tests - AutoGPT (excl. slow tests)
109
+ entry: bash -c 'cd autogpt && poetry run pytest --cov=autogpt -m "not slow" tests/unit tests/integration'
110
+ # include forge source (since it's a path dependency) but exclude *_test.py files:
111
+ files: ^(autogpt/((autogpt|tests)/|poetry\.lock$)|forge/(forge/.*(?<!_test)\.py|poetry\.lock)$)
112
+ language: system
113
+ pass_filenames: false
114
+
115
+ - id: pytest-forge
116
+ name: Run tests - Forge (excl. slow tests)
117
+ entry: bash -c 'cd forge && poetry run pytest --cov=forge -m "not slow"'
118
+ files: ^forge/(forge/|tests/|poetry\.lock$)
119
+ language: system
120
+ pass_filenames: false
121
+
122
+ - id: pytest-benchmark
123
+ name: Run tests - Benchmark
124
+ entry: bash -c 'cd benchmark && poetry run pytest --cov=benchmark'
125
+ files: ^benchmark/(agbenchmark/|tests/|poetry\.lock$)
126
+ language: system
127
+ pass_filenames: false
CITATION.cff ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This CITATION.cff file was generated with cffinit.
2
+ # Visit https://bit.ly/cffinit to generate yours today!
3
+
4
+ cff-version: 1.2.0
5
+ title: AutoGPT
6
+ message: >-
7
+ If you use this software, please cite it using the
8
+ metadata from this file.
9
+ type: software
10
+ authors:
11
+ - name: Significant Gravitas
12
+ website: 'https://agpt.co'
13
+ repository-code: 'https://github.com/Significant-Gravitas/AutoGPT'
14
+ url: 'https://agpt.co'
15
+ abstract: >-
16
+ A collection of tools and experimental open-source attempts to make GPT-4 fully
17
+ autonomous.
18
+ keywords:
19
+ - AI
20
+ - Agent
21
+ license: MIT
CLI-USAGE.md ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## CLI Documentation
2
+
3
+ This document describes how to interact with the project's CLI (Command Line Interface). It includes the types of outputs you can expect from each command. Note that the `agents stop` command will terminate any process running on port 8000.
4
+
5
+ ### 1. Entry Point for the CLI
6
+
7
+ Running the `./run` command without any parameters will display the help message, which provides a list of available commands and options. Additionally, you can append `--help` to any command to view help information specific to that command.
8
+
9
+ ```sh
10
+ ./run
11
+ ```
12
+
13
+ **Output**:
14
+
15
+ ```
16
+ Usage: cli.py [OPTIONS] COMMAND [ARGS]...
17
+
18
+ Options:
19
+ --help Show this message and exit.
20
+
21
+ Commands:
22
+ agent Commands to create, start and stop agents
23
+ benchmark Commands to start the benchmark and list tests and categories
24
+ setup Installs dependencies needed for your system.
25
+ ```
26
+
27
+ If you need assistance with any command, simply add the `--help` parameter to the end of your command, like so:
28
+
29
+ ```sh
30
+ ./run COMMAND --help
31
+ ```
32
+
33
+ This will display a detailed help message regarding that specific command, including a list of any additional options and arguments it accepts.
34
+
35
+ ### 2. Setup Command
36
+
37
+ ```sh
38
+ ./run setup
39
+ ```
40
+
41
+ **Output**:
42
+
43
+ ```
44
+ Setup initiated
45
+ Installation has been completed.
46
+ ```
47
+
48
+ This command initializes the setup of the project.
49
+
50
+ ### 3. Agents Commands
51
+
52
+ **a. List All Agents**
53
+
54
+ ```sh
55
+ ./run agent list
56
+ ```
57
+
58
+ **Output**:
59
+
60
+ ```
61
+ Available agents: 🤖
62
+ 🐙 forge
63
+ 🐙 autogpt
64
+ ```
65
+
66
+ Lists all the available agents.
67
+
68
+ **b. Create a New Agent**
69
+
70
+ ```sh
71
+ ./run agent create my_agent
72
+ ```
73
+
74
+ **Output**:
75
+
76
+ ```
77
+ 🎉 New agent 'my_agent' created and switched to the new directory in agents folder.
78
+ ```
79
+
80
+ Creates a new agent named 'my_agent'.
81
+
82
+ **c. Start an Agent**
83
+
84
+ ```sh
85
+ ./run agent start my_agent
86
+ ```
87
+
88
+ **Output**:
89
+
90
+ ```
91
+ ... (ASCII Art representing the agent startup)
92
+ [Date and Time] [forge.sdk.db] [DEBUG] 🐛 Initializing AgentDB with database_string: sqlite:///agent.db
93
+ [Date and Time] [forge.sdk.agent] [INFO] 📝 Agent server starting on http://0.0.0.0:8000
94
+ ```
95
+
96
+ Starts the 'my_agent' and displays startup ASCII art and logs.
97
+
98
+ **d. Stop an Agent**
99
+
100
+ ```sh
101
+ ./run agent stop
102
+ ```
103
+
104
+ **Output**:
105
+
106
+ ```
107
+ Agent stopped
108
+ ```
109
+
110
+ Stops the running agent.
111
+
112
+ ### 4. Benchmark Commands
113
+
114
+ **a. List Benchmark Categories**
115
+
116
+ ```sh
117
+ ./run benchmark categories list
118
+ ```
119
+
120
+ **Output**:
121
+
122
+ ```
123
+ Available categories: 📚
124
+ 📖 code
125
+ 📖 safety
126
+ 📖 memory
127
+ ... (and so on)
128
+ ```
129
+
130
+ Lists all available benchmark categories.
131
+
132
+ **b. List Benchmark Tests**
133
+
134
+ ```sh
135
+ ./run benchmark tests list
136
+ ```
137
+
138
+ **Output**:
139
+
140
+ ```
141
+ Available tests: 📚
142
+ 📖 interface
143
+ 🔬 Search - TestSearch
144
+ 🔬 Write File - TestWriteFile
145
+ ... (and so on)
146
+ ```
147
+
148
+ Lists all available benchmark tests.
149
+
150
+ **c. Show Details of a Benchmark Test**
151
+
152
+ ```sh
153
+ ./run benchmark tests details TestWriteFile
154
+ ```
155
+
156
+ **Output**:
157
+
158
+ ```
159
+ TestWriteFile
160
+ -------------
161
+
162
+ Category: interface
163
+ Task: Write the word 'Washington' to a .txt file
164
+ ... (and other details)
165
+ ```
166
+
167
+ Displays the details of the 'TestWriteFile' benchmark test.
168
+
169
+ **d. Start Benchmark for the Agent**
170
+
171
+ ```sh
172
+ ./run benchmark start my_agent
173
+ ```
174
+
175
+ **Output**:
176
+
177
+ ```
178
+ (more details about the testing process shown whilst the tests are running)
179
+ ============= 13 failed, 1 passed in 0.97s ============...
180
+ ```
181
+
182
+ Displays the results of the benchmark tests on 'my_agent'.
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Code of Conduct for AutoGPT
2
+
3
+ ## 1. Purpose
4
+
5
+ The purpose of this Code of Conduct is to provide guidelines for contributors to the AutoGPT projects on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.
6
+
7
+ ## 2. Scope
8
+
9
+ This Code of Conduct applies to all contributors, maintainers, and users of the AutoGPT project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.
10
+
11
+ ## 3. Our Standards
12
+
13
+ We encourage the following behavior:
14
+
15
+ * Being respectful and considerate to others
16
+ * Actively seeking diverse perspectives
17
+ * Providing constructive feedback and assistance
18
+ * Demonstrating empathy and understanding
19
+
20
+ We discourage the following behavior:
21
+
22
+ * Harassment or discrimination of any kind
23
+ * Disrespectful, offensive, or inappropriate language or content
24
+ * Personal attacks or insults
25
+ * Unwarranted criticism or negativity
26
+
27
+ ## 4. Reporting and Enforcement
28
+
29
+ If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.
30
+
31
+ Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.
32
+
33
+ ## 5. Acknowledgements
34
+
35
+ This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).
36
+
37
+ ## 6. Contact
38
+
39
+ If you have any questions or concerns, please contact the project maintainers on Discord:
40
+ https://discord.gg/autogpt
CONTRIBUTING.md ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AutoGPT Contribution Guide
2
+ If you are reading this, you are probably looking for the full **[contribution guide]**,
3
+ which is part of our [wiki].
4
+
5
+ Also check out our [🚀 Roadmap][roadmap] for information about our priorities and associated tasks.
6
+ <!-- You can find our immediate priorities and their progress on our public [kanban board]. -->
7
+
8
+ [contribution guide]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing
9
+ [wiki]: https://github.com/Significant-Gravitas/AutoGPT/wiki
10
+ [roadmap]: https://github.com/Significant-Gravitas/AutoGPT/discussions/6971
11
+ [kanban board]: https://github.com/orgs/Significant-Gravitas/projects/1
12
+
13
+ ## In short
14
+ 1. Avoid duplicate work, issues, PRs etc.
15
+ 2. We encourage you to collaborate with fellow community members on some of our bigger
16
+ [todo's][roadmap]!
17
+ * We highly recommend posting your idea and discussing it in the [dev channel].
18
+ 3. Create a draft PR when starting work on bigger changes.
19
+ 4. Adhere to the [Code Guidelines]
20
+ 5. Clearly explain your changes when submitting a PR.
21
+ 6. Don't submit broken code: test/validate your changes.
22
+ 7. Avoid making unnecessary changes, especially if they're purely based on your personal
23
+ preferences. Doing so is the maintainers' job. ;-)
24
+ 8. Please also consider contributing something other than code; see the
25
+ [contribution guide] for options.
26
+
27
+ [dev channel]: https://discord.com/channels/1092243196446249134/1095817829405704305
28
+ [code guidelines]: https://github.com/Significant-Gravitas/AutoGPT/wiki/Contributing#code-guidelines
29
+
30
+ If you wish to be involved with the project (beyond just contributing PRs), please read the
31
+ wiki page about [Catalyzing](https://github.com/Significant-Gravitas/AutoGPT/wiki/Catalyzing).
32
+
33
+ In fact, why not just look through the whole wiki (it's only a few pages) and
34
+ hop on our Discord. See you there! :-)
35
+
36
+ ❤️ & 🔆
37
+ The team @ AutoGPT
38
+ https://discord.gg/autogpt
Dockerfile.autogpt ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 'dev' or 'release' container build
2
+ ARG BUILD_TYPE=dev
3
+
4
+ # Use an official Python base image from the Docker Hub
5
+ FROM python:3.10-slim AS autogpt-base
6
+
7
+ # Install browsers
8
+ RUN apt-get update && apt-get install -y \
9
+ chromium-driver ca-certificates gcc \
10
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
11
+
12
+ # Install utilities
13
+ RUN apt-get update && apt-get install -y \
14
+ curl jq wget git \
15
+ && apt-get clean && rm -rf /var/lib/apt/lists/*
16
+
17
+ # Set environment variables
18
+ ENV PIP_NO_CACHE_DIR=yes \
19
+ PYTHONUNBUFFERED=1 \
20
+ PYTHONDONTWRITEBYTECODE=1 \
21
+ POETRY_HOME="/opt/poetry" \
22
+ POETRY_VIRTUALENVS_PATH="/venv" \
23
+ POETRY_VIRTUALENVS_IN_PROJECT=0 \
24
+ POETRY_NO_INTERACTION=1
25
+
26
+ # Install and configure Poetry
27
+ RUN curl -sSL https://install.python-poetry.org | python3 -
28
+ ENV PATH="$POETRY_HOME/bin:$PATH"
29
+ RUN poetry config installer.max-workers 10
30
+
31
+ WORKDIR /app/autogpt
32
+ COPY autogpt/pyproject.toml autogpt/poetry.lock ./
33
+
34
+ # Include forge so it can be used as a path dependency
35
+ COPY forge/ ../forge
36
+
37
+ # Include frontend
38
+ COPY frontend/ ../frontend
39
+
40
+ # Set the entrypoint
41
+ ENTRYPOINT ["poetry", "run", "autogpt"]
42
+ CMD []
43
+
44
+ # dev build -> include everything
45
+ FROM autogpt-base as autogpt-dev
46
+ RUN poetry install --no-cache --no-root \
47
+ && rm -rf $(poetry env info --path)/src
48
+ ONBUILD COPY autogpt/ ./
49
+
50
+ # release build -> include bare minimum
51
+ FROM autogpt-base as autogpt-release
52
+ RUN poetry install --no-cache --no-root --without dev \
53
+ && rm -rf $(poetry env info --path)/src
54
+ ONBUILD COPY autogpt/autogpt/ ./autogpt
55
+ ONBUILD COPY autogpt/scripts/ ./scripts
56
+ ONBUILD COPY autogpt/plugins/ ./plugins
57
+ ONBUILD COPY autogpt/README.md ./README.md
58
+ ONBUILD RUN mkdir ./data
59
+
60
+ FROM autogpt-${BUILD_TYPE} AS autogpt
61
+ RUN poetry install --only-root
FORGE-QUICKSTART.md ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Quickstart Guide
2
+
3
+ > For the complete getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec) <- click here
4
+
5
+ Welcome to the Quickstart Guide! This guide will walk you through setting up, building, and running your own AutoGPT agent. Whether you're a seasoned AI developer or just starting out, this guide will provide you with the steps to jumpstart your journey in AI development with AutoGPT.
6
+
7
+ ## System Requirements
8
+
9
+ This project supports Linux (Debian-based), Mac, and Windows Subsystem for Linux (WSL). If you use a Windows system, you must install WSL. You can find the installation instructions for WSL [here](https://learn.microsoft.com/en-us/windows/wsl/).
10
+
11
+
12
+ ## Getting Setup
13
+ 1. **Fork the Repository**
14
+ To fork the repository, follow these steps:
15
+ - Navigate to the main page of the repository.
16
+
17
+ ![Repository](docs/content/imgs/quickstart/001_repo.png)
18
+ - In the top-right corner of the page, click Fork.
19
+
20
+ ![Create Fork UI](docs/content/imgs/quickstart/002_fork.png)
21
+ - On the next page, select your GitHub account to create the fork.
22
+ - Wait for the forking process to complete. You now have a copy of the repository in your GitHub account.
23
+
24
+ 2. **Clone the Repository**
25
+ To clone the repository, you need to have Git installed on your system. If you don't have Git installed, download it from [here](https://git-scm.com/downloads). Once you have Git installed, follow these steps:
26
+ - Open your terminal.
27
+ - Navigate to the directory where you want to clone the repository.
28
+ - Run the git clone command for the fork you just created
29
+
30
+ ![Clone the Repository](docs/content/imgs/quickstart/003_clone.png)
31
+
32
+ - Then open your project in your IDE
33
+
34
+ ![Open the Project in your IDE](docs/content/imgs/quickstart/004_ide.png)
35
+
36
+ 3. **Set up the Project**
37
+ Next, we need to set up the required dependencies. We have a tool to help you perform all the tasks on the repo.
38
+ It can be accessed by running the `run` command by typing `./run` in the terminal.
39
+
40
+ The first command you need to use is `./run setup`. This will guide you through setting up your system.
41
+ Initially, you will get instructions for installing Flutter and Chrome and setting up your GitHub access token like the following image:
42
+
43
+ ![Setup the Project](docs/content/imgs/quickstart/005_setup.png)
44
+
45
+ ### For Windows Users
46
+
47
+ If you're a Windows user and experience issues after installing WSL, follow the steps below to resolve them.
48
+
49
+ #### Update WSL
50
+ Run the following command in Powershell or Command Prompt:
51
+ 1. Enable the optional WSL and Virtual Machine Platform components.
52
+ 2. Download and install the latest Linux kernel.
53
+ 3. Set WSL 2 as the default.
54
+ 4. Download and install the Ubuntu Linux distribution (a reboot may be required).
55
+
56
+ ```shell
57
+ wsl --install
58
+ ```
59
+
60
+ For more detailed information and additional steps, refer to [Microsoft's WSL Setup Environment Documentation](https://learn.microsoft.com/en-us/windows/wsl/setup/environment).
61
+
62
+ #### Resolve FileNotFoundError or "No such file or directory" Errors
63
+ When you run `./run setup`, if you encounter errors like `No such file or directory` or `FileNotFoundError`, it might be because Windows-style line endings (CRLF - Carriage Return Line Feed) are not compatible with Unix/Linux style line endings (LF - Line Feed).
64
+
65
+ To resolve this, you can use the `dos2unix` utility to convert the line endings in your script from CRLF to LF. Here’s how to install and run `dos2unix` on the script:
66
+
67
+ ```shell
68
+ sudo apt update
69
+ sudo apt install dos2unix
70
+ dos2unix ./run
71
+ ```
72
+
73
+ After executing the above commands, running `./run setup` should work successfully.
74
+
75
+ #### Store Project Files within the WSL File System
76
+ If you continue to experience issues, consider storing your project files within the WSL file system instead of the Windows file system. This method avoids path translations and permissions issues and provides a more consistent development environment.
77
+
78
+ You can keep running the command to get feedback on where you are up to with your setup.
79
+ When setup has been completed, the command will return an output like this:
80
+
81
+ ![Setup Complete](docs/content/imgs/quickstart/006_setup_complete.png)
82
+
83
+ ## Creating Your Agent
84
+
85
+ After completing the setup, the next step is to create your agent template.
86
+ Execute the command `./run agent create YOUR_AGENT_NAME`, where `YOUR_AGENT_NAME` should be replaced with your chosen name.
87
+
88
+ Tips for naming your agent:
89
+ * Give it its own unique name, or name it after yourself
90
+ * Include an important aspect of your agent in the name, such as its purpose
91
+
92
+ Examples: `SwiftyosAssistant`, `PwutsPRAgent`, `MySuperAgent`
93
+
94
+ ![Create an Agent](docs/content/imgs/quickstart/007_create_agent.png)
95
+
96
+ ## Running your Agent
97
+
98
+ Your agent can be started using the command: `./run agent start YOUR_AGENT_NAME`
99
+
100
+ This starts the agent on the URL: `http://localhost:8000/`
101
+
102
+ ![Start the Agent](docs/content/imgs/quickstart/009_start_agent.png)
103
+
104
+ The front end can be accessed from `http://localhost:8000/`; first, you must log in using either a Google account or your GitHub account.
105
+
106
+ ![Login](docs/content/imgs/quickstart/010_login.png)
107
+
108
+ Upon logging in, you will get a page that looks something like this: your task history down the left-hand side of the page, and the 'chat' window to send tasks to your agent.
109
+
110
+ ![Login](docs/content/imgs/quickstart/011_home.png)
111
+
112
+ When you have finished with your agent or just need to restart it, use Ctrl-C to end the session. Then, you can re-run the start command.
113
+
114
+ If you are having issues and want to ensure the agent has been stopped, there is a `./run agent stop` command, which will kill the process using port 8000, which should be the agent.
115
+
116
+ ## Benchmarking your Agent
117
+
118
+ The benchmarking system can also be accessed using the CLI:
119
+
120
+ ```bash
121
+ agpt % ./run benchmark
122
+ Usage: cli.py benchmark [OPTIONS] COMMAND [ARGS]...
123
+
124
+ Commands to start the benchmark and list tests and categories
125
+
126
+ Options:
127
+ --help Show this message and exit.
128
+
129
+ Commands:
130
+ categories Benchmark categories group command
131
+ start Starts the benchmark command
132
+ tests Benchmark tests group command
133
+ agpt % ./run benchmark categories
134
+ Usage: cli.py benchmark categories [OPTIONS] COMMAND [ARGS]...
135
+
136
+ Benchmark categories group command
137
+
138
+ Options:
139
+ --help Show this message and exit.
140
+
141
+ Commands:
142
+ list List benchmark categories command
143
+ agpt % ./run benchmark tests
144
+ Usage: cli.py benchmark tests [OPTIONS] COMMAND [ARGS]...
145
+
146
+ Benchmark tests group command
147
+
148
+ Options:
149
+ --help Show this message and exit.
150
+
151
+ Commands:
152
+ details Benchmark test details command
153
+ list List benchmark tests command
154
+ ```
155
+
156
+ The benchmark has been split into different categories of skills you can test your agent on. You can see what categories are available with
157
+ ```bash
158
+ ./run benchmark categories list
159
+ # And what tests are available with
160
+ ./run benchmark tests list
161
+ ```
162
+
163
+ ![Login](docs/content/imgs/quickstart/012_tests.png)
164
+
165
+
166
+ Finally, you can run the benchmark with
167
+
168
+ ```bash
169
+ ./run benchmark start YOUR_AGENT_NAME
170
+
171
+ ```
172
+
173
+ >
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Toran Bruce Richards
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,12 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
- title: Autogpt
3
- emoji:
4
- colorFrom: yellow
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 4.41.0
8
- app_file: app.py
9
- pinned: false
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
1
+ # AutoGPT: Build & Use AI Agents
2
+
3
+ [![Discord Follow](https://dcbadge.vercel.app/api/server/autogpt?style=flat)](https://discord.gg/autogpt) &ensp;
4
+ [![Twitter Follow](https://img.shields.io/twitter/follow/Auto_GPT?style=social)](https://twitter.com/Auto_GPT) &ensp;
5
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
6
+
7
+ **AutoGPT** is a powerful tool that lets you create and run intelligent agents. These agents can perform various tasks automatically, making your life easier.
8
+
9
+ ## How to Get Started
10
+
11
+ https://github.com/user-attachments/assets/8508f4dc-b362-4cab-900f-644964a96cdf
12
+
13
+ ### 🧱 AutoGPT Builder
14
+
15
+ The AutoGPT Builder is the frontend. It allows you to design agents using an easy flowchart style. You build your agent by connecting blocks, where each block performs a single action. It's simple and intuitive!
16
+
17
+ [Read this guide](https://docs.agpt.co/server/new_blocks/) to learn how to build your own custom blocks.
18
+
19
+ ### 💽 AutoGPT Server
20
+
21
+ The AutoGPT Server is the backend. This is where your agents run. Once deployed, agents can be triggered by external sources and can operate continuously.
22
+
23
+ ### 🐙 Example Agents
24
+
25
+ Here are two examples of what you can do with AutoGPT:
26
+
27
+ 1. **Reddit Marketing Agent**
28
+ - This agent reads comments on Reddit.
29
+ - It looks for people asking about your product.
30
+ - It then automatically responds to them.
31
+
32
+ 2. **YouTube Content Repurposing Agent**
33
+ - This agent subscribes to your YouTube channel.
34
+ - When you post a new video, it transcribes it.
35
+ - It uses AI to write a search engine optimized blog post.
36
+ - Then, it publishes this blog post to your Medium account.
37
+
38
+ These examples show just a glimpse of what you can achieve with AutoGPT!
39
+
40
+ ---
41
+ Our mission is to provide the tools, so that you can focus on what matters:
42
+
43
+ - 🏗️ **Building** - Lay the foundation for something amazing.
44
+ - 🧪 **Testing** - Fine-tune your agent to perfection.
45
+ - 🤝 **Delegating** - Let AI work for you, and have your ideas come to life.
46
+
47
+ Be part of the revolution! **AutoGPT** is here to stay, at the forefront of AI innovation.
48
+
49
+ **📖 [Documentation](https://docs.agpt.co)**
50
+ &ensp;|&ensp;
51
+ **🚀 [Contributing](CONTRIBUTING.md)**
52
+
53
+
54
  ---
55
+ ## 🤖 AutoGPT Classic
56
+ > Below is information about the classic version of AutoGPT.
57
+
58
+ **🛠️ [Build your own Agent - Quickstart](FORGE-QUICKSTART.md)**
59
+ ### 🏗️ Forge
60
+
61
+ **Forge your own agent!** &ndash; Forge is a ready-to-go template for your agent application. All the boilerplate code is already handled, letting you channel all your creativity into the things that set *your* agent apart. All tutorials are located [here](https://medium.com/@aiedge/autogpt-forge-e3de53cc58ec). Components from the [`forge.sdk`](/forge/forge/sdk) can also be used individually to speed up development and reduce boilerplate in your agent project.
62
+
63
+ 🚀 [**Getting Started with Forge**](https://github.com/Significant-Gravitas/AutoGPT/blob/master/forge/tutorials/001_getting_started.md) &ndash;
64
+ This guide will walk you through the process of creating your own agent and using the benchmark and user interface.
65
+
66
+ 📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/forge) about Forge
67
+
68
+ ### 🎯 Benchmark
69
+
70
+ **Measure your agent's performance!** The `agbenchmark` can be used with any agent that supports the agent protocol, and the integration with the project's [CLI] makes it even easier to use with AutoGPT and forge-based agents. The benchmark offers a stringent testing environment. Our framework allows for autonomous, objective performance evaluations, ensuring your agents are primed for real-world action.
71
+
72
+ <!-- TODO: insert visual demonstrating the benchmark -->
73
+
74
+ 📦 [`agbenchmark`](https://pypi.org/project/agbenchmark/) on Pypi
75
+ &ensp;|&ensp;
76
+ 📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/blob/master/benchmark) about the Benchmark
77
+
78
+ ### 💻 UI
79
+
80
+ **Makes agents easy to use!** The `frontend` gives you a user-friendly interface to control and monitor your agents. It connects to agents through the [agent protocol](#-agent-protocol), ensuring compatibility with many agents from both inside and outside of our ecosystem.
81
+
82
+ <!-- TODO: insert screenshot of front end -->
83
+
84
+ The frontend works out-of-the-box with all agents in the repo. Just use the [CLI] to run your agent of choice!
85
+
86
+ 📘 [Learn More](https://github.com/Significant-Gravitas/AutoGPT/tree/master/frontend) about the Frontend
87
+
88
+ ### ⌨️ CLI
89
+
90
+ [CLI]: #-cli
91
+
92
+ To make it as easy as possible to use all of the tools offered by the repository, a CLI is included at the root of the repo:
93
+
94
+ ```shell
95
+ $ ./run
96
+ Usage: cli.py [OPTIONS] COMMAND [ARGS]...
97
+
98
+ Options:
99
+ --help Show this message and exit.
100
+
101
+ Commands:
102
+ agent Commands to create, start and stop agents
103
+ benchmark Commands to start the benchmark and list tests and categories
104
+ setup Installs dependencies needed for your system.
105
+ ```
106
+
107
+ Just clone the repo, install dependencies with `./run setup`, and you should be good to go!
108
+
109
+ ## 🤔 Questions? Problems? Suggestions?
110
+
111
+ ### Get help - [Discord 💬](https://discord.gg/autogpt)
112
+
113
+ [![Join us on Discord](https://invidget.switchblade.xyz/autogpt)](https://discord.gg/autogpt)
114
+
115
+ To report a bug or request a feature, create a [GitHub Issue](https://github.com/Significant-Gravitas/AutoGPT/issues/new/choose). Please ensure someone else hasn’t created an issue for the same topic.
116
+
117
+ ## 🤝 Sister projects
118
+
119
+ ### 🔄 Agent Protocol
120
+
121
+ To maintain a uniform standard and ensure seamless compatibility with many current and future applications, AutoGPT employs the [agent protocol](https://agentprotocol.ai/) standard by the AI Engineer Foundation. This standardizes the communication pathways from your agent to the frontend and benchmark.
122
+
123
  ---
124
 
125
+ <p align="center">
126
+ <a href="https://star-history.com/#Significant-Gravitas/AutoGPT">
127
+ <picture>
128
+ <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date&theme=dark" />
129
+ <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
130
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=Significant-Gravitas/AutoGPT&type=Date" />
131
+ </picture>
132
+ </a>
133
+ </p>
SECURITY.md ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Security Policy
2
+
3
+ - [**Using AutoGPT Securely**](#using-AutoGPT-securely)
4
+ - [Restrict Workspace](#restrict-workspace)
5
+ - [Untrusted inputs](#untrusted-inputs)
6
+ - [Data privacy](#data-privacy)
7
+ - [Untrusted environments or networks](#untrusted-environments-or-networks)
8
+ - [Multi-Tenant environments](#multi-tenant-environments)
9
+ - [**Reporting a Vulnerability**](#reporting-a-vulnerability)
10
+
11
+ ## Using AutoGPT Securely
12
+
13
+ ### Restrict Workspace
14
+
15
+ Since agents can read and write files, it is important to keep them restricted to a specific workspace. This happens by default *unless* RESTRICT_TO_WORKSPACE is set to False.
16
+
17
+ Disabling RESTRICT_TO_WORKSPACE can increase security risks. However, if you still need to disable it, consider running AutoGPT inside a [sandbox](https://developers.google.com/code-sandboxing), to mitigate some of these risks.
18
+
19
+ ### Untrusted inputs
20
+
21
+ When handling untrusted inputs, it's crucial to isolate the execution and carefully pre-process inputs to mitigate script injection risks.
22
+
23
+ For maximum security when handling untrusted inputs, you may need to employ the following:
24
+
25
+ * Sandboxing: Isolate the process.
26
+ * Updates: Keep your libraries (including AutoGPT) updated with the latest security patches.
27
+ * Input Sanitization: Before feeding data to the model, sanitize inputs rigorously. This involves techniques such as:
28
+ * Validation: Enforce strict rules on allowed characters and data types.
29
+ * Filtering: Remove potentially malicious scripts or code fragments.
30
+ * Encoding: Convert special characters into safe representations.
31
+ * Verification: Run tooling that identifies potential script injections (e.g. [models that detect prompt injection attempts](https://python.langchain.com/docs/guides/safety/hugging_face_prompt_injection)).
32
+
33
+ ### Data privacy
34
+
35
+ To protect sensitive data from potential leaks or unauthorized access, it is crucial to sandbox the agent execution. This means running it in a secure, isolated environment, which helps mitigate many attack vectors.
36
+
37
+ ### Untrusted environments or networks
38
+
39
+ Since AutoGPT performs network calls to the OpenAI API, it is important to always run it in trusted environments and networks. Running it in untrusted environments can expose your API key to attackers.
40
+ Additionally, running it on an untrusted network can expose your data to potential network attacks.
41
+
42
+ However, even when running on trusted networks, it is important to always encrypt sensitive data while sending it over the network.
43
+
44
+ ### Multi-Tenant environments
45
+
46
+ If you intend to run multiple AutoGPT brains in parallel, it is your responsibility to ensure the models do not interact or access each other's data.
47
+
48
+ The primary areas of concern are tenant isolation, resource allocation, model sharing and hardware attacks.
49
+
50
+ - Tenant Isolation: you must make sure that the tenants run separately to prevent unwanted access to the data from other tenants. Keeping model network traffic separate is also important because you not only prevent unauthorized access to data, but also prevent malicious users or tenants sending prompts to execute under another tenant’s identity.
51
+
52
+ - Resource Allocation: a denial of service caused by one tenant can affect the overall system health. Implement safeguards like rate limits, access controls, and health monitoring.
53
+
54
+ - Data Sharing: in a multi-tenant design with data sharing, ensure tenants and users understand the security risks and sandbox agent execution to mitigate risks.
55
+
56
+ - Hardware Attacks: the hardware (GPUs or TPUs) can also be attacked. [Research](https://scholar.google.com/scholar?q=gpu+side+channel) has shown that side channel attacks on GPUs are possible, which can make data leak from other brains or processes running on the same system at the same time.
57
+
58
+ ## Reporting a Vulnerability
59
+
60
+ Beware that none of the topics under [Using AutoGPT Securely](#using-AutoGPT-securely) are considered vulnerabilities on AutoGPT.
61
+
62
+ However, If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
63
+
64
+ Please disclose it as a private [security advisory](https://github.com/Significant-Gravitas/AutoGPT/security/advisories/new).
65
+
66
+ A team of volunteers on a reasonable-effort basis maintains this project. As such, please give us at least 90 days to work on a fix before public exposure.
TROUBLESHOOTING.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This page is a list of issues you may encounter, along with their fixes.
2
+
3
+ # Forge
4
+ **Poetry configuration invalid**
5
+
6
+ The poetry configuration is invalid:
7
+ - Additional properties are not allowed ('group' was unexpected)
8
+ <img width="487" alt="Screenshot 2023-09-22 at 5 42 59 PM" src="https://github.com/Significant-Gravitas/AutoGPT/assets/9652976/dd451e6b-8114-44de-9928-075f5f06d661">
9
+
10
+ **Pydantic Validation Error**
11
+
12
+ Remove your SQLite agent.db file. It's probably because some of your data is not complying with the new spec (we will create migrations soon to avoid this problem)
13
+
14
+
15
+ *Solution*
16
+
17
+ Update poetry
18
+
19
+ # Benchmark
20
+ TODO
21
+
22
+ # Frontend
23
+ TODO
assets/gpt_dark_RGB.icns ADDED
Binary file (83.7 kB). View file
 
assets/gpt_dark_RGB.ico ADDED

Git LFS Details

  • SHA256: 559a7286f2aba9a84bdedd42f3d0fc4b4308bf9fa0c3cfd9e183f4095e04d4dd
  • Pointer size: 132 Bytes
  • Size of remote file: 1.14 MB
assets/gpt_dark_RGB.png ADDED
autogpt/.coveragerc ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [run]
2
+ relative_files = true
autogpt/.devcontainer/Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use an official Python base image from the Docker Hub
2
+ FROM python:3.10
3
+
4
+ # Install browsers
5
+ RUN apt-get update && apt-get install -y \
6
+ chromium-driver firefox-esr \
7
+ ca-certificates
8
+
9
+ # Install utilities
10
+ RUN apt-get install -y curl jq wget git
11
+
12
+ # Declare working directory
13
+ WORKDIR /workspace/AutoGPT
autogpt/.devcontainer/devcontainer.json ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "dockerComposeFile": "./docker-compose.yml",
3
+ "service": "auto-gpt",
4
+ "workspaceFolder": "/workspace/AutoGPT",
5
+ "shutdownAction": "stopCompose",
6
+ "features": {
7
+ "ghcr.io/devcontainers/features/common-utils:2": {
8
+ "installZsh": "true",
9
+ "username": "vscode",
10
+ "userUid": "1000",
11
+ "userGid": "1000",
12
+ "upgradePackages": "true"
13
+ },
14
+ "ghcr.io/devcontainers/features/desktop-lite:1": {},
15
+ "ghcr.io/devcontainers/features/github-cli:1": {},
16
+ "ghcr.io/devcontainers/features/python:1": "none",
17
+ "ghcr.io/devcontainers/features/node:1": "none",
18
+ "ghcr.io/devcontainers/features/git:1": {
19
+ "version": "latest",
20
+ "ppa": "false"
21
+ }
22
+ },
23
+ // Configure tool-specific properties.
24
+ "customizations": {
25
+ // Configure properties specific to VS Code.
26
+ "vscode": {
27
+ // Set *default* container specific settings.json values on container create.
28
+ "settings": {
29
+ "python.defaultInterpreterPath": "/usr/local/bin/python",
30
+ "python.testing.pytestEnabled": true,
31
+ "python.testing.unittestEnabled": false
32
+ },
33
+ "extensions": [
34
+ "ms-python.python",
35
+ "VisualStudioExptTeam.vscodeintellicode",
36
+ "ms-python.vscode-pylance",
37
+ "ms-python.black-formatter",
38
+ "ms-python.isort",
39
+ "GitHub.vscode-pull-request-github",
40
+ "GitHub.copilot",
41
+ "github.vscode-github-actions"
42
+ ]
43
+ }
44
+ },
45
+ // Use 'forwardPorts' to make a list of ports inside the container available locally.
46
+ // "forwardPorts": [],
47
+
48
+ // Use 'postCreateCommand' to run commands after the container is created.
49
+ // "postCreateCommand": "poetry install",
50
+
51
+ // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
52
+ "remoteUser": "vscode",
53
+
54
+ // Add the freshly containerized repo to the list of safe repositories
55
+ "postCreateCommand": "git config --global --add safe.directory /workspace/AutoGPT && poetry install"
56
+ }