qingxu98 committed
Commit 17d0a32
Parent: 971ac20

version 3.6

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. Dockerfile +12 -11
  2. README.md +70 -85
  3. app.py +108 -17
  4. check_proxy.py +4 -4
  5. config.py +52 -11
  6. core_functional.py +12 -5
  7. crazy_functional.py +55 -25
  8. crazy_functions/Latex全文润色.py +11 -11
  9. crazy_functions/Latex全文翻译.py +8 -8
  10. crazy_functions/Latex输出PDF结果.py +7 -7
  11. crazy_functions/agent_fns/auto_agent.py +23 -0
  12. crazy_functions/agent_fns/echo_agent.py +19 -0
  13. crazy_functions/agent_fns/general.py +134 -0
  14. crazy_functions/agent_fns/persistent.py +16 -0
  15. crazy_functions/agent_fns/pipe.py +194 -0
  16. crazy_functions/agent_fns/watchdog.py +28 -0
  17. crazy_functions/crazy_utils.py +22 -61
  18. crazy_functions/latex_fns/latex_actions.py +25 -6
  19. crazy_functions/latex_fns/latex_toolbox.py +70 -1
  20. crazy_functions/live_audio/aliyunASR.py +138 -6
  21. crazy_functions/live_audio/audio_io.py +1 -1
  22. crazy_functions/multi_stage/multi_stage_utils.py +45 -0
  23. crazy_functions/pdf_fns/parse_pdf.py +4 -4
  24. crazy_functions/pdf_fns/report_gen_html.py +58 -0
  25. crazy_functions/pdf_fns/report_template.html +0 -0
  26. crazy_functions/vt_fns/vt_call_plugin.py +1 -1
  27. crazy_functions/vt_fns/vt_modify_config.py +3 -3
  28. crazy_functions/下载arxiv论文翻译摘要.py +5 -5
  29. crazy_functions/图片生成.py +151 -16
  30. crazy_functions/多智能体.py +108 -0
  31. crazy_functions/对话历史存档.py +18 -8
  32. crazy_functions/总结word文档.py +5 -5
  33. crazy_functions/总结音视频.py +6 -6
  34. crazy_functions/批量Markdown翻译.py +12 -12
  35. crazy_functions/批量总结PDF文档.py +5 -5
  36. crazy_functions/批量总结PDF文档pdfminer.py +4 -4
  37. crazy_functions/批量翻译PDF文档_NOUGAT.py +28 -18
  38. crazy_functions/批量翻译PDF文档_多线程.py +9 -12
  39. crazy_functions/理解PDF文档内容.py +6 -6
  40. crazy_functions/生成函数注释.py +3 -3
  41. crazy_functions/联网的ChatGPT.py +2 -2
  42. crazy_functions/联网的ChatGPT_bing版.py +2 -2
  43. crazy_functions/虚空终端.py +1 -1
  44. crazy_functions/解析JupyterNotebook.py +4 -4
  45. crazy_functions/解析项目源代码.py +24 -24
  46. crazy_functions/询问多个大语言模型.py +4 -3
  47. crazy_functions/语音助手.py +26 -33
  48. crazy_functions/读文章写摘要.py +3 -3
  49. crazy_functions/谷歌检索小助手.py +10 -4
  50. crazy_functions/辅助功能.py +17 -5
Dockerfile CHANGED
@@ -1,34 +1,35 @@
-# This Dockerfile builds the "no local model" environment. If you need local models such as chatglm, or the latex runtime dependencies, see docker-compose.yml
-# How to build: edit `config.py` first, then `docker build -t gpt-academic . `
-# How to run (on Linux): `docker run --rm -it --net=host gpt-academic `
-# How to run (other OS, pick any fixed port such as 50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
+# This Dockerfile builds a minimal runtime environment with "no local models"
+# If you need local models such as chatglm, or the latex runtime dependencies, see docker-compose.yml
+# - How to build: edit `config.py` first, then `docker build -t gpt-academic . `
+# - How to run (on Linux): `docker run --rm -it --net=host gpt-academic `
+# - How to run (other OS, pick any fixed port such as 50923): `docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic `
 FROM python:3.11
 
 
-# Optional step: switch to a pip mirror
+# Optional step: switch to a pip mirror (the following three lines can be removed)
 RUN echo '[global]' > /etc/pip.conf && \
     echo 'index-url = https://mirrors.aliyun.com/pypi/simple/' >> /etc/pip.conf && \
     echo 'trusted-host = mirrors.aliyun.com' >> /etc/pip.conf
 
 
-# Enter the working directory
+# Enter the working directory (required)
 WORKDIR /gpt
 
 
-# Install most dependencies first, so later builds can reuse the Docker layer cache
+# Install most dependencies first, so later builds can reuse the Docker layer cache (the following three lines can be removed)
 COPY requirements.txt ./
-COPY ./docs/gradio-3.32.2-py3-none-any.whl ./docs/gradio-3.32.2-py3-none-any.whl
+COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
 RUN pip3 install -r requirements.txt
 
 
-# Copy the project files and install the remaining dependencies
+# Copy the project files and install the remaining dependencies (required)
 COPY . .
 RUN pip3 install -r requirements.txt
 
 
-# Optional step: warm up modules
+# Optional step: warm up modules (can be removed)
 RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
 
 
-# Launch
+# Launch (required)
 CMD ["python3", "-u", "main.py"]
README.md CHANGED
@@ -11,26 +11,27 @@ pinned: false
 
 # ChatGPT 学术优化
 > **Note**
->
-> 2023.7.8: Gradio and Pydantic dependencies were adjusted and `requirements.txt` has been updated. Please **update your code** promptly; when installing dependencies, strictly use the versions **pinned** in `requirements.txt`
->
-> `pip install -r requirements.txt`
+>
+> 2023.11.12: Some dependency packages are not yet compatible with python 3.12; python 3.11 is recommended.
+>
+> 2023.11.7: When installing dependencies, please use the versions **pinned** in `requirements.txt`. Install command: `pip install -r requirements.txt`. This project is open source and free; we have recently found people disregarding the open-source license and using this project to make money improperly. Please stay alert and beware of scams.
+
 
 
 # <div align=center><img src="docs/logo.png" width="40"> GPT 学术优化 (GPT Academic)</div>
 
-**If you like this project, please give it a Star; if you have invented a handy shortcut key or function plugin, pull requests are welcome!**
+**If you like this project, please give it a Star; if you have invented a handy shortcut key or plugin, pull requests are welcome!**
 
-If you like this project, please give it a Star. If you've come up with more useful academic shortcuts or functional plugins, feel free to open an issue or pull request. We also have a README in [English|](docs/README_EN.md)[日本語|](docs/README_JP.md)[한국어|](https://github.com/mldljyh/ko_gpt_academic)[Русский|](docs/README_RS.md)[Français](docs/README_FR.md) translated by this project itself.
+If you like this project, please give it a Star. We also have a README in [English|](docs/README.English.md)[日本語|](docs/README.Japanese.md)[한국어|](docs/README.Korean.md)[Русский|](docs/README.Russian.md)[Français](docs/README.French.md) translated by this project itself.
 To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
 
 > **Note**
 >
-> 1. Please note that only function plugins (buttons) marked with a **highlight** can read files, and some plugins live in the **dropdown menu** of the plugin area. PRs for any new plugin are welcomed and handled with **top priority**.
+> 1. Please note that only plugins (buttons) marked with a **highlight** can read files, and some plugins live in the **dropdown menu** of the plugin area. PRs for any new plugin are welcomed and handled with **top priority**.
 >
-> 2. The function of every file in this project is documented in the self-translated report [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As versions iterate, you can also click the relevant function plugin at any time to call GPT and regenerate the project's self-analysis report. FAQ: [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Installation](#installation) | [Configuration guide](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E).
+> 2. The function of every file in this project is documented in the self-translated report [`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As versions iterate, you can also click the relevant function plugin at any time to call GPT and regenerate the project's self-analysis report. FAQ: [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standard installation](#installation) | [One-click install script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration guide](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
 >
-> 3. This project is compatible with, and encourages trying, Chinese large language models such as ChatGLM, MOSS and others. Multiple api-keys can coexist: fill them into the config file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To swap the `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to submit; it takes effect immediately.
+> 3. This project is compatible with, and encourages trying, Chinese large language models such as ChatGLM. Multiple api-keys can coexist: fill them into the config file like `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. To swap the `API_KEY` temporarily, enter the temporary `API_KEY` in the input area and press Enter to submit; it takes effect immediately.
 
 
 
@@ -39,41 +40,38 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l
 
 Feature (⭐ = recently added) | Description
 --- | ---
-⭐[Connect new models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [InternLM](https://github.com/InternLM/InternLM), iFlytek [Spark](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
-One-click polishing | Supports one-click polishing and one-click grammar checking of papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Display, explain, generate, and annotate code
+⭐[Connect new models](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B)! | Baidu [Qianfan](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu) and Wenxin Yiyan, [Tongyi Qianwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary), Shanghai AI-Lab [InternLM](https://github.com/InternLM/InternLM), iFlytek [Spark](https://xinghuo.xfyun.cn/), [LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), Zhipu API, DALLE3
+Polishing, translation, code explanation | One-click polishing, translation, grammar checking of papers, and code explanation
 [Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
-[Program self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click understanding](https://github.com/binary-husky/gpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A) of this project's own source code
-[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of other Python/C/C++/Java/Lua/... project trees
-Read papers, [translate](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Function plugin] One-click interpretation of a full latex/pdf paper plus abstract generation
-Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/) and [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Function plugin] One-click translation or polishing of a latex paper
-Batch comment generation | [Function plugin] One-click batch generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Did you see the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
-Chat analysis report generation | [Function plugin] Automatically generates a summary report after running
-[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] Extract a PDF paper's title & abstract and translate the full text (multithreaded)
-[Arxiv helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter an arxiv article url to translate the abstract and download the PDF in one click
-One-click Latex proofreading | [Function plugin] Grammarly-style grammar and spelling correction of a Latex paper plus a side-by-side comparison PDF
-[Google Scholar integration helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let gpt [write the related works](https://www.bilibili.com/video/BV1GP411U7Az/) for you
-Internet information aggregation + GPT | [Function plugin] One click to [let GPT fetch information from the internet](https://www.bilibili.com/video/BV1om4y127ck) before answering, so information never goes stale
-Fine-grained Arxiv paper translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Function plugin] One click to [translate an arxiv paper at very high quality](https://www.bilibili.com/video/BV1dz4y1v77A/); currently the best paper translation tool available
-⭐[Real-time voice conversation input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Function plugin] Asynchronously [listens to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), segments sentences automatically, and finds the right moment to answer
+Modular design | Supports powerful custom [plugins](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
+[Program analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Plugin] One-click analysis of Python/C/C++/Java/Lua/... project trees, or [self-analysis](https://www.bilibili.com/video/BV1cj411A7VW)
+Read papers, [translate](https://www.bilibili.com/video/BV1KT411x7Wn) papers | [Plugin] One-click interpretation of a full latex/pdf paper plus abstract generation
+Latex full-text [translation](https://www.bilibili.com/video/BV1nk4y1Y7Js/) and [polishing](https://www.bilibili.com/video/BV1FT411H7c5/) | [Plugin] One-click translation or polishing of a latex paper
+Batch comment generation | [Plugin] One-click batch generation of function comments
+Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Plugin] Did you see the [README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md) in the five languages above?
+Chat analysis report generation | [Plugin] Automatically generates a summary report after running
+[Full PDF paper translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Plugin] Extract a PDF paper's title & abstract and translate the full text (multithreaded)
+[Arxiv helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Plugin] Enter an arxiv article url to translate the abstract and download the PDF in one click
+One-click Latex proofreading | [Plugin] Grammarly-style grammar and spelling correction of a Latex paper plus a side-by-side comparison PDF
+[Google Scholar integration helper](https://www.bilibili.com/video/BV19L411U7ia) | [Plugin] Given any Google Scholar search page URL, let gpt [write the related works](https://www.bilibili.com/video/BV1GP411U7Az/) for you
+Internet information aggregation + GPT | [Plugin] One click to [let GPT fetch information from the internet](https://www.bilibili.com/video/BV1om4y127ck) before answering, so information never goes stale
+⭐Fine-grained Arxiv paper translation ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [Plugin] One click to [translate an arxiv paper at very high quality](https://www.bilibili.com/video/BV1dz4y1v77A/); currently the best paper translation tool available
+⭐[Real-time voice conversation input](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [Plugin] Asynchronously [listens to audio](https://www.bilibili.com/video/BV1AV4y187Uy/), segments sentences automatically, and finds the right moment to answer
 Formula/image/table display | Can show both the [tex form and the rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png) of a formula; supports formula and code highlighting
-Multithreaded function plugin support | Supports multithreaded chatgpt calls for one-click processing of [huge amounts of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
+⭐AutoGen multi-agent plugin | [Plugin] Explore the possibilities of multi-agent emergent intelligence with Microsoft AutoGen!
 Launch dark [theme](https://github.com/binary-husky/gpt_academic/issues/173) | Append ```/?__theme=dark``` to the browser url to switch to the dark theme
 [Multi-LLM](https://www.bilibili.com/video/BV1wT411p7yf) support | Being served by GPT3.5, GPT4, [Tsinghua ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) and [Fudan MOSS](https://github.com/OpenLMLab/MOSS) all at once must feel great, right?
 ⭐ChatGLM2 fine-tuned models | Supports loading ChatGLM2 fine-tuned models; provides a ChatGLM2 fine-tuning helper plugin
 More LLM integrations, supports [huggingface deployment](https://huggingface.co/spaces/qingxu98/gpt-academic) | Newbing interface (New Bing) added; Tsinghua [Jittorllms](https://github.com/Jittor/JittorLLMs) introduced, supporting [LLaMA](https://github.com/facebookresearch/llama) and [PanGu-α](https://openi.org.cn/pangu/)
 ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip package | Call all of this project's function plugins directly from Python, without the GUI (in development)
-⭐Void Terminal plugin | [Function plugin] Use natural language to dispatch the project's other plugins directly
+⭐Void Terminal plugin | [Plugin] Use natural language to dispatch the project's other plugins directly
 More new features (image generation, etc.) …… | See the end of this document ……
 </div>
 
 
 - New UI (edit the LAYOUT option in `config.py` to switch between the "left-right layout" and the "top-down layout")
 <div align="center">
-<img src="https://user-images.githubusercontent.com/96192199/230361456-61078362-a966-4eb5-b49e-3c62ef18b860.gif" width="700" >
+<img src="https://github.com/binary-husky/gpt_academic/assets/96192199/d81137c3-affd-4cd1-bb5e-b15610389762" width="700" >
 </div>
 
 
@@ -113,16 +111,16 @@ cd gpt_academic
 
 2. Configure the API_KEY
 
-In `config.py`, configure the API KEY and other settings; [click here for special network environment setups](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E).
+In `config.py`, configure the API KEY and other settings; [click here for special network environment setups](https://github.com/binary-husky/gpt_academic/issues/1). [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
 
 「 The program first checks for a private configuration file named `config_private.py` and uses its entries to override the same-named entries in `config.py`. If you understand this read logic, we strongly recommend creating a new configuration file named `config_private.py` next to `config.py` and moving (copying) the settings from `config.py` into `config_private.py` (only copy the entries you have modified). 」
 
-「 The project can also be configured via `environment variables`; see `docker-compose.yml` or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E8%AF%B4%E6%98%8E) for the variable format. Configuration read priority: `environment variables` > `config_private.py` > `config.py`. 」
+「 The project can also be configured via `environment variables`; see `docker-compose.yml` or our [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) for the variable format. Configuration read priority: `environment variables` > `config_private.py` > `config.py`. 」
 
 
 3. Install dependencies
 ```sh
-# (Option I: if familiar with python) (python 3.9 or above, the newer the better). Note: use the official pip source or the aliyun pip source; to switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
+# (Option I: if familiar with python; recommended python version 3.9 ~ 3.11) Note: use the official pip source or the aliyun pip source; to switch sources temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
 python -m pip install -r requirements.txt
 
 # (Option II: using Anaconda) the steps are similar (https://www.bilibili.com/video/BV1rc411W7Dr):
@@ -138,17 +136,17 @@ python -m pip install -r requirements.txt # 这个步骤和pip安装一样的步
 [Optional] To support Tsinghua ChatGLM2 / Fudan MOSS as backends, extra dependencies must be installed (prerequisites: familiar with Python + have used Pytorch + a machine powerful enough):
 ```sh
 # [Optional step I] Support Tsinghua ChatGLM2. Note: if you hit the error "Call ChatGLM fail 不能正常加载ChatGLM的参数", refer to the following. 1: the default install above is the torch+cpu build; to use cuda, uninstall torch and reinstall torch+cuda. 2: if the model cannot be loaded because the machine configuration is insufficient, change the model precision in request_llm/bridge_chatglm.py, replacing every AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) with AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
+python -m pip install -r request_llms/requirements_chatglm.txt
 
 # [Optional step II] Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llm/moss # note: you must be at the project root path when running this line
+python -m pip install -r request_llms/requirements_moss.txt
+git clone --depth=1 https://github.com/OpenLMLab/MOSS.git request_llms/moss # note: you must be at the project root path when running this line
 
 # [Optional step III] Support RWKV Runner
 See the wiki: https://github.com/binary-husky/gpt_academic/wiki/%E9%80%82%E9%85%8DRWKV-Runner
 
 # [Optional step IV] Make sure AVAIL_LLM_MODELS in the config.py configuration file includes the expected models; the currently supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
 ```
 
 </p>
@@ -163,11 +161,11 @@ python main.py
 
 ### Installation method II: Using Docker
 
-0. Deploy the project's full capability (this is the large image that includes cuda and latex. Not recommended if your network is slow, your disk is small, or you have no GPU; use option 1 instead) (requires familiarity with the [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime)
-[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
+0. Deploy the project's full capability (this is the large image that includes cuda and latex; not recommended if your network is slow or your disk is small)
+[![fullcapacity](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-all-capacity.yml)
 
 ``` sh
-# Edit docker-compose.yml: keep scheme 0 and delete the other schemes, then adjust scheme 0's settings in docker-compose.yml following the comments there
+# Edit docker-compose.yml: keep scheme 0 and delete the other schemes. Then run:
 docker-compose up
 ```
 
@@ -177,7 +175,7 @@ docker-compose up
 [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
 
 ``` sh
-# Edit docker-compose.yml: keep scheme 1 and delete the other schemes, then adjust scheme 1's settings in docker-compose.yml following the comments there
+# Edit docker-compose.yml: keep scheme 1 and delete the other schemes. Then run:
 docker-compose up
 ```
 
@@ -187,48 +185,30 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以
 [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)
 
 ``` sh
-# Edit docker-compose.yml: keep scheme 2 and delete the other schemes, then adjust scheme 2's settings in docker-compose.yml following the comments there
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with the [Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian) runtime)
-[![jittorllms](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-jittorllms.yml)
-
-``` sh
-# Edit docker-compose.yml: keep scheme 3 and delete the other schemes, then adjust scheme 3's settings in docker-compose.yml following the comments there
+# Edit docker-compose.yml: keep scheme 2 and delete the other schemes. Then run:
 docker-compose up
 ```
 
 
 ### Installation method III: Other deployment options
-1. One-click run script.
+1. **Windows one-click run script**.
 Windows users who are completely unfamiliar with the python environment can download the one-click run script published in [Release](https://github.com/binary-husky/gpt_academic/releases) to install the version without local models.
 The script contribution comes from [oobabooga](https://github.com/oobabooga/one-click-installers).
 
-2. Run with docker-compose.
-Please read docker-compose.yml and follow its instructions.
-
-3. How to use a reverse-proxy URL.
-Configure API_URL_REDIRECT following the instructions in `config.py`.
+2. Use third-party APIs, Azure, Wenxin Yiyan, Spark, etc.: see the [Wiki page](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)
 
-4. Microsoft Azure API.
-Configure it following the instructions in `config.py` (AZURE_ENDPOINT and the other three related settings).
+3. Guide to avoiding pitfalls when deploying remotely on a cloud server.
+Please visit the [cloud server remote deployment wiki](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
 
-5. Remote cloud server deployment (requires knowledge of and experience with cloud servers).
-Please visit the [deployment wiki-1](https://github.com/binary-husky/gpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-6. One-click deployment with Sealos ([link](https://github.com/binary-husky/gpt_academic/issues/993))
-
-7. Using WSL2 (Windows Subsystem for Linux).
-Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-8. How to run under a sub-path (such as `http://localhost/subpath`).
-Please visit the [FastAPI running instructions](docs/WithFastapi.md)
+4. Some newer deployment platforms or methods
+    - One-click deployment with Sealos ([link](https://github.com/binary-husky/gpt_academic/issues/993))
+    - Using WSL2 (Windows Subsystem for Linux). Please visit the [deployment wiki-2](https://github.com/binary-husky/gpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
    - How to run under a sub-path (such as `http://localhost/subpath`). Please visit the [FastAPI running instructions](docs/WithFastapi.md)
 
 
 # Advanced Usage
 ### I: Add new convenience buttons (academic shortcut keys)
-Open `core_functional.py` with any text editor, add an entry as follows, then restart the program. (If the button was already added successfully and is visible, both the prefix and the suffix support hot modification and take effect without restarting the program.)
+Open `core_functional.py` with any text editor, add an entry as follows, then restart the program. (If the button already exists, both the prefix and the suffix support hot modification and take effect without restarting the program.)
 For example
 ```
 "超级英译中": {
@@ -244,14 +224,13 @@ docker-compose up
 </div>
 
 ### II: Custom function plugins
-
 Write powerful function plugins to perform any task you can and cannot imagine.
 Writing and debugging plugins for this project is easy; with some basic python knowledge you can implement your own plugin by imitating the template we provide.
 See the [function plugin guide](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.
 
 
-# Latest Update
-### I: New feature highlights
+# Updates
+### I: Highlights
 
 1. Conversation saving. Call `保存当前的对话` in the function plugin area to save the current conversation as a readable + restorable html file;
 call `载入对话历史存档` in the function plugin area (dropdown menu) to restore a previous session.
@@ -292,28 +271,23 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 <img src="https://user-images.githubusercontent.com/96192199/236432361-67739153-73e8-43fe-8111-b61296edabd9.png" width="500" >
 </div>
 
-7. New MOSS large language model support
-<div align="center">
-<img src="https://user-images.githubusercontent.com/96192199/236639178-92836f37-13af-4fdd-984d-b4450fe30336.png" width="500" >
-</div>
-
-8. OpenAI image generation
+7. OpenAI image generation
 <div align="center">
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
 </div>
 
-9. OpenAI audio parsing and summarization
+8. OpenAI audio parsing and summarization
 <div align="center">
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
 </div>
 
-10. Latex full-text proofreading
+9. Latex full-text proofreading
 <div align="center">
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/651ccd98-02c9-4464-91e1-77a6b7d1b033" height="200" > ===>
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/476f66d9-7716-4537-b5c1-735372c25adb" height="200">
 </div>
 
-11. Language and theme switching
+10. Language and theme switching
 <div align="center">
 <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/b6799499-b6fb-4f0c-9c8e-1b441872f4e8" width="500" >
 </div>
@@ -321,7 +295,12 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 
 
 ### II: Versions:
-- version 3.60 (todo): optimize the Void Terminal and introduce a code interpreter and more plugins
+- version 3.70 (todo): optimize the AutoGen plugin theme and design a series of derivative plugins
+- version 3.60: introduce AutoGen as the cornerstone of the new generation of plugins
+- version 3.57: support GLM3, Spark v3 and Wenxin Yiyan v4; fix concurrency bugs in local models
+- version 3.56: support dynamically adding basic-function buttons; new report-PDF summary page
+- version 3.55: refactor the front end, introducing floating windows and a menu bar
+- version 3.54: add a dynamic code interpreter (Code Interpreter) (to be refined)
 - version 3.53: support dynamically choosing different UI themes; improve stability & resolve multi-user conflicts
 - version 3.50: use natural language to call all of this project's function plugins (Void Terminal); support plugin categories; improve the UI; design new themes
 - version 3.49: support the Baidu Qianfan platform and Wenxin Yiyan
@@ -343,7 +322,7 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
 - version 2.0: introduce modular function plugins
 - version 1.0: basic features
 
-gpt_academic developer QQ group-2: 610599535
+GPT Academic developer QQ group: `610599535`
 
 - Known issues
     - Some browser translation plugins interfere with the front end of this software
@@ -354,7 +333,13 @@ gpt_academic开发者QQ群-2:610599535
 1. `Chuanhu-Small-and-Beautiful` [website](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)
 
 
-### IV: Reference and learning
+### IV: Development branches of this project
+
+1. `master` branch: main branch, stable release
+2. `frontier` branch: development branch, beta release
+
+
+### V: Reference and learning
 
 ```
 Many designs in this code take reference from other excellent projects, in no particular order:
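
The configuration-priority rule stated in the README above (`environment variables` > `config_private.py` > `config.py`) is easy to mirror in a few lines. A minimal sketch of that read order, assuming nothing beyond the three documented sources (the project's real reader lives in `toolbox.get_conf`, so the helper below is illustrative only):

```python
# Illustrative sketch of the documented read priority:
# environment variables > config_private.py > config.py
import importlib
import os

def read_single_conf(name: str):
    if name in os.environ:                      # 1) environment variables win
        return os.environ[name]
    try:                                        # 2) then the private config, if present
        private = importlib.import_module("config_private")
        if hasattr(private, name):
            return getattr(private, name)
    except ModuleNotFoundError:
        pass
    public = importlib.import_module("config")  # 3) finally the public config.py
    return getattr(public, name)
```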
app.py CHANGED
@@ -1,4 +1,6 @@
 import os; os.environ['no_proxy'] = '*'  # avoid unexpected pollution from proxy networks
+import pickle
+import base64
 
 def main():
     import subprocess, sys
@@ -6,20 +8,20 @@ def main():
     import gradio as gr
     if gr.__version__ not in ['3.32.6']:
         raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
-    from request_llm.bridge_all import predict
+    from request_llms.bridge_all import predict
     from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
     # we recommend copying your secrets (API keys, proxy URLs, ...) into a config_private.py so they are not accidentally pushed to github
     proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
     CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
     ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
-    DARK_MODE, = get_conf('DARK_MODE')
+    DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
+    INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')
 
     # if WEB_PORT is -1, pick a random web port
     PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
     from check_proxy import get_current_version
     from themes.theme import adjust_theme, advanced_css, theme_declaration, load_dynamic_theme
 
-    initial_prompt = "Serve me as a writing and programming assistant."
     title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
     description = "Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic), "
     description += "感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors)."
@@ -31,6 +33,7 @@ def main():
     description += "</br></br>虚空终端使用说明: 点击虚空终端, 然后根据提示输入指令, 再次点击虚空终端"
     description += "</br></br>如何保存对话: 点击保存当前的对话按钮"
     description += "</br></br>如何语音对话: 请阅读Wiki"
+    description += "</br></br>如何临时更换API_KEY: 在输入区输入临时API_KEY后提交(网页刷新后失效)"
 
     # query logging; python 3.9+ recommended (the newer the better)
     import logging, uuid
@@ -47,7 +50,7 @@ def main():
 
     # advanced function plugins
     from crazy_functional import get_crazy_functions
-    DEFAULT_FN_GROUPS, = get_conf('DEFAULT_FN_GROUPS')
+    DEFAULT_FN_GROUPS = get_conf('DEFAULT_FN_GROUPS')
     plugins = get_crazy_functions()
     all_plugin_groups = list(set([g for _, plugin in plugins.items() for g in plugin['Group'].split('|')]))
     match_group = lambda tags, groups: any([g in groups for g in tags.split('|')])
@@ -70,10 +73,12 @@ def main():
         CHATBOT_HEIGHT /= 2
 
     cancel_handles = []
+    customize_btns = {}
+    predefined_btns = {}
     with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
         gr.HTML(title_html)
         gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
-        secret_css, dark_mode = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False)
+        secret_css, dark_mode, persistent_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
         cookies = gr.State(load_chat_cookies())
         with gr_L1():
             with gr_L2(scale=2, elem_id="gpt-chat"):
@@ -92,16 +97,21 @@ def main():
             clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
             if ENABLE_AUDIO:
                 with gr.Row():
-                    audio_mic = gr.Audio(source="microphone", type="numpy", streaming=True, show_label=False).style(container=False)
+                    audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
             with gr.Row():
                 status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
             with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
                 with gr.Row():
+                    for k in range(NUM_CUSTOM_BASIC_BTN):
+                        customize_btn = gr.Button("自定义按钮" + str(k+1), visible=False, variant="secondary", info_str=f'基础功能区: 自定义按钮')
+                        customize_btn.style(size="sm")
+                        customize_btns.update({"自定义按钮" + str(k+1): customize_btn})
                    for k in functional:
                         if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
                         variant = functional[k]["Color"] if "Color" in functional[k] else "secondary"
                         functional[k]["Button"] = gr.Button(k, variant=variant, info_str=f'基础功能区: {k}')
                         functional[k]["Button"].style(size="sm")
+                        predefined_btns.update({k: functional[k]["Button"]})
             with gr.Accordion("函数插件区", open=True, elem_id="plugin-panel") as area_crazy_fn:
                 with gr.Row():
                     gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
@@ -146,12 +156,14 @@ def main():
                     top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01, interactive=True, label="Top-p (nucleus sampling)",)
                     temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
                     max_length_sl = gr.Slider(minimum=256, maximum=1024*32, value=4096, step=128, interactive=True, label="Local LLM MaxLength",)
-                    system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=initial_prompt)
+                    system_prompt = gr.Textbox(show_label=True, lines=2, placeholder=f"System Prompt", label="System prompt", value=INIT_SYS_PROMPT)
 
                 with gr.Tab("界面外观", elem_id="interact-panel"):
                     theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
                     checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
                                                   value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
+                    checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
+                                                    value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
                     dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
                     dark_mode_btn.click(None, None, None, _js="""() => {
                         if (document.querySelectorAll('.dark').length) {
@@ -176,6 +188,77 @@ def main():
                 stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
                 clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
 
+        def to_cookie_str(d):
+            # pickle the dictionary and encode it as a string
+            pickled_dict = pickle.dumps(d)
+            cookie_value = base64.b64encode(pickled_dict).decode('utf-8')
+            return cookie_value
+
+        def from_cookie_str(c):
+            # decode the base64-encoded string and unpickle it into a dictionary
+            pickled_dict = base64.b64decode(c.encode('utf-8'))
+            return pickle.loads(pickled_dict)
+
+        with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
+            with gr.Accordion("自定义菜单", open=True, elem_id="edit-panel"):
+                with gr.Row() as row:
+                    with gr.Column(scale=10):
+                        AVAIL_BTN = [btn for btn in customize_btns.keys()] + [k for k in functional]
+                        basic_btn_dropdown = gr.Dropdown(AVAIL_BTN, value="自定义按钮1", label="选择一个需要自定义基础功能区按钮").style(container=False)
+                        basic_fn_title = gr.Textbox(show_label=False, placeholder="输入新按钮名称", lines=1).style(container=False)
+                        basic_fn_prefix = gr.Textbox(show_label=False, placeholder="输入新提示前缀", lines=4).style(container=False)
+                        basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
+                    with gr.Column(scale=1, min_width=70):
+                        basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
+                        basic_fn_load = gr.Button("加载已保存", variant="primary"); basic_fn_load.style(size="sm")
+                def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
+                    ret = {}
+                    customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
+                    customize_fn_overwrite_.update({
+                        basic_btn_dropdown_:
+                            {
+                                "Title": basic_fn_title,
+                                "Prefix": basic_fn_prefix,
+                                "Suffix": basic_fn_suffix,
+                            }
+                    })
+                    cookies_.update(customize_fn_overwrite_)
+                    if basic_btn_dropdown_ in customize_btns:
+                        ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    else:
+                        ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=True, value=basic_fn_title)})
+                    ret.update({cookies: cookies_})
+                    try: persistent_cookie_ = from_cookie_str(persistent_cookie_)    # persistent cookie string to dict
+                    except: persistent_cookie_ = {}
+                    persistent_cookie_["custom_bnt"] = customize_fn_overwrite_      # update the dict with the new values
+                    persistent_cookie_ = to_cookie_str(persistent_cookie_)          # dict back to persistent cookie string
+                    ret.update({persistent_cookie: persistent_cookie_})             # write persistent cookie
+                    return ret
+
+                def reflesh_btn(persistent_cookie_, cookies_):
+                    ret = {}
+                    for k in customize_btns:
+                        ret.update({customize_btns[k]: gr.update(visible=False, value="")})
+
+                    try: persistent_cookie_ = from_cookie_str(persistent_cookie_)    # persistent cookie string to dict
+                    except: return ret
+
+                    customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
+                    cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
+                    ret.update({cookies: cookies_})
+
+                    for k, v in persistent_cookie_["custom_bnt"].items():
+                        if v['Title'] == "": continue
+                        if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
+                        else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
+                    return ret
+
+                basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
+                h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
+                                           [persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
+                h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")    # save persistent cookie
+
         # interaction between the visibility toggles and the function areas
         def fn_area_visibility(a):
             ret = {}
@@ -189,6 +272,14 @@ def main():
             if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
             return ret
         checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, clearBtn, clearBtn2, plugin_advanced_arg] )
+
+        # interaction between the custom-menu toggle and the custom-menu area
+        def fn_area_visibility_2(a):
+            ret = {}
+            ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
+            return ret
+        checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
+
         # gather the widget-handle combinations that occur repeatedly
         input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
         output_combo = [cookies, chatbot, history, status]
@@ -212,6 +303,9 @@ def main():
             if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
             click_handle = functional[k]["Button"].click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(k)], outputs=output_combo)
             cancel_handles.append(click_handle)
+        for btn in customize_btns.values():
+            click_handle = btn.click(fn=ArgsGeneralWrapper(predict), inputs=[*input_combo, gr.State(True), gr.State(btn.value)], outputs=output_combo)
+            cancel_handles.append(click_handle)
         # file upload area: interaction with the chatbot after receiving files
         file_upload.upload(on_file_uploaded, [file_upload, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
         file_upload_2.upload(on_file_uploaded, [file_upload_2, chatbot, txt, txt2, checkboxes, cookies], [chatbot, txt, txt2, cookies])
@@ -310,33 +404,30 @@ def main():
             }
         }
     }"""
+    load_cookie_js = """(persistent_cookie) => {
+        return getCookie("persistent_cookie");
+    }"""
+    demo.load(None, inputs=None, outputs=[persistent_cookie], _js=load_cookie_js)
     demo.load(None, inputs=[dark_mode], outputs=None, _js=darkmode_js)    # apply the dark or light theme
     demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
 
     # gradio's inbrowser trigger is not very stable; roll back to the original browser-opening function
-    def auto_opentab_delay():
+    def run_delayed_tasks():
         import threading, webbrowser, time
         print(f"如果浏览器没有自动打开,请复制并转到以下URL:")
         if DARK_MODE: print(f"\t「暗色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
         else: print(f"\t「亮色主题已启用(支持动态切换主题)」: http://localhost:{PORT}")
-        def open():
-            time.sleep(2)    # open the browser
-            webbrowser.open_new_tab(f"http://localhost:{PORT}")
-        threading.Thread(target=open, name="open-browser", daemon=True).start()
-        threading.Thread(target=auto_update, name="self-upgrade", daemon=True).start()
-        threading.Thread(target=warm_up_modules, name="warm-up", daemon=True).start()
 
-    auto_opentab_delay()
     demo.queue(concurrency_count=CONCURRENT_COUNT).launch(server_name="0.0.0.0", share=False, favicon_path="docs/logo.png", blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
 
     # to run under a sub-path
-    # CUSTOM_PATH, = get_conf('CUSTOM_PATH')
+    # CUSTOM_PATH = get_conf('CUSTOM_PATH')
     # if CUSTOM_PATH != "/":
     #     from toolbox import run_gradio_in_subpath
     #     run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
     # else:
     #     demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
-    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile"])
+    #                 blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
 
 if __name__ == "__main__":
     main()
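
The new custom-button persistence in app.py round-trips a settings dict through pickle and base64 (the `to_cookie_str` / `from_cookie_str` pair above) so it can live in a browser cookie. A minimal standalone sketch of the same round-trip, with illustrative sample data:

```python
import base64
import pickle

def to_cookie_str(d: dict) -> str:
    # serialize the dict, then make it cookie-safe ASCII
    return base64.b64encode(pickle.dumps(d)).decode('utf-8')

def from_cookie_str(c: str) -> dict:
    # reverse: base64-decode, then unpickle
    return pickle.loads(base64.b64decode(c.encode('utf-8')))

# hypothetical sample payload mirroring the "custom_bnt" structure above
settings = {"custom_bnt": {"自定义按钮1": {"Title": "润色", "Prefix": "", "Suffix": ""}}}
cookie = to_cookie_str(settings)
assert from_cookie_str(cookie) == settings
# Caveat: unpickling data that comes back from a client is only safe if the
# cookie cannot be tampered with; treat this as a sketch, not a hardened design.
```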
check_proxy.py CHANGED
@@ -46,7 +46,7 @@ def backup_and_download(current_version, remote_version):
         return new_version_dir
     os.makedirs(new_version_dir)
     shutil.copytree('./', backup_dir, ignore=lambda x, y: ['history'])
-    proxies, = get_conf('proxies')
+    proxies = get_conf('proxies')
     r = requests.get(
         'https://github.com/binary-husky/chatgpt_academic/archive/refs/heads/master.zip', proxies=proxies, stream=True)
     zip_file_path = backup_dir+'/master.zip'
@@ -113,7 +113,7 @@ def auto_update(raise_error=False):
     import requests
     import time
     import json
-    proxies, = get_conf('proxies')
+    proxies = get_conf('proxies')
     response = requests.get(
         "https://raw.githubusercontent.com/binary-husky/chatgpt_academic/master/version", proxies=proxies, timeout=5)
     remote_json_data = json.loads(response.text)
@@ -156,7 +156,7 @@ def auto_update(raise_error=False):
 def warm_up_modules():
     print('正在执行一些模块的预热...')
     from toolbox import ProxyNetworkActivate
-    from request_llm.bridge_all import model_info
+    from request_llms.bridge_all import model_info
     with ProxyNetworkActivate("Warmup_Modules"):
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         enc.encode("模块预热", disallowed_special=())
@@ -167,5 +167,5 @@ if __name__ == '__main__':
     import os
     os.environ['no_proxy'] = '*'  # avoid unexpected pollution from proxy networks
     from toolbox import get_conf
-    proxies, = get_conf('proxies')
+    proxies = get_conf('proxies')
     check_proxy(proxies)
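
The recurring change in this file (and in app.py) is dropping the tuple-unpacking comma: `get_conf` apparently now returns the bare value when asked for a single key, instead of a 1-tuple. The real implementation lives in `toolbox.py`, which is not part of this view, so the sketch below of such an interface is an assumption:

```python
import os

def read_single_conf(name):
    # placeholder resolver; see the priority sketch earlier in this document
    return os.environ.get(name)

def get_conf(*arg_names):
    # hypothetical: one key returns the bare value, hence the new
    #   proxies = get_conf('proxies')
    # replacing the old
    #   proxies, = get_conf('proxies')
    # while multi-key calls still unpack, as in app.py above
    values = [read_single_conf(n) for n in arg_names]
    if len(values) == 1:
        return values[0]
    return values
```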
config.py CHANGED
@@ -53,6 +53,10 @@ THEME = "Chuanhu-Small-and-Beautiful"
 AVAIL_THEMES = ["Default", "Chuanhu-Small-and-Beautiful", "High-Contrast", "Gstaff/Xkcd", "NoCrypt/Miku"]
 
 
+# default system prompt
+INIT_SYS_PROMPT = "Serve me as a writing and programming assistant."
+
+
 # height of the chat window (only takes effect when LAYOUT="TOP-DOWN")
 CHATBOT_HEIGHT = 1115
 
@@ -90,16 +94,23 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
 
 # model selection (note: LLM_MODEL is the model selected by default; it *must* be included in the AVAIL_LLM_MODELS list)
 LLM_MODEL = "gpt-3.5-turbo"  # options ↓↓↓
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5", "api2d-gpt-3.5-turbo",
-                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4", "chatglm", "moss", "newbing", "stack-claude"]
-# P.S. other available models also include ["qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613",
-# "spark", "sparkv2", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
+AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview",
+                    "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
+                    "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
+                    "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
+                    "chatglm3", "moss", "newbing", "claude-2"]
+# P.S. other available models also include ["zhipuai", "qianfan", "llama2", "qwen", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
+# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"]
+
+
+# which models the "询问多个GPT模型" plugin should use; choose from AVAIL_LLM_MODELS and separate models with `&`, e.g. "gpt-3.5-turbo&chatglm3&azure-gpt-4"
+MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm3"
 
 
 # Baidu Qianfan (LLM_MODEL="qianfan")
 BAIDU_CLOUD_API_KEY = ''
 BAIDU_CLOUD_SECRET_KEY = ''
-BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot'  # options: "ERNIE-Bot" (Wenxin Yiyan), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat"
+BAIDU_CLOUD_QIANFAN_MODEL = 'ERNIE-Bot'  # options: "ERNIE-Bot-4" (Wenxin 4.0), "ERNIE-Bot" (Wenxin Yiyan), "ERNIE-Bot-turbo", "BLOOMZ-7B", "Llama-2-70B-Chat", "Llama-2-13B-Chat", "Llama-2-7B-Chat"
 
 
 # if using a fine-tuned ChatGLM2 model, set LLM_MODEL="chatglmft" and specify the model path here
@@ -132,22 +143,31 @@ AUTHENTICATION = []
 CUSTOM_PATH = "/"
 
 
+# HTTPS key and certificate (no need to modify)
+SSL_KEYFILE = ""
+SSL_CERTFILE = ""
+
+
 # in rare cases, the official openai KEY must be accompanied by an organization code (format like org-xxxxxxxxxxxxxxxxxxxxxxxx)
 API_ORG = ""
 
 
-# to use Slack Claude, see the tutorial in request_llm/README.md
+# to use Slack Claude, see the tutorial in request_llms/README.md
 SLACK_CLAUDE_BOT_ID = ''
 SLACK_CLAUDE_USER_TOKEN = ''
 
 
-# to use AZURE, see the extra document docs\use_azure.md
+# to use AZURE (method one: a single azure model deployment), see the extra document docs\use_azure.md
 AZURE_ENDPOINT = "https://你亲手写的api名称.openai.azure.com/"
 AZURE_API_KEY = "填入azure openai api的密钥"  # we recommend filling the key in at API_KEY instead; this option will soon be deprecated
 AZURE_ENGINE = "填入你亲手写的部署名"  # see docs\use_azure.md
 
 
-# using Newbing
+# to use AZURE (method two: multiple azure model deployments + dynamic switching), see the extra document docs\use_azure.md
+AZURE_CFG_ARRAY = {}
+
+
+# using Newbing (not recommended; it will be removed in the future)
 NEWBING_STYLE = "creative"  # ["creative", "balanced", "precise"]
 NEWBING_COOKIES = """
 put your new bing cookies here
@@ -168,6 +188,11 @@ XFYUN_API_SECRET = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
 XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
 
 
 # Claude API KEY
 ANTHROPIC_API_KEY = ""
 
@@ -184,7 +209,8 @@ HUGGINGFACE_ACCESS_TOKEN = "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"
 # how to obtain: duplicate the space https://huggingface.co/spaces/qingxu98/grobid, set it public, then GROBID_URL = "https://(your hf username, e.g. qingxu98)-(your space name, e.g. grobid).hf.space"
 GROBID_URLS = [
     "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
-    "https://shaocongma-grobid.hf.space","https://FBR123-grobid.hf.space", "https://yeku-grobid.hf.space",
 ]
 
 
@@ -192,6 +218,10 @@ GROBID_URLS = [
 ALLOW_RESET_CONFIG = False
 
 
 # temporary upload folder location; please do not modify
 PATH_PRIVATE_UPLOAD = "private_upload"
 
@@ -201,9 +231,17 @@ PATH_LOGGING = "gpt_log"
 
 
 # besides connecting to OpenAI, which other scenarios may use the proxy; please do not modify
-WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid", "Warmup_Modules"]
 
 
 """
 Schematic of how the online large-model configuration options relate to each other
 
@@ -213,13 +251,16 @@
 │ ├── API_ORG (rarely used)
 │ └── API_URL_REDIRECT (rarely used)
 
-├── "azure-gpt-3.5" and other azure models
 │ ├── API_KEY
 │ ├── AZURE_ENDPOINT
 │ ├── AZURE_API_KEY
 │ ├── AZURE_ENGINE
 │ └── API_URL_REDIRECT
 
 ├── "spark" iFlytek Spark cognitive model, spark & sparkv2
 │ ├── XFYUN_APPID
 │ ├── XFYUN_API_SECRET
 
188
  XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
189
 
190
 
191
+ # 接入智谱大模型
192
+ ZHIPUAI_API_KEY = ""
193
+ ZHIPUAI_MODEL = "chatglm_turbo"
194
+
195
+
196
  # Claude API KEY
197
  ANTHROPIC_API_KEY = ""
198
 
 
209
  # 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
210
  GROBID_URLS = [
211
  "https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
212
+ "https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
213
+ "https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
214
  ]
215
 
216
 
 
218
  ALLOW_RESET_CONFIG = False
219
 
220
 
221
+ # 在使用AutoGen插件时,是否使用Docker容器运行代码
222
+ AUTOGEN_USE_DOCKER = False
223
+
224
+
225
  # 临时的上传文件夹位置,请勿修改
226
  PATH_PRIVATE_UPLOAD = "private_upload"
227
 
 
231
 
232
 
233
  # 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
234
+ WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
235
+ "Warmup_Modules", "Nougat_Download", "AutoGen"]
236
 
237
 
238
+ # *实验性功能*: 自动检测并屏蔽失效的KEY,请勿使用
239
+ BLOCK_INVALID_APIKEY = False
240
+
241
+
242
+ # 自定义按钮的最大数量限制
243
+ NUM_CUSTOM_BASIC_BTN = 4
244
+
245
  """
246
  在线大模型配置关联关系示意图
247
 
 
251
  │ ├── API_ORG(不常用)
252
  │ └── API_URL_REDIRECT(不常用)
253
 
254
+ ├── "azure-gpt-3.5" 等azure模型(单个azure模型,不需要动态切换)
255
  │ ├── API_KEY
256
  │ ├── AZURE_ENDPOINT
257
  │ ├── AZURE_API_KEY
258
  │ ├── AZURE_ENGINE
259
  │ └── API_URL_REDIRECT
260
 
261
+ ├── "azure-gpt-3.5" 等azure模型(多个azure模型,需要动态切换,高优先级)
262
+ │ └── AZURE_CFG_ARRAY
263
+
264
  ├── "spark" 星火认知大模型 spark & sparkv2
265
  │ ├── XFYUN_APPID
266
  │ ├── XFYUN_API_SECRET
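The new `AZURE_CFG_ARRAY = {}` option above enables multiple Azure deployments with dynamic switching, taking priority over the single-deployment fields. Its exact schema is documented in docs\use_azure.md; the entry below is only a plausible illustration, and every key name in it is an assumption:

```python
# Hypothetical example -- docs/use_azure.md is authoritative for the real schema.
AZURE_CFG_ARRAY = {
    "azure-gpt-3.5": {                                   # model name shown in the UI (assumed)
        "AZURE_ENDPOINT": "https://your-api-name.openai.azure.com/",
        "AZURE_API_KEY": "your-azure-api-key",
        "AZURE_ENGINE": "your-deployment-name",
        "AZURE_MODEL_MAX_TOKEN": 4096,                   # assumed field name
    },
}
```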
core_functional.py CHANGED
@@ -91,8 +91,15 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
91
  import core_functional
92
  importlib.reload(core_functional) # 热更新prompt
93
  core_functional = core_functional.get_core_functions()
94
- if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
95
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
96
- if core_functional[additional_fn].get("AutoClearHistory", False):
97
- history = []
98
- return inputs, history
 
 
 
 
 
 
 
 
91
  import core_functional
92
  importlib.reload(core_functional) # 热更新prompt
93
  core_functional = core_functional.get_core_functions()
94
+ addition = chatbot._cookies['customize_fn_overwrite']
95
+ if additional_fn in addition:
96
+ # 自定义功能
97
+ inputs = addition[additional_fn]["Prefix"] + inputs + addition[additional_fn]["Suffix"]
98
+ return inputs, history
99
+ else:
100
+ # 预制功能
101
+ if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
102
+ inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
103
+ if core_functional[additional_fn].get("AutoClearHistory", False):
104
+ history = []
105
+ return inputs, history
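The branch added above first consults the `customize_fn_overwrite` cookie, so user-defined buttons bypass the prebuilt `PreProcess`/`AutoClearHistory` handling and only wrap the input with their own prefix and suffix. A self-contained sketch of the assumed cookie shape (the button label and prompt text are made up):

```python
# Assumed shape of the customize_fn_overwrite cookie consumed above.
customize_fn_overwrite = {
    "我的润色按钮": {                                  # hypothetical custom button label
        "Prefix": "请帮我润色下面这段文字:\n\n",        # prepended to the user input
        "Suffix": "",                                  # appended to the user input
    }
}
inputs = "原始输入"
btn = "我的润色按钮"
inputs = customize_fn_overwrite[btn]["Prefix"] + inputs + customize_fn_overwrite[btn]["Suffix"]
print(inputs)
```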
crazy_functional.py CHANGED
@@ -1,4 +1,5 @@
1
  from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
 
2
 
3
 
4
  def get_crazy_functions():
@@ -190,10 +191,10 @@ def get_crazy_functions():
190
  "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
191
  "Function": HotReload(解析项目本身)
192
  },
193
- "[插件demo]历史上的今天": {
194
  "Group": "对话",
195
  "AsButton": True,
196
- "Info": "查看历史上的今天事件 | 不需要输入参数",
197
  "Function": HotReload(高阶功能模板函数)
198
  },
199
  "精准翻译PDF论文": {
@@ -252,7 +253,7 @@ def get_crazy_functions():
252
  "Function": HotReload(Latex中文润色)
253
  },
254
 
255
- # 被新插件取代
256
  # "Latex项目全文中译英(输入路径或上传压缩包)": {
257
  # "Group": "学术",
258
  # "Color": "stop",
@@ -260,6 +261,8 @@ def get_crazy_functions():
260
  # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
261
  # "Function": HotReload(Latex中译英)
262
  # },
 
 
263
  # "Latex项目全文英译中(输入路径或上传压缩包)": {
264
  # "Group": "学术",
265
  # "Color": "stop",
@@ -290,6 +293,7 @@ def get_crazy_functions():
290
  }
291
  })
292
  except:
 
293
  print('Load function plugin failed')
294
 
295
  try:
@@ -314,6 +318,7 @@ def get_crazy_functions():
314
  }
315
  })
316
  except:
 
317
  print('Load function plugin failed')
318
 
319
  try:
@@ -329,6 +334,7 @@ def get_crazy_functions():
329
  },
330
  })
331
  except:
 
332
  print('Load function plugin failed')
333
 
334
  try:
@@ -344,22 +350,35 @@ def get_crazy_functions():
344
  },
345
  })
346
  except:
 
347
  print('Load function plugin failed')
348
 
349
  try:
350
- from crazy_functions.图片生成 import 图片生成
351
  function_plugins.update({
352
- "图片生成(先切换模型到openai或api2d)": {
353
  "Group": "对话",
354
  "Color": "stop",
355
  "AsButton": False,
356
  "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
357
- "ArgsReminder": "在这里输入分辨率, 如256x256(默认)", # 高级参数输入区的显示提示
358
- "Info": "图片生成 | 输入参数字符串,提供图像的内容",
359
- "Function": HotReload(图片生成)
 
 
 
 
 
 
 
 
 
 
 
360
  },
361
  })
362
  except:
 
363
  print('Load function plugin failed')
364
 
365
  try:
@@ -376,6 +395,7 @@ def get_crazy_functions():
376
  }
377
  })
378
  except:
 
379
  print('Load function plugin failed')
380
 
381
  try:
@@ -390,12 +410,13 @@ def get_crazy_functions():
390
  }
391
  })
392
  except:
 
393
  print('Load function plugin failed')
394
 
395
  try:
396
  from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
397
  function_plugins.update({
398
- "Markdown翻译(手动指定语言)": {
399
  "Group": "编程",
400
  "Color": "stop",
401
  "AsButton": False,
@@ -405,6 +426,7 @@ def get_crazy_functions():
405
  }
406
  })
407
  except:
 
408
  print('Load function plugin failed')
409
 
410
  try:
@@ -420,6 +442,7 @@ def get_crazy_functions():
420
  }
421
  })
422
  except:
 
423
  print('Load function plugin failed')
424
 
425
  try:
@@ -435,12 +458,13 @@ def get_crazy_functions():
435
  }
436
  })
437
  except:
 
438
  print('Load function plugin failed')
439
 
440
  try:
441
  from crazy_functions.交互功能函数模板 import 交互功能模板函数
442
  function_plugins.update({
443
- "交互功能模板函数": {
444
  "Group": "对话",
445
  "Color": "stop",
446
  "AsButton": False,
@@ -448,6 +472,7 @@ def get_crazy_functions():
448
  }
449
  })
450
  except:
 
451
  print('Load function plugin failed')
452
 
453
  try:
@@ -492,23 +517,25 @@ def get_crazy_functions():
492
  }
493
  })
494
  except:
 
495
  print('Load function plugin failed')
496
 
497
  try:
498
  from toolbox import get_conf
499
- ENABLE_AUDIO, = get_conf('ENABLE_AUDIO')
500
  if ENABLE_AUDIO:
501
  from crazy_functions.语音助手 import 语音助手
502
  function_plugins.update({
503
- "实时音频采集": {
504
  "Group": "对话",
505
  "Color": "stop",
506
  "AsButton": True,
507
- "Info": "开始语言对话 | 没有输入参数",
508
  "Function": HotReload(语音助手)
509
  }
510
  })
511
  except:
 
512
  print('Load function plugin failed')
513
 
514
  try:
@@ -522,6 +549,7 @@ def get_crazy_functions():
522
  }
523
  })
524
  except:
 
525
  print('Load function plugin failed')
526
 
527
  try:
@@ -535,20 +563,22 @@ def get_crazy_functions():
535
  }
536
  })
537
  except:
 
538
  print('Load function plugin failed')
539
 
540
- # try:
541
- # from crazy_functions.CodeInterpreter import 虚空终端CodeInterpreter
542
- # function_plugins.update({
543
- # "CodeInterpreter(开发中,仅供测试)": {
544
- # "Group": "编程|对话",
545
- # "Color": "stop",
546
- # "AsButton": False,
547
- # "Function": HotReload(虚空终端CodeInterpreter)
548
- # }
549
- # })
550
- # except:
551
- # print('Load function plugin failed')
 
552
 
553
  # try:
554
  # from crazy_functions.chatglm微调工具 import 微调数据集生成
 
1
  from toolbox import HotReload # HotReload 的意思是热更新,修改函数插件后,不需要重启程序,代码直接生效
2
+ from toolbox import trimmed_format_exc
3
 
4
 
5
  def get_crazy_functions():
 
191
  "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
192
  "Function": HotReload(解析项目本身)
193
  },
194
+ "历史上的今天": {
195
  "Group": "对话",
196
  "AsButton": True,
197
+ "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
198
  "Function": HotReload(高阶功能模板函数)
199
  },
200
  "精准翻译PDF论文": {
 
253
  "Function": HotReload(Latex中文润色)
254
  },
255
 
256
+ # 已经被新插件取代
257
  # "Latex项目全文中译英(输入路径或上传压缩包)": {
258
  # "Group": "学术",
259
  # "Color": "stop",
 
261
  # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
262
  # "Function": HotReload(Latex中译英)
263
  # },
264
+
265
+ # 已经被新插件取代
266
  # "Latex项目全文英译中(输入路径或上传压缩包)": {
267
  # "Group": "学术",
268
  # "Color": "stop",
 
293
  }
294
  })
295
  except:
296
+ print(trimmed_format_exc())
297
  print('Load function plugin failed')
298
 
299
  try:
 
318
  }
319
  })
320
  except:
321
+ print(trimmed_format_exc())
322
  print('Load function plugin failed')
323
 
324
  try:
 
334
  },
335
  })
336
  except:
337
+ print(trimmed_format_exc())
338
  print('Load function plugin failed')
339
 
340
  try:
 
350
  },
351
  })
352
  except:
353
+ print(trimmed_format_exc())
354
  print('Load function plugin failed')
355
 
356
  try:
357
+ from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3
358
  function_plugins.update({
359
+ "图片生成_DALLE2 (先切换模型到openai或api2d)": {
360
  "Group": "对话",
361
  "Color": "stop",
362
  "AsButton": False,
363
  "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
364
+ "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
365
+ "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
366
+ "Function": HotReload(图片生成_DALLE2)
367
+ },
368
+ })
369
+ function_plugins.update({
370
+ "图片生成_DALLE3 (先切换模型到openai或api2d)": {
371
+ "Group": "对话",
372
+ "Color": "stop",
373
+ "AsButton": False,
374
+ "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
375
+ "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 1024x1024, 1792x1024, 1024x1792。如需生成高清图像,请输入 1024x1024-HD, 1792x1024-HD, 1024x1792-HD。", # 高级参数输入区的显示提示
376
+ "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
377
+ "Function": HotReload(图片生成_DALLE3)
378
  },
379
  })
380
  except:
381
+ print(trimmed_format_exc())
382
  print('Load function plugin failed')
383
 
384
  try:
 
395
  }
396
  })
397
  except:
398
+ print(trimmed_format_exc())
399
  print('Load function plugin failed')
400
 
401
  try:
 
410
  }
411
  })
412
  except:
413
+ print(trimmed_format_exc())
414
  print('Load function plugin failed')
415
 
416
  try:
417
  from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
418
  function_plugins.update({
419
+ "Markdown翻译(指定翻译成何种语言)": {
420
  "Group": "编程",
421
  "Color": "stop",
422
  "AsButton": False,
 
426
  }
427
  })
428
  except:
429
+ print(trimmed_format_exc())
430
  print('Load function plugin failed')
431
 
432
  try:
 
442
  }
443
  })
444
  except:
445
+ print(trimmed_format_exc())
446
  print('Load function plugin failed')
447
 
448
  try:
 
458
  }
459
  })
460
  except:
461
+ print(trimmed_format_exc())
462
  print('Load function plugin failed')
463
 
464
  try:
465
  from crazy_functions.交互功能函数模板 import 交互功能模板函数
466
  function_plugins.update({
467
+ "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
468
  "Group": "对话",
469
  "Color": "stop",
470
  "AsButton": False,
 
472
  }
473
  })
474
  except:
475
+ print(trimmed_format_exc())
476
  print('Load function plugin failed')
477
 
478
  try:
 
517
  }
518
  })
519
  except:
520
+ print(trimmed_format_exc())
521
  print('Load function plugin failed')
522
 
523
  try:
524
  from toolbox import get_conf
525
+ ENABLE_AUDIO = get_conf('ENABLE_AUDIO')
526
  if ENABLE_AUDIO:
527
  from crazy_functions.语音助手 import 语音助手
528
  function_plugins.update({
529
+ "实时语音对话": {
530
  "Group": "对话",
531
  "Color": "stop",
532
  "AsButton": True,
533
+ "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
534
  "Function": HotReload(语音助手)
535
  }
536
  })
537
  except:
538
+ print(trimmed_format_exc())
539
  print('Load function plugin failed')
540
 
541
  try:
 
549
  }
550
  })
551
  except:
552
+ print(trimmed_format_exc())
553
  print('Load function plugin failed')
554
 
555
  try:
 
563
  }
564
  })
565
  except:
566
+ print(trimmed_format_exc())
567
  print('Load function plugin failed')
568
 
569
+ try:
570
+ from crazy_functions.多智能体 import 多智能体终端
571
+ function_plugins.update({
572
+ "AutoGen多智能体终端(仅供测试)": {
573
+ "Group": "智能体",
574
+ "Color": "stop",
575
+ "AsButton": False,
576
+ "Function": HotReload(多智能体终端)
577
+ }
578
+ })
579
+ except:
580
+ print(trimmed_format_exc())
581
+ print('Load function plugin failed')
582
 
583
  # try:
584
  # from crazy_functions.chatglm微调工具 import 微调数据集生成
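All of the registrations edited above follow one metadata schema. For reference, a minimal hypothetical entry (the plugin name and module are placeholders; the field meanings are inferred from the surrounding entries, not from project documentation):

```python
# Hypothetical registration mirroring the pattern used throughout get_crazy_functions().
try:
    from crazy_functions.我的插件 import 我的插件函数   # placeholder module
    function_plugins.update({
        "我的插件": {
            "Group": "对话",                  # function group shown in the UI
            "Color": "stop",                  # button color variant
            "AsButton": False,                # True -> promoted to a top-level button
            "AdvancedArgs": False,            # True -> opens the advanced-args textbox
            "Info": "插件说明 | 输入参数说明",
            "Function": HotReload(我的插件函数),
        }
    })
except:
    print(trimmed_format_exc())
    print('Load function plugin failed')
```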
crazy_functions/Latex全文润色.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
2
- from toolbox import CatchException, report_execption, write_history_to_file, zip_folder
3
 
4
 
5
  class PaperFileGroup():
@@ -11,7 +11,7 @@ class PaperFileGroup():
11
  self.sp_file_tag = []
12
 
13
  # count_token
14
- from request_llm.bridge_all import model_info
15
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
16
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
17
  self.get_token_num = get_token_num
@@ -146,7 +146,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
146
  try:
147
  import tiktoken
148
  except:
149
- report_execption(chatbot, history,
150
  a=f"解析项目: {txt}",
151
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
152
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -157,12 +157,12 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
157
  project_folder = txt
158
  else:
159
  if txt == "": txt = '空空如也的输入栏'
160
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
161
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
162
  return
163
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
164
  if len(file_manifest) == 0:
165
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
166
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
167
  return
168
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
@@ -184,7 +184,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
184
  try:
185
  import tiktoken
186
  except:
187
- report_execption(chatbot, history,
188
  a=f"解析项目: {txt}",
189
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
190
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -195,12 +195,12 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
195
  project_folder = txt
196
  else:
197
  if txt == "": txt = '空空如也的输入栏'
198
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
199
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
200
  return
201
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
202
  if len(file_manifest) == 0:
203
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
204
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
205
  return
206
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
@@ -220,7 +220,7 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
220
  try:
221
  import tiktoken
222
  except:
223
- report_execption(chatbot, history,
224
  a=f"解析项目: {txt}",
225
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
226
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -231,12 +231,12 @@ def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
231
  project_folder = txt
232
  else:
233
  if txt == "": txt = '空空如也的输入栏'
234
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
235
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
236
  return
237
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
238
  if len(file_manifest) == 0:
239
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
240
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
241
  return
242
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread')
 
1
  from toolbox import update_ui, trimmed_format_exc, promote_file_to_downloadzone, get_log_folder
2
+ from toolbox import CatchException, report_exception, write_history_to_file, zip_folder
3
 
4
 
5
  class PaperFileGroup():
 
11
  self.sp_file_tag = []
12
 
13
  # count_token
14
+ from request_llms.bridge_all import model_info
15
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
16
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
17
  self.get_token_num = get_token_num
 
146
  try:
147
  import tiktoken
148
  except:
149
+ report_exception(chatbot, history,
150
  a=f"解析项目: {txt}",
151
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
152
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
157
  project_folder = txt
158
  else:
159
  if txt == "": txt = '空空如也的输入栏'
160
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
161
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
162
  return
163
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
164
  if len(file_manifest) == 0:
165
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
166
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
167
  return
168
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en')
 
184
  try:
185
  import tiktoken
186
  except:
187
+ report_exception(chatbot, history,
188
  a=f"解析项目: {txt}",
189
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
190
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
195
  project_folder = txt
196
  else:
197
  if txt == "": txt = '空空如也的输入栏'
198
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
199
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
200
  return
201
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
202
  if len(file_manifest) == 0:
203
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
204
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
205
  return
206
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh')
 
220
  try:
221
  import tiktoken
222
  except:
223
+ report_exception(chatbot, history,
224
  a=f"解析项目: {txt}",
225
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
226
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
231
  project_folder = txt
232
  else:
233
  if txt == "": txt = '空空如也的输入栏'
234
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
235
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
236
  return
237
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
238
  if len(file_manifest) == 0:
239
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
240
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
241
  return
242
  yield from 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en', mode='proofread')
crazy_functions/Latex全文翻译.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui, promote_file_to_downloadzone
2
- from toolbox import CatchException, report_execption, write_history_to_file
3
  fast_debug = False
4
 
5
  class PaperFileGroup():
@@ -11,7 +11,7 @@ class PaperFileGroup():
11
  self.sp_file_tag = []
12
 
13
  # count_token
14
- from request_llm.bridge_all import model_info
15
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
16
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
17
  self.get_token_num = get_token_num
@@ -117,7 +117,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
117
  try:
118
  import tiktoken
119
  except:
120
- report_execption(chatbot, history,
121
  a=f"解析项目: {txt}",
122
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
123
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -128,12 +128,12 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
128
  project_folder = txt
129
  else:
130
  if txt == "": txt = '空空如也的输入栏'
131
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
132
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
133
  return
134
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
135
  if len(file_manifest) == 0:
136
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
137
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
138
  return
139
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
@@ -154,7 +154,7 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
154
  try:
155
  import tiktoken
156
  except:
157
- report_execption(chatbot, history,
158
  a=f"解析项目: {txt}",
159
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
160
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -165,12 +165,12 @@ def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
165
  project_folder = txt
166
  else:
167
  if txt == "": txt = '空空如也的输入栏'
168
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
169
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
170
  return
171
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
172
  if len(file_manifest) == 0:
173
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
174
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
175
  return
176
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
 
1
  from toolbox import update_ui, promote_file_to_downloadzone
2
+ from toolbox import CatchException, report_exception, write_history_to_file
3
  fast_debug = False
4
 
5
  class PaperFileGroup():
 
11
  self.sp_file_tag = []
12
 
13
  # count_token
14
+ from request_llms.bridge_all import model_info
15
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
16
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
17
  self.get_token_num = get_token_num
 
117
  try:
118
  import tiktoken
119
  except:
120
+ report_exception(chatbot, history,
121
  a=f"解析项目: {txt}",
122
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
123
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
128
  project_folder = txt
129
  else:
130
  if txt == "": txt = '空空如也的输入栏'
131
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
132
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
133
  return
134
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
135
  if len(file_manifest) == 0:
136
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
137
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
138
  return
139
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
 
154
  try:
155
  import tiktoken
156
  except:
157
+ report_exception(chatbot, history,
158
  a=f"解析项目: {txt}",
159
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
160
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
165
  project_folder = txt
166
  else:
167
  if txt == "": txt = '空空如也的输入栏'
168
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
169
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
170
  return
171
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
172
  if len(file_manifest) == 0:
173
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
174
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
175
  return
176
  yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
crazy_functions/Latex输出PDF结果.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
2
- from toolbox import CatchException, report_execption, update_ui_lastest_msg, zip_result, gen_time_str
3
  from functools import partial
4
  import glob, os, requests, time
5
  pj = os.path.join
@@ -129,7 +129,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
129
  yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
130
  else:
131
  yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
132
- proxies, = get_conf('proxies')
133
  r = requests.get(url_tar, proxies=proxies)
134
  with open(dst, 'wb+') as f:
135
  f.write(r.content)
@@ -171,12 +171,12 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
171
  project_folder = txt
172
  else:
173
  if txt == "": txt = '空空如也的输入栏'
174
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
175
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
176
  return
177
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
178
  if len(file_manifest) == 0:
179
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
180
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
181
  return
182
 
@@ -249,7 +249,7 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
249
  history = []
250
  txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
251
  if txt.endswith('.pdf'):
252
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
253
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
254
  return
255
 
@@ -258,13 +258,13 @@ def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot,
258
  project_folder = txt
259
  else:
260
  if txt == "": txt = '空空如也的输入栏'
261
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
262
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
263
  return
264
 
265
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
266
  if len(file_manifest) == 0:
267
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
268
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
269
  return
270
 
 
1
  from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
2
+ from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
3
  from functools import partial
4
  import glob, os, requests, time
5
  pj = os.path.join
 
129
  yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
130
  else:
131
  yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
132
+ proxies = get_conf('proxies')
133
  r = requests.get(url_tar, proxies=proxies)
134
  with open(dst, 'wb+') as f:
135
  f.write(r.content)
 
171
  project_folder = txt
172
  else:
173
  if txt == "": txt = '空空如也的输入栏'
174
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
175
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
176
  return
177
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
178
  if len(file_manifest) == 0:
179
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
180
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
181
  return
182
 
 
249
  history = []
250
  txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
251
  if txt.endswith('.pdf'):
252
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"发现已经存在翻译好的PDF文档")
253
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
254
  return
255
 
 
258
  project_folder = txt
259
  else:
260
  if txt == "": txt = '空空如也的输入栏'
261
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无法处理: {txt}")
262
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
263
  return
264
 
265
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
266
  if len(file_manifest) == 0:
267
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
268
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
269
  return
270
 
crazy_functions/agent_fns/auto_agent.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
2
+ from toolbox import report_exception, get_log_folder, update_ui_lastest_msg, Singleton
3
+ from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
4
+ from crazy_functions.agent_fns.general import AutoGenGeneral
5
+
6
+
7
+
8
+ class AutoGenMath(AutoGenGeneral):
9
+
10
+ def define_agents(self):
11
+ from autogen import AssistantAgent, UserProxyAgent
12
+ return [
13
+ {
14
+ "name": "assistant", # name of the agent.
15
+ "cls": AssistantAgent, # class of the agent.
16
+ },
17
+ {
18
+ "name": "user_proxy", # name of the agent.
19
+ "cls": UserProxyAgent, # class of the agent.
20
+ "human_input_mode": "ALWAYS", # always ask for human input.
21
+ "llm_config": False, # disables llm-based auto reply.
22
+ },
23
+ ]
crazy_functions/agent_fns/echo_agent.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
2
+
3
+ class EchoDemo(PluginMultiprocessManager):
4
+ def subprocess_worker(self, child_conn):
5
+ # ⭐⭐ 子进程
6
+ self.child_conn = child_conn
7
+ while True:
8
+ msg = self.child_conn.recv() # PipeCom
9
+ if msg.cmd == "user_input":
10
+ # wait for further user input
11
+ self.child_conn.send(PipeCom("show", msg.content))
12
+ wait_success = self.subprocess_worker_wait_user_feedback(wait_msg="我准备好处理下一个问题了.")
13
+ if not wait_success:
14
+ # wait timeout, terminate this subprocess_worker
15
+ break
16
+ elif msg.cmd == "terminate":
17
+ self.child_conn.send(PipeCom("done", ""))
18
+ break
19
+ print('[debug] subprocess_worker terminated')
crazy_functions/agent_fns/general.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from toolbox import trimmed_format_exc, get_conf, ProxyNetworkActivate
2
+ from crazy_functions.agent_fns.pipe import PluginMultiprocessManager, PipeCom
3
+ from request_llms.bridge_all import predict_no_ui_long_connection
4
+ import time
5
+
6
+ def gpt_academic_generate_oai_reply(
7
+ self,
8
+ messages,
9
+ sender,
10
+ config,
11
+ ):
12
+ llm_config = self.llm_config if config is None else config
13
+ if llm_config is False:
14
+ return False, None
15
+ if messages is None:
16
+ messages = self._oai_messages[sender]
17
+
18
+ inputs = messages[-1]['content']
19
+ history = []
20
+ for message in messages[:-1]:
21
+ history.append(message['content'])
22
+ context=messages[-1].pop("context", None)
23
+ assert context is None, "预留参数 context 未实现"
24
+
25
+ reply = predict_no_ui_long_connection(
26
+ inputs=inputs,
27
+ llm_kwargs=llm_config,
28
+ history=history,
29
+ sys_prompt=self._oai_system_message[0]['content'],
30
+ console_slience=True
31
+ )
32
+ assumed_done = reply.endswith('\nTERMINATE')
33
+ return True, reply
34
+
35
+ class AutoGenGeneral(PluginMultiprocessManager):
36
+ def gpt_academic_print_override(self, user_proxy, message, sender):
37
+ # ⭐⭐ run in subprocess
38
+ self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
39
+
40
+ def gpt_academic_get_human_input(self, user_proxy, message):
41
+ # ⭐⭐ run in subprocess
42
+ patience = 300
43
+ begin_waiting_time = time.time()
44
+ self.child_conn.send(PipeCom("interact", message))
45
+ while True:
46
+ time.sleep(0.5)
47
+ if self.child_conn.poll():
48
+ wait_success = True
49
+ break
50
+ if time.time() - begin_waiting_time > patience:
51
+ self.child_conn.send(PipeCom("done", ""))
52
+ wait_success = False
53
+ break
54
+ if wait_success:
55
+ return self.child_conn.recv().content
56
+ else:
57
+ raise TimeoutError("等待用户输入超时")
58
+
59
+ def define_agents(self):
60
+ raise NotImplementedError
61
+
62
+ def exe_autogen(self, input):
63
+ # ⭐⭐ run in subprocess
64
+ input = input.content
65
+ with ProxyNetworkActivate("AutoGen"):
66
+ code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
67
+ agents = self.define_agents()
68
+ user_proxy = None
69
+ assistant = None
70
+ for agent_kwargs in agents:
71
+ agent_cls = agent_kwargs.pop('cls')
72
+ kwargs = {
73
+ 'llm_config':self.llm_kwargs,
74
+ 'code_execution_config':code_execution_config
75
+ }
76
+ kwargs.update(agent_kwargs)
77
+ agent_handle = agent_cls(**kwargs)
78
+ agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
79
+ for d in agent_handle._reply_func_list:
80
+ if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
81
+ d['reply_func'] = gpt_academic_generate_oai_reply
82
+ if agent_kwargs['name'] == 'user_proxy':
83
+ agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
84
+ user_proxy = agent_handle
85
+ if agent_kwargs['name'] == 'assistant': assistant = agent_handle
86
+ try:
87
+ if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
88
+ user_proxy.initiate_chat(assistant, message=input)
89
+ except Exception as e:
90
+ tb_str = '```\n' + trimmed_format_exc() + '```'
91
+ self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
92
+
93
+ def subprocess_worker(self, child_conn):
94
+ # ⭐⭐ run in subprocess
95
+ self.child_conn = child_conn
96
+ while True:
97
+ msg = self.child_conn.recv() # PipeCom
98
+ self.exe_autogen(msg)
99
+
100
+
101
+ class AutoGenGroupChat(AutoGenGeneral):
102
+ def exe_autogen(self, input):
103
+ # ⭐⭐ run in subprocess
104
+ import autogen
105
+
106
+ input = input.content
107
+ with ProxyNetworkActivate("AutoGen"):
108
+ code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
109
+ agents = self.define_agents()
110
+ agents_instances = []
111
+ for agent_kwargs in agents:
112
+ agent_cls = agent_kwargs.pop("cls")
113
+ kwargs = {"code_execution_config": code_execution_config}
114
+ kwargs.update(agent_kwargs)
115
+ agent_handle = agent_cls(**kwargs)
116
+ agent_handle._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
117
+ agents_instances.append(agent_handle)
118
+ if agent_kwargs["name"] == "user_proxy":
119
+ user_proxy = agent_handle
120
+ user_proxy.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
121
+ try:
122
+ groupchat = autogen.GroupChat(agents=agents_instances, messages=[], max_round=50)
123
+ manager = autogen.GroupChatManager(groupchat=groupchat, **self.define_group_chat_manager_config())
124
+ manager._print_received_message = lambda a, b: self.gpt_academic_print_override(agent_kwargs, a, b)
125
+ manager.get_human_input = lambda a: self.gpt_academic_get_human_input(manager, a)
126
+ if user_proxy is None:
127
+ raise Exception("user_proxy is not defined")
128
+ user_proxy.initiate_chat(manager, message=input)
129
+ except Exception:
130
+ tb_str = "```\n" + trimmed_format_exc() + "```"
131
+ self.child_conn.send(PipeCom("done", "AutoGen exe failed: \n\n" + tb_str))
132
+
133
+ def define_group_chat_manager_config(self):
134
+ raise NotImplementedError
crazy_functions/agent_fns/persistent.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from toolbox import Singleton
2
+ @Singleton
3
+ class GradioMultiuserManagerForPersistentClasses():
4
+ def __init__(self):
5
+ self.mapping = {}
6
+
7
+ def already_alive(self, key):
8
+ return (key in self.mapping) and (self.mapping[key].is_alive())
9
+
10
+ def set(self, key, x):
11
+ self.mapping[key] = x
12
+ return self.mapping[key]
13
+
14
+ def get(self, key):
15
+ return self.mapping[key]
16
+
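A brief usage sketch of this singleton: it keeps one long-lived `PluginMultiprocessManager` per session key, so a returning user resumes the same subprocess instead of spawning a new one. The key format and the use of `EchoDemo` (from echo_agent.py above) are illustrative; the constructor arguments are the standard plugin-entrypoint arguments, assumed to be in scope:

```python
# Hedged sketch -- assumes it runs inside a plugin entrypoint where
# llm_kwargs/plugin_kwargs/chatbot/history/system_prompt/web_port are in scope.
mgr = GradioMultiuserManagerForPersistentClasses()
key = "user_123->多智能体终端"                      # illustrative session key
if not mgr.already_alive(key):
    instance = mgr.set(key, EchoDemo(llm_kwargs, plugin_kwargs,
                                     chatbot, history, system_prompt, web_port))
else:
    instance = mgr.get(key)                        # resume the live instance
```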
crazy_functions/agent_fns/pipe.py ADDED
@@ -0,0 +1,194 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from toolbox import get_log_folder, update_ui, gen_time_str, get_conf, promote_file_to_downloadzone
2
+ from crazy_functions.agent_fns.watchdog import WatchDog
3
+ import time, os
4
+
5
+ class PipeCom:
6
+ def __init__(self, cmd, content) -> None:
7
+ self.cmd = cmd
8
+ self.content = content
9
+
10
+
11
+ class PluginMultiprocessManager:
12
+ def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
13
+ # ⭐ run in main process
14
+ self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
15
+ self.previous_work_dir_files = {}
16
+ self.llm_kwargs = llm_kwargs
17
+ self.plugin_kwargs = plugin_kwargs
18
+ self.chatbot = chatbot
19
+ self.history = history
20
+ self.system_prompt = system_prompt
21
+ # self.web_port = web_port
22
+ self.alive = True
23
+ self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
24
+ self.last_user_input = ""
25
+ # create a thread to monitor self.heartbeat, terminate the instance if no heartbeat for a long time
26
+ timeout_seconds = 5 * 60
27
+ self.heartbeat_watchdog = WatchDog(timeout=timeout_seconds, bark_fn=self.terminate, interval=5)
28
+ self.heartbeat_watchdog.begin_watch()
29
+
30
+ def feed_heartbeat_watchdog(self):
31
+ # feed this `dog`, so the dog will not `bark` (bark_fn will terminate the instance)
32
+ self.heartbeat_watchdog.feed()
33
+
34
+ def is_alive(self):
35
+ return self.alive
36
+
37
+ def launch_subprocess_with_pipe(self):
38
+ # ⭐ run in main process
39
+ from multiprocessing import Process, Pipe
40
+
41
+ parent_conn, child_conn = Pipe()
42
+ self.p = Process(target=self.subprocess_worker, args=(child_conn,))
43
+ self.p.daemon = True
44
+ self.p.start()
45
+ return parent_conn
46
+
47
+ def terminate(self):
48
+ self.p.terminate()
49
+ self.alive = False
50
+ print("[debug] instance terminated")
51
+
52
+ def subprocess_worker(self, child_conn):
53
+ # ⭐⭐ run in subprocess
54
+ raise NotImplementedError
55
+
56
+ def send_command(self, cmd):
57
+ # ⭐ run in main process
58
+ repeated = False
59
+ if cmd == self.last_user_input:
60
+ repeated = True
61
+ cmd = ""
62
+ else:
63
+ self.last_user_input = cmd
64
+ self.parent_conn.send(PipeCom("user_input", cmd))
65
+ return repeated, cmd
66
+
67
+ def immediate_showoff_when_possible(self, fp):
68
+ # ⭐ 主进程
69
+ # 获取fp的拓展名
70
+ file_type = fp.split('.')[-1]
71
+ # 如果是文本文件, 则直接显示文本内容
72
+ if file_type.lower() in ['png', 'jpg']:
73
+ image_path = os.path.abspath(fp)
74
+ self.chatbot.append([
75
+ '检测到新生图像:',
76
+ f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
77
+ ])
78
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
79
+
80
+ def overwatch_workdir_file_change(self):
81
+ # ⭐ 主进程 Docker 外挂文件夹监控
82
+ path_to_overwatch = self.autogen_work_dir
83
+ change_list = []
84
+ # 扫描路径下的所有文件, 并与self.previous_work_dir_files中所记录的文件进行对比,
85
+ # 如果有新文件出现,或者文件的修改时间发生变化,则更新self.previous_work_dir_files中的记录,
86
+ # 把新文件和发生变化的文件的路径记录到 change_list 中
87
+ for root, dirs, files in os.walk(path_to_overwatch):
88
+ for file in files:
89
+ file_path = os.path.join(root, file)
90
+ if file_path not in self.previous_work_dir_files.keys():
91
+ last_modified_time = os.stat(file_path).st_mtime
92
+ self.previous_work_dir_files.update({file_path: last_modified_time})
93
+ change_list.append(file_path)
94
+ else:
95
+ last_modified_time = os.stat(file_path).st_mtime
96
+ if last_modified_time != self.previous_work_dir_files[file_path]:
97
+ self.previous_work_dir_files[file_path] = last_modified_time
98
+ change_list.append(file_path)
99
+ if len(change_list) > 0:
100
+ file_links = ""
101
+ for f in change_list:
102
+ res = promote_file_to_downloadzone(f)
103
+ file_links += f'<br/><a href="file={res}" target="_blank">{res}</a>'
104
+ yield from self.immediate_showoff_when_possible(f)
105
+
106
+ self.chatbot.append(['检测到新生文档.', f'文档清单如下: {file_links}'])
107
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
108
+ return change_list
109
+
110
+
111
+ def main_process_ui_control(self, txt, create_or_resume) -> str:
112
+ # ⭐ 主进程
113
+ if create_or_resume == 'create':
114
+ self.cnt = 1
115
+ self.parent_conn = self.launch_subprocess_with_pipe() # ⭐⭐⭐
116
+ repeated, cmd_to_autogen = self.send_command(txt)
117
+ if txt == 'exit':
118
+ self.chatbot.append([f"结束", "结束信号已明确,终止AutoGen程序。"])
119
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
120
+ self.terminate()
121
+ return "terminate"
122
+
123
+ # patience = 10
124
+
125
+ while True:
126
+ time.sleep(0.5)
127
+ if not self.alive:
128
+ # the heartbeat watchdog might have it killed
129
+ self.terminate()
130
+ return "terminate"
131
+ if self.parent_conn.poll():
132
+ self.feed_heartbeat_watchdog()
133
+ if "[GPT-Academic] 等待中" in self.chatbot[-1][-1]:
134
+ self.chatbot.pop(-1) # remove the last line
135
+ if "等待您的进一步指令" in self.chatbot[-1][-1]:
136
+ self.chatbot.pop(-1) # remove the last line
137
+ if '[GPT-Academic] 等待中' in self.chatbot[-1][-1]:
138
+ self.chatbot.pop(-1) # remove the last line
139
+ msg = self.parent_conn.recv() # PipeCom
140
+ if msg.cmd == "done":
141
+ self.chatbot.append([f"结束", msg.content])
142
+ self.cnt += 1
143
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
144
+ self.terminate()
145
+ break
146
+ if msg.cmd == "show":
147
+ yield from self.overwatch_workdir_file_change()
148
+ notice = ""
149
+ if repeated: notice = "(自动忽略重复的输入)"
150
+ self.chatbot.append([f"运行阶段-{self.cnt}(上次用户反馈输入为: 「{cmd_to_autogen}」{notice}", msg.content])
151
+ self.cnt += 1
152
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
153
+ if msg.cmd == "interact":
154
+ yield from self.overwatch_workdir_file_change()
155
+ self.chatbot.append([f"程序抵达用户反馈节点.", msg.content +
156
+ "\n\n等待您的进一步指令." +
157
+ "\n\n(1) 一般情况下您不需要说什么, 清空输入区, 然后直接点击“提交”以继续. " +
158
+ "\n\n(2) 如果您需要补充些什么, 输入要反馈的内容, 直接点击“提交”以继续. " +
159
+ "\n\n(3) 如果您想终止程序, 输入exit, 直接点击“提交”以终止AutoGen并解锁. "
160
+ ])
161
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
162
+ # do not terminate here, leave the subprocess_worker instance alive
163
+ return "wait_feedback"
164
+ else:
165
+ self.feed_heartbeat_watchdog()
166
+ if '[GPT-Academic] 等待中' not in self.chatbot[-1][-1]:
167
+ # begin_waiting_time = time.time()
168
+ self.chatbot.append(["[GPT-Academic] 等待AutoGen执行结果 ...", "[GPT-Academic] 等待中"])
169
+ self.chatbot[-1] = [self.chatbot[-1][0], self.chatbot[-1][1].replace("[GPT-Academic] 等待中", "[GPT-Academic] 等待中.")]
170
+ yield from update_ui(chatbot=self.chatbot, history=self.history)
171
+ # if time.time() - begin_waiting_time > patience:
172
+ # self.chatbot.append([f"结束", "等待超时, 终止AutoGen程序。"])
173
+ # yield from update_ui(chatbot=self.chatbot, history=self.history)
174
+ # self.terminate()
175
+ # return "terminate"
176
+
177
+ self.terminate()
178
+ return "terminate"
179
+
180
+ def subprocess_worker_wait_user_feedback(self, wait_msg="wait user feedback"):
181
+ # ⭐⭐ run in subprocess
182
+ patience = 5 * 60
183
+ begin_waiting_time = time.time()
184
+ self.child_conn.send(PipeCom("interact", wait_msg))
185
+ while True:
186
+ time.sleep(0.5)
187
+ if self.child_conn.poll():
188
+ wait_success = True
189
+ break
190
+ if time.time() - begin_waiting_time > patience:
191
+ self.child_conn.send(PipeCom("done", ""))
192
+ wait_success = False
193
+ break
194
+ return wait_success
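The whole class reduces to a four-command pipe protocol: the main process sends `user_input`; the subprocess answers with `show` (display text), `interact` (block for user feedback), or `done` (finish). A hedged sketch of how a plugin entrypoint would drive it, using the `EchoDemo` worker from echo_agent.py (the function name is a placeholder; compare the real caller in crazy_functions/多智能体.py):

```python
# Sketch only -- argument names are the standard plugin-entrypoint arguments.
def 示例插件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    executor = EchoDemo(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
    state = yield from executor.main_process_ui_control(txt, create_or_resume="create")
    # "wait_feedback" -> keep the instance alive for the next user submit ("resume");
    # "terminate"     -> the subprocess has been shut down.
```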
crazy_functions/agent_fns/watchdog.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import threading, time
2
+
3
+ class WatchDog():
4
+ def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
5
+ self.last_feed = None
6
+ self.timeout = timeout
7
+ self.bark_fn = bark_fn
8
+ self.interval = interval
9
+ self.msg = msg
10
+ self.kill_dog = False
11
+
12
+ def watch(self):
13
+ while True:
14
+ if self.kill_dog: break
15
+ if time.time() - self.last_feed > self.timeout:
16
+ if len(self.msg) > 0: print(self.msg)
17
+ self.bark_fn()
18
+ break
19
+ time.sleep(self.interval)
20
+
21
+ def begin_watch(self):
22
+ self.last_feed = time.time()
23
+ th = threading.Thread(target=self.watch)
24
+ th.daemon = True
25
+ th.start()
26
+
27
+ def feed(self):
28
+ self.last_feed = time.time()
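A minimal usage sketch of the `WatchDog` above: `begin_watch` starts a daemon thread that calls `bark_fn` once `feed()` has not been called within `timeout` seconds (the values below are arbitrary):

```python
# Runnable sketch of the WatchDog contract.
import time

def on_timeout():
    print("no heartbeat -- terminating")

dog = WatchDog(timeout=5, bark_fn=on_timeout, interval=1)
dog.begin_watch()
for _ in range(3):
    time.sleep(2)
    dog.feed()            # heartbeat: keeps the dog from barking
dog.kill_dog = True       # stop the watch thread cleanly when done
```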
crazy_functions/crazy_utils.py CHANGED
@@ -5,7 +5,7 @@ import logging
5
 
6
  def input_clipping(inputs, history, max_token_limit):
7
  import numpy as np
8
- from request_llm.bridge_all import model_info
9
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
10
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
11
 
@@ -63,18 +63,21 @@ def request_gpt_model_in_new_thread_with_ui_alive(
63
  """
64
  import time
65
  from concurrent.futures import ThreadPoolExecutor
66
- from request_llm.bridge_all import predict_no_ui_long_connection
67
  # 用户反馈
68
  chatbot.append([inputs_show_user, ""])
69
  yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
70
  executor = ThreadPoolExecutor(max_workers=16)
71
  mutable = ["", time.time(), ""]
 
 
 
72
  def _req_gpt(inputs, history, sys_prompt):
73
  retry_op = retry_times_at_unknown_error
74
  exceeded_cnt = 0
75
  while True:
76
  # watchdog error
77
- if len(mutable) >= 2 and (time.time()-mutable[1]) > 5:
78
  raise RuntimeError("检测到程序终止。")
79
  try:
80
  # 【第一种情况】:顺利完成
@@ -174,11 +177,11 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
174
  """
175
  import time, random
176
  from concurrent.futures import ThreadPoolExecutor
177
- from request_llm.bridge_all import predict_no_ui_long_connection
178
  assert len(inputs_array) == len(history_array)
179
  assert len(inputs_array) == len(sys_prompt_array)
180
  if max_workers == -1: # 读取配置文件
181
- try: max_workers, = get_conf('DEFAULT_WORKER_NUM')
182
  except: max_workers = 8
183
  if max_workers <= 0: max_workers = 3
184
  # 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
@@ -193,19 +196,21 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
193
  # 跨线程传递
194
  mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
195
 
 
 
 
196
  # 子线程任务
197
  def _req_gpt(index, inputs, history, sys_prompt):
198
  gpt_say = ""
199
  retry_op = retry_times_at_unknown_error
200
  exceeded_cnt = 0
201
  mutable[index][2] = "执行中"
 
202
  while True:
203
  # watchdog error
204
- if len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > 5:
205
- raise RuntimeError("检测到程序终止。")
206
  try:
207
  # 【第一种情况】:顺利完成
208
- # time.sleep(10); raise RuntimeError("测试")
209
  gpt_say = predict_no_ui_long_connection(
210
  inputs=inputs, llm_kwargs=llm_kwargs, history=history,
211
  sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
@@ -213,7 +218,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
213
  mutable[index][2] = "已成功"
214
  return gpt_say
215
  except ConnectionAbortedError as token_exceeded_error:
216
- # 【第二种情况】:Token溢出,
217
  if handle_token_exceed:
218
  exceeded_cnt += 1
219
  # 【选择处理】 尝试计算比例,尽可能多地保留文本
@@ -234,6 +239,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
234
  return gpt_say # 放弃
235
  except:
236
  # 【第三种情况】:其他错误
 
237
  tb_str = '```\n' + trimmed_format_exc() + '```'
238
  print(tb_str)
239
  gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
@@ -250,6 +256,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
250
  for i in range(wait):
251
  mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
252
  # 开始重试
 
253
  mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
254
  continue # 返回重试
255
  else:
@@ -275,7 +282,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
275
# 在前端打印些好玩的东西
276
  for thread_index, _ in enumerate(worker_done):
277
  print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
278
- replace('\n', '').replace('```', '...').replace(
279
  ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
280
  observe_win.append(print_something_really_funny)
281
  # 在前端打印些好玩的东西
@@ -301,7 +308,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
301
  gpt_res = f.result()
302
  chatbot.append([inputs_show_user, gpt_res])
303
  yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
304
- time.sleep(0.3)
305
  return gpt_response_collection
306
 
307
 
@@ -596,7 +603,7 @@ def get_files_from_everything(txt, type): # type='.md'
596
  import requests
597
  from toolbox import get_conf
598
  from toolbox import get_log_folder, gen_time_str
599
- proxies, = get_conf('proxies')
600
  try:
601
  r = requests.get(txt, proxies=proxies)
602
  except:
@@ -715,8 +722,10 @@ class nougat_interface():
715
 
716
  def nougat_with_timeout(self, command, cwd, timeout=3600):
717
  import subprocess
 
718
  logging.info(f'正在执行命令 {command}')
719
- process = subprocess.Popen(command, shell=True, cwd=cwd)
 
720
  try:
721
  stdout, stderr = process.communicate(timeout=timeout)
722
  except subprocess.TimeoutExpired:
@@ -761,54 +770,6 @@ def try_install_deps(deps, reload_m=[]):
761
  importlib.reload(__import__(m))
762
 
763
 
764
- HTML_CSS = """
765
- .row {
766
- display: flex;
767
- flex-wrap: wrap;
768
- }
769
- .column {
770
- flex: 1;
771
- padding: 10px;
772
- }
773
- .table-header {
774
- font-weight: bold;
775
- border-bottom: 1px solid black;
776
- }
777
- .table-row {
778
- border-bottom: 1px solid lightgray;
779
- }
780
- .table-cell {
781
- padding: 5px;
782
- }
783
- """
784
-
785
- TABLE_CSS = """
786
- <div class="row table-row">
787
- <div class="column table-cell">REPLACE_A</div>
788
- <div class="column table-cell">REPLACE_B</div>
789
- </div>
790
- """
791
-
792
- class construct_html():
793
- def __init__(self) -> None:
794
- self.css = HTML_CSS
795
- self.html_string = f'<!DOCTYPE html><head><meta charset="utf-8"><title>翻译结果</title><style>{self.css}</style></head>'
796
-
797
-
798
- def add_row(self, a, b):
799
- tmp = TABLE_CSS
800
- from toolbox import markdown_convertion
801
- tmp = tmp.replace('REPLACE_A', markdown_convertion(a))
802
- tmp = tmp.replace('REPLACE_B', markdown_convertion(b))
803
- self.html_string += tmp
804
-
805
-
806
- def save_file(self, file_name):
807
- with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
808
- f.write(self.html_string.encode('utf-8', 'ignore').decode())
809
- return os.path.join(get_log_folder(), file_name)
810
-
811
-
812
  def get_plugin_arg(plugin_kwargs, key, default):
813
  # 如果参数是空的
814
  if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
 
5
 
6
  def input_clipping(inputs, history, max_token_limit):
7
  import numpy as np
8
+ from request_llms.bridge_all import model_info
9
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
10
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
11
 
 
63
  """
64
  import time
65
  from concurrent.futures import ThreadPoolExecutor
66
+ from request_llms.bridge_all import predict_no_ui_long_connection
67
  # 用户反馈
68
  chatbot.append([inputs_show_user, ""])
69
  yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
70
  executor = ThreadPoolExecutor(max_workers=16)
71
  mutable = ["", time.time(), ""]
72
+ # 看门狗耐心
73
+ watch_dog_patience = 5
74
+ # 请求任务
75
  def _req_gpt(inputs, history, sys_prompt):
76
  retry_op = retry_times_at_unknown_error
77
  exceeded_cnt = 0
78
  while True:
79
  # watchdog error
80
+ if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
81
  raise RuntimeError("检测到程序终止。")
82
  try:
83
  # 【第一种情况】:顺利完成
 
177
  """
178
  import time, random
179
  from concurrent.futures import ThreadPoolExecutor
180
+ from request_llms.bridge_all import predict_no_ui_long_connection
181
  assert len(inputs_array) == len(history_array)
182
  assert len(inputs_array) == len(sys_prompt_array)
183
  if max_workers == -1: # 读取配置文件
184
+ try: max_workers = get_conf('DEFAULT_WORKER_NUM')
185
  except: max_workers = 8
186
  if max_workers <= 0: max_workers = 3
187
  # 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
 
196
  # 跨线程传递
197
  mutable = [["", time.time(), "等待中"] for _ in range(n_frag)]
198
 
199
+ # 看门狗耐心
200
+ watch_dog_patience = 5
201
+
202
  # 子线程任务
203
  def _req_gpt(index, inputs, history, sys_prompt):
204
  gpt_say = ""
205
  retry_op = retry_times_at_unknown_error
206
  exceeded_cnt = 0
207
  mutable[index][2] = "执行中"
208
+ detect_timeout = lambda: len(mutable[index]) >= 2 and (time.time()-mutable[index][1]) > watch_dog_patience
209
  while True:
210
  # watchdog error
211
+ if detect_timeout(): raise RuntimeError("检测到程序终止。")
 
212
  try:
213
  # 【第一种情况】:顺利完成
 
214
  gpt_say = predict_no_ui_long_connection(
215
  inputs=inputs, llm_kwargs=llm_kwargs, history=history,
216
  sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
 
218
  mutable[index][2] = "已成功"
219
  return gpt_say
220
  except ConnectionAbortedError as token_exceeded_error:
221
+ # 【第二种情况】:Token溢出
222
  if handle_token_exceed:
223
  exceeded_cnt += 1
224
  # 【选择处理】 尝试计算比例,尽可能多地保留文本
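Note: on `ConnectionAbortedError` (token overflow) with `handle_token_exceed` enabled, the request is retried with a shrunken input instead of failing outright. The real code derives the keep-ratio from the model's `max_token`; the sketch below keeps a shrinking tail with an assumed ratio, purely to illustrate the retry shape:

```python
def shrink_inputs(inputs, history, exceeded_cnt):
    # keep a smaller tail of the text on each successive overflow;
    # the plugin derives the keep-ratio from the model's max_token instead
    keep_ratio = 0.7 ** exceeded_cnt
    n_keep = max(int(len(inputs) * keep_ratio), 1)
    return inputs[-n_keep:], [h[-n_keep:] for h in history]

inputs, history = "x" * 1000, ["y" * 1000]
for exceeded_cnt in (1, 2, 3):
    inputs, history = shrink_inputs(inputs, history, exceeded_cnt)
    print(len(inputs))  # 700, 343, 117
```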
 
239
  return gpt_say # 放弃
240
  except:
241
  # 【第三种情况】:其他错误
242
+ if detect_timeout(): raise RuntimeError("检测到程序终止。")
243
  tb_str = '```\n' + trimmed_format_exc() + '```'
244
  print(tb_str)
245
  gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
 
256
  for i in range(wait):
257
  mutable[index][2] = f"{fail_info}等待重试 {wait-i}"; time.sleep(1)
258
  # 开始重试
259
+ if detect_timeout(): raise RuntimeError("检测到程序终止。")
260
  mutable[index][2] = f"重试中 {retry_times_at_unknown_error-retry_op}/{retry_times_at_unknown_error}"
261
  continue # 返回重试
262
  else:
 
282
  # 在前端打印些好玩的东西
283
  for thread_index, _ in enumerate(worker_done):
284
  print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
285
+ replace('\n', '').replace('`', '.').replace(
286
  ' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
287
  observe_win.append(print_something_really_funny)
288
  # 在前端打印些好玩的东西
 
308
  gpt_res = f.result()
309
  chatbot.append([inputs_show_user, gpt_res])
310
  yield from update_ui(chatbot=chatbot, history=[]) # 刷新界面
311
+ time.sleep(0.5)
312
  return gpt_response_collection
313
 
314
 
 
603
  import requests
604
  from toolbox import get_conf
605
  from toolbox import get_log_folder, gen_time_str
606
+ proxies = get_conf('proxies')
607
  try:
608
  r = requests.get(txt, proxies=proxies)
609
  except:
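Note: a change that recurs across this commit — `proxies, = get_conf('proxies')` becomes `proxies = get_conf('proxies')`, i.e. `get_conf` now appears to return the bare value when a single key is requested, instead of a 1-tuple. A backward-compatible sketch of such a helper (the `CONFIG` store is a stand-in):

```python
CONFIG = {'proxies': None, 'GROBID_URLS': [], 'ALLOW_RESET_CONFIG': False}  # stand-in store

def get_conf(*keys):
    # one key -> bare value; several keys -> tuple.
    # This removes the error-prone trailing-comma unpacking at call sites.
    values = tuple(CONFIG[k] for k in keys)
    return values[0] if len(values) == 1 else values

proxies = get_conf('proxies')                                # single value, no unpacking
urls, allow = get_conf('GROBID_URLS', 'ALLOW_RESET_CONFIG')  # several keys -> tuple
```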
 
722
 
723
  def nougat_with_timeout(self, command, cwd, timeout=3600):
724
  import subprocess
725
+ from toolbox import ProxyNetworkActivate
726
  logging.info(f'正在执行命令 {command}')
727
+ with ProxyNetworkActivate("Nougat_Download"):
728
+ process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
729
  try:
730
  stdout, stderr = process.communicate(timeout=timeout)
731
  except subprocess.TimeoutExpired:
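Note: `nougat_with_timeout` now launches the child inside `ProxyNetworkActivate("Nougat_Download")`, presumably a context manager that exports proxy variables into `os.environ`, which is why the child is started with `env=os.environ`. The timeout handling itself follows the standard `communicate`/`kill` pattern; a minimal sketch:

```python
import os, subprocess

def run_with_timeout(command, cwd, timeout=3600):
    # start the child, wait up to `timeout` seconds, kill it otherwise
    process = subprocess.Popen(command, shell=True, cwd=cwd, env=os.environ)
    try:
        process.communicate(timeout=timeout)
        return process.returncode == 0
    except subprocess.TimeoutExpired:
        process.kill()
        process.communicate()  # reap the killed child
        return False
```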
 
770
  importlib.reload(__import__(m))
771
 
772
773
  def get_plugin_arg(plugin_kwargs, key, default):
774
  # 如果参数是空的
775
  if (key in plugin_kwargs) and (plugin_kwargs[key] == ""): plugin_kwargs.pop(key)
crazy_functions/latex_fns/latex_actions.py CHANGED
@@ -1,9 +1,10 @@
1
  from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
2
- from toolbox import zip_folder, objdump, objload, promote_file_to_downloadzone
3
  from .latex_toolbox import PRESERVE, TRANSFORM
4
  from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
5
  from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
6
  from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
 
7
 
8
  import os, shutil
9
  import re
@@ -90,7 +91,18 @@ class LatexPaperSplit():
90
  "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
91
  # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
92
  self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
93
-
94
 
95
  def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
96
  """
@@ -165,7 +177,7 @@ class LatexPaperFileGroup():
165
  self.sp_file_tag = []
166
 
167
  # count_token
168
- from request_llm.bridge_all import model_info
169
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
170
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
171
  self.get_token_num = get_token_num
@@ -234,8 +246,8 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
234
  chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
235
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
236
  lps = LatexPaperSplit()
 
237
  res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
238
-
239
  # <-------- 拆分过长的latex片段 ---------->
240
  pfg = LatexPaperFileGroup()
241
  for index, r in enumerate(res):
@@ -256,12 +268,19 @@ def Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin
256
 
257
  else:
258
  # <-------- gpt 多线程请求 ---------->
259
  gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
260
  inputs_array=inputs_array,
261
  inputs_show_user_array=inputs_show_user_array,
262
  llm_kwargs=llm_kwargs,
263
  chatbot=chatbot,
264
- history_array=[[""] for _ in range(n_split)],
265
  sys_prompt_array=sys_prompt_array,
266
  # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
267
  scroller_max_len = 40
@@ -423,7 +442,7 @@ def write_html(sp_file_contents, sp_file_result, chatbot, project_folder):
423
  # write html
424
  try:
425
  import shutil
426
- from ..crazy_utils import construct_html
427
  from toolbox import gen_time_str
428
  ch = construct_html()
429
  orig = ""
 
1
  from toolbox import update_ui, update_ui_lastest_msg, get_log_folder
2
+ from toolbox import get_conf, objdump, objload, promote_file_to_downloadzone
3
  from .latex_toolbox import PRESERVE, TRANSFORM
4
  from .latex_toolbox import set_forbidden_text, set_forbidden_text_begin_end, set_forbidden_text_careful_brace
5
  from .latex_toolbox import reverse_forbidden_text_careful_brace, reverse_forbidden_text, convert_to_linklist, post_process
6
  from .latex_toolbox import fix_content, find_main_tex_file, merge_tex_files, compile_latex_with_timeout
7
+ from .latex_toolbox import find_title_and_abs
8
 
9
  import os, shutil
10
  import re
 
91
  "项目Github地址 \\url{https://github.com/binary-husky/gpt_academic/}。"
92
  # 请您不要删除或修改这行警告,除非您是论文的原作者(如果您是论文原作者,欢迎加README中的QQ联系开发者)
93
  self.msg_declare = "为了防止大语言模型的意外谬误产生扩散影响,禁止移除或修改此警告。}}\\\\"
94
+ self.title = "unknown"
95
+ self.abstract = "unknown"
96
+
97
+ def read_title_and_abstract(self, txt):
98
+ try:
99
+ title, abstract = find_title_and_abs(txt)
100
+ if title is not None:
101
+ self.title = title.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '')
102
+ if abstract is not None:
103
+ self.abstract = abstract.replace('\n', ' ').replace('\\\\', ' ').replace(' ', '').replace(' ', '')
104
+ except:
105
+ pass
106
 
107
  def merge_result(self, arr, mode, msg, buggy_lines=[], buggy_line_surgery_n_lines=10):
108
  """
 
177
  self.sp_file_tag = []
178
 
179
  # count_token
180
+ from request_llms.bridge_all import model_info
181
  enc = model_info["gpt-3.5-turbo"]['tokenizer']
182
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
183
  self.get_token_num = get_token_num
 
246
  chatbot.append((f"Latex文件融合完成", f'[Local Message] 正在精细切分latex文件,这需要一段时间计算,文档越长耗时越长,请耐心等待。'))
247
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
248
  lps = LatexPaperSplit()
249
+ lps.read_title_and_abstract(merged_content)
250
  res = lps.split(merged_content, project_folder, opts) # 消耗时间的函数
 
251
  # <-------- 拆分过长的latex片段 ---------->
252
  pfg = LatexPaperFileGroup()
253
  for index, r in enumerate(res):
 
268
 
269
  else:
270
  # <-------- gpt 多线程请求 ---------->
271
+ history_array = [[""] for _ in range(n_split)]
272
+ # LATEX_EXPERIMENTAL, = get_conf('LATEX_EXPERIMENTAL')
273
+ # if LATEX_EXPERIMENTAL:
274
+ # paper_meta = f"The paper you processing is `{lps.title}`, a part of the abstraction is `{lps.abstract}`"
275
+ # paper_meta_max_len = 888
276
+ # history_array = [[ paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"] for _ in range(n_split)]
277
+
278
  gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
279
  inputs_array=inputs_array,
280
  inputs_show_user_array=inputs_show_user_array,
281
  llm_kwargs=llm_kwargs,
282
  chatbot=chatbot,
283
+ history_array=history_array,
284
  sys_prompt_array=sys_prompt_array,
285
  # max_workers=5, # 并行任务数量限制, 最多同时执行5个, 其他的排队等待
286
  scroller_max_len = 40
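Note: the new `history_array` plumbing lets each translation thread be primed with the paper's title and abstract — the `LATEX_EXPERIMENTAL` branch above is still commented out in this commit. A runnable sketch of the primer it would build, with stand-in values for `lps.title`, `lps.abstract` and `n_split`:

```python
# stand-ins for lps.title / lps.abstract / n_split from the surrounding code
title, abstract, n_split = "An Example Paper", "We study ...", 3

paper_meta = f"The paper you are processing is `{title}`, part of its abstract: `{abstract}`"
paper_meta_max_len = 888
# each worker starts with one primer exchange already in its history
history_array = [
    [paper_meta[:paper_meta_max_len] + '...', "Understand, what should I do?"]
    for _ in range(n_split)
]
print(history_array[0][0][:60])
```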
 
442
  # write html
443
  try:
444
  import shutil
445
+ from crazy_functions.pdf_fns.report_gen_html import construct_html
446
  from toolbox import gen_time_str
447
  ch = construct_html()
448
  orig = ""
crazy_functions/latex_fns/latex_toolbox.py CHANGED
@@ -308,13 +308,51 @@ def merge_tex_files_(project_foler, main_file, mode):
308
  fp = os.path.join(project_foler, f)
309
  fp_ = find_tex_file_ignore_case(fp)
310
  if fp_:
311
- with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
 
 
 
312
  else:
313
  raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
314
  c = merge_tex_files_(project_foler, c, mode)
315
  main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
316
  return main_file
317
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
318
  def merge_tex_files(project_foler, main_file, mode):
319
  """
320
  Merge Tex project recursively
@@ -342,10 +380,41 @@ def merge_tex_files(project_foler, main_file, mode):
342
  pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
343
  match_opt1 = pattern_opt1.search(main_file)
344
  match_opt2 = pattern_opt2.search(main_file)
345
  assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
346
  return main_file
347
 
348
349
  """
350
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
351
  Post process
 
308
  fp = os.path.join(project_foler, f)
309
  fp_ = find_tex_file_ignore_case(fp)
310
  if fp_:
311
+ try:
312
+ with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
313
+ except:
314
+ c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
315
  else:
316
  raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
317
  c = merge_tex_files_(project_foler, c, mode)
318
  main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
319
  return main_file
320
 
321
+
322
+ def find_title_and_abs(main_file):
323
+
324
+ def extract_abstract_1(text):
325
+ pattern = r"\\abstract\{(.*?)\}"
326
+ match = re.search(pattern, text, re.DOTALL)
327
+ if match:
328
+ return match.group(1)
329
+ else:
330
+ return None
331
+
332
+ def extract_abstract_2(text):
333
+ pattern = r"\\begin\{abstract\}(.*?)\\end\{abstract\}"
334
+ match = re.search(pattern, text, re.DOTALL)
335
+ if match:
336
+ return match.group(1)
337
+ else:
338
+ return None
339
+
340
+ def extract_title(string):
341
+ pattern = r"\\title\{(.*?)\}"
342
+ match = re.search(pattern, string, re.DOTALL)
343
+
344
+ if match:
345
+ return match.group(1)
346
+ else:
347
+ return None
348
+
349
+ abstract = extract_abstract_1(main_file)
350
+ if abstract is None:
351
+ abstract = extract_abstract_2(main_file)
352
+ title = extract_title(main_file)
353
+ return title, abstract
354
+
355
+
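Note: `find_title_and_abs` relies on non-greedy `re.DOTALL` matches, which stop at the first closing brace — a `\title{...}` containing nested braces is therefore truncated. A quick demonstration:

```python
import re

tex = r"""
\title{A Study of \emph{Things}}
\begin{abstract}
We do things.
\end{abstract}
"""
title = re.search(r"\\title\{(.*?)\}", tex, re.DOTALL)
abstract = re.search(r"\\begin\{abstract\}(.*?)\\end\{abstract\}", tex, re.DOTALL)
print(title.group(1))              # 'A Study of \emph{Things' -- cut at the first '}'
print(abstract.group(1).strip())   # 'We do things.'
```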
356
  def merge_tex_files(project_foler, main_file, mode):
357
  """
358
  Merge Tex project recursively
 
380
  pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
381
  match_opt1 = pattern_opt1.search(main_file)
382
  match_opt2 = pattern_opt2.search(main_file)
383
+ if (match_opt1 is None) and (match_opt2 is None):
384
+ # "Cannot find paper abstract section!"
385
+ main_file = insert_abstract(main_file)
386
+ match_opt1 = pattern_opt1.search(main_file)
387
+ match_opt2 = pattern_opt2.search(main_file)
388
  assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
389
  return main_file
390
 
391
 
392
+ insert_missing_abs_str = r"""
393
+ \begin{abstract}
394
+ The GPT-Academic program cannot find abstract section in this paper.
395
+ \end{abstract}
396
+ """
397
+
398
+ def insert_abstract(tex_content):
399
+ if "\\maketitle" in tex_content:
400
+ # find the position of "\maketitle"
401
+ find_index = tex_content.index("\\maketitle")
402
+ # find the nearest ending line
403
+ end_line_index = tex_content.find("\n", find_index)
404
+ # insert "abs_str" on the next line
405
+ modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
406
+ return modified_tex
407
+ elif r"\begin{document}" in tex_content:
408
+ # find the position of "\begin{document}"
409
+ find_index = tex_content.index(r"\begin{document}")
410
+ # find the nearest ending line
411
+ end_line_index = tex_content.find("\n", find_index)
412
+ # insert "abs_str" on the next line
413
+ modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
414
+ return modified_tex
415
+ else:
416
+ return tex_content
417
+
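Note: `insert_abstract` guarantees that the later abstract assertion can pass by splicing a placeholder right after `\maketitle`, or after `\begin{document}` as a fallback. The line-splice itself is simple; a standalone sketch:

```python
placeholder = ("\n\\begin{abstract}\n"
               "The GPT-Academic program cannot find abstract section in this paper.\n"
               "\\end{abstract}\n")

def insert_after_marker(tex, marker, payload):
    # insert payload on the line following the first occurrence of marker
    i = tex.index(marker)
    j = tex.find("\n", i)
    return tex[:j + 1] + "\n" + payload + "\n" + tex[j + 1:]

doc = "\\begin{document}\n\\maketitle\nBody text.\n\\end{document}\n"
print(insert_after_marker(doc, "\\maketitle", placeholder))
```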
418
  """
419
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
420
  Post process
crazy_functions/live_audio/aliyunASR.py CHANGED
@@ -1,4 +1,106 @@
1
- import time, logging, json
2
 
3
 
4
  class AliyunASR():
@@ -66,12 +168,22 @@ class AliyunASR():
66
  on_close=self.test_on_close,
67
  callback_args=[uuid.hex]
68
  )
69
-
70
  r = sr.start(aformat="pcm",
 
71
  enable_intermediate_result=True,
72
  enable_punctuation_prediction=True,
73
  enable_inverse_text_normalization=True)
74
75
  while not self.stop:
76
  # time.sleep(self.capture_interval)
77
  audio = rad.read(uuid.hex)
@@ -79,12 +191,32 @@ class AliyunASR():
79
  # convert to pcm file
80
  temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
81
  dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000
82
- io.wavfile.write(temp_file, NEW_SAMPLERATE, dsdata)
83
  # read pcm binary
84
  with open(temp_file, "rb") as f: data = f.read()
85
- # print('audio len:', len(audio), '\t ds len:', len(dsdata), '\t need n send:', len(data)//640)
86
- slices = zip(*(iter(data),) * 640) # 640个字节为一组
87
- for i in slices: sr.send_audio(bytes(i))
88
  else:
89
  time.sleep(0.1)
90
 
 
1
+ import time, logging, json, sys, struct
2
+ import numpy as np
3
+ from scipy.io.wavfile import WAVE_FORMAT
4
+
5
+ def write_numpy_to_wave(filename, rate, data, add_header=False):
6
+ """
7
+ Write a NumPy array as a WAV file.
8
+ """
9
+ def _array_tofile(fid, data):
10
+ # ravel gives a c-contiguous buffer
11
+ fid.write(data.ravel().view('b').data)
12
+
13
+ if hasattr(filename, 'write'):
14
+ fid = filename
15
+ else:
16
+ fid = open(filename, 'wb')
17
+
18
+ fs = rate
19
+
20
+ try:
21
+ dkind = data.dtype.kind
22
+ if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and
23
+ data.dtype.itemsize == 1)):
24
+ raise ValueError("Unsupported data type '%s'" % data.dtype)
25
+
26
+ header_data = b''
27
+
28
+ header_data += b'RIFF'
29
+ header_data += b'\x00\x00\x00\x00'
30
+ header_data += b'WAVE'
31
+
32
+ # fmt chunk
33
+ header_data += b'fmt '
34
+ if dkind == 'f':
35
+ format_tag = WAVE_FORMAT.IEEE_FLOAT
36
+ else:
37
+ format_tag = WAVE_FORMAT.PCM
38
+ if data.ndim == 1:
39
+ channels = 1
40
+ else:
41
+ channels = data.shape[1]
42
+ bit_depth = data.dtype.itemsize * 8
43
+ bytes_per_second = fs*(bit_depth // 8)*channels
44
+ block_align = channels * (bit_depth // 8)
45
+
46
+ fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs,
47
+ bytes_per_second, block_align, bit_depth)
48
+ if not (dkind == 'i' or dkind == 'u'):
49
+ # add cbSize field for non-PCM files
50
+ fmt_chunk_data += b'\x00\x00'
51
+
52
+ header_data += struct.pack('<I', len(fmt_chunk_data))
53
+ header_data += fmt_chunk_data
54
+
55
+ # fact chunk (non-PCM files)
56
+ if not (dkind == 'i' or dkind == 'u'):
57
+ header_data += b'fact'
58
+ header_data += struct.pack('<II', 4, data.shape[0])
59
+
60
+ # check data size (needs to be immediately before the data chunk)
61
+ if ((len(header_data)-4-4) + (4+4+data.nbytes)) > 0xFFFFFFFF:
62
+ raise ValueError("Data exceeds wave file size limit")
63
+ if add_header:
64
+ fid.write(header_data)
65
+ # data chunk
66
+ fid.write(b'data')
67
+ fid.write(struct.pack('<I', data.nbytes))
68
+ if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and
69
+ sys.byteorder == 'big'):
70
+ data = data.byteswap()
71
+ _array_tofile(fid, data)
72
+
73
+ if add_header:
74
+ # Determine file size and place it in correct
75
+ # position at start of the file.
76
+ size = fid.tell()
77
+ fid.seek(4)
78
+ fid.write(struct.pack('<I', size-8))
79
+
80
+ finally:
81
+ if not hasattr(filename, 'write'):
82
+ fid.close()
83
+ else:
84
+ fid.seek(0)
85
+
86
+ def is_speaker_speaking(vad, data, sample_rate):
87
+ # Function to detect if the speaker is speaking
88
+ # The WebRTC VAD only accepts 16-bit mono PCM audio,
89
+ # sampled at 8000, 16000, 32000 or 48000 Hz.
90
+ # A frame must be either 10, 20, or 30 ms in duration:
91
+ frame_duration = 30
92
+ n_bit_each = int(sample_rate * frame_duration / 1000)*2 # x2 because audio is 16 bit (2 bytes)
93
+ res_list = []
94
+ for t in range(len(data)):
95
+ if t!=0 and t % n_bit_each == 0:
96
+ res_list.append(vad.is_speech(data[t-n_bit_each:t], sample_rate))
97
+
98
+ info = ''.join(['^' if r else '.' for r in res_list])
99
+ info = info[:10]
100
+ if any(res_list):
101
+ return True, info
102
+ else:
103
+ return False, info
104
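Note: `is_speaker_speaking` feeds fixed 30 ms frames to WebRTC VAD, which only accepts 16-bit mono PCM at 8/16/32/48 kHz; at 16 kHz and 16 bits that is 960 bytes per frame (the `*2` in `n_bit_each` accounts for the 2-byte samples). Standalone usage (`pip install webrtcvad`):

```python
import webrtcvad

SAMPLE_RATE = 16000                                 # VAD accepts 8/16/32/48 kHz only
FRAME_MS = 30                                       # frames must be 10, 20 or 30 ms
FRAME_BYTES = SAMPLE_RATE * FRAME_MS // 1000 * 2    # x2: 16-bit samples

vad = webrtcvad.Vad(1)                              # aggressiveness 0 (lenient) .. 3 (strict)
pcm = b"\x00\x00" * (SAMPLE_RATE * FRAME_MS // 1000) * 10   # ten silent frames

flags = [vad.is_speech(pcm[i:i + FRAME_BYTES], SAMPLE_RATE)
         for i in range(0, len(pcm) - FRAME_BYTES + 1, FRAME_BYTES)]
print(''.join('^' if f else '.' for f in flags))    # '..........'
```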
 
105
 
106
  class AliyunASR():
 
168
  on_close=self.test_on_close,
169
  callback_args=[uuid.hex]
170
  )
171
+ timeout_limit_second = 20
172
  r = sr.start(aformat="pcm",
173
+ timeout=timeout_limit_second,
174
  enable_intermediate_result=True,
175
  enable_punctuation_prediction=True,
176
  enable_inverse_text_normalization=True)
177
 
178
+ import webrtcvad
179
+ vad = webrtcvad.Vad()
180
+ vad.set_mode(1)
181
+
182
+ is_previous_frame_transmitted = False # 上一帧是否有人说话
183
+ previous_frame_data = None
184
+ echo_cnt = 0 # 在没有声音之后,继续向服务器发送n次音频数据
185
+ echo_cnt_max = 4 # 在没有声音之后,继续向服务器发送n次音频数据
186
+ keep_alive_last_send_time = time.time()
187
  while not self.stop:
188
  # time.sleep(self.capture_interval)
189
  audio = rad.read(uuid.hex)
 
191
  # convert to pcm file
192
  temp_file = f'{temp_folder}/{uuid.hex}.pcm' #
193
  dsdata = change_sample_rate(audio, rad.rate, NEW_SAMPLERATE) # 48000 --> 16000
194
+ write_numpy_to_wave(temp_file, NEW_SAMPLERATE, dsdata)
195
  # read pcm binary
196
  with open(temp_file, "rb") as f: data = f.read()
197
+ is_speaking, info = is_speaker_speaking(vad, data, NEW_SAMPLERATE)
198
+
199
+ if is_speaking or echo_cnt > 0:
200
+ # 如果话筒激活 / 如果处于回声收尾阶段
201
+ echo_cnt -= 1
202
+ if not is_previous_frame_transmitted: # 上一帧没有人声,但是我们把上一帧同样加上
203
+ if previous_frame_data is not None: data = previous_frame_data + data
204
+ if is_speaking:
205
+ echo_cnt = echo_cnt_max
206
+ slices = zip(*(iter(data),) * 640) # 640个字节为一组
207
+ for i in slices: sr.send_audio(bytes(i))
208
+ keep_alive_last_send_time = time.time()
209
+ is_previous_frame_transmitted = True
210
+ else:
211
+ is_previous_frame_transmitted = False
212
+ echo_cnt = 0
213
+ # 保持链接激活,即使没有声音,也根据时间间隔,发送一些音频片段给服务器
214
+ if time.time() - keep_alive_last_send_time > timeout_limit_second/2:
215
+ slices = zip(*(iter(data),) * 640) # 640个字节为一组
216
+ for i in slices: sr.send_audio(bytes(i))
217
+ keep_alive_last_send_time = time.time()
218
+ is_previous_frame_transmitted = True
219
+ self.audio_shape = info
220
  else:
221
  time.sleep(0.1)
222
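Note: the sender still chops the PCM stream into 640-byte packets; at 16 kHz / 16-bit mono that is 20 ms of audio per `send_audio` call. The `zip(*(iter(data),) * 640)` idiom groups a byte stream into fixed-size tuples, silently dropping any trailing partial chunk:

```python
# 640 bytes of 16-bit mono PCM at 16 kHz = 640 / 2 / 16000 = 0.02 s of audio
data = bytes(range(256)) * 10        # 2560 dummy bytes

slices = zip(*(iter(data),) * 640)   # fixed-size grouping, remainder discarded
packets = [bytes(chunk) for chunk in slices]
print(len(packets), len(packets[0])) # 4 packets of 640 bytes each
```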
 
crazy_functions/live_audio/audio_io.py CHANGED
@@ -35,7 +35,7 @@ class RealtimeAudioDistribution():
35
  def read(self, uuid):
36
  if uuid in self.data:
37
  res = self.data.pop(uuid)
38
- print('\r read-', len(res), '-', max(res), end='', flush=True)
39
  else:
40
  res = None
41
  return res
 
35
  def read(self, uuid):
36
  if uuid in self.data:
37
  res = self.data.pop(uuid)
38
+ # print('\r read-', len(res), '-', max(res), end='', flush=True)
39
  else:
40
  res = None
41
  return res
crazy_functions/multi_stage/multi_stage_utils.py ADDED
@@ -0,0 +1,45 @@
1
+ from pydantic import BaseModel, Field
2
+ from typing import List
3
+ from toolbox import update_ui_lastest_msg, disable_auto_promotion
4
+ from request_llms.bridge_all import predict_no_ui_long_connection
5
+ from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
6
+ import time
7
+ import pickle
8
+
9
+ def have_any_recent_upload_files(chatbot):
10
+ _5min = 5 * 60
11
+ if not chatbot: return False # chatbot is None
12
+ most_recent_uploaded = chatbot._cookies.get("most_recent_uploaded", None)
13
+ if not most_recent_uploaded: return False # most_recent_uploaded is None
14
+ if time.time() - most_recent_uploaded["time"] < _5min: return True # most_recent_uploaded is new
15
+ else: return False # most_recent_uploaded is too old
16
+
17
+ class GptAcademicState():
18
+ def __init__(self):
19
+ self.reset()
20
+
21
+ def reset(self):
22
+ pass
23
+
24
+ def lock_plugin(self, chatbot):
25
+ chatbot._cookies['plugin_state'] = pickle.dumps(self)
26
+
27
+ def unlock_plugin(self, chatbot):
28
+ self.reset()
29
+ chatbot._cookies['plugin_state'] = pickle.dumps(self)
30
+
31
+ def set_state(self, chatbot, key, value):
32
+ setattr(self, key, value)
33
+ chatbot._cookies['plugin_state'] = pickle.dumps(self)
34
+
35
+ def get_state(chatbot, cls=None):
36
+ state = chatbot._cookies.get('plugin_state', None)
37
+ if state is not None: state = pickle.loads(state)
38
+ elif cls is not None: state = cls()
39
+ else: state = GptAcademicState()
40
+ state.chatbot = chatbot
41
+ return state
42
+
43
+ class GatherMaterials():
44
+ def __init__(self, materials) -> None:
45
+ materials = ['image', 'prompt']
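Note: `GptAcademicState` persists itself by pickling into the chatbot cookie, so a plugin can pause for user input and resume later with its state intact (`GatherMaterials` is clearly still a stub here — it discards its `materials` argument). A runnable sketch of the cookie round-trip, with a stand-in chatbot:

```python
import pickle

class FakeChatbot:                                   # stand-in for the gradio chatbot
    def __init__(self): self._cookies = {}

class PluginState:                                   # mirrors GptAcademicState above
    def __init__(self): self.step = 0
    def set_state(self, chatbot, key, value):
        setattr(self, key, value)
        chatbot._cookies['plugin_state'] = pickle.dumps(self)
    @staticmethod
    def get_state(chatbot):
        blob = chatbot._cookies.get('plugin_state')
        return pickle.loads(blob) if blob else PluginState()

chatbot = FakeChatbot()
state = PluginState.get_state(chatbot)               # fresh instance on the first call
state.set_state(chatbot, 'step', 1)                  # pickled into the cookie
print(PluginState.get_state(chatbot).step)           # 1, restored across calls
```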
crazy_functions/pdf_fns/parse_pdf.py CHANGED
@@ -14,7 +14,7 @@ import math
14
  class GROBID_OFFLINE_EXCEPTION(Exception): pass
15
 
16
  def get_avail_grobid_url():
17
- GROBID_URLS, = get_conf('GROBID_URLS')
18
  if len(GROBID_URLS) == 0: return None
19
  try:
20
  _grobid_url = random.choice(GROBID_URLS) # 随机负载均衡
@@ -73,7 +73,7 @@ def produce_report_markdown(gpt_response_collection, meta, paper_meta_info, chat
73
  return res_path
74
 
75
  def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
76
- from crazy_functions.crazy_utils import construct_html
77
  from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
78
  from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
79
  from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
@@ -82,7 +82,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
82
  # title
83
  title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n'
84
  # authors
85
- authors = article_dict.get('authors', '无法获取 authors'); prompt += f'authors:{authors}\n\n'
86
  # abstract
87
  abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n'
88
  # command
@@ -103,7 +103,7 @@ def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_fi
103
  inputs_show_user_array = []
104
 
105
  # get_token_num
106
- from request_llm.bridge_all import model_info
107
  enc = model_info[llm_kwargs['llm_model']]['tokenizer']
108
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
109
 
 
14
  class GROBID_OFFLINE_EXCEPTION(Exception): pass
15
 
16
  def get_avail_grobid_url():
17
+ GROBID_URLS = get_conf('GROBID_URLS')
18
  if len(GROBID_URLS) == 0: return None
19
  try:
20
  _grobid_url = random.choice(GROBID_URLS) # 随机负载均衡
 
73
  return res_path
74
 
75
  def translate_pdf(article_dict, llm_kwargs, chatbot, fp, generated_conclusion_files, TOKEN_LIMIT_PER_FRAGMENT, DST_LANG):
76
+ from crazy_functions.pdf_fns.report_gen_html import construct_html
77
  from crazy_functions.crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
78
  from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
79
  from crazy_functions.crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
 
82
  # title
83
  title = article_dict.get('title', '无法获取 title'); prompt += f'title:{title}\n\n'
84
  # authors
85
+ authors = article_dict.get('authors', '无法获取 authors')[:100]; prompt += f'authors:{authors}\n\n'
86
  # abstract
87
  abstract = article_dict.get('abstract', '无法获取 abstract'); prompt += f'abstract:{abstract}\n\n'
88
  # command
 
103
  inputs_show_user_array = []
104
 
105
  # get_token_num
106
+ from request_llms.bridge_all import model_info
107
  enc = model_info[llm_kwargs['llm_model']]['tokenizer']
108
  def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
109
 
crazy_functions/pdf_fns/report_gen_html.py ADDED
@@ -0,0 +1,58 @@
1
+ from toolbox import update_ui, get_conf, trimmed_format_exc, get_log_folder
2
+ import os
3
+
4
+
5
+
6
+
7
+ class construct_html():
8
+ def __init__(self) -> None:
9
+ self.html_string = ""
10
+
11
+ def add_row(self, a, b):
12
+ from toolbox import markdown_convertion
13
+ template = """
14
+ {
15
+ primary_col: {
16
+ header: String.raw`__PRIMARY_HEADER__`,
17
+ msg: String.raw`__PRIMARY_MSG__`,
18
+ },
19
+ secondary_rol: {
20
+ header: String.raw`__SECONDARY_HEADER__`,
21
+ msg: String.raw`__SECONDARY_MSG__`,
22
+ }
23
+ },
24
+ """
25
+ def std(str):
26
+ str = str.replace(r'`',r'&#96;')
27
+ if str.endswith("\\"): str += ' '
28
+ if str.endswith("}"): str += ' '
29
+ if str.endswith("$"): str += ' '
30
+ return str
31
+
32
+ template_ = template
33
+ a_lines = a.split('\n')
34
+ b_lines = b.split('\n')
35
+
36
+ if len(a_lines) == 1 or len(a_lines[0]) > 50:
37
+ template_ = template_.replace("__PRIMARY_HEADER__", std(a[:20]))
38
+ template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion(a)))
39
+ else:
40
+ template_ = template_.replace("__PRIMARY_HEADER__", std(a_lines[0]))
41
+ template_ = template_.replace("__PRIMARY_MSG__", std(markdown_convertion('\n'.join(a_lines[1:]))))
42
+
43
+ if len(b_lines) == 1 or len(b_lines[0]) > 50:
44
+ template_ = template_.replace("__SECONDARY_HEADER__", std(b[:20]))
45
+ template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion(b)))
46
+ else:
47
+ template_ = template_.replace("__SECONDARY_HEADER__", std(b_lines[0]))
48
+ template_ = template_.replace("__SECONDARY_MSG__", std(markdown_convertion('\n'.join(b_lines[1:]))))
49
+ self.html_string += template_
50
+
51
+ def save_file(self, file_name):
52
+ from toolbox import get_log_folder
53
+ with open('crazy_functions/pdf_fns/report_template.html', 'r', encoding='utf8') as f:
54
+ html_template = f.read()
55
+ html_template = html_template.replace("__TF_ARR__", self.html_string)
56
+ with open(os.path.join(get_log_folder(), file_name), 'w', encoding='utf8') as f:
57
+ f.write(html_template.encode('utf-8', 'ignore').decode())
58
+ return os.path.join(get_log_folder(), file_name)
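Note: `construct_html` now emits JavaScript object literals whose fields are `String.raw` template strings — hence the `std()` escapes: a stray backtick would terminate the template literal, and a trailing backslash, brace, or dollar sign can interact badly with raw-template parsing. The same guard in isolation:

```python
def std(s):
    # escape for embedding inside a JavaScript String.raw`...` literal
    s = s.replace('`', '&#96;')          # a backtick would end the template early
    if s.endswith(('\\', '}', '$')):     # pad risky trailing characters
        s += ' '
    return s

print(std('code `x` plus a trailing \\'))  # code &#96;x&#96; plus a trailing \ 
```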
crazy_functions/pdf_fns/report_template.html ADDED
The diff for this file is too large to render. See raw diff
 
crazy_functions/vt_fns/vt_call_plugin.py CHANGED
@@ -1,7 +1,7 @@
1
  from pydantic import BaseModel, Field
2
  from typing import List
3
  from toolbox import update_ui_lastest_msg, disable_auto_promotion
4
- from request_llm.bridge_all import predict_no_ui_long_connection
5
  from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
6
  import copy, json, pickle, os, sys, time
7
 
 
1
  from pydantic import BaseModel, Field
2
  from typing import List
3
  from toolbox import update_ui_lastest_msg, disable_auto_promotion
4
+ from request_llms.bridge_all import predict_no_ui_long_connection
5
  from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
6
  import copy, json, pickle, os, sys, time
7
 
crazy_functions/vt_fns/vt_modify_config.py CHANGED
@@ -1,13 +1,13 @@
1
  from pydantic import BaseModel, Field
2
  from typing import List
3
  from toolbox import update_ui_lastest_msg, get_conf
4
- from request_llm.bridge_all import predict_no_ui_long_connection
5
  from crazy_functions.json_fns.pydantic_io import GptJsonIO
6
  import copy, json, pickle, os, sys
7
 
8
 
9
  def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
10
- ALLOW_RESET_CONFIG, = get_conf('ALLOW_RESET_CONFIG')
11
  if not ALLOW_RESET_CONFIG:
12
  yield from update_ui_lastest_msg(
13
  lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
@@ -66,7 +66,7 @@ def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
66
  )
67
 
68
  def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
69
- ALLOW_RESET_CONFIG, = get_conf('ALLOW_RESET_CONFIG')
70
  if not ALLOW_RESET_CONFIG:
71
  yield from update_ui_lastest_msg(
72
  lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
 
1
  from pydantic import BaseModel, Field
2
  from typing import List
3
  from toolbox import update_ui_lastest_msg, get_conf
4
+ from request_llms.bridge_all import predict_no_ui_long_connection
5
  from crazy_functions.json_fns.pydantic_io import GptJsonIO
6
  import copy, json, pickle, os, sys
7
 
8
 
9
  def modify_configuration_hot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
10
+ ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
11
  if not ALLOW_RESET_CONFIG:
12
  yield from update_ui_lastest_msg(
13
  lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
 
66
  )
67
 
68
  def modify_configuration_reboot(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_intention):
69
+ ALLOW_RESET_CONFIG = get_conf('ALLOW_RESET_CONFIG')
70
  if not ALLOW_RESET_CONFIG:
71
  yield from update_ui_lastest_msg(
72
  lastmsg=f"当前配置不允许被修改!如需激活本功能,请在config.py中设置ALLOW_RESET_CONFIG=True后重启软件。",
crazy_functions/下载arxiv论文翻译摘要.py CHANGED
@@ -1,6 +1,6 @@
1
  from toolbox import update_ui, get_log_folder
2
  from toolbox import write_history_to_file, promote_file_to_downloadzone
3
- from toolbox import CatchException, report_execption, get_conf
4
  import re, requests, unicodedata, os
5
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
6
  def download_arxiv_(url_pdf):
@@ -43,7 +43,7 @@ def download_arxiv_(url_pdf):
43
  file_path = download_dir+title_str
44
 
45
  print('下载中')
46
- proxies, = get_conf('proxies')
47
  r = requests.get(requests_pdf_url, proxies=proxies)
48
  with open(file_path, 'wb+') as f:
49
  f.write(r.content)
@@ -77,7 +77,7 @@ def get_name(_url_):
77
  # print('在缓存中')
78
  # return arxiv_recall[_url_]
79
 
80
- proxies, = get_conf('proxies')
81
  res = requests.get(_url_, proxies=proxies)
82
 
83
  bs = BeautifulSoup(res.text, 'html.parser')
@@ -144,7 +144,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
144
  try:
145
  import bs4
146
  except:
147
- report_execption(chatbot, history,
148
  a = f"解析项目: {txt}",
149
  b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。")
150
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -157,7 +157,7 @@ def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, hi
157
  try:
158
  pdf_path, info = download_arxiv_(txt)
159
  except:
160
- report_execption(chatbot, history,
161
  a = f"解析项目: {txt}",
162
  b = f"下载pdf文件未成功")
163
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
1
  from toolbox import update_ui, get_log_folder
2
  from toolbox import write_history_to_file, promote_file_to_downloadzone
3
+ from toolbox import CatchException, report_exception, get_conf
4
  import re, requests, unicodedata, os
5
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
6
  def download_arxiv_(url_pdf):
 
43
  file_path = download_dir+title_str
44
 
45
  print('下载中')
46
+ proxies = get_conf('proxies')
47
  r = requests.get(requests_pdf_url, proxies=proxies)
48
  with open(file_path, 'wb+') as f:
49
  f.write(r.content)
 
77
  # print('在缓存中')
78
  # return arxiv_recall[_url_]
79
 
80
+ proxies = get_conf('proxies')
81
  res = requests.get(_url_, proxies=proxies)
82
 
83
  bs = BeautifulSoup(res.text, 'html.parser')
 
144
  try:
145
  import bs4
146
  except:
147
+ report_exception(chatbot, history,
148
  a = f"解析项目: {txt}",
149
  b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4```。")
150
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
157
  try:
158
  pdf_path, info = download_arxiv_(txt)
159
  except:
160
+ report_exception(chatbot, history,
161
  a = f"解析项目: {txt}",
162
  b = f"下载pdf文件未成功")
163
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/图片生成.py CHANGED
@@ -1,13 +1,12 @@
1
  from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
2
- from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
- import datetime
4
 
5
 
6
- def gen_image(llm_kwargs, prompt, resolution="256x256"):
7
  import requests, json, time, os
8
- from request_llm.bridge_all import model_info
9
 
10
- proxies, = get_conf('proxies')
11
  # Set up OpenAI API key and model
12
  api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
13
  chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
@@ -23,8 +22,10 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
23
  'prompt': prompt,
24
  'n': 1,
25
  'size': resolution,
 
26
  'response_format': 'url'
27
  }
 
28
  response = requests.post(url, headers=headers, json=data, proxies=proxies)
29
  print(response.content)
30
  try:
@@ -42,23 +43,62 @@ def gen_image(llm_kwargs, prompt, resolution="256x256"):
42
  return image_url, file_path+file_name
43
 
44
 
46
  @CatchException
47
- def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
48
  """
49
- txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
50
- llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
51
- plugin_kwargs 插件模型的参数,暂时没有用武之地
52
- chatbot 聊天显示框的句柄,用于显示给用户
53
- history 聊天历史,前情提要
54
  system_prompt 给gpt的静默提醒
55
  web_port 当前软件运行的端口号
56
  """
57
- history = [] # 清空历史,以免输入溢出
58
- chatbot.append(("这是什么功能?", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文效果不理想, 请尝试英文Prompt。正在处理中 ....."))
59
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
60
  if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
61
- resolution = plugin_kwargs.get("advanced_arg", '256x256')
62
  image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
63
  chatbot.append([prompt,
64
  f'图像中转网址: <br/>`{image_url}`<br/>'+
@@ -66,4 +106,99 @@ def 图片生成(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
66
  f'本地文件地址: <br/>`{image_path}`<br/>'+
67
  f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
68
  ])
69
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
1
  from toolbox import CatchException, update_ui, get_conf, select_api_key, get_log_folder
2
+ from crazy_functions.multi_stage.multi_stage_utils import GptAcademicState
 
3
 
4
 
5
+ def gen_image(llm_kwargs, prompt, resolution="1024x1024", model="dall-e-2", quality=None):
6
  import requests, json, time, os
7
+ from request_llms.bridge_all import model_info
8
 
9
+ proxies = get_conf('proxies')
10
  # Set up OpenAI API key and model
11
  api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
12
  chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
 
22
  'prompt': prompt,
23
  'n': 1,
24
  'size': resolution,
25
+ 'model': model,
26
  'response_format': 'url'
27
  }
28
+ if quality is not None: data.update({'quality': quality})
29
  response = requests.post(url, headers=headers, json=data, proxies=proxies)
30
  print(response.content)
31
  try:
 
43
  return image_url, file_path+file_name
44
 
45
 
46
+ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="dall-e-2"):
47
+ import requests, json, time, os
48
+ from request_llms.bridge_all import model_info
49
+
50
+ proxies = get_conf('proxies')
51
+ api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
52
+ chat_endpoint = model_info[llm_kwargs['llm_model']]['endpoint']
53
+ # 'https://api.openai.com/v1/chat/completions'
54
+ img_endpoint = chat_endpoint.replace('chat/completions','images/edits')
55
+ # # Generate the image
56
+ url = img_endpoint
57
+ headers = {
58
+ 'Authorization': f"Bearer {api_key}",
59
+ 'Content-Type': 'application/json'
60
+ }
61
+ data = {
62
+ 'image': open(image_path, 'rb'),
63
+ 'prompt': prompt,
64
+ 'n': 1,
65
+ 'size': resolution,
66
+ 'model': model,
67
+ 'response_format': 'url'
68
+ }
69
+ response = requests.post(url, headers=headers, json=data, proxies=proxies)
70
+ print(response.content)
71
+ try:
72
+ image_url = json.loads(response.content.decode('utf8'))['data'][0]['url']
73
+ except:
74
+ raise RuntimeError(response.content.decode())
75
+ # 文件保存到本地
76
+ r = requests.get(image_url, proxies=proxies)
77
+ file_path = f'{get_log_folder()}/image_gen/'
78
+ os.makedirs(file_path, exist_ok=True)
79
+ file_name = 'Image' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.png'
80
+ with open(file_path+file_name, 'wb+') as f: f.write(r.content)
81
+
82
+
83
+ return image_url, file_path+file_name
84
+
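Note: one caveat with `edit_image` as committed — it passes an open file handle inside `json=data`, but file objects are not JSON-serializable, and OpenAI's `images/edits` endpoint expects `multipart/form-data`. A hedged sketch of how the request could be issued instead (endpoint and field names as documented by OpenAI at the time):

```python
import requests

def edit_image_multipart(api_key, prompt, image_path, resolution="1024x1024",
                         endpoint="https://api.openai.com/v1/images/edits",
                         proxies=None):
    headers = {'Authorization': f"Bearer {api_key}"}  # no Content-Type: requests sets the multipart boundary
    with open(image_path, 'rb') as img:
        response = requests.post(
            endpoint, headers=headers, proxies=proxies,
            files={'image': img},
            data={'prompt': prompt, 'n': 1, 'size': resolution,
                  'model': 'dall-e-2', 'response_format': 'url'},
        )
    return response.json()['data'][0]['url']
```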
85
 
86
  @CatchException
87
+ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
88
  """
89
+ txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
90
+ llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
91
+ plugin_kwargs 插件模型的参数,暂时没有用武之地
92
+ chatbot 聊天显示框的句柄,用于显示给用户
93
+ history 聊天历史,前情提要
94
  system_prompt 给gpt的静默提醒
95
  web_port 当前软件运行的端口号
96
  """
97
+ history = [] # 清空历史,以免输入溢出
98
+ chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
99
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
100
  if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
101
+ resolution = plugin_kwargs.get("advanced_arg", '1024x1024')
102
  image_url, image_path = gen_image(llm_kwargs, prompt, resolution)
103
  chatbot.append([prompt,
104
  f'图像中转网址: <br/>`{image_url}`<br/>'+
 
106
  f'本地文件地址: <br/>`{image_path}`<br/>'+
107
  f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
108
  ])
109
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
110
+
111
+
112
+ @CatchException
113
+ def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
114
+ history = [] # 清空历史,以免输入溢出
115
+ chatbot.append(("您正在调用“图像生成”插件。", "[Local Message] 生成图像, 请先把模型切换至gpt-*或者api2d-*。如果中文Prompt效果不理想, 请尝试英文Prompt。正在处理中 ....."))
116
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 由于请求gpt需要一段时间,我们先及时地做一次界面更新
117
+ if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
118
+ resolution = plugin_kwargs.get("advanced_arg", '1024x1024').lower()
119
+ if resolution.endswith('-hd'):
120
+ resolution = resolution.replace('-hd', '')
121
+ quality = 'hd'
122
+ else:
123
+ quality = 'standard'
124
+ image_url, image_path = gen_image(llm_kwargs, prompt, resolution, model="dall-e-3", quality=quality)
125
+ chatbot.append([prompt,
126
+ f'图像中转网址: <br/>`{image_url}`<br/>'+
127
+ f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
128
+ f'本地文件地址: <br/>`{image_path}`<br/>'+
129
+ f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
130
+ ])
131
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
132
+
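Note: `图片生成_DALLE3` overloads the advanced-arg field — a `-hd` suffix on the resolution selects DALL-E 3's `hd` quality tier. The same parsing reduced to a helper:

```python
def parse_dalle3_arg(arg: str):
    # '1024x1792-hd' -> ('1024x1792', 'hd'); plain sizes default to 'standard'
    arg = arg.lower()
    if arg.endswith('-hd'):
        return arg[:-3], 'hd'
    return arg, 'standard'

print(parse_dalle3_arg('1024x1792-HD'))   # ('1024x1792', 'hd')
print(parse_dalle3_arg('1024x1024'))      # ('1024x1024', 'standard')
```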
133
+ class ImageEditState(GptAcademicState):
134
+ # 尚未完成
135
+ def get_image_file(self, x):
136
+ import os, glob
137
+ if len(x) == 0: return False, None
138
+ if not os.path.exists(x): return False, None
139
+ if x.endswith('.png'): return True, x
140
+ file_manifest = [f for f in glob.glob(f'{x}/**/*.png', recursive=True)]
141
+ confirm = (len(file_manifest) >= 1 and file_manifest[0].endswith('.png') and os.path.exists(file_manifest[0]))
142
+ file = None if not confirm else file_manifest[0]
143
+ return confirm, file
144
+
145
+ def get_resolution(self, x):
146
+ return (x in ['256x256', '512x512', '1024x1024']), x
147
+
148
+ def get_prompt(self, x):
149
+ confirm = (len(x)>=5) and (not self.get_resolution(x)[0]) and (not self.get_image_file(x)[0])
150
+ return confirm, x
151
+
152
+ def reset(self):
153
+ self.req = [
154
+ {'value':None, 'description': '请先上传图像(必须是.png格式), 然后再次点击本插件', 'verify_fn': self.get_image_file},
155
+ {'value':None, 'description': '请输入分辨率,可选:256x256, 512x512 或 1024x1024', 'verify_fn': self.get_resolution},
156
+ {'value':None, 'description': '请输入修改需求,建议您使用英文提示词', 'verify_fn': self.get_prompt},
157
+ ]
158
+ self.info = ""
159
+
160
+ def feed(self, prompt, chatbot):
161
+ for r in self.req:
162
+ if r['value'] is None:
163
+ confirm, res = r['verify_fn'](prompt)
164
+ if confirm:
165
+ r['value'] = res
166
+ self.set_state(chatbot, 'dummy_key', 'dummy_value')
167
+ break
168
+ return self
169
+
170
+ def next_req(self):
171
+ for r in self.req:
172
+ if r['value'] is None:
173
+ return r['description']
174
+ return "已经收集到所有信息"
175
+
176
+ def already_obtained_all_materials(self):
177
+ return all([x['value'] is not None for x in self.req])
178
+
179
+ @CatchException
180
+ def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
181
+ # 尚未完成
182
+ history = [] # 清空历史
183
+ state = ImageEditState.get_state(chatbot, ImageEditState)
184
+ state = state.feed(prompt, chatbot)
185
+ if not state.already_obtained_all_materials():
186
+ chatbot.append(["图片修改(先上传图片,再输入修改需求,最后输入分辨率)", state.next_req()])
187
+ yield from update_ui(chatbot=chatbot, history=history)
188
+ return
189
+
190
+ image_path = state.req[0]
191
+ resolution = state.req[1]
192
+ prompt = state.req[2]
193
+ chatbot.append(["图片修改, 执行中", f"图片:`{image_path}`<br/>分辨率:`{resolution}`<br/>修改需求:`{prompt}`"])
194
+ yield from update_ui(chatbot=chatbot, history=history)
195
+
196
+ image_url, image_path = edit_image(llm_kwargs, prompt, image_path, resolution)
197
+ chatbot.append([state.prompt,
198
+ f'图像中转网址: <br/>`{image_url}`<br/>'+
199
+ f'中转网址预览: <br/><div align="center"><img src="{image_url}"></div>'
200
+ f'本地文件地址: <br/>`{image_path}`<br/>'+
201
+ f'本地文件预览: <br/><div align="center"><img src="file={image_path}"></div>'
202
+ ])
203
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 界面更新
204
+
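Note: `图片修改_DALLE2` is explicitly marked unfinished (尚未完成): as written, `state.req[0]` hands the whole requirement dict to `edit_image`, and `state.prompt` is never assigned. The presumably intended unpacking of the collected values:

```python
req = [
    {'value': 'logs/img.png'},        # collected image path
    {'value': '512x512'},             # collected resolution
    {'value': 'make the sky pink'},   # collected prompt
]
image_path, resolution, prompt = (r['value'] for r in req)
print(image_path, resolution, prompt)
```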
crazy_functions/多智能体.py ADDED
@@ -0,0 +1,108 @@
1
+ # 本源代码中, ⭐ = 关键步骤
2
+ """
3
+ 测试:
4
+ - show me the solution of $x^2=cos(x)$, solve this problem with figure, and plot and save image to t.jpg
5
+
6
+ """
7
+
8
+
9
+ from toolbox import CatchException, update_ui, gen_time_str, trimmed_format_exc, ProxyNetworkActivate
10
+ from toolbox import get_conf, select_api_key, update_ui_lastest_msg, Singleton
11
+ from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, get_plugin_arg
12
+ from crazy_functions.crazy_utils import input_clipping, try_install_deps
13
+ from crazy_functions.agent_fns.persistent import GradioMultiuserManagerForPersistentClasses
14
+ from crazy_functions.agent_fns.auto_agent import AutoGenMath
15
+ import time
16
+
17
+ def remove_model_prefix(llm):
18
+ if llm.startswith('api2d-'): llm = llm.replace('api2d-', '')
19
+ if llm.startswith('azure-'): llm = llm.replace('azure-', '')
20
+ return llm
21
+
22
+
23
+ @CatchException
24
+ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
25
+ """
26
+ txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
27
+ llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
28
+ plugin_kwargs 插件模型的参数
29
+ chatbot 聊天显示框的句柄,用于显示给用户
30
+ history 聊天历史,前情提要
31
+ system_prompt 给gpt的静默提醒
32
+ web_port 当前软件运行的端口号
33
+ """
34
+ # 检查当前的模型是否符合要求
35
+ supported_llms = [
36
+ "gpt-3.5-turbo-16k",
37
+ 'gpt-3.5-turbo-1106',
38
+ "gpt-4",
39
+ "gpt-4-32k",
40
+ 'gpt-4-1106-preview',
41
+ "azure-gpt-3.5-turbo-16k",
42
+ "azure-gpt-3.5-16k",
43
+ "azure-gpt-4",
44
+ "azure-gpt-4-32k",
45
+ ]
46
+ from request_llms.bridge_all import model_info
47
+ if model_info[llm_kwargs['llm_model']]["max_token"] < 8000: # 至少是8k上下文的模型
48
+ chatbot.append([f"处理任务: {txt}", f"当前插件只支持{str(supported_llms)}, 当前模型{llm_kwargs['llm_model']}的最大上下文长度太短, 不能支撑AutoGen运行。"])
49
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
50
+ return
51
+ if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # 如果不是本地模型,加载API_KEY
52
+ llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
53
+
54
+ # 检查当前的模型是否符合要求
55
+ API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
56
+ if len(API_URL_REDIRECT) > 0:
57
+ chatbot.append([f"处理任务: {txt}", f"暂不支持中转."])
58
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
59
+ return
60
+
61
+ # 尝试导入依赖,如果缺少依赖,则给出安装建议
62
+ try:
63
+ import autogen
64
+ if get_conf("AUTOGEN_USE_DOCKER"):
65
+ import docker
66
+ except:
67
+ chatbot.append([ f"处理任务: {txt}",
68
+ f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pyautogen docker```。"])
69
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
70
+ return
71
+
72
+ # 尝试导入依赖,如果缺少依赖,则给出安装建议
73
+ try:
74
+ import autogen
75
+ import glob, os, time, subprocess
76
+ if get_conf("AUTOGEN_USE_DOCKER"):
77
+ subprocess.Popen(["docker", "--version"])
78
+ except:
79
+ chatbot.append([f"处理任务: {txt}", f"缺少docker运行环境!"])
80
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
81
+ return
82
+
83
+ # 解锁插件
84
+ chatbot.get_cookies()['lock_plugin'] = None
85
+ persistent_class_multi_user_manager = GradioMultiuserManagerForPersistentClasses()
86
+ user_uuid = chatbot.get_cookies().get('uuid')
87
+ persistent_key = f"{user_uuid}->多智能体终端"
88
+ if persistent_class_multi_user_manager.already_alive(persistent_key):
89
+ # 当已经存在一个正在运行的多智能体终端时,直接将用户输入传递给它,而不是再次启动一个新的多智能体终端
90
+ print('[debug] feed new user input')
91
+ executor = persistent_class_multi_user_manager.get(persistent_key)
92
+ exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="resume")
93
+ else:
94
+ # 运行多智能体终端 (首次)
95
+ print('[debug] create new executor instance')
96
+ history = []
97
+ chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
98
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
99
+ executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port)
100
+ persistent_class_multi_user_manager.set(persistent_key, executor)
101
+ exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")
102
+
103
+ if exit_reason == "wait_feedback":
104
+ # 当用户点击了“等待反馈”按钮时,将executor存储到cookie中,等待用户的再次调用
105
+ executor.chatbot.get_cookies()['lock_plugin'] = 'crazy_functions.多智能体->多智能体终端'
106
+ else:
107
+ executor.chatbot.get_cookies()['lock_plugin'] = None
108
+ yield from update_ui(chatbot=executor.chatbot, history=executor.history) # 更新状态
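Note: the multi-agent terminal keeps one live executor per user (keyed by uuid) and uses the `lock_plugin` cookie to route follow-up input back into it instead of spawning a new session. A runnable sketch of the resume-or-create routing, with stand-ins for `GradioMultiuserManagerForPersistentClasses` and `AutoGenMath`:

```python
sessions = {}   # stand-in for GradioMultiuserManagerForPersistentClasses

class FakeExecutor:                            # stand-in for AutoGenMath
    def __init__(self): self.log = []
    def feed(self, txt):
        self.log.append(txt)
        return f"handled ({len(self.log)} turns): {txt}"

def on_user_input(user_uuid, txt):
    key = f"{user_uuid}->多智能体终端"
    if key not in sessions:                    # first call: create and register
        sessions[key] = FakeExecutor()
    return sessions[key].feed(txt)             # later calls resume the same executor

print(on_user_input("uuid-1", "solve x^2=cos(x)"))
print(on_user_input("uuid-1", "continue"))     # same live session, now 2 turns
```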
crazy_functions/对话历史存档.py CHANGED
@@ -1,7 +1,8 @@
1
- from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder
2
- from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
  import re
4
 
 
 
5
  def write_chat_to_file(chatbot, history=None, file_name=None):
6
  """
7
  将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
@@ -9,8 +10,8 @@ def write_chat_to_file(chatbot, history=None, file_name=None):
9
  import os
10
  import time
11
  if file_name is None:
12
- file_name = 'chatGPT对话历史' + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
13
- fp = os.path.join(get_log_folder(), file_name)
14
  with open(fp, 'w', encoding='utf8') as f:
15
  from themes.theme import advanced_css
16
  f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
@@ -80,7 +81,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
80
  """
81
 
82
  chatbot.append(("保存当前对话",
83
- f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用“载入对话历史存档”还原当下的对话。\n警告!被保存的对话历史可以被使用该系统的任何人查阅。"))
84
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
85
 
86
  def hide_cwd(str):
@@ -106,7 +107,12 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
106
  if not success:
107
  if txt == "": txt = '空空如也的输入栏'
108
  import glob
109
- local_history = "<br/>".join(["`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
110
  chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
111
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
112
  return
@@ -132,8 +138,12 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
132
  """
133
 
134
  import glob, os
135
- local_history = "<br/>".join(["`"+hide_cwd(f)+"`" for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True)])
136
- for f in glob.glob(f'{get_log_folder()}/**/chatGPT对话历史*.html', recursive=True):
137
  os.remove(f)
138
  chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
139
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
1
+ from toolbox import CatchException, update_ui, promote_file_to_downloadzone, get_log_folder, get_user
 
2
  import re
3
 
4
+ f_prefix = 'GPT-Academic对话存档'
5
+
6
  def write_chat_to_file(chatbot, history=None, file_name=None):
7
  """
8
  将对话记录history以Markdown格式写入文件中。如果没有指定文件名,则使用当前时间生成文件名。
 
10
  import os
11
  import time
12
  if file_name is None:
13
+ file_name = f_prefix + time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + '.html'
14
+ fp = os.path.join(get_log_folder(get_user(chatbot), plugin_name='chat_history'), file_name)
15
  with open(fp, 'w', encoding='utf8') as f:
16
  from themes.theme import advanced_css
17
  f.write(f'<!DOCTYPE html><head><meta charset="utf-8"><title>对话历史</title><style>{advanced_css}</style></head>')
 
81
  """
82
 
83
  chatbot.append(("保存当前对话",
84
+ f"[Local Message] {write_chat_to_file(chatbot, history)},您可以调用下拉菜单中的“载入对话历史存档”还原当下的对话。"))
85
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
86
 
87
  def hide_cwd(str):
 
107
  if not success:
108
  if txt == "": txt = '空空如也的输入栏'
109
  import glob
110
+ local_history = "<br/>".join([
111
+ "`"+hide_cwd(f)+f" ({gen_file_preview(f)})"+"`"
112
+ for f in glob.glob(
113
+ f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html',
114
+ recursive=True
115
+ )])
116
  chatbot.append([f"正在查找对话历史文件(html格式): {txt}", f"找不到任何html文件: {txt}。但本地存储了以下历史文件,您可以将任意一个文件路径粘贴到输入区,然后重试:<br/>{local_history}"])
117
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
118
  return
 
138
  """
139
 
140
  import glob, os
141
+ local_history = "<br/>".join([
142
+ "`"+hide_cwd(f)+"`"
143
+ for f in glob.glob(
144
+ f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True
145
+ )])
146
+ for f in glob.glob(f'{get_log_folder(get_user(chatbot), plugin_name="chat_history")}/**/{f_prefix}*.html', recursive=True):
147
  os.remove(f)
148
  chatbot.append([f"删除所有历史对话文件", f"已删除<br/>{local_history}"])
149
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
crazy_functions/总结word文档.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui
2
- from toolbox import CatchException, report_execption
3
  from toolbox import write_history_to_file, promote_file_to_downloadzone
4
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
5
  fast_debug = False
@@ -32,7 +32,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
32
  print(file_content)
33
  # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
34
  from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
35
- from request_llm.bridge_all import model_info
36
  max_token = model_info[llm_kwargs['llm_model']]['max_token']
37
  TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
38
  paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
@@ -97,7 +97,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
97
  try:
98
  from docx import Document
99
  except:
100
- report_execption(chatbot, history,
101
  a=f"解析项目: {txt}",
102
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
103
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -111,7 +111,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
111
  project_folder = txt
112
  else:
113
  if txt == "": txt = '空空如也的输入栏'
114
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
115
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
116
  return
117
 
@@ -124,7 +124,7 @@ def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pr
124
 
125
  # 如果没找到任何文件
126
  if len(file_manifest) == 0:
127
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
128
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
129
  return
130
 
 
1
  from toolbox import update_ui
2
+ from toolbox import CatchException, report_exception
3
  from toolbox import write_history_to_file, promote_file_to_downloadzone
4
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
5
  fast_debug = False
 
32
  print(file_content)
33
  # private_upload里面的文件名在解压zip后容易出现乱码(rar和7z格式正常),故可以只分析文章内容,不输入文件名
34
  from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
35
+ from request_llms.bridge_all import model_info
36
  max_token = model_info[llm_kwargs['llm_model']]['max_token']
37
  TOKEN_LIMIT_PER_FRAGMENT = max_token * 3 // 4
38
  paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
 
97
  try:
98
  from docx import Document
99
  except:
100
+ report_exception(chatbot, history,
101
  a=f"解析项目: {txt}",
102
  b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")
103
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
111
  project_folder = txt
112
  else:
113
  if txt == "": txt = '空空如也的输入栏'
114
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
115
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
116
  return
117
 
 
124
 
125
  # 如果没找到任何文件
126
  if len(file_manifest) == 0:
127
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.docx或doc文件: {txt}")
128
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
129
  return
130
 
crazy_functions/总结音视频.py CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, select_api_key, update_ui, get_conf
+from toolbox import CatchException, report_exception, select_api_key, update_ui, get_conf
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import write_history_to_file, promote_file_to_downloadzone, get_log_folder
 
@@ -41,7 +41,7 @@ def split_audio_file(filename, split_duration=1000):
 def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
     import os, requests
     from moviepy.editor import AudioFileClip
-    from request_llm.bridge_all import model_info
+    from request_llms.bridge_all import model_info
 
     # 设置OpenAI密钥和模型
     api_key = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
@@ -79,7 +79,7 @@ def AnalyAudio(parse_prompt, file_manifest, llm_kwargs, chatbot, history):
 
         chatbot.append([f"将 {i} 发送到openai音频解析终端 (whisper),当前参数:{parse_prompt}", "正在处理 ..."])
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        proxies, = get_conf('proxies')
+        proxies = get_conf('proxies')
         response = requests.post(url, headers=headers, files=files, data=data, proxies=proxies).text
 
         chatbot.append(["音频解析结果", response])
@@ -144,7 +144,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     try:
         from moviepy.editor import AudioFileClip
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade moviepy```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -158,7 +158,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -174,7 +174,7 @@ def 总结音视频(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
+        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何音频或视频文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
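The change from `proxies, = get_conf('proxies')` to `proxies = get_conf('proxies')`, repeated across many files below, implies a new `get_conf` convention: a single requested key now returns a bare value instead of a one-element tuple. A hedged sketch of that call-site behavior (not the actual `toolbox.get_conf`, and with an illustrative config store):

```python
# Hypothetical stand-in for toolbox.get_conf: the old helper returned a
# tuple for any number of keys, so single-key callers unpacked with a
# trailing comma; the new convention returns the bare value for one key.
_CONFIG = {'proxies': None, 'WEB_PORT': 50923}  # illustrative values

def get_conf(*keys):
    values = tuple(_CONFIG[k] for k in keys)
    return values[0] if len(values) == 1 else values

proxies = get_conf('proxies')                        # new style: bare value
proxies2, web_port = get_conf('proxies', 'WEB_PORT') # multi-key: still a tuple
```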
crazy_functions/批量Markdown翻译.py CHANGED
@@ -1,6 +1,6 @@
 import glob, time, os, re, logging
 from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
-from toolbox import CatchException, report_execption, get_log_folder
+from toolbox import CatchException, report_exception, get_log_folder
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = False
 
@@ -13,7 +13,7 @@ class PaperFileGroup():
         self.sp_file_tag = []
 
         # count_token
-        from request_llm.bridge_all import model_info
+        from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
         self.get_token_num = get_token_num
@@ -118,7 +118,7 @@ def get_files_from_everything(txt, preference=''):
     if txt.startswith('http'):
         import requests
         from toolbox import get_conf
-        proxies, = get_conf('proxies')
+        proxies = get_conf('proxies')
         # 网络的远程文件
         if preference == 'Github':
             logging.info('正在从github下载资源 ...')
@@ -165,7 +165,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -177,12 +177,12 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -205,7 +205,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -215,11 +215,11 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
@@ -238,7 +238,7 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     try:
         import tiktoken
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -248,11 +248,11 @@ def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history,
     if not success:
         # 什么都没有
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
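The `PaperFileGroup.get_token_num` pattern above now reaches the tokenizer through `request_llms.bridge_all.model_info`. A self-contained sketch of the same counting logic, using tiktoken directly as a stand-in for that lookup:

```python
# Sketch of the token-count helper PaperFileGroup builds. The commit pulls
# the tokenizer from request_llms.bridge_all.model_info; tiktoken's own
# model lookup stands in for it here.
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def get_token_num(txt: str) -> int:
    # disallowed_special=() treats special-token text as plain text
    return len(enc.encode(txt, disallowed_special=()))

print(get_token_num("Translate this Markdown heading."))  # a small integer
```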
crazy_functions/批量总结PDF文档.py CHANGED
@@ -1,5 +1,5 @@
 from toolbox import update_ui, promote_file_to_downloadzone, gen_time_str
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from .crazy_utils import read_and_clean_pdf_text
@@ -21,7 +21,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
     TOKEN_LIMIT_PER_FRAGMENT = 2500
 
     from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-    from request_llm.bridge_all import model_info
+    from request_llms.bridge_all import model_info
     enc = model_info["gpt-3.5-turbo"]['tokenizer']
     def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
     paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
@@ -119,7 +119,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
     try:
         import fitz
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -133,7 +133,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -142,7 +142,7 @@ def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或.pdf文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
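Several plugins in this commit feed their text through `breakdown_txt_to_satisfy_token_limit_for_pdf` with a fixed `TOKEN_LIMIT_PER_FRAGMENT`. A simplified sketch of that idea, halving the text until every fragment fits the budget; the real helper in `crazy_utils` prefers splitting at linebreaks and sentence boundaries, which this version deliberately omits:

```python
# Hedged sketch of recursive fragment splitting: bisect the text until
# each piece is within the token budget. Character-midpoint splits are a
# simplification of the project's boundary-aware splitter.
def breakdown(txt, get_token_num, limit):
    if get_token_num(txt) <= limit:
        return [txt]
    mid = len(txt) // 2
    return breakdown(txt[:mid], get_token_num, limit) + \
           breakdown(txt[mid:], get_token_num, limit)

fragments = breakdown("word " * 5000, lambda t: len(t.split()), 2500)
assert all(len(f.split()) <= 2500 for f in fragments)
```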
crazy_functions/批量总结PDF文档pdfminer.py CHANGED
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 
@@ -138,7 +138,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
     try:
         import pdfminer, bs4
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -147,7 +147,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
@@ -155,7 +155,7 @@ def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, histo
                     # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
                     # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
crazy_functions/批量翻译PDF文档_NOUGAT.py CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, get_log_folder, gen_time_str
+from toolbox import CatchException, report_exception, get_log_folder, gen_time_str
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -57,30 +57,35 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
                     "批量翻译PDF文档。函数插件贡献者: Binary-Husky"])
     yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
-    # 尝试导入依赖,如果缺少依赖,则给出安装建议
-    try:
-        import nougat
-        import tiktoken
-    except:
-        report_execption(chatbot, history,
-                         a=f"解析项目: {txt}",
-                         b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。")
-        yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        return
-
     # 清空历史,以免输入溢出
     history = []
 
     from .crazy_utils import get_files_from_everything
     success, file_manifest, project_folder = get_files_from_everything(txt, type='.pdf')
+    if len(file_manifest) > 0:
+        # 尝试导入依赖,如果缺少依赖,则给出安装建议
+        try:
+            import nougat
+            import tiktoken
+        except:
+            report_exception(chatbot, history,
+                             a=f"解析项目: {txt}",
+                             b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade nougat-ocr tiktoken```。")
+            yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+            return
+    success_mmd, file_manifest_mmd, _ = get_files_from_everything(txt, type='.mmd')
+    success = success or success_mmd
+    file_manifest += file_manifest_mmd
+    chatbot.append(["文件列表:", ", ".join([e.split('/')[-1] for e in file_manifest])]);
+    yield from update_ui( chatbot=chatbot, history=history)
     # 检测输入参数,如没有给定输入参数,直接退出
     if not success:
         if txt == "": txt = '空空如也的输入栏'
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
-                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
+        report_exception(chatbot, history,
+                         a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -97,12 +102,17 @@ def 解析PDF_基于NOUGAT(file_manifest, project_folder, llm_kwargs, plugin_kwa
     generated_conclusion_files = []
     generated_html_files = []
     DST_LANG = "中文"
-    from crazy_functions.crazy_utils import nougat_interface, construct_html
+    from crazy_functions.crazy_utils import nougat_interface
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     nougat_handle = nougat_interface()
     for index, fp in enumerate(file_manifest):
-        chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-        fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history)
-        promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot)
+        if fp.endswith('pdf'):
+            chatbot.append(["当前进度:", f"正在解析论文,请稍候。(第一次运行时,需要花费较长时间下载NOUGAT参数)"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
+            fpp = yield from nougat_handle.NOUGAT_parse_pdf(fp, chatbot, history)
+            promote_file_to_downloadzone(fpp, rename_file=os.path.basename(fpp)+'.nougat.mmd', chatbot=chatbot)
+        else:
+            chatbot.append(["当前论文无需解析:", fp]); yield from update_ui( chatbot=chatbot, history=history)
+            fpp = fp
         with open(fpp, 'r', encoding='utf8') as f:
             article_content = f.readlines()
         article_dict = markdown_to_dict(article_content)
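The new per-file branch reuses pre-parsed `.mmd` (Mathpix Markdown) files instead of running NOUGAT OCR again. A sketch of that dispatch, with `nougat_parse` as a hypothetical stand-in for `nougat_handle.NOUGAT_parse_pdf`:

```python
# Sketch of the new branch: PDFs go through the (slow) NOUGAT OCR step,
# while files that are already .mmd are used as-is.
def resolve_to_mmd(fp: str, nougat_parse) -> str:
    if fp.endswith('.pdf'):
        return nougat_parse(fp)  # OCR the PDF into a .mmd transcript
    return fp                    # already Mathpix Markdown, skip parsing

# hypothetical parser that just tags the filename
print(resolve_to_mmd('paper.pdf', nougat_parse=lambda p: p + '.nougat.mmd'))
print(resolve_to_mmd('paper.mmd', nougat_parse=lambda p: p + '.nougat.mmd'))
```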
crazy_functions/批量翻译PDF文档_多线程.py CHANGED
@@ -1,4 +1,4 @@
-from toolbox import CatchException, report_execption, get_log_folder, gen_time_str
+from toolbox import CatchException, report_exception, get_log_folder, gen_time_str, check_packages
 from toolbox import update_ui, promote_file_to_downloadzone, update_ui_lastest_msg, disable_auto_promotion
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
@@ -6,9 +6,8 @@ from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_an
 from .crazy_utils import read_and_clean_pdf_text
 from .pdf_fns.parse_pdf import parse_pdf, get_avail_grobid_url, translate_pdf
 from colorful import *
-import copy
 import os
-import math
+
 
 @CatchException
 def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
@@ -22,11 +21,9 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
 
     # 尝试导入依赖,如果缺少依赖,则给出安装建议
     try:
-        import fitz
-        import tiktoken
-        import scipdf
+        check_packages(["fitz", "tiktoken", "scipdf"])
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}",
                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf tiktoken scipdf_parser```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -43,8 +40,8 @@ def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
 
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
-                         a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
+        report_exception(chatbot, history,
+                         a=f"解析项目: {txt}", b=f"找不到任何.pdf拓展名的文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
 
@@ -63,7 +60,7 @@ def 解析PDF_基于GROBID(file_manifest, project_folder, llm_kwargs, plugin_kwa
     generated_conclusion_files = []
     generated_html_files = []
     DST_LANG = "中文"
-    from crazy_functions.crazy_utils import construct_html
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     for index, fp in enumerate(file_manifest):
        chatbot.append(["当前进度:", f"正在连接GROBID服务,请稍候: {grobid_url}\n如果等待时间过长,请修改config中的GROBID_URL,可修改成本地GROBID服务。"]); yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         article_dict = parse_pdf(fp, grobid_url)
@@ -86,7 +83,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
     TOKEN_LIMIT_PER_FRAGMENT = 1024
     generated_conclusion_files = []
     generated_html_files = []
-    from crazy_functions.crazy_utils import construct_html
+    from crazy_functions.pdf_fns.report_gen_html import construct_html
     for index, fp in enumerate(file_manifest):
         # 读取PDF文件
         file_content, page_one = read_and_clean_pdf_text(fp)
@@ -95,7 +92,7 @@ def 解析PDF(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot,
 
         # 递归地切割PDF文件
         from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-        from request_llm.bridge_all import model_info
+        from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
         paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
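The three separate `import` statements in the dependency probe collapse into one `check_packages(["fitz", "tiktoken", "scipdf"])` call. The real implementation lives in `toolbox` and may differ; a plausible sketch consistent with that call signature:

```python
# Hedged sketch of a check_packages helper; hypothetical, not the actual
# toolbox implementation. import_module raises ImportError (or its
# ModuleNotFoundError subclass) for the first missing package.
import importlib

def check_packages(packages):
    for name in packages:
        importlib.import_module(name)

try:
    check_packages(["fitz", "tiktoken", "scipdf"])
except ImportError as e:
    print(f"missing dependency: {e.name}")  # caller reports install advice
```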
crazy_functions/理解PDF文档内容.py CHANGED
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from .crazy_utils import read_and_clean_pdf_text
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
@@ -19,7 +19,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
     TOKEN_LIMIT_PER_FRAGMENT = 2500
 
     from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-    from request_llm.bridge_all import model_info
+    from request_llms.bridge_all import model_info
     enc = model_info["gpt-3.5-turbo"]['tokenizer']
     def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
     paper_fragments = breakdown_txt_to_satisfy_token_limit_for_pdf(
@@ -49,7 +49,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
         gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(i_say, i_say_show_user, # i_say=真正给chatgpt的提问, i_say_show_user=给用户看的提问
             llm_kwargs, chatbot,
             history=["The main idea of the previous section is?", last_iteration_result], # 迭代上一次的结果
-            sys_prompt="Extract the main idea of this section." # 提示
+            sys_prompt="Extract the main idea of this section, answer me with Chinese." # 提示
             )
         iteration_results.append(gpt_say)
         last_iteration_result = gpt_say
@@ -81,7 +81,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     try:
         import fitz
     except:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a = f"解析项目: {txt}",
                          b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
@@ -96,7 +96,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     else:
         if txt == "":
             txt = '空空如也的输入栏'
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -105,7 +105,7 @@ def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chat
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
     # 如果没找到任何文件
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.tex或.pdf文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
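This plugin's loop summarizes fragment after fragment, feeding each call only the previous summary as history rather than the whole transcript. A compact sketch of that rolling pattern, with `ask` as a hypothetical stand-in for the project's threaded LLM request:

```python
# Sketch of the rolling-summary loop: context accumulates through the
# previous iteration's answer instead of resending every fragment.
def rolling_summarize(fragments, ask):
    last = "none yet"
    results = []
    for frag in fragments:
        last = ask(prompt=frag,
                   history=["The main idea of the previous section is?", last],
                   sys_prompt="Extract the main idea of this section, answer me with Chinese.")
        results.append(last)
    return results

# demo with a dummy ask() that echoes a prefix of the prompt
print(rolling_summarize(["intro...", "method..."], ask=lambda **kw: kw["prompt"][:6]))
```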
crazy_functions/生成函数注释.py CHANGED
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 fast_debug = False
@@ -43,14 +43,14 @@ def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
         project_folder = txt
     else:
         if txt == "": txt = '空空如也的输入栏'
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)] + \
                     [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)]
 
     if len(file_manifest) == 0:
-        report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
+        report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
     yield from 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
crazy_functions/联网的ChatGPT.py CHANGED
@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
 import requests
 from bs4 import BeautifulSoup
-from request_llm.bridge_all import model_info
+from request_llms.bridge_all import model_info
 
 def google(query, proxies):
     query = query # 在此处替换您要搜索的关键词
@@ -72,7 +72,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
 
     # ------------- < 第1步:爬取搜索引擎的结果 > -------------
     from toolbox import get_conf
-    proxies, = get_conf('proxies')
+    proxies = get_conf('proxies')
     urls = google(txt, proxies)
     history = []
     if len(urls) == 0:
crazy_functions/联网的ChatGPT_bing版.py CHANGED
@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui
 from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive, input_clipping
 import requests
 from bs4 import BeautifulSoup
-from request_llm.bridge_all import model_info
+from request_llms.bridge_all import model_info
 
 
 def bing_search(query, proxies=None):
@@ -72,7 +72,7 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor
 
     # ------------- < 第1步:爬取搜索引擎的结果 > -------------
     from toolbox import get_conf
-    proxies, = get_conf('proxies')
+    proxies = get_conf('proxies')
     urls = bing_search(txt, proxies)
     history = []
     if len(urls) == 0:
crazy_functions/虚空终端.py CHANGED
@@ -48,7 +48,7 @@ from pydantic import BaseModel, Field
 from typing import List
 from toolbox import CatchException, update_ui, is_the_upload_folder
 from toolbox import update_ui_lastest_msg, disable_auto_promotion
-from request_llm.bridge_all import predict_no_ui_long_connection
+from request_llms.bridge_all import predict_no_ui_long_connection
 from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 from crazy_functions.crazy_utils import input_clipping
 from crazy_functions.json_fns.pydantic_io import GptJsonIO, JsonStringError
crazy_functions/解析JupyterNotebook.py CHANGED
@@ -1,5 +1,5 @@
 from toolbox import update_ui
-from toolbox import CatchException, report_execption
+from toolbox import CatchException, report_exception
 from toolbox import write_history_to_file, promote_file_to_downloadzone
 fast_debug = True
 
@@ -13,7 +13,7 @@ class PaperFileGroup():
         self.sp_file_tag = []
 
         # count_token
-        from request_llm.bridge_all import model_info
+        from request_llms.bridge_all import model_info
         enc = model_info["gpt-3.5-turbo"]['tokenizer']
         def get_token_num(txt): return len(
             enc.encode(txt, disallowed_special=()))
@@ -131,7 +131,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     else:
         if txt == "":
             txt = '空空如也的输入栏'
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
@@ -141,7 +141,7 @@ def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
     file_manifest = [f for f in glob.glob(
         f'{project_folder}/**/*.ipynb', recursive=True)]
     if len(file_manifest) == 0:
-        report_execption(chatbot, history,
+        report_exception(chatbot, history,
                          a=f"解析项目: {txt}", b=f"找不到任何.ipynb文件: {txt}")
         yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
         return
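For context on what this plugin consumes: a `.ipynb` file is plain JSON with a `cells` list, and code cells carry their text as a `source` list of lines. A hypothetical helper illustrating that minimal structure (not the plugin's own parser):

```python
# Sketch of minimal .ipynb reading: pull the text of every code cell.
import json

def read_code_cells(nb_json: str):
    nb = json.loads(nb_json)
    return [''.join(c['source']) for c in nb.get('cells', [])
            if c.get('cell_type') == 'code']

demo = '{"cells": [{"cell_type": "code", "source": ["print(1)\\n"]}]}'
print(read_code_cells(demo))  # ['print(1)\n']
```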
crazy_functions/解析项目源代码.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
2
- from toolbox import CatchException, report_execption, write_history_to_file
3
  from .crazy_utils import input_clipping
4
 
5
  def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
@@ -113,7 +113,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
113
  [f for f in glob.glob('./*/*.py')]
114
  project_folder = './'
115
  if len(file_manifest) == 0:
116
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
117
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
118
  return
119
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -126,12 +126,12 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
126
  project_folder = txt
127
  else:
128
  if txt == "": txt = '空空如也的输入栏'
129
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
130
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
131
  return
132
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
133
  if len(file_manifest) == 0:
134
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
135
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
136
  return
137
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -144,12 +144,12 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
144
  project_folder = txt
145
  else:
146
  if txt == "": txt = '空空如也的输入栏'
147
- report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
148
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
149
  return
150
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)]
151
  if len(file_manifest) == 0:
152
- report_execption(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}")
153
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
154
  return
155
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -162,14 +162,14 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
162
  project_folder = txt
163
  else:
164
  if txt == "": txt = '空空如也的输入栏'
165
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
166
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
167
  return
168
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
169
  [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
170
  # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
171
  if len(file_manifest) == 0:
172
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
173
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
174
  return
175
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -182,7 +182,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
182
  project_folder = txt
183
  else:
184
  if txt == "": txt = '空空如也的输入栏'
185
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
186
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
187
  return
188
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
@@ -190,7 +190,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
190
  [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
191
  [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
192
  if len(file_manifest) == 0:
193
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
194
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
195
  return
196
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -204,7 +204,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
204
  project_folder = txt
205
  else:
206
  if txt == "": txt = '空空如也的输入栏'
207
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
208
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
209
  return
210
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
@@ -212,7 +212,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
212
  [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
213
  [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
214
  if len(file_manifest) == 0:
215
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
216
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
217
  return
218
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -226,7 +226,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
226
  project_folder = txt
227
  else:
228
  if txt == "": txt = '空空如也的输入栏'
229
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
230
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
231
  return
232
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
@@ -241,7 +241,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
241
  [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \
242
  [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
243
  if len(file_manifest) == 0:
244
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}")
245
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
246
  return
247
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -255,7 +255,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
255
  project_folder = txt
256
  else:
257
  if txt == "": txt = '空空如也的输入栏'
258
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
259
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
260
  return
261
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
@@ -263,7 +263,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
263
  [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
264
  [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
265
  if len(file_manifest) == 0:
266
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
267
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
268
  return
269
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -276,14 +276,14 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
276
  project_folder = txt
277
  else:
278
  if txt == "": txt = '空空如也的输入栏'
279
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
280
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
281
  return
282
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \
283
  [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \
284
  [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)]
285
  if len(file_manifest) == 0:
286
- report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
287
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
288
  return
289
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -296,7 +296,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
296
  project_folder = txt
297
  else:
298
  if txt == "": txt = '空空如也的输入栏'
299
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
300
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
301
  return
302
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
@@ -304,7 +304,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
304
  [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
305
  [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
306
  if len(file_manifest) == 0:
307
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
308
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
309
  return
310
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -318,13 +318,13 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
318
  project_folder = txt
319
  else:
320
  if txt == "": txt = '空空如也的输入栏'
321
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
322
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
323
  return
324
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \
325
  [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)]
326
  if len(file_manifest) == 0:
327
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}")
328
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
329
  return
330
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
@@ -352,7 +352,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
352
  project_folder = txt
353
  else:
354
  if txt == "": txt = '空空如也的输入栏'
355
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
356
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
357
  return
358
  # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件
@@ -365,7 +365,7 @@ def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
365
  file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \
366
  os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))]
367
  if len(file_manifest) == 0:
368
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}")
369
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
370
  return
371
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
1
  from toolbox import update_ui, promote_file_to_downloadzone, disable_auto_promotion
2
+ from toolbox import CatchException, report_exception, write_history_to_file
3
  from .crazy_utils import input_clipping
4
 
5
  def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
 
113
  [f for f in glob.glob('./*/*.py')]
114
  project_folder = './'
115
  if len(file_manifest) == 0:
116
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
117
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
118
  return
119
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
126
  project_folder = txt
127
  else:
128
  if txt == "": txt = '空空如也的输入栏'
129
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
130
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
131
  return
132
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.py', recursive=True)]
133
  if len(file_manifest) == 0:
134
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何python文件: {txt}")
135
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
136
  return
137
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
144
  project_folder = txt
145
  else:
146
  if txt == "": txt = '空空如也的输入栏'
147
+ report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
148
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
149
  return
150
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.m', recursive=True)]
151
  if len(file_manifest) == 0:
152
+ report_exception(chatbot, history, a = f"解析Matlab项目: {txt}", b = f"找不到任何`.m`源文件: {txt}")
153
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
154
  return
155
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
162
  project_folder = txt
163
  else:
164
  if txt == "": txt = '空空如也的输入栏'
165
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
166
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
167
  return
168
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
169
  [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] #+ \
170
  # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
171
  if len(file_manifest) == 0:
172
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
173
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
174
  return
175
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
182
  project_folder = txt
183
  else:
184
  if txt == "": txt = '空空如也的输入栏'
185
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
186
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
187
  return
188
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.h', recursive=True)] + \
 
190
  [f for f in glob.glob(f'{project_folder}/**/*.hpp', recursive=True)] + \
191
  [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
192
  if len(file_manifest) == 0:
193
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.h头文件: {txt}")
194
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
195
  return
196
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
204
  project_folder = txt
205
  else:
206
  if txt == "": txt = '空空如也的输入栏'
207
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
208
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
209
  return
210
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.java', recursive=True)] + \
 
212
  [f for f in glob.glob(f'{project_folder}/**/*.xml', recursive=True)] + \
213
  [f for f in glob.glob(f'{project_folder}/**/*.sh', recursive=True)]
214
  if len(file_manifest) == 0:
215
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何java文件: {txt}")
216
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
217
  return
218
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
226
  project_folder = txt
227
  else:
228
  if txt == "": txt = '空空如也的输入栏'
229
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
230
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
231
  return
232
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.ts', recursive=True)] + \
 
241
  [f for f in glob.glob(f'{project_folder}/**/*.css', recursive=True)] + \
242
  [f for f in glob.glob(f'{project_folder}/**/*.jsx', recursive=True)]
243
  if len(file_manifest) == 0:
244
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何前端相关文件: {txt}")
245
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
246
  return
247
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
255
  project_folder = txt
256
  else:
257
  if txt == "": txt = '空空如也的输入栏'
258
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
259
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
260
  return
261
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.go', recursive=True)] + \
 
263
  [f for f in glob.glob(f'{project_folder}/**/go.sum', recursive=True)] + \
264
  [f for f in glob.glob(f'{project_folder}/**/go.work', recursive=True)]
265
  if len(file_manifest) == 0:
266
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
267
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
268
  return
269
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
276
  project_folder = txt
277
  else:
278
  if txt == "": txt = '空空如也的输入栏'
279
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
280
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
281
  return
282
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.rs', recursive=True)] + \
283
  [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)] + \
284
  [f for f in glob.glob(f'{project_folder}/**/*.lock', recursive=True)]
285
  if len(file_manifest) == 0:
286
+ report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何golang文件: {txt}")
287
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
288
  return
289
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
296
  project_folder = txt
297
  else:
298
  if txt == "": txt = '空空如也的输入栏'
299
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
300
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
301
  return
302
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.lua', recursive=True)] + \
 
304
  [f for f in glob.glob(f'{project_folder}/**/*.json', recursive=True)] + \
305
  [f for f in glob.glob(f'{project_folder}/**/*.toml', recursive=True)]
306
  if len(file_manifest) == 0:
307
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何lua文件: {txt}")
308
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
309
  return
310
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
318
  project_folder = txt
319
  else:
320
  if txt == "": txt = '空空如也的输入栏'
321
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
322
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
323
  return
324
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.cs', recursive=True)] + \
325
  [f for f in glob.glob(f'{project_folder}/**/*.csproj', recursive=True)]
326
  if len(file_manifest) == 0:
327
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何CSharp文件: {txt}")
328
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
329
  return
330
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
352
  project_folder = txt
353
  else:
354
  if txt == "": txt = '空空如也的输入栏'
355
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
356
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
357
  return
358
  # 若上传压缩文件, 先寻找到解压的文件夹路径, 从而避免解析压缩文件
 
365
  file_manifest = [f for pattern in pattern_include for f in glob.glob(f'{extract_folder_path}/**/{pattern}', recursive=True) if "" != extract_folder_path and \
366
  os.path.isfile(f) and (not re.search(pattern_except, f) or pattern.endswith('.' + re.search(pattern_except, f).group().split('.')[-1]))]
367
  if len(file_manifest) == 0:
368
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何文件: {txt}")
369
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
370
  return
371
  yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
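Every language branch in 解析项目源代码.py repeats the same shape: validate the input path, build a `file_manifest` from recursive globs, call `report_exception` and bail out when nothing matches, then hand off to `解析源代码新`. A minimal sketch of that shared idiom follows; the helper name `collect_file_manifest` and the example pattern list are illustrative, not part of this commit.

```python
import glob
import os

def collect_file_manifest(project_folder, patterns):
    """Hypothetical helper mirroring the repeated glob idiom above.

    Example: patterns=['*.rs', '*.toml', '*.lock'] reproduces the Rust branch.
    """
    manifest = []
    for pattern in patterns:
        manifest += [f for f in glob.glob(f'{project_folder}/**/{pattern}', recursive=True)
                     if os.path.isfile(f)]
    return manifest

if __name__ == '__main__':
    files = collect_file_manifest('.', ['*.rs', '*.toml', '*.lock'])
    print(f'{len(files)} files matched')
```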
crazy_functions/询问多个大语言模型.py CHANGED
@@ -1,4 +1,4 @@
1
- from toolbox import CatchException, update_ui
2
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
  import datetime
4
  @CatchException
@@ -13,11 +13,12 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
13
  web_port 当前软件运行的端口号
14
  """
15
  history = [] # 清空历史,以免输入溢出
16
- chatbot.append((txt, "正在同时咨询gpt-3.5和gpt-4……"))
 
17
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
18
 
19
  # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
20
- llm_kwargs['llm_model'] = 'gpt-3.5-turbo&gpt-4' # 支持任意数量的llm接口,用&符号分隔
21
  gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
22
  inputs=txt, inputs_show_user=txt,
23
  llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
 
1
+ from toolbox import CatchException, update_ui, get_conf
2
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
  import datetime
4
  @CatchException
 
13
  web_port 当前软件运行的端口号
14
  """
15
  history = [] # 清空历史,以免输入溢出
16
+ MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
17
+ chatbot.append((txt, "正在同时咨询" + MULTI_QUERY_LLM_MODELS))
18
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
19
 
20
  # llm_kwargs['llm_model'] = 'chatglm&gpt-3.5-turbo&api2d-gpt-3.5-turbo' # 支持任意数量的llm接口,用&符号分隔
21
+ llm_kwargs['llm_model'] = MULTI_QUERY_LLM_MODELS # 支持任意数量的llm接口,用&符号分隔
22
  gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
23
  inputs=txt, inputs_show_user=txt,
24
  llm_kwargs=llm_kwargs, chatbot=chatbot, history=history,
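The hard-coded `'gpt-3.5-turbo&gpt-4'` pair is replaced by the `MULTI_QUERY_LLM_MODELS` config entry, so the set of models queried in parallel becomes user-configurable. As the inline comment notes, any number of backends can be combined by joining model names with `&`. A rough sketch of the convention, with an illustrative value (the real default lives in config.py):

```python
# Illustrative value only -- the actual default is defined in config.py.
MULTI_QUERY_LLM_MODELS = "gpt-3.5-turbo&chatglm"

# The '&' separator fans the same prompt out to several LLM backends:
for model in MULTI_QUERY_LLM_MODELS.split('&'):
    print(f"would query: {model}")
```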
crazy_functions/语音助手.py CHANGED
@@ -1,47 +1,35 @@
1
  from toolbox import update_ui
2
  from toolbox import CatchException, get_conf, markdown_convertion
3
  from crazy_functions.crazy_utils import input_clipping
4
- from request_llm.bridge_all import predict_no_ui_long_connection
 
5
  import threading, time
6
  import numpy as np
7
  from .live_audio.aliyunASR import AliyunASR
8
  import json
 
9
 
10
- class WatchDog():
11
- def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
12
- self.last_feed = None
13
- self.timeout = timeout
14
- self.bark_fn = bark_fn
15
- self.interval = interval
16
- self.msg = msg
17
- self.kill_dog = False
18
-
19
- def watch(self):
20
- while True:
21
- if self.kill_dog: break
22
- if time.time() - self.last_feed > self.timeout:
23
- if len(self.msg) > 0: print(self.msg)
24
- self.bark_fn()
25
- break
26
- time.sleep(self.interval)
27
-
28
- def begin_watch(self):
29
- self.last_feed = time.time()
30
- th = threading.Thread(target=self.watch)
31
- th.daemon = True
32
- th.start()
33
-
34
- def feed(self):
35
- self.last_feed = time.time()
36
 
37
  def chatbot2history(chatbot):
38
  history = []
39
  for c in chatbot:
40
  for q in c:
41
- if q not in ["[请讲话]", "[等待GPT响应]", "[正在等您说完问题]"]:
 
42
  history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
43
  return history
44
 
  class AsyncGptTask():
46
  def __init__(self) -> None:
47
  self.observe_future = []
@@ -81,8 +69,9 @@ class InterviewAssistant(AliyunASR):
81
  self.capture_interval = 0.5 # second
82
  self.stop = False
83
  self.parsed_text = "" # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入
84
- self.parsed_sentence = "" # 某段话的整个句子,由 test_on_sentence_end() 写入
85
  self.buffered_sentence = "" #
 
86
  self.event_on_result_chg = threading.Event()
87
  self.event_on_entence_end = threading.Event()
88
  self.event_on_commit_question = threading.Event()
@@ -117,7 +106,7 @@ class InterviewAssistant(AliyunASR):
117
  def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
118
  # main plugin function
119
  self.init(chatbot)
120
- chatbot.append(["[请讲话]", "[正在等您说完问题]"])
121
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
122
  self.plugin_wd.begin_watch()
123
  self.agt = AsyncGptTask()
@@ -157,14 +146,18 @@ class InterviewAssistant(AliyunASR):
157
 
158
  self.commit_wd.begin_watch()
159
  chatbot[-1] = list(chatbot[-1])
160
- chatbot[-1] = [self.buffered_sentence, "[等待GPT响应]"]
161
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
162
  # add gpt task 创建子线程请求gpt,避免线程阻塞
163
  history = chatbot2history(chatbot)
164
  self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
165
 
166
  self.buffered_sentence = ""
167
- chatbot.append(["[请讲话]", "[正在等您说完问题]"])
 
168
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
169
 
170
  if len(self.stop_msg) != 0:
@@ -183,7 +176,7 @@ def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
183
  import nls
184
  from scipy import io
185
  except:
186
- chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
187
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
188
  return
189
 
 
1
  from toolbox import update_ui
2
  from toolbox import CatchException, get_conf, markdown_convertion
3
  from crazy_functions.crazy_utils import input_clipping
4
+ from crazy_functions.agent_fns.watchdog import WatchDog
5
+ from request_llms.bridge_all import predict_no_ui_long_connection
6
  import threading, time
7
  import numpy as np
8
  from .live_audio.aliyunASR import AliyunASR
9
  import json
10
+ import re
11
 
12
 
13
  def chatbot2history(chatbot):
14
  history = []
15
  for c in chatbot:
16
  for q in c:
17
+ if q in ["[ 请讲话 ]", "[ 等待GPT响应 ]", "[ 正在等您说完问题 ]"]:
18
+ continue
19
+ elif q.startswith("[ 正在等您说完问题 ]"):
20
+ continue
21
+ else:
22
  history.append(q.strip('<div class="markdown-body">').strip('</div>').strip('<p>').strip('</p>'))
23
  return history
24
 
25
+ def visualize_audio(chatbot, audio_shape):
26
+ if len(chatbot) == 0: chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
27
+ chatbot[-1] = list(chatbot[-1])
28
+ p1 = '「'
29
+ p2 = '」'
30
+ chatbot[-1][-1] = re.sub(p1+r'(.*)'+p2, '', chatbot[-1][-1])
31
+ chatbot[-1][-1] += (p1+f"`{audio_shape}`"+p2)
32
+
33
  class AsyncGptTask():
34
  def __init__(self) -> None:
35
  self.observe_future = []
 
69
  self.capture_interval = 0.5 # second
70
  self.stop = False
71
  self.parsed_text = "" # 下个句子中已经说完的部分, 由 test_on_result_chg() 写入
72
+ self.parsed_sentence = "" # 某段话的整个句子, 由 test_on_sentence_end() 写入
73
  self.buffered_sentence = "" #
74
+ self.audio_shape = "" # 音频的可视化表现, 由 audio_convertion_thread() 写入
75
  self.event_on_result_chg = threading.Event()
76
  self.event_on_entence_end = threading.Event()
77
  self.event_on_commit_question = threading.Event()
 
106
  def begin(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
107
  # main plugin function
108
  self.init(chatbot)
109
+ chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
110
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
111
  self.plugin_wd.begin_watch()
112
  self.agt = AsyncGptTask()
 
146
 
147
  self.commit_wd.begin_watch()
148
  chatbot[-1] = list(chatbot[-1])
149
+ chatbot[-1] = [self.buffered_sentence, "[ 等待GPT响应 ]"]
150
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
151
  # add gpt task 创建子线程请求gpt,避免线程阻塞
152
  history = chatbot2history(chatbot)
153
  self.agt.add_async_gpt_task(self.buffered_sentence, len(chatbot)-1, llm_kwargs, history, system_prompt)
154
 
155
  self.buffered_sentence = ""
156
+ chatbot.append(["[ 请讲话 ]", "[ 正在等您说完问题 ]"])
157
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
158
+
159
+ if not self.event_on_result_chg.is_set() and not self.event_on_entence_end.is_set() and not self.event_on_commit_question.is_set():
160
+ visualize_audio(chatbot, self.audio_shape)
161
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
162
 
163
  if len(self.stop_msg) != 0:
 
176
  import nls
177
  from scipy import io
178
  except:
179
+ chatbot.append(["导入依赖失败", "使用该模块需要额外依赖, 安装方法:```pip install --upgrade aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git```"])
180
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
181
  return
182
 
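Two things happen in 语音助手.py: the inline `WatchDog` class is extracted to `crazy_functions/agent_fns/watchdog.py` so the new agent plugins can share it, and a `visualize_audio` helper now appends the live audio shape (wrapped in 「」 markers) to the last chat bubble whenever no ASR event is pending. For reference, the extracted watchdog is reproduced below from the removed lines: a daemon thread that fires `bark_fn` once if `feed()` is not called within `timeout` seconds. Assumed to match `crazy_functions/agent_fns/watchdog.py` up to cosmetic details.

```python
import threading, time

class WatchDog():
    """Reconstructed from the lines removed above: call feed() periodically;
    if no feed arrives within `timeout` seconds, bark_fn() fires once."""
    def __init__(self, timeout, bark_fn, interval=3, msg="") -> None:
        self.last_feed = None
        self.timeout = timeout
        self.bark_fn = bark_fn
        self.interval = interval
        self.msg = msg
        self.kill_dog = False

    def watch(self):
        while True:
            if self.kill_dog: break
            if time.time() - self.last_feed > self.timeout:
                if len(self.msg) > 0: print(self.msg)
                self.bark_fn()   # fire the timeout callback once, then stop watching
                break
            time.sleep(self.interval)

    def begin_watch(self):
        self.last_feed = time.time()
        th = threading.Thread(target=self.watch)
        th.daemon = True   # do not block interpreter shutdown
        th.start()

    def feed(self):
        self.last_feed = time.time()
```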
crazy_functions/读文章写摘要.py CHANGED
@@ -1,5 +1,5 @@
1
  from toolbox import update_ui
2
- from toolbox import CatchException, report_execption
3
  from toolbox import write_history_to_file, promote_file_to_downloadzone
4
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
5
 
@@ -51,14 +51,14 @@ def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
51
  project_folder = txt
52
  else:
53
  if txt == "": txt = '空空如也的输入栏'
54
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
55
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
56
  return
57
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
58
  # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
59
  # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
60
  if len(file_manifest) == 0:
61
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
62
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
63
  return
64
  yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
 
1
  from toolbox import update_ui
2
+ from toolbox import CatchException, report_exception
3
  from toolbox import write_history_to_file, promote_file_to_downloadzone
4
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
5
 
 
51
  project_folder = txt
52
  else:
53
  if txt == "": txt = '空空如也的输入栏'
54
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
55
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
56
  return
57
  file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] # + \
58
  # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
59
  # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
60
  if len(file_manifest) == 0:
61
+ report_exception(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
62
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
63
  return
64
  yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
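This hunk is part of a repository-wide rename fixing the misspelled helper `report_execption` to `report_exception`; the call sites here change in name only. If external plugins still import the old name, a thin compatibility alias would keep them working — an assumption for illustration, not something this commit adds:

```python
# Hypothetical shim (not in this commit): keep the misspelled name importable
# while the rename propagates to downstream plugins.
from toolbox import report_exception

report_execption = report_exception  # deprecated alias
```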
crazy_functions/谷歌检索小助手.py CHANGED
@@ -1,5 +1,5 @@
1
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
2
- from toolbox import CatchException, report_execption, promote_file_to_downloadzone
3
  from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
4
  import logging
5
  import requests
@@ -17,7 +17,7 @@ def get_meta_information(url, chatbot, history):
17
  from urllib.parse import urlparse
18
  session = requests.session()
19
 
20
- proxies, = get_conf('proxies')
21
  headers = {
22
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
23
  'Accept-Encoding': 'gzip, deflate, br',
@@ -26,7 +26,13 @@ def get_meta_information(url, chatbot, history):
26
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
27
  'Connection': 'keep-alive'
28
  }
29
- session.proxies.update(proxies)
 
30
  session.headers.update(headers)
31
 
32
  response = session.get(url)
@@ -140,7 +146,7 @@ def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
140
  import math
141
  from bs4 import BeautifulSoup
142
  except:
143
- report_execption(chatbot, history,
144
  a = f"解析项目: {txt}",
145
  b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
146
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
1
  from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
2
+ from toolbox import CatchException, report_exception, promote_file_to_downloadzone
3
  from toolbox import update_ui, update_ui_lastest_msg, disable_auto_promotion, write_history_to_file
4
  import logging
5
  import requests
 
17
  from urllib.parse import urlparse
18
  session = requests.session()
19
 
20
+ proxies = get_conf('proxies')
21
  headers = {
22
  'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
23
  'Accept-Encoding': 'gzip, deflate, br',
 
26
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
27
  'Connection': 'keep-alive'
28
  }
29
+ try:
30
+ session.proxies.update(proxies)
31
+ except:
32
+ report_exception(chatbot, history,
33
+ a=f"获取代理失败 无代理状态下很可能无法访问OpenAI家族的模型及谷歌学术 建议:检查USE_PROXY选项是否修改。",
34
+ b=f"尝试直接连接")
35
+ yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
36
  session.headers.update(headers)
37
 
38
  response = session.get(url)
 
146
  import math
147
  from bs4 import BeautifulSoup
148
  except:
149
+ report_exception(chatbot, history,
150
  a = f"解析项目: {txt}",
151
  b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade beautifulsoup4 arxiv```。")
152
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
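Besides the rename, this file reflects two behavior changes: `get_conf('proxies')` now returns the value directly rather than a 1-tuple (hence the dropped unpacking comma), and `session.proxies.update(proxies)` is wrapped in try/except so a missing proxy config degrades to a direct connection with a visible warning instead of raising. A small sketch of a shim that tolerates both `get_conf` calling conventions — purely illustrative:

```python
def normalize_conf_value(result):
    """Assumed helper: accept both the old 1-tuple and the new scalar
    return convention of get_conf for a single key."""
    if isinstance(result, tuple) and len(result) == 1:
        return result[0]
    return result

# Old convention: get_conf('proxies') returned ({'https': 'http://localhost:7890'},)
print(normalize_conf_value(({'https': 'http://localhost:7890'},)))
# New convention: get_conf('proxies') returns {'https': 'http://localhost:7890'}
print(normalize_conf_value({'https': 'http://localhost:7890'}))
```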
crazy_functions/辅助功能.py CHANGED
@@ -2,9 +2,12 @@
2
  # @Time : 2023/4/19
3
  # @Author : Spike
4
  # @Descr :
5
- from toolbox import update_ui, get_conf
6
  from toolbox import CatchException
 
7
  from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
 
 
8
 
9
 
10
  @CatchException
@@ -33,10 +36,19 @@ def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
33
  chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
34
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
35
 
36
- import shutil, os
37
- PATH_PRIVATE_UPLOAD, PATH_LOGGING = get_conf('PATH_PRIVATE_UPLOAD', 'PATH_LOGGING')
38
- shutil.rmtree(PATH_LOGGING, ignore_errors=True)
39
- shutil.rmtree(PATH_PRIVATE_UPLOAD, ignore_errors=True)
 
40
 
41
  chatbot.append(['清除本地缓存数据', '执行完成'])
42
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
 
2
  # @Time : 2023/4/19
3
  # @Author : Spike
4
  # @Descr :
5
+ from toolbox import update_ui, get_conf, get_user
6
  from toolbox import CatchException
7
+ from toolbox import default_user_name
8
  from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
9
+ import shutil
10
+ import os
11
 
12
 
13
  @CatchException
 
36
  chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
37
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
38
 
39
+ def _get_log_folder(user=default_user_name):
40
+ PATH_LOGGING = get_conf('PATH_LOGGING')
41
+ _dir = os.path.join(PATH_LOGGING, user)
42
+ if not os.path.exists(_dir): os.makedirs(_dir)
43
+ return _dir
44
+
45
+ def _get_upload_folder(user=default_user_name):
46
+ PATH_PRIVATE_UPLOAD = get_conf('PATH_PRIVATE_UPLOAD')
47
+ _dir = os.path.join(PATH_PRIVATE_UPLOAD, user)
48
+ return _dir
49
+
50
+ shutil.rmtree(_get_log_folder(get_user(chatbot)), ignore_errors=True)
51
+ shutil.rmtree(_get_upload_folder(get_user(chatbot)), ignore_errors=True)
52
 
53
  chatbot.append(['清除本地缓存数据', '执行完成'])
54
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
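Cache clearing is now scoped per user: instead of wiping the whole `PATH_LOGGING` and `PATH_PRIVATE_UPLOAD` trees, the plugin resolves `os.path.join(<root>, user)` via `get_user(chatbot)` and removes only that user's subtree. A standalone sketch of the same layout and deletion; the folder names are illustrative defaults, the real values come from config.py:

```python
import os, shutil

PATH_LOGGING = 'gpt_log'              # illustrative; configured in config.py
PATH_PRIVATE_UPLOAD = 'private_upload'

def clear_user_cache(user):
    """Delete only this user's log and upload folders, mirroring the plugin."""
    for root in (PATH_LOGGING, PATH_PRIVATE_UPLOAD):
        shutil.rmtree(os.path.join(root, user), ignore_errors=True)

clear_user_cache('default_user')      # stand-in for get_user(chatbot)
```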