qingxu98 committed
Commit
47289f8
1 Parent(s): eaf27df
This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
Files changed (50)
  1. .pre-commit-config.yaml +32 -0
  2. Dockerfile +0 -1
  3. README.md +68 -26
  4. app.py +7 -8
  5. config.py +20 -5
  6. core_functional.py +69 -24
  7. crazy_functional.py +327 -257
  8. crazy_functions/Latex输出PDF结果.py +3 -3
  9. crazy_functions/agent_fns/general.py +31 -27
  10. crazy_functions/crazy_utils.py +3 -0
  11. crazy_functions/latex_fns/latex_toolbox.py +258 -126
  12. crazy_functions/pdf_fns/breakdown_txt.py +2 -2
  13. crazy_functions/多智能体.py +1 -8
  14. crazy_functions/数学动画生成manim.py +9 -5
  15. crazy_functions/高级功能函数模板.py +43 -1
  16. docker-compose.yml +2 -3
  17. docs/Dockerfile+ChatGLM +0 -1
  18. docs/Dockerfile+JittorLLM +1 -1
  19. docs/GithubAction+NoLocal+Latex +1 -1
  20. docs/README.Arabic.md +9 -10
  21. docs/README.English.md +5 -6
  22. docs/README.French.md +6 -7
  23. docs/README.German.md +11 -12
  24. docs/README.Italian.md +7 -8
  25. docs/README.Japanese.md +5 -6
  26. docs/README.Korean.md +4 -5
  27. docs/README.Portuguese.md +8 -9
  28. docs/README.Russian.md +7 -8
  29. docs/WithFastapi.md +4 -4
  30. docs/test_markdown_format.py +77 -40
  31. docs/translate_japanese.json +1 -1
  32. docs/translate_std.json +1 -1
  33. docs/use_audio.md +0 -1
  34. docs/waifu_plugin/autoload.js +2 -2
  35. docs/waifu_plugin/flat-ui-icons-regular.svg +1 -1
  36. docs/waifu_plugin/jquery-ui.min.js +0 -0
  37. docs/waifu_plugin/source +1 -1
  38. docs/waifu_plugin/waifu-tips.js +38 -38
  39. docs/waifu_plugin/waifu-tips.json +2 -2
  40. docs/waifu_plugin/waifu.css +1 -1
  41. multi_language.py +8 -8
  42. request_llms/README.md +1 -1
  43. request_llms/bridge_all.py +36 -16
  44. request_llms/bridge_chatgpt.py +3 -0
  45. request_llms/bridge_google_gemini.py +114 -0
  46. request_llms/bridge_newbingfree.py +145 -79
  47. request_llms/bridge_skylark2.py +67 -0
  48. request_llms/bridge_stackclaude.py +117 -68
  49. request_llms/bridge_zhipu.py +1 -1
  50. request_llms/com_google.py +229 -0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,32 @@
+ default_language_version:
+   python: python3
+ exclude: 'dotnet'
+ ci:
+   autofix_prs: true
+   autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions'
+   autoupdate_schedule: 'quarterly'
+
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.4.0
+     hooks:
+       - id: check-ast
+       # - id: check-yaml
+       - id: check-toml
+       - id: check-json
+       - id: check-byte-order-marker
+         exclude: .gitignore
+       - id: check-merge-conflict
+       - id: detect-private-key
+       - id: trailing-whitespace
+       - id: end-of-file-fixer
+       - id: no-commit-to-branch
+   - repo: https://github.com/psf/black
+     rev: 23.3.0
+     hooks:
+       - id: black
+   # - repo: https://github.com/charliermarsh/ruff-pre-commit
+   #   rev: v0.0.261
+   #   hooks:
+   #     - id: ruff
+   #       args: ["--fix"]
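Note: these hooks only take effect once each contributor registers them in their local clone. A minimal sketch of doing that from Python, assuming the `pre-commit` package is installed (`pre-commit install && pre-commit run --all-files` on the shell is equivalent):

```python
import subprocess

# Register the git hook, then run every configured hook once over the repo.
subprocess.check_call(["pre-commit", "install"])
subprocess.check_call(["pre-commit", "run", "--all-files"])
```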
Dockerfile CHANGED
@@ -18,7 +18,6 @@ WORKDIR /gpt

  # 安装大部分依赖,利用Docker缓存加速以后的构建 (以下三行,可以删除)
  COPY requirements.txt ./
- COPY ./docs/gradio-3.32.6-py3-none-any.whl ./docs/gradio-3.32.6-py3-none-any.whl
  RUN pip3 install -r requirements.txt

README.md CHANGED
@@ -54,13 +54,11 @@ If you like this project, please give it a Star.
  Read this in [English](docs/README.English.md) | [日本語](docs/README.Japanese.md) | [한국어](docs/README.Korean.md) | [Русский](docs/README.Russian.md) | [Français](docs/README.French.md). All translations have been provided by the project itself. To translate this project to arbitrary language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).
  <br>

-
- > 1.请注意只有 **高亮** 标识的插件(按钮)才支持读取文件,部分插件位于插件区的**下拉菜单**中。另外我们以**最高优先级**欢迎和处理任何新插件的PR
- >
- > 2.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki。
+ > [!NOTE]
+ > 1.本项目中每个文件的功能都在[自译解报告](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告)`self_analysis.md`详细说明。随着版本的迭代,您也可以随时自行点击相关函数插件,调用GPT重新生成项目的自我解析报告。常见问题请查阅wiki
  > [![常规安装方法](https://img.shields.io/static/v1?label=&message=常规安装方法&color=gray)](#installation) [![一键安装脚本](https://img.shields.io/static/v1?label=&message=一键安装脚本&color=gray)](https://github.com/binary-husky/gpt_academic/releases) [![配置说明](https://img.shields.io/static/v1?label=&message=配置说明&color=gray)](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明) [![wiki](https://img.shields.io/static/v1?label=&message=wiki&color=gray)]([https://github.com/binary-husky/gpt_academic/wiki/项目配置说明](https://github.com/binary-husky/gpt_academic/wiki))
- >
- > 3.本项目兼容并鼓励尝试国产大语言模型ChatGLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。
+ >
+ > 2.本项目兼容并鼓励尝试国内中文大语言基座模型如通义千问,智谱GLM等。支持多个api-key共存,可在配置文件中填写如`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`。需要临时更换`API_KEY`时,在输入区输入临时的`API_KEY`然后回车键提交即可生效。

  <br><br>
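The multi-key line above (`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`) implies comma-splitting plus per-request key selection. A minimal illustrative sketch of that idea — a hypothetical helper, not the project's actual key-selection code:

```python
import random

API_KEY = "openai-key1,openai-key2,azure-key3,api2d-key4"

def pick_key(api_key_string: str, prefix: str) -> str:
    """Split the comma-separated key string and pick one key for the
    requested provider at random (simple load balancing)."""
    candidates = [k.strip() for k in api_key_string.split(",")
                  if k.strip().startswith(prefix)]
    if not candidates:
        raise ValueError(f"no key configured for provider prefix {prefix!r}")
    return random.choice(candidates)

print(pick_key(API_KEY, "openai-key"))  # -> 'openai-key1' or 'openai-key2'
```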
@@ -68,7 +66,12 @@ Read this in [English](docs/README.Japanes

  功能(⭐= 近期新增功能) | 描述
  --- | ---
- ⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱API](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
+ ⭐[接入新模型](https://github.com/binary-husky/gpt_academic/wiki/%E5%A6%82%E4%BD%95%E5%88%87%E6%8D%A2%E6%A8%A1%E5%9E%8B) | 百度[千帆](https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu)与文心一言, 通义千问[Qwen](https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary),上海AI-Lab[书生](https://github.com/InternLM/InternLM),讯飞[星火](https://xinghuo.xfyun.cn/),[LLaMa2](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf),[智谱GLM4](https://open.bigmodel.cn/),DALLE3, [DeepseekCoder](https://coder.deepseek.com/)
+ ⭐支持mermaid图像渲染 | 支持让GPT生成[流程图](https://www.bilibili.com/video/BV18c41147H9/)、状态转移图、甘特图、饼状图、GitGraph等等(3.7版本)
+ ⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
+ ⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
+ ⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
+ ⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
  润色、翻译、代码解释 | 一键润色、翻译、查找论文语法错误、解释代码
  [自定义快捷键](https://www.bilibili.com/video/BV14s4y1E7jN) | 支持自定义快捷键
  模块化设计 | 支持自定义强大的[插件](https://github.com/binary-husky/gpt_academic/tree/master/crazy_functions),插件支持[热更新](https://github.com/binary-husky/gpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97)
@@ -77,22 +80,16 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
  Latex全文[翻译](https://www.bilibili.com/video/BV1nk4y1Y7Js/)、[润色](https://www.bilibili.com/video/BV1FT411H7c5/) | [插件] 一键翻译或润色latex论文
  批量注释生成 | [插件] 一键批量生成函数注释
  Markdown[中英互译](https://www.bilibili.com/video/BV1yo4y157jV/) | [插件] 看到上面5种语言的[README](https://github.com/binary-husky/gpt_academic/blob/master/docs/README_EN.md)了吗?就是出自他的手笔
- chat分析报告生成 | [插件] 运行后自动生成总结汇报
  [PDF论文全文翻译功能](https://www.bilibili.com/video/BV1KT411x7Wn) | [插件] PDF论文提取题目&摘要+翻译全文(多线程)
  [Arxiv小助手](https://www.bilibili.com/video/BV1LM4y1279X) | [插件] 输入arxiv文章url即可一键翻译摘要+下载PDF
  Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼写纠错+输出对照PDF
  [谷歌学术统合小助手](https://www.bilibili.com/video/BV19L411U7ia) | [插件] 给定任意谷歌学术搜索页面URL,让gpt帮你[写relatedworks](https://www.bilibili.com/video/BV1GP411U7Az/)
  互联网信息聚合+GPT | [插件] 一键[让GPT从互联网获取信息](https://www.bilibili.com/video/BV1om4y127ck)回答问题,让信息永不过时
- ⭐Arxiv论文精细翻译 ([Docker](https://github.com/binary-husky/gpt_academic/pkgs/container/gpt_academic_with_latex)) | [插件] 一键[以超高质量翻译arxiv论文](https://www.bilibili.com/video/BV1dz4y1v77A/),目前最好的论文翻译工具
- ⭐[实时语音对话输入](https://github.com/binary-husky/gpt_academic/blob/master/docs/use_audio.md) | [插件] 异步[监听音频](https://www.bilibili.com/video/BV1AV4y187Uy/),自动断句,自动寻找回答时机
  公式/图片/表格显示 | 可以同时显示公式的[tex形式和渲染形式](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png),支持公式、代码高亮
- ⭐AutoGen多智能体插件 | [插件] 借助微软AutoGen,探索多Agent的智能涌现可能!
  启动暗色[主题](https://github.com/binary-husky/gpt_academic/issues/173) | 在浏览器url后面添加```/?__theme=dark```可以切换dark主题
  [多LLM模型](https://www.bilibili.com/video/BV1wT411p7yf)支持 | 同时被GPT3.5、GPT4、[清华ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)、[复旦MOSS](https://github.com/OpenLMLab/MOSS)伺候的感觉一定会很不错吧?
- ⭐ChatGLM2微调模型 | 支持加载ChatGLM2微调模型,提供ChatGLM2微调辅助插件
  更多LLM模型接入,支持[huggingface部署](https://huggingface.co/spaces/qingxu98/gpt-academic) | 加入Newbing接口(新必应),引入清华[Jittorllms](https://github.com/Jittor/JittorLLMs)支持[LLaMA](https://github.com/facebookresearch/llama)和[盘古α](https://openi.org.cn/pangu/)
  ⭐[void-terminal](https://github.com/binary-husky/void-terminal) pip包 | 脱离GUI,在Python中直接调用本项目的所有函数插件(开发中)
- ⭐虚空终端插件 | [插件] 能够使用自然语言直接调度本项目其他插件
  更多新功能展示 (图像生成等) …… | 见本文档结尾处 ……
  </div>

@@ -131,7 +128,26 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
  <br><br>

  # Installation
- ### 安装方法I:直接运行 (Windows, Linux or MacOS)
+
+ ```mermaid
+ flowchart TD
+     A{"安装方法"} --> W1("I. 🔑直接运行 (Windows, Linux or MacOS)")
+     W1 --> W11["1. Python pip包管理依赖"]
+     W1 --> W12["2. Anaconda包管理依赖(推荐⭐)"]
+
+     A --> W2["II. 🐳使用Docker (Windows, Linux or MacOS)"]
+
+     W2 --> k1["1. 部署项目全部能力的大镜像(推荐⭐)"]
+     W2 --> k2["2. 仅在线模型(GPT, GLM4等)镜像"]
+     W2 --> k3["3. 在线模型 + Latex的大镜像"]
+
+     A --> W4["IV. 🚀其他部署方法"]
+     W4 --> C1["1. Windows/MacOS 一键安装运行脚本(推荐⭐)"]
+     W4 --> C2["2. Huggingface, Sealos远程部署"]
+     W4 --> C4["3. ... 其他 ..."]
+ ```
+
+ ### 安装方法I:直接运行 (Windows, Linux or MacOS)

  1. 下载项目

@@ -144,7 +160,7 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼

  在`config.py`中,配置API KEY等变量。[特殊网络环境设置方法](https://github.com/binary-husky/gpt_academic/issues/1)、[Wiki-项目配置说明](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。

- 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,以确保更新或其他用户无法轻易查看您的私有配置 」。
+ 「 程序会优先检查是否存在名为`config_private.py`的私密配置文件,并用其中的配置覆盖`config.py`的同名配置。如您能理解以上读取逻辑,我们强烈建议您在`config.py`同路径下创建一个名为`config_private.py`的新配置文件,并使用`config_private.py`配置项目,从而确保自动更新时不会丢失配置 」。

  「 支持通过`环境变量`配置项目,环境变量的书写格式参考`docker-compose.yml`文件或者我们的[Wiki页面](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。配置读取优先级: `环境变量` > `config_private.py` > `config.py` 」。

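The documented read priority (`环境变量` > `config_private.py` > `config.py`) can be pictured as a three-step lookup. This is an illustrative sketch of the order only, not the project's actual `get_conf` implementation:

```python
import importlib
import os

def read_single_conf(name: str):
    """Illustrative lookup: an environment variable wins, then
    config_private.py overrides, then the tracked config.py defaults."""
    if name in os.environ:
        return os.environ[name]
    try:
        return getattr(importlib.import_module("config_private"), name)
    except (ImportError, AttributeError):
        return getattr(importlib.import_module("config"), name)
```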
@@ -164,11 +180,11 @@ Latex论文一键校对 | [插件] 仿Grammarly对Latex文章进行语法、拼
  <details><summary>如果需要支持清华ChatGLM2/复旦MOSS/RWKV作为后端,请点击展开此处</summary>
  <p>

- 【可选步骤】如果需要支持清华ChatGLM2/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):
+ 【可选步骤】如果需要支持清华ChatGLM3/复旦MOSS作为后端,需要额外安装更多依赖(前提条件:熟悉Python + 用过Pytorch + 电脑配置够强):

  ```sh
- # 【可选步骤I】支持清华ChatGLM2。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
- python -m pip install -r request_llms/requirements_chatglm.txt
+ # 【可选步骤I】支持清华ChatGLM3。清华ChatGLM备注:如果遇到"Call ChatGLM fail 不能正常加载ChatGLM的参数" 错误,参考如下: 1:以上默认安装的为torch+cpu版,使用cuda需要卸载torch重新安装torch+cuda; 2:如因本机配置不够无法加载模型,可以修改request_llm/bridge_chatglm.py中的模型精度, 将 AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) 都修改为 AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
+ python -m pip install -r request_llms/requirements_chatglm.txt

  # 【可选步骤II】支持复旦MOSS
  python -m pip install -r request_llms/requirements_moss.txt
@@ -209,7 +225,7 @@ pip install peft
  docker-compose up
  ```

- 1. 仅ChatGPT+文心一言+spark等在线模型(推荐大多数人选择)
+ 1. 仅ChatGPT + GLM4 + 文心一言+spark等在线模型(推荐大多数人选择)
  [![basic](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-without-local-llms.yml)
  [![basiclatex](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-latex.yml)
  [![basicaudio](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-audio-assistant.yml)
@@ -221,7 +237,7 @@ pip install peft

  P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以直接使用方案4或者方案0获取Latex功能。

- 2. ChatGPT + ChatGLM2 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
+ 2. ChatGPT + GLM3 + MOSS + LLAMA2 + 通义千问(需要熟悉[Nvidia Docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#installing-on-ubuntu-and-debian)运行时)
  [![chatglm](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml/badge.svg?branch=master)](https://github.com/binary-husky/gpt_academic/actions/workflows/build-with-chatglm.yml)

  ``` sh
@@ -255,8 +271,8 @@ P.S. 如果需要依赖Latex的插件功能,请见Wiki。另外,您也可以
  ```python
  "超级英译中": {
      # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
-     "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
-
+     "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
+
      # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来。
      "Suffix": "",
  },
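A custom button like `超级英译中` works purely by string composition: the final prompt is `Prefix + user_input + Suffix`. A minimal sketch of that composition (illustrative; the real wiring lives in `handle_core_functionality` in `core_functional.py`):

```python
entry = {
    "Prefix": "请翻译把下面一段内容成中文,然后用一个markdown表格逐一解释文中出现的专有名词:\n\n",
    "Suffix": "",
}

def compose(entry: dict, user_input: str) -> str:
    # Prefix describes the task; Suffix can e.g. close a quotation around the input.
    return entry["Prefix"] + user_input + entry["Suffix"]

print(compose(entry, "Large language models are ..."))
```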
@@ -320,9 +336,9 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h
  <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/bc7ab234-ad90-48a0-8d62-f703d9e74665" width="500" >
  </div>

- 8. OpenAI音频解析与总结
+ 8. 基于mermaid的流图、脑图绘制
  <div align="center">
- <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/709ccf95-3aee-498a-934a-e1c22d3d5d5b" width="500" >
+ <img src="https://github.com/binary-husky/gpt_academic/assets/96192199/c518b82f-bd53-46e2-baf5-ad1b081c1da4" width="500" >
  </div>

  9. Latex全文校对纠错
@@ -339,8 +355,8 @@ Tip:不指定文件直接点击 `载入对话历史存档` 可以查看历史h


  ### II:版本:
-
- - version 3.70(todo): 优化AutoGen插件主题并设计一系列衍生插件
+ - version 3.80(TODO): 优化AutoGen插件主题并设计一系列衍生插件
+ - version 3.70: 引入Mermaid绘图,实现GPT画脑图等功能
  - version 3.60: 引入AutoGen作为新一代插件的基石
  - version 3.57: 支持GLM3,星火v3,文心一言v4,修复本地模型的并发BUG
  - version 3.56: 支持动态追加基础功能按钮,新汇报PDF汇总页面
@@ -373,6 +389,32 @@ GPT Academic开发者QQ群:`610599535`
  - 某些浏览器翻译插件干扰此软件前端的运行
  - 官方Gradio目前有很多兼容性问题,请**务必使用`requirement.txt`安装Gradio**

+ ```mermaid
+ timeline LR
+     title GPT-Academic项目发展历程
+     section 2.x
+         1.0~2.2: 基础功能: 引入模块化函数插件: 可折叠式布局: 函数插件支持热重载
+         2.3~2.5: 增强多线程交互性: 新增PDF全文翻译功能: 新增输入区切换位置的功能: 自更新
+         2.6: 重构了插件结构: 提高了交互性: 加入更多插件
+     section 3.x
+         3.0~3.1: 对chatglm支持: 对其他小型llm支持: 支持同时问询多个gpt模型: 支持多个apikey负载均衡
+         3.2~3.3: 函数插件支持更多参数接口: 保存对话功能: 解读任意语言代码: 同时询问任意的LLM组合: 互联网信息综合功能
+         3.4: 加入arxiv论文翻译: 加入latex论文批改功能
+         3.44: 正式支持Azure: 优化界面易用性
+         3.46: 自定义ChatGLM2微调模型: 实时语音对话
+         3.49: 支持阿里达摩院通义千问: 上海AI-Lab书生: 讯飞星火: 支持百度千帆平台 & 文心一言
+         3.50: 虚空终端: 支持插件分类: 改进UI: 设计新主题
+         3.53: 动态选择不同界面主题: 提高稳定性: 解决多用户冲突问题
+         3.55: 动态代码解释器: 重构前端界面: 引入悬浮窗口与菜单栏
+         3.56: 动态追加基础功能按钮: 新汇报PDF汇总页面
+         3.57: GLM3, 星火v3: 支持文心一言v4: 修复本地模型的并发BUG
+         3.60: 引入AutoGen
+         3.70: 引入Mermaid绘图: 实现GPT画脑图等功能
+         3.80(TODO): 优化AutoGen插件主题: 设计衍生插件
+ ```
+
+
  ### III:主题
  可以通过修改`THEME`选项(config.py)变更主题
  1. `Chuanhu-Small-and-Beautiful` [网址](https://github.com/GaiZhenbiao/ChuanhuChatGPT/)

app.py CHANGED
@@ -15,9 +15,9 @@ help_menu_description = \

  def main():
      import subprocess, sys
-     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://github.com/binary-husky/gpt_academic/raw/master/docs/gradio-3.32.6-py3-none-any.whl'])
+     subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://fastly.jsdelivr.net/gh/binary-husky/gradio-fix@gpt-academic/release/gradio-3.32.7-py3-none-any.whl'])
      import gradio as gr
-     if gr.__version__ not in ['3.32.6']:
+     if gr.__version__ not in ['3.32.6', '3.32.7']:
          raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
      from request_llms.bridge_all import predict
      from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
@@ -142,17 +142,17 @@ def main():
              with gr.Row():
                  switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
          with gr.Row():
-             with gr.Accordion("点击展开“文件上传区”。上传本地文件/压缩包供函数插件调用。", open=False) as area_file_up:
+             with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
                  file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")


-         with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden"):
+         with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
              with gr.Row():
                  with gr.Tab("上传文件", elem_id="interact-panel"):
                      gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
                      file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")

-                 with gr.Tab("更换模型 & Prompt", elem_id="interact-panel"):
+                 with gr.Tab("更换模型", elem_id="interact-panel"):
                      md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
                      top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
                      temperature = gr.Slider(minimum=-0, maximum=2.0, value=1.0, step=0.01, interactive=True, label="Temperature",)
@@ -164,10 +164,9 @@ def main():
                  checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
                                                value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
                  checkboxes_2 = gr.CheckboxGroup(["自定义菜单"],
-                                                 value=[], label="显示/隐藏自定义菜单", elem_id='cbs').style(container=False)
+                                                 value=[], label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
                  dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
-                 dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode,
-                 )
+                 dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
              with gr.Tab("帮助", elem_id="interact-panel"):
                  gr.Markdown(help_menu_description)
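The rewritten `dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)` uses the Gradio 3.x idiom for running pure client-side JavaScript with no Python handler (`fn=None` means no server round-trip). A self-contained sketch of the same pattern outside this project:

```python
import gradio as gr

toggle_js = """() => {
    // Flip the dark-mode class on the page; runs entirely in the browser.
    document.body.classList.toggle('dark');
}"""

with gr.Blocks() as demo:
    btn = gr.Button("切换界面明暗 ☀")
    btn.click(None, None, None, _js=toggle_js)  # fn=None: JavaScript only

if __name__ == "__main__":
    demo.launch()
```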
config.py CHANGED
@@ -96,12 +96,14 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
  LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
  AVAIL_LLM_MODELS = ["gpt-3.5-turbo-1106","gpt-4-1106-preview","gpt-4-vision-preview",
                      "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
-                     "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
                      "gpt-4", "gpt-4-32k", "azure-gpt-4", "api2d-gpt-4",
-                     "chatglm3", "moss", "claude-2"]
- # P.S. 其他可用的模型还包括 ["zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613", "gpt-3.5-random"
+                     "gemini-pro", "chatglm3", "claude-2", "zhipuai"]
+ # P.S. 其他可用的模型还包括 [
+ #     "moss", "qwen-turbo", "qwen-plus", "qwen-max"
+ #     "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
+ #     "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
  #     "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
- # “qwen-turbo", "qwen-plus", "qwen-max"]
+ # ]


  # 定义界面上“询问多个GPT模型”插件应该使用哪些模型,请从AVAIL_LLM_MODELS中选择,并在不同模型之间用`&`间隔,例如"gpt-3.5-turbo&chatglm3&azure-gpt-4"
@@ -200,7 +202,13 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"

  # 接入智谱大模型
  ZHIPUAI_API_KEY = ""
- ZHIPUAI_MODEL = "chatglm_turbo"
+ ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
+
+
+ # # 火山引擎YUNQUE大模型
+ # YUNQUE_SECRET_KEY = ""
+ # YUNQUE_ACCESS_KEY = ""
+ # YUNQUE_MODEL = ""


  # Claude API KEY
@@ -211,6 +219,10 @@ ANTHROPIC_API_KEY = ""
  CUSTOM_API_KEY_PATTERN = ""


+ # Google Gemini API-Key
+ GEMINI_API_KEY = ''
+
+
  # HUGGINGFACE的TOKEN,下载LLAMA时起作用 https://huggingface.co/docs/hub/security-tokens
  HUGGINGFACE_ACCESS_TOKEN = "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"

@@ -299,6 +311,9 @@ NUM_CUSTOM_BASIC_BTN = 4
      ├── "qwen-turbo" 等通义千问大模型
      │   └── DASHSCOPE_API_KEY

+     ├── "Gemini"
+     │   └── GEMINI_API_KEY
+
      └── "newbing" Newbing接口不再稳定,不推荐使用
          ├── NEWBING_STYLE
          └── NEWBING_COOKIES
core_functional.py CHANGED
@@ -3,30 +3,58 @@
  # 'stop' 颜色对应 theme.py 中的 color_er
  import importlib
  from toolbox import clear_line_break
-
+ from textwrap import dedent

  def get_core_functions():
      return {
+
          "英语学术润色": {
-             # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
-             "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, " +
-                       r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. " +
+             # [1*] 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
+             "Prefix": r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
+                       r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
                        r"Firstly, you should provide the polished paragraph. "
                        r"Secondly, you should list all your modification and explain the reasons to do so in markdown table." + "\n\n",
-             # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
+             # [2*] 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
              "Suffix": r"",
-             # 按钮颜色 (默认 secondary)
+             # [3] 按钮颜色 (可选参数,默认 secondary)
              "Color": r"secondary",
-             # 按钮是否可见 (默认 True,即可见)
+             # [4] 按钮是否可见 (可选参数,默认 True,即可见)
              "Visible": True,
-             # 是否在触发时清除历史 (默认 False,即不处理之前的对话历史)
-             "AutoClearHistory": False
+             # [5] 是否在触发时清除历史 (可选参数,默认 False,即不处理之前的对话历史)
+             "AutoClearHistory": False,
+             # [6] 文本预处理 (可选参数,默认 None,举例:写个函数移除所有的换行符)
+             "PreProcess": None,
          },
-         "中文学术润色": {
-             "Prefix": r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性," +
-                       r"同时分解长句,减少重复,并提供改进建议。请只提供文本的更正版本,避免包括解释。请编辑以下文本" + "\n\n",
-             "Suffix": r"",
+
+
+         "总结绘制脑图": {
+             # 前缀,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等
+             "Prefix": r"",
+             # 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
+             "Suffix":
+                 dedent("\n"+r'''
+                 ==============================
+                 使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
+
+                 以下是对以上文本的总结,以mermaid flowchart的形式展示:
+                 ```mermaid
+                 flowchart LR
+                     A["节点名1"] --> B("节点名2")
+                     B --> C{"节点名3"}
+                     C --> D["节点名4"]
+                     C --> |"箭头名1"| E["节点名5"]
+                     C --> |"箭头名2"| F["节点名6"]
+                 ```
+
+                 警告:
+                 (1)使用中文
+                 (2)节点名字使用引号包裹,如["Laptop"]
+                 (3)`|` 和 `"`之间不要存在空格
+                 (4)根据情况选择flowchart LR(从左到右)或者flowchart TD(从上到下)
+                 '''),
          },
+
+
          "查找语法错误": {
              "Prefix": r"Help me ensure that the grammar and the spelling is correct. "
                        r"Do not try to polish the text, if no mistake is found, tell me that this paragraph is good. "
@@ -46,11 +74,15 @@ def get_core_functions():
              "Suffix": r"",
              "PreProcess": clear_line_break, # 预处理:清除换行符
          },
+
+
          "中译英": {
              "Prefix": r"Please translate following sentence to English:" + "\n\n",
              "Suffix": r"",
          },
-         "学术中英互译": {
+
+
+         "学术英中互译": {
              "Prefix": r"I want you to act as a scientific English-Chinese translator, " +
                        r"I will provide you with some paragraphs in one language " +
                        r"and your task is to accurately and academically translate the paragraphs only into the other language. " +
@@ -59,29 +91,36 @@ def get_core_functions():
                        r"such as natural language processing, and rhetorical knowledge " +
                        r"and experience about effective writing techniques to reply. " +
                        r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:" + "\n\n",
-             "Suffix": "",
-             "Color": "secondary",
+             "Suffix": r"",
          },
+
+
          "英译中": {
              "Prefix": r"翻译成地道的中文:" + "\n\n",
              "Suffix": r"",
-             "Visible": False,
+             "Visible": False,
          },
+
+
          "找图片": {
-             "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL," +
+             "Prefix": r"我需要你找一张网络图片。使用Unsplash API(https://source.unsplash.com/960x640/?<英语关键词>)获取图片URL,"
                        r"然后请使用Markdown格式封装,并且不要有反斜线,不要用代码块。现在,请按以下描述给我发送图片:" + "\n\n",
              "Suffix": r"",
-             "Visible": False,
+             "Visible": False,
          },
+
+
          "解释代码": {
              "Prefix": r"请解释以下代码:" + "\n```\n",
              "Suffix": "\n```\n",
          },
+
+
          "参考文献转Bib": {
-             "Prefix": r"Here are some bibliography items, please transform them into bibtex style." +
-                       r"Note that, reference styles maybe more than one kind, you should transform each item correctly." +
-                       r"Items need to be transformed:",
-             "Visible": False,
+             "Prefix": r"Here are some bibliography items, please transform them into bibtex style."
+                       r"Note that, reference styles maybe more than one kind, you should transform each item correctly."
+                       r"Items need to be transformed:" + "\n\n",
+             "Visible": False,
              "Suffix": r"",
          }
      }
@@ -98,8 +137,14 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
          return inputs, history
      else:
          # 预制功能
-         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
+         if "PreProcess" in core_functional[additional_fn]:
+             if core_functional[additional_fn]["PreProcess"] is not None:
+                 inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
          inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
         if core_functional[additional_fn].get("AutoClearHistory", False):
              history = []
          return inputs, history
+
+ if __name__ == "__main__":
+     t = get_core_functions()["总结绘制脑图"]
+     print(t["Prefix"] + t["Suffix"])
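The new `"PreProcess"` slot makes preprocessing an explicit, optional field: `None` (or absence) skips it, while a callable transforms the raw input before Prefix/Suffix are applied. A small sketch with a hypothetical hook:

```python
def strip_newlines(text: str) -> str:
    """Example PreProcess hook: collapse the input onto one line,
    similar in spirit to toolbox.clear_line_break."""
    return " ".join(text.split())

entry = {
    "Prefix": "Summarize the following text in one sentence:\n\n",
    "Suffix": "",
    "PreProcess": strip_newlines,  # may also be None, as in "英语学术润色"
}

raw = "line one\nline two"
text = entry["PreProcess"](raw) if entry["PreProcess"] is not None else raw
print(entry["Prefix"] + text + entry["Suffix"])
```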
crazy_functional.py CHANGED
@@ -37,110 +37,109 @@ def get_crazy_functions():
      from crazy_functions.批量Markdown翻译 import Markdown中译英
      from crazy_functions.虚空终端 import 虚空终端

-
      function_plugins = {
          "虚空终端": {
              "Group": "对话|编程|学术|智能体",
              "Color": "stop",
              "AsButton": True,
-             "Function": HotReload(虚空终端)
+             "Function": HotReload(虚空终端),
          },
          "解析整个Python项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": True,
              "Info": "解析一个Python项目的所有源文件(.py) | 输入参数为路径",
-             "Function": HotReload(解析一个Python项目)
+             "Function": HotReload(解析一个Python项目),
          },
          "载入对话历史存档(先上传存档或输入路径)": {
              "Group": "对话",
              "Color": "stop",
              "AsButton": False,
              "Info": "载入对话历史存档 | 输入参数为路径",
-             "Function": HotReload(载入对话历史存档)
+             "Function": HotReload(载入对话历史存档),
          },
          "删除所有本地对话历史记录(谨慎操作)": {
              "Group": "对话",
              "AsButton": False,
              "Info": "删除所有本地对话历史记录,谨慎操作 | 不需要输入参数",
-             "Function": HotReload(删除所有本地对话历史记录)
+             "Function": HotReload(删除所有本地对话历史记录),
          },
          "清除所有缓存文件(谨慎操作)": {
              "Group": "对话",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
-             "Function": HotReload(清除缓存)
+             "Function": HotReload(清除缓存),
          },
          "批量总结Word文档": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": True,
              "Info": "批量总结word文档 | 输入参数为路径",
-             "Function": HotReload(总结word文档)
+             "Function": HotReload(总结word文档),
          },
          "解析整个Matlab项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False,
              "Info": "解析一个Matlab项目的所有源文件(.m) | 输入参数为路径",
-             "Function": HotReload(解析一个Matlab项目)
+             "Function": HotReload(解析一个Matlab项目),
          },
          "解析整个C++项目头文件": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个C++项目的所有头文件(.h/.hpp) | 输入参数为路径",
-             "Function": HotReload(解析一个C项目的头文件)
+             "Function": HotReload(解析一个C项目的头文件),
          },
          "解析整个C++项目(.cpp/.hpp/.c/.h)": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个C++项目的所有源文件(.cpp/.hpp/.c/.h)| 输入参数为路径",
-             "Function": HotReload(解析一个C项目)
+             "Function": HotReload(解析一个C项目),
          },
          "解析整个Go项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个Go项目的所有源文件 | 输入参数为路径",
-             "Function": HotReload(解析一个Golang项目)
+             "Function": HotReload(解析一个Golang项目),
          },
          "解析整个Rust项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个Rust项目的所有源文件 | 输入参数为路径",
-             "Function": HotReload(解析一个Rust项目)
+             "Function": HotReload(解析一个Rust项目),
          },
          "解析整个Java项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个Java项目的所有源文件 | 输入参数为路径",
-             "Function": HotReload(解析一个Java项目)
+             "Function": HotReload(解析一个Java项目),
          },
          "解析整个前端项目(js,ts,css等)": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个前端项目的所有源文件(js,ts,css等) | 输入参数为路径",
-             "Function": HotReload(解析一个前端项目)
+             "Function": HotReload(解析一个前端项目),
          },
          "解析整个Lua项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个Lua项目的所有源文件 | 输入参数为路径",
-             "Function": HotReload(解析一个Lua项目)
+             "Function": HotReload(解析一个Lua项目),
          },
          "解析整个CSharp项目": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "解析一个CSharp项目的所有源文件 | 输入参数为路径",
-             "Function": HotReload(解析一个CSharp项目)
+             "Function": HotReload(解析一个CSharp项目),
          },
          "解析Jupyter Notebook文件": {
              "Group": "编程",
@@ -156,103 +155,102 @@ def get_crazy_functions():
              "Color": "stop",
              "AsButton": False,
              "Info": "读取Tex论文并写摘要 | 输入参数为路径",
-             "Function": HotReload(读文章写摘要)
+             "Function": HotReload(读文章写摘要),
          },
          "翻译README或MD": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": True,
              "Info": "将Markdown翻译为中文 | 输入参数为路径或URL",
-             "Function": HotReload(Markdown英译中)
+             "Function": HotReload(Markdown英译中),
          },
          "翻译Markdown或README(支持Github链接)": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False,
              "Info": "将Markdown或README翻译为中文 | 输入参数为路径或URL",
-             "Function": HotReload(Markdown英译中)
+             "Function": HotReload(Markdown英译中),
          },
          "批量生成函数注释": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "批量生成函数的注释 | 输入参数为路径",
-             "Function": HotReload(批量生成函数注释)
+             "Function": HotReload(批量生成函数注释),
          },
          "保存当前的对话": {
              "Group": "对话",
              "AsButton": True,
              "Info": "保存当前的对话 | 不需要输入参数",
-             "Function": HotReload(对话历史存档)
+             "Function": HotReload(对话历史存档),
          },
          "[多线程Demo]解析此项目本身(源码自译解)": {
              "Group": "对话|编程",
              "AsButton": False, # 加入下拉菜单中
              "Info": "多线程解析并翻译此项目的源码 | 不需要输入参数",
-             "Function": HotReload(解析项目本身)
+             "Function": HotReload(解析项目本身),
          },
          "历史上的今天": {
              "Group": "对话",
              "AsButton": True,
              "Info": "查看历史上的今天事件 (这是一个面向开发者的插件Demo) | 不需要输入参数",
-             "Function": HotReload(高阶功能模板函数)
+             "Function": HotReload(高阶功能模板函数),
          },
          "精准翻译PDF论文": {
              "Group": "学术",
              "Color": "stop",
-             "AsButton": True,
+             "AsButton": True,
              "Info": "精准翻译PDF论文为中文 | 输入参数为路径",
-             "Function": HotReload(批量翻译PDF文档)
+             "Function": HotReload(批量翻译PDF文档),
          },
          "询问多个GPT模型": {
              "Group": "对话",
              "Color": "stop",
              "AsButton": True,
-             "Function": HotReload(同时问询)
+             "Function": HotReload(同时问询),
          },
          "批量总结PDF文档": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "批量总结PDF文档的内容 | 输入参数为路径",
-             "Function": HotReload(批量总结PDF文档)
+             "Function": HotReload(批量总结PDF文档),
          },
          "谷歌学术检索助手(输入谷歌学术搜索页url)": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "使用谷歌学术检索助手搜索指定URL的结果 | 输入参数为谷歌学术搜索页的URL",
-             "Function": HotReload(谷歌检索小助手)
+             "Function": HotReload(谷歌检索小助手),
          },
          "理解PDF文档内容 (模仿ChatPDF)": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "理解PDF文档的内容并进行回答 | 输入参数为路径",
-             "Function": HotReload(理解PDF文档内容标准文件输入)
+             "Function": HotReload(理解PDF文档内容标准文件输入),
          },
          "英文Latex项目全文润色(输入路径或上传压缩包)": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
-             "Function": HotReload(Latex英文润色)
+             "Function": HotReload(Latex英文润色),
          },
          "英文Latex项目全文纠错(输入路径或上传压缩包)": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
-             "Function": HotReload(Latex英文纠错)
+             "Function": HotReload(Latex英文纠错),
          },
          "中文Latex项目全文润色(输入路径或上传压缩包)": {
              "Group": "学术",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
-             "Function": HotReload(Latex中文润色)
+             "Function": HotReload(Latex中文润色),
          },
-
          # 已经被新插件取代
          # "Latex项目全文中译英(输入路径或上传压缩包)": {
          #     "Group": "学术",
@@ -261,7 +259,6 @@ def get_crazy_functions():
          #     "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
          #     "Function": HotReload(Latex中译英)
          # },
-
          # 已经被新插件取代
          # "Latex项目全文英译中(输入路径或上传压缩包)": {
          #     "Group": "学术",
@@ -270,339 +267,414 @@ def get_crazy_functions():
          #     "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
          #     "Function": HotReload(Latex英译中)
          # },
-
          "批量Markdown中译英(输入路径或上传压缩包)": {
              "Group": "编程",
              "Color": "stop",
              "AsButton": False, # 加入下拉菜单中
              "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
-             "Function": HotReload(Markdown中译英)
          },
      }

      # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
      try:
          from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
-         function_plugins.update({
-             "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
-                 "Group": "学术",
-                 "Color": "stop",
-                 "AsButton": False, # 加入下拉菜单中
-                 # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
-                 "Function": HotReload(下载arxiv论文并翻译摘要)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.联网的ChatGPT import 连接网络回答问题
-         function_plugins.update({
-             "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False, # 加入下拉菜单中
-                 # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
-                 "Function": HotReload(连接网络回答问题)
              }
-         })
          from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
-         function_plugins.update({
-             "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False, # 加入下拉菜单中
-                 "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
-                 "Function": HotReload(连接bing搜索回答问题)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.解析项目源代码 import 解析任意code项目
-         function_plugins.update({
-             "解析项目源代码(手动指定和筛选源代码文件类型)": {
-                 "Group": "编程",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-                 "ArgsReminder": "输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: \"*.c, ^*.cpp, config.toml, ^*.toml\"", # 高级参数输入区的显示提示
-                 "Function": HotReload(解析任意code项目)
-             },
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
-         function_plugins.update({
-             "询问多个GPT模型(手动指定询问哪些模型)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-                 "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
-                 "Function": HotReload(同时问询_指定模型)
-             },
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
-         function_plugins.update({
-             "图片生成_DALLE2 (先切换模型到gpt-*)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-                 "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
-                 "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
-                 "Function": HotReload(图片生成_DALLE2)
-             },
-         })
-         function_plugins.update({
-             "图片生成_DALLE3 (先切换模型到gpt-*)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
-                 "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
-                 "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
-                 "Function": HotReload(图片生成_DALLE3)
-             },
-         })
-         function_plugins.update({
-             "图片修改_DALLE2 (先切换模型到gpt-*)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": False, # 调用时,唤起高级参数输入区(默认False)
-                 # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
-                 "Function": HotReload(图片修改_DALLE2)
-             },
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.总结音视频 import 总结音视频
-         function_plugins.update({
-             "批量总结音视频(输入路径或上传压缩包)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
-                 "Info": "批量总结音频或视频 | 输入参数为路径",
-                 "Function": HotReload(总结音视频)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.数学动画生成manim import 动画生成
-         function_plugins.update({
-             "数学动画生成(Manim)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
-                 "Function": HotReload(动画生成)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
-         function_plugins.update({
-             "Markdown翻译(指定翻译成何种语言)": {
-                 "Group": "编程",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
-                 "Function": HotReload(Markdown翻译指定语言)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.知识库问答 import 知识库文件注入
-         function_plugins.update({
-             "构建知识库(先上传文件素材,再运行此插件)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
-                 "Function": HotReload(知识库文件注入)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.知识库问答 import 读取知识库作答
-         function_plugins.update({
-             "知识库文件注入(构建知识库后,再运行此插件)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
-                 "Function": HotReload(读取知识库作答)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.交互功能函数模板 import 交互功能模板函数
-         function_plugins.update({
-             "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
-                 "Group": "对话",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Function": HotReload(交互功能模板函数)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
-         function_plugins.update({
-             "Latex英文纠错+高亮修正位置 [需Latex]": {
-                 "Group": "学术",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
-                 "Function": HotReload(Latex英文纠错加PDF对比)
              }
-         })
          from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
-         function_plugins.update({
-             "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
-                 "Group": "学术",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder":
-                     "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
-                     "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
-                     'If the term "agent" is used in this section, it should be translated to "智能体". ',
-                 "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
-                 "Function": HotReload(Latex翻译中文并重新编译PDF)
              }
-         })
-         function_plugins.update({
-             "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
-                 "Group": "学术",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "AdvancedArgs": True,
-                 "ArgsReminder":
-                     "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 " +
-                     "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: " +
-                     'If the term "agent" is used in this section, it should be translated to "智能体". ',
-                 "Info": "本地Latex论文精细翻译 | 输入参数是路径",
-                 "Function": HotReload(Latex翻译中文并重新编译PDF)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from toolbox import get_conf
-         ENABLE_AUDIO = get_conf('ENABLE_AUDIO')
          if ENABLE_AUDIO:
              from crazy_functions.语音助手 import 语音助手
-             function_plugins.update({
-                 "实时语音对话": {
-                     "Group": "对话",
-                     "Color": "stop",
-                     "AsButton": True,
-                     "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
-                     "Function": HotReload(语音助手)
                  }
-             })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
-         function_plugins.update({
-             "精准翻译PDF文档(NOUGAT)": {
-                 "Group": "学术",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Function": HotReload(批量翻译PDF文档)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.函数动态生成 import 函数动态生成
-         function_plugins.update({
-             "动态代码解释器(CodeInterpreter)": {
-                 "Group": "智能体",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Function": HotReload(函数动态生成)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.多智能体 import 多智能体终端
-         function_plugins.update({
-             "AutoGen多智能体终端(仅供测试)": {
-                 "Group": "智能体",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Function": HotReload(多智能体终端)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      try:
          from crazy_functions.互动小游戏 import 随机小游戏
-         function_plugins.update({
-             "随机互动小游戏(仅供测试)": {
-                 "Group": "智能体",
-                 "Color": "stop",
-                 "AsButton": False,
-                 "Function": HotReload(随机小游戏)
              }
-         })
      except:
          print(trimmed_format_exc())
-         print('Load function plugin failed')

      # try:
      #     from crazy_functions.chatglm微调工具 import 微调数据集生成
@@ -618,8 +690,6 @@ def get_crazy_functions():
      #     except:
      #         print('Load function plugin failed')

-
-
      """
      设置默认值:
      - 默认 Group = 对话
@@ -629,12 +699,12 @@ def get_crazy_functions():
      """
      for name, function_meta in function_plugins.items():
          if "Group" not in function_meta:
-             function_plugins[name]["Group"] = '对话'
          if "AsButton" not in function_meta:
              function_plugins[name]["AsButton"] = True
          if "AdvancedArgs" not in function_meta:
              function_plugins[name]["AdvancedArgs"] = False
          if "Color" not in function_meta:
-             function_plugins[name]["Color"] = 'secondary'

      return function_plugins
242
  "Color": "stop",
243
  "AsButton": False, # 加入下拉菜单中
244
  "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
245
+ "Function": HotReload(Latex英文纠错),
246
  },
247
  "中文Latex项目全文润色(输入路径或上传压缩包)": {
248
  "Group": "学术",
249
  "Color": "stop",
250
  "AsButton": False, # 加入下拉菜单中
251
  "Info": "对中文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
252
+ "Function": HotReload(Latex中文润色),
253
  },
 
254
  # 已经被新插件取代
255
  # "Latex项目全文中译英(输入路径或上传压缩包)": {
256
  # "Group": "学术",
 
259
  # "Info": "对Latex项目全文进行中译英处理 | 输入参数为路径或上传压缩包",
260
  # "Function": HotReload(Latex中译英)
261
  # },
 
262
  # 已经被新插件取代
263
  # "Latex项目全文英译中(输入路径或上传压缩包)": {
264
  # "Group": "学术",
 
267
  # "Info": "对Latex项目全文进行英译中处理 | 输入参数为路径或上传压缩包",
268
  # "Function": HotReload(Latex英译中)
269
  # },
 
270
  "批量Markdown中译英(输入路径或上传压缩包)": {
271
  "Group": "编程",
272
  "Color": "stop",
273
  "AsButton": False, # 加入下拉菜单中
274
  "Info": "批量将Markdown文件中文翻译为英文 | 输入参数为路径或上传压缩包",
275
+ "Function": HotReload(Markdown中译英),
276
  },
277
  }
278
 
279
  # -=--=- 尚未充分测试的实验性插件 & 需要额外依赖的插件 -=--=-
280
  try:
281
  from crazy_functions.下载arxiv论文翻译摘要 import 下载arxiv论文并翻译摘要
282
+
283
+ function_plugins.update(
284
+ {
285
+ "一键下载arxiv论文并翻译摘要(先在input输入编号,如1812.10695)": {
286
+ "Group": "学术",
287
+ "Color": "stop",
288
+ "AsButton": False, # 加入下拉菜单中
289
+ # "Info": "下载arxiv论文并翻译摘要 | 输入参数为arxiv编号如1812.10695",
290
+ "Function": HotReload(下载arxiv论文并翻译摘要),
291
+ }
292
  }
293
+ )
294
  except:
295
  print(trimmed_format_exc())
296
+ print("Load function plugin failed")
297
 
298
  try:
299
  from crazy_functions.联网的ChatGPT import 连接网络回答问题
300
+
301
+ function_plugins.update(
302
+ {
303
+ "连接网络回答问题(输入问题后点击该插件,需要访问谷歌)": {
304
+ "Group": "对话",
305
+ "Color": "stop",
306
+ "AsButton": False, # 加入下拉菜单中
307
+ # "Info": "连接网络回答问题(需要访问谷歌)| 输入参数是一个问题",
308
+ "Function": HotReload(连接网络回答问题),
309
+ }
310
  }
311
+ )
312
  from crazy_functions.联网的ChatGPT_bing版 import 连接bing搜索回答问题
313
+
314
+ function_plugins.update(
315
+ {
316
+ "连接网络回答问题(中文Bing版,输入问题后点击该插件)": {
317
+ "Group": "对话",
318
+ "Color": "stop",
319
+ "AsButton": False, # 加入下拉菜单中
320
+ "Info": "连接网络回答问题(需要访问中文Bing)| 输入参数是一个问题",
321
+ "Function": HotReload(连接bing搜索回答问题),
322
+ }
323
  }
324
+ )
325
  except:
326
  print(trimmed_format_exc())
327
+ print("Load function plugin failed")
328
 
329
  try:
330
  from crazy_functions.解析项目源代码 import 解析任意code项目
331
+
332
+ function_plugins.update(
333
+ {
334
+ "解析项目源代码(手动指定和筛选源代码文件类型)": {
335
+ "Group": "编程",
336
+ "Color": "stop",
337
+ "AsButton": False,
338
+ "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
339
+ "ArgsReminder": '输入时用逗号隔开, *代表通配符, 加了^代表不匹配; 不输入代表全部匹配。例如: "*.c, ^*.cpp, config.toml, ^*.toml"', # 高级参数输入区的显示提示
340
+ "Function": HotReload(解析任意code项目),
341
+ },
342
+ }
343
+ )
344
  except:
345
  print(trimmed_format_exc())
346
+ print("Load function plugin failed")
347
 
348
  try:
349
  from crazy_functions.询问多个大语言模型 import 同时问询_指定模型
350
+
351
+ function_plugins.update(
352
+ {
353
+ "询问多个GPT模型(手动指定询问哪些模型)": {
354
+ "Group": "对话",
355
+ "Color": "stop",
356
+ "AsButton": False,
357
+ "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
358
+ "ArgsReminder": "支持任意数量的llm接口,用&符号分隔。例如chatglm&gpt-3.5-turbo&gpt-4", # 高级参数输入区的显示提示
359
+ "Function": HotReload(同时问询_指定模型),
360
+ },
361
+ }
362
+ )
363
  except:
364
  print(trimmed_format_exc())
365
+ print("Load function plugin failed")
366
 
367
  try:
368
  from crazy_functions.图片生成 import 图片生成_DALLE2, 图片生成_DALLE3, 图片修改_DALLE2
369
+
370
+ function_plugins.update(
371
+ {
372
+ "图片生成_DALLE2 (先切换模型到gpt-*)": {
373
+ "Group": "对话",
374
+ "Color": "stop",
375
+ "AsButton": False,
376
+ "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
377
+ "ArgsReminder": "在这里输入分辨率, 如1024x1024(默认),支持 256x256, 512x512, 1024x1024", # 高级参数输入区的显示提示
378
+ "Info": "使用DALLE2生成图片 | 输入参数字符串,提供图像的内容",
379
+ "Function": HotReload(图片生成_DALLE2),
380
+ },
381
+ }
382
+ )
383
+ function_plugins.update(
384
+ {
385
+ "图片生成_DALLE3 (先切换模型到gpt-*)": {
386
+ "Group": "对话",
387
+ "Color": "stop",
388
+ "AsButton": False,
389
+ "AdvancedArgs": True, # 调用时,唤起高级参数输入区(默认False)
390
+ "ArgsReminder": "在这里输入自定义参数「分辨率-质量(可选)-风格(可选)」, 参数示例「1024x1024-hd-vivid」 || 分辨率支持 「1024x1024」(默认) /「1792x1024」/「1024x1792」 || 质量支持 「-standard」(默认) /「-hd」 || 风格支持 「-vivid」(默认) /「-natural」", # 高级参数输入区的显示提示
391
+ "Info": "使用DALLE3生成图片 | 输入参数字符串,提供图像的内容",
392
+ "Function": HotReload(图片生成_DALLE3),
393
+ },
394
+ }
395
+ )
396
+ function_plugins.update(
397
+ {
398
+ "图片修改_DALLE2 (先切换模型到gpt-*)": {
399
+ "Group": "对话",
400
+ "Color": "stop",
401
+ "AsButton": False,
402
+ "AdvancedArgs": False, # 调用时,唤起高级参数输入区(默认False)
403
+ # "Info": "使用DALLE2修改图片 | 输入参数字符串,提供图像的内容",
404
+ "Function": HotReload(图片修改_DALLE2),
405
+ },
406
+ }
407
+ )
408
  except:
409
  print(trimmed_format_exc())
410
+ print("Load function plugin failed")
411
 
412
  try:
413
  from crazy_functions.总结音视频 import 总结音视频
414
+
415
+ function_plugins.update(
416
+ {
417
+ "批量总结音视频(输入路径或上传压缩包)": {
418
+ "Group": "对话",
419
+ "Color": "stop",
420
+ "AsButton": False,
421
+ "AdvancedArgs": True,
422
+ "ArgsReminder": "调用openai api 使用whisper-1模型, 目前支持的格式:mp4, m4a, wav, mpga, mpeg, mp3。此处可以输入解析提示,例如:解析为简体中文(默认)。",
423
+ "Info": "批量总结音频或视频 | 输入参数为路径",
424
+ "Function": HotReload(总结音视频),
425
+ }
426
  }
427
+ )
428
  except:
429
  print(trimmed_format_exc())
430
+ print("Load function plugin failed")
431
 
432
  try:
433
  from crazy_functions.数学动画生成manim import 动画生成
434
+
435
+ function_plugins.update(
436
+ {
437
+ "数学动画生成(Manim)": {
438
+ "Group": "对话",
439
+ "Color": "stop",
440
+ "AsButton": False,
441
+ "Info": "按照自然语言描述生成一个动画 | 输入参数是一段话",
442
+ "Function": HotReload(动画生成),
443
+ }
444
  }
445
+ )
446
  except:
447
  print(trimmed_format_exc())
448
+ print("Load function plugin failed")
449
 
450
  try:
451
  from crazy_functions.批量Markdown翻译 import Markdown翻译指定语言
452
+
453
+ function_plugins.update(
454
+ {
455
+ "Markdown翻译(指定翻译成何种语言)": {
456
+ "Group": "编程",
457
+ "Color": "stop",
458
+ "AsButton": False,
459
+ "AdvancedArgs": True,
460
+ "ArgsReminder": "请输入要翻译成哪种语言,默认为Chinese。",
461
+ "Function": HotReload(Markdown翻译指定语言),
462
+ }
463
  }
464
+ )
465
  except:
466
  print(trimmed_format_exc())
467
+ print("Load function plugin failed")
468
 
469
  try:
470
  from crazy_functions.知识库问答 import 知识库文件注入
471
+
472
+ function_plugins.update(
473
+ {
474
+ "构建知识库(先上传文件素材,再运行此插件)": {
475
+ "Group": "对话",
476
+ "Color": "stop",
477
+ "AsButton": False,
478
+ "AdvancedArgs": True,
479
+ "ArgsReminder": "此处待注入的知识库名称id, 默认为default。文件进入知识库后可长期保存。可以通过再次调用本插件的方式,向知识库追加更多文档。",
480
+ "Function": HotReload(知识库文件注入),
481
+ }
482
  }
483
+ )
484
  except:
485
  print(trimmed_format_exc())
486
+ print("Load function plugin failed")
487
 
488
  try:
489
  from crazy_functions.知识库问答 import 读取知识库作答
490
+
491
+ function_plugins.update(
492
+ {
493
+ "知识库文件注入(构建知识库后,再运行此插件)": {
494
+ "Group": "对话",
495
+ "Color": "stop",
496
+ "AsButton": False,
497
+ "AdvancedArgs": True,
498
+ "ArgsReminder": "待提取的知识库名称id, 默认为default, 您需要构建知识库后再运行此插件。",
499
+ "Function": HotReload(读取知识库作答),
500
+ }
501
  }
502
+ )
503
  except:
504
  print(trimmed_format_exc())
505
+ print("Load function plugin failed")
506
 
507
  try:
508
  from crazy_functions.交互功能函数模板 import 交互功能模板函数
509
+
510
+ function_plugins.update(
511
+ {
512
+ "交互功能模板Demo函数(查找wallhaven.cc的壁纸)": {
513
+ "Group": "对话",
514
+ "Color": "stop",
515
+ "AsButton": False,
516
+ "Function": HotReload(交互功能模板函数),
517
+ }
518
  }
519
+ )
520
  except:
521
  print(trimmed_format_exc())
522
+ print("Load function plugin failed")
523
 
524
  try:
525
  from crazy_functions.Latex输出PDF结果 import Latex英文纠错加PDF对比
526
+
527
+ function_plugins.update(
528
+ {
529
+ "Latex英文纠错+高亮修正位置 [需Latex]": {
530
+ "Group": "学术",
531
+ "Color": "stop",
532
+ "AsButton": False,
533
+ "AdvancedArgs": True,
534
+ "ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
535
+ "Function": HotReload(Latex英文纠错加PDF对比),
536
+ }
537
  }
538
+ )
539
  from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
540
+
541
+ function_plugins.update(
542
+ {
543
+ "Arxiv论文精细翻译(输入arxivID)[需Latex]": {
544
+ "Group": "学术",
545
+ "Color": "stop",
546
+ "AsButton": False,
547
+ "AdvancedArgs": True,
548
+ "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
549
+ + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
550
+ + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
551
+ "Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
552
+ "Function": HotReload(Latex翻译中文并重新编译PDF),
553
+ }
554
  }
555
+ )
556
+ function_plugins.update(
557
+ {
558
+ "本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
559
+ "Group": "学术",
560
+ "Color": "stop",
561
+ "AsButton": False,
562
+ "AdvancedArgs": True,
563
+ "ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
564
+ + "例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
565
+ + 'If the term "agent" is used in this section, it should be translated to "智能体". ',
566
+ "Info": "本地Latex论文精细翻译 | 输入参数是路径",
567
+ "Function": HotReload(Latex翻译中文并重新编译PDF),
568
+ }
569
  }
570
+ )
571
  except:
572
  print(trimmed_format_exc())
573
+ print("Load function plugin failed")
574
 
575
  try:
576
  from toolbox import get_conf
577
+
578
+ ENABLE_AUDIO = get_conf("ENABLE_AUDIO")
579
  if ENABLE_AUDIO:
580
  from crazy_functions.语音助手 import 语音助手
581
+
582
+ function_plugins.update(
583
+ {
584
+ "实时语音对话": {
585
+ "Group": "对话",
586
+ "Color": "stop",
587
+ "AsButton": True,
588
+ "Info": "这是一个时刻聆听着的语音对话助手 | 没有输入参数",
589
+ "Function": HotReload(语音助手),
590
+ }
591
  }
592
+ )
593
  except:
594
  print(trimmed_format_exc())
595
+ print("Load function plugin failed")
596
 
597
  try:
598
  from crazy_functions.批量翻译PDF文档_NOUGAT import 批量翻译PDF文档
599
+
600
+ function_plugins.update(
601
+ {
602
+ "精准翻译PDF文档(NOUGAT)": {
603
+ "Group": "学术",
604
+ "Color": "stop",
605
+ "AsButton": False,
606
+ "Function": HotReload(批量翻译PDF文档),
607
+ }
608
  }
609
+ )
610
  except:
611
  print(trimmed_format_exc())
612
+ print("Load function plugin failed")
613
 
614
  try:
615
  from crazy_functions.函数动态生成 import 函数动态生成
616
+
617
+ function_plugins.update(
618
+ {
619
+ "动态代码解释器(CodeInterpreter)": {
620
+ "Group": "智能体",
621
+ "Color": "stop",
622
+ "AsButton": False,
623
+ "Function": HotReload(函数动态生成),
624
+ }
625
  }
626
+ )
627
  except:
628
  print(trimmed_format_exc())
629
+ print("Load function plugin failed")
630
 
631
  try:
632
  from crazy_functions.多智能体 import 多智能体终端
633
+
634
+ function_plugins.update(
635
+ {
636
+ "AutoGen多智能体终端(仅供测试)": {
637
+ "Group": "智能体",
638
+ "Color": "stop",
639
+ "AsButton": False,
640
+ "Function": HotReload(多智能体终端),
641
+ }
642
  }
643
+ )
644
  except:
645
  print(trimmed_format_exc())
646
+ print("Load function plugin failed")
647
 
648
  try:
649
  from crazy_functions.互动小游戏 import 随机小游戏
650
+
651
+ function_plugins.update(
652
+ {
653
+ "随机互动小游戏(仅供测试)": {
654
+ "Group": "智能体",
655
+ "Color": "stop",
656
+ "AsButton": False,
657
+ "Function": HotReload(随机小游戏),
658
+ }
659
  }
660
+ )
661
  except:
662
  print(trimmed_format_exc())
663
+ print("Load function plugin failed")
664
+
665
+ # try:
666
+ # from crazy_functions.高级功能函数模板 import 测试图表渲染
667
+ # function_plugins.update({
668
+ # "绘制逻辑关系(测试图表渲染)": {
669
+ # "Group": "智能体",
670
+ # "Color": "stop",
671
+ # "AsButton": True,
672
+ # "Function": HotReload(测试图表渲染)
673
+ # }
674
+ # })
675
+ # except:
676
+ # print(trimmed_format_exc())
677
+ # print('Load function plugin failed')
678
 
679
  # try:
680
  # from crazy_functions.chatglm微调工具 import 微调数据集生成
 
690
  # except:
691
  # print('Load function plugin failed')
692
 
 
 
693
  """
694
  设置默认值:
695
  - 默认 Group = 对话
 
699
  """
700
  for name, function_meta in function_plugins.items():
701
  if "Group" not in function_meta:
702
+ function_plugins[name]["Group"] = "对话"
703
  if "AsButton" not in function_meta:
704
  function_plugins[name]["AsButton"] = True
705
  if "AdvancedArgs" not in function_meta:
706
  function_plugins[name]["AdvancedArgs"] = False
707
  if "Color" not in function_meta:
708
+ function_plugins[name]["Color"] = "secondary"
709
 
710
  return function_plugins
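Every plugin above follows one registration recipe: the import sits in its own try/except so a missing optional dependency only disables that plugin, the entry function is wrapped in HotReload so source edits take effect without restarting, and metadata is merged via function_plugins.update before defaults are back-filled. A minimal sketch of the pattern (the plugin module and name here are hypothetical, not part of this commit; HotReload and trimmed_format_exc are the toolbox helpers used throughout the file):

from toolbox import HotReload, trimmed_format_exc

function_plugins = {}
try:
    from crazy_functions.示例插件 import 示例插件  # hypothetical plugin module
    function_plugins.update(
        {
            "示例插件": {
                "Group": "对话",      # menu group
                "Color": "stop",      # button color preset
                "AsButton": False,    # dropdown entry rather than a button
                "Function": HotReload(示例插件),
            }
        }
    )
except:
    print(trimmed_format_exc())
    print("Load function plugin failed")

# The same defaults that get_crazy_functions() back-fills at the end:
for name, meta in function_plugins.items():
    meta.setdefault("Group", "对话")
    meta.setdefault("AsButton", True)
    meta.setdefault("AdvancedArgs", False)
    meta.setdefault("Color", "secondary")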
crazy_functions/Latex输出PDF结果.py CHANGED
@@ -5,7 +5,7 @@ import glob, os, requests, time
5
  pj = os.path.join
6
  ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
7
 
8
- # =================================== 工具函数 ===============================================
9
  # 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
10
  def switch_prompt(pfg, mode, more_requirement):
11
  """
@@ -142,7 +142,7 @@ def arxiv_download(chatbot, history, txt, allow_cache=True):
142
  from toolbox import extract_archive
143
  extract_archive(file_path=dst, dest_dir=extract_dst)
144
  return extract_dst, arxiv_id
145
- # ========================================= 插件主程序1 =====================================================
146
 
147
 
148
  @CatchException
@@ -218,7 +218,7 @@ def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, histo
218
  # <-------------- we are done ------------->
219
  return success
220
 
221
- # ========================================= 插件主程序2 =====================================================
222
 
223
  @CatchException
224
  def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
 
5
  pj = os.path.join
6
  ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
7
 
8
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
9
  # 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
10
  def switch_prompt(pfg, mode, more_requirement):
11
  """
 
142
  from toolbox import extract_archive
143
  extract_archive(file_path=dst, dest_dir=extract_dst)
144
  return extract_dst, arxiv_id
145
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
146
 
147
 
148
  @CatchException
 
218
  # <-------------- we are done ------------->
219
  return success
220
 
221
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
222
 
223
  @CatchException
224
  def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
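Both entry points in this file are decorated with @CatchException from toolbox. Purely as a hedged illustration, not the repo's actual implementation: a decorator in that role for generator-style plugins could be sketched as follows.

import functools, traceback

def catch_exception_sketch(fn):
    # Plugins in this project are generators that yield UI updates, so the
    # wrapper forwards yields and converts any crash into a final message.
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            yield from fn(*args, **kwargs)
        except Exception:
            yield "[Local Message] plugin failed:\n" + traceback.format_exc()
    return wrapper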
crazy_functions/agent_fns/general.py CHANGED
@@ -35,7 +35,11 @@ def gpt_academic_generate_oai_reply(
35
  class AutoGenGeneral(PluginMultiprocessManager):
36
  def gpt_academic_print_override(self, user_proxy, message, sender):
37
  # ⭐⭐ run in subprocess
38
- self.child_conn.send(PipeCom("show", sender.name + "\n\n---\n\n" + message["content"]))
 
39
 
40
  def gpt_academic_get_human_input(self, user_proxy, message):
41
  # ⭐⭐ run in subprocess
@@ -62,33 +66,33 @@ class AutoGenGeneral(PluginMultiprocessManager):
62
  def exe_autogen(self, input):
63
  # ⭐⭐ run in subprocess
64
  input = input.content
65
- with ProxyNetworkActivate("AutoGen"):
66
- code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
67
- agents = self.define_agents()
68
- user_proxy = None
69
- assistant = None
70
- for agent_kwargs in agents:
71
- agent_cls = agent_kwargs.pop('cls')
72
- kwargs = {
73
- 'llm_config':self.llm_kwargs,
74
- 'code_execution_config':code_execution_config
75
- }
76
- kwargs.update(agent_kwargs)
77
- agent_handle = agent_cls(**kwargs)
78
- agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
79
- for d in agent_handle._reply_func_list:
80
- if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
81
- d['reply_func'] = gpt_academic_generate_oai_reply
82
- if agent_kwargs['name'] == 'user_proxy':
83
- agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
84
- user_proxy = agent_handle
85
- if agent_kwargs['name'] == 'assistant': assistant = agent_handle
86
- try:
87
- if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
88
  user_proxy.initiate_chat(assistant, message=input)
89
- except Exception as e:
90
- tb_str = '```\n' + trimmed_format_exc() + '```'
91
- self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
92
 
93
  def subprocess_worker(self, child_conn):
94
  # ⭐⭐ run in subprocess
 
35
  class AutoGenGeneral(PluginMultiprocessManager):
36
  def gpt_academic_print_override(self, user_proxy, message, sender):
37
  # ⭐⭐ run in subprocess
38
+ try:
39
+ print_msg = sender.name + "\n\n---\n\n" + message["content"]
40
+ except:
41
+ print_msg = sender.name + "\n\n---\n\n" + message
42
+ self.child_conn.send(PipeCom("show", print_msg))
43
 
44
  def gpt_academic_get_human_input(self, user_proxy, message):
45
  # ⭐⭐ run in subprocess
 
66
  def exe_autogen(self, input):
67
  # ⭐⭐ run in subprocess
68
  input = input.content
69
+ code_execution_config = {"work_dir": self.autogen_work_dir, "use_docker": self.use_docker}
70
+ agents = self.define_agents()
71
+ user_proxy = None
72
+ assistant = None
73
+ for agent_kwargs in agents:
74
+ agent_cls = agent_kwargs.pop('cls')
75
+ kwargs = {
76
+ 'llm_config':self.llm_kwargs,
77
+ 'code_execution_config':code_execution_config
78
+ }
79
+ kwargs.update(agent_kwargs)
80
+ agent_handle = agent_cls(**kwargs)
81
+ agent_handle._print_received_message = lambda a,b: self.gpt_academic_print_override(agent_kwargs, a, b)
82
+ for d in agent_handle._reply_func_list:
83
+ if hasattr(d['reply_func'],'__name__') and d['reply_func'].__name__ == 'generate_oai_reply':
84
+ d['reply_func'] = gpt_academic_generate_oai_reply
85
+ if agent_kwargs['name'] == 'user_proxy':
86
+ agent_handle.get_human_input = lambda a: self.gpt_academic_get_human_input(user_proxy, a)
87
+ user_proxy = agent_handle
88
+ if agent_kwargs['name'] == 'assistant': assistant = agent_handle
89
+ try:
90
+ if user_proxy is None or assistant is None: raise Exception("用户代理或助理代理未定义")
91
+ with ProxyNetworkActivate("AutoGen"):
92
  user_proxy.initiate_chat(assistant, message=input)
93
+ except Exception as e:
94
+ tb_str = '```\n' + trimmed_format_exc() + '```'
95
+ self.child_conn.send(PipeCom("done", "AutoGen 执行失败: \n\n" + tb_str))
96
 
97
  def subprocess_worker(self, child_conn):
98
  # ⭐⭐ run in subprocess
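Both behavioral changes in this file harden the AutoGen bridge: the print override now tolerates messages that arrive as plain strings rather than {"content": ...} dicts, and ProxyNetworkActivate is narrowed so that only the actual network call, initiate_chat, runs under the proxy. A stand-alone sketch of the message fallback (the helper name is hypothetical):

def format_agent_message(sender_name, message):
    # Mirrors the try/except added above: AutoGen messages may arrive either
    # as {"content": ...} dicts or as bare strings.
    try:
        body = message["content"]
    except (TypeError, KeyError):
        body = message
    return sender_name + "\n\n---\n\n" + body

format_agent_message("assistant", {"content": "hi"})  # 'assistant\n\n---\n\nhi'
format_agent_message("assistant", "hi")               # same result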
crazy_functions/crazy_utils.py CHANGED
@@ -466,6 +466,9 @@ def read_and_clean_pdf_text(fp):
466
  return True
467
  else:
468
  return False
 
 
 
469
  for _ in range(100):
470
  for index, block_txt in enumerate(meta_txt):
471
  if starts_with_lowercase_word(block_txt):
 
466
  return True
467
  else:
468
  return False
469
+ # 对于某些PDF会有第一个段落就以小写字母开头,为了避免索引错误将其更改为大写
470
+ if starts_with_lowercase_word(meta_txt[0]):
471
+ meta_txt[0] = meta_txt[0].capitalize()
472
  for _ in range(100):
473
  for index, block_txt in enumerate(meta_txt):
474
  if starts_with_lowercase_word(block_txt):
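The three added lines exist because of the merge loop that follows: a block starting with a lowercase word is folded into meta_txt[index - 1], and for index == 0 that expression silently wraps around to the last block. Capitalizing the first block up front removes that case. A toy illustration:

meta_txt = ["introduction text in lowercase.", "Second block."]
if meta_txt[0][:1].islower():  # simplified stand-in for starts_with_lowercase_word
    meta_txt[0] = meta_txt[0].capitalize()
# meta_txt[0] is now "Introduction text in lowercase."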
crazy_functions/latex_fns/latex_toolbox.py CHANGED
@@ -1,15 +1,18 @@
1
  import os, shutil
2
  import re
3
  import numpy as np
 
4
  PRESERVE = 0
5
  TRANSFORM = 1
6
 
7
  pj = os.path.join
8
 
9
- class LinkedListNode():
 
10
  """
11
  Linked List Node
12
  """
 
13
  def __init__(self, string, preserve=True) -> None:
14
  self.string = string
15
  self.preserve = preserve
@@ -18,41 +21,47 @@ class LinkedListNode():
18
  # self.begin_line = 0
19
  # self.begin_char = 0
20
 
 
21
  def convert_to_linklist(text, mask):
22
  root = LinkedListNode("", preserve=True)
23
  current_node = root
24
  for c, m, i in zip(text, mask, range(len(text))):
25
- if (m==PRESERVE and current_node.preserve) \
26
- or (m==TRANSFORM and not current_node.preserve):
 
27
  # add
28
  current_node.string += c
29
  else:
30
- current_node.next = LinkedListNode(c, preserve=(m==PRESERVE))
31
  current_node = current_node.next
32
  return root
33
 
 
34
  def post_process(root):
35
  # 修复括号
36
  node = root
37
  while True:
38
  string = node.string
39
- if node.preserve:
40
  node = node.next
41
- if node is None: break
 
42
  continue
 
43
  def break_check(string):
44
- str_stack = [""] # (lv, index)
45
  for i, c in enumerate(string):
46
- if c == '{':
47
- str_stack.append('{')
48
- elif c == '}':
49
  if len(str_stack) == 1:
50
- print('stack fix')
51
  return i
52
  str_stack.pop(-1)
53
  else:
54
  str_stack[-1] += c
55
  return -1
 
56
  bp = break_check(string)
57
 
58
  if bp == -1:
@@ -69,51 +78,66 @@ def post_process(root):
69
  node.next = q
70
 
71
  node = node.next
72
- if node is None: break
 
73
 
74
  # 屏蔽空行和太短的句子
75
  node = root
76
  while True:
77
- if len(node.string.strip('\n').strip(''))==0: node.preserve = True
78
- if len(node.string.strip('\n').strip(''))<42: node.preserve = True
 
 
79
  node = node.next
80
- if node is None: break
 
81
  node = root
82
  while True:
83
  if node.next and node.preserve and node.next.preserve:
84
  node.string += node.next.string
85
  node.next = node.next.next
86
  node = node.next
87
- if node is None: break
 
88
 
89
  # 将前后断行符脱离
90
  node = root
91
  prev_node = None
92
  while True:
93
  if not node.preserve:
94
- lstriped_ = node.string.lstrip().lstrip('\n')
95
- if (prev_node is not None) and (prev_node.preserve) and (len(lstriped_)!=len(node.string)):
96
- prev_node.string += node.string[:-len(lstriped_)]
 
 
 
 
97
  node.string = lstriped_
98
- rstriped_ = node.string.rstrip().rstrip('\n')
99
- if (node.next is not None) and (node.next.preserve) and (len(rstriped_)!=len(node.string)):
100
- node.next.string = node.string[len(rstriped_):] + node.next.string
 
 
 
 
101
  node.string = rstriped_
102
- # =====
103
  prev_node = node
104
  node = node.next
105
- if node is None: break
 
106
 
107
  # 标注节点的行数范围
108
  node = root
109
  n_line = 0
110
  expansion = 2
111
  while True:
112
- n_l = node.string.count('\n')
113
- node.range = [n_line-expansion, n_line+n_l+expansion] # 失败时,扭转的范围
114
- n_line = n_line+n_l
115
  node = node.next
116
- if node is None: break
 
117
  return root
118
 
119
 
@@ -128,97 +152,125 @@ def set_forbidden_text(text, mask, pattern, flags=0):
128
  """
129
  Add a preserve text area in this paper
130
  e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
131
- you can mask out (mask = PRESERVE so that text becomes untouchable for GPT)
132
  everything between "\begin{equation}" and "\end{equation}"
133
  """
134
- if isinstance(pattern, list): pattern = '|'.join(pattern)
 
135
  pattern_compile = re.compile(pattern, flags)
136
  for res in pattern_compile.finditer(text):
137
- mask[res.span()[0]:res.span()[1]] = PRESERVE
138
  return text, mask
139
 
 
140
  def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
141
  """
142
  Move area out of preserve area (make text editable for GPT)
143
- count the number of the braces so as to catch the complete text area.
144
  e.g.
145
- \begin{abstract} blablablablablabla. \end{abstract}
146
  """
147
- if isinstance(pattern, list): pattern = '|'.join(pattern)
 
148
  pattern_compile = re.compile(pattern, flags)
149
  for res in pattern_compile.finditer(text):
150
  if not forbid_wrapper:
151
- mask[res.span()[0]:res.span()[1]] = TRANSFORM
152
  else:
153
- mask[res.regs[0][0]: res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
154
- mask[res.regs[1][0]: res.regs[1][1]] = TRANSFORM # abstract
155
- mask[res.regs[1][1]: res.regs[0][1]] = PRESERVE # abstract
156
  return text, mask
157
 
 
158
  def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
159
  """
160
  Add a preserve text area in this paper (text becomes untouchable for GPT).
161
- count the number of the braces so as to catch the complete text area.
162
  e.g.
163
- \caption{blablablablabla\textbf{blablabla}blablabla.}
164
  """
165
  pattern_compile = re.compile(pattern, flags)
166
  for res in pattern_compile.finditer(text):
167
  brace_level = -1
168
  p = begin = end = res.regs[0][0]
169
- for _ in range(1024*16):
170
- if text[p] == '}' and brace_level == 0: break
171
- elif text[p] == '}': brace_level -= 1
172
- elif text[p] == '{': brace_level += 1
 
 
 
173
  p += 1
174
- end = p+1
175
  mask[begin:end] = PRESERVE
176
  return text, mask
177
 
178
- def reverse_forbidden_text_careful_brace(text, mask, pattern, flags=0, forbid_wrapper=True):
 
 
 
179
  """
180
  Move area out of preserve area (make text editable for GPT)
181
- count the number of the braces so as to catch the complete text area.
182
  e.g.
183
- \caption{blablablablabla\textbf{blablabla}blablabla.}
184
  """
185
  pattern_compile = re.compile(pattern, flags)
186
  for res in pattern_compile.finditer(text):
187
  brace_level = 0
188
  p = begin = end = res.regs[1][0]
189
- for _ in range(1024*16):
190
- if text[p] == '}' and brace_level == 0: break
191
- elif text[p] == '}': brace_level -= 1
192
- elif text[p] == '{': brace_level += 1
 
 
 
193
  p += 1
194
  end = p
195
  mask[begin:end] = TRANSFORM
196
  if forbid_wrapper:
197
- mask[res.regs[0][0]:begin] = PRESERVE
198
- mask[end:res.regs[0][1]] = PRESERVE
199
  return text, mask
200
 
 
201
  def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
202
  """
203
  Find all \begin{} ... \end{} text blocks with fewer than limit_n_lines lines.
204
  Add it to preserve area
205
  """
206
  pattern_compile = re.compile(pattern, flags)
 
207
  def search_with_line_limit(text, mask):
208
  for res in pattern_compile.finditer(text):
209
  cmd = res.group(1) # begin{what}
210
- this = res.group(2) # content between begin and end
211
- this_mask = mask[res.regs[2][0]:res.regs[2][1]]
212
- white_list = ['document', 'abstract', 'lemma', 'definition', 'sproof',
213
- 'em', 'emph', 'textit', 'textbf', 'itemize', 'enumerate']
214
- if (cmd in white_list) or this.count('\n') >= limit_n_lines: # use a magical number 42
 
 
 
215
  this, this_mask = search_with_line_limit(this, this_mask)
216
- mask[res.regs[2][0]:res.regs[2][1]] = this_mask
217
  else:
218
- mask[res.regs[0][0]:res.regs[0][1]] = PRESERVE
219
  return text, mask
220
- return search_with_line_limit(text, mask)
221
 
 
222
 
223
 
224
  """
@@ -227,6 +279,7 @@ Latex Merge File
227
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
228
  """
229
 
 
230
  def find_main_tex_file(file_manifest, mode):
231
  """
232
  在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
@@ -234,27 +287,36 @@ def find_main_tex_file(file_manifest, mode):
234
  """
235
  canidates = []
236
  for texf in file_manifest:
237
- if os.path.basename(texf).startswith('merge'):
238
  continue
239
- with open(texf, 'r', encoding='utf8', errors='ignore') as f:
240
  file_content = f.read()
241
- if r'\documentclass' in file_content:
242
  canidates.append(texf)
243
  else:
244
  continue
245
 
246
  if len(canidates) == 0:
247
- raise RuntimeError('无法找到一个主Tex文件(包含documentclass关键字)')
248
  elif len(canidates) == 1:
249
  return canidates[0]
250
- else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
251
  canidates_score = []
252
  # 给出一些判定模板文档的词作为扣分项
253
- unexpected_words = ['\LaTeX', 'manuscript', 'Guidelines', 'font', 'citations', 'rejected', 'blind review', 'reviewers']
254
- expected_words = ['\input', '\ref', '\cite']
 
 
 
 
 
 
 
 
 
255
  for texf in canidates:
256
  canidates_score.append(0)
257
- with open(texf, 'r', encoding='utf8', errors='ignore') as f:
258
  file_content = f.read()
259
  file_content = rm_comments(file_content)
260
  for uw in unexpected_words:
@@ -263,9 +325,10 @@ def find_main_tex_file(file_manifest, mode):
263
  for uw in expected_words:
264
  if uw in file_content:
265
  canidates_score[-1] += 1
266
- select = np.argmax(canidates_score) # 取评分最高者返回
267
  return canidates[select]
268
-
 
269
  def rm_comments(main_file):
270
  new_file_remove_comment_lines = []
271
  for l in main_file.splitlines():
@@ -274,30 +337,39 @@ def rm_comments(main_file):
274
  pass
275
  else:
276
  new_file_remove_comment_lines.append(l)
277
- main_file = '\n'.join(new_file_remove_comment_lines)
278
  # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令
279
- main_file = re.sub(r'(?<!\\)%.*', '', main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
280
  return main_file
281
 
 
282
  def find_tex_file_ignore_case(fp):
283
  dir_name = os.path.dirname(fp)
284
  base_name = os.path.basename(fp)
285
  # 如果输入的文件路径是正确的
286
- if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name)
 
287
  # 如果不正确,试着加上.tex后缀试试
288
- if not base_name.endswith('.tex'): base_name+='.tex'
289
- if os.path.isfile(pj(dir_name, base_name)): return pj(dir_name, base_name)
 
 
290
  # 如果还找不到,解除大小写限制,再试一次
291
  import glob
292
- for f in glob.glob(dir_name+'/*.tex'):
 
293
  base_name_s = os.path.basename(fp)
294
  base_name_f = os.path.basename(f)
295
- if base_name_s.lower() == base_name_f.lower(): return f
 
296
  # 试着加上.tex后缀试试
297
- if not base_name_s.endswith('.tex'): base_name_s+='.tex'
298
- if base_name_s.lower() == base_name_f.lower(): return f
 
 
299
  return None
300
 
 
301
  def merge_tex_files_(project_foler, main_file, mode):
302
  """
303
  Merge Tex project recursively
@@ -309,18 +381,18 @@ def merge_tex_files_(project_foler, main_file, mode):
309
  fp_ = find_tex_file_ignore_case(fp)
310
  if fp_:
311
  try:
312
- with open(fp_, 'r', encoding='utf-8', errors='replace') as fx: c = fx.read()
 
313
  except:
314
  c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
315
  else:
316
- raise RuntimeError(f'找不到{fp},Tex源文件缺失!')
317
  c = merge_tex_files_(project_foler, c, mode)
318
- main_file = main_file[:s.span()[0]] + c + main_file[s.span()[1]:]
319
  return main_file
320
 
321
 
322
  def find_title_and_abs(main_file):
323
-
324
  def extract_abstract_1(text):
325
  pattern = r"\\abstract\{(.*?)\}"
326
  match = re.search(pattern, text, re.DOTALL)
@@ -362,21 +434,30 @@ def merge_tex_files(project_foler, main_file, mode):
362
  main_file = merge_tex_files_(project_foler, main_file, mode)
363
  main_file = rm_comments(main_file)
364
 
365
- if mode == 'translate_zh':
366
  # find paper documentclass
367
- pattern = re.compile(r'\\documentclass.*\n')
368
  match = pattern.search(main_file)
369
  assert match is not None, "Cannot find documentclass statement!"
370
  position = match.end()
371
- add_ctex = '\\usepackage{ctex}\n'
372
- add_url = '\\usepackage{url}\n' if '{url}' not in main_file else ''
373
  main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
374
  # fontset=windows
375
  import platform
376
- main_file = re.sub(r"\\documentclass\[(.*?)\]{(.*?)}", r"\\documentclass[\1,fontset=windows,UTF8]{\2}",main_file)
377
- main_file = re.sub(r"\\documentclass{(.*?)}", r"\\documentclass[fontset=windows,UTF8]{\1}",main_file)
 
 
 
 
 
 
 
 
 
378
  # find paper abstract
379
- pattern_opt1 = re.compile(r'\\begin\{abstract\}.*\n')
380
  pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
381
  match_opt1 = pattern_opt1.search(main_file)
382
  match_opt2 = pattern_opt2.search(main_file)
@@ -385,7 +466,9 @@ def merge_tex_files(project_foler, main_file, mode):
385
  main_file = insert_abstract(main_file)
386
  match_opt1 = pattern_opt1.search(main_file)
387
  match_opt2 = pattern_opt2.search(main_file)
388
- assert (match_opt1 is not None) or (match_opt2 is not None), "Cannot find paper abstract section!"
 
 
389
  return main_file
390
 
391
 
@@ -395,6 +478,7 @@ The GPT-Academic program cannot find abstract section in this paper.
395
  \end{abstract}
396
  """
397
 
 
398
  def insert_abstract(tex_content):
399
  if "\\maketitle" in tex_content:
400
  # find the position of "\maketitle"
@@ -402,7 +486,13 @@ def insert_abstract(tex_content):
402
  # find the nearest ending line
403
  end_line_index = tex_content.find("\n", find_index)
404
  # insert "abs_str" on the next line
405
- modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
 
 
406
  return modified_tex
407
  elif r"\begin{document}" in tex_content:
408
  # find the position of "\maketitle"
@@ -410,29 +500,39 @@ def insert_abstract(tex_content):
410
  # find the nearest ending line
411
  end_line_index = tex_content.find("\n", find_index)
412
  # insert "abs_str" on the next line
413
- modified_tex = tex_content[:end_line_index+1] + '\n\n' + insert_missing_abs_str + '\n\n' + tex_content[end_line_index+1:]
 
 
 
 
 
 
414
  return modified_tex
415
  else:
416
  return tex_content
417
 
 
418
  """
419
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
420
  Post process
421
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
422
  """
 
 
423
  def mod_inbraket(match):
424
  """
425
- 为啥chatgpt会把cite里面的逗号换成中文逗号呀
426
  """
427
  # get the matched string
428
  cmd = match.group(1)
429
  str_to_modify = match.group(2)
430
  # modify the matched string
431
- str_to_modify = str_to_modify.replace('', ':') # 前面是中文冒号,后面是英文冒号
432
- str_to_modify = str_to_modify.replace('', ',') # 前面是中文逗号,后面是英文逗号
433
  # str_to_modify = 'BOOM'
434
  return "\\" + cmd + "{" + str_to_modify + "}"
435
 
 
436
  def fix_content(final_tex, node_string):
437
  """
438
  Fix common GPT errors to increase success rate
@@ -443,10 +543,10 @@ def fix_content(final_tex, node_string):
443
  final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)
444
 
445
  if "Traceback" in final_tex and "[Local Message]" in final_tex:
446
- final_tex = node_string # 出问题了,还原原文
447
- if node_string.count('\\begin') != final_tex.count('\\begin'):
448
- final_tex = node_string # 出问题了,还原原文
449
- if node_string.count('\_') > 0 and node_string.count('\_') > final_tex.count('\_'):
450
  # walk and replace any _ without \
451
  final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
452
 
@@ -454,24 +554,32 @@ def fix_content(final_tex, node_string):
454
  # this function count the number of { and }
455
  brace_level = 0
456
  for c in string:
457
- if c == "{": brace_level += 1
458
- elif c == "}": brace_level -= 1
 
 
459
  return brace_level
 
460
  def join_most(tex_t, tex_o):
461
  # this function join translated string and original string when something goes wrong
462
  p_t = 0
463
  p_o = 0
 
464
  def find_next(string, chars, begin):
465
  p = begin
466
  while p < len(string):
467
- if string[p] in chars: return p, string[p]
 
468
  p += 1
469
  return None, None
 
470
  while True:
471
- res1, char = find_next(tex_o, ['{','}'], p_o)
472
- if res1 is None: break
 
473
  res2, char = find_next(tex_t, [char], p_t)
474
- if res2 is None: break
 
475
  p_o = res1 + 1
476
  p_t = res2 + 1
477
  return tex_t[:p_t] + tex_o[p_o:]
@@ -480,10 +588,14 @@ def fix_content(final_tex, node_string):
480
  # 出问题了,还原部分原文,保证括号正确
481
  final_tex = join_most(final_tex, node_string)
482
  return final_tex
483
-
 
484
  def compile_latex_with_timeout(command, cwd, timeout=60):
485
  import subprocess
486
- process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
 
 
 
487
  try:
488
  stdout, stderr = process.communicate(timeout=timeout)
489
  except subprocess.TimeoutExpired:
@@ -493,43 +605,52 @@ def compile_latex_with_timeout(command, cwd, timeout=60):
493
  return False
494
  return True
495
 
 
496
  def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
497
  import sys
 
498
  try:
499
  result = func(*args, **kwargs)
500
- return_dict['result'] = result
501
  except Exception as e:
502
  exc_info = sys.exc_info()
503
- exception_dict['exception'] = exc_info
 
504
 
505
  def run_in_subprocess(func):
506
  import multiprocessing
 
507
  def wrapper(*args, **kwargs):
508
  return_dict = multiprocessing.Manager().dict()
509
  exception_dict = multiprocessing.Manager().dict()
510
- process = multiprocessing.Process(target=run_in_subprocess_wrapper_func,
511
- args=(func, args, kwargs, return_dict, exception_dict))
 
 
512
  process.start()
513
  process.join()
514
  process.close()
515
- if 'exception' in exception_dict:
516
  # oops, the subprocess ran into an exception
517
- exc_info = exception_dict['exception']
518
  raise exc_info[1].with_traceback(exc_info[2])
519
- if 'result' in return_dict.keys():
520
  # If the subprocess ran successfully, return the result
521
- return return_dict['result']
 
522
  return wrapper
523
 
 
524
  def _merge_pdfs(pdf1_path, pdf2_path, output_path):
525
- import PyPDF2 # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放
 
526
  Percent = 0.95
527
  # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
528
  # Open the first PDF file
529
- with open(pdf1_path, 'rb') as pdf1_file:
530
  pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
531
  # Open the second PDF file
532
- with open(pdf2_path, 'rb') as pdf2_file:
533
  pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
534
  # Create a new PDF file to store the merged pages
535
  output_writer = PyPDF2.PdfFileWriter()
@@ -549,14 +670,25 @@ def _merge_pdfs(pdf1_path, pdf2_path, output_path):
549
  page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
550
  # Create a new empty page with double width
551
  new_page = PyPDF2.PageObject.createBlankPage(
552
- width = int(int(page1.mediaBox.getWidth()) + int(page2.mediaBox.getWidth()) * Percent),
553
- height = max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight())
 
 
 
554
  )
555
  new_page.mergeTranslatedPage(page1, 0, 0)
556
- new_page.mergeTranslatedPage(page2, int(int(page1.mediaBox.getWidth())-int(page2.mediaBox.getWidth())* (1-Percent)), 0)
 
 
557
  output_writer.addPage(new_page)
558
  # Save the merged PDF file
559
- with open(output_path, 'wb') as output_file:
560
  output_writer.write(output_file)
561
 
562
- merge_pdfs = run_in_subprocess(_merge_pdfs) # PyPDF2这个库有严重的内存泄露问题,把它放到子进程中运行,从而方便内存的释放
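Everything from here down is the same module reformatted with Black; the underlying technique is unchanged. The core idea to keep in view while reading: a per-character PRESERVE/TRANSFORM mask is grouped into runs of identical flags (convert_to_linklist), and only TRANSFORM runs are ever handed to the model. A simplified, self-contained sketch of that grouping, not the module's exact linked-list code:

PRESERVE, TRANSFORM = 0, 1

def split_by_mask(text, mask):
    # Group consecutive characters that share the same mask value into runs.
    runs, buf, cur = [], "", mask[0] if mask else PRESERVE
    for c, m in zip(text, mask):
        if m == cur:
            buf += c
        else:
            runs.append((cur, buf))
            buf, cur = c, m
    if buf:
        runs.append((cur, buf))
    return runs

text = r"keep \ref{x} edit me"
mask = [PRESERVE] * 12 + [TRANSFORM] * 8   # toy mask: protect the first 12 chars
print(split_by_mask(text, mask))
# [(0, 'keep \\ref{x}'), (1, ' edit me')]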
 
 
1
  import os, shutil
2
  import re
3
  import numpy as np
4
+
5
  PRESERVE = 0
6
  TRANSFORM = 1
7
 
8
  pj = os.path.join
9
 
10
+
11
+ class LinkedListNode:
12
  """
13
  Linked List Node
14
  """
15
+
16
  def __init__(self, string, preserve=True) -> None:
17
  self.string = string
18
  self.preserve = preserve
 
21
  # self.begin_line = 0
22
  # self.begin_char = 0
23
 
24
+
25
  def convert_to_linklist(text, mask):
26
  root = LinkedListNode("", preserve=True)
27
  current_node = root
28
  for c, m, i in zip(text, mask, range(len(text))):
29
+ if (m == PRESERVE and current_node.preserve) or (
30
+ m == TRANSFORM and not current_node.preserve
31
+ ):
32
  # add
33
  current_node.string += c
34
  else:
35
+ current_node.next = LinkedListNode(c, preserve=(m == PRESERVE))
36
  current_node = current_node.next
37
  return root
38
 
39
+
40
  def post_process(root):
41
  # 修复括号
42
  node = root
43
  while True:
44
  string = node.string
45
+ if node.preserve:
46
  node = node.next
47
+ if node is None:
48
+ break
49
  continue
50
+
51
  def break_check(string):
52
+ str_stack = [""] # (lv, index)
53
  for i, c in enumerate(string):
54
+ if c == "{":
55
+ str_stack.append("{")
56
+ elif c == "}":
57
  if len(str_stack) == 1:
58
+ print("stack fix")
59
  return i
60
  str_stack.pop(-1)
61
  else:
62
  str_stack[-1] += c
63
  return -1
64
+
65
  bp = break_check(string)
66
 
67
  if bp == -1:
 
78
  node.next = q
79
 
80
  node = node.next
81
+ if node is None:
82
+ break
83
 
84
  # 屏蔽空行和太短的句子
85
  node = root
86
  while True:
87
+ if len(node.string.strip("\n").strip("")) == 0:
88
+ node.preserve = True
89
+ if len(node.string.strip("\n").strip("")) < 42:
90
+ node.preserve = True
91
  node = node.next
92
+ if node is None:
93
+ break
94
  node = root
95
  while True:
96
  if node.next and node.preserve and node.next.preserve:
97
  node.string += node.next.string
98
  node.next = node.next.next
99
  node = node.next
100
+ if node is None:
101
+ break
102
 
103
  # 将前后断行符脱离
104
  node = root
105
  prev_node = None
106
  while True:
107
  if not node.preserve:
108
+ lstriped_ = node.string.lstrip().lstrip("\n")
109
+ if (
110
+ (prev_node is not None)
111
+ and (prev_node.preserve)
112
+ and (len(lstriped_) != len(node.string))
113
+ ):
114
+ prev_node.string += node.string[: -len(lstriped_)]
115
  node.string = lstriped_
116
+ rstriped_ = node.string.rstrip().rstrip("\n")
117
+ if (
118
+ (node.next is not None)
119
+ and (node.next.preserve)
120
+ and (len(rstriped_) != len(node.string))
121
+ ):
122
+ node.next.string = node.string[len(rstriped_) :] + node.next.string
123
  node.string = rstriped_
124
+ # =-=-=
125
  prev_node = node
126
  node = node.next
127
+ if node is None:
128
+ break
129
 
130
  # 标注节点的行数范围
131
  node = root
132
  n_line = 0
133
  expansion = 2
134
  while True:
135
+ n_l = node.string.count("\n")
136
+ node.range = [n_line - expansion, n_line + n_l + expansion] # 失败时,扭转的范围
137
+ n_line = n_line + n_l
138
  node = node.next
139
+ if node is None:
140
+ break
141
  return root
142
 
143
 
 
152
  """
153
  Add a preserve text area in this paper
154
  e.g. with pattern = r"\\begin\{algorithm\}(.*?)\\end\{algorithm\}"
155
+ you can mask out (mask = PRESERVE so that text becomes untouchable for GPT)
156
  everything between "\begin{equation}" and "\end{equation}"
157
  """
158
+ if isinstance(pattern, list):
159
+ pattern = "|".join(pattern)
160
  pattern_compile = re.compile(pattern, flags)
161
  for res in pattern_compile.finditer(text):
162
+ mask[res.span()[0] : res.span()[1]] = PRESERVE
163
  return text, mask
164
 
165
+
166
  def reverse_forbidden_text(text, mask, pattern, flags=0, forbid_wrapper=True):
167
  """
168
  Move area out of preserve area (make text editable for GPT)
169
+ count the number of the braces so as to catch the complete text area.
170
  e.g.
171
+ \begin{abstract} blablablablablabla. \end{abstract}
172
  """
173
+ if isinstance(pattern, list):
174
+ pattern = "|".join(pattern)
175
  pattern_compile = re.compile(pattern, flags)
176
  for res in pattern_compile.finditer(text):
177
  if not forbid_wrapper:
178
+ mask[res.span()[0] : res.span()[1]] = TRANSFORM
179
  else:
180
+ mask[res.regs[0][0] : res.regs[1][0]] = PRESERVE # '\\begin{abstract}'
181
+ mask[res.regs[1][0] : res.regs[1][1]] = TRANSFORM # abstract
182
+ mask[res.regs[1][1] : res.regs[0][1]] = PRESERVE # abstract
183
  return text, mask
184
 
185
+
186
  def set_forbidden_text_careful_brace(text, mask, pattern, flags=0):
187
  """
188
  Add a preserve text area in this paper (text becomes untouchable for GPT).
189
+ count the number of the braces so as to catch the complete text area.
190
  e.g.
191
+ \caption{blablablablabla\textbf{blablabla}blablabla.}
192
  """
193
  pattern_compile = re.compile(pattern, flags)
194
  for res in pattern_compile.finditer(text):
195
  brace_level = -1
196
  p = begin = end = res.regs[0][0]
197
+ for _ in range(1024 * 16):
198
+ if text[p] == "}" and brace_level == 0:
199
+ break
200
+ elif text[p] == "}":
201
+ brace_level -= 1
202
+ elif text[p] == "{":
203
+ brace_level += 1
204
  p += 1
205
+ end = p + 1
206
  mask[begin:end] = PRESERVE
207
  return text, mask
208
 
209
+
210
+ def reverse_forbidden_text_careful_brace(
211
+ text, mask, pattern, flags=0, forbid_wrapper=True
212
+ ):
213
  """
214
  Move area out of preserve area (make text editable for GPT)
215
+ count the number of the braces so as to catch the complete text area.
216
  e.g.
217
+ \caption{blablablablabla\textbf{blablabla}blablabla.}
218
  """
219
  pattern_compile = re.compile(pattern, flags)
220
  for res in pattern_compile.finditer(text):
221
  brace_level = 0
222
  p = begin = end = res.regs[1][0]
223
+ for _ in range(1024 * 16):
224
+ if text[p] == "}" and brace_level == 0:
225
+ break
226
+ elif text[p] == "}":
227
+ brace_level -= 1
228
+ elif text[p] == "{":
229
+ brace_level += 1
230
  p += 1
231
  end = p
232
  mask[begin:end] = TRANSFORM
233
  if forbid_wrapper:
234
+ mask[res.regs[0][0] : begin] = PRESERVE
235
+ mask[end : res.regs[0][1]] = PRESERVE
236
  return text, mask
237
 
238
+
239
  def set_forbidden_text_begin_end(text, mask, pattern, flags=0, limit_n_lines=42):
240
  """
241
  Find all \begin{} ... \end{} text blocks with fewer than limit_n_lines lines.
242
  Add it to preserve area
243
  """
244
  pattern_compile = re.compile(pattern, flags)
245
+
246
  def search_with_line_limit(text, mask):
247
  for res in pattern_compile.finditer(text):
248
  cmd = res.group(1) # begin{what}
249
+ this = res.group(2) # content between begin and end
250
+ this_mask = mask[res.regs[2][0] : res.regs[2][1]]
251
+ white_list = [
252
+ "document",
253
+ "abstract",
254
+ "lemma",
255
+ "definition",
256
+ "sproof",
257
+ "em",
258
+ "emph",
259
+ "textit",
260
+ "textbf",
261
+ "itemize",
262
+ "enumerate",
263
+ ]
264
+ if (cmd in white_list) or this.count(
265
+ "\n"
266
+ ) >= limit_n_lines: # use a magical number 42
267
  this, this_mask = search_with_line_limit(this, this_mask)
268
+ mask[res.regs[2][0] : res.regs[2][1]] = this_mask
269
  else:
270
+ mask[res.regs[0][0] : res.regs[0][1]] = PRESERVE
271
  return text, mask
 
272
 
273
+ return search_with_line_limit(text, mask)
274
 
275
 
276
  """
 
279
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
280
  """
281
 
282
+
283
  def find_main_tex_file(file_manifest, mode):
284
  """
285
  在多Tex文档中,寻找主文件,必须包含documentclass,返回找到的第一个。
 
287
  """
288
  canidates = []
289
  for texf in file_manifest:
290
+ if os.path.basename(texf).startswith("merge"):
291
  continue
292
+ with open(texf, "r", encoding="utf8", errors="ignore") as f:
293
  file_content = f.read()
294
+ if r"\documentclass" in file_content:
295
  canidates.append(texf)
296
  else:
297
  continue
298
 
299
  if len(canidates) == 0:
300
+ raise RuntimeError("无法找到一个主Tex文件(包含documentclass关键字)")
301
  elif len(canidates) == 1:
302
  return canidates[0]
303
+ else: # if len(canidates) >= 2 通过一些Latex模板中常见(但通常不会出现在正文)的单词,对不同latex源文件扣分,取评分最高者返回
304
  canidates_score = []
305
  # 给出一些判定模板文档的词作为扣分项
306
+ unexpected_words = [
307
+ "\\LaTeX",
308
+ "manuscript",
309
+ "Guidelines",
310
+ "font",
311
+ "citations",
312
+ "rejected",
313
+ "blind review",
314
+ "reviewers",
315
+ ]
316
+ expected_words = ["\\input", "\\ref", "\\cite"]
317
  for texf in canidates:
318
  canidates_score.append(0)
319
+ with open(texf, "r", encoding="utf8", errors="ignore") as f:
320
  file_content = f.read()
321
  file_content = rm_comments(file_content)
322
  for uw in unexpected_words:
 
325
  for uw in expected_words:
326
  if uw in file_content:
327
  canidates_score[-1] += 1
328
+ select = np.argmax(canidates_score) # 取评分最高者返回
329
  return canidates[select]
330
+
331
+
332
  def rm_comments(main_file):
333
  new_file_remove_comment_lines = []
334
  for l in main_file.splitlines():
 
337
  pass
338
  else:
339
  new_file_remove_comment_lines.append(l)
340
+ main_file = "\n".join(new_file_remove_comment_lines)
341
  # main_file = re.sub(r"\\include{(.*?)}", r"\\input{\1}", main_file) # 将 \include 命令转换为 \input 命令
342
+ main_file = re.sub(r"(?<!\\)%.*", "", main_file) # 使用正则表达式查找半行注释, 并替换为空字符串
343
  return main_file
344
 
345
+
346
  def find_tex_file_ignore_case(fp):
347
  dir_name = os.path.dirname(fp)
348
  base_name = os.path.basename(fp)
349
  # 如果输入的文件路径是正确的
350
+ if os.path.isfile(pj(dir_name, base_name)):
351
+ return pj(dir_name, base_name)
352
  # 如果不正确,试着加上.tex后缀试试
353
+ if not base_name.endswith(".tex"):
354
+ base_name += ".tex"
355
+ if os.path.isfile(pj(dir_name, base_name)):
356
+ return pj(dir_name, base_name)
357
  # 如果还找不到,解除大小写限制,再试一次
358
  import glob
359
+
360
+ for f in glob.glob(dir_name + "/*.tex"):
361
  base_name_s = os.path.basename(fp)
362
  base_name_f = os.path.basename(f)
363
+ if base_name_s.lower() == base_name_f.lower():
364
+ return f
365
  # 试着加上.tex后缀试试
366
+ if not base_name_s.endswith(".tex"):
367
+ base_name_s += ".tex"
368
+ if base_name_s.lower() == base_name_f.lower():
369
+ return f
370
  return None
371
 
372
+
373
  def merge_tex_files_(project_foler, main_file, mode):
374
  """
375
  Merge Tex project recrusively
 
381
  fp_ = find_tex_file_ignore_case(fp)
382
  if fp_:
383
  try:
384
+ with open(fp_, "r", encoding="utf-8", errors="replace") as fx:
385
+ c = fx.read()
386
  except:
387
  c = f"\n\nWarning from GPT-Academic: LaTex source file is missing!\n\n"
388
  else:
389
+ raise RuntimeError(f"找不到{fp},Tex源文件缺失!")
390
  c = merge_tex_files_(project_foler, c, mode)
391
+ main_file = main_file[: s.span()[0]] + c + main_file[s.span()[1] :]
392
  return main_file
393
 
394
 
395
  def find_title_and_abs(main_file):
 
396
  def extract_abstract_1(text):
397
  pattern = r"\\abstract\{(.*?)\}"
398
  match = re.search(pattern, text, re.DOTALL)
 
434
  main_file = merge_tex_files_(project_foler, main_file, mode)
435
  main_file = rm_comments(main_file)
436
 
437
+ if mode == "translate_zh":
438
  # find paper documentclass
439
+ pattern = re.compile(r"\\documentclass.*\n")
440
  match = pattern.search(main_file)
441
  assert match is not None, "Cannot find documentclass statement!"
442
  position = match.end()
443
+ add_ctex = "\\usepackage{ctex}\n"
444
+ add_url = "\\usepackage{url}\n" if "{url}" not in main_file else ""
445
  main_file = main_file[:position] + add_ctex + add_url + main_file[position:]
446
  # fontset=windows
447
  import platform
448
+
449
+ main_file = re.sub(
450
+ r"\\documentclass\[(.*?)\]{(.*?)}",
451
+ r"\\documentclass[\1,fontset=windows,UTF8]{\2}",
452
+ main_file,
453
+ )
454
+ main_file = re.sub(
455
+ r"\\documentclass{(.*?)}",
456
+ r"\\documentclass[fontset=windows,UTF8]{\1}",
457
+ main_file,
458
+ )
459
  # find paper abstract
460
+ pattern_opt1 = re.compile(r"\\begin\{abstract\}.*\n")
461
  pattern_opt2 = re.compile(r"\\abstract\{(.*?)\}", flags=re.DOTALL)
462
  match_opt1 = pattern_opt1.search(main_file)
463
  match_opt2 = pattern_opt2.search(main_file)
 
466
  main_file = insert_abstract(main_file)
467
  match_opt1 = pattern_opt1.search(main_file)
468
  match_opt2 = pattern_opt2.search(main_file)
469
+ assert (match_opt1 is not None) or (
470
+ match_opt2 is not None
471
+ ), "Cannot find paper abstract section!"
472
  return main_file
473
 
474
 
 
478
  \end{abstract}
479
  """
480
 
481
+
482
  def insert_abstract(tex_content):
483
  if "\\maketitle" in tex_content:
484
  # find the position of "\maketitle"
 
486
  # find the nearest ending line
487
  end_line_index = tex_content.find("\n", find_index)
488
  # insert "abs_str" on the next line
489
+ modified_tex = (
490
+ tex_content[: end_line_index + 1]
491
+ + "\n\n"
492
+ + insert_missing_abs_str
493
+ + "\n\n"
494
+ + tex_content[end_line_index + 1 :]
495
+ )
496
  return modified_tex
497
  elif r"\begin{document}" in tex_content:
498
  # find the position of "\maketitle"
 
500
  # find the nearest ending line
501
  end_line_index = tex_content.find("\n", find_index)
502
  # insert "abs_str" on the next line
503
+ modified_tex = (
504
+ tex_content[: end_line_index + 1]
505
+ + "\n\n"
506
+ + insert_missing_abs_str
507
+ + "\n\n"
508
+ + tex_content[end_line_index + 1 :]
509
+ )
510
  return modified_tex
511
  else:
512
  return tex_content
513
 
514
+
515
  """
516
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
517
  Post process
518
  =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
519
  """
520
+
521
+
522
  def mod_inbraket(match):
523
  """
524
+ 为啥chatgpt会把cite里面的逗号换成中文逗号呀
525
  """
526
  # get the matched string
527
  cmd = match.group(1)
528
  str_to_modify = match.group(2)
529
  # modify the matched string
530
+ str_to_modify = str_to_modify.replace("", ":") # 前面是中文冒号,后面是英文冒号
531
+ str_to_modify = str_to_modify.replace("", ",") # 前面是中文逗号,后面是英文逗号
532
  # str_to_modify = 'BOOM'
533
  return "\\" + cmd + "{" + str_to_modify + "}"
534
 
535
+
536
  def fix_content(final_tex, node_string):
537
  """
538
  Fix common GPT errors to increase success rate
 
543
  final_tex = re.sub(r"\\([a-z]{2,10})\{([^\}]*?)\}", mod_inbraket, string=final_tex)
544
 
545
  if "Traceback" in final_tex and "[Local Message]" in final_tex:
546
+ final_tex = node_string # 出问题了,还原原文
547
+ if node_string.count("\\begin") != final_tex.count("\\begin"):
548
+ final_tex = node_string # 出问题了,还原原文
549
+ if node_string.count("\_") > 0 and node_string.count("\_") > final_tex.count("\_"):
550
  # walk and replace any _ without \
551
  final_tex = re.sub(r"(?<!\\)_", "\\_", final_tex)
552
 
 
554
  # this function count the number of { and }
555
  brace_level = 0
556
  for c in string:
557
+ if c == "{":
558
+ brace_level += 1
559
+ elif c == "}":
560
+ brace_level -= 1
561
  return brace_level
562
+
563
  def join_most(tex_t, tex_o):
564
  # this function join translated string and original string when something goes wrong
565
  p_t = 0
566
  p_o = 0
567
+
568
  def find_next(string, chars, begin):
569
  p = begin
570
  while p < len(string):
571
+ if string[p] in chars:
572
+ return p, string[p]
573
  p += 1
574
  return None, None
575
+
576
  while True:
577
+ res1, char = find_next(tex_o, ["{", "}"], p_o)
578
+ if res1 is None:
579
+ break
580
  res2, char = find_next(tex_t, [char], p_t)
581
+ if res2 is None:
582
+ break
583
  p_o = res1 + 1
584
  p_t = res2 + 1
585
  return tex_t[:p_t] + tex_o[p_o:]
 
588
  # 出问题了,还原部分原文,保证括号正确
589
  final_tex = join_most(final_tex, node_string)
590
  return final_tex
591
+
592
+
593
  def compile_latex_with_timeout(command, cwd, timeout=60):
594
  import subprocess
595
+
596
+ process = subprocess.Popen(
597
+ command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
598
+ )
599
  try:
600
  stdout, stderr = process.communicate(timeout=timeout)
601
  except subprocess.TimeoutExpired:
 
605
  return False
606
  return True
607
 
608
+
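Typical call shape for the timeout helper above (the file and folder names are placeholders):

```python
ok = compile_latex_with_timeout(
    "pdflatex -interaction=batchmode main.tex", cwd="./latex_workdir", timeout=60
)
if not ok:
    print("LaTeX compilation timed out and the process was killed")
```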
609
  def run_in_subprocess_wrapper_func(func, args, kwargs, return_dict, exception_dict):
610
  import sys
611
+
612
  try:
613
  result = func(*args, **kwargs)
614
+ return_dict["result"] = result
615
  except Exception as e:
616
  exc_info = sys.exc_info()
617
+ exception_dict["exception"] = exc_info
618
+
619
 
620
  def run_in_subprocess(func):
621
  import multiprocessing
622
+
623
  def wrapper(*args, **kwargs):
624
  return_dict = multiprocessing.Manager().dict()
625
  exception_dict = multiprocessing.Manager().dict()
626
+ process = multiprocessing.Process(
627
+ target=run_in_subprocess_wrapper_func,
628
+ args=(func, args, kwargs, return_dict, exception_dict),
629
+ )
630
  process.start()
631
  process.join()
632
  process.close()
633
+ if "exception" in exception_dict:
634
  # oops, the subprocess ran into an exception
635
+ exc_info = exception_dict["exception"]
636
  raise exc_info[1].with_traceback(exc_info[2])
637
+ if "result" in return_dict.keys():
638
  # If the subprocess ran successfully, return the result
639
+ return return_dict["result"]
640
+
641
  return wrapper
642
 
643
+
644
  def _merge_pdfs(pdf1_path, pdf2_path, output_path):
645
+ import PyPDF2  # PyPDF2 has a serious memory-leak problem; run it in a subprocess so the memory gets released
646
+
647
  Percent = 0.95
648
  # raise RuntimeError('PyPDF2 has a serious memory leak problem, please use other tools to merge PDF files.')
649
  # Open the first PDF file
650
+ with open(pdf1_path, "rb") as pdf1_file:
651
  pdf1_reader = PyPDF2.PdfFileReader(pdf1_file)
652
  # Open the second PDF file
653
+ with open(pdf2_path, "rb") as pdf2_file:
654
  pdf2_reader = PyPDF2.PdfFileReader(pdf2_file)
655
  # Create a new PDF file to store the merged pages
656
  output_writer = PyPDF2.PdfFileWriter()
 
670
  page2 = PyPDF2.PageObject.createBlankPage(pdf1_reader)
671
  # Create a new empty page with double width
672
  new_page = PyPDF2.PageObject.createBlankPage(
673
+ width=int(
674
+ int(page1.mediaBox.getWidth())
675
+ + int(page2.mediaBox.getWidth()) * Percent
676
+ ),
677
+ height=max(page1.mediaBox.getHeight(), page2.mediaBox.getHeight()),
678
  )
679
  new_page.mergeTranslatedPage(page1, 0, 0)
680
+ new_page.mergeTranslatedPage(
681
+ page2,
682
+ int(
683
+ int(page1.mediaBox.getWidth())
684
+ - int(page2.mediaBox.getWidth()) * (1 - Percent)
685
+ ),
686
+ 0,
687
+ )
688
  output_writer.addPage(new_page)
689
  # Save the merged PDF file
690
+ with open(output_path, "wb") as output_file:
691
  output_writer.write(output_file)
692
 
693
+
694
+ merge_pdfs = run_in_subprocess(_merge_pdfs)  # PyPDF2 has a serious memory leak; run it in a subprocess so the memory gets released
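The subprocess-isolation pattern generalizes beyond PDF merging; a minimal sketch of using the `run_in_subprocess` wrapper defined above (the leaky worker is invented for illustration):

```python
def leaky_worker(n):
    # pretend a C extension allocates memory here that is never released
    return sum(range(n))

safe_worker = run_in_subprocess(leaky_worker)

if __name__ == "__main__":  # needed for multiprocessing on spawn platforms
    print(safe_worker(10_000))  # runs in a child; its memory is freed on exit
```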
crazy_functions/pdf_fns/breakdown_txt.py CHANGED
@@ -65,10 +65,10 @@ def cut(limit, get_token_fn, txt_tocut, must_break_at_empty_line, break_anyway=F
65
  # if no suitable cut point was found
66
  if break_anyway:
67
  # whether brute-force cutting is allowed
68
- prev, post = force_breakdown(txt_tocut, limit, get_token_fn)
69
  else:
70
  # otherwise, raise an error directly
71
- raise RuntimeError(f"Found an extremely long line of text! {txt_tocut}")
72

73
  # append to the result list
74
  res.append(prev); fin_len+=len(prev)
 
65
  # if no suitable cut point was found
66
  if break_anyway:
67
  # whether brute-force cutting is allowed
68
+ prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
69
  else:
70
  # otherwise, raise an error directly
71
+ raise RuntimeError(f"Found an extremely long line of text! {remain_txt_to_cut}")
72

73
  # append to the result list
74
  res.append(prev); fin_len+=len(prev)
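The rename above matters because the enclosing loop keeps shaving pieces off `remain_txt_to_cut`; passing the stale `txt_tocut` would re-cut the full original text on every pass. A hedged sketch of the corrected loop shape (simplified: the real `cut` also prefers empty-line and newline breakpoints before resorting to brute force):

```python
def cut_sketch(limit, get_token_fn, txt_tocut, break_anyway=False):
    res, remain_txt_to_cut = [], txt_tocut
    while get_token_fn(remain_txt_to_cut) > limit:
        if not break_anyway:
            raise RuntimeError(f"Found an extremely long line of text! {remain_txt_to_cut}")
        prev, post = force_breakdown(remain_txt_to_cut, limit, get_token_fn)
        res.append(prev)
        remain_txt_to_cut = post  # the remainder shrinks on every iteration
    res.append(remain_txt_to_cut)
    return res
```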
crazy_functions/多智能体.py CHANGED
@@ -50,14 +50,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
50
  return
51
  if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # if this is not a local model, load the API_KEY
52
  llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
53
-
54
- # check whether the current model meets the requirements
55
- API_URL_REDIRECT = get_conf('API_URL_REDIRECT')
56
- if len(API_URL_REDIRECT) > 0:
57
- chatbot.append([f"Handling task: {txt}", f"API redirection is not supported yet."])
58
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
59
- return
60
-
61
  # try to import dependencies; if any are missing, suggest how to install them
62
  try:
63
  import autogen
 
50
  return
51
  if model_info[llm_kwargs['llm_model']]["endpoint"] is not None: # if this is not a local model, load the API_KEY
52
  llm_kwargs['api_key'] = select_api_key(llm_kwargs['api_key'], llm_kwargs['llm_model'])
53
+
 
 
 
 
 
 
 
54
  # try to import dependencies; if any are missing, suggest how to install them
55
  try:
56
  import autogen
crazy_functions/数学动画生成manim.py CHANGED
@@ -1,6 +1,7 @@
1
- from toolbox import CatchException, update_ui, gen_time_str
2
- from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
3
- from .crazy_utils import input_clipping
 
4
 
5
  def inspect_dependency(chatbot, history):
6
  # try to import dependencies; if any are missing, suggest how to install them
@@ -27,9 +28,10 @@ def eval_manim(code):
27
  class_name = get_class_name(code)
28
 
29
  try:
 
30
  subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"])
31
- shutil.move('media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{gen_time_str()}.mp4')
32
- return f'gpt_log/{gen_time_str()}.mp4'
33
  except subprocess.CalledProcessError as e:
34
  output = e.output.decode()
35
  print(f"Command returned non-zero exit status {e.returncode}: {output}.")
@@ -94,6 +96,8 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
94
  res = eval_manim(code)
95
 
96
  chatbot.append(("生成的视频文件路径", res))
 
 
97
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
98
 
99
  # 在这里放一些网上搜集的demo,辅助gpt生成代码
 
1
+ import os
2
+ from toolbox import CatchException, update_ui, gen_time_str, promote_file_to_downloadzone
3
+ from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
4
+ from crazy_functions.crazy_utils import input_clipping
5
 
6
  def inspect_dependency(chatbot, history):
7
  # try to import dependencies; if any are missing, suggest how to install them
 
28
  class_name = get_class_name(code)
29
 
30
  try:
31
+ time_str = gen_time_str()
32
  subprocess.check_output([sys.executable, '-c', f"from gpt_log.MyAnimation import {class_name}; {class_name}().render()"])
33
+ shutil.move(f'media/videos/1080p60/{class_name}.mp4', f'gpt_log/{class_name}-{time_str}.mp4')
34
+ return f'gpt_log/{time_str}.mp4'
35
  except subprocess.CalledProcessError as e:
36
  output = e.output.decode()
37
  print(f"Command returned non-zero exit status {e.returncode}: {output}.")
 
96
  res = eval_manim(code)
97
 
98
  chatbot.append(("生成的视频文件路径", res))
99
+ if os.path.exists(res):
100
+ promote_file_to_downloadzone(res, chatbot=chatbot)
101
  yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新
102
 
103
  # 在这里放一些网上搜集的demo,辅助gpt生成代码
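For context, `eval_manim` expects `gpt_log/MyAnimation.py` to define a scene class (found via `get_class_name`) and then calls its `.render()`. A minimal Manim Community scene of that shape might look like this (the class name is illustrative):

```python
from manim import Scene, Circle, Create

class SimpleCircle(Scene):
    # eval_manim imports this class by name and calls SimpleCircle().render()
    def construct(self):
        self.play(Create(Circle()))
```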
crazy_functions/高级功能函数模板.py CHANGED
@@ -26,4 +26,46 @@ def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
26
  )
27
  chatbot[-1] = (i_say, gpt_say)
28
  history.append(i_say);history.append(gpt_say)
29
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # UI update
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
  )
27
  chatbot[-1] = (i_say, gpt_say)
28
  history.append(i_say);history.append(gpt_say)
29
+ yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # UI update
30
+
31
+
32
+
33
+
34
+ PROMPT = """
35
+ Please produce a logic-relationship diagram around "{subject}", using mermaid syntax. Example of mermaid syntax:
36
+ ```mermaid
37
+ graph TD
38
+ P(Programming) --> L1(Python)
39
+ P(Programming) --> L2(C)
40
+ P(Programming) --> L3(C++)
41
+ P(Programming) --> L4(Javascript)
42
+ P(Programming) --> L5(PHP)
43
+ ```
44
+ """
45
+ @CatchException
46
+ def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
47
+ """
48
+ txt text entered by the user in the input box, e.g. a paragraph to translate, or a path containing files to process
49
+ llm_kwargs gpt model parameters such as temperature and top_p; usually just pass them through unchanged
50
+ plugin_kwargs plugin parameters, used to flexibly tune the various options of complex features
51
+ chatbot handle of the chat display box, used to show output to the user
52
+ history chat history, i.e. the context so far
53
+ system_prompt silent system prompt passed to gpt
54
+ web_port port on which the software is currently running
55
+ """
56
+ history = [] # clear history to avoid input overflow
57
+ chatbot.append(("What is this feature?", "A feature for testing mermaid chart rendering: enter a few keywords in the input box, then draw a diagram with mermaid + llm."))
58
+ yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # the gpt request takes a while, so do a timely UI update first
59
+
60
+ if txt == "": txt = "空白的输入栏" # 调皮一下
61
+
62
+ i_say_show_user = f'Please draw a logic-relationship diagram about "{txt}".'
63
+ i_say = PROMPT.format(subject=txt)
64
+ gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
65
+ inputs=i_say,
66
+ inputs_show_user=i_say_show_user,
67
+ llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
68
+ sys_prompt=""
69
+ )
70
+ history.append(i_say); history.append(gpt_say)
71
+ yield from update_ui(chatbot=chatbot, history=history) # refresh the UI # UI update
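The plugin builds its request by formatting the template above; a quick standalone check of that step (the keyword is illustrative):

```python
subject = "machine learning"
i_say = PROMPT.format(subject=subject)
assert "mermaid" in i_say and subject in i_say  # the fenced example travels with the prompt
```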
docker-compose.yml CHANGED
@@ -129,7 +129,7 @@ services:
129
  runtime: nvidia
130
  devices:
131
  - /dev/nvidia0:/dev/nvidia0
132
-
133
  # 与宿主的网络融合
134
  network_mode: "host"
135
  command: >
@@ -163,7 +163,7 @@ services:
163
  runtime: nvidia
164
  devices:
165
  - /dev/nvidia0:/dev/nvidia0
166
-
167
  # 与宿主的网络融合
168
  network_mode: "host"
169
 
@@ -229,4 +229,3 @@ services:
229
  # 不使用代理网络拉取最新代码
230
  command: >
231
  bash -c "python3 -u main.py"
232
-
 
129
  runtime: nvidia
130
  devices:
131
  - /dev/nvidia0:/dev/nvidia0
132
+
133
  # 与宿主的网络融合
134
  network_mode: "host"
135
  command: >
 
163
  runtime: nvidia
164
  devices:
165
  - /dev/nvidia0:/dev/nvidia0
166
+
167
  # 与宿主的网络融合
168
  network_mode: "host"
169
 
 
229
  # 不使用代理网络拉取最新代码
230
  command: >
231
  bash -c "python3 -u main.py"
 
docs/Dockerfile+ChatGLM CHANGED
@@ -1,2 +1 @@
1
  # 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss
2
-
 
1
  # 此Dockerfile不再维护,请前往docs/GithubAction+ChatGLM+Moss
 
docs/Dockerfile+JittorLLM CHANGED
@@ -1 +1 @@
1
- # 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs
 
1
+ # 此Dockerfile不再维护,请前往docs/GithubAction+JittorLLMs
docs/GithubAction+NoLocal+Latex CHANGED
@@ -18,7 +18,7 @@ WORKDIR /gpt
18
 
19
  RUN pip3 install openai numpy arxiv rich
20
  RUN pip3 install colorama Markdown pygments pymupdf
21
- RUN pip3 install python-docx pdfminer
22
  RUN pip3 install nougat-ocr
23
 
24
  # 装载项目文件
 
18
 
19
  RUN pip3 install openai numpy arxiv rich
20
  RUN pip3 install colorama Markdown pygments pymupdf
21
+ RUN pip3 install python-docx pdfminer
22
  RUN pip3 install nougat-ocr
23
 
24
  # 装载项目文件
docs/README.Arabic.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **ملحوظة**
5
- >
6
  > تمت ترجمة هذا الملف README باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع) وقد لا تكون الترجمة 100٪ موثوقة، يُرجى التمييز بعناية بنتائج الترجمة.
7
- >
8
  > 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدار المُحدد في `requirements.txt`. الأمر للتثبيت: `pip install -r requirements.txt`.
9
 
10
  # <div align=center><img src="logo.png" width="40"> GPT الأكاديمي</div>
@@ -12,14 +12,14 @@
12
  **إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star. لترجمة هذا المشروع إلى لغة عشوائية باستخدام GPT، قم بقراءة وتشغيل [`multi_language.py`](multi_language.py) (تجريبي).
13
 
14
  > **ملحوظة**
15
- >
16
  > 1. يُرجى ملاحظة أنها الإضافات (الأزرار) المميزة فقط التي تدعم قراءة الملفات، وبعض الإضافات توجد في قائمة منسدلة في منطقة الإضافات. بالإضافة إلى ذلك، نرحب بأي Pull Request جديد بأعلى أولوية لأي إضافة جديدة.
17
- >
18
  > 2. تُوضّح كل من الملفات في هذا المشروع وظيفتها بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). يمكنك في أي وقت أن تنقر على إضافة وظيفة ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [نصب بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
19
- >
20
  > 3. يتم توافق هذا المشروع مع ودعم توصيات اللغة البيجائية الأكبر شمولًا وشجاعة لمثل ChatGLM. يمكنك توفير العديد من مفاتيح Api المشتركة في تكوين الملف، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. عند تبديل مؤقت لـ `API_KEY`، قم بإدخال `API_KEY` المؤقت في منطقة الإدخال ثم اضغط على زر "إدخال" لجعله ساري المفعول.
21
 
22
-
23
 
24
  <div align="center">
25
 
@@ -46,7 +46,7 @@
46
  ⭐إضغط على وكيل "شارلوت الذكي" | [وظائف] استكمال الذكاء للكأس الأول للذكاء المكتسب من مايكروسوفت، اكتشاف وتطوير عالمي العميل
47
  تبديل الواجهة المُظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح
48
  دعم المزيد من نماذج LLM | دعم لجميع GPT3.5 وGPT4 و[ChatGLM2 في جامعة ثوه في لين](https://github.com/THUDM/ChatGLM2-6B) و[MOSS في جامعة فودان](https://github.com/OpenLMLab/MOSS)
49
- ⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
50
  دعم المزيد من نماذج "LLM"، دعم [نشر الحديس](https://huggingface.co/spaces/qingxu98/gpt-academic) | انضم إلى واجهة "Newbing" (Bing الجديدة)،نقدم نماذج Jittorllms الجديدة تؤيدهم [LLaMA](https://github.com/facebookresearch/llama) و [盘古α](https://openi.org.cn/pangu/)
51
  ⭐حزمة "void-terminal" للشبكة (pip) | قم بطلب كافة وظائف إضافة هذا المشروع في python بدون واجهة رسومية (قيد التطوير)
52
  ⭐PCI-Express لإعلام (PCI) | [وظائف] باللغة الطبيعية، قم بتنفيذ المِهام الأخرى في المشروع
@@ -200,8 +200,8 @@ docker-compose up
200
  ```
201
  "ترجمة سوبر الإنجليزية إلى العربية": {
202
  # البادئة، ستتم إضافتها قبل إدخالاتك. مثلاً، لوصف ما تريده مثل ترجمة أو شرح كود أو تلوين وهلم جرا
203
- "بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
204
-
205
  # اللاحقة، سيتم إضافتها بعد إدخالاتك. يمكن استخدامها لوضع علامات اقتباس حول إدخالك.
206
  "لاحقة": "",
207
  },
@@ -341,4 +341,3 @@ https://github.com/oobabooga/one-click-installers
341
  # المزيد:
342
  https://github.com/gradio-app/gradio
343
  https://github.com/fghrsh/live2d_demo
344
-
 
2
 
3
 
4
  > **ملحوظة**
5
+ >
6
  > تمت ترجمة هذا الملف README باستخدام GPT (بواسطة المكون الإضافي لهذا المشروع) وقد لا تكون الترجمة 100٪ موثوقة، يُرجى التمييز بعناية بنتائج الترجمة.
7
+ >
8
  > 2023.11.7: عند تثبيت التبعيات، يُرجى اختيار الإصدار المُحدد في `requirements.txt`. الأمر للتثبيت: `pip install -r requirements.txt`.
9
 
10
  # <div align=center><img src="logo.png" width="40"> GPT الأكاديمي</div>
 
12
  **إذا كنت تحب هذا المشروع، فيُرجى إعطاؤه Star. لترجمة هذا المشروع إلى لغة عشوائية باستخدام GPT، قم بقراءة وتشغيل [`multi_language.py`](multi_language.py) (تجريبي).
13
 
14
  > **ملحوظة**
15
+ >
16
  > 1. يُرجى ملاحظة أنها الإضافات (الأزرار) المميزة فقط التي تدعم قراءة الملفات، وبعض الإضافات توجد في قائمة منسدلة في منطقة الإضافات. بالإضافة إلى ذلك، نرحب بأي Pull Request جديد بأعلى أولوية لأي إضافة جديدة.
17
+ >
18
  > 2. تُوضّح كل من الملفات في هذا المشروع وظيفتها بالتفصيل في [تقرير الفهم الذاتي `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). يمكنك في أي وقت أن تنقر على إضافة وظيفة ذات صلة لاستدعاء GPT وإعادة إنشاء تقرير الفهم الذاتي للمشروع. للأسئلة الشائعة [`الويكي`](https://github.com/binary-husky/gpt_academic/wiki). [طرق التثبيت العادية](#installation) | [نصب بنقرة واحدة](https://github.com/binary-husky/gpt_academic/releases) | [تعليمات التكوين](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
19
+ >
20
  > 3. يتم توافق هذا المشروع مع ودعم توصيات اللغة البيجائية الأكبر شمولًا وشجاعة لمثل ChatGLM. يمكنك توفير العديد من مفاتيح Api المشتركة في تكوين الملف، مثل `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. عند تبديل مؤقت لـ `API_KEY`، قم بإدخال `API_KEY` المؤقت في منطقة الإدخال ثم اضغط على زر "إدخال" لجعله ساري المفعول.
21
 
22
+
23
 
24
  <div align="center">
25
 
 
46
  ⭐إضغط على وكيل "شارلوت الذكي" | [وظائف] استكمال الذكاء للكأس الأول للذكاء المكتسب من مايكروسوفت، اكتشاف وتطوير عالمي العميل
47
  تبديل الواجهة المُظلمة | يمكنك التبديل إلى الواجهة المظلمة بإضافة ```/?__theme=dark``` إلى نهاية عنوان URL في المتصفح
48
  دعم المزيد من نماذج LLM | دعم لجميع GPT3.5 وGPT4 و[ChatGLM2 في جامعة ثوه في لين](https://github.com/THUDM/ChatGLM2-6B) و[MOSS في جامعة فودان](https://github.com/OpenLMLab/MOSS)
49
+ ⭐تحوي انطباعة "ChatGLM2" | يدعم استيراد "ChatGLM2" ويوفر إضافة المساعدة في تعديله
50
  دعم المزيد من نماذج "LLM"، دعم [نشر الحديس](https://huggingface.co/spaces/qingxu98/gpt-academic) | انضم إلى واجهة "Newbing" (Bing الجديدة)،نقدم نماذج Jittorllms الجديدة تؤيدهم [LLaMA](https://github.com/facebookresearch/llama) و [盘古α](https://openi.org.cn/pangu/)
51
  ⭐حزمة "void-terminal" للشبكة (pip) | قم بطلب كافة وظائف إضافة هذا المشروع في python بدون واجهة رسومية (قيد التطوير)
52
  ⭐PCI-Express لإعلام (PCI) | [وظائف] باللغة الطبيعية، قم بتنفيذ المِهام الأخرى في المشروع
 
200
  ```
201
  "ترجمة سوبر الإنجليزية إلى العربية": {
202
  # البادئة، ستتم إضافتها قبل إدخالاتك. مثلاً، لوصف ما تريده مثل ترجمة أو شرح كود أو تلوين وهلم جرا
203
+ "بادئة": "يرجى ترجمة النص التالي إلى العربية ثم استخدم جدول Markdown لشرح المصطلحات المختصة المذكورة في النص:\n\n",
204
+
205
  # اللاحقة، سيتم إضافتها بعد إدخالاتك. يمكن استخدامها لوضع علامات اقتباس حول إدخالك.
206
  "لاحقة": "",
207
  },
 
341
  # المزيد:
342
  https://github.com/gradio-app/gradio
343
  https://github.com/fghrsh/live2d_demo
 
docs/README.English.md CHANGED
@@ -18,11 +18,11 @@ To translate this project to arbitrary language with GPT, read and run [`multi_l
18
  > 1.Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process any new plugins with the **highest priority** through PRs.
19
  >
20
  > 2.The functionalities of each file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As the version iterates, you can also click on the relevant function plugin at any time to call GPT to regenerate the project's self-analysis report. Common questions are in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
21
- >
22
  > 3.This project is compatible with and encourages the use of domestic large-scale language models such as ChatGLM. Multiple api-keys can be used together. You can fill in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"` to temporarily switch `API_KEY` during input, enter the temporary `API_KEY`, and then press enter to apply it.
23
 
24
 
25
-
26
 
27
  <div align="center">
28
 
@@ -126,7 +126,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
126
  【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration):
127
  ```sh
128
  # 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
129
- python -m pip install -r request_llms/requirements_chatglm.txt
130
 
131
  # 【Optional Step II】Support Fudan MOSS
132
  python -m pip install -r request_llms/requirements_moss.txt
@@ -204,8 +204,8 @@ For example:
204
  ```
205
  "Super Translation": {
206
  # Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc.
207
- "Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
208
-
209
  # Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix.
210
  "Suffix": "",
211
  },
@@ -355,4 +355,3 @@ https://github.com/oobabooga/one-click-installers
355
  # More:
356
  https://github.com/gradio-app/gradio
357
  https://github.com/fghrsh/live2d_demo
358
-
 
18
  > 1.Please note that only plugins (buttons) highlighted in **bold** support reading files, and some plugins are located in the **dropdown menu** in the plugin area. Additionally, we welcome and process any new plugins with the **highest priority** through PRs.
19
  >
20
  > 2.The functionalities of each file in this project are described in detail in the [self-analysis report `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). As the version iterates, you can also click on the relevant function plugin at any time to call GPT to regenerate the project's self-analysis report. Common questions are in the [`wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Regular installation method](#installation) | [One-click installation script](https://github.com/binary-husky/gpt_academic/releases) | [Configuration instructions](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
21
+ >
22
  > 3.This project is compatible with and encourages the use of domestic large-scale language models such as ChatGLM. Multiple api-keys can be used together. You can fill in the configuration file with `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"` to temporarily switch `API_KEY` during input, enter the temporary `API_KEY`, and then press enter to apply it.
23
 
24
 
25
+
26
 
27
  <div align="center">
28
 
 
126
  【Optional Step】If you need to support THU ChatGLM2 or Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Familiar with Pytorch + Sufficient computer configuration):
127
  ```sh
128
  # 【Optional Step I】Support THU ChatGLM2. Note: If you encounter the "Call ChatGLM fail unable to load ChatGLM parameters" error, refer to the following: 1. The default installation above is for torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
129
+ python -m pip install -r request_llms/requirements_chatglm.txt
130
 
131
  # 【Optional Step II】Support Fudan MOSS
132
  python -m pip install -r request_llms/requirements_moss.txt
 
204
  ```
205
  "Super Translation": {
206
  # Prefix: will be added before your input. For example, used to describe your request, such as translation, code explanation, proofreading, etc.
207
+ "Prefix": "Please translate the following paragraph into Chinese and then explain each proprietary term in the text using a markdown table:\n\n",
208
+
209
  # Suffix: will be added after your input. For example, used to wrap your input in quotation marks along with the prefix.
210
  "Suffix": "",
211
  },
 
355
  # More:
356
  https://github.com/gradio-app/gradio
357
  https://github.com/fghrsh/live2d_demo
 
docs/README.French.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **Remarque**
5
- >
6
  > Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction.
7
- >
8
  > 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`.
9
 
10
 
@@ -12,7 +12,7 @@
12
 
13
  **Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !**
14
 
15
- Si vous aimez ce projet, veuillez lui donner une étoile.
16
  Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
17
 
18
  > **Remarque**
@@ -22,7 +22,7 @@ Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez
22
  > 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)..
23
  >
24
  > 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci.
25
-
26
 
27
  <div align="center">
28
 
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
128
  [Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration):
129
  ```sh
130
  # [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
131
- python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Support Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
@@ -201,7 +201,7 @@ Par exemple:
201
  "Traduction avancée de l'anglais vers le français": {
202
  # Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc.
203
  "Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n",
204
-
205
  # Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets.
206
  "Suffix": "",
207
  },
@@ -354,4 +354,3 @@ https://github.com/oobabooga/one-click-installers
354
  # Plus:
355
  https://github.com/gradio-app/gradio
356
  https://github.com/fghrsh/live2d_demo
357
-
 
2
 
3
 
4
  > **Remarque**
5
+ >
6
  > Ce README a été traduit par GPT (implémenté par le plugin de ce projet) et n'est pas fiable à 100 %. Veuillez examiner attentivement les résultats de la traduction.
7
+ >
8
  > 7 novembre 2023 : Lors de l'installation des dépendances, veuillez choisir les versions **spécifiées** dans le fichier `requirements.txt`. Commande d'installation : `pip install -r requirements.txt`.
9
 
10
 
 
12
 
13
  **Si vous aimez ce projet, merci de lui donner une étoile ; si vous avez inventé des raccourcis ou des plugins utiles, n'hésitez pas à envoyer des demandes d'extraction !**
14
 
15
+ Si vous aimez ce projet, veuillez lui donner une étoile.
16
  Pour traduire ce projet dans une langue arbitraire avec GPT, lisez et exécutez [`multi_language.py`](multi_language.py) (expérimental).
17
 
18
  > **Remarque**
 
22
  > 2. Les fonctionnalités de chaque fichier de ce projet sont spécifiées en détail dans [le rapport d'auto-analyse `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic个项目自译解报告). Vous pouvez également cliquer à tout moment sur les plugins de fonctions correspondants pour appeler GPT et générer un rapport d'auto-analyse du projet. Questions fréquemment posées [wiki](https://github.com/binary-husky/gpt_academic/wiki). [Méthode d'installation standard](#installation) | [Script d'installation en un clic](https://github.com/binary-husky/gpt_academic/releases) | [Instructions de configuration](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)..
23
  >
24
  > 3. Ce projet est compatible avec et recommande l'expérimentation de grands modèles de langage chinois tels que ChatGLM, etc. Prend en charge plusieurs clés API, vous pouvez les remplir dans le fichier de configuration comme `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Pour changer temporairement la clé API, entrez la clé API temporaire dans la zone de saisie, puis appuyez sur Entrée pour soumettre et activer celle-ci.
25
+
26
 
27
  <div align="center">
28
 
 
128
  [Optional Steps] If you need to support Tsinghua ChatGLM2/Fudan MOSS as backends, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Sufficient computer configuration):
129
  ```sh
130
  # [Optional Step I] Support Tsinghua ChatGLM2. Comment on this note: If you encounter the error "Call ChatGLM generated an error and cannot load the parameters of ChatGLM", refer to the following: 1: The default installation is the torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If the model cannot be loaded due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True).
131
+ python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Support Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
 
201
  "Traduction avancée de l'anglais vers le français": {
202
  # Préfixe, ajouté avant votre saisie. Par exemple, utilisez-le pour décrire votre demande, telle que la traduction, l'explication du code, l'amélioration, etc.
203
  "Prefix": "Veuillez traduire le contenu suivant en français, puis expliquer chaque terme propre à la langue anglaise utilisé dans le texte à l'aide d'un tableau markdown : \n\n",
204
+
205
  # Suffixe, ajouté après votre saisie. Par exemple, en utilisant le préfixe, vous pouvez entourer votre contenu par des guillemets.
206
  "Suffix": "",
207
  },
 
354
  # Plus:
355
  https://github.com/gradio-app/gradio
356
  https://github.com/fghrsh/live2d_demo
 
docs/README.German.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **Hinweis**
5
- >
6
- > Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
7
- >
8
  > 7. November 2023: Beim Installieren der Abhängigkeiten bitte nur die in der `requirements.txt` **angegebenen Versionen** auswählen. Installationsbefehl: `pip install -r requirements.txt`.
9
 
10
 
@@ -12,19 +12,19 @@
12
 
13
  **Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. Wenn Sie praktische Tastenkombinationen oder Plugins entwickelt haben, sind Pull-Anfragen willkommen!**
14
 
15
- Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
16
  Um dieses Projekt mit GPT in eine beliebige Sprache zu übersetzen, lesen Sie [`multi_language.py`](multi_language.py) (experimentell).
17
 
18
  > **Hinweis**
19
  >
20
  > 1. Beachten Sie bitte, dass nur die mit **hervorgehobenen** Plugins (Schaltflächen) Dateien lesen können. Einige Plugins befinden sich im **Drop-down-Menü** des Plugin-Bereichs. Außerdem freuen wir uns über jede neue Plugin-PR mit **höchster Priorität**.
21
- >
22
  > 2. Die Funktionen jeder Datei in diesem Projekt sind im [Selbstanalysebericht `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht) ausführlich erläutert. Sie können jederzeit auf die relevanten Funktions-Plugins klicken und GPT aufrufen, um den Selbstanalysebericht des Projekts neu zu generieren. Häufig gestellte Fragen finden Sie im [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standardinstallationsmethode](#installation) | [Ein-Klick-Installationsskript](https://github.com/binary-husky/gpt_academic/releases) | [Konfigurationsanleitung](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung).
23
- >
24
  > 3. Dieses Projekt ist kompatibel mit und unterstützt auch die Verwendung von inländischen Sprachmodellen wie ChatGLM. Die gleichzeitige Verwendung mehrerer API-Schlüssel ist möglich, indem Sie sie in der Konfigurationsdatei wie folgt angeben: `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Wenn Sie den `API_KEY` vorübergehend ändern möchten, geben Sie vorübergehend den temporären `API_KEY` im Eingabebereich ein und drücken Sie die Eingabetaste, um die Änderung wirksam werden zu lassen.
25
 
26
 
27
-
28
 
29
  <div align="center">
30
 
@@ -93,7 +93,7 @@ Weitere Funktionen anzeigen (z. B. Bildgenerierung) …… | Siehe das Ende dies
93
  </div>
94
 
95
  # Installation
96
- ### Installation Method I: Run directly (Windows, Linux or MacOS)
97
 
98
  1. Download the project
99
  ```sh
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # This step is the same as installing
128
  [Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration):
129
  ```sh
130
  # [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
131
- python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Support Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
@@ -207,8 +207,8 @@ Beispiel:
207
  ```
208
  "Übersetzung von Englisch nach Chinesisch": {
209
  # Präfix, wird vor Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Anforderungen zu beschreiben, z.B. Übersetzen, Code erklären, verbessern usw.
210
- "Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
211
-
212
  # Suffix, wird nach Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Eingabe in Anführungszeichen zu setzen.
213
  "Suffix": "",
214
  },
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
361
  # Weitere:
362
  https://github.com/gradio-app/gradio
363
  https://github.com/fghrsh/live2d_demo
364
-
 
2
 
3
 
4
  > **Hinweis**
5
+ >
6
+ > Dieses README wurde mithilfe der GPT-Übersetzung (durch das Plugin dieses Projekts) erstellt und ist nicht zu 100 % zuverlässig. Bitte überprüfen Sie die Übersetzungsergebnisse sorgfältig.
7
+ >
8
  > 7. November 2023: Beim Installieren der Abhängigkeiten bitte nur die in der `requirements.txt` **angegebenen Versionen** auswählen. Installationsbefehl: `pip install -r requirements.txt`.
9
 
10
 
 
12
 
13
  **Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star. Wenn Sie praktische Tastenkombinationen oder Plugins entwickelt haben, sind Pull-Anfragen willkommen!**
14
 
15
+ Wenn Ihnen dieses Projekt gefällt, geben Sie ihm bitte einen Star.
16
  Um dieses Projekt mit GPT in eine beliebige Sprache zu übersetzen, lesen Sie [`multi_language.py`](multi_language.py) (experimentell).
17
 
18
  > **Hinweis**
19
  >
20
  > 1. Beachten Sie bitte, dass nur die mit **hervorgehobenen** Plugins (Schaltflächen) Dateien lesen können. Einige Plugins befinden sich im **Drop-down-Menü** des Plugin-Bereichs. Außerdem freuen wir uns über jede neue Plugin-PR mit **höchster Priorität**.
21
+ >
22
  > 2. Die Funktionen jeder Datei in diesem Projekt sind im [Selbstanalysebericht `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT-Academic-Selbstanalysebericht) ausführlich erläutert. Sie können jederzeit auf die relevanten Funktions-Plugins klicken und GPT aufrufen, um den Selbstanalysebericht des Projekts neu zu generieren. Häufig gestellte Fragen finden Sie im [`Wiki`](https://github.com/binary-husky/gpt_academic/wiki). [Standardinstallationsmethode](#installation) | [Ein-Klick-Installationsskript](https://github.com/binary-husky/gpt_academic/releases) | [Konfigurationsanleitung](https://github.com/binary-husky/gpt_academic/wiki/Projekt-Konfigurationsanleitung).
23
+ >
24
  > 3. Dieses Projekt ist kompatibel mit und unterstützt auch die Verwendung von inländischen Sprachmodellen wie ChatGLM. Die gleichzeitige Verwendung mehrerer API-Schlüssel ist möglich, indem Sie sie in der Konfigurationsdatei wie folgt angeben: `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Wenn Sie den `API_KEY` vorübergehend ändern möchten, geben Sie vorübergehend den temporären `API_KEY` im Eingabebereich ein und drücken Sie die Eingabetaste, um die Änderung wirksam werden zu lassen.
25
 
26
 
27
+
28
 
29
  <div align="center">
30
 
 
93
  </div>
94
 
95
  # Installation
96
+ ### Installation Method I: Run directly (Windows, Linux or MacOS)
97
 
98
  1. Download the project
99
  ```sh
 
128
  [Optional] If you need to support Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used PyTorch + Strong computer configuration):
129
  ```sh
130
  # [Optional Step I] Support Tsinghua ChatGLM2. Tsinghua ChatGLM note: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters normally", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, you need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py. Change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
131
+ python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Support Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
 
207
  ```
208
  "Übersetzung von Englisch nach Chinesisch": {
209
  # Präfix, wird vor Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Anforderungen zu beschreiben, z.B. Übersetzen, Code erklären, verbessern usw.
210
+ "Präfix": "Bitte übersetzen Sie den folgenden Abschnitt ins Chinesische und erklären Sie dann jedes Fachwort in einer Markdown-Tabelle:\n\n",
211
+
212
  # Suffix, wird nach Ihrer Eingabe hinzugefügt. Zum Beispiel, um Ihre Eingabe in Anführungszeichen zu setzen.
213
  "Suffix": "",
214
  },
 
361
  # Weitere:
362
  https://github.com/gradio-app/gradio
363
  https://github.com/fghrsh/live2d_demo
 
docs/README.Italian.md CHANGED
@@ -12,7 +12,7 @@
12
 
13
  **Se ti piace questo progetto, per favore dagli una stella; se hai idee o plugin utili, fai una pull request!**
14
 
15
- Se ti piace questo progetto, dagli una stella.
16
  Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`multi_language.py`](multi_language.py) (sperimentale).
17
 
18
  > **Nota**
@@ -20,11 +20,11 @@ Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`mult
20
  > 1. Fai attenzione che solo i plugin (pulsanti) **evidenziati** supportano la lettura dei file, alcuni plugin si trovano nel **menu a tendina** nell'area dei plugin. Inoltre, accogliamo e gestiamo con **massima priorità** qualsiasi nuovo plugin attraverso pull request.
21
  >
22
  > 2. Le funzioni di ogni file in questo progetto sono descritte in dettaglio nel [rapporto di traduzione automatica del progetto `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Con l'iterazione della versione, puoi anche fare clic sui plugin delle funzioni rilevanti in qualsiasi momento per richiamare GPT e rigenerare il rapporto di auto-analisi del progetto. Domande frequenti [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Metodo di installazione standard](#installazione) | [Script di installazione one-click](https://github.com/binary-husky/gpt_academic/releases) | [Configurazione](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
23
- >
24
  > 3. Questo progetto è compatibile e incoraggia l'uso di modelli di linguaggio di grandi dimensioni nazionali, come ChatGLM. Supporto per la coesistenza di più chiavi API, puoi compilare nel file di configurazione come `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando è necessario sostituire temporaneamente `API_KEY`, inserisci temporaneamente `API_KEY` nell'area di input e premi Invio per confermare.
25
 
26
 
27
-
28
 
29
  <div align="center">
30
 
@@ -128,7 +128,7 @@ python -m pip install -r requirements.txt # Questo passaggio è identico alla pr
128
  [Optional] Se desideri utilizzare ChatGLM2 di Tsinghua/Fudan MOSS come backend, è necessario installare ulteriori dipendenze (Requisiti: conoscenza di Python + esperienza con Pytorch + hardware potente):
129
  ```sh
130
  # [Optional Step I] Supporto per ChatGLM2 di Tsinghua. Note di ChatGLM di Tsinghua: Se si verifica l'errore "Call ChatGLM fail non può caricare i parametri di ChatGLM", fare riferimento a quanto segue: 1: L'installazione predefinita è la versione torch+cpu, per usare cuda è necessario disinstallare torch ed installare nuovamente la versione con torch+cuda; 2: Se il modello non può essere caricato a causa di una configurazione insufficiente, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, sostituendo AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) con AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
131
- python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Supporto per Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
@@ -206,8 +206,8 @@ Ad esempio,
206
  ```
207
  "Traduzione avanzata Cinese-Inglese": {
208
  # Prefisso, sarà aggiunto prima del tuo input. Ad esempio, utilizzato per descrivere la tua richiesta, come traduzione, spiegazione del codice, rifinitura, ecc.
209
- "Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
210
-
211
  # Suffisso, sarà aggiunto dopo il tuo input. Ad esempio, in combinazione con il prefisso, puoi circondare il tuo input con virgolette.
212
  "Suffisso": "",
213
  },
@@ -224,7 +224,7 @@ La scrittura di plugin per questo progetto è facile e richiede solo conoscenze
224
  # Aggiornamenti
225
  ### I: Aggiornamenti
226
 
227
- 1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
228
  Inoltre, nella stessa area del plugin (menu a tendina) chiamare `Carica la cronologia della conversazione` per ripristinare una conversazione precedente.
229
  Suggerimento: fare clic su `Carica la cronologia della conversazione` senza specificare un file per visualizzare la tua cronologia di archiviazione HTML.
230
  <div align="center">
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
358
  # Altre risorse:
359
  https://github.com/gradio-app/gradio
360
  https://github.com/fghrsh/live2d_demo
361
-
 
12
 
13
  **Se ti piace questo progetto, per favore dagli una stella; se hai idee o plugin utili, fai una pull request!**
14
 
15
+ Se ti piace questo progetto, dagli una stella.
16
  Per tradurre questo progetto in qualsiasi lingua con GPT, leggi ed esegui [`multi_language.py`](multi_language.py) (sperimentale).
17
 
18
  > **Nota**
 
20
  > 1. Fai attenzione che solo i plugin (pulsanti) **evidenziati** supportano la lettura dei file, alcuni plugin si trovano nel **menu a tendina** nell'area dei plugin. Inoltre, accogliamo e gestiamo con **massima priorità** qualsiasi nuovo plugin attraverso pull request.
21
  >
22
  > 2. Le funzioni di ogni file in questo progetto sono descritte in dettaglio nel [rapporto di traduzione automatica del progetto `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Con l'iterazione della versione, puoi anche fare clic sui plugin delle funzioni rilevanti in qualsiasi momento per richiamare GPT e rigenerare il rapporto di auto-analisi del progetto. Domande frequenti [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Metodo di installazione standard](#installazione) | [Script di installazione one-click](https://github.com/binary-husky/gpt_academic/releases) | [Configurazione](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
23
+ >
24
  > 3. Questo progetto è compatibile e incoraggia l'uso di modelli di linguaggio di grandi dimensioni nazionali, come ChatGLM. Supporto per la coesistenza di più chiavi API, puoi compilare nel file di configurazione come `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando è necessario sostituire temporaneamente `API_KEY`, inserisci temporaneamente `API_KEY` nell'area di input e premi Invio per confermare.
25
 
26
 
27
+
28
 
29
  <div align="center">
30
 
 
128
  [Optional] Se desideri utilizzare ChatGLM2 di Tsinghua/Fudan MOSS come backend, è necessario installare ulteriori dipendenze (Requisiti: conoscenza di Python + esperienza con Pytorch + hardware potente):
129
  ```sh
130
  # [Optional Step I] Supporto per ChatGLM2 di Tsinghua. Note di ChatGLM di Tsinghua: Se si verifica l'errore "Call ChatGLM fail non può caricare i parametri di ChatGLM", fare riferimento a quanto segue: 1: L'installazione predefinita è la versione torch+cpu, per usare cuda è necessario disinstallare torch ed installare nuovamente la versione con torch+cuda; 2: Se il modello non può essere caricato a causa di una configurazione insufficiente, è possibile modificare la precisione del modello in request_llm/bridge_chatglm.py, sostituendo AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) con AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
131
+ python -m pip install -r request_llms/requirements_chatglm.txt
132
 
133
  # [Optional Step II] Supporto per Fudan MOSS
134
  python -m pip install -r request_llms/requirements_moss.txt
 
206
  ```
207
  "Traduzione avanzata Cinese-Inglese": {
208
  # Prefisso, sarà aggiunto prima del tuo input. Ad esempio, utilizzato per descrivere la tua richiesta, come traduzione, spiegazione del codice, rifinitura, ecc.
209
+ "Prefisso": "Si prega di tradurre il seguente testo in cinese e fornire spiegazione per i termini tecnici utilizzati, utilizzando una tabella in markdown uno per uno:\n\n",
210
+
211
  # Suffisso, sarà aggiunto dopo il tuo input. Ad esempio, in combinazione con il prefisso, puoi circondare il tuo input con virgolette.
212
  "Suffisso": "",
213
  },
 
224
  # Aggiornamenti
225
  ### I: Aggiornamenti
226
 
227
+ 1. Funzionalità di salvataggio della conversazione. Chiamare `Salva la conversazione corrente` nell'area del plugin per salvare la conversazione corrente come un file html leggibile e ripristinabile.
228
  Inoltre, nella stessa area del plugin (menu a tendina) chiamare `Carica la cronologia della conversazione` per ripristinare una conversazione precedente.
229
  Suggerimento: fare clic su `Carica la cronologia della conversazione` senza specificare un file per visualizzare la tua cronologia di archiviazione HTML.
230
  <div align="center">
 
358
  # Altre risorse:
359
  https://github.com/gradio-app/gradio
360
  https://github.com/fghrsh/live2d_demo
 
docs/README.Japanese.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **注意**
5
- >
6
  > 此READMEはGPTによる翻訳で生成されました(このプロジェクトのプラグインによって実装されています)、翻訳結果は100%正確ではないため、注意してください。
7
- >
8
  > 2023年11月7日: 依存関係をインストールする際は、`requirements.txt`で**指定されたバージョン**を選択してください。 インストールコマンド: `pip install -r requirements.txt`。
9
 
10
 
@@ -18,11 +18,11 @@ GPTを使用してこのプロジェクトを任意の言語に翻訳するに
18
  > 1. **強調された** プラグイン(ボタン)のみがファイルを読み込むことができることに注意してください。一部のプラグインは、プラグインエリアのドロップダウンメニューにあります。また、新しいプラグインのPRを歓迎し、最優先で対応します。
19
  >
20
  > 2. このプロジェクトの各ファイルの機能は、[自己分析レポート`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82)で詳しく説明されています。バージョンが進化するにつれて、関連する関数プラグインをクリックして、プロジェクトの自己分析レポートをGPTで再生成することもできます。よくある質問については、[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)をご覧ください。[標準的なインストール方法](#installation) | [ワンクリックインストールスクリプト](https://github.com/binary-husky/gpt_academic/releases) | [構成の説明](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain)。
21
- >
22
  > 3. このプロジェクトは、[ChatGLM](https://www.chatglm.dev/)などの中国製の大規模言語モデルも互換性があり、試してみることを推奨しています。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`のように記入できます。`API_KEY`を一時的に変更する必要がある場合は、入力エリアに一時的な`API_KEY`を入力し、Enterキーを押して提出すると有効になります。
23
 
24
 
25
-
26
 
27
  <div align="center">
28
 
@@ -189,7 +189,7 @@ Python環境に詳しくないWindowsユーザーは、[リリース](https://gi
189
  "超级英译中": {
190
  # プレフィックス、入力の前に追加されます。例えば、要求を記述するために使用されます。翻訳、コードの解説、校正など
191
  "プレフィックス": "下記の内容を中国語に翻訳し、専門用語を一つずつマークダウンテーブルで解説してください:\n\n"、
192
-
193
  # サフィックス、入力の後に追加されます。プレフィックスと一緒に使用して、入力内容を引用符で囲むことができます。
194
  "サフィックス": ""、
195
  }、
@@ -342,4 +342,3 @@ https://github.com/oobabooga/one-click-installers
342
  # その他:
343
  https://github.com/gradio-app/gradio
344
  https://github.com/fghrsh/live2d_demo
345
-
 
2
 
3
 
4
  > **注意**
5
+ >
6
  > 此READMEはGPTによる翻訳で生成されました(このプロジェクトのプラグインによって実装されています)、翻訳結果は100%正確ではないため、注意してください。
7
+ >
8
  > 2023年11月7日: 依存関係をインストールする際は、`requirements.txt`で**指定されたバージョン**を選択してください。 インストールコマンド: `pip install -r requirements.txt`。
9
 
10
 
 
18
  > 1. **強調された** プラグイン(ボタン)のみがファイルを読み込むことができることに注意してください。一部のプラグインは、プラグインエリアのドロップダウンメニューにあります。また、新しいプラグインのPRを歓迎し、最優先で対応します。
19
  >
20
  > 2. このプロジェクトの各ファイルの機能は、[自己分析レポート`self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E5%A0%82)で詳しく説明されています。バージョンが進化するにつれて、関連する関数プラグインをクリックして、プロジェクトの自己分析レポートをGPTで再生成することもできます。よくある質問については、[`wiki`](https://github.com/binary-husky/gpt_academic/wiki)をご覧ください。[標準的なインストール方法](#installation) | [ワンクリックインストールスクリプト](https://github.com/binary-husky/gpt_academic/releases) | [構成の説明](https://github.com/binary-husky/gpt_academic/wiki/Project-Configuration-Explain)。
21
+ >
22
  > 3. このプロジェクトは、[ChatGLM](https://www.chatglm.dev/)などの中国製の大規模言語モデルも互換性があり、試してみることを推奨しています。複数のAPIキーを共存させることができ、設定ファイルに`API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`のように記入できます。`API_KEY`を一時的に変更する必要がある場合は、入力エリアに一時的な`API_KEY`を入力し、Enterキーを押して提出すると有効になります。
23
 
24
 
25
+
26
 
27
  <div align="center">
28
 
 
189
  "超级英译中": {
190
  # プレフィックス、入力の前に追加されます。例えば、要求を記述するために使用されます。翻訳、コードの解説、校正など
191
  "プレフィックス": "下記の内容を中国語に翻訳し、専門用語を一つずつマークダウンテーブルで解説してください:\n\n"、
192
+
193
  # サフィックス、入力の後に追加されます。プレフィックスと一緒に使用して、入力内容を引用符で囲むことができます。
194
  "サフィックス": ""、
195
  }、
 
342
  # その他:
343
  https://github.com/gradio-app/gradio
344
  https://github.com/fghrsh/live2d_demo
 
docs/README.Korean.md CHANGED
@@ -27,7 +27,7 @@ GPT를 사용하여 이 프로젝트를 임의의 언어로 번역하려면 [`mu
27
 
28
 
29
 
30
-
31
 
32
  <div align="center">
33
 
@@ -130,7 +130,7 @@ python -m pip install -r requirements.txt # This step is the same as the pip ins
130
  [Optional Step] If you need support for Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used Pytorch + Sufficient computer configuration):
131
  ```sh
132
  # [Optional Step I] Support for Tsinghua ChatGLM2. Note for Tsinghua ChatGLM: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
133
- python -m pip install -r request_llms/requirements_chatglm.txt
134
 
135
  # [Optional Step II] Support for Fudan MOSS
136
  python -m pip install -r request_llms/requirements_moss.txt
@@ -208,8 +208,8 @@ Please visit the [cloud server remote deployment wiki](https://github.com/binary
208
  ```
209
  "초급영문 번역": {
210
  # 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등
211
- "Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
212
-
213
  # 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다.
214
  "Suffix": "",
215
  },
@@ -361,4 +361,3 @@ https://github.com/oobabooga/one-click-installers
361
  # 더보기:
362
  https://github.com/gradio-app/gradio
363
  https://github.com/fghrsh/live2d_demo
364
-
 
27
 
28
 
29
 
30
+
31
 
32
  <div align="center">
33
 
 
130
  [Optional Step] If you need support for Tsinghua ChatGLM2/Fudan MOSS as the backend, you need to install additional dependencies (Prerequisites: Familiar with Python + Have used Pytorch + Sufficient computer configuration):
131
  ```sh
132
  # [Optional Step I] Support for Tsinghua ChatGLM2. Note for Tsinghua ChatGLM: If you encounter the error "Call ChatGLM fail cannot load ChatGLM parameters", refer to the following: 1: The default installation above is torch+cpu version. To use cuda, uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient computer configuration, you can modify the model precision in request_llm/bridge_chatglm.py, change AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
133
+ python -m pip install -r request_llms/requirements_chatglm.txt
134
 
135
  # [Optional Step II] Support for Fudan MOSS
136
  python -m pip install -r request_llms/requirements_moss.txt
 
208
  ```
209
  "초급영문 번역": {
210
  # 접두사, 입력 내용 앞에 추가됩니다. 예를 들어 요구 사항을 설명하는 데 사용됩니다. 예를 들어 번역, 코드 설명, 교정 등
211
+ "Prefix": "다음 내용을 한국어로 번역하고 전문 용어에 대한 설명을 적용한 마크다운 표를 사용하세요:\n\n",
212
+
213
  # 접미사, 입력 내용 뒤에 추가됩니다. 예를 들어 접두사와 함께 입력 내용을 따옴표로 감쌀 수 있습니다.
214
  "Suffix": "",
215
  },
 
361
  # 더보기:
362
  https://github.com/gradio-app/gradio
363
  https://github.com/fghrsh/live2d_demo
 
docs/README.Portuguese.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **Nota**
5
- >
6
  > Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução.
7
- >
8
  > 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`.
9
 
10
  # <div align=center><img src="logo.png" width="40"> GPT Acadêmico</div>
@@ -15,12 +15,12 @@ Para traduzir este projeto para qualquer idioma utilizando o GPT, leia e execute
15
  > **Nota**
16
  >
17
  > 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos, alguns plugins estão localizados no **menu suspenso** do plugin area. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR.
18
- >
19
  > 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT para regerar o relatório de autonálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
20
  >
21
  > 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo.
22
 
23
-
24
  <div align="center">
25
 
26
  Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição
@@ -89,7 +89,7 @@ Apresentação de mais novas funcionalidades (geração de imagens, etc.) ... |
89
  </div>
90
 
91
  # Instalação
92
- ### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
93
 
94
  1. Baixe o projeto
95
  ```sh
@@ -124,7 +124,7 @@ python -m pip install -r requirements.txt # Este passo é igual ao da instalaç
124
  [Opcional] Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
125
  ```sh
126
  # [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llm/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
127
- python -m pip install -r request_llms/requirements_chatglm.txt
128
 
129
  # [Opcional Passo II] Suporte para MOSS do Fudan
130
  python -m pip install -r request_llms/requirements_moss.txt
@@ -202,8 +202,8 @@ Por exemplo:
202
  ```
203
  "超级英译中": {
204
  # Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc.
205
- "Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
206
-
207
  # Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas.
208
  "Suffix": "",
209
  },
@@ -355,4 +355,3 @@ https://github.com/oobabooga/instaladores-de-um-clique
355
  # Mais:
356
  https://github.com/gradio-app/gradio
357
  https://github.com/fghrsh/live2d_demo
358
-
 
2
 
3
 
4
  > **Nota**
5
+ >
6
  > Este README foi traduzido pelo GPT (implementado por um plugin deste projeto) e não é 100% confiável. Por favor, verifique cuidadosamente o resultado da tradução.
7
+ >
8
  > 7 de novembro de 2023: Ao instalar as dependências, favor selecionar as **versões especificadas** no `requirements.txt`. Comando de instalação: `pip install -r requirements.txt`.
9
 
10
  # <div align=center><img src="logo.png" width="40"> GPT Acadêmico</div>
 
15
  > **Nota**
16
  >
17
  > 1. Observe que apenas os plugins (botões) marcados em **destaque** são capazes de ler arquivos, alguns plugins estão localizados no **menu suspenso** do plugin area. Também damos boas-vindas e prioridade máxima a qualquer novo plugin via PR.
18
+ >
19
  > 2. As funcionalidades de cada arquivo deste projeto estão detalhadamente explicadas em [autoanálise `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). Com a iteração das versões, você também pode clicar nos plugins de funções relevantes a qualquer momento para chamar o GPT para regerar o relatório de autonálise do projeto. Perguntas frequentes [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [Método de instalação convencional](#installation) | [Script de instalação em um clique](https://github.com/binary-husky/gpt_academic/releases) | [Explicação de configuração](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明)。
20
  >
21
  > 3. Este projeto é compatível e encoraja o uso de modelos de linguagem chineses, como ChatGLM. Vários api-keys podem ser usados simultaneamente, podendo ser especificados no arquivo de configuração como `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Quando precisar alterar temporariamente o `API_KEY`, insira o `API_KEY` temporário na área de entrada e pressione Enter para que ele seja efetivo.
22
 
23
+
24
  <div align="center">
25
 
26
  Funcionalidades (⭐= funcionalidade recentemente adicionada) | Descrição
 
89
  </div>
90
 
91
  # Instalação
92
+ ### Método de instalação I: Executar diretamente (Windows, Linux ou MacOS)
93
 
94
  1. Baixe o projeto
95
  ```sh
 
124
  [Opcional] Se você quiser suporte para o ChatGLM2 do THU/ MOSS do Fudan, precisará instalar dependências extras (pré-requisitos: familiarizado com o Python + já usou o PyTorch + o computador tem configuração suficiente):
125
  ```sh
126
  # [Opcional Passo I] Suporte para ChatGLM2 do THU. Observações sobre o ChatGLM2 do THU: Se você encontrar o erro "Call ChatGLM fail 不能正常加载ChatGLM的参数" (Falha ao chamar o ChatGLM, não é possível carregar os parâmetros do ChatGLM), consulte o seguinte: 1: A versão instalada por padrão é a versão torch+cpu. Se você quiser usar a versão cuda, desinstale o torch e reinstale uma versão com torch+cuda; 2: Se a sua configuração não for suficiente para carregar o modelo, você pode modificar a precisão do modelo em request_llm/bridge_chatglm.py, alterando todas as ocorrências de AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) para AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
127
+ python -m pip install -r request_llms/requirements_chatglm.txt
128
 
129
  # [Opcional Passo II] Suporte para MOSS do Fudan
130
  python -m pip install -r request_llms/requirements_moss.txt
 
202
  ```
203
  "超级英译中": {
204
  # Prefixo, adicionado antes do seu input. Por exemplo, usado para descrever sua solicitação, como traduzir, explicar o código, revisar, etc.
205
+ "Prefix": "Por favor, traduza o parágrafo abaixo para o chinês e explique cada termo técnico dentro de uma tabela markdown:\n\n",
206
+
207
  # Sufixo, adicionado após o seu input. Por exemplo, em conjunto com o prefixo, pode-se colocar seu input entre aspas.
208
  "Suffix": "",
209
  },
 
355
  # Mais:
356
  https://github.com/gradio-app/gradio
357
  https://github.com/fghrsh/live2d_demo
 
docs/README.Russian.md CHANGED
@@ -2,9 +2,9 @@
2
 
3
 
4
  > **Примечание**
5
- >
6
  > Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
7
- >
8
  > 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
9
 
10
 
@@ -17,12 +17,12 @@
17
  >
18
  > 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**.
19
  >
20
- > 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
21
  >
22
  > 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения.
23
 
24
 
25
-
26
 
27
  <div align="center">
28
 
@@ -204,8 +204,8 @@ docker-compose up
204
  ```
205
  "Супер-англо-русский перевод": {
206
  # Префикс, который будет добавлен перед вашим вводом. Например, используется для описания вашего запроса, например, перевода, объяснения кода, редактирования и т.д.
207
- "Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
208
-
209
  # Суффикс, который будет добавлен после вашего ввода. Например, можно использовать с префиксом, чтобы заключить ваш ввод в кавычки.
210
  "Суффикс": "",
211
  },
@@ -335,7 +335,7 @@ GPT Academic Группа QQ разработчиков: `610599535`
335
  ```
336
  В коде использовались многие функции, представленные в других отличных проектах, поэтому их порядок не имеет значения:
337
 
338
- # ChatGLM2-6B от Тиньхуа:
339
  https://github.com/THUDM/ChatGLM2-6B
340
 
341
  # Линейные модели с ограниченной памятью от Тиньхуа:
@@ -358,4 +358,3 @@ https://github.com/oobabooga/one-click-installers
358
  # Больше:
359
  https://github.com/gradio-app/gradio
360
  https://github.com/fghrsh/live2d_demo
361
-
 
2
 
3
 
4
  > **Примечание**
5
+ >
6
  > Этот README был переведен с помощью GPT (реализовано с помощью плагина этого проекта) и не может быть полностью надежным, пожалуйста, внимательно проверьте результаты перевода.
7
+ >
8
  > 7 ноября 2023 года: При установке зависимостей, пожалуйста, выберите **указанные версии** из `requirements.txt`. Команда установки: `pip install -r requirements.txt`.
9
 
10
 
 
17
  >
18
  > 1. Пожалуйста, обратите внимание, что только плагины (кнопки), выделенные **жирным шрифтом**, поддерживают чтение файлов, некоторые плагины находятся в выпадающем меню **плагинов**. Кроме того, мы с радостью приветствуем и обрабатываем PR для любых новых плагинов с **наивысшим приоритетом**.
19
  >
20
+ > 2. Функции каждого файла в этом проекте подробно описаны в [отчете о самостоятельном анализе проекта `self_analysis.md`](https://github.com/binary-husky/gpt_academic/wiki/GPT‐Academic项目自译解报告). С каждым новым релизом вы также можете в любое время нажать на соответствующий функциональный плагин, вызвать GPT для повторной генерации сводного отчета о самоанализе проекта. Часто задаваемые вопросы [`wiki`](https://github.com/binary-husky/gpt_academic/wiki) | [обычные методы установки](#installation) | [скрипт одношаговой установки](https://github.com/binary-husky/gpt_academic/releases) | [инструкции по настройке](https://github.com/binary-husky/gpt_academic/wiki/项目配置说明).
21
  >
22
  > 3. Этот проект совместим и настоятельно рекомендуется использование китайской NLP-модели ChatGLM и других моделей больших языков производства Китая. Поддерживает одновременное использование нескольких ключей API, которые можно указать в конфигурационном файле, например, `API_KEY="openai-key1,openai-key2,azure-key3,api2d-key4"`. Если нужно временно заменить `API_KEY`, введите временный `API_KEY` в окне ввода и нажмите Enter для его подтверждения.
23
 
24
 
25
+
26
 
27
  <div align="center">
28
 
 
204
  ```
205
  "Супер-англо-русский перевод": {
206
  # Префикс, который будет добавлен перед вашим вводом. Например, используется для описания вашего запроса, например, перевода, объяснения кода, редактирования и т.д.
207
+ "Префикс": "Пожалуйста, переведите следующий абзац на русский язык, а затем покажите каждый термин на экране с помощью таблицы Markdown:\n\n",
208
+
209
  # Суффикс, который будет добавлен после вашего ввода. Например, можно использовать с префиксом, чтобы заключить ваш ввод в кавычки.
210
  "Суффикс": "",
211
  },
 
335
  ```
336
  В коде использовались многие функции, представленные в других отличных проектах, поэтому их порядок не имеет значения:
337
 
338
+ # ChatGLM2-6B от Тиньхуа:
339
  https://github.com/THUDM/ChatGLM2-6B
340
 
341
  # Линейные модели с ограниченной памятью от Тиньхуа:
 
358
  # Больше:
359
  https://github.com/gradio-app/gradio
360
  https://github.com/fghrsh/live2d_demo
 
docs/WithFastapi.md CHANGED
@@ -17,18 +17,18 @@ nano config.py
17
 
18
  - # 如果需要在二级路径下运行
19
  - # CUSTOM_PATH = get_conf('CUSTOM_PATH')
20
- - # if CUSTOM_PATH != "/":
21
  - # from toolbox import run_gradio_in_subpath
22
  - # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
23
- - # else:
24
  - # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
25
 
26
  + 如果需要在二级路径下运行
27
  + CUSTOM_PATH = get_conf('CUSTOM_PATH')
28
- + if CUSTOM_PATH != "/":
29
  + from toolbox import run_gradio_in_subpath
30
  + run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
31
- + else:
32
  + demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
33
 
34
  if __name__ == "__main__":
 
17
 
18
  - # 如果需要在二级路径下运行
19
  - # CUSTOM_PATH = get_conf('CUSTOM_PATH')
20
+ - # if CUSTOM_PATH != "/":
21
  - # from toolbox import run_gradio_in_subpath
22
  - # run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
23
+ - # else:
24
  - # demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
25
 
26
  + 如果需要在二级路径下运行
27
  + CUSTOM_PATH = get_conf('CUSTOM_PATH')
28
+ + if CUSTOM_PATH != "/":
29
  + from toolbox import run_gradio_in_subpath
30
  + run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
31
+ + else:
32
  + demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png")
33
 
34
  if __name__ == "__main__":
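The block above toggles commented lines into live code so the gradio app can be mounted under a secondary path. Read as plain Python, a condensed sketch (assuming gradio is installed; `AUTHENTICATION`, `PORT` and `CUSTOM_PATH` stand in for values the project normally reads from `config.py` via `get_conf`):

```python
import gradio as gr

CUSTOM_PATH = "/gpt"    # assumed sub-path; "/" means "serve at the root"
PORT = 7860
AUTHENTICATION = None   # or a list like [("user", "password")]

with gr.Blocks() as demo:
    gr.Markdown("GPT Academic demo page")

if CUSTOM_PATH != "/":
    # toolbox.run_gradio_in_subpath (named in the doc above) mounts `demo`
    # inside a FastAPI app under CUSTOM_PATH; it is not re-implemented here.
    from toolbox import run_gradio_in_subpath
    run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
else:
    demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION)
```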
docs/test_markdown_format.py CHANGED
@@ -7,13 +7,27 @@ sample = """
7
  """
8
  import re
9
 
 
10
  def preprocess_newbing_out(s):
11
- pattern = r'\^(\d+)\^' # 匹配^数字^
12
- pattern2 = r'\[(\d+)\]' # 匹配^数字^
13
- sub = lambda m: '\['+m.group(1)+'\]' # 将匹配到的数字作为替换值
14
- result = re.sub(pattern, sub, s) # 替换操作
15
- if '[1]' in result:
16
- result += '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>' + "<br/>".join([re.sub(pattern2, sub, r) for r in result.split('\n') if r.startswith('[')]) + '</small>'
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  return result
18
 
19
 
@@ -28,37 +42,39 @@ def close_up_code_segment_during_stream(gpt_reply):
28
  str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
29
 
30
  """
31
- if '```' not in gpt_reply:
32
  return gpt_reply
33
- if gpt_reply.endswith('```'):
34
  return gpt_reply
35
 
36
  # 排除了以上两个情况,我们
37
- segments = gpt_reply.split('```')
38
  n_mark = len(segments) - 1
39
  if n_mark % 2 == 1:
40
  # print('输出代码片段中!')
41
- return gpt_reply+'\n```'
42
  else:
43
  return gpt_reply
44
-
 
45
  import markdown
46
  from latex2mathml.converter import convert as tex2mathml
47
- from functools import wraps, lru_cache
 
48
  def markdown_convertion(txt):
49
  """
50
  将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
51
  """
52
  pre = '<div class="markdown-body">'
53
- suf = '</div>'
54
  if txt.startswith(pre) and txt.endswith(suf):
55
  # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
56
- return txt # 已经被转化过,不需要再次转化
57
-
58
  markdown_extension_configs = {
59
- 'mdx_math': {
60
- 'enable_dollar_delimiter': True,
61
- 'use_gitlab_delimiters': False,
62
  },
63
  }
64
  find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
@@ -72,19 +88,19 @@ def markdown_convertion(txt):
72
 
73
  def replace_math_no_render(match):
74
  content = match.group(1)
75
- if 'mode=display' in match.group(0):
76
- content = content.replace('\n', '</br>')
77
- return f"<font color=\"#00FF00\">$$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$$</font>"
78
  else:
79
- return f"<font color=\"#00FF00\">$</font><font color=\"#FF00FF\">{content}</font><font color=\"#00FF00\">$</font>"
80
 
81
  def replace_math_render(match):
82
  content = match.group(1)
83
- if 'mode=display' in match.group(0):
84
- if '\\begin{aligned}' in content:
85
- content = content.replace('\\begin{aligned}', '\\begin{array}')
86
- content = content.replace('\\end{aligned}', '\\end{array}')
87
- content = content.replace('&', ' ')
88
  content = tex2mathml_catch_exception(content, display="block")
89
  return content
90
  else:
@@ -94,37 +110,58 @@ def markdown_convertion(txt):
94
  """
95
  解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
96
  """
97
- content = content.replace('<script type="math/tex">\n<script type="math/tex; mode=display">', '<script type="math/tex; mode=display">')
98
- content = content.replace('</script>\n</script>', '</script>')
 
 
 
99
  return content
100
 
101
-
102
- if ('$' in txt) and ('```' not in txt): # 有$标识的公式符号,且没有代码段```的标识
103
  # convert everything to html format
104
- split = markdown.markdown(text='---')
105
- convert_stage_1 = markdown.markdown(text=txt, extensions=['mdx_math', 'fenced_code', 'tables', 'sane_lists'], extension_configs=markdown_extension_configs)
 
 
 
 
106
  convert_stage_1 = markdown_bug_hunt(convert_stage_1)
107
  # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
108
  # 1. convert to easy-to-copy tex (do not render math)
109
- convert_stage_2_1, n = re.subn(find_equation_pattern, replace_math_no_render, convert_stage_1, flags=re.DOTALL)
 
 
 
 
 
110
  # 2. convert to rendered equation
111
- convert_stage_2_2, n = re.subn(find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL)
 
 
112
  # cat them together
113
- return pre + convert_stage_2_1 + f'{split}' + convert_stage_2_2 + suf
114
  else:
115
- return pre + markdown.markdown(txt, extensions=['fenced_code', 'codehilite', 'tables', 'sane_lists']) + suf
 
 
 
 
 
 
116
 
117
 
118
  sample = preprocess_newbing_out(sample)
119
  sample = close_up_code_segment_during_stream(sample)
120
  sample = markdown_convertion(sample)
121
- with open('tmp.html', 'w', encoding='utf8') as f:
122
- f.write("""
 
123
 
124
  <head>
125
  <title>My Website</title>
126
  <link rel="stylesheet" type="text/css" href="style.css">
127
  </head>
128
 
129
- """)
 
130
  f.write(sample)
 
7
  """
8
  import re
9
 
10
+
11
  def preprocess_newbing_out(s):
12
+ pattern = r"\^(\d+)\^" # 匹配^数字^
13
+ pattern2 = r"\[(\d+)\]" # 匹配^数字^
14
+
15
+ def sub(m):
16
+ return "\\[" + m.group(1) + "\\]" # 将匹配到的数字作为替换值
17
+
18
+ result = re.sub(pattern, sub, s) # 替换操作
19
+ if "[1]" in result:
20
+ result += (
21
+ '<br/><hr style="border-top: dotted 1px #44ac5c;"><br/><small>'
22
+ + "<br/>".join(
23
+ [
24
+ re.sub(pattern2, sub, r)
25
+ for r in result.split("\n")
26
+ if r.startswith("[")
27
+ ]
28
+ )
29
+ + "</small>"
30
+ )
31
  return result
32
 
33
 
 
42
  str: 返回一个新的字符串,将输出代码片段的“后面的```”补上。
43
 
44
  """
45
+ if "```" not in gpt_reply:
46
  return gpt_reply
47
+ if gpt_reply.endswith("```"):
48
  return gpt_reply
49
 
50
  # 排除了以上两个情况,我们
51
+ segments = gpt_reply.split("```")
52
  n_mark = len(segments) - 1
53
  if n_mark % 2 == 1:
54
  # print('输出代码片段中!')
55
+ return gpt_reply + "\n```"
56
  else:
57
  return gpt_reply
58
+
59
+
60
  import markdown
61
  from latex2mathml.converter import convert as tex2mathml
62
+
63
+
64
  def markdown_convertion(txt):
65
  """
66
  将Markdown格式的文本转换为HTML格式。如果包含数学公式,则先将公式转换为HTML格式。
67
  """
68
  pre = '<div class="markdown-body">'
69
+ suf = "</div>"
70
  if txt.startswith(pre) and txt.endswith(suf):
71
  # print('警告,输入了已经经过转化的字符串,二次转化可能出问题')
72
+ return txt # 已经被转化过,不需要再次转化
73
+
74
  markdown_extension_configs = {
75
+ "mdx_math": {
76
+ "enable_dollar_delimiter": True,
77
+ "use_gitlab_delimiters": False,
78
  },
79
  }
80
  find_equation_pattern = r'<script type="math/tex(?:.*?)>(.*?)</script>'
 
88
 
89
  def replace_math_no_render(match):
90
  content = match.group(1)
91
+ if "mode=display" in match.group(0):
92
+ content = content.replace("\n", "</br>")
93
+ return f'<font color="#00FF00">$$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$$</font>'
94
  else:
95
+ return f'<font color="#00FF00">$</font><font color="#FF00FF">{content}</font><font color="#00FF00">$</font>'
96
 
97
  def replace_math_render(match):
98
  content = match.group(1)
99
+ if "mode=display" in match.group(0):
100
+ if "\\begin{aligned}" in content:
101
+ content = content.replace("\\begin{aligned}", "\\begin{array}")
102
+ content = content.replace("\\end{aligned}", "\\end{array}")
103
+ content = content.replace("&", " ")
104
  content = tex2mathml_catch_exception(content, display="block")
105
  return content
106
  else:
 
110
  """
111
  解决一个mdx_math的bug(单$包裹begin命令时多余<script>)
112
  """
113
+ content = content.replace(
114
+ '<script type="math/tex">\n<script type="math/tex; mode=display">',
115
+ '<script type="math/tex; mode=display">',
116
+ )
117
+ content = content.replace("</script>\n</script>", "</script>")
118
  return content
119
 
120
+ if ("$" in txt) and ("```" not in txt): # 有$标识的公式符号,且没有代码段```的标识
 
121
  # convert everything to html format
122
+ split = markdown.markdown(text="---")
123
+ convert_stage_1 = markdown.markdown(
124
+ text=txt,
125
+ extensions=["mdx_math", "fenced_code", "tables", "sane_lists"],
126
+ extension_configs=markdown_extension_configs,
127
+ )
128
  convert_stage_1 = markdown_bug_hunt(convert_stage_1)
129
  # re.DOTALL: Make the '.' special character match any character at all, including a newline; without this flag, '.' will match anything except a newline. Corresponds to the inline flag (?s).
130
  # 1. convert to easy-to-copy tex (do not render math)
131
+ convert_stage_2_1, n = re.subn(
132
+ find_equation_pattern,
133
+ replace_math_no_render,
134
+ convert_stage_1,
135
+ flags=re.DOTALL,
136
+ )
137
  # 2. convert to rendered equation
138
+ convert_stage_2_2, n = re.subn(
139
+ find_equation_pattern, replace_math_render, convert_stage_1, flags=re.DOTALL
140
+ )
141
  # cat them together
142
+ return pre + convert_stage_2_1 + f"{split}" + convert_stage_2_2 + suf
143
  else:
144
+ return (
145
+ pre
146
+ + markdown.markdown(
147
+ txt, extensions=["fenced_code", "codehilite", "tables", "sane_lists"]
148
+ )
149
+ + suf
150
+ )
151
 
152
 
153
  sample = preprocess_newbing_out(sample)
154
  sample = close_up_code_segment_during_stream(sample)
155
  sample = markdown_convertion(sample)
156
+ with open("tmp.html", "w", encoding="utf8") as f:
157
+ f.write(
158
+ """
159
 
160
  <head>
161
  <title>My Website</title>
162
  <link rel="stylesheet" type="text/css" href="style.css">
163
  </head>
164
 
165
+ """
166
+ )
167
  f.write(sample)
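The black reformat above is behavior-preserving, so the two helpers still do the same work; two quick checks of their core steps (the sample strings are invented for illustration):

```python
import re

# 1) Citation rewrite (first step of preprocess_newbing_out):
#    Bing-style ^1^ markers become escaped \[1\] markers.
s = "Transformers were introduced in 2017^1^."
print(re.sub(r"\^(\d+)\^", lambda m: "\\[" + m.group(1) + "\\]", s))
# -> Transformers were introduced in 2017\[1\].

# 2) Fence balancing (close_up_code_segment_during_stream): an odd number
#    of triple-backtick marks means a code block is still streaming,
#    so a closing fence is appended.
fence = "`" * 3   # the fence marker, built indirectly to keep this block tidy
reply = "Here you go:\n" + fence + "python\nprint('hi')"
if reply.count(fence) % 2 == 1:
    reply += "\n" + fence
print(reply)
```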
docs/translate_japanese.json CHANGED
@@ -2106,4 +2106,4 @@
2106
  "改变输入参数的顺序与结构": "入力パラメータの順序と構造を変更する",
2107
  "正在精细切分latex文件": "LaTeXファイルを細かく分割しています",
2108
  "读取文件": "ファイルを読み込んでいます"
2109
- }
 
2106
  "改变输入参数的顺序与结构": "入力パラメータの順序と構造を変更する",
2107
  "正在精细切分latex文件": "LaTeXファイルを細かく分割しています",
2108
  "读取文件": "ファイルを読み込んでいます"
2109
+ }
docs/translate_std.json CHANGED
@@ -98,4 +98,4 @@
98
  "图片生成_DALLE2": "ImageGeneration_DALLE2",
99
  "图片生成_DALLE3": "ImageGeneration_DALLE3",
100
  "图片修改_DALLE2": "ImageModification_DALLE2"
101
- }
 
98
  "图片生成_DALLE2": "ImageGeneration_DALLE2",
99
  "图片生成_DALLE3": "ImageGeneration_DALLE3",
100
  "图片修改_DALLE2": "ImageModification_DALLE2"
101
+ }
docs/use_audio.md CHANGED
@@ -61,4 +61,3 @@ VI 两种音频监听模式切换时,需要刷新页面才有效。
61
  VII 非localhost运行+非https情况下无法打开录音功能的坑:https://blog.csdn.net/weixin_39461487/article/details/109594434
62
 
63
  ## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能
64
-
 
61
  VII 非localhost运行+非https情况下无法打开录音功能的坑:https://blog.csdn.net/weixin_39461487/article/details/109594434
62
 
63
  ## 5.点击函数插件区“实时音频采集” 或者其他音频交互功能
 
docs/waifu_plugin/autoload.js CHANGED
@@ -8,8 +8,8 @@ try {
8
  live2d_settings['modelId'] = 5; // 默认模型 ID
9
  live2d_settings['modelTexturesId'] = 1; // 默认材质 ID
10
  live2d_settings['modelStorage'] = false; // 不储存模型 ID
11
- live2d_settings['waifuSize'] = '210x187';
12
- live2d_settings['waifuTipsSize'] = '187x52';
13
  live2d_settings['canSwitchModel'] = true;
14
  live2d_settings['canSwitchTextures'] = true;
15
  live2d_settings['canSwitchHitokoto'] = false;
 
8
  live2d_settings['modelId'] = 5; // 默认模型 ID
9
  live2d_settings['modelTexturesId'] = 1; // 默认材质 ID
10
  live2d_settings['modelStorage'] = false; // 不储存模型 ID
11
+ live2d_settings['waifuSize'] = '210x187';
12
+ live2d_settings['waifuTipsSize'] = '187x52';
13
  live2d_settings['canSwitchModel'] = true;
14
  live2d_settings['canSwitchTextures'] = true;
15
  live2d_settings['canSwitchHitokoto'] = false;
docs/waifu_plugin/flat-ui-icons-regular.svg CHANGED
docs/waifu_plugin/jquery-ui.min.js CHANGED
The diff for this file is too large to render. See raw diff
 
docs/waifu_plugin/source CHANGED
@@ -1 +1 @@
1
- https://github.com/fghrsh/live2d_demo
 
1
+ https://github.com/fghrsh/live2d_demo
docs/waifu_plugin/waifu-tips.js CHANGED
@@ -5,11 +5,11 @@ window.live2d_settings = Array(); /*
5
        /`ー'    L//`ヽ、 Live2D 看板娘 参数设置
6
       /  /,  /|  ,  ,    ', Version 1.4.2
7
     イ  / /-‐/ i L_ ハ ヽ!  i Update 2018.11.12
8
-     レ ヘ 7イ`ト  レ'ァ-ト、!ハ|  |
9
       !,/7 '0'   ´0iソ|   |   
10
       |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘
11
       レ'| i>.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html
12
-       レ'| | / k_7_/レ'ヽ, ハ. |
13
         | |/i 〈|/  i ,.ヘ | i | Thanks
14
        .|/ / i:   ヘ!  \ | journey-ad / https://github.com/journey-ad/live2d_src
15
          kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
@@ -77,11 +77,11 @@ String.prototype.render = function(context) {
77
 
78
  return this.replace(tokenReg, function (word, slash1, token, slash2) {
79
  if (slash1 || slash2) { return word.replace('\\', ''); }
80
-
81
  var variables = token.replace(/\s/g, '').split('.');
82
  var currentObject = context;
83
  var i, length, variable;
84
-
85
  for (i = 0, length = variables.length; i < length; ++i) {
86
  variable = variables[i];
87
  currentObject = currentObject[variable];
@@ -101,9 +101,9 @@ function showMessage(text, timeout, flag) {
101
  if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
102
  if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
103
  if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
104
-
105
  if(flag) sessionStorage.setItem('waifu-text', text);
106
-
107
  $('.waifu-tips').stop();
108
  $('.waifu-tips').html(text).fadeTo(200, 1);
109
  if (timeout === undefined) timeout = 5000;
@@ -121,15 +121,15 @@ function hideMessage(timeout) {
121
  function initModel(waifuPath, type) {
122
  /* console welcome message */
123
  eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
124
-
125
  /* 判断 JQuery */
126
  if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
127
-
128
  /* 加载看板娘样式 */
129
  live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
130
  live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
131
  live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
132
-
133
  $("#live2d").attr("width",live2d_settings.waifuSize[0]);
134
  $("#live2d").attr("height",live2d_settings.waifuSize[1]);
135
  $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
@@ -138,32 +138,32 @@ function initModel(waifuPath, type) {
138
  $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
139
  $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
140
  $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
141
-
142
  if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
143
  else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
144
-
145
  window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
146
  if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
147
-
148
  try {
149
  if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
150
  else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
151
  else $(".waifu").css("transition", 'all .3s ease-in-out');
152
  } catch(err) { console.log('[Error] JQuery UI is not defined.') }
153
-
154
  live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
155
  if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
156
-
157
  $('.waifu-tool .fui-home').click(function (){
158
  //window.location = 'https://www.fghrsh.net/';
159
  window.location = live2d_settings.homePageUrl;
160
  });
161
-
162
  $('.waifu-tool .fui-info-circle').click(function (){
163
  //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
164
  window.open(live2d_settings.aboutPageUrl);
165
  });
166
-
167
  if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
168
  $.ajax({
169
  cache: true,
@@ -172,7 +172,7 @@ function initModel(waifuPath, type) {
172
  success: function (result){ loadTipsMessage(result); }
173
  });
174
  }
175
-
176
  if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
177
  if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
178
  if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
@@ -185,7 +185,7 @@ function initModel(waifuPath, type) {
185
  if (waifuPath === undefined) waifuPath = '';
186
  var modelId = localStorage.getItem('modelId');
187
  var modelTexturesId = localStorage.getItem('modelTexturesId');
188
-
189
  if (!live2d_settings.modelStorage || modelId == null) {
190
  var modelId = live2d_settings.modelId;
191
  var modelTexturesId = live2d_settings.modelTexturesId;
@@ -204,7 +204,7 @@ function loadModel(modelId, modelTexturesId=0) {
204
 
205
  function loadTipsMessage(result) {
206
  window.waifu_tips = result;
207
-
208
  $.each(result.mouseover, function (index, tips){
209
  $(document).on("mouseover", tips.selector, function (){
210
  var text = getRandText(tips.text);
@@ -223,50 +223,50 @@ function loadTipsMessage(result) {
223
  var now = new Date();
224
  var after = tips.date.split('-')[0];
225
  var before = tips.date.split('-')[1] || after;
226
-
227
- if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
228
  (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
229
  var text = getRandText(tips.text);
230
  text = text.render({year: now.getFullYear()});
231
  showMessage(text, 6000, true);
232
  }
233
  });
234
-
235
  if (live2d_settings.showF12OpenMsg) {
236
  re.toString = function() {
237
  showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
238
  return '';
239
  };
240
  }
241
-
242
  if (live2d_settings.showCopyMessage) {
243
  $(document).on('copy', function() {
244
  showMessage(getRandText(result.waifu.copy_message), 5000, true);
245
  });
246
  }
247
-
248
  $('.waifu-tool .fui-photo').click(function(){
249
  showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
250
  window.Live2D.captureName = live2d_settings.screenshotCaptureName;
251
  window.Live2D.captureFrame = true;
252
  });
253
-
254
  $('.waifu-tool .fui-cross').click(function(){
255
  sessionStorage.setItem('waifu-dsiplay', 'none');
256
  showMessage(getRandText(result.waifu.hidden_message), 1300, true);
257
  window.setTimeout(function() {$('.waifu').hide();}, 1300);
258
  });
259
-
260
  window.showWelcomeMessage = function(result) {
261
  showMessage('欢迎使用GPT-Academic', 6000);
262
  }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
263
-
264
  var waifu_tips = result.waifu;
265
-
266
  function loadOtherModel() {
267
  var modelId = modelStorageGetItem('modelId');
268
  var modelRandMode = live2d_settings.modelRandMode;
269
-
270
  $.ajax({
271
  cache: modelRandMode == 'switch' ? true : false,
272
  url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
@@ -279,12 +279,12 @@ function loadTipsMessage(result) {
279
  }
280
  });
281
  }
282
-
283
  function loadRandTextures() {
284
  var modelId = modelStorageGetItem('modelId');
285
  var modelTexturesId = modelStorageGetItem('modelTexturesId');
286
  var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
287
-
288
  $.ajax({
289
  cache: modelTexturesRandMode == 'switch' ? true : false,
290
  url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
@@ -297,32 +297,32 @@ function loadTipsMessage(result) {
297
  }
298
  });
299
  }
300
-
301
  function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
302
-
303
  /* 检测用户活动状态,并在空闲时显示一言 */
304
  if (live2d_settings.showHitokoto) {
305
  window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
306
  $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
307
  setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
308
  }
309
-
310
  function ifActed() {
311
  if (!hitokotoInterval) {
312
  hitokotoInterval = true;
313
  hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
314
  }
315
  }
316
-
317
  function elseActed() {
318
  getActed = hitokotoInterval = false;
319
  window.clearInterval(hitokotoTimer);
320
  }
321
-
322
  function showHitokotoActed() {
323
  if ($(document)[0].visibilityState == 'visible') showHitokoto();
324
  }
325
-
326
  function showHitokoto() {
327
  switch(live2d_settings.hitokotoAPI) {
328
  case 'lwl12.com':
@@ -366,7 +366,7 @@ function loadTipsMessage(result) {
366
  });
367
  }
368
  }
369
-
370
  $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
371
  $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
372
  $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
 
5
        /`ー'    L//`ヽ、 Live2D 看板娘 参数设置
6
       /  /,  /|  ,  ,    ', Version 1.4.2
7
     イ  / /-‐/ i L_ ハ ヽ!  i Update 2018.11.12
8
+     レ ヘ 7イ`ト  レ'ァ-ト、!ハ|  |
9
       !,/7 '0'   ´0iソ|   |   
10
       |.从"  _   ,,,, / |./   | 网页添加 Live2D 看板娘
11
       レ'| i>.、,,__ _,.イ /  .i  | https://www.fghrsh.net/post/123.html
12
+       レ'| | / k_7_/レ'ヽ, ハ. |
13
         | |/i 〈|/  i ,.ヘ | i | Thanks
14
        .|/ / i:   ヘ!  \ | journey-ad / https://github.com/journey-ad/live2d_src
15
          kヽ>、ハ   _,.ヘ、   /、! xiazeyu / https://github.com/xiazeyu/live2d-widget.js
 
77
 
78
  return this.replace(tokenReg, function (word, slash1, token, slash2) {
79
  if (slash1 || slash2) { return word.replace('\\', ''); }
80
+
81
  var variables = token.replace(/\s/g, '').split('.');
82
  var currentObject = context;
83
  var i, length, variable;
84
+
85
  for (i = 0, length = variables.length; i < length; ++i) {
86
  variable = variables[i];
87
  currentObject = currentObject[variable];
 
101
  if(flag || sessionStorage.getItem('waifu-text') === '' || sessionStorage.getItem('waifu-text') === null){
102
  if(Array.isArray(text)) text = text[Math.floor(Math.random() * text.length + 1)-1];
103
  if (live2d_settings.showF12Message) console.log('[Message]', text.replace(/<[^<>]+>/g,''));
104
+
105
  if(flag) sessionStorage.setItem('waifu-text', text);
106
+
107
  $('.waifu-tips').stop();
108
  $('.waifu-tips').html(text).fadeTo(200, 1);
109
  if (timeout === undefined) timeout = 5000;
 
121
  function initModel(waifuPath, type) {
122
  /* console welcome message */
123
  eval(function(p,a,c,k,e,r){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};if(!''.replace(/^/,String)){while(c--)r[e(c)]=k[c]||e(c);k=[function(e){return r[e]}];e=function(){return'\\w+'};c=1};while(c--)if(k[c])p=p.replace(new RegExp('\\b'+e(c)+'\\b','g'),k[c]);return p}('8.d(" ");8.d("\\U,.\\y\\5.\\1\\1\\1\\1/\\1,\\u\\2 \\H\\n\\1\\1\\1\\1\\1\\b \', !-\\r\\j-i\\1/\\1/\\g\\n\\1\\1\\1 \\1 \\a\\4\\f\'\\1\\1\\1 L/\\a\\4\\5\\2\\n\\1\\1 \\1 /\\1 \\a,\\1 /|\\1 ,\\1 ,\\1\\1\\1 \',\\n\\1\\1\\1\\q \\1/ /-\\j/\\1\\h\\E \\9 \\5!\\1 i\\n\\1\\1\\1 \\3 \\6 7\\q\\4\\c\\1 \\3\'\\s-\\c\\2!\\t|\\1 |\\n\\1\\1\\1\\1 !,/7 \'0\'\\1\\1 \\X\\w| \\1 |\\1\\1\\1\\n\\1\\1\\1\\1 |.\\x\\"\\1\\l\\1\\1 ,,,, / |./ \\1 |\\n\\1\\1\\1\\1 \\3\'| i\\z.\\2,,A\\l,.\\B / \\1.i \\1|\\n\\1\\1\\1\\1\\1 \\3\'| | / C\\D/\\3\'\\5,\\1\\9.\\1|\\n\\1\\1\\1\\1\\1\\1 | |/i \\m|/\\1 i\\1,.\\6 |\\F\\1|\\n\\1\\1\\1\\1\\1\\1.|/ /\\1\\h\\G \\1 \\6!\\1\\1\\b\\1|\\n\\1\\1\\1 \\1 \\1 k\\5>\\2\\9 \\1 o,.\\6\\2 \\1 /\\2!\\n\\1\\1\\1\\1\\1\\1 !\'\\m//\\4\\I\\g\', \\b \\4\'7\'\\J\'\\n\\1\\1\\1\\1\\1\\1 \\3\'\\K|M,p,\\O\\3|\\P\\n\\1\\1\\1\\1\\1 \\1\\1\\1\\c-,/\\1|p./\\n\\1\\1\\1\\1\\1 \\1\\1\\1\'\\f\'\\1\\1!o,.:\\Q \\R\\S\\T v"+e.V+" / W "+e.N);8.d(" ");',60,60,'|u3000|uff64|uff9a|uff40|u30fd|uff8d||console|uff8a|uff0f|uff3c|uff84|log|live2d_settings|uff70|u00b4|uff49||u2010||u3000_|u3008||_|___|uff72|u2500|uff67|u30cf|u30fc||u30bd|u4ece|u30d8|uff1e|__|u30a4|k_|uff17_|u3000L_|u3000i|uff1a|u3009|uff34|uff70r|u30fdL__||___i|l2dVerDate|u30f3|u30ce|nLive2D|u770b|u677f|u5a18|u304f__|l2dVersion|FGHRSH|u00b40i'.split('|'),0,{}));
124
+
125
  /* 判断 JQuery */
126
  if (typeof($.ajax) != 'function') typeof(jQuery.ajax) == 'function' ? window.$ = jQuery : console.log('[Error] JQuery is not defined.');
127
+
128
  /* 加载看板娘样式 */
129
  live2d_settings.waifuSize = live2d_settings.waifuSize.split('x');
130
  live2d_settings.waifuTipsSize = live2d_settings.waifuTipsSize.split('x');
131
  live2d_settings.waifuEdgeSide = live2d_settings.waifuEdgeSide.split(':');
132
+
133
  $("#live2d").attr("width",live2d_settings.waifuSize[0]);
134
  $("#live2d").attr("height",live2d_settings.waifuSize[1]);
135
  $(".waifu-tips").width(live2d_settings.waifuTipsSize[0]);
 
138
  $(".waifu-tips").css("font-size",live2d_settings.waifuFontSize);
139
  $(".waifu-tool").css("font-size",live2d_settings.waifuToolFont);
140
  $(".waifu-tool span").css("line-height",live2d_settings.waifuToolLine);
141
+
142
  if (live2d_settings.waifuEdgeSide[0] == 'left') $(".waifu").css("left",live2d_settings.waifuEdgeSide[1]+'px');
143
  else if (live2d_settings.waifuEdgeSide[0] == 'right') $(".waifu").css("right",live2d_settings.waifuEdgeSide[1]+'px');
144
+
145
  window.waifuResize = function() { $(window).width() <= Number(live2d_settings.waifuMinWidth.replace('px','')) ? $(".waifu").hide() : $(".waifu").show(); };
146
  if (live2d_settings.waifuMinWidth != 'disable') { waifuResize(); $(window).resize(function() {waifuResize()}); }
147
+
148
  try {
149
  if (live2d_settings.waifuDraggable == 'axis-x') $(".waifu").draggable({ axis: "x", revert: live2d_settings.waifuDraggableRevert });
150
  else if (live2d_settings.waifuDraggable == 'unlimited') $(".waifu").draggable({ revert: live2d_settings.waifuDraggableRevert });
151
  else $(".waifu").css("transition", 'all .3s ease-in-out');
152
  } catch(err) { console.log('[Error] JQuery UI is not defined.') }
153
+
154
  live2d_settings.homePageUrl = live2d_settings.homePageUrl == 'auto' ? window.location.protocol+'//'+window.location.hostname+'/' : live2d_settings.homePageUrl;
155
  if (window.location.protocol == 'file:' && live2d_settings.modelAPI.substr(0,2) == '//') live2d_settings.modelAPI = 'http:'+live2d_settings.modelAPI;
156
+
157
  $('.waifu-tool .fui-home').click(function (){
158
  //window.location = 'https://www.fghrsh.net/';
159
  window.location = live2d_settings.homePageUrl;
160
  });
161
+
162
  $('.waifu-tool .fui-info-circle').click(function (){
163
  //window.open('https://imjad.cn/archives/lab/add-dynamic-poster-girl-with-live2d-to-your-blog-02');
164
  window.open(live2d_settings.aboutPageUrl);
165
  });
166
+
167
  if (typeof(waifuPath) == "object") loadTipsMessage(waifuPath); else {
168
  $.ajax({
169
  cache: true,
 
172
  success: function (result){ loadTipsMessage(result); }
173
  });
174
  }
175
+
176
  if (!live2d_settings.showToolMenu) $('.waifu-tool').hide();
177
  if (!live2d_settings.canCloseLive2d) $('.waifu-tool .fui-cross').hide();
178
  if (!live2d_settings.canSwitchModel) $('.waifu-tool .fui-eye').hide();
 
185
  if (waifuPath === undefined) waifuPath = '';
186
  var modelId = localStorage.getItem('modelId');
187
  var modelTexturesId = localStorage.getItem('modelTexturesId');
188
+
189
  if (!live2d_settings.modelStorage || modelId == null) {
190
  var modelId = live2d_settings.modelId;
191
  var modelTexturesId = live2d_settings.modelTexturesId;
 
204
 
205
  function loadTipsMessage(result) {
206
  window.waifu_tips = result;
207
+
208
  $.each(result.mouseover, function (index, tips){
209
  $(document).on("mouseover", tips.selector, function (){
210
  var text = getRandText(tips.text);
 
223
  var now = new Date();
224
  var after = tips.date.split('-')[0];
225
  var before = tips.date.split('-')[1] || after;
226
+
227
+ if((after.split('/')[0] <= now.getMonth()+1 && now.getMonth()+1 <= before.split('/')[0]) &&
228
  (after.split('/')[1] <= now.getDate() && now.getDate() <= before.split('/')[1])){
229
  var text = getRandText(tips.text);
230
  text = text.render({year: now.getFullYear()});
231
  showMessage(text, 6000, true);
232
  }
233
  });
234
+
235
  if (live2d_settings.showF12OpenMsg) {
236
  re.toString = function() {
237
  showMessage(getRandText(result.waifu.console_open_msg), 5000, true);
238
  return '';
239
  };
240
  }
241
+
242
  if (live2d_settings.showCopyMessage) {
243
  $(document).on('copy', function() {
244
  showMessage(getRandText(result.waifu.copy_message), 5000, true);
245
  });
246
  }
247
+
248
  $('.waifu-tool .fui-photo').click(function(){
249
  showMessage(getRandText(result.waifu.screenshot_message), 5000, true);
250
  window.Live2D.captureName = live2d_settings.screenshotCaptureName;
251
  window.Live2D.captureFrame = true;
252
  });
253
+
254
  $('.waifu-tool .fui-cross').click(function(){
255
  sessionStorage.setItem('waifu-dsiplay', 'none');
256
  showMessage(getRandText(result.waifu.hidden_message), 1300, true);
257
  window.setTimeout(function() {$('.waifu').hide();}, 1300);
258
  });
259
+
260
  window.showWelcomeMessage = function(result) {
261
  showMessage('欢迎使用GPT-Academic', 6000);
262
  }; if (live2d_settings.showWelcomeMessage) showWelcomeMessage(result);
263
+
264
  var waifu_tips = result.waifu;
265
+
266
  function loadOtherModel() {
267
  var modelId = modelStorageGetItem('modelId');
268
  var modelRandMode = live2d_settings.modelRandMode;
269
+
270
  $.ajax({
271
  cache: modelRandMode == 'switch' ? true : false,
272
  url: live2d_settings.modelAPI+modelRandMode+'/?id='+modelId,
 
279
  }
280
  });
281
  }
282
+
283
  function loadRandTextures() {
284
  var modelId = modelStorageGetItem('modelId');
285
  var modelTexturesId = modelStorageGetItem('modelTexturesId');
286
  var modelTexturesRandMode = live2d_settings.modelTexturesRandMode;
287
+
288
  $.ajax({
289
  cache: modelTexturesRandMode == 'switch' ? true : false,
290
  url: live2d_settings.modelAPI+modelTexturesRandMode+'_textures/?id='+modelId+'-'+modelTexturesId,
 
297
  }
298
  });
299
  }
300
+
301
  function modelStorageGetItem(key) { return live2d_settings.modelStorage ? localStorage.getItem(key) : sessionStorage.getItem(key); }
302
+
303
  /* 检测用户活动状态,并在空闲时显示一言 */
304
  if (live2d_settings.showHitokoto) {
305
  window.getActed = false; window.hitokotoTimer = 0; window.hitokotoInterval = false;
306
  $(document).mousemove(function(e){getActed = true;}).keydown(function(){getActed = true;});
307
  setInterval(function(){ if (!getActed) ifActed(); else elseActed(); }, 1000);
308
  }
309
+
310
  function ifActed() {
311
  if (!hitokotoInterval) {
312
  hitokotoInterval = true;
313
  hitokotoTimer = window.setInterval(showHitokotoActed, 30000);
314
  }
315
  }
316
+
317
  function elseActed() {
318
  getActed = hitokotoInterval = false;
319
  window.clearInterval(hitokotoTimer);
320
  }
321
+
322
  function showHitokotoActed() {
323
  if ($(document)[0].visibilityState == 'visible') showHitokoto();
324
  }
325
+
326
  function showHitokoto() {
327
  switch(live2d_settings.hitokotoAPI) {
328
  case 'lwl12.com':
 
366
  });
367
  }
368
  }
369
+
370
  $('.waifu-tool .fui-eye').click(function (){loadOtherModel()});
371
  $('.waifu-tool .fui-user').click(function (){loadRandTextures()});
372
  $('.waifu-tool .fui-chat').click(function (){showHitokoto()});
docs/waifu_plugin/waifu-tips.json CHANGED
@@ -31,7 +31,7 @@
31
  },
32
  "model_message": {
33
  "1": ["来自 Potion Maker 的 Pio 酱 ~"],
34
- "2": ["来自 Potion Maker 的 Tia 酱 ~"]
35
  },
36
  "hitokoto_api_message": {
37
  "lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
@@ -111,4 +111,4 @@
111
  { "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
112
  { "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
113
  ]
114
- }
 
31
  },
32
  "model_message": {
33
  "1": ["来自 Potion Maker 的 Pio 酱 ~"],
34
+ "2": ["来自 Potion Maker 的 Tia 酱 ~"]
35
  },
36
  "hitokoto_api_message": {
37
  "lwl12.com": ["这句一言来自 <span style=\"color:#0099cc;\">『{source}』</span>", ",是 <span style=\"color:#0099cc;\">{creator}</span> 投稿的", "。"],
 
111
  { "date": "11/05-11/12", "text": ["今年的<span style=\"color:#0099cc;\">双十一</span>是和谁一起过的呢~"] },
112
  { "date": "12/20-12/31", "text": ["这几天是<span style=\"color:#0099cc;\">圣诞节</span>,主人肯定又去剁手买买买了~"] }
113
  ]
114
+ }
docs/waifu_plugin/waifu.css CHANGED
@@ -287,4 +287,4 @@
287
  }
288
  .fui-user:before {
289
  content: "\e631";
290
- }
 
287
  }
288
  .fui-user:before {
289
  content: "\e631";
290
+ }
multi_language.py CHANGED
@@ -352,9 +352,9 @@ def step_1_core_key_translate():
352
  chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
353
  chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))
354
 
355
- # ===============================================
356
  # copy
357
- # ===============================================
358
  def copy_source_code():
359
 
360
  from toolbox import get_conf
@@ -367,9 +367,9 @@ def step_1_core_key_translate():
367
  shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
368
  copy_source_code()
369
 
370
- # ===============================================
371
  # primary key replace
372
- # ===============================================
373
  directory_path = f'./multi-language/{LANG}/'
374
  for root, dirs, files in os.walk(directory_path):
375
  for file in files:
@@ -389,9 +389,9 @@ def step_1_core_key_translate():
389
 
390
  def step_2_core_key_translate():
391
 
392
- # =================================================================================================
393
  # step2
394
- # =================================================================================================
395
 
396
  def load_string(strings, string_input):
397
  string_ = string_input.strip().strip(',').strip().strip('.').strip()
@@ -492,9 +492,9 @@ def step_2_core_key_translate():
492
  cached_translation.update(read_map_from_json(language=LANG_STD))
493
  cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
494
 
495
- # ===============================================
496
  # literal key replace
497
- # ===============================================
498
  directory_path = f'./multi-language/{LANG}/'
499
  for root, dirs, files in os.walk(directory_path):
500
  for file in files:
 
352
  chinese_core_keys_norepeat_mapping.update({k:cached_translation[k]})
353
  chinese_core_keys_norepeat_mapping = dict(sorted(chinese_core_keys_norepeat_mapping.items(), key=lambda x: -len(x[0])))
354
 
355
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
356
  # copy
357
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
358
  def copy_source_code():
359
 
360
  from toolbox import get_conf
 
367
  shutil.copytree('./', backup_dir, ignore=lambda x, y: blacklist)
368
  copy_source_code()
369
 
370
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
371
  # primary key replace
372
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
373
  directory_path = f'./multi-language/{LANG}/'
374
  for root, dirs, files in os.walk(directory_path):
375
  for file in files:
 
389
 
390
  def step_2_core_key_translate():
391
 
392
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
393
  # step2
394
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
395
 
396
  def load_string(strings, string_input):
397
  string_ = string_input.strip().strip(',').strip().strip('.').strip()
 
492
  cached_translation.update(read_map_from_json(language=LANG_STD))
493
  cached_translation = dict(sorted(cached_translation.items(), key=lambda x: -len(x[0])))
494
 
495
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
496
  # literal key replace
497
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
498
  directory_path = f'./multi-language/{LANG}/'
499
  for root, dirs, files in os.walk(directory_path):
500
  for file in files:
request_llms/README.md CHANGED
@@ -32,4 +32,4 @@ P.S. 如果您按照以下步骤成功接入了新的大模型,欢迎发Pull R
32
 
33
  5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)
34
 
35
- 6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
 
32
 
33
  5. 测试通过后,在`request_llms/bridge_all.py`中做最后的修改,把你的模型完全接入到框架中(聪慧如您,只需要看一眼该文件就明白怎么修改了)
34
 
35
+ 6. 修改`LLM_MODEL`配置,然后运行`python main.py`,测试最后的效果
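For step 6, the edit is typically a pair of lines in `config.py`; a sketch with an illustrative model name:

```python
# config.py — assumed minimal edit for step 6 ("my-new-model" is illustrative)
LLM_MODEL = "my-new-model"                            # default model at startup
AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "my-new-model"]  # models selectable in the UI
```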
request_llms/bridge_all.py CHANGED
@@ -28,6 +28,9 @@ from .bridge_chatglm3 import predict as chatglm3_ui
28
  from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
29
  from .bridge_qianfan import predict as qianfan_ui
30
 
 
 
 
31
  colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
32
 
33
  class LazyloadTiktoken(object):
@@ -246,6 +249,22 @@ model_info = {
246
  "tokenizer": tokenizer_gpt35,
247
  "token_cnt": get_token_num_gpt35,
248
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
  }
250
 
251
  # -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
@@ -479,22 +498,6 @@ if "qwen-turbo" in AVAIL_LLM_MODELS or "qwen-plus" in AVAIL_LLM_MODELS or "qwen-
479
  })
480
  except:
481
  print(trimmed_format_exc())
482
- if "chatgpt_website" in AVAIL_LLM_MODELS: # 接入一些逆向工程https://github.com/acheong08/ChatGPT-to-API/
483
- try:
484
- from .bridge_chatgpt_website import predict_no_ui_long_connection as chatgpt_website_noui
485
- from .bridge_chatgpt_website import predict as chatgpt_website_ui
486
- model_info.update({
487
- "chatgpt_website": {
488
- "fn_with_ui": chatgpt_website_ui,
489
- "fn_without_ui": chatgpt_website_noui,
490
- "endpoint": openai_endpoint,
491
- "max_token": 4096,
492
- "tokenizer": tokenizer_gpt35,
493
- "token_cnt": get_token_num_gpt35,
494
- }
495
- })
496
- except:
497
- print(trimmed_format_exc())
498
  if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
499
  try:
500
  from .bridge_spark import predict_no_ui_long_connection as spark_noui
@@ -591,6 +594,23 @@ if "deepseekcoder" in AVAIL_LLM_MODELS: # deepseekcoder
591
  })
592
  except:
593
  print(trimmed_format_exc())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
594
 
595
  # <-- 用于定义和切换多个azure模型 -->
596
  AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
 
28
  from .bridge_qianfan import predict_no_ui_long_connection as qianfan_noui
29
  from .bridge_qianfan import predict as qianfan_ui
30
 
31
+ from .bridge_google_gemini import predict as genai_ui
32
+ from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
33
+
34
  colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
35
 
36
  class LazyloadTiktoken(object):
 
249
  "tokenizer": tokenizer_gpt35,
250
  "token_cnt": get_token_num_gpt35,
251
  },
252
+ "gemini-pro": {
253
+ "fn_with_ui": genai_ui,
254
+ "fn_without_ui": genai_noui,
255
+ "endpoint": None,
256
+ "max_token": 1024 * 32,
257
+ "tokenizer": tokenizer_gpt35,
258
+ "token_cnt": get_token_num_gpt35,
259
+ },
260
+ "gemini-pro-vision": {
261
+ "fn_with_ui": genai_ui,
262
+ "fn_without_ui": genai_noui,
263
+ "endpoint": None,
264
+ "max_token": 1024 * 32,
265
+ "tokenizer": tokenizer_gpt35,
266
+ "token_cnt": get_token_num_gpt35,
267
+ },
268
  }
269
 
270
  # -=-=-=-=-=-=- api2d 对齐支持 -=-=-=-=-=-=-
 
498
  })
499
  except:
500
  print(trimmed_format_exc())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
501
  if "spark" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
502
  try:
503
  from .bridge_spark import predict_no_ui_long_connection as spark_noui
 
594
  })
595
  except:
596
  print(trimmed_format_exc())
597
+ # if "skylark" in AVAIL_LLM_MODELS:
598
+ # try:
599
+ # from .bridge_skylark2 import predict_no_ui_long_connection as skylark_noui
600
+ # from .bridge_skylark2 import predict as skylark_ui
601
+ # model_info.update({
602
+ # "skylark": {
603
+ # "fn_with_ui": skylark_ui,
604
+ # "fn_without_ui": skylark_noui,
605
+ # "endpoint": None,
606
+ # "max_token": 4096,
607
+ # "tokenizer": tokenizer_gpt35,
608
+ # "token_cnt": get_token_num_gpt35,
609
+ # }
610
+ # })
611
+ # except:
612
+ # print(trimmed_format_exc())
613
+
614
 
615
  # <-- 用于定义和切换多个azure模型 -->
616
  AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
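The two gemini entries added above illustrate the registration contract every backend must satisfy in `model_info`. A self-contained sketch of a conforming entry; the "echo" backend and its handlers are hypothetical, but their signatures mirror `bridge_google_gemini` above:

```python
# Hypothetical minimal backend registration, mirroring the gemini-pro entry.
def echo_noui(inputs, llm_kwargs, history=[], sys_prompt="",
              observe_window=None, console_slience=False):
    return f"echo: {inputs}"          # blocking call used by plugins

def echo_ui(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[],
            system_prompt='', stream=True, additional_fn=None):
    chatbot.append((inputs, f"echo: {inputs}"))
    yield chatbot, history            # generator driving the gradio chatbot

model_info_entry = {
    "my-echo-model": {
        "fn_with_ui": echo_ui,
        "fn_without_ui": echo_noui,
        "endpoint": None,             # None: no single fixed HTTP endpoint
        "max_token": 4096,
        "tokenizer": None,            # the project passes a lazy tiktoken here
        "token_cnt": lambda txt: len(txt) // 4,  # crude stand-in counter
    },
}
```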
request_llms/bridge_chatgpt.py CHANGED
@@ -244,6 +244,9 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
244
  if has_choices and not choice_valid:
245
  # 一些垃圾第三方接口的出现这样的错误
246
  continue
 
 
 
247
  # 前者是API2D的结束条件,后者是OPENAI的结束条件
248
  if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
249
  # 判定为数据流的结束,gpt_replying_buffer也写完了
 
244
  if has_choices and not choice_valid:
245
  # 一些垃圾第三方接口的出现这样的错误
246
  continue
247
+ if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
248
+ # 传递进来一些奇怪的东西
249
+ raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')
250
  # 前者是API2D的结束条件,后者是OPENAI的结束条件
251
  if ('data: [DONE]' in chunk_decoded) or (len(chunkjson['choices'][0]["delta"]) == 0):
252
  # 判定为数据流的结束,gpt_replying_buffer也写完了
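The three lines added above guard against endpoints that answer with something other than SSE JSON (for example an HTML error page). Reduced to its decision logic; `chunk_decoded` and `chunkjson` stand in for one decoded stream chunk and its parsed JSON:

```python
def check_chunk(chunk_decoded: str, chunkjson):
    # Non-empty payload that is neither a DONE marker nor valid JSON:
    # almost certainly a misconfigured or broken endpoint.
    if ('data: [DONE]' not in chunk_decoded) and len(chunk_decoded) > 0 and (chunkjson is None):
        raise ValueError(f'无法读取以下数据,请检查配置。\n\n{chunk_decoded}')

check_chunk('data: [DONE]', None)   # normal end-of-stream marker: passes
check_chunk('', None)               # empty keep-alive chunk: passes
# check_chunk('<html>502 Bad Gateway</html>', None)  # would raise ValueError
```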
request_llms/bridge_google_gemini.py ADDED
@@ -0,0 +1,114 @@
 
1
+ # encoding: utf-8
2
+ # @Time : 2023/12/21
3
+ # @Author : Spike
4
+ # @Descr :
5
+ import json
6
+ import re
7
+ import os
8
+ import time
9
+ from request_llms.com_google import GoogleChatInit
10
+ from toolbox import get_conf, update_ui, update_ui_lastest_msg, have_any_recent_upload_image_files, trimmed_format_exc
11
+
12
+ proxies, TIMEOUT_SECONDS, MAX_RETRY = get_conf('proxies', 'TIMEOUT_SECONDS', 'MAX_RETRY')
13
+ timeout_bot_msg = '[Local Message] Request timeout. Network error. Please check proxy settings in config.py.' + \
14
+ '网络错误,检查代理服务器是否可用,以及代理设置的格式是否正确,格式须是[协议]://[地址]:[端口],缺一不可。'
15
+
16
+
17
+ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None,
18
+ console_slience=False):
19
+ # 检查API_KEY
20
+ if get_conf("GEMINI_API_KEY") == "":
21
+ raise ValueError(f"请配置 GEMINI_API_KEY。")
22
+
23
+ genai = GoogleChatInit()
24
+ watch_dog_patience = 5 # 看门狗的耐心, 设置5秒即可
25
+ gpt_replying_buffer = ''
26
+ stream_response = genai.generate_chat(inputs, llm_kwargs, history, sys_prompt)
27
+ for response in stream_response:
28
+ results = response.decode()
29
+ match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
30
+ error_match = re.search(r'\"message\":\s*\"(.*?)\"', results, flags=re.DOTALL)
31
+ if match:
32
+ try:
33
+ paraphrase = json.loads('{"text": "%s"}' % match.group(1))
34
+ except:
35
+ raise ValueError(f"解析GEMINI消息出错。")
36
+ buffer = paraphrase['text']
37
+ gpt_replying_buffer += buffer
38
+ if len(observe_window) >= 1:
39
+ observe_window[0] = gpt_replying_buffer
40
+ if len(observe_window) >= 2:
41
+ if (time.time() - observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
42
+ if error_match:
43
+ raise RuntimeError(f'{gpt_replying_buffer} 对话错误')
44
+ return gpt_replying_buffer
45
+
46
+
47
+ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
48
+ # 检查API_KEY
49
+ if get_conf("GEMINI_API_KEY") == "":
50
+ yield from update_ui_lastest_msg(f"请配置 GEMINI_API_KEY。", chatbot=chatbot, history=history, delay=0)
51
+ return
52
+
53
+ # 适配润色区域
54
+ if additional_fn is not None:
55
+ from core_functional import handle_core_functionality
56
+ inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
57
+
58
+ if "vision" in llm_kwargs["llm_model"]:
59
+ have_recent_file, image_paths = have_any_recent_upload_image_files(chatbot)
60
+ def make_media_input(inputs, image_paths):
61
+ for image_path in image_paths:
62
+ inputs = inputs + f'<br/><br/><div align="center"><img src="file={os.path.abspath(image_path)}"></div>'
63
+ return inputs
64
+ if have_recent_file:
65
+ inputs = make_media_input(inputs, image_paths)
66
+
67
+ chatbot.append((inputs, ""))
68
+ yield from update_ui(chatbot=chatbot, history=history)
69
+ genai = GoogleChatInit()
70
+ retry = 0
71
+ while True:
72
+ try:
73
+ stream_response = genai.generate_chat(inputs, llm_kwargs, history, system_prompt)
74
+ break
75
+ except Exception as e:
76
+ retry += 1
77
+ chatbot[-1] = ((chatbot[-1][0], trimmed_format_exc()))
78
+ yield from update_ui(chatbot=chatbot, history=history, msg="请求失败") # 刷新界面
79
+ return
80
+ gpt_replying_buffer = ""
81
+ gpt_security_policy = ""
82
+ history.extend([inputs, ''])
83
+ for response in stream_response:
84
+ results = response.decode("utf-8") # 被这个解码给耍了。。
85
+ gpt_security_policy += results
86
+ match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', results, flags=re.DOTALL)
87
+ error_match = re.search(r'\"message\":\s*\"(.*)\"', results, flags=re.DOTALL)
88
+ if match:
89
+ try:
90
+ paraphrase = json.loads('{"text": "%s"}' % match.group(1))
91
+ except:
92
+ raise ValueError(f"解析GEMINI消息出错。")
93
+ gpt_replying_buffer += paraphrase['text'] # 使用 json 解析库进行处理
94
+ chatbot[-1] = (inputs, gpt_replying_buffer)
95
+ history[-1] = gpt_replying_buffer
96
+ yield from update_ui(chatbot=chatbot, history=history)
97
+ if error_match:
98
+ history = history[:-2] # 错误的不纳入对话
99
+ chatbot[-1] = (inputs, gpt_replying_buffer + f"对话错误,请查看message\n\n```\n{error_match.group(1)}\n```")
100
+ yield from update_ui(chatbot=chatbot, history=history)
101
+ raise RuntimeError('对话错误')
102
+ if not gpt_replying_buffer:
103
+ history = history[:-2] # 错误的不纳入对话
104
+ chatbot[-1] = (inputs, gpt_replying_buffer + f"触发了Google的安全访问策略,没有回答\n\n```\n{gpt_security_policy}\n```")
105
+ yield from update_ui(chatbot=chatbot, history=history)
106
+
107
+
108
+
109
+ if __name__ == '__main__':
110
+ import sys
111
+ llm_kwargs = {'llm_model': 'gemini-pro'}
112
+ result = predict('Write a long story about a magic backpack.', llm_kwargs, llm_kwargs, [])
113
+ for i in result:
114
+ print(i)
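
Note: `bridge_google_gemini.py` recovers the streamed reply by regex-matching the `"text"` field of each chunk, then re-wrapping the escaped fragment so `json.loads` handles `\n`, `\"`, `\uXXXX` and similar escapes. A standalone sketch of that extraction (the sample payload is fabricated for illustration):

```python
import json
import re

def extract_text(raw_line: str) -> str:
    # Same pattern as the bridge: grab the escaped "text" value verbatim.
    match = re.search(r'"text":\s*"((?:[^"\\]|\\.)*)"', raw_line, flags=re.DOTALL)
    if not match:
        return ""
    # Re-wrap so the JSON parser performs the unescaping.
    return json.loads('{"text": "%s"}' % match.group(1))["text"]

if __name__ == "__main__":
    sample = '{"candidates": [{"content": {"parts": [{"text": "Hello\\nworld"}]}}]}'
    assert extract_text(sample) == "Hello\nworld"
    print(extract_text(sample))
```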
request_llms/bridge_newbingfree.py CHANGED
@@ -1,16 +1,17 @@
1
  """
2
- ========================================================================
3
  第一部分:来自EdgeGPT.py
4
  https://github.com/acheong08/EdgeGPT
5
- ========================================================================
6
  """
7
  from .edge_gpt_free import Chatbot as NewbingChatbot
 
8
  load_message = "等待NewBing响应。"
9
 
10
  """
11
- ========================================================================
12
  第二部分:子进程Worker(调用主体)
13
- ========================================================================
14
  """
15
  import time
16
  import json
@@ -22,19 +23,30 @@ import threading
22
  from toolbox import update_ui, get_conf, trimmed_format_exc
23
  from multiprocessing import Process, Pipe
24
 
 
25
  def preprocess_newbing_out(s):
26
- pattern = r'\^(\d+)\^' # 匹配^数字^
27
- sub = lambda m: '('+m.group(1)+')' # 将匹配到的数字作为替换值
28
- result = re.sub(pattern, sub, s) # 替换操作
29
- if '[1]' in result:
30
- result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
 
31
  return result
32
 
 
33
  def preprocess_newbing_out_simple(result):
34
- if '[1]' in result:
35
- result += '\n\n```reference\n' + "\n".join([r for r in result.split('\n') if r.startswith('[')]) + '\n```\n'
 
36
  return result
37
 
 
38
  class NewBingHandle(Process):
39
  def __init__(self):
40
  super().__init__(daemon=True)
@@ -46,11 +58,12 @@ class NewBingHandle(Process):
46
  self.check_dependency()
47
  self.start()
48
  self.threadLock = threading.Lock()
49
-
50
  def check_dependency(self):
51
  try:
52
  self.success = False
53
  import certifi, httpx, rich
 
54
  self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
55
  self.success = True
56
  except:
@@ -62,18 +75,19 @@ class NewBingHandle(Process):
62
 
63
  async def async_run(self):
64
  # 读取配置
65
- NEWBING_STYLE = get_conf('NEWBING_STYLE')
66
  from request_llms.bridge_all import model_info
67
- endpoint = model_info['newbing']['endpoint']
 
68
  while True:
69
  # 等待
70
  kwargs = self.child.recv()
71
- question=kwargs['query']
72
- history=kwargs['history']
73
- system_prompt=kwargs['system_prompt']
74
 
75
  # 是否重置
76
- if len(self.local_history) > 0 and len(history)==0:
77
  await self.newbing_model.reset()
78
  self.local_history = []
79
 
@@ -81,34 +95,33 @@ class NewBingHandle(Process):
81
  prompt = ""
82
  if system_prompt not in self.local_history:
83
  self.local_history.append(system_prompt)
84
- prompt += system_prompt + '\n'
85
 
86
  # 追加历史
87
  for ab in history:
88
  a, b = ab
89
  if a not in self.local_history:
90
  self.local_history.append(a)
91
- prompt += a + '\n'
92
 
93
  # 问题
94
  prompt += question
95
  self.local_history.append(question)
96
- print('question:', prompt)
97
  # 提交
98
  async for final, response in self.newbing_model.ask_stream(
99
  prompt=question,
100
- conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
101
- wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
102
  ):
103
  if not final:
104
  print(response)
105
  self.child.send(str(response))
106
  else:
107
- print('-------- receive final ---------')
108
- self.child.send('[Finish]')
109
  # self.local_history.append(response)
110
 
111
-
112
  def run(self):
113
  """
114
  这个函数运行在子进程
@@ -118,32 +131,37 @@ class NewBingHandle(Process):
118
  self.local_history = []
119
  if (self.newbing_model is None) or (not self.success):
120
  # 代理设置
121
- proxies, NEWBING_COOKIES = get_conf('proxies', 'NEWBING_COOKIES')
122
- if proxies is None:
123
  self.proxies_https = None
124
- else:
125
- self.proxies_https = proxies['https']
126
 
127
  if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
128
  try:
129
  cookies = json.loads(NEWBING_COOKIES)
130
  except:
131
  self.success = False
132
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
133
- self.child.send(f'[Local Message] NEWBING_COOKIES未填写或有格式错误。')
134
- self.child.send('[Fail]'); self.child.send('[Finish]')
 
135
  raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
136
  else:
137
  cookies = None
138
 
139
  try:
140
- self.newbing_model = NewbingChatbot(proxy=self.proxies_https, cookies=cookies)
 
 
141
  except:
142
  self.success = False
143
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
144
- self.child.send(f'[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}')
145
- self.child.send('[Fail]')
146
- self.child.send('[Finish]')
 
 
147
  raise RuntimeError(f"不能加载Newbing组件,请注意Newbing组件已不再维护。")
148
 
149
  self.success = True
@@ -151,66 +169,100 @@ class NewBingHandle(Process):
151
  # 进入任务等待状态
152
  asyncio.run(self.async_run())
153
  except Exception:
154
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
155
- self.child.send(f'[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}.')
156
- self.child.send('[Fail]')
157
- self.child.send('[Finish]')
158
-
 
 
159
  def stream_chat(self, **kwargs):
160
  """
161
  这个函数运行在主进程
162
  """
163
- self.threadLock.acquire() # 获取线程锁
164
- self.parent.send(kwargs) # 请求子进程
165
  while True:
166
- res = self.parent.recv() # 等待newbing回复的片段
167
- if res == '[Finish]': break # 结束
168
- elif res == '[Fail]': self.success = False; break # 失败
169
- else: yield res # newbing回复的片段
170
- self.threadLock.release() # 释放线程锁
 
171
 
172
 
173
  """
174
- ========================================================================
175
  第三部分:主进程统一调用函数接口
176
- ========================================================================
177
  """
178
  global newbingfree_handle
179
  newbingfree_handle = None
180
 
181
- def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
 
182
  """
183
- 多线程方法
184
- 函数的说明请见 request_llms/bridge_all.py
185
  """
186
  global newbingfree_handle
187
  if (newbingfree_handle is None) or (not newbingfree_handle.success):
188
  newbingfree_handle = NewBingHandle()
189
- if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
190
- if not newbingfree_handle.success:
 
191
  error = newbingfree_handle.info
192
  newbingfree_handle = None
193
  raise RuntimeError(error)
194
 
195
  # 没有 sys_prompt 接口,因此把prompt加入 history
196
  history_feedin = []
197
- for i in range(len(history)//2):
198
- history_feedin.append([history[2*i], history[2*i+1]] )
199
 
200
- watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
201
  response = ""
202
- if len(observe_window) >= 1: observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
203
- for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
204
- if len(observe_window) >= 1: observe_window[0] = preprocess_newbing_out_simple(response)
205
- if len(observe_window) >= 2:
206
- if (time.time()-observe_window[1]) > watch_dog_patience:
 
207
  raise RuntimeError("程序终止。")
208
  return preprocess_newbing_out_simple(response)
209
 
210
- def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
 
211
  """
212
- 单线程方法
213
- 函数的说明请见 request_llms/bridge_all.py
214
  """
215
  chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ..."))
216
 
@@ -219,27 +271,41 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
219
  newbingfree_handle = NewBingHandle()
220
  chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
221
  yield from update_ui(chatbot=chatbot, history=[])
222
- if not newbingfree_handle.success:
223
  newbingfree_handle = None
224
  return
225
 
226
  if additional_fn is not None:
227
  from core_functional import handle_core_functionality
228
- inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
 
229
 
230
  history_feedin = []
231
- for i in range(len(history)//2):
232
- history_feedin.append([history[2*i], history[2*i+1]] )
233
 
234
  chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
235
  response = "[Local Message] 等待NewBing响应中 ..."
236
- yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
237
- for response in newbingfree_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
 
238
  chatbot[-1] = (inputs, preprocess_newbing_out(response))
239
- yield from update_ui(chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
240
- if response == "[Local Message] 等待NewBing响应中 ...": response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
 
241
  history.extend([inputs, response])
242
- logging.info(f'[raw_input] {inputs}')
243
- logging.info(f'[response] {response}')
244
  yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
245
-
 
1
  """
2
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
3
  第一部分:来自EdgeGPT.py
4
  https://github.com/acheong08/EdgeGPT
5
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
6
  """
7
  from .edge_gpt_free import Chatbot as NewbingChatbot
8
+
9
  load_message = "等待NewBing响应。"
10
 
11
  """
12
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
13
  第二部分:子进程Worker(调用主体)
14
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
15
  """
16
  import time
17
  import json
 
23
  from toolbox import update_ui, get_conf, trimmed_format_exc
24
  from multiprocessing import Process, Pipe
25
 
26
+
27
  def preprocess_newbing_out(s):
28
+ pattern = r"\^(\d+)\^" # 匹配^数字^
29
+ sub = lambda m: "(" + m.group(1) + ")" # 将匹配到的数字作为替换值
30
+ result = re.sub(pattern, sub, s) # 替换操作
31
+ if "[1]" in result:
32
+ result += (
33
+ "\n\n```reference\n"
34
+ + "\n".join([r for r in result.split("\n") if r.startswith("[")])
35
+ + "\n```\n"
36
+ )
37
  return result
38
 
39
+
40
  def preprocess_newbing_out_simple(result):
41
+ if "[1]" in result:
42
+ result += (
43
+ "\n\n```reference\n"
44
+ + "\n".join([r for r in result.split("\n") if r.startswith("[")])
45
+ + "\n```\n"
46
+ )
47
  return result
48
 
49
+
50
  class NewBingHandle(Process):
51
  def __init__(self):
52
  super().__init__(daemon=True)
 
58
  self.check_dependency()
59
  self.start()
60
  self.threadLock = threading.Lock()
61
+
62
  def check_dependency(self):
63
  try:
64
  self.success = False
65
  import certifi, httpx, rich
66
+
67
  self.info = "依赖检测通过,等待NewBing响应。注意目前不能多人同时调用NewBing接口(有线程锁),否则将导致每个人的NewBing问询历史互相渗透。调用NewBing时,会自动使用已配置的代理。"
68
  self.success = True
69
  except:
 
75
 
76
  async def async_run(self):
77
  # 读取配置
78
+ NEWBING_STYLE = get_conf("NEWBING_STYLE")
79
  from request_llms.bridge_all import model_info
80
+
81
+ endpoint = model_info["newbing"]["endpoint"]
82
  while True:
83
  # 等待
84
  kwargs = self.child.recv()
85
+ question = kwargs["query"]
86
+ history = kwargs["history"]
87
+ system_prompt = kwargs["system_prompt"]
88
 
89
  # 是否重置
90
+ if len(self.local_history) > 0 and len(history) == 0:
91
  await self.newbing_model.reset()
92
  self.local_history = []
93
 
 
95
  prompt = ""
96
  if system_prompt not in self.local_history:
97
  self.local_history.append(system_prompt)
98
+ prompt += system_prompt + "\n"
99
 
100
  # 追加历史
101
  for ab in history:
102
  a, b = ab
103
  if a not in self.local_history:
104
  self.local_history.append(a)
105
+ prompt += a + "\n"
106
 
107
  # 问题
108
  prompt += question
109
  self.local_history.append(question)
110
+ print("question:", prompt)
111
  # 提交
112
  async for final, response in self.newbing_model.ask_stream(
113
  prompt=question,
114
+ conversation_style=NEWBING_STYLE, # ["creative", "balanced", "precise"]
115
+ wss_link=endpoint, # "wss://sydney.bing.com/sydney/ChatHub"
116
  ):
117
  if not final:
118
  print(response)
119
  self.child.send(str(response))
120
  else:
121
+ print("-------- receive final ---------")
122
+ self.child.send("[Finish]")
123
  # self.local_history.append(response)
124
 
 
125
  def run(self):
126
  """
127
  这个函数运行在子进程
 
131
  self.local_history = []
132
  if (self.newbing_model is None) or (not self.success):
133
  # 代理设置
134
+ proxies, NEWBING_COOKIES = get_conf("proxies", "NEWBING_COOKIES")
135
+ if proxies is None:
136
  self.proxies_https = None
137
+ else:
138
+ self.proxies_https = proxies["https"]
139
 
140
  if (NEWBING_COOKIES is not None) and len(NEWBING_COOKIES) > 100:
141
  try:
142
  cookies = json.loads(NEWBING_COOKIES)
143
  except:
144
  self.success = False
145
+ tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
146
+ self.child.send(f"[Local Message] NEWBING_COOKIES未填写或有格式错误。")
147
+ self.child.send("[Fail]")
148
+ self.child.send("[Finish]")
149
  raise RuntimeError(f"NEWBING_COOKIES未填写或有格式错误。")
150
  else:
151
  cookies = None
152
 
153
  try:
154
+ self.newbing_model = NewbingChatbot(
155
+ proxy=self.proxies_https, cookies=cookies
156
+ )
157
  except:
158
  self.success = False
159
+ tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
160
+ self.child.send(
161
+ f"[Local Message] 不能加载Newbing组件,请注意Newbing组件已不再维护。{tb_str}"
162
+ )
163
+ self.child.send("[Fail]")
164
+ self.child.send("[Finish]")
165
  raise RuntimeError(f"不能加载Newbing组件,请注意Newbing组件已不再维护。")
166
 
167
  self.success = True
 
169
  # 进入任务等待状态
170
  asyncio.run(self.async_run())
171
  except Exception:
172
+ tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
173
+ self.child.send(
174
+ f"[Local Message] Newbing 请求失败,报错信息如下. 如果是与网络相关的问题,建议更换代理协议(推荐http)或代理节点 {tb_str}."
175
+ )
176
+ self.child.send("[Fail]")
177
+ self.child.send("[Finish]")
178
+
179
  def stream_chat(self, **kwargs):
180
  """
181
  这个函数运行在主进程
182
  """
183
+ self.threadLock.acquire() # 获取线程锁
184
+ self.parent.send(kwargs) # 请求子进程
185
  while True:
186
+ res = self.parent.recv() # 等待newbing回复的片段
187
+ if res == "[Finish]":
188
+ break # 结束
189
+ elif res == "[Fail]":
190
+ self.success = False
191
+ break # 失败
192
+ else:
193
+ yield res # newbing回复的片段
194
+ self.threadLock.release() # 释放线程锁
195
 
196
 
197
  """
198
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
199
  第三部分:主进程统一调用函数接口
200
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
201
  """
202
  global newbingfree_handle
203
  newbingfree_handle = None
204
 
205
+
206
+ def predict_no_ui_long_connection(
207
+ inputs,
208
+ llm_kwargs,
209
+ history=[],
210
+ sys_prompt="",
211
+ observe_window=[],
212
+ console_slience=False,
213
+ ):
214
  """
215
+ 多线程方法
216
+ 函数的说明请见 request_llms/bridge_all.py
217
  """
218
  global newbingfree_handle
219
  if (newbingfree_handle is None) or (not newbingfree_handle.success):
220
  newbingfree_handle = NewBingHandle()
221
+ if len(observe_window) >= 1:
222
+ observe_window[0] = load_message + "\n\n" + newbingfree_handle.info
223
+ if not newbingfree_handle.success:
224
  error = newbingfree_handle.info
225
  newbingfree_handle = None
226
  raise RuntimeError(error)
227
 
228
  # 没有 sys_prompt 接口,因此把prompt加入 history
229
  history_feedin = []
230
+ for i in range(len(history) // 2):
231
+ history_feedin.append([history[2 * i], history[2 * i + 1]])
232
 
233
+ watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
234
  response = ""
235
+ if len(observe_window) >= 1:
236
+ observe_window[0] = "[Local Message] 等待NewBing响应中 ..."
237
+ for response in newbingfree_handle.stream_chat(
238
+ query=inputs,
239
+ history=history_feedin,
240
+ system_prompt=sys_prompt,
241
+ max_length=llm_kwargs["max_length"],
242
+ top_p=llm_kwargs["top_p"],
243
+ temperature=llm_kwargs["temperature"],
244
+ ):
245
+ if len(observe_window) >= 1:
246
+ observe_window[0] = preprocess_newbing_out_simple(response)
247
+ if len(observe_window) >= 2:
248
+ if (time.time() - observe_window[1]) > watch_dog_patience:
249
  raise RuntimeError("程序终止。")
250
  return preprocess_newbing_out_simple(response)
251
 
252
+
253
+ def predict(
254
+ inputs,
255
+ llm_kwargs,
256
+ plugin_kwargs,
257
+ chatbot,
258
+ history=[],
259
+ system_prompt="",
260
+ stream=True,
261
+ additional_fn=None,
262
+ ):
263
  """
264
+ 单线程方法
265
+ 函数的说明请见 request_llms/bridge_all.py
266
  """
267
  chatbot.append((inputs, "[Local Message] 等待NewBing响应中 ..."))
268
 
 
271
  newbingfree_handle = NewBingHandle()
272
  chatbot[-1] = (inputs, load_message + "\n\n" + newbingfree_handle.info)
273
  yield from update_ui(chatbot=chatbot, history=[])
274
+ if not newbingfree_handle.success:
275
  newbingfree_handle = None
276
  return
277
 
278
  if additional_fn is not None:
279
  from core_functional import handle_core_functionality
280
+
281
+ inputs, history = handle_core_functionality(
282
+ additional_fn, inputs, history, chatbot
283
+ )
284
 
285
  history_feedin = []
286
+ for i in range(len(history) // 2):
287
+ history_feedin.append([history[2 * i], history[2 * i + 1]])
288
 
289
  chatbot[-1] = (inputs, "[Local Message] 等待NewBing响应中 ...")
290
  response = "[Local Message] 等待NewBing响应中 ..."
291
+ yield from update_ui(
292
+ chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
293
+ )
294
+ for response in newbingfree_handle.stream_chat(
295
+ query=inputs,
296
+ history=history_feedin,
297
+ system_prompt=system_prompt,
298
+ max_length=llm_kwargs["max_length"],
299
+ top_p=llm_kwargs["top_p"],
300
+ temperature=llm_kwargs["temperature"],
301
+ ):
302
  chatbot[-1] = (inputs, preprocess_newbing_out(response))
303
+ yield from update_ui(
304
+ chatbot=chatbot, history=history, msg="NewBing响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
305
+ )
306
+ if response == "[Local Message] 等待NewBing响应中 ...":
307
+ response = "[Local Message] NewBing响应异常,请刷新界面重试 ..."
308
  history.extend([inputs, response])
309
+ logging.info(f"[raw_input] {inputs}")
310
+ logging.info(f"[response] {response}")
311
  yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
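
Note: the worker reformatted above streams text fragments back to the main process over a `multiprocessing.Pipe` and terminates the stream with `[Fail]` / `[Finish]` sentinel strings. A minimal runnable sketch of that protocol (the fake worker is illustrative, not the real NewBing client):

```python
from multiprocessing import Pipe, Process

def worker(child):
    try:
        for fragment in ["Hello", "Hello, wor", "Hello, world"]:
            child.send(fragment)   # partial responses, newest state each time
        child.send("[Finish]")     # normal termination
    except Exception:
        child.send("[Fail]")       # error path, mirrors the bridge
        child.send("[Finish]")

def stream_chat(parent):
    while True:
        res = parent.recv()
        if res == "[Finish]":
            break                  # stream is complete
        elif res == "[Fail]":
            break                  # worker reported an error
        else:
            yield res              # a reply fragment

if __name__ == "__main__":
    parent, child = Pipe()
    p = Process(target=worker, args=(child,), daemon=True)
    p.start()
    for piece in stream_chat(parent):
        print(piece)
    p.join()
```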
 
request_llms/bridge_skylark2.py ADDED
@@ -0,0 +1,67 @@
 
1
+ import time
2
+ from toolbox import update_ui, get_conf, update_ui_lastest_msg
3
+ from toolbox import check_packages, report_exception
4
+
5
+ model_name = '云雀大模型'
6
+
7
+ def validate_key():
8
+ YUNQUE_SECRET_KEY = get_conf("YUNQUE_SECRET_KEY")
9
+ if YUNQUE_SECRET_KEY == '': return False
10
+ return True
11
+
12
+ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
13
+ """
14
+ ⭐ 多线程方法
15
+ 函数的说明请见 request_llms/bridge_all.py
16
+ """
17
+ watch_dog_patience = 5
18
+ response = ""
19
+
20
+ if validate_key() is False:
21
+ raise RuntimeError('请配置YUNQUE_SECRET_KEY')
22
+
23
+ from .com_skylark2api import YUNQUERequestInstance
24
+ sri = YUNQUERequestInstance()
25
+ for response in sri.generate(inputs, llm_kwargs, history, sys_prompt):
26
+ if len(observe_window) >= 1:
27
+ observe_window[0] = response
28
+ if len(observe_window) >= 2:
29
+ if (time.time()-observe_window[1]) > watch_dog_patience: raise RuntimeError("程序终止。")
30
+ return response
31
+
32
+ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
33
+ """
34
+ ⭐ 单线程方法
35
+ 函数的说明请见 request_llms/bridge_all.py
36
+ """
37
+ chatbot.append((inputs, ""))
38
+ yield from update_ui(chatbot=chatbot, history=history)
39
+
40
+ # 尝试导入依赖,如果缺少依赖,则给出安装建议
41
+ try:
42
+ check_packages(["zhipuai"])
43
+ except:
44
+ yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
45
+ chatbot=chatbot, history=history, delay=0)
46
+ return
47
+
48
+ if validate_key() is False:
49
+ yield from update_ui_lastest_msg(lastmsg="[Local Message] 请配置YUNQUE_SECRET_KEY", chatbot=chatbot, history=history, delay=0)
50
+ return
51
+
52
+ if additional_fn is not None:
53
+ from core_functional import handle_core_functionality
54
+ inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
55
+
56
+ # 开始接收回复
57
+ from .com_skylark2api import YUNQUERequestInstance
58
+ sri = YUNQUERequestInstance()
59
+ for response in sri.generate(inputs, llm_kwargs, history, system_prompt):
60
+ chatbot[-1] = (inputs, response)
61
+ yield from update_ui(chatbot=chatbot, history=history)
62
+
63
+ # 总结输出
64
+ if response == f"[Local Message] 等待{model_name}响应中 ...":
65
+ response = f"[Local Message] {model_name}响应异常 ..."
66
+ history.extend([inputs, response])
67
+ yield from update_ui(chatbot=chatbot, history=history)
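
Note: like the other bridges, this file uses the `observe_window` watchdog convention: slot 0 publishes the partial reply to the caller, slot 1 is a heartbeat timestamp the caller keeps refreshing, and a stale heartbeat aborts the worker. A minimal sketch of that convention (the chunk source is fabricated):

```python
import time

def stream_with_watchdog(chunks, observe_window, watch_dog_patience=5):
    response = ""
    for chunk in chunks:
        response += chunk
        if len(observe_window) >= 1:
            observe_window[0] = response          # publish progress
        if len(observe_window) >= 2:
            if (time.time() - observe_window[1]) > watch_dog_patience:
                raise RuntimeError("程序终止。")    # caller stopped refreshing
    return response

if __name__ == "__main__":
    window = ["", time.time()]
    print(stream_with_watchdog(["云雀", "大模型"], window))
```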
request_llms/bridge_stackclaude.py CHANGED
@@ -7,14 +7,15 @@ import logging
7
  import time
8
  from toolbox import get_conf
9
  import asyncio
 
10
  load_message = "正在加载Claude组件,请稍候..."
11
 
12
  try:
13
  """
14
- ========================================================================
15
  第一部分:Slack API Client
16
  https://github.com/yokonsan/claude-in-slack-api
17
- ========================================================================
18
  """
19
 
20
  from slack_sdk.errors import SlackApiError
@@ -23,20 +24,23 @@ try:
23
  class SlackClient(AsyncWebClient):
24
  """SlackClient类用于与Slack API进行交互,实现消息发送、接收等功能。
25
 
26
- 属性:
27
- - CHANNEL_ID:str类型,表示频道ID。
28
 
29
- 方法:
30
- - open_channel():异步方法。通过调用conversations_open方法打开一个频道,并将返回的频道ID保存在属性CHANNEL_ID中。
31
- - chat(text: str):异步方法。向已打开的频道发送一条文本消息。
32
- - get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。
33
- - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。
34
 
35
  """
 
36
  CHANNEL_ID = None
37
 
38
  async def open_channel(self):
39
- response = await self.conversations_open(users=get_conf('SLACK_CLAUDE_BOT_ID'))
 
 
40
  self.CHANNEL_ID = response["channel"]["id"]
41
 
42
  async def chat(self, text):
@@ -49,33 +53,39 @@ try:
49
  async def get_slack_messages(self):
50
  try:
51
  # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题
52
- resp = await self.conversations_history(channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1)
53
- msg = [msg for msg in resp["messages"]
54
- if msg.get("user") == get_conf('SLACK_CLAUDE_BOT_ID')]
 
55
  return msg
56
  except (SlackApiError, KeyError) as e:
57
  raise RuntimeError(f"获取Slack消息失败。")
58
-
59
  async def get_reply(self):
60
  while True:
61
  slack_msgs = await self.get_slack_messages()
62
  if len(slack_msgs) == 0:
63
  await asyncio.sleep(0.5)
64
  continue
65
-
66
  msg = slack_msgs[-1]
67
  if msg["text"].endswith("Typing…_"):
68
  yield False, msg["text"]
69
  else:
70
  yield True, msg["text"]
71
  break
 
72
  except:
73
  pass
74
 
75
  """
76
- ========================================================================
77
  第二部分:子进程Worker(调用主体)
78
- ========================================================================
79
  """
80
 
81
 
@@ -88,7 +98,7 @@ class ClaudeHandle(Process):
88
  self.success = True
89
  self.local_history = []
90
  self.check_dependency()
91
- if self.success:
92
  self.start()
93
  self.threadLock = threading.Lock()
94
 
@@ -96,6 +106,7 @@ class ClaudeHandle(Process):
96
  try:
97
  self.success = False
98
  import slack_sdk
 
99
  self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
100
  self.success = True
101
  except:
@@ -103,40 +114,44 @@ class ClaudeHandle(Process):
103
  self.success = False
104
 
105
  def ready(self):
106
- return self.claude_model is not None
107
-
108
  async def async_run(self):
109
  await self.claude_model.open_channel()
110
  while True:
111
  # 等待
112
  kwargs = self.child.recv()
113
- question = kwargs['query']
114
- history = kwargs['history']
115
 
116
  # 开始问问题
117
  prompt = ""
118
 
119
  # 问题
120
  prompt += question
121
- print('question:', prompt)
122
 
123
  # 提交
124
  await self.claude_model.chat(prompt)
125
-
126
  # 获取回复
127
- async for final, response in self.claude_model.get_reply():
128
  if not final:
129
  print(response)
130
  self.child.send(str(response))
131
  else:
132
  # 防止丢失最后一条消息
133
  slack_msgs = await self.claude_model.get_slack_messages()
134
- last_msg = slack_msgs[-1]["text"] if slack_msgs and len(slack_msgs) > 0 else ""
 
 
 
 
135
  if last_msg:
136
  self.child.send(last_msg)
137
- print('-------- receive final ---------')
138
- self.child.send('[Finish]')
139
-
140
  def run(self):
141
  """
142
  这个函数运行在子进程
@@ -146,22 +161,24 @@ class ClaudeHandle(Process):
146
  self.local_history = []
147
  if (self.claude_model is None) or (not self.success):
148
  # 代理设置
149
- proxies = get_conf('proxies')
150
  if proxies is None:
151
  self.proxies_https = None
152
  else:
153
- self.proxies_https = proxies['https']
154
 
155
  try:
156
- SLACK_CLAUDE_USER_TOKEN = get_conf('SLACK_CLAUDE_USER_TOKEN')
157
- self.claude_model = SlackClient(token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https)
158
- print('Claude组件初始化成功。')
 
 
159
  except:
160
  self.success = False
161
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
162
- self.child.send(f'[Local Message] 不能加载Claude组件。{tb_str}')
163
- self.child.send('[Fail]')
164
- self.child.send('[Finish]')
165
  raise RuntimeError(f"不能加载Claude组件。")
166
 
167
  self.success = True
@@ -169,42 +186,49 @@ class ClaudeHandle(Process):
169
  # 进入任务等待状态
170
  asyncio.run(self.async_run())
171
  except Exception:
172
- tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n'
173
- self.child.send(f'[Local Message] Claude失败 {tb_str}.')
174
- self.child.send('[Fail]')
175
- self.child.send('[Finish]')
176
 
177
  def stream_chat(self, **kwargs):
178
  """
179
  这个函数运行在主进程
180
  """
181
  self.threadLock.acquire()
182
- self.parent.send(kwargs) # 发送请求到子进程
183
  while True:
184
- res = self.parent.recv() # 等待Claude回复的片段
185
- if res == '[Finish]':
186
- break # 结束
187
- elif res == '[Fail]':
188
  self.success = False
189
  break
190
  else:
191
- yield res # Claude回复的片段
192
  self.threadLock.release()
193
 
194
 
195
  """
196
- ========================================================================
197
  第三部分:主进程统一调用函数接口
198
- ========================================================================
199
  """
200
  global claude_handle
201
  claude_handle = None
202
 
203
 
204
- def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
 
205
  """
206
- 多线程方法
207
- 函数的说明请见 request_llms/bridge_all.py
208
  """
209
  global claude_handle
210
  if (claude_handle is None) or (not claude_handle.success):
@@ -217,24 +241,40 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="",
217
 
218
  # 没有 sys_prompt 接口,因此把prompt加入 history
219
  history_feedin = []
220
- for i in range(len(history)//2):
221
- history_feedin.append([history[2*i], history[2*i+1]])
222
 
223
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
224
  response = ""
225
  observe_window[0] = "[Local Message] 等待Claude响应中 ..."
226
- for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
 
227
  observe_window[0] = preprocess_newbing_out_simple(response)
228
  if len(observe_window) >= 2:
229
- if (time.time()-observe_window[1]) > watch_dog_patience:
230
  raise RuntimeError("程序终止。")
231
  return preprocess_newbing_out_simple(response)
232
 
233
 
234
- def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
 
235
  """
236
- 单线程方法
237
- 函数的说明请见 request_llms/bridge_all.py
238
  """
239
  chatbot.append((inputs, "[Local Message] 等待Claude响应中 ..."))
240
 
@@ -249,21 +289,30 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
249
 
250
  if additional_fn is not None:
251
  from core_functional import handle_core_functionality
252
- inputs, history = handle_core_functionality(additional_fn, inputs, history, chatbot)
 
253
 
254
  history_feedin = []
255
- for i in range(len(history)//2):
256
- history_feedin.append([history[2*i], history[2*i+1]])
257
 
258
  chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...")
259
  response = "[Local Message] 等待Claude响应中 ..."
260
- yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
261
- for response in claude_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt):
 
262
  chatbot[-1] = (inputs, preprocess_newbing_out(response))
263
- yield from update_ui(chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。")
 
 
264
  if response == "[Local Message] 等待Claude响应中 ...":
265
  response = "[Local Message] Claude响应异常,请刷新界面重试 ..."
266
  history.extend([inputs, response])
267
- logging.info(f'[raw_input] {inputs}')
268
- logging.info(f'[response] {response}')
269
  yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
 
7
  import time
8
  from toolbox import get_conf
9
  import asyncio
10
+
11
  load_message = "正在加载Claude组件,请稍候..."
12
 
13
  try:
14
  """
15
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
16
  第一部分:Slack API Client
17
  https://github.com/yokonsan/claude-in-slack-api
18
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
19
  """
20
 
21
  from slack_sdk.errors import SlackApiError
 
24
  class SlackClient(AsyncWebClient):
25
  """SlackClient类用于与Slack API进行交互,实现消息发送、接收等功能。
26
 
27
+ 属性:
28
+ - CHANNEL_ID:str类型,表示频道ID。
29
 
30
+ 方法:
31
+ - open_channel():异步方法。通过调用conversations_open方法打开一个频道,并将返回的频道ID保存在属性CHANNEL_ID中。
32
+ - chat(text: str):异步方法。向已打开的频道发送一条文本消息。
33
+ - get_slack_messages():异步方法。获取已打开频道的最新消息并返回消息列表,目前不支持历史消息查询。
34
+ - get_reply():异步方法。循环监听已打开频道的消息,如果收到"Typing…_"结尾的消息说明Claude还在继续输出,否则结束循环。
35
 
36
  """
37
+
38
  CHANNEL_ID = None
39
 
40
  async def open_channel(self):
41
+ response = await self.conversations_open(
42
+ users=get_conf("SLACK_CLAUDE_BOT_ID")
43
+ )
44
  self.CHANNEL_ID = response["channel"]["id"]
45
 
46
  async def chat(self, text):
 
53
  async def get_slack_messages(self):
54
  try:
55
  # TODO:暂时不支持历史消息,因为在同一个频道里存在多人使用时历史消息渗透问题
56
+ resp = await self.conversations_history(
57
+ channel=self.CHANNEL_ID, oldest=self.LAST_TS, limit=1
58
+ )
59
+ msg = [
60
+ msg
61
+ for msg in resp["messages"]
62
+ if msg.get("user") == get_conf("SLACK_CLAUDE_BOT_ID")
63
+ ]
64
  return msg
65
  except (SlackApiError, KeyError) as e:
66
  raise RuntimeError(f"获取Slack消息失败。")
67
+
68
  async def get_reply(self):
69
  while True:
70
  slack_msgs = await self.get_slack_messages()
71
  if len(slack_msgs) == 0:
72
  await asyncio.sleep(0.5)
73
  continue
74
+
75
  msg = slack_msgs[-1]
76
  if msg["text"].endswith("Typing…_"):
77
  yield False, msg["text"]
78
  else:
79
  yield True, msg["text"]
80
  break
81
+
82
  except:
83
  pass
84
 
85
  """
86
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
87
  第二部分:子进程Worker(调用主体)
88
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
89
  """
90
 
91
 
 
98
  self.success = True
99
  self.local_history = []
100
  self.check_dependency()
101
+ if self.success:
102
  self.start()
103
  self.threadLock = threading.Lock()
104
 
 
106
  try:
107
  self.success = False
108
  import slack_sdk
109
+
110
  self.info = "依赖检测通过,等待Claude响应。注意目前不能多人同时调用Claude接口(有线程锁),否则将导致每个人的Claude问询历史互相渗透。调用Claude时,会自动使用已配置的代理。"
111
  self.success = True
112
  except:
 
114
  self.success = False
115
 
116
  def ready(self):
117
+ return self.claude_model is not None
118
+
119
  async def async_run(self):
120
  await self.claude_model.open_channel()
121
  while True:
122
  # 等待
123
  kwargs = self.child.recv()
124
+ question = kwargs["query"]
125
+ history = kwargs["history"]
126
 
127
  # 开始问问题
128
  prompt = ""
129
 
130
  # 问题
131
  prompt += question
132
+ print("question:", prompt)
133
 
134
  # 提交
135
  await self.claude_model.chat(prompt)
136
+
137
  # 获取回复
138
+ async for final, response in self.claude_model.get_reply():
139
  if not final:
140
  print(response)
141
  self.child.send(str(response))
142
  else:
143
  # 防止丢失最后一条消息
144
  slack_msgs = await self.claude_model.get_slack_messages()
145
+ last_msg = (
146
+ slack_msgs[-1]["text"]
147
+ if slack_msgs and len(slack_msgs) > 0
148
+ else ""
149
+ )
150
  if last_msg:
151
  self.child.send(last_msg)
152
+ print("-------- receive final ---------")
153
+ self.child.send("[Finish]")
154
+
155
  def run(self):
156
  """
157
  这个函数运行在子进程
 
161
  self.local_history = []
162
  if (self.claude_model is None) or (not self.success):
163
  # 代理设置
164
+ proxies = get_conf("proxies")
165
  if proxies is None:
166
  self.proxies_https = None
167
  else:
168
+ self.proxies_https = proxies["https"]
169
 
170
  try:
171
+ SLACK_CLAUDE_USER_TOKEN = get_conf("SLACK_CLAUDE_USER_TOKEN")
172
+ self.claude_model = SlackClient(
173
+ token=SLACK_CLAUDE_USER_TOKEN, proxy=self.proxies_https
174
+ )
175
+ print("Claude组件初始化成功。")
176
  except:
177
  self.success = False
178
+ tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
179
+ self.child.send(f"[Local Message] 不能加载Claude组件。{tb_str}")
180
+ self.child.send("[Fail]")
181
+ self.child.send("[Finish]")
182
  raise RuntimeError(f"不能加载Claude组件。")
183
 
184
  self.success = True
 
186
  # 进入任务等待状态
187
  asyncio.run(self.async_run())
188
  except Exception:
189
+ tb_str = "\n```\n" + trimmed_format_exc() + "\n```\n"
190
+ self.child.send(f"[Local Message] Claude失败 {tb_str}.")
191
+ self.child.send("[Fail]")
192
+ self.child.send("[Finish]")
193
 
194
  def stream_chat(self, **kwargs):
195
  """
196
  这个函数运行在主进程
197
  """
198
  self.threadLock.acquire()
199
+ self.parent.send(kwargs) # 发送请求到子进程
200
  while True:
201
+ res = self.parent.recv() # 等待Claude回复的片段
202
+ if res == "[Finish]":
203
+ break # 结束
204
+ elif res == "[Fail]":
205
  self.success = False
206
  break
207
  else:
208
+ yield res # Claude回复的片段
209
  self.threadLock.release()
210
 
211
 
212
  """
213
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
214
  第三部分:主进程统一调用函数接口
215
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
216
  """
217
  global claude_handle
218
  claude_handle = None
219
 
220
 
221
+ def predict_no_ui_long_connection(
222
+ inputs,
223
+ llm_kwargs,
224
+ history=[],
225
+ sys_prompt="",
226
+ observe_window=None,
227
+ console_slience=False,
228
+ ):
229
  """
230
+ 多线程方法
231
+ 函数的说明请见 request_llms/bridge_all.py
232
  """
233
  global claude_handle
234
  if (claude_handle is None) or (not claude_handle.success):
 
241
 
242
  # 没有 sys_prompt 接口,因此把prompt加入 history
243
  history_feedin = []
244
+ for i in range(len(history) // 2):
245
+ history_feedin.append([history[2 * i], history[2 * i + 1]])
246
 
247
  watch_dog_patience = 5 # 看门狗 (watchdog) 的耐心, 设置5秒即可
248
  response = ""
249
  observe_window[0] = "[Local Message] 等待Claude响应中 ..."
250
+ for response in claude_handle.stream_chat(
251
+ query=inputs,
252
+ history=history_feedin,
253
+ system_prompt=sys_prompt,
254
+ max_length=llm_kwargs["max_length"],
255
+ top_p=llm_kwargs["top_p"],
256
+ temperature=llm_kwargs["temperature"],
257
+ ):
258
  observe_window[0] = preprocess_newbing_out_simple(response)
259
  if len(observe_window) >= 2:
260
+ if (time.time() - observe_window[1]) > watch_dog_patience:
261
  raise RuntimeError("程序终止。")
262
  return preprocess_newbing_out_simple(response)
263
 
264
 
265
+ def predict(
266
+ inputs,
267
+ llm_kwargs,
268
+ plugin_kwargs,
269
+ chatbot,
270
+ history=[],
271
+ system_prompt="",
272
+ stream=True,
273
+ additional_fn=None,
274
+ ):
275
  """
276
+ 单线程方法
277
+ 函数的说明请见 request_llms/bridge_all.py
278
  """
279
  chatbot.append((inputs, "[Local Message] 等待Claude响应中 ..."))
280
 
 
289
 
290
  if additional_fn is not None:
291
  from core_functional import handle_core_functionality
292
+
293
+ inputs, history = handle_core_functionality(
294
+ additional_fn, inputs, history, chatbot
295
+ )
296
 
297
  history_feedin = []
298
+ for i in range(len(history) // 2):
299
+ history_feedin.append([history[2 * i], history[2 * i + 1]])
300
 
301
  chatbot[-1] = (inputs, "[Local Message] 等待Claude响应中 ...")
302
  response = "[Local Message] 等待Claude响应中 ..."
303
+ yield from update_ui(
304
+ chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
305
+ )
306
+ for response in claude_handle.stream_chat(
307
+ query=inputs, history=history_feedin, system_prompt=system_prompt
308
+ ):
309
  chatbot[-1] = (inputs, preprocess_newbing_out(response))
310
+ yield from update_ui(
311
+ chatbot=chatbot, history=history, msg="Claude响应缓慢,尚未完成全部响应,请耐心完成后再提交新问题。"
312
+ )
313
  if response == "[Local Message] 等待Claude响应中 ...":
314
  response = "[Local Message] Claude响应异常,请刷新界面重试 ..."
315
  history.extend([inputs, response])
316
+ logging.info(f"[raw_input] {inputs}")
317
+ logging.info(f"[response] {response}")
318
  yield from update_ui(chatbot=chatbot, history=history, msg="完成全部响应,请提交新问题。")
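
Note: the Claude bridge polls Slack and treats any message ending in `Typing…_` as an in-progress reply, yielding `final=False` until that suffix disappears. A runnable sketch of that polling loop (the fake message feed is illustrative, not the Slack API):

```python
import asyncio

async def fake_slack_messages(state):
    # Stand-in for SlackClient.get_slack_messages(): three polls, then done.
    state["n"] += 1
    texts = ["Hel _Typing…_", "Hello wor _Typing…_", "Hello world."]
    return [{"text": texts[min(state["n"] - 1, 2)]}]

async def get_reply(state):
    while True:
        msgs = await fake_slack_messages(state)
        if not msgs:
            await asyncio.sleep(0.5)
            continue
        msg = msgs[-1]
        if msg["text"].endswith("Typing…_"):
            yield False, msg["text"]   # still streaming
        else:
            yield True, msg["text"]    # final answer
            break

async def main():
    state = {"n": 0}
    async for final, text in get_reply(state):
        print(final, text)

if __name__ == "__main__":
    asyncio.run(main())
```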
request_llms/bridge_zhipu.py CHANGED
@@ -42,7 +42,7 @@ def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_promp
42
  try:
43
  check_packages(["zhipuai"])
44
  except:
45
- yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install --upgrade zhipuai```。",
46
  chatbot=chatbot, history=history, delay=0)
47
  return
48
 
 
42
  try:
43
  check_packages(["zhipuai"])
44
  except:
45
+ yield from update_ui_lastest_msg(f"导入软件依赖失败。使用该模型需要额外依赖,安装方法```pip install zhipuai==1.0.7```。",
46
  chatbot=chatbot, history=history, delay=0)
47
  return
48
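
Note: the message above now pins `zhipuai==1.0.7` instead of `--upgrade`, presumably because newer SDK versions are not compatible with this bridge. The guard itself is a simple probe-and-hint; a minimal sketch (this `check_packages` is a simplified stand-in for the toolbox helper):

```python
import importlib

def check_packages(packages):
    # Raise ImportError if any required package is missing.
    for name in packages:
        importlib.import_module(name)

try:
    check_packages(["zhipuai"])
except ImportError:
    print("导入软件依赖失败。安装方法: pip install zhipuai==1.0.7")
```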
 
request_llms/com_google.py ADDED
@@ -0,0 +1,229 @@
 
1
+ # encoding: utf-8
2
+ # @Time : 2023/12/25
3
+ # @Author : Spike
4
+ # @Descr :
5
+ import json
6
+ import os
7
+ import re
8
+ import requests
9
+ from typing import List, Dict, Tuple
10
+ from toolbox import get_conf, encode_image, get_pictures_list
11
+
12
+ proxies, TIMEOUT_SECONDS = get_conf("proxies", "TIMEOUT_SECONDS")
13
+
14
+ """
15
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
16
+ 第五部分 一些文件处理方法
17
+ files_filter_handler 根据type过滤文件
18
+ input_encode_handler 提取input中的文件,并解析
19
+ file_manifest_filter_html 根据type过滤文件, 并解析为html or md 文本
20
+ link_mtime_to_md 文件增加本地时间参数,避免下载到缓存文件
21
+ html_view_blank 超链接
22
+ html_local_file 本地文件取相对路径
23
+ to_markdown_tabs 文件list 转换为 md tab
24
+ =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
25
+ """
26
+
27
+
28
+ def files_filter_handler(file_list):
29
+ new_list = []
30
+ filter_ = [
31
+ "png",
32
+ "jpg",
33
+ "jpeg",
34
+ "bmp",
35
+ "svg",
36
+ "webp",
37
+ "ico",
38
+ "tif",
39
+ "tiff",
40
+ "raw",
41
+ "eps",
42
+ ]
43
+ for file in file_list:
44
+ file = str(file).replace("file=", "")
45
+ if os.path.exists(file):
46
+ if str(os.path.basename(file)).split(".")[-1] in filter_:
47
+ new_list.append(file)
48
+ return new_list
49
+
50
+
51
+ def input_encode_handler(inputs, llm_kwargs):
52
+ if llm_kwargs["most_recent_uploaded"].get("path"):
53
+ image_paths = get_pictures_list(llm_kwargs["most_recent_uploaded"]["path"])
54
+ md_encode = []
55
+ for md_path in image_paths:
56
+ type_ = os.path.splitext(md_path)[1].replace(".", "")
57
+ type_ = "jpeg" if type_ == "jpg" else type_
58
+ md_encode.append({"data": encode_image(md_path), "type": type_})
59
+ return inputs, md_encode
60
+
61
+
62
+ def file_manifest_filter_html(file_list, filter_: list = None, md_type=False):
63
+ new_list = []
64
+ if not filter_:
65
+ filter_ = [
66
+ "png",
67
+ "jpg",
68
+ "jpeg",
69
+ "bmp",
70
+ "svg",
71
+ "webp",
72
+ "ico",
73
+ "tif",
74
+ "tiff",
75
+ "raw",
76
+ "eps",
77
+ ]
78
+ for file in file_list:
79
+ if str(os.path.basename(file)).split(".")[-1] in filter_:
80
+ new_list.append(html_local_img(file, md=md_type))
81
+ elif os.path.exists(file):
82
+ new_list.append(link_mtime_to_md(file))
83
+ else:
84
+ new_list.append(file)
85
+ return new_list
86
+
87
+
88
+ def link_mtime_to_md(file):
89
+ link_local = html_local_file(file)
90
+ link_name = os.path.basename(file)
91
+ a = f"[{link_name}]({link_local}?{os.path.getmtime(file)})"
92
+ return a
93
+
94
+
95
+ def html_local_file(file):
96
+ base_path = os.path.dirname(__file__) # 项目目录
97
+ if os.path.exists(str(file)):
98
+ file = f'file={file.replace(base_path, ".")}'
99
+ return file
100
+
101
+
102
+ def html_local_img(__file, layout="left", max_width=None, max_height=None, md=True):
103
+ style = ""
104
+ if max_width is not None:
105
+ style += f"max-width: {max_width};"
106
+ if max_height is not None:
107
+ style += f"max-height: {max_height};"
108
+ __file = html_local_file(__file)
109
+ a = f'<div align="{layout}"><img src="{__file}" style="{style}"></div>'
110
+ if md:
111
+ a = f"![{__file}]({__file})"
112
+ return a
113
+
114
+
115
+ def to_markdown_tabs(head: list, tabs: list, alignment=":---:", column=False):
116
+ """
117
+ Args:
118
+ head: 表头:[]
119
+ tabs: 表值:[[列1], [列2], [列3], [列4]]
120
+ alignment: :--- 左对齐, :---: 居中对齐, ---: 右对齐
121
+ column: True to keep data in columns, False to keep data in rows (default).
122
+ Returns:
123
+ A string representation of the markdown table.
124
+ """
125
+ if column:
126
+ transposed_tabs = list(map(list, zip(*tabs)))
127
+ else:
128
+ transposed_tabs = tabs
129
+ # Find the maximum length among the columns
130
+ max_len = max(len(column) for column in transposed_tabs)
131
+
132
+ tab_format = "| %s "
133
+ tabs_list = "".join([tab_format % i for i in head]) + "|\n"
134
+ tabs_list += "".join([tab_format % alignment for i in head]) + "|\n"
135
+
136
+ for i in range(max_len):
137
+ row_data = [tab[i] if i < len(tab) else "" for tab in transposed_tabs]
138
+ row_data = file_manifest_filter_html(row_data, filter_=None)
139
+ tabs_list += "".join([tab_format % i for i in row_data]) + "|\n"
140
+
141
+ return tabs_list
142
+
143
+
144
+ class GoogleChatInit:
145
+ def __init__(self):
146
+ self.url_gemini = "https://generativelanguage.googleapis.com/v1beta/models/%m:streamGenerateContent?key=%k"
147
+
148
+ def generate_chat(self, inputs, llm_kwargs, history, system_prompt):
149
+ headers, payload = self.generate_message_payload(
150
+ inputs, llm_kwargs, history, system_prompt
151
+ )
152
+ response = requests.post(
153
+ url=self.url_gemini,
154
+ headers=headers,
155
+ data=json.dumps(payload),
156
+ stream=True,
157
+ proxies=proxies,
158
+ timeout=TIMEOUT_SECONDS,
159
+ )
160
+ return response.iter_lines()
161
+
162
+ def __conversation_user(self, user_input, llm_kwargs):
163
+ what_i_have_asked = {"role": "user", "parts": []}
164
+ if "vision" not in self.url_gemini:
165
+ input_ = user_input
166
+ encode_img = []
167
+ else:
168
+ input_, encode_img = input_encode_handler(user_input, llm_kwargs=llm_kwargs)
169
+ what_i_have_asked["parts"].append({"text": input_})
170
+ if encode_img:
171
+ for data in encode_img:
172
+ what_i_have_asked["parts"].append(
173
+ {
174
+ "inline_data": {
175
+ "mime_type": f"image/{data['type']}",
176
+ "data": data["data"],
177
+ }
178
+ }
179
+ )
180
+ return what_i_have_asked
181
+
182
+ def __conversation_history(self, history, llm_kwargs):
183
+ messages = []
184
+ conversation_cnt = len(history) // 2
185
+ if conversation_cnt:
186
+ for index in range(0, 2 * conversation_cnt, 2):
187
+ what_i_have_asked = self.__conversation_user(history[index], llm_kwargs)
188
+ what_gpt_answer = {
189
+ "role": "model",
190
+ "parts": [{"text": history[index + 1]}],
191
+ }
192
+ messages.append(what_i_have_asked)
193
+ messages.append(what_gpt_answer)
194
+ return messages
195
+
196
+ def generate_message_payload(
197
+ self, inputs, llm_kwargs, history, system_prompt
198
+ ) -> Tuple[Dict, Dict]:
199
+ messages = [
200
+ # {"role": "system", "parts": [{"text": system_prompt}]}, # gemini 不允许对话轮次为偶数,所以这个没有用,看后续支持吧。。。
201
+ # {"role": "user", "parts": [{"text": ""}]},
202
+ # {"role": "model", "parts": [{"text": ""}]}
203
+ ]
204
+ self.url_gemini = self.url_gemini.replace(
205
+ "%m", llm_kwargs["llm_model"]
206
+ ).replace("%k", get_conf("GEMINI_API_KEY"))
207
+ header = {"Content-Type": "application/json"}
208
+ if "vision" not in self.url_gemini: # 不是vision 才处理history
209
+ messages.extend(
210
+ self.__conversation_history(history, llm_kwargs)
211
+ ) # 处理 history
212
+ messages.append(self.__conversation_user(inputs, llm_kwargs)) # 处理用户对话
213
+ payload = {
214
+ "contents": messages,
215
+ "generationConfig": {
216
+ # "maxOutputTokens": 800,
217
+ "stopSequences": str(llm_kwargs.get("stop", "")).split(" "),
218
+ "temperature": llm_kwargs.get("temperature", 1),
219
+ "topP": llm_kwargs.get("top_p", 0.8),
220
+ "topK": 10,
221
+ },
222
+ }
223
+ return header, payload
224
+
225
+
226
+ if __name__ == "__main__":
227
+ google = GoogleChatInit()
228
+ # print(google.generate_message_payload('你好呀', {}, ['123123', '3123123'], ''))
229
+ # google.input_encode_handler('123123[123123](./123123), ![53425](./asfafa/fff.jpg)')
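
Note: `generate_message_payload` above assembles alternating `user` / `model` turns plus a `generationConfig` block. A sketch of the resulting request body for one prior exchange (values are illustrative; nothing is sent):

```python
import json

history = ["Hi", "Hello! How can I help?"]   # one prior user/model exchange
payload = {
    "contents": [
        {"role": "user", "parts": [{"text": history[0]}]},
        {"role": "model", "parts": [{"text": history[1]}]},
        {"role": "user", "parts": [{"text": "Write a haiku."}]},  # new turn
    ],
    "generationConfig": {
        "stopSequences": [],
        "temperature": 1,
        "topP": 0.8,
        "topK": 10,
    },
}
print(json.dumps(payload, indent=2, ensure_ascii=False))
```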