Spaces:
Runtime error
Runtime error
update
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- app.py +71 -60
- config.py +26 -19
- core_functional.py +39 -16
- crazy_functional.py +41 -28
- crazy_functions/Latex全文润色.py +4 -4
- crazy_functions/Latex全文翻译.py +2 -2
- crazy_functions/Latex输出PDF.py +484 -0
- crazy_functions/agent_fns/pipe.py +2 -2
- crazy_functions/chatglm微调工具.py +4 -4
- crazy_functions/crazy_utils.py +25 -26
- crazy_functions/diagram_fns/file_tree.py +122 -0
- crazy_functions/pdf_fns/parse_word.py +85 -0
- crazy_functions/下载arxiv论文翻译摘要.py +1 -1
- crazy_functions/互动小游戏.py +2 -2
- crazy_functions/交互功能函数模板.py +2 -2
- crazy_functions/函数动态生成.py +2 -2
- crazy_functions/命令行助手.py +2 -2
- crazy_functions/图片生成.py +4 -4
- crazy_functions/多智能体.py +3 -3
- crazy_functions/对话历史存档.py +6 -6
- crazy_functions/总结word文档.py +1 -1
- crazy_functions/批量Markdown翻译.py +3 -3
- crazy_functions/批量总结PDF文档.py +1 -1
- crazy_functions/批量总结PDF文档pdfminer.py +1 -1
- crazy_functions/批量翻译PDF文档_NOUGAT.py +1 -1
- crazy_functions/批量翻译PDF文档_多线程.py +1 -1
- crazy_functions/数学动画生成manim.py +2 -2
- crazy_functions/理解PDF文档内容.py +1 -1
- crazy_functions/生成函数注释.py +1 -1
- crazy_functions/生成多种Mermaid图表.py +296 -0
- crazy_functions/知识库问答.py +3 -3
- crazy_functions/联网的ChatGPT.py +2 -2
- crazy_functions/联网的ChatGPT_bing版.py +2 -2
- crazy_functions/虚空终端.py +3 -3
- crazy_functions/解析JupyterNotebook.py +8 -2
- crazy_functions/解析项目源代码.py +17 -13
- crazy_functions/询问多个大语言模型.py +4 -4
- crazy_functions/语音助手.py +1 -1
- crazy_functions/读文章写摘要.py +1 -1
- crazy_functions/谷歌检索小助手.py +1 -1
- crazy_functions/辅助功能.py +2 -2
- crazy_functions/高级功能函数模板.py +34 -6
- docs/GithubAction+NoLocal+AudioAssistant +1 -1
- docs/self_analysis.md +1 -1
- docs/translate_english.json +4 -2
- docs/translate_japanese.json +1 -1
- docs/translate_std.json +10 -3
- docs/translate_traditionalchinese.json +1 -1
- docs/use_audio.md +1 -1
- request_llms/bridge_all.py +61 -23
app.py
CHANGED
@@ -1,9 +1,9 @@
|
|
1 |
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
2 |
|
3 |
help_menu_description = \
|
4 |
-
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
|
5 |
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
|
6 |
-
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
|
7 |
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
|
8 |
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
|
9 |
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
|
@@ -15,27 +15,27 @@ help_menu_description = \
|
|
15 |
|
16 |
def main():
|
17 |
import subprocess, sys
|
18 |
-
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://
|
19 |
import gradio as gr
|
20 |
-
if gr.__version__ not in ['3.32.
|
21 |
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
22 |
from request_llms.bridge_all import predict
|
23 |
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
24 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
25 |
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
26 |
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
27 |
-
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME')
|
28 |
DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
29 |
INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')
|
30 |
|
31 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
32 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
33 |
from check_proxy import get_current_version
|
34 |
-
from themes.theme import adjust_theme, advanced_css, theme_declaration
|
35 |
-
from themes.theme import js_code_for_css_changing,
|
36 |
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
|
37 |
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
38 |
-
|
39 |
# 问询记录, python 版本建议3.9+(越新越好)
|
40 |
import logging, uuid
|
41 |
os.makedirs(PATH_LOGGING, exist_ok=True)
|
@@ -67,7 +67,7 @@ def main():
|
|
67 |
proxy_info = check_proxy(proxies)
|
68 |
|
69 |
gr_L1 = lambda: gr.Row().style()
|
70 |
-
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id)
|
71 |
if LAYOUT == "TOP-DOWN":
|
72 |
gr_L1 = lambda: DummyWith()
|
73 |
gr_L2 = lambda scale, elem_id: gr.Row()
|
@@ -79,7 +79,7 @@ def main():
|
|
79 |
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
80 |
gr.HTML(title_html)
|
81 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
|
82 |
-
secret_css, dark_mode,
|
83 |
cookies = gr.State(load_chat_cookies())
|
84 |
with gr_L1():
|
85 |
with gr_L2(scale=2, elem_id="gpt-chat"):
|
@@ -96,11 +96,12 @@ def main():
|
|
96 |
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
|
97 |
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
|
98 |
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
|
99 |
-
if ENABLE_AUDIO:
|
100 |
with gr.Row():
|
101 |
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
|
102 |
with gr.Row():
|
103 |
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
|
|
|
104 |
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
|
105 |
with gr.Row():
|
106 |
for k in range(NUM_CUSTOM_BASIC_BTN):
|
@@ -117,7 +118,7 @@ def main():
|
|
117 |
with gr.Row():
|
118 |
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
|
119 |
with gr.Row(elem_id="input-plugin-group"):
|
120 |
-
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
|
121 |
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
|
122 |
with gr.Row():
|
123 |
for k, plugin in plugins.items():
|
@@ -125,7 +126,7 @@ def main():
|
|
125 |
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
|
126 |
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
|
127 |
info = plugins[k].get("Info", k)
|
128 |
-
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
|
129 |
visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
|
130 |
with gr.Row():
|
131 |
with gr.Accordion("更多函数插件", open=True):
|
@@ -137,7 +138,7 @@ def main():
|
|
137 |
with gr.Row():
|
138 |
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
|
139 |
with gr.Row():
|
140 |
-
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
|
141 |
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
|
142 |
with gr.Row():
|
143 |
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
|
@@ -145,13 +146,12 @@ def main():
|
|
145 |
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
|
146 |
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
|
147 |
|
148 |
-
|
149 |
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
|
150 |
with gr.Row():
|
151 |
with gr.Tab("上传文件", elem_id="interact-panel"):
|
152 |
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
|
153 |
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
|
154 |
-
|
155 |
with gr.Tab("更换模型", elem_id="interact-panel"):
|
156 |
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
157 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
@@ -161,10 +161,11 @@ def main():
|
|
161 |
|
162 |
with gr.Tab("界面外观", elem_id="interact-panel"):
|
163 |
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
164 |
-
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"],
|
165 |
-
|
166 |
-
|
167 |
-
|
|
|
168 |
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
|
169 |
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
|
170 |
with gr.Tab("帮助", elem_id="interact-panel"):
|
@@ -181,7 +182,7 @@ def main():
|
|
181 |
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
|
182 |
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
|
183 |
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
|
184 |
-
clearBtn2 = gr.Button("清除", variant="secondary", visible=False); clearBtn2.style(size="sm")
|
185 |
|
186 |
|
187 |
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
@@ -195,10 +196,12 @@ def main():
|
|
195 |
basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
|
196 |
with gr.Column(scale=1, min_width=70):
|
197 |
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
198 |
-
|
199 |
-
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix):
|
200 |
ret = {}
|
|
|
201 |
customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
|
|
|
202 |
customize_fn_overwrite_.update({
|
203 |
basic_btn_dropdown_:
|
204 |
{
|
@@ -208,27 +211,41 @@ def main():
|
|
208 |
}
|
209 |
}
|
210 |
)
|
211 |
-
|
|
|
|
|
|
|
212 |
if basic_btn_dropdown_ in customize_btns:
|
213 |
-
|
|
|
214 |
else:
|
215 |
-
|
|
|
216 |
ret.update({cookies: cookies_})
|
217 |
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
218 |
except: persistent_cookie_ = {}
|
219 |
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
220 |
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
221 |
-
ret.update({
|
222 |
return ret
|
223 |
-
|
224 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
225 |
ret = {}
|
226 |
for k in customize_btns:
|
227 |
ret.update({customize_btns[k]: gr.update(visible=False, value="")})
|
228 |
|
229 |
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
230 |
except: return ret
|
231 |
-
|
232 |
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
|
233 |
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
|
234 |
ret.update({cookies: cookies_})
|
@@ -238,26 +255,17 @@ def main():
|
|
238 |
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
|
239 |
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
240 |
return ret
|
241 |
-
|
242 |
-
basic_fn_load.click(reflesh_btn, [persistent_cookie, cookies], [cookies, *customize_btns.values(), *predefined_btns.values()])
|
243 |
-
h = basic_fn_confirm.click(assign_btn, [persistent_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
244 |
-
[persistent_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
245 |
-
# save persistent cookie
|
246 |
-
h.then(None, [persistent_cookie], None, _js="""(persistent_cookie)=>{setCookie("persistent_cookie", persistent_cookie, 5);}""")
|
247 |
|
248 |
# 功能区显示开关与功能区的互动
|
249 |
def fn_area_visibility(a):
|
250 |
ret = {}
|
251 |
-
ret.update({area_basic_fn: gr.update(visible=("基础功能区" in a))})
|
252 |
-
ret.update({area_crazy_fn: gr.update(visible=("函数插件区" in a))})
|
253 |
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
|
254 |
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
|
255 |
-
ret.update({clearBtn: gr.update(visible=("输入清除键" in a))})
|
256 |
-
ret.update({clearBtn2: gr.update(visible=("输入清除键" in a))})
|
257 |
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
|
258 |
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
|
259 |
return ret
|
260 |
-
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2,
|
|
|
261 |
|
262 |
# 功能区显示开关与功能区的互动
|
263 |
def fn_area_visibility_2(a):
|
@@ -265,6 +273,7 @@ def main():
|
|
265 |
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
|
266 |
return ret
|
267 |
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
|
|
|
268 |
|
269 |
# 整理反复出现的控件句柄组合
|
270 |
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
@@ -275,15 +284,17 @@ def main():
|
|
275 |
cancel_handles.append(txt2.submit(**predict_args))
|
276 |
cancel_handles.append(submitBtn.click(**predict_args))
|
277 |
cancel_handles.append(submitBtn2.click(**predict_args))
|
278 |
-
resetBtn.click(
|
279 |
-
resetBtn2.click(
|
280 |
-
|
281 |
-
|
|
|
|
|
282 |
if AUTO_CLEAR_TXT:
|
283 |
-
submitBtn.click(
|
284 |
-
submitBtn2.click(
|
285 |
-
txt.submit(
|
286 |
-
txt2.submit(
|
287 |
# 基础功能区的回调函数注册
|
288 |
for k in functional:
|
289 |
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
|
@@ -324,7 +335,7 @@ def main():
|
|
324 |
else:
|
325 |
css_part2 = adjust_theme()._get_theme_css()
|
326 |
return css_part2 + css_part1
|
327 |
-
|
328 |
theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css])
|
329 |
theme_handle.then(
|
330 |
None,
|
@@ -349,13 +360,13 @@ def main():
|
|
349 |
if not group_list: # 处理特殊情况:没有选择任何插件组
|
350 |
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
|
351 |
for k, plugin in plugins.items():
|
352 |
-
if plugin.get("AsButton", True):
|
353 |
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
|
354 |
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
|
355 |
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
|
356 |
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
|
357 |
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
|
358 |
-
if ENABLE_AUDIO:
|
359 |
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
|
360 |
rad = RealtimeAudioDistribution()
|
361 |
def deal_audio(audio, cookies):
|
@@ -363,12 +374,12 @@ def main():
|
|
363 |
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
364 |
|
365 |
|
366 |
-
demo.load(init_cookie, inputs=[cookies
|
367 |
-
|
368 |
-
|
369 |
-
demo.load(None, inputs=[dark_mode], outputs=None, _js=
|
370 |
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
371 |
-
|
372 |
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
373 |
def run_delayed_tasks():
|
374 |
import threading, webbrowser, time
|
@@ -379,7 +390,7 @@ def main():
|
|
379 |
def auto_updates(): time.sleep(0); auto_update()
|
380 |
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
|
381 |
def warm_up_mods(): time.sleep(6); warm_up_modules()
|
382 |
-
|
383 |
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
384 |
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
385 |
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
@@ -390,10 +401,10 @@ def main():
|
|
390 |
|
391 |
# 如果需要在二级路径下运行
|
392 |
# CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
393 |
-
# if CUSTOM_PATH != "/":
|
394 |
# from toolbox import run_gradio_in_subpath
|
395 |
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
396 |
-
# else:
|
397 |
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
|
398 |
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
|
399 |
|
|
|
1 |
import os; os.environ['no_proxy'] = '*' # 避免代理网络产生意外污染
|
2 |
|
3 |
help_menu_description = \
|
4 |
+
"""Github源代码开源和更新[地址🚀](https://github.com/binary-husky/gpt_academic),
|
5 |
感谢热情的[开发者们❤️](https://github.com/binary-husky/gpt_academic/graphs/contributors).
|
6 |
+
</br></br>常见问题请查阅[项目Wiki](https://github.com/binary-husky/gpt_academic/wiki),
|
7 |
如遇到Bug请前往[Bug反馈](https://github.com/binary-husky/gpt_academic/issues).
|
8 |
</br></br>普通对话使用说明: 1. 输入问题; 2. 点击提交
|
9 |
</br></br>基础功能区使用说明: 1. 输入文本; 2. 点击任意基础功能区按钮
|
|
|
15 |
|
16 |
def main():
|
17 |
import subprocess, sys
|
18 |
+
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'https://public.agent-matrix.com/publish/gradio-3.32.8-py3-none-any.whl'])
|
19 |
import gradio as gr
|
20 |
+
if gr.__version__ not in ['3.32.8']:
|
21 |
raise ModuleNotFoundError("使用项目内置Gradio获取最优体验! 请运行 `pip install -r requirements.txt` 指令安装内置Gradio及其他依赖, 详情信息见requirements.txt.")
|
22 |
from request_llms.bridge_all import predict
|
23 |
from toolbox import format_io, find_free_port, on_file_uploaded, on_report_generated, get_conf, ArgsGeneralWrapper, load_chat_cookies, DummyWith
|
24 |
# 建议您复制一个config_private.py放自己的秘密, 如API和代理网址
|
25 |
proxies, WEB_PORT, LLM_MODEL, CONCURRENT_COUNT, AUTHENTICATION = get_conf('proxies', 'WEB_PORT', 'LLM_MODEL', 'CONCURRENT_COUNT', 'AUTHENTICATION')
|
26 |
CHATBOT_HEIGHT, LAYOUT, AVAIL_LLM_MODELS, AUTO_CLEAR_TXT = get_conf('CHATBOT_HEIGHT', 'LAYOUT', 'AVAIL_LLM_MODELS', 'AUTO_CLEAR_TXT')
|
27 |
+
ENABLE_AUDIO, AUTO_CLEAR_TXT, PATH_LOGGING, AVAIL_THEMES, THEME, ADD_WAIFU = get_conf('ENABLE_AUDIO', 'AUTO_CLEAR_TXT', 'PATH_LOGGING', 'AVAIL_THEMES', 'THEME', 'ADD_WAIFU')
|
28 |
DARK_MODE, NUM_CUSTOM_BASIC_BTN, SSL_KEYFILE, SSL_CERTFILE = get_conf('DARK_MODE', 'NUM_CUSTOM_BASIC_BTN', 'SSL_KEYFILE', 'SSL_CERTFILE')
|
29 |
INIT_SYS_PROMPT = get_conf('INIT_SYS_PROMPT')
|
30 |
|
31 |
# 如果WEB_PORT是-1, 则随机选取WEB端口
|
32 |
PORT = find_free_port() if WEB_PORT <= 0 else WEB_PORT
|
33 |
from check_proxy import get_current_version
|
34 |
+
from themes.theme import adjust_theme, advanced_css, theme_declaration, js_code_clear, js_code_reset, js_code_show_or_hide, js_code_show_or_hide_group2
|
35 |
+
from themes.theme import js_code_for_css_changing, js_code_for_toggle_darkmode, js_code_for_persistent_cookie_init
|
36 |
from themes.theme import load_dynamic_theme, to_cookie_str, from_cookie_str, init_cookie
|
37 |
title_html = f"<h1 align=\"center\">GPT 学术优化 {get_current_version()}</h1>{theme_declaration}"
|
38 |
+
|
39 |
# 问询记录, python 版本建议3.9+(越新越好)
|
40 |
import logging, uuid
|
41 |
os.makedirs(PATH_LOGGING, exist_ok=True)
|
|
|
67 |
proxy_info = check_proxy(proxies)
|
68 |
|
69 |
gr_L1 = lambda: gr.Row().style()
|
70 |
+
gr_L2 = lambda scale, elem_id: gr.Column(scale=scale, elem_id=elem_id, min_width=400)
|
71 |
if LAYOUT == "TOP-DOWN":
|
72 |
gr_L1 = lambda: DummyWith()
|
73 |
gr_L2 = lambda scale, elem_id: gr.Row()
|
|
|
79 |
with gr.Blocks(title="GPT 学术优化", theme=set_theme, analytics_enabled=False, css=advanced_css) as demo:
|
80 |
gr.HTML(title_html)
|
81 |
gr.HTML('''<center><a href="https://huggingface.co/spaces/qingxu98/gpt-academic?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>请您打开此页面后务必点击上方的“复制空间”(Duplicate Space)按钮!<font color="#FF00FF">使用时,先在输入框填入API-KEY然后回车。</font><br/>切忌在“复制空间”(Duplicate Space)之前填入API_KEY或进行提问,否则您的API_KEY将极可能被空间所有者攫取!<br/>支持任意数量的OpenAI的密钥和API2D的密钥共存,例如输入"OpenAI密钥1,API2D密钥2",然后提交,即可同时使用两种模型接口。</center>''')
|
82 |
+
secret_css, dark_mode, py_pickle_cookie = gr.Textbox(visible=False), gr.Textbox(DARK_MODE, visible=False), gr.Textbox(visible=False)
|
83 |
cookies = gr.State(load_chat_cookies())
|
84 |
with gr_L1():
|
85 |
with gr_L2(scale=2, elem_id="gpt-chat"):
|
|
|
96 |
resetBtn = gr.Button("重置", elem_id="elem_reset", variant="secondary"); resetBtn.style(size="sm")
|
97 |
stopBtn = gr.Button("停止", elem_id="elem_stop", variant="secondary"); stopBtn.style(size="sm")
|
98 |
clearBtn = gr.Button("清除", elem_id="elem_clear", variant="secondary", visible=False); clearBtn.style(size="sm")
|
99 |
+
if ENABLE_AUDIO:
|
100 |
with gr.Row():
|
101 |
audio_mic = gr.Audio(source="microphone", type="numpy", elem_id="elem_audio", streaming=True, show_label=False).style(container=False)
|
102 |
with gr.Row():
|
103 |
status = gr.Markdown(f"Tip: 按Enter提交, 按Shift+Enter换行。当前模型: {LLM_MODEL} \n {proxy_info}", elem_id="state-panel")
|
104 |
+
|
105 |
with gr.Accordion("基础功能区", open=True, elem_id="basic-panel") as area_basic_fn:
|
106 |
with gr.Row():
|
107 |
for k in range(NUM_CUSTOM_BASIC_BTN):
|
|
|
118 |
with gr.Row():
|
119 |
gr.Markdown("插件可读取“输入区”文本/路径作为参数(上传文件自动修正路径)")
|
120 |
with gr.Row(elem_id="input-plugin-group"):
|
121 |
+
plugin_group_sel = gr.Dropdown(choices=all_plugin_groups, label='', show_label=False, value=DEFAULT_FN_GROUPS,
|
122 |
multiselect=True, interactive=True, elem_classes='normal_mut_select').style(container=False)
|
123 |
with gr.Row():
|
124 |
for k, plugin in plugins.items():
|
|
|
126 |
visible = True if match_group(plugin['Group'], DEFAULT_FN_GROUPS) else False
|
127 |
variant = plugins[k]["Color"] if "Color" in plugin else "secondary"
|
128 |
info = plugins[k].get("Info", k)
|
129 |
+
plugin['Button'] = plugins[k]['Button'] = gr.Button(k, variant=variant,
|
130 |
visible=visible, info_str=f'函数插件区: {info}').style(size="sm")
|
131 |
with gr.Row():
|
132 |
with gr.Accordion("更多函数插件", open=True):
|
|
|
138 |
with gr.Row():
|
139 |
dropdown = gr.Dropdown(dropdown_fn_list, value=r"打开插件列表", label="", show_label=False).style(container=False)
|
140 |
with gr.Row():
|
141 |
+
plugin_advanced_arg = gr.Textbox(show_label=True, label="高级参数输入区", visible=False,
|
142 |
placeholder="这里是特殊函数插件的高级参数输入区").style(container=False)
|
143 |
with gr.Row():
|
144 |
switchy_bt = gr.Button(r"请先从插件列表中选择", variant="secondary").style(size="sm")
|
|
|
146 |
with gr.Accordion("点击展开“文件下载区”。", open=False) as area_file_up:
|
147 |
file_upload = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload")
|
148 |
|
|
|
149 |
with gr.Floating(init_x="0%", init_y="0%", visible=True, width=None, drag="forbidden", elem_id="tooltip"):
|
150 |
with gr.Row():
|
151 |
with gr.Tab("上传文件", elem_id="interact-panel"):
|
152 |
gr.Markdown("请上传本地文件/压缩包供“函数插件区”功能调用。请注意: 上传文件后会自动把输入区修改为相应路径。")
|
153 |
file_upload_2 = gr.Files(label="任何文件, 推荐上传压缩文件(zip, tar)", file_count="multiple", elem_id="elem_upload_float")
|
154 |
+
|
155 |
with gr.Tab("更换模型", elem_id="interact-panel"):
|
156 |
md_dropdown = gr.Dropdown(AVAIL_LLM_MODELS, value=LLM_MODEL, label="更换LLM模型/请求源").style(container=False)
|
157 |
top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.01,interactive=True, label="Top-p (nucleus sampling)",)
|
|
|
161 |
|
162 |
with gr.Tab("界面外观", elem_id="interact-panel"):
|
163 |
theme_dropdown = gr.Dropdown(AVAIL_THEMES, value=THEME, label="更换UI主题").style(container=False)
|
164 |
+
checkboxes = gr.CheckboxGroup(["基础功能区", "函数插件区", "浮动输入区", "输入清除键", "插件参数区"], value=["基础功能区", "函数插件区"], label="显示/隐藏功能区", elem_id='cbs').style(container=False)
|
165 |
+
opt = ["自定义菜单"]
|
166 |
+
value=[]
|
167 |
+
if ADD_WAIFU: opt += ["添加Live2D形象"]; value += ["添加Live2D形象"]
|
168 |
+
checkboxes_2 = gr.CheckboxGroup(opt, value=value, label="显示/隐藏自定义菜单", elem_id='cbsc').style(container=False)
|
169 |
dark_mode_btn = gr.Button("切换界面明暗 ☀", variant="secondary").style(size="sm")
|
170 |
dark_mode_btn.click(None, None, None, _js=js_code_for_toggle_darkmode)
|
171 |
with gr.Tab("帮助", elem_id="interact-panel"):
|
|
|
182 |
submitBtn2 = gr.Button("提交", variant="primary"); submitBtn2.style(size="sm")
|
183 |
resetBtn2 = gr.Button("重置", variant="secondary"); resetBtn2.style(size="sm")
|
184 |
stopBtn2 = gr.Button("停止", variant="secondary"); stopBtn2.style(size="sm")
|
185 |
+
clearBtn2 = gr.Button("清除", elem_id="elem_clear2", variant="secondary", visible=False); clearBtn2.style(size="sm")
|
186 |
|
187 |
|
188 |
with gr.Floating(init_x="20%", init_y="50%", visible=False, width="40%", drag="top") as area_customize:
|
|
|
196 |
basic_fn_suffix = gr.Textbox(show_label=False, placeholder="输入新提示后缀", lines=4).style(container=False)
|
197 |
with gr.Column(scale=1, min_width=70):
|
198 |
basic_fn_confirm = gr.Button("确认并保存", variant="primary"); basic_fn_confirm.style(size="sm")
|
199 |
+
basic_fn_clean = gr.Button("恢复默认", variant="primary"); basic_fn_clean.style(size="sm")
|
200 |
+
def assign_btn(persistent_cookie_, cookies_, basic_btn_dropdown_, basic_fn_title, basic_fn_prefix, basic_fn_suffix, clean_up=False):
|
201 |
ret = {}
|
202 |
+
# 读取之前的自定义按钮
|
203 |
customize_fn_overwrite_ = cookies_['customize_fn_overwrite']
|
204 |
+
# 更新新的自定义按钮
|
205 |
customize_fn_overwrite_.update({
|
206 |
basic_btn_dropdown_:
|
207 |
{
|
|
|
211 |
}
|
212 |
}
|
213 |
)
|
214 |
+
if clean_up:
|
215 |
+
customize_fn_overwrite_ = {}
|
216 |
+
cookies_.update(customize_fn_overwrite_) # 更新cookie
|
217 |
+
visible = (not clean_up) and (basic_fn_title != "")
|
218 |
if basic_btn_dropdown_ in customize_btns:
|
219 |
+
# 是自定义按钮,不是预定义按钮
|
220 |
+
ret.update({customize_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
221 |
else:
|
222 |
+
# 是预定义按钮
|
223 |
+
ret.update({predefined_btns[basic_btn_dropdown_]: gr.update(visible=visible, value=basic_fn_title)})
|
224 |
ret.update({cookies: cookies_})
|
225 |
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
226 |
except: persistent_cookie_ = {}
|
227 |
persistent_cookie_["custom_bnt"] = customize_fn_overwrite_ # dict update new value
|
228 |
persistent_cookie_ = to_cookie_str(persistent_cookie_) # persistent cookie to dict
|
229 |
+
ret.update({py_pickle_cookie: persistent_cookie_}) # write persistent cookie
|
230 |
return ret
|
231 |
+
|
232 |
+
# update btn
|
233 |
+
h = basic_fn_confirm.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix],
|
234 |
+
[py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
235 |
+
h.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")
|
236 |
+
# clean up btn
|
237 |
+
h2 = basic_fn_clean.click(assign_btn, [py_pickle_cookie, cookies, basic_btn_dropdown, basic_fn_title, basic_fn_prefix, basic_fn_suffix, gr.State(True)],
|
238 |
+
[py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()])
|
239 |
+
h2.then(None, [py_pickle_cookie], None, _js="""(py_pickle_cookie)=>{setCookie("py_pickle_cookie", py_pickle_cookie, 365);}""")
|
240 |
+
|
241 |
+
def persistent_cookie_reload(persistent_cookie_, cookies_):
|
242 |
ret = {}
|
243 |
for k in customize_btns:
|
244 |
ret.update({customize_btns[k]: gr.update(visible=False, value="")})
|
245 |
|
246 |
try: persistent_cookie_ = from_cookie_str(persistent_cookie_) # persistent cookie to dict
|
247 |
except: return ret
|
248 |
+
|
249 |
customize_fn_overwrite_ = persistent_cookie_.get("custom_bnt", {})
|
250 |
cookies_['customize_fn_overwrite'] = customize_fn_overwrite_
|
251 |
ret.update({cookies: cookies_})
|
|
|
255 |
if k in customize_btns: ret.update({customize_btns[k]: gr.update(visible=True, value=v['Title'])})
|
256 |
else: ret.update({predefined_btns[k]: gr.update(visible=True, value=v['Title'])})
|
257 |
return ret
|
|
|
|
|
|
|
|
|
|
|
|
|
258 |
|
259 |
# 功能区显示开关与功能区的互动
|
260 |
def fn_area_visibility(a):
|
261 |
ret = {}
|
|
|
|
|
262 |
ret.update({area_input_primary: gr.update(visible=("浮动输入区" not in a))})
|
263 |
ret.update({area_input_secondary: gr.update(visible=("浮动输入区" in a))})
|
|
|
|
|
264 |
ret.update({plugin_advanced_arg: gr.update(visible=("插件参数区" in a))})
|
265 |
if "浮动输入区" in a: ret.update({txt: gr.update(value="")})
|
266 |
return ret
|
267 |
+
checkboxes.select(fn_area_visibility, [checkboxes], [area_basic_fn, area_crazy_fn, area_input_primary, area_input_secondary, txt, txt2, plugin_advanced_arg] )
|
268 |
+
checkboxes.select(None, [checkboxes], None, _js=js_code_show_or_hide)
|
269 |
|
270 |
# 功能区显示开关与功能区的互动
|
271 |
def fn_area_visibility_2(a):
|
|
|
273 |
ret.update({area_customize: gr.update(visible=("自定义菜单" in a))})
|
274 |
return ret
|
275 |
checkboxes_2.select(fn_area_visibility_2, [checkboxes_2], [area_customize] )
|
276 |
+
checkboxes_2.select(None, [checkboxes_2], None, _js=js_code_show_or_hide_group2)
|
277 |
|
278 |
# 整理反复出现的控件句柄组合
|
279 |
input_combo = [cookies, max_length_sl, md_dropdown, txt, txt2, top_p, temperature, chatbot, history, system_prompt, plugin_advanced_arg]
|
|
|
284 |
cancel_handles.append(txt2.submit(**predict_args))
|
285 |
cancel_handles.append(submitBtn.click(**predict_args))
|
286 |
cancel_handles.append(submitBtn2.click(**predict_args))
|
287 |
+
resetBtn.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
288 |
+
resetBtn2.click(None, None, [chatbot, history, status], _js=js_code_reset) # 先在前端快速清除chatbot&status
|
289 |
+
resetBtn.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
290 |
+
resetBtn2.click(lambda: ([], [], "已重置"), None, [chatbot, history, status]) # 再在后端清除history
|
291 |
+
clearBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
292 |
+
clearBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
293 |
if AUTO_CLEAR_TXT:
|
294 |
+
submitBtn.click(None, None, [txt, txt2], _js=js_code_clear)
|
295 |
+
submitBtn2.click(None, None, [txt, txt2], _js=js_code_clear)
|
296 |
+
txt.submit(None, None, [txt, txt2], _js=js_code_clear)
|
297 |
+
txt2.submit(None, None, [txt, txt2], _js=js_code_clear)
|
298 |
# 基础功能区的回调函数注册
|
299 |
for k in functional:
|
300 |
if ("Visible" in functional[k]) and (not functional[k]["Visible"]): continue
|
|
|
335 |
else:
|
336 |
css_part2 = adjust_theme()._get_theme_css()
|
337 |
return css_part2 + css_part1
|
338 |
+
|
339 |
theme_handle = theme_dropdown.select(on_theme_dropdown_changed, [theme_dropdown, secret_css], [secret_css])
|
340 |
theme_handle.then(
|
341 |
None,
|
|
|
360 |
if not group_list: # 处理特殊情况:没有选择任何插件组
|
361 |
return [*[plugin['Button'].update(visible=False) for _, plugin in plugins_as_btn.items()], gr.Dropdown.update(choices=[])]
|
362 |
for k, plugin in plugins.items():
|
363 |
+
if plugin.get("AsButton", True):
|
364 |
btn_list.append(plugin['Button'].update(visible=match_group(plugin['Group'], group_list))) # 刷新按钮
|
365 |
if plugin.get('AdvancedArgs', False): dropdown_fn_list.append(k) # 对于需要高级参数的插件,亦在下拉菜单中显示
|
366 |
elif match_group(plugin['Group'], group_list): fns_list.append(k) # 刷新下拉列表
|
367 |
return [*btn_list, gr.Dropdown.update(choices=fns_list)]
|
368 |
plugin_group_sel.select(fn=on_group_change, inputs=[plugin_group_sel], outputs=[*[plugin['Button'] for name, plugin in plugins_as_btn.items()], dropdown])
|
369 |
+
if ENABLE_AUDIO:
|
370 |
from crazy_functions.live_audio.audio_io import RealtimeAudioDistribution
|
371 |
rad = RealtimeAudioDistribution()
|
372 |
def deal_audio(audio, cookies):
|
|
|
374 |
audio_mic.stream(deal_audio, inputs=[audio_mic, cookies])
|
375 |
|
376 |
|
377 |
+
demo.load(init_cookie, inputs=[cookies], outputs=[cookies])
|
378 |
+
demo.load(persistent_cookie_reload, inputs = [py_pickle_cookie, cookies],
|
379 |
+
outputs = [py_pickle_cookie, cookies, *customize_btns.values(), *predefined_btns.values()], _js=js_code_for_persistent_cookie_init)
|
380 |
+
demo.load(None, inputs=[dark_mode], outputs=None, _js="""(dark_mode)=>{apply_cookie_for_checkbox(dark_mode);}""") # 配置暗色主题或亮色主题
|
381 |
demo.load(None, inputs=[gr.Textbox(LAYOUT, visible=False)], outputs=None, _js='(LAYOUT)=>{GptAcademicJavaScriptInit(LAYOUT);}')
|
382 |
+
|
383 |
# gradio的inbrowser触发不太稳定,回滚代码到原始的浏览器打开函数
|
384 |
def run_delayed_tasks():
|
385 |
import threading, webbrowser, time
|
|
|
390 |
def auto_updates(): time.sleep(0); auto_update()
|
391 |
def open_browser(): time.sleep(2); webbrowser.open_new_tab(f"http://localhost:{PORT}")
|
392 |
def warm_up_mods(): time.sleep(6); warm_up_modules()
|
393 |
+
|
394 |
threading.Thread(target=auto_updates, name="self-upgrade", daemon=True).start() # 查看自动更新
|
395 |
threading.Thread(target=open_browser, name="open-browser", daemon=True).start() # 打开浏览器页面
|
396 |
threading.Thread(target=warm_up_mods, name="warm-up", daemon=True).start() # 预热tiktoken模块
|
|
|
401 |
|
402 |
# 如果需要在二级路径下运行
|
403 |
# CUSTOM_PATH = get_conf('CUSTOM_PATH')
|
404 |
+
# if CUSTOM_PATH != "/":
|
405 |
# from toolbox import run_gradio_in_subpath
|
406 |
# run_gradio_in_subpath(demo, auth=AUTHENTICATION, port=PORT, custom_path=CUSTOM_PATH)
|
407 |
+
# else:
|
408 |
# demo.launch(server_name="0.0.0.0", server_port=PORT, auth=AUTHENTICATION, favicon_path="docs/logo.png",
|
409 |
# blocked_paths=["config.py","config_private.py","docker-compose.yml","Dockerfile",f"{PATH_LOGGING}/admin"])
|
410 |
|
config.py
CHANGED
@@ -2,8 +2,8 @@
|
|
2 |
以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。
|
3 |
读取优先级:环境变量 > config_private.py > config.py
|
4 |
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
|
5 |
-
All the following configurations also support using environment variables to override,
|
6 |
-
and the environment variable configuration format can be seen in docker-compose.yml.
|
7 |
Configuration reading priority: environment variable > config_private.py > config.py
|
8 |
"""
|
9 |
|
@@ -37,7 +37,7 @@ else:
|
|
37 |
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
|
38 |
|
39 |
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
40 |
-
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
41 |
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
|
42 |
API_URL_REDIRECT = {}
|
43 |
|
@@ -93,14 +93,14 @@ DEFAULT_FN_GROUPS = ['对话', '编程', '学术', '智能体']
|
|
93 |
|
94 |
|
95 |
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
96 |
-
LLM_MODEL = "gpt-3.5-turbo" # 可选 ↓↓↓
|
97 |
-
AVAIL_LLM_MODELS = ["gpt-
|
98 |
-
"gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
99 |
-
"gpt-4", "gpt-4-32k", "azure-gpt-4", "
|
100 |
-
"gemini-pro", "chatglm3", "claude-2"
|
101 |
# P.S. 其他可用的模型还包括 [
|
102 |
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
|
103 |
-
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
|
104 |
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
105 |
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
|
106 |
# ]
|
@@ -165,7 +165,7 @@ API_ORG = ""
|
|
165 |
|
166 |
|
167 |
# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md
|
168 |
-
SLACK_CLAUDE_BOT_ID = ''
|
169 |
SLACK_CLAUDE_USER_TOKEN = ''
|
170 |
|
171 |
|
@@ -202,7 +202,7 @@ XFYUN_API_KEY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
|
|
202 |
|
203 |
# 接入智谱大模型
|
204 |
ZHIPUAI_API_KEY = ""
|
205 |
-
ZHIPUAI_MODEL = "
|
206 |
|
207 |
|
208 |
# # 火山引擎YUNQUE大模型
|
@@ -215,6 +215,11 @@ ZHIPUAI_MODEL = "glm-4" # 可选 "glm-3-turbo" "glm-4"
|
|
215 |
ANTHROPIC_API_KEY = ""
|
216 |
|
217 |
|
|
|
|
|
|
|
|
|
|
|
218 |
# 自定义API KEY格式
|
219 |
CUSTOM_API_KEY_PATTERN = ""
|
220 |
|
@@ -231,8 +236,8 @@ HUGGINGFACE_ACCESS_TOKEN = "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"
|
|
231 |
# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
|
232 |
GROBID_URLS = [
|
233 |
"https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
|
234 |
-
"https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
|
235 |
-
"https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
|
236 |
]
|
237 |
|
238 |
|
@@ -253,7 +258,7 @@ PATH_LOGGING = "gpt_log"
|
|
253 |
|
254 |
|
255 |
# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
|
256 |
-
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
|
257 |
"Warmup_Modules", "Nougat_Download", "AutoGen"]
|
258 |
|
259 |
|
@@ -304,9 +309,8 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|
304 |
│ ├── BAIDU_CLOUD_API_KEY
|
305 |
│ └── BAIDU_CLOUD_SECRET_KEY
|
306 |
│
|
307 |
-
├── "zhipuai" 智谱AI大模型
|
308 |
-
│
|
309 |
-
│ └── ZHIPUAI_MODEL
|
310 |
│
|
311 |
├── "qwen-turbo" 等通义千问大模型
|
312 |
│ └── DASHSCOPE_API_KEY
|
@@ -318,7 +322,7 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|
318 |
├── NEWBING_STYLE
|
319 |
└── NEWBING_COOKIES
|
320 |
|
321 |
-
|
322 |
本地大模型示意图
|
323 |
│
|
324 |
├── "chatglm3"
|
@@ -358,6 +362,9 @@ NUM_CUSTOM_BASIC_BTN = 4
|
|
358 |
│ └── ALIYUN_SECRET
|
359 |
│
|
360 |
└── PDF文档精准解析
|
361 |
-
|
|
|
|
|
|
|
362 |
|
363 |
"""
|
|
|
2 |
以下所有配置也都支持利用环境变量覆写,环境变量配置格式见docker-compose.yml。
|
3 |
读取优先级:环境变量 > config_private.py > config.py
|
4 |
--- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
|
5 |
+
All the following configurations also support using environment variables to override,
|
6 |
+
and the environment variable configuration format can be seen in docker-compose.yml.
|
7 |
Configuration reading priority: environment variable > config_private.py > config.py
|
8 |
"""
|
9 |
|
|
|
37 |
# ------------------------------------ 以下配置可以优化体验, 但大部分场合下并不需要修改 ------------------------------------
|
38 |
|
39 |
# 重新URL重新定向,实现更换API_URL的作用(高危设置! 常规情况下不要修改! 通过修改此设置,您将把您的API-KEY和对话隐私完全暴露给您设定的中间人!)
|
40 |
+
# 格式: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "在这里填写重定向的api.openai.com的URL"}
|
41 |
# 举例: API_URL_REDIRECT = {"https://api.openai.com/v1/chat/completions": "https://reverse-proxy-url/v1/chat/completions"}
|
42 |
API_URL_REDIRECT = {}
|
43 |
|
|
|
93 |
|
94 |
|
95 |
# 模型选择是 (注意: LLM_MODEL是默认选中的模型, 它*必须*被包含在AVAIL_LLM_MODELS列表中 )
|
96 |
+
LLM_MODEL = "gpt-3.5-turbo-16k" # 可选 ↓↓↓
|
97 |
+
AVAIL_LLM_MODELS = ["gpt-4-1106-preview", "gpt-4-turbo-preview", "gpt-4-vision-preview",
|
98 |
+
"gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k", "gpt-3.5-turbo", "azure-gpt-3.5",
|
99 |
+
"gpt-4", "gpt-4-32k", "azure-gpt-4", "glm-4", "glm-3-turbo",
|
100 |
+
"gemini-pro", "chatglm3", "claude-2"]
|
101 |
# P.S. 其他可用的模型还包括 [
|
102 |
# "moss", "qwen-turbo", "qwen-plus", "qwen-max"
|
103 |
+
# "zhipuai", "qianfan", "deepseekcoder", "llama2", "qwen-local", "gpt-3.5-turbo-0613",
|
104 |
# "gpt-3.5-turbo-16k-0613", "gpt-3.5-random", "api2d-gpt-3.5-turbo", 'api2d-gpt-3.5-turbo-16k',
|
105 |
# "spark", "sparkv2", "sparkv3", "chatglm_onnx", "claude-1-100k", "claude-2", "internlm", "jittorllms_pangualpha", "jittorllms_llama"
|
106 |
# ]
|
|
|
165 |
|
166 |
|
167 |
# 如果需要使用Slack Claude,使用教程详情见 request_llms/README.md
|
168 |
+
SLACK_CLAUDE_BOT_ID = ''
|
169 |
SLACK_CLAUDE_USER_TOKEN = ''
|
170 |
|
171 |
|
|
|
202 |
|
203 |
# 接入智谱大模型
|
204 |
ZHIPUAI_API_KEY = ""
|
205 |
+
ZHIPUAI_MODEL = "" # 此选项已废弃,不再需要填写
|
206 |
|
207 |
|
208 |
# # 火山引擎YUNQUE大模型
|
|
|
215 |
ANTHROPIC_API_KEY = ""
|
216 |
|
217 |
|
218 |
+
# Mathpix 拥有执行PDF的OCR功能,但是需要注册账号
|
219 |
+
MATHPIX_APPID = ""
|
220 |
+
MATHPIX_APPKEY = ""
|
221 |
+
|
222 |
+
|
223 |
# 自定义API KEY格式
|
224 |
CUSTOM_API_KEY_PATTERN = ""
|
225 |
|
|
|
236 |
# 获取方法:复制以下空间https://huggingface.co/spaces/qingxu98/grobid,设为public,然后GROBID_URL = "https://(你的hf用户名如qingxu98)-(你的填写的空间名如grobid).hf.space"
|
237 |
GROBID_URLS = [
|
238 |
"https://qingxu98-grobid.hf.space","https://qingxu98-grobid2.hf.space","https://qingxu98-grobid3.hf.space",
|
239 |
+
"https://qingxu98-grobid4.hf.space","https://qingxu98-grobid5.hf.space", "https://qingxu98-grobid6.hf.space",
|
240 |
+
"https://qingxu98-grobid7.hf.space", "https://qingxu98-grobid8.hf.space",
|
241 |
]
|
242 |
|
243 |
|
|
|
258 |
|
259 |
|
260 |
# 除了连接OpenAI之外,还有哪些场合允许使用代理,请勿修改
|
261 |
+
WHEN_TO_USE_PROXY = ["Download_LLM", "Download_Gradio_Theme", "Connect_Grobid",
|
262 |
"Warmup_Modules", "Nougat_Download", "AutoGen"]
|
263 |
|
264 |
|
|
|
309 |
│ ├── BAIDU_CLOUD_API_KEY
|
310 |
│ └── BAIDU_CLOUD_SECRET_KEY
|
311 |
│
|
312 |
+
├── "glm-4", "glm-3-turbo", "zhipuai" 智谱AI大模型
|
313 |
+
│ └── ZHIPUAI_API_KEY
|
|
|
314 |
│
|
315 |
├── "qwen-turbo" 等通义千问大模型
|
316 |
│ └── DASHSCOPE_API_KEY
|
|
|
322 |
├── NEWBING_STYLE
|
323 |
└── NEWBING_COOKIES
|
324 |
|
325 |
+
|
326 |
本地大模型示意图
|
327 |
│
|
328 |
├── "chatglm3"
|
|
|
362 |
│ └── ALIYUN_SECRET
|
363 |
│
|
364 |
└── PDF文档精准解析
|
365 |
+
├── GROBID_URLS
|
366 |
+
├── MATHPIX_APPID
|
367 |
+
└── MATHPIX_APPKEY
|
368 |
+
|
369 |
|
370 |
"""
|
core_functional.py
CHANGED
@@ -3,18 +3,27 @@
|
|
3 |
# 'stop' 颜色对应 theme.py 中的 color_er
|
4 |
import importlib
|
5 |
from toolbox import clear_line_break
|
|
|
|
|
6 |
from textwrap import dedent
|
7 |
|
8 |
def get_core_functions():
|
9 |
return {
|
10 |
|
11 |
-
"
|
12 |
-
# [1*]
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
18 |
"Suffix": r"",
|
19 |
# [3] 按钮颜色 (可选参数,默认 secondary)
|
20 |
"Color": r"secondary",
|
@@ -32,8 +41,10 @@ def get_core_functions():
|
|
32 |
"Prefix": r"",
|
33 |
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
34 |
"Suffix":
|
|
|
35 |
dedent("\n"+r'''
|
36 |
==============================
|
|
|
37 |
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
38 |
|
39 |
以下是对以上文本的总结,以mermaid flowchart的形式展示:
|
@@ -83,14 +94,22 @@ def get_core_functions():
|
|
83 |
|
84 |
|
85 |
"学术英中互译": {
|
86 |
-
"Prefix":
|
87 |
-
|
88 |
-
|
89 |
-
|
90 |
-
|
91 |
-
|
92 |
-
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
"Suffix": r"",
|
95 |
},
|
96 |
|
@@ -140,7 +159,11 @@ def handle_core_functionality(additional_fn, inputs, history, chatbot):
|
|
140 |
if "PreProcess" in core_functional[additional_fn]:
|
141 |
if core_functional[additional_fn]["PreProcess"] is not None:
|
142 |
inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
|
143 |
-
|
|
|
|
|
|
|
|
|
144 |
if core_functional[additional_fn].get("AutoClearHistory", False):
|
145 |
history = []
|
146 |
return inputs, history
|
|
|
3 |
# 'stop' 颜色对应 theme.py 中的 color_er
|
4 |
import importlib
|
5 |
from toolbox import clear_line_break
|
6 |
+
from toolbox import apply_gpt_academic_string_mask_langbased
|
7 |
+
from toolbox import build_gpt_academic_masked_string_langbased
|
8 |
from textwrap import dedent
|
9 |
|
10 |
def get_core_functions():
|
11 |
return {
|
12 |
|
13 |
+
"学术语料润色": {
|
14 |
+
# [1*] 前缀字符串,会被加在你的输入之前。例如,用来描述你的要求,例如翻译、解释代码、润色等等。
|
15 |
+
# 这里填一个提示词字符串就行了,这里为了区分中英文情景搞复杂了一点
|
16 |
+
"Prefix": build_gpt_academic_masked_string_langbased(
|
17 |
+
text_show_english=
|
18 |
+
r"Below is a paragraph from an academic paper. Polish the writing to meet the academic style, "
|
19 |
+
r"improve the spelling, grammar, clarity, concision and overall readability. When necessary, rewrite the whole sentence. "
|
20 |
+
r"Firstly, you should provide the polished paragraph. "
|
21 |
+
r"Secondly, you should list all your modification and explain the reasons to do so in markdown table.",
|
22 |
+
text_show_chinese=
|
23 |
+
r"作为一名中文学术论文写作改进助理,你的任务是改进所提供文本的拼写、语法、清晰、简洁和整体可读性,"
|
24 |
+
r"同时分解长句,减少重复,并提供改进建议。请先提供文本的更正版本,然后在markdown表格中列出修改的内容,并给出修改的理由:"
|
25 |
+
) + "\n\n",
|
26 |
+
# [2*] 后缀字符串,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
27 |
"Suffix": r"",
|
28 |
# [3] 按钮颜色 (可选参数,默认 secondary)
|
29 |
"Color": r"secondary",
|
|
|
41 |
"Prefix": r"",
|
42 |
# 后缀,会被加在你的输入之后。例如,配合前缀可以把你的输入内容用引号圈起来
|
43 |
"Suffix":
|
44 |
+
# dedent() 函数用于去除多行字符串的缩进
|
45 |
dedent("\n"+r'''
|
46 |
==============================
|
47 |
+
|
48 |
使用mermaid flowchart对以上文本进行总结,概括上述段落的内容以及内在逻辑关系,例如:
|
49 |
|
50 |
以下是对以上文本的总结,以mermaid flowchart的形式展示:
|
|
|
94 |
|
95 |
|
96 |
"学术英中互译": {
|
97 |
+
"Prefix": build_gpt_academic_masked_string_langbased(
|
98 |
+
text_show_chinese=
|
99 |
+
r"I want you to act as a scientific English-Chinese translator, "
|
100 |
+
r"I will provide you with some paragraphs in one language "
|
101 |
+
r"and your task is to accurately and academically translate the paragraphs only into the other language. "
|
102 |
+
r"Do not repeat the original provided paragraphs after translation. "
|
103 |
+
r"You should use artificial intelligence tools, "
|
104 |
+
r"such as natural language processing, and rhetorical knowledge "
|
105 |
+
r"and experience about effective writing techniques to reply. "
|
106 |
+
r"I'll give you my paragraphs as follows, tell me what language it is written in, and then translate:",
|
107 |
+
text_show_english=
|
108 |
+
r"你是经验丰富的翻译,请把以下学术文章段落翻译成中文,"
|
109 |
+
r"并同时充分考虑中文的语法、清晰、简洁和整体可读性,"
|
110 |
+
r"必要时,你可以修改整个句子的顺序以确保翻译后的段落符合中文的语言习惯。"
|
111 |
+
r"你需要翻译的文本如下:"
|
112 |
+
) + "\n\n",
|
113 |
"Suffix": r"",
|
114 |
},
|
115 |
|
|
|
159 |
if "PreProcess" in core_functional[additional_fn]:
|
160 |
if core_functional[additional_fn]["PreProcess"] is not None:
|
161 |
inputs = core_functional[additional_fn]["PreProcess"](inputs) # 获取预处理函数(如果有的话)
|
162 |
+
# 为字符串加上上面定义的前缀和后缀。
|
163 |
+
inputs = apply_gpt_academic_string_mask_langbased(
|
164 |
+
string = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"],
|
165 |
+
lang_reference = inputs,
|
166 |
+
)
|
167 |
if core_functional[additional_fn].get("AutoClearHistory", False):
|
168 |
history = []
|
169 |
return inputs, history
|
crazy_functional.py
CHANGED
@@ -32,10 +32,9 @@ def get_crazy_functions():
|
|
32 |
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
|
33 |
from crazy_functions.Latex全文润色 import Latex中文润色
|
34 |
from crazy_functions.Latex全文润色 import Latex英文纠错
|
35 |
-
from crazy_functions.Latex全文翻译 import Latex中译英
|
36 |
-
from crazy_functions.Latex全文翻译 import Latex英译中
|
37 |
from crazy_functions.批量Markdown翻译 import Markdown中译英
|
38 |
from crazy_functions.虚空终端 import 虚空终端
|
|
|
39 |
|
40 |
function_plugins = {
|
41 |
"虚空终端": {
|
@@ -71,6 +70,15 @@ def get_crazy_functions():
|
|
71 |
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
|
72 |
"Function": HotReload(清除缓存),
|
73 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
"批量总结Word文档": {
|
75 |
"Group": "学术",
|
76 |
"Color": "stop",
|
@@ -237,13 +245,7 @@ def get_crazy_functions():
|
|
237 |
"Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
|
238 |
"Function": HotReload(Latex英文润色),
|
239 |
},
|
240 |
-
|
241 |
-
"Group": "学术",
|
242 |
-
"Color": "stop",
|
243 |
-
"AsButton": False, # 加入下拉菜单中
|
244 |
-
"Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
|
245 |
-
"Function": HotReload(Latex英文纠错),
|
246 |
-
},
|
247 |
"中文Latex项目全文润色(输入路径或上传压缩包)": {
|
248 |
"Group": "学术",
|
249 |
"Color": "stop",
|
@@ -252,6 +254,14 @@ def get_crazy_functions():
|
|
252 |
"Function": HotReload(Latex中文润色),
|
253 |
},
|
254 |
# 已经被新插件取代
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
255 |
# "Latex项目全文中译英(输入路径或上传压缩包)": {
|
256 |
# "Group": "学术",
|
257 |
# "Color": "stop",
|
@@ -522,7 +532,9 @@ def get_crazy_functions():
|
|
522 |
print("Load function plugin failed")
|
523 |
|
524 |
try:
|
525 |
-
from crazy_functions.Latex输出PDF
|
|
|
|
|
526 |
|
527 |
function_plugins.update(
|
528 |
{
|
@@ -533,38 +545,39 @@ def get_crazy_functions():
|
|
533 |
"AdvancedArgs": True,
|
534 |
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
|
535 |
"Function": HotReload(Latex英文纠错加PDF对比),
|
536 |
-
}
|
537 |
-
}
|
538 |
-
)
|
539 |
-
from crazy_functions.Latex输出PDF结果 import Latex翻译中文并重新编译PDF
|
540 |
-
|
541 |
-
function_plugins.update(
|
542 |
-
{
|
543 |
"Arxiv论文精细翻译(输入arxivID)[需Latex]": {
|
544 |
"Group": "学术",
|
545 |
"Color": "stop",
|
546 |
"AsButton": False,
|
547 |
"AdvancedArgs": True,
|
548 |
-
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
549 |
-
|
550 |
-
|
551 |
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
|
552 |
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
553 |
-
}
|
554 |
-
}
|
555 |
-
)
|
556 |
-
function_plugins.update(
|
557 |
-
{
|
558 |
"本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
|
559 |
"Group": "学术",
|
560 |
"Color": "stop",
|
561 |
"AsButton": False,
|
562 |
"AdvancedArgs": True,
|
563 |
-
"ArgsReminder": "如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
564 |
-
|
565 |
-
|
566 |
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
|
567 |
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
568 |
}
|
569 |
}
|
570 |
)
|
|
|
32 |
from crazy_functions.理解PDF文档内容 import 理解PDF文档内容标准文件输入
|
33 |
from crazy_functions.Latex全文润色 import Latex中文润色
|
34 |
from crazy_functions.Latex全文润色 import Latex英文纠错
|
|
|
|
|
35 |
from crazy_functions.批量Markdown翻译 import Markdown中译英
|
36 |
from crazy_functions.虚空终端 import 虚空终端
|
37 |
+
from crazy_functions.生成多种Mermaid图表 import 生成多种Mermaid图表
|
38 |
|
39 |
function_plugins = {
|
40 |
"虚空终端": {
|
|
|
70 |
"Info": "清除所有缓存文件,谨慎操作 | 不需要输入参数",
|
71 |
"Function": HotReload(清除缓存),
|
72 |
},
|
73 |
+
"生成多种Mermaid图表(从当前对话或路径(.pdf/.md/.docx)中生产图表)": {
|
74 |
+
"Group": "对话",
|
75 |
+
"Color": "stop",
|
76 |
+
"AsButton": False,
|
77 |
+
"Info" : "基于当前对话或文件生成多种Mermaid图表,图表类型由模型判断",
|
78 |
+
"Function": HotReload(生成多种Mermaid图表),
|
79 |
+
"AdvancedArgs": True,
|
80 |
+
"ArgsReminder": "请输入图类型对应的数字,不输入则为模型自行判断:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图,9-思维导图",
|
81 |
+
},
|
82 |
"批量总结Word文档": {
|
83 |
"Group": "学术",
|
84 |
"Color": "stop",
|
|
|
245 |
"Info": "对英文Latex项目全文进行润色处理 | 输入参数为路径或上传压缩包",
|
246 |
"Function": HotReload(Latex英文润色),
|
247 |
},
|
248 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
249 |
"中文Latex项目全文润色(输入路径或上传压缩包)": {
|
250 |
"Group": "学术",
|
251 |
"Color": "stop",
|
|
|
254 |
"Function": HotReload(Latex中文润色),
|
255 |
},
|
256 |
# 已经被新插件取代
|
257 |
+
# "英文Latex项目全文纠错(输入路径或上传压缩包)": {
|
258 |
+
# "Group": "学术",
|
259 |
+
# "Color": "stop",
|
260 |
+
# "AsButton": False, # 加入下拉菜单中
|
261 |
+
# "Info": "对英文Latex项目全文进行纠错处理 | 输入参数为路径或上传压缩包",
|
262 |
+
# "Function": HotReload(Latex英文纠错),
|
263 |
+
# },
|
264 |
+
# 已经被新插件取代
|
265 |
# "Latex项目全文中译英(输入路径或上传压缩包)": {
|
266 |
# "Group": "学术",
|
267 |
# "Color": "stop",
|
|
|
532 |
print("Load function plugin failed")
|
533 |
|
534 |
try:
|
535 |
+
from crazy_functions.Latex输出PDF import Latex英文纠错加PDF对比
|
536 |
+
from crazy_functions.Latex输出PDF import Latex翻译中文并重新编译PDF
|
537 |
+
from crazy_functions.Latex输出PDF import PDF翻译中文并重新编译PDF
|
538 |
|
539 |
function_plugins.update(
|
540 |
{
|
|
|
545 |
"AdvancedArgs": True,
|
546 |
"ArgsReminder": "如果有必要, 请在此处追加更细致的矫错指令(使用英文)。",
|
547 |
"Function": HotReload(Latex英文纠错加PDF对比),
|
548 |
+
},
|
|
|
|
|
|
|
|
|
|
|
|
|
549 |
"Arxiv论文精细翻译(输入arxivID)[需Latex]": {
|
550 |
"Group": "学术",
|
551 |
"Color": "stop",
|
552 |
"AsButton": False,
|
553 |
"AdvancedArgs": True,
|
554 |
+
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
555 |
+
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
556 |
+
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
557 |
"Info": "Arixv论文精细翻译 | 输入参数arxiv论文的ID,比如1812.10695",
|
558 |
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
559 |
+
},
|
|
|
|
|
|
|
|
|
560 |
"本地Latex论文精细翻译(上传Latex项目)[需Latex]": {
|
561 |
"Group": "学术",
|
562 |
"Color": "stop",
|
563 |
"AsButton": False,
|
564 |
"AdvancedArgs": True,
|
565 |
+
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
566 |
+
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
567 |
+
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
568 |
"Info": "本地Latex论文精细翻译 | 输入参数是路径",
|
569 |
"Function": HotReload(Latex翻译中文并重新编译PDF),
|
570 |
+
},
|
571 |
+
"PDF翻译中文并重新编译PDF(上传PDF)[需Latex]": {
|
572 |
+
"Group": "学术",
|
573 |
+
"Color": "stop",
|
574 |
+
"AsButton": False,
|
575 |
+
"AdvancedArgs": True,
|
576 |
+
"ArgsReminder": r"如果有必要, 请在此处给出自定义翻译命令, 解决部分词汇翻译不准确的问题。 "
|
577 |
+
r"例如当单词'agent'翻译不准确时, 请尝试把以下指令复制到高级参数区: "
|
578 |
+
r'If the term "agent" is used in this section, it should be translated to "智能体". ',
|
579 |
+
"Info": "PDF翻译中文,并重新编译PDF | 输入参数为路径",
|
580 |
+
"Function": HotReload(PDF翻译中文并重新编译PDF)
|
581 |
}
|
582 |
}
|
583 |
)
|
crazy_functions/Latex全文润色.py
CHANGED
@@ -135,11 +135,11 @@ def 多文件润色(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|
135 |
|
136 |
|
137 |
@CatchException
|
138 |
-
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
139 |
# 基本信息:功能、贡献者
|
140 |
chatbot.append([
|
141 |
"函数插件功能?",
|
142 |
-
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex
|
143 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
144 |
|
145 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
@@ -173,7 +173,7 @@ def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|
173 |
|
174 |
|
175 |
@CatchException
|
176 |
-
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
177 |
# 基本信息:功能、贡献者
|
178 |
chatbot.append([
|
179 |
"函数插件功能?",
|
@@ -209,7 +209,7 @@ def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|
209 |
|
210 |
|
211 |
@CatchException
|
212 |
-
def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
213 |
# 基本信息:功能、贡献者
|
214 |
chatbot.append([
|
215 |
"函数插件功能?",
|
|
|
135 |
|
136 |
|
137 |
@CatchException
|
138 |
+
def Latex英文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
139 |
# 基本信息:功能、贡献者
|
140 |
chatbot.append([
|
141 |
"函数插件功能?",
|
142 |
+
"对整个Latex项目进行润色。函数插件贡献者: Binary-Husky。(注意,此插件不调用Latex,如果有Latex环境,请使用「Latex英文纠错+高亮修正位置(需Latex)插件」"])
|
143 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
144 |
|
145 |
# 尝试导入依赖,如果缺少依赖,则给出安装建议
|
|
|
173 |
|
174 |
|
175 |
@CatchException
|
176 |
+
def Latex中文润色(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
177 |
# 基本信息:功能、贡献者
|
178 |
chatbot.append([
|
179 |
"函数插件功能?",
|
|
|
209 |
|
210 |
|
211 |
@CatchException
|
212 |
+
def Latex英文纠错(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
213 |
# 基本信息:功能、贡献者
|
214 |
chatbot.append([
|
215 |
"函数插件功能?",
|
crazy_functions/Latex全文翻译.py
CHANGED
@@ -106,7 +106,7 @@ def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, ch
|
|
106 |
|
107 |
|
108 |
@CatchException
|
109 |
-
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
110 |
# 基本信息:功能、贡献者
|
111 |
chatbot.append([
|
112 |
"函数插件功能?",
|
@@ -143,7 +143,7 @@ def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prom
|
|
143 |
|
144 |
|
145 |
@CatchException
|
146 |
-
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
147 |
# 基本信息:功能、贡献者
|
148 |
chatbot.append([
|
149 |
"函数插件功能?",
|
|
|
106 |
|
107 |
|
108 |
@CatchException
|
109 |
+
def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
110 |
# 基本信息:功能、贡献者
|
111 |
chatbot.append([
|
112 |
"函数插件功能?",
|
|
|
143 |
|
144 |
|
145 |
@CatchException
|
146 |
+
def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
147 |
# 基本信息:功能、贡献者
|
148 |
chatbot.append([
|
149 |
"函数插件功能?",
|
crazy_functions/Latex输出PDF.py
ADDED
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import update_ui, trimmed_format_exc, get_conf, get_log_folder, promote_file_to_downloadzone
|
2 |
+
from toolbox import CatchException, report_exception, update_ui_lastest_msg, zip_result, gen_time_str
|
3 |
+
from functools import partial
|
4 |
+
import glob, os, requests, time, json, tarfile
|
5 |
+
|
6 |
+
pj = os.path.join
|
7 |
+
ARXIV_CACHE_DIR = os.path.expanduser(f"~/arxiv_cache/")
|
8 |
+
|
9 |
+
|
10 |
+
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 工具函数 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
|
11 |
+
# 专业词汇声明 = 'If the term "agent" is used in this section, it should be translated to "智能体". '
|
12 |
+
def switch_prompt(pfg, mode, more_requirement):
|
13 |
+
"""
|
14 |
+
Generate prompts and system prompts based on the mode for proofreading or translating.
|
15 |
+
Args:
|
16 |
+
- pfg: Proofreader or Translator instance.
|
17 |
+
- mode: A string specifying the mode, either 'proofread' or 'translate_zh'.
|
18 |
+
|
19 |
+
Returns:
|
20 |
+
- inputs_array: A list of strings containing prompts for users to respond to.
|
21 |
+
- sys_prompt_array: A list of strings containing prompts for system prompts.
|
22 |
+
"""
|
23 |
+
n_split = len(pfg.sp_file_contents)
|
24 |
+
if mode == 'proofread_en':
|
25 |
+
inputs_array = [r"Below is a section from an academic paper, proofread this section." +
|
26 |
+
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " + more_requirement +
|
27 |
+
r"Answer me only with the revised text:" +
|
28 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
29 |
+
sys_prompt_array = ["You are a professional academic paper writer." for _ in range(n_split)]
|
30 |
+
elif mode == 'translate_zh':
|
31 |
+
inputs_array = [
|
32 |
+
r"Below is a section from an English academic paper, translate it into Chinese. " + more_requirement +
|
33 |
+
r"Do not modify any latex command such as \section, \cite, \begin, \item and equations. " +
|
34 |
+
r"Answer me only with the translated text:" +
|
35 |
+
f"\n\n{frag}" for frag in pfg.sp_file_contents]
|
36 |
+
sys_prompt_array = ["You are a professional translator." for _ in range(n_split)]
|
37 |
+
else:
|
38 |
+
assert False, "未知指令"
|
39 |
+
return inputs_array, sys_prompt_array
|
40 |
+
|
41 |
+
|
42 |
+
def desend_to_extracted_folder_if_exist(project_folder):
|
43 |
+
"""
|
44 |
+
Descend into the extracted folder if it exists, otherwise return the original folder.
|
45 |
+
|
46 |
+
Args:
|
47 |
+
- project_folder: A string specifying the folder path.
|
48 |
+
|
49 |
+
Returns:
|
50 |
+
- A string specifying the path to the extracted folder, or the original folder if there is no extracted folder.
|
51 |
+
"""
|
52 |
+
maybe_dir = [f for f in glob.glob(f'{project_folder}/*') if os.path.isdir(f)]
|
53 |
+
if len(maybe_dir) == 0: return project_folder
|
54 |
+
if maybe_dir[0].endswith('.extract'): return maybe_dir[0]
|
55 |
+
return project_folder
|
56 |
+
|
57 |
+
|
58 |
+
def move_project(project_folder, arxiv_id=None):
|
59 |
+
"""
|
60 |
+
Create a new work folder and copy the project folder to it.
|
61 |
+
|
62 |
+
Args:
|
63 |
+
- project_folder: A string specifying the folder path of the project.
|
64 |
+
|
65 |
+
Returns:
|
66 |
+
- A string specifying the path to the new work folder.
|
67 |
+
"""
|
68 |
+
import shutil, time
|
69 |
+
time.sleep(2) # avoid time string conflict
|
70 |
+
if arxiv_id is not None:
|
71 |
+
new_workfolder = pj(ARXIV_CACHE_DIR, arxiv_id, 'workfolder')
|
72 |
+
else:
|
73 |
+
new_workfolder = f'{get_log_folder()}/{gen_time_str()}'
|
74 |
+
try:
|
75 |
+
shutil.rmtree(new_workfolder)
|
76 |
+
except:
|
77 |
+
pass
|
78 |
+
|
79 |
+
# align subfolder if there is a folder wrapper
|
80 |
+
items = glob.glob(pj(project_folder, '*'))
|
81 |
+
items = [item for item in items if os.path.basename(item) != '__MACOSX']
|
82 |
+
if len(glob.glob(pj(project_folder, '*.tex'))) == 0 and len(items) == 1:
|
83 |
+
if os.path.isdir(items[0]): project_folder = items[0]
|
84 |
+
|
85 |
+
shutil.copytree(src=project_folder, dst=new_workfolder)
|
86 |
+
return new_workfolder
|
87 |
+
|
88 |
+
|
89 |
+
def arxiv_download(chatbot, history, txt, allow_cache=True):
|
90 |
+
def check_cached_translation_pdf(arxiv_id):
|
91 |
+
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'translation')
|
92 |
+
if not os.path.exists(translation_dir):
|
93 |
+
os.makedirs(translation_dir)
|
94 |
+
target_file = pj(translation_dir, 'translate_zh.pdf')
|
95 |
+
if os.path.exists(target_file):
|
96 |
+
promote_file_to_downloadzone(target_file, rename_file=None, chatbot=chatbot)
|
97 |
+
target_file_compare = pj(translation_dir, 'comparison.pdf')
|
98 |
+
if os.path.exists(target_file_compare):
|
99 |
+
promote_file_to_downloadzone(target_file_compare, rename_file=None, chatbot=chatbot)
|
100 |
+
return target_file
|
101 |
+
return False
|
102 |
+
|
103 |
+
def is_float(s):
|
104 |
+
try:
|
105 |
+
float(s)
|
106 |
+
return True
|
107 |
+
except ValueError:
|
108 |
+
return False
|
109 |
+
|
110 |
+
if ('.' in txt) and ('/' not in txt) and is_float(txt): # is arxiv ID
|
111 |
+
txt = 'https://arxiv.org/abs/' + txt.strip()
|
112 |
+
if ('.' in txt) and ('/' not in txt) and is_float(txt[:10]): # is arxiv ID
|
113 |
+
txt = 'https://arxiv.org/abs/' + txt[:10]
|
114 |
+
|
115 |
+
if not txt.startswith('https://arxiv.org'):
|
116 |
+
return txt, None # 是本地文件,跳过下载
|
117 |
+
|
118 |
+
# <-------------- inspect format ------------->
|
119 |
+
chatbot.append([f"检测到arxiv文档连接", '尝试下载 ...'])
|
120 |
+
yield from update_ui(chatbot=chatbot, history=history)
|
121 |
+
time.sleep(1) # 刷新界面
|
122 |
+
|
123 |
+
url_ = txt # https://arxiv.org/abs/1707.06690
|
124 |
+
if not txt.startswith('https://arxiv.org/abs/'):
|
125 |
+
msg = f"解析arxiv网址失败, 期望格式例如: https://arxiv.org/abs/1707.06690。实际得到格式: {url_}。"
|
126 |
+
yield from update_ui_lastest_msg(msg, chatbot=chatbot, history=history) # 刷新界面
|
127 |
+
return msg, None
|
128 |
+
# <-------------- set format ------------->
|
129 |
+
arxiv_id = url_.split('/abs/')[-1]
|
130 |
+
if 'v' in arxiv_id: arxiv_id = arxiv_id[:10]
|
131 |
+
cached_translation_pdf = check_cached_translation_pdf(arxiv_id)
|
132 |
+
if cached_translation_pdf and allow_cache: return cached_translation_pdf, arxiv_id
|
133 |
+
|
134 |
+
url_tar = url_.replace('/abs/', '/e-print/')
|
135 |
+
translation_dir = pj(ARXIV_CACHE_DIR, arxiv_id, 'e-print')
|
136 |
+
extract_dst = pj(ARXIV_CACHE_DIR, arxiv_id, 'extract')
|
137 |
+
os.makedirs(translation_dir, exist_ok=True)
|
138 |
+
|
139 |
+
# <-------------- download arxiv source file ------------->
|
140 |
+
dst = pj(translation_dir, arxiv_id + '.tar')
|
141 |
+
if os.path.exists(dst):
|
142 |
+
yield from update_ui_lastest_msg("调用缓存", chatbot=chatbot, history=history) # 刷新界面
|
143 |
+
else:
|
144 |
+
yield from update_ui_lastest_msg("开始下载", chatbot=chatbot, history=history) # 刷新界面
|
145 |
+
proxies = get_conf('proxies')
|
146 |
+
r = requests.get(url_tar, proxies=proxies)
|
147 |
+
with open(dst, 'wb+') as f:
|
148 |
+
f.write(r.content)
|
149 |
+
# <-------------- extract file ------------->
|
150 |
+
yield from update_ui_lastest_msg("下载完成", chatbot=chatbot, history=history) # 刷新界面
|
151 |
+
from toolbox import extract_archive
|
152 |
+
extract_archive(file_path=dst, dest_dir=extract_dst)
|
153 |
+
return extract_dst, arxiv_id
|
154 |
+
|
155 |
+
|
156 |
+
def pdf2tex_project(pdf_file_path):
    """
    Convert a PDF into a LaTeX project via the Mathpix PDF-conversion API.

    Steps: upload the PDF, poll the conversion status, download the resulting
    ``tex.zip`` next to the input file and extract it.

    Args:
        pdf_file_path: path to the local PDF file to convert.

    Returns:
        Path of the directory containing the extracted ``.tex`` sources,
        or ``None`` if the upload or the conversion failed.
    """
    import zipfile

    # Mathpix API credentials (user-configured)
    app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
    headers = {"app_id": app_id, "app_key": app_key}

    # Step 1: Send PDF file for processing
    options = {
        "conversion_formats": {"tex.zip": True},
        "math_inline_delimiters": ["$", "$"],
        "rm_spaces": True
    }

    # Use a context manager so the file handle is always released
    # (the previous implementation leaked the open file object).
    with open(pdf_file_path, "rb") as pdf_file:
        response = requests.post(url="https://api.mathpix.com/v3/pdf",
                                 headers=headers,
                                 data={"options_json": json.dumps(options)},
                                 files={"file": pdf_file})

    if not response.ok:
        print(f"Error sending PDF for processing. Status code: {response.status_code}")
        return None

    pdf_id = response.json()["pdf_id"]
    print(f"PDF processing initiated. PDF ID: {pdf_id}")

    # Step 2: Poll processing status until completion or failure
    while True:
        conversion_response = requests.get(f"https://api.mathpix.com/v3/pdf/{pdf_id}", headers=headers)
        conversion_data = conversion_response.json()

        if conversion_data["status"] == "completed":
            print("PDF processing completed.")
            break
        elif conversion_data["status"] == "error":
            # Abort on failure. The original code only printed here and
            # re-entered the loop without sleeping, spinning forever in a
            # tight loop once Mathpix reported an error.
            print("Error occurred during processing.")
            return None
        else:
            print(f"Processing status: {conversion_data['status']}")
            time.sleep(5)  # wait for a few seconds before checking again

    # Step 3: Download the tex.zip result and save it locally
    output_dir = os.path.join(os.path.dirname(pdf_file_path), 'mathpix_output')
    os.makedirs(output_dir, exist_ok=True)

    url = f"https://api.mathpix.com/v3/pdf/{pdf_id}.tex"
    response = requests.get(url, headers=headers)
    file_name_wo_dot = '_'.join(os.path.basename(pdf_file_path).split('.')[:-1])
    output_name = f"{file_name_wo_dot}.tex.zip"
    output_path = os.path.join(output_dir, output_name)
    with open(output_path, "wb") as output_file:
        output_file.write(response.content)
    print(f"tex.zip file saved at: {output_path}")

    # Extract the archive so callers get a plain directory of .tex files
    unzip_dir = os.path.join(output_dir, file_name_wo_dot)
    with zipfile.ZipFile(output_path, 'r') as zip_ref:
        zip_ref.extractall(unzip_dir)

    return unzip_dir
|
215 |
+
|
216 |
+
|
217 |
+
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序1 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
218 |
+
|
219 |
+
|
220 |
+
@CatchException
def Latex英文纠错加PDF对比(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    Plugin entry point: proofread an entire English LaTeX project and compile
    a PDF in which the corrections are highlighted.

    Args:
        txt: user input — a local path to the LaTeX project folder.
        llm_kwargs: LLM parameters (temperature, top_p, ...), passed through.
        plugin_kwargs: plugin parameters; ``advanced_arg`` carries extra
            proofreading requirements appended to the prompt.
        chatbot: chat display handle used to show progress to the user.
        history: chat history (cleared below to avoid input overflow).
        system_prompt: silent system prompt for the LLM.
        user_request: request info of the current user (IP address etc.).

    Yields UI updates; returns True on a successful PDF build, else False.
    """
    # <-------------- information about this plugin ------------->
    chatbot.append(["函数插件功能?",
                    "对整个Latex项目进行纠错, 用latex编译为PDF对修正处做高亮。函数插件贡献者: Binary-Husky。注意事项: 目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。仅在Windows系统进行了测试,其他操作系��表现未知。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    # <-------------- more requirements ------------->
    # Drop an empty advanced_arg so the .get default applies cleanly.
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    # Probe for a working pdflatex and the local latex helper module;
    # fail early with an installation hint if either is missing.
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- clear history and read input ------------->
    history = []
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    # NOTE(review): file_manifest above was collected from the folder BEFORE
    # this move; presumably the downstream helper re-globs inside the new
    # project_folder — confirm against Latex精细分解与转化.
    project_folder = move_project(project_folder, arxiv_id=None)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    # Skip the (expensive) LLM pass when a proofread merge file already exists.
    if not os.path.exists(project_folder + '/merge_proofread_en.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                      chatbot, history, system_prompt, mode='proofread_en',
                                      switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                  main_file_modified='merge_proofread_en',
                                  work_folder_original=project_folder, work_folder_modified=project_folder,
                                  work_folder=project_folder)

    # <-------------- zip PDF ------------->
    # Always package results, even on compile failure, so the user can
    # inspect or report the intermediate .tex files.
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 也是可读的, 您可以到Github Issue区, 用该压缩包+对话历史存档进行反馈 ...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
|
292 |
+
|
293 |
+
|
294 |
+
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= 插件主程序2 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
295 |
+
|
296 |
+
@CatchException
def Latex翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
    """
    Plugin entry point: translate an entire LaTeX project (local folder, or
    arxiv link/ID downloaded automatically) into Chinese and recompile a PDF.

    Args:
        txt: user input — a local project path, an arxiv URL, or an arxiv ID.
        llm_kwargs: LLM parameters, passed through unchanged.
        plugin_kwargs: plugin parameters; ``advanced_arg`` may carry extra
            translation requirements and/or a leading ``--no-cache`` flag
            that disables the cached-translation shortcut.
        chatbot: chat display handle for progress messages.
        history: chat history (cleared below to avoid input overflow).
        system_prompt: silent system prompt for the LLM.
        user_request: request info of the current user (IP address etc.).

    Yields UI updates; returns True on a successful PDF build, else False.
    """
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "对整个Latex项目进行翻译, 生成中文PDF。函数插件贡献者: Binary-Husky。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    # Bugfix: str.lstrip returns a NEW string; the original call discarded
    # the result, so the "--no-cache" flag text leaked into the prompt
    # requirements. Strip the flag prefix and rebind.
    if no_cache: more_req = more_req[len("--no-cache"):].strip()
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    # Probe for a working pdflatex and the local latex helper module;
    # fail early with an installation hint if either is missing.
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- clear history and read input ------------->
    history = []
    try:
        # For arxiv links/IDs this downloads and extracts the e-print source;
        # for local paths it returns txt unchanged with arxiv_id == None.
        txt, arxiv_id = yield from arxiv_download(chatbot, history, txt, allow_cache)
    except tarfile.ReadError as e:
        yield from update_ui_lastest_msg(
            "无法自动下载该论文的Latex源码,请前往arxiv打开此论文下载页面,点other Formats,然后download source手动下载latex源码包。接下来调用本地Latex翻译插件即可。",
            chatbot=chatbot, history=history)
        return

    if txt.endswith('.pdf'):
        # arxiv_download returned a cached, already-translated PDF.
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"发现已经存在翻译好的PDF文档")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder, arxiv_id)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                      chatbot, history, system_prompt, mode='translate_zh',
                                      switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                  main_file_modified='merge_translate_zh', mode='translate_zh',
                                  work_folder_original=project_folder, work_folder_modified=project_folder,
                                  work_folder=project_folder)

    # <-------------- zip PDF ------------->
    # Always package results, even on compile failure, so the user can
    # inspect or report the intermediate .tex files.
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
|
386 |
+
|
387 |
+
|
388 |
+
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- 插件主程序3 =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
389 |
+
|
390 |
+
@CatchException
def PDF翻译中文并重新编译PDF(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Plugin entry point: convert a PDF into a LaTeX project (via Mathpix),
    translate it into Chinese, then recompile it back into a PDF.

    Args:
        txt: user input — a local path containing exactly one PDF file.
        llm_kwargs: LLM parameters, passed through unchanged.
        plugin_kwargs: plugin parameters; ``advanced_arg`` may carry extra
            translation requirements and/or a leading ``--no-cache`` flag.
        chatbot: chat display handle for progress messages.
        history: chat history.
        system_prompt: silent system prompt for the LLM.
        web_port: web port of the current session (kept for interface
            compatibility with the plugin dispatcher).

    Yields UI updates; returns True on a successful PDF build, else False.
    """
    # <-------------- information about this plugin ------------->
    chatbot.append([
        "函数插件功能?",
        "将PDF转换为Latex项目,翻译为中文后重新编译为PDF。函数插件贡献者: Marroh。注意事项: 此插件Windows支持最佳,Linux下必须使用Docker安装,详见项目主README.md。目前仅支持GPT3.5/GPT4,其他模型转化效果未知。目前对机器学习类文献转化效果最好,其他类型文献转化效果未知。"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh UI

    # <-------------- more requirements ------------->
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    more_req = plugin_kwargs.get("advanced_arg", "")
    no_cache = more_req.startswith("--no-cache")
    # Bugfix: str.lstrip returns a NEW string; the original call discarded
    # the result, so the "--no-cache" flag text leaked into the prompt
    # requirements. Strip the flag prefix and rebind.
    if no_cache: more_req = more_req[len("--no-cache"):].strip()
    allow_cache = not no_cache
    _switch_prompt_ = partial(switch_prompt, more_requirement=more_req)

    # <-------------- check deps ------------->
    # Probe for a working pdflatex and the local latex helper module;
    # fail early with an installation hint if either is missing.
    try:
        import glob, os, time, subprocess
        subprocess.Popen(['pdflatex', '-version'])
        from .latex_fns.latex_actions import Latex精细分解与转化, 编译Latex
    except Exception as e:
        chatbot.append([f"解析项目: {txt}",
                        f"尝试执行Latex指令失败。Latex没有安装, 或者不在环境变量PATH中。安装方法https://tug.org/texlive/。报错信息\n\n```\n\n{trimmed_format_exc()}\n\n```\n\n"])
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- clear history and read input ------------->
    if os.path.exists(txt):
        project_folder = txt
    else:
        if txt == "": txt = '空空如也的输入栏'
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无法处理: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    if len(file_manifest) != 1:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"不支持同时处理多个pdf文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return
    app_id, app_key = get_conf('MATHPIX_APPID', 'MATHPIX_APPKEY')
    if len(app_id) == 0 or len(app_key) == 0:
        report_exception(chatbot, history, a="缺失 MATHPIX_APPID 和 MATHPIX_APPKEY。", b=f"请配置 MATHPIX_APPID 和 MATHPIX_APPKEY")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- convert pdf into tex ------------->
    project_folder = pdf2tex_project(file_manifest[0])
    if project_folder is None:
        # pdf2tex_project returns None when the Mathpix conversion fails;
        # bail out instead of crashing on glob(f'{None}/**/*.tex') below.
        report_exception(chatbot, history, a=f"解析项目: {txt}", b="PDF转Latex失败(Mathpix转换出错)")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # Translate English Latex to Chinese Latex, and compile it
    file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
    if len(file_manifest) == 0:
        report_exception(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.tex文件: {txt}")
        yield from update_ui(chatbot=chatbot, history=history)  # refresh UI
        return

    # <-------------- if is a zip/tar file ------------->
    project_folder = desend_to_extracted_folder_if_exist(project_folder)

    # <-------------- move latex project away from temp folder ------------->
    project_folder = move_project(project_folder)

    # <-------------- if merge_translate_zh is already generated, skip gpt req ------------->
    if not os.path.exists(project_folder + '/merge_translate_zh.tex'):
        yield from Latex精细分解与转化(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
                                      chatbot, history, system_prompt, mode='translate_zh',
                                      switch_prompt=_switch_prompt_)

    # <-------------- compile PDF ------------->
    success = yield from 编译Latex(chatbot, history, main_file_original='merge',
                                  main_file_modified='merge_translate_zh', mode='translate_zh',
                                  work_folder_original=project_folder, work_folder_modified=project_folder,
                                  work_folder=project_folder)

    # <-------------- zip PDF ------------->
    # Always package results, even on compile failure, so the user can
    # inspect or report the intermediate .tex files.
    zip_res = zip_result(project_folder)
    if success:
        chatbot.append((f"成功啦", '请查收结果(压缩包)...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)
    else:
        chatbot.append((f"失败了",
                        '虽然PDF生成失败了, 但请查收结果(压缩包), 内含已经翻译的Tex文档, 您可以到Github Issue区, 用该压缩包进行反馈。如系统是Linux,请检查系统字体(见Github wiki) ...'))
        yield from update_ui(chatbot=chatbot, history=history);
        time.sleep(1)  # refresh UI
        promote_file_to_downloadzone(file=zip_res, chatbot=chatbot)

    # <-------------- we are done ------------->
    return success
|
crazy_functions/agent_fns/pipe.py
CHANGED
@@ -9,7 +9,7 @@ class PipeCom:
|
|
9 |
|
10 |
|
11 |
class PluginMultiprocessManager:
|
12 |
-
def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
13 |
# ⭐ run in main process
|
14 |
self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
|
15 |
self.previous_work_dir_files = {}
|
@@ -18,7 +18,7 @@ class PluginMultiprocessManager:
|
|
18 |
self.chatbot = chatbot
|
19 |
self.history = history
|
20 |
self.system_prompt = system_prompt
|
21 |
-
# self.
|
22 |
self.alive = True
|
23 |
self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
|
24 |
self.last_user_input = ""
|
|
|
9 |
|
10 |
|
11 |
class PluginMultiprocessManager:
|
12 |
+
def __init__(self, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
13 |
# ⭐ run in main process
|
14 |
self.autogen_work_dir = os.path.join(get_log_folder("autogen"), gen_time_str())
|
15 |
self.previous_work_dir_files = {}
|
|
|
18 |
self.chatbot = chatbot
|
19 |
self.history = history
|
20 |
self.system_prompt = system_prompt
|
21 |
+
# self.user_request = user_request
|
22 |
self.alive = True
|
23 |
self.use_docker = get_conf("AUTOGEN_USE_DOCKER")
|
24 |
self.last_user_input = ""
|
crazy_functions/chatglm微调工具.py
CHANGED
@@ -32,7 +32,7 @@ def string_to_options(arguments):
|
|
32 |
return args
|
33 |
|
34 |
@CatchException
|
35 |
-
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
36 |
"""
|
37 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
38 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -40,7 +40,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|
40 |
chatbot 聊天显示框的句柄,用于显示给用户
|
41 |
history 聊天历史,前情提要
|
42 |
system_prompt 给gpt的静默提醒
|
43 |
-
|
44 |
"""
|
45 |
history = [] # 清空历史,以免输入溢出
|
46 |
chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
|
@@ -80,7 +80,7 @@ def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|
80 |
|
81 |
|
82 |
@CatchException
|
83 |
-
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
84 |
"""
|
85 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
86 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -88,7 +88,7 @@ def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
88 |
chatbot 聊天显示框的句柄,用于显示给用户
|
89 |
history 聊天历史,前情提要
|
90 |
system_prompt 给gpt的静默提醒
|
91 |
-
|
92 |
"""
|
93 |
import subprocess
|
94 |
history = [] # 清空历史,以免输入溢出
|
|
|
32 |
return args
|
33 |
|
34 |
@CatchException
|
35 |
+
def 微调数据集生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
36 |
"""
|
37 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
38 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
40 |
chatbot 聊天显示框的句柄,用于显示给用户
|
41 |
history 聊天历史,前情提要
|
42 |
system_prompt 给gpt的静默提醒
|
43 |
+
user_request 当前用户的请求信息(IP地址等)
|
44 |
"""
|
45 |
history = [] # 清空历史,以免输入溢出
|
46 |
chatbot.append(("这是什么功能?", "[Local Message] 微调数据集生成"))
|
|
|
80 |
|
81 |
|
82 |
@CatchException
|
83 |
+
def 启动微调(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
84 |
"""
|
85 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
86 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
88 |
chatbot 聊天显示框的句柄,用于显示给用户
|
89 |
history 聊天历史,前情提要
|
90 |
system_prompt 给gpt的静默提醒
|
91 |
+
user_request 当前用户的请求信息(IP地址等)
|
92 |
"""
|
93 |
import subprocess
|
94 |
history = [] # 清空历史,以免输入溢出
|
crazy_functions/crazy_utils.py
CHANGED
@@ -12,7 +12,7 @@ def input_clipping(inputs, history, max_token_limit):
|
|
12 |
mode = 'input-and-history'
|
13 |
# 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
|
14 |
input_token_num = get_token_num(inputs)
|
15 |
-
if input_token_num < max_token_limit//2:
|
16 |
mode = 'only-history'
|
17 |
max_token_limit = max_token_limit - input_token_num
|
18 |
|
@@ -21,7 +21,7 @@ def input_clipping(inputs, history, max_token_limit):
|
|
21 |
n_token = get_token_num('\n'.join(everything))
|
22 |
everything_token = [get_token_num(e) for e in everything]
|
23 |
delta = max(everything_token) // 16 # 截断时的颗粒度
|
24 |
-
|
25 |
while n_token > max_token_limit:
|
26 |
where = np.argmax(everything_token)
|
27 |
encoded = enc.encode(everything[where], disallowed_special=())
|
@@ -38,9 +38,9 @@ def input_clipping(inputs, history, max_token_limit):
|
|
38 |
return inputs, history
|
39 |
|
40 |
def request_gpt_model_in_new_thread_with_ui_alive(
|
41 |
-
inputs, inputs_show_user, llm_kwargs,
|
42 |
chatbot, history, sys_prompt, refresh_interval=0.2,
|
43 |
-
handle_token_exceed=True,
|
44 |
retry_times_at_unknown_error=2,
|
45 |
):
|
46 |
"""
|
@@ -77,7 +77,7 @@ def request_gpt_model_in_new_thread_with_ui_alive(
|
|
77 |
exceeded_cnt = 0
|
78 |
while True:
|
79 |
# watchdog error
|
80 |
-
if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
|
81 |
raise RuntimeError("检测到程序终止。")
|
82 |
try:
|
83 |
# 【第一种情况】:顺利完成
|
@@ -140,12 +140,12 @@ def can_multi_process(llm):
|
|
140 |
if llm.startswith('api2d-'): return True
|
141 |
if llm.startswith('azure-'): return True
|
142 |
if llm.startswith('spark'): return True
|
143 |
-
if llm.startswith('zhipuai'): return True
|
144 |
return False
|
145 |
|
146 |
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
147 |
-
inputs_array, inputs_show_user_array, llm_kwargs,
|
148 |
-
chatbot, history_array, sys_prompt_array,
|
149 |
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
|
150 |
handle_token_exceed=True, show_user_at_complete=False,
|
151 |
retry_times_at_unknown_error=2,
|
@@ -189,7 +189,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
189 |
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
|
190 |
if not can_multi_process(llm_kwargs['llm_model']):
|
191 |
max_workers = 1
|
192 |
-
|
193 |
executor = ThreadPoolExecutor(max_workers=max_workers)
|
194 |
n_frag = len(inputs_array)
|
195 |
# 用户反馈
|
@@ -214,7 +214,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
214 |
try:
|
215 |
# 【第一种情况】:顺利完成
|
216 |
gpt_say = predict_no_ui_long_connection(
|
217 |
-
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
|
218 |
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
|
219 |
)
|
220 |
mutable[index][2] = "已成功"
|
@@ -246,7 +246,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
246 |
print(tb_str)
|
247 |
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
248 |
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
|
249 |
-
if retry_op > 0:
|
250 |
retry_op -= 1
|
251 |
wait = random.randint(5, 20)
|
252 |
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
|
@@ -284,12 +284,11 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
284 |
# 在前端打印些好玩的东西
|
285 |
for thread_index, _ in enumerate(worker_done):
|
286 |
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
|
287 |
-
replace('\n', '').replace('`', '.').replace(
|
288 |
-
' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
|
289 |
observe_win.append(print_something_really_funny)
|
290 |
# 在前端打印些好玩的东西
|
291 |
-
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
292 |
-
if not done else f'`{mutable[thread_index][2]}`\n\n'
|
293 |
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
|
294 |
# 在前端打印些好玩的东西
|
295 |
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
|
@@ -303,7 +302,7 @@ def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
|
303 |
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
304 |
gpt_res = f.result()
|
305 |
gpt_response_collection.extend([inputs_show_user, gpt_res])
|
306 |
-
|
307 |
# 是否在结束时,在界面上显示结果
|
308 |
if show_user_at_complete:
|
309 |
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
@@ -353,7 +352,7 @@ def read_and_clean_pdf_text(fp):
|
|
353 |
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
|
354 |
fsize_statiscs[wtf['size']] += len(wtf['text'])
|
355 |
return max(fsize_statiscs, key=fsize_statiscs.get)
|
356 |
-
|
357 |
def ffsize_same(a,b):
|
358 |
"""
|
359 |
提取字体大小是否近似相等
|
@@ -389,7 +388,7 @@ def read_and_clean_pdf_text(fp):
|
|
389 |
if index == 0:
|
390 |
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
|
391 |
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
|
392 |
-
|
393 |
############################## <第 2 步,获取正文主字体> ##################################
|
394 |
try:
|
395 |
fsize_statiscs = {}
|
@@ -405,7 +404,7 @@ def read_and_clean_pdf_text(fp):
|
|
405 |
mega_sec = []
|
406 |
sec = []
|
407 |
for index, line in enumerate(meta_line):
|
408 |
-
if index == 0:
|
409 |
sec.append(line[fc])
|
410 |
continue
|
411 |
if REMOVE_FOOT_NOTE:
|
@@ -502,12 +501,12 @@ def get_files_from_everything(txt, type): # type='.md'
|
|
502 |
"""
|
503 |
这个函数是用来获取指定目录下所有指定类型(如.md)的文件,并且对于网络上的文件,也可以获取它。
|
504 |
下面是对每个参数和返回值的说明:
|
505 |
-
参数
|
506 |
-
- txt: 路径或网址,表示要搜索的文件或者文件夹路径或网络上的文件。
|
507 |
- type: 字符串,表示要搜索的文件类型。默认是.md。
|
508 |
-
返回值
|
509 |
-
- success: 布尔值,表示函数是否成功执行。
|
510 |
-
- file_manifest: 文件路径列表,里面包含以指定类型为后缀名的所有文件的绝对路径。
|
511 |
- project_folder: 字符串,表示文件所在的文件夹路径。如果是网络上的文件,就是临时文件夹的路径。
|
512 |
该函数详细注释已添加,请确认是否满足您的需要。
|
513 |
"""
|
@@ -571,7 +570,7 @@ class nougat_interface():
|
|
571 |
def NOUGAT_parse_pdf(self, fp, chatbot, history):
|
572 |
from toolbox import update_ui_lastest_msg
|
573 |
|
574 |
-
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
|
575 |
chatbot=chatbot, history=history, delay=0)
|
576 |
self.threadLock.acquire()
|
577 |
import glob, threading, os
|
@@ -579,7 +578,7 @@ class nougat_interface():
|
|
579 |
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
|
580 |
os.makedirs(dst)
|
581 |
|
582 |
-
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
|
583 |
chatbot=chatbot, history=history, delay=0)
|
584 |
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
|
585 |
res = glob.glob(os.path.join(dst,'*.mmd'))
|
|
|
12 |
mode = 'input-and-history'
|
13 |
# 当 输入部分的token占比 小于 全文的一半时,只裁剪历史
|
14 |
input_token_num = get_token_num(inputs)
|
15 |
+
if input_token_num < max_token_limit//2:
|
16 |
mode = 'only-history'
|
17 |
max_token_limit = max_token_limit - input_token_num
|
18 |
|
|
|
21 |
n_token = get_token_num('\n'.join(everything))
|
22 |
everything_token = [get_token_num(e) for e in everything]
|
23 |
delta = max(everything_token) // 16 # 截断时的颗粒度
|
24 |
+
|
25 |
while n_token > max_token_limit:
|
26 |
where = np.argmax(everything_token)
|
27 |
encoded = enc.encode(everything[where], disallowed_special=())
|
|
|
38 |
return inputs, history
|
39 |
|
40 |
def request_gpt_model_in_new_thread_with_ui_alive(
|
41 |
+
inputs, inputs_show_user, llm_kwargs,
|
42 |
chatbot, history, sys_prompt, refresh_interval=0.2,
|
43 |
+
handle_token_exceed=True,
|
44 |
retry_times_at_unknown_error=2,
|
45 |
):
|
46 |
"""
|
|
|
77 |
exceeded_cnt = 0
|
78 |
while True:
|
79 |
# watchdog error
|
80 |
+
if len(mutable) >= 2 and (time.time()-mutable[1]) > watch_dog_patience:
|
81 |
raise RuntimeError("检测到程序终止。")
|
82 |
try:
|
83 |
# 【第一种情况】:顺利完成
|
|
|
140 |
if llm.startswith('api2d-'): return True
|
141 |
if llm.startswith('azure-'): return True
|
142 |
if llm.startswith('spark'): return True
|
143 |
+
if llm.startswith('zhipuai') or llm.startswith('glm-'): return True
|
144 |
return False
|
145 |
|
146 |
def request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
|
147 |
+
inputs_array, inputs_show_user_array, llm_kwargs,
|
148 |
+
chatbot, history_array, sys_prompt_array,
|
149 |
refresh_interval=0.2, max_workers=-1, scroller_max_len=30,
|
150 |
handle_token_exceed=True, show_user_at_complete=False,
|
151 |
retry_times_at_unknown_error=2,
|
|
|
189 |
# 屏蔽掉 chatglm的多线程,可能会导致严重卡顿
|
190 |
if not can_multi_process(llm_kwargs['llm_model']):
|
191 |
max_workers = 1
|
192 |
+
|
193 |
executor = ThreadPoolExecutor(max_workers=max_workers)
|
194 |
n_frag = len(inputs_array)
|
195 |
# 用户反馈
|
|
|
214 |
try:
|
215 |
# 【第一种情况】:顺利完成
|
216 |
gpt_say = predict_no_ui_long_connection(
|
217 |
+
inputs=inputs, llm_kwargs=llm_kwargs, history=history,
|
218 |
sys_prompt=sys_prompt, observe_window=mutable[index], console_slience=True
|
219 |
)
|
220 |
mutable[index][2] = "已成功"
|
|
|
246 |
print(tb_str)
|
247 |
gpt_say += f"[Local Message] 警告,线程{index}在执行过程中遭遇问题, Traceback:\n\n{tb_str}\n\n"
|
248 |
if len(mutable[index][0]) > 0: gpt_say += "此线程失败前收到的回答:\n\n" + mutable[index][0]
|
249 |
+
if retry_op > 0:
|
250 |
retry_op -= 1
|
251 |
wait = random.randint(5, 20)
|
252 |
if ("Rate limit reached" in tb_str) or ("Too Many Requests" in tb_str):
|
|
|
284 |
# 在前端打印些好玩的东西
|
285 |
for thread_index, _ in enumerate(worker_done):
|
286 |
print_something_really_funny = "[ ...`"+mutable[thread_index][0][-scroller_max_len:].\
|
287 |
+
replace('\n', '').replace('`', '.').replace(' ', '.').replace('<br/>', '.....').replace('$', '.')+"`... ]"
|
|
|
288 |
observe_win.append(print_something_really_funny)
|
289 |
# 在前端打印些好玩的东西
|
290 |
+
stat_str = ''.join([f'`{mutable[thread_index][2]}`: {obs}\n\n'
|
291 |
+
if not done else f'`{mutable[thread_index][2]}`\n\n'
|
292 |
for thread_index, done, obs in zip(range(len(worker_done)), worker_done, observe_win)])
|
293 |
# 在前端打印些好玩的东西
|
294 |
chatbot[-1] = [chatbot[-1][0], f'多线程操作已经开始,完成情况: \n\n{stat_str}' + ''.join(['.']*(cnt % 10+1))]
|
|
|
302 |
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
303 |
gpt_res = f.result()
|
304 |
gpt_response_collection.extend([inputs_show_user, gpt_res])
|
305 |
+
|
306 |
# 是否在结束时,在界面上显示结果
|
307 |
if show_user_at_complete:
|
308 |
for inputs_show_user, f in zip(inputs_show_user_array, futures):
|
|
|
352 |
if wtf['size'] not in fsize_statiscs: fsize_statiscs[wtf['size']] = 0
|
353 |
fsize_statiscs[wtf['size']] += len(wtf['text'])
|
354 |
return max(fsize_statiscs, key=fsize_statiscs.get)
|
355 |
+
|
356 |
def ffsize_same(a,b):
|
357 |
"""
|
358 |
提取字体大小是否近似相等
|
|
|
388 |
if index == 0:
|
389 |
page_one_meta = [" ".join(["".join([wtf['text'] for wtf in l['spans']]) for l in t['lines']]).replace(
|
390 |
'- ', '') for t in text_areas['blocks'] if 'lines' in t]
|
391 |
+
|
392 |
############################## <第 2 步,获取正文主字体> ##################################
|
393 |
try:
|
394 |
fsize_statiscs = {}
|
|
|
404 |
mega_sec = []
|
405 |
sec = []
|
406 |
for index, line in enumerate(meta_line):
|
407 |
+
if index == 0:
|
408 |
sec.append(line[fc])
|
409 |
continue
|
410 |
if REMOVE_FOOT_NOTE:
|
|
|
501 |
"""
|
502 |
这个函数是用来获取指定目录下所有指定类型(如.md)的文件,并且对于网络上的文件,也可以获取它。
|
503 |
下面是对每个参数和返回值的说明:
|
504 |
+
参数
|
505 |
+
- txt: 路径或网址,表示要搜索的文件或者文件夹路径或网络上的文件。
|
506 |
- type: 字符串,表示要搜索的文件类型。默认是.md。
|
507 |
+
返回值
|
508 |
+
- success: 布尔值,表示函数是否成功执行。
|
509 |
+
- file_manifest: 文件路径列表,里面包含以指定类型为后缀名的所有文件的绝对路径。
|
510 |
- project_folder: 字符串,表示文件所在的文件夹路径。如果是网络上的文件,就是临时文件夹的路径。
|
511 |
该函数详细注释已添加,请确认是否满足您的需要。
|
512 |
"""
|
|
|
570 |
def NOUGAT_parse_pdf(self, fp, chatbot, history):
|
571 |
from toolbox import update_ui_lastest_msg
|
572 |
|
573 |
+
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在排队, 等待线程锁...",
|
574 |
chatbot=chatbot, history=history, delay=0)
|
575 |
self.threadLock.acquire()
|
576 |
import glob, threading, os
|
|
|
578 |
dst = os.path.join(get_log_folder(plugin_name='nougat'), gen_time_str())
|
579 |
os.makedirs(dst)
|
580 |
|
581 |
+
yield from update_ui_lastest_msg("正在解析论文, 请稍候。进度:正在加载NOUGAT... (提示:首次运行需要花费较长时间下载NOUGAT参数)",
|
582 |
chatbot=chatbot, history=history, delay=0)
|
583 |
self.nougat_with_timeout(f'nougat --out "{os.path.abspath(dst)}" "{os.path.abspath(fp)}"', os.getcwd(), timeout=3600)
|
584 |
res = glob.glob(os.path.join(dst,'*.mmd'))
|
crazy_functions/diagram_fns/file_tree.py
ADDED
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
from textwrap import indent
|
3 |
+
|
4 |
+
class FileNode:
    """A node of a file tree, used to emit mermaid flowchart edge definitions.

    Directories are inner nodes, files are leaves. After calling
    `print_files_recursively`, `parenting_ship` holds one mermaid edge string
    per parent/child link (plus dashed comment links).
    """

    def __init__(self, name):
        self.name = name                   # file or directory name of this node
        self.children = []                 # child FileNode objects
        self.is_leaf = False               # True for files, False for directories
        self.level = 0                     # depth in the tree (root == 0)
        self.parenting_ship = []           # accumulated mermaid edge definitions
        self.comment = ""                  # sanitized comment rendered next to the node
        self.comment_maxlen_show = 50      # comments longer than this are truncated

    @staticmethod
    def add_linebreaks_at_spaces(string, interval=10):
        """Hard-wrap `string` with a newline every `interval` characters.

        NOTE(review): despite the name, this breaks at fixed offsets, not at
        spaces — confirm the name is intentional.
        """
        return '\n'.join(string[i:i+interval] for i in range(0, len(string), interval))

    def sanitize_comment(self, comment):
        """Truncate the comment, strip mermaid-breaking characters and wrap it in backticks."""
        if len(comment) > self.comment_maxlen_show: suf = '...'
        else: suf = ''
        comment = comment[:self.comment_maxlen_show]
        # Remove characters that would break the mermaid label syntax.
        # (The original chained a second redundant `.replace('`', '')` — removed.)
        comment = comment.replace('\"', '').replace('`', '').replace('\n', '').replace('$', '')
        comment = self.add_linebreaks_at_spaces(comment, 10)
        return '`' + comment + suf + '`'

    def add_file(self, file_path, file_comment):
        """Insert `file_path` into the tree, creating intermediate directory nodes as needed.

        The leaf node gets `file_comment` (sanitized) attached.
        """
        directory_names, file_name = os.path.split(file_path)
        current_node = self
        level = 1
        if directory_names == "":
            # Top-level file: attach the leaf directly under this node.
            new_node = FileNode(file_name)
            current_node.children.append(new_node)
            new_node.is_leaf = True
            new_node.comment = self.sanitize_comment(file_comment)
            new_node.level = level
            current_node = new_node
        else:
            dnamesplit = directory_names.split(os.sep)
            for i, directory_name in enumerate(dnamesplit):
                found_child = False
                level += 1
                # Reuse an existing directory node if one with this name is present.
                for child in current_node.children:
                    if child.name == directory_name:
                        current_node = child
                        found_child = True
                        break
                if not found_child:
                    new_node = FileNode(directory_name)
                    current_node.children.append(new_node)
                    new_node.level = level - 1
                    current_node = new_node
            # Finally attach the file itself as a leaf of the innermost directory.
            term = FileNode(file_name)
            term.level = level
            term.comment = self.sanitize_comment(file_comment)
            term.is_leaf = True
            current_node.children.append(term)

    def print_files_recursively(self, level=0, code="R0"):
        """Depth-first walk: print a debug dump and collect mermaid edge strings.

        Side effect: fills `self.parenting_ship` with the edges of this node and
        (recursively) of all descendants. `code` is the unique mermaid node id,
        extended with the child index at each level.
        """
        print(' '*level + self.name + ' ' + str(self.is_leaf) + ' ' + str(self.level))
        # Node label: plain box for files, subroutine box for directories.
        # Hoisted out of the loop — it only depends on this node (was recomputed
        # per child, and duplicated again as `pc1` for the comment edge).
        p1 = f"""{code}[\"🗎{self.name}\"]""" if self.is_leaf else f"""{code}[[\"📁{self.name}\"]]"""
        for j, child in enumerate(self.children):
            child.print_files_recursively(level=level+1, code=code+str(j))
            self.parenting_ship.extend(child.parenting_ship)
            p2 = """ --> """
            p3 = f"""{code+str(j)}[\"🗎{child.name}\"]""" if child.is_leaf else f"""{code+str(j)}[[\"📁{child.name}\"]]"""
            edge_code = p1 + p2 + p3
            if edge_code in self.parenting_ship:
                continue  # avoid emitting duplicate edges
            self.parenting_ship.append(edge_code)
        if self.comment != "":
            # Attach the sanitized comment via a dashed link, styled by classDef Comment.
            pc2 = """ -.-x """
            pc3 = f"""C{code}[\"{self.comment}\"]:::Comment"""
            edge_code = p1 + pc2 + pc3
            self.parenting_ship.append(edge_code)
|
76 |
+
|
77 |
+
|
78 |
+
MERMAID_TEMPLATE = r"""
|
79 |
+
```mermaid
|
80 |
+
flowchart LR
|
81 |
+
%% <gpt_academic_hide_mermaid_code> 一个特殊标记,用于在生成mermaid图表时隐藏代码块
|
82 |
+
classDef Comment stroke-dasharray: 5 5
|
83 |
+
subgraph {graph_name}
|
84 |
+
{relationship}
|
85 |
+
end
|
86 |
+
```
|
87 |
+
"""
|
88 |
+
|
89 |
+
def build_file_tree_mermaid_diagram(file_manifest, file_comments, graph_name):
    """Render a mermaid flowchart showing the given files as a directory tree.

    Each entry of `file_manifest` is paired with the comment at the same index
    of `file_comments`; `graph_name` becomes the subgraph title.
    """
    root = FileNode("root")
    for path, comment in zip(file_manifest, file_comments):
        root.add_file(path, comment)
    # Walking the tree populates `parenting_ship` with the mermaid edge
    # definitions (and prints a debug dump of the tree as a side effect).
    root.print_files_recursively()
    relationship = indent("\n".join(root.parenting_ship), prefix=" " * 8)
    return MERMAID_TEMPLATE.format(graph_name=graph_name, relationship=relationship)
|
99 |
+
|
100 |
+
if __name__ == "__main__":
    # Demo: render a mermaid file tree for a sample manifest.
    file_manifest = [
        "cradle_void_terminal.ipynb",
        "tests/test_utils.py",
        "tests/test_plugins.py",
        "tests/test_llms.py",
        "config.py",
        "build/ChatGLM-6b-onnx-u8s8/chatglm-6b-int8-onnx-merged/model_weights_0.bin",
        "crazy_functions/latex_fns/latex_actions.py",
        "crazy_functions/latex_fns/latex_toolbox.py"
    ]
    # One (deliberately over-long) comment per file above, to exercise truncation.
    # NOTE: the third entry contained mojibake ("��于") — repaired to "用于".
    file_comments = [
        "根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件根据位置和名称,可能是一个模块的初始化文件",
        "包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器包含一些用于文本处理和模型微调的函数和装饰器",
        "用于构建HTML报告的类和方法用于构建HTML报告的类和方法用于构建HTML报告的类和方法",
        "包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码包含了用于文本切分的函数,以及处理PDF文件的示例代码",
        "用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数用于解析和翻译PDF文件的功能和相关辅助函数",
        "是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块是一个包的初始化文件,用于初始化包的属性和导入模块",
        "用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器用于加载和分割文件中的文本的通用文件加载器",
        "包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类包含了用于构建和管理向量数据库的函数和类",
    ]
    print(build_file_tree_mermaid_diagram(file_manifest, file_comments, "项目文件树"))
|
crazy_functions/pdf_fns/parse_word.py
ADDED
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from crazy_functions.crazy_utils import read_and_clean_pdf_text, get_files_from_everything
|
2 |
+
import os
|
3 |
+
import re
|
4 |
+
def extract_text_from_files(txt, chatbot, history):
    """
    Locate pdf/md/word files referenced by `txt` and extract their text.

    Args:
        txt: user input — either plain text or a path/URL pointing at file(s)
        chatbot: chatbot inputs and outputs (UI dialog handle, for visualization)
        history (list): list of chat history

    Returns (tuple):
        success (bool): whether file content was extracted
        final_result (list): extracted text per file (or the raw `txt` on failure)
        page_one (list): first page / abstract per file
        file_manifest (list): relative file paths
        excption (str): marker for issues the user must resolve manually
            ("word" = legacy .doc found, "pdf"/"word_pip" = missing dependency);
            empty string when no such issue occurred
    """

    final_result = []
    page_one = []
    file_manifest = []
    excption = ""

    if txt == "":
        # Input box is empty-ish / not a file reference: echo it back unchanged.
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, excption

    # Collect files of each supported type referenced by the input.
    file_pdf, pdf_manifest, folder_pdf = get_files_from_everything(txt, '.pdf')
    file_md, md_manifest, folder_md = get_files_from_everything(txt, '.md')
    file_word, word_manifest, folder_word = get_files_from_everything(txt, '.docx')
    file_doc, doc_manifest, folder_doc = get_files_from_everything(txt, '.doc')

    if file_doc:
        # Legacy .doc is not supported — ask the user to convert to .docx.
        excption = "word"
        return False, final_result, page_one, file_manifest, excption

    file_num = len(pdf_manifest) + len(md_manifest) + len(word_manifest)
    if file_num == 0:
        # Nothing matched: treat the input as plain text and return it as-is.
        final_result.append(txt)
        return False, final_result, page_one, file_manifest, excption

    if file_pdf:
        try:  # Probe the dependency; on failure report so the user can install it.
            import fitz
        except ImportError:
            excption = "pdf"
            return False, final_result, page_one, file_manifest, excption
        for fp in pdf_manifest:
            file_content, pdf_one = read_and_clean_pdf_text(fp)  # (try to) split the PDF by section
            file_content = file_content.encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            pdf_one = str(pdf_one).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
            final_result.append(file_content)
            page_one.append(pdf_one)
            file_manifest.append(os.path.relpath(fp, folder_pdf))

    if file_md:
        for fp in md_manifest:
            with open(fp, 'r', encoding='utf-8', errors='replace') as f:
                file_content = f.read()
            file_content = file_content.encode('utf-8', 'ignore').decode()
            # Use the markdown level-1 headers as the "abstract" for this file.
            headers = re.findall(r'^#\s(.*)$', file_content, re.MULTILINE)
            if len(headers) > 0:
                page_one.append("\n".join(headers))  # join all headers with newlines
            else:
                page_one.append("")
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_md))

    if file_word:
        try:  # Probe the dependency; on failure report so the user can install it.
            from docx import Document
        except ImportError:
            excption = "word_pip"
            return False, final_result, page_one, file_manifest, excption
        for fp in word_manifest:
            doc = Document(fp)
            file_content = '\n'.join([p.text for p in doc.paragraphs])
            file_content = file_content.encode('utf-8', 'ignore').decode()
            page_one.append(file_content[:200])  # first 200 chars serve as the abstract
            final_result.append(file_content)
            file_manifest.append(os.path.relpath(fp, folder_word))

    return True, final_result, page_one, file_manifest, excption
|
crazy_functions/下载arxiv论文翻译摘要.py
CHANGED
@@ -130,7 +130,7 @@ def get_name(_url_):
|
|
130 |
|
131 |
|
132 |
@CatchException
|
133 |
-
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
134 |
|
135 |
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
|
136 |
import glob
|
|
|
130 |
|
131 |
|
132 |
@CatchException
|
133 |
+
def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
134 |
|
135 |
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……"
|
136 |
import glob
|
crazy_functions/互动小游戏.py
CHANGED
@@ -5,7 +5,7 @@ from request_llms.bridge_all import predict_no_ui_long_connection
|
|
5 |
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
|
6 |
|
7 |
@CatchException
|
8 |
-
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
9 |
from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
|
10 |
# 清空历史
|
11 |
history = []
|
@@ -23,7 +23,7 @@ def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
23 |
|
24 |
|
25 |
@CatchException
|
26 |
-
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
27 |
from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
|
28 |
# 清空历史
|
29 |
history = []
|
|
|
5 |
from crazy_functions.game_fns.game_utils import get_code_block, is_same_thing
|
6 |
|
7 |
@CatchException
|
8 |
+
def 随机小游戏(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
9 |
from crazy_functions.game_fns.game_interactive_story import MiniGame_ResumeStory
|
10 |
# 清空历史
|
11 |
history = []
|
|
|
23 |
|
24 |
|
25 |
@CatchException
|
26 |
+
def 随机小游戏1(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
27 |
from crazy_functions.game_fns.game_ascii_art import MiniGame_ASCII_Art
|
28 |
# 清空历史
|
29 |
history = []
|
crazy_functions/交互功能函数模板.py
CHANGED
@@ -3,7 +3,7 @@ from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
|
3 |
|
4 |
|
5 |
@CatchException
|
6 |
-
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
7 |
"""
|
8 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
9 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
@@ -11,7 +11,7 @@ def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
11 |
chatbot 聊天显示框的句柄,用于显示给用户
|
12 |
history 聊天历史,前情提要
|
13 |
system_prompt 给gpt的静默提醒
|
14 |
-
|
15 |
"""
|
16 |
history = [] # 清空历史,以免输入溢出
|
17 |
chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
|
|
|
3 |
|
4 |
|
5 |
@CatchException
|
6 |
+
def 交互功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
7 |
"""
|
8 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
9 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
|
|
11 |
chatbot 聊天显示框的句柄,用于显示给用户
|
12 |
history 聊天历史,前情提要
|
13 |
system_prompt 给gpt的静默提醒
|
14 |
+
user_request 当前用户的请求信息(IP地址等)
|
15 |
"""
|
16 |
history = [] # 清空历史,以免输入溢出
|
17 |
chatbot.append(("这是什么功能?", "交互功能函数模板。在执行完成之后, 可以将自身的状态存储到cookie中, 等待用户的再次调用。"))
|
crazy_functions/函数动态生成.py
CHANGED
@@ -139,7 +139,7 @@ def get_recent_file_prompt_support(chatbot):
|
|
139 |
return path
|
140 |
|
141 |
@CatchException
|
142 |
-
def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
143 |
"""
|
144 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
145 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -147,7 +147,7 @@ def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
147 |
chatbot 聊天显示框的句柄,用于显示给用户
|
148 |
history 聊天历史,前情提要
|
149 |
system_prompt 给gpt的静默提醒
|
150 |
-
|
151 |
"""
|
152 |
|
153 |
# 清空历史
|
|
|
139 |
return path
|
140 |
|
141 |
@CatchException
|
142 |
+
def 函数动态生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
143 |
"""
|
144 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
145 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
147 |
chatbot 聊天显示框的句柄,用于显示给用户
|
148 |
history 聊天历史,前情提要
|
149 |
system_prompt 给gpt的静默提醒
|
150 |
+
user_request 当前用户的请求信息(IP地址等)
|
151 |
"""
|
152 |
|
153 |
# 清空历史
|
crazy_functions/命令行助手.py
CHANGED
@@ -4,7 +4,7 @@ from .crazy_utils import input_clipping
|
|
4 |
import copy, json
|
5 |
|
6 |
@CatchException
|
7 |
-
def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
8 |
"""
|
9 |
txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
|
10 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
@@ -12,7 +12,7 @@ def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
|
12 |
chatbot 聊天显示框的句柄, 用于显示给用户
|
13 |
history 聊天历史, 前情提要
|
14 |
system_prompt 给gpt的静默提醒
|
15 |
-
|
16 |
"""
|
17 |
# 清空历史, 以免输入溢出
|
18 |
history = []
|
|
|
4 |
import copy, json
|
5 |
|
6 |
@CatchException
|
7 |
+
def 命令行助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
8 |
"""
|
9 |
txt 输入栏用户输入的文本, 例如需要翻译的一段话, 再例如一个包含了待处理文件的路径
|
10 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
|
|
12 |
chatbot 聊天显示框的句柄, 用于显示给用户
|
13 |
history 聊天历史, 前情提要
|
14 |
system_prompt 给gpt的静默提醒
|
15 |
+
user_request 当前用户的请求信息(IP地址等)
|
16 |
"""
|
17 |
# 清空历史, 以免输入溢出
|
18 |
history = []
|
crazy_functions/图片生成.py
CHANGED
@@ -93,7 +93,7 @@ def edit_image(llm_kwargs, prompt, image_path, resolution="1024x1024", model="da
|
|
93 |
|
94 |
|
95 |
@CatchException
|
96 |
-
def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
97 |
"""
|
98 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
99 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -101,7 +101,7 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|
101 |
chatbot 聊天显示框的句柄,用于显示给用户
|
102 |
history 聊天历史,前情提要
|
103 |
system_prompt 给gpt的静默提醒
|
104 |
-
|
105 |
"""
|
106 |
history = [] # 清空历史,以免输入溢出
|
107 |
if prompt.strip() == "":
|
@@ -123,7 +123,7 @@ def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|
123 |
|
124 |
|
125 |
@CatchException
|
126 |
-
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
127 |
history = [] # 清空历史,以免输入溢出
|
128 |
if prompt.strip() == "":
|
129 |
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
|
@@ -209,7 +209,7 @@ class ImageEditState(GptAcademicState):
|
|
209 |
return all([x['value'] is not None for x in self.req])
|
210 |
|
211 |
@CatchException
|
212 |
-
def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
213 |
# 尚未完成
|
214 |
history = [] # 清空历史
|
215 |
state = ImageEditState.get_state(chatbot, ImageEditState)
|
|
|
93 |
|
94 |
|
95 |
@CatchException
|
96 |
+
def 图片生成_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
97 |
"""
|
98 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
99 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
101 |
chatbot 聊天显示框的句柄,用于显示给用户
|
102 |
history 聊天历史,前情提要
|
103 |
system_prompt 给gpt的静默提醒
|
104 |
+
user_request 当前用户的请求信息(IP地址等)
|
105 |
"""
|
106 |
history = [] # 清空历史,以免输入溢出
|
107 |
if prompt.strip() == "":
|
|
|
123 |
|
124 |
|
125 |
@CatchException
|
126 |
+
def 图片生成_DALLE3(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
127 |
history = [] # 清空历史,以免输入溢出
|
128 |
if prompt.strip() == "":
|
129 |
chatbot.append((prompt, "[Local Message] 图像生成提示为空白,请在“输入区”输入图像生成提示。"))
|
|
|
209 |
return all([x['value'] is not None for x in self.req])
|
210 |
|
211 |
@CatchException
|
212 |
+
def 图片修改_DALLE2(prompt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
213 |
# 尚未完成
|
214 |
history = [] # 清空历史
|
215 |
state = ImageEditState.get_state(chatbot, ImageEditState)
|
crazy_functions/多智能体.py
CHANGED
@@ -21,7 +21,7 @@ def remove_model_prefix(llm):
|
|
21 |
|
22 |
|
23 |
@CatchException
|
24 |
-
def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
25 |
"""
|
26 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
27 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -29,7 +29,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
29 |
chatbot 聊天显示框的句柄,用于显示给用户
|
30 |
history 聊天历史,前情提要
|
31 |
system_prompt 给gpt的静默提醒
|
32 |
-
|
33 |
"""
|
34 |
# 检查当前的模型是否符合要求
|
35 |
supported_llms = [
|
@@ -89,7 +89,7 @@ def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
89 |
history = []
|
90 |
chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
|
91 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
92 |
-
executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
93 |
persistent_class_multi_user_manager.set(persistent_key, executor)
|
94 |
exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")
|
95 |
|
|
|
21 |
|
22 |
|
23 |
@CatchException
|
24 |
+
def 多智能体终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
25 |
"""
|
26 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
27 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
29 |
chatbot 聊天显示框的句柄,用于显示给用户
|
30 |
history 聊天历史,前情提要
|
31 |
system_prompt 给gpt的静默提醒
|
32 |
+
user_request 当前用户的请求信息(IP地址等)
|
33 |
"""
|
34 |
# 检查当前的模型是否符合要求
|
35 |
supported_llms = [
|
|
|
89 |
history = []
|
90 |
chatbot.append(["正在启动: 多智能体终端", "插件动态生成, 执行开始, 作者 Microsoft & Binary-Husky."])
|
91 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
92 |
+
executor = AutoGenMath(llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
|
93 |
persistent_class_multi_user_manager.set(persistent_key, executor)
|
94 |
exit_reason = yield from executor.main_process_ui_control(txt, create_or_resume="create")
|
95 |
|
crazy_functions/对话历史存档.py
CHANGED
@@ -69,7 +69,7 @@ def read_file_to_chat(chatbot, history, file_name):
|
|
69 |
return chatbot, history
|
70 |
|
71 |
@CatchException
|
72 |
-
def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
73 |
"""
|
74 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
75 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -77,7 +77,7 @@ def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
77 |
chatbot 聊天显示框的句柄,用于显示给用户
|
78 |
history 聊天历史,前情提要
|
79 |
system_prompt 给gpt的静默提醒
|
80 |
-
|
81 |
"""
|
82 |
|
83 |
chatbot.append(("保存当前对话",
|
@@ -91,7 +91,7 @@ def hide_cwd(str):
|
|
91 |
return str.replace(current_path, replace_path)
|
92 |
|
93 |
@CatchException
|
94 |
-
def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
95 |
"""
|
96 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
97 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -99,7 +99,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
99 |
chatbot 聊天显示框的句柄,用于显示给用户
|
100 |
history 聊天历史,前情提要
|
101 |
system_prompt 给gpt的静默提醒
|
102 |
-
|
103 |
"""
|
104 |
from .crazy_utils import get_files_from_everything
|
105 |
success, file_manifest, _ = get_files_from_everything(txt, type='.html')
|
@@ -126,7 +126,7 @@ def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
126 |
return
|
127 |
|
128 |
@CatchException
|
129 |
-
def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
130 |
"""
|
131 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
132 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -134,7 +134,7 @@ def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot
|
|
134 |
chatbot 聊天显示框的句柄,用于显示给用户
|
135 |
history 聊天历史,前情提要
|
136 |
system_prompt 给gpt的静默提醒
|
137 |
-
|
138 |
"""
|
139 |
|
140 |
import glob, os
|
|
|
69 |
return chatbot, history
|
70 |
|
71 |
@CatchException
|
72 |
+
def 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
73 |
"""
|
74 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
75 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
77 |
chatbot 聊天显示框的句柄,用于显示给用户
|
78 |
history 聊天历史,前情提要
|
79 |
system_prompt 给gpt的静默提醒
|
80 |
+
user_request 当前用户的请求信息(IP地址等)
|
81 |
"""
|
82 |
|
83 |
chatbot.append(("保存当前对话",
|
|
|
91 |
return str.replace(current_path, replace_path)
|
92 |
|
93 |
@CatchException
|
94 |
+
def 载入对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
95 |
"""
|
96 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
97 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
99 |
chatbot 聊天显示框的句柄,用于显示给用户
|
100 |
history 聊天历史,前情提要
|
101 |
system_prompt 给gpt的静默提醒
|
102 |
+
user_request 当前用户的请求信息(IP地址等)
|
103 |
"""
|
104 |
from .crazy_utils import get_files_from_everything
|
105 |
success, file_manifest, _ = get_files_from_everything(txt, type='.html')
|
|
|
126 |
return
|
127 |
|
128 |
@CatchException
|
129 |
+
def 删除所有本地对话历史记录(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
130 |
"""
|
131 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
132 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
134 |
chatbot 聊天显示框的句柄,用于显示给用户
|
135 |
history 聊天历史,前情提要
|
136 |
system_prompt 给gpt的静默提醒
|
137 |
+
user_request 当前用户的请求信息(IP地址等)
|
138 |
"""
|
139 |
|
140 |
import glob, os
|
crazy_functions/总结word文档.py
CHANGED
@@ -79,7 +79,7 @@ def 解析docx(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot
|
|
79 |
|
80 |
|
81 |
@CatchException
|
82 |
-
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
83 |
import glob, os
|
84 |
|
85 |
# 基本信息:功能、贡献者
|
|
|
79 |
|
80 |
|
81 |
@CatchException
|
82 |
+
def 总结word文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
83 |
import glob, os
|
84 |
|
85 |
# 基本信息:功能、贡献者
|
crazy_functions/批量Markdown翻译.py
CHANGED
@@ -153,7 +153,7 @@ def get_files_from_everything(txt, preference=''):
|
|
153 |
|
154 |
|
155 |
@CatchException
|
156 |
-
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
157 |
# 基本信息:功能、贡献者
|
158 |
chatbot.append([
|
159 |
"函数插件功能?",
|
@@ -193,7 +193,7 @@ def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|
193 |
|
194 |
|
195 |
@CatchException
|
196 |
-
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
197 |
# 基本信息:功能、贡献者
|
198 |
chatbot.append([
|
199 |
"函数插件功能?",
|
@@ -226,7 +226,7 @@ def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_p
|
|
226 |
|
227 |
|
228 |
@CatchException
|
229 |
-
def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
230 |
# 基本信息:功能、贡献者
|
231 |
chatbot.append([
|
232 |
"函数插件功能?",
|
|
|
153 |
|
154 |
|
155 |
@CatchException
|
156 |
+
def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
157 |
# 基本信息:功能、贡献者
|
158 |
chatbot.append([
|
159 |
"函数插件功能?",
|
|
|
193 |
|
194 |
|
195 |
@CatchException
|
196 |
+
def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
197 |
# 基本信息:功能、贡献者
|
198 |
chatbot.append([
|
199 |
"函数插件功能?",
|
|
|
226 |
|
227 |
|
228 |
@CatchException
|
229 |
+
def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
230 |
# 基本信息:功能、贡献者
|
231 |
chatbot.append([
|
232 |
"函数插件功能?",
|
crazy_functions/批量总结PDF文档.py
CHANGED
@@ -101,7 +101,7 @@ do not have too much repetitive information, numerical values using the original
|
|
101 |
|
102 |
|
103 |
@CatchException
|
104 |
-
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
105 |
import glob, os
|
106 |
|
107 |
# 基本信息:功能、贡献者
|
|
|
101 |
|
102 |
|
103 |
@CatchException
|
104 |
+
def 批量总结PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
105 |
import glob, os
|
106 |
|
107 |
# 基本信息:功能、贡献者
|
crazy_functions/批量总结PDF文档pdfminer.py
CHANGED
@@ -124,7 +124,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
|
|
124 |
|
125 |
|
126 |
@CatchException
|
127 |
-
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
128 |
history = [] # 清空历史,以免输入溢出
|
129 |
import glob, os
|
130 |
|
|
|
124 |
|
125 |
|
126 |
@CatchException
|
127 |
+
def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
128 |
history = [] # 清空历史,以免输入溢出
|
129 |
import glob, os
|
130 |
|
crazy_functions/批量翻译PDF文档_NOUGAT.py
CHANGED
@@ -48,7 +48,7 @@ def markdown_to_dict(article_content):
|
|
48 |
|
49 |
|
50 |
@CatchException
|
51 |
-
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
52 |
|
53 |
disable_auto_promotion(chatbot)
|
54 |
# 基本信息:功能、贡献者
|
|
|
48 |
|
49 |
|
50 |
@CatchException
|
51 |
+
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
52 |
|
53 |
disable_auto_promotion(chatbot)
|
54 |
# 基本信息:功能、贡献者
|
crazy_functions/批量翻译PDF文档_多线程.py
CHANGED
@@ -10,7 +10,7 @@ import os
|
|
10 |
|
11 |
|
12 |
@CatchException
|
13 |
-
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
14 |
|
15 |
disable_auto_promotion(chatbot)
|
16 |
# 基本信息:功能、贡献者
|
|
|
10 |
|
11 |
|
12 |
@CatchException
|
13 |
+
def 批量翻译PDF文档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
14 |
|
15 |
disable_auto_promotion(chatbot)
|
16 |
# 基本信息:功能、贡献者
|
crazy_functions/数学动画生成manim.py
CHANGED
@@ -50,7 +50,7 @@ def get_code_block(reply):
|
|
50 |
return matches[0].strip('python') # code block
|
51 |
|
52 |
@CatchException
|
53 |
-
def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
54 |
"""
|
55 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
56 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -58,7 +58,7 @@ def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
58 |
chatbot 聊天显示框的句柄,用于显示给用户
|
59 |
history 聊天历史,前情提要
|
60 |
system_prompt 给gpt的静默提醒
|
61 |
-
|
62 |
"""
|
63 |
# 清空历史,以免输入溢出
|
64 |
history = []
|
|
|
50 |
return matches[0].strip('python') # code block
|
51 |
|
52 |
@CatchException
|
53 |
+
def 动画生成(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
54 |
"""
|
55 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
56 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
58 |
chatbot 聊天显示框的句柄,用于显示给用户
|
59 |
history 聊天历史,前情提要
|
60 |
system_prompt 给gpt的静默提醒
|
61 |
+
user_request 当前用户的请求信息(IP地址等)
|
62 |
"""
|
63 |
# 清空历史,以免输入溢出
|
64 |
history = []
|
crazy_functions/理解PDF文档内容.py
CHANGED
@@ -63,7 +63,7 @@ def 解析PDF(file_name, llm_kwargs, plugin_kwargs, chatbot, history, system_pro
|
|
63 |
|
64 |
|
65 |
@CatchException
|
66 |
-
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
67 |
import glob, os
|
68 |
|
69 |
# 基本信息:功能、贡献者
|
|
|
63 |
|
64 |
|
65 |
@CatchException
|
66 |
+
def 理解PDF文档内容标准文件输入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
67 |
import glob, os
|
68 |
|
69 |
# 基本信息:功能、贡献者
|
crazy_functions/生成函数注释.py
CHANGED
@@ -36,7 +36,7 @@ def 生成函数注释(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
|
36 |
|
37 |
|
38 |
@CatchException
|
39 |
-
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
40 |
history = [] # 清空历史,以免输入溢出
|
41 |
import glob, os
|
42 |
if os.path.exists(txt):
|
|
|
36 |
|
37 |
|
38 |
@CatchException
|
39 |
+
def 批量生成函数注释(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
40 |
history = [] # 清空历史,以免输入溢出
|
41 |
import glob, os
|
42 |
if os.path.exists(txt):
|
crazy_functions/生成多种Mermaid图表.py
ADDED
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from toolbox import CatchException, update_ui, report_exception
|
2 |
+
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
+
import datetime
|
4 |
+
|
5 |
+
# Prompts for each supported mermaid chart type. The model first picks a
# chart type via SELECT_PROMPT, then the matching PROMPT_<n> is used to draw it.
SELECT_PROMPT = """
“{subject}”
=============
以上是从文章中提取的摘要,将会使用这些摘要绘制图表。请你选择一个合适的图表类型:
1 流程图
2 序列图
3 类图
4 饼图
5 甘特图
6 状态图
7 实体关系图
8 象限提示图
不需要解释原因,仅需要输出单个不带任何标点符号的数字。
"""
# Mind maps are deliberately absent!!! Testing showed the model would always
# prefer the mind map when offered.
# 1: flowchart
PROMPT_1 = """
请你给出围绕“{subject}”的逻辑关系图,使用mermaid语法,mermaid语法举例:
```mermaid
graph TD
    P(编程) --> L1(Python)
    P(编程) --> L2(C)
    P(编程) --> L3(C++)
    P(编程) --> L4(Javascipt)
    P(编程) --> L5(PHP)
```
"""
# 2: sequence diagram
PROMPT_2 = """
请你给出围绕“{subject}”的序列图,使用mermaid语法,mermaid语法举例:
```mermaid
sequenceDiagram
    participant A as 用户
    participant B as 系统
    A->>B: 登录请求
    B->>A: 登录成功
    A->>B: 获取数据
    B->>A: 返回数据
```
"""
# 3: class diagram
PROMPT_3 = """
请你给出围绕“{subject}”的类图,使用mermaid语法,mermaid语法举例:
```mermaid
classDiagram
    Class01 <|-- AveryLongClass : Cool
    Class03 *-- Class04
    Class05 o-- Class06
    Class07 .. Class08
    Class09 --> C2 : Where am i?
    Class09 --* C3
    Class09 --|> Class07
    Class07 : equals()
    Class07 : Object[] elementData
    Class01 : size()
    Class01 : int chimp
    Class01 : int gorilla
    Class08 <--> C2: Cool label
```
"""
# 4: pie chart
PROMPT_4 = """
请你给出围绕“{subject}”的饼图,使用mermaid语法,mermaid语法举例:
```mermaid
pie title Pets adopted by volunteers
    "狗" : 386
    "猫" : 85
    "兔子" : 15
```
"""
# 5: Gantt chart
PROMPT_5 = """
请你给出围绕“{subject}”的甘特图,使用mermaid语法,mermaid语法举例:
```mermaid
gantt
    title 项目开发流程
    dateFormat  YYYY-MM-DD
    section 设计
    需求分析 :done, des1, 2024-01-06,2024-01-08
    原型设计 :active, des2, 2024-01-09, 3d
    UI设计 : des3, after des2, 5d
    section 开发
    前端开发 :2024-01-20, 10d
    后端开发 :2024-01-20, 10d
```
"""
# 6: state diagram
PROMPT_6 = """
请你给出围绕“{subject}”的状态图,使用mermaid语法,mermaid语法举例:
```mermaid
stateDiagram-v2
    [*] --> Still
    Still --> [*]
    Still --> Moving
    Moving --> Still
    Moving --> Crash
    Crash --> [*]
```
"""
|
105 |
+
#实体关系图
|
106 |
+
PROMPT_7 = """
|
107 |
+
请你给出围绕“{subject}”的实体关系图,使用mermaid语法,mermaid语法举例:
|
108 |
+
```mermaid
|
109 |
+
erDiagram
|
110 |
+
CUSTOMER ||--o{ ORDER : places
|
111 |
+
ORDER ||--|{ LINE-ITEM : contains
|
112 |
+
CUSTOMER {
|
113 |
+
string name
|
114 |
+
string id
|
115 |
+
}
|
116 |
+
ORDER {
|
117 |
+
string orderNumber
|
118 |
+
date orderDate
|
119 |
+
string customerID
|
120 |
+
}
|
121 |
+
LINE-ITEM {
|
122 |
+
number quantity
|
123 |
+
string productID
|
124 |
+
}
|
125 |
+
```
|
126 |
+
"""
|
127 |
+
#象限提示图
|
128 |
+
PROMPT_8 = """
|
129 |
+
请你给出围绕“{subject}”的象限图,使用mermaid语法,mermaid语法举例:
|
130 |
+
```mermaid
|
131 |
+
graph LR
|
132 |
+
A[Hard skill] --> B(Programming)
|
133 |
+
A[Hard skill] --> C(Design)
|
134 |
+
D[Soft skill] --> E(Coordination)
|
135 |
+
D[Soft skill] --> F(Communication)
|
136 |
+
```
|
137 |
+
"""
|
138 |
+
#思维导图
|
139 |
+
PROMPT_9 = """
|
140 |
+
{subject}
|
141 |
+
==========
|
142 |
+
请给出上方内容的思维导图,充分考虑其之间的逻辑,使用mermaid语法,mermaid语法举例:
|
143 |
+
```mermaid
|
144 |
+
mindmap
|
145 |
+
root((mindmap))
|
146 |
+
Origins
|
147 |
+
Long history
|
148 |
+
::icon(fa fa-book)
|
149 |
+
Popularisation
|
150 |
+
British popular psychology author Tony Buzan
|
151 |
+
Research
|
152 |
+
On effectiveness<br/>and features
|
153 |
+
On Automatic creation
|
154 |
+
Uses
|
155 |
+
Creative techniques
|
156 |
+
Strategic planning
|
157 |
+
Argument mapping
|
158 |
+
Tools
|
159 |
+
Pen and paper
|
160 |
+
Mermaid
|
161 |
+
```
|
162 |
+
"""
|
163 |
+
|
def 解析历史输入(history, llm_kwargs, file_manifest, chatbot, plugin_kwargs):
    """
    Summarize the given text fragment by fragment, pick a suitable mermaid
    diagram type, then ask the model to draw the diagram.

    history        list of text fragments (chat history or file contents) to chart
    llm_kwargs     LLM parameters (model name, temperature, top_p, ...)
    file_manifest  list of source files (unused here; kept for interface parity)
    chatbot        chat display handle, used to show progress to the user
    plugin_kwargs  plugin parameters; "advanced_arg" may force a chart type '1'-'9'
    """
    ############################## <Step 0: split the input> ##################################
    # Reuse the PDF-splitting helper to cut the text into token-bounded fragments.
    TOKEN_LIMIT_PER_FRAGMENT = 2500
    txt = str(history).encode('utf-8', 'ignore').decode()  # avoid reading non-utf8 chars
    from crazy_functions.pdf_fns.breakdown_txt import breakdown_text_to_satisfy_token_limit
    txt = breakdown_text_to_satisfy_token_limit(txt=txt, limit=TOKEN_LIMIT_PER_FRAGMENT, llm_model=llm_kwargs['llm_model'])

    ############################## <Step 1: iterate over the fragments, extract condensed summaries> ##################################
    results = []
    MAX_WORD_TOTAL = 4096   # total word budget shared by all fragment summaries
    n_txt = len(txt)
    last_iteration_result = "从以下文本中提取摘要。"
    if n_txt >= 20: print('文章极长,不能达到预期效果')
    for i in range(n_txt):
        NUM_OF_WORD = MAX_WORD_TOTAL // n_txt
        i_say = f"Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words in Chinese: {txt[i]}"
        i_say_show_user = f"[{i+1}/{n_txt}] Read this section, recapitulate the content of this section with less than {NUM_OF_WORD} words: {txt[i][:200]} ...."
        # i_say = prompt actually sent to the model; i_say_show_user = prompt shown to the user
        gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
            i_say, i_say_show_user,
            llm_kwargs, chatbot,
            history=["The main content of the previous section is?", last_iteration_result],  # feed the previous summary back in
            sys_prompt="Extracts the main content from the text section where it is located for graphing purposes, answer me with Chinese."
        )
        results.append(gpt_say)
        last_iteration_result = gpt_say

    ############################## <Step 2: choose a diagram type from the summaries> ##################################
    # Dispatch table: diagram-type digit -> prompt template. See SELECT_PROMPT for
    # the meaning of '1'-'8'; '9' (mindmap) is reachable only via the plugin argument.
    prompt_map = {'1': PROMPT_1, '2': PROMPT_2, '3': PROMPT_3, '4': PROMPT_4,
                  '5': PROMPT_5, '6': PROMPT_6, '7': PROMPT_7, '8': PROMPT_8,
                  '9': PROMPT_9}
    if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
    gpt_say = plugin_kwargs.get("advanced_arg", "")   # the plugin argument may pre-select the diagram type
    results_txt = '\n'.join(results)                  # merge the fragment summaries
    if gpt_say not in prompt_map:   # no (valid) argument given -> let the chat model decide
        i_say_show_user = f'接下来将判断适合的图表类型,如连续3次判断失败将会使用流程图进行绘制'; gpt_say = "[Local Message] 收到。"
        chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=[])
        i_say = SELECT_PROMPT.format(subject=results_txt)
        i_say_show_user = f'请判断适合使用的流程图类型,其中数字对应关系为:1-流程图,2-序列图,3-类图,4-饼图,5-甘特图,6-状态图,7-实体关系图,8-象限提示图。由于不管提供文本是什么,模型大概率认为"思维导图"最合适,因此思维导图仅能通过参数调用。'
        for _ in range(3):   # up to 3 attempts to obtain a valid single-digit answer
            gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
                inputs=i_say,
                inputs_show_user=i_say_show_user,
                llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
                sys_prompt=""
            )
            if gpt_say in prompt_map:   # got a valid answer
                break
        if gpt_say not in prompt_map:
            gpt_say = '1'   # fall back to a flowchart

    ############################## <Step 3: draw the chosen diagram> ##################################
    # Fill the template with str.replace, not str.format: the erDiagram template
    # (PROMPT_7) contains literal braces that would break str.format, and the other
    # templates contain no braces besides the placeholder, so the result is identical.
    i_say = prompt_map[gpt_say].replace("{subject}", results_txt)
    i_say_show_user = f'请根据判断结果绘制相应的图表。如需绘制思维导图请使用参数调用,同时过大的图表可能需要复制到在线编辑器中进行渲染。'
    gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
        inputs=i_say,
        inputs_show_user=i_say_show_user,
        llm_kwargs=llm_kwargs, chatbot=chatbot, history=[],
        sys_prompt=""
    )
    history.append(gpt_say)
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
237 |
+
@CatchException
def 生成多种Mermaid图表(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
    """
    Draw one of several mermaid diagram types from either the current chat
    history or the files at a given path (file contents take priority). The
    chat model first picks a suitable diagram type, then draws it; the plugin
    argument can force a specific type instead.

    txt             text typed in the input box, e.g. a passage to chart, or a path containing files to process
    llm_kwargs      LLM parameters (temperature, top_p, ...), normally passed through unchanged
    plugin_kwargs   plugin parameters, used to fine-tune complex behavior (here: forced diagram type)
    chatbot         chat display handle, used to show output to the user
    history         chat history, i.e. the preceding context
    system_prompt   silent system prompt for the model
    web_port        port the application is currently running on
    """
    import os

    # Basic information: what the plugin does, and its contributor.
    chatbot.append([
        "函数插件功能?",
        "根据当前聊天历史或指定的路径文件(文件内容优先)绘制多种mermaid图表,将会由对话模型首先判断适合的图表类型,随后绘制图表。\
        \n您也可以使用插件参数指定绘制的图表类型,函数插件贡献者: Menghuan1918"])
    yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    if os.path.exists(txt):  # if the input box holds no valid path, fall through to parsing chat history
        from crazy_functions.pdf_fns.parse_word import extract_text_from_files
        # NOTE(review): `excption` (sic) is the error-code string returned by
        # extract_text_from_files; "" means no error.
        file_exist, final_result, page_one, file_manifest, excption = extract_text_from_files(txt, chatbot, history)
    else:
        file_exist = False
        excption = ""
        file_manifest = []

    if excption != "":
        # A known extraction failure occurred; report it and stop.
        if excption == "word":
            report_exception(chatbot, history,
                             a = f"解析项目: {txt}",
                             b = f"找到了.doc文件,但是该文件格式不被支持,请先转化为.docx格式。")

        elif excption == "pdf":
            report_exception(chatbot, history,
                             a = f"解析项目: {txt}",
                             b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pymupdf```。")

        elif excption == "word_pip":
            report_exception(chatbot, history,
                             a=f"解析项目: {txt}",
                             b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade python-docx pywin32```。")

        yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI

    else:
        if not file_exist:
            # Input box did not point at files: treat its text as part of the history.
            history.append(txt)
            i_say_show_user = f'首先你从历史记录中提取摘要。'; gpt_say = "[Local Message] 收到。"  # user-facing notice
            chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
            yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
        else:
            file_num = len(file_manifest)
            for i in range(file_num):  # process the files one by one
                i_say_show_user = f"[{i+1}/{file_num}]处理文件{file_manifest[i]}"; gpt_say = "[Local Message] 收到。"  # user-facing notice
                chatbot.append([i_say_show_user, gpt_say]); yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
                history = []  # files were supplied, so chart each file's content alone, not the chat history
                history.append(final_result[i])
                yield from 解析历史输入(history,llm_kwargs,file_manifest,chatbot,plugin_kwargs)
|
crazy_functions/知识库问答.py
CHANGED
@@ -13,7 +13,7 @@ install_msg ="""
|
|
13 |
"""
|
14 |
|
15 |
@CatchException
|
16 |
-
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
17 |
"""
|
18 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
19 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
@@ -21,7 +21,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|
21 |
chatbot 聊天显示框的句柄,用于显示给用户
|
22 |
history 聊天历史,前情提要
|
23 |
system_prompt 给gpt的静默提醒
|
24 |
-
|
25 |
"""
|
26 |
history = [] # 清空历史,以免输入溢出
|
27 |
|
@@ -84,7 +84,7 @@ def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|
84 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
85 |
|
86 |
@CatchException
|
87 |
-
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
88 |
# resolve deps
|
89 |
try:
|
90 |
# from zh_langchain import construct_vector_store
|
|
|
13 |
"""
|
14 |
|
15 |
@CatchException
|
16 |
+
def 知识库文件注入(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
17 |
"""
|
18 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
19 |
llm_kwargs gpt模型参数, 如温度和top_p等, 一般原样传递下去就行
|
|
|
21 |
chatbot 聊天显示框的句柄,用于显示给用户
|
22 |
history 聊天历史,前情提要
|
23 |
system_prompt 给gpt的静默提醒
|
24 |
+
user_request 当前用户的请求信息(IP地址等)
|
25 |
"""
|
26 |
history = [] # 清空历史,以免输入溢出
|
27 |
|
|
|
84 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
85 |
|
86 |
@CatchException
|
87 |
+
def 读取知识库作答(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request=-1):
|
88 |
# resolve deps
|
89 |
try:
|
90 |
# from zh_langchain import construct_vector_store
|
crazy_functions/联网的ChatGPT.py
CHANGED
@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
|
|
55 |
return text
|
56 |
|
57 |
@CatchException
|
58 |
-
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
59 |
"""
|
60 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
61 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -63,7 +63,7 @@ def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
63 |
chatbot 聊天显示框的句柄,用于显示给用户
|
64 |
history 聊天历史,前情提要
|
65 |
system_prompt 给gpt的静默提醒
|
66 |
-
|
67 |
"""
|
68 |
history = [] # 清空历史,以免输入溢出
|
69 |
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
|
|
|
55 |
return text
|
56 |
|
57 |
@CatchException
|
58 |
+
def 连接网络回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
59 |
"""
|
60 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
61 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
63 |
chatbot 聊天显示框的句柄,用于显示给用户
|
64 |
history 聊天历史,前情提要
|
65 |
system_prompt 给gpt的静默提醒
|
66 |
+
user_request 当前用户的请求信息(IP地址等)
|
67 |
"""
|
68 |
history = [] # 清空历史,以免输入溢出
|
69 |
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
|
crazy_functions/联网的ChatGPT_bing版.py
CHANGED
@@ -55,7 +55,7 @@ def scrape_text(url, proxies) -> str:
|
|
55 |
return text
|
56 |
|
57 |
@CatchException
|
58 |
-
def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
59 |
"""
|
60 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
61 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -63,7 +63,7 @@ def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, histor
|
|
63 |
chatbot 聊天显示框的句柄,用于显示给用户
|
64 |
history 聊天历史,前情提要
|
65 |
system_prompt 给gpt的静默提醒
|
66 |
-
|
67 |
"""
|
68 |
history = [] # 清空历史,以免输入溢出
|
69 |
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
|
|
|
55 |
return text
|
56 |
|
57 |
@CatchException
|
58 |
+
def 连接bing搜索回答问题(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
59 |
"""
|
60 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
61 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
63 |
chatbot 聊天显示框的句柄,用于显示给用户
|
64 |
history 聊天历史,前情提要
|
65 |
system_prompt 给gpt的静默提醒
|
66 |
+
user_request 当前用户的请求信息(IP地址等)
|
67 |
"""
|
68 |
history = [] # 清空历史,以免输入溢出
|
69 |
chatbot.append((f"请结合互联网信息回答以下问题:{txt}",
|
crazy_functions/虚空终端.py
CHANGED
@@ -104,7 +104,7 @@ def analyze_intention_with_simple_rules(txt):
|
|
104 |
|
105 |
|
106 |
@CatchException
|
107 |
-
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
108 |
disable_auto_promotion(chatbot=chatbot)
|
109 |
# 获取当前虚空终端状态
|
110 |
state = VoidTerminalState.get_state(chatbot)
|
@@ -121,7 +121,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
121 |
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
|
122 |
state.unlock_plugin(chatbot=chatbot)
|
123 |
yield from update_ui(chatbot=chatbot, history=history)
|
124 |
-
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
125 |
return
|
126 |
else:
|
127 |
# 如果意图模糊,提示
|
@@ -133,7 +133,7 @@ def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
133 |
|
134 |
|
135 |
|
136 |
-
def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
137 |
history = []
|
138 |
chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
|
139 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
104 |
|
105 |
|
106 |
@CatchException
|
107 |
+
def 虚空终端(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
108 |
disable_auto_promotion(chatbot=chatbot)
|
109 |
# 获取当前虚空终端状态
|
110 |
state = VoidTerminalState.get_state(chatbot)
|
|
|
121 |
state.set_state(chatbot=chatbot, key='has_provided_explaination', value=True)
|
122 |
state.unlock_plugin(chatbot=chatbot)
|
123 |
yield from update_ui(chatbot=chatbot, history=history)
|
124 |
+
yield from 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request)
|
125 |
return
|
126 |
else:
|
127 |
# 如果意图模糊,提示
|
|
|
133 |
|
134 |
|
135 |
|
136 |
+
def 虚空终端主路由(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
137 |
history = []
|
138 |
chatbot.append(("虚空终端状态: ", f"正在执行任务: {txt}"))
|
139 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
crazy_functions/解析JupyterNotebook.py
CHANGED
@@ -12,6 +12,12 @@ class PaperFileGroup():
|
|
12 |
self.sp_file_index = []
|
13 |
self.sp_file_tag = []
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
def run_file_split(self, max_token_limit=1900):
|
16 |
"""
|
17 |
将长文本分离开来
|
@@ -54,7 +60,7 @@ def parseNotebook(filename, enable_markdown=1):
|
|
54 |
Code += f"This is {idx+1}th code block: \n"
|
55 |
Code += code+"\n"
|
56 |
|
57 |
-
return Code
|
58 |
|
59 |
|
60 |
def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
@@ -109,7 +115,7 @@ def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
|
|
109 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
110 |
|
111 |
@CatchException
|
112 |
-
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
113 |
chatbot.append([
|
114 |
"函数插件功能?",
|
115 |
"对IPynb文件进行解析。Contributor: codycjy."])
|
|
|
12 |
self.sp_file_index = []
|
13 |
self.sp_file_tag = []
|
14 |
|
15 |
+
# count_token
|
16 |
+
from request_llms.bridge_all import model_info
|
17 |
+
enc = model_info["gpt-3.5-turbo"]['tokenizer']
|
18 |
+
def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
|
19 |
+
self.get_token_num = get_token_num
|
20 |
+
|
21 |
def run_file_split(self, max_token_limit=1900):
|
22 |
"""
|
23 |
将长文本分离开来
|
|
|
60 |
Code += f"This is {idx+1}th code block: \n"
|
61 |
Code += code+"\n"
|
62 |
|
63 |
+
return Code
|
64 |
|
65 |
|
66 |
def ipynb解释(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
|
|
|
115 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
116 |
|
117 |
@CatchException
|
118 |
+
def 解析ipynb文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
119 |
chatbot.append([
|
120 |
"函数插件功能?",
|
121 |
"对IPynb文件进行解析。Contributor: codycjy."])
|
crazy_functions/解析项目源代码.py
CHANGED
@@ -83,7 +83,8 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
|
83 |
history=this_iteration_history_feed, # 迭代之前的分析
|
84 |
sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional)
|
85 |
|
86 |
-
|
|
|
87 |
summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
88 |
inputs=summary,
|
89 |
inputs_show_user=summary,
|
@@ -104,9 +105,12 @@ def 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs,
|
|
104 |
chatbot.append(("完成了吗?", res))
|
105 |
yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
|
106 |
|
|
|
|
|
|
|
107 |
|
108 |
@CatchException
|
109 |
-
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
110 |
history = [] # 清空历史,以免输入溢出
|
111 |
import glob
|
112 |
file_manifest = [f for f in glob.glob('./*.py')] + \
|
@@ -119,7 +123,7 @@ def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
119 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
120 |
|
121 |
@CatchException
|
122 |
-
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
123 |
history = [] # 清空历史,以免输入溢出
|
124 |
import glob, os
|
125 |
if os.path.exists(txt):
|
@@ -137,7 +141,7 @@ def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
137 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
138 |
|
139 |
@CatchException
|
140 |
-
def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
141 |
history = [] # 清空历史,以免输入溢出
|
142 |
import glob, os
|
143 |
if os.path.exists(txt):
|
@@ -155,7 +159,7 @@ def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
155 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
156 |
|
157 |
@CatchException
|
158 |
-
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
159 |
history = [] # 清空历史,以免输入溢出
|
160 |
import glob, os
|
161 |
if os.path.exists(txt):
|
@@ -175,7 +179,7 @@ def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, his
|
|
175 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
176 |
|
177 |
@CatchException
|
178 |
-
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
179 |
history = [] # 清空历史,以免输入溢出
|
180 |
import glob, os
|
181 |
if os.path.exists(txt):
|
@@ -197,7 +201,7 @@ def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system
|
|
197 |
|
198 |
|
199 |
@CatchException
|
200 |
-
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
201 |
history = [] # 清空历史,以免输入溢出
|
202 |
import glob, os
|
203 |
if os.path.exists(txt):
|
@@ -219,7 +223,7 @@ def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|
219 |
|
220 |
|
221 |
@CatchException
|
222 |
-
def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
223 |
history = [] # 清空历史,以免输入溢出
|
224 |
import glob, os
|
225 |
if os.path.exists(txt):
|
@@ -248,7 +252,7 @@ def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
248 |
|
249 |
|
250 |
@CatchException
|
251 |
-
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
252 |
history = [] # 清空历史,以免输入溢出
|
253 |
import glob, os
|
254 |
if os.path.exists(txt):
|
@@ -269,7 +273,7 @@ def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
269 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
270 |
|
271 |
@CatchException
|
272 |
-
def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
273 |
history = [] # 清空历史,以免输入溢出
|
274 |
import glob, os
|
275 |
if os.path.exists(txt):
|
@@ -289,7 +293,7 @@ def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, sys
|
|
289 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
290 |
|
291 |
@CatchException
|
292 |
-
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
293 |
history = [] # 清空历史,以免输入溢出
|
294 |
import glob, os
|
295 |
if os.path.exists(txt):
|
@@ -311,7 +315,7 @@ def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, syst
|
|
311 |
|
312 |
|
313 |
@CatchException
|
314 |
-
def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
315 |
history = [] # 清空历史,以免输入溢出
|
316 |
import glob, os
|
317 |
if os.path.exists(txt):
|
@@ -331,7 +335,7 @@ def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, s
|
|
331 |
|
332 |
|
333 |
@CatchException
|
334 |
-
def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
335 |
txt_pattern = plugin_kwargs.get("advanced_arg")
|
336 |
txt_pattern = txt_pattern.replace(",", ",")
|
337 |
# 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml)
|
|
|
83 |
history=this_iteration_history_feed, # 迭代之前的分析
|
84 |
sys_prompt="你是一个程序架构分析师,正在分析一个项目的源代码。" + sys_prompt_additional)
|
85 |
|
86 |
+
diagram_code = make_diagram(this_iteration_files, result, this_iteration_history_feed)
|
87 |
+
summary = "请用一句话概括这些文件的整体功能。\n\n" + diagram_code
|
88 |
summary_result = yield from request_gpt_model_in_new_thread_with_ui_alive(
|
89 |
inputs=summary,
|
90 |
inputs_show_user=summary,
|
|
|
105 |
chatbot.append(("完成了吗?", res))
|
106 |
yield from update_ui(chatbot=chatbot, history=history_to_return) # 刷新界面
|
107 |
|
108 |
+
def make_diagram(this_iteration_files, result, this_iteration_history_feed):
|
109 |
+
from crazy_functions.diagram_fns.file_tree import build_file_tree_mermaid_diagram
|
110 |
+
return build_file_tree_mermaid_diagram(this_iteration_history_feed[0::2], this_iteration_history_feed[1::2], "项目示意图")
|
111 |
|
112 |
@CatchException
|
113 |
+
def 解析项目本身(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
114 |
history = [] # 清空历史,以免输入溢出
|
115 |
import glob
|
116 |
file_manifest = [f for f in glob.glob('./*.py')] + \
|
|
|
123 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
124 |
|
125 |
@CatchException
|
126 |
+
def 解析一个Python项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
127 |
history = [] # 清空历史,以免输入溢出
|
128 |
import glob, os
|
129 |
if os.path.exists(txt):
|
|
|
141 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
142 |
|
143 |
@CatchException
|
144 |
+
def 解析一个Matlab项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
145 |
history = [] # 清空历史,以免输入溢出
|
146 |
import glob, os
|
147 |
if os.path.exists(txt):
|
|
|
159 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
160 |
|
161 |
@CatchException
|
162 |
+
def 解析一个C项目的头文件(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
163 |
history = [] # 清空历史,以免输入溢出
|
164 |
import glob, os
|
165 |
if os.path.exists(txt):
|
|
|
179 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
180 |
|
181 |
@CatchException
|
182 |
+
def 解析一个C项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
183 |
history = [] # 清空历史,以免输入溢出
|
184 |
import glob, os
|
185 |
if os.path.exists(txt):
|
|
|
201 |
|
202 |
|
203 |
@CatchException
|
204 |
+
def 解析一个Java项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
205 |
history = [] # 清空历史,以免输入溢出
|
206 |
import glob, os
|
207 |
if os.path.exists(txt):
|
|
|
223 |
|
224 |
|
225 |
@CatchException
|
226 |
+
def 解析一个前端项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
227 |
history = [] # 清空历史,以免输入溢出
|
228 |
import glob, os
|
229 |
if os.path.exists(txt):
|
|
|
252 |
|
253 |
|
254 |
@CatchException
|
255 |
+
def 解析一个Golang项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
256 |
history = [] # 清空历史,以免输入溢出
|
257 |
import glob, os
|
258 |
if os.path.exists(txt):
|
|
|
273 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
274 |
|
275 |
@CatchException
|
276 |
+
def 解析一个Rust项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
277 |
history = [] # 清空历史,以免输入溢出
|
278 |
import glob, os
|
279 |
if os.path.exists(txt):
|
|
|
293 |
yield from 解析源代码新(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
|
294 |
|
295 |
@CatchException
|
296 |
+
def 解析一个Lua项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
297 |
history = [] # 清空历史,以免输入溢出
|
298 |
import glob, os
|
299 |
if os.path.exists(txt):
|
|
|
315 |
|
316 |
|
317 |
@CatchException
|
318 |
+
def 解析一个CSharp项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
319 |
history = [] # 清空历史,以免输入溢出
|
320 |
import glob, os
|
321 |
if os.path.exists(txt):
|
|
|
335 |
|
336 |
|
337 |
@CatchException
|
338 |
+
def 解析任意code项目(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
339 |
txt_pattern = plugin_kwargs.get("advanced_arg")
|
340 |
txt_pattern = txt_pattern.replace(",", ",")
|
341 |
# 将要匹配的模式(例如: *.c, *.cpp, *.py, config.toml)
|
crazy_functions/询问多个大语言模型.py
CHANGED
@@ -2,7 +2,7 @@ from toolbox import CatchException, update_ui, get_conf
|
|
2 |
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
import datetime
|
4 |
@CatchException
|
5 |
-
def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
6 |
"""
|
7 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
8 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -10,7 +10,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
10 |
chatbot 聊天显示框的句柄,用于显示给用户
|
11 |
history 聊天历史,前情提要
|
12 |
system_prompt 给gpt的静默提醒
|
13 |
-
|
14 |
"""
|
15 |
history = [] # 清空历史,以免输入溢出
|
16 |
MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
|
@@ -32,7 +32,7 @@ def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
32 |
|
33 |
|
34 |
@CatchException
|
35 |
-
def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
36 |
"""
|
37 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
38 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -40,7 +40,7 @@ def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history,
|
|
40 |
chatbot 聊天显示框的句柄,用于显示给用户
|
41 |
history 聊天历史,前情提要
|
42 |
system_prompt 给gpt的静默提醒
|
43 |
-
|
44 |
"""
|
45 |
history = [] # 清空历史,以免输入溢出
|
46 |
|
|
|
2 |
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
import datetime
|
4 |
@CatchException
|
5 |
+
def 同时问询(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
6 |
"""
|
7 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
8 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
10 |
chatbot 聊天显示框的句柄,用于显示给用户
|
11 |
history 聊天历史,前情提要
|
12 |
system_prompt 给gpt的静默提醒
|
13 |
+
user_request 当前用户的请求信息(IP地址等)
|
14 |
"""
|
15 |
history = [] # 清空历史,以免输入溢出
|
16 |
MULTI_QUERY_LLM_MODELS = get_conf('MULTI_QUERY_LLM_MODELS')
|
|
|
32 |
|
33 |
|
34 |
@CatchException
|
35 |
+
def 同时问询_指定模型(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
36 |
"""
|
37 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
38 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
40 |
chatbot 聊天显示框的句柄,用于显示给用户
|
41 |
history 聊天历史,前情提要
|
42 |
system_prompt 给gpt的静默提醒
|
43 |
+
user_request 当前用户的请求信息(IP地址等)
|
44 |
"""
|
45 |
history = [] # 清空历史,以免输入溢出
|
46 |
|
crazy_functions/语音助手.py
CHANGED
@@ -166,7 +166,7 @@ class InterviewAssistant(AliyunASR):
|
|
166 |
|
167 |
|
168 |
@CatchException
|
169 |
-
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
170 |
# pip install -U openai-whisper
|
171 |
chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
|
172 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
|
|
166 |
|
167 |
|
168 |
@CatchException
|
169 |
+
def 语音助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
170 |
# pip install -U openai-whisper
|
171 |
chatbot.append(["对话助手函数插件:使用时,双手离开鼠标键盘吧", "音频助手, 正在听您讲话(点击“停止”键可终止程序)..."])
|
172 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
crazy_functions/读文章写摘要.py
CHANGED
@@ -44,7 +44,7 @@ def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbo
|
|
44 |
|
45 |
|
46 |
@CatchException
|
47 |
-
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
48 |
history = [] # 清空历史,以免输入溢出
|
49 |
import glob, os
|
50 |
if os.path.exists(txt):
|
|
|
44 |
|
45 |
|
46 |
@CatchException
|
47 |
+
def 读文章写摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
48 |
history = [] # 清空历史,以免输入溢出
|
49 |
import glob, os
|
50 |
if os.path.exists(txt):
|
crazy_functions/谷歌检索小助手.py
CHANGED
@@ -132,7 +132,7 @@ def get_meta_information(url, chatbot, history):
|
|
132 |
return profile
|
133 |
|
134 |
@CatchException
|
135 |
-
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
136 |
disable_auto_promotion(chatbot=chatbot)
|
137 |
# 基本信息:功能、贡献者
|
138 |
chatbot.append([
|
|
|
132 |
return profile
|
133 |
|
134 |
@CatchException
|
135 |
+
def 谷歌检索小助手(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
136 |
disable_auto_promotion(chatbot=chatbot)
|
137 |
# 基本信息:功能、贡献者
|
138 |
chatbot.append([
|
crazy_functions/辅助功能.py
CHANGED
@@ -11,7 +11,7 @@ import os
|
|
11 |
|
12 |
|
13 |
@CatchException
|
14 |
-
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
15 |
if txt:
|
16 |
show_say = txt
|
17 |
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
|
@@ -32,7 +32,7 @@ def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt
|
|
32 |
|
33 |
|
34 |
@CatchException
|
35 |
-
def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
36 |
chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
|
37 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
38 |
|
|
|
11 |
|
12 |
|
13 |
@CatchException
|
14 |
+
def 猜你想问(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
15 |
if txt:
|
16 |
show_say = txt
|
17 |
prompt = txt+'\n回答完问题后,再列出用户可能提出的三个问题。'
|
|
|
32 |
|
33 |
|
34 |
@CatchException
|
35 |
+
def 清除缓存(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
36 |
chatbot.append(['清除本地缓存数据', '执行中. 删除数据'])
|
37 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
|
38 |
|
crazy_functions/高级功能函数模板.py
CHANGED
@@ -1,19 +1,47 @@
|
|
1 |
from toolbox import CatchException, update_ui
|
2 |
-
from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
import datetime
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
4 |
@CatchException
|
5 |
-
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
6 |
"""
|
|
|
|
|
7 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
8 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
9 |
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
|
10 |
chatbot 聊天显示框的句柄,用于显示给用户
|
11 |
history 聊天历史,前情提要
|
12 |
system_prompt 给gpt的静默提醒
|
13 |
-
|
14 |
"""
|
15 |
history = [] # 清空历史,以免输入溢出
|
16 |
-
chatbot.append((
|
|
|
|
|
17 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
18 |
for i in range(5):
|
19 |
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
|
@@ -43,7 +71,7 @@ graph TD
|
|
43 |
```
|
44 |
"""
|
45 |
@CatchException
|
46 |
-
def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
47 |
"""
|
48 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
49 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
@@ -51,7 +79,7 @@ def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_
|
|
51 |
chatbot 聊天显示框的句柄,用于显示给用户
|
52 |
history 聊天历史,前情提要
|
53 |
system_prompt 给gpt的静默提醒
|
54 |
-
|
55 |
"""
|
56 |
history = [] # 清空历史,以免输入溢出
|
57 |
chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一���关键词,然后使用mermaid+llm绘制图表。"))
|
|
|
1 |
from toolbox import CatchException, update_ui
|
2 |
+
from crazy_functions.crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
|
3 |
import datetime
|
4 |
+
|
5 |
+
高阶功能模板函数示意图 = f"""
|
6 |
+
```mermaid
|
7 |
+
flowchart TD
|
8 |
+
%% <gpt_academic_hide_mermaid_code> 一个特殊标记,用于在生成mermaid图表时隐藏代码块
|
9 |
+
subgraph 函数调用["函数调用过程"]
|
10 |
+
AA["输入栏用户输入的文本(txt)"] --> BB["gpt模型参数(llm_kwargs)"]
|
11 |
+
BB --> CC["插件模型参数(plugin_kwargs)"]
|
12 |
+
CC --> DD["对话显示框的句柄(chatbot)"]
|
13 |
+
DD --> EE["对话历史(history)"]
|
14 |
+
EE --> FF["系统提示词(system_prompt)"]
|
15 |
+
FF --> GG["当前用户信息(web_port)"]
|
16 |
+
|
17 |
+
A["开始(查询5天历史事件)"]
|
18 |
+
A --> B["获取当前月份和日期"]
|
19 |
+
B --> C["生成历史事件查询提示词"]
|
20 |
+
C --> D["调用大模型"]
|
21 |
+
D --> E["更新界面"]
|
22 |
+
E --> F["记录历史"]
|
23 |
+
F --> |"下一天"| B
|
24 |
+
end
|
25 |
+
```
|
26 |
+
"""
|
27 |
+
|
28 |
@CatchException
|
29 |
+
def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
30 |
"""
|
31 |
+
# 高阶功能模板函数示意图:https://mermaid.live/edit#pako:eNptk1tvEkEYhv8KmattQpvlvOyFCcdeeaVXuoYssBwie8gyhCIlqVoLhrbbtAWNUpEGUkyMEDW2Fmn_DDOL_8LZHdOwxrnamX3f7_3mmZk6yKhZCfAgV1KrmYKoQ9fDuKC4yChX0nld1Aou1JzjznQ5fWmejh8LYHW6vG2a47YAnlCLNSIRolnenKBXI_zRIBrcuqRT890u7jZx7zMDt-AaMbnW1--5olGiz2sQjwfoQxsZL0hxplSSU0-rop4vrzmKR6O2JxYjHmwcL2Y_HDatVMkXlf86YzHbGY9bO5j8XE7O8Nsbc3iNB3ukL2SMcH-XIQBgWoVOZzxuOxOJOyc63EPGV6ZQLENVrznViYStTiaJ2vw2M2d9bByRnOXkgCnXylCSU5quyto_IcmkbdvctELmJ-j1ASW3uB3g5xOmKqVTmqr_Na3AtuS_dtBFm8H90XJyHkDDT7S9xXWb4HGmRChx64AOL5HRpUm411rM5uh4H78Z4V7fCZzytjZz2seto9XaNPFue07clLaVZF8UNLygJ-VES8lah_n-O-5Ozc7-77NzJ0-K0yr0ZYrmHdqAk50t2RbA4qq9uNohBASw7YpSgaRkLWCCAtxAlnRZLGbJba9bPwUAC5IsCYAnn1kpJ1ZKUACC0iBSsQLVBzUlA3ioVyQ3qGhZEUrxokiehAz4nFgqk1VNVABfB1uAD_g2_AGPl-W8nMcbCvsDblADfNCz4feyobDPy3rYEMtxwYYbPFNVUoHdCPmDHBv2cP4AMfrCbiBli-Q-3afv0X6WdsIjW2-10fgDy1SAig
|
32 |
+
|
33 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
34 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
35 |
plugin_kwargs 插件模型的参数,用于灵活调整复杂功能的各种参数
|
36 |
chatbot 聊天显示框的句柄,用于显示给用户
|
37 |
history 聊天历史,前情提要
|
38 |
system_prompt 给gpt的静默提醒
|
39 |
+
user_request 当前用户的请求信息(IP地址等)
|
40 |
"""
|
41 |
history = [] # 清空历史,以免输入溢出
|
42 |
+
chatbot.append((
|
43 |
+
"您正在调用插件:历史上的今天",
|
44 |
+
"[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!" + 高阶功能模板函数示意图))
|
45 |
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新
|
46 |
for i in range(5):
|
47 |
currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month
|
|
|
71 |
```
|
72 |
"""
|
73 |
@CatchException
|
74 |
+
def 测试图表渲染(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):
|
75 |
"""
|
76 |
txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径
|
77 |
llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行
|
|
|
79 |
chatbot 聊天显示框的句柄,用于显示给用户
|
80 |
history 聊天历史,前情提要
|
81 |
system_prompt 给gpt的静默提醒
|
82 |
+
user_request 当前用户的请求信息(IP地址等)
|
83 |
"""
|
84 |
history = [] # 清空历史,以免输入溢出
|
85 |
chatbot.append(("这是什么功能?", "一个测试mermaid绘制图表的功能,您可以在输入框中输入一���关键词,然后使用mermaid+llm绘制图表。"))
|
docs/GithubAction+NoLocal+AudioAssistant
CHANGED
@@ -13,7 +13,7 @@ COPY . .
|
|
13 |
RUN pip3 install -r requirements.txt
|
14 |
|
15 |
# 安装语音插件的额外依赖
|
16 |
-
RUN pip3 install pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
|
17 |
|
18 |
# 可选步骤,用于预热模块
|
19 |
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
|
|
|
13 |
RUN pip3 install -r requirements.txt
|
14 |
|
15 |
# 安装语音插件的额外依赖
|
16 |
+
RUN pip3 install aliyun-python-sdk-core==2.13.3 pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
|
17 |
|
18 |
# 可选步骤,用于预热模块
|
19 |
RUN python3 -c 'from check_proxy import warm_up_modules; warm_up_modules()'
|
docs/self_analysis.md
CHANGED
@@ -165,7 +165,7 @@ toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和
|
|
165 |
|
166 |
3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。
|
167 |
|
168 |
-
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt,
|
169 |
|
170 |
## [19/48] 请对下面的程序文件做一个概述: crazy_functions\总结word文档.py
|
171 |
|
|
|
165 |
|
166 |
3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。
|
167 |
|
168 |
+
4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, user_request):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。
|
169 |
|
170 |
## [19/48] 请对下面的程序文件做一个概述: crazy_functions\总结word文档.py
|
171 |
|
docs/translate_english.json
CHANGED
@@ -1668,7 +1668,7 @@
|
|
1668 |
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
1669 |
"Langchain知识库": "LangchainKnowledgeBase",
|
1670 |
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
1671 |
-
"Latex输出PDF
|
1672 |
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
|
1673 |
"sprint亮靛": "SprintIndigo",
|
1674 |
"寻找Latex主文件": "FindLatexMainFile",
|
@@ -3004,5 +3004,7 @@
|
|
3004 |
"1. 上传图片": "TranslatedText",
|
3005 |
"保存状态": "TranslatedText",
|
3006 |
"GPT-Academic对话存档": "TranslatedText",
|
3007 |
-
"Arxiv论文精细翻译": "TranslatedText"
|
|
|
|
|
3008 |
}
|
|
|
1668 |
"Markdown翻译指定语言": "TranslateMarkdownToSpecifiedLanguage",
|
1669 |
"Langchain知识库": "LangchainKnowledgeBase",
|
1670 |
"Latex英文纠错加PDF对比": "CorrectEnglishInLatexWithPDFComparison",
|
1671 |
+
"Latex输出PDF": "OutputPDFFromLatex",
|
1672 |
"Latex翻译中文并重新编译PDF": "TranslateChineseToEnglishInLatexAndRecompilePDF",
|
1673 |
"sprint亮靛": "SprintIndigo",
|
1674 |
"寻找Latex主文件": "FindLatexMainFile",
|
|
|
3004 |
"1. 上传图片": "TranslatedText",
|
3005 |
"保存状态": "TranslatedText",
|
3006 |
"GPT-Academic对话存档": "TranslatedText",
|
3007 |
+
"Arxiv论文精细翻译": "TranslatedText",
|
3008 |
+
"from crazy_functions.AdvancedFunctionTemplate import 测试图表渲染": "from crazy_functions.AdvancedFunctionTemplate import test_chart_rendering",
|
3009 |
+
"测试图表渲染": "test_chart_rendering"
|
3010 |
}
|
docs/translate_japanese.json
CHANGED
@@ -1492,7 +1492,7 @@
|
|
1492 |
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
|
1493 |
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
|
1494 |
"Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
|
1495 |
-
"Latex输出PDF
|
1496 |
"Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
|
1497 |
"语音助手": "VoiceAssistant",
|
1498 |
"微调数据集生成": "FineTuneDatasetGeneration",
|
|
|
1492 |
"交互功能模板函数": "InteractiveFunctionTemplateFunction",
|
1493 |
"交互功能函数模板": "InteractiveFunctionFunctionTemplate",
|
1494 |
"Latex英文纠错加PDF对比": "LatexEnglishErrorCorrectionWithPDFComparison",
|
1495 |
+
"Latex输出PDF": "LatexOutputPDFResult",
|
1496 |
"Latex翻译中文并重新编译PDF": "TranslateChineseAndRecompilePDF",
|
1497 |
"语音助手": "VoiceAssistant",
|
1498 |
"微调数据集生成": "FineTuneDatasetGeneration",
|
docs/translate_std.json
CHANGED
@@ -16,7 +16,7 @@
|
|
16 |
"批量Markdown翻译": "BatchTranslateMarkdown",
|
17 |
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
|
18 |
"Langchain知识库": "LangchainKnowledgeBase",
|
19 |
-
"Latex输出PDF
|
20 |
"把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
|
21 |
"Latex精细分解与转化": "DecomposeAndConvertLatex",
|
22 |
"解析一个C项目的头文件": "ParseCProjectHeaderFiles",
|
@@ -97,5 +97,12 @@
|
|
97 |
"多智能体": "MultiAgent",
|
98 |
"图片生成_DALLE2": "ImageGeneration_DALLE2",
|
99 |
"图片生成_DALLE3": "ImageGeneration_DALLE3",
|
100 |
-
"图片修改_DALLE2": "ImageModification_DALLE2"
|
101 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
"批量Markdown翻译": "BatchTranslateMarkdown",
|
17 |
"连接bing搜索回答问题": "ConnectBingSearchAnswerQuestion",
|
18 |
"Langchain知识库": "LangchainKnowledgeBase",
|
19 |
+
"Latex输出PDF": "OutputPDFFromLatex",
|
20 |
"把字符太少的块清除为回车": "ClearBlocksWithTooFewCharactersToNewline",
|
21 |
"Latex精细分解与转化": "DecomposeAndConvertLatex",
|
22 |
"解析一个C项目的头文件": "ParseCProjectHeaderFiles",
|
|
|
97 |
"多智能体": "MultiAgent",
|
98 |
"图片生成_DALLE2": "ImageGeneration_DALLE2",
|
99 |
"图片生成_DALLE3": "ImageGeneration_DALLE3",
|
100 |
+
"图片修改_DALLE2": "ImageModification_DALLE2",
|
101 |
+
"生成多种Mermaid图表": "GenerateMultipleMermaidCharts",
|
102 |
+
"知识库文件注入": "InjectKnowledgeBaseFiles",
|
103 |
+
"PDF翻译中文并重新编译PDF": "TranslatePDFToChineseAndRecompilePDF",
|
104 |
+
"随机小游戏": "RandomMiniGame",
|
105 |
+
"互动小游戏": "InteractiveMiniGame",
|
106 |
+
"解析历史输入": "ParseHistoricalInput",
|
107 |
+
"高阶功能模板函数示意图": "HighOrderFunctionTemplateDiagram"
|
108 |
+
}
|
docs/translate_traditionalchinese.json
CHANGED
@@ -1468,7 +1468,7 @@
|
|
1468 |
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
|
1469 |
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
|
1470 |
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
|
1471 |
-
"Latex输出PDF
|
1472 |
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
|
1473 |
"语音助手": "VoiceAssistant",
|
1474 |
"微调数据集生成": "FineTuneDatasetGeneration",
|
|
|
1468 |
"交互功能模板函数": "InteractiveFunctionTemplateFunctions",
|
1469 |
"交互功能函数模板": "InteractiveFunctionFunctionTemplates",
|
1470 |
"Latex英文纠错加PDF对比": "LatexEnglishCorrectionWithPDFComparison",
|
1471 |
+
"Latex输出PDF": "OutputPDFFromLatex",
|
1472 |
"Latex翻译中文并重新编译PDF": "TranslateLatexToChineseAndRecompilePDF",
|
1473 |
"语音助手": "VoiceAssistant",
|
1474 |
"微调数据集生成": "FineTuneDatasetGeneration",
|
docs/use_audio.md
CHANGED
@@ -3,7 +3,7 @@
|
|
3 |
|
4 |
## 1. 安装额外依赖
|
5 |
```
|
6 |
-
pip install --upgrade pyOpenSSL scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
|
7 |
```
|
8 |
|
9 |
如果因为特色网络问题导致上述命令无法执行:
|
|
|
3 |
|
4 |
## 1. 安装额外依赖
|
5 |
```
|
6 |
+
pip install --upgrade pyOpenSSL webrtcvad scipy git+https://github.com/aliyun/alibabacloud-nls-python-sdk.git
|
7 |
```
|
8 |
|
9 |
如果因为特色网络问题导致上述命令无法执行:
|
request_llms/bridge_all.py
CHANGED
@@ -11,7 +11,7 @@
|
|
11 |
import tiktoken, copy
|
12 |
from functools import lru_cache
|
13 |
from concurrent.futures import ThreadPoolExecutor
|
14 |
-
from toolbox import get_conf, trimmed_format_exc
|
15 |
|
16 |
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
17 |
from .bridge_chatgpt import predict as chatgpt_ui
|
@@ -31,6 +31,9 @@ from .bridge_qianfan import predict as qianfan_ui
|
|
31 |
from .bridge_google_gemini import predict as genai_ui
|
32 |
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
|
33 |
|
|
|
|
|
|
|
34 |
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
|
35 |
|
36 |
class LazyloadTiktoken(object):
|
@@ -44,13 +47,13 @@ class LazyloadTiktoken(object):
|
|
44 |
tmp = tiktoken.encoding_for_model(model)
|
45 |
print('加载tokenizer完毕')
|
46 |
return tmp
|
47 |
-
|
48 |
def encode(self, *args, **kwargs):
|
49 |
-
encoder = self.get_encoder(self.model)
|
50 |
return encoder.encode(*args, **kwargs)
|
51 |
-
|
52 |
def decode(self, *args, **kwargs):
|
53 |
-
encoder = self.get_encoder(self.model)
|
54 |
return encoder.decode(*args, **kwargs)
|
55 |
|
56 |
# Endpoint 重定向
|
@@ -63,7 +66,7 @@ azure_endpoint = AZURE_ENDPOINT + f'openai/deployments/{AZURE_ENGINE}/chat/compl
|
|
63 |
# 兼容旧版的配置
|
64 |
try:
|
65 |
API_URL = get_conf("API_URL")
|
66 |
-
if API_URL != "https://api.openai.com/v1/chat/completions":
|
67 |
openai_endpoint = API_URL
|
68 |
print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
|
69 |
except:
|
@@ -95,7 +98,7 @@ model_info = {
|
|
95 |
"tokenizer": tokenizer_gpt35,
|
96 |
"token_cnt": get_token_num_gpt35,
|
97 |
},
|
98 |
-
|
99 |
"gpt-3.5-turbo-16k": {
|
100 |
"fn_with_ui": chatgpt_ui,
|
101 |
"fn_without_ui": chatgpt_noui,
|
@@ -150,6 +153,15 @@ model_info = {
|
|
150 |
"token_cnt": get_token_num_gpt4,
|
151 |
},
|
152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
"gpt-4-1106-preview": {
|
154 |
"fn_with_ui": chatgpt_ui,
|
155 |
"fn_without_ui": chatgpt_noui,
|
@@ -159,6 +171,15 @@ model_info = {
|
|
159 |
"token_cnt": get_token_num_gpt4,
|
160 |
},
|
161 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
162 |
"gpt-3.5-random": {
|
163 |
"fn_with_ui": chatgpt_ui,
|
164 |
"fn_without_ui": chatgpt_noui,
|
@@ -167,7 +188,7 @@ model_info = {
|
|
167 |
"tokenizer": tokenizer_gpt4,
|
168 |
"token_cnt": get_token_num_gpt4,
|
169 |
},
|
170 |
-
|
171 |
"gpt-4-vision-preview": {
|
172 |
"fn_with_ui": chatgpt_vision_ui,
|
173 |
"fn_without_ui": chatgpt_vision_noui,
|
@@ -197,16 +218,25 @@ model_info = {
|
|
197 |
"token_cnt": get_token_num_gpt4,
|
198 |
},
|
199 |
|
200 |
-
#
|
201 |
-
"
|
202 |
-
"fn_with_ui":
|
203 |
-
"fn_without_ui":
|
204 |
-
"endpoint":
|
205 |
-
"max_token":
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
206 |
"tokenizer": tokenizer_gpt35,
|
207 |
"token_cnt": get_token_num_gpt35,
|
208 |
},
|
209 |
|
|
|
210 |
"api2d-gpt-4": {
|
211 |
"fn_with_ui": chatgpt_ui,
|
212 |
"fn_without_ui": chatgpt_noui,
|
@@ -530,7 +560,7 @@ if "sparkv2" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|
530 |
})
|
531 |
except:
|
532 |
print(trimmed_format_exc())
|
533 |
-
if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
534 |
try:
|
535 |
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
536 |
from .bridge_spark import predict as spark_ui
|
@@ -542,6 +572,14 @@ if "sparkv3" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
|
542 |
"max_token": 4096,
|
543 |
"tokenizer": tokenizer_gpt35,
|
544 |
"token_cnt": get_token_num_gpt35,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
545 |
}
|
546 |
})
|
547 |
except:
|
@@ -562,19 +600,17 @@ if "llama2" in AVAIL_LLM_MODELS: # llama2
|
|
562 |
})
|
563 |
except:
|
564 |
print(trimmed_format_exc())
|
565 |
-
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai
|
566 |
try:
|
567 |
-
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
|
568 |
-
from .bridge_zhipu import predict as zhipu_ui
|
569 |
model_info.update({
|
570 |
"zhipuai": {
|
571 |
"fn_with_ui": zhipu_ui,
|
572 |
"fn_without_ui": zhipu_noui,
|
573 |
"endpoint": None,
|
574 |
-
"max_token":
|
575 |
"tokenizer": tokenizer_gpt35,
|
576 |
"token_cnt": get_token_num_gpt35,
|
577 |
-
}
|
578 |
})
|
579 |
except:
|
580 |
print(trimmed_format_exc())
|
@@ -617,7 +653,7 @@ AZURE_CFG_ARRAY = get_conf("AZURE_CFG_ARRAY")
|
|
617 |
if len(AZURE_CFG_ARRAY) > 0:
|
618 |
for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
|
619 |
# 可能会覆盖之前的配置,但这是意料之中的
|
620 |
-
if not azure_model_name.startswith('azure'):
|
621 |
raise ValueError("AZURE_CFG_ARRAY中配置的模型必须以azure开头")
|
622 |
endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
|
623 |
f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'
|
@@ -668,6 +704,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|
668 |
"""
|
669 |
import threading, time, copy
|
670 |
|
|
|
671 |
model = llm_kwargs['llm_model']
|
672 |
n_model = 1
|
673 |
if '&' not in model:
|
@@ -682,7 +719,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|
682 |
executor = ThreadPoolExecutor(max_workers=4)
|
683 |
models = model.split('&')
|
684 |
n_model = len(models)
|
685 |
-
|
686 |
window_len = len(observe_window)
|
687 |
assert window_len==3
|
688 |
window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
|
@@ -701,7 +738,7 @@ def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, obser
|
|
701 |
time.sleep(0.25)
|
702 |
if not window_mutex[-1]: break
|
703 |
# 看门狗(watchdog)
|
704 |
-
for i in range(n_model):
|
705 |
window_mutex[i][1] = observe_window[1]
|
706 |
# 观察窗(window)
|
707 |
chat_string = []
|
@@ -741,6 +778,7 @@ def predict(inputs, llm_kwargs, *args, **kwargs):
|
|
741 |
additional_fn代表点击的哪个按钮,按钮见functional.py
|
742 |
"""
|
743 |
|
|
|
744 |
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
|
745 |
yield from method(inputs, llm_kwargs, *args, **kwargs)
|
746 |
|
|
|
11 |
import tiktoken, copy
|
12 |
from functools import lru_cache
|
13 |
from concurrent.futures import ThreadPoolExecutor
|
14 |
+
from toolbox import get_conf, trimmed_format_exc, apply_gpt_academic_string_mask
|
15 |
|
16 |
from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui
|
17 |
from .bridge_chatgpt import predict as chatgpt_ui
|
|
|
31 |
from .bridge_google_gemini import predict as genai_ui
|
32 |
from .bridge_google_gemini import predict_no_ui_long_connection as genai_noui
|
33 |
|
34 |
+
from .bridge_zhipu import predict_no_ui_long_connection as zhipu_noui
|
35 |
+
from .bridge_zhipu import predict as zhipu_ui
|
36 |
+
|
37 |
colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044']
|
38 |
|
39 |
class LazyloadTiktoken(object):
|
|
|
47 |
tmp = tiktoken.encoding_for_model(model)
|
48 |
print('加载tokenizer完毕')
|
49 |
return tmp
|
50 |
+
|
51 |
def encode(self, *args, **kwargs):
|
52 |
+
encoder = self.get_encoder(self.model)
|
53 |
return encoder.encode(*args, **kwargs)
|
54 |
+
|
55 |
def decode(self, *args, **kwargs):
|
56 |
+
encoder = self.get_encoder(self.model)
|
57 |
return encoder.decode(*args, **kwargs)
|
58 |
|
59 |
# Endpoint 重定向
|
|
|
66 |
# 兼容旧版的配置
|
67 |
try:
|
68 |
API_URL = get_conf("API_URL")
|
69 |
+
if API_URL != "https://api.openai.com/v1/chat/completions":
|
70 |
openai_endpoint = API_URL
|
71 |
print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置")
|
72 |
except:
|
|
|
98 |
"tokenizer": tokenizer_gpt35,
|
99 |
"token_cnt": get_token_num_gpt35,
|
100 |
},
|
101 |
+
|
102 |
"gpt-3.5-turbo-16k": {
|
103 |
"fn_with_ui": chatgpt_ui,
|
104 |
"fn_without_ui": chatgpt_noui,
|
|
|
153 |
"token_cnt": get_token_num_gpt4,
|
154 |
},
|
155 |
|
156 |
+
"gpt-4-turbo-preview": {
|
157 |
+
"fn_with_ui": chatgpt_ui,
|
158 |
+
"fn_without_ui": chatgpt_noui,
|
159 |
+
"endpoint": openai_endpoint,
|
160 |
+
"max_token": 128000,
|
161 |
+
"tokenizer": tokenizer_gpt4,
|
162 |
+
"token_cnt": get_token_num_gpt4,
|
163 |
+
},
|
164 |
+
|
165 |
"gpt-4-1106-preview": {
|
166 |
"fn_with_ui": chatgpt_ui,
|
167 |
"fn_without_ui": chatgpt_noui,
|
|
|
171 |
"token_cnt": get_token_num_gpt4,
|
172 |
},
|
173 |
|
174 |
+
"gpt-4-0125-preview": {
|
175 |
+
"fn_with_ui": chatgpt_ui,
|
176 |
+
"fn_without_ui": chatgpt_noui,
|
177 |
+
"endpoint": openai_endpoint,
|
178 |
+
"max_token": 128000,
|
179 |
+
"tokenizer": tokenizer_gpt4,
|
180 |
+
"token_cnt": get_token_num_gpt4,
|
181 |
+
},
|
182 |
+
|
183 |
"gpt-3.5-random": {
|
184 |
"fn_with_ui": chatgpt_ui,
|
185 |
"fn_without_ui": chatgpt_noui,
|
|
|
188 |
"tokenizer": tokenizer_gpt4,
|
189 |
"token_cnt": get_token_num_gpt4,
|
190 |
},
|
191 |
+
|
192 |
"gpt-4-vision-preview": {
|
193 |
"fn_with_ui": chatgpt_vision_ui,
|
194 |
"fn_without_ui": chatgpt_vision_noui,
|
|
|
218 |
"token_cnt": get_token_num_gpt4,
|
219 |
},
|
220 |
|
221 |
+
# 智谱AI
|
222 |
+
"glm-4": {
|
223 |
+
"fn_with_ui": zhipu_ui,
|
224 |
+
"fn_without_ui": zhipu_noui,
|
225 |
+
"endpoint": None,
|
226 |
+
"max_token": 10124 * 8,
|
227 |
+
"tokenizer": tokenizer_gpt35,
|
228 |
+
"token_cnt": get_token_num_gpt35,
|
229 |
+
},
|
230 |
+
"glm-3-turbo": {
|
231 |
+
"fn_with_ui": zhipu_ui,
|
232 |
+
"fn_without_ui": zhipu_noui,
|
233 |
+
"endpoint": None,
|
234 |
+
"max_token": 10124 * 4,
|
235 |
"tokenizer": tokenizer_gpt35,
|
236 |
"token_cnt": get_token_num_gpt35,
|
237 |
},
|
238 |
|
239 |
+
# api_2d (此后不需要在此处添加api2d的接口了,因为下面的代码会自动添加)
|
240 |
"api2d-gpt-4": {
|
241 |
"fn_with_ui": chatgpt_ui,
|
242 |
"fn_without_ui": chatgpt_noui,
|
|
|
560 |
})
|
561 |
except:
|
562 |
print(trimmed_format_exc())
|
563 |
+
if "sparkv3" in AVAIL_LLM_MODELS or "sparkv3.5" in AVAIL_LLM_MODELS: # 讯飞星火认知大模型
|
564 |
try:
|
565 |
from .bridge_spark import predict_no_ui_long_connection as spark_noui
|
566 |
from .bridge_spark import predict as spark_ui
|
|
|
572 |
"max_token": 4096,
|
573 |
"tokenizer": tokenizer_gpt35,
|
574 |
"token_cnt": get_token_num_gpt35,
|
575 |
+
},
|
576 |
+
"sparkv3.5": {
|
577 |
+
"fn_with_ui": spark_ui,
|
578 |
+
"fn_without_ui": spark_noui,
|
579 |
+
"endpoint": None,
|
580 |
+
"max_token": 4096,
|
581 |
+
"tokenizer": tokenizer_gpt35,
|
582 |
+
"token_cnt": get_token_num_gpt35,
|
583 |
}
|
584 |
})
|
585 |
except:
|
|
|
600 |
})
|
601 |
except:
|
602 |
print(trimmed_format_exc())
|
603 |
+
if "zhipuai" in AVAIL_LLM_MODELS: # zhipuai 是glm-4的别名,向后兼容配置
|
604 |
try:
|
|
|
|
|
605 |
model_info.update({
|
606 |
"zhipuai": {
|
607 |
"fn_with_ui": zhipu_ui,
|
608 |
"fn_without_ui": zhipu_noui,
|
609 |
"endpoint": None,
|
610 |
+
"max_token": 10124 * 8,
|
611 |
"tokenizer": tokenizer_gpt35,
|
612 |
"token_cnt": get_token_num_gpt35,
|
613 |
+
},
|
614 |
})
|
615 |
except:
|
616 |
print(trimmed_format_exc())
|
|
|
653 |
if len(AZURE_CFG_ARRAY) > 0:
|
654 |
for azure_model_name, azure_cfg_dict in AZURE_CFG_ARRAY.items():
|
655 |
# 可能会覆盖之前的配置,但这是意料之中的
|
656 |
+
if not azure_model_name.startswith('azure'):
|
657 |
raise ValueError("AZURE_CFG_ARRAY中配置的模型必须以azure开头")
|
658 |
endpoint_ = azure_cfg_dict["AZURE_ENDPOINT"] + \
|
659 |
f'openai/deployments/{azure_cfg_dict["AZURE_ENGINE"]}/chat/completions?api-version=2023-05-15'
|
|
|
704 |
"""
|
705 |
import threading, time, copy
|
706 |
|
707 |
+
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
|
708 |
model = llm_kwargs['llm_model']
|
709 |
n_model = 1
|
710 |
if '&' not in model:
|
|
|
719 |
executor = ThreadPoolExecutor(max_workers=4)
|
720 |
models = model.split('&')
|
721 |
n_model = len(models)
|
722 |
+
|
723 |
window_len = len(observe_window)
|
724 |
assert window_len==3
|
725 |
window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True]
|
|
|
738 |
time.sleep(0.25)
|
739 |
if not window_mutex[-1]: break
|
740 |
# 看门狗(watchdog)
|
741 |
+
for i in range(n_model):
|
742 |
window_mutex[i][1] = observe_window[1]
|
743 |
# 观察窗(window)
|
744 |
chat_string = []
|
|
|
778 |
additional_fn代表点击的哪个按钮,按钮见functional.py
|
779 |
"""
|
780 |
|
781 |
+
inputs = apply_gpt_academic_string_mask(inputs, mode="show_llm")
|
782 |
method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] # 如果这里报错,检查config中的AVAIL_LLM_MODELS选项
|
783 |
yield from method(inputs, llm_kwargs, *args, **kwargs)
|
784 |
|