Update app.py
app.py CHANGED
@@ -10,131 +10,30 @@ openai.api_key = os.environ['OPENAI_API_KEY']
 
 user_db = {os.environ['username1']: os.environ['password1'], os.environ['username2']: os.environ['password2'], os.environ['username3']: os.environ['password3']}
 
-messages = [{"role": "system", "content": 'You are a helpful assistant.'}]
 
 
 
-def roleChoice(role):
-    global messages
-    messages = [{"role": "system", "content": role}]
-    return "role:" + role
-
-
-def audioGPT(audio):
-    global messages
-
-    audio_file = open(audio, "rb")
-    transcript = openai.Audio.transcribe("whisper-1", audio_file)
-
-    messages.append({"role": "user", "content": transcript["text"]})
-
-    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
-
-    system_message = response["choices"][0]["message"]
-    messages.append(system_message)
-
-    chats = ""
-    for msg in messages:
-        if msg['role'] != 'system':
-            chats += msg['role'] + ": " + msg['content'] + "\n\n"
-
-    return chats
-
-
 def textGPT(text):
-
-
-    messages.append({"role": "user", "content": text})
-
-    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
-
-    system_message = response["choices"][0]["message"]
-    messages.append(system_message)
-
-    chats = ""
-    for msg in messages:
-        if msg['role'] != 'system':
-            chats += msg['role'] + ": " + msg['content'] + "\n\n"
-
-    return chats
-
-
-def siriGPT(audio):
-    global messages
-
-    audio_file = open(audio, "rb")
-    transcript = openai.Audio.transcribe("whisper-1", audio_file)
-
-    messages.append({"role": "user", "content": transcript["text"]})
-
-    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
-
-    system_message = response["choices"][0]["message"]
-    messages.append(system_message)
-
-    lang = detect(system_message['content'])
+    messages = [{"role": "system", "content": 'You are a coding assistant.'}]
 
-
-    narrate_ans.save("narrate.wav")
+    cuda_codes = "Translate this CUDA code into HIP code:\n" + text + "\n\n###"
 
-
-
+    messages.append({"role": "user", "content": cuda_codes})
 
-def fileGPT(prompt, file_obj):
-    global messages
-
-    file_text = extract_text(file_obj.name)
-    text = prompt + "\n\n" + file_text
-
-    messages.append({"role": "user", "content": text})
-
-    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
+    response = openai.ChatCompletion.create(model="davinci:ft-zhaoyi-2023-06-21-07-18-01", messages=messages, stop="###")
 
     system_message = response["choices"][0]["message"]
-
-
-    chats = ""
-    for msg in messages:
-        if msg['role'] != 'system':
-            chats += msg['role'] + ": " + msg['content'] + "\n\n"
+    hip_codes = system_message["content"]
 
     return chats
 
 
 
-def clear():
-    global messages
-    messages = [{"role": "system", "content": 'You are a helpful technology assistant.'}]
-    return
-
-def show():
-    global messages
-    chats = ""
-    for msg in messages:
-        if msg['role'] != 'system':
-            chats += msg['role'] + ": " + msg['content'] + "\n\n"
-
-    return chats
-
 
-with gr.Blocks() as chatHistory:
-    gr.Markdown("Click the Clear button below to remove all the chat history.")
-    clear_btn = gr.Button("Clear")
-    clear_btn.click(fn=clear, inputs=None, outputs=None, queue=False)
 
-    gr.Markdown("Click the Display button below to show all the chat history.")
-    show_out = gr.Textbox()
-    show_btn = gr.Button("Display")
-    show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)
-
-
-
-role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description = "Choose your GPT roles, e.g. You are a helpful technology assistant. You are an IT architect. You are a developer relations consultant. You are a machine learning engineer. You are a senior C++ developer.")
 text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
-
-
-file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text", description = "Enter prompt sentences and your PDF, e.g. let's think step by step, summarize the following text: or (in Chinese): let's think step by step, summarize the following content:")
-demo = gr.TabbedInterface([role, text, audio, siri, file, chatHistory], [ "roleChoice", "chatGPT", "audioGPT", "siriGPT", "fileGPT", "ChatHistory"])
+
+demo = gr.TabbedInterface([text], [ "HipifyPlus"])
 
 if __name__ == "__main__":
     demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
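Two details of the committed textGPT look suspect. First, davinci:ft-* fine-tunes are legacy completion models: in the pre-1.0 openai Python library they are served by openai.Completion, and openai.ChatCompletion rejects non-chat model names. Second, the function still ends with return chats, but chats is no longer assigned anywhere in the new file, so the call would raise a NameError; the hip_codes value computed just above is presumably what should be returned. A minimal sketch of the apparent intent follows; the max_tokens and temperature values are assumptions, not taken from the commit.

import os

import openai

openai.api_key = os.environ['OPENAI_API_KEY']


def textGPT(text):
    # Legacy fine-tuned models (davinci:ft-...) are queried through the
    # Completions endpoint with a single prompt string, not a message list.
    prompt = "Translate this CUDA code into HIP code:\n" + text + "\n\n###"

    response = openai.Completion.create(
        model="davinci:ft-zhaoyi-2023-06-21-07-18-01",
        prompt=prompt,
        max_tokens=1024,  # assumed output budget for the translated code
        temperature=0,    # deterministic decoding suits code translation
        stop="###",       # same delimiter the commit appends to the prompt
    )

    # Return the translated HIP code rather than the stale `chats` variable.
    hip_codes = response["choices"][0]["text"]
    return hip_codes

If the fine-tune was trained with "###" terminating each prompt and completion, the stop sequence truncates generation at that delimiter, matching the stop="###" argument already present in the commit.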