Update app.py
Browse files
app.py
CHANGED
@@ -158,6 +158,22 @@ with gr.Blocks() as chatHistory:
|
|
158 |
show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)
|
159 |
|
160 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
161 |
|
162 |
|
163 |
|
@@ -171,7 +187,7 @@ vector_server = gr.Interface(fn=process_file, inputs=["text", gr.inputs.File(fil
|
|
171 |
#audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
|
172 |
#siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
|
173 |
file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs=None, description = "Enter prompt sentences and your PDF. e.g. lets think step by step, summarize this following text: 或者 让我们一步一步地思考,总结以下的内容:")
|
174 |
-
demo = gr.TabbedInterface([role, text, file, chatHistory, vector_server], [ "roleChoice", "chatGPT", "fileGPT", "ChatHistory", "VectorServer"])
|
175 |
|
176 |
if __name__ == "__main__":
|
177 |
demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
|
|
|
158 |
show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)
|
159 |
|
160 |
|
161 |
# Pinecone index administration tab: list, delete, and inspect indexes.
with gr.Blocks() as pinecone_tools:
    # List all existing Pinecone indexes into a textbox.
    pinecone_list = gr.Textbox()
    list_btn = gr.Button(value="List")  # renamed: `list` shadowed the builtin
    list_btn.click(fn=list_pinecone, inputs=None, outputs=pinecone_list, queue=False)

    # Delete the index whose name is typed into the textbox.
    pinecone_delete_name = gr.Textbox()
    delete_btn = gr.Button(value="Delete")
    # BUG FIX: original passed `inputs=pinecone_delete`, an undefined name
    # (NameError at import time); the textbox above is `pinecone_delete_name`.
    delete_btn.click(fn=delete_pinecone, inputs=pinecone_delete_name, outputs=None, queue=False)

    # Show stats/info for the named index.
    pinecone_show_name = gr.Textbox()
    pinecone_info = gr.Textbox()
    show_btn = gr.Button(value="Show")  # renamed: `show` shadowed the show() function used by chatHistory
    show_btn.click(fn=show_pinecone, inputs=pinecone_show_name, outputs=pinecone_info, queue=False)
177 |
|
178 |
|
179 |
|
|
|
187 |
#audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
|
188 |
#siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
|
189 |
# fileGPT tab: takes prompt text plus an uploaded PDF and feeds both to fileGPT.
file = gr.Interface(
    fn=fileGPT,
    inputs=["text", "file"],
    outputs=None,
    description="Enter prompt sentences and your PDF. e.g. lets think step by step, summarize this following text: 或者 让我们一步一步地思考,总结以下的内容:",
)
190 |
# Assemble every tab into the top-level tabbed app, including the new
# Pinecone tools tab; tab order and titles are paired positionally.
_tab_interfaces = [role, text, file, chatHistory, vector_server, pinecone_tools]
_tab_titles = ["roleChoice", "chatGPT", "fileGPT", "ChatHistory", "VectorServer", "PineconeTools"]
demo = gr.TabbedInterface(_tab_interfaces, _tab_titles)
191 |
|
192 |
if __name__ == "__main__":
|
193 |
demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
|