# langchain / app.py
import os
import gradio as gr
import openai
from langdetect import detect  # language detection for the audio/siri sketches below
from gtts import gTTS  # text-to-speech for the siri-style voice reply
from pdfminer.high_level import extract_text
#any vector server should work, trying pinecone first
import pinecone
#langchain parts: splitting, loading, embedding and the Pinecone vector store
from langchain.text_splitter import SpacyTextSplitter
from langchain.document_loaders import TextLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
openai.api_key = os.environ['OPENAI_API_KEY']
pinecone_key = os.environ['PINECONE_API_KEY']
pinecone_environment='us-west1-gcp'
user_db = {os.environ['username1']: os.environ['password1']}
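# `messages` is the shared chat history; every tab below reads from and appends to it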
messages = [{"role": "system", "content": 'You are a helpful assistant.'}]
def init_pinecone(index_name):
    # initialize connection to the Pinecone vector DB (API key from app.pinecone.io)
    pinecone.init(
        api_key=pinecone_key,
        environment=pinecone_environment
    )
    # dimension is 1536 because we use OpenAI embeddings;
    # only create the index if it does not already exist
    if index_name not in pinecone.list_indexes():
        pinecone.create_index(index_name, dimension=1536, metric="euclidean")
    index = pinecone.Index(index_name)
    return index
def process_file(index_name, file_obj):
    init_pinecone(index_name)
    embeddings = OpenAIEmbeddings()
    # load the uploaded file (a plain-text document) into LangChain Document objects
    docs = TextLoader(file_obj.name).load()
    # for Chinese text, pass pipeline='zh_core_web_sm' to the splitter
    splitter = SpacyTextSplitter(chunk_size=1000, chunk_overlap=200)
    split_text = splitter.split_documents(docs)
    for document in split_text:
        Pinecone.from_documents([document], embeddings, index_name=index_name)
    return list_pinecone(index_name)
def list_pinecone(index_name):
    index = pinecone.Index(index_name)
    stats = index.describe_index_stats()
    return stats
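# Hedged sketch (not wired into the UI): how the chunks stored by process_file
# could be queried back. `query_pinecone` is a hypothetical helper, assuming the
# index already exists; Pinecone.from_existing_index and similarity_search are
# the standard LangChain calls for this.
def query_pinecone(index_name, query, k=3):
    embeddings = OpenAIEmbeddings()
    store = Pinecone.from_existing_index(index_name, embeddings)
    # return the k stored chunks whose embeddings are closest to the query
    return store.similarity_search(query, k=k)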
def roleChoice(role):
    global messages
    messages = [{"role": "system", "content": role}]
    return "role:" + role
def textGPT(text):
    global messages
    messages.append({"role": "user", "content": text})
    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)
    system_message = response["choices"][0]["message"]
    messages.append(system_message)
    # render the whole conversation, skipping the system prompt
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"
    return chats
def fileGPT(prompt, file_obj):
    global messages
    # extract_text (pdfminer) pulls the raw text out of the uploaded PDF
    file_text = extract_text(file_obj.name)
    text = prompt + "\n\n" + file_text
    messages.append({"role": "user", "content": text})
    response = openai.ChatCompletion.create(model="gpt-4", messages=messages)
    system_message = response["choices"][0]["message"]
    messages.append(system_message)
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"
    return chats
def clear():
    global messages
    # reset the history to the same default system prompt used at startup
    messages = [{"role": "system", "content": 'You are a helpful assistant.'}]
    return
def show():
    global messages
    chats = ""
    for msg in messages:
        if msg['role'] != 'system':
            chats += msg['role'] + ": " + msg['content'] + "\n\n"
    return chats
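# Hedged sketch of the audio interfaces that are commented out below: audioGPT
# and siriGPT are referenced there but not defined in this file. These minimal
# versions assume OpenAI Whisper for transcription, reuse textGPT for the chat
# turn, and use the langdetect + gTTS imports above to speak the reply.
def audioGPT(audio_path):
    # gr.Audio(type="filepath") hands us a path to the recorded clip
    with open(audio_path, "rb") as f:
        transcript = openai.Audio.transcribe("whisper-1", f)
    return textGPT(transcript["text"])

def siriGPT(audio_path):
    audioGPT(audio_path)
    reply = messages[-1]["content"]
    # detect the reply language so gTTS picks a matching voice (e.g. 'en', 'zh-cn')
    speech = gTTS(text=reply, lang=detect(reply), slow=False)
    speech.save("reply.mp3")
    return "reply.mp3"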
with gr.Blocks() as chatHistory:
    gr.Markdown("Click the Clear button below to remove all the chat history.")
    clear_btn = gr.Button("Clear")
    clear_btn.click(fn=clear, inputs=None, outputs=None, queue=False)
    gr.Markdown("Click the Display button below to show all the chat history.")
    show_out = gr.Textbox()
    show_btn = gr.Button("Display")
    show_btn.click(fn=show, inputs=None, outputs=show_out, queue=False)
role = gr.Interface(fn=roleChoice, inputs="text", outputs="text", description="Choose your GPT role, e.g. You are a helpful technology assistant. You are an IT architect. You are a developer relations consultant. You are a machine learning engineer. You are a senior C++ developer.")
text = gr.Interface(fn=textGPT, inputs="text", outputs="text")
# note: don't name this variable `pinecone`, or it would shadow the pinecone module used above
pinecone_tab = gr.Interface(fn=process_file, inputs=["text", "file"], outputs="text")
#audio = gr.Interface(fn=audioGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text")
#siri = gr.Interface(fn=siriGPT, inputs=gr.Audio(source="microphone", type="filepath"), outputs = "audio")
file = gr.Interface(fn=fileGPT, inputs=["text", "file"], outputs="text", description="Enter prompt sentences and your PDF, e.g. let's think step by step, summarize the following text:")
demo = gr.TabbedInterface([role, text, file, chatHistory, pinecone_tab], ["roleChoice", "chatGPT", "fileGPT", "ChatHistory", "Pinecone"])
if __name__ == "__main__":
    demo.launch(enable_queue=False, auth=lambda u, p: user_db.get(u) == p,
                auth_message="This app is not designed for public use, as it is linked to a personal OpenAI API key. However, you can copy the code and build your own multi-functional ChatGPT with a unique ID and password by using the 'Repository secrets' feature on Hugging Face.")
    #demo.launch()
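# Local usage sketch: export OPENAI_API_KEY, PINECONE_API_KEY, username1 and
# password1 in the environment, then run `python app.py` and log in at the
# printed local URL.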