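# app.py — Gradio chat demo that streams completions from a local GGUF model via LlamaCpp.
# The commented-out blocks below sketch an optional FAISS-based RAG pipeline that is currently disabled.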
import gradio as gr
# from langchain_community.chat_models import ChatOllama
# from langchain_community.embeddings import GPT4AllEmbeddings
# from langchain.prompts import ChatPromptTemplate
# from langchain.schema.runnable import RunnablePassthrough
# from langchain_community.vectorstores import FAISS
from langchain_community.llms import LlamaCpp
# import os
# os.system("ollama pull alen_ox/llama_3_fin")
vector_db_path = "vectorstores/db_faiss"
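# Path to the FAISS index; it is only referenced by the commented-out RAG code below.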
llm = LlamaCpp(
    model_path="Qwen2-7B-Instruct.Q5_K_M.gguf",
    temperature=0.75,
    max_tokens=2000,
    top_p=1,
    # callback_manager=callback_manager,
    verbose=True,  # Verbose is required to pass to the callback manager
)
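# Note: depending on the hardware, LlamaCpp parameters such as n_ctx (context window size) and
# n_gpu_layers (GPU offload) may also need to be set; the call above keeps their defaults.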
# embeddings = OllamaEmbeddings(model="nomic-embed-text", show_progress=False)
# embeddings = GPT4AllEmbeddings(model_name = "all-MiniLM-L6-v2.gguf2.f16.gguf", gpt4all_kwargs = {'allow_download': 'True'})
# db = FAISS.load_local(vector_db_path, embeddings, allow_dangerous_deserialization=True)
# # Create retriever
# retriever = db.as_retriever(
# search_type="similarity",
# search_kwargs= {"k": 3}
# )
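# A minimal sketch (not part of the original flow) of how vectorstores/db_faiss could be built with the
# same embeddings before being loaded above; the loader and splitter choices here are assumptions:
# from langchain_community.document_loaders import TextLoader
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# docs = TextLoader("data.txt").load()
# chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_documents(docs)
# FAISS.from_documents(chunks, embeddings).save_local(vector_db_path)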
# local_llm = 'llama3.1'
# llm = ChatOllama(model=local_llm,
# keep_alive="3h",
# max_tokens=512,
# temperature=0)
# Chat callback: Gradio passes (message, history, *additional_inputs) to this function in order.
def respond(message, history, system_message=None, path_document=None):
    # print(message, history, system_message, path_document)
    respon = ""
    # print("Answer:\n\n", end=" ", flush=True)
    # template = """You are a virtual assistant, so use the data below to answer the question;
    # if no information is available, give the answer closest to the question from the information found.
    # Content: {content}
    # Question: {question}
    # Only give helpful answers.
    # Helpful answer:
    # """
    # prompt = ChatPromptTemplate.from_template(template)
    # rag_chain = (
    #     {"content": retriever, "question": RunnablePassthrough()}
    #     | prompt
    #     | llm
    # )
    # for chunk in rag_chain.stream(message):
    #     respon += chunk.content
    #     print(chunk.content, end="", flush=True)
    #     yield respon
    for chunk in llm.stream(message):
        # LlamaCpp.stream() yields plain strings, so append and print the chunk directly.
        respon += chunk
        print(chunk, end="", flush=True)
        yield respon
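# Because respond is a generator that yields the growing string, gr.ChatInterface streams the
# reply to the UI incrementally instead of waiting for the full completion.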
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        # gr.Textbox(value="Answer the question based ONLY on the following context; if there is none, say there is no answer:", label="System message"),
        # With the Textbox above commented out, the uploaded file is the only additional input passed to respond.
        gr.UploadButton("Upload a file", file_count="single"),
        # gr.DownloadButton("Download the file")
    ],
)
if __name__ == "__main__":
    demo.launch()