# Gradio RAG chat app: answers questions about the EU AI Act in Bulgarian using
# a Groq-hosted Llama 3 model and a persisted Chroma vector store.
import os
import gradio as gr
from langchain_groq import ChatGroq
from langchain.prompts import ChatPromptTemplate
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_huggingface.embeddings import HuggingFaceEmbeddings
from langchain_chroma import Chroma
def rag_retriever(message, history, system_prompt, num_sources=4, temperature=0):
    # Note: the Gradio-supplied `history` is not used; a fresh ConversationBufferMemory
    # is created on every call, so each question is answered independently.
    chat = ChatGroq(temperature=temperature, model_name="llama3-70b-8192", api_key=os.getenv("GROQ_API_KEY"))
    embeddings = HuggingFaceEmbeddings(model_name="avsolatorio/GIST-large-Embedding-v0")
    store = Chroma(persist_directory='/home/user/app/db/', embedding_function=embeddings, collection_name='ai_act')

    # The system prompt is extended with the retrieved context; the user question is passed separately.
    prompt_template = ChatPromptTemplate.from_messages([
        ("system", system_prompt + """
        Use the following pieces of context to answer the user's question.
        ----------------
        {context}"""),
        ("human", "{question}")
    ])

    memory = ConversationBufferMemory(memory_key="chat_history", output_key="answer", return_messages=True)
    retriever = store.as_retriever(search_type="similarity", search_kwargs={'k': num_sources})
    chain = ConversationalRetrievalChain.from_llm(llm=chat,
                                                  retriever=retriever,
                                                  return_source_documents=True,
                                                  memory=memory,
                                                  combine_docs_chain_kwargs={"prompt": prompt_template})
    output = chain.invoke({"question": message})

    # Render the answer followed by its sources ("Отговор" = "Answer", "Източници" = "Sources", "Страница" = "Page").
    sources = ""
    for doc in output['source_documents']:
        source_content = doc.page_content.strip().replace("\r\n", " ").replace("\r", " ").replace("\n", " ")
        sources += f'<span style="color:green">Страница: {doc.metadata["page"] + 1}</span><br><span style="color:gray">{source_content}</span><br><br>'
    response = f"""<h5>Отговор:</h5>{output['answer']}<br><h5>Източници:</h5>{sources}"""
    return response
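
# Example of calling rag_retriever directly (outside Gradio) -- a minimal sketch,
# assuming GROQ_API_KEY is set in the environment and the 'ai_act' Chroma
# collection has been built (see the ingestion sketch at the end of this file).
# The question and system prompt are taken from the examples further down.
#
# print(rag_retriever(
#     "Каква е целта на настоящия регламент",
#     history=[],
#     system_prompt="You are an expert assistant in Bulgarian regulations. "
#                   "Provide precise and clear answers. Always respond in Bulgarian, "
#                   "regardless of the language used in the question.",
#     num_sources=4,
#     temperature=0,
# ))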
rag = gr.ChatInterface(rag_retriever,
                       examples=[
                           # Each example is [question, system prompt, number of sources, temperature].
                           ["Каква е целта на настоящия регламент",
                            "You are an expert assistant in Bulgarian regulations. Provide precise and clear answers. Provide a detailed and comprehensive answer, incorporating as much relevant information as possible. Always respond in Bulgarian, regardless of the language used in the question.", 4, 0],
                           ["Какво са Системите с ИИ",
                            "You are an expert assistant in Bulgarian regulations. Provide precise and clear answers. Always respond in Bulgarian, regardless of the language used in the question.", 4, 1],
                           ["Какво е равнище на технологично развитие",
                            "You are an expert assistant in Bulgarian regulations. Provide precise and clear answers. Always respond in Bulgarian, regardless of the language used in the question.", 4, 2]
                       ],
                       title="Чатене с документа AI Act",  # "Chatting with the AI Act document"
                       description="Питайте каквото пожелаете, но пишете на български.",  # "Ask anything you like, but write in Bulgarian."
                       chatbot=gr.Chatbot(placeholder="<strong>Вашият личен AI Act помощник</strong><br>Питайте каквото пожелаете, но пишете на български.", height=630),
                       textbox=gr.Textbox(placeholder="Задайте своя въпрос...", container=False, scale=7),
                       retry_btn="Отново",        # "Retry"
                       undo_btn="Назад",          # "Undo"
                       clear_btn="Изчистете",     # "Clear"
                       submit_btn="Изпрати",      # "Submit"
                       additional_inputs=[
                           gr.Textbox(value="You are an expert assistant in Bulgarian regulations. Provide precise and clear answers. Always respond in Bulgarian, regardless of the language used in the question.", label="System Prompt"),
                           gr.Slider(minimum=1, maximum=10, value=4, step=1, label="Брой препратки"),  # "Number of sources"
                           gr.Slider(minimum=0, maximum=2, value=0, label="Креативност на модела", info="Ако е много високо моделът си измисля, но може да напише интересни неща.")  # "Model creativity": too high and the model confabulates, but may write interesting things.
                       ],
                       additional_inputs_accordion=gr.Accordion("Допълнителни настройки", open=False),  # "Additional settings"
                       )
rag.launch()
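
# ---------------------------------------------------------------------------
# Note: the app assumes a Chroma collection named 'ai_act' has already been
# persisted to /home/user/app/db/. A minimal one-off ingestion sketch (run
# separately, e.g. as ingest.py) could look like the lines below. The source
# filename 'ai_act.pdf' and the chunking parameters are illustrative
# assumptions, not part of the original Space.
#
# from langchain_community.document_loaders import PyPDFLoader
# from langchain_text_splitters import RecursiveCharacterTextSplitter
# from langchain_huggingface.embeddings import HuggingFaceEmbeddings
# from langchain_chroma import Chroma
#
# pages = PyPDFLoader("ai_act.pdf").load()   # one Document per page, with the 'page' metadata used above
# chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(pages)
# Chroma.from_documents(documents=chunks,
#                       embedding=HuggingFaceEmbeddings(model_name="avsolatorio/GIST-large-Embedding-v0"),
#                       persist_directory="/home/user/app/db/",
#                       collection_name="ai_act")
# ---------------------------------------------------------------------------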