from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core import QueryBundle
import gradio as gr
import pandas as pd
from llama_index.core.postprocessor import LLMRerank
from llama_index.core.vector_stores import (
    MetadataFilter,
    MetadataFilters,
    FilterOperator,
)
from llama_index.core.tools import RetrieverTool
from llama_index.core.retrievers import RouterRetriever
from llama_index.core.selectors import PydanticSingleSelector
from llama_index.core import VectorStoreIndex, Settings, Document
from llama_index.llms.openai import OpenAI
import os
from llama_index.embeddings.openai import OpenAIEmbedding
import nest_asyncio
import hashlib
import tiktoken
from dotenv import load_dotenv
load_dotenv()
nest_asyncio.apply()
openai_key = os.getenv('openai_key_secret')
os.environ["OPENAI_API_KEY"] = openai_key
llm = OpenAI(temperature=0, model="gpt-4o")
Settings.llm = llm
Settings.embed_model = OpenAIEmbedding(model="text-embedding-ada-002")
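# Settings registers the LLM and the embedding model globally; every index and
# retriever built below picks them up without explicit wiring.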
ds = pd.read_excel("data_metropole 2.xlsx")
# df is the DataFrame holding the source file
df = ds.drop(columns=['Theme ID', 'SousTheme ID', 'Signataire Matricule',
                      'Suppleant Matricule', 'Date Nomination', 'Date Commite Technique', 'Numero',
                      'Libelle', 'Date Creation', 'Date Debut'])
# the filter_signataire DataFrame holds the columns describing the signatory
# the filter_direction DataFrame holds the columns describing the department
# Strip the words 'cosignature' and 'signature' from the items ('cosignature'
# first, otherwise the 'signature' pass would leave a dangling 'co' behind)
df['Item Text'] = df['Item Text'].replace('cosignature', '', regex=True)
df['Item Text'] = df['Item Text'].replace('signature', '', regex=True)
filter_signataire = df[['Signataire', 'Fonction']]
filter_signataire = filter_signataire.drop_duplicates()
filter_direction = df[['Collectivite', 'Direction DGA', 'Liste Service Text']]
filter_direction = filter_direction.drop_duplicates()
# Pre-processing and cleaning of the dataframes
df = df.dropna(subset=['Item Text'])
# Fill the remaining NaN values before building the text fields below: NaN is
# truthy in Python, so `x[...] or ''` alone would leak the string "nan" into the documents.
df = df.fillna(value='')
filter_direction = filter_direction.fillna(value='')
filter_signataire = filter_signataire.fillna(value='')
df_sorted = df.sort_values(by=['Collectivite', 'Direction DGA', 'Liste Service Text', 'Theme Title', 'SousTheme Title', 'Item Text'])
# Build the text field embedded for each delegation row
df.loc[:, 'content'] = df.apply(lambda x: f'''
/ Theme : {x['Theme Title'] or ''}
/ Sous-Theme : {x['SousTheme Title'] or ''}
/ Item : {x['Item Text'] or ''}
/ Signataire : {x['Signataire'] or ''}
/ Suppleant : {x['Suppleant'] or ''}
/ Les services : {x['Liste Service Text'] or ''}
''', axis=1)
df.loc[:, 'description'] = df.apply(lambda x: f'''Collectivite : {x['Collectivite'] or ''}
Direction : {x['Direction DGA'] or ''}
Liste des Service : {x['Liste Service Text'] or ''}
''', axis=1)
filter_direction.loc[:, 'description'] = filter_direction.apply(lambda x: f'''Collectivite : {x['Collectivite'] or ''}
Direction : {x['Direction DGA'] or ''}
Liste des Service : {x['Liste Service Text'] or ''}
''', axis=1)
filter_signataire.loc[:, 'description'] = filter_signataire.apply(lambda x: f'''Signataire : {x['Signataire'] or ''}
Fonction : {x['Fonction'] or ''}
''', axis=1)
def hachage(row):
    return hashlib.sha1(row.encode("utf-8")).hexdigest()
# Hashing: a SHA-1 digest of selected columns serves as a stable document ID,
# shared between the coarse rows and the detailed content rows so the detailed
# indexes can be filtered by it later.
df['hash'] = df.apply(lambda x: hachage(f'''Collectivite : {x['Collectivite'] or ''}
Direction : {x['Direction DGA'] or ''}
Liste des Service : {x['Liste Service Text'] or ''}
'''), axis=1)
filter_direction['hash'] = filter_direction.apply(lambda x: hachage(f'''Collectivite : {x['Collectivite'] or ''}
Direction : {x['Direction DGA'] or ''}
Liste des Service : {x['Liste Service Text'] or ''}
'''), axis=1)
filter_signataire['hash'] = filter_signataire.apply(lambda x: hachage(f'''Signataire : {x['Signataire'] or ''}
'''), axis=1)
# Build the Document objects used for vectorisation
description_docs = [Document(text=row['description'], metadata={"id_documents": row['hash']}) for _, row in filter_direction.iterrows()]
content_docs = [Document(text=row['content'], metadata={"id_documents": row['hash']}) for _, row in df.iterrows()]
signataire_docs = [Document(text=row['Signataire'], metadata={"id_signataire": row['hash']}) for _, row in filter_signataire.iterrows()]
content_signataire = [Document(text=row['content'], metadata={"id_signataire": row['hash']}) for _, row in df.iterrows()]
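# Build four in-memory vector indexes over the documents above:
# - index: coarse descriptions (collectivite / direction / services), used for a first pass
# - index_all: full delegation rows, later restricted to the ids found in the first pass
# - index_signataire: signatory names only (built here but not queried by further_retrieve below)
# - index_all_signataire: full delegation rows carrying the id_signataire metadata key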
index = VectorStoreIndex.from_documents(
description_docs,
show_progress = True
)
index_all = VectorStoreIndex.from_documents(
content_docs,
show_progress = True
)
index_signataire = VectorStoreIndex.from_documents(
signataire_docs,
show_progress = True
)
index_all_signataire = VectorStoreIndex.from_documents(
content_signataire,
show_progress = True
)
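# Dense retrieval helper: embed the query, fetch the vector_top_k most similar
# nodes, and optionally re-rank them with an LLM (LLMRerank) before returning.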
def get_retrieved_nodes(query_str, vector_top_k=10, reranker_top_n=3, with_reranker=False, index=index):
query_bundle = QueryBundle(query_str)
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=vector_top_k,
)
retrieved_nodes = retriever.retrieve(query_bundle)
if with_reranker:
# configure reranker
reranker = LLMRerank(
choice_batch_size=5,
top_n=reranker_top_n,
)
retrieved_nodes = reranker.postprocess_nodes(
retrieved_nodes, query_bundle
)
return retrieved_nodes
def get_all_text(new_nodes):
texts = []
for i, node in enumerate(new_nodes, 1):
texts.append(f"\nDocument {i} : {node.get_text()}")
return ' '.join(texts)
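# Two-stage retrieval pipeline:
# 1) coarse pass over the description index and the signatory content index;
# 2) metadata filters restricting the detailed indexes to the ids retrieved in step 1;
# 3) an LLM-based router picking the retriever that best matches the question;
# 4) an LLM re-rank of the routed nodes (falls back to the unranked nodes on failure).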
def further_retrieve(query):
# Retrieve new nodes based on the query
new_nodes = get_retrieved_nodes(
query,
index=index,
vector_top_k=10,
reranker_top_n=5,
with_reranker=False,
)
new_nodes_signataire = get_retrieved_nodes(
query,
index=index_all_signataire,
vector_top_k=10,
reranker_top_n=5,
with_reranker=False,
)
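    # Keep only the detailed rows whose id matched a node from the coarse pass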
filters = MetadataFilters(
filters=[
MetadataFilter(key="id_documents", value=[node.metadata['id_documents'] for node in new_nodes], operator=FilterOperator.IN)
],
)
filters_s = MetadataFilters(
filters=[
MetadataFilter(key="id_signataire", value=[node.metadata['id_signataire'] for node in new_nodes_signataire], operator=FilterOperator.IN)
],
)
# Create a retriever with the specified filters
retriever_description = index_all.as_retriever(filters=filters, similarity_top_k=15)
    retriever_signataire = index_all_signataire.as_retriever(filters=filters_s, similarity_top_k=4)
# initialize tools
description_tool = RetrieverTool.from_defaults(
retriever=retriever_description,
description="Useful for retrieving specific context from direction, liste service and collectivite",
)
signataire_tool = RetrieverTool.from_defaults(
retriever=retriever_signataire,
description="Useful for retrieving specific context from signataire and fonction",
)
    # Route between the two filtered retrievers with an LLM-based selector
retriever = RouterRetriever(
selector=PydanticSingleSelector.from_defaults(llm=llm),
retriever_tools=[
description_tool,
signataire_tool,
],
)
    try:
query_bundle = QueryBundle(query)
# Retrieve nodes based on the original query and filters
retrieved_nodes = retriever.retrieve(query_bundle)
        reranker = LLMRerank(
            choice_batch_size=5,  # Process 5 nodes per LLM call
            top_n=10  # Return the top 10 reranked nodes
        )
# Post-process the retrieved nodes by reranking them
reranked_nodes = reranker.postprocess_nodes(retrieved_nodes, query_bundle)
return get_all_text(reranked_nodes)
    except Exception:
        print("No rerank")
        return get_all_text(retriever.retrieve(query))
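# Rough token count used to budget the prompt. Note: cl100k_base matches
# gpt-4 / gpt-3.5-turbo; gpt-4o actually uses the o200k_base encoding, so this
# is an approximation rather than an exact count for the model above.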
def estimate_tokens(text):
    # Encode the text to count its tokens
encoding = tiktoken.get_encoding("cl100k_base")
tokens = encoding.encode(text)
return len(tokens)
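# Rewrite the user question with a direct OpenAI chat call. The import is local
# so the openai client class does not shadow llama_index's OpenAI wrapper
# imported at the top of the file.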
def question_reformulation(question):
from openai import OpenAI
client = OpenAI(api_key=openai_key)
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "Reformule la question en spécifiant le domaine de la question."},
            {"role": "user", "content": question},
        ],
    )
    return response.choices[0].message.content
history_with_docs = []
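# Main chat handler: retrieve the supporting documents, reformulate the user
# question, wrap everything in a ChatML-style prompt, and stream the answer.
# history_with_docs accumulates (question, documents) pairs for the optional
# history-budgeting logic kept commented out below.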
def process_final(user_prom, history):
global history_with_docs
documents = further_retrieve(user_prom)
user_question = question_reformulation(user_prom)
history_with_docs.append((user_prom, documents))
system_p = f"""agit come un expert financier et un agent de la metropole expert dans la recherche des deleguation de signature . L'utilisateur posera une question et tu devras trouver la réponse dans les documents suivants.Focalise sur les service et la direction du signataire que l'utilisateur cherche. Tu ne dois pas poser de question en retour.Tu ne dois pas mentionner le numéro des documents. Tu t'exprimes dans la même langue que l'utilisateur.,
DOCUMENTS :
{documents}
instruction :
-donne les signataire et les supplient et reponds de facon directe.
-ta reponse peut se trouver sur plusieurs document
-justifie la raison de ta reponse
-la question fait reference a un service tres precis
-reponds par une liste structuree
"""
print("PHASE 03 passing to LLM\n")
sys_p = f"<|im_start|>system \n{system_p}\n<|im_end|>"
prompt_f = ""
# total_tokens = estimate_tokens(prompt_f)
    # for val in reversed(history):
    #     if val[0]:  # If this is a user question
    #         # Find the matching documents in history_with_docs
    #         for past_question, past_documents in reversed(history_with_docs):
    #             if past_question == val[0]:
    #                 user_p = f" <|im_start|>user \n Documents: \n {past_documents}\n Question :{val[0]}\n<|im_end|>"
    #                 break
    #     if val[1]:  # If this is an assistant answer
    #         assistant_p = f" <|im_start|>assistant \n {val[1]}\n<|im_end|>"
    #         current_tokens = estimate_tokens(user_p + assistant_p)
    #         if total_tokens + current_tokens > 3000:
    #             break
    #         else:
    #             prompt_f = user_p + assistant_p + prompt_f
    #             total_tokens += current_tokens
prompt_f = f"{sys_p} <|im_start|>user\n {user_question} \n<|im_end|><|im_start|>assistant \n"
gen = llm.stream_complete(formatted=True, prompt=prompt_f)
# print(f"le nombre TOTAL de tokens : {total_tokens}\n")
print("_"*100)
print(prompt_f)
print("o"*100)
for response in gen:
yield response.text
# Gradio Interface
with gr.Blocks() as demo:
with gr.Row():
description = """
<h1 style ="font-size: 36px;font-weight: bold;"><center>METROPOLE SIGNATAIRE CHATBOT</center></h1>
<p>
<center>
<img src="https://www.nicecotedazur.org/wp-content/themes/mnca/images/logo-metropole-nca.png" alt="rick" width="250"/>
</center>
</p>
<p style="text-align:right"> Développé par KHEOPS AI</p>
"""
gr.HTML(description)
    chatbot = gr.Chatbot(height="20rem")
    msg = gr.Textbox(show_label=False, placeholder="Posez votre question ...")
    clear = gr.Button("Réinitialiser")
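    # Submit chain: user() first echoes the message into the chat history,
    # then bot() streams the assistant's answer into the last history entry.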
def user(user_message, history):
# Capture the user message and pass it to 'process_final'
return "", history + [[user_message, None]]
def bot(history):
# Get the last user message from the history
user_message = history[-1][0]
# Process it using the 'process_final' function
gen = process_final(user_message, history)
bot_message = ""
for chunk in gen:
bot_message += chunk
history[-1][1] = bot_message # Update bot response in the conversation history
yield history
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
bot, chatbot, chatbot
)
clear.click(lambda: None, None, chatbot, queue=False)
demo.launch(share=True, debug=True)