pedropauletti committed · Commit 1e40d63
1 Parent(s): 23b1e7c
Create app.py
app.py
ADDED
@@ -0,0 +1,105 @@
import gradio as gr
import os  # not used yet in this commit
import time
from haystack.document_stores import InMemoryDocumentStore
from haystack.nodes import EmbeddingRetriever
import pandas as pd
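
# Assumed dependencies for this Space (package names only; no
# requirements.txt is part of this commit):
#   farm-haystack[inference]  -> InMemoryDocumentStore, EmbeddingRetriever
#   gradio (3.x API: gr.Audio(source=...))
#   pandas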

def load_qa_model():
    # Build the FAQ index: an in-memory store plus a sentence-transformers
    # retriever that matches incoming questions against the stored ones.
    document_store = InMemoryDocumentStore()
    retriever = EmbeddingRetriever(
        document_store=document_store,
        embedding_model="sentence-transformers/all-MiniLM-L6-v2",
        use_gpu=False,
        scale_score=False,
    )

    # Load a dataframe with columns "question" and "answer"
    # (plus any custom metadata).
    # NOTE: '/content/...' is a Colab-style path; on a Space the CSV
    # usually sits next to app.py.
    df = pd.read_csv('/content/social-faq.csv', on_bad_lines='skip', delimiter=';')
    # Minimal cleaning
    df.fillna(value="", inplace=True)
    df["question"] = df["question"].apply(lambda x: x.strip())

    # Embed each FAQ question; the question text becomes the document
    # "content" that incoming queries are matched against.
    questions = list(df["question"].values)
    df["embedding"] = retriever.embed_queries(queries=questions).tolist()
    df = df.rename(columns={"question": "content"})

    # Convert the dataframe to a list of dicts and index them in the store
    docs_to_index = df.to_dict(orient="records")
    document_store.write_documents(docs_to_index)

    return retriever
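
# For reference, the FAQ CSV is expected to look roughly like this
# (';'-delimited, with at least these two columns; the sample rows are
# made up):
#
#   question;answer
#   How do I create an account?;Open the app, tap "Sign up" and follow the steps.
#   How do I reset my password?;Use the "Forgot password" link on the login screen.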

def add_text(history, text):
    # Append the user's message and disable the textbox while the bot answers.
    history = history + [(text, None)]
    return history, gr.Textbox(value="", interactive=False)


def add_file(history, file):
    # Append an uploaded file to the chat (no upload control is wired up yet).
    history = history + [((file.name,), None)]
    return history

def bot(history):
    print(history)  # debug: log the chat state on each turn
    history[-1][1] = ""

    # Answer the latest user message; the original history[0][0] would
    # always re-answer the first question in the chat.
    response = get_answers(retriever, history[-1][0])

    # Emit the answer character by character for a typing effect.
    for character in response:
        history[-1][1] += character
        time.sleep(0.01)
        yield history
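
# Gradio runs bot() as a generator: with queuing enabled (demo.queue()
# below), the Chatbot re-renders after every yield, which is what makes
# the reply appear to be typed out.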

def get_answers(retriever, query):
    from haystack.pipelines import FAQPipeline

    # FAQPipeline embeds the query, retrieves the most similar stored
    # question, and returns the answer paired with it.
    pipe = FAQPipeline(retriever=retriever)

    # Raise top_k to get more candidate answers.
    prediction = pipe.run(query=query, params={"Retriever": {"top_k": 1}})

    answers = prediction['answers']
    if answers:
        return answers[0].answer
    else:
        return "I don't have an answer to that question"
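
# Quick local sanity check (assuming the CSV above is in place; the
# question is made up):
#
#   retriever = load_qa_model()
#   print(get_answers(retriever, "How do I create an account?"))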

# Build the FAQ index once at startup; bot() reuses it on every turn.
retriever = load_qa_model()

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        bubble_full_width=False,
        # avatar_images=(None, "/content/avatar.png"),
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=4,
            show_label=False,
            placeholder="Enter text and press enter",
            container=False,
        )
    # Declared but not connected to any handler yet (see the sketch after
    # the file).
    inputRecord = gr.Audio(label="Record a question", source="microphone", type="filepath")
    audioOutput = gr.Audio(label="Listen to the answer", interactive=False)

    # Submit flow: append the user message and clear/disable the textbox,
    # stream the bot reply, then re-enable the textbox.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, chatbot, chatbot
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

demo.queue()
demo.launch()
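
For completeness: the two Audio components are declared but never wired to an event in this commit. A minimal sketch of how they could be connected, assuming hypothetical transcribe() and synthesize() helpers (speech-to-text and text-to-speech are not part of this commit):

    def add_audio_question(history, audio_path):
        text = transcribe(audio_path)  # assumed ASR helper, not in this commit
        return history + [(text, None)]

    # Inside the `with gr.Blocks() as demo:` block:
    inputRecord.stop_recording(
        add_audio_question, [chatbot, inputRecord], chatbot
    ).then(bot, chatbot, chatbot).then(
        lambda history: synthesize(history[-1][1]),  # assumed TTS helper
        chatbot,
        audioOutput,
    )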