Spaces:
Sleeping
Sleeping
File size: 2,363 Bytes
cd9c23a ed0c3ad cd9c23a ed0c3ad cd9c23a c2a504f cd9c23a 5dd5730 cd9c23a 5dd5730 cd9c23a ed0c3ad 5dd5730 ed0c3ad cd9c23a 5dd5730 ed0c3ad 5dd5730 cd9c23a ed0c3ad cd9c23a ed0c3ad c2a504f ed0c3ad |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 |
import json
import pickle
import streamlit as st
# from haystack.document_stores import FAISSDocumentStore
from haystack.document_stores import InMemoryDocumentStore
from transformers import DPRContextEncoder, DPRContextEncoderTokenizer
from haystack.nodes import DensePassageRetriever
from haystack.nodes import FARMReader
from haystack.pipelines import ExtractiveQAPipeline
st.title("DPR on Supreme Court Judgements (Capital Gain)")


@st.cache_resource
def _load_pipeline():
    """Build the extractive-QA pipeline once per Streamlit session.

    Streamlit re-executes the whole script on every user interaction, so
    without caching the document store, DPR retriever and BERT reader would
    be reloaded on each keystroke. ``st.cache_resource`` keeps the heavy
    objects alive across reruns.

    Returns:
        ExtractiveQAPipeline: retriever + reader pipeline ready for queries.
    """
    # NOTE(security): pickle.load executes arbitrary code from the file.
    # This is acceptable only because inmemory_document_store.pkl is a
    # locally produced artifact, never untrusted input.
    with open("inmemory_document_store.pkl", "rb") as f:
        document_store = pickle.load(f)

    retriever = DensePassageRetriever(
        document_store=document_store,
        query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
        passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
    )
    reader = FARMReader(model_name_or_path="deepset/bert-base-cased-squad2")
    return ExtractiveQAPipeline(reader=reader, retriever=retriever)


pipeline = _load_pipeline()

query = st.text_input("Enter your query:", "")
if query:
    with st.spinner("Searching..."):
        results = pipeline.run(query=query, params={"Retriever": {"top_k": 5}})
        for answer in results["answers"]:
            # Markdown collapses a bare "\n" into a space; two trailing
            # spaces before the newline force a hard line break so each
            # field renders on its own line.
            st.markdown(
                f"=====================  \n"
                f"Answer: {answer.answer}  \n"
                f"Context: {answer.context}  \n"
                f"Score: {answer.score}"
            )