KushwanthK committed
Commit 33dd3d6
Parent(s): a695e9a
Update app.py
app.py
CHANGED
@@ -10,7 +10,9 @@ import math
 from transformers import pipeline
 from langchain.prompts import ChatPromptTemplate
 from langchain_community.llms import HuggingFaceHub
+from langchain.chains.summarize import load_summarize_chain
 import re
+from dotenv import load_dotenv
 # import json

 # st.config(PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION="python")
@@ -95,13 +97,36 @@ def prompt_engineer(text, longtext, query):
     BULLET POINT SUMMARY:
     """
     # Load the summarization pipeline with the specified model
-    summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
+    # summarizer = pipeline("summarization", model="facebook/bart-large-cnn")

     # Generate the prompt
-    prompt = summary_prompt_template.format(text=text)
+    # prompt = summary_prompt_template.format(text=text)

     # Generate the summary
-    summary = summarizer(prompt, max_length=1024, min_length=50)[0]["summary_text"]
+    # summary = summarizer(prompt, max_length=1024, min_length=50)[0]["summary_text"]
+
+    try:
+        llm = HuggingFaceHub(
+            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
+        )
+        st.write("llm connection started..")
+    except Exception as e:
+        st.error(f"Error invoke: {e}")
+
+    from langchain.chains.combine_documents import create_stuff_documents_chain
+    from langchain.chains.llm import LLMChain
+    from langchain_core.prompts import ChatPromptTemplate
+
+    # Define prompt
+    prompt = ChatPromptTemplate.from_messages(
+        [("system", summary_prompt_template)]
+    )
+
+    # Instantiate chain
+    chain = create_stuff_documents_chain(llm, prompt)
+
+    # Invoke chain
+    summary = chain.invoke({"text": longtext})

     with st.sidebar:
         st.divider()
@@ -130,9 +155,9 @@ def prompt_engineer(text, longtext, query):
     result = ""

     try:
-        llm = HuggingFaceHub(
-            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
-        )
+        # llm = HuggingFaceHub(
+        #     repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
+        # )
         response_text = llm.invoke(prompt)
         escaped_query = re.escape(query)
         result = re.split(f'Answer the question based on the above context: {escaped_query}\n',response_text)[-1]
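A note on the two new imports in the first hunk: load_summarize_chain is never used in this diff, and load_dotenv matters only because HuggingFaceHub authenticates through the HUGGINGFACEHUB_API_TOKEN environment variable. A minimal sketch of the pattern the dotenv import enables, assuming the token sits in a local .env file (the diff adds the import only; the call site is not shown):

# Sketch of what the new dotenv import enables, assuming the Space keeps
# HUGGINGFACEHUB_API_TOKEN in a local .env file; HuggingFaceHub reads the
# token from the environment when no key is passed explicitly.
import os
from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from .env into os.environ

if not os.getenv("HUGGINGFACEHUB_API_TOKEN"):
    raise RuntimeError("HUGGINGFACEHUB_API_TOKEN is not set")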
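The second hunk swaps the local facebook/bart-large-cnn pipeline for a hosted Llama 3 model behind a stuff-documents chain. One caveat worth flagging: create_stuff_documents_chain expects a list of Document objects under its document variable, which defaults to "context", so chain.invoke({"text": longtext}) with a raw string only works if the template exposes {text} and document_variable_name is overridden. A standalone sketch under those assumptions; the template string here stands in for the app's summary_prompt_template:

# Standalone sketch of the new summarization path. The template below is a
# stand-in for the app's summary_prompt_template; document_variable_name is
# set to "text" so the chain accepts the {"text": ...} input the commit uses.
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.llms import HuggingFaceHub
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate

llm = HuggingFaceHub(
    repo_id="meta-llama/Meta-Llama-3-8B-Instruct",
    model_kwargs={"temperature": 0, "max_new_tokens": 256, "task": "text-generation"},
)

prompt = ChatPromptTemplate.from_messages(
    [("system", "Write a bullet point summary of the following:\n\n{text}")]
)

# The chain concatenates ("stuffs") all documents into the {text} slot.
chain = create_stuff_documents_chain(llm, prompt, document_variable_name="text")

# Inputs must be Document objects, not a raw string.
docs = [Document(page_content="Long text to summarize goes here.")]
summary = chain.invoke({"text": docs})
print(summary)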
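The last hunk comments out the inline HuggingFaceHub construction in the QA path, so llm there now refers to the instance created earlier in the function. The regex split that follows exists because the hub endpoint tends to echo the prompt before the completion; splitting on the final instruction line keeps only the answer. A self-contained illustration with a made-up query and response:

# Illustration of the prompt-echo stripping in the QA path; query and
# response_text are made up, the split pattern matches the one in app.py.
import re

query = "What does this passage teach?"
response_text = (
    "...context...\n"
    f"Answer the question based on the above context: {query}\n"
    "It teaches the importance of steady practice."
)

escaped_query = re.escape(query)  # neutralize regex metacharacters in user input
result = re.split(
    f"Answer the question based on the above context: {escaped_query}\n",
    response_text,
)[-1]
print(result)  # -> It teaches the importance of steady practice.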