TheoLvs committed on
Commit
a56e564
1 Parent(s): 53cdd75

Corrected concurrency bug + probable error

Files changed (2):
  1. README.md +4 -1
  2. app.py +16 -11
README.md CHANGED
@@ -10,4 +10,7 @@ fullWidth: true
 pinned: false
 ---
 
-# Climate Q&A
+# Climate Q&A
+
+
+To run locally run ``gradio app.py``
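A note on the new README line: the `gradio app.py` command runs the script in auto-reload mode and, by default, looks for a top-level `gr.Blocks` object named `demo`, which app.py provides. A minimal sketch of the layout that command assumes (illustrative, not the real app):

```python
# Sketch of the app.py shape that `gradio app.py` expects; illustrative only.
import gradio as gr

with gr.Blocks() as demo:  # the reload CLI looks for a top-level `demo`
    gr.Markdown("# Climate Q&A")

if __name__ == "__main__":
    demo.launch()  # plain `python app.py` still works through this guard
```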
app.py CHANGED
@@ -86,6 +86,7 @@ def parse_output_llm_with_sources(output):
 
 # Create vectorstore and retriever
 vectorstore = get_pinecone_vectorstore(embeddings_function)
+llm = get_llm(max_tokens = 1024,temperature = 0.0)
 
 
 def make_pairs(lst):
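This hunk, together with the next one, is the concurrency fix: `get_llm` moves out of `chat()` so the client is constructed once at import and shared by every concurrent request instead of being rebuilt per call. A self-contained sketch of that pattern, where `FakeLLM` is a stand-in for whatever `get_llm()` actually returns:

```python
import asyncio

# Stand-in for the client returned by get_llm(); only the
# construct-once, share-everywhere shape is the point here.
class FakeLLM:
    def __init__(self):
        print("constructing client (expensive, runs once at import)")

    async def complete(self, prompt: str) -> str:
        await asyncio.sleep(0.1)  # pretend network latency
        return f"answer to: {prompt}"

llm = FakeLLM()  # module level, as the commit now does with get_llm()

async def chat(query: str) -> str:
    # chat() no longer constructs a client per request
    return await llm.complete(query)

async def main():
    # several concurrent chats share the single module-level client
    print(await asyncio.gather(*(chat(f"q{i}") for i in range(3))))

asyncio.run(main())
```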
@@ -123,7 +124,6 @@ async def chat(query,history,audience,sources,reports):
     if len(reports) == 0:
         reports = []
 
-    llm = get_llm(max_tokens = 1024,temperature = 0.0)
     retriever = ClimateQARetriever(vectorstore=vectorstore,sources = sources,reports = reports,k_summary = 3,k_total = 10,threshold=0.7)
     rag_chain = make_rag_chain(retriever,llm)
 
@@ -164,12 +164,16 @@ async def chat(query,history,audience,sources,reports):
                 output_query = op["value"]["question"]
 
             elif op['path'] == retriever_path_id: # documents
-                docs = op['value']['documents'] # List[Document]
-                docs_html = []
-                for i, d in enumerate(docs, 1):
-                    docs_html.append(make_html_source(d, i))
-                docs_html = "".join(docs_html)
-
+                try:
+                    docs = op['value']['documents'] # List[Document]
+                    docs_html = []
+                    for i, d in enumerate(docs, 1):
+                        docs_html.append(make_html_source(d, i))
+                    docs_html = "".join(docs_html)
+                except TypeError:
+                    print("No documents found")
+                    print("op: ",op)
+                    continue
 
             elif op['path'] == final_answer_path_id: # final answer
                 new_token = op['value'] # str
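This is presumably the "probable error" from the commit message: when a retriever op arrives without the expected mapping under `op['value']`, the subscript raises `TypeError` and previously crashed the whole stream; now the op is logged and skipped. A rough, runnable sketch of the guard, with `make_html_source` stubbed since the real helper lives elsewhere in app.py:

```python
# Hypothetical stub for app.py's make_html_source; only the shape matters.
def make_html_source(doc, i):
    return f"<div class='source'>[{i}] {doc}</div>"

def render_docs(op):
    """Return joined source HTML, or None when the op payload is malformed."""
    try:
        docs = op["value"]["documents"]  # TypeError if value is None, a str, ...
        return "".join(make_html_source(d, i) for i, d in enumerate(docs, 1))
    except TypeError:
        print("No documents found, op:", op)
        return None

print(render_docs({"value": {"documents": ["IPCC AR6 WGI"]}}))
print(render_docs({"value": None}))  # malformed payload: logged, skipped
```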
@@ -178,7 +182,8 @@ async def chat(query,history,audience,sources,reports):
                 answer_yet = parse_output_llm_with_sources(answer_yet)
                 history[-1] = (query,answer_yet)
 
-
+            else:
+                continue
 
             history = [tuple(x) for x in history]
             yield history,docs_html,output_query,output_language,gallery
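The new `else: continue` matters because the loop yields a UI update after every op; an op that matches none of the branches would otherwise fall through and re-yield stale state. A toy version of the dispatch (the path strings are illustrative, not the app's actual `retriever_path_id` / `final_answer_path_id` values):

```python
# Illustrative op stream; the real one comes from the chain's log stream.
ops = [
    {"path": "/logs/retriever/final_output", "value": {"documents": ["d1"]}},
    {"path": "/heartbeat", "value": None},          # unrecognized op
    {"path": "/streamed_output/-", "value": "Hi"},  # an answer token
]

updates = []
for op in ops:
    if op["path"] == "/logs/retriever/final_output":
        updates.append(("docs", op["value"]["documents"]))
    elif op["path"] == "/streamed_output/-":
        updates.append(("token", op["value"]))
    else:
        continue  # unmatched op: skip the per-op update entirely
    updates.append(("yield",))  # stands in for the generator's yield

print(updates)  # the heartbeat op produced no ("yield",) entry
```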
@@ -396,13 +401,13 @@ with gr.Blocks(title="Climate Q&A", css="style.css", theme=theme,elem_id = "main
 
     (textbox
         .submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False)
-        .success(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery])
+        .success(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8)
         .success(finish_chat, None, [textbox])
     )
 
     (examples_hidden
         .change(start_chat, [examples_hidden,chatbot], [textbox,tabs,chatbot],queue = False)
-        .success(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery])
+        .success(chat, [examples_hidden,chatbot,dropdown_audience, dropdown_sources,dropdown_reports], [chatbot,sources_textbox,output_query,output_language,gallery],concurrency_limit = 8)
         .success(finish_chat, None, [textbox])
     )
 
@@ -636,4 +641,4 @@ Or around 2 to 4 times more than a typical Google search.
 
 demo.queue()
 
-demo.launch()
+demo.launch(max_threads = 8)
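`concurrency_limit` (per event listener, in the two hunks above) and `max_threads` (for the whole app, here) are the two Gradio knobs this commit uses to cap parallel chat runs at 8. A minimal sketch, assuming Gradio 4.x semantics, with `slow_echo` standing in for the real RAG call:

```python
import time
import gradio as gr

def slow_echo(text):
    time.sleep(2)  # stand-in for the real streaming RAG pipeline
    return text

with gr.Blocks() as demo:
    box = gr.Textbox(label="query")
    out = gr.Textbox(label="answer")
    # at most 8 simultaneous runs of this event; extra requests queue up
    box.submit(slow_echo, [box], [out], concurrency_limit=8)

demo.queue()
demo.launch(max_threads=8)  # also bounds the server's worker thread pool
```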
 