abnerguzman committed
Commit 497bfcd
1 Parent(s): 4cade91

Update app.py

Files changed (1)
  1. app.py +25 -27
app.py CHANGED
@@ -8,59 +8,55 @@ pc = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))
 
 
 from llama_index.vector_stores.pinecone import PineconeVectorStore
-
 from llama_index.core import VectorStoreIndex
 from llama_index.core.response.pprint_utils import pprint_source_node
-
-
 from llama_index.llms.octoai import OctoAI
 
 octoai = OctoAI(
     token=os.getenv('OCTOML_KEY'),
     model="meta-llama-3-70b-instruct",
-    # messages=[
-    #     {"role": "system", "content": "You are a helpful assistant."},
-    #     {"role": "user", "content": prompt_af}
-    # ],
     max_tokens=512,
-    # presence_penalty=0,
     temperature=0.1,
-    # top_p=0.9,
 )
 
 
 from llama_index.core.memory import ChatMemoryBuffer
 
-# memory = ChatMemoryBuffer.from_defaults(token_limit=5000)
+import gradio as gr
+from io import StringIO
 
-# chat_engine = vindex.as_chat_engine(
-#     chat_mode="context",
-#     llm=octoai,
-#     memory=memory,
-#     system_prompt=(
-#         "You are a chatbot, able to have normal interactions, as well as talk about news events."
-#     ),
-# )
+def get_credit_dist(history):
+    atoms_l = cu.sentence_splitter.split_text(history[-1][1])
+    atoms_l = list(filter(lambda x: len(x) > 50, atoms_l))
+    atom_topkmatches_l = cu.get_atom_topk_matches_l_concurrent(atoms_l, max_workers=8)
 
+    atomidx_w_single_url_aggmatch_l = cu.aggregate_atom_topkmatches_l(atom_topkmatches_l)
+    atom_support_l = cu.get_atmom_support_l_from_atomidx_w_single_url_aggmatch_l_concurrent(atoms_l, atomidx_w_single_url_aggmatch_l, max_workers=8)
 
+    credit_dist = cu.credit_atom_support_list(atom_support_l)
 
-import gradio as gr
+    _out = StringIO()
+    print(f"Credit distribution to sources:\n", file=_out)
+    cu.print_credit_dist(credit_dist, prefix='  ', url_to_id=None, file=_out)
+    print(file=_out)
 
-# def predict(message, history):
-#     response = chat_engine.stream_chat(message)
+    print(f"Per claim support:\n", file=_out)
+    for j, atom_support in enumerate(atom_support_l):
+        print(f"  Claim {j+1}: \"{atoms_l[j]}\"\n", file=_out)
+        cu.print_atom_support(atom_support, prefix='    ', file=_out)
+        print(file=_out)
 
-#     partial_message = ""
-#     for token in response.response_gen:
-#         partial_message += token
-#         yield partial_message
+    return _out.getvalue()
 
-# demo = gr.ChatInterface(predict)
 
 with gr.Blocks() as demo:
-    chatbot = gr.Chatbot()
+    chatbot = gr.Chatbot(height=800)
     msg = gr.Textbox()
     clear = gr.Button("Clear")
 
+    credit_box = gr.Textbox(label="Credit distribution", lines=20, autoscroll=False)
+    credit_btn = gr.Button("Credit response")
+
     def get_chat_engine():
         vector_store = PineconeVectorStore(pinecone_index=pc.Index('prorata-postman-ds-256'))
         vindex = VectorStoreIndex.from_vector_store(vector_store)
@@ -87,6 +83,8 @@ with gr.Blocks() as demo:
 
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(bot, [chatbot, chat_engine_var], chatbot)
     clear.click(lambda x: x.reset(), chat_engine_var, chatbot, queue=False)
+
+    credit_btn.click(get_credit_dist, chatbot, credit_box)
 
 if __name__ == "__main__":
     demo.queue()
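
The report-building pattern in the new get_credit_dist is worth noting: print(..., file=...) can target an in-memory io.StringIO buffer instead of stdout, and getvalue() then returns the accumulated text as one string, which is what a gr.Textbox expects as its value. A minimal sketch of the same pattern, with hypothetical scores standing in for the cu pipeline's output:

    from io import StringIO

    buf = StringIO()
    print("Credit distribution to sources:\n", file=buf)  # print writes into the buffer, not stdout
    print("  example.com/a: 0.6", file=buf)               # hypothetical scores, not real cu output
    print("  example.com/b: 0.4", file=buf)
    report = buf.getvalue()  # the whole report as one string, ready for a Textbox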
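The new credit_btn.click(get_credit_dist, chatbot, credit_box) wiring works because Gradio passes the Chatbot component's value, a list of (user, assistant) message pairs, to the callback; that is why get_credit_dist reads history[-1][1] to get the latest model response. A self-contained sketch of the same wiring, with a stub in place of the cu attribution pipeline:

    import gradio as gr

    def get_credit_dist(history):
        # history is the Chatbot value: a list of (user, assistant) pairs,
        # so history[-1][1] is the most recent model response.
        latest_response = history[-1][1]
        return f"(stub) would attribute credit for: {latest_response}"  # stands in for the cu pipeline

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(height=800, value=[["hi", "Hello! Ask me about the news."]])
        credit_box = gr.Textbox(label="Credit distribution", lines=20, autoscroll=False)
        credit_btn = gr.Button("Credit response")
        credit_btn.click(get_credit_dist, chatbot, credit_box)  # chatbot history in, report string out

    demo.launch()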
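The get_chat_engine body is cut off by the hunk context after the VectorStoreIndex line. Judging from the commented-out code this commit deletes, it presumably continues by wrapping the index in a context chat engine backed by the OctoAI LLM and a chat memory buffer. A sketch of that continuation, under the assumption that the deleted comments mirror the live code:

    from llama_index.core.memory import ChatMemoryBuffer

    def get_chat_engine():
        vector_store = PineconeVectorStore(pinecone_index=pc.Index('prorata-postman-ds-256'))
        vindex = VectorStoreIndex.from_vector_store(vector_store)
        memory = ChatMemoryBuffer.from_defaults(token_limit=5000)
        # assumed continuation; the actual body past this point is not shown in the diff
        return vindex.as_chat_engine(
            chat_mode="context",  # retrieve context from Pinecone on every turn
            llm=octoai,
            memory=memory,
            system_prompt=(
                "You are a chatbot, able to have normal interactions, "
                "as well as talk about news events."
            ),
        )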