github-actions committed on
Commit d4d8ea9
1 Parent(s): 4e6f87a

Sync updates from source repository

Files changed (2)
  1. app.py +19 -22
  2. query.py +84 -87
app.py CHANGED
@@ -7,8 +7,8 @@ from streamlit_pills import pills
 
 from PIL import Image
 
-max_examples = 4
-
+max_examples = 6
+
 def isTrue(x) -> bool:
     if isinstance(x, bool):
         return x
@@ -22,7 +22,7 @@ def launch_bot():
     def generate_streaming_response(question):
         response = vq.submit_query_streaming(question)
         return response
-
+
     def show_example_questions():
         if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
             selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
@@ -31,27 +31,25 @@ def launch_bot():
                 st.session_state.first_turn = False
                 return True
         return False
-
+
     if 'cfg' not in st.session_state:
-        corpus_ids = str(os.environ['corpus_ids']).split(',')
+        corpus_keys = str(os.environ['corpus_keys']).split(',')
         cfg = OmegaConf.create({
-            'customer_id': str(os.environ['customer_id']),
-            'corpus_ids': corpus_ids,
+            'corpus_keys': corpus_keys,
             'api_key': str(os.environ['api_key']),
             'title': os.environ['title'],
-            'description': os.environ['description'],
             'source_data_desc': os.environ['source_data_desc'],
             'streaming': isTrue(os.environ.get('streaming', False)),
             'prompt_name': os.environ.get('prompt_name', None),
-            'examples': os.environ.get('examples', '')
+            'examples': os.environ.get('examples', None)
         })
         st.session_state.cfg = cfg
         st.session_state.ex_prompt = None
         st.session_state.first_turn = True
         example_messages = [example.strip() for example in cfg.examples.split(",")]
         st.session_state.example_messages = [em for em in example_messages if len(em)>0][:max_examples]
-
-        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.customer_id, cfg.corpus_ids, cfg.prompt_name)
+
+        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
 
     cfg = st.session_state.cfg
     vq = st.session_state.vq
@@ -60,7 +58,8 @@ def launch_bot():
     # left side content
     with st.sidebar:
         image = Image.open('Vectara-logo.png')
-        st.markdown(f"## Welcome to {cfg.title}\n\n"
+        st.image(image, width=175)
+        st.markdown(f"## About\n\n"
                     f"This demo uses Retrieval Augmented Generation to ask questions about {cfg.source_data_desc}\n\n")
 
         st.markdown("---")
@@ -71,25 +70,23 @@ def launch_bot():
         "This app uses Vectara [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the results to you, answering your question.\n\n"
         )
         st.markdown("---")
-        st.image(image, width=250)
+
 
-    st.markdown(f"<center> <h2> Vectara chat demo: {cfg.title} </h2> </center>", unsafe_allow_html=True)
-    st.markdown(f"<center> <h4> {cfg.description} <h4> </center>", unsafe_allow_html=True)
+    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
 
     if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
-
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.write(message["content"])
 
     example_container = st.empty()
     with example_container:
         if show_example_questions():
             example_container.empty()
             st.rerun()
-
-    # Display chat messages
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"]):
-            st.write(message["content"])
 
     # select prompt from example question or user provided input
     if st.session_state.ex_prompt:
@@ -117,4 +114,4 @@ def launch_bot():
         st.rerun()
 
 if __name__ == "__main__":
-    launch_bot()
+    launch_bot()
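
For context, the committed app.py configures itself entirely from environment variables, using v2-style corpus_keys in place of the old customer_id/corpus_ids pair and dropping description. Note that 'examples' now defaults to None, so the later cfg.examples.split(",") assumes the variable is actually set. A minimal sketch of the environment the updated app expects; the values below are placeholders, not part of this commit:

import os

# Placeholder values; only the variable names are taken from app.py above.
os.environ['corpus_keys'] = 'my-corpus'        # comma-separated v2 corpus keys
os.environ['api_key'] = 'zut_...'              # Vectara API key (placeholder)
os.environ['title'] = 'My Assistant'
os.environ['source_data_desc'] = 'my document corpus'
os.environ['streaming'] = 'True'               # parsed by isTrue()
os.environ['examples'] = 'What is Vectara?, How does RAG work?'  # first max_examples shown as pills
# 'prompt_name' is optional; VectaraQuery falls back to "vectara-summary-ext-24-05-sml"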
query.py CHANGED
@@ -3,52 +3,54 @@ import json
 
 
 class VectaraQuery():
-    def __init__(self, api_key: str, customer_id: str, corpus_ids: list[str], prompt_name: str = None):
-        self.customer_id = customer_id
-        self.corpus_ids = corpus_ids
+    def __init__(self, api_key: str, corpus_keys: list[str], prompt_name: str = None):
+        self.corpus_keys = corpus_keys
         self.api_key = api_key
-        self.prompt_name = prompt_name if prompt_name else "vectara-experimental-summary-ext-2023-12-11-sml"
+        self.prompt_name = prompt_name if prompt_name else "vectara-summary-ext-24-05-sml"
         self.conv_id = None
 
-    def get_body(self, query_str: str):
-        corpora_key_list = [{
-            'customer_id': self.customer_id, 'corpus_id': corpus_id, 'lexical_interpolation_config': {'lambda': 0.005}
-        } for corpus_id in self.corpus_ids
+
+    def get_body(self, query_str: str, stream: False):
+        corpora_list = [{
+            'corpus_key': corpus_key, 'lexical_interpolation': 0.005
+        } for corpus_key in self.corpus_keys
         ]
 
         return {
-            'query': [
-                {
-                    'query': query_str,
-                    'start': 0,
-                    'numResults': 50,
-                    'corpusKey': corpora_key_list,
-                    'context_config': {
-                        'sentences_before': 2,
-                        'sentences_after': 2,
-                        'start_tag': "%START_SNIPPET%",
-                        'end_tag': "%END_SNIPPET%",
-                    },
-                    'rerankingConfig':
-                    {
-                        'rerankerId': 272725719,
-                    },
-                    'summary': [
-                        {
-                            'responseLang': 'eng',
-                            'maxSummarizedResults': 10,
-                            'summarizerPromptName': self.prompt_name,
-                            'chat': {
-                                'store': True,
-                                'conversationId': self.conv_id
-                            },
-                            'citationParams': {
-                                "style": "NONE",
-                            }
-                        }
-                    ]
-                }
-            ]
+            'query': query_str,
+            'search':
+            {
+                'corpora': corpora_list,
+                'offset': 0,
+                'limit': 50,
+                'context_configuration':
+                {
+                    'sentences_before': 2,
+                    'sentences_after': 2,
+                    'start_tag': "%START_SNIPPET%",
+                    'end_tag': "%END_SNIPPET%",
+                },
+                'reranker':
+                {
+                    'type': 'customer_reranker',
+                    'reranker_id': 'rnk_272725719'
+                },
+            },
+            'generation':
+            {
+                'prompt_name': self.prompt_name,
+                'max_used_search_results': 10,
+                'response_language': 'eng',
+                'citations':
+                {
+                    'style': 'none'
+                }
+            },
+            'chat':
+            {
+                'store': True
+            },
+            'stream_response': stream
         }
 
 
@@ -56,76 +58,71 @@ class VectaraQuery():
         return {
             "Content-Type": "application/json",
             "Accept": "application/json",
-            "customer-id": self.customer_id,
+            "x-api-key": self.api_key,
+            "grpc-timeout": "60S"
+        }
+
+    def get_stream_headers(self):
+        return {
+            "Content-Type": "application/json",
+            "Accept": "text/event-stream",
             "x-api-key": self.api_key,
             "grpc-timeout": "60S"
         }
 
     def submit_query(self, query_str: str):
 
-        endpoint = f"https://api.vectara.io/v1/query"
-        body = self.get_body(query_str)
+        if self.conv_id:
+            endpoint = f"https://api.vectara.io/v2/chats/{self.conv_id}/turns"
+        else:
+            endpoint = "https://api.vectara.io/v2/chats"
+
+        body = self.get_body(query_str, stream=False)
+
+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
 
-        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers())
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
+            if response.status_code == 429:
+                return "Sorry, Vectara chat turns exceeds plan limit."
             return "Sorry, something went wrong in my brain. Please try again later."
 
         res = response.json()
 
-        summary = res['responseSet'][0]['summary'][0]['text']
-        chat = res['responseSet'][0]['summary'][0].get('chat', None)
+        if self.conv_id is None:
+            self.conv_id = res['chat_id']
 
-        if chat and chat['status'] is not None:
-            st_code = chat['status']
-            print(f"Chat query failed with code {st_code}")
-            if st_code == 'RESOURCE_EXHAUSTED':
-                self.conv_id = None
-                return 'Sorry, Vectara chat turns exceeds plan limit.'
-            return 'Sorry, something went wrong in my brain. Please try again later.'
+        summary = res['answer']
 
-        self.conv_id = chat['conversationId'] if chat else None
         return summary
 
     def submit_query_streaming(self, query_str: str):
 
-        endpoint = "https://api.vectara.io/v1/stream-query"
-        body = self.get_body(query_str)
+        if self.conv_id:
+            endpoint = f"https://api.vectara.io/v2/chats/{self.conv_id}/turns"
+        else:
+            endpoint = "https://api.vectara.io/v2/chats"
+
+        body = self.get_body(query_str, stream=True)
+
+        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_stream_headers(), stream=True)
 
-        response = requests.post(endpoint, data=json.dumps(body), verify=True, headers=self.get_headers(), stream=True)
         if response.status_code != 200:
             print(f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}")
-            return "Sorry, something went wrong in my brain. Please try again later."
+            if response.status_code == 429:
+                return "Sorry, Vectara chat turns exceeds plan limit."
+            return "Sorry, something went wrong in my brain. Please try again later."
 
         chunks = []
         for line in response.iter_lines():
+            line = line.decode('utf-8')
             if line: # filter out keep-alive new lines
-                data = json.loads(line.decode('utf-8'))
-                res = data['result']
-                response_set = res['responseSet']
-                if response_set is None:
-                    # grab next chunk and yield it as output
-                    summary = res.get('summary', None)
-                    if summary is None or len(summary)==0:
-                        continue
-                    else:
-                        chat = summary.get('chat', None)
-                        if chat and chat.get('status', None):
-                            st_code = chat['status']
-                            print(f"Chat query failed with code {st_code}")
-                            if st_code == 'RESOURCE_EXHAUSTED':
-                                self.conv_id = None
-                                return 'Sorry, Vectara chat turns exceeds plan limit.'
-                            return 'Sorry, something went wrong in my brain. Please try again later.'
-                        conv_id = chat.get('conversationId', None) if chat else None
-                        if conv_id:
-                            self.conv_id = conv_id
-
-                        chunk = summary['text']
-                        chunks.append(chunk)
-                        yield chunk
-
-                        if summary['done']:
-                            break
-
-        return ''.join(chunks)
+                key, value = line.split(':', 1)
+                if key == 'data':
+                    line = json.loads(value)
+                    if line['type'] == 'generation_chunk':
+                        chunk = line['generation_chunk']
+                        chunks.append(chunk)
+                        yield chunk
+
+        return ''.join(chunks)
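
Taken together, these changes move query.py from the v1 query API to the v2 chats API: the first request creates a chat at /v2/chats, the returned chat_id is stored as conv_id, and follow-up turns go to /v2/chats/{chat_id}/turns. A hedged usage sketch of the updated class; the key and corpus values are placeholders and the snippet is illustrative, not part of this commit:

from query import VectaraQuery

# Placeholder credentials; corpus_keys are v2 corpus keys, not numeric corpus IDs.
vq = VectaraQuery(api_key='zut_...', corpus_keys=['my-corpus'])

# Non-streaming: the first call POSTs to /v2/chats and stores the returned
# chat_id in vq.conv_id; later calls continue the same conversation via
# /v2/chats/{chat_id}/turns.
print(vq.submit_query("What do these documents cover?"))

# Streaming: submit_query_streaming() is a generator. It sends the same body
# with stream_response=True plus an "Accept: text/event-stream" header, and
# yields the text of each 'generation_chunk' event parsed from SSE "data:" lines.
for chunk in vq.submit_query_streaming("Tell me more."):
    print(chunk, end="", flush=True)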