ofermend committed
Commit 1744191 • 1 Parent(s): 12c2421

version bump

Files changed (4)
  1. agent.py +7 -13
  2. app.py +3 -146
  3. requirements.txt +1 -1
  4. st_app.py +163 -0
agent.py CHANGED
@@ -167,30 +167,23 @@ def create_assistant_tools(cfg):
 
     class QueryCaselawArgs(BaseModel):
         query: str = Field(..., description="The user query.")
-        citations: Optional[str] = Field(default = None,
-                                         description = "The citations of the case. Optional.",
-                                         examples = ['253 P.2d 136', '10 Alaska 11', '6 C.M.A. 3'])
 
     vec_factory = VectaraToolFactory(vectara_api_key=cfg.api_key,
                                      vectara_customer_id=cfg.customer_id,
                                      vectara_corpus_id=cfg.corpus_id)
-    tools_factory = ToolsFactory()
-
+    summarizer = 'vectara-experimental-summary-ext-2023-12-11-med-omni'
     ask_caselaw = vec_factory.create_rag_tool(
         tool_name = "ask_caselaw",
-        tool_description = """
-        Use this tool for general case law queries.
-        Returns a response (str) to the user query base on case law in the state of Alaska.
-        If 'citations' is provided, filters the response based on information from that case.
-        """,
+        tool_description = "A tool for asking questions about case law in Alaska.",
         tool_args_schema = QueryCaselawArgs,
         reranker = "multilingual_reranker_v1", rerank_k = 100,
         n_sentences_before = 2, n_sentences_after = 2, lambda_val = 0.005,
         summary_num_results = 10,
-        vectara_summarizer = 'vectara-experimental-summary-ext-2023-12-11-med-omni',
+        vectara_summarizer = summarizer,
         include_citations = False,
     )
 
+    tools_factory = ToolsFactory()
     return (
         [ask_caselaw] +
         [tools_factory.create_tool(tool) for tool in [
@@ -207,6 +200,7 @@ def get_agent_config() -> OmegaConf:
     cfg = OmegaConf.create({
         'customer_id': str(os.environ['VECTARA_CUSTOMER_ID']),
         'corpus_id': str(os.environ['VECTARA_CORPUS_ID']),
+        'corpus_key': str(os.environ['VECTARA_CORPUS_KEY']),
         'api_key': str(os.environ['VECTARA_API_KEY']),
         'examples': os.environ.get('QUERY_EXAMPLES', None),
         'demo_name': "legal-agent",
@@ -227,7 +221,7 @@ def initialize_agent(_cfg, update_func=None):
       For example you can include citations, decision date, or case name.
     - Citations include 3 components: volume number, reporter, and first page.
       Here are some examples: '253 P.2d 136', '10 Alaska 11', '6 C.M.A. 3'
-    - Never use your internal knowledge to contruct or guess what the citation is.
+    - Never use your internal knowledge to guess citations. Only use citations information provided by a tool or the user.
     - If two cases have conflicting rulings, assume that the case with the more current ruling date is correct.
     - If the response is based on cases that are older than 5 years, make sure to inform the user that the information may be outdated,
       since some case opinions may no longer apply in law.
@@ -248,6 +242,6 @@ def initialize_agent(_cfg, update_func=None):
         tools=create_assistant_tools(_cfg),
         topic="Case law in Alaska",
         custom_instructions=legal_assistant_instructions,
-        update_func=update_func
+        update_func=update_func,
     )
     return agent
app.py CHANGED
@@ -1,159 +1,16 @@
-from PIL import Image
-import sys
+import streamlit as st
+from st_app import launch_bot
 import uuid
 
 import nest_asyncio
 import asyncio
 
-import streamlit as st
-from streamlit_pills import pills
-from streamlit_feedback import streamlit_feedback
-
-from vectara_agentic.agent import AgentStatusType
-
-from agent import initialize_agent, get_agent_config
-from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
-
-initial_prompt = "How can I help you today?"
-
 # Setup for HTTP API Calls to Amplitude Analytics
 if 'device_id' not in st.session_state:
     st.session_state.device_id = str(uuid.uuid4())
 
 if "feedback_key" not in st.session_state:
-    st.session_state.feedback_key = 0
-
-def toggle_logs():
-    st.session_state.show_logs = not st.session_state.show_logs
-
-def show_example_questions():
-    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
-        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
-        if selected_example:
-            st.session_state.ex_prompt = selected_example
-            st.session_state.first_turn = False
-            return True
-    return False
-
-def update_func(status_type: AgentStatusType, msg: str):
-    if status_type != AgentStatusType.AGENT_UPDATE:
-        output = f"{status_type.value} - {msg}"
-        st.session_state.log_messages.append(output)
-
-async def launch_bot():
-    def reset():
-        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
-        st.session_state.thinking_message = "Agent at work..."
-        st.session_state.log_messages = []
-        st.session_state.prompt = None
-        st.session_state.ex_prompt = None
-        st.session_state.first_turn = True
-        st.session_state.show_logs = False
-        if 'agent' not in st.session_state:
-            st.session_state.agent = initialize_agent(cfg, update_func=update_func)
-
-    if 'cfg' not in st.session_state:
-        cfg = get_agent_config()
-        st.session_state.cfg = cfg
-        st.session_state.ex_prompt = None
-        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
-        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
-        reset()
-
-    cfg = st.session_state.cfg
-
-    # left side content
-    with st.sidebar:
-        image = Image.open('Vectara-logo.png')
-        st.image(image, width=175)
-        st.markdown(f"## {cfg['demo_welcome']}")
-        st.markdown(f"{cfg['demo_description']}")
-
-        st.markdown("\n\n")
-        bc1, _ = st.columns([1, 1])
-        with bc1:
-            if st.button('Start Over'):
-                reset()
-                st.rerun()
-
-        st.divider()
-        st.markdown(
-            "## How this works?\n"
-            "This app was built with [Vectara](https://vectara.com).\n\n"
-            "It demonstrates the use of Agentic RAG functionality with Vectara"
-        )
-
-    if "messages" not in st.session_state.keys():
-        reset()
-
-    # Display chat messages
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"], avatar=message["avatar"]):
-            st.write(message["content"])
-
-    example_container = st.empty()
-    with example_container:
-        if show_example_questions():
-            example_container.empty()
-            st.session_state.first_turn = False
-            st.rerun()
-
-    # User-provided prompt
-    if st.session_state.ex_prompt:
-        prompt = st.session_state.ex_prompt
-    else:
-        prompt = st.chat_input()
-    if prompt:
-        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
-        st.session_state.prompt = prompt # Save the prompt in session state
-        st.session_state.log_messages = []
-        st.session_state.show_logs = False
-        with st.chat_message("user", avatar='🧑‍💻'):
-            print(f"Starting new question: {prompt}\n")
-            st.write(prompt)
-        st.session_state.ex_prompt = None
-
-    # Generate a new response if last message is not from assistant
-    if st.session_state.prompt:
-        with st.chat_message("assistant", avatar='🤖'):
-            with st.spinner(st.session_state.thinking_message):
-                res = st.session_state.agent.chat(st.session_state.prompt)
-                res = escape_dollars_outside_latex(res)
-                message = {"role": "assistant", "content": res, "avatar": '🤖'}
-                st.session_state.messages.append(message)
-                st.markdown(res)
-
-        send_amplitude_data(
-            user_query=st.session_state.messages[-2]["content"],
-            bot_response=st.session_state.messages[-1]["content"],
-            demo_name=cfg['demo_name']
-        )
-
-        st.session_state.ex_prompt = None
-        st.session_state.prompt = None
-        st.session_state.first_turn = False
-        st.rerun()
-
-    # Record user feedback
-    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != "How can I help you today?"):
-        streamlit_feedback(
-            feedback_type="thumbs", on_submit = thumbs_feedback, key = st.session_state.feedback_key,
-            kwargs = {"user_query": st.session_state.messages[-2]["content"],
-                      "bot_response": st.session_state.messages[-1]["content"],
-                      "demo_name": cfg["demo_name"]}
-        )
-
-    log_placeholder = st.empty()
-    with log_placeholder.container():
-        if st.session_state.show_logs:
-            st.button("Hide Logs", on_click=toggle_logs)
-            for msg in st.session_state.log_messages:
-                st.text(msg)
-        else:
-            if len(st.session_state.log_messages) > 0:
-                st.button("Show Logs", on_click=toggle_logs)
-
-    sys.stdout.flush()
+    st.session_state.feedback_key = 0
 
 if __name__ == "__main__":
     st.set_page_config(page_title="Legal Assistant", layout="wide")
requirements.txt CHANGED
@@ -6,4 +6,4 @@ streamlit-feedback==0.1.3
 uuid==1.30
 langdetect==1.0.9
 langcodes==3.4.0
-vectara-agentic==0.1.15
+vectara-agentic==0.1.16
st_app.py ADDED
@@ -0,0 +1,163 @@
+from PIL import Image
+import sys
+
+import streamlit as st
+from streamlit_pills import pills
+from streamlit_feedback import streamlit_feedback
+
+from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
+
+from vectara_agentic.agent import AgentStatusType
+from agent import initialize_agent, get_agent_config
+
+initial_prompt = "How can I help you today?"
+
+def toggle_logs():
+    st.session_state.show_logs = not st.session_state.show_logs
+
+def show_example_questions():
+    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+        if selected_example:
+            st.session_state.ex_prompt = selected_example
+            st.session_state.first_turn = False
+            return True
+    return False
+
+def update_func(status_type: AgentStatusType, msg: str):
+    if status_type != AgentStatusType.AGENT_UPDATE:
+        output = f"{status_type.value} - {msg}"
+        st.session_state.log_messages.append(output)
+
+async def launch_bot():
+    def reset():
+        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
+        st.session_state.thinking_message = "Agent at work..."
+        st.session_state.log_messages = []
+        st.session_state.prompt = None
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        st.session_state.logs_enabled = False
+        st.session_state.show_logs = False
+        if 'agent' not in st.session_state:
+            st.session_state.agent = initialize_agent(cfg, update_func=update_func)
+        else:
+            st.session_state.agent.clear_memory()
+
+    if 'cfg' not in st.session_state:
+        cfg = get_agent_config()
+        st.session_state.cfg = cfg
+        st.session_state.ex_prompt = None
+        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
+        reset()
+
+    cfg = st.session_state.cfg
+
+    # left side content
+    with st.sidebar:
+        image = Image.open('Vectara-logo.png')
+        st.image(image, width=175)
+        st.markdown(f"## {cfg['demo_welcome']}")
+        st.markdown(f"{cfg['demo_description']}")
+
+        st.markdown("\n\n")
+        bc1, bc2 = st.columns([1, 1])
+        with bc1:
+            if st.button('Start Over'):
+                reset()
+                st.rerun()
+        with bc2: # Updated button for enabling/disabling logs
+            if st.session_state.logs_enabled:
+                if st.button('Disable Logs', key='disable_logs'):
+                    st.session_state.logs_enabled = False
+                    st.rerun()
+            else:
+                if st.button('Enable Logs', key='enable_logs'):
+                    st.session_state.logs_enabled = True
+                    st.rerun()
+
+        st.divider()
+        st.markdown(
+            "## How this works?\n"
+            "This app was built with [Vectara](https://vectara.com).\n\n"
+            "It demonstrates the use of Agentic RAG functionality with Vectara"
+        )
+
+    if "messages" not in st.session_state.keys():
+        reset()
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"], avatar=message["avatar"]):
+            st.write(message["content"])
+
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.session_state.first_turn = False
+            st.rerun()
+
+    # User-provided prompt
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
+        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
+        st.session_state.prompt = prompt # Save the prompt in session state
+        st.session_state.log_messages = []
+        st.session_state.show_logs = False
+        with st.chat_message("user", avatar='🧑‍💻'):
+            print(f"Starting new question: {prompt}\n")
+            st.write(prompt)
+        st.session_state.ex_prompt = None
+
+    # Generate a new response if last message is not from assistant
+    if st.session_state.prompt:
+        with st.chat_message("assistant", avatar='🤖'):
+            with st.spinner(st.session_state.thinking_message):
+                res = st.session_state.agent.chat(st.session_state.prompt)
+                res = escape_dollars_outside_latex(res)
+                message = {"role": "assistant", "content": res, "avatar": '🤖'}
+                st.session_state.messages.append(message)
+                st.markdown(res)
+
+        send_amplitude_data(
+            user_query=st.session_state.messages[-2]["content"],
+            bot_response=st.session_state.messages[-1]["content"],
+            demo_name=cfg['demo_name']
+        )
+
+        st.session_state.ex_prompt = None
+        st.session_state.prompt = None
+        st.session_state.first_turn = False
+        st.rerun()
+
+    # Record user feedback
+    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != initial_prompt):
+        if st.session_state.show_logs and st.session_state.logs_enabled: # Only show logs if enabled
+            streamlit_feedback(
+                feedback_type="thumbs", on_submit=thumbs_feedback, key=st.session_state.feedback_key,
+                kwargs={"user_query": st.session_state.messages[-2]["content"],
+                        "bot_response": st.session_state.messages[-1]["content"],
+                        "demo_name": cfg["demo_name"]}
+            )
+
+    log_placeholder = st.empty()
+    with log_placeholder.container():
+        if st.session_state.logs_enabled: # Show logs button only if log toggle is enabled
+            if st.session_state.show_logs:
+                st.button("Hide Logs", on_click=toggle_logs)
+                for msg in st.session_state.log_messages:
+                    if len(msg) > 100: # Use text_area for longer messages
+                        st.text_area(label="Log", value=msg, height=100, disabled=True)
+                    else:
+                        st.text(msg)
+            else:
+                if len(st.session_state.log_messages) > 0:
+                    st.button("Show Logs", on_click=toggle_logs)
+
+
+    sys.stdout.flush()