from utils_old import get_custom_agent, get_prompt_and_tools_for_custom_agent
from utils_old import get_search_index

# Interrogative words used to detect question-like inputs that are missing a
# trailing question mark.
question_starters = ['who', 'why', 'what', 'how', 'where', 'when', 'which', 'whom', 'whose']


def index():
    """Build (or load) the search index."""
    get_search_index()
    return True


def run(question):
    # Make sure the search index exists before the agent runs; the return
    # value is not needed here.
    get_search_index()

    # Earlier, generic agent setup kept for reference:
    # prompt, tools = get_prompt_and_tools()
    # agent_chain = get_agent_chain(prompt, tools)

    prompt, tools = get_prompt_and_tools_for_custom_agent()
    agent_chain = get_custom_agent(prompt, tools)

    result = None
    try:
        result = agent_chain.run(question)
    except ValueError as ve:
        # LangChain agents raise ValueError("Could not parse LLM output: ...")
        # when the model's reply does not match the expected output format.
        # If the input looks like a question but lacks a question mark, retry
        # once with the question mark appended.
        if ("Could not parse LLM output:" in ve.args[0]
                and question.lower().startswith(tuple(question_starters))
                and not question.lower().endswith('?')):
            question = question + '?'
            result = agent_chain.run(question)
    finally:
        print(result)
    return result
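

# Minimal usage sketch, assuming the helpers imported from utils_old work as
# shown above and that any credentials they depend on (e.g. an LLM API key)
# are already configured in the environment. The sample question below is
# purely illustrative; it omits the trailing '?' to exercise the retry path.
if __name__ == "__main__":
    index()  # build/load the search index once up front
    answer = run("What topics does the indexed content cover")
    print("Final answer:", answer)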