Ritvik19 committed
Commit 31315ce
Parent(s): 4e26824

Delete chains.py

Files changed (1)
  1. chains.py +0 -107
chains.py DELETED
@@ -1,107 +0,0 @@
- from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
- from langchain_core.output_parsers import StrOutputParser
- from langchain_core.runnables import RunnablePassthrough
- import xml.etree.ElementTree as ET
- import re
-
- contextualize_q_system_prompt = """Given a chat history and the latest user question \
- which might reference context in the chat history, formulate a standalone question \
- which can be understood without the chat history. Do NOT answer the question, \
- just reformulate it if needed and otherwise return it as is."""
- contextualize_q_prompt = ChatPromptTemplate.from_messages(
-     [
-         ("system", contextualize_q_system_prompt),
-         MessagesPlaceholder(variable_name="chat_history"),
-         ("human", "{question}"),
-     ]
- )
- contextualize_q_chain = lambda llm: contextualize_q_prompt | llm | StrOutputParser()
-
- qa_system_prompt = """As Zeta, your mission is to assist users in navigating the vast sea of machine learning research with ease and insight. When responding to inquiries, adhere to the following guidelines to ensure the utmost accuracy and utility:
-
- Contextual Understanding: When presented with a question, apply your understanding of machine learning concepts to interpret the context provided accurately. Utilize this context to guide your search for answers within the specified research papers.
-
- Answer Provision: Always provide an answer that is directly supported by the research papers' content. If the information needed to answer the question is not available, clearly state, "I don't know."
-
- Citation Requirement: For every answer given, include multiple citations from the research papers. A citation must include a direct quote from the paper that supports your answer, along with the identification (ID) of the paper. This ensures that all provided information can be traced back to its source, maintaining a high level of credibility and transparency.
-
- Formatting Guidelines: Present your citations in the following structured format at the end of your answer to maintain clarity and consistency:
-
-
- <citations>
- <citation><source_id>[Source ID]</source_id><quote>[Direct quote from the source]</quote></citation>
- ...
- </citations>
-
-
- Conflict Resolution: In cases where multiple sources offer conflicting information, evaluate the context, relevance, and credibility of each source to determine the most accurate answer. Explain your reasoning within the citation section to provide insight into your decision-making process.
-
- User Engagement: Encourage user engagement by asking clarifying questions if the initial inquiry is ambiguous or lacks specific context. This helps in providing more targeted and relevant responses.
-
- Continual Learning: Although you are not expected to generate new text or insights beyond the provided papers, be open to learning from new information as it becomes available to you through user interactions and queries.
-
- By following these guidelines, you ensure that users receive valuable, accurate, and source-backed insights into their inquiries, making their exploration of machine learning research more productive and enlightening.
-
- {context}"""
- qa_prompt = ChatPromptTemplate.from_messages(
-     [
-         ("system", qa_system_prompt),
-         MessagesPlaceholder(variable_name="chat_history"),
-         ("human", "{question}"),
-     ]
- )
-
-
- def format_docs(docs):
-     return "\n\n".join(
-         f"{doc.metadata['chunk_id']}: {doc.page_content}" for doc in docs
-     )
-
-
- def contextualized_question(input: dict):
-     if input.get("chat_history"):
-         return contextualize_q_chain
-     else:
-         return input["question"]
-
-
- rag_chain = lambda retriever, llm: (
-     RunnablePassthrough.assign(
-         context=contextualized_question | retriever | format_docs
-     )
-     | qa_prompt
-     | llm
- )
-
-
- def parse_model_response(input_string):
-     parsed_data = {"answer": "", "citations": []}
-     xml_matches = re.findall(r"<citations>.*?</citations>", input_string, re.DOTALL)
-     if not xml_matches:
-         parsed_data["answer"] = input_string
-         return parsed_data
-
-     outside_text_parts = []
-     last_end_pos = 0
-
-     for xml_string in xml_matches:
-         match = re.search(re.escape(xml_string), input_string[last_end_pos:], re.DOTALL)
-
-         if match:
-             outside_text_parts.append(
-                 input_string[last_end_pos : match.start() + last_end_pos]
-             )
-             last_end_pos += match.end()
-
-         root = ET.fromstring(xml_string)
-
-         for citation in root.findall("citation"):
-             source_id = citation.find("source_id").text
-             quote = citation.find("quote").text
-             parsed_data["citations"].append({"source_id": source_id, "quote": quote})
-
-     outside_text_parts.append(input_string[last_end_pos:])
-
-     parsed_data["answer"] = "".join(outside_text_parts)
-
-     return parsed_data
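
For reference, a minimal sketch of how the deleted module could be driven end to end; the in-memory vector store, the OpenAI chat model and embeddings, the sample document, and the question below are illustrative assumptions, not part of this commit.

# Hypothetical driver for the deleted chains.py; model choice, embeddings,
# and the sample chunk are assumptions made for illustration only.
from langchain_core.documents import Document
from langchain_core.vectorstores import InMemoryVectorStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

from chains import rag_chain, parse_model_response  # the module removed by this commit

# One assumed corpus chunk carrying the `chunk_id` metadata that format_docs reads.
docs = [
    Document(
        page_content="LoRA freezes the pretrained weights and injects trainable low-rank matrices.",
        metadata={"chunk_id": "paper-1-chunk-3"},
    )
]
retriever = InMemoryVectorStore.from_documents(docs, OpenAIEmbeddings()).as_retriever()
llm = ChatOpenAI(model="gpt-4o-mini")  # assumed model

chain = rag_chain(retriever, llm)
# First turn: the chat history is empty, so the raw question is passed to the retriever.
response = chain.invoke({"question": "How does LoRA work?", "chat_history": []})

parsed = parse_model_response(response.content)
print(parsed["answer"])
for citation in parsed["citations"]:
    print(citation["source_id"], "->", citation["quote"])

With an empty history the question goes straight to the retriever; once prior turns are present, contextualized_question is meant to route the input through the contextualization chain so follow-up questions are rewritten into standalone ones before retrieval.

The parser can also be exercised on its own against a hand-written response that follows the <citations> format requested by the system prompt; the text below is fabricated for illustration.

# Stand-alone check of parse_model_response on a fabricated response string.
from chains import parse_model_response  # the module removed by this commit

sample = (
    "LoRA adapts a frozen model by training small low-rank update matrices.\n"
    "<citations>"
    "<citation><source_id>paper-1-chunk-3</source_id>"
    "<quote>LoRA freezes the pretrained weights and injects trainable low-rank matrices.</quote></citation>"
    "</citations>"
)
parsed = parse_model_response(sample)
# parsed["answer"]    -> the prose before the citations block
# parsed["citations"] -> [{"source_id": "paper-1-chunk-3", "quote": "LoRA freezes ..."}]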