Roger Condori committed
Commit 057f5e7 • Parent: 33e49c0

add links and openai model

Files changed (3):
  1. app.py +52 -23
  2. conversadocs/bones.py +39 -17
  3. requirements.txt +1 -0
app.py CHANGED

@@ -37,18 +37,31 @@ else:
   os.system('pip install llama-cpp-python')
 
 css="""
-#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
+#col-container {max-width: 1500px; margin-left: auto; margin-right: auto;}
 """
 
 title = """
-<div style="text-align: center;max-width: 700px;">
-<h1>Chat with Documents 📚 - Falcon, Llama-2</h1>
-<p style="text-align: center;">Upload txt, pdf, doc, docx, enex, epub, html, md, odt, ptt, pttx; click the "Click to Upload Files" button, <br />
-Wait for the Status to show Loaded documents, start typing your questions. <br />
-The app is set to store chat-history</p>
+<div style="text-align: center;max-width: 1500px;">
+<h3>Chat with Documents 📚 - Falcon, Llama-2</h3>
+<p style="text-align: center;">Upload txt, pdf, doc, docx, enex, epub, html, md, odt, ptt and pttx.
+Wait for the Status to show Loaded documents, start typing your questions. This is a demo of <a href="https://github.com/R3gm/ConversaDocs">ConversaDocs</a>.<br /></p>
 </div>
 """
 
+description = """
+# Application Information
+
+- Notebook for run ConversaDocs in Colab [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/R3gm/ConversaDocs/blob/main/ConversaDocs_Colab.ipynb)
+
+- Oficial Repository [![a](https://img.shields.io/badge/GitHub-Repository-black?style=flat-square&logo=github)](https://github.com/R3gm/ConversaDocs/)
+
+- This application works on both CPU and GPU. For fast inference with GGML models, use the GPU.
+
+- You can clone the 'space' but to make it work, you need to set My_hf_token in secrets with a valid huggingface [token](https://huggingface.co/settings/tokens)
+
+- For more information about what GGML models are, you can visit this notebook [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/R3gm/InsightSolver-Colab/blob/main/LLM_Inference_with_llama_cpp_python__Llama_2_13b_chat.ipynb)
+"""
+
 theme='aliabid94/new-theme'
 
 def flag():

@@ -58,9 +71,10 @@ def upload_file(files, max_docs):
   file_paths = [file.name for file in files]
   return dc.call_load_db(file_paths, max_docs)
 
-def predict(message, chat_history, max_k):
+def predict(message, chat_history, max_k, check_memory):
   print(message)
-  bot_message = dc.convchain(message, max_k)
+  print(check_memory)
+  bot_message = dc.convchain(message, max_k, check_memory)
   print(bot_message)
   return "", dc.get_chats()

@@ -77,6 +91,10 @@ def convert():
     data_docs += f"<hr><h3 style='color:red;'>{pg}</h2><p>{txt}</p><p>{sc}</p>"
   return data_docs
 
+def clear_api_key(api_key):
+  return 'api_key...', dc.openai_model(api_key)
+
+
 # Max values in generation
 DOC_DB_LIMIT = 10
 MAX_NEW_TOKENS = 2048

@@ -89,25 +107,25 @@ if "SET_LIMIT" == os.getenv("DEMO"):
 with gr.Blocks(theme=theme, css=css) as demo:
   with gr.Tab("Chat"):
 
-    with gr.Column(elem_id="col-container"):
+    with gr.Column():
       gr.HTML(title)
-      upload_button = gr.UploadButton("Click to Upload Files", file_types=["pdf"], file_count="multiple")
+      upload_button = gr.UploadButton("Click to Upload Files", file_count="multiple")
       file_output = gr.HTML()
-      chatbot = gr.Chatbot([], elem_id="chatbot").style(height=300)
+
+      chatbot = gr.Chatbot([], elem_id="chatbot") #.style(height=300)
       msg = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
+      with gr.Row():
+        check_memory = gr.inputs.Checkbox(label="Remember previous messages")
+        clear_button = gr.Button("CLEAR CHAT HISTORY", )
+        max_docs = gr.inputs.Slider(1, DOC_DB_LIMIT, default=3, label="Maximum querys to the DB.", step=1)
 
     with gr.Column():
+      link_output = gr.HTML("")
       sou = gr.HTML("")
 
-    with gr.Tab("Chat Options"):
-      max_docs = gr.inputs.Slider(1, DOC_DB_LIMIT, default=3, label="Maximum querys to the DB.", step=1)
-      row_table = gr.HTML("<hr><h4> </h2>")
-      clear_button = gr.Button("CLEAR CHAT HISTORY", )
-      link_output = gr.HTML("")
     clear_button.click(flag,[],[link_output]).then(dc.clr_history,[], [link_output]).then(lambda: None, None, chatbot, queue=False)
-
-    upload_button.upload(flag,[],[file_output]).then(upload_file, [upload_button, max_docs], file_output)
-
+    upload_button.upload(flag,[],[file_output]).then(upload_file, [upload_button, max_docs], file_output).then(dc.clr_history,[], [link_output]).then(lambda: None, None, chatbot, queue=False)
+
   with gr.Tab("Change model"):
     gr.HTML("<h3>Only models from the GGML library are accepted.</h3>")
     repo_ = gr.Textbox(label="Repository" ,value="TheBloke/Llama-2-7B-Chat-GGML")

@@ -118,14 +136,25 @@ with gr.Blocks(theme=theme, css=css) as demo:
     top_p = gr.inputs.Slider(0, 100, default=50, label="Top P", step=1)
     repeat_penalty = gr.inputs.Slider(0.1, 100., default=1.2, label="Repeat penalty", step=0.1)
     change_model_button = gr.Button("Load GGML Model")
-    model_verify = gr.HTML("Loaded model Falcon 7B-instruct")
-    default_model = gr.HTML("<hr><h4>Default Model</h2>")
+
+    default_model = gr.HTML("<hr>Default Model</h2>")
     falcon_button = gr.Button("Load FALCON 7B-Instruct")
 
-  msg.submit(predict,[msg, chatbot, max_docs],[msg, chatbot]).then(convert,[],[sou])
+    openai_gpt_model = gr.HTML("<hr>OpenAI Model gpt-3.5-turbo</h2>")
+    api_key = gr.Textbox(label="API KEY", value="api_key...")
+    openai_button = gr.Button("Load gpt-3.5-turbo")
+
+    line_ = gr.HTML("<hr> </h2>")
+    model_verify = gr.HTML("Loaded model Falcon 7B-instruct")
+
+  with gr.Tab("About"):
+    description_md = gr.Markdown(description)
+
+  msg.submit(predict,[msg, chatbot, max_docs, check_memory],[msg, chatbot]).then(convert,[],[sou])
 
   change_model_button.click(dc.change_llm,[repo_, file_, max_tokens, temperature, top_p, top_k, repeat_penalty, max_docs],[model_verify])
 
   falcon_button.click(dc.default_falcon_model, [], [model_verify])
+  openai_button.click(clear_api_key, [api_key], [api_key, model_verify])
 
-demo.launch(enable_queue=True)
+demo.launch(debug=True,share=True, enable_queue=True)
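
Editor's note: the interesting wiring above is openai_button.click(clear_api_key, [api_key], [api_key, model_verify]). One handler returns a tuple and Gradio maps each element onto one output component, so the key field is reset to its placeholder in the same call that reports the load status, and the key does not linger in the UI. A minimal standalone sketch of that pattern (fake_load_model is a hypothetical stand-in for dc.openai_model, not code from this Space):

import gradio as gr

def fake_load_model(api_key: str) -> str:
    # Hypothetical stand-in for dc.openai_model(api_key); never echo the key back.
    return "Loaded model OpenAI gpt-3.5-turbo" if api_key else "No key given"

def clear_api_key(api_key):
    # First return value overwrites the textbox, second one fills the status HTML.
    return "api_key...", fake_load_model(api_key)

with gr.Blocks() as demo:
    api_key = gr.Textbox(label="API KEY", value="api_key...")
    openai_button = gr.Button("Load gpt-3.5-turbo")
    model_verify = gr.HTML("")
    openai_button.click(clear_api_key, [api_key], [api_key, model_verify])

demo.launch()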
conversadocs/bones.py CHANGED

@@ -38,7 +38,7 @@ llm_api=HuggingFaceHub(
 
 
 #alter
-def load_db(files, chain_type, k, llm):
+def load_db(files):
   EXTENSIONS = {
     ".txt": (TextLoader, {"encoding": "utf8"}),
     ".pdf": (PyPDFLoader, {}),

@@ -81,7 +81,9 @@ def load_db(files, chain_type, k, llm):
 
   # create vector database from data
   db = DocArrayInMemorySearch.from_documents(docs, embeddings)
-  # define retriever
+  return db
+
+def q_a(db, chain_type="stuff", k=3, llm=None):
   retriever = db.as_retriever(search_type="similarity", search_kwargs={"k": k})
   # create a chatbot chain. Memory is managed externally.
   qa = ConversationalRetrievalChain.from_llm(

@@ -94,6 +96,7 @@ def load_db(files, chain_type, k, llm):
   return qa
 
 
+
 class DocChat(param.Parameterized):
   chat_history = param.List([])
   answer = param.String("")

@@ -106,28 +109,42 @@ class DocChat(param.Parameterized):
   def __init__(self, **params):
     super(DocChat, self).__init__( **params)
     self.loaded_file = "demo_docs/demo.txt"
-    self.qa = load_db(self.loaded_file,"stuff", self.k_value, self.llm)
+    self.db = load_db(self.loaded_file)
+    self.qa = q_a(self.db, "stuff", self.k_value, self.llm)
+
 
   def call_load_db(self, path_file, k):
     if not os.path.exists(path_file[0]): # init or no file specified
       return "No file loaded"
     else:
       try:
-        self.qa = load_db(path_file, "stuff", k, self.llm)
+        self.db = load_db(path_file)
         self.loaded_file = path_file
+        self.qa = q_a(self.db, "stuff", k, self.llm)
+        self.k_value = k
+        #self.clr_history()
+        return f"New DB created and history cleared | Loaded File: {self.loaded_file}"
       except:
         return f'No valid file'
-      self.clr_history()
-      return f"New DB created | Loaded File: {self.loaded_file}"
+
 
   # chat
-  def convchain(self, query, k_max):
+  def convchain(self, query, k_max, recall_previous_messages):
     if k_max != self.k_value:
-      print("Maximum querys changed, reloading DB")
-      self.qa = load_db(self.loaded_file,"stuff", k_max, self.llm)
+      print("Maximum querys changed")
+      self.qa = q_a(self.db, "stuff", k_max, self.llm)
       self.k_value = k_max
 
-    result = self.qa({"question": query, "chat_history": self.chat_history})
+    if not recall_previous_messages:
+      self.clr_history()
+
+    try:
+      result = self.qa({"question": query, "chat_history": self.chat_history})
+    except:
+      self.default_falcon_model()
+      self.qa = q_a(self.db, "stuff", k_max, self.llm)
+      result = self.qa({"question": query, "chat_history": self.chat_history})
+
     self.chat_history.extend([(query, result["answer"])])
     self.db_query = result["generated_question"]
     self.db_response = result["source_documents"]

@@ -152,9 +169,9 @@ class DocChat(param.Parameterized):
         top_k=top_k,
         repeat_penalty=repeat_penalty,
       )
-      self.qa = load_db(self.loaded_file,"stuff", k, self.llm)
+      self.qa = q_a(self.db, "stuff", k, self.llm)
       self.k_value = k
-      return f"Loaded {file_}"
+      return f"Loaded {file_} [GPU INFERENCE]"
     except:
       return "No valid model"
   else:

@@ -172,17 +189,22 @@ class DocChat(param.Parameterized):
         top_k=top_k,
         repeat_penalty=repeat_penalty,
       )
-      self.qa = load_db(self.loaded_file,"stuff", k, self.llm)
+      self.qa = q_a(self.db, "stuff", k, self.llm)
       self.k_value = k
-      return f"Loaded {file_}"
+      return f"Loaded {file_} [CPU INFERENCE SLOW]"
     except:
       return "No valid model"
 
   def default_falcon_model(self):
     self.llm = llm_api[0]
-    self.qa = load_db(self.loaded_file,"stuff", self.k_value, self.llm)
-    return "Loaded model Falcon 7B-instruct"
-
+    self.qa = q_a(self.db, "stuff", self.k_value, self.llm)
+    return "Loaded model Falcon 7B-instruct [API FAST INFERENCE]"
+
+  def openai_model(self, API_KEY):
+    self.llm = ChatOpenAI(temperature=0, openai_api_key=API_KEY, model_name='gpt-3.5-turbo')
+    self.qa = q_a(self.db, "stuff", self.k_value, self.llm)
+    API_KEY = ""
+    return "Loaded model OpenAI gpt-3.5-turbo [API FAST INFERENCE] | If there is no response from the API, Falcon 7B-instruct will be used."
 
   @param.depends('db_query ', )
   def get_lquest(self):
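
Editor's note: the core refactor in this file is the split of the old load_db(files, chain_type, k, llm), which rebuilt embeddings and the chain together, into load_db(files), which embeds documents once and returns the vector DB, and q_a(db, chain_type, k, llm), which cheaply builds a retrieval chain over an existing DB. Changing k or swapping models no longer re-embeds every document, and convchain can fall back to Falcon by rebuilding only the chain when a backend call fails. A minimal sketch of the shape of this split, with hypothetical stand-ins (EmbeddedDB, build_chain) instead of the real langchain objects:

import time

class EmbeddedDB:
    """Hypothetical stand-in for DocArrayInMemorySearch."""
    def __init__(self, files):
        time.sleep(0.1)  # represents the costly embedding step
        self.files = files

def load_db(files):
    # Expensive: runs only when the documents themselves change.
    return EmbeddedDB(files)

def build_chain(db, k=3, llm="falcon-7b-instruct"):
    # Cheap: stand-in for q_a(); safe to rerun on every k or model change.
    return f"chain(k={k}, llm={llm}, docs={db.files})"

db = load_db(["demo_docs/demo.txt"])            # embed once
qa = build_chain(db, k=3)                       # initial chain
qa = build_chain(db, k=5, llm="gpt-3.5-turbo")  # rebuilt without re-embedding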
requirements.txt CHANGED

@@ -11,3 +11,4 @@ huggingface_hub
 unstructured[local-inference]
 gradio==3.35.2
 param==1.13.0
+openai
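
Editor's note: the new dependency is needed even though app.py never imports openai directly. langchain's ChatOpenAI wrapper (used by the new openai_model method in bones.py) imports the openai package when the model object is constructed and raises an ImportError otherwise. A quick sanity check, assuming the langchain of this commit's era ("sk-..." below is a placeholder, not a real key):

from langchain.chat_models import ChatOpenAI

# Construction fails with an ImportError if the `openai` package is absent.
llm = ChatOpenAI(temperature=0, openai_api_key="sk-...", model_name="gpt-3.5-turbo")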