srijaydeshpande committed
Commit c68892d
Parent: 5d7d47a

Update app.py

Files changed (1)
  1. app.py +378 -373
app.py CHANGED
@@ -1,270 +1,103 @@
- # from pdfminer.high_level import extract_pages
- # from pdfminer.layout import LTTextContainer
- # from tqdm import tqdm
- # import re
- # import gradio as gr
- # import os
- # import accelerate
- # import spaces
- # import subprocess
- # from huggingface_hub import hf_hub_download
- # from llama_cpp import Llama
- # from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
- # from llama_cpp_agent.providers import LlamaCppPythonProvider
- # from llama_cpp_agent.chat_history import BasicChatHistory
- # from llama_cpp_agent.chat_history.messages import Roles
-
- # # subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
- # # subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
-
-
- # # hf_hub_download(
- # #     repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
- # #     filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
- # #     local_dir = "./models"
- # # )
-
- # hf_hub_download(
- #     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
- #     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
- #     local_dir = "./models"
- # )
-
- # def process_document(pdf_path, page_ids=None):
- #     extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
-
- #     page2content = {}
-
- #     for extracted_page in tqdm(extracted_pages):
- #         page_id = extracted_page.pageid
- #         content = process_page(extracted_page)
- #         page2content[page_id] = content
-
- #     return page2content
-
-
- # def process_page(extracted_page):
- #     content = []
- #     elements = [element for element in extracted_page._objs]
- #     elements.sort(key=lambda a: a.y1, reverse=True)
- #     for i, element in enumerate(elements):
- #         if isinstance(element, LTTextContainer):
- #             line_text = extract_text_and_normalize(element)
- #             content.append(line_text)
- #     content = re.sub('\n+', ' ', ''.join(content))
- #     return content
-
-
- # def extract_text_and_normalize(element):
- #     # Extract text from line and split it with new lines
- #     line_texts = element.get_text().split('\n')
- #     norm_text = ''
- #     for line_text in line_texts:
- #         line_text = line_text.strip()
- #         if not line_text:
- #             line_text = '\n'
- #         else:
- #             line_text = re.sub('\s+', ' ', line_text)
- #             if not re.search('[\w\d\,\-]', line_text[-1]):
- #                 line_text += '\n'
- #             else:
- #                 line_text += ' '
- #         norm_text += line_text
- #     return norm_text
-
-
- # def txt_to_html(text):
- #     html_content = "<html><body>"
- #     for line in text.split('\n'):
- #         html_content += "<p>{}</p>".format(line.strip())
- #     html_content += "</body></html>"
- #     return html_content
-
-
- # def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
- #     prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]"
-
- #     # model_id = "models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf"
- #     # # model = Llama(model_path=model_id, n_ctx=2048, n_threads=8, n_gpu_layers=-1, n_batch=128)
- #     # model = Llama(
- #     #     model_path=model_id,
- #     #     flash_attn=True,
- #     #     n_gpu_layers=81,
- #     #     n_batch=1024,
- #     #     n_ctx=8192,
- #     # )
-
- #     llm = Llama(
- #         model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
- #         flash_attn=True,
- #         n_gpu_layers=81,
- #         n_batch=1024,
- #         n_ctx=8192,
- #     )
- #     provider = LlamaCppPythonProvider(llm)
-
- #     agent = LlamaCppAgent(
- #         provider,
- #     )
-
- #     settings = provider.get_provider_default_settings()
- #     settings.temperature = 0.7
- #     settings.top_k = 40
- #     settings.top_p = 0.95
- #     settings.max_tokens = 2048
- #     settings.repeat_penalty = 1.1
- #     settings.stream = True
-
-
- #     stream = agent.get_chat_response(
- #         prompt + ' : ' + pdftext,
- #         llm_sampling_settings=settings,
- #         returns_streaming_generator=True,
- #         print_output=False
- #     )
-
- #     outputs = ""
- #     for output in stream:
- #         outputs += output
- #         yield outputs
-
- #     # output = model.create_chat_completion(
- #     #     messages=[
- #     #         {"role": "assistant", "content": prompt},
- #     #         {
- #     #             "role": "user",
- #     #             "content": pdftext
- #     #         }
- #     #     ],
- #     #     max_tokens=maxtokens,
- #     #     temperature=temperature
- #     # )
- #     # output = output['choices'][0]['message']['content']
-
- #     # prompt = "Perform the following actions on given text: 1. Replace any person age with term [redacted] 2. DO NOT REPLACE ANY MEDICAL MEASUREMENTS 3. Replace only the CALENDAR DATES of format 'day/month/year' with term [redacted]"
- #     # output = model.create_chat_completion(
- #     #     messages=[
- #     #         {"role": "assistant", "content": prompt},
- #     #         {
- #     #             "role": "user",
- #     #             "content": output
- #     #         }
- #     #     ],
- #     #     max_tokens=maxtokens,
- #     #     temperature=temperature
- #     # )
- #     # output = output['choices'][0]['message']['content']
-
- #     # print(prompt)
- #     # print(output)
- #     # print('-------------------------------------------------------')
-
- #     # return outputs
-
- # @spaces.GPU(duration=120)
- # def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
- #     files=[files]#remove later
- #     for file in files:
- #         file_name = os.path.basename(file)
- #         file_name_splt = file_name.split('.')
- #         # print('File name is ', file_name)
- #         if (len(file_name_splt) > 1 and file_name_splt[1] == 'pdf'):
- #             page2content = process_document(file, page_ids=[0])
- #             pdftext = page2content[1]
- #         # pdftext = file # remove later
- #         if (pdftext): #shift this if block to right later
- #             anonymized_text = deidentify_doc(pdftext, maxtokens, temperature, top_probability)
- #             return anonymized_text
-
-
- # css = ".gradio-container {background: 'logo.png'}"
- # temp_slider = gr.Slider(minimum=0, maximum=2, value=0.9, label="Temperature Value")
- # prob_slider = gr.Slider(minimum=0, maximum=1, value=0.95, label="Max Probability Value")
- # max_tokens = gr.Number(value=600, label="Max Tokens")
- # input_folder = gr.File(file_count='multiple')
- # input_folder_text = gr.Textbox(label='Enter output folder path')
- # output_text = gr.Textbox()
- # output_path_component = gr.File(label="Select Output Path")
- # iface = gr.Interface(
- #     fn=pdf_to_text,
- #     inputs='file',
- #     # inputs=["textbox", input_folder_text, "textbox", max_tokens, temp_slider, prob_slider],
- #     outputs=output_text,
- #     title='COBIx Endoscopy Report De-Identification',
- #     description="This application assists to remove personal information from the uploaded clinical report",
- #     theme=gr.themes.Soft(),
- # )
- # iface.launch()
-
  import spaces
- import json
  import subprocess
  from llama_cpp import Llama
  from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
  from llama_cpp_agent.providers import LlamaCppPythonProvider
  from llama_cpp_agent.chat_history import BasicChatHistory
  from llama_cpp_agent.chat_history.messages import Roles
- import gradio as gr
- from huggingface_hub import hf_hub_download

  hf_hub_download(
      repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
      filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
      local_dir = "./models"
  )
- # hf_hub_download(
- #     repo_id="bartowski/Mistral-7B-Instruct-v0.3-GGUF",
- #     filename="Mistral-7B-Instruct-v0.3-f32.gguf",
- #     local_dir = "./models"
- # )

- css = """
- .message-row {
-     justify-content: space-evenly !important;
- }
- .message-bubble-border {
-     border-radius: 6px !important;
- }
- .message-buttons-bot, .message-buttons-user {
-     right: 10px !important;
-     left: auto !important;
-     bottom: 2px !important;
- }
- .dark.message-bubble-border {
-     border-color: #343140 !important;
- }
- .dark.user {
-     background: #1e1c26 !important;
- }
- .dark.assistant.dark, .dark.pending.dark {
-     background: #16141c !important;
- }
- """
-
- def get_messages_formatter_type(model_name):
-     if "Llama" in model_name:
-         return MessagesFormatterType.LLAMA_3
-     elif "Mistral" in model_name:
-         return MessagesFormatterType.MISTRAL
-     else:
-         raise ValueError(f"Unsupported model: {model_name}")
-
- @spaces.GPU(duration=120)
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     model,
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
-     top_k,
-     repeat_penalty,
- ):
-     chat_template = get_messages_formatter_type(model)

      llm = Llama(
-         model_path=f"models/{model}",
          flash_attn=True,
          n_gpu_layers=81,
          n_batch=1024,
@@ -274,39 +107,23 @@ def respond(

      agent = LlamaCppAgent(
          provider,
-         system_prompt=f"{system_message}",
-         predefined_messages_formatter_type=chat_template,
-         debug_output=True
      )

      settings = provider.get_provider_default_settings()
-     settings.temperature = temperature
-     settings.top_k = top_k
-     settings.top_p = top_p
-     settings.max_tokens = max_tokens
-     settings.repeat_penalty = repeat_penalty
      settings.stream = True

      messages = BasicChatHistory()

-     print('history is ------------------ ', history)
-
-     for msn in history:
-         user = {
-             'role': Roles.user,
-             'content': msn[0]
-         }
-         assistant = {
-             'role': Roles.assistant,
-             'content': msn[1]
-         }
-         messages.add_message(user)
-         messages.add_message(assistant)
-     print('MESSAGE IS ',message)
      stream = agent.get_chat_response(
-         message,
          llm_sampling_settings=settings,
-         chat_history=messages,
          returns_streaming_generator=True,
          print_output=False
      )
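
For orientation, the pattern this hunk strips the handler down to is the plain llama-cpp-agent streaming loop. A minimal sketch, using only the APIs that appear elsewhere in this diff and assuming the Q3_K_M GGUF has already been fetched into ./models by the hf_hub_download call above:

# Minimal streaming setup with llama-cpp-python + llama-cpp-agent,
# mirroring the calls kept by this hunk (a sketch, not the committed file).
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider

llm = Llama(
    model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
    flash_attn=True,
    n_gpu_layers=81,  # offload all layers of the 70B model to GPU
    n_batch=1024,
    n_ctx=8192,
)
provider = LlamaCppPythonProvider(llm)
agent = LlamaCppAgent(
    provider,
    system_prompt="You are a helpful assistant.",
    predefined_messages_formatter_type=MessagesFormatterType.LLAMA_3,
)

settings = provider.get_provider_default_settings()
settings.stream = True

# returns_streaming_generator=True makes the call yield text chunks
# as they are decoded instead of blocking for the full completion.
for chunk in agent.get_chat_response(
    "Hello!",
    llm_sampling_settings=settings,
    returns_streaming_generator=True,
    print_output=False,
):
    print(chunk, end="", flush=True)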
@@ -316,103 +133,291 @@ def respond(
          outputs += output
          yield outputs

- PLACEHOLDER = """
- <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
-     <figure style="margin: 0;">
-         <img src="https://huggingface.co/spaces/pabloce/llama-cpp-agent/resolve/main/llama.jpg" alt="Logo" style="width: 100%; height: 100%; border-radius: 8px;">
-     </figure>
-     <div style="padding: .5rem 1.5rem;">
-         <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2>
-         <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
-         <div style="display: flex; justify-content: space-between; align-items: center;">
-             <div style="display: flex; flex-flow: column; justify-content: space-between;">
-                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(229, 70, 77, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #f88181; margin-bottom: 2.5px;">
-                     Mistral 7B Instruct v0.3
-                 </span>
-                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(79, 70, 229, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #60a5fa; margin-top: 2.5px;">
-                     Meta Llama 3 70B Instruct
-                 </span>
-             </div>
-             <div style="display: flex; justify-content: flex-end; align-items: center;">
-                 <a href="https://discord.gg/sRMvWKrh" target="_blank" rel="noreferrer" style="padding: .5rem;">
-                     <svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25">
-                         <title>Discord</title>
-                         <path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path>
-                     </svg>
-                 </a>
-                 <a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;">
-                     <svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18">
-                         <title>GitHub</title>
-                         <path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path>
-                     </svg>
-                 </a>
-             </div>
-         </div>
-     </div>
- </div>
- """
-
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Dropdown([
-                 'Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
-                 'Mistral-7B-Instruct-v0.3-f32.gguf'
-             ],
-             value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
-             label="Model"
-         ),
-         gr.Textbox(value="You are a helpful assistant.", label="System message"),
-         gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p",
-         ),
-         gr.Slider(
-             minimum=0,
-             maximum=100,
-             value=40,
-             step=1,
-             label="Top-k",
-         ),
-         gr.Slider(
-             minimum=0.0,
-             maximum=2.0,
-             value=1.1,
-             step=0.1,
-             label="Repetition penalty",
-         ),
-     ],
-     theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
-         body_background_fill_dark="#16141c",
-         block_background_fill_dark="#16141c",
-         block_border_width="1px",
-         block_title_background_fill_dark="#1e1c26",
-         input_background_fill_dark="#292733",
-         button_secondary_background_fill_dark="#24212b",
-         border_color_accent_dark="#343140",
-         border_color_primary_dark="#343140",
-         background_fill_secondary_dark="#16141c",
-         color_accent_soft_dark="transparent",
-         code_background_fill_dark="#292733",
-     ),
-     css=css,
-     retry_btn="Retry",
-     undo_btn="Undo",
-     clear_btn="Clear",
-     submit_btn="Send",
-     description="Llama-cpp-agent: Chat multi llm selection",
-     chatbot=gr.Chatbot(
-         scale=1,
-         placeholder=PLACEHOLDER,
-         likeable=False,
-         show_copy_button=True
-     )
  )

- if __name__ == "__main__":
-     demo.launch()
+ from pdfminer.high_level import extract_pages
+ from pdfminer.layout import LTTextContainer
+ from tqdm import tqdm
+ import re
+ import gradio as gr
+ import os
+ import accelerate
  import spaces
  import subprocess
+ from huggingface_hub import hf_hub_download
  from llama_cpp import Llama
  from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
  from llama_cpp_agent.providers import LlamaCppPythonProvider
  from llama_cpp_agent.chat_history import BasicChatHistory
  from llama_cpp_agent.chat_history.messages import Roles
+
+ # subprocess.run('pip install llama-cpp-python==0.2.75 --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cu124', shell=True)
+ # subprocess.run('pip install llama-cpp-agent==0.2.10', shell=True)
+
+
+ # hf_hub_download(
+ #     repo_id="QuantFactory/Meta-Llama-3-8B-Instruct-GGUF",
+ #     filename="Meta-Llama-3-8B-Instruct.Q8_0.gguf",
+ #     local_dir = "./models"
+ # )

  hf_hub_download(
      repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
      filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
      local_dir = "./models"
  )

+ def process_document(pdf_path, page_ids=None):
+     extracted_pages = extract_pages(pdf_path, page_numbers=page_ids)
+
+     page2content = {}
+
+     for extracted_page in tqdm(extracted_pages):
+         page_id = extracted_page.pageid
+         content = process_page(extracted_page)
+         page2content[page_id] = content
+
+     return page2content
+
+
+ def process_page(extracted_page):
+     content = []
+     elements = [element for element in extracted_page._objs]
+     elements.sort(key=lambda a: a.y1, reverse=True)
+     for i, element in enumerate(elements):
+         if isinstance(element, LTTextContainer):
+             line_text = extract_text_and_normalize(element)
+             content.append(line_text)
+     content = re.sub('\n+', ' ', ''.join(content))
+     return content
+
+
+ def extract_text_and_normalize(element):
+     # Extract text from line and split it with new lines
+     line_texts = element.get_text().split('\n')
+     norm_text = ''
+     for line_text in line_texts:
+         line_text = line_text.strip()
+         if not line_text:
+             line_text = '\n'
+         else:
+             line_text = re.sub('\s+', ' ', line_text)
+             if not re.search('[\w\d\,\-]', line_text[-1]):
+                 line_text += '\n'
+             else:
+                 line_text += ' '
+         norm_text += line_text
+     return norm_text
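
For reference, these three helpers compose into a one-call page extractor. A brief usage sketch (the "sample.pdf" path is a hypothetical placeholder, not part of the commit):

# process_document drives pdfminer's extract_pages and returns a dict
# keyed by pdfminer's 1-based pageid; the page_ids argument is 0-based.
page2content = process_document("sample.pdf", page_ids=[0])
first_page_text = page2content[1]
print(first_page_text[:200])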
+
+
+ def txt_to_html(text):
+     html_content = "<html><body>"
+     for line in text.split('\n'):
+         html_content += "<p>{}</p>".format(line.strip())
+     html_content += "</body></html>"
+     return html_content
+
+
+ def deidentify_doc(pdftext, maxtokens, temperature, top_probability):
+     prompt = "In the following text replace any person name and any address with term [redacted], replace any Date of Birth and NHS number with term [redacted]"
+
+     # model_id = "models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf"
+     # # model = Llama(model_path=model_id, n_ctx=2048, n_threads=8, n_gpu_layers=-1, n_batch=128)
+     # model = Llama(
+     #     model_path=model_id,
+     #     flash_attn=True,
+     #     n_gpu_layers=81,
+     #     n_batch=1024,
+     #     n_ctx=8192,
+     # )
+
+     chat_template = MessagesFormatterType.LLAMA_3

      llm = Llama(
+         model_path="models/Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
          flash_attn=True,
          n_gpu_layers=81,
          n_batch=1024,

      agent = LlamaCppAgent(
          provider,
+         system_prompt="You are a helpful assistant.",
+         predefined_messages_formatter_type=chat_template
      )

      settings = provider.get_provider_default_settings()
+     settings.temperature = 0.7
+     settings.top_k = 40
+     settings.top_p = 0.95
+     settings.max_tokens = 2048
+     settings.repeat_penalty = 1.1
      settings.stream = True

      messages = BasicChatHistory()

      stream = agent.get_chat_response(
+         prompt + ' : ' + pdftext,
          llm_sampling_settings=settings,
          returns_streaming_generator=True,
          print_output=False
      )
          outputs += output
          yield outputs

+     # output = model.create_chat_completion(
+     #     messages=[
+     #         {"role": "assistant", "content": prompt},
+     #         {
+     #             "role": "user",
+     #             "content": pdftext
+     #         }
+     #     ],
+     #     max_tokens=maxtokens,
+     #     temperature=temperature
+     # )
+     # output = output['choices'][0]['message']['content']
+
+     # prompt = "Perform the following actions on given text: 1. Replace any person age with term [redacted] 2. DO NOT REPLACE ANY MEDICAL MEASUREMENTS 3. Replace only the CALENDAR DATES of format 'day/month/year' with term [redacted]"
+     # output = model.create_chat_completion(
+     #     messages=[
+     #         {"role": "assistant", "content": prompt},
+     #         {
+     #             "role": "user",
+     #             "content": output
+     #         }
+     #     ],
+     #     max_tokens=maxtokens,
+     #     temperature=temperature
+     # )
+     # output = output['choices'][0]['message']['content']
+
+     # print(prompt)
+     # print(output)
+     # print('-------------------------------------------------------')
+
+     # return outputs
+
+ @spaces.GPU(duration=120)
+ def pdf_to_text(files, maxtokens=2048, temperature=0, top_probability=0.95):
+     files=[files]#remove later
+     for file in files:
+         file_name = os.path.basename(file)
+         file_name_splt = file_name.split('.')
+         # print('File name is ', file_name)
+         if (len(file_name_splt) > 1 and file_name_splt[1] == 'pdf'):
+             page2content = process_document(file, page_ids=[0])
+             pdftext = page2content[1]
+         # pdftext = file # remove later
+         if (pdftext): #shift this if block to right later
+             anonymized_text = deidentify_doc(pdftext, maxtokens, temperature, top_probability)
+             return anonymized_text
+
+
+ css = ".gradio-container {background: 'logo.png'}"
+ temp_slider = gr.Slider(minimum=0, maximum=2, value=0.9, label="Temperature Value")
+ prob_slider = gr.Slider(minimum=0, maximum=1, value=0.95, label="Max Probability Value")
+ max_tokens = gr.Number(value=600, label="Max Tokens")
+ input_folder = gr.File(file_count='multiple')
+ input_folder_text = gr.Textbox(label='Enter output folder path')
+ output_text = gr.Textbox()
+ output_path_component = gr.File(label="Select Output Path")
+ iface = gr.Interface(
+     fn=pdf_to_text,
+     inputs='file',
+     # inputs=["textbox", input_folder_text, "textbox", max_tokens, temp_slider, prob_slider],
+     outputs=output_text,
+     title='COBIx Endoscopy Report De-Identification',
+     description="This application assists to remove personal information from the uploaded clinical report",
+     theme=gr.themes.Soft(),
  )
+ iface.launch()
+
+ # import spaces
+ # import json
+ # import subprocess
+ # from llama_cpp import Llama
+ # from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
+ # from llama_cpp_agent.providers import LlamaCppPythonProvider
+ # from llama_cpp_agent.chat_history import BasicChatHistory
+ # from llama_cpp_agent.chat_history.messages import Roles
+ # import gradio as gr
+ # from huggingface_hub import hf_hub_download
+
+ # hf_hub_download(
+ #     repo_id="bartowski/Meta-Llama-3-70B-Instruct-GGUF",
+ #     filename="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
+ #     local_dir = "./models"
+ # )
+ # # hf_hub_download(
+ # #     repo_id="bartowski/Mistral-7B-Instruct-v0.3-GGUF",
+ # #     filename="Mistral-7B-Instruct-v0.3-f32.gguf",
+ # #     local_dir = "./models"
+ # # )
+
+ # css = """
+ # .message-row {
+ #     justify-content: space-evenly !important;
+ # }
+ # .message-bubble-border {
+ #     border-radius: 6px !important;
+ # }
+ # .message-buttons-bot, .message-buttons-user {
+ #     right: 10px !important;
+ #     left: auto !important;
+ #     bottom: 2px !important;
+ # }
+ # .dark.message-bubble-border {
+ #     border-color: #343140 !important;
+ # }
+ # .dark.user {
+ #     background: #1e1c26 !important;
+ # }
+ # .dark.assistant.dark, .dark.pending.dark {
+ #     background: #16141c !important;
+ # }
+ # """
+
+ # def get_messages_formatter_type(model_name):
+ #     if "Llama" in model_name:
+ #         return MessagesFormatterType.LLAMA_3
+ #     elif "Mistral" in model_name:
+ #         return MessagesFormatterType.MISTRAL
+ #     else:
+ #         raise ValueError(f"Unsupported model: {model_name}")
+
+ # @spaces.GPU(duration=120)
+ # def respond(
+ #     message,
+ #     history: list[tuple[str, str]],
+ #     model,
+ #     system_message,
+ #     max_tokens,
+ #     temperature,
+ #     top_p,
+ #     top_k,
+ #     repeat_penalty,
+ # ):
+ #     chat_template = get_messages_formatter_type(model)
+
+ #     llm = Llama(
+ #         model_path=f"models/{model}",
+ #         flash_attn=True,
+ #         n_gpu_layers=81,
+ #         n_batch=1024,
+ #         n_ctx=8192,
+ #     )
+ #     provider = LlamaCppPythonProvider(llm)
+
+ #     agent = LlamaCppAgent(
+ #         provider,
+ #         system_prompt=f"{system_message}",
+ #         predefined_messages_formatter_type=chat_template,
+ #         debug_output=True
+ #     )
+
+ #     settings = provider.get_provider_default_settings()
+ #     settings.temperature = temperature
+ #     settings.top_k = top_k
+ #     settings.top_p = top_p
+ #     settings.max_tokens = max_tokens
+ #     settings.repeat_penalty = repeat_penalty
+ #     settings.stream = True
+
+ #     messages = BasicChatHistory()
+
+ #     print('history is ------------------ ', history)
+
+ #     for msn in history:
+ #         user = {
+ #             'role': Roles.user,
+ #             'content': msn[0]
+ #         }
+ #         assistant = {
+ #             'role': Roles.assistant,
+ #             'content': msn[1]
+ #         }
+ #         messages.add_message(user)
+ #         messages.add_message(assistant)
+
+ #     stream = agent.get_chat_response(
+ #         message,
+ #         llm_sampling_settings=settings,
+ #         chat_history=messages,
+ #         returns_streaming_generator=True,
+ #         print_output=False
+ #     )
+
+ #     outputs = ""
+ #     for output in stream:
+ #         outputs += output
+ #         yield outputs
+
+ # PLACEHOLDER = """
+ # <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 6px; border-width: 1px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
+ #     <figure style="margin: 0;">
+ #         <img src="https://huggingface.co/spaces/pabloce/llama-cpp-agent/resolve/main/llama.jpg" alt="Logo" style="width: 100%; height: 100%; border-radius: 8px;">
+ #     </figure>
+ #     <div style="padding: .5rem 1.5rem;">
+ #         <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">llama-cpp-agent</h2>
+ #         <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">The llama-cpp-agent framework simplifies interactions with Large Language Models (LLMs), providing an interface for chatting, executing function calls, generating structured output, performing retrieval augmented generation, and processing text using agentic chains with tools.</p>
+ #         <div style="display: flex; justify-content: space-between; align-items: center;">
+ #             <div style="display: flex; flex-flow: column; justify-content: space-between;">
+ #                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(229, 70, 77, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #f88181; margin-bottom: 2.5px;">
+ #                     Mistral 7B Instruct v0.3
+ #                 </span>
+ #                 <span style="display: inline-flex; align-items: center; border-radius: 0.375rem; background-color: rgba(79, 70, 229, 0.1); padding: 0.1rem 0.75rem; font-size: 0.75rem; font-weight: 500; color: #60a5fa; margin-top: 2.5px;">
+ #                     Meta Llama 3 70B Instruct
+ #                 </span>
+ #             </div>
+ #             <div style="display: flex; justify-content: flex-end; align-items: center;">
+ #                 <a href="https://discord.gg/sRMvWKrh" target="_blank" rel="noreferrer" style="padding: .5rem;">
+ #                     <svg width="24" height="24" fill="currentColor" xmlns="http://www.w3.org/2000/svg" viewBox="0 5 30.67 23.25">
+ #                         <title>Discord</title>
+ #                         <path d="M26.0015 6.9529C24.0021 6.03845 21.8787 5.37198 19.6623 5C19.3833 5.48048 19.0733 6.13144 18.8563 6.64292C16.4989 6.30193 14.1585 6.30193 11.8336 6.64292C11.6166 6.13144 11.2911 5.48048 11.0276 5C8.79575 5.37198 6.67235 6.03845 4.6869 6.9529C0.672601 12.8736 -0.41235 18.6548 0.130124 24.3585C2.79599 26.2959 5.36889 27.4739 7.89682 28.2489C8.51679 27.4119 9.07477 26.5129 9.55525 25.5675C8.64079 25.2265 7.77283 24.808 6.93587 24.312C7.15286 24.1571 7.36986 23.9866 7.57135 23.8161C12.6241 26.1255 18.0969 26.1255 23.0876 23.8161C23.3046 23.9866 23.5061 24.1571 23.7231 24.312C22.8861 24.808 22.0182 25.2265 21.1037 25.5675C21.5842 26.5129 22.1422 27.4119 22.7621 28.2489C25.2885 27.4739 27.8769 26.2959 30.5288 24.3585C31.1952 17.7559 29.4733 12.0212 26.0015 6.9529ZM10.2527 20.8402C8.73376 20.8402 7.49382 19.4608 7.49382 17.7714C7.49382 16.082 8.70276 14.7025 10.2527 14.7025C11.7871 14.7025 13.0425 16.082 13.0115 17.7714C13.0115 19.4608 11.7871 20.8402 10.2527 20.8402ZM20.4373 20.8402C18.9183 20.8402 17.6768 19.4608 17.6768 17.7714C17.6768 16.082 18.8873 14.7025 20.4373 14.7025C21.9717 14.7025 23.2271 16.082 23.1961 17.7714C23.1961 19.4608 21.9872 20.8402 20.4373 20.8402Z"></path>
+ #                     </svg>
+ #                 </a>
+ #                 <a href="https://github.com/Maximilian-Winter/llama-cpp-agent" target="_blank" rel="noreferrer" style="padding: .5rem;">
+ #                     <svg width="24" height="24" fill="currentColor" viewBox="3 3 18 18">
+ #                         <title>GitHub</title>
+ #                         <path d="M12 3C7.0275 3 3 7.12937 3 12.2276C3 16.3109 5.57625 19.7597 9.15374 20.9824C9.60374 21.0631 9.77249 20.7863 9.77249 20.5441C9.77249 20.3249 9.76125 19.5982 9.76125 18.8254C7.5 19.2522 6.915 18.2602 6.735 17.7412C6.63375 17.4759 6.19499 16.6569 5.8125 16.4378C5.4975 16.2647 5.0475 15.838 5.80124 15.8264C6.51 15.8149 7.01625 16.4954 7.18499 16.7723C7.99499 18.1679 9.28875 17.7758 9.80625 17.5335C9.885 16.9337 10.1212 16.53 10.38 16.2993C8.3775 16.0687 6.285 15.2728 6.285 11.7432C6.285 10.7397 6.63375 9.9092 7.20749 9.26326C7.1175 9.03257 6.8025 8.08674 7.2975 6.81794C7.2975 6.81794 8.05125 6.57571 9.77249 7.76377C10.4925 7.55615 11.2575 7.45234 12.0225 7.45234C12.7875 7.45234 13.5525 7.55615 14.2725 7.76377C15.9937 6.56418 16.7475 6.81794 16.7475 6.81794C17.2424 8.08674 16.9275 9.03257 16.8375 9.26326C17.4113 9.9092 17.76 10.7281 17.76 11.7432C17.76 15.2843 15.6563 16.0687 13.6537 16.2993C13.98 16.5877 14.2613 17.1414 14.2613 18.0065C14.2613 19.2407 14.25 20.2326 14.25 20.5441C14.25 20.7863 14.4188 21.0746 14.8688 20.9824C16.6554 20.364 18.2079 19.1866 19.3078 17.6162C20.4077 16.0457 20.9995 14.1611 21 12.2276C21 7.12937 16.9725 3 12 3Z"></path>
+ #                     </svg>
+ #                 </a>
+ #             </div>
+ #         </div>
+ #     </div>
+ # </div>
+ # """
+
+ # demo = gr.ChatInterface(
+ #     respond,
+ #     additional_inputs=[
+ #         gr.Dropdown([
+ #                 'Meta-Llama-3-70B-Instruct-Q3_K_M.gguf',
+ #                 'Mistral-7B-Instruct-v0.3-f32.gguf'
+ #             ],
+ #             value="Meta-Llama-3-70B-Instruct-Q3_K_M.gguf",
+ #             label="Model"
+ #         ),
+ #         gr.Textbox(value="You are a helpful assistant.", label="System message"),
+ #         gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
+ #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+ #         gr.Slider(
+ #             minimum=0.1,
+ #             maximum=1.0,
+ #             value=0.95,
+ #             step=0.05,
+ #             label="Top-p",
+ #         ),
+ #         gr.Slider(
+ #             minimum=0,
+ #             maximum=100,
+ #             value=40,
+ #             step=1,
+ #             label="Top-k",
+ #         ),
+ #         gr.Slider(
+ #             minimum=0.0,
+ #             maximum=2.0,
+ #             value=1.1,
+ #             step=0.1,
+ #             label="Repetition penalty",
+ #         ),
+ #     ],
+ #     theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
+ #         body_background_fill_dark="#16141c",
+ #         block_background_fill_dark="#16141c",
+ #         block_border_width="1px",
+ #         block_title_background_fill_dark="#1e1c26",
+ #         input_background_fill_dark="#292733",
+ #         button_secondary_background_fill_dark="#24212b",
+ #         border_color_accent_dark="#343140",
+ #         border_color_primary_dark="#343140",
+ #         background_fill_secondary_dark="#16141c",
+ #         color_accent_soft_dark="transparent",
+ #         code_background_fill_dark="#292733",
+ #     ),
+ #     css=css,
+ #     retry_btn="Retry",
+ #     undo_btn="Undo",
+ #     clear_btn="Clear",
+ #     submit_btn="Send",
+ #     description="Llama-cpp-agent: Chat multi llm selection",
+ #     chatbot=gr.Chatbot(
+ #         scale=1,
+ #         placeholder=PLACEHOLDER,
+ #         likeable=False,
+ #         show_copy_button=True
+ #     )
+ # )

+ # if __name__ == "__main__":
+ #     demo.launch()
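
End to end, the restored script downloads the Q3_K_M GGUF, extracts page one of the uploaded PDF, and streams back a redacted version. A sketch of driving deidentify_doc directly under the same assumptions (the input string is illustrative only, not data from the commit):

# deidentify_doc is a generator that yields the accumulated output
# after each streamed chunk, so the last yielded value is the full
# redacted text.
redacted = ""
for partial in deidentify_doc(
    "Example report text containing a name and a date of birth.",
    maxtokens=2048, temperature=0, top_probability=0.95,
):
    redacted = partial
print(redacted)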