# Hugging Face Spaces page header ("Spaces: / Sleeping / Sleeping") — scrape
# residue, not part of the program; preserved as a comment so the file parses.
# Standard library
import html
import os
import re

# Third-party
import accelerate
import gradio as gr
import torch
import transformers
from llama_cpp import Llama
from pdfminer.high_level import extract_pages
from pdfminer.layout import LTTextContainer
from tqdm import tqdm

# from gpt4all import GPT4All
# from transformers import GemmaTokenizer, AutoModelForCausalLM
# from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# HF_TOKEN = os.environ.get("HF_TOKEN", None)
def process_document(pdf_path, page_ids=None):
    """Extract normalized text from a PDF, one entry per page.

    Args:
        pdf_path: Path to (or file-like object for) the PDF to parse.
        page_ids: Optional iterable of zero-based page numbers to extract;
            None extracts every page.

    Returns:
        dict mapping pdfminer's page id to that page's normalized text.
    """
    extracted = extract_pages(pdf_path, page_numbers=page_ids)
    # tqdm shows console progress while pdfminer yields pages lazily.
    return {page.pageid: process_page(page) for page in tqdm(extracted)}
def process_page(extracted_page):
    """Return the text of one pdfminer page as a single string, read top-down."""
    # Order layout elements by their top edge (y1) so text reads top-to-bottom.
    ordered = sorted(extracted_page._objs, key=lambda el: el.y1, reverse=True)
    pieces = [
        extract_text_and_normalize(el)
        for el in ordered
        if isinstance(el, LTTextContainer)  # keep only text containers
    ]
    # Collapse any run of newlines in the joined text into a single space.
    return re.sub('\n+', ' ', ''.join(pieces))
def extract_text_and_normalize(element):
    """Normalize the text of a pdfminer text element.

    Splits the element's text into physical lines, collapses internal
    whitespace runs, and rejoins them: a line ending in a word character,
    comma or hyphen is assumed to continue and is followed by a space;
    any other line ending (or a blank line) is followed by a newline.

    Args:
        element: Object exposing ``get_text() -> str`` (e.g. LTTextContainer).

    Returns:
        The normalized text as a single string.
    """
    pieces = []
    for raw_line in element.get_text().split('\n'):
        line = raw_line.strip()
        if not line:
            # Lines that are empty after stripping become paragraph breaks.
            pieces.append('\n')
        else:
            # Raw strings avoid invalid-escape SyntaxWarnings (Python >= 3.12).
            line = re.sub(r'\s+', ' ', line)
            # \w already covers digits; a trailing word char, comma or hyphen
            # suggests the sentence (or hyphenated word) continues.
            if re.search(r'[\w,-]', line[-1]):
                pieces.append(line + ' ')
            else:
                pieces.append(line + '\n')
    # join() avoids the quadratic cost of repeated string concatenation.
    return ''.join(pieces)
def txt_to_html(text):
    """Render plain text as a minimal HTML document, one <p> per line.

    Each line is HTML-escaped so characters such as '<' or '&' in the
    model output cannot break (or inject markup into) the generated page.

    Args:
        text: Plain text; lines are separated by newlines.

    Returns:
        A single HTML string: <html><body><p>line</p>...</body></html>.
    """
    paragraphs = (
        "<p>{}</p>".format(html.escape(line.strip()))
        for line in text.split('\n')
    )
    # join() over a generator replaces the original quadratic '+=' build-up.
    return "<html><body>{}</body></html>".format(''.join(paragraphs))
def deidentify_doc(pdftext=""):
    """Ask the local LLM to redact personal information from clinical text.

    Args:
        pdftext: Clinical note text extracted from the PDF.

    Returns:
        The model's anonymized version of ``pdftext``.
    """
    prompt = "Please anonymize the following clinical note. Replace all the following information with the term '[redacted]': Redact any strings that might be a name or initials, patients’ names, doctors’ names, the names Dr., redact any medical staff names, redact any strings that might be a location or address, such as '3970 Longview Drive', redact any strings that look like 'age 37', redact any dates and registration numbers, redact professions such as 'manager', redact any contact information."
    print('Input prompt is ', prompt)
    print('Input pdf text is ', pdftext)
    # The redaction instructions belong in the "system" role; the original
    # code sent them as an "assistant" message, which chat-tuned models
    # follow less reliably.
    output = model.create_chat_completion(
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": pdftext},
        ],
        max_tokens=600,
        temperature=0,  # deterministic decoding for reproducible redaction
    )
    return output['choices'][0]['message']['content']
def pdf_to_text(file):
    """Gradio handler: de-identify the first page of an uploaded PDF.

    Args:
        file: Path of the uploaded PDF, or a falsy value when nothing
            was uploaded (the model is then queried with empty text).

    Returns:
        HTML string containing the de-identified text, one <p> per line.
        The same HTML is also written to 'out.html' as a side effect.
    """
    pdftext = ""
    if file:
        # Extract only the first page; pdfminer page ids are 1-based,
        # so page index 0 is stored under key 1.
        page2content = process_document(file, page_ids=[0])
        pdftext = page2content[1]
    display_text = deidentify_doc(pdftext)
    html_doc = txt_to_html(display_text)
    # Bug fix: the original shadowed the 'file' parameter with the opened
    # file handle; use a distinct name for the output handle.
    with open('out.html', "w", encoding="utf-8") as out_file:
        out_file.write(html_doc)
    return html_doc
# Load the quantized Llama-3-8B-Instruct model through the llama.cpp bindings.
model_id = "Meta-Llama-3-8B-Instruct.Q5_K_M.gguf"
model = Llama(
    model_path=model_id,
    n_ctx=2048,       # context window in tokens
    n_threads=8,      # CPU threads for non-offloaded work
    n_gpu_layers=-1,  # offload every layer to the GPU when available
    n_batch=64,       # prompt-processing batch size
)
# NOTE(review): `css` is assigned but never passed to gr.Interface (and the
# value is not valid CSS), so it currently has no effect — confirm intent.
css = ".gradio-container {background: 'logo.png'}"

# Build and launch the UI: upload a PDF, receive de-identified HTML back.
iface = gr.Interface(
    fn=pdf_to_text,
    inputs=['file'],
    outputs="html",
    title='COBIx Endoscopy Report De-Identification',
    description="This application assists to remove personal information from the uploaded clinical report",
    theme=gr.themes.Soft(),
)
iface.launch()