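"""Gradio demo that visualizes random PDF samples from multi-page document
classification datasets at https://huggingface.co/datasets/bdpc, rendering each
sampled PDF as an image grid with page numbers."""
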
import os
import gradio as gr
from collections import OrderedDict
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
import time
import tempfile
import PyPDF2
import pdf2image
from datasets import load_dataset
MAX_PAGES = 50  # render at most this many pages per PDF
MAX_PDF_SIZE = 100_000_000  # almost 100MB
MIN_WIDTH, MIN_HEIGHT = 150, 150  # skip embedded images smaller than this


def equal_image_grid(images):
    def compute_grid(n, max_cols=6):
        equalDivisor = int(n**0.5)
        cols = min(equalDivisor, max_cols)
        rows = equalDivisor
        if rows * cols >= n:
            return rows, cols
        cols += 1
        if rows * cols >= n:
            return rows, cols
        while rows * cols < n:
            rows += 1
        return rows, cols

    # drop degenerate images before computing the grid shape
    images = [im for im in images if (im.height > 0) and (im.width > 0)]  # could be NA
    rows, cols = compute_grid(len(images))

    # rescale every page to the minimum width (heights are padded by the grid)
    min_width = min(im.width for im in images)
    images = [im.resize((min_width, int(im.height * min_width / im.width)), resample=Image.BICUBIC) for im in images]

    w, h = max(img.size[0] for img in images), max(img.size[1] for img in images)
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(images):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def add_pagenumbers(im_list, height_scale=40):
    def add_pagenumber(image, i):
        width, height = image.size
        draw = ImageDraw.Draw(image)
        fontsize = int((width * height) ** 0.5 / height_scale)
        try:
            font = ImageFont.truetype("Arial.ttf", fontsize)
        except OSError:
            # Arial may not be installed (e.g. in Linux containers); fall back to PIL's built-in font
            font = ImageFont.load_default()
        margin = int(2 * fontsize)
        draw.text(
            (width - margin, height - margin),
            str(i + 1),
            fill="#D00917",
            font=font,
            spacing=4,
            align="right",
        )

    for i, image in enumerate(im_list):
        add_pagenumber(image, i)


def pdf_to_grid(pdf_path):
    reader = PyPDF2.PdfReader(pdf_path)
    reached_page_limit = False
    images = []
    try:
        for p, page in enumerate(reader.pages):
            if p >= MAX_PAGES:  # only render the first MAX_PAGES pages
                reached_page_limit = True
                break
            for image in page.images:
                im = Image.open(BytesIO(image.data))
                if im.width < MIN_WIDTH and im.height < MIN_HEIGHT:
                    continue
                images.append(im)
    except Exception as e:
        print(f"{pdf_path} PyPDF get_images {e}")
        # pdf_path is a BytesIO (see main), so hand the raw bytes to pdf2image
        images = pdf2image.convert_from_bytes(pdf_path.getvalue())
        # simpler but slower
        # images = pdf2image.convert_from_path(pdf_path)
    if len(images) == 0:
        return None
    add_pagenumbers(images)
    return equal_image_grid(images)


def main(dataset, label):
    # to get different samples, use the current timestamp as seed
    timestamp = time.time()
    seed = int(timestamp * 1000) % 1000000
    try:
        # streaming (iterable) datasets take a shuffle buffer
        shuffled_dataset = DATASETS[dataset].shuffle(buffer_size=10, seed=seed)
    except TypeError:
        # non-streaming datasets have no buffer_size argument
        shuffled_dataset = DATASETS[dataset].shuffle(seed=seed)

    # iterate until a sample matches the requested label and yields a usable grid
    for sample in shuffled_dataset:
        label_column = "label" if "label" in sample else "labels"
        filelabel = _CLASSES[sample[label_column]]
        if label and filelabel != label:
            continue
        pdf_path = sample["file"]
        grid = pdf_to_grid(BytesIO(pdf_path))
        if grid is None:
            continue
        PDF = tempfile.NamedTemporaryFile(suffix=".pdf", delete=False)
        with PDF as tmp_file:
            tmp_file.write(pdf_path)
        return filelabel, grid, tmp_file.name


_CLASSES = [
    "letter",
    "form",
    "email",
    "handwritten",
    "advertisement",
    "scientific report",
    "scientific publication",
    "specification",
    "file folder",
    "news article",
    "budget",
    "invoice",
    "presentation",
    "questionnaire",
    "resume",
    "memo",
    "",  # empty string = no label filter (default dropdown value)
]
# load both datasets in memory? --> easier retrieval afterwards with seed index based on pressing button
DATASETS = OrderedDict(
    {
        # "rvl_cdip": load_dataset("bdpc/rvl_cdip_mp", split="test", streaming=True),
        "rvl_cdip_N": load_dataset("bdpc/rvl_cdip_n_mp", split="test"),
    }
)
# NOTE: "rvl_cdip" is only usable if the corresponding dataset is loaded in DATASETS above
meta_cats = {"dataset": ["rvl_cdip", "rvl_cdip_N"], "label": _CLASSES}
sliders = [gr.Dropdown(choices=choices, value=choices[-1], label=label) for label, choices in meta_cats.items()]
slider_defaults = [sliders[0].value, None]
# test
# l, im, f = main(*slider_defaults)
outputs = [
    gr.Textbox(label="label"),
    gr.Image(label="image grid of PDF"),
    gr.File(label="PDF"),
]
DESCRIPTION = """
Visualize PDF samples from multi-page (PDF) document classification datasets @ https://huggingface.co/datasets/bdpc

- **dataset**: dataset name
- **label**: label name

The first time the app is launched, it downloads the datasets, which can take a few minutes.
For the fastest response, choose the rvl_cdip_N dataset, which is considerably smaller to iterate over.
"""
# main("rvl_cdip_N", "letter")
iface = gr.Interface(
    fn=main,
    inputs=sliders,
    outputs=outputs,
    description=DESCRIPTION,
    title="Beyond Document Page Classification: Examples",
)
iface.launch(share=True)