sussahoo committed on
Commit
8fd268e
1 Parent(s): 2903386

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +332 -0
  2. image_0.png +0 -0
  3. packages.txt +6 -0
  4. requirements.txt +47 -0
app.py ADDED
@@ -0,0 +1,332 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from PIL import Image, ImageEnhance, ImageOps
2
+ import string
3
+ from collections import Counter
4
+ from itertools import tee, count
5
+ import pytesseract
6
+ from pytesseract import Output
7
+ import json
8
+ import pandas as pd
9
+ # import matplotlib.pyplot as plt
10
+ import cv2
11
+ import numpy as np
12
+ from transformers import DetrFeatureExtractor
13
+ from transformers import TableTransformerForObjectDetection
14
+ import torch
15
+ import gradio as gr
16
+
17
def plot_results_detection(model, image, prob, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax):
    '''
    Draw the detected table bounding boxes on top of *image* with matplotlib,
    expanding each box by the caller-supplied delta_* margins and labelling it
    with its predicted class name and confidence score.

    NOTE(review): `plt` is not defined anywhere in this module — the
    `matplotlib.pyplot` import at the top of the file is commented out — so
    calling this function raises NameError. It is itself only referenced from
    commented-out code in process_image; re-enable the matplotlib import before
    using it.

    The delta_* shifts must stay identical to the ones used in crop_tables,
    because this function only visualizes what crop_tables actually crops.
    '''
    plt.imshow(image)
    ax = plt.gca()

    for p, (xmin, ymin, xmax, ymax) in zip(prob, bboxes_scaled.tolist()):
        cl = p.argmax()  # most probable class index for this detection
        # Expand the detected box by the configured margins.
        xmin, ymin, xmax, ymax = xmin-delta_xmin, ymin-delta_ymin, xmax+delta_xmax, ymax+delta_ymax
        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,fill=False, color='red', linewidth=3))
        text = f'{model.config.id2label[cl.item()]}: {p[cl]:0.2f}'
        ax.text(xmin-20, ymin-50, text, fontsize=10,bbox=dict(facecolor='yellow', alpha=0.5))
    plt.axis('off')
28
+
29
def crop_tables(pil_img, prob, boxes, delta_xmin, delta_ymin, delta_xmax, delta_ymax):
    '''
    Crop one sub-image per detected table bounding box.

    crop_tables and plot_results_detection must apply the same co-ordinate
    shifts: one only plots, the other actually updates the co-ordinates.
    Returns a list of cropped PIL images, one per kept detection.
    '''
    crops = []
    for score, box in zip(prob, boxes.tolist()):
        x0, y0, x1, y1 = box
        # Grow the detected box by the configured margins before cropping.
        region = (x0 - delta_xmin, y0 - delta_ymin, x1 + delta_xmax, y1 + delta_ymax)
        crops.append(pil_img.crop(region))
    return crops
41
+
42
def add_padding(pil_img, top, right, bottom, left, color=(255,255,255)):
    '''
    Pad a PIL image with a solid border on all four sides.

    Part of TSR pre-processing: prevents table edges from being cut off by the
    structure-recognition model. Returns a new image; the input is untouched.
    '''
    orig_w, orig_h = pil_img.size
    padded = Image.new(pil_img.mode, (orig_w + left + right, orig_h + top + bottom), color)
    padded.paste(pil_img, (left, top))
    return padded
52
+
53
def table_detector(image, THRESHOLD_PROBA):
    '''
    Table detection using DEtect-object TRansformer pre-trained on 1 million tables.

    Returns a (model, kept_probabilities, kept_boxes) tuple where boxes are
    rescaled to the original image size and filtered by THRESHOLD_PROBA.
    '''
    extractor = DetrFeatureExtractor(do_resize=True, size=800, max_size=800)
    encoding = extractor(image, return_tensors="pt")

    model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

    with torch.no_grad():
        outputs = model(**encoding)

    # Softmax over classes, dropping the trailing "no object" class, then keep
    # only detections whose best class clears the threshold.
    class_probs = outputs.logits.softmax(-1)[0, :, :-1]
    keep_mask = class_probs.max(-1).values > THRESHOLD_PROBA

    # Rescale boxes from model space back to the original (height, width).
    target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0)
    postprocessed = extractor.post_process(outputs, target_sizes)
    kept_boxes = postprocessed[0]['boxes'][keep_mask]

    return (model, class_probs[keep_mask], kept_boxes)
74
+
75
+
76
def table_struct_recog(image, THRESHOLD_PROBA):
    '''
    Table structure recognition using DEtect-object TRansformer pre-trained on
    1 million tables.

    Same contract as table_detector, but resizes to 1000px and loads the
    structure-recognition checkpoint (rows/columns instead of whole tables).
    '''
    extractor = DetrFeatureExtractor(do_resize=True, size=1000, max_size=1000)
    encoding = extractor(image, return_tensors="pt")

    model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition")
    with torch.no_grad():
        outputs = model(**encoding)

    # Drop the "no object" class, then threshold on the best remaining class.
    class_probs = outputs.logits.softmax(-1)[0, :, :-1]
    keep_mask = class_probs.max(-1).values > THRESHOLD_PROBA

    # Rescale boxes back to the original (height, width) of the table crop.
    target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0)
    postprocessed = extractor.post_process(outputs, target_sizes)
    kept_boxes = postprocessed[0]['boxes'][keep_mask]

    return (model, class_probs[keep_mask], kept_boxes)
96
+
97
def generate_structure(model, pil_img, prob, boxes, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom):
    '''
    Split TSR detections into row and column bounding boxes.

    Args:
        model: detection model; only model.config.id2label is read here,
            to map class indices to class names.
        pil_img: table image; unused here (kept for interface compatibility —
            earlier revisions plotted the boxes on it).
        prob: per-detection class probability rows; argmax picks the class.
        boxes: per-detection (xmin, ymin, xmax, ymax) boxes, .tolist()-able.
        expand_rowcol_bbox_top / expand_rowcol_bbox_bottom: pixels by which each
            row/column box is grown upward / downward.
    Returns:
        (rows, cols): dicts keyed 'table row.<idx>' / 'table column.<idx>'
        (idx is the detection index, so keys are unique), each mapping to the
        expanded (xmin, ymin, xmax, ymax) tuple. Other detected classes
        (e.g. 'table projected row header') are ignored.
    '''
    rows = {}
    cols = {}
    for idx, (p, (xmin, ymin, xmax, ymax)) in enumerate(zip(prob, boxes.tolist())):
        class_text = model.config.id2label[p.argmax().item()]
        # Grow row/column boxes vertically so adjacent cells are not clipped.
        expanded = (xmin, ymin - expand_rowcol_bbox_top, xmax, ymax + expand_rowcol_bbox_bottom)
        if class_text == 'table row':
            rows['table row.' + str(idx)] = expanded
        if class_text == 'table column':
            cols['table column.' + str(idx)] = expanded

    return rows, cols
129
+
130
def sort_table_featuresv2(rows:dict, cols:dict):
    '''
    Order row boxes top-to-bottom (by ymin) and column boxes left-to-right
    (by xmin). Returns new dicts whose insertion order reflects the sort.
    Sometimes the header and the first row overlap, so downstream code relies
    on this ordering to tell them apart.
    '''
    ordered_rows = dict(sorted(rows.items(), key=lambda item: item[1][1]))
    ordered_cols = dict(sorted(cols.items(), key=lambda item: item[1][0]))
    return ordered_rows, ordered_cols
136
+
137
def individual_table_featuresv2(pil_img, rows:dict, cols:dict):
    '''
    Attach a cropped image to every row and column feature.

    Each dict value (xmin, ymin, xmax, ymax) is replaced in place by
    (xmin, ymin, xmax, ymax, cropped_image), where the crop is taken from
    pil_img over that bounding box. The (mutated) dicts are returned.
    '''
    for feature_map in (rows, cols):
        for name, (x0, y0, x1, y1) in list(feature_map.items()):
            feature_map[name] = x0, y0, x1, y1, pil_img.crop((x0, y0, x1, y1))
    return rows, cols
150
+
151
def object_to_cellsv2(master_row:dict, cols:dict, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom, padd_left):
    '''Removes redundant bbox for rows&columns and divides each row into cells from columns
    Args:
        master_row: row key -> (xmin, ymin, xmax, ymax, row image), expected
            sorted top-to-bottom; the first entry is treated as the header row.
        cols: column key -> (xmin, ymin, xmax, ymax, column image), expected
            sorted left-to-right.
        expand_rowcol_bbox_top, expand_rowcol_bbox_bottom: unused in the current
            code path (they fed the disabled dedup logic below).
        padd_left: left padding previously added around the table; column x
            co-ordinates are shifted back by it so they line up with each
            cropped row image.
    Returns:
        (cells_img, n_cols, n_body_rows):
        cells_img maps "<row key>.<row index>" -> list of per-cell images,
        left to right; n_body_rows is len(rows) - 1 (header row excluded).
    '''
    cells_img = {}
    header_idx = 0         # unused; kept from an earlier revision
    row_idx = 0
    previous_xmax_col = 0  # only used by the commented-out dedup loop below
    new_cols = {}
    new_master_row = {}
    previous_ymin_row = 0  # only used by the commented-out dedup loop below
    # Redundant-bbox filtering is currently disabled: raw detections pass through.
    new_cols = cols
    new_master_row = master_row
    ## Below 2 for loops remove redundant bounding boxes ###
    # for k_col, v_col in cols.items():
    #     xmin_col, _, xmax_col, _, col_img = v_col
    #     if (np.isclose(previous_xmax_col, xmax_col, atol=5)) or (xmin_col >= xmax_col):
    #         print('Found a column with double bbox')
    #         continue
    #     previous_xmax_col = xmax_col
    #     new_cols[k_col] = v_col

    # for k_row, v_row in master_row.items():
    #     _, ymin_row, _, ymax_row, row_img = v_row
    #     if (np.isclose(previous_ymin_row, ymin_row, atol=5)) or (ymin_row >= ymax_row):
    #         print('Found a row with double bbox')
    #         continue
    #     previous_ymin_row = ymin_row
    #     new_master_row[k_row] = v_row
    ######################################################
    for k_row, v_row in new_master_row.items():

        _, _, _, _, row_img = v_row
        xmax, ymax = row_img.size
        # Each cell crop spans the full height of its row image.
        xa, ya, xb, yb = 0, 0, 0, ymax
        row_img_list = []
        # plt.imshow(row_img)
        # st.pyplot()
        for idx, kv in enumerate(new_cols.items()):
            k_col, v_col = kv
            xmin_col, _, xmax_col, _, col_img = v_col
            # Undo the left padding (plus a 10px margin on the left edge) so
            # column x co-ordinates line up with the un-padded row image.
            xmin_col, xmax_col = xmin_col - padd_left - 10, xmax_col - padd_left
            # plt.imshow(col_img)
            # st.pyplot()
            # xa + 3 : to remove borders on the left side of the cropped cell
            # yb = 3: to remove row information from the above row of the cropped cell
            # xb - 3: to remove borders on the right side of the cropped cell
            xa = xmin_col
            xb = xmax_col
            if idx == 0:
                # First column: extend the cell to the row's left edge.
                xa = 0
            if idx == len(new_cols)-1:
                # Last column: extend the cell to the row's right edge.
                xb = xmax
            xa, ya, xb, yb = xa, ya, xb, yb  # no-op; left over from an earlier revision

            row_img_cropped = row_img.crop((xa, ya, xb, yb))
            row_img_list.append(row_img_cropped)

        cells_img[k_row+'.'+str(row_idx)] = row_img_list
        row_idx += 1

    return cells_img, len(new_cols), len(new_master_row)-1
215
+
216
def pytess(cell_pil_img):
    '''
    OCR a single table-cell image with Tesseract and return the recognized
    text tokens joined by spaces (leading/trailing whitespace stripped).
    '''
    ocr_config = '-c tessedit_char_blacklist=œ˜â€œï¬â™Ã©œ¢!|”?«“¥ --psm 6 preserve_interword_spaces'
    ocr_data = pytesseract.image_to_data(cell_pil_img, output_type=Output.DICT, config=ocr_config)
    return ' '.join(ocr_data['text']).strip()
218
+
219
def uniquify(seq, suffs=None):
    """Make all the items unique by adding a suffix (1, 2, etc).

    Credit: https://stackoverflow.com/questions/30650474/python-rename-duplicates-in-list-with-progressive-numbers-without-sorting-list
    `seq` is a mutable sequence of strings; it is modified in place and returned.
    `suffs` is an optional alternative suffix iterable.

    Fix: the original default `suffs=count(1)` was evaluated once at definition
    time, so the shared counter kept advancing across calls (second call would
    suffix from where the first stopped). A fresh counter is now created per call.
    """
    if suffs is None:
        suffs = count(1)
    not_unique = [k for k, v in Counter(seq).items() if v > 1]

    # One independent suffix iterator per duplicated value.
    suff_gens = dict(zip(not_unique, tee(suffs, len(not_unique))))
    for idx, s in enumerate(seq):
        try:
            suffix = str(next(suff_gens[s]))
        except KeyError:
            # Unique value: leave it unchanged.
            continue
        else:
            seq[idx] += suffix

    return seq
237
+
238
def clean_dataframe(df):
    '''
    Remove irrelevant symbols that appear with tesseractOCR: the characters
    ' " [ ] { } | are stripped from every cell, in place, and df is returned.

    Fix: the original chained `.str.replace(..., regex=True)` calls were broken —
    the pattern '[' is an invalid regex (raises re.error: unterminated character
    set) and the pattern '|' matches only the empty string, so pipes were never
    removed. A single character class handles all seven symbols correctly.
    '''
    for col in df.columns:
        df[col] = df[col].str.replace(r"""['"\[\]{}|]""", '', regex=True)
    return df
254
+
255
def create_dataframe(cells_pytess_result: list, max_cols: int, max_rows: int, csv_path):
    '''Create dataframe using list of cell values of the table, also checks for valid header of dataframe
    Args:
        cells_pytess_result: list of strings, each element representing a cell in a
            table, in row-major order; the first max_cols entries are the header row.
        max_cols, max_rows: number of columns and body rows.
        csv_path: kept for interface compatibility; CSV persistence is disabled.
    Returns:
        dataframe: final dataframe after all pre-processing.
    '''
    headers = cells_pytess_result[:max_cols]
    # Duplicate header names break DataFrame column indexing, so de-duplicate
    # them with letter suffixes (' a', ' b', ...).
    new_headers = uniquify(headers, (f' {x!s}' for x in string.ascii_lowercase))

    cells_list = cells_pytess_result[max_cols:]
    df = pd.DataFrame("", index=range(0, max_rows), columns=new_headers)

    # Fill the body cells row by row, coercing everything to str.
    cell_idx = 0
    for nrows in range(max_rows):
        for ncols in range(max_cols):
            df.iat[nrows, ncols] = str(cells_list[cell_idx])
            cell_idx += 1

    # An earlier revision demoted the header to a body row when all headers were
    # empty or suspiciously short (counting uniquify suffixes and header widths);
    # that heuristic is disabled, and its dead bookkeeping has been removed.

    df = clean_dataframe(df)
    # df.to_csv(csv_path)

    return df
293
+
294
def process_image(image, TD_THRESHOLD, TSR_THRESHOLD, padd_top, padd_left, padd_bottom, padd_right, delta_xmin = 0, delta_ymin = 0, delta_xmax = 0, delta_ymax = 0, expand_rowcol_bbox_top = 0, expand_rowcol_bbox_bottom = 0):
    '''
    Full table-extraction pipeline, used as the Gradio callback.

    Steps: detect tables (TD) -> crop each table -> pad it -> recognize its
    row/column structure (TSR) -> slice rows into cells -> OCR each cell ->
    assemble a pandas DataFrame per table.

    Args:
        image: input page image (PIL); converted to RGB first.
        TD_THRESHOLD / TSR_THRESHOLD: confidence thresholds for table
            detection and table structure recognition.
        padd_*: padding added around each cropped table before TSR.
        delta_*: margins by which detected table boxes are expanded when cropping.
        expand_rowcol_bbox_top / _bottom: vertical expansion of row/column boxes.
    Returns:
        JSON string of the FIRST table's dataframe.

    NOTE(review): every detected table is processed and appended to `result`,
    but only result[0] is serialized and returned — later tables are discarded.
    Also raises IndexError when no table clears TD_THRESHOLD. Confirm whether
    multi-table output is wanted before changing this.
    '''
    image = image.convert('RGB')
    model, probas, bboxes_scaled = table_detector(image, THRESHOLD_PROBA=TD_THRESHOLD)
    # plot_results_detection(model, image, probas, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax)
    cropped_img_list = crop_tables(image, probas, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax)

    result = []
    for idx, unpadded_table in enumerate(cropped_img_list):
        # Pad so TSR does not clip rows/columns at the table edges.
        table = add_padding(unpadded_table, padd_top, padd_right, padd_bottom, padd_left)
        model, probas, bboxes_scaled = table_struct_recog(table, THRESHOLD_PROBA=TSR_THRESHOLD)
        rows, cols = generate_structure(model, table, probas, bboxes_scaled, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom)
        rows, cols = sort_table_featuresv2(rows, cols)
        master_row, cols = individual_table_featuresv2(table, rows, cols)
        cells_img, max_cols, max_rows = object_to_cellsv2(master_row, cols, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom, padd_left)
        # OCR every cell in row-major order (header first).
        sequential_cell_img_list = []
        for k, img_list in cells_img.items():
            for img in img_list:
                sequential_cell_img_list.append(pytess(img))

        # csv_path is only passed through; create_dataframe's CSV write is disabled.
        csv_path = '/content/sample_data/table_' + str(idx)
        df = create_dataframe(sequential_cell_img_list, max_cols, max_rows, csv_path)
        result.append(df)
    res = result[0].to_json()
    return res
318
+
319
+
320
# ---- Gradio demo wiring (module-level script section) ----
# Page metadata shown in the UI.
title = "Interactive demo OCR: microsoft - table-transformer-detection + tesseract"
description = "Demo for microsoft - table-transformer-detection + tesseract"
article = "<p style='text-align: center'></p>"
examples =[["image_0.png"]]

# Inputs map positionally onto process_image: the page image, the TD and TSR
# confidence thresholds, then the four paddings (top, left, bottom, right)
# applied around each cropped table before structure recognition.
iface = gr.Interface(fn=process_image,
                     inputs=[gr.Image(type="pil"), gr.Slider(0, 1, value=0.9), gr.Slider(0, 1, value=0.8), gr.Slider(0, 200, value=100), gr.Slider(0, 200, value=100), gr.Slider(0, 200, value=100), gr.Slider(0, 200, value=100)],
                     outputs="text",
                     title=title,
                     description=description,
                     article=article,
                     examples=examples)
iface.launch(debug=True)
image_0.png ADDED
packages.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ ffmpeg
2
+ libsm6
3
+ libxext6
4
+ libgl1
5
+ tesseract-ocr-eng
6
+ python3-opencv
requirements.txt ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Cython==0.29.14
2
+ dask==2021.3.1
3
+ datasets==1.18.3
4
+ Flask==2.0.1
5
+ GitPython==3.1.26
6
+ imutils==0.5.4
7
+ multiprocess==0.70.12.2
8
+ numba==0.54.1
9
+ numexpr==2.7.3
10
+ numpy==1.20.3
11
+ oauthlib==3.1.0
12
+ opencv-contrib-python==4.6.0.66
13
+ openpyxl==3.0.7
14
+ Pillow==9.0.1
15
+ plotly==4.14.3
16
+ ply==3.11
17
+ protobuf==3.14.0
18
+ psutil==5.8.0
19
+ pyarrow==7.0.0
20
+ pydantic==1.7.3
21
+ pydeck==0.7.1
22
+ PyDictionary==2.0.1
23
+ pydot==1.4.2
24
+ pymongo==4.0.2
25
+ Pympler==1.0.1
26
+ PyMuPDF==1.20.2
27
+ pyperclip==1.8.2
28
+ pyppeteer==0.2.5
29
+ pyquery==1.4.3
30
+ pyreadline3==3.3
31
+ pytesseract==0.3.10
32
+ pytz-deprecation-shim==0.1.0.post0
33
+ PyWavelets==1.1.1
34
+ PyYAML==5.4.1
35
+ scipy==1.4.1
36
+ seaborn==0.11.1
37
+ sklearn==0.0
38
+ streamlit==1.5.1
39
+ timm==0.6.7
40
+ tokenizers==0.12.1
41
+ toml==0.10.2
42
+ toolz==0.11.1
43
+ torch==1.10.0
44
+ torchvision==0.11.1
45
+ git+https://github.com/huggingface/transformers.git
46
+ #-e git+https://github.com/nielsrogge/transformers.git@d34f7e6ffbb911d39465173ef2b35ba147ef58a9#egg=transformers
47
+ urllib3==1.26.7