Datasets: pierreguillou/DocLayNet-large

pierreguillou committed · Commit e6b0f00 · Parent(s): 755d9f2

Update DocLayNet-large.py

Files changed: DocLayNet-large.py (+74, -51)

DocLayNet-large.py CHANGED
@@ -56,6 +56,13 @@ _LICENSE = "https://github.com/DS4SD/DocLayNet/blob/main/LICENSE"
 #     "second_domain": "https://huggingface.co/great-new-dataset-second_domain.zip",
 # }
 
+_URLs = {
+    "part_dataset_0": "https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_0.zip",
+    "part_dataset_1": "https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_1.zip",
+    "part_dataset_2": "https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_2.zip",
+    "part_dataset_3": "https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_3.zip",
+}
+
 # functions
 def load_image(image_path):
     image = Image.open(image_path).convert("RGB")
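Only the first line of the load_image helper appears in this hunk. Judging from how the generator later calls it (image, size = load_image(image_path)), a minimal stand-in would look like the sketch below; the full body is not part of this commit, so returning the size tuple is an assumption.

from PIL import Image

def load_image(image_path):
    # Open the page image and normalize it to RGB, as the script does.
    image = Image.open(image_path).convert("RGB")
    # Assumption: the helper also returns the image size, because the
    # generator later unpacks `image, size = load_image(image_path)`.
    width, height = image.size
    return image, (width, height)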
@@ -156,8 +163,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
 
-
-
+        archive_path = dl_manager.download_and_extract(_URLs)
+
+        downloaded_file = dl_manager.download_and_extract(archive_path["part_dataset_0"])
+        downloaded_file = dl_manager.download_and_extract("part_dataset_0")
         # downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_2.zip")
         # downloaded_file = dl_manager.download_and_extract("https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_3.zip")
 
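The two comment lines above restate how datasets.DownloadManager.download_and_extract behaves: given a dict, it returns the same dict with each URL replaced by the path of the downloaded and extracted archive. A standalone sketch of that behavior, using one of the part URLs from this file (run outside the loading script; the resulting cache path is illustrative):

from datasets import DownloadManager

# Download and extract a single part of the dataset; with a dict argument,
# download_and_extract returns a dict with the same keys.
urls = {
    "part_dataset_0": "https://huggingface.co/datasets/pierreguillou/DocLayNet-large/resolve/main/data/part_dataset_0.zip",
}
dl_manager = DownloadManager()
archive_path = dl_manager.download_and_extract(urls)

# archive_path["part_dataset_0"] now points to a local cache folder, which is
# why the split generators below can build paths like this one.
train_dir = f"{archive_path['part_dataset_0']}/large_dataset/train/"
print(train_dir)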
@@ -166,7 +175,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-
+                    "filepath_0": f"{archive_path['part_dataset_0']}/large_dataset/train/",
+                    "filepath_1": f"{archive_path['part_dataset_1']}/large_dataset/train/",
+                    "filepath_2": f"{archive_path['part_dataset_2']}/large_dataset/train/",
+                    "filepath_3": f"{archive_path['part_dataset_3']}/large_dataset/train/",
                     "split": "train",
                 },
             ),
@@ -174,7 +186,10 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-
+                    "filepath_0": f"{archive_path['part_dataset_0']}/large_dataset/val/",
+                    "filepath_1": f"{archive_path['part_dataset_1']}/large_dataset/val/",
+                    "filepath_2": f"{archive_path['part_dataset_2']}/large_dataset/val/",
+                    "filepath_3": f"{archive_path['part_dataset_3']}/large_dataset/val/",
                     "split": "dev",
                 },
             ),
@@ -182,58 +197,66 @@ class DocLayNet(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-
+                    "filepath_0": f"{archive_path['part_dataset_0']}/large_dataset/test/",
+                    "filepath_1": f"{archive_path['part_dataset_1']}/large_dataset/test/",
+                    "filepath_2": f"{archive_path['part_dataset_2']}/large_dataset/test/",
+                    "filepath_3": f"{archive_path['part_dataset_3']}/large_dataset/test/",
                     "split": "test"
                 },
             ),
         ]
 
-    def _generate_examples(self,
+    def _generate_examples(self, filepath_0, filepath_1, filepath_2, filepath_3, split):
+        filepath = (filepath_0, filepath_1, filepath_2, filepath_3)
         logger.info("⏳ Generating examples from = %s", filepath)
+        ann_dirs = [os.path.join(filepath_0, "annotations"), os.path.join(filepath_1, "annotations"), os.path.join(filepath_2, "annotations"), os.path.join(filepath_3, "annotations")]
+        img_dirs = [os.path.join(filepath_0, "images"), os.path.join(filepath_1, "images"), os.path.join(filepath_2, "images"), os.path.join(filepath_3, "images")]
+        pdf_dirs = [os.path.join(filepath_0, "pdfs"), os.path.join(filepath_1, "pdfs"), os.path.join(filepath_2, "pdfs"), os.path.join(filepath_3, "pdfs")]
+
+        for ann_dir, img_dir, pdf_dir in zip(ann_dirs, img_dirs, pdf_dirs):
+
+            ann_listdir = os.listdir(ann_dir)
 
+            for guid, file in enumerate(ann_listdir):
+                texts = []
+                bboxes_block = []
+                bboxes_line = []
+                categories = []
+
+                # get json
+                file_path = os.path.join(ann_dir, file)
+                with open(file_path, "r", encoding="utf8") as f:
+                    data = json.load(f)
+
+                # get image
+                image_path = os.path.join(img_dir, file)
+                image_path = image_path.replace("json", "png")
+                image, size = load_image(image_path)
 
+                # get pdf
+                pdf_path = os.path.join(pdf_dir, file)
+                pdf_path = pdf_path.replace("json", "pdf")
+                with open(pdf_path, "rb") as pdf_file:
+                    pdf_bytes = pdf_file.read()
+                pdf_encoded_string = base64.b64encode(pdf_bytes)
+
+                for item in data["form"]:
+                    text_example, category_example, bbox_block_example, bbox_line_example = item["text"], item["category"], item["box"], item["box_line"]
+                    texts.append(text_example)
+                    categories.append(category_example)
+                    bboxes_block.append(bbox_block_example)
+                    bboxes_line.append(bbox_line_example)
+
+                # get all metadata
+                page_hash = data["metadata"]["page_hash"]
+                original_filename = data["metadata"]["original_filename"]
+                page_no = data["metadata"]["page_no"]
+                num_pages = data["metadata"]["num_pages"]
+                original_width = data["metadata"]["original_width"]
+                original_height = data["metadata"]["original_height"]
+                coco_width = data["metadata"]["coco_width"]
+                coco_height = data["metadata"]["coco_height"]
+                collection = data["metadata"]["collection"]
+                doc_category = data["metadata"]["doc_category"]
+
+                yield guid, {"id": str(guid), "texts": texts, "bboxes_block": bboxes_block, "bboxes_line": bboxes_line, "categories": categories, "image": image, "pdf": pdf_encoded_string, "page_hash": page_hash, "original_filename": original_filename, "page_no": page_no, "num_pages": num_pages, "original_width": original_width, "original_height": original_height, "coco_width": coco_width, "coco_height": coco_height, "collection": collection, "doc_category": doc_category}
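Since _generate_examples base64-encodes each page's PDF into the pdf field, a consumer has to decode it again. A hedged usage sketch, assuming the dataset is loaded through this script with datasets.load_dataset (recent datasets versions may also require trust_remote_code=True):

import base64
from datasets import load_dataset

# Load one split produced by the script above.
dataset = load_dataset("pierreguillou/DocLayNet-large", split="test")

example = dataset[0]
# Undo the base64.b64encode applied in _generate_examples to recover the raw PDF bytes.
pdf_bytes = base64.b64decode(example["pdf"])
with open(f"{example['page_hash']}.pdf", "wb") as f:
    f.write(pdf_bytes)

print(example["doc_category"], example["original_filename"], example["page_no"])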