dolfim-ibm committed
Commit ef175eb
0 Parent(s):

Add icdar2023-doclaynet competition


Signed-off-by: Michele Dolfi <[email protected]>

Files changed (4)
  1. .gitattributes +54 -0
  2. .gitignore +304 -0
  3. README.md +177 -0
  4. icdar2023-doclaynet.py +199 -0
.gitattributes ADDED
@@ -0,0 +1,54 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,304 @@
# Created by https://www.gitignore.io/api/linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv
# Edit at https://www.gitignore.io/?templates=linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv

### Linux ###
*~

# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*

# KDE directory preferences
.directory

# Linux trash folder which might appear on any partition or disk
.Trash-*

# .nfs files are created when an open file is removed but is still being accessed
.nfs*

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon

# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

### PyCharm+all ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

### PyCharm+all Patch ###
# Ignores the whole .idea folder and all .iml files
# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360

.idea/

# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023

*.iml
modules.xml
.idea/misc.xml
*.ipr

# Sonarlint plugin
.idea/sonarlint

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# Mr Developer
.mr.developer.cfg
.project
.pydevproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

### VirtualEnv ###
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
pyvenv.cfg
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
pip-selfcheck.json

### VisualStudioCode ###
.vscode/*

### VisualStudioCode Patch ###
# Ignore all local history of files
.history

### Windows ###
# Windows thumbnail cache files
Thumbs.db
Thumbs.db:encryptable
ehthumbs.db
ehthumbs_vista.db

# Dump file
*.stackdump

# Folder config file
[Dd]esktop.ini

# Recycle Bin used on file shares
$RECYCLE.BIN/

# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp

# Windows shortcuts
*.lnk

# End of https://www.gitignore.io/api/linux,macos,python,windows,pycharm+all,visualstudiocode,virtualenv


# Created by https://www.toptal.com/developers/gitignore/api/jupyternotebooks
# Edit at https://www.toptal.com/developers/gitignore?templates=jupyternotebooks

### JupyterNotebooks ###
# gitignore template for Jupyter Notebooks
# website: http://jupyter.org/

.ipynb_checkpoints
*/.ipynb_checkpoints/*

# IPython
profile_default/
ipython_config.py

# Remove previous ipynb_checkpoints
# git rm -r .ipynb_checkpoints/

# End of https://www.toptal.com/developers/gitignore/api/jupyternotebooks
README.md ADDED
@@ -0,0 +1,177 @@
---
annotations_creators:
- crowdsourced
license: apache-2.0
pretty_name: ICDAR 2023 Competition on Robust Layout Segmentation in Corporate Documents
size_categories:
- 10K<n<100K
tags:
- layout-segmentation
- COCO
- document-understanding
- PDF
- icdar
- competition
task_categories:
- object-detection
- image-segmentation
task_ids:
- instance-segmentation
---

# Dataset Card for ICDAR 2023 Competition on Robust Layout Segmentation in Corporate Documents

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
- [Dataset Structure](#dataset-structure)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Annotations](#annotations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** https://ds4sd.github.io/icdar23-doclaynet/
- **Leaderboard:** https://eval.ai/web/challenges/challenge-page/1923/leaderboard
- **Point of Contact:**

### Dataset Summary

This is the official competition dataset for the _ICDAR 2023 Competition on Robust Layout Segmentation in Corporate Documents_.
You are invited to advance the research in accurately segmenting the layout of a broad range of document styles and domains. To achieve this, we challenge you to develop a model that correctly identifies and segments the layout components in document pages as bounding boxes, on a competition dataset we provide.

For more information, see https://ds4sd.github.io/icdar23-doclaynet/.


#### Training resources

In our recently published [DocLayNet](https://github.com/DS4SD/DocLayNet) dataset, which contains 80k+ human-annotated document pages with diverse layouts, we define 11 classes of layout components (paragraphs, headings, tables, figures, lists, mathematical formulas, and several more). We encourage you to use this dataset for training and internal evaluation of your solution.
Further, you may consider any other publicly available document layout dataset for training (e.g. [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet), [DocBank](https://github.com/doc-analysis/DocBank)).


### Supported Tasks and Leaderboards

This is the official dataset of the ICDAR 2023 Competition on Robust Layout Segmentation in Corporate Documents.
For more information, see https://ds4sd.github.io/icdar23-doclaynet/.

#### Evaluation Metric

Your submissions on our [EvalAI challenge](https://eval.ai/web/challenges/challenge-page/1923/) will be evaluated using the Mean Average Precision (mAP) @ Intersection-over-Union (IoU) [0.50:0.95] metric, as used in the [COCO](https://cocodataset.org/) object detection competition. In detail, we calculate the average precision for a sequence of IoU thresholds ranging from 0.50 to 0.95 with a step size of 0.05. This metric is computed for every document category in the competition dataset. The mean of the average precisions over all categories is then reported as the final score.
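For a local sanity check of this metric, for example on the DocLayNet validation data or on your own held-out pages (the competition ground truth itself is withheld), a minimal sketch based on `pycocotools` could look as follows; the file paths are placeholders:

```python
# Local-evaluation sketch: assumes pycocotools is installed and that you have a
# COCO ground-truth file plus your detections in COCO results format.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("coco.json")                    # ground-truth annotations (placeholder path)
coco_dt = coco_gt.loadRes("predictions.json")  # detections in COCO results format

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # the first reported value is AP @ IoU[0.50:0.95], the leaderboard metric
```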
#### Submission

We ask you to upload a JSON file in [COCO results format](https://cocodataset.org/#format-results) [here](https://eval.ai/web/challenges/challenge-page/1923/submission), with complete layout bounding-boxes for each page sample. The given `image_id`s must correspond to the ones we publish with the competition dataset's `coco.json`. For each submission you make, the computed mAP will be provided for each category as well as combined. The [leaderboard](https://eval.ai/web/challenges/challenge-page/1923/leaderboard/4545/Total) will be ranked based on the overall mAP.
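As an illustration only, a results file could be assembled along these lines; `predict` is a hypothetical stand-in for your own detector, and the category ids must follow the ones defined in the competition `coco.json`:

```python
# Sketch of writing predictions in COCO results format; predict() is a hypothetical stub.
import json

def predict(image_record):
    # Hypothetical stand-in for your detector: returns (bbox, category_id, score) tuples.
    return [([10.0, 10.0, 100.0, 30.0], 9, 0.87)]

with open("coco.json") as f:  # the competition coco.json with the published image records
    images = json.load(f)["images"]

results = []
for image in images:
    for bbox, category_id, score in predict(image):
        results.append({
            "image_id": image["id"],     # must match the published image ids
            "category_id": category_id,  # must follow the competition category ids
            "bbox": bbox,                # [x, y, width, height] in pixels
            "score": float(score),
        })

with open("predictions.json", "w") as f:
    json.dump(results, f)
```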

## Dataset Structure

### Data Fields

DocLayNet provides four types of data assets:

1. PNG images of all pages, resized to square `1025 x 1025px`
2. ~~Bounding-box annotations in COCO format for each PNG image~~ (annotations will be released at the end of the competition)
3. Extra: Single-page PDF files matching each PNG image
4. Extra: JSON file matching each PDF page, which provides the digital text cells with coordinates and content

Each COCO image record is defined as in this example:

```js
...
{
  "id": 1,
  "width": 1025,
  "height": 1025,
  "file_name": "132a855ee8b23533d8ae69af0049c038171a06ddfcac892c3c6d7e6b4091c642.png",

  // Custom fields:
  "doc_category": "financial_reports", // high-level document category
  "collection": "ann_reports_00_04_fancy", // sub-collection name
  "doc_name": "NASDAQ_FFIN_2002.pdf", // original document filename
  "page_no": 9, // page number in original document
  "precedence": 0, // annotation order, non-zero in case of redundant double- or triple-annotation
},
...
```

The `doc_category` field uses one of the following constants:

```
reports,
manuals,
patents,
others
```


### Data Splits

The dataset provides two splits (see the loading sketch below):
- `dev`, which is extracted from the [DocLayNet](https://github.com/DS4SD/DocLayNet) dataset
- `test`, which contains new data for the competition

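A minimal loading sketch with the Hugging Face `datasets` library; the repository id `ds4sd/icdar2023-doclaynet` is an assumption and may need to be adjusted to wherever this dataset is hosted:

```python
# Loading sketch; the repo id below is an assumption, adjust it to the actual location.
from datasets import load_dataset

dataset = load_dataset("ds4sd/icdar2023-doclaynet")

print(dataset)                    # expected splits: "validation" (the dev data) and "test"
sample = dataset["validation"][0]
print(sample["image_id"], sample["width"], sample["height"])
sample["image"].save("page.png")  # the page image is loaded as a PIL image
```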
## Dataset Creation

### Annotations

#### Annotation process

The labeling guidelines used for the training of the annotation experts are available at [DocLayNet_Labeling_Guide_Public.pdf](https://raw.githubusercontent.com/DS4SD/DocLayNet/main/assets/DocLayNet_Labeling_Guide_Public.pdf).


#### Who are the annotators?

Annotations are crowdsourced.


## Additional Information

### Dataset Curators

The dataset is curated by the [Deep Search team](https://ds4sd.github.io/) at IBM Research.
You can contact us at [[email protected]](mailto:[email protected]).

Curators:
- Christoph Auer, [@cau-git](https://github.com/cau-git)
- Michele Dolfi, [@dolfim-ibm](https://github.com/dolfim-ibm)
- Ahmed Nassar, [@nassarofficial](https://github.com/nassarofficial)
- Peter Staar, [@PeterStaar-IBM](https://github.com/PeterStaar-IBM)

### Licensing Information

License: [CDLA-Permissive-1.0](https://cdla.io/permissive-1-0/)


### Citation Information

A publication will be submitted at the end of the competition. In the meantime, we suggest citing our original dataset paper.

```bib
@article{doclaynet2022,
  title = {DocLayNet: A Large Human-Annotated Dataset for Document-Layout Segmentation},
  doi = {10.1145/3534678.3539043},
  url = {https://doi.org/10.1145/3534678.3539043},
  author = {Pfitzmann, Birgit and Auer, Christoph and Dolfi, Michele and Nassar, Ahmed S and Staar, Peter W J},
  year = {2022},
  isbn = {9781450393850},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  booktitle = {Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining},
  pages = {3743–3751},
  numpages = {9},
  location = {Washington DC, USA},
  series = {KDD '22}
}
```

### Contributions

Thanks to [@dolfim-ibm](https://github.com/dolfim-ibm) and [@cau-git](https://github.com/cau-git) for adding this dataset.
icdar2023-doclaynet.py ADDED
@@ -0,0 +1,199 @@
"""
Inspired by
https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py
"""

import json
import os
import datasets
import collections


class COCOBuilderConfig(datasets.BuilderConfig):
    def __init__(self, name, splits, **kwargs):
        super().__init__(name, **kwargs)
        self.splits = splits


# Add BibTeX citation
# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@article{doclaynet2022,
  title = {DocLayNet: A Large Human-Annotated Dataset for Document-Layout Analysis},
  doi = {10.1145/3534678.3539043},
  url = {https://arxiv.org/abs/2206.01062},
  author = {Pfitzmann, Birgit and Auer, Christoph and Dolfi, Michele and Nassar, Ahmed S and Staar, Peter W J},
  year = {2022}
}
"""

# Add description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
Dataset for the ICDAR 2023 Competition on Robust Layout Segmentation in Corporate Documents.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "https://ds4sd.github.io/icdar23-doclaynet/"

# Add the licence for the dataset here if you can find it
_LICENSE = "apache-2.0"

# Add links to the official dataset URLs here
# The HuggingFace datasets library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)

_URLs = {
    "dev": "https://ds4sd-icdar23-doclaynet-competition.s3.eu-de.cloud-object-storage.appdomain.cloud/dev-dataset-public.zip",
    "test": "https://ds4sd-icdar23-doclaynet-competition.s3.eu-de.cloud-object-storage.appdomain.cloud/competition-dataset-public.zip",
}


# The name of the dataset class usually matches the script name, in CamelCase instead of snake_case
class COCODataset(datasets.GeneratorBasedBuilder):
    """Dataset script for the ICDAR 2023 DocLayNet competition data, distributed in COCO format."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = COCOBuilderConfig
    BUILDER_CONFIGS = [
        COCOBuilderConfig(name="2023.01", splits=["dev", "test"]),
    ]
    DEFAULT_CONFIG_NAME = "2023.01"

    def _info(self):
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                # Custom fields
                # "doc_category": datasets.Value(
                #     "string"
                # ),  # high-level document category
                # "collection": datasets.Value("string"),  # sub-collection name
                # "doc_name": datasets.Value("string"),  # original document filename
                # "page_no": datasets.Value("int64"),  # page number in original document
            }
        )
        object_dict = {
            "category_id": datasets.ClassLabel(
                names=[
                    "Caption",
                    "Footnote",
                    "Formula",
                    "List-item",
                    "Page-footer",
                    "Page-header",
                    "Picture",
                    "Section-header",
                    "Table",
                    "Text",
                    "Title",
                ]
            ),
            "image_id": datasets.Value("string"),
            "id": datasets.Value("int64"),
            "area": datasets.Value("int64"),
            "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
            "segmentation": [[datasets.Value("float32")]],
            "iscrowd": datasets.Value("bool"),
            "precedence": datasets.Value("int32"),
        }
        # features["objects"] = [object_dict]

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # Here we define them above because they are different between the two configurations
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        archive_path = dl_manager.download_and_extract(_URLs)
        splits = []
        for split in self.config.splits:
            if split in ["val", "valid", "validation", "dev"]:
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(
                            archive_path["dev"], "coco.json"
                        ),
                        "image_dir": os.path.join(archive_path["dev"], "PNG"),
                        "split": "val",
                    },
                )
            elif split == "test":
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(
                            archive_path["test"], "coco.json"
                        ),
                        "image_dir": os.path.join(archive_path["test"], "PNG"),
                        "split": "test",
                    },
                )
            else:
                continue

            splits.append(dataset)
        return splits

    def _generate_examples(
        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
        self,
        json_path,
        image_dir,
        split,
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        def _image_info_to_example(image_info, image_dir):
            image = image_info["file_name"]
            return {
                "image_id": image_info["id"],
                "image": os.path.join(image_dir, image),
                "width": image_info["width"],
                "height": image_info["height"],
                # "doc_category": image_info["doc_category"],
                # "collection": image_info["collection"],
                # "doc_name": image_info["doc_name"],
                # "page_no": image_info["page_no"],
            }

        with open(json_path, encoding="utf8") as f:
            annotation_data = json.load(f)
            images = annotation_data["images"]
            # annotations = annotation_data["annotations"]
            # image_id_to_annotations = collections.defaultdict(list)
            # for annotation in annotations:
            #     image_id_to_annotations[annotation["image_id"]].append(annotation)

        for idx, image_info in enumerate(images):
            example = _image_info_to_example(image_info, image_dir)
            # annotations = image_id_to_annotations[image_info["id"]]
            # objects = []
            # for annotation in annotations:
            #     category_id = annotation["category_id"]  # Zero based counting
            #     if category_id != -1:
            #         category_id = category_id - 1
            #     annotation["category_id"] = category_id
            #     objects.append(annotation)
            # example["objects"] = objects
            yield idx, example