Datasets:

ArXiv:
License:
holylovenia committed on
Commit
4e4833b
1 Parent(s): 7971495

Upload bloom_vist.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. bloom_vist.py +262 -0
bloom_vist.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SEA Crowd Data Loader for Bloom VIST.
3
+ """
4
+ from typing import Dict, List, Tuple
5
+
6
+ import datasets
7
+ from datasets.download.download_manager import DownloadManager
8
+
9
+ from seacrowd.utils import schemas
10
+ from seacrowd.utils.configs import SEACrowdConfig
11
+ from seacrowd.utils.constants import TASK_TO_SCHEMA, Licenses, Tasks
12
+
13
+ _CITATION = r"""
14
+ @inproceedings{leong-etal-2022-bloom,
15
+ title = "Bloom Library: Multimodal Datasets in 300+ Languages for a Variety of Downstream Tasks",
16
+ author = "Leong, Colin and
17
+ Nemecek, Joshua and
18
+ Mansdorfer, Jacob and
19
+ Filighera, Anna and
20
+ Owodunni, Abraham and
21
+ Whitenack, Daniel",
22
+ editor = "Goldberg, Yoav and
23
+ Kozareva, Zornitsa and
24
+ Zhang, Yue",
25
+ booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
26
+ month = dec,
27
+ year = "2022",
28
+ address = "Abu Dhabi, United Arab Emirates",
29
+ publisher = "Association for Computational Linguistics",
30
+ url = "https://aclanthology.org/2022.emnlp-main.590",
31
+ doi = "10.18653/v1/2022.emnlp-main.590",
32
+ pages = "8608--8621",
33
+ }
34
+ """
35
+
36
+ logger = datasets.logging.get_logger(__name__)
37
+
38
+ # this config is created for SEACrowd Dataloader
39
+ _LANG_CONFIG = {
40
+ "abc": "Ambala Ayta",
41
+ "ahk": "Akha",
42
+ "bfn": "Bunak",
43
+ "bjn": "Banjar",
44
+ "bkx": "Baikeno",
45
+ "brb": "Brao",
46
+ "brv": "Western Bru",
47
+ "bya": "Batak",
48
+ "bzi": "Bisu",
49
+ "ceb": "Cebuano",
50
+ "cgc": "Kagayanen",
51
+ "cmo": "Central Mnong",
52
+ "ddg": "Fataluku",
53
+ "dmg": "Upper Kinabatangan",
54
+ "dnw": "Western Dani",
55
+ "dtp": "Kadazan Dusun",
56
+ "enc": "En",
57
+ "fil": "Filipino",
58
+ "hil": "Hiligaynon",
59
+ "hro": "Haroi",
60
+ "idt": "Idaté",
61
+ "ilo": "Ilocano",
62
+ "ind": "Indonesian",
63
+ "jra": "Jarai",
64
+ "kak": "Kalanguya",
65
+ "khb": "Lü",
66
+ "khm": "Khmer",
67
+ "kqr": "Kimaragang",
68
+ "krr": "Krung",
69
+ "ksw": "S’gaw Karen",
70
+ "lhu": "Lahu",
71
+ "lsi": "Lacid",
72
+ "lwl": "Eastern Lawa",
73
+ "mdr": "Mandar",
74
+ "mgm": "Mambae",
75
+ "mhx": "Lhao Vo",
76
+ "mkz": "Makasae",
77
+ "mry": "Mandaya",
78
+ "msb": "Masbatenyo",
79
+ "mya": "Burmese",
80
+ "nod": "Northern Thai",
81
+ "nxa": "Nauete",
82
+ "nxl": "South Nuaulu",
83
+ "pag": "Pangasinan",
84
+ "pce": "Ruching Palaung",
85
+ "pea": "Peranakan Indonesian",
86
+ "pmf": "Pamona",
87
+ "psp": "Filipino Sign Language",
88
+ "sea": "Semai",
89
+ "sgd": "Surigaonon",
90
+ "sml": "Central Sama",
91
+ "snl": "Sangil",
92
+ "tdt": "Tetun Dili",
93
+ "tet": "Tetun",
94
+ "tha": "Thai",
95
+ "tkd": "Tukudede",
96
+ "tpu": "Tampuan",
97
+ "war": "Waray-Waray",
98
+ "wms": "Wambon",
99
+ "yet": "Yetfa",
100
+ "yin": "Riang Lai",
101
+ "zlm": "Malay",
102
+ }
103
+
104
+ _LOCAL = False
105
+ _LANGUAGES = list(_LANG_CONFIG.keys())
106
+
107
+
108
+ _DATASETNAME = "bloom_vist"
109
+ _DESCRIPTION = r"""
110
+ BLOOM VIST is a visual storytelling of books that consists of 62 languages indigenous to SEA.
111
+ This dataset is owned by Bloom, a free, open-source software developed by SIL International and associated with Bloom Library, app, and services.
112
+ This dataset is released with the LICENSE family of Creative Commons (although each story datapoints has its licensing in more detail,
113
+ e.g cc-by, cc-by-nc, cc-by-nd, cc-by-sa, cc-by-nc-nd, cc-by-nc-sa).
114
+ Before using this dataloader, please accept the acknowledgement at https://huggingface.co/datasets/sil-ai/bloom-vist and use huggingface-cli login for authentication.
115
+ """
116
+
117
+ _HOMEPAGE = "https://huggingface.co/datasets/sil-ai/bloom-vist"
118
+ _LICENSE = Licenses.CC.value
119
+
120
+ _URL = "https://huggingface.co/datasets/sil-ai/bloom-vist"
121
+ _HF_REMOTE_REF = "/".join(_URL.split("/")[-2:])
122
+
123
+ _SUPPORTED_TASKS = [Tasks.IMAGE_CAPTIONING]
124
+ _SOURCE_VERSION = "0.1.0"
125
+ _SEACROWD_VERSION = "2024.06.20"
126
+
127
+ CONFIG_SUFFIXES_FOR_TASK = [TASK_TO_SCHEMA.get(task).lower() for task in _SUPPORTED_TASKS]
128
+
129
+
130
+ def conform_init_config():
131
+ """Assertion Function for Instantiated Configs"""
132
+ if len(_LANGUAGES) == 0:
133
+ raise AssertionError("No Languages detected from config!")
134
+ if len(CONFIG_SUFFIXES_FOR_TASK) != len(_SUPPORTED_TASKS):
135
+ raise AssertionError("Config prefixes don't matched in terms of `len` with `_SUPPORTED_TASKS`!")
136
+ if len(CONFIG_SUFFIXES_FOR_TASK) == 0:
137
+ raise AssertionError("Config prefixes and `_SUPPORTED_TASKS` have `len` of 0!")
138
+
139
+
140
+ conform_init_config()
141
+
142
+
143
+ def construct_configs_on_langs(languages: list = None) -> List[SEACrowdConfig]:
144
+ """
145
+ The function `construct_configs` constructs a list of SEACrowdConfig objects based on the provided
146
+ languages or a default language, and returns the list.
147
+
148
+ input:
149
+ languages (list, default None): The `languages` parameter is a list that specifies the languages for which the
150
+ configurations need to be constructed. If no languages are provided (value=None), the first value in language config
151
+ will be used.
152
+ output:
153
+ a list of `SEACrowdConfig` objects based on instantiated init variables
154
+ """
155
+
156
+ # set output var
157
+ config_list = []
158
+
159
+ # construct zipped arg for config instantiation
160
+ TASKS_AND_CONFIG_SUFFIX_PAIRS = list(zip(_SUPPORTED_TASKS, CONFIG_SUFFIXES_FOR_TASK))
161
+
162
+ # implement source schema
163
+ version, config_name_prefix = _SOURCE_VERSION, "source"
164
+ config_list += [
165
+ SEACrowdConfig(
166
+ name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}",
167
+ version=datasets.Version(version),
168
+ description=f"{_DATASETNAME} {config_name_prefix} schema for language code {_LANG}",
169
+ schema=f"{config_name_prefix}",
170
+ subset_id=_LANG,
171
+ )
172
+ for _LANG in languages
173
+ ]
174
+
175
+ # implement SEACrowd schema
176
+ version, config_name_prefix = _SEACROWD_VERSION, "seacrowd"
177
+ for task_obj, config_name_suffix in TASKS_AND_CONFIG_SUFFIX_PAIRS:
178
+ config_list += [
179
+ SEACrowdConfig(
180
+ name=f"{_DATASETNAME}_{_LANG}_{config_name_prefix}_{config_name_suffix}",
181
+ version=datasets.Version(version),
182
+ description=f"{_DATASETNAME} {config_name_prefix} schema for {task_obj.name} and language code {_LANG}",
183
+ schema=f"{config_name_prefix}_{config_name_suffix}",
184
+ subset_id=_LANG,
185
+ )
186
+ for _LANG in languages
187
+ ]
188
+ return config_list
189
+
190
+
191
+ class BloomVISTDataset(datasets.GeneratorBasedBuilder):
192
+ """Bloom VIST dataset, subsetted from https://huggingface.co/datasets/sil-ai/bloom-vist"""
193
+
194
+ # get all schema w/o lang arg + get all schema w/ lang arg
195
+ BUILDER_CONFIGS = construct_configs_on_langs(_LANGUAGES)
196
+
197
+ def _info(self) -> datasets.DatasetInfo:
198
+ _config_schema_name = self.config.schema
199
+ logger.info(f"Received schema name: {self.config.schema}")
200
+ # source schema
201
+ if _config_schema_name == "source":
202
+ features = datasets.Features(
203
+ {
204
+ "title": datasets.Value("string"),
205
+ "license": datasets.Value("string"),
206
+ "album_id": datasets.Value("string"),
207
+ "story": datasets.Sequence(
208
+ feature={"image_id": datasets.Value("string"), "image_url": datasets.Value("string"), "story_index": datasets.Value("int32"), "story_id": datasets.Value("string"), "text": datasets.Value("string")}, length=-1, id=None
209
+ ),
210
+ }
211
+ )
212
+
213
+ # image-text schema
214
+ elif _config_schema_name == "seacrowd_imtext":
215
+ features = schemas.image_text_features()
216
+
217
+ else:
218
+ raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")
219
+
220
+ return datasets.DatasetInfo(
221
+ description=_DESCRIPTION,
222
+ features=features,
223
+ homepage=_HOMEPAGE,
224
+ license=_LICENSE,
225
+ citation=_CITATION,
226
+ )
227
+
228
+ def _split_generators(self, dl_manager: DownloadManager) -> List[datasets.SplitGenerator]:
229
+ hf_dset_dict = datasets.load_dataset(_HF_REMOTE_REF, self.config.subset_id)
230
+
231
+ return [datasets.SplitGenerator(name=datasets.Split(dset_key), gen_kwargs={"hf_dset": dset}) for dset_key, dset in hf_dset_dict.items() if dset.num_rows > 0]
232
+
233
+ def _generate_examples(self, hf_dset) -> Tuple[int, Dict]:
234
+ _config_schema_name = self.config.schema
235
+
236
+ _idx = 0
237
+ for datapoints in hf_dset:
238
+ # for source schema, the `_idx` will be taken from "album_id" value
239
+ if _config_schema_name == "source":
240
+ yield datapoints["album_id"], {colname: datapoints[colname] for colname in self.info.features}
241
+
242
+ # for seacrowd schema, the `_idx` will be created manually
243
+ # since one album_id has multiple pairs of image-text
244
+ elif _config_schema_name == "seacrowd_imtext":
245
+ # check the len of the features in sequenced columns
246
+ # since in source hf there's no validation on data integrity
247
+ _len_vars = []
248
+ _ftrs_in_seq = ("image_id", "image_url", "story_index", "story_id", "text")
249
+ story_data = datapoints["story"]
250
+ for ftr in _ftrs_in_seq:
251
+ _len_vars.append(len(story_data[ftr]))
252
+
253
+ # skip story w/ mismatched infos
254
+ if max(_len_vars) != min(_len_vars):
255
+ continue
256
+
257
+ for num_data in range(max(_len_vars)):
258
+ yield _idx, {"id": _idx, "image_paths": [story_data["image_url"][num_data]], "texts": story_data["text"][num_data], "metadata": {"context": datapoints["title"], "labels": []}}
259
+ _idx += 1
260
+
261
+ else:
262
+ raise ValueError(f"Received unexpected config schema of {_config_schema_name}!")