jnemecek committed
Commit 992fab9 (parent c5efb04)

update language listing

Files changed (1): biblenlp-corpus.py (+337 −337)
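The diff stats count every line (+337 −337), but the only substantive change is the `_LANGUAGES` list; the rest of the loader is identical on both sides. A quick way to see what the listing update actually adds or drops, as a sketch (`old` and `new` stand for the `-` and `+` list literals in the diff below):

# `old` and `new` are the two _LANGUAGES literals from the diff below.
added = sorted(set(new) - set(old))
removed = sorted(set(old) - set(new))
print(f'{len(added)} language codes added, {len(removed)} removed')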
biblenlp-corpus.py CHANGED
@@ -1,338 +1,338 @@
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
import os

import datasets
from tqdm import tqdm
import numpy as np
import ijson

_VERSION = "0.0.2"
- _LANGUAGES = ['aau', 'aaz', 'abx', 'aby', 'acf', 'acu', 'adz', 'aey', 'agd', 'agg', 'agm', 'agn', 'agr', 'agu', 'aia', 'ake', 'alp', 'alq', 'als', 'aly', 'ame', 'amk', 'amp', 'amr', 'amu', 'anh', 'anv', 'aoi', 'aoj', 'apb', 'apn', 'apu', 'apy', 'arb', 'arl', 'arn', 'arp', 'aso', 'ata', 'atb', 'atd', 'atg', 'auc', 'aui', 'auy', 'avt', 'awb', 'awk', 'awx', 'azg', 'azz', 'bao', 'bbb', 'bbr', 'bch', 'bco', 'bdd', 'bea', 'bel', 'bgs', 'bgt', 'bhg', 'bhl', 'big', 'bjr', 'bjv', 'bkd', 'bki', 'bkq', 'bkx', 'bla', 'blw', 'blz', 'bmh', 'bmk', 'bmr', 'bnp', 'boa', 'boj', 'bon', 'box', 'bqc', 'bre', 'bsn', 'bsp', 'bss', 'buk', 'bus', 'bvr', 'bxh', 'byx', 'bzd', 'bzj', 'cab', 'caf', 'cao', 'cap', 'car', 'cav', 'cax', 'cbc', 'cbi', 'cbk', 'cbr', 'cbs', 'cbt', 'cbu', 'cbv', 'cco', 'ces', 'cgc', 'cha', 'chd', 'chf', 'chk', 'chq', 'chz', 'cjo', 'cjv', 'cle', 'clu', 'cme', 'cmn', 'cni', 'cnl', 'cnt', 'cof', 'con', 'cop', 'cot', 'cpa', 'cpb', 'cpc', 'cpu', 'crn', 'crx', 'cso', 'cta', 'ctp', 'ctu', 'cub', 'cuc', 'cui', 'cut', 'cux', 'cwe', 'daa', 'dad', 'dah', 'ded', 'deu', 'dgr', 'dgz', 'dif', 'dik', 'dji', 'djk', 'dob', 'dwr', 'dww', 'dwy', 'eko', 'emi', 'emp', 'eng', 'epo', 'eri', 'ese', 'etr', 'faa', 'fai', 'far', 'for', 'fra', 'fuf', 'gai', 'gam', 'gaw', 'gdn', 'gdr', 'geb', 'gfk', 'ghs', 'gia', 'glk', 'gmv', 'gng', 'gnn', 'gnw', 'gof', 'grc', 'gub', 'guh', 'gui', 'gul', 'gum', 'guo', 'gvc', 'gvf', 'gwi', 'gym', 'gyr', 'hat', 'haw', 'hbo', 'hch', 'heb', 'heg', 'hix', 'hla', 'hlt', 'hns', 'hop', 'hrv', 'hub', 'hui', 'hus', 'huu', 'huv', 'hvn', 'ign', 'ikk', 'ikw', 'imo', 'inb', 'ind', 'ino', 'iou', 'ipi', 'ita', 'jac', 'jao', 'jic', 'jiv', 'jpn', 'jvn', 'kaq', 'kbc', 'kbh', 'kbm', 'kdc', 'kde', 'kdl', 'kek', 'ken', 'kew', 'kgk', 'kgp', 'khs', 'kje', 'kjs', 'kkc', 'kky', 'klt', 'klv', 'kms', 'kmu', 'kne', 'knf', 'knj', 'kos', 'kpf', 'kpg', 'kpj', 'kpw', 'kqa', 'kqc', 'kqf', 'kql', 'kqw', 'ksj', 'ksr', 'ktm', 'kto', 'kud', 'kue', 'kup', 'kvn', 'kwd', 'kwf', 'kwi', 'kwj', 'kyf', 'kyg', 'kyq', 'kyz', 'kze', 'lac', 'lat', 'lbb', 'leu', 'lex', 'lgl', 'lid', 'lif', 'lww', 'maa', 'maj', 'maq', 'mau', 'mav', 'maz', 'mbb', 'mbc', 'mbh', 'mbl', 'mbt', 'mca', 'mcb', 'mcd', 'mcf', 'mcp', 'mdy', 'med', 'mee', 'mek', 'meq', 'met', 'meu', 'mgh', 'mgw', 'mhl', 'mib', 'mic', 'mie', 'mig', 'mih', 'mil', 'mio', 'mir', 'mit', 'miz', 'mjc', 'mkn', 'mks', 'mlh', 'mlp', 'mmx', 'mna', 'mop', 'mox', 'mph', 'mpj', 'mpm', 'mpp', 'mps', 'mpx', 'mqb', 'mqj', 'msb', 'msc', 'msk', 'msm', 'msy', 'mti', 'muy', 'mva', 'mvn', 'mwc', 'mxb', 'mxp', 'mxq', 'mxt', 'myu', 'myw', 'myy', 'mzz', 'nab', 'naf', 'nak', 'nay', 'nbq', 'nca', 'nch', 'ncj', 'ncl', 'ncu', 'ndj', 'nfa', 'ngp', 'ngu', 'nhg', 'nhi', 'nho', 'nhr', 'nhu', 'nhw', 'nhy', 'nif', 'nin', 'nko', 'nld', 'nlg', 'nna', 'nnq', 'not', 'nou', 'npl', 'nsn', 'nss', 'ntj', 'ntp', 'nwi', 'nyu', 'obo', 'ong', 'ons', 'ood', 'opm', 'ote', 'otm', 'otn', 'otq', 'ots', 'pab', 'pad', 'pah', 'pao', 'pes', 'pib', 'pio', 'pir', 'pjt', 'plu', 'pma', 'poe', 'poi', 'pon', 'poy', 'ppo', 'prf', 'pri', 'ptp', 'ptu', 'pwg', 'quc', 'quf', 'quh', 'qul', 'qup', 'qvc', 'qve', 'qvh', 'qvm', 'qvn', 'qvs', 'qvw', 'qvz', 'qwh', 'qxh', 'qxn', 'qxo', 'rai', 'rkb', 'rmc', 'roo', 'rop', 'rro', 'ruf', 'rug', 'rus', 'sab', 'san', 'sbe', 'seh', 'sey', 'sgz', 'shj', 'shp', 'sim', 'sja', 'sll', 'smk', 'snc', 'snn', 'sny', 'som', 'soq', 'spa', 'spl', 'spm', 'sps', 'spy', 'sri', 'srm', 'srn', 'srp', 'srq', 'ssd', 'ssg', 'ssx', 'stp', 'sua', 'sue', 'sus', 'suz', 'swe', 'swh', 'swp', 'sxb', 'tac', 'tav', 'tbc', 'tbl', 'tbo', 'tbz', 'tca', 'tee', 'ter', 'tew', 'tfr', 'tgp', 'tif', 'tim', 'tiy', 'tke', 'tku', 'tna', 'tnc', 'tnn', 'tnp', 'toc', 'tod', 'toj', 'ton', 'too', 'top', 'tos', 'tpt', 'trc', 'tsw', 'ttc', 'tue', 'tuo', 'txu', 'ubr', 'udu', 'ukr', 'uli', 'ura', 'urb', 'usa', 'usp', 'uvl', 'vid', 'vie', 'viv', 'vmy', 'waj', 'wal', 'wap', 'wat', 'wbp', 'wed', 'wer', 'wim', 'wmt', 'wmw', 'wnc', 'wnu', 'wos', 'wrk', 'wro', 'wsk', 'wuv', 'xav', 'xed', 'xla', 'xnn', 'xon', 'xsi', 'xtd', 'xtm', 'yaa', 'yad', 'yal', 'yap', 'yaq', 'yby', 'ycn', 'yka', 'yml', 'yre', 'yuj', 'yut', 'yuw', 'yva', 'zaa', 'zab', 'zac', 'zad', 'zai', 'zaj', 'zam', 'zao', 'zar', 'zas', 'zat', 'zav', 'zaw', 'zca', 'zia', 'ziw', 'zos', 'zpc', 'zpl', 'zpo', 'zpq', 'zpu', 'zpv', 'zpz', 'zsr', 'ztq', 'zty', 'zyp']
+ _LANGUAGES = ['aai', 'aak', 'aau', 'aaz', 'abt', 'abx', 'aby', 'acf', 'acr', 'acu', 'adz', 'aer', 'aey', 'agd', 'agg', 'agm', 'agn', 'agr', 'agt', 'agu', 'aia', 'aii', 'aka', 'ake', 'alp', 'alq', 'als', 'aly', 'ame', 'amf', 'amk', 'amm', 'amn', 'amo', 'amp', 'amr', 'amu', 'amx', 'anh', 'anv', 'aoi', 'aoj', 'aom', 'aon', 'apb', 'ape', 'apn', 'apr', 'apu', 'apw', 'apz', 'arb', 'are', 'arl', 'arn', 'arp', 'asm', 'aso', 'ata', 'atb', 'atd', 'atg', 'att', 'auc', 'aui', 'auy', 'avt', 'awb', 'awk', 'awx', 'azb', 'azg', 'azz', 'bao', 'bba', 'bbb', 'bbr', 'bch', 'bco', 'bdd', 'bea', 'bef', 'bel', 'ben', 'beo', 'beu', 'bgs', 'bgt', 'bhg', 'bhl', 'big', 'bjk', 'bjp', 'bjr', 'bjv', 'bjz', 'bkd', 'bki', 'bkq', 'bkx', 'bla', 'blw', 'blz', 'bmh', 'bmk', 'bmr', 'bmu', 'bnp', 'boa', 'boj', 'bon', 'box', 'bpr', 'bps', 'bqc', 'bqp', 'bre', 'bsj', 'bsn', 'bsp', 'bss', 'buk', 'bus', 'bvd', 'bvr', 'bxh', 'byr', 'byx', 'bzd', 'bzh', 'bzj', 'caa', 'cab', 'cac', 'caf', 'cak', 'cao', 'cap', 'car', 'cav', 'cax', 'cbc', 'cbi', 'cbk', 'cbr', 'cbs', 'cbt', 'cbu', 'cbv', 'cco', 'ceb', 'cek', 'ces', 'cgc', 'cha', 'chd', 'chf', 'chk', 'chq', 'chz', 'cjo', 'cjv', 'ckb', 'cle', 'clu', 'cme', 'cmn', 'cni', 'cnl', 'cnt', 'cof', 'con', 'cop', 'cot', 'cpa', 'cpb', 'cpc', 'cpu', 'cpy', 'crn', 'crx', 'cso', 'csy', 'cta', 'cth', 'ctp', 'ctu', 'cub', 'cuc', 'cui', 'cuk', 'cut', 'cux', 'cwe', 'cya', 'daa', 'dad', 'dah', 'dan', 'ded', 'deu', 'dgc', 'dgr', 'dgz', 'dhg', 'dif', 'dik', 'dji', 'djk', 'djr', 'dob', 'dop', 'dov', 'dwr', 'dww', 'dwy', 'ebk', 'eko', 'emi', 'emp', 'eng', 'enq', 'epo', 'eri', 'ese', 'esk', 'etr', 'ewe', 'faa', 'fai', 'far', 'ffm', 'for', 'fra', 'fue', 'fuf', 'fuh', 'gah', 'gai', 'gam', 'gaw', 'gdn', 'gdr', 'geb', 'gfk', 'ghs', 'glk', 'gmv', 'gng', 'gnn', 'gnw', 'gof', 'grc', 'gub', 'guh', 'gui', 'guj', 'gul', 'gum', 'gun', 'guo', 'gup', 'gux', 'gvc', 'gvf', 'gvn', 'gvs', 'gwi', 'gym', 'gyr', 'hat', 'hau', 'haw', 'hbo', 'hch', 'heb', 'heg', 'hin', 'hix', 'hla', 'hlt', 'hmo', 'hns', 'hop', 'hot', 'hrv', 'hto', 'hub', 'hui', 'hun', 'hus', 'huu', 'huv', 'hvn', 'ian', 'ign', 'ikk', 'ikw', 'ilo', 'imo', 'inb', 'ind', 'ino', 'iou', 'ipi', 'isn', 'ita', 'iws', 'ixl', 'jac', 'jae', 'jao', 'jic', 'jid', 'jiv', 'jni', 'jpn', 'jvn', 'kan', 'kaq', 'kbc', 'kbh', 'kbm', 'kbq', 'kdc', 'kde', 'kdl', 'kek', 'ken', 'kew', 'kgf', 'kgk', 'kgp', 'khs', 'khz', 'kik', 'kiw', 'kiz', 'kje', 'kjn', 'kjs', 'kkc', 'kkl', 'klt', 'klv', 'kmg', 'kmh', 'kmk', 'kmo', 'kms', 'kmu', 'kne', 'knf', 'knj', 'knv', 'kos', 'kpf', 'kpg', 'kpj', 'kpr', 'kpw', 'kpx', 'kqa', 'kqc', 'kqf', 'kql', 'kqw', 'ksd', 'ksj', 'ksr', 'ktm', 'kto', 'kud', 'kue', 'kup', 'kvg', 'kvn', 'kwd', 'kwf', 'kwi', 'kwj', 'kyc', 'kyf', 'kyg', 'kyq', 'kyz', 'kze', 'lac', 'lat', 'lbb', 'lbk', 'lcm', 'leu', 'lex', 'lgl', 'lid', 'lif', 'lin', 'lit', 'llg', 'lug', 'luo', 'lww', 'maa', 'maj', 'mal', 'mam', 'maq', 'mar', 'mau', 'mav', 'maz', 'mbb', 'mbc', 'mbh', 'mbj', 'mbl', 'mbs', 'mbt', 'mca', 'mcb', 'mcd', 'mcf', 'mco', 'mcp', 'mcq', 'mcr', 'mdy', 'med', 'mee', 'mek', 'meq', 'met', 'meu', 'mgc', 'mgh', 'mgw', 'mhl', 'mib', 'mic', 'mie', 'mig', 'mih', 'mil', 'mio', 'mir', 'mit', 'miz', 'mjc', 'mkj', 'mkl', 'mkn', 'mks', 'mle', 'mlh', 'mlp', 'mmo', 'mmx', 'mna', 'mop', 'mox', 'mph', 'mpj', 'mpm', 'mpp', 'mps', 'mpt', 'mpx', 'mqb', 'mqj', 'msb', 'msc', 'msk', 'msm', 'msy', 'mti', 'mto', 'mux', 'muy', 'mva', 'mvn', 'mwc', 'mwe', 'mwf', 'mwp', 'mxb', 'mxp', 'mxq', 'mxt', 'mya', 'myk', 'myu', 'myw', 'myy', 'mzz', 'nab', 'naf', 'nak', 'nas', 'nay', 'nbq', 'nca', 'nch', 'ncj', 'ncl', 'ncu', 'ndg', 'ndj', 'nfa', 'ngp', 'ngu', 'nhe', 'nhg', 'nhi', 'nho', 'nhr', 'nhu', 'nhw', 'nhy', 'nif', 'nii', 'nin', 'nko', 'nld', 'nlg', 'nmw', 'nna', 'nnq', 'noa', 'nop', 'not', 'nou', 'npi', 'npl', 'nsn', 'nss', 'ntj', 'ntp', 'ntu', 'nuy', 'nvm', 'nwi', 'nya', 'nys', 'nyu', 'obo', 'okv', 'omw', 'ong', 'ons', 'ood', 'opm', 'ory', 'ote', 'otm', 'otn', 'otq', 'ots', 'pab', 'pad', 'pah', 'pan', 'pao', 'pes', 'pib', 'pio', 'pir', 'piu', 'pjt', 'pls', 'plu', 'pma', 'poe', 'poh', 'poi', 'pol', 'pon', 'por', 'poy', 'ppo', 'prf', 'pri', 'ptp', 'ptu', 'pwg', 'qub', 'quc', 'quf', 'quh', 'qul', 'qup', 'qvc', 'qve', 'qvh', 'qvm', 'qvn', 'qvs', 'qvw', 'qvz', 'qwh', 'qxh', 'qxn', 'qxo', 'rai', 'reg', 'rgu', 'rkb', 'rmc', 'rmy', 'ron', 'roo', 'rop', 'row', 'rro', 'ruf', 'rug', 'rus', 'rwo', 'sab', 'san', 'sbe', 'sbk', 'sbs', 'seh', 'sey', 'sgb', 'sgz', 'shj', 'shp', 'sim', 'sja', 'sll', 'smk', 'snc', 'snn', 'snp', 'snx', 'sny', 'som', 'soq', 'soy', 'spa', 'spl', 'spm', 'spp', 'sps', 'spy', 'sri', 'srm', 'srn', 'srp', 'srq', 'ssd', 'ssg', 'ssx', 'stp', 'sua', 'sue', 'sus', 'suz', 'swe', 'swh', 'swp', 'sxb', 'tac', 'taj', 'tam', 'tav', 'taw', 'tbc', 'tbf', 'tbg', 'tbl', 'tbo', 'tbz', 'tca', 'tcs', 'tcz', 'tdt', 'tee', 'tel', 'ter', 'tet', 'tew', 'tfr', 'tgk', 'tgl', 'tgo', 'tgp', 'tha', 'thd', 'tif', 'tim', 'tiw', 'tiy', 'tke', 'tku', 'tlf', 'tmd', 'tna', 'tnc', 'tnk', 'tnn', 'tnp', 'toc', 'tod', 'tof', 'toj', 'ton', 'too', 'top', 'tos', 'tpa', 'tpi', 'tpt', 'tpz', 'trc', 'tsw', 'ttc', 'tte', 'tuc', 'tue', 'tuf', 'tuo', 'tur', 'tvk', 'twi', 'txq', 'txu', 'tzj', 'tzo', 'ubr', 'ubu', 'udu', 'uig', 'ukr', 'uli', 'ulk', 'upv', 'ura', 'urb', 'urd', 'uri', 'urt', 'urw', 'usa', 'usp', 'uvh', 'uvl', 'vid', 'vie', 'viv', 'vmy', 'waj', 'wal', 'wap', 'wat', 'wbi', 'wbp', 'wed', 'wer', 'wim', 'wiu', 'wiv', 'wmt', 'wmw', 'wnc', 'wnu', 'wol', 'wos', 'wrk', 'wro', 'wrs', 'wsk', 'wuv', 'xav', 'xbi', 'xed', 'xla', 'xnn', 'xon', 'xsi', 'xtd', 'xtm', 'yaa', 'yad', 'yal', 'yap', 'yaq', 'yby', 'ycn', 'yka', 'yle', 'yml', 'yon', 'yor', 'yrb', 'yre', 'yss', 'yuj', 'yut', 'yuw', 'yva', 'zaa', 'zab', 'zac', 'zad', 'zai', 'zaj', 'zam', 'zao', 'zap', 'zar', 'zas', 'zat', 'zav', 'zaw', 'zca', 'zga', 'zia', 'ziw', 'zlm', 'zos', 'zpc', 'zpl', 'zpm', 'zpo', 'zpq', 'zpu', 'zpv', 'zpz', 'zsr', 'ztq', 'zty', 'zyp']
_DESCRIPTION = "Bible Parallel Corpus"
_HOMEPAGE = 'https://github.com/BibleNLP/ebible-corpus'
_PAIR = 'all'
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {A great new dataset},
author={huggingface, Inc.
},
year={2020}
}
"""
_LICENSE = 'Creative Commons and Public Domain, specified in the dataset'
_FILES = {'corpus':'corpus.json','vref':'vref.txt'}

class BiblenlpCorpusConfig(datasets.BuilderConfig):
    def __init__(self, languages=[], pair='all', **kwargs):
        '''
        languages: list of languages to include
        pair: 'all', 'range', or 'single' to specify whether verse ranges, single pairings, or all pairs are included
        **kwargs: additional arguments to pass to the superclass'''
        super(BiblenlpCorpusConfig, self).__init__(name=f'{"-".join([x for x in languages])}', **kwargs)
        self.languages = languages
        self.pair = pair

class BiblenlpCorpus(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        BiblenlpCorpusConfig(
            languages=_LANGUAGES,
            pair=_PAIR,
            description = f'Parallel Bible verses with {_PAIR} pairings of {"-".join([x for x in _LANGUAGES])} languages',
        )
    ]

    BUILDER_CONFIG_CLASS = BiblenlpCorpusConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "translation": datasets.features.TranslationVariableLanguages(languages=_LANGUAGES),
                    "files": datasets.features.Sequence( {'lang':datasets.Value("string"), 'file':datasets.Value("string")} ),
                    "ref": datasets.features.Sequence( datasets.Value("string") ),
                    "licenses": datasets.features.Sequence( datasets.Value("string") ),
                    "copyrights": datasets.features.Sequence( datasets.Value("string") )
                },
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=_VERSION
        ) #

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_FILES)

        def parse_json(json_filename, langs):
            with open(json_filename, 'rb') as input_file:
                # load json iteratively
                parser = ijson.parse(input_file)
                parsed_dict = {lang: [] for lang in langs}
                idx = {lang: -1 for lang in langs}
                for prefix, event, value in parser:
                    #print(f'prefix:{prefix}, event:{event}, value:{value}')
                    if any([prefix.startswith(f'{lang}.') for lang in langs]):
                        #print(f'prefix:{prefix}, event:{event}, value:{value}')
                        if event == 'start_map':
                            idx[prefix.split('.')[0]] += 1
                            tmpdict ={}
                            tmpdict['verses']=[]
                        if prefix.endswith('verses.item'):
                            tmpdict['verses'].append(value)
                        if prefix.endswith('copyright'):
                            tmpdict['copyright'] = value
                        if prefix.endswith('text'):
                            tmpdict['text'] = value
                        if prefix.endswith('file'):
                            tmpdict['file'] = value
                        if prefix.endswith('license'):
                            tmpdict['license'] = value
                        if event == 'end_map':
                            #write the dictionary
                            parsed_dict[prefix.split('.')[0]].append( tmpdict.copy() )
            return(parsed_dict)

        def define_splits(corpus_slice, langs):
            verse2split = {}
            langtexts = {}
            textverses = {}
            vindexes = {}
            vgroups = []
            versesets ={}

            np.random.seed(42)

            for lang in tqdm(langs):
                langtexts[lang] = set()
                textverses[lang]={}
                vindexes[lang]={}
                for idx,line in enumerate(corpus_slice[lang]):
                    langtexts[lang].add(line['file'])
                    for v in line['verses']:
                        if not textverses[lang].get(line['file']):
                            textverses[lang][line['file']] = set()
                        textverses[lang][line['file']].add(v)
                        if not vindexes[lang].get(v):
                            vindexes[lang][v] = [idx]
                        else:
                            vindexes[lang][v].append(idx)

            #print(list(vindexes['eng'].keys())[0:10])
            #for f in langtexts['eng']:
            #    print(len(textverses['eng'][f]))

            for line in tqdm(corpus_slice[langs[0]]):
                versesets = {line['file']:line['verses']}
                if any([verse2split.get(z) for z in line['verses']]):
                    for verse in line['verses']:
                        if verse2split.get(verse):
                            prevsplit = verse2split.get(verse)
                            break
                    split = prevsplit
                else:
                    split = np.random.choice(['train','test','dev'],p=[.9,.05,.05])
                if not all([verse2split.get(z) for z in line['verses']]):
                    all_verses = set()
                    for v in line['verses']:
                        all_verses.add(v)
                    while True:
                        verses_added = False
                        idxes = {k:set() for k in langs}
                        #get indexes for verses
                        for v in all_verses:
                            for lang in langs:
                                if vindexes[lang].get(v):
                                    for idx in vindexes[lang][v]:
                                        idxes[lang].add(idx)

                        for lang in langs:
                            for compline in [corpus_slice[lang][x] for x in idxes[lang] if x != set()]:

                                if all(x in textverses[lang][compline['file']] for x in all_verses) and any([x in list(all_verses) for x in compline['verses']]):
                                    if not versesets.get(compline['file']):
                                        versesets[compline['file']] = compline['verses'].copy()
                                    else:
                                        versesets[compline['file']].extend(compline['verses'].copy())
                                    for v in compline['verses']:
                                        pre_size = len(all_verses)
                                        all_verses.add(v)
                                        if len(all_verses) > pre_size:
                                            verses_added = True

                        if verses_added == False or all([set(versesets[q]) == all_verses for q in versesets.keys()]):
                            vgroups.append(all_verses)
                            for v in all_verses:
                                verse2split[v] = split
                            break

            return(vgroups,vindexes, verse2split)

        corpus_slice = parse_json(downloaded_files['corpus'], self.config.languages)
        vgroups, vindexes, verse2split = define_splits(corpus_slice, self.config.languages)


        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'train',
                    'vref': downloaded_files['vref'],
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'dev',
                    'vref': downloaded_files['vref'],
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    'langs': self.config.languages,
                    'corpus': corpus_slice,
                    'vgroups': vgroups,
                    'vindexes': vindexes,
                    'v2split': verse2split,
                    'split': 'test',
                    'vref': downloaded_files['vref'],
                }
            ),
        ]

    def _generate_examples(self, vref, corpus, vgroups, vindexes, v2split, split, langs):
        #print(f'Generating {split} examples...')
        with open(vref, 'r') as txtfile:
            #print('file opened')
            lines = txtfile.readlines()
            #print('lines read')
            verse2index = {k.strip():v for v,k in enumerate(lines) if k.strip() != ''}
            #print('v2i created')

        def order_verses(verse_list):
            lines_list = [int(verse2index[x]) for x in verse_list]
            verse_order = np.argsort(lines_list)
            return(verse_order)

        trans_license = {}
        trans_copyright = {}
        id = -1
        #print(f'beginning groups, starting with {list(vgroups)[0]}')
        for group in vgroups:
            #print(group)
            if v2split.get(list(group)[0]) == split:
                #print('correct split')
                if len(group) > 1:
                    v_order = order_verses(group)
                    o_group = list(np.array(list(group))[v_order])

                else:
                    o_group = list(group)
                trans_dict={k:{} for k in langs}
                trans_texts = {k:{} for k in langs}
                used_idxes = {k:[] for k in langs}
                for v in o_group:
                    #print(f'trying verse {v}')
                    single_texts = {k:{} for k in langs}
                    single_dict = {k:{} for k in langs}
                    for lang in langs:
                        if vindexes[lang].get(v):
                            try:
                                for i in range(0,len(list(vindexes[lang][v]))):
                                    if list(vindexes[lang][v])[i] not in used_idxes[lang]:
                                        used_idxes[lang].append(vindexes[lang][v][i])  # record the index itself so the membership check above can match
                                        line = corpus[lang][vindexes[lang][v][i]]
                                        if not trans_texts[lang].get(line['file']):  # first text seen for this file in this language
                                            trans_texts[lang][line['file']]=line['text']
                                        else:
                                            trans_texts[lang][line['file']] += f' {line["text"]}'
                                        if line.get('license'):
                                            trans_license[line['file']]=line['license']
                                        if line.get('copyright'):
                                            trans_copyright[line['file']]=line['copyright']
                                        if len(line['verses']) == 1:
                                            single_texts[lang][line['file']] = line['text']
                            except:
                                print(lang,v)
                                raise

                        single_dict[lang]=list(single_texts[lang].values())
                        trans_dict[lang]=list(trans_texts[lang].values())

                    single_file = {x:list(single_texts[x].keys()) for x in langs}
                    sing_v_order = {x:np.argsort(list(single_texts[x].values())) for x in langs}
                    lang_order = np.argsort(langs)
                    single_lic_list =[]
                    single_copy_list = []
                    for lang in [langs[x] for x in lang_order]:
                        single_lic_list.extend([trans_license.get(single_file[lang][x],'') for x in sing_v_order[lang]])
                        single_copy_list.extend([trans_copyright.get(single_file[lang][x],'') for x in sing_v_order[lang]])
                    if all([single_dict.get(x) and single_dict.get(x) != [{}] and list(single_dict.get(x)) for x in langs]) and len(list(single_dict.keys())) == len(langs) and self.config.pair != 'range':
                        id = id + 1
                        sfile_list = []
                        for key in [langs[x] for x in lang_order]:
                            for value in [single_file.get(key)[x] for x in sing_v_order.get(key)]:
                                sfile_list.append({'lang':key,'file':value})

                        #print('outputting single example')
                        # print(id, single_dict, sfile_list, v, single_lic_list, single_copy_list)
                        try:
                            yield(id, {'translation': single_dict, 'files': sfile_list, 'ref':[v], 'licenses':single_lic_list, 'copyrights':single_copy_list})
                        except:
                            print(id, single_dict, sfile_list, v, single_lic_list, single_copy_list)
                            raise

                file_list = {x:list(trans_texts[x].keys()) for x in langs}
                trans_v_order = {x:np.argsort(list(trans_texts[x].values())) for x in langs}
                lang_order = np.argsort(langs)
                #license_list = [trans_license.get(x,'') for x in [y for y in file_list.values()]]
                #copyright_list = [trans_copyright.get(x,'') for x in [y for y in file_list.values()]]
                license_list = []
                copyright_list = []
                for lang in [langs[x] for x in lang_order]:
                    license_list.extend([trans_license.get(file_list[lang][x],'') for x in trans_v_order[lang]])
                    copyright_list.extend([trans_copyright.get(file_list[lang][x],'') for x in trans_v_order[lang]])
                if len(o_group)>1 and all([trans_dict.get(x) and trans_dict.get(x) != [{}] and list(trans_dict.get(x)) for x in langs]) and len(list(trans_dict.keys())) == len(langs) and self.config.pair != 'single':
                    id = id + 1
                    ofile_list = []
                    for key in [langs[x] for x in lang_order]:
                        for value in [file_list.get(key)[x] for x in trans_v_order.get(key)]:
                            ofile_list.append({'lang':key,'file':value})
                    #print('outputting range example')
                    try:
                        yield(id, {'translation': trans_dict, 'files': ofile_list, 'ref':o_group, 'licenses':license_list, 'copyrights':copyright_list})
                    except:
                        print(id, trans_dict, ofile_list, o_group, license_list, copyright_list)
                        raise
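
For readers unfamiliar with ijson: the parse_json helper above consumes corpus.json as a stream of (prefix, event, value) events instead of loading the whole file into memory, which matters at this corpus size. A minimal, self-contained sketch of the same pattern (the inline JSON snippet is invented for illustration):

import io
import ijson

# A tiny stand-in for corpus.json: one top-level key per language code,
# each holding records with the fields the loader reads.
raw = io.BytesIO(b'{"eng": [{"verses": ["GEN 1:1"], "text": "In the beginning", "file": "eng-web.txt", "license": "CC0"}]}')

for prefix, event, value in ijson.parse(raw):
    # prefix is the dotted path into the document (e.g. 'eng.item.text'),
    # event is the parser state ('start_map', 'string', ...), and value
    # carries the scalar when one is present.
    print(prefix, event, value)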
 
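A minimal usage sketch (not part of the commit; the repository id below is an assumption, and recent versions of datasets may also require trust_remote_code=True to run a script-based loader like this one):

from datasets import load_dataset

# Hypothetical repo id: point this at wherever the loader script is hosted.
# Keyword arguments beyond the path are forwarded to BiblenlpCorpusConfig,
# so `languages` and `pair` select the language subset and pairing mode.
ds = load_dataset('bible-nlp/biblenlp-corpus', languages=['eng', 'fra'], pair='single')

example = ds['train'][0]
print(example['ref'], example['translation'], example['licenses'])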