Datasets:

Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
License: bsd-3-clause

albertvillanova (HF staff) committed
Commit: 4dc07f7
1 Parent(s): cbf068b

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (3b501bf6a1de613ae6141f9485e48e5ecca2983e)
- Add az-cs data files (abe80fa3aa2edbf16ffcf08870f82a4771208c66)
- Add bg-de data files (85caabdafc2d6876d7133902428be2e9894d544a)
- Add bn-ga data files (9815c1d74b4e1f074dfeeab7988577ee199de375)
- Add br-es_PR data files (3a982827c1a9283e1627ce2810f3c16d53b6c2c9)
- Add br-hi data files (41ed77aeebbb60d8a6512845d357d8f423ebaab5)
- Add br-la data files (8a4e42da8521288f1a93e3026bbc8111a9b99ab0)
- Add br-uz data files (1a7740a3c925f29a0b214579ef9086da6b4c8ef6)
- Add br-yi data files (2ad4c16d44851edbdbb21e2e3af78ac5b8fdca5b)
- Add bs-szl data files (38045a523a0dfcce7e1bea4e5cb3d31e90550bf1)
- Delete loading script (e4e28e421491ae48e631b9be553271358c3aaef0)
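
With this change each language pair ships as a Parquet-backed config, so the data loads straight from the Hub without executing a loading script. A minimal sketch with the `datasets` library (the repo id `opus_ubuntu` and the printed values are assumptions, not taken from this commit):

```python
from datasets import load_dataset

# Each pair listed above is a named config backed by <pair>/train-*.parquet.
# The repo id "opus_ubuntu" is assumed here; substitute the actual repository path.
ds = load_dataset("opus_ubuntu", "br-hi", split="train")

print(ds.features)           # id (string) and translation (br/hi pair)
print(ds[0]["translation"])  # {'br': '...', 'hi': '...'}
```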

README.md CHANGED
@@ -211,6 +211,31 @@ language:
 - zh
 - zu
 - zza
+license:
+- bsd-3-clause
+multilinguality:
+- multilingual
+size_categories:
+- 10K<n<100K
+- 1K<n<10K
+- n<1K
+source_datasets:
+- original
+task_categories:
+- translation
+task_ids: []
+pretty_name: Opus Ubuntu
+config_names:
+- as-bs
+- az-cs
+- bg-de
+- bn-ga
+- br-es_PR
+- br-hi
+- br-la
+- br-uz
+- br-yi
+- bs-szl
 language_bcp47:
 - ar-SY
 - bn-IN
@@ -248,21 +273,6 @@ language_bcp47:
 - zh-CN
 - zh-HK
 - zh-TW
-license:
-- bsd-3-clause
-multilinguality:
-- multilingual
-size_categories:
-- 10K<n<100K
-- 1K<n<10K
-- n<1K
-source_datasets:
-- original
-task_categories:
-- translation
-task_ids: []
-paperswithcode_id: null
-pretty_name: Opus Ubuntu
 dataset_info:
 - config_name: as-bs
   features:
@@ -276,10 +286,10 @@ dataset_info:
         - bs
   splits:
   - name: train
-    num_bytes: 1037811
+    num_bytes: 1037799
     num_examples: 8583
-  download_size: 229723
-  dataset_size: 1037811
+  download_size: 470874
+  dataset_size: 1037799
 - config_name: az-cs
   features:
   - name: id
@@ -292,10 +302,10 @@ dataset_info:
         - cs
   splits:
   - name: train
-    num_bytes: 17821
+    num_bytes: 17809
     num_examples: 293
-  download_size: 9501
-  dataset_size: 17821
+  download_size: 14637
+  dataset_size: 17809
 - config_name: bg-de
   features:
   - name: id
@@ -308,11 +318,11 @@ dataset_info:
         - de
   splits:
   - name: train
-    num_bytes: 27627
+    num_bytes: 27615
     num_examples: 184
-  download_size: 9994
-  dataset_size: 27627
-- config_name: br-es_PR
+  download_size: 16278
+  dataset_size: 27615
+- config_name: bn-ga
   features:
   - name: id
     dtype: string
@@ -320,15 +330,15 @@ dataset_info:
     dtype:
       translation:
         languages:
-        - br
-        - es_PR
+        - bn
+        - ga
   splits:
   - name: train
-    num_bytes: 8875
-    num_examples: 125
-  download_size: 5494
-  dataset_size: 8875
-- config_name: bn-ga
+    num_bytes: 584617
+    num_examples: 7324
+  download_size: 272247
+  dataset_size: 584617
+- config_name: br-es_PR
   features:
   - name: id
     dtype: string
@@ -336,14 +346,14 @@ dataset_info:
     dtype:
      translation:
        languages:
-        - bn
-        - ga
+        - br
+        - es_PR
   splits:
   - name: train
-    num_bytes: 584629
-    num_examples: 7324
-  download_size: 142710
-  dataset_size: 584629
+    num_bytes: 8863
+    num_examples: 125
+  download_size: 8194
+  dataset_size: 8863
 - config_name: br-hi
   features:
   - name: id
@@ -356,10 +366,10 @@ dataset_info:
         - hi
   splits:
   - name: train
-    num_bytes: 1300081
+    num_bytes: 1300057
     num_examples: 15551
-  download_size: 325415
-  dataset_size: 1300081
+  download_size: 641803
+  dataset_size: 1300057
 - config_name: br-la
   features:
   - name: id
@@ -372,11 +382,11 @@ dataset_info:
         - la
   splits:
   - name: train
-    num_bytes: 29341
+    num_bytes: 29329
     num_examples: 527
-  download_size: 11565
-  dataset_size: 29341
-- config_name: bs-szl
+  download_size: 17723
+  dataset_size: 29329
+- config_name: br-uz
   features:
   - name: id
     dtype: string
@@ -384,15 +394,15 @@ dataset_info:
     dtype:
       translation:
         languages:
-        - bs
-        - szl
+        - br
+        - uz
   splits:
   - name: train
-    num_bytes: 41116
-    num_examples: 646
-  download_size: 18134
-  dataset_size: 41116
-- config_name: br-uz
+    num_bytes: 110266
+    num_examples: 1416
+  download_size: 62660
+  dataset_size: 110266
+- config_name: br-yi
   features:
   - name: id
     dtype: string
@@ -401,14 +411,14 @@ dataset_info:
       translation:
         languages:
         - br
-        - uz
+        - yi
   splits:
   - name: train
-    num_bytes: 110278
-    num_examples: 1416
-  download_size: 33595
-  dataset_size: 110278
-- config_name: br-yi
+    num_bytes: 172834
+    num_examples: 2799
+  download_size: 77870
+  dataset_size: 172834
+- config_name: bs-szl
   features:
   - name: id
     dtype: string
@@ -416,25 +426,55 @@ dataset_info:
     dtype:
       translation:
         languages:
-        - br
-        - yi
+        - bs
+        - szl
   splits:
   - name: train
-    num_bytes: 172846
-    num_examples: 2799
-  download_size: 41956
-  dataset_size: 172846
-config_names:
-- as-bs
-- az-cs
-- bg-de
-- bn-ga
-- br-es_PR
-- br-hi
-- br-la
-- br-uz
-- br-yi
-- bs-szl
+    num_bytes: 41104
+    num_examples: 646
+  download_size: 30035
+  dataset_size: 41104
+configs:
+- config_name: as-bs
+  data_files:
+  - split: train
+    path: as-bs/train-*
+- config_name: az-cs
+  data_files:
+  - split: train
+    path: az-cs/train-*
+- config_name: bg-de
+  data_files:
+  - split: train
+    path: bg-de/train-*
+- config_name: bn-ga
+  data_files:
+  - split: train
+    path: bn-ga/train-*
+- config_name: br-es_PR
+  data_files:
+  - split: train
+    path: br-es_PR/train-*
+- config_name: br-hi
+  data_files:
+  - split: train
+    path: br-hi/train-*
+- config_name: br-la
+  data_files:
+  - split: train
+    path: br-la/train-*
+- config_name: br-uz
+  data_files:
+  - split: train
+    path: br-uz/train-*
+- config_name: br-yi
+  data_files:
+  - split: train
+    path: br-yi/train-*
+- config_name: bs-szl
+  data_files:
+  - split: train
+    path: bs-szl/train-*
 ---
 
 # Dataset Card for Opus Ubuntu
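
Because each config now maps to plain Parquet shards (see the `configs` block above), a single shard can also be read directly with pandas. A sketch, assuming the `opus_ubuntu` repo id and that `huggingface_hub` is installed so pandas can resolve `hf://` paths:

```python
import pandas as pd

# Read one shard straight from the Hub; the repo id below is an assumption.
df = pd.read_parquet("hf://datasets/opus_ubuntu/as-bs/train-00000-of-00001.parquet")

print(df.columns.tolist())  # ['id', 'translation']
print(len(df))              # 8583 rows, matching num_examples for as-bs above
```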
as-bs/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5137046400c8c1c34f715af401e7a431ec60d93095d5fa07a47b193f83712b98
+size 470874

az-cs/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10fe96038f387a9687f10a31eee299fe6602cd2f6afc6c330ee68605f51cd867
+size 14637

bg-de/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07cb365df607ba9349ba31d33b32bdb25c1c361ae8a88612137e0e7c42699baa
+size 16278

bn-ga/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:769d63733c7a3d89ac643e51d27b09963ab6e4170d44d68cb11b8963f586f6bd
+size 272247

br-es_PR/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80f33137cdf46514341d24312eb5a9650a249abc643d7772f029fb60b7449a5c
+size 8194

br-hi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:426feadc1a674bb9c5e10766700cdb0843fa341e0e0aed915a961adb213526c5
+size 641803

br-la/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:594abd20626306e2ccf2218ebb5b272acadf8bf905c66738daa456225f550919
+size 17723

br-uz/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7187819326b2bcf220bc447b1a82b212950120ba6d6d431259933d4e002e64d
+size 62660

br-yi/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d17060e37b40482bb7150f150cd2e953825a5e68e3fc9c237287e7bbb4b6f573
+size 77870

bs-szl/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c9130608928808af043eb4a061b6ccbc5a5015913cde06ba23033d7fd5557c8e
+size 30035
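
The three-line files added above are Git LFS pointers: the Parquet payload lives in LFS storage and is identified by its SHA-256 (`oid`) and byte size. A small sketch of checking a locally downloaded shard against a pointer, using only the standard library (the local path is illustrative):

```python
import hashlib
import os

def matches_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the file's size and SHA-256 match the LFS pointer fields."""
    if os.path.getsize(local_path) != expected_size:
        return False
    sha = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
    return sha.hexdigest() == expected_oid

# oid and size copied from the as-bs pointer above; the local path is assumed.
print(matches_lfs_pointer(
    "as-bs/train-00000-of-00001.parquet",
    "5137046400c8c1c34f715af401e7a431ec60d93095d5fa07a47b193f83712b98",
    470874,
))
```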
opus_ubuntu.py DELETED
@@ -1,132 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-import os
-
-import datasets
-
-
-_DESCRIPTION = """\
-A parallel corpus of Ubuntu localization files. Source: https://translations.launchpad.net
-244 languages, 23,988 bitexts
-total number of files: 30,959
-total number of tokens: 29.84M
-total number of sentence fragments: 7.73M
-"""
-_HOMEPAGE_URL = "http://opus.nlpl.eu/Ubuntu.php"
-_CITATION = """\
-@InProceedings{TIEDEMANN12.463,
-author = {J{\"o}rg Tiedemann},
-title = {Parallel Data, Tools and Interfaces in OPUS},
-booktitle = {Proceedings of the Eight International Conference on Language Resources and Evaluation (LREC'12)},
-year = {2012},
-month = {may},
-date = {23-25},
-address = {Istanbul, Turkey},
-editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Mehmet Ugur Dogan and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis},
-publisher = {European Language Resources Association (ELRA)},
-isbn = {978-2-9517408-7-7},
-language = {english}
-}
-"""
-
-_VERSION = "1.0.0"
-_BASE_NAME = "Ubuntu.{}.{}"
-_BASE_URL = "https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/{}-{}.txt.zip"
-# Please note that only few pairs are shown here. You can use config to generate data for all language pairs
-_LANGUAGE_PAIRS = [
-    ("as", "bs"),
-    ("az", "cs"),
-    ("bg", "de"),
-    ("br", "es_PR"),
-    ("bn", "ga"),
-    ("br", "hi"),
-    ("br", "la"),
-    ("bs", "szl"),
-    ("br", "uz"),
-    ("br", "yi"),
-]
-
-
-class UbuntuConfig(datasets.BuilderConfig):
-    def __init__(self, *args, lang1=None, lang2=None, **kwargs):
-        super().__init__(
-            *args,
-            name=f"{lang1}-{lang2}",
-            **kwargs,
-        )
-        self.lang1 = lang1
-        self.lang2 = lang2
-
-
-class OpusUbuntu(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIGS = [
-        UbuntuConfig(
-            lang1=lang1,
-            lang2=lang2,
-            description=f"Translating {lang1} to {lang2} or vice versa",
-            version=datasets.Version(_VERSION),
-        )
-        for lang1, lang2 in _LANGUAGE_PAIRS
-    ]
-    BUILDER_CONFIG_CLASS = UbuntuConfig
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "translation": datasets.Translation(languages=(self.config.lang1, self.config.lang2)),
-                },
-            ),
-            supervised_keys=None,
-            homepage=_HOMEPAGE_URL,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        def _base_url(lang1, lang2):
-            return _BASE_URL.format(lang1, lang2)
-
-        download_url = _base_url(self.config.lang1, self.config.lang2)
-        path = dl_manager.download_and_extract(download_url)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"datapath": path},
-            )
-        ]
-
-    def _generate_examples(self, datapath):
-        l1, l2 = self.config.lang1, self.config.lang2
-        folder = l1 + "-" + l2
-        l1_file = _BASE_NAME.format(folder, l1)
-        l2_file = _BASE_NAME.format(folder, l2)
-        l1_path = os.path.join(datapath, l1_file)
-        l2_path = os.path.join(datapath, l2_file)
-        with open(l1_path, encoding="utf-8") as f1, open(l2_path, encoding="utf-8") as f2:
-            for sentence_counter, (x, y) in enumerate(zip(f1, f2)):
-                x = x.strip()
-                y = y.strip()
-                result = (
-                    sentence_counter,
-                    {
-                        "id": str(sentence_counter),
-                        "translation": {l1: x, l2: y},
-                    },
-                )
-                yield result
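
The deleted builder downloaded each pair's zipped moses files from OPUS (`_BASE_URL`) and paired the two aligned text files line by line. For reference, the same access pattern can be reproduced without the `datasets` builder; a sketch that assumes the OPUS URL and the `Ubuntu.<pair>.<lang>` file names inside the archive are still served as the script expected:

```python
import io
import urllib.request
import zipfile

lang1, lang2 = "br", "hi"
pair = f"{lang1}-{lang2}"
url = f"https://object.pouta.csc.fi/OPUS-Ubuntu/v14.10/moses/{pair}.txt.zip"

# Fetch the zipped moses pair and combine the two files line by line,
# mirroring what _generate_examples did.
with urllib.request.urlopen(url) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))

with archive.open(f"Ubuntu.{pair}.{lang1}") as f1, archive.open(f"Ubuntu.{pair}.{lang2}") as f2:
    for i, (x, y) in enumerate(zip(f1, f2)):
        example = {"id": str(i), "translation": {lang1: x.decode("utf-8").strip(),
                                                 lang2: y.decode("utf-8").strip()}}
        if i < 3:
            print(example)
        else:
            break
```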