Datasets:

Modalities: Text
Formats: csv
ArXiv:
Libraries: Datasets, pandas
License:
kargaranamir committed on
Commit
13ca861
1 Parent(s): be00a03

Delete GlotSparse.py

Files changed (1)
  1. GlotSparse.py +0 -152
GlotSparse.py DELETED
@@ -1,152 +0,0 @@
- # coding=utf-8
- # Copyright 2023 The GlotSparse Authors.
- # Lint as: python3
- """
- GlotSparse
- """
-
- """ This dataset loading script is built based on the Hugging Face tutorial and the OSCAR-2301 and CulturaX dataset scripts. """
-
- import os
- import collections
-
- import pandas as pd
-
- import datasets
-
- logger = datasets.logging.get_logger(__name__)
-
- _DESCRIPTION = """\
- GlotSparse \
- """
-
- _URL = "https://huggingface.co/datasets/kargaranamir/GlotSparse"
-
- _LICENSE = """
- We do not own any of the text from which these data have been extracted.
- We license the actual packaging, the metadata and the annotations of these data under cc0-1.0 (waiving all of the rights under copyright law).
-
- If you are a website/dataset owner and do not want your data to be included in this corpus, please send us an email at [email protected] .
- """
-
-
- _CITATION = r"""\
- @misc{GlotSparse,
-     author = {Kargaran, Amir Hossein},
-     title = {GlotSparse Corpus},
-     year = {2023},
-     publisher = {Github},
-     journal = {Github Repository},
-     howpublished = {{\\url{https://huggingface.co/datasets/kargaranamir/GlotSparse}}},
- }
- """
-
- _BASE_DATA_PAT_FORMAT_STR = "{language}/{language}.csv"
-
-
- def _languages():
-     """Create the sorted dictionary of language codes and language names.
-     Returns:
-         The sorted dictionary as an instance of `collections.OrderedDict`.
-     """
-     langs = {
-         "Balochi_Arab": "bal_Arab",
-         "Twi_Latn": "twi_Latn",
-         "Fanti_Latn": "fat_Latn",
-         "South-Azerbaijani_Arab": "azb_Arab",
-         "Southern-Kurdish_Arab": "sdh_Arab",
-         "Gurani_Arab": "hac_Arab",
-         "Brahui_Arab": "brh_Arab",
-         "Southern-Uzbek_Arab": "uzs_Arab",
-         "Kirmanjki_Latn": "kiu_Latn",
-         "Gilaki_Arab": "glk_Arab",
-     }
-
-     langs = {v: k for k, v in langs.items()}
-     return collections.OrderedDict(sorted(langs.items()))
-
-
- class GlotConfig(datasets.BuilderConfig):
-     """GlotSparse corpus."""
-
-     def __init__(self, language: str, **kwargs):
-         """BuilderConfig for GlotSparse.
-         Args:
-             language (str): A 3-letter ISO 639-3 code followed by the writing script, joined by an underscore. For example: "glk_Arab", "fat_Latn".
-             **kwargs: Keyword arguments forwarded to super.
-         """
-         # Validate the language.
-         if language not in _languages():
-             raise ValueError("Invalid language: %s " % language)
-
-         name = f"{language}"
-         description = (
-             f"Original {_languages()[language]} GlotSparse dataset from 2023"
-         )
-         super(GlotConfig, self).__init__(
-             name=name, description=description, **kwargs
-         )
-
-         # Additional attributes
-         self.language = language
-         self.base_data_path = _BASE_DATA_PAT_FORMAT_STR.format(language=language)
-
-
- class Glot(datasets.GeneratorBasedBuilder):
-     """GlotSparse"""
-
-     BUILDER_CONFIGS = [
-         GlotConfig(  # pylint: disable=g-complex-comprehension
-             language=language,
-             version=datasets.Version("1.0.0"),
-         )
-         for language in _languages()
-     ]
-     BUILDER_CONFIG_CLASS = GlotConfig
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "Source": datasets.Value("string"),
-                     "Content": datasets.Value("string"),
-                     "Length": datasets.Value("int64"),
-                     "Script": datasets.Value("string"),
-                     "ISO639-3": datasets.Value("string"),
-                     "Language": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage=_URL,
-             citation=_CITATION,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-
-         data_urls = [self.config.base_data_path]
-         doc_files = dl_manager.download(
-             [url for url in data_urls if url.endswith(".csv")]
-         )
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
-             ),
-         ]
-
-
-     def _generate_examples(self, doc_files):
-         """Yield examples by iterating over all the CSV files."""
-         for doc_i, doc_path in enumerate(doc_files):
-             df = pd.read_csv(doc_path)
-
-             for index, row in df.iterrows():
-                 yield f"{doc_i}_{index}", {
-                     "ISO639-3": row["ISO639-3"],
-                     "Language": row["Language"],
-                     "Content": row["Content"],
-                     "Script": row["Script"],
-                     "Length": row["Length"],
-                     "Source": row["Source"],
-                 }
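
For context, the snippet below is a minimal sketch of how a script like this is typically invoked and how the data can still be read after the script's removal. It assumes a builder config name produced by _languages() (e.g. "bal_Arab") and the CSV layout "{language}/{language}.csv" from _BASE_DATA_PAT_FORMAT_STR above; newer versions of the datasets library may also require trust_remote_code=True for script-based datasets.

# Sketch only: the config name "bal_Arab" and the CSV path are taken from the
# script above; they are illustrative and may not match the current repo state.
import datasets
import pandas as pd

# While GlotSparse.py was present, each language code was a builder config:
ds = datasets.load_dataset("kargaranamir/GlotSparse", "bal_Arab", split="train")
print(ds[0]["Content"])

# Without the script, the per-language CSV can be read directly from the Hub:
url = "https://huggingface.co/datasets/kargaranamir/GlotSparse/resolve/main/bal_Arab/bal_Arab.csv"
df = pd.read_csv(url)
print(df[["ISO639-3", "Language", "Length"]].head())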