GlotSparse / GlotSparse.py
# coding=utf-8
# Copyright 2023 The GlotSparse Authors.
# Lint as: python3
"""
GlotSparse
"""
""" This dataset loading script is built on the Hugging Face tutorial and the OSCAR-2301 and CulturaX dataset scripts. """
import os
import collections
import pandas as pd
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
GlotSparse \
"""
_URL = "https://huggingface.co/datasets/kargaranamir/GlotSparse"
_LICENSE = """
We do not own any of the text from which this data has been extracted.
We license the actual packaging, the metadata and the annotations of this data under cc0-1.0 (waiving all of the rights under copyright law).
If you are a website/dataset owner and do not want your data to be included in this corpus, please send us an email at [email protected] .
"""
_CITATION = r"""\
@misc{GlotSparse,
  author = {Kargaran, Amir Hossein},
  title = {GlotSparse Corpus},
  year = {2023},
  publisher = {Github},
  journal = {Github Repository},
  howpublished = {\url{https://huggingface.co/datasets/kargaranamir/GlotSparse}},
}
"""
_BASE_DATA_PAT_FORMAT_STR = "{language}/{language}.csv"
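# For example, the "bal_Arab" config resolves to the relative path "bal_Arab/bal_Arab.csv".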
def _languages():
    """Create the sorted dictionary of language codes and language names.
    Returns:
        The sorted dictionary as an instance of `collections.OrderedDict`.
    """
    langs = {
        "Balochi_Arab": "bal_Arab",
        "Twi_Latn": "twi_Latn",
        "Fanti_Latn": "fat_Latn",
        "South-Azerbaijani_Arab": "azb_Arab",
        "Southern-Kurdish_Arab": "sdh_Arab",
        "Gurani_Arab": "hac_Arab",
        "Brahui_Arab": "brh_Arab",
        "Southern-Uzbek_Arab": "uzs_Arab",
        "Kirmanjki_Latn": "kiu_Latn",
        "Gilaki_Arab": "glk_Arab",
    }
    # Invert the mapping so that language codes (e.g. "bal_Arab") are the keys.
    langs = {v: k for k, v in langs.items()}
    return collections.OrderedDict(sorted(langs.items()))
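# Illustrative check (not part of the original script): after the inversion
# above, codes map to names, e.g. _languages()["bal_Arab"] == "Balochi_Arab".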
class GlotConfig(datasets.BuilderConfig):
    """GlotSparse corpus."""

    def __init__(self, language: str, **kwargs):
        """BuilderConfig for GlotSparse.
        Args:
            language (str): A 3-letter language code followed by the writing script, joined by an underscore. For example: "glk_Arab", "fat_Latn".
            **kwargs: Keyword arguments forwarded to super.
        """
        # Validate the language.
        if language not in _languages():
            raise ValueError("Invalid language: %s " % language)
        name = f"{language}"
        description = (
            f"Original {_languages()[language]} GlotSparse dataset from 2023"
        )
        super(GlotConfig, self).__init__(
            name=name, description=description, **kwargs
        )
        # Additional attributes
        self.language = language
        self.base_data_path = _BASE_DATA_PAT_FORMAT_STR.format(language=language)
class Glot(datasets.GeneratorBasedBuilder):
    """GlotSparse"""

    BUILDER_CONFIGS = [
        GlotConfig(  # pylint: disable=g-complex-comprehension
            language=language,
            version=datasets.Version("1.0.0"),
        )
        for language in _languages()
    ]
    BUILDER_CONFIG_CLASS = GlotConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Source": datasets.Value("string"),
                    "Content": datasets.Value("string"),
                    "Length": datasets.Value("int64"),
                    "Script": datasets.Value("string"),
                    "ISO639-3": datasets.Value("string"),
                    "Language": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # Download only the per-language CSV file and expose it as a single TRAIN split.
        data_urls = [self.config.base_data_path]
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".csv")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]

    def _generate_examples(self, doc_files):
        """This function returns the data by iterating on all the files."""
        for doc_i, doc_path in enumerate(doc_files):
            df = pd.read_csv(doc_path)
            for index, row in df.iterrows():
                yield f"{doc_i}_{index}", {
                    "ISO639-3": row["ISO639-3"],
                    "Language": row["Language"],
                    "Content": row["Content"],
                    "Script": row["Script"],
                    "Length": row["Length"],
                    "Source": row["Source"],
                }
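# Usage sketch (illustrative, not part of the original script): with the
# `datasets` library installed, each language config of this repository can
# be loaded by its code, e.g.:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("kargaranamir/GlotSparse", "bal_Arab", split="train")
#   print(ds[0]["Content"])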