# Dataset card metadata (scraped from the Hugging Face Hub page):
#   Tasks:      Token Classification
#   Sub-tasks:  part-of-speech
#   Modalities: Text
#   Formats:    parquet
#   Size:       100K - 1M
#   ArXiv / License: see the dataset card on the Hub
"""TODO(xtreme): Add a description here.""" | |
import csv | |
import glob | |
import os | |
import datasets | |
_UD_POS_LANG = { | |
"Afrikaans": 'af', | |
"Arabic": 'ar', | |
"Basque": 'eu', | |
"Bulgarian": 'bg', | |
"Dutch": 'nl', | |
"English": 'en', | |
"Estonian": 'et', | |
"Finnish": 'fi', | |
"French": 'fr', | |
"German": 'de', | |
"Greek": 'el', | |
"Hebrew": 'he', | |
"Hindi": 'hi', | |
"Hungarian": 'hu', | |
"Indonesian": 'id', | |
"Italian": 'it', | |
"Japanese": 'ja', | |
"Kazakh": 'kk', | |
"Korean": 'ko', | |
"Chinese": 'zh', | |
"Marathi": 'mr', | |
"Persian": 'fa', | |
"Portuguese": 'pt', | |
"Russian": 'ru', | |
"Spanish": 'es', | |
"Tagalog": 'tl', | |
"Tamil": 'ta', | |
"Telugu": 'te', | |
"Thai": 'th', | |
"Turkish": 'tr', | |
"Urdu": 'ur', | |
"Vietnamese": 'vi', | |
"Yoruba": 'yo', | |
} | |
_DATA_URLS = { | |
"2_5": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz", | |
"2_7": 'https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3424/ud-treebanks-v2.7.tgz', | |
} | |
def generate_examples(folder: str, lang: str, split: str): | |
"""Yields examples.""" | |
for path in glob.glob(f'{folder}/UD_{lang}*/*{split}.conllu'): | |
# For lang other than [see below], we exclude Arabic-NYUAD which does not contains any words, only _ | |
if lang in ["Kazakh", "Tagalog", "Thai", "Yoruba"] or "NYUAD" not in path: | |
print('read', path) | |
with open(path) as file: | |
data = csv.reader(file, delimiter="\t", quoting=csv.QUOTE_NONE) | |
tokens = [] | |
pos_tags = [] | |
for id_row, row in enumerate(data): | |
if len(row) >= 10 and row[1] != "_" and row[3] != "_": | |
tokens.append(row[1]) | |
pos_tags.append(row[3]) | |
if len(row) == 0 and len(tokens) > 0: | |
yield {"tokens": tokens, "pos_tags": pos_tags} | |
tokens = [] | |
pos_tags = [] | |
def main(): | |
features = datasets.Features({ | |
"tokens": datasets.Sequence(datasets.Value("string")), | |
"pos_tags": datasets.Sequence(datasets.features.ClassLabel(names=[ | |
"ADJ", | |
"ADP", | |
"ADV", | |
"AUX", | |
"CCONJ", | |
"DET", | |
"INTJ", | |
"NOUN", | |
"NUM", | |
"PART", | |
"PRON", | |
"PROPN", | |
"PUNCT", | |
"SCONJ", | |
"SYM", | |
"VERB", | |
"X", | |
])), | |
}) | |
path = 'ud-treebanks-v2.7/' | |
if '2.7' in path: # xtreme-r | |
_UD_POS_LANG.update({ | |
'Lithuanian': 'lt', 'Polish': 'pl', 'Ukrainian': 'uk', 'Wolof': 'wo', 'Romanian': 'ro', | |
}) | |
for lang, code in _UD_POS_LANG.items(): | |
os.makedirs(f'{path}/parquet/{code}/', exist_ok=True) | |
splits = ['test'] if code != 'en' else ['train', 'dev', 'test'] | |
for split in splits: | |
ds = datasets.Dataset.from_generator( | |
generate_examples, features=features, keep_in_memory=True, gen_kwargs=dict( | |
folder=path, lang=lang, split=split | |
) | |
) | |
sp = f'{path}/parquet/{code}/{split}.parquet' | |
ds.to_parquet(sp) | |
print('save', sp) | |
if __name__ == '__main__': | |
main() | |