"""An automatically created Slovene thesaurus."""

import logging
import os
import xml.etree.ElementTree as ET

import datasets

_CITATION = """\
@article{krek2017translation,
    title={From translation equivalents to synonyms: creation of a Slovene thesaurus using word co-occurrence network analysis},
    author={Krek, Simon and Laskowski, Cyprian and Robnik-{\\v{S}}ikonja, Marko},
    journal={Proceedings of eLex},
    pages={93--109},
    year={2017}
}
"""

_DESCRIPTION = """\
This is an automatically created Slovene thesaurus, built from the Slovene data in a comprehensive
English–Slovenian dictionary, a monolingual dictionary, and a corpus. Synonym candidates were obtained by
network analysis of the word co-occurrence graph of the bilingual dictionary, combined with additional
information from the distributional thesaurus data available as part of the Sketch Engine tool (extracted
from the 1.2-billion-word Gigafida corpus) and from the monolingual dictionary.
"""

_HOMEPAGE = "http://hdl.handle.net/11356/1166"

_LICENSE = "Creative Commons - Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"

_URLS = {
    "slo_thesaurus": "https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1166/CJVT_Thesaurus-v1.0.zip",
}


class SloThesaurus(datasets.GeneratorBasedBuilder):
    """An automatically created Slovene thesaurus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # Both synonym group types ("core" and "near") share the same per-group structure.
        group_features = {
            "id_words": datasets.Sequence(datasets.Value("string")),
            "words": datasets.Sequence(datasets.Value("string")),
            "scores": datasets.Sequence(datasets.Value("float32")),
            "domains": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
        }
        features = datasets.Features(
            {
                "id_headword": datasets.Value("string"),
                "headword": datasets.Value("string"),
                "groups_core": [group_features],
                "groups_near": [group_features],
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
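        # The CLARIN.SI archive extracts into a directory containing the single file
        # `CJVT_Thesaurus-v1.0.xml`, so the whole thesaurus is exposed as one "train" split.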
        url = _URLS["slo_thesaurus"]
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"file_path": os.path.join(data_dir, "CJVT_Thesaurus-v1.0.xml")}
            )
        ]

    def _generate_examples(self, file_path):
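        # NOTE: the sketch below is inferred from the parsing code in this method and is
        # illustrative rather than an official schema; element/attribute names come from
        # the code, the sample values do not. Each <entry> is expected to look roughly like:
        #
        #   <entry>
        #     <headword id="...">beseda</headword>
        #     <groups_core>
        #       <group>
        #         <candidate score="0.42">
        #           <s id="...">sopomenka</s>
        #           <labels><la>domena</la></labels>
        #         </candidate>
        #         ...
        #       </group>
        #       ...
        #     </groups_core>
        #     <groups_near>  <!-- same structure as groups_core -->
        #       ...
        #     </groups_near>
        #   </entry>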
        curr_doc = ET.parse(file_path)
        root = curr_doc.getroot()

        def parse_group(group):
            """Collect the candidates of a single <group> element into parallel lists."""
            parsed_group = {"id_words": [], "words": [], "scores": [], "domains": []}
            for candidate in group.iterfind("candidate"):
                candidate_s = candidate.find("s")
                candidate_domains = candidate.find("labels")
                if candidate_domains is not None:
                    candidate_domains = [la.text.strip() for la in candidate_domains.findall("la")]
                else:
                    candidate_domains = []

                parsed_group["id_words"].append(candidate_s.attrib["id"])
                parsed_group["words"].append(candidate_s.text.strip())
                parsed_group["scores"].append(float(candidate.attrib["score"]))
                parsed_group["domains"].append(candidate_domains)

            return parsed_group

        for idx_entry, curr_entry in enumerate(root.iterfind(".//entry")):
            head_word = curr_entry.find("headword")
            if head_word is None:
                logging.warning("<headword> is missing for an entry, which should likely not happen. "
                                "Please open an issue on the dataset repository if you are seeing this.")
                head_word = {"text": "UNK_headword", "id": "NA_id"}
            else:
                head_word = {"text": head_word.text.strip(), "id": head_word.attrib["id"]}

            # Core and near synonym groups share the same XML layout, so both are parsed
            # with the same helper; either container element may be absent.
            core_groups = curr_entry.find("groups_core")
            if core_groups is not None:
                all_core_groups = [parse_group(group) for group in core_groups.iterfind("group")]
            else:
                all_core_groups = []

            near_groups = curr_entry.find("groups_near")
            if near_groups is not None:
                all_near_groups = [parse_group(group) for group in near_groups.iterfind("group")]
            else:
                all_near_groups = []

            yield idx_entry, {
                "id_headword": head_word["id"],
                "headword": head_word["text"],
                "groups_core": all_core_groups,
                "groups_near": all_near_groups,
            }
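# A minimal local smoke test, assuming this script is saved as `slo_thesaurus.py` and
# that the installed `datasets` version still supports script-based datasets (recent
# releases may additionally require `trust_remote_code=True`):
if __name__ == "__main__":
    thesaurus = datasets.load_dataset("slo_thesaurus.py", split="train")
    # Each record follows the features declared in `_info`:
    # {"id_headword": ..., "headword": ..., "groups_core": [...], "groups_near": [...]}
    print(thesaurus[0])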