"""SCAT: Supporting Context for Ambiguous Translations""" |

import re
from typing import Dict

import datasets
from datasets import DownloadManager

_CITATION = """\
@inproceedings{yin-etal-2021-context,
    title = "Do Context-Aware Translation Models Pay the Right Attention?",
    author = "Yin, Kayo  and
      Fernandes, Patrick  and
      Pruthi, Danish  and
      Chaudhary, Aditi  and
      Martins, Andr{\\'e} F. T.  and
      Neubig, Graham",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.65",
    doi = "10.18653/v1/2021.acl-long.65",
    pages = "788--801",
}
"""

_DESCRIPTION = """\
The Supporting Context for Ambiguous Translations corpus (SCAT) is a dataset
of English-to-French translations annotated with the human rationales used to
resolve ambiguous pronoun anaphora in multi-sentence translation.
"""
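
# Example usage (a sketch; assumes this script is published as `inseq/scat` on
# the Hugging Face Hub, consistent with `_URL` below):
#
#     from datasets import load_dataset
#     scat = load_dataset("inseq/scat")
#     sample = scat["train"][0]
#     print(sample["en"], "->", sample["fr"])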

_URL = "https://huggingface.co/datasets/inseq/scat/raw/main/filtered_scat"

_HOMEPAGE = "https://github.com/neulab/contextual-mt/tree/master/data/scat"

_LICENSE = "Unknown"


class ScatConfig(datasets.BuilderConfig):
    def __init__(self, source_language: str, target_language: str, **kwargs):
        """BuilderConfig for SCAT.

        Args:
            source_language: `str`, source language for translation.
            target_language: `str`, target language for translation.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.source_language = source_language
        self.target_language = target_language


class Scat(datasets.GeneratorBasedBuilder):
    """English-French translation pairs annotated with supporting context for ambiguous pronouns."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [ScatConfig(name="sentences", source_language="en", target_language="fr")]

    DEFAULT_CONFIG_NAME = "sentences"

    @staticmethod
    def clean_string(txt: str):
        """Strip the pronoun (<p>...</p>) and supporting-context (<hon>/<hoff>) annotation tags."""
        for tag in ("<p>", "</p>", "<hon>", "<hoff>"):
            txt = txt.replace(tag, "")
        return txt

    @staticmethod
    def swap_pronoun(txt: str):
        """Swap the gender of the tagged French pronoun to build a contrastive translation."""
        matches = re.findall(r"<p>([^<]*)</p>", txt)
        if not matches:
            # No tagged pronoun to swap: return the sentence unchanged
            # (the original indexing would raise an IndexError here).
            return txt
        pron: str = matches[0]
        swaps = {
            "elles": "ils",
            "elle": "il",
            "ils": "elles",
            "il": "elle",
            "un": "une",
            "une": "un",
        }
        new_pron = swaps.get(pron.lower(), pron)
        if pron.istitle():
            new_pron = new_pron.capitalize()
        return txt.replace(f"<p>{pron}</p>", f"<p>{new_pron}</p>")

    def _info(self):
        # The `*_with_tags` fields keep the <p> pronoun markers and the
        # <hon>/<hoff> supporting-context highlights; the plain fields have
        # those tags stripped via `clean_string`.
        features = datasets.Features(
            {
                "id": datasets.Value("int32"),
                "context_en": datasets.Value("string"),
                "en": datasets.Value("string"),
                "context_fr": datasets.Value("string"),
                "fr": datasets.Value("string"),
                "contrast_fr": datasets.Value("string"),
                "context_en_with_tags": datasets.Value("string"),
                "en_with_tags": datasets.Value("string"),
                "context_fr_with_tags": datasets.Value("string"),
                "fr_with_tags": datasets.Value("string"),
                "contrast_fr_with_tags": datasets.Value("string"),
                "has_supporting_context": datasets.Value("bool"),
                "has_supporting_preceding_context": datasets.Value("bool"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Returns SplitGenerators."""
        filepaths = {}
        splits = ["train", "valid", "test"]
        for split in splits:
            filepaths[split] = {}
            for lang in ["en", "fr"]:
                for ftype in ["context", ""]:
                    # e.g. "filtered.train.context.en" or "filtered.train.en"
                    fname = f"filtered.{split}{'.' + ftype if ftype else ''}.{lang}"
                    name = f"{ftype}_{lang}" if ftype else lang
                    filepaths[split][name] = dl_manager.download_and_extract(f"{_URL}/{fname}")
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepaths": filepaths[split]},
            )
            for split, split_name in zip(splits, ["train", "validation", "test"])
        ]

    def _generate_examples(self, filepaths: Dict[str, str]):
        """Yields examples as (key, example) tuples."""
        with open(filepaths["en"], encoding="utf-8") as f:
            en = f.read().splitlines()
        with open(filepaths["fr"], encoding="utf-8") as f:
            fr = f.read().splitlines()
        with open(filepaths["context_en"], encoding="utf-8") as f:
            context_en = f.read().splitlines()
        with open(filepaths["context_fr"], encoding="utf-8") as f:
            context_fr = f.read().splitlines()
        # `fr_line` avoids shadowing the file handle name `f` used above.
        for i, (e, fr_line, ce, cf) in enumerate(zip(en, fr, context_en, context_fr)):
            allfields = " ".join([e, fr_line, ce, cf])
            # An example has supporting context if any field carries a <hon>...<hoff> highlight.
            has_supporting_context = "<hon>" in allfields and "<hoff>" in allfields
            contrast_fr = self.swap_pronoun(fr_line)
            yield i, {
                "id": i,
                "context_en": self.clean_string(ce),
                "en": self.clean_string(e),
                "context_fr": self.clean_string(cf),
                "fr": self.clean_string(fr_line),
                "contrast_fr": self.clean_string(contrast_fr),
                "context_en_with_tags": ce,
                "en_with_tags": e,
                "context_fr_with_tags": cf,
                "fr_with_tags": fr_line,
                "contrast_fr_with_tags": contrast_fr,
                "has_supporting_context": has_supporting_context,
                "has_supporting_preceding_context": "<hon>" in cf,
            }
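
# Minimal local smoke test (a sketch; assumes the data files under `_URL` are
# reachable and uses the standard DatasetBuilder API):
#
#     if __name__ == "__main__":
#         builder = Scat()
#         builder.download_and_prepare()
#         ds = builder.as_dataset(split="test")
#         print(ds[0]["fr_with_tags"], "|", ds[0]["contrast_fr_with_tags"])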