# coding=utf-8
"""STS Benchmark Multilingual dataset"""
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """\
STS Benchmark Multilingual dataset, machine-translated with DeepL (the Arabic split is translated with Google Translate).
"""
_LANGUAGES = ["de", "en", "es", "fr", "it", "nl", "pl", "pt", "ru", "zh"]
_NEW_LANGUAGES = ['ar', 'id']
class STSBMultilingual(datasets.GeneratorBasedBuilder):
"""STS Benchmark Multilingual"""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name=name,
version=datasets.Version("1.0.0"),
description=f"The STS Benchmark for {name} language{'' if name == 'en' else ' by deepl'}.",
)
for name in _LANGUAGES
] + [
datasets.BuilderConfig(
name='id',
version=datasets.Version("1.0.0"),
description=f"The STS Benchmark for id language by deepl.",
),
datasets.BuilderConfig(
name='ar',
version=datasets.Version("1.0.0"),
description=f"The STS Benchmark for id language by google translate.",
)
]
DEFAULT_CONFIG_NAME = 'en'
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"sentence1": datasets.Value("string"),
"sentence2": datasets.Value("string"),
"score": datasets.Value("float32"),
},
),
supervised_keys=None,
homepage="https://github.com/PhilipMay/stsb-multi-mt",
)
    def _split_generators(self, dl_manager):
        # ar and id are shipped as local JSONL files next to this script;
        # every other language reuses the test split of the stsb_multi_mt dataset.
        if self.config.name == "ar":
            path_or_ds = dl_manager.download("test_ar_google.jsonl")
        elif self.config.name == "id":
            path_or_ds = dl_manager.download("test_id_deepl.jsonl")
        else:
            path_or_ds = datasets.load_dataset("stsb_multi_mt", self.config.name, split="test")
return [
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"path_or_ds": path_or_ds},
),
]
    def _generate_examples(self, path_or_ds):
        """Yields examples."""
        if isinstance(path_or_ds, datasets.Dataset):
            # Rows loaded from stsb_multi_mt: rename similarity_score to score.
            for i, ins in enumerate(path_or_ds):
                yield i, {
                    "sentence1": ins["sentence1"],
                    "sentence2": ins["sentence2"],
                    "score": ins["similarity_score"],
                }
        else:
            # Local JSONL file: one {"sentence1", "sentence2", "score"} object per line.
            # Explicit encoding matters for the Arabic split on platforms whose
            # default encoding is not UTF-8.
            with open(path_or_ds, encoding="utf-8") as f:
                for i, line in enumerate(f):
                    yield i, json.loads(line)
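

# Usage sketch, not part of the loader itself. It assumes this script is saved
# locally (the name "stsb_multilingual.py" is illustrative) with
# test_ar_google.jsonl and test_id_deepl.jsonl placed next to it, matching the
# file names in _split_generators; recent versions of the datasets library may
# also require trust_remote_code=True when loading a local script:
#
#   import datasets
#   ds = datasets.load_dataset("stsb_multilingual.py", "de", split="test")
#   print(ds[0])  # {'sentence1': '...', 'sentence2': '...', 'score': ...}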