"""TODO""" |
|
|
|
|
|
import pyarrow as pa |
|
import pyarrow.parquet as pq |
|
from datasets import Value, Sequence |
|
import datasets |
|
from datasets.config import PYARROW_VERSION |
|
from datasets.utils.logging import get_logger |
|
|
|

logger = get_logger(__name__)

# Fail fast on pyarrow versions that lack the APIs this script relies on.
if PYARROW_VERSION.major <= 6:
    msg = f"pyarrow version >= 7.0.0 required for this loading script, you have {PYARROW_VERSION}"
    logger.warning(msg)
    raise RuntimeError(msg)

_DESCRIPTION = "OCR text and metadata for historic newspapers from the Europeana Newspapers collection."

_HOMEPAGE = "TODO"

_LANG_CONFIGS = {"fi", "sv"}
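
# Parquet shards keyed by language code and then by decade; the selected
# shards are fetched with `dl_manager.download` in `_split_generators`.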
_DATA = {
    "sv": {"1900": "sv_1900.parquet", "1910": "sv_1910.parquet"},
    "fi": {"1900": "fi_1900.parquet", "1910": "fi_1910.parquet"},
}


class EuropeanaNewspapersConfig(datasets.BuilderConfig):
    """BuilderConfig for the Europeana Newspapers dataset."""

    def __init__(
        self, *args, languages=None, min_decade=None, max_decade=None, **kwargs
    ):
        """BuilderConfig for the Europeana Newspapers dataset.

        Args:
            languages (:obj:`List[str]`): List of languages to load.
            min_decade (:obj:`int`, optional): Earliest decade to load, inclusive.
            max_decade (:obj:`int`, optional): Latest decade to load, inclusive.
            **kwargs: Keyword arguments forwarded to super.
        """
        if not languages:
            raise ValueError("languages must be a non-empty list of language codes")
        super().__init__(
            *args,
            name="+".join(languages),
            **kwargs,
        )
        for lang in languages:
            if lang not in _LANG_CONFIGS:
                raise ValueError(
                    f"{lang} is not a valid language key for this dataset, valid keys are {_LANG_CONFIGS}"
                )
        self.languages = languages
        self.min_decade = min_decade
        self.max_decade = max_decade


class EuropeanaNewspapers(datasets.GeneratorBasedBuilder):
    """Builder for the Europeana Newspapers dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = EuropeanaNewspapersConfig
    # One default config per language; a combined config (e.g. "fi+sv") can be
    # requested by passing `languages` to `load_dataset` (see the usage sketch
    # at the end of this file).
    BUILDER_CONFIGS = [
        EuropeanaNewspapersConfig(languages=[lang]) for lang in _LANG_CONFIGS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": Value(dtype="string"),
                    "mean_ocr": Value(dtype="float64"),
                    "std_ocr": Value(dtype="float64"),
                    "bounding_boxes": Sequence(
                        feature=Sequence(feature=Value(dtype="float64"), length=-1),
                    ),
                    "title": Value(dtype="string"),
                    "date": Value(dtype="string"),
                    "language": Sequence(feature=Value(dtype="string")),
                    "item_iiif_url": Value(dtype="string"),
                    "issue_uri": Value(dtype="string"),
                    "id": Value(dtype="string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license="Multiple: see the 'license' field of each sample.",
        )

    def _split_generators(self, dl_manager):
        languages = self.config.languages
        min_decade = self.config.min_decade
        max_decade = self.config.max_decade

        # Keep the files whose decade falls inside the inclusive
        # [min_decade, max_decade] range; a bound left as None is open.
        data_files = []
        for language in languages:
            for decade, file in _DATA[language].items():
                decade = int(decade)
                if (min_decade is None or decade >= min_decade) and (
                    max_decade is None or decade <= max_decade
                ):
                    data_files.append(file)

        files = dl_manager.download(data_files)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": files,
                },
            ),
        ]

    def _generate_examples(self, files):
        key = 0
        for file in files:
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                # Stream the Parquet file batch by batch rather than reading
                # it into memory in one go.
                for record_batch in parquet_file.iter_batches(batch_size=10_000):
                    pa_table = pa.Table.from_batches([record_batch])
                    for row in pa_table.to_pylist():
                        # Drop the "multi_language" column, which is not part
                        # of the features declared in `_info`.
                        row.pop("multi_language", None)
                        yield key, row
                        key += 1
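

# A minimal usage sketch, not part of the loading script itself. It assumes
# this file is saved locally as "europeana_newspapers.py" (the filename is an
# assumption); the config keyword arguments defined above are forwarded to
# `EuropeanaNewspapersConfig` by `load_dataset`:
#
#     from datasets import load_dataset
#
#     ds = load_dataset(
#         "europeana_newspapers.py",
#         languages=["fi", "sv"],
#         min_decade=1900,
#         max_decade=1910,
#         split="train",
#     )
#     print(ds[0]["title"], ds[0]["date"])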