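"""SEACrowd dataloader for NewsPH, a raw collection of news articles in Filipino for language modelling."""
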
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """\
@inproceedings{cruz2021exploiting,
  title={Exploiting news article structure for automatic corpus generation of entailment datasets},
  author={Cruz, Jan Christian Blaise and Resabal, Jose Kristian and Lin, James and Velasco, Dan John and Cheng, Charibeth},
  booktitle={PRICAI 2021: Trends in Artificial Intelligence: 18th Pacific Rim International Conference on Artificial Intelligence, PRICAI 2021, Hanoi, Vietnam, November 8--12, 2021, Proceedings, Part II 18},
  pages={86--99},
  year={2021},
  organization={Springer}
}
"""
_DATASETNAME = "newsph"
_LANGUAGES = ["fil", "tgl"]
_DESCRIPTION = """\
Raw collection of news articles in Filipino which can be used for language modelling.
"""
_HOMEPAGE = "https://huggingface.co/datasets/newsph"
_LICENSE = Licenses.GPL_3_0.value
_LOCAL = False
_URLS = "https://huggingface.co/datasets/jcblaise/newsph/resolve/main/newsph.zip"
_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]
_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class NewsPhDataset(datasets.GeneratorBasedBuilder):
    """
    Raw collection of news articles in Filipino which can be used for language modelling.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="newsph_source",
            version=SOURCE_VERSION,
            description="newsph source schema",
            schema="source",
            subset_id="newsph",
        ),
        SEACrowdConfig(
            name="newsph_seacrowd_ssp",
            version=SEACROWD_VERSION,
            description="newsph SEACrowd schema",
            schema="seacrowd_ssp",
            subset_id="newsph",
        ),
    ]

    DEFAULT_CONFIG_NAME = "newsph_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_ssp":
            features = schemas.self_supervised_pretraining.features
        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        data_dir = dl_manager.download_and_extract(_URLS)

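        # The downloaded archive ships only a single training file, so only a
        # TRAIN split is exposed here.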
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "newsph", "train.txt"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        if self.config.schema in ("source", "seacrowd_ssp"):
            with open(filepath, encoding="utf-8") as f:
                for idx, row in enumerate(f):
                    # One article line per example: strip the trailing newline
                    # and skip blank lines instead of emitting empty examples.
                    text = row.strip()
                    if text:
                        yield idx, {"id": str(idx), "text": text}
        else:
            raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
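

# A minimal usage sketch, not part of the loader itself: assuming this file is
# saved locally as "newsph.py", the source config can be exercised through the
# `datasets` library. Recent `datasets` releases may additionally require
# trust_remote_code=True for script-based loaders.
if __name__ == "__main__":
    newsph = datasets.load_dataset("newsph.py", name="newsph_source", split="train")
    print(newsph[0])  # expected shape: {"id": "0", "text": "..."}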