Commit d33dced
Update files from the datasets library (from 1.7.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.7.0
- .gitattributes +27 -0
- README.md +182 -0
- dataset_infos.json +1 -0
- dummy/1.1.0/dummy_data.zip +3 -0
- hlgd.py +126 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bin.* filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zstandard filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,182 @@
---
annotations_creators:
- crowdsourced
language_creators:
- expert-generated
languages:
- en
licenses:
- apache-2-0
multilinguality:
- monolingual
source_datasets:
- original
task_categories:
- text-classification
task_ids:
- text-classification-other-headline-grouping
size_categories:
- 10K<n<100K
---

# Dataset Card for Headline Grouping (HLGD)

## Table of Contents
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [https://github.com/tingofurro/headline_grouping](https://github.com/tingofurro/headline_grouping)
- **Repository:** [https://github.com/tingofurro/headline_grouping](https://github.com/tingofurro/headline_grouping)
- **Paper:** [https://people.eecs.berkeley.edu/~phillab/pdfs/NAACL2021_HLG.pdf](https://people.eecs.berkeley.edu/~phillab/pdfs/NAACL2021_HLG.pdf)
- **Leaderboard:** N/A
- **Point of Contact:** phillab (at) berkeley (dot) edu

### Dataset Summary

HLGD is a binary classification dataset consisting of 20,056 labeled news headline pairs indicating whether the two headlines describe the same underlying world event or not. The dataset comes with an existing split between `train`, `validation` and `test` (60-20-20).
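
As a quick orientation, the examples shown on this card can be reproduced by loading the dataset through the `datasets` library (a minimal sketch; it assumes the dataset is published under the `hlgd` identifier, as this repository suggests):

```python
from datasets import load_dataset

# Downloads the release archive and prepares the three splits.
dataset = load_dataset("hlgd")

print(dataset)              # DatasetDict with train/validation/test splits
print(dataset["train"][0])  # one labeled headline pair
```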

### Supported Tasks and Leaderboards

The paper (NAACL 2021) introducing HLGD proposes three challenges making use of varying amounts of data:
- Challenge 1: Headline-only. Models must make predictions using only the text of both headlines (see the sketch after this list).
- Challenge 2: Headline + Time. Models must make predictions using the headline and publication date of the two headlines.
- Challenge 3: Headline + Time + Other. Models can make predictions using the headline, the publication date, as well as any other relevant metadata that can be obtained through the URL attached to the headline (full article content, authors, news source, etc.).
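
For Challenge 1, one natural baseline is a standard sentence-pair classifier. The sketch below is an illustration rather than the paper's exact setup; the `bert-base-uncased` checkpoint is an arbitrary choice, and `transformers` is assumed to be installed:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

dataset = load_dataset("hlgd")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def encode_pair(example):
    # Headline-only setting: the model sees nothing but the two headline
    # texts, encoded together as a single sentence pair.
    return tokenizer(example["headline_a"], example["headline_b"], truncation=True)

encoded = dataset.map(encode_pair)
```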

### Languages

The dataset is in English.

## Dataset Structure

### Data Instances

A typical data instance consists of a `timeline_id`; two headlines (A/B), each associated with a URL and a publication date; and a label indicating whether the two headlines describe the same underlying event (1) or not (0). Below is an example from the training set:
```
{'timeline_id': 4,
 'headline_a': 'France fines Google nearly $57 million for first major violation of new European privacy regime',
 'headline_b': "France hits Google with record EUR50mn fine over 'forced consent' data collection",
 'date_a': '2019-01-21',
 'date_b': '2019-01-21',
 'url_a': 'https://www.chicagotribune.com/business/ct-biz-france-fines-google-privacy-20190121-story.html',
 'url_b': 'https://www.rt.com/news/449369-france-hits-google-with-record-fine/',
 'label': 1}
```

### Data Fields

- `timeline_id`: the id of the timeline the headline pair belongs to (values 0 to 9). The dev set is composed of timelines 0 and 5, and the test set of timelines 7 and 8.
- `headline_a`, `headline_b`: raw text of the headline pair being compared.
- `date_a`, `date_b`: publication dates of the respective headlines, in `YYYY-MM-DD` format.
- `url_a`, `url_b`: original URLs of the respective headlines. These can be used to retrieve additional metadata for a headline.
- `label`: 1 if the two headlines are part of the same headline group and describe the same underlying event, 0 otherwise.
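
Note that `label` is stored as a `ClassLabel` feature with names `different_event` and `same_event` (see `dataset_infos.json` below), so integer labels can be mapped back to their names; a minimal sketch:

```python
from datasets import load_dataset

dataset = load_dataset("hlgd")
label_feature = dataset["train"].features["label"]

example = dataset["train"][0]
print(label_feature.int2str(example["label"]))  # "same_event" or "different_event"
```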

### Data Splits

|                    | Train  | Dev   | Test  |
| ------------------ | ------ | ----- | ----- |
| Number of examples | 15,492 | 2,069 | 2,495 |
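
These counts can be verified directly once the dataset is loaded (a quick sketch using the `num_rows` attribute of each split):

```python
from datasets import load_dataset

dataset = load_dataset("hlgd")
for split in ("train", "validation", "test"):
    print(split, dataset[split].num_rows)  # expected: 15492, 2069, 2495
```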

## Dataset Creation

### Curation Rationale

The task of grouping headlines from diverse news sources that discuss the same underlying event is important for enabling interfaces that can present the diversity of coverage of unfolding news events. Many news aggregators (such as Google News or Yahoo News) present several sources for a given event, with the objective of highlighting coverage diversity.
Automatic grouping of news headlines and articles remains challenging, as headlines are short, heavily stylized texts.
The HeadLine Grouping Dataset introduces the first benchmark to evaluate NLU models' ability to group headlines according to the underlying event they describe.

### Source Data

#### Initial Data Collection and Normalization

The data was obtained by collecting 10 news timelines from the NewsLens project, selecting timelines that are diversified in topic and that each contain between 80 and 300 news articles.

#### Who are the source language producers?

The source language producers are journalists or members of the newsrooms of 34 news organizations listed in the paper.

### Annotations

#### Annotation process

Each timeline was annotated for group IDs by 5 independent annotators. The 5 annotations were merged into a single annotation, named the global groups.
The global group IDs are then used to generate all pairs of headlines within a timeline with binary labels: 1 if two headlines are part of the same global group, and 0 otherwise. A heuristic is used to remove negative examples so that the final dataset has a class imbalance of 1 positive example to 5 negative examples (see the sketch below).
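
A minimal sketch of this pair-generation step, assuming each timeline is a list of records carrying the merged group id in a hypothetical `global_group` field (the released files contain only the resulting pairs, and the negative-downsampling heuristic is not shown):

```python
from itertools import combinations

def make_pairs(timeline):
    """Generate every labeled headline pair within a single timeline."""
    pairs = []
    for a, b in combinations(timeline, 2):
        label = 1 if a["global_group"] == b["global_group"] else 0  # same event?
        pairs.append({"headline_a": a["headline"], "headline_b": b["headline"], "label": label})
    return pairs
```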

#### Who are the annotators?

Annotators were the authors of the paper and 8 crowd-workers recruited on the Upwork platform. The crowd-workers were native English speakers with experience in either proofreading or data entry.

### Personal and Sensitive Information

The annotators' identities have been anonymized. Due to the public nature of news headlines, the headlines are not expected to contain personal or sensitive information.

## Considerations for Using the Data

### Social Impact of Dataset

The purpose of this dataset is to facilitate applications that present diverse news coverage.

By simplifying the process of developing models that can group headlines describing a common event, we hope the community can build applications that show news readers diverse sources covering similar events.

We note, however, that the annotations were performed mostly by crowd-workers and that, even though inter-annotator agreement was high, it was not perfect. The annotators' biases therefore remain in the dataset.

### Discussion of Biases

There are several sources of bias in the dataset:
- Annotator bias: 10 annotators participated in the creation of the dataset. Their opinions and perspectives influenced the creation of the dataset.
- Subject-matter bias: HLGD consists of headlines from 10 news timelines on diverse topics (space, tech, politics, etc.). This choice has an impact on the types of positive and negative examples that appear in the dataset.
- Source selection bias: 33 English-language news sources are represented in the dataset. This selection of news sources affects the content of the timelines and the overall dataset.
- Time range of the timelines: the selected timelines range from 2010 to 2020, which influences the language and style of the news headlines.

### Other Known Limitations

For the task of headline grouping, inter-annotator agreement is high (0.814) but not perfect. Some grouping decisions are subjective and depend on the reader's interpretation.

## Additional Information

### Dataset Curators

The dataset was initially created by Philippe Laban, Lucas Bandarkar and Marti Hearst at UC Berkeley.

### Licensing Information

The licensing status of the dataset depends on the legal status of news headlines. It is commonly held that news headlines fall under "fair use" ([American Bar Association blog post](https://www.americanbar.org/groups/gpsolo/publications/gp_solo/2011/september/fair_use_news_reviews/)).
The dataset only distributes headlines, a URL and a publication date. Users of the dataset can retrieve additional information (such as the body content, author, etc.) directly by querying the URL, as sketched below.
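
A minimal retrieval sketch, assuming the `requests` package is available and the pages are still online (many URLs from 2010-2020 may since have moved or disappeared):

```python
import requests

def fetch_article_html(url, timeout=10):
    """Fetch the raw HTML behind a headline URL; returns None on failure."""
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return response.text
    except requests.RequestException:
        return None  # dead link, paywall redirect, network error, etc.
```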

### Citation Information

```
@inproceedings{Laban2021NewsHG,
  title={News Headline Grouping as a Challenging NLU Task},
  author={Laban, Philippe and Bandarkar, Lucas and Hearst, Marti A},
  booktitle={NAACL 2021},
  publisher={Association for Computational Linguistics},
  year={2021}
}
```

### Contributions

Thanks to [@tingofurro](https://github.com/tingofurro) for adding this dataset.
dataset_infos.json
ADDED
@@ -0,0 +1 @@
{"default": {"description": "HLGD is a binary classification dataset consisting of 20,056 labeled news headlines pairs indicating\nwhether the two headlines describe the same underlying world event or not.\n", "citation": "@inproceedings{Laban2021NewsHG,\n    title={News Headline Grouping as a Challenging NLU Task},\n    author={Philippe Laban and Lucas Bandarkar},\n    booktitle={NAACL 2021},\n    publisher = {Association for Computational Linguistics},\n    year={2021}\n}\n", "homepage": "https://github.com/tingofurro/headline_grouping", "license": "Apache-2.0 License", "features": {"timeline_id": {"num_classes": 10, "names": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "names_file": null, "id": null, "_type": "ClassLabel"}, "headline_a": {"dtype": "string", "id": null, "_type": "Value"}, "headline_b": {"dtype": "string", "id": null, "_type": "Value"}, "date_a": {"dtype": "string", "id": null, "_type": "Value"}, "date_b": {"dtype": "string", "id": null, "_type": "Value"}, "url_a": {"dtype": "string", "id": null, "_type": "Value"}, "url_b": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["different_event", "same_event"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "hlgd", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 6447212, "num_examples": 15492, "dataset_name": "hlgd"}, "test": {"name": "test", "num_bytes": 941145, "num_examples": 2495, "dataset_name": "hlgd"}, "validation": {"name": "validation", "num_bytes": 798302, "num_examples": 2069, "dataset_name": "hlgd"}}, "download_checksums": {"https://github.com/tingofurro/headline_grouping/releases/download/0.1/hlgd_classification_0.1.zip": {"num_bytes": 1858948, "checksum": "8192c72e28766debf548f0ba1f0b5c3d592cf7097af26a5d67b172c908614601"}}, "download_size": 1858948, "post_processing_size": null, "dataset_size": 8186659, "size_in_bytes": 10045607}}
dummy/1.1.0/dummy_data.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:516c3bd2e820955de560d50cb208cac5887cf07e74cf8d3cd2230ed224e23c31
size 3628
hlgd.py
ADDED
@@ -0,0 +1,126 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HLGD is a binary classification dataset consisting of 20,056 labeled news headlines pairs indicating
whether the two headlines describe the same underlying world event or not.
"""

import json
import os

import datasets


_CITATION = """\
@inproceedings{Laban2021NewsHG,
    title={News Headline Grouping as a Challenging NLU Task},
    author={Philippe Laban and Lucas Bandarkar},
    booktitle={NAACL 2021},
    publisher = {Association for Computational Linguistics},
    year={2021}
}
"""

_DESCRIPTION = """\
HLGD is a binary classification dataset consisting of 20,056 labeled news headlines pairs indicating
whether the two headlines describe the same underlying world event or not.
"""

_HOMEPAGE = "https://github.com/tingofurro/headline_grouping"
_LICENSE = "Apache-2.0 License"
_DOWNLOAD_URL = "https://github.com/tingofurro/headline_grouping/releases/download/0.1/hlgd_classification_0.1.zip"


class HLGD(datasets.GeneratorBasedBuilder):
    """Headline Grouping Dataset."""

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        features = datasets.Features(
            {
                "timeline_id": datasets.features.ClassLabel(names=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
                "headline_a": datasets.Value("string"),
                "headline_b": datasets.Value("string"),
                "date_a": datasets.Value("string"),
                "date_b": datasets.Value("string"),
                "url_a": datasets.Value("string"),
                "url_b": datasets.Value("string"),
                "label": datasets.features.ClassLabel(names=["different_event", "same_event"]),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method downloads/extracts the data and defines the splits.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It accepts any type or nested list/dict and returns the same structure with each URL replaced
        # by a path to the local file. By default, archives are extracted and the path to the cached
        # folder where they were extracted is returned instead of the archive path.
        data_dir = dl_manager.download_and_extract(_DOWNLOAD_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.json"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, "test.json"), "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.json"),
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(
        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        """Yields examples as (key, example) tuples."""
        # The `key` is here for legacy reasons (tfds) and is not important in itself.
        with open(filepath, encoding="utf-8") as f:
            dataset_split = json.load(f)

        for id_, row in enumerate(dataset_split):
            yield id_, {
                "timeline_id": row["timeline_id"],
                "headline_a": row["headline_a"],
                "headline_b": row["headline_b"],
                "date_a": row["date_a"],
                "date_b": row["date_b"],
                "url_a": row["url_a"],
                "url_b": row["url_b"],
                "label": row["label"],
            }