"""NEWSROOM Dataset.""" |

import json
import os
import re

from bs4 import BeautifulSoup

import datasets

_CITATION = """
@inproceedings{N18-1065,
  author    = {Grusky, Max and Naaman, Mor and Artzi, Yoav},
  title     = {NEWSROOM: A Dataset of 1.3 Million Summaries
               with Diverse Extractive Strategies},
  booktitle = {Proceedings of the 2018 Conference of the
               North American Chapter of the Association for
               Computational Linguistics: Human Language Technologies},
  year      = {2018},
}
"""

_DESCRIPTION = """
NEWSROOM is a large dataset for training and evaluating summarization systems.
It contains 1.3 million articles and summaries written by authors and
editors in the newsrooms of 38 major publications.

Dataset features include:
  - text: input news text.
  - summary: summary for the news.

This dataset can be downloaded upon request. Unzip all of the contents
(train.jsonl, dev.jsonl, test.jsonl) into ~/.cache/huggingface/datasets/newsroom.
"""

_DOCUMENT = "text"
_SUMMARY = "summary"

_DATA_DIR = os.path.join(
    os.path.expanduser("~"), ".cache/huggingface/datasets/newsroom"
)
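
# Expected layout after the manual download (a sketch; adjust the root if
# your Hugging Face cache lives elsewhere):
#
#   ~/.cache/huggingface/datasets/newsroom/
#   ├── train.jsonl
#   ├── dev.jsonl
#   └── test.jsonl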

# Closing quote characters, as in the CNN/DailyMail preprocessing script
# referenced in _process_document_and_summary below.
DM_SINGLE_CLOSE_QUOTE = "\u2019"
DM_DOUBLE_CLOSE_QUOTE = "\u201d"

# Characters that may legitimately end a summary; a period is appended
# when the summary ends with anything else.
END_TOKENS = [
    ".",
    "!",
    "?",
    "...",
    "'",
    "`",
    '"',
    DM_SINGLE_CLOSE_QUOTE,
    DM_DOUBLE_CLOSE_QUOTE,
    ")",
]


def _process_document_and_summary(document, summary):
    """Collapse newlines in the document; strip HTML tags from the summary and
    normalize its quotes, whitespace, and final punctuation.

    References:
      (1) https://stackoverflow.com/a/12982689/13448382
      (2) https://stackoverflow.com/a/67560556/13448382
      (3) https://docs.python.org/3/howto/unicode.html#the-unicode-type
      (4) https://stackoverflow.com/a/11566398/13448382
      (5) https://huggingface.co/datasets/cnn_dailymail/blob/2d2c6100ccd17c0b215f85c38e36c4e7a5746425/cnn_dailymail.py#L155
    """
    # Collapse runs of newlines so the document becomes a single line of text.
    document = re.sub(r"\n+", " ", document).strip()

    # Round-trip through latin-1 to repair mojibake (UTF-8 bytes that were
    # mis-decoded as latin-1); fall back to a plain UTF-8 encode when the
    # summary contains characters outside latin-1.
    try:
        summary = summary.encode("latin1")
    except UnicodeEncodeError:
        summary = summary.encode("utf-8")

    # Strip HTML tags, then normalize quotes and whitespace.
    summary = BeautifulSoup(summary.decode("utf-8", "ignore"), "lxml").text
    summary = summary.replace("“", '"').replace("”", '"')
    summary = re.sub(r"\n+", " ", summary)
    summary = re.sub(r"\s+", " ", summary)
    summary = re.sub(r"\\'", "'", summary)
    summary = re.sub("''", '"', summary)
    summary = re.sub("\xa0", " ", summary)
    # Move sentence-final punctuation outside closing quotes.
    summary = re.sub(r'\."', '".', summary)
    summary = re.sub(r',"', '",', summary).strip()
    if summary and summary[-1] not in END_TOKENS:
        summary += "."

    return document, summary
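
# A small illustration of the cleaning above (hypothetical strings, not taken
# from the dataset): newlines are collapsed, HTML tags are removed, and a
# terminal period is appended when the summary lacks end punctuation.
#
#   >>> _process_document_and_summary("A first line.\n\nA second line.",
#   ...                               "<b>Stocks rally</b>")
#   ('A first line. A second line.', 'Stocks rally.')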


class Newsroom(datasets.GeneratorBasedBuilder):
    """NEWSROOM Dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        features = {k: datasets.Value("string") for k in [_DOCUMENT, _SUMMARY]}
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=(_DOCUMENT, _SUMMARY),
            homepage="https://lil.nlp.cornell.edu/newsroom/index.html",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"input_file": os.path.join(_DATA_DIR, "train.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"input_file": os.path.join(_DATA_DIR, "dev.jsonl")},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"input_file": os.path.join(_DATA_DIR, "test.jsonl")},
            ),
        ]

    def _generate_examples(self, input_file=None):
        """Yields (index, example) pairs parsed from a jsonl file."""
        idx = 0
        with open(input_file, encoding="utf-8") as f:
            for line in f:
                d = json.loads(line)
                document, summary = _process_document_and_summary(
                    d[_DOCUMENT], d[_SUMMARY]
                )
                # Skip records whose text or summary is empty after cleaning.
                if not summary or not document:
                    continue
                yield idx, {_DOCUMENT: document, _SUMMARY: summary}
                idx += 1
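
# A minimal loading sketch (assumptions, not part of this script: it presumes
# the file is saved as newsroom.py, a `datasets` version that still supports
# local loading scripts, and that train.jsonl, dev.jsonl, and test.jsonl
# already sit in ~/.cache/huggingface/datasets/newsroom):
#
#   from datasets import load_dataset
#   newsroom = load_dataset("./newsroom.py")
#   print(newsroom["train"][0]["summary"])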