|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""RedPajama V2: Quality annotated Web Text Documents.""" |
|
|
|
import json |
|
|
|
import datasets |
|
import traceback |
|
import os |
|
import gzip |
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
_DESCRIPTION = """\ |
|
RedPajama V2 is a Data Foundation of Web Text Documents with Quality Annotations. |
|
""" |
|
|
|
# Snapshot ids (one per line, e.g. "2023-06") shipped alongside this script;
# they drive the per-snapshot builder configs below.
with open("_CC_SNAPSHOT_IDS", "r", encoding="utf-8") as f:
    # Skip blank lines so a trailing newline in the file doesn't produce an
    # empty snapshot id (which would generate broken config names/URLs).
    _CC_SNAPSHOT_IDS = [line.strip() for line in f if line.strip()]
|
|
|
# Root of the public RedPajama-V2 bucket; listing ids are joined onto this.
_URL_BASE = 'https://data.together.xyz/redpajama-data-v2/v1.0.0'

# Languages covered by the per-snapshot builder configs below.
_LANGUAGES = ("en", "de", "fr", "es", "it")

# Snapshot used by the bundled "sample" configs.
_SAMPLE_SNAPSHOT_ID = "2023-06"

# Relative path of the per-(language, snapshot, partition) listing file that
# enumerates the shard ids to download.
_LISTINGS_PATTERN = "listings/{language}-{snapshot}-{partition}.txt"
|
|
|
|
|
class RedPajamaDataV2Config(datasets.BuilderConfig):
    """BuilderConfig for RedPajama."""

    def __init__(self, *args, language, partition, snapshots, **kwargs):
        """BuilderConfig for RedPajama.

        Args:
            language: two-letter language code (e.g. "en"), or None for the
                sample configs.
            partition: partition to load ("head_middle", "tail" or "sample").
            snapshots: list of CommonCrawl snapshot ids, or None for the
                sample configs.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.language = language
        self.partition = partition
        self.snapshots = snapshots
|
|
|
|
|
# Sample configs. "_sample" duplicates "sample" — presumably kept as a legacy
# alias (both end in "sample", which is what _split_generators dispatches on);
# TODO confirm it can be dropped. The pointless f-string prefixes on constant
# strings (ruff F541) have been removed.
_BUILDER_CONFIGS = [
    RedPajamaDataV2Config(
        name="_sample",
        partition="sample",
        snapshots=None,
        language=None,
        version=datasets.Version("1.0.0", ""),
        description="RedPajamaV2 Sample",
    ),
    RedPajamaDataV2Config(
        name="sample",
        partition="sample",
        snapshots=None,
        language=None,
        version=datasets.Version("1.0.0", ""),
        description="RedPajamaV2 Sample",
    ),
]
|
|
|
# One config per (language, partition, snapshot), plus a "-all" config per
# (language, partition) that covers every snapshot at once. Order is
# preserved: per language, head-middle snapshots, head-middle-all, tail
# snapshots, tail-all.
for lang in _LANGUAGES:
    for partition, partition_tag in (
        ("head_middle", "head-middle"),
        ("tail", "tail"),
    ):
        _BUILDER_CONFIGS.extend(
            RedPajamaDataV2Config(
                name=f"{lang}-{partition_tag}-{snapshot}",
                partition=partition,
                snapshots=[snapshot],
                language=lang,
                version=datasets.Version("1.0.0", ""),
                description=f"RedPajamaV2 {partition_tag} {lang}-{snapshot}",
            )
            for snapshot in _CC_SNAPSHOT_IDS
        )
        _BUILDER_CONFIGS.append(
            RedPajamaDataV2Config(
                name=f"{lang}-{partition_tag}-all",
                partition=partition,
                snapshots=_CC_SNAPSHOT_IDS,
                language=lang,
                version=datasets.Version("1.0.0", ""),
                description=f"RedPajamaV2 {partition_tag} {lang}",
            )
        )
|
|
|
|
|
class RedPajamaV2(datasets.GeneratorBasedBuilder):
    """RedPajama V2: Quality annotated Web Text Documents."""

    BUILDER_CONFIGS = _BUILDER_CONFIGS

    def _info(self):
        """Build the DatasetInfo for the selected config.

        The "tail" partition is published without quality signals, so its
        schema omits the "quality_signals" column.
        """
        features = {
            "raw_content": datasets.Value("string"),
            "doc_id": datasets.Value("string"),
            "meta": datasets.Value("string"),
        }
        if self.config.partition != "tail":
            # head_middle and sample carry per-document quality signals,
            # serialized as a JSON string.
            features["quality_signals"] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            supervised_keys=None,
        )

    def _split_generators_sample(self, dl_manager):
        """Split generator for the small sample subset shipped with the script."""
        with open("sample/sample_listings.txt", "r", encoding="utf-8") as fd:
            listings = [line.strip() for line in fd]

        docs_files = dl_manager.download({
            _SAMPLE_SNAPSHOT_ID: [
                f"sample/documents/{lst}.json.gz" for lst in listings
            ]
        })

        signals_files = dl_manager.download({
            _SAMPLE_SNAPSHOT_ID: [
                f"sample/quality_signals/{lst}.signals.json.gz"
                for lst in listings
            ]
        })

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "documents_files": {
                        _SAMPLE_SNAPSHOT_ID: docs_files[_SAMPLE_SNAPSHOT_ID]
                    },
                    "quality_signals_files": {
                        _SAMPLE_SNAPSHOT_ID: signals_files[_SAMPLE_SNAPSHOT_ID]
                    },
                },
            )
        ]

    def _split_generators_full(self, dl_manager):
        """Split generator for the full dataset.

        Downloads one listing file per requested snapshot, then every
        document shard it names (and, for head_middle, the matching
        quality-signal shards).
        """
        url_lists = dl_manager.download_and_extract({
            snapshot_id: _LISTINGS_PATTERN.format(
                language=self.config.language,
                snapshot=snapshot_id,
                partition=self.config.partition,
            )
            for snapshot_id in self.config.snapshots
        })

        listings_ids = {}
        for snapshot_id, listings_file in url_lists.items():
            with open(listings_file, encoding="utf-8") as f:
                listings_ids[snapshot_id] = [line.strip() for line in f]

        document_urls = {
            snapshot_id: [
                os.path.join(_URL_BASE, f"documents/{lst_id}.json.gz")
                for lst_id in listings_ids[snapshot_id]
            ]
            for snapshot_id in self.config.snapshots
        }
        documents_files = dl_manager.download(document_urls)

        if self.config.partition == "head_middle":
            # Only head_middle ships quality signals; tail has none.
            quality_signals_urls = {
                snapshot_id: [
                    os.path.join(
                        _URL_BASE,
                        f"quality_signals/{lst_id}.signals.json.gz",
                    )
                    for lst_id in listings_ids[snapshot_id]
                ]
                for snapshot_id in self.config.snapshots
            }
            quality_signals_files = dl_manager.download(quality_signals_urls)
        else:
            quality_signals_files = {}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "documents_files": {
                        snapshot_id: documents_files[snapshot_id]
                        for snapshot_id in self.config.snapshots
                    },
                    # .get(...) yields None per snapshot for tail, which
                    # _generate_examples handles explicitly.
                    "quality_signals_files": {
                        snapshot_id: quality_signals_files.get(snapshot_id)
                        for snapshot_id in self.config.snapshots
                    },
                },
            )
        ]

    def _split_generators(self, dl_manager):
        """Dispatch to the sample or full split generator by config name.

        Both "sample" and its "_sample" alias end with "sample".
        """
        if self.config.name.endswith("sample"):
            return self._split_generators_sample(dl_manager)
        return self._split_generators_full(dl_manager)

    @staticmethod
    def _build_meta(doc):
        """Extract the metadata fields kept for every document record."""
        return {
            "url": doc["url"],
            "language": doc["language"],
            "source_domain": doc["source_domain"],
            "date_download": doc["date_download"],
            "digest": doc["digest"],
        }

    def _iter_shard(self, doc_file, qs_file):
        """Yield example dicts from one documents shard.

        Args:
            doc_file: local path to a gzipped JSONL documents shard.
            qs_file: local path to the matching gzipped JSONL quality-signals
                shard, or None for the tail partition (which has none).

        Raises:
            Exception: re-raises any parse error after logging the shard
                paths and row number.
        """
        if qs_file is None:
            # Tail shards: no quality signals to pair with.
            with gzip.open(doc_file, "rt", encoding="utf-8") as df:
                for row, doc_line in enumerate(df):
                    try:
                        doc = json.loads(doc_line)
                        # NOTE(review): upstream doc ids come from the
                        # quality-signal records ("id"), which tail shards
                        # lack; synthesize a stable id from the shard name and
                        # row index — confirm the desired id format.
                        doc_id = f"{os.path.basename(doc_file)}/{row}"
                        yield {
                            "raw_content": doc["raw_content"],
                            "doc_id": doc_id,
                            "meta": json.dumps(self._build_meta(doc)),
                        }
                    except Exception:
                        logger.exception(
                            "failed to parse doc_file %s row %d", doc_file, row
                        )
                        raise
            return

        with gzip.open(doc_file, "rt", encoding="utf-8") as df, \
                gzip.open(qs_file, "rt", encoding="utf-8") as qf:
            for row, (doc_line, qs_line) in enumerate(zip(df, qf)):
                try:
                    doc = json.loads(doc_line)
                    qs = json.loads(qs_line)
                    yield {
                        "raw_content": doc["raw_content"],
                        "doc_id": qs["id"],
                        "meta": json.dumps(self._build_meta(doc)),
                        "quality_signals": json.dumps(qs["quality_signals"]),
                    }
                except Exception:
                    logger.exception(
                        "failed to parse doc_file %s / qs_file %s row %d",
                        doc_file,
                        qs_file,
                        row,
                    )
                    raise

    def _generate_examples(self, documents_files, quality_signals_files):
        """Yield (key, example) pairs for every shard of every snapshot.

        Raises:
            ValueError: if a snapshot's document and quality-signal shard
                lists differ in length.
        """
        key = 0
        for snapshot, docs_files in documents_files.items():
            if self.config.partition in ("head_middle", "sample"):
                qs_files = quality_signals_files[snapshot]
                # Explicit raise rather than assert: asserts are stripped
                # under -O.
                if len(docs_files) != len(qs_files):
                    raise ValueError(
                        f"snapshot {snapshot}: {len(docs_files)} document "
                        f"shards vs {len(qs_files)} quality-signal shards"
                    )
            else:
                # Bug fix: tail has no quality signals; the previous code
                # set qs_files = None and then crashed on len(None) /
                # zip(docs_files, None). Pair each doc shard with None so
                # the zip below still lines up.
                qs_files = [None] * len(docs_files)

            for doc_file, qs_file in zip(docs_files, qs_files):
                for record in self._iter_shard(doc_file, qs_file):
                    yield key, record
                    key += 1
|