Skylion007 committed
Commit cf22a37
Parent: c6680bb

Delete dataset script

Files changed (1):
  1. openwebtext.py +0 -79
openwebtext.py DELETED
@@ -1,79 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """The Open WebText Corpus"""
-
- import re
-
- import datasets
-
-
- _CITATION = """\
- @misc{Gokaslan2019OpenWeb,
-   title={OpenWebText Corpus},
-   author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
-   howpublished={\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
-   year={2019}
- }
- """
-
- _DESCRIPTION = """\
- An open-source replication of the WebText dataset from OpenAI.
- """
-
- _N_DATA_FILES = 21
- _DATA_FILES = ["subsets/urlsf_subset{:02d}.tar".format(i) for i in range(_N_DATA_FILES)]
-
-
- class Openwebtext(datasets.GeneratorBasedBuilder):
-     """The Open WebText dataset."""
-
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(
-             name="plain_text",
-             description="Plain text",
-             version=datasets.Version("1.0.0"),
-         )
-     ]
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features({"text": datasets.Value("string")}),
-             homepage="https://skylion007.github.io/OpenWebTextCorpus/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         archives = dl_manager.download(_DATA_FILES)
-         return [
-             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
-                 "archive_iterators": [
-                     dl_manager.iter_archive(archive) for archive in archives
-                 ],
-                 "iter_archive": dl_manager.iter_archive
-             }),
-         ]
-
-     def _generate_examples(self, archive_iterators, iter_archive):
-         """Yields examples."""
-         for archive_iterator in archive_iterators:
-             for xz_filepath, xz_f in archive_iterator:
-                 if not xz_filepath.endswith(".xz"):
-                     continue
-                 for txt_filepath, txt_f in iter_archive(xz_f):
-                     if not txt_filepath.endswith(".txt"):
-                         continue
-                     idx = f"{xz_filepath}/{txt_filepath}"
-                     yield idx, {"text": re.sub("\n\n\n+", "\n\n", txt_f.read().decode("utf-8")).strip()}
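The deleted builder streamed 21 top-level .tar archives, each containing .xz-compressed sub-archives of plain-text documents, and collapsed runs of three or more newlines on the fly. With the script removed, the dataset would be loaded straight from the hosted data files instead. A minimal usage sketch, assuming the repository id Skylion007/openwebtext and a `datasets` version that supports script-free loading:

from datasets import load_dataset

# Load the train split (the only split the deleted script defined)
# directly from the repository's data files; no loading script runs.
ds = load_dataset("Skylion007/openwebtext", split="train")

# Each example carries a single "text" field, matching the features
# the deleted script declared in _info().
print(ds[0]["text"][:200])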