mariosasko committed
Commit: afcaeaf
Parent: dd75f49

Remove download_custom (#4)


- Remove download_custom (78fc817fab6e6b5daf8747b23b9b7bd76090cb06)

Files changed (5)
  1. README.md +5 -5
  2. data/test_files.txt +100 -0
  3. data/train_files.txt +0 -0
  4. data/validation_files.txt +50 -0
  5. pg19.py +31 -71
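
The new data/*_files.txt lists replace the runtime bucket listing that download_custom used to perform. A quick sanity check one could run from a local clone of this repository (a hypothetical reviewer script, not part of the change; expected counts come from the num_examples fields in README.md, and train_files.txt is assumed to use the same one-path-per-line format as the other splits):

# Hypothetical sanity check: the split lists should match the example counts
# declared in README.md. Run from a local clone of this dataset repository.
expected = {"train": 28602, "validation": 50, "test": 100}

for split, count in expected.items():
    with open(f"data/{split}_files.txt", encoding="utf-8") as f:
        names = [line for line in f.read().splitlines() if line]
    assert len(names) == count, f"{split}: found {len(names)} entries, expected {count}"
    # Every entry should be a relative path such as "test/10146.txt".
    assert all(n.startswith(f"{split}/") and n.endswith(".txt") for n in names)

print("split file lists are consistent with README.md")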
README.md CHANGED
@@ -31,16 +31,16 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 11453688524
+    num_bytes: 11453688452
    num_examples: 28602
   - name: validation
-    num_bytes: 17402307
+    num_bytes: 17402295
    num_examples: 50
   - name: test
-    num_bytes: 40482864
+    num_bytes: 40482852
    num_examples: 100
-  download_size: 11740484131
-  dataset_size: 11511573695
+  download_size: 11740397875
+  dataset_size: 11511573599
 ---

 # Dataset Card for "pg19"
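
The updated num_bytes / download_size / dataset_size values above are the size fields the datasets library exposes on DatasetInfo. A minimal sketch of reading them back, assuming datasets is installed and this repository resolves as the "pg19" dataset:

# Minimal sketch: read the regenerated size metadata via the datasets library.
# Assumes `pip install datasets`; recent releases may also need trust_remote_code=True.
from datasets import load_dataset_builder

builder = load_dataset_builder("pg19")
info = builder.info

# Depending on the datasets version, these fields are filled from the card's
# dataset_info metadata or only after builder.download_and_prepare().
print(info.download_size)   # 11740397875 after this commit
print(info.dataset_size)    # 11511573599 after this commit
for name, split_info in (info.splits or {}).items():
    print(name, split_info.num_examples, split_info.num_bytes)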
data/test_files.txt ADDED
@@ -0,0 +1,100 @@
+test/10146.txt
+test/10321.txt
+test/10356.txt
+test/10762.txt
+test/12204.txt
+test/15562.txt
+test/22424.txt
+test/24553.txt
+test/2544.txt
+test/25646.txt
+test/25773.txt
+test/25830.txt
+test/26183.txt
+test/26239.txt
+test/26493.txt
+test/26618.txt
+test/27454.txt
+test/28444.txt
+test/28988.txt
+test/29594.txt
+test/29973.txt
+test/30312.txt
+test/30752.txt
+test/30754.txt
+test/30909.txt
+test/30981.txt
+test/31065.txt
+test/3129.txt
+test/31974.txt
+test/3247.txt
+test/32761.txt
+test/3340.txt
+test/33426.txt
+test/33756.txt
+test/34467.txt
+test/35205.txt
+test/35246.txt
+test/3608.txt
+test/36256.txt
+test/37006.txt
+test/37328.txt
+test/37403.txt
+test/37443.txt
+test/3754.txt
+test/37702.txt
+test/38106.txt
+test/3890.txt
+test/38929.txt
+test/38955.txt
+test/4047.txt
+test/40579.txt
+test/40700.txt
+test/4128.txt
+test/41603.txt
+test/41607.txt
+test/42081.txt
+test/42655.txt
+test/43536.txt
+test/43845.txt
+test/44099.txt
+test/44557.txt
+test/45313.txt
+test/45881.txt
+test/45888.txt
+test/46915.txt
+test/47068.txt
+test/47558.txt
+test/47581.txt
+test/47676.txt
+test/48693.txt
+test/49078.txt
+test/49529.txt
+test/49596.txt
+test/50287.txt
+test/51410.txt
+test/53345.txt
+test/5396.txt
+test/54537.txt
+test/54624.txt
+test/55339.txt
+test/55871.txt
+test/56410.txt
+test/5734.txt
+test/5770.txt
+test/57791.txt
+test/58473.txt
+test/58553.txt
+test/58598.txt
+test/5956.txt
+test/5962.txt
+test/6412.txt
+test/6941.txt
+test/7412.txt
+test/7987.txt
+test/8197.txt
+test/8559.txt
+test/860.txt
+test/8788.txt
+test/9315.txt
+test/9931.txt
data/train_files.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/validation_files.txt ADDED
@@ -0,0 +1,50 @@
+validation/1022.txt
+validation/11155.txt
+validation/13089.txt
+validation/16959.txt
+validation/1925.txt
+validation/2383.txt
+validation/23956.txt
+validation/24360.txt
+validation/25066.txt
+validation/27688.txt
+validation/28213.txt
+validation/28776.txt
+validation/29981.txt
+validation/32629.txt
+validation/34016.txt
+validation/34056.txt
+validation/34100.txt
+validation/356.txt
+validation/35816.txt
+validation/36402.txt
+validation/37833.txt
+validation/38214.txt
+validation/38403.txt
+validation/4024.txt
+validation/41074.txt
+validation/42067.txt
+validation/42142.txt
+validation/42306.txt
+validation/43423.txt
+validation/44896.txt
+validation/44912.txt
+validation/4533.txt
+validation/48089.txt
+validation/48461.txt
+validation/48677.txt
+validation/49091.txt
+validation/50355.txt
+validation/51859.txt
+validation/5195.txt
+validation/5321.txt
+validation/53682.txt
+validation/54098.txt
+validation/555.txt
+validation/55658.txt
+validation/56719.txt
+validation/57843.txt
+validation/58093.txt
+validation/6404.txt
+validation/7510.txt
+validation/8545.txt
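
Each entry in these lists is a path relative to the public deepmind-gutenberg bucket. A standalone sketch of how the updated pg19.py (below) derives a book id and a full download URL from one entry; resolve is an illustrative helper, not part of the script:

# Sketch of the id/URL mapping added to _split_generators in pg19.py.
import os

_ASSET_ROOT_URL = "https://storage.googleapis.com/deepmind-gutenberg/"

def resolve(entry):
    # "validation/1022.txt" -> ("1022", _ASSET_ROOT_URL + "validation/1022.txt")
    book_id = os.path.splitext(os.path.basename(entry))[0]
    return book_id, _ASSET_ROOT_URL + entry

print(resolve("validation/1022.txt"))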
pg19.py CHANGED
@@ -2,11 +2,7 @@


 import csv
-import json
 import os
-from operator import itemgetter
-
-import requests

 import datasets

@@ -38,9 +34,9 @@ To compare models we propose to continue measuring the word-level perplexity, by
 One could use this dataset for benchmarking long-range language models, or use it to pre-train for other natural language processing tasks which require long-range reasoning, such as LAMBADA or NarrativeQA. We would not recommend using this dataset to train a general-purpose language model, e.g. for applications to a production-system dialogue agent, due to the dated linguistic style of old texts and the inherent biases present in historical writing.
 """

-_ASSET_ROOT_URL = "https://storage.googleapis.com/deepmind-gutenberg/"
-_STORAGE_API_ROOT_URL = "https://storage.googleapis.com/storage/v1/b/deepmind-gutenberg/o/"

+_SPLIT_FILES_PATH = "data/{split}_files.txt"
+_ASSET_ROOT_URL = "https://storage.googleapis.com/deepmind-gutenberg/"
 _METADATA_URL = _ASSET_ROOT_URL + "metadata.csv"


@@ -80,97 +76,61 @@ class Pg19(datasets.GeneratorBasedBuilder):

     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        # TODO(pg19): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-
-        def fetch_all_pages(url, prefix):
-            pageToken = None
-            payload = {"prefix": prefix}
-
-            while True:
-                resp = requests.get(url, params={"pageToken": pageToken, **payload})
-                json = resp.json()
-                yield json
-
-                pageToken = json.pop("nextPageToken", None)
-                if pageToken is None:
-                    break
-
-        def get_filename(path):
-            return os.path.splitext(os.path.basename(path))[0]
-
-        def download_listdir(url, local_filepath):
-            root_url, prefix = url.rsplit("/", 1)
-            pages = fetch_all_pages(root_url, prefix + "/")
-            items = flat_map(itemgetter("items"), pages)
-            names = sorted(map(itemgetter("name"), items))
-
-            with open(local_filepath, "w") as f:
-                f.write(json.dumps(names))
-            return local_filepath
-
-        def filepath_to_json(path):
-            with open(path, "r", encoding="utf-8") as f:
-                return json.load(f)
-
         splits = ["train", "validation", "test"]
-        split_paths = map(lambda path: _STORAGE_API_ROOT_URL + path, splits)
-        split_paths = dl_manager.download_custom(dict(zip(splits, split_paths)), download_listdir)
-
-        file_urls = list(map(filepath_to_json, split_paths.values()))
-
-        complete_file_urls = [
-            list(map(lambda url: _ASSET_ROOT_URL + url, urls)) for (split_path, urls) in zip(split_paths, file_urls)
-        ]
-        urls_to_download = {(get_filename(url)): url for urls in complete_file_urls for url in urls}
-
-        metadata = dl_manager.download({"metadata": _METADATA_URL})
-        downloaded_files = dl_manager.download(urls_to_download)
-
-        ids_in_split = list(map(lambda urls: list(map(get_filename, urls)), file_urls))
-        split_ids_index = dict(zip(split_paths, ids_in_split))
+        files = dl_manager.download({split: _SPLIT_FILES_PATH.format(split=split) for split in splits})
+
+        for split, names_file in list(files.items()):
+            with open(names_file, encoding="utf-8") as f:
+                split_files = f.read().splitlines()
+            split_files = sorted(split_files)
+            split_files = {
+                os.path.splitext(os.path.basename(file))[0]: _ASSET_ROOT_URL + file
+                for file in split_files
+            }
+            files[split] = split_files

+        metadata = dl_manager.download(_METADATA_URL)
+        downloaded_files = dl_manager.download(files)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "ids": split_ids_index["train"],
-                    "metadata_filepath": metadata["metadata"],
-                    "filepaths": downloaded_files,
+                    "ids": list(downloaded_files["train"]),
+                    "metadata": metadata,
+                    "files": downloaded_files["train"],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "ids": split_ids_index["validation"],
-                    "metadata_filepath": metadata["metadata"],
-                    "filepaths": downloaded_files,
+                    "ids": list(downloaded_files["validation"]),
+                    "metadata": metadata,
+                    "files": downloaded_files["validation"],
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
-                    "ids": split_ids_index["test"],
-                    "metadata_filepath": metadata["metadata"],
-                    "filepaths": downloaded_files,
+                    "ids": list(downloaded_files["test"]),
+                    "metadata": metadata,
+                    "files": downloaded_files["test"],
                 },
             ),
         ]

-    def _generate_examples(self, ids, metadata_filepath, filepaths):
+    def _generate_examples(self, ids, metadata, files):
         """Yields examples."""
         # TODO(pg19): Yields (key, example) tuples from the dataset

-        with open(metadata_filepath, encoding="utf-8") as f:
-            metadata_dict = csv.DictReader(f, fieldnames=["_id", "short_book_title", "publication_date", "url"])
-            indexed_metadata = {row["_id"]: row for row in metadata_dict}
+        with open(metadata, encoding="utf-8") as f:
+            reader = csv.DictReader(f, fieldnames=["_id", "short_book_title", "publication_date", "url"])
+            id2metadata = {row["_id"]: row for row in reader}

         for _id in ids:
-            data = indexed_metadata[_id]
-            filepath = filepaths[_id]
+            data = id2metadata[_id]
+            file = files[_id]

-            with open(filepath, encoding="utf-8") as f:
+            with open(file, encoding="utf-8") as f:
                 text = f.read()

             _id = data["_id"]
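
After this change, loading goes through the standard API: the script downloads the three split lists from data/ and then the listed books from the public bucket. A hedged usage sketch; the printed field names follow the metadata columns read in _generate_examples, and the exact feature set depends on the rest of the script, which is truncated in this diff:

# Hypothetical usage after this commit. Recent datasets releases may require
# trust_remote_code=True for script-based datasets such as this one.
from datasets import load_dataset

validation = load_dataset("pg19", split="validation")

print(len(validation))  # 50, matching data/validation_files.txt
example = validation[0]
# Columns the script reads from metadata.csv, alongside the book text.
print(example["short_book_title"], example["publication_date"], example["url"])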