Commit 720b05b
Parent(s): 7ea2120

store local paths in non-streaming mode
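What the commit does: the data archives are now downloaded explicitly and, when the loader is not in streaming mode, also extracted locally, so that generated examples can point at real audio files on disk instead of only carrying the in-archive filename plus raw bytes. A minimal consumer-side sketch of the effect, assuming a "clean" config exists and that the script's _info() casts the audio column to datasets.Audio (neither detail is shown in this diff):

from datasets import load_dataset

# Non-streaming: dl_manager.is_streaming is False, so the tar archives are
# extracted and example["audio"]["path"] resolves to a local file in the cache.
ds = load_dataset("MLCommons/peoples_speech", "clean", split="train")
print(ds[0]["audio"]["path"])

# Streaming: extraction is skipped (local_extracted_archive_paths is all None),
# "path" is just the filename inside the tar, and audio comes from the yielded bytes.
ds_stream = load_dataset("MLCommons/peoples_speech", "clean", split="train", streaming=True)
print(next(iter(ds_stream))["audio"]["path"])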
peoples_speech.py  CHANGED  (+14 -10)
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import json
+import os
 
 import datasets
 from datasets.tasks import AutomaticSpeechRecognition
@@ -86,13 +87,13 @@ _URLS = {
     },
 }
 
-
+_BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
 
 # relative path to data inside dataset's repo
-_DATA_URL = "{config}/{config}_00000{archive_id}.tar"
+_DATA_URL = _BASE_URL + "{config}/{config}_00000{archive_id}.tar"
 
 # relative path to metadata inside dataset's repo
-_MANIFEST_URL = "{config}.json"
+_MANIFEST_URL = _BASE_URL + "{config}.json"
 
 
 class PeoplesSpeech(datasets.GeneratorBasedBuilder):
@@ -130,7 +131,9 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
         # TODO: for demo purposes I use just first 5 archives
         # TODO: this should be changed to the actual number of archives further
         urls = [_DATA_URL.format(config=self.config.name, archive_id=i) for i in range(5)]
-
+        archive_paths = [dl_manager.download(url) for url in urls]
+        local_extracted_archive_paths = [dl_manager.extract(path) for path in archive_paths] \
+            if not dl_manager.is_streaming else [None] * len(archive_paths)
 
         manifest_url = _MANIFEST_URL.format(config=self.config.name)
         manifest_path = dl_manager.download_and_extract(manifest_url)
@@ -139,13 +142,14 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "
+                    "local_extracted_archive_paths": local_extracted_archive_paths,
+                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                     "manifest_path": manifest_path
                 },
             ),
         ]
 
-    def _generate_examples(self, archives, manifest_path):
+    def _generate_examples(self, local_extracted_archive_paths, archives, manifest_path):
         meta = dict()
         with open(manifest_path, "r", encoding="utf-8") as f:
             for line in tqdm(f, desc="reading metadata file"):
@@ -162,13 +166,13 @@
                 }
 
         print("generating examples")
-        for archive in archives:
-            # note that you don't need to use `tarfile` library and open tar archives manually
-            # dl_manager.iter_archive() does it for you :)
+        for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
             for audio_filename, audio_file in archive:
+                path = os.path.join(local_extracted_archive_path, audio_filename) if local_extracted_archive_path \
+                    else audio_filename
                 yield audio_filename, {
                     "id": audio_filename,
-                    "audio": {"path":
+                    "audio": {"path": path, "bytes": audio_file.read()},
                     "text": meta[audio_filename]["text"],
                     "duration_ms": meta[audio_filename]["duration_ms"]
                 }
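The os.path.join in _generate_examples relies on dl_manager.iter_archive yielding member names relative to the archive root, in the same layout that dl_manager.extract reproduces on disk. A standalone sketch of that correspondence, using the tarfile module directly with hypothetical file names (the loading script itself never opens tars manually, as the removed comments pointed out):

import os
import tarfile

archive_path = "clean_000000.tar"           # hypothetical downloaded archive
extracted_dir = "extracted/clean_000000"    # hypothetical extraction target

with tarfile.open(archive_path) as tar:
    tar.extractall(extracted_dir)
    for member in tar.getmembers():
        if not member.isfile():
            continue
        # member.name matches the relative audio_filename that iter_archive yields,
        # so joining it onto the extraction directory gives the local file path.
        local_path = os.path.join(extracted_dir, member.name)
        assert os.path.exists(local_path)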