jnemecek committed
Commit 635233d
1 Parent(s): 5ed0937

add 'sentence' to generator

Files changed (1)
  1. audio-keyword-spotting.py +141 -140
audio-keyword-spotting.py CHANGED
@@ -1,141 +1,142 @@
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""sil-ai/audio-keyword-spotting is a subset of MLCommons/ml_spoken_words focusing on keywords found in the Bible"""

import json
import os

import datasets

_CITATION = """\
@InProceedings{huggingface:audio-keyword-spotting,
title = {audio-keyword-spotting},
author={Joshua Nemecek
},
year={2022}
}
"""
_DESCRIPTION = 'sil-ai/audio-keyword-spotting is a subset of MLCommons/ml_spoken_words focusing on keywords found in the Bible'
_LANGUAGES = ['eng', 'ind', 'spa']
_LANG_ISO_DICT = {'en':'eng','es':'spa','id':'ind'}
_HOMEPAGE = 'https://ai.sil.org'
_URLS = {"metadata": "bible-keyword.json",
         "files": {lang: f'https://audio-keyword-spotting.s3.amazonaws.com/HF/{lang}-kw-archive.tar.gz' for lang in _LANGUAGES},
        }
_LICENSE = 'CC-BY 4.0'
_GENDERS = ["MALE", "FEMALE", "OTHER", "NAN"]

class AudioKeywordSpottingConfig(datasets.BuilderConfig):
    """BuilderConfig for Audio-Keyword-Spotting"""
    def __init__(self, language='', **kwargs):
        super(AudioKeywordSpottingConfig, self).__init__(**kwargs)
        self.language = _LANG_ISO_DICT.get(language, language)

class AudioKeywordSpotting(datasets.GeneratorBasedBuilder):
    """Audio-Keyword-Spotting class"""
    BUILDER_CONFIGS = [AudioKeywordSpottingConfig(name=x, description=f'Audio keyword spotting for language code {x}', language=x) for x in _LANGUAGES]

    DEFAULT_CONFIG_NAME = ''

    BUILDER_CONFIG_CLASS = AudioKeywordSpottingConfig

    VERSION = datasets.Version("0.0.1")

    def _info(self):
        features = datasets.Features(
            {
                "file": datasets.Value("string"),
                "is_valid": datasets.Value("bool"),
                "language": datasets.Value("string"),
                "speaker_id": datasets.Value("string"),
                "gender": datasets.ClassLabel(names=_GENDERS),
                "keyword": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=16_000),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):

        if self.config.language == '':
            raise ValueError('Please specify a language.')
        elif self.config.language not in _LANGUAGES:
            raise ValueError(f'{self.config.language} does not appear in the list of languages: {_LANGUAGES}')

        data_dir = dl_manager.download(_URLS['metadata'])
        with open(data_dir, 'r') as f:
            filemeta = json.load(f)

        audio_dir = dl_manager.download_and_extract(_URLS['files'][self.config.name])

        langmeta = filemeta[self.config.language]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "audio_dir": audio_dir,
                    "data": langmeta,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "audio_dir": audio_dir,
                    "data": langmeta,
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "audio_dir": audio_dir,
                    "data": langmeta,
                    "split": "test",
                },
            ),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_examples(self, audio_dir, data, split):
        for key, row in enumerate(data[split]):
            try:
                tfile = os.path.join(audio_dir, row['file'])
                if not tfile.endswith('.wav'):
                    os.rename(tfile, tfile + '.wav')
                    tfile += '.wav'
                yield key, {
                    "file": tfile,
                    "is_valid": row['is_valid'],
                    "language": self.config.language,
                    "speaker_id": row['speaker_id'],
                    "gender": row['gender'],
+                   "sentence": row['sentence'],
                    "keyword": row['keyword'],
                    "audio": tfile,
                }
            except Exception as e:
                print(e)
                print(f'In split {split}: {row["file"]} failed to download. Data may be missing.')
                pass
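
For reference, a minimal usage sketch (not part of this commit): it assumes the script is published on the Hub as sil-ai/audio-keyword-spotting, that the bible-keyword.json metadata rows actually carry a 'sentence' key, and that "sentence" is also declared in the Features returned by _info (otherwise example generation would fail with a key mismatch).

    # Hypothetical usage of the loading script above, after the 'sentence' field is added.
    # Config name must be one of the language codes in _LANGUAGES: 'eng', 'ind', 'spa'.
    from datasets import load_dataset

    ds = load_dataset("sil-ai/audio-keyword-spotting", "eng", split="train")

    example = ds[0]
    # The keyword and (assumed) source sentence yielded by _generate_examples.
    print(example["keyword"], "-", example["sentence"])
    # Audio is decoded by the datasets.Audio feature at the declared 16 kHz rate.
    print(example["audio"]["sampling_rate"])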