agkphysics committed on
Commit
5a2fa42
1 Parent(s): cac0e78

Add unbalanced config. Fix memory leak.

Browse files
Files changed (2) hide show
  1. AudioSet.py +44 -19
  2. README.md +127 -24
AudioSet.py CHANGED
@@ -51,6 +51,10 @@ _LICENSE = "cc-by-4.0"
51
 
52
  _URL_PREFIX = "https://huggingface.co/datasets/agkphysics/AudioSet/resolve/main"
53
 
 
 
 
 
54
 
55
  def _iter_tar(path):
56
  """Iterate through the tar archive, but without skipping some files, which the HF
@@ -68,6 +72,20 @@ def _iter_tar(path):
68
  class AudioSetDataset(datasets.GeneratorBasedBuilder):
69
  VERSION = datasets.Version("1.0.0")
70
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  def _info(self) -> datasets.DatasetInfo:
72
  return datasets.DatasetInfo(
73
  description=_DESCRIPTION,
@@ -92,14 +110,24 @@ class AudioSetDataset(datasets.GeneratorBasedBuilder):
92
  prefix = prefix + "/data"
93
 
94
  _LABEL_URLS = {
95
- "bal_train": f"{prefix}/balanced_train_segments.csv",
 
 
 
 
96
  "eval": f"{prefix}/eval_segments.csv",
97
  "ontology": f"{prefix}/ontology.json",
98
  }
99
-
100
  _DATA_URLS = {
101
- "bal_train": [f"{prefix}/bal_train0{i}.tar" for i in range(10)],
102
- "eval": [f"{prefix}/eval0{i}.tar" for i in range(9)],
 
 
 
 
 
 
 
103
  }
104
 
105
  tar_files = dl_manager.download(_DATA_URLS)
@@ -129,30 +157,27 @@ class AudioSetDataset(datasets.GeneratorBasedBuilder):
129
  ]
130
 
131
  def _generate_examples(self, labels, ontology, audio_files):
 
 
 
 
132
  labels_df = pd.read_csv(
133
  labels,
134
  skiprows=3,
135
  header=None,
136
  skipinitialspace=True,
137
  names=["vid_id", "start", "end", "labels"],
 
138
  )
139
- with open(ontology) as fid:
140
- ontology_data = json.load(fid)
141
- id_to_name = {x["id"]: x["name"] for x in ontology_data}
142
 
143
- examples = {}
144
- for _, row in labels_df.iterrows():
145
- label_ids = row["labels"].split(",")
146
  human_labels = [id_to_name[x] for x in label_ids]
147
- examples[row["vid_id"]] = {
148
- "video_id": row["vid_id"],
149
  "labels": label_ids,
150
  "human_labels": human_labels,
 
151
  }
152
-
153
- for path, fid in audio_files:
154
- vid_id = os.path.splitext(os.path.basename(path))[0]
155
- if vid_id in examples:
156
- audio = {"path": path, "bytes": fid.read()}
157
- examples[vid_id]["audio"] = audio
158
- yield vid_id, examples[vid_id]
 
51
 
52
  _URL_PREFIX = "https://huggingface.co/datasets/agkphysics/AudioSet/resolve/main"
53
 
54
+ _N_BAL_TRAIN_TARS = 10
55
+ _N_UNBAL_TRAIN_TARS = 870
56
+ _N_EVAL_TARS = 9
57
+
58
 
59
  def _iter_tar(path):
60
  """Iterate through the tar archive, but without skipping some files, which the HF
 
72
  class AudioSetDataset(datasets.GeneratorBasedBuilder):
73
  VERSION = datasets.Version("1.0.0")
74
 
75
+ BUILDER_CONFIGS = [
76
+ datasets.BuilderConfig(
77
+ name="balanced",
78
+ version=VERSION,
79
+ description="Balanced training and balanced evaluation set.",
80
+ ),
81
+ datasets.BuilderConfig(
82
+ name="unbalanced",
83
+ version=VERSION,
84
+ description="Full unbalanced training set and balanced evaluation set.",
85
+ ),
86
+ ]
87
+ DEFAULT_CONFIG_NAME = "balanced"
88
+
89
  def _info(self) -> datasets.DatasetInfo:
90
  return datasets.DatasetInfo(
91
  description=_DESCRIPTION,
 
110
  prefix = prefix + "/data"
111
 
112
  _LABEL_URLS = {
113
+ "bal_train": (
114
+ f"{prefix}/balanced_train_segments.csv"
115
+ if self.config.name == "balanced"
116
+ else f"{prefix}/unbalanced_train_segments.csv"
117
+ ),
118
  "eval": f"{prefix}/eval_segments.csv",
119
  "ontology": f"{prefix}/ontology.json",
120
  }
 
121
  _DATA_URLS = {
122
+ "bal_train": (
123
+ [f"{prefix}/bal_train0{i}.tar" for i in range(_N_BAL_TRAIN_TARS)]
124
+ if self.config.name == "balanced"
125
+ else [
126
+ f"{prefix}/unbal_train{i:03d}.tar"
127
+ for i in range(_N_UNBAL_TRAIN_TARS)
128
+ ]
129
+ ),
130
+ "eval": [f"{prefix}/eval0{i}.tar" for i in range(_N_EVAL_TARS)],
131
  }
132
 
133
  tar_files = dl_manager.download(_DATA_URLS)
 
157
  ]
158
 
159
  def _generate_examples(self, labels, ontology, audio_files):
160
+ with open(ontology) as fid:
161
+ ontology_data = json.load(fid)
162
+ id_to_name = {x["id"]: x["name"] for x in ontology_data}
163
+
164
  labels_df = pd.read_csv(
165
  labels,
166
  skiprows=3,
167
  header=None,
168
  skipinitialspace=True,
169
  names=["vid_id", "start", "end", "labels"],
170
+ index_col="vid_id",
171
  )
 
 
 
172
 
173
+ for path, fid in audio_files:
174
+ vid_id = os.path.splitext(os.path.basename(path))[0]
175
+ label_ids = labels_df.loc[vid_id, "labels"].split(",")
176
  human_labels = [id_to_name[x] for x in label_ids]
177
+ example = {
178
+ "video_id": vid_id,
179
  "labels": label_ids,
180
  "human_labels": human_labels,
181
+ "audio": {"path": path, "bytes": fid.read()},
182
  }
183
+ yield vid_id, example
 
 
 
 
 
 
README.md CHANGED
@@ -1,13 +1,23 @@
1
  ---
 
 
2
  license: cc-by-4.0
 
 
 
 
 
3
  task_categories:
4
  - audio-classification
 
 
 
 
 
5
  tags:
6
  - audio
7
- size_categories:
8
- - 10K<n<100K
9
- paperswithcode_id: audioset
10
  dataset_info:
 
11
  features:
12
  - name: video_id
13
  dtype: string
@@ -26,29 +36,52 @@ dataset_info:
26
  num_examples: 17142
27
  download_size: 49805654900
28
  dataset_size: 49779893265
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  ---
30
 
31
  # Dataset Card for AudioSet
32
 
33
- ## Dataset Details
34
- This repository contains the balanced training set and evaluation set of
35
- the [AudioSet data](
36
- https://research.google.com/audioset/dataset/index.html). The YouTube
37
- videos were downloaded in March 2023, so not all of the original audios
38
- are available.
39
-
40
- The distribuion of audio clips is as follows. In parentheses is the dict
41
- key used for HugginFace `datasets`:
42
- - `bal_train` (`train`): 18685 audio clips out of 22160 originally.
43
- - `eval` (`test`): 17142 audio clips out of 20371 originally.
44
-
45
- You can use the `datasets` library to load this dataset, in which case
46
- the raw audio will be returned along with a sequence of one or more
47
- labels. Note that the raw audio is returned without further processing,
48
- so you will need to decode and possibly downsample the audio for model
49
- training.
50
-
51
- Example instance from the `train` subset:
 
 
 
 
52
  ```python
53
  {
54
  'video_id': '--PJHxphWEs',
@@ -63,8 +96,78 @@ Example instance from the `train` subset:
63
  }
64
  ```
65
 
66
- Most audio is sampled at 48 kHz 24 bit, but about 10% is sampled at
67
- 44.1 kHz 24 bit. Audio files are stored in the FLAC format.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  ## Citation
70
  ```bibtex
 
1
  ---
2
+ language:
3
+ - en
4
  license: cc-by-4.0
5
+ size_categories:
6
+ - 10K<n<100K
7
+ - 1M<n<10M
8
+ source_datasets:
9
+ - original
10
  task_categories:
11
  - audio-classification
12
+ paperswithcode_id: audioset
13
+ pretty_name: AudioSet
14
+ config_names:
15
+ - balanced
16
+ - unbalanced
17
  tags:
18
  - audio
 
 
 
19
  dataset_info:
20
+ - config_name: balanced
21
  features:
22
  - name: video_id
23
  dtype: string
 
36
  num_examples: 17142
37
  download_size: 49805654900
38
  dataset_size: 49779893265
39
+ - config_name: unbalanced
40
+ features:
41
+ - name: video_id
42
+ dtype: string
43
+ - name: audio
44
+ dtype: audio
45
+ - name: labels
46
+ sequence: string
47
+ - name: human_labels
48
+ sequence: string
49
+ splits:
50
+ - name: train
51
+ num_bytes: 2408656417541
52
+ num_examples: 1738788
53
+ - name: test
54
+ num_bytes: 23763682278
55
+ num_examples: 17142
56
+ download_size: 2433673104977
57
+ dataset_size: 2432420099819
58
  ---
59
 
60
  # Dataset Card for AudioSet
61
 
62
+ ## Dataset Description
63
+ - **Homepage**: https://research.google.com/audioset/index.html
64
+ - **Paper**: https://storage.googleapis.com/gweb-research2023-media/pubtools/pdf/45857.pdf
65
+ - **Leaderboard**: https://paperswithcode.com/sota/audio-classification-on-audioset
66
+
67
+ ### Dataset Summary
68
+ [AudioSet](https://research.google.com/audioset/dataset/index.html) is a
69
+ dataset of 10-second clips from YouTube, annotated into one or more
70
+ sound categories, following the AudioSet ontology.
71
+
72
+ ### Supported Tasks and Leaderboards
73
+ - `audio-classification`: Classify audio clips into categories. The
74
+ leaderboard is available
75
+ [here](https://paperswithcode.com/sota/audio-classification-on-audioset)
76
+
77
+ ### Languages
78
+ The class labels in the dataset are in English.
79
+
80
+
81
+ ## Dataset Structure
82
+
83
+ ### Data Instances
84
+ Example instance from the dataset:
85
  ```python
86
  {
87
  'video_id': '--PJHxphWEs',
 
96
  }
97
  ```
98
 
99
+ ### Data Fields
100
+ Instances have the following fields:
101
+ - `video_id`: a `string` feature containing the original YouTube ID.
102
+ - `audio`: an `Audio` feature containing the audio data and sample rate.
103
+ - `labels`: a sequence of `string` features containing the labels
104
+ associated with the audio clip.
105
+ - `human_labels`: a sequence of `string` features containing the
106
+ human-readable forms of the same labels as in `labels`.
107
+
108
+ ### Data Splits
109
 + The distribution of audio clips is as follows:
110
+
111
+ #### `balanced` configuration
112
+ | |train|test |
113
+ |-----------|----:|----:|
114
+ |# instances|18685|17142|
115
+
116
+ #### `unbalanced` configuration
117
+ | |train |test |
118
+ |-----------|------:|----:|
119
+ |# instances|1738788|17142|
120
+
121
+
122
+ ## Dataset Creation
123
+
124
+ ### Curation Rationale
125
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
126
+
127
+ ### Source Data
128
+
129
+ #### Initial Data Collection and Normalization
130
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
131
+
132
+ #### Who are the source language producers?
133
+ The labels are from the AudioSet ontology. Audio clips are from YouTube.
134
+
135
+ ### Annotations
136
+
137
+ #### Annotation process
138
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
139
+
140
+ #### Who are the annotators?
141
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
142
+
143
+ ### Personal and Sensitive Information
144
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
145
+
146
+ ## Considerations for Using the Data
147
+
148
+ ### Social Impact of Dataset
149
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
150
+
151
+ ### Discussion of Biases
152
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
153
+
154
+ ### Other Known Limitations
155
+ 1. The YouTube videos in this copy of AudioSet were downloaded in March
156
+ 2023, so not all of the original audios are available. The number of
157
+ clips able to be downloaded is as follows:
158
+ - Balanced train: 18685 audio clips out of 22160 originally.
159
+ - Unbalanced train: 1738788 clips out of 2041789 originally.
160
+ - Evaluation: 17142 audio clips out of 20371 originally.
161
+ 2. Most audio is sampled at 48 kHz 24 bit, but about 10% is sampled at
162
+ 44.1 kHz 24 bit. Audio files are stored in the FLAC format.
163
+
164
+ ## Additional Information
165
+
166
+ ### Dataset Curators
167
+ [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
168
+
169
+ ### Licensing Information
170
+ The AudioSet data is licensed under CC-BY-4.0
171
 
172
  ## Citation
173
  ```bibtex