BrunoHays committed on
Commit
ea7da14
1 Parent(s): 674bdcb

Update multilingual-TEDX-fr.py

Browse files
Files changed (1) hide show
  1. multilingual-TEDX-fr.py +5 -4
multilingual-TEDX-fr.py CHANGED
@@ -123,7 +123,6 @@ class TEDX(datasets.GeneratorBasedBuilder):
123
  splitted_dataset = {}
124
  segments = dl_manager.download(segments)
125
  sentences = dl_manager.download(sentences)
126
- print(segments)
127
  for split in segments:
128
  audios_path, utterances = self._split_by_audio_file(segments[split], sentences[split], split)
129
  audios_path = dl_manager.download(audios_path)
@@ -220,13 +219,15 @@ class TEDX(datasets.GeneratorBasedBuilder):
220
  audio = self.load_audio(audio_path)
221
  for utterance in self._merged_utterances_iterator(utterances):
222
  transcript_name = f"{utterance.speaker_id}-{utterance.index}"
 
 
223
  yield transcript_name, {
224
  "file": transcript_name,
225
  "index": utterance.index,
226
  "sentence": utterance.sentence,
227
- "start_timestamp": utterance.start_timestamp,
228
- "end_timestamp": utterance.end_timestamp,
229
  "speaker_id": utterance.speaker_id,
230
  "audio": {"path": transcript_name,
231
- "array": self._cut_audio(audio, utterance.start_timestamp, utterance.end_timestamp),
232
  "sampling_rate": SAMPLING_RATE}}
 
123
  splitted_dataset = {}
124
  segments = dl_manager.download(segments)
125
  sentences = dl_manager.download(sentences)
 
126
  for split in segments:
127
  audios_path, utterances = self._split_by_audio_file(segments[split], sentences[split], split)
128
  audios_path = dl_manager.download(audios_path)
 
219
  audio = self.load_audio(audio_path)
220
  for utterance in self._merged_utterances_iterator(utterances):
221
  transcript_name = f"{utterance.speaker_id}-{utterance.index}"
222
+ start_timestamp = float(utterance.start_timestamp)
223
+ end_timestamp = float(utterance.end_timestamp)
224
  yield transcript_name, {
225
  "file": transcript_name,
226
  "index": utterance.index,
227
  "sentence": utterance.sentence,
228
+ "start_timestamp": start_timestamp,
229
+ "end_timestamp": end_timestamp,
230
  "speaker_id": utterance.speaker_id,
231
  "audio": {"path": transcript_name,
232
+ "array": self._cut_audio(audio, start_timestamp, end_timestamp),
233
  "sampling_rate": SAMPLING_RATE}}