# hy_asr_grqaser/hy_asr_grqaser.py
##
import os
import pandas as pd
from datasets import Dataset, DatasetDict, Features, Value, Audio
##
def load_dataset_script(data_dir):
    """
    Load a custom audio-transcription dataset from a local directory.

    :param data_dir: Directory where the audio files and metadata.csv are stored.
    :return: A Hugging Face DatasetDict with a single "train" split.
    """
    # Load metadata.csv (expects "file_name" and "transcription" columns)
    metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))

    # Create lists for audio files and transcriptions
    audio_files = []
    transcriptions = []

    # Iterate through the metadata and populate the lists
    for _, row in metadata.iterrows():
        audio_files.append({'path': os.path.join(data_dir, row['file_name'])})
        transcriptions.append(row['transcription'])

    # Define the features of the dataset
    features = Features({
        'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
        'sentence': Value('string')
    })

    # Create the dataset
    dataset = Dataset.from_dict({
        'audio': audio_files,
        'sentence': transcriptions
    }, features=features)

    # The dataset can be split here if needed (see the train/test sketch after
    # this function), or returned as a single "train" split.
    return DatasetDict({'train': dataset})
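
# A minimal sketch of dividing the single split into train/test sets, assuming a
# 10% test fraction and a fixed seed are acceptable. The helper name
# load_dataset_with_split and its parameters are illustrative additions, not part
# of the original script.
def load_dataset_with_split(data_dir, test_size=0.1, seed=42):
    full = load_dataset_script(data_dir)['train']
    splits = full.train_test_split(test_size=test_size, seed=seed)
    return DatasetDict({'train': splits['train'], 'test': splits['test']})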
## Example usage
if __name__ == "__main__":
    data_directory = "C:\\Projects\\aeneas\\hy_asr_grqaser"
    dataset = load_dataset_script(data_directory)
    print(dataset["train"][2])
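
# Once built, the DatasetDict could be persisted locally or shared on the Hugging
# Face Hub. A minimal sketch, assuming a local output folder and a Hub repo id of
# the form "<username>/hy_asr_grqaser" (both placeholders, not confirmed by this
# script) plus an authenticated `huggingface-cli login` session:
#
#     dataset.save_to_disk("hy_asr_grqaser_arrow")       # local Arrow copy
#     dataset.push_to_hub("<username>/hy_asr_grqaser")   # upload to the Hub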