# hy_asr_grqaser / hy_asr_grqaser.py
##
import os
import pandas as pd
from datasets import Dataset, DatasetDict, Features, Value, Audio
##
def load_dataset_script(data_dir):
print(f"data_dir: {data_dir}")
"""
Load dataset script for custom audio-transcription dataset.
:param data_dir: Directory where the data and metadata.csv are stored.
:return: A Hugging Face Dataset object.
"""
# Load metadata.csv
metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
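    # Assumed metadata.csv layout (inferred from the columns used below; the
    # file names shown are only illustrative):
    #
    #   file_name,transcription
    #   audio/0001.wav,<transcription text>
    #   audio/0002.wav,<transcription text>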
# Create lists for audio files and transcriptions
audio_files = []
transcriptions = []
# Iterate through the metadata and populate the lists
for _, row in metadata.iterrows():
audio_files.append({'path': os.path.join(data_dir, row['file_name'])})
transcriptions.append(row['transcription'])
# Define features of the dataset
features = Features({
'audio': Audio(sampling_rate=16_000), # Adjust the sampling rate as needed
'sentence': Value('string')
})
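    # Note: because sampling_rate is set, the Audio feature resamples clips to
    # 16 kHz when examples are decoded, so the source files do not have to be
    # stored at 16 kHz.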
# Create a dataset
dataset = Dataset.from_dict({
'audio': audio_files,
'sentence': transcriptions
}, features=features)
# You can split the dataset here if needed, or return as a single dataset
return DatasetDict({'train': dataset})
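
## Optional: train/test split
# The function above returns everything under a single 'train' split (see the
# comment before its return statement). If a held-out split is wanted, one
# possible sketch (not part of the original script) uses Dataset.train_test_split:
#
# split = dataset.train_test_split(test_size=0.1, seed=42)
# return DatasetDict({'train': split['train'], 'test': split['test']})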
## Example usage
# if __name__ == "__main__":
# data_directory = "C:\\Projects\\aeneas\\hy_asr_grqaser"
# dataset = load_dataset_script(data_directory)
# print(dataset["train"][1])
# from datasets import load_dataset
# load_dataset("aburnazy/hy_asr_grqaser", data_dir=".")