hy_asr_grqaser / hy_asr_grqaser.py
"""Hugging Face `datasets` loading script for the hy_asr_grqaser Armenian speech dataset."""
import os

import pandas as pd
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version


class HyAsrGrqaser(GeneratorBasedBuilder):
    """Armenian Audio-Transcription Dataset."""

    VERSION = Version("1.0.0")

    def _info(self):
        return DatasetInfo(
            description="This dataset contains Armenian speech and transcriptions.",
            features=Features({
                'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
                'sentence': Value('string'),
            }),
            supervised_keys=("audio", "sentence"),
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # The audio files and metadata.csv are expected to sit next to this script.
        data_dir = os.path.dirname(__file__)
        metadata_path = os.path.join(data_dir, "metadata.csv")
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "metadata_path": metadata_path},
            ),
        ]
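
    # `dl_manager` is unused above because the data is assumed to live next to
    # this script. If the audio were instead published as a remote archive, a
    # sketch of the download step (the URL below is a hypothetical placeholder)
    # would be:
    #
    #     archive_dir = dl_manager.download_and_extract("https://example.com/hy_asr_grqaser.tar.gz")
    #
    # and `archive_dir` would then be passed as `data_dir` in `gen_kwargs`.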

    def _generate_examples(self, data_dir, metadata_path):
        """Yields examples."""
        # Load metadata.csv, which maps each audio file to its transcription file
        metadata = pd.read_csv(metadata_path)
        # Generate examples
        for idx, row in metadata.iterrows():
            file_path = os.path.join(data_dir, row['file_name'])
            transcription_path = os.path.join(data_dir, row['transcription_file'])
            # Read the transcription as UTF-8 so the Armenian text decodes correctly
            # regardless of the platform's default encoding
            with open(transcription_path, 'r', encoding='utf-8') as f:
                transcription = f.read().strip()
            yield idx, {
                'audio': {'path': file_path},
                'sentence': transcription,
            }
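

# Expected layout of `metadata.csv`: a minimal sketch, assuming the two columns
# read above (`file_name` and `transcription_file`) hold paths relative to this
# script's directory. The concrete file names below are hypothetical placeholders.
#
#     file_name,transcription_file
#     audio/sample_0001.wav,transcripts/sample_0001.txt
#     audio/sample_0002.wav,transcripts/sample_0002.txt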


# Quick local test: run this file directly to build the dataset and inspect it.
# Keeping the call under the __main__ guard prevents it from executing when the
# `datasets` library imports this script as a loading module.
if __name__ == "__main__":
    from datasets import load_dataset

    # Either point at a local copy of this script, e.g.:
    # dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
    # or load the published dataset from the Hugging Face Hub:
    dataset = load_dataset("aburnazy/hy_asr_grqaser")
    print(dataset)
    print(dataset["train"][0])
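
# Consuming the loaded dataset (a sketch, not part of the loading script itself):
# with the `Audio` feature, accessing an example decodes the file into an array,
# so a typical iteration looks like:
#
#     for example in dataset["train"]:
#         waveform = example["audio"]["array"]               # NumPy float array
#         sampling_rate = example["audio"]["sampling_rate"]  # 16_000, per _info()
#         text = example["sentence"]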