# (removed non-Python residue: pasted file-size / git-blame hashes / line-number ruler)
##
import os
import pandas as pd
from datasets import GeneratorBasedBuilder, DatasetInfo, SplitGenerator, Split, Features, Value, Audio, Version
# Remote archive(s) of the audio data, keyed by split name.
_URLS = {"train": "https://huggingface.co/datasets/aburnazy/hy_asr_grqaser/resolve/main/data/train.tar.gz"}
class HyAsrGrqaser(GeneratorBasedBuilder):
    """Armenian audio-transcription dataset (speech audio paired with sentences)."""

    VERSION = Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: description, features, supervised keys."""
        return DatasetInfo(
            description="This dataset contains Armenian speech and transcriptions.",
            features=Features({
                # Audio is decoded at 16 kHz; adjust the sampling rate as needed.
                'audio': Audio(sampling_rate=16_000),
                'sentence': Value('string'),
            }),
            supervised_keys=("audio", "sentence"),
        )

    def _split_generators(self, dl_manager):
        """Download metadata and audio archive; return one TRAIN SplitGenerator.

        Args:
            dl_manager: the `datasets` download manager used to fetch and
                extract the remote files.
        """
        metadata_path = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/aburnazy/hy_asr_grqaser/resolve/main/metadata.csv")
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"data_dir": data_dir['train'], "metadata_path": metadata_path},
            ),
        ]

    def _generate_examples(self, data_dir, metadata_path):
        """Yield (index, example) pairs driven by the metadata CSV.

        Args:
            data_dir: directory containing the extracted audio files.
            metadata_path: path to a CSV with `file_name` and `transcription`
                columns — presumably relative audio paths; confirm against
                the published metadata.csv.
        """
        metadata = pd.read_csv(metadata_path)
        for idx, row in metadata.iterrows():
            # The Audio feature decodes lazily from the file path.
            yield idx, {
                'audio': {'path': os.path.join(data_dir, row['file_name'])},
                'sentence': row['transcription'],
            }
# Testing the dataset locally
# if __name__ == "__main__":
# from datasets import load_dataset
# dataset = load_dataset("C:\\Projects\\aeneas\\hy_asr_grqaser\\hy_asr_grqaser.py")
# print(dataset["train"][0])
##
# from datasets import load_dataset
#
# dataset = load_dataset("aburnazy/hy_asr_grqaser")
# print('------------------')
# print(dataset["train"][0])
# (removed trailing extraction residue)