"""Build a Hugging Face audio-transcription dataset from local audio files and a metadata.csv."""
import os
import pandas as pd
from datasets import Dataset, DatasetDict, Features, Value, Audio

def load_dataset_script(data_dir):
    """
    Load dataset script for custom audio-transcription dataset.

    :param data_dir: Directory where the data and metadata.csv are stored.
    :return: A Hugging Face Dataset object.
    """
    # Load metadata.csv
    metadata = pd.read_csv(os.path.join(data_dir, "metadata.csv"))
    
    # Collect audio paths and transcriptions; the Audio feature accepts plain
    # path strings, so the joined paths are stored directly
    audio_files = []
    transcriptions = []

    for _, row in metadata.iterrows():
        audio_files.append(os.path.join(data_dir, row['file_name']))
        transcriptions.append(row['transcription'])

    # Define features of the dataset
    features = Features({
        'audio': Audio(sampling_rate=16_000),  # Adjust the sampling rate as needed
        'sentence': Value('string')
    })
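    # Note: when sampling_rate is set, the Audio feature resamples on access,
    # so the source files need not already be at 16 kHz.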

    # Create a dataset
    dataset = Dataset.from_dict({
        'audio': audio_files,
        'sentence': transcriptions
    }, features=features)

    # Split the dataset here if needed (a commented sketch follows), or
    # return everything as a single 'train' split
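    # A minimal sketch of a 90/10 train/test split (the test_size and seed
    # values are illustrative, not prescribed):
    #   splits = dataset.train_test_split(test_size=0.1, seed=42)
    #   return DatasetDict({'train': splits['train'], 'test': splits['test']})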
    return DatasetDict({'train': dataset})


# Example usage
if __name__ == "__main__":
    data_directory = "C:\\Projects\\aeneas\\hy_asr_grqaser"
    dataset = load_dataset_script(data_directory)
    print(dataset["train"][2])