---
# Source revision: a4600e3 (provenance marker recovered from the original viewer;
# the surrounding file-size banner and line-number gutter were extraction
# artifacts and have been removed)
# Shared resources pulled in from sibling YAML files via custom tags.
# NOTE(review): semantics inferred from usage — `!import <path>` appears to load
# a whole file, and `!import:<key> <path>` a single named entry from it; the
# entries are later referenced in this file with `!cfg <top-level-key>`.
# Confirm against the project's YAML loader/constructors.
logging: !import logging.yml
licit_chars: !import:licit_french_chars_no_spelling utils/lists_dicts_etc/licit_characters.yml
mappings: !import:taxonomy_to_large_taxonomy utils/lists_dicts_etc/taxonomy_mappings.yml
regex_to_replace: !import:default_regex_to_replace utils/lists_dicts_etc/regexes_to_replace.yml
chars_to_ignore: !import:default_char_to_ignore_no_dash utils/lists_dicts_etc/characters_to_ignore.yml

# Preprocessing configuration for the French split of Multilingual LibriSpeech.
# NOTE(review): the "()" key looks like a class-to-instantiate marker for a
# dependency-injection config loader — confirm against the loader implementation.
dataset:
  (): preprocess_dataset.my_datasets.ProcessedDataset
  name: "multilingual_librispeech_fr_processed"
  # Raw data source: the "french" config of Facebook's MLS corpus on the HF Hub.
  origin_dataset_loader:
    (): preprocess_dataset.dataset_loaders.HuggingfaceHubLoader
    dataset_repo_id: "facebook/multilingual_librispeech"
    config_name: "french"
  # The processed dataset is pushed back to the Hugging Face Hub.
  dataset_savers:
    - (): preprocess_dataset.dataset_savers.SaveToHuggingFaceHub
  # Ordered chain of processors; each step receives the previous step's output.
  processing_chain:
    (): preprocess_dataset.dataset_processors.ProcessingChain
    processors:
      # 1. Fold the "validation" split into "train".
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.mergers.ConcatenateTwoSplits
        first_split: "train"
        other_split: "validation"
      # 2. Drop every split except "train" and "test".
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.removers.SplitsToKeep
        splits_to_keep: ["train", "test"]
      # 3. Shuffle rows before subsampling below.
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.Shuffle
      # 4a. Subsample "train" to its first 80 rows (post-shuffle).
      #     NOTE(review): 80/20 rows is tiny — presumably a smoke-test/debug
      #     config rather than a full preprocessing run; confirm intent.
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.ProcessOneSplit
        split: "train"
        processor:
          (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.selectors.Selector
          selection:
            (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.selectors.FirstNRows
            n_rows: 80
      # 4b. Subsample "test" to its first 20 rows.
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.ProcessOneSplit
        split: "test"
        processor:
          (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.selectors.Selector
          selection:
            (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.selectors.FirstNRows
            n_rows: 20
      # 5. Tag every row with constant taxonomy columns.
      #    (Multi-line single-quoted scalar: folds to one line when parsed.)
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.LambdaMap
        lambda_func_str: 'lambda batch: {
                         "taxonomy": "librispeech",
                         "taxonomy_large": "librispeech"}'
      # 6. Drop known-bad samples by speaker/book id prefix.
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.filters.Filter
        print_report: true
        filter:
          # Remove all the samples where the accented characters have been
          # removed by mistake (defect in the upstream transcripts).
          (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.filters.FilterColumnStartWith
          column_name: "id"
          values_to_remove: ["10032_9061","10058_9061","10065_9061","10082_9061","11772_12174","123_2301","123_4904","123_9061","12823_13638","1590_1356","204_4904","2587_4904","2607_4904","28_149","3595_4904","3698_4904","5232_4904","5232_5203","5612_4904","5840_4904","6318_5203","6362_5471","7032_5203","7423_9061","9804_9061","9854_9061"]
      # 7. Normalize column names to the project's schema: id -> path,
      #    text -> sentence.
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.RenameColumn
        origin_column_name: 'id'
        target_column_name: 'path'
      - (): preprocess_dataset.dataset_processors.misc_preprocessors.RenameColumn
        origin_column_name: 'text'
        target_column_name: 'sentence'
      # 8. Drop every other column.
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.removers.ColumnsToKeep
        columns_to_keep: ["sentence", "audio", "path", "taxonomy", "taxonomy_large"]
      # 9. Keep the raw transcript in "sentence"; text cleanup below operates
      #    on the "sentence_processed" copy only.
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.duplicators.DuplicateColumn
        origin_column_name: "sentence"
        new_column_name: "sentence_processed"
      # 10. Text cleanup chain: normalize, apply regex replacements, strip
      #     ignorable characters, lowercase (spellings included).
      #     regex_to_replace / chars_to_ignore come from the !import block at
      #     the top of this file.
      - (): preprocess_dataset.dataset_processors.text_preprocessors.TextPreProcessingChain
        column: "sentence_processed"
        processors:
          - (): preprocess_dataset.dataset_processors.text_preprocessors.preprocessors.Normalize
          - (): preprocess_dataset.dataset_processors.text_preprocessors.preprocessors.ReplaceRegexProcessor
            regex_to_replace: !cfg regex_to_replace
          - (): preprocess_dataset.dataset_processors.text_preprocessors.preprocessors.RemoveSpecialCharsProcessor
            chars_to_ignore: !cfg chars_to_ignore
          - (): preprocess_dataset.dataset_processors.text_preprocessors.preprocessors.LowerCaseProcessor
            do_not_lowercase_spellings: false
      # 11. Drop rows whose cleaned text still contains characters outside the
      #     licit French character set (licit_chars imported at top of file).
      - (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.filters.Filter
        print_report: true
        filter:
          (): preprocess_dataset.dataset_processors.dataset_shape_preprocessors.filters.LicitCharactersFilter
          licit_chars: !cfg licit_chars
          column_name: "sentence_processed"
      # 12. Audio finalization. NOTE(review): ForceResampling shows no
      #     parameters in this chunk — its target sampling rate may be set on
      #     lines below the visible excerpt or by a class default; verify.
      - (): preprocess_dataset.dataset_processors.audio_preprocessors.CastAudioColumn
      - (): preprocess_dataset.dataset_processors.audio_preprocessors.ForceResampling