yky-h committed
Commit
466ac7c
1 Parent(s): c0f5a37
Files changed (6)
  1. README.md +92 -0
  2. config.json +96 -0
  3. fairseq/model.pt +3 -0
  4. preprocessor_config.json +9 -0
  5. pytorch_model.bin +3 -0
  6. rinna.png +0 -0
README.md ADDED
@@ -0,0 +1,92 @@
+ ---
+ thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
+ language: ja
+ license: apache-2.0
+ datasets: reazon-research/reazonspeech
+ inference: false
+ tags:
+ - data2vec
+ - speech
+ ---
+ 
+ # `rinna/japanese-data2vec-audio-base`
+ 
+ ![rinna-icon](./rinna.png)
+ 
+ # Overview
+ 
+ This is a Japanese data2vec Audio Base model trained by [rinna Co., Ltd.](https://rinna.co.jp/)
+ 
+ * **Model summary**
+ 
+ The model architecture is the same as the [original data2vec Audio Base model](https://huggingface.co/facebook/data2vec-audio-base), which contains 12 transformer layers with 12 attention heads.
+ The model was trained using code from the [official repository](https://github.com/facebookresearch/fairseq/tree/main/examples/data2vec#data2vec), and the detailed training configuration can be found in the same repository and the [original paper](https://ai.meta.com/research/data2vec-a-general-framework-for-self-supervised-learning-in-speech-vision-and-language/).
+ 
+ * **Training**
+ 
+ The model was trained on approximately 19,000 hours of the following Japanese speech corpus, ReazonSpeech v1.
+ - [ReazonSpeech](https://huggingface.co/datasets/reazon-research/reazonspeech)
+ 
+ * **Contributors**
+ 
+ - [Yukiya Hono](https://huggingface.co/yky-h)
+ - [Kentaro Mitsui](https://huggingface.co/Kentaro321)
+ - [Kei Sawada](https://huggingface.co/keisawada)
+ 
+ ---
+ 
+ # How to use the model
+ 
+ ```python
+ import soundfile as sf
+ from transformers import AutoFeatureExtractor, AutoModel
+ 
+ model_name = "rinna/japanese-data2vec-audio-base"
+ feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
+ model = AutoModel.from_pretrained(model_name)
+ model.eval()
+ 
+ # audio_file: path to a 16 kHz, single-channel audio file
+ raw_speech_16kHz, sr = sf.read(audio_file)
+ inputs = feature_extractor(
+     raw_speech_16kHz,
+     return_tensors="pt",
+     sampling_rate=sr,
+ )
+ outputs = model(**inputs)
+ 
+ print(f"Input: {inputs.input_values.size()}")  # [1, #samples]
+ print(f"Output: {outputs.last_hidden_state.size()}")  # [1, #frames, 768]
+ ```
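+ 
+ The number of output frames is determined by the convolutional feature extractor, whose strides downsample the 16 kHz waveform by an overall factor of 320 (see `conv_kernel` and `conv_stride` in this repository's `config.json`). The helper below is an illustrative sketch of that relationship, not part of the official API:
+ 
+ ```python
+ def estimate_num_frames(num_samples: int) -> int:
+     """Estimate #frames produced by the 7-layer convolutional feature extractor."""
+     kernels = [10, 3, 3, 3, 3, 2, 2]  # conv_kernel in config.json
+     strides = [5, 2, 2, 2, 2, 2, 2]   # conv_stride in config.json
+     length = num_samples
+     for k, s in zip(kernels, strides):
+         length = (length - k) // s + 1  # standard 1D convolution output length
+     return length
+ 
+ # 1 second of 16 kHz audio -> 49 frames (roughly 16000 / 320)
+ print(estimate_num_frames(16000))
+ ```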
+ 
+ A fairseq checkpoint file is also available [here](https://huggingface.co/rinna/japanese-data2vec-audio-base/tree/main/fairseq).
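+ 
+ For reference, below is a minimal, untested sketch of loading that checkpoint with fairseq. It assumes fairseq is installed and that the data2vec audio model and task definitions from fairseq's `examples/data2vec` are importable; it is not part of the officially documented usage:
+ 
+ ```python
+ from fairseq import checkpoint_utils
+ 
+ # Hypothetical local path to the checkpoint downloaded from this repository.
+ ckpt_path = "fairseq/model.pt"
+ 
+ # Returns (models, saved_cfg, task); requires the data2vec model/task code
+ # from fairseq's examples/data2vec to be available on the Python path.
+ models, cfg, task = checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
+ model = models[0]
+ model.eval()
+ ```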
+ 
+ ---
+ 
+ # How to cite
+ ```bibtex
+ @misc{rinna-japanese-data2vec-audio-base,
+     title={rinna/japanese-data2vec-audio-base},
+     author={Hono, Yukiya and Mitsui, Kentaro and Sawada, Kei},
+     url={https://huggingface.co/rinna/japanese-data2vec-audio-base}
+ }
+ ```
+ 
+ ---
+ 
+ # Citations
+ ```bibtex
+ @inproceedings{baevski2022data2vec,
+     title={Data2vec: A general framework for self-supervised learning in speech, vision and language},
+     author={Baevski, Alexei and Hsu, Wei-Ning and Xu, Qiantong and Babu, Arun and Gu, Jiatao and Auli, Michael},
+     booktitle={International Conference on Machine Learning},
+     pages={1298--1312},
+     year={2022},
+     organization={PMLR},
+     doi={10.48550/arXiv.2202.03555}
+ }
+ ```
+ ---
+ 
+ # License
+ [The Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0)
config.json ADDED
@@ -0,0 +1,96 @@
+ {
+   "_name_or_path": "rinna/japanese-data2vec-base",
+   "activation_dropout": 0.1,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "architectures": [
+     "Data2VecAudioModel"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_pos_kernel_size": 19,
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_proj_dropout": 0.0,
+   "final_dropout": 0.1,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "data2vec-audio",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 5,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 12,
+   "output_hidden_size": 768,
+   "pad_token_id": 0,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32,
+   "xvector_output_dim": 512
+ }
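
The configuration above can be inspected with transformers. A minimal sketch (assuming a standard transformers installation) that confirms the key architecture values mentioned in the README:

```python
from transformers import Data2VecAudioConfig

# Load the config shown above from the Hub (network access assumed).
config = Data2VecAudioConfig.from_pretrained("rinna/japanese-data2vec-audio-base")

print(config.num_hidden_layers)    # 12 transformer layers
print(config.num_attention_heads)  # 12 attention heads
print(config.hidden_size)          # 768-dimensional hidden states
```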
fairseq/model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ca2fad0e75704d7d293f0cb2ddcd87603550e0aa5078c4736ea7b0bf4f1d63d
+ size 729428441
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
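
The feature extractor configured above normalizes each waveform to zero mean and unit variance (`do_normalize`) and expects 16 kHz input (`sampling_rate`). Because `return_attention_mask` is enabled, utterances of different lengths can be batched with padding. A minimal sketch with dummy inputs (illustrative only, not from the model card):

```python
import numpy as np
import torch
from transformers import AutoFeatureExtractor, AutoModel

model_name = "rinna/japanese-data2vec-audio-base"
feature_extractor = AutoFeatureExtractor.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)
model.eval()

# Two dummy waveforms of different lengths, already at 16 kHz.
waveforms = [
    np.random.randn(16000).astype(np.float32),
    np.random.randn(24000).astype(np.float32),
]

inputs = feature_extractor(
    waveforms,
    sampling_rate=16000,
    padding=True,          # pad to the longest item with padding_value=0.0
    return_tensors="pt",
)
with torch.no_grad():
    # attention_mask marks the padded samples so they are ignored.
    outputs = model(**inputs)

print(outputs.last_hidden_state.shape)  # [2, #frames, 768]
```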
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e06d84bb424ca9ca5b11d82348fd04b7c02d60a4a128694c4f4800ecb1ed908
+ size 372731429
rinna.png ADDED