Plachta committed on
Commit 7bdd516
1 Parent(s): 3a97b81

Upload 2 files

DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ec8841b20bb46df9f7e8e570a6946a4b87b940133c7f0e778487ff33841f720
+ size 440312082
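
The checkpoint itself is stored via Git LFS, so the file added here is only the pointer above (hash and byte count). A minimal sketch for checking a locally downloaded copy against that pointer, assuming the actual file has been fetched into the working directory under its original name:

import hashlib
import os

# Values copied from the LFS pointer above
EXPECTED_SHA256 = "8ec8841b20bb46df9f7e8e570a6946a4b87b940133c7f0e778487ff33841f720"
EXPECTED_SIZE = 440312082  # bytes

# Assumed local path of the downloaded checkpoint (not the pointer file)
path = "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_SHA256, "sha256 mismatch"
print("checkpoint matches the LFS pointer")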
config_dit_mel_seed_uvit_whisper_small_wavenet.yml ADDED
@@ -0,0 +1,94 @@
+ log_dir: "./runs"
+ save_freq: 1
+ log_interval: 10
+ save_interval: 1000
+ device: "cuda"
+ epochs: 1000 # number of epochs for first stage training (pre-training)
+ batch_size: 2
+ batch_length: 100 # maximum duration of audio in a batch (in seconds)
+ max_len: 80 # maximum number of frames
+ pretrained_model: ""
+ pretrained_encoder: "./temp_ckpt.pth"
+ load_only_params: False # set to true if you do not want to load epoch numbers and optimizer parameters
+
+ preprocess_params:
+   sr: 22050
+   spect_params:
+     n_fft: 1024
+     win_length: 1024
+     hop_length: 256
+     n_mels: 80
+     fmin: 0
+     fmax: "None"
+
+ model_params:
+   dit_type: "DiT" # uDiT or DiT
+   reg_loss_type: "l1" # l1 or l2
+
+   speech_tokenizer:
+     type: 'whisper'
+     whisper_name: "openai/whisper-small"
+     path: "speech_tokenizer_v1.onnx"
+
+   cosyvoice:
+     path: "../CosyVoice/pretrained_models/CosyVoice-300M"
+
+   style_encoder:
+     dim: 192
+     campplus_path: "campplus_cn_common.bin"
+
+   DAC:
+     encoder_dim: 64
+     encoder_rates: [2, 5, 5, 6]
+     decoder_dim: 1536
+     decoder_rates: [6, 5, 5, 2]
+     sr: 24000
+
+   length_regulator:
+     channels: 512
+     is_discrete: false
+     in_channels: 768
+     content_codebook_size: 2048
+     sampling_ratios: [1, 1, 1, 1]
+     vector_quantize: false
+     n_codebooks: 1
+     quantizer_dropout: 0.0
+     f0_condition: false
+     n_f0_bins: 512
+
+   DiT:
+     hidden_dim: 512
+     num_heads: 8
+     depth: 13
+     class_dropout_prob: 0.1
+     block_size: 8192
+     in_channels: 80
+     style_condition: true
+     final_layer_type: 'wavenet'
+     target: 'mel' # mel or codec
+     content_dim: 512
+     content_codebook_size: 1024
+     content_type: 'discrete'
+     f0_condition: false
+     n_f0_bins: 512
+     content_codebooks: 1
+     is_causal: false
+     long_skip_connection: true
+     zero_prompt_speech_token: false # for prompt component, do not input corresponding speech token
+     time_as_token: false
+     style_as_token: false
+     uvit_skip_connection: true
+     add_resblock_in_transformer: false
+
+   wavenet:
+     hidden_dim: 512
+     num_layers: 8
+     kernel_size: 5
+     dilation_rate: 1
+     p_dropout: 0.2
+     style_condition: true
+
+ loss_params:
+   base_lr: 0.0001
+   lambda_mel: 45
+   lambda_kl: 1.0
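
For orientation, a minimal sketch of reading this config and peeking at the uploaded checkpoint; it assumes both files from this commit sit in the working directory and uses only PyYAML and PyTorch, without building the model itself:

import yaml
import torch

# The two files added in this commit (assumed to be in the working directory)
CONFIG_PATH = "config_dit_mel_seed_uvit_whisper_small_wavenet.yml"
CKPT_PATH = "DiT_seed_v2_uvit_whisper_small_wavenet_bigvgan_pruned.pth"

with open(CONFIG_PATH) as f:
    config = yaml.safe_load(f)

# A few fields from the config above
sr = config["preprocess_params"]["sr"]                           # 22050
hop = config["preprocess_params"]["spect_params"]["hop_length"]  # 256
dit = config["model_params"]["DiT"]
print(f"mel frame rate: {sr / hop:.2f} frames/s")                # ~86.13
print(f"DiT: hidden_dim={dit['hidden_dim']}, depth={dit['depth']}, heads={dit['num_heads']}")

# Inspect the checkpoint structure on CPU; its internal layout is not
# documented in this commit, so only the top-level keys are printed
state = torch.load(CKPT_PATH, map_location="cpu")
if isinstance(state, dict):
    print("top-level checkpoint keys:", list(state.keys())[:10])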