atonyxu committed
Commit 5ff3703
1 Parent(s): 6a40250

Upload folder using huggingface_hub
furry/baobai/baobai7/D_1600.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed5492a941395df91168cee98cd52f28a66dc92d45f40ff657d21080da8e1961
+ size 561093259
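The .pth files in this commit are stored with Git LFS, so each diff above and below adds only a three-line pointer file: the pointer spec version, the sha256 oid of the real payload, and its size in bytes (here roughly 535 MiB of discriminator weights). A minimal sketch of checking a downloaded checkpoint against its pointer, assuming the pointer text has been saved locally (file names are illustrative, not part of the commit):

import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> tuple[str, int]:
    # Pointer lines are "key value" pairs; the oid carries a "sha256:" prefix.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return fields["oid"].removeprefix("sha256:"), int(fields["size"])

def verify(payload: Path, pointer: Path) -> bool:
    oid, size = parse_lfs_pointer(pointer.read_text())
    digest = hashlib.sha256()
    with payload.open("rb") as f:
        # Hash in 1 MiB chunks; the checkpoints are >500 MB each.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return payload.stat().st_size == size and digest.hexdigest() == oid

# verify(Path("D_1600.pth"), Path("D_1600.pth.pointer"))  # hypothetical paths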
furry/baobai/baobai7/D_800.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cd7a71339ce7d57f8d1f84acbae3a62e8ab41cbb6d385c2f7faadd4fb485bb1
+ size 561078861
furry/baobai/baobai7/G_1600.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e48b986a6f915c1bb6d7b2197550c04b86e408ea381a43d794b74629d178a979
+ size 627897375
furry/baobai/baobai7/G_800.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ce059e24c28eebaa420bf4b94a173d6445aee9e520b4d33adf45a2ba4a1170b
+ size 627845601
furry/baobai/baobai7/config.json ADDED
@@ -0,0 +1,100 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "eval_interval": 800,
+     "seed": 1234,
+     "epochs": 10000,
+     "learning_rate": 0.0001,
+     "betas": [
+       0.8,
+       0.99
+     ],
+     "eps": 1e-09,
+     "batch_size": 6,
+     "fp16_run": false,
+     "lr_decay": 0.999875,
+     "segment_size": 10240,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0,
+     "use_sr": true,
+     "max_speclen": 512,
+     "port": "8001",
+     "keep_ckpts": 3,
+     "all_in_mem": false,
+     "vol_aug": false
+   },
+   "data": {
+     "training_files": "filelists/train.txt",
+     "validation_files": "filelists/val.txt",
+     "max_wav_value": 32768.0,
+     "sampling_rate": 44100,
+     "filter_length": 2048,
+     "hop_length": 512,
+     "win_length": 2048,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": 22050,
+     "unit_interpolate_mode": "nearest"
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0.1,
+     "resblock": "1",
+     "resblock_kernel_sizes": [
+       3,
+       7,
+       11
+     ],
+     "resblock_dilation_sizes": [
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ],
+       [
+         1,
+         3,
+         5
+       ]
+     ],
+     "upsample_rates": [
+       8,
+       8,
+       2,
+       2,
+       2
+     ],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [
+       16,
+       16,
+       4,
+       4,
+       4
+     ],
+     "n_layers_q": 3,
+     "use_spectral_norm": false,
+     "gin_channels": 768,
+     "ssl_dim": 768,
+     "n_speakers": 1,
+     "vocoder_name": "nsf-hifigan",
+     "speech_encoder": "vec768l12",
+     "speaker_embedding": false,
+     "vol_embedding": false
+   },
+   "spk": {
+     "baobai": 0
+   }
+ }
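This config appears to be the standard so-vits-svc 4.x layout (vec768l12 speech encoder with ssl_dim/gin_channels 768, nsf-hifigan vocoder, a single speaker "baobai"). One cross-field invariant worth checking before training: the vocoder's total upsampling factor, the product of model.upsample_rates (8*8*2*2*2 = 512), must equal data.hop_length so that each generated frame expands to exactly one STFT hop of audio. A small sanity-check sketch (the path is the one added in this commit):

import json
import math

with open("furry/baobai/baobai7/config.json") as f:
    cfg = json.load(f)

upsample = math.prod(cfg["model"]["upsample_rates"])  # 8*8*2*2*2 = 512
hop = cfg["data"]["hop_length"]                       # 512
assert upsample == hop, f"vocoder upsampling {upsample} != hop_length {hop}"
print(f"ok: product(upsample_rates) = hop_length = {hop}")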
furry/baobai/baobai7/eval/events.out.tfevents.1688135826.8f800fcb77a7.3347.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce669a54c0fc9799dd83d9cecaed60fb81c48b4a3e5607b8d974be1903c71efc
+ size 5136305
furry/baobai/baobai7/eval/events.out.tfevents.1688175723.8a2cd6bfa912.2909.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3f39af3ab5b8d4d48d7bc969c18f5c6ac72e9012406bb7e430a98a8181ae14d
+ size 88
furry/baobai/baobai7/eval/events.out.tfevents.1688175857.8a2cd6bfa912.3516.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c49cfdf7f7851c4e5b8fc395bd58ff16c34936a28a7c2232adc7a659083aed9
+ size 88
furry/baobai/baobai7/events.out.tfevents.1688135826.8f800fcb77a7.3347.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c484c3d215b9d7787103a211c4f2273dea717390d2e6b054bf43bdabebe53180
+ size 1844627
furry/baobai/baobai7/events.out.tfevents.1688175723.8a2cd6bfa912.2909.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d04fcacd313c2b7cba543e06c02187fcac8800f694efb30d47a05dc8f9bca689
+ size 88
furry/baobai/baobai7/events.out.tfevents.1688175857.8a2cd6bfa912.3516.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37f3b24136f168bac872a7f48c51d78abd60fec40c3d8574da94ef77aa132a6b
+ size 88
furry/baobai/baobai7/githash ADDED
@@ -0,0 +1 @@
+ 793729403f4a25dbc0d5f4f8f6f3748cc5b2126e
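The githash file records the full commit hash of the training code at launch; on resume the trainer compares it against the current checkout (apparently an 8-character prefix, judging by the warnings in train.log below: 79372940 saved vs. e8365d4e current) and logs a warning on mismatch. A sketch of that comparison, assuming git is available on PATH:

import subprocess
from pathlib import Path

saved = Path("githash").read_text().strip()
current = subprocess.run(
    ["git", "rev-parse", "HEAD"],
    capture_output=True, text=True, check=True,
).stdout.strip()

if saved[:8] != current[:8]:
    # Mirrors the warning format seen in train.log.
    print(f"WARNING git hash values are different. "
          f"{saved[:8]}(saved) != {current[:8]}(current)")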
furry/baobai/baobai7/train.log ADDED
@@ -0,0 +1,135 @@
+ 2023-06-30 14:37:06,826 44k INFO {'train': {'log_interval': 200, 'eval_interval': 800, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3, 'all_in_mem': False, 'vol_aug': False}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'unit_interpolate_mode': 'nearest'}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 768, 'ssl_dim': 768, 'n_speakers': 1, 'vocoder_name': 'nsf-hifigan', 'speech_encoder': 'vec768l12', 'speaker_embedding': False, 'vol_embedding': False}, 'spk': {'baobai': 0}, 'model_dir': './logs/44k'}
+ 2023-06-30 14:37:19,151 44k INFO emb_g.weight is not in the checkpoint
+ 2023-06-30 14:37:19,258 44k INFO Loaded checkpoint './logs/44k/G_0.pth' (iteration 0)
+ 2023-06-30 14:37:22,595 44k INFO Loaded checkpoint './logs/44k/D_0.pth' (iteration 0)
+ 2023-06-30 14:38:18,700 44k INFO ====> Epoch: 1, cost 71.88 s
+ 2023-06-30 14:38:51,259 44k INFO ====> Epoch: 2, cost 32.56 s
+ 2023-06-30 14:39:22,978 44k INFO ====> Epoch: 3, cost 31.72 s
+ 2023-06-30 14:39:54,380 44k INFO ====> Epoch: 4, cost 31.40 s
+ 2023-06-30 14:40:25,324 44k INFO ====> Epoch: 5, cost 30.94 s
+ 2023-06-30 14:40:56,095 44k INFO ====> Epoch: 6, cost 30.77 s
+ 2023-06-30 14:41:29,337 44k INFO ====> Epoch: 7, cost 33.24 s
+ 2023-06-30 14:42:02,358 44k INFO ====> Epoch: 8, cost 33.02 s
+ 2023-06-30 14:42:35,317 44k INFO ====> Epoch: 9, cost 32.96 s
+ 2023-06-30 14:42:57,112 44k INFO Train Epoch: 10 [48%]
+ 2023-06-30 14:42:57,114 44k INFO Losses: [1.8959810733795166, 2.6880526542663574, 17.734180450439453, 19.562091827392578, 1.2946572303771973], step: 200, lr: 9.98875562335968e-05, reference_loss: 43.174964904785156
+ 2023-06-30 14:43:10,441 44k INFO ====> Epoch: 10, cost 35.12 s
+ 2023-06-30 14:43:43,162 44k INFO ====> Epoch: 11, cost 32.72 s
+ 2023-06-30 14:44:15,981 44k INFO ====> Epoch: 12, cost 32.82 s
+ 2023-06-30 14:44:48,728 44k INFO ====> Epoch: 13, cost 32.75 s
+ 2023-06-30 14:45:25,133 44k INFO ====> Epoch: 14, cost 36.40 s
+ 2023-06-30 14:45:58,539 44k INFO ====> Epoch: 15, cost 33.41 s
+ 2023-06-30 14:46:31,629 44k INFO ====> Epoch: 16, cost 33.09 s
+ 2023-06-30 14:47:04,664 44k INFO ====> Epoch: 17, cost 33.04 s
+ 2023-06-30 14:47:37,740 44k INFO ====> Epoch: 18, cost 33.08 s
+ 2023-06-30 14:48:10,719 44k INFO ====> Epoch: 19, cost 32.98 s
+ 2023-06-30 14:48:22,893 44k INFO Train Epoch: 20 [0%]
+ 2023-06-30 14:48:22,898 44k INFO Losses: [2.3653783798217773, 2.557995319366455, 14.70919418334961, 19.53294563293457, 1.1693130731582642], step: 400, lr: 9.976276699833672e-05, reference_loss: 40.33482360839844
+ 2023-06-30 14:48:44,505 44k INFO ====> Epoch: 20, cost 33.79 s
+ 2023-06-30 14:49:17,551 44k INFO ====> Epoch: 21, cost 33.05 s
+ 2023-06-30 14:49:51,128 44k INFO ====> Epoch: 22, cost 33.58 s
+ 2023-06-30 14:50:23,065 44k INFO ====> Epoch: 23, cost 31.94 s
+ 2023-06-30 14:50:54,615 44k INFO ====> Epoch: 24, cost 31.55 s
+ 2023-06-30 14:51:25,520 44k INFO ====> Epoch: 25, cost 30.90 s
+ 2023-06-30 14:51:56,697 44k INFO ====> Epoch: 26, cost 31.18 s
+ 2023-06-30 14:52:28,209 44k INFO ====> Epoch: 27, cost 31.51 s
+ 2023-06-30 14:52:59,970 44k INFO ====> Epoch: 28, cost 31.76 s
+ 2023-06-30 14:53:20,763 44k INFO Train Epoch: 29 [52%]
+ 2023-06-30 14:53:20,769 44k INFO Losses: [2.6521246433258057, 2.4930050373077393, 12.666550636291504, 16.643795013427734, 0.7230401635169983], step: 600, lr: 9.965058998565574e-05, reference_loss: 35.17851638793945
+ 2023-06-30 14:53:33,093 44k INFO ====> Epoch: 29, cost 33.12 s
+ 2023-06-30 14:54:05,436 44k INFO ====> Epoch: 30, cost 32.34 s
+ 2023-06-30 14:54:41,000 44k INFO ====> Epoch: 31, cost 35.56 s
+ 2023-06-30 14:55:12,993 44k INFO ====> Epoch: 32, cost 31.99 s
+ 2023-06-30 14:55:44,685 44k INFO ====> Epoch: 33, cost 31.69 s
+ 2023-06-30 14:56:17,040 44k INFO ====> Epoch: 34, cost 32.35 s
+ 2023-06-30 14:56:49,716 44k INFO ====> Epoch: 35, cost 32.68 s
+ 2023-06-30 14:57:22,629 44k INFO ====> Epoch: 36, cost 32.91 s
+ 2023-06-30 14:57:55,282 44k INFO ====> Epoch: 37, cost 32.65 s
+ 2023-06-30 14:58:28,221 44k INFO ====> Epoch: 38, cost 32.94 s
+ 2023-06-30 14:58:40,622 44k INFO Train Epoch: 39 [5%]
+ 2023-06-30 14:58:40,627 44k INFO Losses: [2.364532470703125, 2.376206874847412, 13.579978942871094, 18.379371643066406, 0.7464990019798279], step: 800, lr: 9.952609679164422e-05, reference_loss: 37.44658660888672
+ 2023-06-30 14:58:57,785 44k INFO Saving model and optimizer state at iteration 39 to ./logs/44k/G_800.pth
+ 2023-06-30 14:59:00,507 44k INFO Saving model and optimizer state at iteration 39 to ./logs/44k/D_800.pth
+ 2023-06-30 14:59:25,308 44k INFO ====> Epoch: 39, cost 57.09 s
+ 2023-06-30 14:59:57,095 44k INFO ====> Epoch: 40, cost 31.79 s
+ 2023-06-30 15:00:28,121 44k INFO ====> Epoch: 41, cost 31.03 s
+ 2023-06-30 15:00:59,305 44k INFO ====> Epoch: 42, cost 31.18 s
+ 2023-06-30 15:01:30,318 44k INFO ====> Epoch: 43, cost 31.01 s
+ 2023-06-30 15:02:01,218 44k INFO ====> Epoch: 44, cost 30.90 s
+ 2023-06-30 15:02:32,111 44k INFO ====> Epoch: 45, cost 30.89 s
+ 2023-06-30 15:03:02,826 44k INFO ====> Epoch: 46, cost 30.71 s
+ 2023-06-30 15:03:33,946 44k INFO ====> Epoch: 47, cost 31.12 s
+ 2023-06-30 15:03:56,499 44k INFO Train Epoch: 48 [57%]
+ 2023-06-30 15:03:56,506 44k INFO Losses: [2.2675538063049316, 3.6340787410736084, 16.671533584594727, 18.78742790222168, 1.167222499847412], step: 1000, lr: 9.941418589985758e-05, reference_loss: 42.52781677246094
+ 2023-06-30 15:04:08,250 44k INFO ====> Epoch: 48, cost 34.30 s
+ 2023-06-30 15:04:39,311 44k INFO ====> Epoch: 49, cost 31.06 s
+ 2023-06-30 15:05:10,224 44k INFO ====> Epoch: 50, cost 30.91 s
+ 2023-06-30 15:05:41,114 44k INFO ====> Epoch: 51, cost 30.89 s
+ 2023-06-30 15:06:12,050 44k INFO ====> Epoch: 52, cost 30.94 s
+ 2023-06-30 15:06:42,814 44k INFO ====> Epoch: 53, cost 30.76 s
+ 2023-06-30 15:07:13,737 44k INFO ====> Epoch: 54, cost 30.92 s
+ 2023-06-30 15:07:44,705 44k INFO ====> Epoch: 55, cost 30.97 s
+ 2023-06-30 15:08:15,630 44k INFO ====> Epoch: 56, cost 30.92 s
+ 2023-06-30 15:08:48,247 44k INFO ====> Epoch: 57, cost 32.62 s
+ 2023-06-30 15:08:59,617 44k INFO Train Epoch: 58 [10%]
+ 2023-06-30 15:08:59,618 44k INFO Losses: [1.9926245212554932, 2.864957332611084, 18.438426971435547, 18.126304626464844, 0.8427446484565735], step: 1200, lr: 9.928998804478705e-05, reference_loss: 42.26505661010742
+ 2023-06-30 15:09:19,600 44k INFO ====> Epoch: 58, cost 31.35 s
+ 2023-06-30 15:09:49,643 44k INFO ====> Epoch: 59, cost 30.04 s
+ 2023-06-30 15:10:19,992 44k INFO ====> Epoch: 60, cost 30.35 s
+ 2023-06-30 15:10:50,560 44k INFO ====> Epoch: 61, cost 30.57 s
+ 2023-06-30 15:11:21,195 44k INFO ====> Epoch: 62, cost 30.63 s
+ 2023-06-30 15:11:52,084 44k INFO ====> Epoch: 63, cost 30.89 s
+ 2023-06-30 15:12:22,952 44k INFO ====> Epoch: 64, cost 30.87 s
+ 2023-06-30 15:12:53,969 44k INFO ====> Epoch: 65, cost 31.02 s
+ 2023-06-30 15:13:25,364 44k INFO ====> Epoch: 66, cost 31.39 s
+ 2023-06-30 15:13:47,506 44k INFO Train Epoch: 67 [62%]
+ 2023-06-30 15:13:47,512 44k INFO Losses: [2.647841691970825, 2.527953624725342, 9.182493209838867, 16.640634536743164, 1.2151633501052856], step: 1400, lr: 9.917834264256819e-05, reference_loss: 32.214088439941406
+ 2023-06-30 15:13:57,889 44k INFO ====> Epoch: 67, cost 32.53 s
+ 2023-06-30 15:14:28,999 44k INFO ====> Epoch: 68, cost 31.11 s
+ 2023-06-30 15:14:59,985 44k INFO ====> Epoch: 69, cost 30.99 s
+ 2023-06-30 15:15:30,993 44k INFO ====> Epoch: 70, cost 31.01 s
+ 2023-06-30 15:16:02,022 44k INFO ====> Epoch: 71, cost 31.03 s
+ 2023-06-30 15:16:33,079 44k INFO ====> Epoch: 72, cost 31.06 s
+ 2023-06-30 15:17:04,536 44k INFO ====> Epoch: 73, cost 31.46 s
+ 2023-06-30 15:17:35,690 44k INFO ====> Epoch: 74, cost 31.15 s
+ 2023-06-30 15:18:06,820 44k INFO ====> Epoch: 75, cost 31.13 s
+ 2023-06-30 15:18:39,687 44k INFO ====> Epoch: 76, cost 32.87 s
+ 2023-06-30 15:18:53,121 44k INFO Train Epoch: 77 [14%]
+ 2023-06-30 15:18:53,127 44k INFO Losses: [2.432774066925049, 2.5553371906280518, 13.044061660766602, 16.947370529174805, 0.8192602396011353], step: 1600, lr: 9.905443942579728e-05, reference_loss: 35.798805236816406
+ 2023-06-30 15:19:03,141 44k INFO Saving model and optimizer state at iteration 77 to ./logs/44k/G_1600.pth
+ 2023-06-30 15:19:05,722 44k INFO Saving model and optimizer state at iteration 77 to ./logs/44k/D_1600.pth
+ 2023-06-30 15:19:35,411 44k INFO ====> Epoch: 77, cost 55.72 s
+ 2023-06-30 15:20:08,613 44k INFO ====> Epoch: 78, cost 33.20 s
+ 2023-06-30 15:20:40,902 44k INFO ====> Epoch: 79, cost 32.29 s
+ 2023-06-30 15:21:13,341 44k INFO ====> Epoch: 80, cost 32.44 s
+ 2023-06-30 15:21:45,657 44k INFO ====> Epoch: 81, cost 32.32 s
+ 2023-06-30 15:22:18,734 44k INFO ====> Epoch: 82, cost 33.08 s
+ 2023-06-30 15:22:51,159 44k INFO ====> Epoch: 83, cost 32.43 s
+ 2023-06-30 15:23:24,259 44k INFO ====> Epoch: 84, cost 33.10 s
+ 2023-06-30 15:23:56,661 44k INFO ====> Epoch: 85, cost 32.40 s
+ 2023-06-30 15:24:20,207 44k INFO Train Epoch: 86 [67%]
+ 2023-06-30 15:24:20,214 44k INFO Losses: [2.352099895477295, 2.2271785736083984, 13.951580047607422, 17.9833927154541, 0.5835058093070984], step: 1800, lr: 9.894305888331732e-05, reference_loss: 37.09775924682617
+ 2023-06-30 15:24:29,659 44k INFO ====> Epoch: 86, cost 33.00 s
+ 2023-06-30 15:25:01,308 44k INFO ====> Epoch: 87, cost 31.65 s
+ 2023-06-30 15:25:32,659 44k INFO ====> Epoch: 88, cost 31.35 s
+ 2023-06-30 15:26:03,885 44k INFO ====> Epoch: 89, cost 31.23 s
+ 2023-06-30 15:26:35,491 44k INFO ====> Epoch: 90, cost 31.61 s
+ 2023-06-30 15:27:06,743 44k INFO ====> Epoch: 91, cost 31.25 s
+ 2023-06-30 15:27:37,584 44k INFO ====> Epoch: 92, cost 30.84 s
+ 2023-06-30 15:28:08,393 44k INFO ====> Epoch: 93, cost 30.81 s
+ 2023-06-30 15:28:39,617 44k INFO ====> Epoch: 94, cost 31.22 s
+ 2023-06-30 15:29:10,633 44k INFO ====> Epoch: 95, cost 31.02 s
+ 2023-06-30 15:29:23,575 44k INFO Train Epoch: 96 [19%]
+ 2023-06-30 15:29:23,577 44k INFO Losses: [2.3719770908355713, 2.3133671283721924, 11.204004287719727, 17.378263473510742, 0.7282853722572327], step: 2000, lr: 9.881944960586671e-05, reference_loss: 33.99589920043945
+ 2023-06-30 15:29:42,777 44k INFO ====> Epoch: 96, cost 32.14 s
+ 2023-06-30 15:30:13,698 44k INFO ====> Epoch: 97, cost 30.92 s
+ 2023-06-30 15:30:44,702 44k INFO ====> Epoch: 98, cost 31.00 s
+ 2023-06-30 15:31:18,320 44k INFO ====> Epoch: 99, cost 33.62 s
+ 2023-07-01 01:42:02,697 44k INFO {'train': {'log_interval': 200, 'eval_interval': 800, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3, 'all_in_mem': False, 'vol_aug': False}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'unit_interpolate_mode': 'nearest'}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 768, 'ssl_dim': 768, 'n_speakers': 1, 'vocoder_name': 'nsf-hifigan', 'speech_encoder': 'vec768l12', 'speaker_embedding': False, 'vol_embedding': False}, 'spk': {'baobai': 0}, 'model_dir': './logs/44k'}
+ 2023-07-01 01:42:03,056 44k WARNING git hash values are different. 79372940(saved) != e8365d4e(current)
+ 2023-07-01 01:42:21,852 44k INFO Loaded checkpoint './logs/44k/G_1600.pth' (iteration 77)
+ 2023-07-01 01:42:26,474 44k INFO Loaded checkpoint './logs/44k/D_1600.pth' (iteration 77)
+ 2023-07-01 01:44:17,042 44k INFO {'train': {'log_interval': 200, 'eval_interval': 800, 'seed': 1234, 'epochs': 10000, 'learning_rate': 0.0001, 'betas': [0.8, 0.99], 'eps': 1e-09, 'batch_size': 6, 'fp16_run': False, 'lr_decay': 0.999875, 'segment_size': 10240, 'init_lr_ratio': 1, 'warmup_epochs': 0, 'c_mel': 45, 'c_kl': 1.0, 'use_sr': True, 'max_speclen': 512, 'port': '8001', 'keep_ckpts': 3, 'all_in_mem': False, 'vol_aug': False}, 'data': {'training_files': 'filelists/train.txt', 'validation_files': 'filelists/val.txt', 'max_wav_value': 32768.0, 'sampling_rate': 44100, 'filter_length': 2048, 'hop_length': 512, 'win_length': 2048, 'n_mel_channels': 80, 'mel_fmin': 0.0, 'mel_fmax': 22050, 'unit_interpolate_mode': 'nearest'}, 'model': {'inter_channels': 192, 'hidden_channels': 192, 'filter_channels': 768, 'n_heads': 2, 'n_layers': 6, 'kernel_size': 3, 'p_dropout': 0.1, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11], 'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'upsample_rates': [8, 8, 2, 2, 2], 'upsample_initial_channel': 512, 'upsample_kernel_sizes': [16, 16, 4, 4, 4], 'n_layers_q': 3, 'use_spectral_norm': False, 'gin_channels': 768, 'ssl_dim': 768, 'n_speakers': 1, 'vocoder_name': 'nsf-hifigan', 'speech_encoder': 'vec768l12', 'speaker_embedding': False, 'vol_embedding': False}, 'spk': {'baobai': 0}, 'model_dir': './logs/44k'}
+ 2023-07-01 01:44:17,053 44k WARNING git hash values are different. 79372940(saved) != e8365d4e(current)
+ 2023-07-01 01:44:22,866 44k INFO Loaded checkpoint './logs/44k/G_1600.pth' (iteration 77)
+ 2023-07-01 01:44:23,773 44k INFO Loaded checkpoint './logs/44k/D_1600.pth' (iteration 77)
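The lr values logged every 200 steps follow the exponential schedule implied by the config: the learning rate is multiplied by lr_decay once per epoch, so during epoch N it is learning_rate * lr_decay ** (N - 1). Reproducing the logged values as a quick check:

# train.learning_rate and train.lr_decay from config.json
base_lr, decay = 1e-4, 0.999875

for epoch in (10, 20, 96):
    lr = base_lr * decay ** (epoch - 1)
    print(f"epoch {epoch}: lr = {lr:.15e}")
# epoch 10 -> ~9.98875562e-05, matching the step-200 log line above;
# epoch 96 -> ~9.88194496e-05, matching step 2000.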