nevreal committed on
Commit
e0821c8
1 Parent(s): aeeb858

Create rvc.py

Files changed (1)
  1. rvc.py +219 -0
rvc.py ADDED
@@ -0,0 +1,219 @@
+import asyncio
+import datetime
+import logging
+import os
+import time
+import traceback
+
+import edge_tts
+import gradio as gr
+import librosa
+import torch
+from fairseq import checkpoint_utils
+from huggingface_hub import snapshot_download
+
+
+from config import Config
+from lib.infer_pack.models import (
+    SynthesizerTrnMs256NSFsid,
+    SynthesizerTrnMs256NSFsid_nono,
+    SynthesizerTrnMs768NSFsid,
+    SynthesizerTrnMs768NSFsid_nono,
+)
+from rmvpe import RMVPE
+from vc_infer_pipeline import VC
+
+logging.getLogger("fairseq").setLevel(logging.WARNING)
+logging.getLogger("numba").setLevel(logging.WARNING)
+logging.getLogger("markdown_it").setLevel(logging.WARNING)
+logging.getLogger("urllib3").setLevel(logging.WARNING)
+logging.getLogger("matplotlib").setLevel(logging.WARNING)
+
+limitation = os.getenv("SYSTEM") == "spaces"
+
+config = Config()
+
+# Edge TTS
+edge_output_filename = "edge_output.mp3"
+tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
+tts_voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
+
+# RVC models
+model_root = snapshot_download(repo_id="NoCrypt/miku_RVC", token=os.environ["TOKEN"])
+models = [d for d in os.listdir(model_root) if os.path.isdir(f"{model_root}/{d}")]
+models.sort()
+
+
+def model_data(model_name):
+    # global n_spk, tgt_sr, net_g, vc, cpt, version, index_file
+    pth_path = [
+        f"{model_root}/{model_name}/{f}"
+        for f in os.listdir(f"{model_root}/{model_name}")
+        if f.endswith(".pth")
+    ][0]
+    print(f"Loading {pth_path}")
+    cpt = torch.load(pth_path, map_location="cpu")
+    tgt_sr = cpt["config"][-1]
+    cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
+    if_f0 = cpt.get("f0", 1)
+    version = cpt.get("version", "v1")
+    if version == "v1":
+        if if_f0 == 1:
+            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
+        else:
+            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
+    elif version == "v2":
+        if if_f0 == 1:
+            net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
+        else:
+            net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
+    else:
+        raise ValueError("Unknown version")
+    del net_g.enc_q
+    net_g.load_state_dict(cpt["weight"], strict=False)
+    print("Model loaded")
+    net_g.eval().to(config.device)
+    if config.is_half:
+        net_g = net_g.half()
+    else:
+        net_g = net_g.float()
+    vc = VC(tgt_sr, config)
+    # n_spk = cpt["config"][-3]
+
+    index_files = [
+        f"{model_root}/{model_name}/{f}"
+        for f in os.listdir(f"{model_root}/{model_name}")
+        if f.endswith(".index")
+    ]
+    if len(index_files) == 0:
+        print("No index file found")
+        index_file = ""
+    else:
+        index_file = index_files[0]
+        print(f"Index file found: {index_file}")
+
+    return tgt_sr, net_g, vc, version, index_file, if_f0
+
+
+def load_hubert():
+    # global hubert_model
+    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
+        ["hubert_base.pt"],
+        suffix="",
+    )
+    hubert_model = models[0]
+    hubert_model = hubert_model.to(config.device)
+    if config.is_half:
+        hubert_model = hubert_model.half()
+    else:
+        hubert_model = hubert_model.float()
+    return hubert_model.eval()
+
+
+def tts(
+    model_name,
+    speed,
+    tts_text,
+    tts_voice,
+    f0_up_key,
+    f0_method,
+    index_rate,
+    protect,
+    filter_radius=3,
+    resample_sr=0,
+    rms_mix_rate=0.25,
+):
+    print("------------------")
+    print(datetime.datetime.now())
+    print("tts_text:")
+    print(tts_text)
+    print(f"tts_voice: {tts_voice}, speed: {speed}")
+    print(f"Model name: {model_name}")
+    print(f"F0: {f0_method}, Key: {f0_up_key}, Index: {index_rate}, Protect: {protect}")
+    try:
+        if limitation and len(tts_text) > 1000:
+            print("Error: Text too long")
+            return (
+                f"Text characters should be at most 1000 in this huggingface space, but got {len(tts_text)} characters.",
+                None,
+                None,
+            )
+        t0 = time.time()
+        if speed >= 0:
+            speed_str = f"+{speed}%"
+        else:
+            speed_str = f"{speed}%"
+        asyncio.run(
+            edge_tts.Communicate(
+                tts_text, "-".join(tts_voice.split("-")[:-1]), rate=speed_str
+            ).save(edge_output_filename)
+        )
+        t1 = time.time()
+        edge_time = t1 - t0
+        audio, sr = librosa.load(edge_output_filename, sr=16000, mono=True)
+        duration = len(audio) / sr
+        print(f"Audio duration: {duration}s")
+        if limitation and duration >= 200:
+            print("Error: Audio too long")
+            return (
+                f"Audio should be less than 200 seconds in this huggingface space, but got {duration}s.",
+                edge_output_filename,
+                None,
+            )
+        f0_up_key = int(f0_up_key)
+
+        tgt_sr, net_g, vc, version, index_file, if_f0 = model_data(model_name)
+        if f0_method == "rmvpe":
+            vc.model_rmvpe = rmvpe_model
+        times = [0, 0, 0]
+        audio_opt = vc.pipeline(
+            hubert_model,
+            net_g,
+            0,
+            audio,
+            edge_output_filename,
+            times,
+            f0_up_key,
+            f0_method,
+            index_file,
+            # file_big_npy,
+            index_rate,
+            if_f0,
+            filter_radius,
+            tgt_sr,
+            resample_sr,
+            rms_mix_rate,
+            version,
+            protect,
+            None,
+        )
+        if tgt_sr != resample_sr >= 16000:
+            tgt_sr = resample_sr
+        info = f"Success. Time: edge-tts: {edge_time}s, npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s"
+        print(info)
+        return (
+            info,
+            edge_output_filename,
+            (tgt_sr, audio_opt),
+        )
+    except EOFError:
+        info = (
+            "It seems that the edge-tts output is not valid. "
+            "This may occur when the input text and the speaker do not match. "
+            "For example, maybe you entered Japanese (without alphabets) text but chose a non-Japanese speaker?"
+        )
+        print(info)
+        return info, None, None
+    except:
+        info = traceback.format_exc()
+        print(info)
+        return info, None, None
+
+
+print("Loading hubert model...")
+hubert_model = load_hubert()
+print("Hubert model loaded.")
+
+print("Loading rmvpe model...")
+rmvpe_model = RMVPE("rmvpe.pt", config.is_half, config.device)
+print("rmvpe model loaded.")