darksakura committed on
Commit
144b372
1 Parent(s): e1cb985

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +233 -0
app.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os
3
+
4
+ import gradio as gr
5
+ import librosa
6
+ import base64
7
+ import numpy as np
8
+ import soundfile
9
+ #from inference.infer_tool import Svc
10
+ from inference.infer_tool import Svc
11
+ import logging
12
+ import time
13
+ from tts_voices import SUPPORTED_LANGUAGES
14
+ logging.getLogger('numba').setLevel(logging.WARNING)
15
+ logging.getLogger('markdown_it').setLevel(logging.WARNING)
16
+ logging.getLogger('urllib3').setLevel(logging.WARNING)
17
+ logging.getLogger('matplotlib').setLevel(logging.WARNING)
18
+
19
+ #hf_token = os.environ.get('TOKEN')
20
+ #hf_token1 = os.environ.get('TOKEN1')
21
+ #hf_token2 = os.environ.get('TOKEN2')
22
+ #hf_token_config = os.environ.get('TOKEN_config')
23
+
24
+ from matplotlib import pyplot as plt
25
+ import datetime
26
+ import subprocess
27
+
28
def tts_fn(_text, _gender, _lang, _rate, _volume, sid, vc_transform, auto_f0, cluster_ratio, slice_db, f0_predictor):
    """Synthesize `_text` with edge-tts (via the tts.py subprocess), then
    convert the result to the selected So-VITS speaker.

    _text: text to speak (rejected above 400 chars).
    _gender: "男"/"女" UI label, mapped to "Male"/"Female" for Auto language.
    _lang: language code or "Auto".
    _rate, _volume: relative sliders, converted to edge-tts "+NN%" strings.
    sid, vc_transform, auto_f0, cluster_ratio, slice_db, f0_predictor:
        forwarded to model.slice_inference (same controls as vc_fn).

    Returns (status_message, output_path_or_None).
    """
    if len(_text) > 400:
        # Reject over-long input; the message is a base64-obscured Chinese
        # string ("please keep the text under 400 characters").
        return base64.b64decode( b'6K+35LiK5Lyg5bCP5LqONDAw5a2X55qE5paH5pys' ).decode(), None
    try:
        # edge-tts expects signed percentage strings such as "+20%" / "-10%".
        _rate = f"+{int(_rate*100)}%" if _rate >= 0 else f"{int(_rate*100)}%"
        _volume = f"+{int(_volume*100)}%" if _volume >= 0 else f"{int(_volume*100)}%"
        if _lang == "Auto":
            # Auto language detection needs an explicit speaker gender.
            _gender = "Male" if _gender == "男" else "Female"
            subprocess.run([r"python", "tts.py", _text, _lang, _rate, _volume, _gender])
        else:
            subprocess.run([r"python", "tts.py", _text, _lang, _rate, _volume])
        input_audio = "tts.wav"  # presumably written by tts.py — TODO confirm
        audio, sampling_rate = soundfile.read(input_audio)
        # Normalize integer PCM to float32 in [-1, 1].
        if np.issubdtype(audio.dtype, np.integer):
            audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
        # Down-mix multi-channel audio to mono.
        if len(audio.shape) > 1:
            audio = librosa.to_mono(audio.transpose(1, 0))
        # The model works at 44.1 kHz.
        if sampling_rate != 44100:
            audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=44100)
        soundfile.write(input_audio, audio, 44100, format="wav")

        output_file_path = "tts_output.mp3"
        _audio = model.slice_inference(input_audio, sid, vc_transform, slice_db, cluster_ratio, auto_f0, 0.4, f0_predictor=f0_predictor, clip_seconds=40)
        print(_text, _gender, _lang, _rate, _volume, sid, vc_transform, auto_f0, cluster_ratio, slice_db, f0_predictor)
        soundfile.write(output_file_path, _audio, 44100, format="mp3")
        return "Success", output_file_path

    except Exception as e:
        # BUG FIX: previously this only printed and fell through, implicitly
        # returning None where the Gradio callback expects a 2-tuple
        # (status text, audio path) — which broke the outputs binding.
        print(e)
        return f"Error: {e}", None
59
+
60
+
61
+
62
def f0_to_pitch(ff):
    """Map a frequency in Hz to a fractional MIDI-style pitch number.

    NOTE(review): the reference is 441 Hz for note 69 rather than the
    conventional 440 Hz; since the only caller compares pitch *differences*,
    the constant cancels, so it is preserved as-is.
    """
    return 69 + 12 * np.log2(ff / 441)
65
def compute_f0(wav_file1, wav_file2, tran):
    """Compare the f0 (pitch) contours of two audio files.

    wav_file1, wav_file2: file paths (loaded at 44.1 kHz).
    tran: the semitone transpose applied to file 1, subtracted out before
        measuring the deviation.

    Returns (f0_1, f0_2, sr1, sr2, mean_dev/10, std_dev/10) where the last
    two are the rounded mean and sample std-dev of the per-frame absolute
    semitone deviation between the two contours.
    """
    y1, sr1 = librosa.load(wav_file1, sr=44100)
    y2, sr2 = librosa.load(wav_file2, sr=44100)

    # Compute the f0 using the YIN pitch estimation method.
    f0_1 = librosa.core.yin(y1, fmin=1, fmax=400)
    f0_2 = librosa.core.yin(y2, fmin=1, fmax=400)

    # Per-frame absolute semitone deviation between the two contours.
    sum_y = []
    # BUG FIX: the silence check previously tested `wav_file1` (the path
    # string) instead of the loaded samples `y1`, so it could never fire.
    if np.sum(y1 == 0) / len(y1) > 0.9:
        # Mostly-silent reference: report zero deviation.
        mistake, var_take = 0, 0
    else:
        for i in range(min(len(f0_1), len(f0_2))):
            # Only compare frames where both estimators found a pitch.
            if f0_1[i] > 0 and f0_2[i] > 0:
                sum_y.append(
                    abs(f0_to_pitch(f0_2[i]) - (f0_to_pitch(f0_1[i]) + tran)))
        len_y = len(sum_y) if len(sum_y) else 1
        mistake = round(float(sum(sum_y) / len_y), 2)
        # BUG FIX: np.std(..., ddof=1) is NaN for fewer than 2 samples;
        # report 0 deviation spread in that degenerate case.
        var_take = round(float(np.std(sum_y, ddof=1)), 2) if len(sum_y) > 1 else 0
    print("mistake", mistake, var_take)
    return f0_1, f0_2, sr1, sr2, round(mistake / 10, 2), round(var_take / 10, 2)
89
+
90
def same_auth(username, password):
    """Gradio auth callback: accept the login when either field matches one
    of two base64-obscured host names (the username is first stripped of any
    scheme prefix and slashes)."""
    # Log the attempt with a UTC+8 timestamp.
    login_time = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    print(username, password, login_time.strftime("%Y-%m-%d %H:%M:%S"))
    for fragment in ("https://", "http://", "/"):
        username = username.replace(fragment, "")
    allowed = (
        base64.b64decode( b'c292aXRzNC5ub2dpemFrYTQ2LmNj' ).decode(),
        base64.b64decode( b'c292aXRzNC1kZXYubm9naXpha2E0Ni5jYw==' ).decode(),
    )
    return username in allowed or password in allowed
95
+
96
def vc_fn(output_format, sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, f0_predictor, clip_seconds=50):
    """Convert an uploaded audio file to the selected speaker's voice.

    output_format: "mp3" or "flac".
    sid: speaker id (dropdown value).
    input_audio: uploaded file path, or None when nothing was uploaded.
    vc_transform: semitone shift.
    auto_f0: enable automatic f0 prediction (speech only).
    cluster_ratio: cluster-model mix ratio, 0-1.
    slice_db: slicing threshold in dB.
    f0_predictor: one of "pm"/"dio"/"harvest"/"fcpe"/"rmvpe".
    clip_seconds: max slice length passed to the model.

    Returns (status_message, output_path_or_None).
    """
    start_time = time.time()
    if input_audio is None:
        return "You need to upload an audio ", None
    audio, sampling_rate = soundfile.read(input_audio)
    duration = audio.shape[0] / sampling_rate
    if duration > 280:
        # BUG FIX: this branch used to return a 3-tuple while every other
        # path (and the callback's outputs=[vc_output1, vc_output2]) expects
        # exactly 2 values.  Message is base64-obscured Chinese text.
        return base64.b64decode( b'6K+35LiK5Lyg5bCP5LqOMjgwc+eahOmfs+mike+8jOmcgOimgei9rOaNoumVv+mfs+mikeivt+S9v+eUqHRnYm90' ).decode(), None
    # Normalize integer PCM to float32 in [-1, 1].
    if np.issubdtype(audio.dtype, np.integer):
        audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
    # Down-mix multi-channel audio to mono.
    if len(audio.shape) > 1:
        audio = librosa.to_mono(audio.transpose(1, 0))
    # The model works at 44.1 kHz.
    if sampling_rate != 44100:
        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=44100)
    out_wav_path = "temp.wav"
    soundfile.write(out_wav_path, audio, 44100, format="wav")

    # Log the request with a UTC+8 timestamp.
    now = datetime.datetime.utcnow() + datetime.timedelta(hours=8)
    print(sid, vc_transform, auto_f0, cluster_ratio, slice_db, f0_predictor, now.strftime("%Y-%m-%d %H:%M:%S"))
    _audio = model.slice_inference(out_wav_path, sid, vc_transform, slice_db, cluster_ratio, auto_f0, 0.4, f0_predictor=f0_predictor, clip_seconds=clip_seconds, loudness_envelope_adjustment = 0)
    out_wav_path1 = 'output_'+f'{sid}_{vc_transform}.{output_format}'
    soundfile.write(out_wav_path1, _audio, 44100, format=output_format)
    used_time = round(time.time() - start_time, 2)
    if auto_f0:
        # Prepend a base64-obscured warning about auto-f0 (speech only).
        out_str = base64.b64decode( b'5L2g5byA5ZCv5LqG6Ieq5YqoZjDpooTmtYvvvIzku4XpmZDovazmjaLor63pn7PvvIzmrYzlo7DkuI3opoHli77pgInmraTpobnkvJrnqbbmnoHot5HosIMK' ).decode()
        out_str = out_str + (base64.b64decode( b'U3VjY2VzcyEgdG90YWwgdXNlIHRpbWU6e31z' ).decode().format(
            used_time))
    else:
        out_str = (base64.b64decode( b'U3VjY2VzcyEgdG90YWwgdXNlIHRpbWU6e31z' ).decode().format(
            used_time))
    return out_str, out_wav_path1
129
+
130
def change_audio(audio, vc):
    """Gradio Examples handler: forward the selected example's audio path and
    transpose value to the bound controls unchanged."""
    return audio, vc
134
def loadmodel(model_):
    # Swap the global Svc model for the checkpoint at `model_`.  The matching
    # config and cluster model are looked up by the checkpoint's base name
    # under configs/ and ./kmeans/.
    global model
    model_name = os.path.splitext(os.path.basename(model_))[0]
    model = Svc(model_, "configs/" + model_name + ".json", cluster_model_path="./kmeans/" + model_name + ".pt")
    global sid
    spks = list(model.spk2id.keys())
    # NOTE(review): this rebinds the global `sid` to the return value of
    # Dropdown.update() (an update payload), not the Dropdown component
    # itself — looks suspect for any later use of `sid` as a component;
    # verify against the gradio version in use.
    sid = sid.update(choices=spks)
    print(model_, "configs/" + model_name + ".json", "./kmeans/" + model_name + ".pt")

    # Status string plus the dropdown update carrying the new speaker list.
    return "success",sid
144
+
145
def update_dropdown(new_choices):
    """Rebuild the speaker dropdown choices from the currently loaded model.

    The incoming value is ignored; the choices always come from model.spk2id.
    """
    global model
    speaker_names = list(model.spk2id.keys())
    return gr.Dropdown.update(choices=speaker_names)
150
+
151
+ sid =""
152
+ import pyzipper
153
+ hf_token1 = os.environ.get('TOKEN1').encode("utf-8")
154
+ with pyzipper.AESZipFile('./N.zip') as zf:
155
+ zf.pwd = hf_token1
156
+ zf.extractall()
157
+ model = Svc("./N/44.pth", "configs/44.json" , cluster_model_path="./kmeans/44.pt")
158
+ modelPaths = []
159
+ for dirpath, dirnames, filenames in os.walk("./N/"):
160
+ for filename in filenames:
161
+ modelPaths.append(os.path.join(dirpath, filename))
162
+
163
# ---------------------------------------------------------------------------
# Gradio UI: a single page with an audio-upload tab and an experimental TTS
# tab, shared conversion controls, model switching, and example clips.
# Most user-facing strings are base64-obscured Chinese text.
# ---------------------------------------------------------------------------
app = gr.Blocks()
with app:
    with gr.Tabs():
        with gr.TabItem(" "):

            # Intro / disclaimer markdown (base64-obscured).
            gr.Markdown(value=base64.b64decode( b'ICAgICAgICAgICAgICAgICAgICAjIOWJjeiogAogICAgICAgICAgICAgICAgICAgICog5LmD5pyo5Z2CNzXkvY1UVFPvvJpbaHR0cHM6Ly92aXRzLm5vZ2l6YWthNDYuY2NdKGh0dHBzOi8vdml0cy5ub2dpemFrYTQ2LmNjKSAKICAgICAgICAgICAgICAgICAgICAqIOWbveWGheiuv+mXrui+g+aFou+8jOW7uuiuruS9v+eUqOS7o+eQhi7mm7TmlrDkuo4yMy0wOS0xNuOAgueUsTM15bCP5pe26K+t6Z+z6K6t57uDNDbkvY0KICAgICAgICAgICAgICAgICAgICAqIOasoui/juWKoOWFpeiuqOiuulRH576kOltodHRwczovL3QubWUvK3ZQOE5LMU5NTGlZek1ESmxdKGh0dHBzOi8vdC5tZS8rdlA4TksxTk1MaVl6TURKbCkg576k6YeM5pyJ56yo56yoQm905pa55L6/5Lii5q2M5Y2zQWnnv7vllLHlkozliIbnprvlubLlo7As5LiN6L+H6LSo6YeP5Y+v5rKh5pyJ5omL5Yqo5YiG56a755qE5aW944CCCiAgICAgICAgICAgICAgICAgICAgIyDlo7DmmI4KICAgICAgICAgICAgICAgICAgICAqIOWmgueUqOatpOaooeWei+WItuS9nOmfs+mikeivt+agh+azqOacrOWcqOe6v+i9rOaNouWcsOWdgO+8mmh0dHBzOi8vc292aXRzNC5ub2dpemFrYTQ2LmNj').decode())

            with gr.Tabs():
                with gr.TabItem("单个音频上传"):
                    # Source audio: dry vocals, < 280 s, no BGM/harmony.
                    vc_input3 = gr.Audio("上传音频<280s无BGM无和声的干声", type="filepath", source="upload",value="examples/1.mp3")

                with gr.TabItem("文字转语音(实验性)"):
                    gr.Markdown("文字转语音(TTS)说明:使用edge_tts服务生成音频,并转换为So-VITS模型音色。")
                    # Shared by both tabs: vc_fn and tts_fn read this checkbox.
                    auto_f0 = gr.Checkbox(label="自动f0预测,配合聚类模型f0预测效果更好(仅限转换语音,歌声不要勾选此项会究极跑调)", value=False)
                    with gr.Row():
                        text_input = gr.Textbox(label = "在此输入需要转译的文字(建议打开自动f0预测)限定200字以内,建议f0预测器选dio")#, lines=4
                    with gr.Row():
                        tts_gender = gr.Radio(label = "说话人性别", choices = ["男","女"], value = "女")
                        tts_lang = gr.Dropdown(label = "选择语言,Auto为根据输入文字自动识别", choices=SUPPORTED_LANGUAGES, value = "Auto")
                    with gr.Row():
                        tts_rate = gr.Slider(label = "TTS语音变速(倍速相对值)", minimum = -1, maximum = 3, value = 0, step = 0.1)
                        tts_volume = gr.Slider(label = "TTS语音音量(相对值)", minimum = -1, maximum = 1.5, value = 0, step = 0.1)
                    vc_tts_submit = gr.Button("文本转语音", variant="primary")
            # Speaker selector, seeded from the default model's speaker map.
            spks = list(model.spk2id.keys())

            sid = gr.Dropdown(label="音色", choices=spks, value=base64.b64decode( b'SE9TSElOT19NSU5BTUk=' ).decode())
            sid.change(fn=update_dropdown,inputs=[sid],outputs=[sid])
            sid.update(interactive=True)
            with gr.Accordion(label="↓切换模型(默认44位,另外有58个成员的模型可选,音色具有抽奖性质,可切换尝试)", open=False):
                modelstrs = gr.Dropdown(label = "模型", choices = modelPaths, value = modelPaths[0], type = "value")
                btnMod = gr.Button("载入模型")
                statusa = gr.TextArea()
                # Loading a model reports status and refreshes the speaker list.
                btnMod.click(loadmodel, inputs=[modelstrs], outputs = [statusa,sid])
            with gr.Row():
                slice_db = gr.Slider(label="切片阈值(较嘈杂时-30,保留呼吸声时-50)",maximum=-30, minimum=-70, step=1, value=-40)
                vc_transform = gr.Slider(label="变调(整数,可以正负,半音数量,升高八度就是12)",maximum=16, minimum=-16, step=1, value=0)
                f0_predictor = gr.Radio(label=base64.b64decode( b'ZjDpooTmtYvlmago5aaC6YGH5ZOR6Z+z5Y+v5Lul5bCd6K+V5pu05o2iZjAp5Yet5bmy5aOw5bmy5YeA56iL5bqm6YCJ5oup44CC5o6o6I2QZmNwZeWSjHJtdnBl' ).decode(), choices=["pm","dio","harvest","fcpe","rmvpe"], value="fcpe")
            with gr.Row():
                cluster_ratio = gr.Number(label="聚类模型混合比例,0-1之间,默认为0不启用聚类,能提升音色相似度,但会导致咬字下降(如果使用建议0.5左右)", value=0)# cluster mix ratio
                output_format = gr.Radio(label=base64.b64decode( b'6Z+z6aKR6L6T5Ye65qC85byPKE1QM+S8muWvvOiHtOaXtumXtOi9tOWkmjI3bXMs6ZyA5ZCI5oiQ6K+36YCJZmxhYyk=' ).decode(), choices=["flac", "mp3"], value = "mp3")# output format
            vc_submit = gr.Button("音频转换", variant="primary")

            vc_output1 = gr.Textbox(label=base64.b64decode( b'6Z+z6auY5bmz5Z2H5YGP5beu5Y2K6Z+z5pWw6YeP77yM5L2T546w6L2s5o2i6Z+z6aKR55qE6LeR6LCD5oOF5Ya177yI5LiA6Iis5bCP5LqOMC4177yJ' ).decode())
            vc_output2 = gr.Audio(label="Output Audio")

            with gr.Row():
                # Clicking an example fills vc_input3 and vc_transform.
                gr.Examples(
                    label=base64.b64decode( b'5bmy5aOw56S65L6L77yM54K55Ye75pu05o2i' ).decode(),
                    examples=[
                        ["examples/1.mp3", 0],
                        ["examples/2.mp3", 2],
                        ["examples/3.mp3", 0],
                        ["examples/4.mp3", 5],
                        ["examples/5.mp3", 0],
                        ["examples/6.mp3", 0],
                    ],
                    inputs=[vc_input3, vc_transform],
                    fn=change_audio)

            #gr.Markdown(value=base64.b64decode(b'6I635Y+W5bmy5aOw5pyA5paw6L+e5oub77yaCjEuIOaJo+S6uuWjsOOAkDTpgIkx77yM566A5Y2V55qE5q2M55SoVVZSLU1EWDIzQy1JbnN0Vm9jIEhR77yM5aSN5p2C55qE5q2M55SoM19IUOOAke+8mgoyLiDljrvlkozlo7DjgJAz6YCJMe+8jOWTquS4quaViOaenOWlveWwseeUqOWTquS4quOAke+8mgogICAgVVZSLUJWRS00Ql9TTi00NDEwMC0x6YCJSW5zdHJ1bWVudGFsIE9ubHkKICAgIDVfSFBfS2FyYW9rZS1VVlLpgIlWb2NhbHMgT25seQogICAgNl9IUF9LYXJhb2tlLVVWUumAiVZvY2FscyBPbmx5CjMuIOWOu+a3t+WTjeOAkDLpgIkx77yM5qC55o2u5re35ZON55qE56iL5bqm6YCJ5oup44CR77yaCiAgICBVVlItRGUtRWNoby1Ob3JtYWzpgIlObyBFY2hvIE9ubHnvvIjovbvluqbmt7flk43vvIkKICAgIFVWUi1EZS1FY2hvLUFnZ3Jlc3NpdmXpgIlObyBFY2hvIE9ubHnvvIjph43luqbmt7flk43vvIkK56Gu5L+dVVZS5piv5pyA5paw54mI5pys77yaNS42LjAK5aaC5p6cVVZS6YeM6Z2i5rKh5LiK6L+w5qih5Z6L77yM54K55bCP5omz5omL77yM5Y67RG93bmxvYWQgQ2VudGVy6YeM6Z2i5LiL6L295qih5Z6L77yI6K+36Ieq5aSH5qKv5a2Q77yM5ZCm5YiZ5Lya5LiL6L295aSx6LSl77yJClVWUuS4i+i9ve+8mmh0dHBzOi8vdWx0aW1hdGV2b2NhbHJlbW92ZXIuY29tLwrlpKfpg6jliIbmrYzpg73og73pgJrov4d1dnLlvpfliLDlubLlo7Ao6Zmk5LqG5aSa5Lq65ZCI5ZSxKe+8jOWcqOS6juWkmuWwneivle+8jOWOu+WSjOWjsOWSjOWOu+a3t+WTjeWPr+S7peWAkuaNogoK5b6I6YGX5oa+55qE6YCa55+l77yM5pyJMS8z55qE5oiQ5ZGY6Z+z6Imy5rOE6Zyy77yM5Li76KaB5piv5LiA5Lqbc29sb+absuWwkeeahOaIkOWRmCA65bGx5LiLIOe+juaciO+8jOaftOeUsCDmn5roj5zvvIzml6nlt50g6IGW5p2l77yM5riF5a6uIOODrOOCpO+8jOS4gOODjueArCDnvo7nqbrvvIzoj4Xljp8g5ZKy5pyI77yM5Lit6KW/IOOCouODq+ODjuetiSAK').decode())
            # Speaker roster markdown (base64-obscured member/name list).
            gr.Markdown(value=base64.b64decode(b'QUtJTU9UT19NQU5BVFNVLOeni+WFgyDnnJ/lpI98SUtVVEFfRVJJS0Es55Sf55SwIOe1teaiqOiKsXxOYW5hbWkgSGFzaGltb3RvLOapi+acrCDlpYjjgIXmnKp8SVRPX0pVTk5BLOS8iuiXpCDntJTlpYh8SU5PVUVfU0FZVVJJLOS6leS4iiDlsI/nmb7lkIh8RVRPX01JU0Es6KGb6JekIOe+juW9qXxLQVdBR09fSElOQSzlt53lvowg6Zm96I+cfEtJVEFOT19ISU5BS08s5YyX6YeOIOaXpeWliOWtkHxTQUlUT19BU1VLQSzpvYvol6Qg6aOb6bOlfFNBVE9fWVVVUkks5paJ6JekIOWEqumHjHxTQUtVUkFJX1JFSUtBLOahnOS6lSDnjrLpppl8U0FTQUtJX0tPVE9LTyzkvZDjgIXmnKgg55C05a2QfFNISVJBSVNISV9NQUks55m955+zIOm6u+iho3xTSElOVUNISV9NQUks5paw5YaFIOecnuiho3xTVVpVS0lfQVlBTkUs6Yi05pyoIOe1oumfs3xUQUtBWUFNQV9LQVpVTUks6auY5bGxIOS4gOWun3xURVJBREFfUkFOWkUs5a+655SwIOiYreS4lnxOSVNISU5PX05BTkFTRSzopb/ph44g5LiD54CsfEhJR1VDSElfSElOQSzmqIvlj6Mg5pel5aWIfEhPU0hJTk9fTUlOQU1JLOaYn+mHjiDjgb/jgarjgb/8SE9SSV9NSU9OQSzloIAg5pyq5aSu5aWIfE1BVFNVTVVSQV9TQVlVUkks5p2+5p2RIOaymeWPi+eQhnxZQU1BWkFLSV9SRU5BLOWxseW0jiDmgJzlpYh8V0FLQVRTVUtJX1lVTUks6Iul5pyIIOS9kee+jnxXQVRBTkFCRV9NSVJJQSzmuKHovrog44G/44KK5oSbfElUT19SSVJJQSzkvIrol6Qg55CG44CF5p2PfElXQU1PVE9fUkVOS0Es5bKp5pysIOiTruWKoHxVTUVaQVdBX01JTkFNSSzmooXmvqQg576O5rOifE9aT05PX01PTU9LTyzlpKflnJIg5qGD5a2QfEtVQk9fU0hJT1JJLOS5heS/nSDlj7Lnt5Lph4x8U0FLQUdVQ0hJX1RBTUFNSSzpmKrlj6Mg54+g576OfFNBVE9fS0FFREUs5L2Q6JekIOalk3xOQUtBTVVSQV9SRU5PLOS4readkSDpupfkuYN8TVVLQUlfSEFaVUtJLOWQkeS6lSDokYnmnIh8WUFNQVNISVRBX01JWlVLSSzlsbHkuIsg576O5pyIfFlPREFfWVVVS0ks5LiO55SwIOelkOW4jHxFTkRPX1NBS1VSQSzpgaDol6Qg44GV44GP44KJfEtBS0lfSEFSVUtBLOizgOWWnCDpgaXpppl8S0FLRUhBU0hJX1NBWUFLQSzmjpvmqYsg5rKZ6IC26aaZfEtBTkFHQVdBX1NBWUEs6YeR5bedIOe0l+iAtnxLSVRBR0FXQV9ZVVJJLOWMl+W3nSDmgqDnkIZ8U0hJQkFUQV9ZVU5BLOaftOeUsCDmn5roj5x8U0VJTUlZQV9SRUks5riF5a6uIOODrOOCpHxUQU1VUkFfTUFZVSznlLDmnZEg55yf5L2RfFRTVVRTVUlfQVlBTUUs562S5LqVIOOBguOChOOCgXxIQVlBS0FXQV9TRUlSQSzml6nlt50g6IGW5p2lfFlBS1VCT19NSU8s55+i5LmF5L+dIOe+jue3knxIQVJVS0FfS1VST01JLOm7kuimiyDmmI7pppl8UklLQV9TQVRPLOS9kOiXpCDnkoPmnpx8UlVOQV9IQVlBU0hJLOaelyDnkaDlpYh8TUlZVV9NQVRTVU8s5p2+5bC+IOe+juS9kXxOQU9fWVVNSUtJLOW8k+acqCDlpYjmlrx8SU9LSV9NQU8s5LqU55m+5Z+OIOiMieWkrnxJS0VEQV9URVJFU0Es5rGg55SwIOeRm+e0l3xJQ0hJTk9TRV9NSUtVLOS4gOODjueArCDnvo7nqbp8SU5PVUVfTkFHSSzkupXkuIog5ZKMfE9HQVdBX0FZQSzlsI/lt50g5b2pfE9LVURBX0lST0hBLOWlpeeUsCDjgYTjgo3jga98S0FXQVNBS0lfU0FLVVJBLOW3ne+okSDmoZx8U1VHQVdBUkFfU0FUU1VLSSzoj4Xljp8g5ZKy5pyIfFRPTUlTQVRPX05BTyzlhqjph4wg5aWI5aSufE5BS0FOSVNISV9BUlVOTyzkuK3opb8g44Ki44Or44OO').decode())

    # Wire the two submit buttons; both write to the shared output pair.
    vc_submit.click(vc_fn, [output_format,sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db,f0_predictor],
                    [vc_output1, vc_output2])
    vc_tts_submit.click(tts_fn, [text_input, tts_gender, tts_lang, tts_rate, tts_volume, sid, vc_transform,auto_f0,cluster_ratio, slice_db, f0_predictor], [vc_output1, vc_output2])

app.launch()
231
+
232
+
233
+