Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+import spaces
 import os
 import random
 import argparse
@@ -5,7 +6,6 @@ import argparse
 import torch
 import gradio as gr
 import numpy as np
-import spaces

 import ChatTTS

@@ -15,38 +15,39 @@ chat.load_models()



-
 def generate_seed():
     new_seed = random.randint(1, 100000000)
     return {
         "__type__": "update",
         "value": new_seed
-
+        }
+
 @spaces.GPU
 def generate_audio(text, temperature, top_P, top_K, audio_seed_input, text_seed_input, refine_text_flag):
+
     torch.manual_seed(audio_seed_input)
     rand_spk = torch.randn(768)
     params_infer_code = {
-        'spk_emb': rand_spk,
+        'spk_emb': rand_spk,
         'temperature': temperature,
         'top_P': top_P,
         'top_K': top_K,
-
+        }
     params_refine_text = {'prompt': '[oral_2][laugh_0][break_6]'}

     torch.manual_seed(text_seed_input)

     if refine_text_flag:
-        text = chat.infer(text,
+        text = chat.infer(text,
                           skip_refine_text=False,
                           refine_text_only=True,
                           params_refine_text=params_refine_text,
                           params_infer_code=params_infer_code
                           )

-    wav = chat.infer(text,
-                     skip_refine_text=True,
-                     params_refine_text=params_refine_text,
+    wav = chat.infer(text,
+                     skip_refine_text=True,
+                     params_refine_text=params_refine_text,
                      params_infer_code=params_infer_code
                      )

@@ -54,50 +55,53 @@ def generate_audio(text, temperature, top_P, top_K, audio_seed_input, text_seed_
     sample_rate = 24000
     text_data = text[0] if isinstance(text, list) else text

-    return (sample_rate, audio_data), text_data
+    return [(sample_rate, audio_data), text_data]
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Deployed by [ChatTTS.com](https://chattts.com)")

-
-
-        return None, "", "Text is too long, please input text with less than 100 characters."
-    audio, generated_text = generate_audio(text, temperature, top_P, top_K, audio_seed_input, text_seed_input, refine_text_flag)
-    return audio, generated_text, ""
+    default_text = "四川美食确实以辣闻名,但也有不辣的选择。比如甜水面、赖汤圆、蛋烘糕、叶儿粑等,这些小吃口味温和,甜而不腻,也很受欢迎。"
+    text_input = gr.Textbox(label="Input Text", lines=4, placeholder="Please Input Text...", value=default_text)

-
-
-gr.
+    with gr.Row():
+        refine_text_checkbox = gr.Checkbox(label="Refine text", value=True)
+        temperature_slider = gr.Slider(minimum=0.00001, maximum=1.0, step=0.00001, value=0.3, label="Audio temperature")
+        top_p_slider = gr.Slider(minimum=0.1, maximum=0.9, step=0.05, value=0.7, label="top_P")
+        top_k_slider = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_K")

-
-
-
+    with gr.Row():
+        audio_seed_input = gr.Number(value=42, label="Audio Seed")
+        generate_audio_seed = gr.Button("\U0001F3B2")
+        text_seed_input = gr.Number(value=42, label="Text Seed")
+        generate_text_seed = gr.Button("\U0001F3B2")

-
-
-
-
-        top_k_slider = gr.Slider(minimum=1, maximum=20, step=1, value=20, label="top_K")
+    generate_button = gr.Button("Generate")
+
+    text_output = gr.Textbox(label="Output Text", interactive=False)
+    audio_output = gr.Audio(label="Output Audio")

-
-
-
-
-
+    generate_audio_seed.click(generate_seed,
+                              inputs=[],
+                              outputs=audio_seed_input)
+
+    generate_text_seed.click(generate_seed,
+                             inputs=[],
+                             outputs=text_seed_input)
+
+    generate_button.click(generate_audio,
+                          inputs=[text_input, temperature_slider, top_p_slider, top_k_slider, audio_seed_input, text_seed_input, refine_text_checkbox],
+                          outputs=[audio_output, text_output])

-
+parser = argparse.ArgumentParser(description='ChatTTS demo Launch')
+parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
+parser.add_argument('--server_port', type=int, default=8080, help='Server port')
+args = parser.parse_args()

-
-        audio_output = gr.Audio(label="Output Audio")
+# demo.launch(server_name=args.server_name, server_port=args.server_port, inbrowser=True)

-        generate_audio_seed.click(generate_seed, inputs=[], outputs=audio_seed_input)
-        generate_text_seed.click(generate_seed, inputs=[], outputs=text_seed_input)
-        generate_button.click(check_and_generate, inputs=[text_input, temperature_slider, top_p_slider, top_k_slider, audio_seed_input, text_seed_input, refine_text_checkbox], outputs=[audio_output, text_output, text_length_warning])

-    parser = argparse.ArgumentParser(description='ChatTTS demo Launch')
-    parser.add_argument('--server_name', type=str, default='0.0.0.0', help='Server name')
-    parser.add_argument('--server_port', type=int, default=8080, help='Server port')
-    args = parser.parse_args()

-    # demo.queue(max_size=1).launch(server_name=args.server_name, server_port=args.server_port, inbrowser=True)
-    demo.queue(max_size=4).launch()

 if __name__ == '__main__':
-
+    demo.launch()
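
For context, the updated app.py follows the usual ZeroGPU pattern for Hugging Face Spaces: `spaces` is imported before `torch` (the change moves it to the first line of the file) and the inference function is decorated with `@spaces.GPU`, so a GPU is attached only while that call runs. Below is a minimal, self-contained sketch of that pattern; it assumes a Space with ZeroGPU hardware, and `toy_model` / `synthesize` are illustrative stand-ins rather than parts of app.py.

# Minimal sketch of the ZeroGPU + Gradio Blocks pattern used in the diff above.
# Assumptions: running on a Hugging Face Space with ZeroGPU hardware;
# toy_model and synthesize are illustrative placeholders, not part of app.py.
import spaces            # imported before torch, as in the updated app.py
import torch
import gradio as gr

toy_model = torch.nn.Linear(8, 8)   # stand-in for a real model such as ChatTTS

@spaces.GPU              # a GPU is attached only for the duration of this call
def synthesize(seed):
    torch.manual_seed(int(seed))
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = toy_model.to(device)
    with torch.no_grad():
        out = model(torch.randn(8, device=device))
    return out.cpu().tolist()

with gr.Blocks() as demo:
    seed_input = gr.Number(value=42, label="Seed")
    run_button = gr.Button("Run")
    output_box = gr.JSON(label="Output")
    # Wire the button to the GPU-decorated function, mirroring generate_button.click(...)
    run_button.click(synthesize, inputs=seed_input, outputs=output_box)

if __name__ == '__main__':
    demo.launch()

When not running on ZeroGPU hardware, the decorator should behave as a no-op, so the sketch degrades to ordinary Gradio code on CPU or a local GPU.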