Commit 86a3494 by soujanyaporia
Parent(s): 0b1b36b
Update app.py

app.py CHANGED
@@ -53,13 +53,13 @@ class Tango:
         for i in range(0, len(lst), n):
             yield lst[i:i + n]
 
-    def generate(self, prompt, steps=100, guidance=3, samples=…
+    def generate(self, prompt, steps=100, guidance=3, samples=1, disable_progress=True):
         """ Genrate audio for a single prompt string. """
         with torch.no_grad():
             latents = self.model.inference([prompt], self.scheduler, steps, guidance, samples, disable_progress=disable_progress)
             mel = self.vae.decode_first_stage(latents)
             wave = self.vae.decode_to_waveform(mel)
-        return wave
+        return wave[0]
 
     def generate_for_batch(self, prompts, steps=200, guidance=3, samples=1, batch_size=8, disable_progress=True):
         """ Genrate audio for a list of prompt strings. """
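For context (not part of the commit): the behavioral change in this hunk is that generate() now returns wave[0], a single waveform, rather than the whole batch. A minimal sketch of the shape contract this appears to assume, using a NumPy stand-in rather than Tango's real VAE:

import numpy as np

# Hypothetical stand-in for decode_to_waveform: the diff suggests it returns
# one waveform per requested sample, i.e. shape (samples, num_frames).
def decode_to_waveform_stub(samples: int = 1, num_frames: int = 16000) -> np.ndarray:
    return np.zeros((samples, num_frames), dtype=np.int16)

wave = decode_to_waveform_stub()    # shape (1, 16000)
single = wave[0]                    # what generate() now returns
assert single.shape == (16000,)     # a 1-D array, ready for wavio.write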
@@ -83,33 +83,18 @@ tango.vae.to(device_type)
 tango.stft.to(device_type)
 tango.model.to(device_type)
 
-@spaces.GPU(duration=…
+@spaces.GPU(duration=60)
 def gradio_generate(prompt, output_format, steps, guidance):
     output_wave = tango.generate(prompt, steps, guidance)
-    output_filename_1 = "tmp.wav"
-    wavio.write(output_filename_1, output_wave[0], rate=16000, sampwidth=2)
-    # output_wave = tango.generate_for_batch([prompt], steps, guidance, samples=3)
     # output_filename = f"{prompt.replace(' ', '_')}_{steps}_{guidance}"[:250] + ".wav"
-
-
-    # wavio.write(output_filename_1, output_wave[0][0], rate=16000, sampwidth=2)
-    # output_filename_2 = "tmp2.wav"
-    # wavio.write(output_filename_2, output_wave[0][1], rate=16000, sampwidth=2)
-    # output_filename_3 = "tmp3.wav"
-    # wavio.write(output_filename_3, output_wave[0][2], rate=16000, sampwidth=2)
+    output_filename = "temp.wav"
+    wavio.write(output_filename, output_wave, rate=16000, sampwidth=2)
 
     if (output_format == "mp3"):
-        AudioSegment.from_wav("…
-
-        # AudioSegment.from_wav("tmp1.wav").export("tmp1.mp3", format = "mp3")
-        # output_filename_1 = "tmp1.mp3"
-        # AudioSegment.from_wav("tmp2.wav").export("tmp2.mp3", format = "mp3")
-        # output_filename_2 = "tmp2.mp3"
-        # AudioSegment.from_wav("tmp3.wav").export("tmp3.mp3", format = "mp3")
-        # output_filename_3 = "tmp3.mp3"
+        AudioSegment.from_wav("temp.wav").export("temp.mp3", format = "mp3")
+        output_filename = "temp.mp3"
 
-
-    return output_filename_1
+    return output_filename
 
 # description_text = """
 # <p><a href="https://huggingface.co/spaces/declare-lab/tango/blob/main/app.py?duplicate=true"> <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> For faster inference without waiting in queue, you may duplicate the space and upgrade to a GPU in the settings. <br/><br/>
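Two things happen in this hunk: the handler gains @spaces.GPU(duration=60), which on a ZeroGPU Space requests a GPU slot of up to 60 seconds per call, and the dead three-sample file juggling collapses into one temp file plus an optional mp3 re-encode. A self-contained sketch of that save-then-convert pattern (illustrative file names, not the commit's code; assumes pydub can find an ffmpeg binary):

import numpy as np
import wavio
from pydub import AudioSegment

def save_audio(wave: np.ndarray, output_format: str = "wav") -> str:
    filename = "demo.wav"                                 # illustrative name
    wavio.write(filename, wave, rate=16000, sampwidth=2)  # 16-bit PCM at 16 kHz
    if output_format == "mp3":
        # pydub shells out to ffmpeg for the mp3 encode
        AudioSegment.from_wav(filename).export("demo.mp3", format="mp3")
        filename = "demo.mp3"
    return filename

print(save_audio(np.zeros(16000, dtype=np.int16)))        # -> demo.wav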
@@ -138,11 +123,8 @@ Generate audio using Tango2 by providing a text prompt. Tango2 was built from Ta
 """
 # Gradio input and output components
 input_text = gr.Textbox(lines=2, label="Prompt")
-output_format = gr.Radio(label = "Output format", info = "The file you can …
-
-# output_audio_1 = gr.Audio(label="Generated Audio #1/3", type="filepath")
-# output_audio_2 = gr.Audio(label="Generated Audio #2/3", type="filepath")
-# output_audio_3 = gr.Audio(label="Generated Audio #3/3", type="filepath")
+output_format = gr.Radio(label = "Output format", info = "The file you can dowload", choices = ["mp3", "wav"], value = "wav")
+output_audio = gr.Audio(label="Generated Audio", type="filepath")
 denoising_steps = gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps", interactive=True)
 guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale", interactive=True)
 
@@ -150,8 +132,7 @@ guidance_scale = gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guid
 gr_interface = gr.Interface(
     fn=gradio_generate,
     inputs=[input_text, output_format, denoising_steps, guidance_scale],
-    outputs=[…
-    # outputs=[output_audio_1, output_audio_2, output_audio_3],
+    outputs=[output_audio],
     title="Tango 2: Aligning Diffusion-based Text-to-Audio Generations through Direct Preference Optimization",
     description=description_text,
     allow_flagging=False,
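The last two hunks replace the commented-out trio of audio outputs with a single gr.Audio component wired into gr.Interface. A minimal runnable sketch of the same single-output wiring, with a dummy handler standing in for gradio_generate:

import gradio as gr

def fake_generate(prompt, output_format, steps, guidance):
    # Dummy handler: a real one would synthesize audio and return its path.
    return "demo.wav"

demo = gr.Interface(
    fn=fake_generate,
    inputs=[
        gr.Textbox(lines=2, label="Prompt"),
        gr.Radio(choices=["mp3", "wav"], value="wav", label="Output format"),
        gr.Slider(minimum=100, maximum=200, value=100, step=1, label="Steps"),
        gr.Slider(minimum=1, maximum=10, value=3, step=0.1, label="Guidance Scale"),
    ],
    outputs=gr.Audio(label="Generated Audio", type="filepath"),
)
# demo.launch()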