Update docs

- README.md +7 -1
- do_tts.py +107 -89
- utils/audio.py +2 -0
README.md

```diff
@@ -14,19 +14,25 @@ expect ~5 seconds of speech to take ~30 seconds to produce on the latest hardwar
 
 ## What the heck is this?
 
-Tortoise TTS is inspired by OpenAI's DALLE, applied to speech data. It is made up of 4 separate models that work together
+Tortoise TTS is inspired by OpenAI's DALLE, applied to speech data. It is made up of 4 separate models that work together.
+These models are all derived from different repositories which are all linked. All the models have been modified
+for this use case (some substantially so).
 
 First, an autoregressive transformer stack predicts discrete speech "tokens" given a text prompt. This model is very
 similar to the GPT model used by DALLE, except it operates on speech data.
+Based on: [GPT2 from Transformers](https://huggingface.co/docs/transformers/model_doc/gpt2)
 
 Next, a CLIP model judges a batch of outputs from the autoregressive transformer against the provided text and stack
 ranks the outputs according to most probable. You could use greedy or beam-search decoding but in my experience CLIP
 decoding creates considerably better results.
+Based on [CLIP from lucidrains](https://github.com/lucidrains/DALLE-pytorch/blob/main/dalle_pytorch/dalle_pytorch.py)
 
 Next, the speech "tokens" are decoded into a low-quality MEL spectrogram using a VQVAE.
+Based on [VQVAE2 by rosinality](https://github.com/rosinality/vq-vae-2-pytorch)
 
 Finally, the output of the VQVAE is further decoded by a UNet diffusion model into raw audio, which can be placed in
 a wav file.
+Based on [ImprovedDiffusion by openai](https://github.com/openai/improved-diffusion)
 
 ## How do I use this?
 
```
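To make the CLIP re-ranking step described in the README concrete, here is a small self-contained sketch of the pattern in isolation: score a batch of candidate token sequences, then keep the top k. The random scores are stand-ins for real text/speech CLIP similarities; the 8192-token vocabulary and 250-token sequence length match the model configuration used in do_tts.py below.

```python
import torch

# Candidate speech-token sequences, as the autoregressive model would emit them.
candidates = torch.randint(0, 8192, (16, 250))  # 16 candidates, 250 tokens each

# Stand-in for CLIP similarity scores between the text prompt and each candidate.
scores = torch.randn(16)

# Keep the k most probable candidates, exactly as do_tts.py does with torch.topk.
best = candidates[torch.topk(scores, k=2).indices]
print(best.shape)  # torch.Size([2, 250])
```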
do_tts.py

```diff
@@ -25,25 +25,7 @@ def load_discrete_vocoder_diffuser(trained_diffusion_steps=4000, desired_diffusi
                            model_var_type='learned_range', loss_type='mse', betas=get_named_beta_schedule('linear', trained_diffusion_steps))
 
 
-def do_spectrogram_diffusion(diffusion_model, dvae_model, diffuser, mel_codes, conditioning_input, spectrogram_compression_factor=128):
-    """
-    Uses the specified diffusion model and DVAE model to convert the provided MEL & conditioning inputs into an audio clip.
-    """
-    with torch.no_grad():
-        mel = dvae_model.decode(mel_codes)[0]
-
-        # Pad MEL to multiples of 2048//spectrogram_compression_factor
-        msl = mel.shape[-1]
-        dsl = 2048 // spectrogram_compression_factor
-        gap = dsl - (msl % dsl)
-        if gap > 0:
-            mel = torch.nn.functional.pad(mel, (0, gap))
-
-        output_shape = (mel.shape[0], 1, mel.shape[-1] * spectrogram_compression_factor)
-        return diffuser.p_sample_loop(diffusion_model, output_shape, model_kwargs={'spectrogram': mel, 'conditioning_input': conditioning_input})
-
-
-def load_conditioning(path, sample_rate=22050, cond_length=44100):
+def load_conditioning(path, sample_rate=22050, cond_length=132300):
     rel_clip = load_audio(path, sample_rate)
     gap = rel_clip.shape[-1] - cond_length
     if gap < 0:
@@ -82,86 +64,122 @@ def fix_autoregressive_output(codes, stop_token):
     return codes
 
 
+def do_spectrogram_diffusion(diffusion_model, dvae_model, diffuser, mel_codes, conditioning_input, spectrogram_compression_factor=128, mean=False):
+    """
+    Uses the specified diffusion model and DVAE model to convert the provided MEL & conditioning inputs into an audio clip.
+    """
+    with torch.no_grad():
+        mel = dvae_model.decode(mel_codes)[0]
+
+        # Pad MEL to multiples of 2048//spectrogram_compression_factor
+        msl = mel.shape[-1]
+        dsl = 2048 // spectrogram_compression_factor
+        gap = dsl - (msl % dsl)
+        if gap > 0:
+            mel = torch.nn.functional.pad(mel, (0, gap))
+
+        output_shape = (mel.shape[0], 1, mel.shape[-1] * spectrogram_compression_factor)
+        if mean:
+            return diffuser.p_sample_loop(diffusion_model, output_shape, noise=torch.zeros(output_shape, device=mel_codes.device),
+                                          model_kwargs={'spectrogram': mel, 'conditioning_input': conditioning_input})
+        else:
+            return diffuser.p_sample_loop(diffusion_model, output_shape, model_kwargs={'spectrogram': mel, 'conditioning_input': conditioning_input})
+
+
 if __name__ == '__main__':
+    # These are voices drawn randomly from the training set. You are free to substitute your own voices in, but testing
+    # has shown that the model does not generalize to new voices very well.
     preselected_cond_voices = {
-        … (old voice presets, truncated in the source view)
+        # Male voices
+        'dotrice': ['voices/dotrice/1.wav', 'voices/dotrice/2.wav'],
+        'harris': ['voices/male_harris1.wav', 'voices/male_harris2.wav'],
+        'lescault': ['voices/male_lescault1.wav', 'voices/male_lescault2.wav'],
+        'otto': ['voices/male_otto1.wav', 'voices/male_otto2.wav'],
+        # Female voices
+        'atkins': ['voices/female_atkins1.wav', 'voices/female_atkins2.wav'],
+        'grace': ['voices/female_grace1.wav', 'voices/female_grace2.wav'],
+        'kennard': ['voices/female_kennard1.wav', 'voices/female_kennard2.wav'],
+        'mol': ['voices/female_mol1.wav', 'voices/female_mol2.wav'],
     }
 
     parser = argparse.ArgumentParser()
     parser.add_argument('-autoregressive_model_path', type=str, help='Autoregressive model checkpoint to load.', default='.models/unified_voice.pth')
     parser.add_argument('-clip_model_path', type=str, help='CLIP model checkpoint to load.', default='.models/clip.pth')
-    parser.add_argument('-diffusion_model_path', type=str, help='Diffusion model checkpoint to load.', default='…
-    parser.add_argument('-dvae_model_path', type=str, help='DVAE model checkpoint to load.', default='…
+    parser.add_argument('-diffusion_model_path', type=str, help='Diffusion model checkpoint to load.', default='.models/diffusion_vocoder.pth')
+    parser.add_argument('-dvae_model_path', type=str, help='DVAE model checkpoint to load.', default='.models/dvae.pth')
     parser.add_argument('-text', type=str, help='Text to speak.', default="I am a language model that has learned to speak.")
-    parser.add_argument('-…
-    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=…
-    parser.add_argument('-num_batches', type=int, help='How many batches those samples should be produced over.', default=…
+    parser.add_argument('-voice', type=str, help='Use a preset conditioning voice (defined above). Overrides cond_path.', default='dotrice,harris,lescault,otto,atkins,grace,kennard,mol')
+    parser.add_argument('-num_samples', type=int, help='How many total outputs the autoregressive transformer should produce.', default=512)
+    parser.add_argument('-num_batches', type=int, help='How many batches those samples should be produced over.', default=16)
     parser.add_argument('-num_outputs', type=int, help='Number of outputs to produce.', default=2)
     parser.add_argument('-output_path', type=str, help='Where to store outputs.', default='results/')
     args = parser.parse_args()
     os.makedirs(args.output_path, exist_ok=True)
 
-    … (the remaining 61 lines of the old __main__ body, truncated in the source view)
+    for voice in args.voice.split(','):
+        print("Loading GPT TTS..")
+        autoregressive = UnifiedVoice(max_mel_tokens=300, max_text_tokens=200, max_conditioning_inputs=2, layers=30, model_dim=1024,
+                                      heads=16, number_text_tokens=256, start_text_token=255, checkpointing=False, train_solo_embeddings=False).cuda().eval()
+        autoregressive.load_state_dict(torch.load(args.autoregressive_model_path))
+        stop_mel_token = autoregressive.stop_mel_token
+
+        print("Loading data..")
+        tokenizer = VoiceBpeTokenizer()
+        text = torch.IntTensor(tokenizer.encode(args.text)).unsqueeze(0).cuda()
+        text = F.pad(text, (0,1))  # This may not be necessary.
+        cond_paths = preselected_cond_voices[voice]
+        conds = []
+        for cond_path in cond_paths:
+            c, cond_wav = load_conditioning(cond_path)
+            conds.append(c)
+        conds = torch.stack(conds, dim=1)  # And just use the last cond_wav for the diffusion model.
+
+        with torch.no_grad():
+            print("Performing autoregressive inference..")
+            samples = []
+            for b in tqdm(range(args.num_batches)):
+                codes = autoregressive.inference_speech(conds, text, num_beams=1, repetition_penalty=1.0, do_sample=True, top_k=50, top_p=.95,
+                                                        temperature=.9, num_return_sequences=args.num_samples//args.num_batches, length_penalty=1)
+                padding_needed = 250 - codes.shape[1]
+                codes = F.pad(codes, (0, padding_needed), value=stop_mel_token)
+                samples.append(codes)
+            del autoregressive
+
+            print("Loading CLIP..")
+            clip = VoiceCLIP(dim_text=512, dim_speech=512, dim_latent=512, num_text_tokens=256, text_enc_depth=8, text_seq_len=120, text_heads=8,
+                             num_speech_tokens=8192, speech_enc_depth=10, speech_heads=8, speech_seq_len=250).cuda().eval()
+            clip.load_state_dict(torch.load(args.clip_model_path))
+            print("Performing CLIP filtering..")
+            clip_results = []
+            for batch in samples:
+                for i in range(batch.shape[0]):
+                    batch[i] = fix_autoregressive_output(batch[i], stop_mel_token)
+                text = text[:, :120]  # Ugly hack to fix the fact that I didn't train CLIP to handle long enough text.
+                clip_results.append(clip(text.repeat(batch.shape[0], 1),
+                                         torch.full((batch.shape[0],), fill_value=text.shape[1]-1, dtype=torch.long, device='cuda'),
+                                         batch, torch.full((batch.shape[0],), fill_value=batch.shape[1]*1024, dtype=torch.long, device='cuda'),
+                                         return_loss=False))
+            clip_results = torch.cat(clip_results, dim=0)
+            samples = torch.cat(samples, dim=0)
+            best_results = samples[torch.topk(clip_results, k=args.num_outputs).indices]
+
+            # Delete the autoregressive and clip models to free up GPU memory
+            del samples, clip
+
+            print("Loading DVAE..")
+            dvae = DiscreteVAE(positional_dims=1, channels=80, hidden_dim=512, num_resnet_blocks=3, codebook_dim=512, num_tokens=8192, num_layers=2,
+                               record_codes=True, kernel_size=3, use_transposed_convs=False).cuda().eval()
+            dvae.load_state_dict(torch.load(args.dvae_model_path))
+            print("Loading Diffusion Model..")
+            diffusion = DiscreteDiffusionVocoder(model_channels=128, dvae_dim=80, channel_mult=[1, 1, 1.5, 2, 3, 4, 6, 8, 8, 8, 8], num_res_blocks=[1, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1],
+                                                 spectrogram_conditioning_resolutions=[2,512], attention_resolutions=[512,1024], num_heads=4, kernel_size=3, scale_factor=2,
+                                                 conditioning_inputs_provided=True, time_embed_dim_multiplier=4).cuda().eval()
+            diffusion.load_state_dict(torch.load(args.diffusion_model_path))
+            diffuser = load_discrete_vocoder_diffuser(desired_diffusion_steps=100)
+
+            print("Performing vocoding..")
+            # Perform vocoding on each batch element separately: The diffusion model is very memory (and compute!) intensive.
+            for b in range(best_results.shape[0]):
+                code = best_results[b].unsqueeze(0)
+                wav = do_spectrogram_diffusion(diffusion, dvae, diffuser, code, cond_wav, spectrogram_compression_factor=256, mean=True)
+                torchaudio.save(os.path.join(args.output_path, f'{voice}_{b}.wav'), wav.squeeze(0).cpu(), 22050)
```
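One detail in the relocated `do_spectrogram_diffusion` worth spelling out: the MEL is padded so that, after the vocoder upsamples each frame by `spectrogram_compression_factor`, the raw-audio length is a multiple of 2048, presumably so the UNet's down- and up-sampling stages divide evenly. A small worked example of that arithmetic (the 413-frame MEL is an arbitrary illustration; the factor of 256 is the value the script passes for vocoding):

```python
factor = 256              # compression factor used for vocoding in this commit
msl = 413                 # example MEL length in frames (arbitrary)
dsl = 2048 // factor      # = 8 MEL frames per 2048 output audio samples
gap = dsl - (msl % dsl)   # frames of padding to reach the next multiple of dsl
padded = msl + gap if gap > 0 else msl
assert (padded * factor) % 2048 == 0
print(padded, padded * factor)  # 416 frames -> 106496 audio samples
```

With the checkpoints in `.models/` as the new defaults expect, the script should then be runnable per voice with something like `python do_tts.py -text "Hello world." -voice dotrice` (the text and voice here are just examples), writing `dotrice_0.wav` and `dotrice_1.wav` into `results/`.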
utils/audio.py

```diff
@@ -1,5 +1,7 @@
 import torch
 import torchaudio
+import numpy as np
+from scipy.io.wavfile import read
 
 
 def load_wav_to_torch(full_path):
```
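The two new imports are presumably consumed by `load_wav_to_torch`. A minimal sketch of the usual scipy-backed pattern, assuming the function returns a `(tensor, sampling_rate)` pair as in other Tacotron-derived codebases; this is an illustration, not necessarily this repository's exact body:

```python
import numpy as np
import torch
from scipy.io.wavfile import read


def load_wav_to_torch(full_path):
    # scipy's read returns (sampling_rate, data) with integer or float PCM samples.
    sampling_rate, data = read(full_path)
    # Hand back a float32 tensor; normalization (e.g. dividing by 32768 for
    # int16 data) is left to the caller in this sketch.
    return torch.FloatTensor(data.astype(np.float32)), sampling_rate
```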