import gradio as gr
import torch
import torchaudio  # already pulled in by speechbrain; used here to resample uploaded audio
import soundfile as sf
import spaces
import os
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
from speechbrain.pretrained import EncoderClassifier
from datasets import load_dataset

device = "cuda" if torch.cuda.is_available() else "cpu"

def load_models_and_data():
    # SpeechT5: base processor, a Turkish fine-tune of the TTS model, and the HiFi-GAN vocoder
    model_name = "microsoft/speecht5_tts"
    processor = SpeechT5Processor.from_pretrained(model_name)
    model = SpeechT5ForTextToSpeech.from_pretrained("emirhanbilgic/speecht5_finetuned_emirhan_tr").to(device)
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan").to(device)
    
    # Speaker encoder: x-vectors condition SpeechT5 on a target voice
    spk_model_name = "speechbrain/spkrec-xvect-voxceleb"
    speaker_model = EncoderClassifier.from_hparams(
        source=spk_model_name,
        run_opts={"device": device},
        savedir=os.path.join("/tmp", spk_model_name),
    )
    
    # Load one sample from a Turkish speech dataset to serve as the default voice
    dataset = load_dataset("erenfazlioglu/turkishvoicedataset", split="train")
    example = dataset[304]
    
    return model, processor, vocoder, speaker_model, example

model, processor, vocoder, speaker_model, default_example = load_models_and_data()

def create_speaker_embedding(waveform):
    # Encode a mono 16 kHz waveform into an L2-normalized x-vector
    with torch.no_grad():
        # soundfile and datasets may return float64; the encoder expects float32
        waveform = torch.tensor(waveform, dtype=torch.float32)
        speaker_embeddings = speaker_model.encode_batch(waveform.unsqueeze(0).to(device))
        speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2)
        speaker_embeddings = speaker_embeddings.squeeze()
    return speaker_embeddings
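
# Note: spkrec-xvect-voxceleb yields a 512-dimensional x-vector, the speaker-embedding
# size SpeechT5 expects, so the result can be passed to `generate_speech` once a
# batch dimension is added (see `text_to_speech` below).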

def prepare_default_embedding(example):
    # Build the fallback embedding from the bundled dataset sample
    audio = example["audio"]
    return create_speaker_embedding(audio["array"])

default_embedding = prepare_default_embedding(default_example)

@spaces.GPU(duration=60)
def text_to_speech(text, audio_file=None):
    inputs = processor(text=text, return_tensors="pt").to(device)
    
    if audio_file is not None:
        # Load the uploaded sample and derive a speaker embedding from it
        waveform, sample_rate = sf.read(audio_file)
        if len(waveform.shape) > 1:
            waveform = waveform[:, 0]  # Take the first channel if stereo
        if sample_rate != 16000:
            # The x-vector encoder was trained on 16 kHz audio, so resample uploads
            waveform = torchaudio.functional.resample(
                torch.tensor(waveform, dtype=torch.float32), sample_rate, 16000
            ).numpy()
        speaker_embeddings = create_speaker_embedding(waveform)
    else:
        # Use default embedding if no audio file is provided
        speaker_embeddings = default_embedding
    
    speech = model.generate_speech(inputs["input_ids"], speaker_embeddings.unsqueeze(0), vocoder=vocoder)
    sf.write("output.wav", speech.cpu().numpy(), samplerate=16000)
    return "output.wav"

iface = gr.Interface(
    fn=text_to_speech,
    inputs=[
        gr.Textbox(label="Enter Turkish text to convert to speech"),
        gr.Audio(label="Upload a short audio sample of the target speaker (optional)", type="filepath")
    ],
    outputs=gr.Audio(label="Generated Speech"),
    title="Turkish SpeechT5 Text-to-Speech Demo with Optional Custom Voice",
    description="Enter Turkish text, optionally upload a short audio sample of the target speaker, and listen to the generated speech using the fine-tuned SpeechT5 model."
)

iface.launch()  # share=True is unnecessary (and ignored) on Hugging Face Spaces
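
# Running notes (a sketch, assuming a standard setup): on a Hugging Face ZeroGPU
# Space this file runs as-is; to try it locally, install gradio, torch, torchaudio,
# soundfile, spaces, transformers, speechbrain, and datasets, then run `python app.py`.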