import os

import torch
from torch import no_grad, LongTensor
import gradio as gr

import commons
import utils
from models import SynthesizerTrn
from text import text_to_sequence
from text.symbols import symbols

limitation = os.getenv("SYSTEM") == "spaces"  # limit text length when running on Hugging Face Spaces


def get_text(text, hps):
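    """Convert raw text to a LongTensor of symbol ids for the synthesizer."""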
    text_norm = text_to_sequence(text, hps.data.text_cleaners)
    if hps.data.add_blank:
        # Intersperse blank tokens (id 0) between symbols, matching training-time preprocessing.
        text_norm = commons.intersperse(text_norm, 0)
    text_norm = torch.LongTensor(text_norm)
    return text_norm


def create_tts_fn(net_g, hps, speaker_ids):
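    """Return a Gradio callback that runs TTS inference with the given model."""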
    def tts_fn(text, speaker, speed):
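        """Synthesize `text` for the selected speaker at the given speed."""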
        if limitation:
            max_len = 5000
            if len(text) > max_len:
                return f"Error: text is too long (max {max_len} characters)", None

        speaker_id = speaker_ids[speaker]
        stn_tst = get_text(text, hps)

        with no_grad():
            x_tst = stn_tst.unsqueeze(0)
            x_tst_lengths = LongTensor([stn_tst.size(0)])
            sid = LongTensor([speaker_id])
            # length_scale is the inverse of the speed slider, so higher speed means shorter audio.
            audio = net_g.infer(x_tst, x_tst_lengths, sid=sid, noise_scale=.667, noise_scale_w=0.8,
                                length_scale=1.0 / speed)[0][0, 0].data.cpu().float().numpy()
        # Free intermediate tensors promptly; helpful on memory-limited Spaces hardware.
        del stn_tst, x_tst, x_tst_lengths, sid
        return "Success", (hps.data.sampling_rate, audio)

    return tts_fn
    
    
css = """
        #advanced-btn {
            color: white;
            border-color: black;
            background: black;
            font-size: .7rem !important;
            line-height: 19px;
            margin-top: 24px;
            margin-bottom: 12px;
            padding: 2px 8px;
            border-radius: 14px !important;
        }
        #advanced-options {
            display: none;
            margin-bottom: 20px;
        }
"""

if __name__ == '__main__':
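    # Single bundled voice: an Arona MS-iSTFT-VITS checkpoint under pretrained_model/.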
    models_tts = []
    name = 'AronaTTS'
    lang = 'ζ—₯本θͺž (Japanese)'
    example = 'ε…ˆη”Ÿγ€δ»Šζ—₯γ―ε€©ζ°—γŒζœ¬ε½“γ«γ„γ„γ§γ™γ­γ€‚'
    config_path = "pretrained_model/arona_ms_istft_vits.json"
    model_path = "pretrained_model/arona_ms_istft_vits.pth"
    cover_path = "pretrained_model/cover.gif"

    hps = utils.get_hparams_from_file(config_path)

    # Build the synthesizer; input/output sizes are derived from the audio and training config.
    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        **hps.model)
    # Load the pretrained weights and switch to inference mode.
    utils.load_checkpoint(model_path, net_g, None)
    net_g.eval()

    speaker_ids = [0]
    speakers = [name]

    models_tts.append((name, cover_path, speakers, lang, example,
                       hps.symbols, create_tts_fn(net_g, hps, speaker_ids)))

    app = gr.Blocks(css=css)

    with app:
        gr.Markdown("# BlueArchive AronaTTS Using VITS Model\n"
                    "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=openduckparty.AronaTTS)\n\n")
        
        for i, (name, cover_path, speakers, lang, example, _symbols,
                tts_fn) in enumerate(models_tts):

            with gr.Column():
                gr.Markdown(f"## {name}\n\n"
                            f"![cover](file/{cover_path})\n\n"
                            f"lang: {lang}")
                tts_input1 = gr.TextArea(label="Text (5000 characters max)", value=example,
                                         elem_id=f"tts-input{i}")
                tts_input2 = gr.Dropdown(label="Speaker", choices=speakers,
                                         type="index", value=speakers[0])
                tts_input3 = gr.Slider(label="Speed", value=1, minimum=0.1, maximum=2, step=0.1)
                tts_submit = gr.Button("Generate", variant="primary")
                tts_output1 = gr.Textbox(label="Output Message")
                tts_output2 = gr.Audio(label="Output Audio")
                tts_submit.click(tts_fn, [tts_input1, tts_input2, tts_input3],
                                 [tts_output1, tts_output2])

    app.queue(concurrency_count=3).launch(show_api=False)