# flake8: noqa: E402
import logging
logging.getLogger("numba").setLevel(logging.WARNING)
logging.getLogger("markdown_it").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
logging.basicConfig(
level=logging.INFO, format="| %(name)s | %(levelname)s | %(message)s"
)
logger = logging.getLogger(__name__)
import argparse
import os
import re
import sys

import gradio as gr
import numpy as np
import soundfile
import torch

import commons
import utils
from models import SynthesizerTrn
from text.symbols import symbols
from text import cleaned_text_to_sequence, get_bert
from text.cleaner import clean_text

# The synthesizer is created in __main__; keep a module-level handle for infer().
net_g = None
# Tab label -> list of speaker names shown in the Gradio UI
# (the names are keys of hps.data.spk2id).
BandList = {
    " ": [
        "AKIMOTO_MANATSU", "ENDO_SAKURA", "ETO_MISA", "FUKAGAWA_MAI", "HARUKA_KUROMI",
        "HASHIMOTO_NANAMI", "HAYAKAWA_SEIRA", "HIGUCHI_HINA", "HORI_MIONA", "HOSHINO_MINAMI",
        "ICHINOSE_MIKU", "IKEDA_TERESA", "IKOMA_RINA", "IKUTA_ERIKA", "INOUE_NAGI",
        "INOUE_SAYURI", "IOKI_MAO", "ITO_JUNNA", "ITO_KARIN", "ITO_MARIKA", "ITO_RIRIA",
        "IWAMOTO_RENKA", "KAKEHASHI_SAYAKA", "KAKI_HARUKA", "KANAGAWA_SAYA", "KAWAGO_HINA",
        "KAWAMURA_MAHIRO", "KAWASAKI_SAKURA", "KITAGAWA_YURI", "KITANO_HINAKO", "KUBO_SHIORI",
        "MATSUMURA_SAYURI", "MIYU_MATSUO", "MUKAI_HAZUKI", "NAKADA_KANA", "NAKAMOTO_HIMEKA",
        "NAKAMURA_RENO", "NAKANISHI_ARUNO", "NAO_YUMIKI", "NISHINO_NANASE", "NOUJO_AMI",
        "OGAWA_AYA", "OKAMOTO_HINA", "OKUDA_IROHA", "OZONO_MOMOKO", "RIKA_SATO",
        "RUNA_HAYASHI", "SAGARA_IORI", "SAITO_ASUKA", "SAITO_CHIHARU", "SAKAGUCHI_TAMAMI",
        "SAKURAI_REIKA", "SASAKI_KOTOKO", "SATO_KAEDE", "SATO_YUURI", "SEIMIYA_REI",
        "SHIBATA_YUNA", "SHINUCHI_MAI", "SHIRAISHI_MAI", "SUGAWARA_SATSUKI", "SUZUKI_AYANE",
        "TAKAYAMA_KAZUMI", "TAMURA_MAYU", "TERADA_RANZE", "TOMISATO_NAO", "TSUTSUI_AYAME",
        "UMEZAWA_MINAMI", "WADA_MAAYA", "WAKATSUKI_YUMI", "WATANABE_MIRIA", "YAKUBO_MIO",
        "YAMASHITA_MIZUKI", "YAMAZAKI_RENA", "YODA_YUUKI", "YOSHIDA_AYANO_CHRISTIE",
    ],
}
if sys.platform == "darwin" and torch.backends.mps.is_available():
    device = "mps"
    os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
else:
    # Fall back to CPU instead of assuming CUDA is present.
    device = "cuda" if torch.cuda.is_available() else "cpu"
def is_japanese(string):
    """Return True if the string contains any hiragana/katakana character."""
    for ch in string:
        if 0x3040 < ord(ch) < 0x30FF:
            return True
    return False
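# A quick illustration of the heuristic above (hedged: it only looks for kana,
# so kanji-only Japanese text would be classified as Chinese):
#
#     is_japanese("こんにちは")  # -> True  (hiragana lies in U+3041..U+30FE)
#     is_japanese("你好世界")    # -> False (CJK ideographs are outside that range)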
def extrac(text):
    """Strip HTML-like tags and split text into short sentences for synthesis."""
    text = re.sub("<[^>]*>", "", text)
    final_list = []
    for line in re.split(r'\n', text):
        line = line.replace('\n', '').replace(' ', '')
        # Maximum length of a single sentence: 20 characters.
        if len(line) > 1:
            if len(line) > 20:
                # Re-split long lines on sentence-ending punctuation and
                # restore a trailing full stop on each fragment.
                for fragment in re.split(r'。|!', line):
                    if len(fragment) > 1:
                        final_list.append(fragment + '。')
            else:
                final_list.append(line)
    final_list = [x for x in final_list if x != '']
    return final_list
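# Sketch of extrac()'s behaviour (hedged; exact output depends on the
# 20-character threshold above):
#
#     extrac("<b>今日はいい天気ですね。</b>\n散歩しましょう。")
#     # -> ['今日はいい天気ですね。', '散歩しましょう。']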
def get_text(text, language_str, hps):
    norm_text, phone, tone, word2ph = clean_text(text, language_str)
    phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)

    if hps.data.add_blank:
        phone = commons.intersperse(phone, 0)
        tone = commons.intersperse(tone, 0)
        language = commons.intersperse(language, 0)
        for i in range(len(word2ph)):
            word2ph[i] = word2ph[i] * 2
        word2ph[0] += 1
    bert = get_bert(norm_text, word2ph, language_str, device)
    del word2ph
    assert bert.shape[-1] == len(phone), phone

    if language_str == "ZH":
        ja_bert = torch.zeros(768, len(phone))
    elif language_str in ("JA", "JP"):
        # Accept both spellings: callers in this file pass "JP".
        ja_bert = bert
        bert = torch.zeros(1024, len(phone))
    else:
        bert = torch.zeros(1024, len(phone))
        ja_bert = torch.zeros(768, len(phone))
    assert bert.shape[-1] == len(
        phone
    ), f"Bert seq len {bert.shape[-1]} != {len(phone)}"

    phone = torch.LongTensor(phone)
    tone = torch.LongTensor(tone)
    language = torch.LongTensor(language)
    return bert, ja_bert, phone, tone, language
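# Shape summary for get_text() (hedged, inferred from the code above): with
# add_blank enabled, intersperse() makes the sequence length T = 2 * len(phones) + 1,
# and the function returns
#     bert    : FloatTensor (1024, T) -- Chinese BERT features, or zeros
#     ja_bert : FloatTensor (768, T)  -- Japanese BERT features, or zeros
#     phone, tone, language : LongTensor (T,)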
def infer(text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language):
    logger.info(
        "infer: %s | sdp=%s noise=%s noise_w=%s length=%s sid=%s lang=%s",
        text, sdp_ratio, noise_scale, noise_scale_w, length_scale, sid, language,
    )
global net_g
bert, ja_bert, phones, tones, lang_ids = get_text(text, language, hps)
with torch.no_grad():
x_tst = phones.to(device).unsqueeze(0)
tones = tones.to(device).unsqueeze(0)
lang_ids = lang_ids.to(device).unsqueeze(0)
bert = bert.to(device).unsqueeze(0)
ja_bert = ja_bert.to(device).unsqueeze(0)
x_tst_lengths = torch.LongTensor([phones.size(0)]).to(device)
del phones
speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(device)
audio = (
net_g.infer(
x_tst,
x_tst_lengths,
speakers,
tones,
lang_ids,
bert,
ja_bert,
sdp_ratio=sdp_ratio,
noise_scale=noise_scale,
noise_scale_w=noise_scale_w,
length_scale=length_scale,
)[0][0, 0]
.data.cpu()
.float()
.numpy()
)
del x_tst, tones, lang_ids, bert, x_tst_lengths, speakers
return audio
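# Minimal usage sketch for infer() (hedged: assumes net_g and hps have been
# initialized as in __main__ below, and that sid is a key of hps.data.spk2id,
# such as the BandList names above):
#
#     wav = infer("こんにちは", sdp_ratio=0.2, noise_scale=0.6,
#                 noise_scale_w=0.8, length_scale=1.0,
#                 sid="SAITO_ASUKA", language="JP")
#     soundfile.write("demo.wav", wav, hps.data.sampling_rate)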
def tts_fn(
    text, speaker, sdp_ratio, noise_scale, noise_scale_w, length_scale, LongSentence
):
    if not LongSentence:
        with torch.no_grad():
            audio = infer(
                text,
                sdp_ratio=sdp_ratio,
                noise_scale=noise_scale,
                noise_scale_w=noise_scale_w,
                length_scale=length_scale,
                sid=speaker,
                language="JP" if is_japanese(text) else "ZH",
            )
        torch.cuda.empty_cache()
        return (hps.data.sampling_rate, audio)
    else:
        # Map full-width/square brackets onto <...> so extrac() strips them as tags.
        for left in ['【', '[', '(', '(']:
            text = text.replace(left, '<')
        for right in ['】', ']', ')', ')']:
            text = text.replace(right, '>')
        final_list = extrac(text.replace('“', '').replace('”', ''))
        audio_fin = []
        for sentence in final_list:
            with torch.no_grad():
                audio = infer(
                    sentence,
                    sdp_ratio=sdp_ratio,
                    noise_scale=noise_scale,
                    noise_scale_w=noise_scale_w,
                    length_scale=length_scale,
                    sid=speaker,
                    # Detect the language per sentence, not for the whole input.
                    language="JP" if is_japanese(sentence) else "ZH",
                )
            audio_fin.append(audio)
        # Write at the model's sampling rate rather than a hard-coded 44100 Hz.
        soundfile.write(
            "tts_output.mp3",
            np.concatenate(audio_fin),
            hps.data.sampling_rate,
            format="mp3",
        )
        return "tts_output.mp3"
def split_into_sentences(text):
    """Split text into sentences on Chinese/Japanese sentence-ending punctuation."""
    sentences = re.split(r'(?<=[。!?…\n])', text)
    return [sentence.strip() for sentence in sentences if sentence]
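# Doctest-style example for the splitter above (the lookbehind keeps the
# punctuation attached to its sentence):
#
#     split_into_sentences("今天天气好。出去走走?")
#     # -> ['今天天气好。', '出去走走?']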
def seconds_to_ass_time(seconds):
    """Convert seconds to the ASS subtitle time format (H:MM:SS.CS)."""
    hours = int(seconds / 3600)
    minutes = int((seconds % 3600) / 60)
    # Take the fractional part before truncating seconds, otherwise it is always 0.
    centiseconds = int((seconds - int(seconds)) * 100)
    seconds = int(seconds) % 60
    return "{:01d}:{:02d}:{:02d}.{:02d}".format(hours, minutes, seconds, centiseconds)
if __name__ == "__main__":
    import pyzipper

    # The model archive is AES-encrypted; the password comes from the TOKEN1 secret.
    # Indexing os.environ fails fast with a clear KeyError if the secret is missing.
    hf_token1 = os.environ["TOKEN1"].encode("utf-8")
    with pyzipper.AESZipFile('./N/vits.zip') as zf:
        zf.pwd = hf_token1
        zf.extractall()
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--model", default="./G_51000.pth", help="path of your model"
)
parser.add_argument(
"-c",
"--config",
default="./N/config.json",
help="path of your config file",
)
    parser.add_argument(
        "--share", action="store_true", help="make link public"
    )
parser.add_argument(
"-d", "--debug", action="store_true", help="enable DEBUG-LEVEL log"
)
args = parser.parse_args()
    if args.debug:
        logger.info("Enable DEBUG-LEVEL log")
        # basicConfig() is a no-op once the root logger is configured,
        # so set the level on the root logger directly.
        logging.getLogger().setLevel(logging.DEBUG)
hps = utils.get_hparams_from_file(args.config)
device = (
"cuda:0"
if torch.cuda.is_available()
else (
"mps"
if sys.platform == "darwin" and torch.backends.mps.is_available()
else "cpu"
)
)
net_g = SynthesizerTrn(
len(symbols),
hps.data.filter_length // 2 + 1,
hps.train.segment_size // hps.data.hop_length,
n_speakers=hps.data.n_speakers,
**hps.model,
).to(device)
_ = net_g.eval()
_ = utils.load_checkpoint(args.model, net_g, None, skip_optimizer=True)
speaker_ids = hps.data.spk2id
speakers = list(speaker_ids.keys())
languages = ["ZH", "JP"]
with gr.Blocks() as app:
        gr.Markdown(
            "Other toys: Nogizaka46 So-VITS voice-conversion AI song covers: "
            "<a href='https://sovits4.nogizaka46.cc/'>sovits4.nogizaka46.cc</a>"
        )
        gr.Markdown(
            "[Nogizaka46 all-member TTS] Please obey all applicable laws and regulations when using this model!\n"
            "When publishing derivative works, please credit this project's URL: "
            "<a href='https://vits.nogizaka46.cc/'>vits.nogizaka46.cc</a>"
        )
for band in BandList:
with gr.TabItem(band):
for name in BandList[band]:
with gr.TabItem(name):
with gr.Row():
with gr.Column():
                                text = gr.TextArea(
                                    label="Enter pure Japanese or Chinese text",
                                    placeholder="Enter pure Japanese or Chinese text",
                                    value="純粋な日本語または中国語を入力してください。",
                                )
                                btn = gr.Button("Generate", variant="primary")
                                audio_output = gr.Audio(label="Output Audio")
                                LongSentence = gr.Checkbox(value=True, label="Generate LongSentence")
                            with gr.Accordion(label="TTS settings", open=True):
                                sdp_ratio = gr.Slider(
                                    minimum=0, maximum=1, value=0.2, step=0.01, label="SDP/DP mix ratio"
                                )
                                noise_scale = gr.Slider(
                                    minimum=0.1, maximum=2, value=0.6, step=0.01, label="Emotion scale"
                                )
                                noise_scale_w = gr.Slider(
                                    minimum=0.1, maximum=2, value=0.8, step=0.01, label="Phoneme length"
                                )
                                length_scale = gr.Slider(
                                    minimum=0.1, maximum=2, value=1.05, step=0.01, label="Speech length scale"
                                )
                                speaker = gr.Dropdown(
                                    choices=speakers, value=name, label="Speaker (choosing here keeps the entered text)"
                                )
btn.click(
tts_fn,
inputs=[
text,
speaker,
sdp_ratio,
noise_scale,
noise_scale_w,
length_scale,
LongSentence,
],
outputs=[audio_output],
)
app.launch()