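# app.py: Gradio demo for DIFF-SVC inference (renders an uploaded WAV through a trained voice model).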
import gradio as gr
from utils.hparams import hparams
from preprocessing.data_gen_utils import get_pitch_parselmouth,get_pitch_crepe
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import utils
import librosa
import torchcrepe
from infer import *
import logging
from infer_tools.infer_tool import *
import io
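# Svc and run_clip used below are provided by the diff-svc inference modules pulled in via the wildcard imports above.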
def render_audio(audio_file):
    print(audio_file)
    ############
    logging.getLogger('numba').setLevel(logging.WARNING)
    # Project folder name (the one used during training)
    project_name = "Unnamed"
    model_path = f'./checkpoints/Unnamed/model_ckpt_steps_192000.ckpt'
    config_path = f'./checkpoints/Unnamed/config.yaml'
    hubert_gpu = True
    svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
    print('model loaded')
    wav_fn = audio_file
    demoaudio, sr = librosa.load(wav_fn)
    key = 0  # Pitch shift, positive or negative (in semitones)
    # Speed-up multiplier for sampling
    pndm_speedup = 20
    wav_gen = 'queeeeee.wav'  # Changing the extension saves the audio in another format, e.g. flac for lossless compression
    f0_tst, f0_pred, audio = run_clip(svc_model, file_path=wav_fn, key=key, acc=pndm_speedup, use_crepe=True, use_pe=True, thre=0.05,
                                      use_gt_mel=False, add_noise_step=500, project_name=project_name, out_path=wav_gen)
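    # Hand the rendered file path back to Gradio so it can be played in the UI
    # (assumes run_clip writes the result to out_path, as in the diff-svc inference script).
    return wav_gen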
def segment(audio):
    pass  # Placeholder left over from a demo template; not used by the interface below
demo = gr.Blocks()
with demo:
gr.Markdown("# **<p align='center'>DIFF-SVC Inference</p>**")
gr.Markdown(
"""
<p style='text-align: center'>
Render whatever model you want with this space!
</p>
"""
)
ckpt_file = gr.File(label= 'Load your CKPT', type="file")
config_file = gr.File(label= 'Load your Config File', type="file")
audio_file = gr.Audio(label = 'Load your WAV', type="filepath")
gr.Slider(2, 20, value=4)
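    # Output slot for the rendered audio; added so the path returned by render_audio is surfaced in the UI.
    audio_out = gr.Audio(label='Rendered output', type="filepath")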
    b1 = gr.Button("Render")
    b1.click(fn=render_audio, inputs=audio_file, outputs=audio_out)
demo.launch()