import gradio as gr
from utils.hparams import hparams
from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import utils
import librosa
import torchcrepe
from infer import *
import logging
from infer_tools.infer_tool import *
import io
import tempfile
# Render function
def render_audio(audio_file):
    print(audio_file)
    print(ckpt)
    print(yaml)

    logging.getLogger('numba').setLevel(logging.WARNING)
    # Project folder name, the same one used during training
    project_name = "Unnamed"
    model_path = ckpt
    config_path = yaml
    hubert_gpu = True
    svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
    print('model loaded')
    wav_fn = audio_file
    demoaudio, sr = librosa.load(wav_fn)
    key = -8  # Pitch shift, positive or negative (in semitones)
    # Inference speedup factor
    pndm_speedup = 20
    wav_gen = 'queeeeee.wav'  # Change the extension to save another format, e.g. flac for lossless compression
    f0_tst, f0_pred, audio = run_clip(svc_model, file_path=wav_fn, key=key, acc=pndm_speedup, use_crepe=True,
                                      use_pe=True, thre=0.05, use_gt_mel=False, add_noise_step=500,
                                      project_name=project_name, out_path=wav_gen)
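# run_clip saves the rendered audio to out_path (wav_gen). One way to surface it in the
# UI (a sketch only, not wired up above) would be to return wav_gen from render_audio and
# pass outputs=gr.Audio(type="filepath") to the Render button's click() call below.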
############################################
# Transform ckpt binary into .ckpt
def transform_binary(ckpt_file):
    # Create a temporary file and write the binary contents to it
    temp_file = tempfile.NamedTemporaryFile(suffix='.ckpt', delete=False)
    temp_file.write(ckpt_file)
    temp_file.close()  # flush the checkpoint to disk so it can be loaded later
    print("CKPT Path is:", temp_file.name)
    global ckpt
    ckpt = temp_file.name
    return temp_file.name
# Transform yaml binary into .yaml
def transform_binary2(yaml_file):
    # Create a temporary file and write the binary contents to it
    temp_file = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False)
    temp_file.write(yaml_file)
    temp_file.close()  # flush the config to disk
    print("YAML Path is:", temp_file.name)
    global yaml
    yaml = temp_file.name
    return temp_file.name
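# Both "Decompile" buttons must be clicked before "Render": render_audio reads the
# module-level globals `ckpt` and `yaml` set by the two handlers above, and calling
# it before they exist raises a NameError.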
# Play audio (helper; currently not wired to any button)
def play(audio_file):
    print(audio_file)

# Unused input/output components
upload_input = gr.inputs.File()
output_label = gr.outputs.Label()
demo = gr.Blocks()
with demo:
    gr.Markdown("# **<p align='center'>DIFF-SVC Inference</p>**")
    gr.Markdown(
        """
        <p style='text-align: center'>
        Render whatever model you want with this space!
        </p>
        """
    )
    ckpt_file = gr.File(label='Load your CKPT', type="binary")
    yaml_file = gr.File(label='Load your YAML', type="binary")
    audio_file = gr.Audio(label='Load your WAV', type="filepath")
    # Button 1
    b1 = gr.Button("Decompile CKPT")
    b1.click(transform_binary, inputs=ckpt_file)
    # Button 2
    b2 = gr.Button("Decompile YAML")
    b2.click(transform_binary2, inputs=yaml_file)
    # Button 4
    b4 = gr.Button("Render")
    b4.click(fn=render_audio, inputs=[audio_file])

    def spam():
        print(yaml)
        print(ckpt)
    # b5 = gr.Button("SPAM ME")
    # b5.click(fn=spam)

demo.launch(share=True)
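# share=True also prints a temporary public *.gradio.live link at startup,
# so the demo can be opened from outside the Space or local runtime.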