import gradio as gr
from utils.hparams import hparams
from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import utils
import librosa
import torchcrepe
from infer import *
import logging
from infer_tools.infer_tool import *
import io


def render_audio(ckpt_file, config_file, audio_file, pndm_speedup):
    print(audio_file)
    logging.getLogger('numba').setLevel(logging.WARNING)

    # Project folder name (the one used during training).
    project_name = "Unnamed"
    # Use the uploaded checkpoint/config when provided; otherwise fall back to the
    # bundled model. (Assumes Gradio 3.x, where gr.File values expose a temp path via .name.)
    model_path = ckpt_file.name if ckpt_file else './checkpoints/Unnamed/model_ckpt_steps_192000.ckpt'
    config_path = config_file.name if config_file else './checkpoints/Unnamed/config.yaml'
    hubert_gpu = True
    svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
    print('model loaded')

    wav_fn = audio_file
    demoaudio, sr = librosa.load(wav_fn)
    key = 0  # Pitch shift in semitones; positive and negative values are supported.

    # Output file name; change the extension to save in another format,
    # e.g. .flac for lossless compression.
    wav_gen = 'queeeeee.wav'
    f0_tst, f0_pred, audio = run_clip(svc_model, file_path=wav_fn, key=key, acc=int(pndm_speedup),
                                      use_crepe=True, use_pe=True, thre=0.05,
                                      use_gt_mel=False, add_noise_step=500,
                                      project_name=project_name, out_path=wav_gen)
    return wav_gen

demo = gr.Blocks()
with demo:
    gr.Markdown("# **<p align='center'>DIFF-SVC Inference</p>**")

    gr.Markdown(
        """
        <p style='text-align: center'>
        Render whatever model you want with this space!
        </p>
        """
    )
    ckpt_file = gr.File(label='Load your CKPT', type="file")
    config_file = gr.File(label='Load your Config File', type="file")
    audio_file = gr.Audio(label='Load your WAV', type="filepath")
    # The slider is assumed to control the PNDM speedup passed to run_clip as `acc`.
    speedup = gr.Slider(2, 20, value=4, label='Acceleration (PNDM speedup)')
    output_audio = gr.Audio(label='Rendered audio', type="filepath")
    b1 = gr.Button("Render")
    b1.click(fn=render_audio,
             inputs=[ckpt_file, config_file, audio_file, speedup],
             outputs=output_audio)


demo.launch()