import gradio as gr
from utils.hparams import hparams
from preprocessing.data_gen_utils import get_pitch_parselmouth,get_pitch_crepe
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import utils
import librosa
import torchcrepe
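# infer and infer_tools come from the DIFF-SVC repository (they provide Svc and run_clip)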
from infer import *
import logging
from infer_tools.infer_tool import *
import io
import tempfile
# Render function: run DIFF-SVC inference on the uploaded audio with the loaded model
def render_audio(audio_file):
    print("Audio:", audio_file, "| CKPT:", ckpt, "| YAML:", yaml)
    logging.getLogger('numba').setLevel(logging.WARNING)
    global wav_gen

    project_name = "Unnamed"
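    # Load the DIFF-SVC model from the uploaded checkpoint and config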
    model_path = ckpt
    config_path = yaml
    hubert_gpu = True
    svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
    print('model loaded')
    wav_fn = audio_file
    demoaudio, sr = librosa.load(wav_fn)
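    # Pitch shift in semitones (0 keeps the original key)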
    key = 0

    pndm_speedup = 20  # PNDM sampling acceleration factor for the diffusion model
    wav_gen = 'queeeeee.wav'  # Change the extension to save in another format, e.g. .flac for lossless compression
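    # use_crepe: CREPE-based F0 extraction; thre: voicing threshold; use_pe: run the pitch extractor on the generated mel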
    f0_tst, f0_pred, audio = run_clip(svc_model,file_path=wav_fn, key=key, acc=pndm_speedup, use_crepe=True, use_pe=True, thre=0.05,
                                        use_gt_mel=False, add_noise_step=500,project_name=project_name,out_path=wav_gen)
    # Return the rendered file path; the output Audio component plays it
    return wav_gen
# Transform the uploaded CKPT binary into a .ckpt file on disk
def transform_binary(ckpt_file):
    # Create a temporary file and write the binary contents to it
    temp_file = tempfile.NamedTemporaryFile(suffix='.ckpt', delete=False)
    temp_file.write(ckpt_file)
    temp_file.close()  # flush to disk so the checkpoint can be loaded later
    global ckpt
    ckpt = temp_file.name
    print("CKPT path is:", ckpt)
    return temp_file.name

#Transform the uploaded YAML binary into a .yaml file on disk
def transform_binary2(yaml_file):
    # Create a temporary file and write the binary contents to it
    temp_file = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False)
    temp_file.write(yaml_file)
    temp_file.close()  # flush to disk so the config can be read later
    global yaml
    yaml = temp_file.name
    print("YAML path is:", yaml)
    return temp_file.name

#Debug helper: print the path of an uploaded audio file
def play(audio_file):
    print(audio_file)

demo = gr.Blocks()
with demo:
    gr.Markdown("# **<p align='center'>DIFF-SVC Inference</p>**")

    gr.Markdown(
        """
        <p style='text-align: center'>
        Render whatever model you want with this space!
        </p>
        """
    )
    ckpt_file = gr.File(label='Load your CKPT', type="binary")
    yaml_file = gr.File(label='Load your YAML', type="binary")
    audio_file = gr.Audio(label='Load your WAV', type="filepath")
    output_audio = gr.Audio(label='Rendered output', type="filepath")
    #Button 1
    b1 = gr.Button("Decompile CKPT")
    b1.click(transform_binary, inputs=ckpt_file)
    #Button 2
    b2 = gr.Button("Decompile YAML")
    b2.click(transform_binary2, inputs=yaml_file)
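    # Both files must be decompiled before Render, since render_audio reads the global ckpt / yaml paths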
    #Button 4
    b4 = gr.Button("Render")
    b4.click(fn=render_audio, inputs=[audio_file], outputs=[output_audio])
    #Debug helper (unused): print the current YAML / CKPT paths
    def spam():
        print("YAML:", yaml, "| CKPT:", ckpt)
    #b5 = gr.Button("SPAM ME")
    #b5.click(fn=spam)
demo.launch()
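# Note: demo.launch(share=True) would additionally create a public share link when run locally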