Update demo.py: add preprocessing, F0 extraction, and index-training functions
demo.py
CHANGED
@@ -1,8 +1,104 @@
 import os
 import shutil
 from os import listdir
-import gradio as gr
 from colorama import Fore
+import numpy as np
+import faiss
+from pathlib import Path
+from sklearn.cluster import MiniBatchKMeans
+import traceback
+import gradio as gr
+
+# Function to preprocess the training dataset
+def preprocess_data(model_name, dataset_folder):
+    logs_path = f'/content/RVC/logs/{model_name}'
+    temp_DG_path = '/content/temp_DG'
+
+    if os.path.exists(logs_path):
+        print("Model already exists; resuming training.")
+        os.makedirs(temp_DG_path, exist_ok=True)
+
+        # Set aside the D_/G_ checkpoints so training can resume
+        for item in os.listdir(logs_path):
+            item_path = os.path.join(logs_path, item)
+            if os.path.isfile(item_path) and (item.startswith('D_') or item.startswith('G_')) and item.endswith('.pth'):
+                shutil.copy(item_path, temp_DG_path)
+
+        for item in os.listdir(logs_path):
+            item_path = os.path.join(logs_path, item)
+            if os.path.isfile(item_path):
+                os.remove(item_path)
+            elif os.path.isdir(item_path):
+                shutil.rmtree(item_path)
+
+        for file_name in os.listdir(temp_DG_path):
+            shutil.move(os.path.join(temp_DG_path, file_name), logs_path)
+
+        shutil.rmtree(temp_DG_path)
+
+    if len(os.listdir(dataset_folder)) < 1:
+        return "Error: Dataset folder is empty."
+
+    os.makedirs(f'./logs/{model_name}', exist_ok=True)
+    os.system(f"python infer/modules/train/preprocess.py {dataset_folder} 32000 2 ./logs/{model_name} False 3.0")
+
+    with open(f'./logs/{model_name}/preprocess.log', 'r') as f:
+        log_content = f.read()
+
+    if 'end preprocess' in log_content:
+        return "Success: Data preprocessing complete."
+    else:
+        return "Error preprocessing data. Check your dataset folder."
+
+# Function to extract the F0 (pitch) feature
+def extract_f0_feature(model_name, f0method):
+    if f0method != "rmvpe_gpu":
+        os.system(f"python infer/modules/train/extract/extract_f0_print.py ./logs/{model_name} 2 {f0method}")
+    else:
+        os.system(f"python infer/modules/train/extract/extract_f0_rmvpe.py 1 0 0 ./logs/{model_name} True")
+
+    with open(f'./logs/{model_name}/extract_f0_feature.log', 'r') as f:
+        log_content = f.read()
+
+    if 'all-feature-done' in log_content:
+        return "Success: F0 feature extraction complete."
+    else:
+        return "Error extracting F0 feature."
+
+# Function to train the faiss feature index
+def train_index(exp_dir1, version19):
+    exp_dir = f"logs/{exp_dir1}"
+    os.makedirs(exp_dir, exist_ok=True)
+    feature_dir = f"{exp_dir}/3_feature768" if version19 == "v2" else f"{exp_dir}/3_feature256"
+
+    if not os.path.exists(feature_dir) or len(os.listdir(feature_dir)) == 0:
+        return "Please run feature extraction first."
+
+    npys = [np.load(f"{feature_dir}/{name}") for name in sorted(os.listdir(feature_dir))]
+    big_npy = np.concatenate(npys, axis=0)
+    big_npy_idx = np.arange(big_npy.shape[0])
+    np.random.shuffle(big_npy_idx)
+    big_npy = big_npy[big_npy_idx]
+
+    if big_npy.shape[0] > 2e5:  # compress very large feature sets to 10k k-means centroids
+        big_npy = MiniBatchKMeans(n_clusters=10000, batch_size=256, init="random").fit(big_npy).cluster_centers_
+
+    np.save(f"{exp_dir}/total_fea.npy", big_npy)
+    n_ivf = min(int(16 * np.sqrt(big_npy.shape[0])), big_npy.shape[0] // 39)
+
+    index = faiss.index_factory(768 if version19 == "v2" else 256, f"IVF{n_ivf},Flat")
+    index.train(big_npy)
+    faiss.write_index(index, f"{exp_dir}/trained_IVF{n_ivf}_Flat_nprobe_1_{exp_dir1}_{version19}.index")
+
+    batch_size_add = 8192
+    for i in range(0, big_npy.shape[0], batch_size_add):
+        index.add(big_npy[i:i + batch_size_add])
+
+    faiss.write_index(index, f"{exp_dir}/added_IVF{n_ivf}_Flat_nprobe_1_{exp_dir1}_{version19}.index")
+    return f"Indexing completed. Index saved to {exp_dir}/added_IVF{n_ivf}_Flat_nprobe_1_{exp_dir1}_{version19}.index"
+
+
+
 
 def run_inference(model_name, pitch, input_path, f0_method, save_as, index_rate, volume_normalization, consonant_protection):
     # Setting paths for model and index files