import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""

from utils.hparams import hparams
from preprocessing.data_gen_utils import get_pitch_parselmouth, get_pitch_crepe
import numpy as np
import matplotlib.pyplot as plt
import IPython.display as ipd
import utils
import librosa
import torch
import torchcrepe
from infer import *
import logging
from infer_tools.infer_tool import *

map_location = torch.device('cpu')  # EDIT FOR CPU

# Crude CPU workaround: patch torch's serialization.py in place so that
# torch.load() defaults to map_location='cpu', keeping GPU-saved checkpoints
# loadable on this machine.
serialization_path = "/home/user/.local/lib/python3.8/site-packages/torch/serialization.py"

# Open the file and read it into a string
with open(serialization_path) as f:
    text = f.read()

# Replace the original line with the new line
text = text.replace(
    "def load(f, map_location=None, pickle_module=pickle, **pickle_load_args):",
    "def load(f, map_location='cpu', pickle_module=pickle, **pickle_load_args):",
)

# Save the modified string to the original file
with open(serialization_path, "w") as f:
    f.write(text)
print("Replaced")

# Sanity check: re-read and print the patched file
with open(serialization_path) as f:
    text = f.read()
print(text)
############

logging.getLogger('numba').setLevel(logging.WARNING)

# Project folder name (the one used during training)
project_name = "Unnamed"
model_path = f'./checkpoints/Unnamed/model_ckpt_steps_192000.ckpt'
config_path = f'./checkpoints/Unnamed/config.yaml'
hubert_gpu = False
svc_model = Svc(project_name, config_path, hubert_gpu, model_path)
print('model loaded')
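
# A minimal inference sketch using the model loaded above. The run_clip call,
# its keyword arguments, and its three return values are assumed to match this
# repo's infer.py (pulled in via `from infer import *`); verify the signature
# there before running. The wav paths and the hparams key are placeholders.
wav_fn = './raw/input.wav'        # assumed location of the source vocal clip
wav_gen = './results/output.wav'  # assumed output path for the converted clip
key = 0                           # pitch shift in semitones
pndm_speedup = 20                 # diffusion sampling acceleration factor
f0_tst, f0_pred, audio = run_clip(
    svc_model,
    file_path=wav_fn,
    key=key,
    acc=pndm_speedup,
    use_crepe=False,    # parselmouth F0 extraction is much faster than CREPE on CPU
    use_pe=False,
    thre=0.05,
    use_gt_mel=False,
    add_noise_step=500,
    project_name=project_name,
    out_path=wav_gen,
)
# Assuming `audio` is the generated waveform, preview it in the notebook.
ipd.display(ipd.Audio(audio, rate=hparams['audio_sample_rate']))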