L0SG committed on
Commit
d76c3f8
1 Parent(s): 36de23b
Files changed (1)
  1. README.md +2 -2
README.md CHANGED
@@ -59,10 +59,10 @@ from meldataset import get_mel_spectrogram
 
 # load wav file and compute mel spectrogram
 wav, sr = librosa.load('/path/to/your/audio.wav', sr=model.h.sampling_rate, mono=True) # wav is np.ndarray with shape [T_time] and values in [-1, 1]
-wav = torch.FloatTensor(wav).to(device).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time]
+wav = torch.FloatTensor(wav).unsqueeze(0) # wav is FloatTensor with shape [B(1), T_time]
 
 # compute mel spectrogram from the ground truth audio
-mel = get_mel_spectrogram(wav, model.h) # mel is FloatTensor with shape [B(1), C_mel, T_frame]
+mel = get_mel_spectrogram(wav, model.h).to(device) # mel is FloatTensor with shape [B(1), C_mel, T_frame]
 
 # generate waveform from mel
 with torch.inference_mode():
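
In effect, the patch keeps the waveform tensor on the CPU, computes the mel spectrogram there, and moves only the resulting mel tensor to the target device before synthesis. Below is a minimal sketch of the patched usage in context; it assumes a `model` with hyperparameters in `model.h` and a `device` already set up earlier in the README, and the call inside `torch.inference_mode()` is a placeholder rather than something taken from this hunk.

```python
import librosa
import torch

from meldataset import get_mel_spectrogram

# `model` (a vocoder exposing its hyperparameters as `model.h`) and `device`
# are assumed to be defined earlier in the README.

# load the wav file as a mono float array in [-1, 1] at the model's sampling rate
wav, sr = librosa.load('/path/to/your/audio.wav', sr=model.h.sampling_rate, mono=True)
wav = torch.FloatTensor(wav).unsqueeze(0)  # [B(1), T_time], stays on CPU

# compute the mel spectrogram on CPU, then move only the mel to the device
mel = get_mel_spectrogram(wav, model.h).to(device)  # [B(1), C_mel, T_frame]

# generate a waveform from the mel under inference mode
with torch.inference_mode():
    wav_gen = model(mel)  # placeholder forward call; see the full README for the actual synthesis step
```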