Hatman committed
Commit 675e1e5
1 Parent(s): fb7a30b
Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -1,8 +1,7 @@
 import gradio as gr
 import spaces
 import torch
-#import librosa
-#import numpy as np
+import torchaudio
 from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
@@ -13,13 +12,14 @@ model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name).to(device)
 
 def preprocess_audio(audio):
     print('hallo')
-    #audio_array, sampling_rate = librosa.load(audio, sr=16000)  # Load and resample to 16kHz
-    #return {'speech': audio_array, 'sampling_rate': sampling_rate}
+    waveform, sampling_rate = torchaudio.load(audio)
+    resampled_waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16000)(waveform)
+    return {'speech': resampled_waveform.numpy().flatten(), 'sampling_rate': 16000}
 
 @spaces.GPU
 def inference(audio):
     print('hello')
-    '''
+
     example = preprocess_audio(audio)
     inputs = feature_extractor(example['speech'], sampling_rate=16000, return_tensors="pt", padding=True)
     inputs = inputs.to(device)  # Move inputs to GPU
@@ -27,7 +27,7 @@ def inference(audio):
     logits = model(**inputs).logits
     predicted_ids = torch.argmax(logits, dim=-1)
     return model.config.id2label[predicted_ids.item()], logits, predicted_ids  # Move tensors back to CPU for further processing
-    '''
+
 
 iface = gr.Interface(fn=inference,
                      inputs=gr.Audio(type="filepath"),
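
Note: the new torchaudio-based preprocessing path can be tried outside the Space with a minimal sketch like the one below. It mirrors the preprocess_audio added in this commit; the target_sr parameter, the 'sample.wav' path, and the channel-averaging remark are illustrative additions, not part of the commit.

import torchaudio

def preprocess_audio(audio_path, target_sr=16000):
    # torchaudio.load returns (waveform, sample_rate); waveform has shape [channels, frames]
    waveform, sampling_rate = torchaudio.load(audio_path)
    # Resample to the 16 kHz rate expected by the Wav2Vec2 feature extractor
    resampled = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=target_sr)(waveform)
    # .flatten() concatenates channels end-to-end for multi-channel input, as in the committed code;
    # averaging channels first (resampled.mean(dim=0)) would be a common alternative for a mono downmix
    return {'speech': resampled.numpy().flatten(), 'sampling_rate': target_sr}

example = preprocess_audio('sample.wav')  # 'sample.wav' is a placeholder path
print(example['speech'].shape, example['sampling_rate'])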