Hatman committed on
Commit
90f8c97
1 Parent(s): a1288b8

Remove Numpy

Browse files
Files changed (2) hide show
  1. app.py +3 -6
  2. requirements.txt +1 -2
app.py CHANGED
@@ -2,24 +2,21 @@ import gradio as gr
2
  import spaces
3
  import torch
4
  import librosa
5
- import numpy as np
6
  from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
7
 
8
-
9
-
10
# Run on the first CUDA GPU when available, otherwise on CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model_name = "Hemg/human-emotion-detection"
# NOTE: a Wav2Vec2FeatureExtractor is a plain preprocessing object, not an
# nn.Module — it has no .to() method, so calling .to(device) on it raises
# AttributeError. Only the model itself is moved to the device.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name).to(device)
15
 
16
def preprocess_audio(audio):
    """Load an audio file and resample it to 16 kHz.

    Args:
        audio: path or file-like object accepted by ``librosa.load``.

    Returns:
        dict with ``'speech'`` (the waveform array) and
        ``'sampling_rate'`` (always 16000 after resampling).
    """
    # Bug fix: the original body read an undefined name `audio_file_path`
    # instead of the function's own parameter, raising NameError on every call.
    audio_array, sampling_rate = librosa.load(audio, sr=16000)  # Load and resample to 16kHz
    return {'speech': audio_array, 'sampling_rate': sampling_rate}
19
 
20
  @spaces.GPU
21
  def inference(audio):
22
- example = preprocess_audio(audio_file_path)
23
  inputs = feature_extractor(example['speech'], sampling_rate=16000, return_tensors="pt", padding=True)
24
  inputs = inputs.to(device) # Move inputs to GPU
25
  with torch.no_grad():
 
2
  import spaces
3
  import torch
4
  import librosa
 
5
  from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification
6
 
 
 
7
# Prefer the first CUDA GPU; fall back to CPU when none is present.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

model_name = "Hemg/human-emotion-detection"
# NOTE: Wav2Vec2FeatureExtractor is a preprocessing helper, not an nn.Module;
# it exposes no .to() method, so chaining .to(device) here would raise
# AttributeError at import time. Keep the extractor on the host and move
# only the classification model to the selected device.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name).to(device)
12
 
13
def preprocess_audio(audio):
    """Decode *audio* with librosa at a fixed 16 kHz rate.

    Returns a dict holding the waveform under ``'speech'`` and the
    (post-resampling) rate under ``'sampling_rate'``.
    """
    # librosa resamples during load, so no separate resampling step is needed.
    waveform, rate = librosa.load(audio, sr=16000)
    return {'speech': waveform, 'sampling_rate': rate}
16
 
17
  @spaces.GPU
18
  def inference(audio):
19
+ example = preprocess_audio(audio)
20
  inputs = feature_extractor(example['speech'], sampling_rate=16000, return_tensors="pt", padding=True)
21
  inputs = inputs.to(device) # Move inputs to GPU
22
  with torch.no_grad():
requirements.txt CHANGED
@@ -1,5 +1,4 @@
1
  gradio
2
  torch
3
  transformers
4
- librosa
5
- numpy
 
1
  gradio
2
  torch
3
  transformers
4
+ librosa