import gradio as gr
import spaces
import torch
import torchaudio
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification

device = "cuda" if torch.cuda.is_available() else "cpu"

model_name = "Hemg/human-emotion-detection"
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_name)
model = Wav2Vec2ForSequenceClassification.from_pretrained(model_name).to(device)  # Put the model on the GPU when one is available
print(f"Using device: {device}")


def preprocess_audio(audio):
    # Load the file, mix multi-channel audio down to mono, and resample to the
    # 16 kHz rate the Wav2Vec2 feature extractor expects
    waveform, sampling_rate = torchaudio.load(audio)
    waveform = waveform.mean(dim=0, keepdim=True)
    resampled_waveform = torchaudio.transforms.Resample(orig_freq=sampling_rate, new_freq=16000)(waveform)
    return {'speech': resampled_waveform.numpy().flatten(), 'sampling_rate': 16000}
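# Example usage (the path is a placeholder): preprocess_audio("sample.wav")
# returns a dict with a 1-D float array under 'speech' and a fixed
# 'sampling_rate' of 16000.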

@spaces.GPU
def inference(audio):
    example = preprocess_audio(audio)
    inputs = feature_extractor(example['speech'], sampling_rate=16000, return_tensors="pt", padding=True)
    inputs = {key: tensor.to(device) for key, tensor in inputs.items()}  # Move inputs to the GPU
    with torch.no_grad():
        logits = model(**inputs).logits
    predicted_id = torch.argmax(logits, dim=-1)
    # Return only the label string: the interface below has a single Label output
    return model.config.id2label[predicted_id.item()]
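# gr.Label can also render per-class confidences when given a dict mapping
# label -> probability. A hedged variant of the return statement above
# (assumes a softmax over the logits is an acceptable confidence estimate):
#
#   probs = torch.softmax(logits, dim=-1)[0]
#   return {model.config.id2label[i]: p.item() for i, p in enumerate(probs)}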
    

iface = gr.Interface(fn=inference,
                     inputs=gr.Audio(type="filepath"),
                     outputs=[gr.Label(label="Predicted Sentiment")],
                     title="Audio Sentiment Analysis",
                     description="Upload an audio file or record one to analyze sentiment.")


iface.launch()
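
# A minimal sketch of querying the running app programmatically with
# gradio_client (the URL, audio path, and api_name are assumptions; verify
# them against your running app and gradio_client version):
#
#   from gradio_client import Client, handle_file
#
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict(handle_file("sample.wav"), api_name="/predict"))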