alvi123 committed on
Commit
8037f98
•
1 Parent(s): 4ebf57c

Update README.md

Files changed (1)
  1. README.md +0 -103
README.md CHANGED
@@ -1,103 +0,0 @@
- import gradio as gr
- import wave
- import matplotlib.pyplot as plt
- import numpy as np
- from extract_features import *
- import pickle
- import soundfile
- import librosa
-
- # Load the pre-trained random-forest emotion classifier from disk.
- classifier = pickle.load(open('finalized_rf.sav', 'rb'))
-
- def emotion_predict(input):
-     # Build the MFCC/chroma/mel/contrast/tonnetz feature vector for the clip.
-     input_features = extract_feature(input, mfcc=True, chroma=True, mel=True, contrast=True, tonnetz=True)
-     # predict() returns an array of labels; take the single prediction.
-     rf_prediction = classifier.predict(input_features.reshape(1, -1))[0]
-     if rf_prediction == 'happy':
-         return 'Happy 😎'
-     elif rf_prediction == 'neutral':
-         return 'Neutral 😐'
-     elif rf_prediction == 'sad':
-         return 'Sad 😒'
-     else:
-         return 'Angry 😀'
-
-
- def plot_fig(input):
-     # Read the raw 16-bit PCM samples from the wav file.
-     with wave.open(input, 'rb') as wav:
-         raw = np.frombuffer(wav.readframes(-1), "int16")
-         sampleRate = wav.getframerate()
-
-     # Time axis in seconds, one point per sample.
-     Time = np.linspace(0, len(raw) / sampleRate, num=len(raw))
-
-     # figsize must be set when the figure is created; changing rcParams
-     # after plt.figure() would not resize the already-created figure.
-     fig = plt.figure(figsize=(50, 15))
-
-     plt.title("Waveform Of the Audio", fontsize=25)
-     plt.xticks(fontsize=15)
-     plt.yticks(fontsize=15)
-     plt.ylabel("Amplitude", fontsize=25)
-     plt.plot(Time, raw, color='red')
-
-     return fig
-
-
- with gr.Blocks() as app:
-     gr.Markdown(
-         """
-         # Speech Emotion Detector 🎡😍
-         This application classifies the input audio 🔊 by the emotion conveyed in the speech, into four categories:
-         1. Happy 😎
-         2. Neutral 😐
-         3. Sad 😒
-         4. Angry 😀
-         """
-     )
-     with gr.Tab("Record Audio"):
-         record_input = gr.Audio(source="microphone", type="filepath")
-
-         with gr.Accordion("Audio Visualization", open=False):
-             gr.Markdown(
-                 """
-                 ### Visualization works only after the audio has been submitted
-                 """
-             )
-             plot_record = gr.Button("Display Audio Signal")
-             plot_record_c = gr.Plot(label='Waveform Of the Audio')
-
-         record_button = gr.Button("Detect Emotion")
-         record_output = gr.Text(label='Emotion Detected')
-
-     with gr.Tab("Upload Audio File"):
-         gr.Markdown(
-             """
-             ## Uploaded audio must be in .wav format
-             """
-         )
-
-         upload_input = gr.Audio(type="filepath")
-
-         with gr.Accordion("Audio Visualization", open=False):
-             gr.Markdown(
-                 """
-                 ### Visualization works only after the audio has been submitted
-                 """
-             )
-             plot_upload = gr.Button("Display Audio Signal")
-             plot_upload_c = gr.Plot(label='Waveform Of the Audio')
-
-         upload_button = gr.Button("Detect Emotion")
-         upload_output = gr.Text(label='Emotion Detected')
-
-     # Wire the buttons to the prediction and plotting handlers.
-     record_button.click(emotion_predict, inputs=record_input, outputs=record_output)
-     upload_button.click(emotion_predict, inputs=upload_input, outputs=upload_output)
-     plot_record.click(plot_fig, inputs=record_input, outputs=plot_record_c)
-     plot_upload.click(plot_fig, inputs=upload_input, outputs=plot_upload_c)
-
- app.launch()
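
The removed script star-imports `extract_feature` from extract_features.py, which is not part of this commit. For reference, here is a minimal sketch of what that helper is assumed to look like, following the common librosa recipe that returns a single 1-D vector of per-feature means; the exact signature and feature set are assumptions inferred only from the call site above, not from the repository:

```python
# Sketch of the assumed extract_features.extract_feature helper -- not the
# repository's actual implementation. It must return a 1-D vector so that
# reshape(1, -1) in emotion_predict yields one sample for the classifier.
import numpy as np
import soundfile
import librosa

def extract_feature(file_name, mfcc=False, chroma=False, mel=False,
                    contrast=False, tonnetz=False):
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype="float32")
        sample_rate = sound_file.samplerate
    if X.ndim > 1:
        X = X.mean(axis=1)  # down-mix multi-channel audio to mono
    stft = np.abs(librosa.stft(X))
    result = np.array([])
    if mfcc:
        result = np.hstack((result, np.mean(
            librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)))
    if chroma:
        result = np.hstack((result, np.mean(
            librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)))
    if mel:
        result = np.hstack((result, np.mean(
            librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)))
    if contrast:
        result = np.hstack((result, np.mean(
            librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0)))
    if tonnetz:
        result = np.hstack((result, np.mean(
            librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0)))
    return result
```

With a helper of this shape in place, the handler can be smoke-tested without launching the UI, e.g. `print(emotion_predict("sample.wav"))` for any short mono recording, where `sample.wav` is a placeholder path.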