alvi123 committed on
Commit
b554590
β€’
1 Parent(s): 077f7d5
Files changed (1) hide show
  1. app.py +19 -20
app.py CHANGED
@@ -12,14 +12,14 @@ classifier = pickle.load(open('finalized_rf.sav', 'rb'))
12
  def emotion_predict(input):
13
  input_features = extract_feature(input, mfcc=True, chroma=True, mel=True, contrast=True, tonnetz=True)
14
  rf_prediction = classifier.predict(input_features.reshape(1,-1))
15
- if rf_prediction == 'kata-benda':
16
- return 'kata-benda'
17
- elif rf_prediction == 'kata-kerja':
18
- return 'kata-kerja'
19
- elif rf_prediction == 'kata-keterangan':
20
- return 'kata-keterangan'
21
  else:
22
- return 'kata-sifat'
23
 
24
 
25
  def plot_fig(input):
@@ -51,13 +51,12 @@ def plot_fig(input):
51
  with gr.Blocks() as app:
52
  gr.Markdown(
53
  """
54
- # πŸ’žPROLOVE 🎡🎸🎼
55
-
56
- This application classifies inputted audio according to pronunciation into four categories:
57
- 1. kata benda
58
- 2. kata kerja
59
- 3. kata keterangan
60
- 4. kata sifat
61
  """
62
  )
63
  with gr.Tab("Record Audio"):
@@ -72,8 +71,8 @@ with gr.Blocks() as app:
72
  plot_record = gr.Button("Display Audio Signal")
73
  plot_record_c = gr.Plot(label='Waveform Of the Audio')
74
 
75
- record_button = gr.Button("Detection Parts of Speech")
76
- record_output = gr.Text(label = 'detected')
77
 
78
  with gr.Tab("Upload Audio File"):
79
  gr.Markdown(
@@ -93,12 +92,12 @@ with gr.Blocks() as app:
93
  plot_upload = gr.Button("Display Audio Signal")
94
  plot_upload_c = gr.Plot(label='Waveform Of the Audio')
95
 
96
- upload_button = gr.Button("Detection Parts of Speech")
97
- upload_output = gr.Text(label = 'detected')
98
-
99
  record_button.click(emotion_predict, inputs=record_input, outputs=record_output)
100
  upload_button.click(emotion_predict, inputs=upload_input, outputs=upload_output)
101
  plot_record.click(plot_fig, inputs=record_input, outputs=plot_record_c)
102
- plot_upload.click(plot_fig, inputs=upload_input, outputs=plot_upload_c)
103
 
104
  app.launch()
 
12
  def emotion_predict(input):
13
  input_features = extract_feature(input, mfcc=True, chroma=True, mel=True, contrast=True, tonnetz=True)
14
  rf_prediction = classifier.predict(input_features.reshape(1,-1))
15
+ if rf_prediction == 'happy':
16
+ return 'Happy 😎'
17
+ elif rf_prediction == 'neutral':
18
+ return 'Neutral 😐'
19
+ elif rf_prediction == 'sad':
20
+ return 'Sad 😒'
21
  else:
22
+ return 'Angry 😀'
23
 
24
 
25
  def plot_fig(input):
 
51
  with gr.Blocks() as app:
52
  gr.Markdown(
53
  """
54
+ # Speech Emotion Detector 🎡😍
55
+ This application classifies inputted audio πŸ”Š according to the verbal emotion into four categories:
56
+ 1. Happy 😎
57
+ 2. Neutral 😐
58
+ 3. Sad 😒
59
+ 4. Angry 😀
 
60
  """
61
  )
62
  with gr.Tab("Record Audio"):
 
71
  plot_record = gr.Button("Display Audio Signal")
72
  plot_record_c = gr.Plot(label='Waveform Of the Audio')
73
 
74
+ record_button = gr.Button("Detect Emotion")
75
+ record_output = gr.Text(label = 'Emotion Detected')
76
 
77
  with gr.Tab("Upload Audio File"):
78
  gr.Markdown(
 
92
  plot_upload = gr.Button("Display Audio Signal")
93
  plot_upload_c = gr.Plot(label='Waveform Of the Audio')
94
 
95
+ upload_button = gr.Button("Detect Emotion")
96
+ upload_output = gr.Text(label = 'Emotion Detected')
97
+
98
  record_button.click(emotion_predict, inputs=record_input, outputs=record_output)
99
  upload_button.click(emotion_predict, inputs=upload_input, outputs=upload_output)
100
  plot_record.click(plot_fig, inputs=record_input, outputs=plot_record_c)
101
+ plot_upload.click(plot_fig, inputs=upload_input, outputs=plot_upload_c)
102
 
103
  app.launch()