AlexTolstenko committed on
Commit
e6425ef
·
1 Parent(s): 56faed9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -18
app.py CHANGED
@@ -9,20 +9,20 @@ import gradio as gr
9
  from model import MFCC_CNN
10
 
11
  EMOTIONS = {
12
- 'neutral_Male': 9,
13
- 'happy_Male': 7,
14
- 'sad_Male': 11,
15
- 'angry_Male': 1,
16
- 'fear_Male': 5,
17
- 'disgust_Male': 3,
18
- 'surprise_Male': 13,
19
- 'neutral_Female': 8,
20
- 'happy_Female': 6,
21
- 'sad_Female': 10,
22
- 'angry_Female': 0,
23
- 'fear_Female': 4,
24
- 'disgust_Female': 2,
25
- 'surprise_Female': 12
26
  }
27
 
28
  # LOAD AUDIO
@@ -35,7 +35,7 @@ WIN_LENGTH = 2048
35
  WINDOW = 'hann'
36
  HOP_LENGTH = 512
37
 
38
- PATH = './chekpoint/models-epoch=97-val_loss=2.09.ckpt'
39
  ckpt = torch.load(PATH)
40
 
41
  pretrained_model = MFCC_CNN(14)
@@ -54,7 +54,7 @@ def processAudio(audio_file):
54
  feature_set = []
55
 
56
  mfcc = lr.feature.mfcc(y=signal,
57
- r=sr,
58
  n_mfcc=N_MFCC,
59
  win_length=WIN_LENGTH,
60
  window=WINDOW,
@@ -71,7 +71,7 @@ def processAudio(audio_file):
71
 
72
  demo = gr.Interface(
73
  fn=processAudio,
74
- inputs=gr.Audio(),
75
  outputs=gr.Label(),
76
  examples=[
77
  [os.path.join(os.path.dirname(__file__), "files/03-01-01-01-02-02-01.wav")],
@@ -81,4 +81,4 @@ demo = gr.Interface(
81
  )
82
 
83
  if __name__ == '__main__':
84
- demo.launch()
 
9
  from model import MFCC_CNN
10
 
11
  EMOTIONS = {
12
+ 9 : 'neutral_Male',
13
+ 7: 'happy_Male',
14
+ 11: 'sad_Male',
15
+ 1: 'angry_Male',
16
+ 5: 'fear_Male',
17
+ 3: 'disgust_Male',
18
+ 13 : 'surprise_Male',
19
+ 8 : 'neutral_Female',
20
+ 6 : 'happy_Female',
21
+ 10 : 'sad_Female',
22
+ 0 : 'angry_Female',
23
+ 4 : 'fear_Female',
24
+ 2 : 'disgust_Female',
25
+ 12 : 'surprise_Female'
26
  }
27
 
28
  # LOAD AUDIO
 
35
  WINDOW = 'hann'
36
  HOP_LENGTH = 512
37
 
38
+ PATH = 'C:\python_tmp\gradio_test\chekpoint\models-epoch=97-val_loss=2.09.ckpt'
39
  ckpt = torch.load(PATH)
40
 
41
  pretrained_model = MFCC_CNN(14)
 
54
  feature_set = []
55
 
56
  mfcc = lr.feature.mfcc(y=signal,
57
+ sr=sr,
58
  n_mfcc=N_MFCC,
59
  win_length=WIN_LENGTH,
60
  window=WINDOW,
 
71
 
72
  demo = gr.Interface(
73
  fn=processAudio,
74
+ inputs=gr.Audio(type='filepath'),
75
  outputs=gr.Label(),
76
  examples=[
77
  [os.path.join(os.path.dirname(__file__), "files/03-01-01-01-02-02-01.wav")],
 
81
  )
82
 
83
  if __name__ == '__main__':
84
+ demo.launch()