aliabd commited on
Commit
a226547
·
1 Parent(s): 72ca791

Upload with huggingface_hub

Browse files
README.md CHANGED
@@ -1,12 +1,11 @@
 
1
  ---
2
- title: Main Note Main
3
- emoji: 🚀
4
- colorFrom: pink
5
- colorTo: yellow
6
  sdk: gradio
7
  sdk_version: 3.6
8
- app_file: app.py
9
  pinned: false
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+
2
  ---
3
+ title: main_note_main
4
+ emoji: 🔥
5
+ colorFrom: indigo
6
+ colorTo: indigo
7
  sdk: gradio
8
  sdk_version: 3.6
9
+ app_file: run.py
10
  pinned: false
11
  ---
 
 
__pycache__/run.cpython-36.pyc ADDED
Binary file (1.4 kB). View file
 
audio/cantina.wav ADDED
Binary file (132 kB). View file
 
audio/recording1.wav ADDED
Binary file (639 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ scipy
2
+ numpy
3
+ matplotlib
+ https://gradio-main-build.s3.amazonaws.com/c3bec6153737855510542e8154391f328ac72606/gradio-3.6-py3-none-any.whl
run.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from math import log2, pow
2
+ import os
3
+
4
+ import numpy as np
5
+ from scipy.fftpack import fft
6
+
7
+ import gradio as gr
8
+
9
# Reference tuning: A4 = 440 Hz; C0 sits 4.75 octaves (57 semitones) below A4.
A4 = 440
C0 = A4 * pow(2, -4.75)
# Pitch-class names in ascending semitone order, starting at C.
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]


def get_pitch(freq):
    """Return the pitch-class name (e.g. "A", "C#") nearest to *freq* in Hz."""
    semitones_above_c0 = round(12 * log2(freq / C0))
    return name[semitones_above_c0 % 12]
18
+
19
+
20
def main_note(audio):
    """Estimate how the audio's spectral energy is distributed over the
    12 pitch classes.

    Args:
        audio: ``(sample_rate, samples)`` tuple as produced by ``gr.Audio``;
            ``samples`` is a 1-D mono or 2-D stereo numpy array.

    Returns:
        Dict mapping pitch-class name ("C" .. "B") to the fraction of total
        spectral magnitude attributed to it, suitable for ``gr.Label``.
    """
    rate, y = audio
    if y.ndim == 2:
        # Stereo input: keep only the first channel.
        y = y.T[0]
    n_samples = len(y)
    # One-sided magnitude spectrum with the standard 2/N amplitude scaling;
    # the corresponding frequency bins run from 0 Hz up to Nyquist (rate/2).
    magnitudes = 2.0 / n_samples * np.abs(fft(y)[: n_samples // 2])
    freqs = np.linspace(0.0, rate / 2.0, n_samples // 2)

    total_volume = np.sum(magnitudes)
    volume_per_pitch = {}
    for freq, volume in zip(freqs, magnitudes):
        if freq == 0:
            # Skip the DC component, which carries no pitch information.
            continue
        pitch = get_pitch(freq)
        volume_per_pitch[pitch] = volume_per_pitch.get(pitch, 0) + volume / total_volume
    # Cast numpy scalars to plain floats so the result is JSON-serializable.
    return {k: float(v) for k, v in volume_per_pitch.items()}
42
+
43
+
44
# Build the Gradio UI: record audio from the microphone and display the
# four most prominent pitch classes as a label.
example_dir = os.path.dirname(__file__)

demo = gr.Interface(
    main_note,
    gr.Audio(source="microphone"),
    gr.Label(num_top_classes=4),
    examples=[
        [os.path.join(example_dir, "audio/recording1.wav")],
        [os.path.join(example_dir, "audio/cantina.wav")],
    ],
    interpretation="default",
)

if __name__ == "__main__":
    demo.launch()
screenshot.png ADDED