apoorvmi committed on
Commit
af180fb
·
1 Parent(s): 334f576

trying alternative

Browse files
Files changed (2) hide show
  1. app.py +231 -199
  2. requirements.txt +3 -1
app.py CHANGED
@@ -1,201 +1,233 @@
1
- import numpy as np
2
- import time
3
- import cv2
4
- import streamlit as st
5
- from pygame import mixer
6
-
7
- def state_machine(sumation, sound):
8
- # Check if blue color object present in the ROI
9
- yes = (sumation) > A1_thickness[0]*A1_thickness[1]*0.8
10
-
11
- # If present play the respective instrument.
12
- if yes and sound == 1:
13
- xylo_A1.play()
14
- elif yes and sound == 2:
15
- xylo_A2.play()
16
- elif yes and sound == 3: # New condition for A4 drum
17
- xylo_A3.play()
18
- elif yes and sound == 4:
19
- xylo_A4.play()
20
- elif yes and sound == 5:
21
- xylo_A5.play()
22
- elif yes and sound == 6:
23
- xylo_A6.play()
24
- elif yes and sound == 7:
25
- xylo_A7.play()
26
-
27
- def ROI_analysis(frame, sound):
28
- # converting the image into HSV
29
- hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
30
- # generating mask for
31
- mask = cv2.inRange(hsv, blueLower, blueUpper)
32
-
33
- # Calculating the number of white pixels depicting the blue color pixels in the ROI
34
- sumation = np.sum(mask)
35
-
36
- # Function that decides to play the instrument or not.
37
- state_machine(sumation, sound)
38
-
39
- return mask
40
-
41
- Verbose = False
42
-
43
- mixer.init()
44
- xylo_A1 = mixer.Sound('A1new.mp3')
45
- xylo_A2 = mixer.Sound('A2new.mp3')
46
- xylo_A3 = mixer.Sound('A3new.mp3')
47
- xylo_A4 = mixer.Sound('A4new.mp3')
48
- xylo_A5 = mixer.Sound('A5.wav')
49
- xylo_A6 = mixer.Sound('A6.wav')
50
- xylo_A7 = mixer.Sound('A7.wav')
51
-
52
- blueLower = (80, 150, 10)
53
- blueUpper = (120, 255, 255)
54
-
55
- camera = cv2.VideoCapture(0)
56
- ret, frame = camera.read()
57
- H, W = frame.shape[:2]
58
-
59
- A1 = cv2.resize(cv2.imread('A1.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
60
- A2 = cv2.resize(cv2.imread('A2.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
61
- A3 = cv2.resize(cv2.imread('A3.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
62
- A4 = cv2.resize(cv2.imread('A4.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
63
- A5 = cv2.resize(cv2.imread('A5.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
64
- A6 = cv2.resize(cv2.imread('A6.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
65
- A7 = cv2.resize(cv2.imread('A7.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
66
-
67
- A1_center = [np.shape(frame)[1]*1//8, np.shape(frame)[0]*6//8]
68
- A2_center = [np.shape(frame)[1]*2//8, np.shape(frame)[0]*6//8]
69
- A3_center = [np.shape(frame)[1]*3//8, np.shape(frame)[0]*6//8]
70
- A4_center = [np.shape(frame)[1]*4//8, np.shape(frame)[0]*6//8]
71
- A5_center = [np.shape(frame)[1]*5//8, np.shape(frame)[0]*6//8]
72
- A6_center = [np.shape(frame)[1]*6//8, np.shape(frame)[0]*6//8]
73
- A7_center = [np.shape(frame)[1]*7//8, np.shape(frame)[0]*6//8]
74
-
75
- A1_thickness = [300, 150]
76
- A1_top = [A1_center[0]-A1_thickness[0]//2, A1_center[1]-A1_thickness[1]//2]
77
- A1_btm = [A1_center[0]+A1_thickness[0]//2, A1_center[1]+A1_thickness[1]//2]
78
-
79
- A2_thickness = [300, 150]
80
- A2_top = [A2_center[0]-A2_thickness[0]//2, A2_center[1]-A2_thickness[1]//2]
81
- A2_btm = [A2_center[0]+A2_thickness[0]//2, A2_center[1]+A2_thickness[1]//2]
82
-
83
- A3_thickness = [300, 150]
84
- A3_top = [A3_center[0]-A3_thickness[0]//2, A3_center[1]-A3_thickness[1]//2]
85
- A3_btm = [A3_center[0]+A3_thickness[0]//2, A3_center[1]+A3_thickness[1]//2]
86
-
87
- A4_thickness = [300, 150]
88
- A4_top = [A4_center[0]-A4_thickness[0]//2, A4_center[1]-A4_thickness[1]//2]
89
- A4_btm = [A4_center[0]+A4_thickness[0]//2, A4_center[1]+A4_thickness[1]//2]
90
-
91
- A5_thickness = [300, 150]
92
- A5_top = [A5_center[0]-A5_thickness[0]//2, A5_center[1]-A5_thickness[1]//2]
93
- A5_btm = [A5_center[0]+A5_thickness[0]//2, A5_center[1]+A5_thickness[1]//2]
94
-
95
- A6_thickness = [300, 150]
96
- A6_top = [A6_center[0]-A6_thickness[0]//2, A6_center[1]-A6_thickness[1]//2]
97
- A6_btm = [A6_center[0]+A6_thickness[0]//2, A6_center[1]+A6_thickness[1]//2]
98
-
99
- A7_thickness = [300, 150]
100
- A7_top = [A7_center[0]-A7_thickness[0]//2, A7_center[1]-A7_thickness[1]//2]
101
- A7_btm = [A7_center[0]+A7_thickness[0]//2, A7_center[1]+A7_thickness[1]//2]
102
-
103
- st.title('Airial Xylophone')
104
-
105
- Verbose = st.checkbox('Display ROI')
106
-
107
- stframe = st.image([])
108
-
109
- time.sleep(1)
110
-
111
- while True:
112
- # Grab the current frame
113
- ret, frame = camera.read()
114
- frame = cv2.flip(frame, 1)
115
-
116
- if not ret:
117
- break
118
-
119
- # Selecting ROI corresponding to A1
120
- A1_ROI = np.copy(frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]])
121
- mask = ROI_analysis(A1_ROI, 1)
122
-
123
- # Selecting ROI corresponding to A2
124
- A2_ROI = np.copy(frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]])
125
- mask = ROI_analysis(A2_ROI, 2)
126
-
127
- # Selecting ROI corresponding to A3
128
- A3_ROI = np.copy(frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]])
129
- mask = ROI_analysis(A3_ROI, 3) # Play A3 audio
130
-
131
- # Selecting ROI corresponding to A4
132
- A4_ROI = np.copy(frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]])
133
- mask = ROI_analysis(A4_ROI, 4) # Play A4 audio
134
-
135
- # Selecting ROI corresponding to A5
136
- A5_ROI = np.copy(frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]])
137
- mask = ROI_analysis(A5_ROI, 5) # Play A5 audio
138
-
139
- # Selecting ROI corresponding to A6
140
- A6_ROI = np.copy(frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]])
141
- mask = ROI_analysis(A6_ROI, 6) # Play A6 audio
142
-
143
- # Selecting ROI corresponding to A7
144
- A7_ROI = np.copy(frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]])
145
- mask = ROI_analysis(A7_ROI, 7) # Play A7 audio
146
-
147
- # Writing text on the image
148
- cv2.putText(frame, 'Project : Airial Xylophone', (10, 30), 2, 1, (20, 20, 20), 2)
149
-
150
- # Display the ROI to view the blue colour being detected
151
- if Verbose:
152
- # Displaying the ROI in the Image
153
- frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]] = cv2.bitwise_and(
154
- frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]],
155
- frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]], mask=mask[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]])
156
- frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]] = cv2.bitwise_and(
157
- frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]],
158
- frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]], mask=mask[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]])
159
- frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]] = cv2.bitwise_and(
160
- frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]],
161
- frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]], mask=mask[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]])
162
- frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]] = cv2.bitwise_and(
163
- frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]],
164
- frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]], mask=mask[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]])
165
- frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]] = cv2.bitwise_and(
166
- frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]],
167
- frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]], mask=mask[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]])
168
- frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]] = cv2.bitwise_and(
169
- frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]],
170
- frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]], mask=mask[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]])
171
- frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]] = cv2.bitwise_and(
172
- frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]],
173
- frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]], mask=mask[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
175
  else:
176
- # Augmenting the image of the instruments on the frame
177
- frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]] = cv2.addWeighted(A1, 1,
178
- frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]], 1, 0)
179
- frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]] = cv2.addWeighted(A2, 1,
180
- frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]], 1, 0)
181
- frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]] = cv2.addWeighted(A3, 1,
182
- frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]], 1, 0)
183
- frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]] = cv2.addWeighted(A4, 1,
184
- frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]], 1, 0)
185
- frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]] = cv2.addWeighted(A5, 1,
186
- frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]], 1, 0)
187
- frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]] = cv2.addWeighted(A6, 1,
188
- frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]], 1, 0)
189
- frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]] = cv2.addWeighted(A7, 1,
190
- frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]], 1, 0)
191
-
192
- stframe.image(frame, channels="BGR")
193
-
194
- key = cv2.waitKey(1) & 0xFF
195
- # if the 'q' key is pressed, stop the loop
196
- if key == ord("q"):
197
- break
198
-
199
- # Cleanup the camera and close any open windows
200
- camera.release()
201
- cv2.destroyAllWindows()
 
1
+ # import numpy as np
2
+ # import time
3
+ # import cv2
4
+ # import streamlit as st
5
+ # from pygame import mixer
6
+
7
+ # def state_machine(sumation, sound):
8
+ # # Check if blue color object present in the ROI
9
+ # yes = (sumation) > A1_thickness[0]*A1_thickness[1]*0.8
10
+
11
+ # # If present play the respective instrument.
12
+ # if yes and sound == 1:
13
+ # xylo_A1.play()
14
+ # elif yes and sound == 2:
15
+ # xylo_A2.play()
16
+ # elif yes and sound == 3: # New condition for A4 drum
17
+ # xylo_A3.play()
18
+ # elif yes and sound == 4:
19
+ # xylo_A4.play()
20
+ # elif yes and sound == 5:
21
+ # xylo_A5.play()
22
+ # elif yes and sound == 6:
23
+ # xylo_A6.play()
24
+ # elif yes and sound == 7:
25
+ # xylo_A7.play()
26
+
27
+ # def ROI_analysis(frame, sound):
28
+ # # converting the image into HSV
29
+ # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
30
+ # # generating mask for
31
+ # mask = cv2.inRange(hsv, blueLower, blueUpper)
32
+
33
+ # # Calculating the number of white pixels depicting the blue color pixels in the ROI
34
+ # sumation = np.sum(mask)
35
+
36
+ # # Function that decides to play the instrument or not.
37
+ # state_machine(sumation, sound)
38
+
39
+ # return mask
40
+
41
+ # Verbose = False
42
+
43
+ # mixer.init()
44
+ # xylo_A1 = mixer.Sound('A1new.mp3')
45
+ # xylo_A2 = mixer.Sound('A2new.mp3')
46
+ # xylo_A3 = mixer.Sound('A3new.mp3')
47
+ # xylo_A4 = mixer.Sound('A4new.mp3')
48
+ # xylo_A5 = mixer.Sound('A5.wav')
49
+ # xylo_A6 = mixer.Sound('A6.wav')
50
+ # xylo_A7 = mixer.Sound('A7.wav')
51
+
52
+ # blueLower = (80, 150, 10)
53
+ # blueUpper = (120, 255, 255)
54
+
55
+ # camera = cv2.VideoCapture(0)
56
+ # ret, frame = camera.read()
57
+ # H, W = frame.shape[:2]
58
+
59
+ # A1 = cv2.resize(cv2.imread('A1.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
60
+ # A2 = cv2.resize(cv2.imread('A2.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
61
+ # A3 = cv2.resize(cv2.imread('A3.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
62
+ # A4 = cv2.resize(cv2.imread('A4.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
63
+ # A5 = cv2.resize(cv2.imread('A5.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
64
+ # A6 = cv2.resize(cv2.imread('A6.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
65
+ # A7 = cv2.resize(cv2.imread('A7.png'), (300, 150), interpolation=cv2.INTER_CUBIC)
66
+
67
+ # A1_center = [np.shape(frame)[1]*1//8, np.shape(frame)[0]*6//8]
68
+ # A2_center = [np.shape(frame)[1]*2//8, np.shape(frame)[0]*6//8]
69
+ # A3_center = [np.shape(frame)[1]*3//8, np.shape(frame)[0]*6//8]
70
+ # A4_center = [np.shape(frame)[1]*4//8, np.shape(frame)[0]*6//8]
71
+ # A5_center = [np.shape(frame)[1]*5//8, np.shape(frame)[0]*6//8]
72
+ # A6_center = [np.shape(frame)[1]*6//8, np.shape(frame)[0]*6//8]
73
+ # A7_center = [np.shape(frame)[1]*7//8, np.shape(frame)[0]*6//8]
74
+
75
+ # A1_thickness = [300, 150]
76
+ # A1_top = [A1_center[0]-A1_thickness[0]//2, A1_center[1]-A1_thickness[1]//2]
77
+ # A1_btm = [A1_center[0]+A1_thickness[0]//2, A1_center[1]+A1_thickness[1]//2]
78
+
79
+ # A2_thickness = [300, 150]
80
+ # A2_top = [A2_center[0]-A2_thickness[0]//2, A2_center[1]-A2_thickness[1]//2]
81
+ # A2_btm = [A2_center[0]+A2_thickness[0]//2, A2_center[1]+A2_thickness[1]//2]
82
+
83
+ # A3_thickness = [300, 150]
84
+ # A3_top = [A3_center[0]-A3_thickness[0]//2, A3_center[1]-A3_thickness[1]//2]
85
+ # A3_btm = [A3_center[0]+A3_thickness[0]//2, A3_center[1]+A3_thickness[1]//2]
86
+
87
+ # A4_thickness = [300, 150]
88
+ # A4_top = [A4_center[0]-A4_thickness[0]//2, A4_center[1]-A4_thickness[1]//2]
89
+ # A4_btm = [A4_center[0]+A4_thickness[0]//2, A4_center[1]+A4_thickness[1]//2]
90
+
91
+ # A5_thickness = [300, 150]
92
+ # A5_top = [A5_center[0]-A5_thickness[0]//2, A5_center[1]-A5_thickness[1]//2]
93
+ # A5_btm = [A5_center[0]+A5_thickness[0]//2, A5_center[1]+A5_thickness[1]//2]
94
+
95
+ # A6_thickness = [300, 150]
96
+ # A6_top = [A6_center[0]-A6_thickness[0]//2, A6_center[1]-A6_thickness[1]//2]
97
+ # A6_btm = [A6_center[0]+A6_thickness[0]//2, A6_center[1]+A6_thickness[1]//2]
98
+
99
+ # A7_thickness = [300, 150]
100
+ # A7_top = [A7_center[0]-A7_thickness[0]//2, A7_center[1]-A7_thickness[1]//2]
101
+ # A7_btm = [A7_center[0]+A7_thickness[0]//2, A7_center[1]+A7_thickness[1]//2]
102
+
103
+ # st.title('Airial Xylophone')
104
+
105
+ # Verbose = st.checkbox('Display ROI')
106
+
107
+ # stframe = st.image([])
108
+
109
+ # time.sleep(1)
110
+
111
+ # while True:
112
+ # # Grab the current frame
113
+ # ret, frame = camera.read()
114
+ # frame = cv2.flip(frame, 1)
115
+
116
+ # if not ret:
117
+ # break
118
+
119
+ # # Selecting ROI corresponding to A1
120
+ # A1_ROI = np.copy(frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]])
121
+ # mask = ROI_analysis(A1_ROI, 1)
122
+
123
+ # # Selecting ROI corresponding to A2
124
+ # A2_ROI = np.copy(frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]])
125
+ # mask = ROI_analysis(A2_ROI, 2)
126
+
127
+ # # Selecting ROI corresponding to A3
128
+ # A3_ROI = np.copy(frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]])
129
+ # mask = ROI_analysis(A3_ROI, 3) # Play A3 audio
130
+
131
+ # # Selecting ROI corresponding to A4
132
+ # A4_ROI = np.copy(frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]])
133
+ # mask = ROI_analysis(A4_ROI, 4) # Play A4 audio
134
+
135
+ # # Selecting ROI corresponding to A5
136
+ # A5_ROI = np.copy(frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]])
137
+ # mask = ROI_analysis(A5_ROI, 5) # Play A5 audio
138
+
139
+ # # Selecting ROI corresponding to A6
140
+ # A6_ROI = np.copy(frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]])
141
+ # mask = ROI_analysis(A6_ROI, 6) # Play A6 audio
142
+
143
+ # # Selecting ROI corresponding to A7
144
+ # A7_ROI = np.copy(frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]])
145
+ # mask = ROI_analysis(A7_ROI, 7) # Play A7 audio
146
+
147
+ # # Writing text on the image
148
+ # cv2.putText(frame, 'Project : Airial Xylophone', (10, 30), 2, 1, (20, 20, 20), 2)
149
+
150
+ # # Display the ROI to view the blue colour being detected
151
+ # if Verbose:
152
+ # # Displaying the ROI in the Image
153
+ # frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]] = cv2.bitwise_and(
154
+ # frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]],
155
+ # frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]], mask=mask[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]])
156
+ # frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]] = cv2.bitwise_and(
157
+ # frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]],
158
+ # frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]], mask=mask[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]])
159
+ # frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]] = cv2.bitwise_and(
160
+ # frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]],
161
+ # frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]], mask=mask[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]])
162
+ # frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]] = cv2.bitwise_and(
163
+ # frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]],
164
+ # frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]], mask=mask[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]])
165
+ # frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]] = cv2.bitwise_and(
166
+ # frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]],
167
+ # frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]], mask=mask[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]])
168
+ # frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]] = cv2.bitwise_and(
169
+ # frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]],
170
+ # frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]], mask=mask[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]])
171
+ # frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]] = cv2.bitwise_and(
172
+ # frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]],
173
+ # frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]], mask=mask[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]])
174
+
175
+ # else:
176
+ # # Augmenting the image of the instruments on the frame
177
+ # frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]] = cv2.addWeighted(A1, 1,
178
+ # frame[A1_top[1]:A1_btm[1], A1_top[0]:A1_btm[0]], 1, 0)
179
+ # frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]] = cv2.addWeighted(A2, 1,
180
+ # frame[A2_top[1]:A2_btm[1], A2_top[0]:A2_btm[0]], 1, 0)
181
+ # frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]] = cv2.addWeighted(A3, 1,
182
+ # frame[A3_top[1]:A3_btm[1], A3_top[0]:A3_btm[0]], 1, 0)
183
+ # frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]] = cv2.addWeighted(A4, 1,
184
+ # frame[A4_top[1]:A4_btm[1], A4_top[0]:A4_btm[0]], 1, 0)
185
+ # frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]] = cv2.addWeighted(A5, 1,
186
+ # frame[A5_top[1]:A5_btm[1], A5_top[0]:A5_btm[0]], 1, 0)
187
+ # frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]] = cv2.addWeighted(A6, 1,
188
+ # frame[A6_top[1]:A6_btm[1], A6_top[0]:A6_btm[0]], 1, 0)
189
+ # frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]] = cv2.addWeighted(A7, 1,
190
+ # frame[A7_top[1]:A7_btm[1], A7_top[0]:A7_btm[0]], 1, 0)
191
+
192
+ # stframe.image(frame, channels="BGR")
193
+
194
+ # key = cv2.waitKey(1) & 0xFF
195
+ # # if the 'q' key is pressed, stop the loop
196
+ # if key == ord("q"):
197
+ # break
198
+
199
+ # # Cleanup the camera and close any open windows
200
+ # camera.release()
201
+ # cv2.destroyAllWindows()
202
+
203
+
204
 
205
"""Streamlit front-end that plays a bundled WAV clip on demand."""
from pathlib import Path

import streamlit as st
import soundfile as sf
import sounddevice as sd


st.title("Audio Player")

# Path of the audio clip shipped alongside the app.
audio_file_path = "A6.wav"

# Play button: load the clip, play it synchronously, and report status.
if st.button("Play"):
    # Guard against an empty path AND a missing file — previously the
    # truthiness check alone could never fail for this hard-coded literal,
    # so the warning branch was unreachable and a missing file surfaced
    # only as a raw exception message.
    if audio_file_path and Path(audio_file_path).is_file():
        try:
            with st.spinner("Loading audio..."):
                # Decode the file into a sample array plus its sample rate.
                data, samplerate = sf.read(audio_file_path)

            # Start playback and block until it completes so the success
            # message appears only after the sound has finished.
            sd.play(data, samplerate)
            sd.wait()  # Wait until playback is finished

            st.success("Playback finished!")
        except Exception as e:
            # Surface decode/playback failures (unsupported format, no
            # audio output device, etc.) in the UI instead of crashing.
            st.error(f"Error: {e}")
    else:
        st.warning("Please enter the path of a WAV audio file.")
232
+
233
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,3 +1,5 @@
1
  opencv-python
2
  pygame
3
- streamlit
 
 
 
1
  opencv-python
2
  pygame
3
+ streamlit
4
+ soundfile
5
+ sounddevice