HuggingFace-SK committed on
Commit 1fa3da0 · 1 Parent(s): 680858c

initial commit

Files changed (3):
  1. app.py +367 -37
  2. requirements.txt +6 -6
  3. templates/index.html +0 -0
app.py CHANGED
@@ -1,60 +1,390 @@
  import os
- import requests
- import json
- from io import BytesIO

- from flask import Flask, jsonify, render_template, request, send_file

- from modules.inference import infer_t5
- from modules.dataset import query_emotion

- # https://huggingface.co/settings/tokens
- # https://huggingface.co/spaces/{username}/{space}/settings
- API_TOKEN = os.getenv("BIG_GAN_TOKEN")

- app = Flask(__name__)


- @app.route("/")
  def index():
-     return render_template("index.html")


- @app.route("/infer_biggan")
- def biggan():
-     input = request.args.get("input")

-     output = requests.request(
-         "POST",
-         "https://api-inference.huggingface.co/models/osanseviero/BigGAN-deep-128",
-         headers={"Authorization": f"Bearer {API_TOKEN}"},
-         data=json.dumps(input),
-     )

-     return send_file(BytesIO(output.content), mimetype="image/png")


- @app.route("/infer_t5")
- def t5():
-     input = request.args.get("input")

-     output = infer_t5(input)

-     return jsonify({"output": output})


- @app.route("/query_emotion")
- def emotion():
-     start = request.args.get("start")
-     end = request.args.get("end")

-     print(start)
-     print(end)

-     output = query_emotion(int(start), int(end))

-     return jsonify({"output": output})


- if __name__ == "__main__":
-     app.run(host="0.0.0.0", port=7860)
+ import cv2
+ import base64
+ import numpy as np
+ import io
+ from flask import Flask, render_template, Response
+ from flask_socketio import SocketIO, emit
+ from PIL import Image
+ from time import time as unix_time
  import os
+ import mediapipe as mp
+ from mediapipe.tasks import python
+ from mediapipe.tasks.python import vision
+ import time
+ import argparse
+ from mediapipe.framework.formats import landmark_pb2
+ from mediapipe import solutions
+ from tflite_support.task import vision as vision2
+ from tflite_support.task import core
+ from tflite_support.task import processor
+ from numpy.linalg import norm

+ # Image annotation utils / module-level state (mutated from the handlers below)
+ char_list = []
+ letter_result = 0
+ old_letter_result = 0
+ MARGIN = 10  # pixels
+ FONT_SIZE = 1
+ FONT_THICKNESS = 1
+ HANDEDNESS_TEXT_COLOR = (88, 205, 54)  # vibrant green
+ result_to_show = 0
+ cresult_to_show = 0
+ text_x = 0
+ text_y = 0
+ cwhich = 0
+ lastwidth = 400
+ letterscore = 0
+ frame_time = 0
+ same_letter_time = 0
+ no_hand_flag = 1
+ # UTILS
+
+
+ def brightness(img):
+     if len(img.shape) == 3:
+         # Colored RGB or BGR (*Do Not* use HSV images with this function)
+         # create brightness with euclidean norm
+         return np.average(norm(img, axis=2)) / np.sqrt(3)
+     else:
+         # Grayscale
+         return np.average(img)
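+ # For reference: brightness() maps a uint8 frame to a 0-255 scalar, e.g.
+ #   brightness(np.full((4, 4, 3), 255, dtype=np.uint8))  # ~255.0
+ #   brightness(np.zeros((4, 4), dtype=np.uint8))         # 0.0
+ # The frame handler below treats brightness(final_image) < 40 (a covered or
+ # dark camera) as the "delete last letter" gesture.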
+
+ def draw_landmarks_on_image(rgb_image, detection_result):
+     hand_landmarks_list = detection_result.hand_landmarks
+     handedness_list = detection_result.handedness
+     annotated_image = np.copy(rgb_image)
+     crop = []
+     image_height, image_width, _ = annotated_image.shape
+
+     # Loop through the detected hands to visualize.
+     for idx in range(len(hand_landmarks_list)):
+         hand_landmarks = hand_landmarks_list[idx]
+         handedness = handedness_list[idx]
+
+         # Draw the hand landmarks.
+         hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+         hand_landmarks_proto.landmark.extend([
+             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in hand_landmarks
+         ])
+         solutions.drawing_utils.draw_landmarks(
+             annotated_image,
+             hand_landmarks_proto,
+             solutions.hands.HAND_CONNECTIONS,
+             solutions.drawing_styles.get_default_hand_landmarks_style(),
+             solutions.drawing_styles.get_default_hand_connections_style())
+
+         # Get bounding box
+         height, width, _ = annotated_image.shape
+
+         x_coordinates = [landmark.x for landmark in hand_landmarks]
+         y_coordinates = [landmark.y for landmark in hand_landmarks]
+
+         min_x = int(min(x_coordinates) * width)   # Left
+         min_y = int(min(y_coordinates) * height)  # Top
+         max_x = int(max(x_coordinates) * width)   # Right
+         max_y = int(max(y_coordinates) * height)  # Bottom
+
+         # Get dimensions of bounding box
+         sect_height = max_y - min_y
+         sect_width = max_x - min_x
+
+         # Get center of bounding box
+         center_x = (min_x + max_x) / 2
+         center_y = (min_y + max_y) / 2
+
+         sect_diameter = 50
+         # Define dominant axis for aspect ratio: the longer side sets the square crop
+         if sect_height > sect_width:
+             sect_diameter = sect_height
+         if sect_height < sect_width:
+             sect_diameter = sect_width
+
+         sect_diameter = sect_diameter + 50    # Pad diameter
+         sect_radius = int(sect_diameter / 2)  # Find radius
+
+         # Crop image
+         crop_top = int(center_y - sect_radius)     # Top boundary
+         crop_bottom = int(center_y + sect_radius)  # Bottom boundary
+         crop_left = int(center_x - sect_radius)    # Left boundary
+         crop_right = int(center_x + sect_radius)   # Right boundary
+
+         # Account for out of canvas
+         if crop_top < 0:                # Bounding box too high
+             crop_top = 0
+         if crop_left < 0:               # Bounding box too far left
+             crop_left = 0
+         if crop_right > image_width:    # Bounding box too far right
+             crop_right = image_width
+         if crop_bottom > image_height:  # Bounding box too low
+             crop_bottom = image_height
+
+         # Trace bounding box
+         annotated_image = cv2.rectangle(annotated_image, (crop_left, crop_top), (crop_right, crop_bottom), (255, 0, 0), 6)
+
+         global text_x
+         global text_y
+
+         # For text, currently not used
+         text_x = crop_left
+         text_y = crop_top
+
+         # Get cropped image
+         crop = annotated_image[crop_top:crop_bottom, crop_left:crop_right]
+
+         # Scale cropped image to a fixed 150 px width, keeping aspect ratio
+         h, w = crop.shape[0:2]
+         neww = 150
+         newh = int(neww * (h / w))
+         crop = cv2.resize(crop, (neww, newh))
+
+         # annotated_image[0:0+crop.shape[0], 0:0+crop.shape[1]] = crop  # Used for superimposition
+         # annotated_image = crop  # Used for replacement
+
+     return [annotated_image, crop]
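+ # Returns [annotated full frame, square hand crop resized to 150 px wide];
+ # if no hand was detected the crop stays the initial empty list (the caller
+ # only classifies it when RESULT.handedness is non-empty).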
+
+ # -------------------------------------------------------------
+
+ # Letter list
+ letter_list = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","#"]
+
+ # Initialise MediaPipe hand landmark detection
+ RESULT = None
+ BaseOptions = mp.tasks.BaseOptions
+ HandLandmarker = mp.tasks.vision.HandLandmarker
+ HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
+ HandLandmarkerResult = mp.tasks.vision.HandLandmarkerResult
+ VisionRunningMode = mp.tasks.vision.RunningMode
+
+ cbase_options = core.BaseOptions(file_name="./better_exported/model.tflite")  # New tflite model
+ ccbase_options = core.BaseOptions(file_name="./exported/model.tflite")        # Old tflite model
+
+ # Initialise the two ASL tflite classifiers (new and old export)
+ cclassification_options = processor.ClassificationOptions(max_results=1)
+ coptions = vision2.ImageClassifierOptions(base_options=cbase_options, classification_options=cclassification_options)
+ ccoptions = vision2.ImageClassifierOptions(base_options=ccbase_options, classification_options=cclassification_options)
+ cclassifier = vision2.ImageClassifier.create_from_options(coptions)    # New
+ ccclassifier = vision2.ImageClassifier.create_from_options(ccoptions)  # Old
+
+
+ def print_result(result: HandLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
+     # Live-stream callback: stash the latest detection for the frame handler
+     global RESULT
+     RESULT = result
+
+
+ options = HandLandmarkerOptions(
+     base_options=BaseOptions(model_asset_path='hand_landmarker.task'),
+     running_mode=VisionRunningMode.LIVE_STREAM,
+     result_callback=print_result)
+
+
+ detector = vision.HandLandmarker.create_from_options(options)
+ video_frames = []
+
+
+ app = Flask(__name__)
+ socketio = SocketIO(app)
+
+ @app.route('/')
  def index():
+     return render_template('index.html')
+
+
+ @socketio.on('video_frame')
+ def handle_video_frame(frame):
+
+     decimg = data_uri_to_image(frame)  # BGR frame decoded from the data URI
+
+     # --------------------------------------------
+     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=decimg)  # Create MediaPipe image (note: cv2 decodes to BGR)
+     # print(mp.Timestamp.from_seconds(time.time()).value)
+
+     # Queue the frame for detection; in LIVE_STREAM mode detect_async returns
+     # nothing and the result arrives through the print_result callback
+     detector.detect_async(mp_image, mp.Timestamp.from_seconds(time.time()).value)
+
+     # try/except, because RESULT is still None while the model is initialising
+     global no_hand_flag, frame_time, same_letter_time, letter_result, old_letter_result, char_list, letterscore
+
+     try:
+         result_images = draw_landmarks_on_image(mp_image.numpy_view(), RESULT)  # Annotated and cropped images
+         annotated_image = result_images[0]
+         cropped_image = result_images[1]
+
+         # Standardise and fit shape by resizing to a fixed 500 px width
+         h, w = annotated_image.shape[0:2]
+         neww = 500
+         newh = int(neww * (h / w))
+         resized_image = cv2.resize(annotated_image, (neww, newh))
+         final_image = resized_image
+
+         if RESULT.handedness != []:  # Check whether there is any detection at all before feeding the tflite models
+             no_hand_flag = 0
+
+             if RESULT.handedness[0][0].display_name == 'Right':
+                 tf_image = vision2.TensorImage.create_from_array(cropped_image)
+                 classification_result = cclassifier.classify(tf_image)    # New
+                 cclassification_result = ccclassifier.classify(tf_image)  # Old
+
+                 result_to_show = classification_result.classifications[0].categories[0].category_name    # New
+                 cresult_to_show = cclassification_result.classifications[0].categories[0].category_name  # Old
+
+                 if cclassification_result.classifications[0].categories[0].score > classification_result.classifications[0].categories[0].score:
+                     letter_result = cresult_to_show  # To implement further UX with text-to-speech
+                     cwhich = "Old"
+                     if result_to_show == "P" and cresult_to_show != "P":
+                         cwhich = "New"
+                         letter_result = result_to_show
+                 else:
+                     letter_result = result_to_show  # To implement further UX with text-to-speech
+                     cwhich = "New"
+                     if cresult_to_show == "M" and result_to_show != "M":  # assumed intent; the original self-comparison could never fire
+                         cwhich = "Old"
+                         letter_result = cresult_to_show
+
+                 if result_to_show != "R" and cresult_to_show == "R":
+                     cwhich = "Old"
+                     letter_result = cresult_to_show
+
+                 if result_to_show != "T" and cresult_to_show == "T":
+                     cwhich = "Old"
+                     letter_result = cresult_to_show
+
+                 if cwhich == "Old":
+                     letterscore = cclassification_result.classifications[0].categories[0].score
+                 if cwhich == "New":
+                     letterscore = classification_result.classifications[0].categories[0].score
+             else:
+                 tf_image = vision2.TensorImage.create_from_array(cropped_image)
+                 classification_result = cclassifier.classify(tf_image)  # New
+                 result_to_show = classification_result.classifications[0].categories[0].category_name  # New
+
+                 if result_to_show != "B":
+                     letter_result = '_'
+                 else:
+                     letter_result = '>'
+
+             same_letter_time = round(unix_time() - frame_time, 2)
+
+             # print(frame_time, same_letter_time)
+
+             if old_letter_result != letter_result:
+                 frame_time = unix_time()  # Log time
+                 same_letter_time = 0
+
+             old_letter_result = letter_result
+
+         else:
+             local_same_letter_time = 0
+             if no_hand_flag == 0:
+                 same_letter_time = round(unix_time() - frame_time, 2)
+                 local_same_letter_time = round(unix_time() - frame_time, 2)
+
+             letterscore = 0
+             # print(brightness(final_image))
+
+             if local_same_letter_time > 1.2 and brightness(final_image) < 40:
+                 if char_list:  # guard against popping an empty buffer
+                     char_list.pop()
+                 print("".join(char_list))
+                 frame_time = unix_time()  # Log time
+                 same_letter_time = 0
+
+             if same_letter_time > 1.4 and brightness(final_image) > 40:
+                 frame_time = unix_time()  # Log time
+                 same_letter_time = 0
+                 no_hand_flag = 1
+
+         # HUD: white banner plus hold-time and confidence bars
+         same_letter_time_width = 10 + int(same_letter_time * 100)
+         if same_letter_time_width > 190:
+             same_letter_time_width = 190
+         iheight, iwidth = final_image.shape[:2]
+         cv2.rectangle(final_image, (0, 0), (iwidth, 50), (255, 255, 255), -1)
+
+         cv2.rectangle(final_image, (0, 50), (200, 100), (255, 255, 255), -1)
+         cv2.rectangle(final_image, (8, 58), (192, 72), (255, 100, 100), -1)
+         cv2.rectangle(final_image, (10, 60), (190, 70), (255, 200, 200), -1)
+         cv2.rectangle(final_image, (10, 60), (same_letter_time_width, 70), (255, 100, 100), -1)
+
+         letterscore_width = 10 + int(letterscore * 100)
+
+         cv2.rectangle(final_image, (8, 78), (192, 92), (100, 100, 200), -1)
+         cv2.rectangle(final_image, (10, 80), (190, 90), (150, 175, 255), -1)
+         cv2.rectangle(final_image, (10, 80), (letterscore_width, 90), (100, 100, 200), -1)
+
+         cv2.putText(final_image, f"{letterscore_width}",  # Display confidence-bar width
+                     (12, 70), cv2.FONT_HERSHEY_DUPLEX,
+                     0.5, (0, 0, 0), 1, cv2.LINE_AA)
+
+         cv2.putText(final_image, f"[ {letter_result} ] {''.join(char_list)} |",  # Display current letter and buffer
+                     (20, 40), cv2.FONT_HERSHEY_DUPLEX,
+                     0.7, (0, 0, 0), 2, cv2.LINE_AA)
+
+         print(letter_result, same_letter_time)
+
+     except Exception as e:
+         # Ha! The catch err{throw err} scenario; it was actually quite useful in debugging though
+         print(e)
+
+
+     if same_letter_time > 0 and RESULT is not None and RESULT.handedness != []:  # A letter is being held and a hand is present
+
+         if same_letter_time > 1.7 and RESULT.handedness[0][0].display_name == 'Right':  # Right hand: commit the letter
+             char_list.append(letter_result)
+             print("".join(char_list))
+             frame_time = unix_time()  # Log time
+             same_letter_time = 0
+
+         if same_letter_time > 0.9 and RESULT.handedness[0][0].display_name == 'Left':  # Left hand: space and speak gestures
+             if same_letter_time > 1.2 and letter_result == '_':
+                 char_list.append(" ")
+                 print("".join(char_list))
+                 frame_time = unix_time()  # Log time
+                 same_letter_time = 0
+
+             if letter_result == ">":
+                 if "".join(char_list) != '':
+                     os.system(f'echo {"".join(char_list)} | espeak -p 70 -s 140')
+                 char_list = []
+                 print("".join(char_list))
+
+
+ def data_uri_to_image(data_uri):
+     header, encoded = data_uri.split(',', 1)
+     decoded_data = base64.b64decode(encoded)
+     nparr = np.frombuffer(decoded_data, np.uint8)
+     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+     return image
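+ # For reference, the expected input is a browser-style data URI, e.g.
+ # "data:image/jpeg;base64,/9j/4AAQ..." - everything after the first comma
+ # is base64-decoded and handed to cv2.imdecode.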
+
+ if __name__ == '__main__':
+     socketio.run(app, host='0.0.0.0', port=7860)  # serve via Flask-SocketIO's runner
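
A quick way to exercise the `video_frame` handler without the browser page is a small Socket.IO client. The sketch below is illustrative and not part of this commit; it assumes the `python-socketio` client package is installed and the app is already listening on port 7860:

```python
# Hypothetical smoke-test client for the video_frame handler (not part of
# this commit). Assumes: pip install "python-socketio[client]".
import base64
import cv2
import numpy as np
import socketio

sio = socketio.Client()
sio.connect('http://localhost:7860')

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a webcam frame
ok, buf = cv2.imencode('.jpg', frame)            # JPEG-encode, as the browser does
data_uri = 'data:image/jpeg;base64,' + base64.b64encode(buf.tobytes()).decode('ascii')

sio.emit('video_frame', data_uri)  # matches @socketio.on('video_frame')
sio.sleep(1)                       # give the server a moment to process
sio.disconnect()
```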
requirements.txt CHANGED
@@ -1,6 +1,6 @@
- datasets==2.*
- flask==3.0.0
- requests==2.31.*
- sentencepiece==0.1.*
- torch==2.*
- transformers==4.*
+ opencv-python-headless==4.10.0.84
+ numpy==1.23.3
+ Flask==3.0.3
+ Flask-SocketIO==5.4.1
+ mediapipe==0.10.11
+ tensorflow==2.8.0
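
Note that app.py also imports `tflite_support` and `PIL` (Pillow), which are not pinned above. A short import smoke test (illustrative, not part of the commit) can flag any missing dependency before the Space boots:

```python
# Quick dependency smoke test (illustrative; not part of this commit).
# tflite_support and PIL are imported by app.py but absent from requirements.txt.
import importlib

for name in ("cv2", "numpy", "flask", "flask_socketio",
             "mediapipe", "tflite_support", "PIL"):
    try:
        mod = importlib.import_module(name)
        print(f"{name}: OK ({getattr(mod, '__version__', 'unknown version')})")
    except ImportError as exc:
        print(f"{name}: MISSING -> {exc}")
```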
templates/index.html CHANGED
The diff for this file is too large to render. See raw diff