Emeritus-21 committed on
Commit 208b0e3 · verified · 1 Parent(s): b216f74

Update app.py

Files changed (1)
  1. app.py +259 -143
app.py CHANGED
@@ -1,196 +1,312 @@
  import cv2
  import base64
  import numpy as np
- from flask import Flask, render_template, request, jsonify, send_from_directory
- import time
  import mediapipe as mp
  from mediapipe.framework.formats import landmark_pb2
  from mediapipe import solutions
  from tflite_support.task import vision as vision2
- from tflite_support.task import core, processor
  from numpy.linalg import norm

- # Flask app setup
- app = Flask(__name__)
-
- # Global variables for letter detection results
  letter_result = 0
- result_to_show = 0
- cresult_to_show = 0
- letterscore = 0
- no_hand_flag = 1

- # Initialize MediaPipe hand landmark detection
  BaseOptions = mp.tasks.BaseOptions
  HandLandmarker = mp.tasks.vision.HandLandmarker
  HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
  HandLandmarkerResult = mp.tasks.vision.HandLandmarkerResult
  VisionRunningMode = mp.tasks.vision.RunningMode

- # Load your TFLite models (adjust paths if needed)
- cbase_options = core.BaseOptions(file_name="./exported/model.tflite") # New model
- ccbase_options = core.BaseOptions(file_name="./exported/word.tflite") # Old model or word model

  cclassification_options = processor.ClassificationOptions(max_results=1)
  coptions = vision2.ImageClassifierOptions(base_options=cbase_options, classification_options=cclassification_options)
  ccoptions = vision2.ImageClassifierOptions(base_options=ccbase_options, classification_options=cclassification_options)
-
  cclassifier = vision2.ImageClassifier.create_from_options(coptions)
  ccclassifier = vision2.ImageClassifier.create_from_options(ccoptions)

- # Callback to store MediaPipe detection results asynchronously
- RESULT = None

  def print_result(result: HandLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
      global RESULT
-     RESULT = result

  options = HandLandmarkerOptions(
      base_options=BaseOptions(model_asset_path='hand_landmarker.task'),
      running_mode=VisionRunningMode.LIVE_STREAM,
      result_callback=print_result)

- detector = mp.tasks.vision.HandLandmarker.create_from_options(options)

- # Utility functions for image processing
- def data_uri_to_image(data_uri):
-     header, encoded = data_uri.split(',', 1)
-     decoded_data = base64.b64decode(encoded)
-     nparr = np.frombuffer(decoded_data, np.uint8)
-     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
-     return image

- def image_to_data_uri(image):
-     _, buffer = cv2.imencode('.jpg', image)
-     image_bytes = buffer.tobytes()
-     base64_encoded = base64.b64encode(image_bytes).decode('utf-8')
-     return f"data:image/jpeg;base64,{base64_encoded}"

- def draw_landmarks_on_image(rgb_image, detection_result):
-     hand_landmarks_list = detection_result.hand_landmarks
-     annotated_image = np.copy(rgb_image)
-     image_height, image_width, _ = annotated_image.shape
-
-     for idx in range(len(hand_landmarks_list)):
-         hand_landmarks = hand_landmarks_list[idx]
-         hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
-         hand_landmarks_proto.landmark.extend([
-             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in hand_landmarks
-         ])
-         solutions.drawing_utils.draw_landmarks(
-             annotated_image,
-             hand_landmarks_proto,
-             solutions.hands.HAND_CONNECTIONS,
-             solutions.drawing_styles.get_default_hand_landmarks_style(),
-             solutions.drawing_styles.get_default_hand_connections_style()
-         )
-     return annotated_image
-
- # Letter list - modify if needed
- letter_list = [chr(i) for i in range(65, 91)] + ['#'] # A-Z + #
-
- # Isẹ̀kiri dictionary (Example mapping, update with real words)
- isekiri_dict = {
-     'A': 'Àṣẹ',
-     'B': 'Bí',
-     'C': 'Ṣe',
-     'D': 'Dá',
-     'E': 'Ẹ̀',
-     'F': 'Fẹ́',
-     'G': 'Gba',
-     'H': 'Hàn',
-     'I': 'Ìyà',
-     'J': 'Jẹ',
-     'K': 'Kọ',
-     'L': 'Lá',
-     'M': 'Má',
-     'N': 'Ná',
-     'O': 'Ọ̀',
-     'P': 'Pẹ̀',
-     'Q': 'Kù',  # approximate since Q rarely used
-     'R': 'Rà',
-     'S': 'Ṣá',
-     'T': 'Tẹ',
-     'U': 'Ú',
-     'V': 'Vẹ',
-     'W': 'Wá',
-     'X': 'Ẹ́s',
-     'Y': 'Yá',
-     'Z': 'Zà',
-     '#': '#'
- }
-
- # Routes for web UI and models
  @app.route('/')
  def index():
      return render_template('index.html')

- @app.route('/exported/<path:filename>')
- def send_model(filename):
-     return send_from_directory('exported', filename)

- # Video frame processing API (ASL detection)
  @app.route('/api/data', methods=['POST'])
  def handle_video_frame():
-     global letter_result, result_to_show, cresult_to_show, letterscore, no_hand_flag
-
-     frame_data_uri = request.json.get('key')
-     if not frame_data_uri:
-         return jsonify({'error': 'No frame data received'}), 400

-     frame = data_uri_to_image(frame_data_uri)
-     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=frame)

      try:
-         detection_result = detector.detect_async(mp_image, mp.Timestamp.from_seconds(time.time()).value)

-         global RESULT
-         if RESULT is None:
-             return jsonify({'result': '_', 'frame': frame_data_uri})

-         annotated_image = draw_landmarks_on_image(frame, RESULT)
-
-         if RESULT.handedness:
-             no_hand_flag = 0
-             # If right hand detected, classify using models
              if RESULT.handedness[0][0].display_name == 'Right':
-                 tf_image = vision2.TensorImage.create_from_array(frame)
-                 classification_result = cclassifier.classify(tf_image)
-                 cclassification_result = ccclassifier.classify(tf_image)

-                 result_to_show = classification_result.classifications[0].categories[0].category_name
-                 cresult_to_show = cclassification_result.classifications[0].categories[0].category_name
-
-                 # Simple decision logic between old and new models
                  if cclassification_result.classifications[0].categories[0].score > classification_result.classifications[0].categories[0].score:
-                     letter_result = cresult_to_show
                  else:
-                     letter_result = result_to_show
-                 letterscore = max(
-                     classification_result.classifications[0].categories[0].score,
-                     cclassification_result.classifications[0].categories[0].score
-                 )
              else:
-                 letter_result = '_'
-         else:
-             letter_result = '_'
-
-     except Exception as e:
-         print("Detection error:", e)
-         letter_result = '_'
-         annotated_image = frame
-
-     frame_out = image_to_data_uri(annotated_image)
-     return jsonify({"result": letter_result, "frame": frame_out})
-
- # Isẹ̀kiri translation API
- @app.route('/api/translate', methods=['POST'])
- def translate_to_isekiri():
-     data = request.json
-     text = data.get('text', '')
-     # Translate each letter to Isẹ̀kiri word or keep as is if unknown
-     translated = ' '.join(isekiri_dict.get(ch.upper(), ch) for ch in text if ch.strip())
-     return jsonify({'isekiri': translated})
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=7860)

  import cv2
  import base64
  import numpy as np
+ import io
+ from flask import Flask, render_template, Response, request, jsonify
+ from flask_socketio import SocketIO, emit
+ from PIL import Image
+ from time import time as unix_time
+ import os
  import mediapipe as mp
+ from mediapipe.tasks import python
+ from mediapipe.tasks.python import vision
+ import time
+ import argparse
  from mediapipe.framework.formats import landmark_pb2
  from mediapipe import solutions
  from tflite_support.task import vision as vision2
+ from tflite_support.task import core
+ from tflite_support.task import processor
  from numpy.linalg import norm

+ #Image Annotation Utils
+ char_list=[]
+ global letter_result
  letter_result = 0
+ global old_letter_result
+ old_letter_result = 0
+ MARGIN = 10 # pixels
+ FONT_SIZE = 1
+ FONT_THICKNESS = 1
+ HANDEDNESS_TEXT_COLOR = (88, 205, 54) # vibrant green
+ global test_x
+ global test_y
+ global result_to_show
+ result_to_show=0
+ global cresult_to_show
+ cresult_to_show=0
+ text_x = 0
+ text_y = 0
+ cwhich=0
+ lastwidth = 400
+ letterscore=0
+ frame_time=0
+ same_letter_time=0
+ no_hand_flag=1
+ # UTILS
+
+
+ def brightness(img):
+     if len(img.shape) == 3:
+         # Colored RGB or BGR (*Do Not* use HSV images with this function)
+         # create brightness with euclidean norm
+         return np.average(norm(img, axis=2)) / np.sqrt(3)
+     else:
+         # Grayscale
+         return np.average(img)
+
+
+ def draw_landmarks_on_image(rgb_image, detection_result):
+     hand_landmarks_list = detection_result.hand_landmarks
+     handedness_list = detection_result.handedness
+     annotated_image = np.copy(rgb_image)
+     crop = []
+     image_height, image_width, image_heightgray=annotated_image.shape
+
+     # Loop through the detected hands to visualize.
+     for idx in range(len(hand_landmarks_list)):
+         hand_landmarks = hand_landmarks_list[idx]
+         handedness = handedness_list[idx]
+
+         # Draw the hand landmarks.
+         hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+         hand_landmarks_proto.landmark.extend([
+             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in hand_landmarks
+         ])
+         solutions.drawing_utils.draw_landmarks(
+             annotated_image,
+             hand_landmarks_proto,
+             solutions.hands.HAND_CONNECTIONS,
+             solutions.drawing_styles.get_default_hand_landmarks_style(),
+             solutions.drawing_styles.get_default_hand_connections_style())
+
+         # Get bounding box
+         height, width, _ = annotated_image.shape
+
+         x_coordinates = [landmark.x for landmark in hand_landmarks]
+         y_coordinates = [landmark.y for landmark in hand_landmarks]
+
+         min_x = int(min(x_coordinates) * width) # Left
+         min_y = int(min(y_coordinates) * height) # Top
+         max_x = int(max(x_coordinates) * width) # Right
+         max_y = int(max(y_coordinates) * height) # Bottom
+
+         #Get dimensions of bounding box
+         sect_height = max_y-(min_y)
+         sect_width = max_x-(min_x)
+
+         #Get center of bounding box
+         center_x=(min_x+max_x)/2
+         center_y=(min_y+max_y)/2
+
+         sect_diameter=50
+         #Define dominant axis for aspect ratio
+         if(sect_height>sect_width):
+             sect_diameter = sect_height
+
+         if(sect_height<sect_width):
+             sect_diameter = sect_width

+         sect_diameter=sect_diameter+50 # Pad diameter
+         sect_radius=int(sect_diameter/2) # Find radius
+
+         #Crop Image
+         crop_top=int(center_y-sect_radius) #Top boundary
+         crop_bottom=int(center_y+sect_radius) #Bottom boundary
+         crop_left=int(center_x-sect_radius) #Left boundary
+         crop_right=int(center_x+sect_radius) #Right boundary
+
+         #Account for out of canvas
+         if(crop_top<0): #Bounding box too high
+             crop_top=0
+
+         if(crop_left<0): #Bounding box too far left
+             crop_left=0
+
+         if(crop_right>image_width): #Bounding box too far right
+             crop_right=image_width
+
+         if(crop_bottom>image_height): #Bounding box too low
+             crop_bottom=image_height
+
+         # Trace bounding box
+         annotated_image = cv2.rectangle(annotated_image, (crop_left, crop_top), (crop_right, crop_bottom), (255,0,0), 6)
+
+         global text_x
+         global text_y
+
+         # For text, currently not used
+         text_x=crop_left
+         text_y=crop_top
+
+         # Get cropped image
+         crop = annotated_image[crop_top:crop_bottom, crop_left:crop_right]
+
+         # Scale cropped image
+         h, w = crop.shape[0:2]
+         neww = 150
+         newh = int(neww*(h/w))
+         crop = cv2.resize(crop, (neww, newh))
+
+         #annotated_image[0:0+crop.shape[0], 0:0+crop.shape[1]] = crop # Used for superimposition
+
+         #annotated_image=crop # Used for replacement
+
+     return [annotated_image, crop]
+
+ #-------------------------------------------------------------
+
+ # Letter List
+ letter_list=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","#"]
+
+ # Initialise MediaPipe hand landmark detection
+ RESULT = None
  BaseOptions = mp.tasks.BaseOptions
  HandLandmarker = mp.tasks.vision.HandLandmarker
  HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
  HandLandmarkerResult = mp.tasks.vision.HandLandmarkerResult
  VisionRunningMode = mp.tasks.vision.RunningMode

+ cbase_options = core.BaseOptions(file_name="./better_exported/model.tflite") # New tflite
+ ccbase_options = core.BaseOptions(file_name="./exported/model.tflite") # Old tflite
+
+ # Initialise ASL tflite model
  cclassification_options = processor.ClassificationOptions(max_results=1)
  coptions = vision2.ImageClassifierOptions(base_options=cbase_options, classification_options=cclassification_options)
  ccoptions = vision2.ImageClassifierOptions(base_options=ccbase_options, classification_options=cclassification_options)
  cclassifier = vision2.ImageClassifier.create_from_options(coptions)
  ccclassifier = vision2.ImageClassifier.create_from_options(ccoptions)
+

  def print_result(result: HandLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
+
      global RESULT
+     RESULT=result
+

  options = HandLandmarkerOptions(
      base_options=BaseOptions(model_asset_path='hand_landmarker.task'),
      running_mode=VisionRunningMode.LIVE_STREAM,
      result_callback=print_result)

+ detector = vision.HandLandmarker.create_from_options(options)
+ video_frames=[]
+
+ app = Flask(__name__)

  @app.route('/')
  def index():
      return render_template('index.html')

  @app.route('/api/data', methods=['POST'])
  def handle_video_frame():
+     frame = request.json.get('key')
+     #print(request.json)
+     response_frame = data_uri_to_image(frame)
+     decimg = response_frame

+     #--------------------------------------------
+     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=decimg) # Create MediaPipe image
+     #print(mp.Timestamp.from_seconds(time.time()).value)
+
+     detection_result = detector.detect_async(mp_image, mp.Timestamp.from_seconds(time.time()).value) # detect
+
+     # Try-Catch block, because detection is not done during model initialisation
+     global no_hand_flag, frame_time, same_letter_time, letter_result, old_letter_result, char_list, letterscore

      try:
+         result_images = draw_landmarks_on_image(mp_image.numpy_view(), RESULT) # Array of annotated and cropped images
+         annotated_image = result_images[0]
+         cropped_image = result_images[1]
+
+         #Standardise and fit shape by resizing
+         h, w = annotated_image.shape[0:2]
+         neww = 500
+         newh = int(neww*(h/w))
+         resized_image = cv2.resize(annotated_image, (neww, newh))
+         final_image=resized_image

+         if(RESULT.handedness != []): # To check if there is any result at all and then feed tflite model
+             no_hand_flag=0

              if RESULT.handedness[0][0].display_name == 'Right':
+                 tf_image = vision2.TensorImage.create_from_array(cropped_image)
+                 classification_result = cclassifier.classify(tf_image) # New
+                 cclassification_result = ccclassifier.classify(tf_image) # Old

+                 result_to_show = classification_result.classifications[0].categories[0].category_name # New
+                 cresult_to_show = cclassification_result.classifications[0].categories[0].category_name # Old
+
                  if cclassification_result.classifications[0].categories[0].score > classification_result.classifications[0].categories[0].score:
+                     letter_result = cresult_to_show # To implement further UX with Text to Speech
+                     cwhich="Old"
+                     if result_to_show == "P" and cresult_to_show !="P":
+                         cwhich="New"
+                         letter_result = result_to_show
+
                  else:
+                     letter_result = result_to_show # To implement further UX with Text to Speech
+                     cwhich="New"
+                     if cresult_to_show == "M" and result_to_show !="M":
+                         cwhich="Old"
+
+                     if result_to_show != "R" and cresult_to_show =="R":
+                         cwhich="Old"
+                         letter_result = cresult_to_show
+
+                     if result_to_show != "T" and cresult_to_show =="T":
+                         cwhich="Old"
+                         letter_result = cresult_to_show
+                 if cwhich=="Old" :
+                     letterscore = cclassification_result.classifications[0].categories[0].score
+
+                 if cwhich=="New" :
+                     letterscore = classification_result.classifications[0].categories[0].score
              else:
+                 tf_image = vision2.TensorImage.create_from_array(cropped_image)
+                 classification_result = cclassifier.classify(tf_image) # New
+                 result_to_show = classification_result.classifications[0].categories[0].category_name # New
+
+                 if result_to_show != "B":
+                     letter_result='_'
+                 else:
+                     letter_result='>'
+     except Exception as e:
+         # Ha! The catch err{throw err} scenario, it was actually quite useful in debugging though
+         print(e)
+     frame_data = image_to_data_uri(final_image)
+     #print(frame_data)
+
+     return jsonify({"result": letter_result, "frame": frame_data}), 200
+
+ def data_uri_to_image(data_uri):
+     header, encoded = data_uri.split(',', 1)
+     decoded_data = base64.b64decode(encoded)
+     nparr = np.frombuffer(decoded_data, np.uint8)
+     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+     return image
+
+ def image_to_data_uri(image):
+     # Encode the image as a JPEG
+     _, buffer = cv2.imencode('.jpg', image)
+     # Convert the buffer to bytes
+     image_bytes = buffer.tobytes()
+     # Encode the bytes to Base64
+     base64_encoded = base64.b64encode(image_bytes).decode('utf-8')
+     # Create the Data URI
+     data_uri = f"data:image/jpeg;base64,{base64_encoded}"
+     return data_uri
+
+ if (__name__ == '__main__'):
+     app.run(host='0.0.0.0', port=7860)
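
For quick manual testing of the updated endpoint: /api/data still expects a JSON body of the form {"key": <JPEG data URI>} and responds with {"result": <letter, '_' or '>'>, "frame": <annotated JPEG data URI>}. A minimal client sketch, assuming the app is running locally on port 7860 and that test.jpg is any local image containing a hand (both are illustrative assumptions, not part of this commit):

import base64
import requests  # assumed to be available; app.py itself does not import it

# Build a JPEG data URI, mirroring what the browser-side code sends
with open("test.jpg", "rb") as f:  # hypothetical test image
    data_uri = "data:image/jpeg;base64," + base64.b64encode(f.read()).decode("utf-8")

# POST the frame and read back the detected letter plus the annotated frame
resp = requests.post("http://localhost:7860/api/data", json={"key": data_uri})
payload = resp.json()
print(payload["result"])       # detected letter, '_', or '>' (per the handedness logic above)
print(payload["frame"][:60])   # annotated frame, returned as a JPEG data URI (truncated here)

Note that detection runs asynchronously (detect_async with a LIVE_STREAM HandLandmarker), so the very first request after startup can arrive before RESULT is populated and may not return a valid annotated frame.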