Emeritus-21 committed
Commit c26176e · verified · 1 parent: acd5666

Create app.py

Files changed (1)
  1. app.py +196 -0
app.py ADDED
@@ -0,0 +1,196 @@
+ import cv2
+ import base64
+ import numpy as np
+ from flask import Flask, render_template, request, jsonify, send_from_directory
+ import time
+ import mediapipe as mp
+ from mediapipe.framework.formats import landmark_pb2
+ from mediapipe import solutions
+ from tflite_support.task import vision as vision2
+ from tflite_support.task import core, processor
+ from numpy.linalg import norm  # note: currently unused
+
+ # Flask app setup
+ app = Flask(__name__)
+
+ # Global variables for letter detection results
+ letter_result = 0
+ result_to_show = 0
+ cresult_to_show = 0
+ letterscore = 0
+ no_hand_flag = 1
+
+ # Initialize MediaPipe hand landmark detection
+ BaseOptions = mp.tasks.BaseOptions
+ HandLandmarker = mp.tasks.vision.HandLandmarker
+ HandLandmarkerOptions = mp.tasks.vision.HandLandmarkerOptions
+ HandLandmarkerResult = mp.tasks.vision.HandLandmarkerResult
+ VisionRunningMode = mp.tasks.vision.RunningMode
+
+ # Load your TFLite models (adjust paths if needed)
+ cbase_options = core.BaseOptions(file_name="./exported/model.tflite")  # new model
+ ccbase_options = core.BaseOptions(file_name="./exported/word.tflite")  # old model or word model
+
+ cclassification_options = processor.ClassificationOptions(max_results=1)
+ coptions = vision2.ImageClassifierOptions(base_options=cbase_options, classification_options=cclassification_options)
+ ccoptions = vision2.ImageClassifierOptions(base_options=ccbase_options, classification_options=cclassification_options)
+
+ cclassifier = vision2.ImageClassifier.create_from_options(coptions)
+ ccclassifier = vision2.ImageClassifier.create_from_options(ccoptions)
+
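+ # Both classifiers see every right-hand frame; handle_video_frame below keeps
+ # whichever of the two predictions carries the higher confidence score.
+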
+ # Callback to store MediaPipe detection results asynchronously
+ RESULT = None
+
+ def print_result(result: HandLandmarkerResult, output_image: mp.Image, timestamp_ms: int):
+     global RESULT
+     RESULT = result
+
+ options = HandLandmarkerOptions(
+     base_options=BaseOptions(model_asset_path='hand_landmarker.task'),
+     running_mode=VisionRunningMode.LIVE_STREAM,
+     result_callback=print_result)
+
+ detector = mp.tasks.vision.HandLandmarker.create_from_options(options)
+
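+ # In LIVE_STREAM mode detect_async() returns immediately and results arrive
+ # via the callback above, so a request is typically annotated with the
+ # detection from the previous frame rather than the one it just submitted.
+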
+ # Utility functions for image processing
+ def data_uri_to_image(data_uri):
+     header, encoded = data_uri.split(',', 1)
+     decoded_data = base64.b64decode(encoded)
+     nparr = np.frombuffer(decoded_data, np.uint8)
+     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+     return image
+
+ def image_to_data_uri(image):
+     _, buffer = cv2.imencode('.jpg', image)
+     image_bytes = buffer.tobytes()
+     base64_encoded = base64.b64encode(image_bytes).decode('utf-8')
+     return f"data:image/jpeg;base64,{base64_encoded}"
+
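+ # Example round trip: the browser posts "data:image/jpeg;base64,/9j/4AAQ..."
+ # which decodes to a BGR numpy array (cv2.imdecode returns BGR), and the
+ # annotated frame is re-encoded the same way for the response.
+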
+ def draw_landmarks_on_image(rgb_image, detection_result):
+     hand_landmarks_list = detection_result.hand_landmarks
+     annotated_image = np.copy(rgb_image)
+     image_height, image_width, _ = annotated_image.shape
+
+     for idx in range(len(hand_landmarks_list)):
+         hand_landmarks = hand_landmarks_list[idx]
+         hand_landmarks_proto = landmark_pb2.NormalizedLandmarkList()
+         hand_landmarks_proto.landmark.extend([
+             landmark_pb2.NormalizedLandmark(x=landmark.x, y=landmark.y, z=landmark.z) for landmark in hand_landmarks
+         ])
+         solutions.drawing_utils.draw_landmarks(
+             annotated_image,
+             hand_landmarks_proto,
+             solutions.hands.HAND_CONNECTIONS,
+             solutions.drawing_styles.get_default_hand_landmarks_style(),
+             solutions.drawing_styles.get_default_hand_connections_style()
+         )
+     return annotated_image
+
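+ # This mirrors the stock MediaPipe hand-landmarker drawing example: each
+ # hand's 21 normalized landmarks are copied into a NormalizedLandmarkList
+ # proto so the legacy solutions.drawing_utils renderer can draw them.
+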
+ # Letter list: A-Z plus '#' (currently unused by the routes below)
+ letter_list = [chr(i) for i in range(65, 91)] + ['#']
+
+ # Isẹ̀kiri dictionary (example mapping; update with real words)
+ isekiri_dict = {
+     'A': 'Àṣẹ',
+     'B': 'Bí',
+     'C': 'Ṣe',
+     'D': 'Dá',
+     'E': 'Ẹ̀',
+     'F': 'Fẹ́',
+     'G': 'Gba',
+     'H': 'Hàn',
+     'I': 'Ìyà',
+     'J': 'Jẹ',
+     'K': 'Kọ',
+     'L': 'Lá',
+     'M': 'Má',
+     'N': 'Ná',
+     'O': 'Ọ̀',
+     'P': 'Pẹ̀',
+     'Q': 'Kù',  # approximate, since Q is rarely used
+     'R': 'Rà',
+     'S': 'Ṣá',
+     'T': 'Tẹ',
+     'U': 'Ú',
+     'V': 'Vẹ',
+     'W': 'Wá',
+     'X': 'Ẹ́s',
+     'Y': 'Yá',
+     'Z': 'Zà',
+     '#': '#'
+ }
+
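+ # Used by /api/translate below, e.g. "AB" -> "Àṣẹ Bí".
+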
+ # Routes for web UI and models
+ @app.route('/')
+ def index():
+     return render_template('index.html')
+
+ @app.route('/exported/<path:filename>')
+ def send_model(filename):
+     return send_from_directory('exported', filename)
+
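+ # Serving ./exported/ presumably lets the front end fetch the .tflite model
+ # files directly, e.g. GET /exported/model.tflite.
+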
+ # Video frame processing API (ASL detection)
+ @app.route('/api/data', methods=['POST'])
+ def handle_video_frame():
+     global letter_result, result_to_show, cresult_to_show, letterscore, no_hand_flag
+
+     frame_data_uri = request.json.get('key')
+     if not frame_data_uri:
+         return jsonify({'error': 'No frame data received'}), 400
+
+     frame = data_uri_to_image(frame_data_uri)
+     # cv2 decodes to BGR but MediaPipe expects RGB, so convert before wrapping
+     rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+     mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_frame)
+
+     try:
+         # detect_async() returns None; the result lands in RESULT via the callback
+         detector.detect_async(mp_image, int(time.time() * 1000))
+
+         global RESULT
+         if RESULT is None:
+             return jsonify({'result': '_', 'frame': frame_data_uri})
+
+         annotated_image = draw_landmarks_on_image(frame, RESULT)
+
+         if RESULT.handedness:
+             no_hand_flag = 0
+             # If a right hand is detected, classify using both models
+             if RESULT.handedness[0][0].display_name == 'Right':
+                 tf_image = vision2.TensorImage.create_from_array(frame)
+                 classification_result = cclassifier.classify(tf_image)
+                 cclassification_result = ccclassifier.classify(tf_image)
+
+                 result_to_show = classification_result.classifications[0].categories[0].category_name
+                 cresult_to_show = cclassification_result.classifications[0].categories[0].category_name
+
+                 # Simple decision logic: keep whichever model is more confident
+                 if cclassification_result.classifications[0].categories[0].score > classification_result.classifications[0].categories[0].score:
+                     letter_result = cresult_to_show
+                 else:
+                     letter_result = result_to_show
+                 letterscore = max(
+                     classification_result.classifications[0].categories[0].score,
+                     cclassification_result.classifications[0].categories[0].score
+                 )
+             else:
+                 letter_result = '_'
+         else:
+             no_hand_flag = 1  # reset so the flag actually tracks hand presence
+             letter_result = '_'
+
+     except Exception as e:
+         print("Detection error:", e)
+         letter_result = '_'
+         annotated_image = frame
+
+     frame_out = image_to_data_uri(annotated_image)
+     return jsonify({"result": letter_result, "frame": frame_out})
+
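+ # Contract for /api/data as implemented above:
+ #   request:  POST {"key": "<data URI of the current webcam frame>"}
+ #   response: {"result": "<letter or '_'>", "frame": "<data URI of annotated frame>"}
+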
+ # Isẹ̀kiri translation API
+ @app.route('/api/translate', methods=['POST'])
+ def translate_to_isekiri():
+     data = request.json
+     text = data.get('text', '')
+     # Translate each letter to an Isẹ̀kiri word, or keep it as-is if unknown
+     translated = ' '.join(isekiri_dict.get(ch.upper(), ch) for ch in text if ch.strip())
+     return jsonify({'isekiri': translated})
+
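+ # Example: POST {"text": "ab"} -> {"isekiri": "Àṣẹ Bí"}; whitespace is skipped
+ # and characters missing from isekiri_dict pass through unchanged.
+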
+ if __name__ == '__main__':
+     app.run(host='0.0.0.0', port=7860)
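+ # Note: 7860 is the default port a Hugging Face Space expects the app to serve on.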