""" ISL Sign Language Translation - TechMatrix Solvers Initiative Core ISL Processing and Translation Models Developed by: TechMatrix Solvers Team - Abhay Gupta (Team Lead) - Kripanshu Gupta (Backend Developer) - Dipanshu Patel (UI/UX Designer) - Bhumika Patel (Deployment & Female Presenter) Institution: Shri Ram Group of Institutions """ import keras import numpy as np import cv2 import torch try: from scipy.ndimage.filters import gaussian_filter except ImportError: from scipy.ndimage import gaussian_filter import math import os from skimage.measure import label import pose_utils as utils # Simple TorchModuleWrapper replacement for compatibility class TorchModuleWrapper: """ Simple wrapper to make PyTorch models compatible with Keras-style usage """ def __init__(self, torch_model): self.torch_model = torch_model self.trainable = False def __call__(self, x): """Forward pass through the PyTorch model""" return self.torch_model(x) def eval(self): """Set model to evaluation mode""" if hasattr(self.torch_model, 'eval'): self.torch_model.eval() def train(self, mode=True): """Set model to train mode""" if hasattr(self.torch_model, 'train'): self.torch_model.train(mode) class ISLPoseEstimator(keras.Model): """ ISL Pose Estimation Model combining body and hand pose detection Developed by TechMatrix Solvers for accurate sign language recognition """ def __init__(self, pytorch_body_model, pytorch_hand_model): super().__init__() self.pytorch_body_wrapper = TorchModuleWrapper(pytorch_body_model) self.pytorch_body_wrapper.trainable = False self.pytorch_hand_wrapper = TorchModuleWrapper(pytorch_hand_model) self.pytorch_hand_wrapper.trainable = False self.num_body_joints = 26 self.num_body_pafs = 52 def call(self, input_image): """ Process input image and extract pose information Args: input_image: Input image tensor Returns: tuple: (body_candidates, body_subset, hand_peaks) """ candidate, subset = self.extract_body_pose(input_image.cpu().numpy()) hand_regions = utils.detect_hand_regions(candidate, subset, input_image.cpu().numpy()) all_hand_keypoints = [] for x, y, w, is_left in hand_regions: hand_peaks = self.extract_hand_pose(input_image.cpu().numpy()[y:y+w, x:x+w, :]) hand_peaks[:, 0] = np.where(hand_peaks[:, 0] == 0, hand_peaks[:, 0], hand_peaks[:, 0] + x) hand_peaks[:, 1] = np.where(hand_peaks[:, 1] == 0, hand_peaks[:, 1], hand_peaks[:, 1] + y) all_hand_keypoints.append(hand_peaks) return candidate, subset, all_hand_keypoints def extract_body_pose(self, input_image): """ Extract body pose keypoints from input image Args: input_image: Input image array Returns: tuple: (candidates, subset) containing pose information """ model_type = 'body25' scale_factors = [0.5] box_size = 368 stride = 8 padding_value = 128 threshold_1 = 0.1 threshold_2 = 0.05 # Calculate scale multipliers multiplier = [x * box_size / input_image.shape[0] for x in scale_factors] heatmap_average = np.zeros((input_image.shape[0], input_image.shape[1], self.num_body_joints)) paf_average = np.zeros((input_image.shape[0], input_image.shape[1], self.num_body_pafs)) for m in range(len(multiplier)): scale = multiplier[m] test_image = cv2.resize(input_image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) padded_image, pad = utils.pad_image_corner(test_image, stride, padding_value) # Prepare image tensor image_tensor = np.transpose(np.float32(padded_image[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 image_tensor = np.ascontiguousarray(image_tensor) # Convert to PyTorch tensor data = torch.from_numpy(image_tensor).float() if 

class ISLPoseEstimator(keras.Model):
    """
    ISL Pose Estimation Model combining body and hand pose detection.
    Developed by TechMatrix Solvers for accurate sign language recognition.
    """

    def __init__(self, pytorch_body_model, pytorch_hand_model):
        super().__init__()
        self.pytorch_body_wrapper = TorchModuleWrapper(pytorch_body_model)
        self.pytorch_body_wrapper.trainable = False
        self.pytorch_hand_wrapper = TorchModuleWrapper(pytorch_hand_model)
        self.pytorch_hand_wrapper.trainable = False
        self.num_body_joints = 26
        self.num_body_pafs = 52

    def call(self, input_image):
        """
        Process an input image and extract pose information.

        Args:
            input_image: Input image tensor

        Returns:
            tuple: (body_candidates, body_subset, hand_peaks)
        """
        image = input_image.cpu().numpy()
        candidate, subset = self.extract_body_pose(image)
        hand_regions = utils.detect_hand_regions(candidate, subset, image)

        all_hand_keypoints = []
        for x, y, w, is_left in hand_regions:
            hand_peaks = self.extract_hand_pose(image[y:y + w, x:x + w, :])
            # Shift detected keypoints back into full-image coordinates,
            # leaving (0, 0) entries (missed detections) untouched
            hand_peaks[:, 0] = np.where(hand_peaks[:, 0] == 0, hand_peaks[:, 0], hand_peaks[:, 0] + x)
            hand_peaks[:, 1] = np.where(hand_peaks[:, 1] == 0, hand_peaks[:, 1], hand_peaks[:, 1] + y)
            all_hand_keypoints.append(hand_peaks)

        return candidate, subset, all_hand_keypoints

    def extract_body_pose(self, input_image):
        """
        Extract body pose keypoints from an input image.

        Args:
            input_image: Input image array

        Returns:
            tuple: (candidate, subset) containing pose information
        """
        model_type = 'body25'
        scale_factors = [0.5]
        box_size = 368
        stride = 8
        padding_value = 128
        threshold_1 = 0.1
        threshold_2 = 0.05

        # Calculate scale multipliers relative to the network input size
        multiplier = [x * box_size / input_image.shape[0] for x in scale_factors]
        heatmap_average = np.zeros((input_image.shape[0], input_image.shape[1], self.num_body_joints))
        paf_average = np.zeros((input_image.shape[0], input_image.shape[1], self.num_body_pafs))

        for m in range(len(multiplier)):
            scale = multiplier[m]
            test_image = cv2.resize(input_image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            padded_image, pad = utils.pad_image_corner(test_image, stride, padding_value)

            # Prepare image tensor: NHWC -> NCHW, normalized to roughly [-0.5, 0.5]
            image_tensor = np.transpose(np.float32(padded_image[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
            image_tensor = np.ascontiguousarray(image_tensor)

            # Convert to a PyTorch tensor
            data = torch.from_numpy(image_tensor).float()
            if torch.cuda.is_available():
                data = data.cuda()
            with torch.no_grad():
                stage6_L1, stage6_L2 = self.pytorch_body_wrapper(data)
            stage6_L1 = stage6_L1.cpu().numpy()
            stage6_L2 = stage6_L2.cpu().numpy()

            # Process heatmaps
            heatmap = np.transpose(np.squeeze(stage6_L2), (1, 2, 0))
            heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:padded_image.shape[0] - pad[2], :padded_image.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (input_image.shape[1], input_image.shape[0]), interpolation=cv2.INTER_CUBIC)

            # Process PAFs (Part Affinity Fields)
            paf = np.transpose(np.squeeze(stage6_L1), (1, 2, 0))
            paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            paf = paf[:padded_image.shape[0] - pad[2], :padded_image.shape[1] - pad[3], :]
            paf = cv2.resize(paf, (input_image.shape[1], input_image.shape[0]), interpolation=cv2.INTER_CUBIC)

            heatmap_average += heatmap / len(multiplier)
            paf_average += paf / len(multiplier)

        # Extract peaks from heatmaps via non-maximum suppression
        all_peaks = []
        peak_counter = 0
        for part in range(self.num_body_joints - 1):
            original_map = heatmap_average[:, :, part]
            smoothed_heatmap = gaussian_filter(original_map, sigma=3)

            # Find local maxima by comparing each pixel with its four neighbours
            left_map = np.zeros(smoothed_heatmap.shape)
            left_map[1:, :] = smoothed_heatmap[:-1, :]
            right_map = np.zeros(smoothed_heatmap.shape)
            right_map[:-1, :] = smoothed_heatmap[1:, :]
            up_map = np.zeros(smoothed_heatmap.shape)
            up_map[:, 1:] = smoothed_heatmap[:, :-1]
            down_map = np.zeros(smoothed_heatmap.shape)
            down_map[:, :-1] = smoothed_heatmap[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (smoothed_heatmap >= left_map,
                 smoothed_heatmap >= right_map,
                 smoothed_heatmap >= up_map,
                 smoothed_heatmap >= down_map,
                 smoothed_heatmap > threshold_1)
            )
            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))
            peaks_with_score = [x + (original_map[x[1], x[0]],) for x in peaks]
            peak_id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        # Define limb connections for the body25 model
        if model_type == 'body25':
            limb_sequence = [
                [1, 0], [1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 8], [8, 9], [9, 10],
                [10, 11], [8, 12], [12, 13], [13, 14], [0, 15], [0, 16], [15, 17], [16, 18],
                [11, 24], [11, 22], [14, 21], [14, 19], [22, 23], [19, 20]
            ]
            map_index = [
                [30, 31], [14, 15], [16, 17], [18, 19], [22, 23], [24, 25], [26, 27], [0, 1], [6, 7],
                [2, 3], [4, 5], [8, 9], [10, 11], [12, 13], [32, 33], [34, 35], [36, 37], [38, 39],
                [50, 51], [46, 47], [44, 45], [40, 41], [48, 49], [42, 43]
            ]

        # Find connections between body parts by scoring candidate limbs
        connection_all = []
        special_k = []
        mid_num = 10

        for k in range(len(map_index)):
            score_mid = paf_average[:, :, map_index[k]]
            candA = all_peaks[limb_sequence[k][0]]
            candB = all_peaks[limb_sequence[k][1]]
            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limb_sequence[k]

            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                        norm = max(0.001, norm)
                        vec = np.divide(vec, norm)

                        # Sample the PAF along the line joining the two candidates
                        startend = list(zip(
                            np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num)
                        ))
                        vec_x = np.array([
                            score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]
                            for I in range(len(startend))
                        ])
                        vec_y = np.array([
                            score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]
                            for I in range(len(startend))
                        ])

                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = (sum(score_midpts) / len(score_midpts)
                                                 + min(0.5 * input_image.shape[0] / norm - 1, 0))
                        criterion1 = len(np.nonzero(score_midpts > threshold_2)[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append([
                                i, j, score_with_dist_prior,
                                score_with_dist_prior + candA[i][2] + candB[j][2]
                            ])

                # Greedily keep the highest-scoring, non-conflicting connections
                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break
                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        # Assemble per-person subsets from the pairwise connections
        subset = -1 * np.ones((0, self.num_body_joints + 1))
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in range(len(map_index)):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(limb_sequence[k])

                for i in range(len(connection_all[k])):
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:
                        # Two partial subsets share this connection: merge them if disjoint
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif not found and k < self.num_body_joints - 2:
                        # Start a new subset for an unmatched connection
                        row = -1 * np.ones(self.num_body_joints + 1)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        # Filter out low-quality detections
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)

        return candidate, subset

    def extract_hand_pose(self, input_image):
        """
        Extract hand pose keypoints from an input image region.

        Args:
            input_image: Cropped hand region image

        Returns:
            numpy.ndarray: Hand keypoint coordinates
        """
        scale_factors = [0.5, 1.0, 1.5, 2.0]
        box_size = 368
        stride = 8
        padding_value = 128
        threshold = 0.05

        multiplier = [x * box_size / input_image.shape[0] for x in scale_factors]
        heatmap_average = np.zeros((input_image.shape[0], input_image.shape[1], 22))

        for m in range(len(multiplier)):
            scale = multiplier[m]
            test_image = cv2.resize(input_image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            padded_image, pad = utils.pad_image_corner(test_image, stride, padding_value)

            # Prepare image tensor: NHWC -> NCHW, normalized to roughly [-0.5, 0.5]
            image_tensor = np.transpose(np.float32(padded_image[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
            image_tensor = np.ascontiguousarray(image_tensor)

            data = torch.from_numpy(image_tensor).float()
            if torch.cuda.is_available():
                data = data.cuda()
            with torch.no_grad():
                output = self.pytorch_hand_wrapper(data).cpu().numpy()

            # Process heatmaps
            heatmap = np.transpose(np.squeeze(output), (1, 2, 0))
            heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:padded_image.shape[0] - pad[2], :padded_image.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (input_image.shape[1], input_image.shape[0]), interpolation=cv2.INTER_CUBIC)

            heatmap_average += heatmap / len(multiplier)

        # Extract the 21 hand keypoints, one per heatmap channel
        all_peaks = []
        for part in range(21):
            original_map = heatmap_average[:, :, part]
            smoothed_heatmap = gaussian_filter(original_map, sigma=3)
            binary = np.ascontiguousarray(smoothed_heatmap > threshold, dtype=np.uint8)

            # Keypoint missed: record a (0, 0) placeholder
            if np.sum(binary) == 0:
                all_peaks.append([0, 0])
                continue

            # Keep only the connected component with the highest total response
            label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
            max_index = np.argmax([np.sum(original_map[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
            label_img[label_img != max_index] = 0
            original_map[label_img == 0] = 0

            y, x = utils.find_array_maximum(original_map)
            all_peaks.append([x, y])

        return np.array(all_peaks)
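
# A minimal usage sketch for ISLPoseEstimator. The `load_body25_model` and
# `load_hand_model` arguments are hypothetical loader callables for the
# pretrained body/hand networks (weight loading is not shown in this module),
# and the sample file name is likewise illustrative.
def _demo_pose_estimator(load_body25_model, load_hand_model):
    estimator = ISLPoseEstimator(load_body25_model(), load_hand_model())
    frame = cv2.imread('sign_frame.jpg')  # hypothetical sample frame
    # Invoke call() directly in this sketch; it expects an input with a .cpu() method
    candidate, subset, hand_keypoints = estimator.call(torch.from_numpy(frame))
    # candidate: (num_peaks, 4) array of [x, y, score, peak_id] body keypoints
    # subset: one row per detected person, indexing into `candidate`
    # hand_keypoints: list of (21, 2) arrays in full-image coordinates
    return candidate, subset, hand_keypoints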

class ISLTranslationModel(keras.Model):
    """
    Complete ISL Translation Model combining pose estimation and LSTM translation.
    Developed by TechMatrix Solvers for end-to-end sign language translation.
    """

    def __init__(self, body_model, hand_model, translation_model):
        super().__init__()
        self.pytorch_body_wrapper = TorchModuleWrapper(body_model)
        self.pytorch_body_wrapper.trainable = False
        self.pytorch_hand_wrapper = TorchModuleWrapper(hand_model)
        self.pytorch_hand_wrapper.trainable = False
        self.num_body_joints = 26
        self.num_body_pafs = 52
        self.model_type = 'body25'
        self.translation_network = translation_model

    def call(self, frame_sequence):
        """
        Process a sequence of frames and return a translation prediction.

        Args:
            frame_sequence: Sequence of video frames

        Returns:
            Translation prediction probabilities
        """
        window_size = 20
        feature_sequence = []
        blank_frame = np.zeros(156)

        for frame in frame_sequence.cpu():
            image = frame.cpu().numpy()

            # Extract pose features from the current frame
            candidate, subset = self.extract_body_pose(image)
            hand_regions = utils.detect_hand_regions(candidate, subset, image)

            all_hand_keypoints = []
            for x, y, w, is_left in hand_regions:
                peaks = self.extract_hand_pose(image[y:y + w, x:x + w, :])
                peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
                peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
                all_hand_keypoints.append(peaks)

            # Extract structured pose data
            body_circles, body_sticks = utils.extract_body_pose_data(candidate, subset, self.model_type)
            hand_edges, hand_peaks = utils.extract_hand_pose_data(all_hand_keypoints)

            # Convert to a feature vector
            feature_vector = self.create_feature_vector(body_circles, hand_peaks)
            feature_sequence.append(feature_vector)

        # Pad or truncate the sequence to the fixed window size
        while len(feature_sequence) < window_size:
            feature_sequence.append(blank_frame)
        feature_sequence = feature_sequence[:window_size]

        # Run the translation model on the (1, 20, 156) feature tensor
        return self.translation_network(np.array(feature_sequence).reshape(1, window_size, 156))
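
    # Feature-vector layout (156 values per frame), as assembled by
    # create_feature_vector below:
    #   15 body keypoint x-coordinates
    # + 15 body keypoint y-coordinates
    # + per hand (x2): 21 x-coordinates + 21 y-coordinates + 21 confidences
    # = 30 + 2 * 63 = 156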
    def create_feature_vector(self, body_circles, hand_peaks):
        """
        Create a feature vector from pose data.

        Args:
            body_circles: Body keypoint coordinates
            hand_peaks: Hand keypoint data

        Returns:
            numpy.ndarray: 156-dimensional feature vector
        """
        features = []

        # Body keypoint x-coordinates (15 points)
        for idx in range(15):
            if idx < len(body_circles):
                features.append(body_circles[idx][0])
            else:
                features.append(0)

        # Body keypoint y-coordinates (15 points)
        for idx in range(15):
            if idx < len(body_circles):
                features.append(body_circles[idx][1])
            else:
                features.append(0)

        # Hand features for both hands; a missing hand pads with zeros
        for hand_idx in range(2):
            hand = hand_peaks[hand_idx] if hand_idx < len(hand_peaks) else []

            # Hand x-coordinates (21 points)
            for idx in range(21):
                features.append(float(hand[idx][0]) if idx < len(hand) else 0)

            # Hand y-coordinates (21 points)
            for idx in range(21):
                features.append(float(hand[idx][1]) if idx < len(hand) else 0)

            # Hand keypoint confidences (21 points)
            for idx in range(21):
                features.append(float(hand[idx][2]) if idx < len(hand) else 0)

        return np.array(features)
    def extract_body_pose(self, input_image):
        """Extract body pose - same implementation as ISLPoseEstimator."""
        # Delegate to ISLPoseEstimator so both classes share one implementation;
        # the estimator's wrappers are swapped out for this model's own.
        pose_estimator = ISLPoseEstimator(None, None)
        pose_estimator.pytorch_body_wrapper = self.pytorch_body_wrapper
        pose_estimator.num_body_joints = self.num_body_joints
        pose_estimator.num_body_pafs = self.num_body_pafs
        return pose_estimator.extract_body_pose(input_image)

    def extract_hand_pose(self, input_image):
        """Extract hand pose - same implementation as ISLPoseEstimator."""
        # Delegate to ISLPoseEstimator so both classes share one implementation
        pose_estimator = ISLPoseEstimator(None, None)
        pose_estimator.pytorch_hand_wrapper = self.pytorch_hand_wrapper
        return pose_estimator.extract_hand_pose(input_image)
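
# An end-to-end usage sketch (illustrative only). The loader callables and the
# (frames, height, width, channels) clip shape are assumptions about the
# surrounding pipeline, not guarantees made by this module.
def _demo_translation_pipeline(load_body25_model, load_hand_model, load_lstm_model):
    model = ISLTranslationModel(load_body25_model(), load_hand_model(), load_lstm_model())
    # A clip of up to 20 RGB frames; shorter clips are zero-padded inside call()
    clip = torch.zeros(20, 368, 368, 3)
    probabilities = model.call(clip)  # output size depends on the LSTM head's vocabulary
    return probabilities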