RunningPie committed on
Commit
d354874
·
verified ·
1 Parent(s): 54cf1e8

Upload folder using huggingface_hub

Browse files
infrerence_examples/keras_inference_example.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ import pickle
3
+ import numpy as np
4
+ import pandas as pd
5
+
6
def load_model_and_processor(model_path='model.h5', processor_path='processor.pkl'):
    """Load the trained Keras model and its preprocessing object.

    Args:
        model_path: Path to the saved Keras model file. Defaults to
            'model.h5' (the original hard-coded path) so existing
            callers keep working unchanged.
        processor_path: Path to the pickled SignLanguageProcessor.
            Defaults to 'processor.pkl'.

    Returns:
        tuple: (model, processor)

    Note:
        pickle.load can execute arbitrary code from the file — only load
        processor files from a trusted source.
    """
    # Load the complete model
    model = tf.keras.models.load_model(model_path)

    # Load the processor; `with` guarantees the file handle is closed.
    with open(processor_path, 'rb') as f:
        processor = pickle.load(f)

    return model, processor
16
+
17
def predict_sign(model, processor, landmark_data):
    """Run a single sign prediction on raw landmark data.

    Args:
        model: Loaded Keras model.
        processor: SignLanguageProcessor instance used for preprocessing.
        landmark_data: DataFrame with columns ['frame', 'row_id', 'x', 'y', 'z'].

    Returns:
        tuple: (predicted sign name or class index, confidence).
        Returns (None, 0.0) when preprocessing yields no usable samples.
    """
    # Turn raw landmarks into model-ready features.
    features, _ = processor.process_dataset(landmark_data)

    # Guard clause: nothing usable came out of preprocessing.
    if len(features) == 0:
        return None, 0.0

    # Score all samples, then report only the first one's result
    # (matches single-sign usage).
    scores = model.predict(features)
    first_row = scores[0]
    top_index = np.argmax(first_row)
    top_score = np.max(first_row)

    # Map the class index back to a human-readable sign when the
    # processor carries a mapping.
    if hasattr(processor, 'index_to_sign'):
        return processor.index_to_sign[top_index], top_score

    return top_index, top_score
47
+
48
+ # Example usage
49
def _main():
    """Smoke-test entry point: load artifacts and report model metadata."""
    model, processor = load_model_and_processor()

    # Example landmark data (replace with your actual data):
    # landmark_data = pd.read_csv('your_landmark_data.csv')
    # predicted_sign, confidence = predict_sign(model, processor, landmark_data)
    # print(f"Predicted sign: {predicted_sign}, Confidence: {confidence:.3f}")

    print("Model and processor loaded successfully!")
    print(f"Model input shape: {model.input_shape}")
    print(f"Model output shape: {model.output_shape}")
    print(f"Number of classes: {processor.sign_count}")


if __name__ == "__main__":
    _main()
infrerence_examples/tflite_inference_example.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ import numpy as np
3
+ import pickle
4
+
5
class TFLiteSignLanguagePredictor:
    """TensorFlow Lite predictor for sign language recognition."""

    def __init__(self, tflite_model_path, processor_path):
        """Set up the TFLite interpreter and preprocessing object.

        Args:
            tflite_model_path: Path to the .tflite model file.
            processor_path: Path to the pickled processor (processor.pkl).
        """
        # Build the interpreter and allocate its tensors up front so
        # predict() only has to set inputs and invoke.
        interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
        interpreter.allocate_tensors()
        self.interpreter = interpreter

        # Cache tensor metadata for fast access during inference.
        self.input_details = interpreter.get_input_details()
        self.output_details = interpreter.get_output_details()

        # Load the preprocessing object.
        with open(processor_path, 'rb') as f:
            self.processor = pickle.load(f)

        print("TFLite model loaded successfully!")
        print(f"Input shape: {self.input_details[0]['shape']}")
        print(f"Output shape: {self.output_details[0]['shape']}")

    def predict(self, landmark_data):
        """Predict a sign from landmark data.

        Args:
            landmark_data: DataFrame with landmark data, or an already
                preprocessed numeric array.

        Returns:
            tuple: (predicted_class, confidence, all_probabilities).
            Returns (None, 0.0, None) when preprocessing yields nothing.
        """
        # A 'columns' attribute marks raw DataFrame input that still
        # needs preprocessing; anything else is treated as a ready array.
        if hasattr(landmark_data, 'columns'):
            features, _ = self.processor.process_dataset(landmark_data)
            if len(features) == 0:
                return None, 0.0, None
            # Keep only the first sample, with a batch dimension of 1.
            batch = features[0:1].astype(np.float32)
        else:
            batch = landmark_data.astype(np.float32)
            # Add the batch dimension when a single sample was passed.
            if len(batch.shape) == 2:
                batch = np.expand_dims(batch, 0)

        # Run inference through the interpreter.
        in_index = self.input_details[0]['index']
        out_index = self.output_details[0]['index']
        self.interpreter.set_tensor(in_index, batch)
        self.interpreter.invoke()
        raw_output = self.interpreter.get_tensor(out_index)

        # Normalize the logits into probabilities and pick the winner.
        probabilities = tf.nn.softmax(raw_output[0]).numpy()
        predicted_class = np.argmax(probabilities)
        confidence = probabilities[predicted_class]

        # Prefer a human-readable sign name when a mapping is available.
        if hasattr(self.processor, 'index_to_sign'):
            sign_name = self.processor.index_to_sign[predicted_class]
            return sign_name, confidence, probabilities

        return predicted_class, confidence, probabilities

    def predict_batch(self, batch_data):
        """Predict multiple samples (if model supports batch inference)."""
        # Note: some TFLite models only support single-sample inference;
        # this template simply loops one sample at a time.
        return [self.predict(batch_data[i:i + 1]) for i in range(len(batch_data))]
79
+
80
+ # Example usage
81
if __name__ == "__main__":
    # Initialize the predictor with one of the exported variants:
    # model.tflite, model_optimized.tflite, or model_quantized.tflite.
    predictor = TFLiteSignLanguagePredictor(
        tflite_model_path="model_optimized.tflite",
        processor_path="processor.pkl",
    )

    # Example prediction (replace with your actual data):
    # landmark_data = pd.read_csv("your_landmark_data.csv")
    # predicted_sign, confidence, probabilities = predictor.predict(landmark_data)
    # print(f"Predicted: {predicted_sign}, Confidence: {confidence:.3f}")

    print("TFLite predictor ready for inference!")
94
+
95
+ # Performance comparison function
96
def compare_model_performance(keras_model, tflite_paths, test_data):
    """Compare inference latency between a Keras model and TFLite variants.

    Args:
        keras_model: Loaded Keras model (runs the whole batch in one call).
        tflite_paths: Mapping of {variant_name: path_to_tflite_file}.
        test_data: Array of samples, indexable as test_data[i:i+1].

    Prints the Keras timing, then each TFLite variant's timing together
    with its speedup relative to Keras.
    """
    import time

    # Fixed: "\\n" printed a literal backslash-n instead of a newline.
    print("\n🏃 Performance Comparison:")
    print("-" * 50)

    # Time the Keras model over the whole batch at once.
    start_time = time.time()
    keras_model.predict(test_data)
    keras_time = time.time() - start_time
    print(f"Keras Model: {keras_time:.4f}s for {len(test_data)} samples")

    # Time each TFLite variant, one sample per invoke() since some
    # TFLite models only support single-sample inference.
    for name, path in tflite_paths.items():
        interpreter = tf.lite.Interpreter(model_path=path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        start_time = time.time()
        for i in range(len(test_data)):
            sample = test_data[i:i + 1].astype(np.float32)
            interpreter.set_tensor(input_details[0]['index'], sample)
            interpreter.invoke()
            _ = interpreter.get_tensor(output_details[0]['index'])
        tflite_time = time.time() - start_time

        # Guard against division by zero on extremely fast runs.
        speedup = keras_time / tflite_time if tflite_time > 0 else 0
        print(f"TFLite {name}: {tflite_time:.4f}s ({speedup:.1f}x {'faster' if speedup > 1 else 'slower'})")
keras_models/config.json ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "sign_language_recognition",
3
+ "framework": "tensorflow",
4
+ "max_len": 384,
5
+ "dim": 192,
6
+ "num_classes": 60,
7
+ "dropout_start_epoch": 15,
8
+ "batch_size": 32,
9
+ "learning_rate": 0.0005,
10
+ "weight_decay": 0.1,
11
+ "epochs_trained": 200,
12
+ "input_shape": [
13
+ null,
14
+ 384,
15
+ 708
16
+ ],
17
+ "output_shape": [
18
+ null,
19
+ 60
20
+ ],
21
+ "total_params": 1763418,
22
+ "sign_classes": {
23
+ "go": 0,
24
+ "hot": 1,
25
+ "dad": 2,
26
+ "yes": 3,
27
+ "no": 4,
28
+ "sick": 5,
29
+ "mom": 6,
30
+ "cut": 7,
31
+ "stuck": 8,
32
+ "outside": 9,
33
+ "talk": 10,
34
+ "arm": 11,
35
+ "up": 12,
36
+ "person": 13,
37
+ "can": 14,
38
+ "close": 15,
39
+ "face": 16,
40
+ "head": 17,
41
+ "mad": 18,
42
+ "wait": 19,
43
+ "eye": 20,
44
+ "hide": 21,
45
+ "home": 22,
46
+ "why": 23,
47
+ "quiet": 24,
48
+ "will": 25,
49
+ "glasswindow": 26,
50
+ "not": 27,
51
+ "fireman": 28,
52
+ "down": 29,
53
+ "child": 30,
54
+ "hesheit": 31,
55
+ "find": 32,
56
+ "jump": 33,
57
+ "where": 34,
58
+ "room": 35,
59
+ "look": 36,
60
+ "high": 37,
61
+ "hear": 38,
62
+ "now": 39,
63
+ "time": 40,
64
+ "open": 41,
65
+ "fall": 42,
66
+ "owie": 43,
67
+ "drop": 44,
68
+ "man": 45,
69
+ "give": 46,
70
+ "car": 47,
71
+ "fast": 48,
72
+ "bad": 49,
73
+ "have": 50,
74
+ "stairs": 51,
75
+ "who": 52,
76
+ "cry": 53,
77
+ "loud": 54,
78
+ "haveto": 55,
79
+ "water": 56,
80
+ "see": 57,
81
+ "police": 58,
82
+ "touch": 59
83
+ },
84
+ "created_at": "2025-07-10T07:14:08.417702"
85
+ }
keras_models/model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efe7b6664db32251bbcef3851cb32d154538b78f4aa2287695bf94affb4f547b
3
+ size 21418472
keras_models/model.weights.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4015ffa390b55f42901ba7df9be1d585770251e10b8f3112fab9bc8faee1d4c1
3
+ size 21399464
tflite_models/model.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6058819febc08655877b6c6c5487fc69440c7d5ad60a6953fe26a462b250dfdc
3
+ size 7068388
tflite_models/model_optimized.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b47c15a4a6c98e456f500ce06ab8ce5e3662ba99356d48d76fd6b4f04ea283e8
3
+ size 1938536
tflite_models/model_quantized.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05508b611536d6de90b3973d5687a1e88feb6ff92a70629fe2196e6f13c71f0c
3
+ size 2020008
tflite_models/tflite_metadata.json ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_type": "sign_language_recognition",
3
+ "framework": "tensorflow_lite",
4
+ "input_shape": [
5
+ null,
6
+ 384,
7
+ 708
8
+ ],
9
+ "output_shape": [
10
+ null,
11
+ 60
12
+ ],
13
+ "num_classes": 60,
14
+ "models": {
15
+ "basic": {
16
+ "filename": "model.tflite",
17
+ "size_bytes": 7068388,
18
+ "size_mb": 6.740940093994141,
19
+ "description": "Basic TFLite model with no optimizations. Best accuracy, larger size."
20
+ },
21
+ "optimized": {
22
+ "filename": "model_optimized.tflite",
23
+ "size_bytes": 1938536,
24
+ "size_mb": 1.8487319946289062,
25
+ "description": "Optimized with dynamic range quantization. Good balance of size and accuracy."
26
+ },
27
+ "quantized": {
28
+ "filename": "model_quantized.tflite",
29
+ "size_bytes": 2020008,
30
+ "size_mb": 1.9264297485351562,
31
+ "description": "Integer quantized model. Smallest size, may have slight accuracy loss."
32
+ }
33
+ }
34
+ }