import tensorflow as tf
from tensorflow.keras import layers, Model
# Activity labels — presumably the UCI HAR label set (6 classes); confirm
# against the dataset loader that feeds this model.
ACTIVITIES = [
'WALKING', 'WALKING_UPSTAIRS', 'WALKING_DOWNSTAIRS',
'SITTING', 'STANDING', 'LAYING'
]
# Number of output classes, derived from the label list above.
N_CLASSES = len(ACTIVITIES)
# One input window per sample: 128 timesteps × 9 IMU channels.
INPUT_SHAPE = (128, 9) # 128 timesteps, 9 channels
def build_imu_model(input_shape=INPUT_SHAPE, n_classes=N_CLASSES):
    """
    Compact 1D-CNN for IMU activity classification.

    Designed from scratch for pruning + INT8 quantization.

    Architecture rationale:
      - Conv1D blocks  -> extract local motion patterns per axis
      - BatchNorm      -> stable training, works well post-quantization
      - MaxPooling     -> downsample before deeper layers
      - GlobalAvgPool  -> replaces Flatten, much fewer params, less overfit
      - Dense head     -> final classifier

    Total params: ~85K (intentionally small for edge deployment)

    Args:
        input_shape: (timesteps, channels) window shape; defaults to (128, 9).
        n_classes: number of output activity classes.

    Returns:
        An uncompiled tf.keras Model mapping [N, *input_shape] to a
        [N, n_classes] softmax distribution.
    """
    inputs = tf.keras.Input(shape=input_shape, name='imu_input')

    # -- Block 1 --------------------------------------
    x = layers.Conv1D(32, kernel_size=5, padding='same',
                      activation='relu', name='conv1')(inputs)
    x = layers.BatchNormalization(name='bn1')(x)
    x = layers.MaxPooling1D(pool_size=2, name='pool1')(x)  # 128 -> 64
    x = layers.Dropout(0.1, name='drop1')(x)

    # -- Block 2 --------------------------------------
    x = layers.Conv1D(64, kernel_size=3, padding='same',
                      activation='relu', name='conv2')(x)
    x = layers.BatchNormalization(name='bn2')(x)
    x = layers.MaxPooling1D(pool_size=2, name='pool2')(x)  # 64 -> 32
    x = layers.Dropout(0.1, name='drop2')(x)

    # -- Block 3 --------------------------------------
    x = layers.Conv1D(128, kernel_size=3, padding='same',
                      activation='relu', name='conv3')(x)
    x = layers.BatchNormalization(name='bn3')(x)
    x = layers.MaxPooling1D(pool_size=2, name='pool3')(x)  # 32 -> 16
    # Fixed: was misnumbered 'drop4' (the sequence skipped drop3).
    # NOTE(review): if existing checkpoints/pruning configs reference the old
    # layer names, keep the old names instead — confirm before merging.
    x = layers.Dropout(0.1, name='drop3')(x)

    # -- Block 4 (deeper feature extraction) ----------
    x = layers.Conv1D(128, kernel_size=3, padding='same',
                      activation='relu', name='conv4')(x)
    x = layers.BatchNormalization(name='bn4')(x)

    # -- Pooling + Head -------------------------------
    x = layers.GlobalAveragePooling1D(name='gap')(x)  # [N, 128]
    x = layers.Dense(64, activation='relu', name='dense1')(x)
    # Fixed: was 'drop5'; renumbered to follow drop3 above.
    x = layers.Dropout(0.3, name='drop4')(x)
    outputs = layers.Dense(
        n_classes, activation='softmax', name='activity'
    )(x)

    model = Model(inputs, outputs, name='IMU_Classifier')
    return model
# ---------------------------------------------
# Quick sanity check when run directly
# ---------------------------------------------
if __name__ == '__main__':
    model = build_imu_model()
    model.summary()

    total = model.count_params()
    size_kb = total * 4 / 1024  # FP32 = 4 bytes per param

    # Restored banner/arrow characters that were garbled in the original
    # encoding; also repaired the final print, which was an unterminated
    # string split across two lines (a SyntaxError as committed).
    print("\n-- Model Info ------------------------")
    print(f"Total parameters : {total:,}")
    print(f"FP32 size estimate: {size_kb:.1f} KB")
    print(f"Input shape : {INPUT_SHAPE}")
    print(f"Output classes : {N_CLASSES} -> {ACTIVITIES}")
    print("\n[OK] Model built successfully.")