github-actions[bot] committed on
Commit
c61ba70
·
1 Parent(s): f392f42

deploy: backend bundle from 9c864b98f64c05462a27b71841ae97fb4451e449

Browse files
Code/Model/data/models/version_registry.json CHANGED
@@ -27,7 +27,7 @@
27
  "model_type": "dl_multihead",
28
  "model_class": "CardAuthModel",
29
  "backbone": "resnet50+efficientnet_b7",
30
- "status": "production",
31
  "accuracy": 0.7741935483870968,
32
  "f1_score": 0.631578947368421,
33
  "roc_auc": 0.6333333333333333,
@@ -59,9 +59,29 @@
59
  "svdd_threshold": 0.7524242424242424,
60
  "back_threshold": 0.6534343434343434,
61
  "trained_at": "2026-02-15T07:30:42.692244"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  }
63
  ]
64
  },
65
- "production_model": "20260211_155409",
66
  "staging_model": "20260210_182731"
67
  }
 
27
  "model_type": "dl_multihead",
28
  "model_class": "CardAuthModel",
29
  "backbone": "resnet50+efficientnet_b7",
30
+ "status": "retired",
31
  "accuracy": 0.7741935483870968,
32
  "f1_score": 0.631578947368421,
33
  "roc_auc": 0.6333333333333333,
 
59
  "svdd_threshold": 0.7524242424242424,
60
  "back_threshold": 0.6534343434343434,
61
  "trained_at": "2026-02-15T07:30:42.692244"
62
+ },
63
+ {
64
+ "version": "20260216_091800",
65
+ "filename": "cardauth_multihead_20260216_091800_best.pth",
66
+ "model_type": "multi_head",
67
+ "backbone": "resnet50+efficientnet_b7",
68
+ "status": "production",
69
+ "accuracy": 1.0,
70
+ "f1_score": 1.0,
71
+ "roc_auc": 1.0,
72
+ "n_features": "end-to-end",
73
+ "dataset_size": 19113,
74
+ "total_params": 95402258,
75
+ "trainable_params": 81637498,
76
+ "best_epoch": 26,
77
+ "svdd_threshold": 0.08919191919191918,
78
+ "back_threshold": 0.0198989898989899,
79
+ "pipeline_type": "dl",
80
+ "trained_at": "2026-02-17T15:15:06.491447",
81
+ "promoted_at": "2026-02-18T00:00:00.000000"
82
  }
83
  ]
84
  },
85
+ "production_model": "20260216_091800",
86
  "staging_model": "20260210_182731"
87
  }
Code/Model/src/dl/backbone.py CHANGED
@@ -80,6 +80,19 @@ class ResNet50Backbone(nn.Module):
80
 
81
  return x
82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
83
  def get_trainable_params(self):
84
  """Get count of trainable vs frozen parameters."""
85
  trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
 
80
 
81
  return x
82
 
83
def get_layer_groups(self):
    """
    Return parameter groups split by layer depth for discriminative fine-tuning.

    Only layer3 and layer4 are returned; earlier layers are assumed frozen
    (requires_grad=False) and any frozen parameters inside layer3/layer4 are
    filtered out by the requires_grad check.

    Returns:
        List of 2 parameter lists: [layer3_params, layer4_params]
    """
    groups = []
    for layer in (self.layer3, self.layer4):
        groups.append([p for p in layer.parameters() if p.requires_grad])
    return groups
95
+
96
  def get_trainable_params(self):
97
  """Get count of trainable vs frozen parameters."""
98
  trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
Code/Model/src/dl/efficientnet.py CHANGED
@@ -69,6 +69,22 @@ class EfficientNetB7Backbone(nn.Module):
69
  x = torch.flatten(x, 1)
70
  return x
71
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  def get_trainable_params(self):
73
  """Get count of trainable vs frozen parameters."""
74
  trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
 
69
  x = torch.flatten(x, 1)
70
  return x
71
 
72
def get_layer_groups(self):
    """
    Return parameter groups split by block depth for discriminative fine-tuning.

    Blocks 0-5 are assumed frozen and are excluded entirely; block 6 forms the
    first group and everything from block 7 onward forms the second. Frozen
    parameters inside the later blocks are filtered by the requires_grad check.

    Returns:
        List of 2 parameter lists: [block6_params, block7_params]
    """
    early, late = [], []
    for idx, block in enumerate(self.features):
        if idx < 6:
            continue  # frozen blocks are excluded
        bucket = early if idx == 6 else late
        bucket.extend(p for p in block.parameters() if p.requires_grad)
    return [early, late]
87
+
88
  def get_trainable_params(self):
89
  """Get count of trainable vs frozen parameters."""
90
  trainable = sum(p.numel() for p in self.parameters() if p.requires_grad)
Code/Model/src/dl/model.py CHANGED
@@ -250,26 +250,40 @@ class CardAuthModel(nn.Module):
250
 
251
  def get_param_groups(self, backbone_lr: float = 1e-4, head_lr: float = 1e-3):
252
  """
253
- Get parameter groups with differential learning rates.
 
 
 
 
 
254
 
255
  Args:
256
- backbone_lr: Learning rate for backbone parameters
257
  head_lr: Learning rate for head parameters
258
 
259
  Returns:
260
  List of parameter group dicts for optimizer
261
  """
262
- backbone_params = list(self.resnet.parameters()) + list(self.efficientnet.parameters())
 
 
 
 
 
263
  head_params = (
264
  list(self.pokemon_head.parameters())
265
  + list(self.back_auth_head.parameters())
266
  + list(self.svdd_heads.parameters())
267
  )
268
 
269
- return [
270
- {"params": [p for p in backbone_params if p.requires_grad], "lr": backbone_lr},
271
- {"params": head_params, "lr": head_lr},
272
- ]
 
 
 
 
273
 
274
 
275
  # Backward-compatible alias
 
250
 
251
def get_param_groups(self, backbone_lr: float = 1e-4, head_lr: float = 1e-3):
    """
    Build optimizer parameter groups with discriminative (layer-wise) LRs.

    Up to 3 groups are produced:
      - early trainable backbone layers (resnet layer3 / efficientnet block6)
        at backbone_lr * 0.1
      - late trainable backbone layers (resnet layer4 / efficientnet block7+)
        at backbone_lr
      - all head parameters at head_lr

    Backbone groups that contain no trainable parameters are omitted so the
    optimizer never receives an empty group.

    Args:
        backbone_lr: Learning rate for the late backbone layers.
        head_lr: Learning rate for the head parameters.

    Returns:
        List of parameter group dicts for the optimizer.
    """
    early, late = [], []
    for backbone in (self.resnet, self.efficientnet):
        first, second = backbone.get_layer_groups()
        early += first
        late += second

    heads = (
        list(self.pokemon_head.parameters())
        + list(self.back_auth_head.parameters())
        + list(self.svdd_heads.parameters())
    )

    param_groups = []
    for params, lr in ((early, backbone_lr * 0.1), (late, backbone_lr)):
        if params:
            param_groups.append({"params": params, "lr": lr})
    param_groups.append({"params": heads, "lr": head_lr})

    return param_groups
287
 
288
 
289
  # Backward-compatible alias
Code/Model/src/dl/transforms.py CHANGED
@@ -67,6 +67,30 @@ def get_eval_transforms(image_size: int = 224):
67
  ])
68
 
69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  def denormalize(tensor, mean=None, std=None):
71
  """
72
  Reverse ImageNet normalization for visualization.
 
67
  ])
68
 
69
 
70
def get_minority_augment_transforms():
    """
    Build a stronger augmentation pipeline for minority-class images.

    Applied BEFORE the standard train transforms to create visual diversity
    for under-represented classes (e.g., fake backs). Uses more aggressive
    geometric and color perturbations than the default training pipeline.

    Returns:
        torchvision.transforms.Compose pipeline (operates on PIL images)
    """
    color_jitter = transforms.ColorJitter(
        brightness=0.4,
        contrast=0.4,
        saturation=0.4,
        hue=0.15,
    )
    pipeline = [
        transforms.RandomPerspective(distortion_scale=0.2, p=0.5),
        transforms.RandomAdjustSharpness(sharpness_factor=2, p=0.3),
        color_jitter,
        transforms.RandomVerticalFlip(p=0.3),
    ]
    return transforms.Compose(pipeline)
92
+
93
+
94
  def denormalize(tensor, mean=None, std=None):
95
  """
96
  Reverse ImageNet normalization for visualization.
Code/Model/src/utils/config.py CHANGED
@@ -68,6 +68,14 @@ class Config:
68
  self.DL_HEAD_B_BETA = 0.40 # Back authenticator loss weight
69
  self.DL_HEAD_C_GAMMA = 0.45 # Front SVDD loss weight (primary mechanism)
70
 
 
 
 
 
 
 
 
 
71
  # Ensure DL directories exist
72
  self.DL_MODELS_DIR.mkdir(parents=True, exist_ok=True)
73
  self.DL_EXPANDED_DATA_DIR.mkdir(parents=True, exist_ok=True)
 
68
  self.DL_HEAD_B_BETA = 0.40 # Back authenticator loss weight
69
  self.DL_HEAD_C_GAMMA = 0.45 # Front SVDD loss weight (primary mechanism)
70
 
71
+ # Training improvements for counterfeit detection
72
+ self.DL_BACK_COUNTERFEIT_WEIGHT = 2.5 # Class weight for counterfeit backs (ratio real/fake: 300/120)
73
+ self.DL_MINORITY_AUGMENT_FACTOR = 2 # Duplication factor for minority class (backs_fake)
74
+ self.DL_CALIBRATION_FBETA = 2.0 # F-beta for threshold calibration (2.0 = recall-weighted)
75
+ self.DL_USE_FOCAL_LOSS = True # Enable focal loss for Head A/B
76
+ self.DL_FOCAL_GAMMA = 2.0 # Focal loss gamma (focus on hard examples)
77
+ self.DL_SVDD_CONTRASTIVE_ETA = 1.0 # Weight for contrastive SVDD term (Deep SAD)
78
+
79
  # Ensure DL directories exist
80
  self.DL_MODELS_DIR.mkdir(parents=True, exist_ok=True)
81
  self.DL_EXPANDED_DATA_DIR.mkdir(parents=True, exist_ok=True)