import torch
import torch.nn as nn
import timm

# Select the GPU if one is available, otherwise fall back to the CPU
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class EyeDetectionModels:
    """
    Create and configure deep learning models for eye detection tasks.
    """

    def __init__(
        self,
        num_classes: int,
        freeze_layers: bool = True,
        device: torch.device = DEVICE,
    ):
        """
        Initialize the EyeDetectionModels factory.

        Args:
            num_classes: Number of output classes for the classifier head.
            freeze_layers: If True, freeze the pretrained backbone except
                for its last few blocks.
            device: Device the configured models are moved to.
        """
        self.num_classes = num_classes
        self.freeze_layers = freeze_layers
        self.device = device
        self.models = {
            "mobilenetv4": self.get_model_mobilenetv4,
            "levit": self.get_model_levit,
            "efficientvit": self.get_model_efficientvit,
            "gernet": self.get_model_gernet,
            "regnetx": self.get_model_regnetx,
        }

    # Model architecture functions
    @staticmethod
    def _get_feature_blocks(model: nn.Module) -> nn.ModuleList:
        """
        Utility: locate the main feature-block container in a timm model.
        Returns a list-like module of blocks.
        """
        for attr in ("features", "blocks", "layers", "stages"):  # common container names
            container = getattr(model, attr, None)
            if isinstance(container, (nn.Sequential, nn.ModuleList)):
                return nn.ModuleList(container)
        # Fallback: collect all top-level children except the classifier/head
        return nn.ModuleList(list(model.children())[:-1])

    @staticmethod
    def _freeze_except_last_n(blocks: nn.ModuleList, n: int) -> None:
        """Freeze all blocks except the last ``n``, which stay trainable."""
        total = len(blocks)
        for idx, block in enumerate(blocks):
            requires = idx >= total - n
            for p in block.parameters():
                p.requires_grad = requires

    def get_model_mobilenetv4(self) -> nn.Module:
        """Load a pretrained MobileNetV4 and attach a custom classifier."""
        model = timm.create_model(
            "mobilenetv4_conv_medium.e500_r256_in1k", pretrained=True
        )
        if self.freeze_layers:
            blocks = self._get_feature_blocks(model)
            self._freeze_except_last_n(blocks, 2)
        # Replace the classifier with a small MLP head
        in_features = model.classifier.in_features
        model.classifier = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        return model.to(self.device)

    def get_model_levit(self) -> nn.Module:
        """Load a pretrained LeViT (distilled) and replace both of its heads."""
        model = timm.create_model("levit_128s.fb_dist_in1k", pretrained=True)
        if self.freeze_layers:
            blocks = self._get_feature_blocks(model)
            self._freeze_except_last_n(blocks, 2)
        # Read the head width from the model when possible; levit_128s ends
        # in a 384-dim embedding, so fall back to that constant otherwise
        try:
            in_features = model.head.linear.in_features
        except AttributeError:
            in_features = 384
        model.head = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        # The distilled variant carries a second (distillation) head as well
        model.head_dist = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        return model.to(self.device)

    def get_model_efficientvit(self) -> nn.Module:
        """Load a pretrained EfficientViT and replace its head's linear layer."""
        model = timm.create_model("efficientvit_m1.r224_in1k", pretrained=True)
        if self.freeze_layers:
            blocks = self._get_feature_blocks(model)
            self._freeze_except_last_n(blocks, 2)
        # Read the head width from the model when possible; efficientvit_m1
        # ends in a 192-dim embedding, so fall back to that constant otherwise
        try:
            in_features = model.head.linear.in_features
        except AttributeError:
            in_features = 192
        model.head.linear = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        return model.to(self.device)

    def get_model_gernet(self) -> nn.Module:
        """
        Load and configure a GENet (General and Efficient Network) model
        with a customizable classifier.

        Returns:
            Configured GENet model.
        """
        model = timm.create_model("gernet_s.idstcv_in1k", pretrained=True)
        if self.freeze_layers:
            # GENet exposes a 'stem' plus a 'stages' container, so handle
            # that structure explicitly
            if hasattr(model, "stem") and hasattr(model, "stages"):
                # Freeze the stem completely
                for param in model.stem.parameters():
                    param.requires_grad = False
                # Freeze all stages except the last two
                stages = list(model.stages.children())
                total_stages = len(stages)
                for i, stage in enumerate(stages):
                    requires_grad = i >= total_stages - 2
                    for param in stage.parameters():
                        param.requires_grad = requires_grad
            else:
                # Fall back to the generic approach
                blocks = self._get_feature_blocks(model)
                self._freeze_except_last_n(blocks, 2)
        # Replace the classifier
        in_features = model.head.fc.in_features
        model.head.fc = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        return model.to(self.device)

    def get_model_regnetx(self) -> nn.Module:
        """
        Load and configure a RegNetX model with a customizable classifier.

        Returns:
            Configured RegNetX model.
        """
        model = timm.create_model("regnetx_008.tv2_in1k", pretrained=True)
        if self.freeze_layers:
            for param in model.parameters():
                param.requires_grad = False
            # timm's RegNet exposes its stages as attributes s1..s4
            # (torchvision uses a 'trunk_output' container instead)
            stages = [
                getattr(model, f"s{i}") for i in range(1, 5) if hasattr(model, f"s{i}")
            ]
            if not stages and hasattr(model, "trunk_output"):
                stages = list(model.trunk_output.children())
            # Unfreeze approximately the last 25% of the stages
            unfreeze_from = max(0, int(len(stages) * 0.75))
            for stage in stages[unfreeze_from:]:
                for param in stage.parameters():
                    param.requires_grad = True
            # Always keep the classifier/head trainable for fine-tuning
            for param in model.head.parameters():
                param.requires_grad = True
        # Replace the classifier
        in_features = model.head.fc.in_features
        model.head.fc = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.4),
            nn.Linear(512, self.num_classes),
        )
        return model.to(self.device)
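

# --- Usage sketch --------------------------------------------------------
# A minimal, illustrative example of driving the factory above; the model
# key, num_classes value, and input size are placeholders, not part of the
# class API.
if __name__ == "__main__":
    factory = EyeDetectionModels(num_classes=2, freeze_layers=True)
    model = factory.models["mobilenetv4"]()  # downloads pretrained weights

    # Report how much of the network the freezing policy left trainable
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print(f"Trainable parameters: {trainable:,} / {total:,}")

    # Sanity-check a forward pass with a dummy batch (256px input assumed,
    # matching the r256 tag of this mobilenetv4 variant)
    model.eval()
    dummy = torch.randn(1, 3, 256, 256).to(DEVICE)
    with torch.no_grad():
        out = model(dummy)
    print("Output shape:", tuple(out.shape))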