alfiannajih commited on
Commit
56176e2
·
verified ·
1 Parent(s): c2c83fe

Upload model

Browse files
Files changed (3) hide show
  1. config.json +6 -2
  2. model.py +27 -0
  3. model.safetensors +3 -0
config.json CHANGED
@@ -1,9 +1,13 @@
1
  {
2
- "_attn_implementation_autoset": true,
 
 
3
  "auto_map": {
4
- "AutoConfig": "config.MobileNetV3Config"
 
5
  },
6
  "model_type": "mobilenetv3",
7
  "num_classes": 6,
 
8
  "transformers_version": "4.46.3"
9
  }
 
1
  {
2
+ "architectures": [
3
+ "MobileNetV3Model"
4
+ ],
5
  "auto_map": {
6
+ "AutoConfig": "config.MobileNetV3Config",
7
+ "AutoModelForImageClassification": "model.MobileNetV3Model"
8
  },
9
  "model_type": "mobilenetv3",
10
  "num_classes": 6,
11
+ "torch_dtype": "float32",
12
  "transformers_version": "4.46.3"
13
  }
model.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import PreTrainedModel
2
+ from torchvision.models import mobilenet_v3_large, MobileNet_V3_Large_Weights
3
+ from torch import nn
4
+ import torch.nn.functional as F
5
+ from .config import MobileNetV3Config
6
+
7
class MobileNetV3Model(PreTrainedModel):
    """MobileNetV3-Large image classifier wrapped as a HF ``PreTrainedModel``.

    Builds a torchvision MobileNetV3-Large backbone and swaps its stock
    classifier head for a fresh one whose final layer is sized to
    ``config.num_classes``.
    """

    config_class = MobileNetV3Config

    def __init__(self, config):
        super().__init__(config)
        # NOTE(review): this fetches ImageNet weights on *every*
        # instantiation, including via from_pretrained() where they are
        # immediately overwritten by the checkpoint — confirm whether
        # weights=None is wanted on that path.
        backbone = mobilenet_v3_large(weights=MobileNet_V3_Large_Weights.DEFAULT)
        # Replacement head: 960 is the backbone's feature width, 1280 the
        # hidden width used by the stock torchvision classifier.
        backbone.classifier = nn.Sequential(
            nn.Linear(960, 1280),
            nn.Hardswish(),
            nn.Dropout(p=0.2, inplace=True),
            nn.Linear(1280, config.num_classes),
        )
        self.model = backbone

    def forward(self, tensor, labels=None):
        """Compute class logits; add a cross-entropy loss when labels are given.

        Args:
            tensor: batch of input images fed straight to the backbone.
            labels: optional class-index targets for the loss.

        Returns:
            dict with ``"logits"`` and, when ``labels`` is not None, ``"loss"``.
        """
        logits = self.model(tensor)
        if labels is None:
            return {"logits": logits}
        return {"loss": F.cross_entropy(logits, labels), "logits": logits}
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f56288776e34197e9911489341fbbcaa1a74119c47e77a0e92cddff7c01ba17d
3
+ size 16968952