AB739 committed on
Commit
4d2f736
·
verified ·
1 Parent(s): c1ab64a

Create model.py

Browse files
Files changed (1) hide show
  1. tasks/model.py +107 -0
tasks/model.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+ import torch.nn.functional as F
4
+ from torch.utils.data import DataLoader, TensorDataset
5
+ from torchaudio import transforms
6
+ from torchvision import models
7
+
8
+
9
+
10
class BlazeFace(nn.Module):
    """BlazeFace-style convolutional feature extractor.

    Maps an image batch (N, C, H, W) to a 64-dimensional embedding per
    sample, using a stem convolution, a stack of depthwise-separable
    blocks, a 1x1 projection head, and global average pooling.

    Args:
        input_channels: number of channels in the input images.
        use_double_block: if True, the late stage uses paired ("double")
            depthwise-separable blocks; otherwise plain single blocks.
        activation: "relu" selects ReLU after each block; any other value
            selects Sigmoid.
        use_optional_block: stored on the instance.
            NOTE(review): never referenced anywhere in this class — TODO
            confirm whether it is still needed.
    """

    def __init__(self, input_channels=1, use_double_block=False, activation="relu", use_optional_block=True):
        super(BlazeFace, self).__init__()
        self.activation = activation
        self.use_double_block = use_double_block
        self.use_optional_block = use_optional_block

        def act():
            # A fresh activation module per block (stateless, no parameters).
            return nn.ReLU() if activation == "relu" else nn.Sigmoid()

        def conv_bn(c_in, c_out, kernel_size, stride, padding):
            # Plain convolution -> batch-norm -> activation.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=kernel_size, stride=stride, padding=padding),
                nn.BatchNorm2d(c_out),
                act(),
            )

        def dw_sep(c_in, c_out, stride):
            # Depthwise 5x5 conv (groups=c_in, bias-free) followed by a
            # pointwise 1x1 conv, then batch-norm and activation.
            return nn.Sequential(
                nn.Conv2d(c_in, c_in, kernel_size=5, stride=stride, padding=2, groups=c_in, bias=False),
                nn.Conv2d(c_in, c_out, kernel_size=1, stride=1, padding=0),
                nn.BatchNorm2d(c_out),
                act(),
            )

        def dw_pair(c_in, mid, c_out, stride):
            # Two chained depthwise-separable blocks ("double block").
            return nn.Sequential(
                dw_sep(c_in, mid, stride),
                dw_sep(mid, c_out, 1),
            )

        # Stem convolution: halves the spatial resolution (stride 2).
        self.conv1 = conv_bn(input_channels, 24, kernel_size=5, stride=2, padding=2)

        # Early stage: five depthwise-separable blocks, 24 -> 48 channels.
        self.single_blocks = nn.ModuleList([
            dw_sep(24, 24, stride=1),
            dw_sep(24, 24, stride=1),
            dw_sep(24, 48, stride=2),
            dw_sep(48, 48, stride=1),
            dw_sep(48, 48, stride=1),
        ])

        # Late stage: 48 -> 96 channels, either paired or plain blocks.
        if self.use_double_block:
            stages = [
                dw_pair(48, 24, 96, stride=2),
                dw_pair(96, 24, 96, stride=1),
                dw_pair(96, 24, 96, stride=2),
                dw_pair(96, 24, 96, stride=1),
                dw_pair(96, 24, 96, stride=2),
            ]
        else:
            stages = [
                dw_sep(48, 96, stride=2),
                dw_sep(96, 96, stride=1),
                dw_sep(96, 96, stride=2),
                dw_sep(96, 96, stride=1),
                dw_sep(96, 96, stride=2),
            ]
        self.double_blocks = nn.ModuleList(stages)

        # 1x1 projection head down to the 64-channel embedding.
        self.conv_head = nn.Conv2d(96, 64, kernel_size=1, stride=1)
        self.bn_head = nn.BatchNorm2d(64)

        # Collapse the remaining spatial dims to 1x1 before flattening.
        self.global_avg_pooling = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        """Return a (N, 64) feature tensor for an image batch ``x``."""
        x = self.conv1(x)
        for blk in self.single_blocks:
            x = blk(x)
        for blk in self.double_blocks:
            x = blk(x)
        # Head: 1x1 conv, batch-norm, ReLU (always ReLU here, regardless
        # of the configured block activation — as in the original).
        x = F.relu(self.bn_head(self.conv_head(x)))
        x = self.global_avg_pooling(x)
        return torch.flatten(x, 1)
98
class BlazeFaceModel(nn.Module):
    """Classifier: BlazeFace feature extractor plus a linear output layer.

    Args:
        input_channels: channel count of the input images, forwarded to
            the backbone.
        label_count: number of output classes (size of the logit vector).
        use_double_block, activation, use_optional_block: forwarded
            unchanged to the ``BlazeFace`` backbone.
    """

    def __init__(self, input_channels, label_count, use_double_block=False, activation="relu", use_optional_block=True):
        super(BlazeFaceModel, self).__init__()
        # Backbone emits a 64-dim embedding; a single linear layer maps
        # it to class logits.
        self.blazeface_backbone = BlazeFace(
            input_channels=input_channels,
            use_double_block=use_double_block,
            activation=activation,
            use_optional_block=use_optional_block,
        )
        self.fc = nn.Linear(64, label_count)

    def forward(self, x):
        """Return (N, label_count) class logits for an image batch ``x``."""
        return self.fc(self.blazeface_backbone(x))