cammarasana123 committed
Commit 94d9ced · verified · 1 Parent(s): 17773d7

Upload 2 files

Files changed (2):
  1. learning.py +61 -0
  2. wConv.py +26 -0
learning.py ADDED
@@ -0,0 +1,61 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from wConv import wConv2d


class SimpleModel(nn.Module):
    def __init__(self, num_classes=10):
        super(SimpleModel, self).__init__()
        # self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, padding=1, bias=True)  ##--> We have replaced this convolution
        self.conv1 = wConv2d(in_channels=1, out_channels=8, kernel_size=3, den=[0.75], padding=1, bias=True)  ##--> with this convolution
        self.pool = nn.MaxPool2d(2, 2)
        # self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=5, padding=2, bias=True)  ##--> We have replaced this convolution
        self.conv2 = wConv2d(in_channels=8, out_channels=16, kernel_size=5, den=[0.25, 0.75], padding=2, bias=True)  ##--> with this convolution

        # Two 2x2 poolings reduce 64x64 inputs to 16x16, with 16 channels.
        self.fc = nn.Linear(16 * 16 * 16, num_classes)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


model = SimpleModel(num_classes=10)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# Synthetic dataset: 100 random single-channel 64x64 images with random labels.
num_samples = 100
batch_size = 4
num_batches = num_samples // batch_size

inputs = torch.randn(num_samples, 1, 64, 64)
targets = torch.randint(0, 10, (num_samples,))

num_epochs = 5

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0

    for i in range(num_batches):
        batch_inputs = inputs[i*batch_size:(i+1)*batch_size].to(device)
        batch_targets = targets[i*batch_size:(i+1)*batch_size].to(device)

        optimizer.zero_grad()
        outputs = model(batch_inputs)
        loss = criterion(outputs, batch_targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

    avg_loss = running_loss / num_batches
    print(f"Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss:.4f}")
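As a quick sanity check (not part of the commit), the forward pass can be exercised on a dummy batch before training; this sketch assumes SimpleModel, model, and device from the script above are already in scope:

# Minimal shape check, assuming the model/device set up in learning.py.
dummy = torch.randn(2, 1, 64, 64)   # batch of 2 single-channel 64x64 images
out = model(dummy.to(device))
print(out.shape)                    # expected: torch.Size([2, 10]), one logit per class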
wConv.py ADDED
@@ -0,0 +1,26 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class wConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, den, stride=1, padding=1, groups=1, bias=False):
        super(wConv2d, self).__init__()
        self.stride = stride
        self.padding = padding
        self.kernel_size = kernel_size
        self.groups = groups
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels // groups, kernel_size, kernel_size))
        nn.init.kaiming_normal_(self.weight, mode='fan_out', nonlinearity='relu')
        self.bias = nn.Parameter(torch.zeros(out_channels)) if bias else None

        # den holds the weights for one half of the kernel axis; mirroring it
        # around a central 1.0 gives a symmetric 1D profile of length 2*len(den)+1.
        device = torch.device('cpu')
        self.register_buffer('alfa', torch.cat([
            torch.tensor(den, device=device),
            torch.tensor([1.0], device=device),
            torch.flip(torch.tensor(den, device=device), dims=[0])
        ]))
        # Phi is the fixed 2D spatial mask: the outer product of the profile with itself.
        self.register_buffer('Phi', torch.outer(self.alfa, self.alfa))

        if self.Phi.shape != (kernel_size, kernel_size):
            raise ValueError(f"Phi shape {self.Phi.shape} must match kernel size ({kernel_size}, {kernel_size})")

    def forward(self, x):
        # Scale the learned kernel element-wise by the mask, then run a
        # standard convolution with the masked weights.
        Phi = self.Phi.to(x.device)
        weight_Phi = self.weight * Phi
        return F.conv2d(x, weight_Phi, bias=self.bias, stride=self.stride, padding=self.padding, groups=self.groups)
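The den argument implicitly fixes the kernel size: the mirrored profile alfa has length 2*len(den)+1, so den=[0.75] pairs with kernel_size=3 and den=[0.25, 0.75] with kernel_size=5, matching the two layers in learning.py. A minimal standalone sketch of the mask construction, using the same tensor ops as the module:

# Illustrative construction of the Phi mask for den=[0.25, 0.75] (5x5 kernel).
import torch

den = torch.tensor([0.25, 0.75])
alfa = torch.cat([den, torch.tensor([1.0]), torch.flip(den, dims=[0])])
# alfa = [0.25, 0.75, 1.00, 0.75, 0.25]  (length 2*len(den)+1 = 5)
Phi = torch.outer(alfa, alfa)
print(Phi[2])  # center row equals alfa; values decay toward the kernel borders

Because Phi multiplies the learned weights element-wise, the mask biases the convolution toward the kernel center while still letting the optimizer adapt every weight.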