| | import math |
| | import torch |
| | import torchvision |
| | import torch.nn as nn |
| | import torch.nn.functional as F |
| | from torchvision import transforms |
| | |
| |
|
| | |
| | |
| | |
| |
|
# Preprocessing pipeline: grayscale -> 100x100 resize -> tensor -> normalize
# single channel to roughly [-1, 1] (mean 0.5, std 0.5).
_trnscm_steps = [
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5], std=[0.5]),
]
trnscm = transforms.Compose(_trnscm_steps)
| |
|
# Variant preprocessing pipeline WITHOUT normalization: tensor values stay
# in [0, 1] after ToTensor.
transform1 = transforms.Compose([
    transforms.Grayscale(num_output_channels=1),
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
])
| |
|
| | |
class Siamese(torch.nn.Module):
    """Siamese network mapping a 1x100x100 grayscale image to a 5-dim embedding.

    Both inputs of ``forward`` pass through the SAME weights
    (``forward_once``), so the two returned embeddings are directly
    comparable (e.g. for a contrastive loss).
    """

    def __init__(self):
        super().__init__()  # zero-arg super: preferred Python 3 form

        # Each conv stage is ReflectionPad2d(1) + 3x3 conv, which keeps the
        # spatial size unchanged — a 100x100 input stays 100x100 throughout.
        self.cnn1 = nn.Sequential(
            nn.ReflectionPad2d(1),
            nn.Conv2d(1, 4, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(4),

            nn.ReflectionPad2d(1),
            nn.Conv2d(4, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),

            nn.ReflectionPad2d(1),
            nn.Conv2d(8, 8, kernel_size=3),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(8),
        )

        # 8 output channels * 100 * 100 spatial positions after the conv stack.
        self.fc1 = nn.Sequential(
            nn.Linear(8 * 100 * 100, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 500),
            nn.ReLU(inplace=True),
            nn.Linear(500, 5),
        )

    def forward_once(self, x):
        """Embed one batch of images: (N, 1, 100, 100) -> (N, 5)."""
        output = self.cnn1(x)
        output = output.view(output.size(0), -1)  # flatten all but batch dim
        return self.fc1(output)

    def forward(self, input1, input2):
        """Run both inputs through the shared branch; return both embeddings.

        Returns:
            tuple of two (N, 5) tensors, one embedding batch per input.
        """
        return self.forward_once(input1), self.forward_once(input2)
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| |
|
| | |
# Label names for the 7 identities in the dataset.
classes = [f"person{i}" for i in range(1, 8)]