| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
|
|
| import torch |
| import torch.nn.functional as F |
| from torch import nn |
|
|
# NOTE(review): despite its name, this constant is used as a *channel /
# feature count* (Net.conv1/conv2, CifarNet.conv2/fc1), not as a data-loader
# batch size. Renaming would be clearer but touches those classes — confirm
# callers before changing.
batch_size = 10
|
|
|
|
| |
class BaseHeadSplit(nn.Module):
    """Compose a feature extractor (`base`) with a classifier (`head`).

    Useful for splitting a model into a shared body and a task-specific
    head; forward simply pipes the input through base then head.
    """

    def __init__(self, base, head):
        super().__init__()
        self.base = base
        self.head = head

    def forward(self, x):
        return self.head(self.base(x))
|
|
|
|
| |
|
|
|
|
| |
class HARCNN(nn.Module):
    """Two-stage 2D CNN for human-activity recognition over sensor windows.

    Defaults (in_channels=9, dim_hidden=64*26) match a (N, 9, 1, 128)
    input: each conv uses a (1, 9) kernel along the time axis, followed
    by (1, 2) max-pooling with stride 2, then a 3-layer MLP classifier.
    """

    def __init__(
        self,
        in_channels=9,
        dim_hidden=64 * 26,
        num_classes=6,
        conv_kernel_size=(1, 9),
        pool_kernel_size=(1, 2),
    ):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, 32, kernel_size=conv_kernel_size),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=pool_kernel_size, stride=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=conv_kernel_size),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=pool_kernel_size, stride=2),
        )
        self.fc = nn.Sequential(
            nn.Linear(dim_hidden, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        feats = self.conv2(self.conv1(x))
        return self.fc(torch.flatten(feats, 1))
|
|
|
|
| |
class Digit5CNN(nn.Module):
    """CNN backbone + MLP head sized for 3x32x32 inputs, 10 classes.

    The conv encoder downsamples 32 -> 16 -> 8 spatially (two stride-2
    pools), so the flattened feature is 128 * 8 * 8 = 8192.
    """

    def __init__(self):
        super().__init__()
        # Submodules are registered one at a time so state_dict keys keep
        # their explicit names (conv1, bn1, ...) rather than indices.
        self.encoder = nn.Sequential()
        for tag, layer in [
            ("conv1", nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=2)),
            ("bn1", nn.BatchNorm2d(64)),
            ("relu1", nn.ReLU()),
            ("maxpool1", nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)),
            ("conv2", nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2)),
            ("bn2", nn.BatchNorm2d(64)),
            ("relu2", nn.ReLU()),
            ("maxpool2", nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False)),
            ("conv3", nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2)),
            ("bn3", nn.BatchNorm2d(128)),
            ("relu3", nn.ReLU()),
        ]:
            self.encoder.add_module(tag, layer)

        self.linear = nn.Sequential()
        for tag, layer in [
            ("fc1", nn.Linear(8192, 3072)),
            ("bn4", nn.BatchNorm1d(3072)),
            ("relu4", nn.ReLU()),
            ("dropout", nn.Dropout()),
            ("fc2", nn.Linear(3072, 2048)),
            ("bn5", nn.BatchNorm1d(2048)),
            ("relu5", nn.ReLU()),
        ]:
            self.linear.add_module(tag, layer)

        self.fc = nn.Linear(2048, 10)

    def forward(self, x):
        feature = self.encoder(x).view(x.size(0), -1)
        feature = self.linear(feature)
        return self.fc(feature)
|
|
|
|
| |
class AmazonMLP(nn.Module):
    """MLP for Amazon-review features: 5000-d input -> 2-way logits.

    Encoder is a stack of Linear+ReLU pairs narrowing 5000 -> 1000 ->
    500 -> 100, followed by a binary classification head.
    """

    def __init__(self):
        super().__init__()
        widths = [5000, 1000, 500, 100]
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.ReLU())
        self.encoder = nn.Sequential(*layers)
        self.fc = nn.Linear(100, 2)

    def forward(self, x):
        return self.fc(self.encoder(x))
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
class FedAvgCNN(nn.Module):
    """The classic FedAvg two-conv CNN.

    Defaults are sized for 28x28 single-channel input: two 5x5 convs with
    2x2 pooling give a 64*4*4 = 1024-d flattened feature (the `dim`
    default), then a 512-unit hidden layer and the classifier head.
    """

    def __init__(self, in_features=1, num_classes=10, dim=1024):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_features, 32, kernel_size=5, padding=0, stride=1, bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5, padding=0, stride=1, bias=True),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=(2, 2)),
        )
        self.fc1 = nn.Sequential(nn.Linear(dim, 512), nn.ReLU(inplace=True))
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        h = self.conv2(self.conv1(x))
        h = self.fc1(torch.flatten(h, 1))
        return self.fc(h)
|
|
|
|
| |
|
|
|
|
| |
class FedAvgMLP(nn.Module):
    """Single-hidden-layer MLP; flattens 4-D image batches automatically."""

    def __init__(self, in_features=784, num_classes=10, hidden_dim=200):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, num_classes)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        # Accept (N, C, H, W) batches by flattening to (N, features) first.
        if x.ndim == 4:
            x = x.view(x.size(0), -1)
        return self.fc2(self.act(self.fc1(x)))
|
|
|
|
| |
|
|
|
|
class Net(nn.Module):
    """Small CNN for single-channel images with log-softmax output.

    fc1's 18432 input size corresponds to 28x28 inputs: two 2x2 convs and
    two stride-1 2x2 pools shrink 28 -> 27 -> 26 -> 25 -> 24, giving
    32 * 24 * 24 features.

    NOTE(review): the module-level constant ``batch_size`` (10) is used
    here as a *channel count*, not a batch size — misleading name; confirm
    before renaming.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, batch_size, 2, 1)
        self.conv2 = nn.Conv2d(batch_size, 32, 2, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(18432, 128)
        self.fc = nn.Linear(128, 10)

    def forward(self, x):
        # Use the functional API instead of constructing fresh nn.ReLU /
        # nn.MaxPool2d modules on every forward call — same math, but no
        # per-call module allocation.
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2, 1)
        x = self.dropout1(x)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2, 1)
        x = self.dropout2(x)
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = self.fc(x)
        return F.log_softmax(x, dim=1)
|
|
|
|
| |
|
|
|
|
class Mclr_Logistic(nn.Module):
    """Multinomial logistic regression returning log-probabilities."""

    def __init__(self, input_dim=1 * 28 * 28, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(input_dim, num_classes)

    def forward(self, x):
        logits = self.fc(torch.flatten(x, 1))
        return F.log_softmax(logits, dim=1)
|
|
|
|
| |
|
|
|
|
class DNN(nn.Module):
    """One-hidden-layer fully connected classifier with log-softmax output."""

    def __init__(self, input_dim=1 * 28 * 28, mid_dim=100, num_classes=10):
        super().__init__()
        self.fc1 = nn.Linear(input_dim, mid_dim)
        self.fc = nn.Linear(mid_dim, num_classes)

    def forward(self, x):
        hidden = F.relu(self.fc1(torch.flatten(x, 1)))
        return F.log_softmax(self.fc(hidden), dim=1)
|
|
|
|
| |
|
|
|
|
class CifarNet(nn.Module):
    """LeNet-style CNN for 3x32x32 input (e.g. CIFAR-10) with log-softmax out.

    Spatial sizes: 32 -> 28 (conv1) -> 14 (pool) -> 10 (conv2) -> 5 (pool),
    matching the `batch_size * 5 * 5` flatten below.

    NOTE(review): the module-level constant ``batch_size`` (10) is used
    here as a *channel/feature count*, not a batch size — misleading name;
    confirm before renaming.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, batch_size, 5)
        self.fc1 = nn.Linear(batch_size * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc = nn.Linear(84, num_classes)

    def forward(self, x):
        h = self.pool(F.relu(self.conv1(x)))
        h = self.pool(F.relu(self.conv2(h)))
        h = h.view(-1, batch_size * 5 * 5)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return F.log_softmax(self.fc(h), dim=1)
|
|
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
|
|
def init_weights(m):
    """Initialize a module's parameters in place, dispatched on class name.

    Conv2d / ConvTranspose2d -> Kaiming-uniform weights; BatchNorm* ->
    N(1.0, 0.02) weights; Linear -> Xavier-normal weights. Biases are
    zeroed when present. Intended for ``module.apply(init_weights)``.
    """
    classname = m.__class__.__name__
    if (
        classname.find("Conv2d") != -1
        or classname.find("ConvTranspose2d") != -1
    ):
        nn.init.kaiming_uniform_(m.weight)
        # Guard: layers built with bias=False have m.bias is None, which
        # previously crashed nn.init.zeros_.
        if m.bias is not None:
            nn.init.zeros_(m.bias)
    elif classname.find("BatchNorm") != -1:
        # affine=False norms carry no learnable weight/bias at all.
        if m.weight is not None:
            nn.init.normal_(m.weight, 1.0, 0.02)
            nn.init.zeros_(m.bias)
    elif classname.find("Linear") != -1:
        nn.init.xavier_normal_(m.weight)
        if m.bias is not None:
            nn.init.zeros_(m.bias)
|
|
|
|
class LeNet(nn.Module):
    """LeNet-style conv net with a bottleneck layer and log-softmax output.

    Conv features (50*4*4 by default) are projected to `bottleneck_dim`,
    batch-normalized, dropped out, and classified. Passing iswn="wn"
    wraps the classifier in weight normalization.
    """

    def __init__(
        self,
        feature_dim=50 * 4 * 4,
        bottleneck_dim=256,
        num_classes=10,
        iswn=None,
    ):
        super().__init__()
        self.conv_params = nn.Sequential(
            nn.Conv2d(1, 20, kernel_size=5),
            nn.MaxPool2d(2),
            nn.ReLU(),
            nn.Conv2d(20, 50, kernel_size=5),
            nn.Dropout2d(p=0.5),
            nn.MaxPool2d(2),
            nn.ReLU(),
        )
        self.bn = nn.BatchNorm1d(bottleneck_dim, affine=True)
        self.dropout = nn.Dropout(p=0.5)
        self.bottleneck = nn.Linear(feature_dim, bottleneck_dim)
        self.bottleneck.apply(init_weights)
        self.fc = nn.Linear(bottleneck_dim, num_classes)
        if iswn == "wn":
            self.fc = nn.utils.weight_norm(self.fc, name="weight")
        self.fc.apply(init_weights)

    def forward(self, x):
        feat = self.conv_params(x)
        feat = feat.view(feat.size(0), -1)
        feat = self.dropout(self.bn(self.bottleneck(feat)))
        logits = self.fc(feat)
        return F.log_softmax(logits, dim=1)
|
|
|
|
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
|
|
|
|
class LSTMNet(nn.Module):
    """LSTM text classifier over padded token-id batches.

    ``forward`` expects ``x = (text, text_lengths)`` where ``text`` is a
    (batch, seq) tensor of token ids and ``text_lengths`` holds each
    sequence's true length (used for packing).
    """

    def __init__(
        self,
        hidden_dim,
        num_layers=2,
        bidirectional=False,
        dropout=0.2,
        padding_idx=0,
        vocab_size=98635,
        num_classes=10,
    ):
        super().__init__()

        self.dropout = nn.Dropout(dropout)
        self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx)
        self.lstm = nn.LSTM(
            input_size=hidden_dim,
            hidden_size=hidden_dim,
            num_layers=num_layers,
            bidirectional=bidirectional,
            dropout=dropout,
            batch_first=True,
        )
        # Bidirectional LSTMs emit 2*hidden_dim features per timestep.
        dims = hidden_dim * 2 if bidirectional else hidden_dim
        self.fc = nn.Linear(dims, num_classes)

    def forward(self, x):
        text, text_lengths = x

        embedded = self.embedding(text)

        # Pack so the LSTM skips padded positions.
        packed_embedded = nn.utils.rnn.pack_padded_sequence(
            embedded, text_lengths, batch_first=True, enforce_sorted=False
        )
        packed_output, (hidden, cell) = self.lstm(packed_embedded)

        # Re-pad to a dense (batch, max_len, dims) tensor.
        out, out_lengths = nn.utils.rnn.pad_packed_sequence(
            packed_output, batch_first=True
        )

        # NOTE(review): out[:, -1, :] takes the *last padded* timestep,
        # which is zero-filled for sequences shorter than the batch max —
        # indexing by out_lengths (or using `hidden`) is probably what was
        # intended; confirm before changing. Also note torch.relu_ mutates
        # the padded output in place.
        out = torch.relu_(out[:, -1, :])
        out = self.dropout(out)
        out = self.fc(out)
        out = F.log_softmax(out, dim=1)

        return out
|
|
|
|
| |
|
|
|
|
class fastText(nn.Module):
    """fastText-style classifier: mean-pooled embeddings -> hidden -> log-probs.

    ``forward`` takes ``x = (text, text_lengths)``; only ``text`` (a
    (batch, seq) tensor of token ids) is used — embeddings are averaged
    over the sequence axis regardless of the reported lengths.
    """

    def __init__(
        self, hidden_dim, padding_idx=0, vocab_size=98635, num_classes=10
    ):
        super().__init__()

        self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx)
        self.fc1 = nn.Linear(hidden_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        text, text_lengths = x

        # Average token embeddings over the sequence axis, then classify.
        pooled = self.embedding(text).mean(1)
        return F.log_softmax(self.fc(self.fc1(pooled)), dim=1)
|
|
|
|
| |
|
|
|
|
class TextCNN(nn.Module):
    """Kim-style CNN text classifier with three parallel conv widths.

    ``forward`` takes ``x = (text, text_lengths)``; ``text`` must be a
    (batch, max_len) tensor of token ids — each branch max-pools over
    exactly ``max_len - k + 1`` positions, so the sequence length must
    equal ``max_len``.
    """

    def __init__(
        self,
        hidden_dim,
        num_channels=100,
        kernel_size=[3, 4, 5],
        max_len=200,
        dropout=0.8,
        padding_idx=0,
        vocab_size=98635,
        num_classes=10,
    ):
        super().__init__()

        self.embedding = nn.Embedding(vocab_size, hidden_dim, padding_idx)

        def conv_branch(k):
            # Conv over the time axis, then max-pool the full valid length
            # down to one position per channel.
            return nn.Sequential(
                nn.Conv1d(
                    in_channels=hidden_dim,
                    out_channels=num_channels,
                    kernel_size=k,
                ),
                nn.ReLU(),
                nn.MaxPool1d(max_len - k + 1),
            )

        self.conv1 = conv_branch(kernel_size[0])
        self.conv2 = conv_branch(kernel_size[1])
        self.conv3 = conv_branch(kernel_size[2])

        self.dropout = nn.Dropout(dropout)

        # One num_channels-wide feature per branch, concatenated.
        self.fc = nn.Linear(num_channels * len(kernel_size), num_classes)

    def forward(self, x):
        text, text_lengths = x

        # (batch, seq, emb) -> (batch, emb, seq) as Conv1d expects.
        emb = self.embedding(text).permute(0, 2, 1)

        pooled = torch.cat(
            (
                self.conv1(emb).squeeze(2),
                self.conv2(emb).squeeze(2),
                self.conv3(emb).squeeze(2),
            ),
            1,
        )
        return F.log_softmax(self.fc(self.dropout(pooled)), dim=1)
|
|
|
|
| |
|
|
|
|
| |
| |
| |
| |
|
|
| |
| |
| |
|
|