repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
SpinalNet | SpinalNet-master/MNIST_VGG/MNIST_VGG_and_SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for MNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np

# Training hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5  # NOTE(review): unused — Adam (not SGD) is instantiated below.

# MNIST training set with random perspective/rotation augmentation.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.RandomPerspective(),
                                   torchvision.transforms.RandomRotation(10, fill=(0,)),
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_train, shuffle=True)

# MNIST test set: no augmentation, larger batches for faster evaluation.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('/files/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize(
                                       (0.1307,), (0.3081,))
                               ])),
    batch_size=batch_size_test, shuffle=True)

# Peek at one test batch and plot the first six digits with their labels.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig  # no-op in a plain script; notebook leftover that displays the figure
class VGG(nn.Module):
    """
    Plain VGG-style CNN for 28x28 single-channel images (10 classes).

    Four conv stages (each ending in a 2x2 max-pool) shrink the input
    28x28 -> 14x14 -> 7x7 -> 3x3 -> 1x1, leaving a 256-dim feature vector
    that a two-layer classifier maps to class log-probabilities.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs (std = sqrt(2 / fan_out)); identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Same initialization scheme as two_conv_pool.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=10):
        super(VGG, self).__init__()
        # Conv backbone; spatial size halves after each stage.
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Conventional fully-connected head on the flattened 256-dim features.
        self.classifier = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p = 0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        x = self.classifier(x)
        return F.log_softmax(x, dim=1)
# SpinalVGG head sizes: each spinal layer reads half of the 256 flat features
# (Half_width) and produces layer_width outputs.
Half_width =128
layer_width =128
class SpinalVGG(nn.Module):
    """
    VGG backbone with a SpinalNet fully-connected head (10 classes).

    The head is a chain of four small FC layers; each one sees half of the
    flattened conv features (the halves alternate) concatenated with the
    previous layer's output, and the classifier sees all four outputs.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs; identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=10):
        super(SpinalVGG, self).__init__()
        # Same conv backbone as the plain VGG above (28x28 -> 1x1x256).
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Spinal layers: layer 1 takes Half_width inputs; layers 2-4 take
        # Half_width features plus the previous layer's layer_width outputs.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Output layer reads the concatenation of all four spinal outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        # Spinal chain: alternate feature halves, each concatenated with the
        # previous spinal layer's activations.
        x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
        # Concatenate every spinal layer's output for the classifier.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return F.log_softmax(x, dim=1)
# Use the GPU when available so the script also runs on CPU-only machines
# (the original hard-coded 'cuda' crashes where no GPU is present).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# For updating learning rate
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group with *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
total_step = len(train_loader)  # batches per epoch (used in progress printout)
curr_lr1 = learning_rate  # current LR for the ordinary VGG
curr_lr2 = learning_rate  # current LR for the Spinal VGG
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)

# Loss and optimizer — one independent Adam optimizer per model.
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)  # NOTE(review): duplicate of the assignment above
best_accuracy1 = 0  # best test accuracy seen so far for VGG
best_accuracy2 =0  # best test accuracy seen so far for SpinalVGG
# Main loop: train both models on the same batches each epoch, then evaluate.
# When a model fails to beat its best test accuracy, its learning rate is
# re-drawn at random (uniform sample cubed, biasing toward small LRs).
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the ordinary VGG.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the Spinal VGG.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:
            print("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Evaluate both models on the held-out test set.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: shrink the LR randomly for the next epoch.
            # FIX: np.asscalar was removed in NumPy 1.23 — draw a plain scalar.
            curr_lr1 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    # Back to training mode for the next epoch.
    model1.train()
    model2.train()
| 11,616 | 32.191429 | 116 | py |
SpinalNet | SpinalNet-master/MNIST_VGG/FashionMNIST_VGG_and _SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for Fashion-MNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np

# Training hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5  # NOTE(review): unused — Adam (not SGD) is instantiated below.
log_interval = 500  # NOTE(review): unused in this script.

# Fashion-MNIST training set with rotation and mild random-resized-crop
# augmentation.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.FashionMNIST('/files/', train=True, download=True,
                                      transform=torchvision.transforms.Compose([
                                          torchvision.transforms.RandomRotation(10, fill=(0,)),
                                          torchvision.transforms.RandomResizedCrop(28, scale=(0.95,1)),
                                          #torchvision.transforms.RandomCrop(28,2),
                                          torchvision.transforms.ToTensor(),
                                          torchvision.transforms.Normalize(
                                              (0.5,), (0.5,))
                                      ])),
    batch_size=batch_size_train, shuffle=True)

# Fashion-MNIST test set: no augmentation, larger batches.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.FashionMNIST('/files/', train=False, download=True,
                                      transform=torchvision.transforms.Compose([
                                          torchvision.transforms.ToTensor(),
                                          torchvision.transforms.Normalize(
                                              (0.5,), (0.5,))
                                      ])),
    batch_size=batch_size_test, shuffle=True)

# Peek at one training batch and plot the first six images with their labels.
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig  # no-op in a plain script; notebook leftover that displays the figure
class VGG(nn.Module):
    """
    Plain VGG-style CNN for 28x28 single-channel images (10 classes).

    Four conv stages (each ending in a 2x2 max-pool) shrink the input
    28x28 -> 14x14 -> 7x7 -> 3x3 -> 1x1, leaving a 256-dim feature vector
    that a two-layer classifier maps to class log-probabilities.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs (std = sqrt(2 / fan_out)); identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Same initialization scheme as two_conv_pool.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=10):
        super(VGG, self).__init__()
        # Conv backbone; spatial size halves after each stage.
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Conventional fully-connected head on the flattened 256-dim features.
        self.classifier = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p = 0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        x = self.classifier(x)
        return F.log_softmax(x, dim=1)
# SpinalVGG head sizes: each spinal layer reads half of the 256 flat features
# (Half_width) and produces layer_width outputs.
Half_width =128
layer_width =128
class SpinalVGG(nn.Module):
    """
    VGG backbone with a SpinalNet fully-connected head (10 classes).

    The head is a chain of four small FC layers; each one sees half of the
    flattened conv features (the halves alternate) concatenated with the
    previous layer's output, and the classifier sees all four outputs.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs; identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=10):
        super(SpinalVGG, self).__init__()
        # Same conv backbone as the plain VGG above (28x28 -> 1x1x256).
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Spinal layers: layer 1 takes Half_width inputs; layers 2-4 take
        # Half_width features plus the previous layer's layer_width outputs.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Output layer reads the concatenation of all four spinal outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        # Spinal chain: alternate feature halves, each concatenated with the
        # previous spinal layer's activations.
        x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
        # Concatenate every spinal layer's output for the classifier.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return F.log_softmax(x, dim=1)
# Use the GPU when available so the script also runs on CPU-only machines
# (the original hard-coded 'cuda' crashes where no GPU is present).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# For updating learning rate
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group with *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
total_step = len(train_loader)  # batches per epoch (used in progress printout)
curr_lr1 = learning_rate  # current LR for the ordinary VGG
curr_lr2 = learning_rate  # current LR for the Spinal VGG
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)

# Loss and optimizer — one independent Adam optimizer per model.
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)  # NOTE(review): duplicate of the assignment above
best_accuracy1 = 0  # best test accuracy seen so far for VGG
best_accuracy2 =0  # best test accuracy seen so far for SpinalVGG
# Main loop: train both models on the same batches each epoch, then evaluate.
# When a model fails to beat its best test accuracy, its learning rate is
# re-drawn at random (uniform sample cubed, biasing toward small LRs).
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the ordinary VGG.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the Spinal VGG.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:
            print("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Evaluate both models on the held-out test set.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: shrink the LR randomly for the next epoch.
            # FIX: np.asscalar was removed in NumPy 1.23 — draw a plain scalar.
            curr_lr1 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    # Back to training mode for the next epoch.
    model1.train()
    model2.train()
| 11,734 | 32.528571 | 116 | py |
SpinalNet | SpinalNet-master/MNIST_VGG/EMNIST_letters_VGG_and _SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for EMNIST(Letters).
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np

# Training hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5  # NOTE(review): unused — Adam (not SGD) is instantiated below.
log_interval = 500  # NOTE(review): unused in this script.

# EMNIST 'letters' training set with perspective/rotation augmentation.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.EMNIST('/files/', split='letters', train=True, download=True,
                                transform=torchvision.transforms.Compose([
                                    torchvision.transforms.RandomPerspective(),
                                    torchvision.transforms.RandomRotation(10, fill=(0,)),
                                    torchvision.transforms.ToTensor(),
                                    torchvision.transforms.Normalize(
                                        (0.1307,), (0.3081,))
                                ])),
    batch_size=batch_size_train, shuffle=True)

# EMNIST 'letters' test set: no augmentation, larger batches.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.EMNIST('/files/', split='letters', train=False, download=True,
                                transform=torchvision.transforms.Compose([
                                    torchvision.transforms.ToTensor(),
                                    torchvision.transforms.Normalize(
                                        (0.1307,), (0.3081,))
                                ])),
    batch_size=batch_size_test, shuffle=True)

# Peek at one test batch and plot the first six letters with their labels.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig  # no-op in a plain script; notebook leftover that displays the figure
class VGG(nn.Module):
    """
    Plain VGG-style CNN for 28x28 single-channel images.

    Four conv stages (each ending in a 2x2 max-pool) shrink the input
    28x28 -> 14x14 -> 7x7 -> 3x3 -> 1x1, leaving a 256-dim feature vector
    that a two-layer classifier maps to class log-probabilities.

    NOTE(review): num_classes defaults to 62 (the EMNIST 'byclass' count);
    the 'letters' split used here has 26 letter classes with labels 1-26,
    so 62 outputs work but over-provision the head — verify intent.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs (std = sqrt(2 / fan_out)); identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Same initialization scheme as two_conv_pool.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=62):
        super(VGG, self).__init__()
        # Conv backbone; spatial size halves after each stage.
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Conventional fully-connected head on the flattened 256-dim features.
        self.classifier = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p = 0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        x = self.classifier(x)
        return F.log_softmax(x, dim=1)
# SpinalVGG head sizes: each spinal layer reads half of the 256 flat features
# (Half_width) and produces layer_width outputs.
Half_width =128
layer_width =128
class SpinalVGG(nn.Module):
    """
    VGG backbone with a SpinalNet fully-connected head.

    The head is a chain of four small FC layers; each one sees half of the
    flattened conv features (the halves alternate) concatenated with the
    previous layer's output, and the classifier sees all four outputs.

    NOTE(review): num_classes defaults to 62 although the EMNIST 'letters'
    split used here has 26 letter classes (labels 1-26) — works, but
    over-provisions the output layer; verify intent.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs; identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=62):
        super(SpinalVGG, self).__init__()
        # Same conv backbone as the plain VGG above (28x28 -> 1x1x256).
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Spinal layers: layer 1 takes Half_width inputs; layers 2-4 take
        # Half_width features plus the previous layer's layer_width outputs.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
            nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
        # Output layer reads the concatenation of all four spinal outputs.
        self.fc_out = nn.Sequential(
            nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        # Spinal chain: alternate feature halves, each concatenated with the
        # previous spinal layer's activations.
        x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
        # Concatenate every spinal layer's output for the classifier.
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return F.log_softmax(x, dim=1)
# Use the GPU when available so the script also runs on CPU-only machines
# (the original hard-coded 'cuda' crashes where no GPU is present).
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# For updating learning rate
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group with *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
total_step = len(train_loader)  # batches per epoch (used in progress printout)
curr_lr1 = learning_rate  # current LR for the ordinary VGG
curr_lr2 = learning_rate  # current LR for the Spinal VGG
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)

# Loss and optimizer — one independent Adam optimizer per model.
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)

# Train the model
total_step = len(train_loader)  # NOTE(review): duplicate of the assignment above
best_accuracy1 = 0  # best test accuracy seen so far for VGG
best_accuracy2 = 0  # best test accuracy seen so far for SpinalVGG
# Main loop: train both models on the same batches each epoch, then evaluate.
# When a model fails to beat its best test accuracy, its learning rate is
# re-drawn at random (uniform sample cubed, biasing toward small LRs).
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the ordinary VGG.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the Spinal VGG.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:
            print("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                  .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Evaluate both models on the held-out test set.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: shrink the LR randomly for the next epoch.
            # FIX: np.asscalar was removed in NumPy 1.23 — draw a plain scalar.
            curr_lr1 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    # Back to training mode for the next epoch.
    model1.train()
    model2.train()
| 11,677 | 32.751445 | 116 | py |
SpinalNet | SpinalNet-master/MNIST_VGG/QMNIST_VGG_and _SpinalVGG.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for QMNIST.
This code trains both NNs as two different models.
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torchvision
import torch.nn as nn
import math
import torch.nn.functional as F
import numpy as np

# Training hyper-parameters.
num_epochs = 200
batch_size_train = 100
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5  # NOTE(review): unused — Adam (not SGD) is instantiated below.
log_interval = 500  # NOTE(review): unused in this script.

# QMNIST training set with random perspective/rotation augmentation.
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.QMNIST('/files/', train=True, download=True,
                                transform=torchvision.transforms.Compose([
                                    torchvision.transforms.RandomPerspective(),
                                    torchvision.transforms.RandomRotation(10, fill=(0,)),
                                    torchvision.transforms.ToTensor(),
                                    torchvision.transforms.Normalize(
                                        (0.1307,), (0.3081,))
                                ])),
    batch_size=batch_size_train, shuffle=True)

# QMNIST test set: no augmentation, larger batches.
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.QMNIST('/files/', train=False, download=True,
                                transform=torchvision.transforms.Compose([
                                    torchvision.transforms.ToTensor(),
                                    torchvision.transforms.Normalize(
                                        (0.1307,), (0.3081,))
                                ])),
    batch_size=batch_size_test, shuffle=True)

# Peek at one test batch and plot the first six digits with their labels.
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
    plt.subplot(2,3,i+1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
fig  # no-op in a plain script; notebook leftover that displays the figure
class VGG(nn.Module):
    """
    Plain VGG-style CNN for 28x28 single-channel images (10 classes).

    Four conv stages (each ending in a 2x2 max-pool) shrink the input
    28x28 -> 14x14 -> 7x7 -> 3x3 -> 1x1, leaving a 256-dim feature vector
    that a two-layer classifier maps to class log-probabilities.

    Based on - https://github.com/kkweon/mnist-competition
    from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
    """
    def two_conv_pool(self, in_channels, f1, f2):
        # Build two 3x3 conv->BN->ReLU layers followed by a 2x2 max-pool.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # He-style init for convs (std = sqrt(2 / fan_out)); identity init for BN.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def three_conv_pool(self, in_channels, f1, f2, f3):
        # Same as two_conv_pool but with three conv->BN->ReLU layers.
        s = nn.Sequential(
            nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f1),
            nn.ReLU(inplace=True),
            nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f2),
            nn.ReLU(inplace=True),
            nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(f3),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Same initialization scheme as two_conv_pool.
        for m in s.children():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        return s

    def __init__(self, num_classes=10):
        super(VGG, self).__init__()
        # Conv backbone; spatial size halves after each stage.
        self.l1 = self.two_conv_pool(1, 64, 64)
        self.l2 = self.two_conv_pool(64, 128, 128)
        self.l3 = self.three_conv_pool(128, 256, 256, 256)
        self.l4 = self.three_conv_pool(256, 256, 256, 256)
        # Conventional fully-connected head on the flattened 256-dim features.
        self.classifier = nn.Sequential(
            nn.Dropout(p = 0.5),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p = 0.5),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x = self.l1(x)
        x = self.l2(x)
        x = self.l3(x)
        x = self.l4(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 256)
        x = self.classifier(x)
        return F.log_softmax(x, dim=1)
# SpinalVGG head sizes: each spinal layer reads half of the 256 flat features
# (Half_width) and produces layer_width outputs.
Half_width =128
layer_width =128
class SpinalVGG(nn.Module):
"""
Based on - https://github.com/kkweon/mnist-competition
from: https://github.com/ranihorev/Kuzushiji_MNIST/blob/master/KujuMNIST.ipynb
"""
def two_conv_pool(self, in_channels, f1, f2):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def three_conv_pool(self,in_channels, f1, f2, f3):
s = nn.Sequential(
nn.Conv2d(in_channels, f1, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f1),
nn.ReLU(inplace=True),
nn.Conv2d(f1, f2, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f2),
nn.ReLU(inplace=True),
nn.Conv2d(f2, f3, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(f3),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
for m in s.children():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
return s
def __init__(self, num_classes=10):
super(SpinalVGG, self).__init__()
self.l1 = self.two_conv_pool(1, 64, 64)
self.l2 = self.two_conv_pool(64, 128, 128)
self.l3 = self.three_conv_pool(128, 256, 256, 256)
self.l4 = self.three_conv_pool(256, 256, 256, 256)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(Half_width+layer_width, layer_width),
nn.BatchNorm1d(layer_width), nn.ReLU(inplace=True),)
self.fc_out = nn.Sequential(
nn.Dropout(p = 0.5), nn.Linear(layer_width*4, num_classes),)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = x.view(x.size(0), -1)
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return F.log_softmax(x, dim=1)
device = 'cuda'
# For updating learning rate
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate  # current LR for model1 (ordinary VGG)
curr_lr2 = learning_rate  # current LR for model2 (Spinal VGG)
model1 = VGG().to(device)
model2 = SpinalVGG().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
# NOTE(review): total_step was already computed above; harmless duplication.
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
# Main loop: each epoch trains both networks on the same batches, then
# evaluates both on the test set.  A model that fails to beat its best
# accuracy gets a freshly randomized, smaller learning rate (the cube of a
# uniform sample biases it toward small values).
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)

        # Forward/backward pass for the ordinary VGG.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()

        # Forward/backward pass for the Spinal VGG on the same batch.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()

        if i == 499:  # periodic progress report once per epoch (step 500)
            print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))

    # Test the model
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)

            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()

            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()

        if best_accuracy1 >= correct1 / total1:
            # No improvement: re-randomize the LR.  np.random.rand() ** 3
            # replaces np.asscalar(pow(np.random.rand(1), 3)); np.asscalar
            # was removed in NumPy 1.23.
            curr_lr1 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))

        if best_accuracy2 >= correct2 / total2:
            curr_lr2 = learning_rate * (np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))

    model1.train()
    model2.train()
| 11,636 | 32.34384 | 116 | py |
SpinalNet | SpinalNet-master/MNIST/Arch2_Fashion_MNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 Fashion MNIST code.
@author: Dipu
"""
import torch
import torchvision
import numpy as np
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.005
momentum = 0.5
log_interval = 500
first_HL =300
max_accuracy= 0.0
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.RandomResizedCrop(28, scale=(0.95,1)),
#torchvision.transforms.RandomCrop(28,2),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.FashionMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.5,), (0.5,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(train_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
#%%
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """CNN with three parallel SpinalNet fully-connected heads (Fashion-MNIST).

    Three feature vectors -- the conv2 output (320-d), the conv1 output
    (1440-d) and the raw flattened image (784-d) -- are each split into two
    halves and passed through a chain of "spinal" sub-layers.  Every
    sub-layer sees one half (alternating) concatenated with the previous
    sub-layer's output; the outputs of all 26 sub-layers feed the final
    classifier.  ``first_HL`` is a module-level width constant.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Spinal chain over the 320-d conv2 features (halves of 160).
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_6 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_7 = nn.Linear(160 + first_HL, first_HL)
        # Spinal chain over the 1440-d conv1 features (halves of 720).
        self.fcp = nn.Linear(720, first_HL)
        self.fcp_1 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_2 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_3 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_4 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_5 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_6 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_7 = nn.Linear(720 + first_HL, first_HL)
        # Spinal chain over the 784-d raw image (halves of 392).
        self.fcp2 = nn.Linear(392, first_HL)
        self.fcp2_1 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_2 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_3 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_4 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_5 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_6 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_7 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_8 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_9 = nn.Linear(392 + first_HL, first_HL)
        # 8 + 8 + 10 = 26 spinal outputs feed the classifier.
        self.fc2 = nn.Linear(first_HL*26, 50)
        self.fc3 = nn.Linear(50, 10)

    def _spinal_chain(self, x, layers, half_width):
        """Feed alternating halves of *x* through *layers*, each layer also
        receiving the previous layer's output; return all outputs concatenated.

        Replaces three hand-unrolled copies of the same pattern; ordering of
        slices and concatenations matches the original exactly.
        """
        outs = []
        for idx, layer in enumerate(layers):
            if idx % 2 == 0:
                part = x[:, 0:half_width]
            else:
                part = x[:, half_width:half_width * 2]
            if outs:
                part = torch.cat([part, outs[-1]], dim=1)
            outs.append(F.relu(layer(part)))
        return torch.cat(outs, dim=1)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x_real = x.view(-1, 28*28)
        x = self.conv1(x)
        x = F.relu(F.max_pool2d(x, 2))
        x_conv1 = x.view(-1, 1440)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        out_conv2 = self._spinal_chain(x, (self.fc1, self.fc1_1, self.fc1_2,
                                           self.fc1_3, self.fc1_4, self.fc1_5,
                                           self.fc1_6, self.fc1_7), 160)
        out_conv1 = self._spinal_chain(x_conv1, (self.fcp, self.fcp_1, self.fcp_2,
                                                 self.fcp_3, self.fcp_4, self.fcp_5,
                                                 self.fcp_6, self.fcp_7), 720)
        out_raw = self._spinal_chain(x_real, (self.fcp2, self.fcp2_1, self.fcp2_2,
                                              self.fcp2_3, self.fcp2_4, self.fcp2_5,
                                              self.fcp2_6, self.fcp2_7, self.fcp2_8,
                                              self.fcp2_9), 392)
        # Original concatenation order: raw-image chain, conv1 chain, conv2 chain.
        x = torch.cat([out_raw, out_conv1, out_conv2], dim=1)
        x = F.relu(self.fc2(x))
        # dim made explicit: relying on log_softmax's implicit dim is deprecated.
        x = F.log_softmax(self.fc3(x), dim=1)
        return x
device = 'cuda'
network = Net().to(device)
# Plain SGD with momentum; the epoch loop below re-creates this optimizer
# with a randomized learning rate after epoch 50.
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                      momentum=momentum)
# Histories for plotting / inspection.
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
  """Run one training epoch over train_loader; record the loss every
  log_interval batches into train_losses / train_counter."""
  network.train()
  for batch_idx, (data, target) in enumerate(train_loader):
    optimizer.zero_grad()
    data = data.to(device)
    target = target.to(device)
    output = network(data)
    loss = F.nll_loss(output, target)
    loss.backward()
    optimizer.step()
    if batch_idx % log_interval == 0:
#      print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
#        epoch, batch_idx * len(data), len(train_loader.dataset),
#        100. * batch_idx / len(train_loader), loss.item()))
      train_losses.append(loss.item())
      # NOTE(review): the literal 64 assumes batch_size_train == 64.
      train_counter.append(
        (batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test(max_accuracy, epoch):
    """Evaluate the network on the test set.

    Prints a summary line and returns the best accuracy seen so far
    (the max of *max_accuracy* and this epoch's accuracy).
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    accuracy_local = 100. * correct / len(test_loader.dataset)
    if accuracy_local > max_accuracy:
        max_accuracy = accuracy_local
    print('Epoch :{} Avg. loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(
        epoch, test_loss, correct, len(test_loader.dataset), accuracy_local, max_accuracy))
    return max_accuracy
# Epoch loop: after epoch 50, restart SGD each epoch with a randomly scaled
# learning rate (cubed uniform sample keeps it small).
for epoch in range(1, n_epochs + 1):
    train(epoch)
    max_accuracy = test(max_accuracy, epoch)
    if epoch > 50:
        # np.random.rand() ** 3 replaces np.asscalar(pow(np.random.rand(1), 3));
        # np.asscalar was removed in NumPy 1.23.
        optimizer = optim.SGD(network.parameters(),
                              lr=learning_rate * 5 * (np.random.rand() ** 3),
                              momentum=momentum)
| 10,213 | 34.099656 | 105 | py |
SpinalNet | SpinalNet-master/MNIST/Arch2_KMNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 KMNIST code.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.KMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """CNN with three parallel SpinalNet fully-connected heads (KMNIST).

    Three feature vectors -- the conv2 output (320-d), the conv1 output
    (1440-d) and the raw flattened image (784-d) -- are each split into two
    halves and passed through a chain of "spinal" sub-layers.  Every
    sub-layer sees one half (alternating) concatenated with the previous
    sub-layer's output; the outputs of all 26 sub-layers feed the final
    classifier.  ``first_HL`` is a module-level width constant.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Spinal chain over the 320-d conv2 features (halves of 160).
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_6 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_7 = nn.Linear(160 + first_HL, first_HL)
        # Spinal chain over the 1440-d conv1 features (halves of 720).
        self.fcp = nn.Linear(720, first_HL)
        self.fcp_1 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_2 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_3 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_4 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_5 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_6 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_7 = nn.Linear(720 + first_HL, first_HL)
        # Spinal chain over the 784-d raw image (halves of 392).
        self.fcp2 = nn.Linear(392, first_HL)
        self.fcp2_1 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_2 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_3 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_4 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_5 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_6 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_7 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_8 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_9 = nn.Linear(392 + first_HL, first_HL)
        # 8 + 8 + 10 = 26 spinal outputs feed the classifier.
        self.fc2 = nn.Linear(first_HL*26, 50)
        self.fc3 = nn.Linear(50, 10)

    def _spinal_chain(self, x, layers, half_width):
        """Feed alternating halves of *x* through *layers*, each layer also
        receiving the previous layer's output; return all outputs concatenated.

        Replaces three hand-unrolled copies of the same pattern; ordering of
        slices and concatenations matches the original exactly.
        """
        outs = []
        for idx, layer in enumerate(layers):
            if idx % 2 == 0:
                part = x[:, 0:half_width]
            else:
                part = x[:, half_width:half_width * 2]
            if outs:
                part = torch.cat([part, outs[-1]], dim=1)
            outs.append(F.relu(layer(part)))
        return torch.cat(outs, dim=1)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x_real = x.view(-1, 28*28)
        x = self.conv1(x)
        x = F.relu(F.max_pool2d(x, 2))
        x_conv1 = x.view(-1, 1440)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        out_conv2 = self._spinal_chain(x, (self.fc1, self.fc1_1, self.fc1_2,
                                           self.fc1_3, self.fc1_4, self.fc1_5,
                                           self.fc1_6, self.fc1_7), 160)
        out_conv1 = self._spinal_chain(x_conv1, (self.fcp, self.fcp_1, self.fcp_2,
                                                 self.fcp_3, self.fcp_4, self.fcp_5,
                                                 self.fcp_6, self.fcp_7), 720)
        out_raw = self._spinal_chain(x_real, (self.fcp2, self.fcp2_1, self.fcp2_2,
                                              self.fcp2_3, self.fcp2_4, self.fcp2_5,
                                              self.fcp2_6, self.fcp2_7, self.fcp2_8,
                                              self.fcp2_9), 392)
        # Original concatenation order: raw-image chain, conv1 chain, conv2 chain.
        x = torch.cat([out_raw, out_conv1, out_conv2], dim=1)
        x = F.relu(self.fc2(x))
        # dim made explicit: relying on log_softmax's implicit dim is deprecated.
        x = F.log_softmax(self.fc3(x), dim=1)
        return x
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
  """Run one training epoch of `network` over train_loader (CPU tensors)."""
  network.train()
  for batch_idx, (data, target) in enumerate(train_loader):
    optimizer.zero_grad()
    output = network(data)
    loss = F.nll_loss(output, target)
    loss.backward()
    optimizer.step()
def test(random_seed, epoch, max_accuracy):
    """Evaluate on the test set and return the best accuracy seen so far.

    Prints a summary line every fifth epoch.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    accuracy = 100. * correct / len(test_loader.dataset)
    if accuracy > max_accuracy:
        max_accuracy = accuracy
    if epoch % 5 == 0:
        print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
            random_seed, epoch,
            test_loss, correct, len(test_loader.dataset),
            max_accuracy))
    return max_accuracy
# Train from scratch for each random seed; when an epoch does not improve
# the best accuracy, re-sample a small learning rate.
for random_seed in range(2):
    max_accuracy = 0
    learning_rate = 0.1
    torch.manual_seed(random_seed)
    network = Net()
    optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                          momentum=momentum)
    for epoch in range(1, n_epochs + 1):
        train(epoch)
        max_accuracy2 = test(random_seed, epoch, max_accuracy)
        if max_accuracy == max_accuracy2:
            # No improvement: sample a new (smaller) learning rate and apply
            # it to the optimizer.  The original only rebound the local
            # variable, so the new rate never reached the optimizer.
            learning_rate = 0.1 * float(pow(torch.rand(1), 5))
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
        else:
            max_accuracy = max_accuracy2
| 9,711 | 32.839721 | 117 | py |
SpinalNet | SpinalNet-master/MNIST/SpinalNet_MNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet MNIST code.
It ususlly provides better performance for the same number of epoch.
The same code can also be used for KMNIST, QMNIST and FashionMNIST.
torchvision.datasets.MNIST needs to be changed to
torchvision.datasets.FashionMNIST for FashionMNIST simulations
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
first_HL =8
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """Small CNN with a SpinalNet fully-connected head for 1x28x28 inputs.

    The 320-d conv feature vector is split into two 160-d halves; each
    spinal sub-layer receives one half (alternating) concatenated with the
    previous sub-layer's output, and all six sub-layer outputs are
    concatenated for the final classifier.  ``first_HL`` is a module-level
    width constant.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # First spinal sub-layer sees one half only; the rest also receive
        # the previous sub-layer's first_HL-dimensional output.
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc2 = nn.Linear(first_HL*6, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        # Spinal chain: alternate halves, each layer also sees the previous
        # layer's output (replaces six hand-unrolled copies of the pattern).
        halves = (x[:, 0:160], x[:, 160:320])
        spinal = (self.fc1, self.fc1_1, self.fc1_2,
                  self.fc1_3, self.fc1_4, self.fc1_5)
        outs = []
        for idx, layer in enumerate(spinal):
            part = halves[idx % 2]
            if outs:
                part = torch.cat([part, outs[-1]], dim=1)
            outs.append(F.relu(layer(part)))
        x = torch.cat(outs, dim=1)
        x = self.fc2(x)
        # dim made explicit: relying on log_softmax's implicit dim is deprecated.
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
  """Run one training epoch; print and record the loss every
  log_interval batches."""
  network.train()
  for batch_idx, (data, target) in enumerate(train_loader):
    optimizer.zero_grad()
    output = network(data)
    loss = F.nll_loss(output, target)
    loss.backward()
    optimizer.step()
    if batch_idx % log_interval == 0:
      print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
        epoch, batch_idx * len(data), len(train_loader.dataset),
        100. * batch_idx / len(train_loader), loss.item()))
      train_losses.append(loss.item())
      # NOTE(review): the literal 64 assumes batch_size_train == 64.
      train_counter.append(
        (batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test():
    """Evaluate on the full test set; print average loss and accuracy and
    append the loss to test_losses."""
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
| 5,560 | 29.387978 | 78 | py |
SpinalNet | SpinalNet-master/MNIST/Arch2_QMNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 QMNIST code.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
prob = 0.5
random_seed = 1
torch.backends.cudnn.enabled = False
torch.manual_seed(random_seed)
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.QMNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """CNN with three parallel SpinalNet fully-connected heads (QMNIST).

    Three feature vectors -- the conv2 output (320-d), the conv1 output
    (1440-d) and the raw flattened image (784-d) -- are each split into two
    halves and passed through a chain of "spinal" sub-layers.  Every
    sub-layer sees one half (alternating) concatenated with the previous
    sub-layer's output; the outputs of all 26 sub-layers feed the final
    classifier.  ``first_HL`` is a module-level width constant.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Spinal chain over the 320-d conv2 features (halves of 160).
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_6 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_7 = nn.Linear(160 + first_HL, first_HL)
        # Spinal chain over the 1440-d conv1 features (halves of 720).
        self.fcp = nn.Linear(720, first_HL)
        self.fcp_1 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_2 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_3 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_4 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_5 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_6 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_7 = nn.Linear(720 + first_HL, first_HL)
        # Spinal chain over the 784-d raw image (halves of 392).
        self.fcp2 = nn.Linear(392, first_HL)
        self.fcp2_1 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_2 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_3 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_4 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_5 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_6 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_7 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_8 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_9 = nn.Linear(392 + first_HL, first_HL)
        # 8 + 8 + 10 = 26 spinal outputs feed the classifier.
        self.fc2 = nn.Linear(first_HL*26, 50)
        self.fc3 = nn.Linear(50, 10)

    def _spinal_chain(self, x, layers, half_width):
        """Feed alternating halves of *x* through *layers*, each layer also
        receiving the previous layer's output; return all outputs concatenated.

        Replaces three hand-unrolled copies of the same pattern; ordering of
        slices and concatenations matches the original exactly.
        """
        outs = []
        for idx, layer in enumerate(layers):
            if idx % 2 == 0:
                part = x[:, 0:half_width]
            else:
                part = x[:, half_width:half_width * 2]
            if outs:
                part = torch.cat([part, outs[-1]], dim=1)
            outs.append(F.relu(layer(part)))
        return torch.cat(outs, dim=1)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of 1x28x28 images."""
        x_real = x.view(-1, 28*28)
        x = self.conv1(x)
        x = F.relu(F.max_pool2d(x, 2))
        x_conv1 = x.view(-1, 1440)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        out_conv2 = self._spinal_chain(x, (self.fc1, self.fc1_1, self.fc1_2,
                                           self.fc1_3, self.fc1_4, self.fc1_5,
                                           self.fc1_6, self.fc1_7), 160)
        out_conv1 = self._spinal_chain(x_conv1, (self.fcp, self.fcp_1, self.fcp_2,
                                                 self.fcp_3, self.fcp_4, self.fcp_5,
                                                 self.fcp_6, self.fcp_7), 720)
        out_raw = self._spinal_chain(x_real, (self.fcp2, self.fcp2_1, self.fcp2_2,
                                              self.fcp2_3, self.fcp2_4, self.fcp2_5,
                                              self.fcp2_6, self.fcp2_7, self.fcp2_8,
                                              self.fcp2_9), 392)
        # Original concatenation order: raw-image chain, conv1 chain, conv2 chain.
        x = torch.cat([out_raw, out_conv1, out_conv2], dim=1)
        x = F.relu(self.fc2(x))
        # dim made explicit: relying on log_softmax's implicit dim is deprecated.
        x = F.log_softmax(self.fc3(x), dim=1)
        return x
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Run a single optimisation pass over the full training set.

    Relies on the module-level ``network``, ``optimizer`` and ``train_loader``.
    The ``epoch`` argument is accepted for symmetry with ``test`` but unused.
    """
    network.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        batch_loss = F.nll_loss(network(data), target)
        batch_loss.backward()
        optimizer.step()
def test(random_seed, epoch, max_accuracy):
    """Evaluate ``network`` on the test set and track the best accuracy.

    Appends the average test loss to the module-level ``test_losses`` list,
    prints a summary every 5th epoch, and returns the (possibly updated)
    running maximum accuracy.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    accuracy = 100. * correct / len(test_loader.dataset)
    if accuracy > max_accuracy:
        max_accuracy = accuracy
    if epoch % 5 == 0:
        print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
            random_seed, epoch,
            test_loss, correct, len(test_loader.dataset),
            max_accuracy))
    return max_accuracy
# Train from scratch for each random seed, decaying the learning rate
# whenever the best test accuracy stops improving.
for random_seed in range(2):
    max_accuracy = 0
    learning_rate = 0.1
    torch.manual_seed(random_seed)
    network = Net()
    optimizer = optim.SGD(network.parameters(), lr=learning_rate,
                          momentum=momentum)
    for epoch in range(1, n_epochs + 1):
        train(epoch)
        max_accuracy2 = test(random_seed, epoch, max_accuracy)
        if max_accuracy == max_accuracy2:
            # No improvement: decay the learning rate by 10%.
            # Rebinding `learning_rate` alone has no effect on the optimizer,
            # so the new value must be pushed into its parameter groups.
            learning_rate = learning_rate * .9
            for group in optimizer.param_groups:
                group['lr'] = learning_rate
        else:
            max_accuracy = max_accuracy2
#workbook.close()
| 9,751 | 32.512027 | 117 | py |
SpinalNet | SpinalNet-master/MNIST/default_pytorch_EMNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the default EMNIST code for comparison.
The code is collected from:
nextjournal.com/gkoehler/pytorch-mnist
As the EMNIST needs split='digits', we make a different file for EMNIST
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel images.

    Two 5x5 conv layers (with 2x2 max-pooling and dropout on the second)
    reduce the input to 320 features, followed by a 320->50->10 classifier.
    Returns per-class log-probabilities (shape ``(batch, 10)``).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # dim=1 normalises over the class dimension; the implicit-dim form
        # of log_softmax is deprecated and emits a warning.
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Train ``network`` for one epoch, logging loss every ``log_interval`` batches."""
    network.train()
    dataset_size = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        loss = F.nll_loss(network(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), dataset_size,
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * dataset_size))
def test():
    """Evaluate ``network`` on the test set; print average loss and accuracy.

    Appends the average loss to the module-level ``test_losses`` list.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
| 4,274 | 28.081633 | 84 | py |
SpinalNet | SpinalNet-master/MNIST/Arch2_MNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 MNIST code.
@author: Dipu
"""
import torch
import torchvision
import numpy as np
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 500
first_HL =30
max_accuracy= 0.0
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """CNN with a three-branch SpinalNet classifier head (Arch2).

    Three views of the input feed separate spinal cascades:
      * conv2 output, flattened to 320 features, split into halves of 160;
      * conv1 output, flattened to 1440 features, halves of 720;
      * the raw 28x28 image, flattened to 784 features, halves of 392.
    Each spinal layer receives one half of its view (alternating halves)
    concatenated with the previous layer's ``first_HL``-wide output.  All
    8 + 8 + 10 = 26 spinal outputs are concatenated and classified by
    fc2/fc3.  ``first_HL`` is a module-level constant.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Spinal branch 1: conv2 features (320 -> halves of 160), 8 layers.
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_6 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_7 = nn.Linear(160 + first_HL, first_HL)
        # Spinal branch 2: conv1 features (1440 -> halves of 720), 8 layers.
        self.fcp = nn.Linear(720, first_HL)
        self.fcp_1 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_2 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_3 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_4 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_5 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_6 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_7 = nn.Linear(720 + first_HL, first_HL)
        # Spinal branch 3: raw pixels (784 -> halves of 392), 10 layers.
        self.fcp2 = nn.Linear(392, first_HL)
        self.fcp2_1 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_2 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_3 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_4 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_5 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_6 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_7 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_8 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_9 = nn.Linear(392 + first_HL, first_HL)
        # 26 spinal outputs feed the final classifier.
        self.fc2 = nn.Linear(first_HL * 26, 50)
        self.fc3 = nn.Linear(50, 10)

    def _spine(self, feats, half_width, layers):
        """Run a spinal cascade over ``feats``.

        Layer ``i`` sees the first half of ``feats`` for even ``i`` and the
        second half for odd ``i``, concatenated with the previous layer's
        output.  Returns the list of per-layer outputs, in order.
        """
        outs = []
        prev = None
        for i, layer in enumerate(layers):
            if i % 2 == 0:
                seg = feats[:, 0:half_width]
            else:
                seg = feats[:, half_width:half_width * 2]
            inp = seg if prev is None else torch.cat([seg, prev], dim=1)
            prev = F.relu(layer(inp))
            outs.append(prev)
        return outs

    def forward(self, x):
        x_real = x.view(-1, 28 * 28)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x_conv1 = x.view(-1, 1440)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x_conv2 = x.view(-1, 320)
        branch1 = self._spine(x_conv2, 160,
                              [self.fc1, self.fc1_1, self.fc1_2, self.fc1_3,
                               self.fc1_4, self.fc1_5, self.fc1_6, self.fc1_7])
        branch2 = self._spine(x_conv1, 720,
                              [self.fcp, self.fcp_1, self.fcp_2, self.fcp_3,
                               self.fcp_4, self.fcp_5, self.fcp_6, self.fcp_7])
        branch3 = self._spine(x_real, 392,
                              [self.fcp2, self.fcp2_1, self.fcp2_2, self.fcp2_3,
                               self.fcp2_4, self.fcp2_5, self.fcp2_6,
                               self.fcp2_7, self.fcp2_8, self.fcp2_9])
        # Concatenation order must match the original layout expected by fc2:
        # branch3, then branch2, then branch1.
        x = torch.cat(branch3 + branch2 + branch1, dim=1)
        x = F.relu(self.fc2(x))
        # dim=1: the implicit-dim form of log_softmax is deprecated.
        return F.log_softmax(self.fc3(x), dim=1)
device = 'cuda'
network = Net().to(device)
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Train ``network`` for one epoch on ``device``.

    Records the loss into the module-level ``train_losses``/``train_counter``
    lists every ``log_interval`` batches (console printing is disabled).
    """
    network.train()
    dataset_size = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        data, target = data.to(device), target.to(device)
        loss = F.nll_loss(network(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * dataset_size))
def test(max_accuracy, epoch):
    """Evaluate ``network`` on ``device`` and return the updated best accuracy.

    Appends the average loss to the module-level ``test_losses`` list and
    prints a per-epoch summary.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    accuracy_local = 100. * correct / len(test_loader.dataset)
    if accuracy_local > max_accuracy:
        max_accuracy = accuracy_local
    print('Epoch :{} Avg. loss: {:.4f}, Accuracy: {}/{} ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(
        epoch, test_loss, correct, len(test_loader.dataset), accuracy_local, max_accuracy))
    return max_accuracy
for epoch in range(1, n_epochs + 1):
    train(epoch)
    max_accuracy = test(max_accuracy, epoch)
    if epoch > 100:
        # After epoch 100, restart SGD with a randomised learning rate; the
        # cube of a uniform sample biases the draw towards small rates.
        # np.asscalar() was removed in NumPy 1.23 -- use ndarray.item() instead.
        random_lr = learning_rate * 5 * (np.random.rand(1) ** 3).item()
        optimizer = optim.SGD(network.parameters(), lr=random_lr,
                              momentum=momentum)
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
#%%
with torch.no_grad():
output = network(example_data)
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Prediction: {}".format(
output.data.max(1, keepdim=True)[1][i].item()))
plt.xticks([])
plt.yticks([])
fig
| 10,686 | 33.253205 | 105 | py |
SpinalNet | SpinalNet-master/MNIST/default_pytorch_MNIST.py | # -*- coding: utf-8 -*-
"""
This Script contains the default MNIST code for comparison.
The code is collected from:
nextjournal.com/gkoehler/pytorch-mnist
The same code can also be used for KMNIST, QMNIST and FashionMNIST.
torchvision.datasets.MNIST needs to be changed to
torchvision.datasets.FashionMNIST for FashionMNIST simulations
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('/files/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel images.

    Two 5x5 conv layers (with 2x2 max-pooling and dropout on the second)
    reduce the input to 320 features, followed by a 320->50->10 classifier.
    Returns per-class log-probabilities (shape ``(batch, 10)``).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # dim=1 normalises over the class dimension; the implicit-dim form
        # of log_softmax is deprecated and emits a warning.
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Train ``network`` for one epoch, logging loss every ``log_interval`` batches."""
    network.train()
    dataset_size = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        loss = F.nll_loss(network(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), dataset_size,
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * dataset_size))
def test():
    """Evaluate ``network`` on the test set; print average loss and accuracy.

    Appends the average loss to the module-level ``test_losses`` list.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
| 4,349 | 28.391892 | 76 | py |
SpinalNet | SpinalNet-master/MNIST/Arch2_EMNIST_Digits.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet Arch2 for EMNIST digits.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
prob = 0.5
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """CNN with a three-branch SpinalNet classifier head (Arch2).

    Three views of the input feed separate spinal cascades:
      * conv2 output, flattened to 320 features, split into halves of 160;
      * conv1 output, flattened to 1440 features, halves of 720;
      * the raw 28x28 image, flattened to 784 features, halves of 392.
    Each spinal layer receives one half of its view (alternating halves)
    concatenated with the previous layer's ``first_HL``-wide output.  All
    8 + 8 + 10 = 26 spinal outputs are concatenated and classified by
    fc2/fc3.  ``first_HL`` is a module-level constant.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # Spinal branch 1: conv2 features (320 -> halves of 160), 8 layers.
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_6 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_7 = nn.Linear(160 + first_HL, first_HL)
        # Spinal branch 2: conv1 features (1440 -> halves of 720), 8 layers.
        self.fcp = nn.Linear(720, first_HL)
        self.fcp_1 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_2 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_3 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_4 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_5 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_6 = nn.Linear(720 + first_HL, first_HL)
        self.fcp_7 = nn.Linear(720 + first_HL, first_HL)
        # Spinal branch 3: raw pixels (784 -> halves of 392), 10 layers.
        self.fcp2 = nn.Linear(392, first_HL)
        self.fcp2_1 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_2 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_3 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_4 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_5 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_6 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_7 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_8 = nn.Linear(392 + first_HL, first_HL)
        self.fcp2_9 = nn.Linear(392 + first_HL, first_HL)
        # 26 spinal outputs feed the final classifier.
        self.fc2 = nn.Linear(first_HL * 26, 50)
        self.fc3 = nn.Linear(50, 10)

    def _spine(self, feats, half_width, layers):
        """Run a spinal cascade over ``feats``.

        Layer ``i`` sees the first half of ``feats`` for even ``i`` and the
        second half for odd ``i``, concatenated with the previous layer's
        output.  Returns the list of per-layer outputs, in order.
        """
        outs = []
        prev = None
        for i, layer in enumerate(layers):
            if i % 2 == 0:
                seg = feats[:, 0:half_width]
            else:
                seg = feats[:, half_width:half_width * 2]
            inp = seg if prev is None else torch.cat([seg, prev], dim=1)
            prev = F.relu(layer(inp))
            outs.append(prev)
        return outs

    def forward(self, x):
        x_real = x.view(-1, 28 * 28)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x_conv1 = x.view(-1, 1440)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x_conv2 = x.view(-1, 320)
        branch1 = self._spine(x_conv2, 160,
                              [self.fc1, self.fc1_1, self.fc1_2, self.fc1_3,
                               self.fc1_4, self.fc1_5, self.fc1_6, self.fc1_7])
        branch2 = self._spine(x_conv1, 720,
                              [self.fcp, self.fcp_1, self.fcp_2, self.fcp_3,
                               self.fcp_4, self.fcp_5, self.fcp_6, self.fcp_7])
        branch3 = self._spine(x_real, 392,
                              [self.fcp2, self.fcp2_1, self.fcp2_2, self.fcp2_3,
                               self.fcp2_4, self.fcp2_5, self.fcp2_6,
                               self.fcp2_7, self.fcp2_8, self.fcp2_9])
        # Concatenation order must match the original layout expected by fc2:
        # branch3, then branch2, then branch1.
        x = torch.cat(branch3 + branch2 + branch1, dim=1)
        x = F.relu(self.fc2(x))
        # dim=1: the implicit-dim form of log_softmax is deprecated.
        return F.log_softmax(self.fc3(x), dim=1)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Run a single optimisation pass over the full training set.

    Relies on the module-level ``network``, ``optimizer`` and ``train_loader``.
    The ``epoch`` argument is accepted for symmetry with ``test`` but unused.
    """
    network.train()
    for data, target in train_loader:
        optimizer.zero_grad()
        batch_loss = F.nll_loss(network(data), target)
        batch_loss.backward()
        optimizer.step()
def test(random_seed, epoch, max_accuracy):
    """Evaluate ``network`` on the test set and track the best accuracy.

    Appends the average test loss to the module-level ``test_losses`` list,
    prints a summary every 5th epoch, and returns the (possibly updated)
    running maximum accuracy.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    accuracy = 100. * correct / len(test_loader.dataset)
    if accuracy > max_accuracy:
        max_accuracy = accuracy
    if epoch % 5 == 0:
        print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
            random_seed, epoch,
            test_loss, correct, len(test_loader.dataset),
            max_accuracy))
    return max_accuracy
for random_seed in range(2):
max_accuracy = 0
learning_rate =0.1
torch.manual_seed(random_seed)
#test(random_seed)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
for epoch in range(1, n_epochs + 1):
train(epoch)
max_accuracy2 = test(random_seed,epoch, max_accuracy)
if max_accuracy == max_accuracy2:
learning_rate = learning_rate*.8
else:
max_accuracy = max_accuracy2
#workbook.close()
| 9,736 | 32.926829 | 117 | py |
SpinalNet | SpinalNet-master/MNIST/SpinalNet_EMNIST_Digits.py | # -*- coding: utf-8 -*-
"""
This Script contains the SpinalNet EMNIST digits code.
It provides better performance for the same number of epoch.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
first_HL =10
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """LeNet-style CNN whose fully connected classifier is a SpinalNet cascade.

    The 320 flattened conv features are split into two halves of 160.  Each
    of the six spinal layers receives one half (alternating) concatenated
    with the previous layer's ``first_HL``-wide output, and all six outputs
    are concatenated for the final linear classifier.  ``first_HL`` is a
    module-level constant.
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        # First spinal layer sees only half of the features; the rest also
        # see the previous layer's output.
        self.fc1 = nn.Linear(160, first_HL)
        self.fc1_1 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_4 = nn.Linear(160 + first_HL, first_HL)
        self.fc1_5 = nn.Linear(160 + first_HL, first_HL)
        self.fc2 = nn.Linear(first_HL * 6, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        first, second = x[:, 0:160], x[:, 160:320]
        x1 = F.relu(self.fc1(first))
        x2 = F.relu(self.fc1_1(torch.cat([second, x1], dim=1)))
        x3 = F.relu(self.fc1_2(torch.cat([first, x2], dim=1)))
        x4 = F.relu(self.fc1_3(torch.cat([second, x3], dim=1)))
        x5 = F.relu(self.fc1_4(torch.cat([first, x4], dim=1)))
        x6 = F.relu(self.fc1_5(torch.cat([second, x5], dim=1)))
        x = self.fc2(torch.cat([x1, x2, x3, x4, x5, x6], dim=1))
        # dim=1: the implicit-dim form of log_softmax is deprecated.
        return F.log_softmax(x, dim=1)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
    """Train ``network`` for one epoch, logging loss every ``log_interval`` batches."""
    network.train()
    dataset_size = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        loss = F.nll_loss(network(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), dataset_size,
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * dataset_size))
def test():
    """Evaluate ``network`` on the test set; print average loss and accuracy.

    Appends the average loss to the module-level ``test_losses`` list.
    """
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            # reduction='sum' replaces the long-deprecated size_average=False.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.argmax(dim=1, keepdim=True)
            # .item() keeps `correct` a plain int instead of a 0-d tensor.
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
#%%
| 5,407 | 30.08046 | 84 | py |
SpinalNet | SpinalNet-master/Customizable Model/spinalnettorch.py | # Customizable SpinalNet. Supports up to 30 layers.
import torch
import torch.nn as nn
import numpy as np
class SpinalNet(nn.Module):
def __init__(self, Input_Size, Number_of_Split, HL_width, number_HL, Output_Size, Activation_Function):
super(SpinalNet, self).__init__()
Splitted_Input_Size = int(np.round(Input_Size/Number_of_Split))
self.lru = Activation_Function
self.fc1 = nn.Linear(Splitted_Input_Size, HL_width)
if number_HL>1:
self.fc2 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>2:
self.fc3 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>3:
self.fc4 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>4:
self.fc5 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>5:
self.fc6 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>6:
self.fc7 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>7:
self.fc8 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>8:
self.fc9 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>9:
self.fc10 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>10:
self.fc11 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>11:
self.fc12 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>12:
self.fc13 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>13:
self.fc14 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>14:
self.fc15 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>15:
self.fc16 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>16:
self.fc17 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>17:
self.fc18 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>18:
self.fc19 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>19:
self.fc20 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>20:
self.fc21 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>21:
self.fc22 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>22:
self.fc23 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>23:
self.fc24 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>24:
self.fc25 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>25:
self.fc26 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>26:
self.fc27 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>27:
self.fc28 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>28:
self.fc29 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>29:
self.fc30 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
self.fcx = nn.Linear(HL_width*number_HL, Output_Size)
def forward(self, x):
x_all =x
Splitted_Input_Size = self.fc1.in_features
HL_width = self.fc2.in_features - self.fc1.in_features
number_HL = int(np.round(self.fcx.in_features/HL_width))
length_x_all = number_HL*Splitted_Input_Size
while x_all.size(dim=1) < length_x_all:
x_all = torch.cat([x_all, x],dim=1)
x = self.lru(self.fc1(x_all[:,0:Splitted_Input_Size]))
x_out = x
counter1 = 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc2(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc3(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc4(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc5(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc6(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc7(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc8(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc9(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc10(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc11(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc12(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc13(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc14(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc15(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc16(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc17(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc18(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc19(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc20(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc21(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc22(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc23(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc24(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc25(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc26(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc27(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc28(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc29(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc30(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
#print("Size before output layer:",x_out.size(dim=1))
x = self.fcx(x_out)
return x | 12,469 | 46.414449 | 107 | py |
SpinalNet | SpinalNet-master/CIFAR-100/CNN_dropout_Default_and_SpinalFC_CIFAR100.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal CNN dropout code for CIFAR-100.
This code trains both NNs as two different models.
The code is collected and changed from:
https://zhenye-na.github.io/2018/09/28/pytorch-cnn-cifar10.html
This code gradually decreases the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(1)
random.seed(1)
Half_width =2048
layer_width = 256
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-10 dataset
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=200,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=200,
shuffle=False)
class CNN(nn.Module):
    """Plain convolutional baseline for CIFAR-100.

    Three conv blocks (32/64 -> 128/128 -> 256/256 channels, each ending in a
    2x2 max-pool) followed by a conventional 3-layer fully connected
    classifier with 100 outputs.
    """
    def __init__(self):
        """Build the convolutional trunk and the fully connected head."""
        super(CNN, self).__init__()
        self.conv_layer = nn.Sequential(
            # Conv Layer block 1: 3 -> 32 -> 64 channels, 32x32 -> 16x16.
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Conv Layer block 2: 64 -> 128 channels, 16x16 -> 8x8, light dropout.
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Conv Layer block 3: 128 -> 256 channels, 8x8 -> 4x4.
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # 256 channels * 4 * 4 spatial = 4096 flattened features in.
        self.fc_layer = nn.Sequential(
            nn.Dropout(p=0.1),
            nn.Linear(4096, 1024),
            nn.ReLU(inplace=True),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.1),
            nn.Linear(512, 100)
        )
    def forward(self, x):
        """Run conv trunk, flatten, classify; returns (batch, 100) logits."""
        # conv layers
        x = self.conv_layer(x)
        # flatten
        x = x.view(x.size(0), -1)
        # fc layer
        x = self.fc_layer(x)
        return x
# 3x3 convolution
class SpinalCNN(nn.Module):
    """CNN with a SpinalNet-style fully connected head for CIFAR-100.

    Same convolutional trunk as ``CNN``; the classifier is replaced by four
    cascaded spinal layers.  The 4096-dim flattened feature vector is treated
    as two halves of ``Half_width`` (2048) features; each spinal layer sees
    one half plus the previous layer's ``layer_width`` activations, and the
    four activations are concatenated before the output layer.
    """
    def __init__(self):
        """Build the convolutional trunk and the spinal fully connected head."""
        super(SpinalCNN, self).__init__()
        self.conv_layer = nn.Sequential(
            # Conv Layer block 1: 3 -> 32 -> 64 channels, 32x32 -> 16x16.
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Conv Layer block 2: 64 -> 128 channels, 16x16 -> 8x8, light dropout.
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Dropout2d(p=0.05),
            # Conv Layer block 3: 128 -> 256 channels, 8x8 -> 4x4.
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Spinal layers: the first sees Half_width inputs, the rest see
        # Half_width inputs plus the previous layer's layer_width outputs.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(Half_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        # Output layer over the concatenation of all four spinal activations.
        self.fc_out = nn.Sequential(
            nn.Dropout(p=0.1), nn.Linear(layer_width*4, 100)
        )
    def forward(self, x):
        """Run conv trunk, flatten, spinal cascade; returns (batch, 100) logits."""
        # conv layers
        x = self.conv_layer(x)
        # flatten
        x = x.view(x.size(0), -1)
        # Spinal cascade: layers alternate between the two input halves,
        # each receiving the previous layer's activation as extra input.
        x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        # fc layer
        x = self.fc_out(x)
        return x
# For updating learning rate
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = CNN().to(device)
model2 = SpinalCNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
# Main train/evaluate loop: each epoch trains both models on the same batches,
# then evaluates both on the test set.  A model that fails to improve its best
# test accuracy gets its learning rate divided by 3.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        # Backward and optimize
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        # Backward and optimize
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()
        # Log losses once per epoch, on the last (250th) batch.
        if i == 249:
            print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
    # Test the model
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()
            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()
        # No improvement -> decay the learning rate; otherwise record the best.
        if best_accuracy1> correct1 / total1:
            curr_lr1 = curr_lr1/3
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % '.format(100 * correct1 / total1))
        else:
            best_accuracy1 = correct1 / total1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2> correct2 / total2:
            curr_lr2 = curr_lr2/3
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % '.format(100 * correct2 / total2))
        else:
            best_accuracy2 = correct2 / total2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
    # Switch back to training mode for the next epoch.
    model1.train()
    model2.train()
| 9,248 | 29.22549 | 99 | py |
SpinalNet | SpinalNet-master/CIFAR-100/ResNet_Default_and_SpinalFC_CIFAR100.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal ResNet code for CIFAR-100.
This code trains both NNs as two different models.
There is option of choosing ResNet18(), ResNet34(), SpinalResNet18(), or
SpinalResNet34().
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
first_HL = 512
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR100(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
    """Return a 3x3, padding-1, bias-free convolution (ResNet BasicBlock style)."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Basic residual block of ResNet: two 3x3 convs plus a skip connection."""
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        """Build the block.

        Args:
            in_channels: channels of the incoming feature map.
            out_channels: channels produced by both convolutions.
            stride: stride of the first convolution (2 for downsampling stages).
            downsample: optional module applied to the input so the residual
                matches the output shape when stride/channels change.
        """
        super(BasicBlock, self).__init__()
        # First conv3x3 layer
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        # Batch Normalization
        self.bn1 = nn.BatchNorm2d(num_features=out_channels)
        # ReLU Activation Function
        self.relu = nn.ReLU(inplace=True)
        # Second conv3x3 layer
        self.conv2 = conv3x3(out_channels, out_channels)
        # Batch Normalization
        self.bn2 = nn.BatchNorm2d(num_features=out_channels)
        # downsample for `residual`
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Forward Pass of Basic Block: conv-bn-relu-conv-bn + residual add."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # NOTE(review): the sum is returned without a final ReLU, unlike the
        # canonical ResNet block — confirm whether this is intentional.
        out += residual
        return out
class SpinalResNet(nn.Module):
    """ResNet backbone with a SpinalNet-style fully connected head.

    The convolutional trunk matches ``ResNet``; the classifier replaces the
    single linear layer with four cascaded ``fc1*`` layers.  Each spinal
    layer is fed one spatial position of the 2x2 pooled 256-channel feature
    map plus the previous layer's activation, and the four activations are
    concatenated before ``fc_layer``.
    """
    def __init__(self, block, duplicates, num_classes=100):
        """Build the network.

        Args:
            block: residual block class (e.g. ``BasicBlock``).
            duplicates: per-stage block counts, e.g. [2, 2, 2, 2].
            num_classes: number of output classes.
        """
        super(SpinalResNet, self).__init__()
        self.in_channels = 32
        self.conv1 = conv3x3(in_channels=3, out_channels=32)
        self.bn = nn.BatchNorm2d(num_features=32)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=0.02)
        # Four stages of stacked residual blocks (32 -> 64 -> 128 -> 256 ch).
        self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
        self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
        self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
        self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
        # ``maxpool`` is kept for parameter/key compatibility, but forward()
        # only uses ``maxpool2``.
        self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
        self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        # Spinal fully connected head.
        self.fc1 = nn.Linear(256, first_HL)
        self.fc1_1 = nn.Linear(256 + first_HL, first_HL)
        self.fc1_2 = nn.Linear(256 + first_HL, first_HL)
        self.fc1_3 = nn.Linear(256 + first_HL, first_HL)
        self.fc_layer = nn.Linear(first_HL*4, num_classes)
        # Initialize weights.  ``nn.init.kaiming_normal`` (no underscore) is a
        # deprecated alias removed from modern PyTorch; use the in-place form.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def _make_block(self, block, duplicates, out_channels, stride=1):
        """
        Create one stage of ``duplicates`` residual blocks.

        Args:
            block: BasicBlock
            duplicates: number of BasicBlock
            out_channels: out channels of the block
        Returns:
            nn.Sequential(*layers)
        """
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            # Project the residual so it matches the new shape.
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(num_features=out_channels)
            )
        layers = []
        layers.append(
            block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, duplicates):
            layers.append(block(out_channels, out_channels))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Forward pass: conv trunk, 2x2 pool, spinal FC cascade, output layer."""
        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.dropout(out)
        # Stacked Basic Blocks
        out = self.conv2_x(out)
        out = self.conv3_x(out)
        out = self.conv4_x(out)
        out = self.conv5_x(out)
        out1 = self.maxpool2(out)
        # Each spinal layer receives one spatial corner of the pooled map
        # (indexed [0,0], [0,1], [1,0], [1,1] — assumes a 2x2 map) plus the
        # previous layer's activation.
        x1 = out1[:, :, 0, 0]
        x1 = self.relu(self.fc1(x1))
        x2 = torch.cat([out1[:, :, 0, 1], x1], dim=1)
        x2 = self.relu(self.fc1_1(x2))
        x3 = torch.cat([out1[:, :, 1, 0], x2], dim=1)
        x3 = self.relu(self.fc1_2(x3))
        x4 = torch.cat([out1[:, :, 1, 1], x3], dim=1)
        x4 = self.relu(self.fc1_3(x4))
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        out = torch.cat([x, x4], dim=1)
        out = self.fc_layer(out)
        return out
class ResNet(nn.Module):
    """Plain residual network: conv trunk, 4x4 max-pool, single linear head."""
    def __init__(self, block, duplicates, num_classes=100):
        """Build the network.

        Args:
            block: residual block class (e.g. ``BasicBlock``).
            duplicates: per-stage block counts, e.g. [3, 4, 6, 3].
            num_classes: number of output classes.
        """
        super(ResNet, self).__init__()
        self.in_channels = 32
        self.conv1 = conv3x3(in_channels=3, out_channels=32)
        self.bn = nn.BatchNorm2d(num_features=32)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=0.02)
        # Four stages of stacked residual blocks (32 -> 64 -> 128 -> 256 ch).
        self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
        self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
        self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
        self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
        self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
        self.fc_layer = nn.Linear(256, num_classes)
        # Initialize weights.  ``nn.init.kaiming_normal`` (no underscore) is a
        # deprecated alias removed from modern PyTorch; use the in-place form.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def _make_block(self, block, duplicates, out_channels, stride=1):
        """
        Create one stage of ``duplicates`` residual blocks.

        Args:
            block: BasicBlock
            duplicates: number of BasicBlock
            out_channels: out channels of the block
        Returns:
            nn.Sequential(*layers)
        """
        downsample = None
        if (stride != 1) or (self.in_channels != out_channels):
            # Project the residual so it matches the new shape.
            downsample = nn.Sequential(
                conv3x3(self.in_channels, out_channels, stride=stride),
                nn.BatchNorm2d(num_features=out_channels)
            )
        layers = []
        layers.append(
            block(self.in_channels, out_channels, stride, downsample))
        self.in_channels = out_channels
        for _ in range(1, duplicates):
            layers.append(block(out_channels, out_channels))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Forward pass: conv trunk, pool, flatten, linear classifier."""
        out = self.conv1(x)
        out = self.bn(out)
        out = self.relu(out)
        out = self.dropout(out)
        # Stacked Basic Blocks
        out = self.conv2_x(out)
        out = self.conv3_x(out)
        out = self.conv4_x(out)
        out = self.conv5_x(out)
        out = self.maxpool(out)
        # Flatten once (the original flattened twice; the second call was a no-op).
        out = out.view(out.size(0), -1)
        out = self.fc_layer(out)
        return out
def ResNet18():
    """Plain ResNet-18 (BasicBlock x [2, 2, 2, 2]) on the active device."""
    return ResNet(BasicBlock, [2, 2, 2, 2]).to(device)
def SpinalResNet18():
    """Spinal-head ResNet-18 (BasicBlock x [2, 2, 2, 2]) on the active device."""
    return SpinalResNet(BasicBlock, [2, 2, 2, 2]).to(device)
def ResNet34():
    """Plain ResNet-34 (BasicBlock x [3, 4, 6, 3]) on the active device."""
    return ResNet(BasicBlock, [3, 4, 6, 3]).to(device)
def SpinalResNet34():
    """Spinal-head ResNet-34 (BasicBlock x [3, 4, 6, 3]) on the active device."""
    return SpinalResNet(BasicBlock, [3, 4, 6, 3]).to(device)
# For updating learning rate
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = ResNet34().to(device)
model2 = SpinalResNet34().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
#%%
# Main train/evaluate loop: each epoch trains both models on the same batches,
# then evaluates both on the test set.  When a model fails to improve its best
# test accuracy, its learning rate is re-drawn at random as
# learning_rate * U(0,1)**5.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward/backward for the plain ResNet.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()
        # Forward/backward for the Spinal ResNet.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()
    # Evaluate both models on the test set after every epoch.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()
            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()
        # No improvement -> random learning-rate restart; otherwise record best.
        # np.asscalar() was removed in NumPy 1.23; use a plain Python float.
        if best_accuracy1 > correct1 / total1:
            curr_lr1 = learning_rate * float(np.random.rand() ** 5)
            update_lr(optimizer1, curr_lr1)
            print('Epoch :{} Accuracy NN: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
                100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * float(np.random.rand() ** 5)
            update_lr(optimizer2, curr_lr2)
            print('Epoch :{} Accuracy SpinalNet: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
                100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
    # Switch back to training mode for the next epoch.
    model1.train()
    model2.train()
| 13,588 | 30.025114 | 101 | py |
SpinalNet | SpinalNet-master/CIFAR-100/VGG_Default_and_SpinalFC_CIFAR_100.py | # -*- coding: utf-8 -*-
"""
This Script contains the default and Spinal VGG code for CIFAR-100.
This code trains both NNs as two different models.
There is option of choosing NN among:
vgg11_bn(), vgg13_bn(), vgg16_bn(), vgg19_bn() and
Spinalvgg11_bn(), Spinalvgg13_bn(), Spinalvgg16_bn(), Spinalvgg19_bn()
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 200
learning_rate = 0.0001
Half_width =256
layer_width=512
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR100(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
    """Return a 3x3, padding-1, bias-free convolution (ResNet BasicBlock style)."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
cfg = {
'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}
class VGG(nn.Module):
    """VGG with the classic 3-layer fully connected classifier.

    ``features`` is the convolutional trunk built by ``make_layers``; its
    flattened output is expected to have 512 features.
    """
    def __init__(self, features, num_class=100):
        """Store the feature extractor and build the 512-4096-4096-num_class head."""
        super().__init__()
        self.features = features
        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_class)
        )
    def forward(self, x):
        """Run features, flatten, classify; returns (batch, num_class) logits."""
        output = self.features(x)
        output = output.view(output.size()[0], -1)
        output = self.classifier(output)
        return output
class SpinalVGG(nn.Module):
    """VGG trunk with a SpinalNet-style fully connected head.

    The 512-dim flattened feature vector is treated as two halves of
    ``Half_width`` (256) features.  Four cascaded spinal layers each see one
    half plus the previous layer's ``layer_width`` activations; their outputs
    are concatenated before the output layer.
    """
    def __init__(self, features, num_class=100):
        """Store the feature extractor and build the spinal head."""
        super().__init__()
        self.features = features
        # First spinal layer sees Half_width inputs; the rest also receive
        # the previous layer's layer_width activations.
        self.fc_spinal_layer1 = nn.Sequential(
            nn.Dropout(), nn.Linear(Half_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer2 = nn.Sequential(
            nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer3 = nn.Sequential(
            nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        self.fc_spinal_layer4 = nn.Sequential(
            nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
            nn.ReLU(inplace=True),
        )
        # Output layer over the concatenation of all four spinal activations.
        self.fc_out = nn.Sequential(
            nn.Dropout(), nn.Linear(layer_width*4, num_class)
        )
    def forward(self, x):
        """Run features, flatten, spinal cascade; returns (batch, num_class) logits."""
        output = self.features(x)
        output = output.view(output.size()[0], -1)
        x = output
        # Spinal cascade: layers alternate between the two input halves.
        x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
        x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
        x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
        x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
        x = torch.cat([x1, x2], dim=1)
        x = torch.cat([x, x3], dim=1)
        x = torch.cat([x, x4], dim=1)
        x = self.fc_out(x)
        return x
def make_layers(cfg, batch_norm=False):
    """Translate a VGG configuration list into a Sequential feature extractor.

    Each integer entry adds a 3x3 conv (optionally followed by BatchNorm) and
    a ReLU; each 'M' entry adds a 2x2 max-pool.  Input starts at 3 channels.
    """
    modules = []
    in_ch = 3
    for item in cfg:
        if item == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
        else:
            modules.append(nn.Conv2d(in_ch, item, kernel_size=3, padding=1))
            if batch_norm:
                modules.append(nn.BatchNorm2d(item))
            modules.append(nn.ReLU(inplace=True))
            in_ch = item
    return nn.Sequential(*modules)
def vgg11_bn():
    """VGG-11 with batch normalisation (configuration 'A')."""
    return VGG(make_layers(cfg['A'], batch_norm=True))
def vgg13_bn():
    """VGG-13 with batch normalisation (configuration 'B')."""
    return VGG(make_layers(cfg['B'], batch_norm=True))
def vgg16_bn():
    """VGG-16 with batch normalisation (configuration 'D')."""
    return VGG(make_layers(cfg['D'], batch_norm=True))
def vgg19_bn():
    """VGG-19 with batch normalisation (configuration 'E')."""
    return VGG(make_layers(cfg['E'], batch_norm=True))
def Spinalvgg11_bn():
    """Spinal-head VGG-11 with batch normalisation (configuration 'A')."""
    return SpinalVGG(make_layers(cfg['A'], batch_norm=True))
def Spinalvgg13_bn():
    """Spinal-head VGG-13 with batch normalisation (configuration 'B')."""
    return SpinalVGG(make_layers(cfg['B'], batch_norm=True))
def Spinalvgg16_bn():
    """Spinal-head VGG-16 with batch normalisation (configuration 'D')."""
    return SpinalVGG(make_layers(cfg['D'], batch_norm=True))
def Spinalvgg19_bn():
    """Spinal-head VGG-19 with batch normalisation (configuration 'E')."""
    return SpinalVGG(make_layers(cfg['E'], batch_norm=True))
# For updating learning rate
def update_lr(optimizer, lr):
    """Set the learning rate of every parameter group in *optimizer* to *lr*."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = vgg19_bn().to(device)
model2 = Spinalvgg19_bn().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
# Main train/evaluate loop: each epoch trains both models on the same batches,
# then evaluates both on the test set.  When a model fails to improve its best
# test accuracy, its learning rate is re-drawn at random as
# learning_rate * U(0,1)**3.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward/backward for the plain VGG.
        outputs = model1(images)
        loss1 = criterion(outputs, labels)
        optimizer1.zero_grad()
        loss1.backward()
        optimizer1.step()
        # Forward/backward for the Spinal VGG.
        outputs = model2(images)
        loss2 = criterion(outputs, labels)
        optimizer2.zero_grad()
        loss2.backward()
        optimizer2.step()
        # Log losses once per epoch, on the last (250th) batch.
        if i == 249:
            print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
            print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
                   .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
    # Evaluate both models on the test set after every epoch.
    model1.eval()
    model2.eval()
    with torch.no_grad():
        correct1 = 0
        total1 = 0
        correct2 = 0
        total2 = 0
        for images, labels in test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model1(images)
            _, predicted = torch.max(outputs.data, 1)
            total1 += labels.size(0)
            correct1 += (predicted == labels).sum().item()
            outputs = model2(images)
            _, predicted = torch.max(outputs.data, 1)
            total2 += labels.size(0)
            correct2 += (predicted == labels).sum().item()
        # No improvement -> random learning-rate restart; otherwise record best.
        # np.asscalar() was removed in NumPy 1.23; use a plain Python float.
        if best_accuracy1 > correct1 / total1:
            curr_lr1 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer1, curr_lr1)
            print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
        else:
            best_accuracy1 = correct1 / total1
            net_opt1 = model1
            print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
        if best_accuracy2 > correct2 / total2:
            curr_lr2 = learning_rate * float(np.random.rand() ** 3)
            update_lr(optimizer2, curr_lr2)
            print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
        else:
            best_accuracy2 = correct2 / total2
            net_opt2 = model2
            print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
    # Switch back to training mode for the next epoch.
    model1.train()
    model2.train()
| 9,281 | 28.845659 | 116 | py |
SubOmiEmbed | SubOmiEmbed-main/train_test.py | """
Training and testing for OmiEmbed
"""
import time
import warnings
import numpy as np
import torch
from util import util
from params.train_test_params import TrainTestParams
from datasets import create_separate_dataloader
from models import create_model
from util.visualizer import Visualizer
if __name__ == "__main__":
    # Entry point: parse parameters, build the dataloaders and the model, then
    # run the three-phase (p1 unsupervised / p2 downstream / p3 joint) epoch loop
    # with per-epoch testing.
    warnings.filterwarnings('ignore')
    full_start_time = time.time()
    # Get parameters
    param = TrainTestParams().parse()
    if param.deterministic:
        util.setup_seed(param.seed)
    # Dataset related
    full_dataloader, train_dataloader, val_dataloader, test_dataloader = create_separate_dataloader(param)
    print('The size of training set is {}'.format(len(train_dataloader)))
    # Get sample list for the dataset
    param.sample_list = full_dataloader.get_sample_list()
    # Get the dimension of input omics data
    param.omics_dims = full_dataloader.get_omics_dims()
    if param.downstream_task in ['classification', 'multitask', 'alltask']:
        # Get the number of classes for the classification task
        if param.class_num == 0:
            param.class_num = full_dataloader.get_class_num()
        if param.downstream_task != 'alltask':
            print('The number of classes: {}'.format(param.class_num))
    if param.downstream_task in ['regression', 'multitask', 'alltask']:
        # Get the range of the target values
        values_min = full_dataloader.get_values_min()
        values_max = full_dataloader.get_values_max()
        if param.regression_scale == 1:
            # Default scale of 1 means "not set": use the data maximum instead
            param.regression_scale = values_max
        print('The range of the target values is [{}, {}]'.format(values_min, values_max))
    if param.downstream_task in ['survival', 'multitask', 'alltask']:
        # Get the range of T
        survival_T_min = full_dataloader.get_survival_T_min()
        survival_T_max = full_dataloader.get_survival_T_max()
        if param.survival_T_max == -1:
            # -1 means "not set": take the maximum observed survival time
            param.survival_T_max = survival_T_max
        print('The range of survival T is [{}, {}]'.format(survival_T_min, survival_T_max))
    # Model related
    model = create_model(param)     # Create a model given param.model and other parameters
    model.setup(param)              # Regular setup for the model: load and print networks, create schedulers
    visualizer = Visualizer(param)  # Create a visualizer to print results
    # Start the epoch loop
    visualizer.print_phase(model.phase)
    for epoch in range(param.epoch_count, param.epoch_num + 1):    # outer loop for different epochs
        epoch_start_time = time.time()    # Start time of this epoch
        model.epoch = epoch
        # TRAINING
        model.set_train()    # Set train mode for training
        iter_load_start_time = time.time()    # Start time of data loading for this iteration
        output_dict, losses_dict, metrics_dict = model.init_log_dict()    # Initialize the log dictionaries
        # Phase transitions: p1 -> p2 after epoch_num_p1 epochs, p2 -> p3 after
        # epoch_num_p1 + epoch_num_p2 epochs.
        if epoch == param.epoch_num_p1 + 1:
            model.phase = 'p2'    # Change to supervised phase
            visualizer.print_phase(model.phase)
        if epoch == param.epoch_num_p1 + param.epoch_num_p2 + 1:
            model.phase = 'p3'    # Change to supervised phase
            visualizer.print_phase(model.phase)
        # Start training loop
        for i, data in enumerate(train_dataloader):    # Inner loop for different iteration within one epoch
            model.iter = i
            dataset_size = len(train_dataloader)
            actual_batch_size = len(data['index'])
            iter_start_time = time.time()    # Timer for computation per iteration
            if i % param.print_freq == 0:
                load_time = iter_start_time - iter_load_start_time    # Data loading time for this iteration
            model.set_input(data)    # Unpack input data from the output dictionary of the dataloader
            model.update()    # Calculate losses, gradients and update network parameters
            model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size)    # Update the log dictionaries
            if i % param.print_freq == 0:    # Print training losses and save logging information to the disk
                comp_time = time.time() - iter_start_time    # Computational time for this iteration
                visualizer.print_train_log(epoch, i, losses_dict, metrics_dict, load_time, comp_time, param.batch_size, dataset_size)
            iter_load_start_time = time.time()
        # Model saving
        if param.save_model:
            if param.save_epoch_freq == -1:    # Only save networks during last epoch
                if epoch == param.epoch_num:
                    print('Saving the model at the end of epoch {:d}'.format(epoch))
                    model.save_networks(str(epoch))
            elif epoch % param.save_epoch_freq == 0:    # Save both the generator and the discriminator every <save_epoch_freq> epochs
                print('Saving the model at the end of epoch {:d}'.format(epoch))
                # model.save_networks('latest')
                model.save_networks(str(epoch))
        train_time = time.time() - epoch_start_time
        current_lr = model.update_learning_rate()    # update learning rates at the end of each epoch
        visualizer.print_train_summary(epoch, losses_dict, output_dict, train_time, current_lr)
        # TESTING
        model.set_eval()    # Set eval mode for testing
        test_start_time = time.time()    # Start time of testing
        output_dict, losses_dict, metrics_dict = model.init_log_dict()    # Initialize the log dictionaries
        # Start testing loop
        for i, data in enumerate(test_dataloader):
            dataset_size = len(test_dataloader)
            actual_batch_size = len(data['index'])
            model.set_input(data)    # Unpack input data from the output dictionary of the dataloader
            model.test()    # Run forward to get the output tensors
            model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size)    # Update the log dictionaries
            if i % param.print_freq == 0:    # Print testing log
                visualizer.print_test_log(epoch, i, losses_dict, metrics_dict, param.batch_size, dataset_size)
        test_time = time.time() - test_start_time
        visualizer.print_test_summary(epoch, losses_dict, output_dict, test_time)
        if epoch == param.epoch_num:
            # Persist the final epoch's test outputs only
            visualizer.save_output_dict(output_dict)
    full_time = time.time() - full_start_time
    print('Full running time: {:.3f}s'.format(full_time))
| 7,049 | 54.952381 | 146 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_survival_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeSurvivalModel(VaeBasicModel):
    """
    This class implements the VAE survival model, using the VAE framework with the survival prediction downstream task.
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """Set survival-task defaults and add survival-specific command-line options."""
        # changing the default values of parameters to match the vae survival prediction model
        parser.set_defaults(net_down='multi_FC_survival')
        parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
        parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
        parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
        parser.add_argument('--stratify_label', action='store_true', help='load extra label for stratified dataset separation')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_survival class.
        """
        VaeBasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        self.loss_names.append('survival')
        # specify the metrics you want to print out.
        self.metric_names = []
        # input tensor
        self.survival_T = None  # survival time
        self.survival_E = None  # event indicator (1 = uncensored, per MTLR_survival_loss)
        self.y_true = None      # target encoding consumed by the MTLR loss
        # output tensor
        self.y_out = None       # per-time-interval logits from the downstream network
        # define the network
        self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, None, param.time_num, None, param.init_type,
                                            param.init_gain, self.gpu_ids)
        self.loss_survival = None
        if param.survival_loss == 'MTLR':
            # Pre-compute the lower-triangular helper matrices used by the loss and risk prediction
            self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
            self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
        if self.isTrain:
            # Set the optimizer
            self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Down)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader

        Parameters:
            input_dict (dict): include the data tensor and its index.
        """
        VaeBasicModel.set_input(self, input_dict)
        self.survival_T = input_dict['survival_T'].to(self.device)
        self.survival_E = input_dict['survival_E'].to(self.device)
        self.y_true = input_dict['y_true'].to(self.device)
    def forward(self):
        """Run the VAE forward pass, then the survival head on the latent code."""
        VaeBasicModel.forward(self)
        # Get the output tensor
        self.y_out = self.netDown(self.latent)
    def cal_losses(self):
        """Calculate losses"""
        VaeBasicModel.cal_losses(self)
        # Calculate the survival loss (downstream loss)
        if self.param.survival_loss == 'MTLR':
            self.loss_survival = losses.MTLR_survival_loss(self.y_out, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
        # LOSS DOWN
        self.loss_down = self.loss_survival
        # Total loss: weighted embedding (VAE) loss plus the downstream loss
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
    def update(self):
        """Delegate the phase-dependent optimization step to the base model."""
        VaeBasicModel.update(self)
    def get_down_output(self):
        """
        Get output from downstream task
        """
        with torch.no_grad():
            index = self.data_index
            y_true_E = self.survival_E
            y_true_T = self.survival_T
            y_out = self.y_out
            predict = self.predict_risk()
            # density = predict['density']
            survival = predict['survival']
            # hazard = predict['hazard']
            risk = predict['risk']
            return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out': y_out}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics
        """
        # No per-iteration metrics for the survival task (metric_names is empty).
        pass
    def get_tri_matrix(self, dimension_type=1):
        """
        Get tensor of the triangular matrix
        """
        # dimension_type 1 -> (time_num, time_num + 1); otherwise square (time_num + 1, time_num + 1)
        if dimension_type == 1:
            ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
        else:
            ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
        tri_matrix = torch.tril(ones_matrix)
        return tri_matrix
    def predict_risk(self):
        """
        Predict the density, survival and hazard function, as well as the risk score
        """
        # NOTE(review): all locals are assigned only under the MTLR branch; a
        # non-MTLR survival_loss would raise NameError here -- confirm MTLR is
        # the only supported option.
        if self.param.survival_loss == 'MTLR':
            phi = torch.exp(torch.mm(self.y_out, self.tri_matrix_1))
            div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
            density = phi / div
            survival = torch.mm(density, self.tri_matrix_2)
            hazard = density[:, :-1] / survival[:, 1:]
        cumulative_hazard = torch.cumsum(hazard, dim=1)
        risk = torch.sum(cumulative_hazard, 1)
        return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 5,390 | 38.933333 | 151 | py |
SubOmiEmbed | SubOmiEmbed-main/models/losses.py | import torch
import torch.nn as nn
def get_loss_func(loss_name, reduction='mean'):
    """
    Return the loss function.

    Parameters:
        loss_name (str) -- the name of the loss function: BCE | MSE | L1 | CE
        reduction (str) -- the reduction method applied to the loss function: sum | mean
    """
    # Dispatch table mapping each supported name to its torch.nn loss class.
    loss_classes = {
        'BCE': nn.BCEWithLogitsLoss,
        'MSE': nn.MSELoss,
        'L1': nn.L1Loss,
        'CE': nn.CrossEntropyLoss,
    }
    if loss_name not in loss_classes:
        raise NotImplementedError('Loss function %s is not found' % loss_name)
    return loss_classes[loss_name](reduction=reduction)
def kl_loss(mean, log_var, reduction='mean'):
    """
    KL divergence between the diagonal Gaussian N(mean, exp(log_var)) and the
    standard normal prior N(0, I), reduced over all elements.

    Parameters:
        mean (Tensor)    -- latent means
        log_var (Tensor) -- latent log-variances
        reduction (str)  -- 'mean' averages over elements, anything else sums
    """
    elementwise = 1 + log_var - mean ** 2 - torch.exp(log_var)
    reduce_fn = torch.mean if reduction == 'mean' else torch.sum
    return -0.5 * reduce_fn(elementwise)
def MTLR_survival_loss(y_pred, y_true, E, tri_matrix, reduction='mean'):
    """
    Compute the MTLR (multi-task logistic regression) survival loss.

    Parameters:
        y_pred (Tensor)     -- (N, k) predicted logits, one per time interval
        y_true (Tensor)     -- (N, k+1) target encoding of the event time
        E (Tensor)          -- (N,) event indicator, 1 = uncensored, else censored
        tri_matrix (Tensor) -- (k, k+1) lower-triangular matrix of ones
        reduction (str)     -- 'mean' divides the summed loss by the batch size
    """
    # Get censored index and uncensored index
    censor_idx = [i for i in range(len(E)) if E[i] != 1]
    uncensor_idx = [i for i in range(len(E)) if E[i] == 1]
    # Separate y_true and y_pred
    y_pred_censor = y_pred[censor_idx]
    y_true_censor = y_true[censor_idx]
    y_pred_uncensor = y_pred[uncensor_idx]
    y_true_uncensor = y_true[uncensor_idx]
    # Calculate likelihood for censored datapoint
    phi_censor = torch.exp(torch.mm(y_pred_censor, tri_matrix))
    reduc_phi_censor = torch.sum(phi_censor * y_true_censor, dim=1)
    # Calculate likelihood for uncensored datapoint
    phi_uncensor = torch.exp(torch.mm(y_pred_uncensor, tri_matrix))
    reduc_phi_uncensor = torch.sum(phi_uncensor * y_true_uncensor, dim=1)
    # Likelihood normalisation: the partition term Z is the sum of the same
    # phi values, so reuse them instead of recomputing exp(mm(...)) twice
    # (the original recomputed identical z_censor/z_uncensor matrices).
    reduc_z_censor = torch.sum(phi_censor, dim=1)
    reduc_z_uncensor = torch.sum(phi_uncensor, dim=1)
    # MTLR loss: negative log-likelihood of both groups, normalised by Z
    loss = - (torch.sum(torch.log(reduc_phi_censor)) + torch.sum(torch.log(reduc_phi_uncensor))
              - torch.sum(torch.log(reduc_z_censor)) - torch.sum(torch.log(reduc_z_uncensor)))
    if reduction == 'mean':
        loss = loss / E.shape[0]
    return loss
| 2,558 | 32.671053 | 176 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_alltask_gn_model.py | import torch
import torch.nn as nn
from .basic_model import BasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskGNModel(BasicModel):
    """
    This class implements the VAE multitasking model with GradNorm (all tasks), using the VAE framework with the multiple downstream tasks.
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """Set all-task defaults and add options for every downstream task plus GradNorm."""
        # Downstream task network
        parser.set_defaults(net_down='multi_FC_alltask')
        # Survival prediction related
        parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
        parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
        parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
        # Classification related
        parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
        # Regression related
        parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
        parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
        # GradNorm related
        parser.add_argument('--alpha', type=float, default=1.5, help='the additional hyperparameter for GradNorm')
        parser.add_argument('--lr_gn', type=float, default=1e-3, help='the learning rate for GradNorm')
        parser.add_argument('--k_survival', type=float, default=1.0, help='initial weight for the survival loss')
        parser.add_argument('--k_classifier', type=float, default=1.0, help='initial weight for the classifier loss')
        parser.add_argument('--k_regression', type=float, default=1.0, help='initial weight for the regression loss')
        # Number of tasks
        parser.add_argument('--task_num', type=int, default=7, help='the number of downstream tasks')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_multitask class.
        """
        BasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        if param.omics_mode == 'abc':
            self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
        if param.omics_mode == 'ab':
            self.loss_names = ['recon_A', 'recon_B', 'kl']
        elif param.omics_mode == 'b':
            self.loss_names = ['recon_B', 'kl']
        elif param.omics_mode == 'a':
            self.loss_names = ['recon_A', 'kl']
        elif param.omics_mode == 'c':
            self.loss_names = ['recon_C', 'kl']
        self.loss_names.extend(['survival', 'classifier_1', 'classifier_2', 'classifier_3', 'classifier_4', 'classifier_5', 'regression', 'gradient', 'w_sur', 'w_cla_1', 'w_cla_2', 'w_cla_3', 'w_cla_4', 'w_cla_5', 'w_reg'])
        # specify the models you want to save to the disk and load.
        self.model_names = ['All']
        # input tensor
        self.input_omics = []
        self.data_index = None  # The indexes of input data
        self.survival_T = None
        self.survival_E = None
        self.y_true = None
        self.label = None
        self.value = None
        # output tensor
        self.z = None
        self.recon_omics = None
        self.mean = None
        self.log_var = None
        self.y_out_sur = None
        self.y_out_cla = None
        self.y_out_reg = None
        # specify the metrics you want to print out.
        self.metric_names = ['accuracy_1', 'accuracy_2', 'accuracy_3', 'accuracy_4', 'accuracy_5', 'rmse']
        # define the network
        self.netAll = networks.define_net(param.net_VAE, param.net_down, param.omics_dims, param.omics_mode,
                                          param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
                                          param.dropout_p, param.latent_space_dim, param.class_num, param.time_num, param.task_num,
                                          param.init_type, param.init_gain, self.gpu_ids)
        # define the reconstruction loss
        self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
        # define the classification loss
        self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
        # define the regression distance loss
        self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
        self.loss_recon_A = None
        self.loss_recon_B = None
        self.loss_recon_C = None
        self.loss_recon = None
        self.loss_kl = None
        self.loss_survival = None
        self.loss_classifier_1 = None
        self.loss_classifier_2 = None
        self.loss_classifier_3 = None
        self.loss_classifier_4 = None
        self.loss_classifier_5 = None
        self.loss_regression = None
        self.loss_gradient = 0
        self.loss_w_sur = None
        self.loss_w_cla_1 = None
        self.loss_w_cla_2 = None
        self.loss_w_cla_3 = None
        self.loss_w_cla_4 = None
        self.loss_w_cla_5 = None
        self.loss_w_reg = None
        self.task_losses = None
        self.weighted_losses = None
        self.initial_losses = None
        self.metric_accuracy_1 = None
        self.metric_accuracy_2 = None
        self.metric_accuracy_3 = None
        self.metric_accuracy_4 = None
        self.metric_accuracy_5 = None
        self.metric_rmse = None
        if param.survival_loss == 'MTLR':
            # Pre-compute the lower-triangular helper matrices used by the MTLR loss
            self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
            self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
        # Weights of multiple downstream tasks (learned by GradNorm)
        self.loss_weights = nn.Parameter(torch.ones(param.task_num, requires_grad=True, device=self.device))
        if self.isTrain:
            # Set the optimizer: the network uses lr, the GradNorm weights use lr_gn
            self.optimizer_All = torch.optim.Adam([{'params': self.netAll.parameters(), 'lr': param.lr, 'betas': (param.beta1, 0.999), 'weight_decay': param.weight_decay},
                                                   {'params': self.loss_weights, 'lr': param.lr_gn}])
            self.optimizers.append(self.optimizer_All)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader

        Parameters:
            input_dict (dict): include the data tensor and its index.
        """
        self.input_omics = []
        for i in range(0, 3):
            if i == 1 and self.param.ch_separate:
                # Omics type B arrives split by chromosome (23 tensors)
                input_B = []
                for ch in range(0, 23):
                    input_B.append(input_dict['input_omics'][1][ch].to(self.device))
                self.input_omics.append(input_B)
            else:
                self.input_omics.append(input_dict['input_omics'][i].to(self.device))
        self.data_index = input_dict['index']
        self.survival_T = input_dict['survival_T'].to(self.device)
        self.survival_E = input_dict['survival_E'].to(self.device)
        self.y_true = input_dict['y_true'].to(self.device)
        self.label = []
        # task_num - 2 classification tasks (the other two tasks are survival and regression)
        for i in range(self.param.task_num - 2):
            self.label.append(input_dict['label'][i].to(self.device))
        self.value = input_dict['value'].to(self.device)
    def forward(self):
        """Run the joint network and keep the mean vector as the latent representation."""
        # Get the output tensor
        self.z, self.recon_omics, self.mean, self.log_var, self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netAll(self.input_omics)
        # define the latent
        self.latent = self.mean
    def cal_losses(self):
        """Calculate losses"""
        # Calculate the reconstruction loss for A
        if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
        else:
            self.loss_recon_A = 0
        # Calculate the reconstruction loss for B
        if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            if self.param.ch_separate:
                # Concatenate the per-chromosome tensors before computing the loss
                recon_omics_B = torch.cat(self.recon_omics[1], -1)
                input_omics_B = torch.cat(self.input_omics[1], -1)
                self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
            else:
                self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
        else:
            self.loss_recon_B = 0
        # Calculate the reconstruction loss for C
        if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
            self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
        else:
            self.loss_recon_C = 0
        # Overall reconstruction loss
        if self.param.reduction == 'sum':
            self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
        elif self.param.reduction == 'mean':
            self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
        # Calculate the kl loss
        self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
        # Calculate the overall vae loss (embedding loss)
        # LOSS EMBED
        self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
        # Calculate the survival loss
        if self.param.survival_loss == 'MTLR':
            self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
        # Calculate the classification loss
        self.loss_classifier_1 = self.lossFuncClass(self.y_out_cla[0], self.label[0])
        self.loss_classifier_2 = self.lossFuncClass(self.y_out_cla[1], self.label[1])
        self.loss_classifier_3 = self.lossFuncClass(self.y_out_cla[2], self.label[2])
        self.loss_classifier_4 = self.lossFuncClass(self.y_out_cla[3], self.label[3])
        self.loss_classifier_5 = self.lossFuncClass(self.y_out_cla[4], self.label[4])
        # Calculate the regression loss
        self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
        # Calculate the weighted downstream losses
        # Add initial weights
        self.task_losses = torch.stack([self.param.k_survival * self.loss_survival, self.param.k_classifier * self.loss_classifier_1, self.param.k_classifier * self.loss_classifier_2, self.param.k_classifier * self.loss_classifier_3, self.param.k_classifier * self.loss_classifier_4, self.param.k_classifier * self.loss_classifier_5, self.param.k_regression * self.loss_regression])
        self.weighted_losses = self.loss_weights * self.task_losses
        # LOSS DOWN
        self.loss_down = self.weighted_losses.sum()
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
        # Log the loss weights
        self.loss_w_sur = self.loss_weights[0] * self.param.k_survival
        self.loss_w_cla_1 = self.loss_weights[1] * self.param.k_classifier
        self.loss_w_cla_2 = self.loss_weights[2] * self.param.k_classifier
        self.loss_w_cla_3 = self.loss_weights[3] * self.param.k_classifier
        self.loss_w_cla_4 = self.loss_weights[4] * self.param.k_classifier
        self.loss_w_cla_5 = self.loss_weights[5] * self.param.k_classifier
        self.loss_w_reg = self.loss_weights[6] * self.param.k_regression
    def update(self):
        """One optimization step; behavior depends on the training phase
        (p1: embedding only, p2: downstream only, p3: joint with GradNorm)."""
        if self.phase == 'p1':
            self.forward()
            self.optimizer_All.zero_grad()       # Set gradients to zero
            self.cal_losses()                    # Calculate losses
            self.loss_embed.backward()           # Backpropagation
            self.optimizer_All.step()            # Update weights
        elif self.phase == 'p2':
            self.forward()
            self.optimizer_All.zero_grad()       # Set gradients to zero
            self.cal_losses()                    # Calculate losses
            self.loss_down.backward()            # Backpropagation
            self.optimizer_All.step()            # Update weights
        elif self.phase == 'p3':
            self.forward()
            self.cal_losses()                    # Calculate losses
            self.optimizer_All.zero_grad()       # Set gradients to zero
            # Calculate the GradNorm gradients w.r.t. the last shared encoder layer
            if isinstance(self.netAll, torch.nn.DataParallel):
                W = list(self.netAll.module.get_last_encode_layer().parameters())
            else:
                W = list(self.netAll.get_last_encode_layer().parameters())
            grad_norms = []
            for weight, loss in zip(self.loss_weights, self.task_losses):
                grad = torch.autograd.grad(loss, W, retain_graph=True)
                grad_norms.append(torch.norm(weight * grad[0]))
            grad_norms = torch.stack(grad_norms)
            # First iteration of the epoch defines the reference losses for GradNorm
            if self.iter == 0:
                self.initial_losses = self.task_losses.detach()
            # Calculate the constant targets
            with torch.no_grad():
                # loss ratios
                loss_ratios = self.task_losses / self.initial_losses
                # inverse training rate
                inverse_train_rates = loss_ratios / loss_ratios.mean()
                constant_terms = grad_norms.mean() * (inverse_train_rates ** self.param.alpha)
            # Calculate the gradient loss
            self.loss_gradient = (grad_norms - constant_terms).abs().sum()
            # Set the gradients of weights
            loss_weights_grad = torch.autograd.grad(self.loss_gradient, self.loss_weights)[0]
            self.loss_All.backward()
            # Overwrite the weights' gradient with the GradNorm gradient
            self.loss_weights.grad = loss_weights_grad
            self.optimizer_All.step()            # Update weights
            # Re-normalize the losses weights so they keep summing to task_num
            with torch.no_grad():
                normalize_coeff = len(self.loss_weights) / self.loss_weights.sum()
                self.loss_weights.data = self.loss_weights.data * normalize_coeff
    def get_down_output(self):
        """
        Get output from downstream task
        """
        with torch.no_grad():
            index = self.data_index
            # Survival
            y_true_E = self.survival_E
            y_true_T = self.survival_T
            y_out_sur = self.y_out_sur
            predict = self.predict_risk()
            # density = predict['density']
            survival = predict['survival']
            # hazard = predict['hazard']
            risk = predict['risk']
            # Classification
            y_prob_cla = []
            y_pred_cla = []
            y_true_cla = []
            for i in range(self.param.task_num - 2):
                y_prob_cla.append(F.softmax(self.y_out_cla[i], dim=1))
                _, y_pred_cla_i = torch.max(y_prob_cla[i], 1)
                y_pred_cla.append(y_pred_cla_i)
                y_true_cla.append(self.label[i])
            # Regression
            y_true_reg = self.value
            y_pred_reg = self.y_out_reg * self.param.regression_scale
            return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk,
                    'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla,
                    'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics
        """
        # Per-task classification accuracy
        self.metric_accuracy_1 = (output_dict['y_true_cla'][0] == output_dict['y_pred_cla'][0]).sum().item() / len(
            output_dict['y_true_cla'][0])
        self.metric_accuracy_2 = (output_dict['y_true_cla'][1] == output_dict['y_pred_cla'][1]).sum().item() / len(
            output_dict['y_true_cla'][1])
        self.metric_accuracy_3 = (output_dict['y_true_cla'][2] == output_dict['y_pred_cla'][2]).sum().item() / len(
            output_dict['y_true_cla'][2])
        self.metric_accuracy_4 = (output_dict['y_true_cla'][3] == output_dict['y_pred_cla'][3]).sum().item() / len(
            output_dict['y_true_cla'][3])
        self.metric_accuracy_5 = (output_dict['y_true_cla'][4] == output_dict['y_pred_cla'][4]).sum().item() / len(
            output_dict['y_true_cla'][4])
        # Regression RMSE
        y_true_reg = output_dict['y_true_reg'].cpu().numpy()
        y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
        self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
    def get_tri_matrix(self, dimension_type=1):
        """
        Get tensor of the triangular matrix
        """
        # dimension_type 1 -> (time_num, time_num + 1); otherwise square (time_num + 1, time_num + 1)
        if dimension_type == 1:
            ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
        else:
            ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
        tri_matrix = torch.tril(ones_matrix)
        return tri_matrix
    def predict_risk(self):
        """
        Predict the density, survival and hazard function, as well as the risk score
        """
        # NOTE(review): all locals are assigned only under the MTLR branch; a
        # non-MTLR survival_loss would raise NameError here -- confirm MTLR is
        # the only supported option.
        if self.param.survival_loss == 'MTLR':
            phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
            div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
            density = phi / div
            survival = torch.mm(density, self.tri_matrix_2)
            hazard = density[:, :-1] / survival[:, 1:]
        cumulative_hazard = torch.cumsum(hazard, dim=1)
        risk = torch.sum(cumulative_hazard, 1)
        return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 17,700 | 47.231608 | 382 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_regression_model.py | import torch
from sklearn import metrics
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeRegressionModel(VaeBasicModel):
    """
    This class implements the VAE regression model, using the VAE framework with the regression downstream task.
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """Set regression-task defaults and add regression-specific command-line options."""
        # changing the default values of parameters to match the vae regression model
        parser.set_defaults(net_down='multi_FC_regression', not_stratified=True)
        parser.add_argument('--regression_scale', type=int, default=1,
                            help='normalization scale for y in regression task')
        parser.add_argument('--dist_loss', type=str, default='L1',
                            help='choose the distance loss for regression task, options: [MSE | L1]')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_regression class.
        """
        VaeBasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        self.loss_names.append('distance')
        # specify the metrics you want to print out.
        self.metric_names = ['rmse']
        # input tensor
        self.value = None       # regression target
        # output tensor
        self.y_out = None       # predicted (scaled) value from the downstream network
        # define the network
        self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, None, None, None, param.init_type,
                                            param.init_gain, self.gpu_ids)
        # define the distance loss
        self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
        self.loss_distance = None
        self.metric_rmse = None
        if self.isTrain:
            # Set the optimizer
            self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Down)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader

        Parameters:
            input_dict (dict): include the data tensor and its index.
        """
        VaeBasicModel.set_input(self, input_dict)
        self.value = input_dict['value'].to(self.device)
    def forward(self):
        """Run the VAE forward pass, then the regression head on the latent code."""
        VaeBasicModel.forward(self)
        # Get the output tensor
        self.y_out = self.netDown(self.latent)
    def cal_losses(self):
        """Calculate losses"""
        VaeBasicModel.cal_losses(self)
        # Calculate the regression distance loss (downstream loss);
        # targets are normalised by regression_scale before comparison
        self.loss_distance = self.lossFuncDist(self.y_out.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
        # LOSS DOWN
        self.loss_down = self.loss_distance
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
    def update(self):
        """Delegate the phase-dependent optimization step to the base model."""
        VaeBasicModel.update(self)
    def get_down_output(self):
        """
        Get output from downstream task
        """
        with torch.no_grad():
            index = self.data_index
            y_true = self.value
            # Undo the regression_scale normalisation applied during training
            y_pred = self.y_out * self.param.regression_scale
            return {'index': index, 'y_true': y_true, 'y_pred': y_pred}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics
        """
        y_true = output_dict['y_true'].cpu().numpy()
        y_pred = output_dict['y_pred'].cpu().detach().numpy()
        # squared=False -> root mean squared error
        self.metric_rmse = metrics.mean_squared_error(y_true, y_pred, squared=False)
| 3,793 | 37.323232 | 152 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_alltask_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskModel(VaeBasicModel):
    """
    VAE multitasking model with all downstream tasks attached to the shared
    latent space: 5 classifiers + 1 regressor + 1 survival predictor
    (``task_num`` defaults to 7; the code treats ``task_num - 2`` of them as
    classification tasks).
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """
        Add/override command line options specific to this model.

        Parameters:
            parser          -- the option parser to extend
            is_train (bool) -- training phase flag (not used by this model)

        Returns:
            the modified parser
        """
        # Downstream task network
        parser.set_defaults(net_down='multi_FC_alltask')
        # Survival prediction related
        parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
        parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
        parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
        # Classification related
        parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
        # Regression related
        parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
        parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
        # Loss combined
        parser.add_argument('--k_survival', type=float, default=1,
                            help='weight for the survival loss')
        parser.add_argument('--k_classifier', type=float, default=1,
                            help='weight for the classifier loss')
        parser.add_argument('--k_regression', type=float, default=1,
                            help='weight for the regression loss')
        # Number of tasks
        parser.add_argument('--task_num', type=int, default=7,
                            help='the number of downstream tasks')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_multitask class.

        Parameters:
            param -- parsed experiment options (argparse namespace)
        """
        VaeBasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        self.loss_names.extend(['survival', 'classifier_1', 'classifier_2', 'classifier_3', 'classifier_4', 'classifier_5', 'regression'])
        # specify the metrics you want to print out.
        self.metric_names = ['accuracy_1', 'accuracy_2', 'accuracy_3', 'accuracy_4', 'accuracy_5', 'rmse']
        # input tensor
        self.survival_T = None  # survival time
        self.survival_E = None  # event indicator (1 = event observed; presumably — confirm against dataloader)
        self.y_true = None      # survival target encoding consumed by MTLR_survival_loss
        self.label = None       # list of classification labels, one tensor per classification task
        self.value = None       # regression target
        # output tensor
        self.y_out_sur = None   # survival head output
        self.y_out_cla = None   # list of classification logits, one tensor per task
        self.y_out_reg = None   # regression head output
        # define the network
        self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, param.class_num, param.time_num, param.task_num, param.init_type,
                                            param.init_gain, self.gpu_ids)
        # define the classification loss
        self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
        # define the regression distance loss
        self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
        # per-task losses / metrics, populated by cal_losses() and
        # calculate_current_metrics() respectively
        self.loss_survival = None
        self.loss_classifier_1 = None
        self.loss_classifier_2 = None
        self.loss_classifier_3 = None
        self.loss_classifier_4 = None
        self.loss_classifier_5 = None
        self.loss_regression = None
        self.metric_accuracy_1 = None
        self.metric_accuracy_2 = None
        self.metric_accuracy_3 = None
        self.metric_accuracy_4 = None
        self.metric_accuracy_5 = None
        self.metric_rmse = None
        # MTLR needs two fixed triangular matrices (see get_tri_matrix / predict_risk)
        if param.survival_loss == 'MTLR':
            self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
            self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
        if self.isTrain:
            # Set the optimizer
            self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Down)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader
        Parameters:
            input_dict (dict): include the data tensor and its index.
        """
        VaeBasicModel.set_input(self, input_dict)
        self.survival_T = input_dict['survival_T'].to(self.device)
        self.survival_E = input_dict['survival_E'].to(self.device)
        self.y_true = input_dict['y_true'].to(self.device)
        # one label tensor per classification task (task_num - 2 of them)
        self.label = []
        for i in range(self.param.task_num-2):
            self.label.append(input_dict['label'][i].to(self.device))
        self.value = input_dict['value'].to(self.device)
    def forward(self):
        """Run the VAE encoder (base class) then the downstream heads."""
        # Get the output tensor
        VaeBasicModel.forward(self)
        self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netDown(self.latent)
    def cal_losses(self):
        """Calculate losses"""
        VaeBasicModel.cal_losses(self)
        # Calculate the survival loss
        if self.param.survival_loss == 'MTLR':
            self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
        # Calculate the classification loss
        self.loss_classifier_1 = self.lossFuncClass(self.y_out_cla[0], self.label[0])
        self.loss_classifier_2 = self.lossFuncClass(self.y_out_cla[1], self.label[1])
        self.loss_classifier_3 = self.lossFuncClass(self.y_out_cla[2], self.label[2])
        self.loss_classifier_4 = self.lossFuncClass(self.y_out_cla[3], self.label[3])
        self.loss_classifier_5 = self.lossFuncClass(self.y_out_cla[4], self.label[4])
        # Calculate the regression loss (target is scaled down by regression_scale;
        # predictions are scaled back up in get_down_output)
        self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
        # LOSS DOWN: weighted sum of all downstream task losses
        self.loss_down = self.param.k_survival * self.loss_survival + self.param.k_classifier * self.loss_classifier_1 + self.param.k_classifier * self.loss_classifier_2 + self.param.k_classifier * self.loss_classifier_3 + self.param.k_classifier * self.loss_classifier_4 + self.param.k_classifier * self.loss_classifier_5 + self.param.k_regression * self.loss_regression
        # total objective = weighted embedding (VAE) loss + downstream loss
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
    def update(self):
        """Delegate the optimization step to the base class."""
        VaeBasicModel.update(self)
    def get_down_output(self):
        """
        Get output from downstream task
        Returns a dict with survival curves/risk scores, classification
        probabilities/predictions/targets and (rescaled) regression predictions.
        """
        with torch.no_grad():
            index = self.data_index
            # Survival
            y_true_E = self.survival_E
            y_true_T = self.survival_T
            y_out_sur = self.y_out_sur
            predict = self.predict_risk()
            # density = predict['density']
            survival = predict['survival']
            # hazard = predict['hazard']
            risk = predict['risk']
            # Classification: softmax over logits, argmax as hard prediction
            y_prob_cla = []
            y_pred_cla = []
            y_true_cla = []
            for i in range(self.param.task_num-2):
                y_prob_cla.append(F.softmax(self.y_out_cla[i], dim=1))
                _, y_pred_cla_i = torch.max(y_prob_cla[i], 1)
                y_pred_cla.append(y_pred_cla_i)
                y_true_cla.append(self.label[i])
            # Regression: undo the training-time scaling
            y_true_reg = self.value
            y_pred_reg = self.y_out_reg * self.param.regression_scale
        return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics
        Accuracy per classification task plus RMSE for the regression task.
        """
        self.metric_accuracy_1 = (output_dict['y_true_cla'][0] == output_dict['y_pred_cla'][0]).sum().item() / len(output_dict['y_true_cla'][0])
        self.metric_accuracy_2 = (output_dict['y_true_cla'][1] == output_dict['y_pred_cla'][1]).sum().item() / len(output_dict['y_true_cla'][1])
        self.metric_accuracy_3 = (output_dict['y_true_cla'][2] == output_dict['y_pred_cla'][2]).sum().item() / len(output_dict['y_true_cla'][2])
        self.metric_accuracy_4 = (output_dict['y_true_cla'][3] == output_dict['y_pred_cla'][3]).sum().item() / len(output_dict['y_true_cla'][3])
        self.metric_accuracy_5 = (output_dict['y_true_cla'][4] == output_dict['y_pred_cla'][4]).sum().item() / len(output_dict['y_true_cla'][4])
        y_true_reg = output_dict['y_true_reg'].cpu().numpy()
        y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
        # NOTE(review): mean_squared_error(squared=False) is deprecated in
        # scikit-learn >= 1.4 and removed in 1.6 in favour of
        # root_mean_squared_error — verify against the pinned sklearn version.
        self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
    def get_tri_matrix(self, dimension_type=1):
        """
        Get tensor of the triangular matrix
        Lower-triangular matrix of ones; type 1 is (time_num, time_num + 1)
        (used in the MTLR loss), type 2 is square (time_num + 1, time_num + 1)
        (used to turn the density into a survival function in predict_risk).
        """
        if dimension_type == 1:
            ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
        else:
            ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
        tri_matrix = torch.tril(ones_matrix)
        return tri_matrix
    def predict_risk(self):
        """
        Predict the density, survival and hazard function, as well as the risk score
        """
        if self.param.survival_loss == 'MTLR':
            # unnormalized interval scores: exp of cumulative sums of the head output
            phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
            # normalize each row so the densities sum to 1 over the time intervals
            div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
            density = phi / div
            # survival function: tail sums of the density via the triangular matrix
            survival = torch.mm(density, self.tri_matrix_2)
            # discrete hazard: density in interval i divided by survival past it
            hazard = density[:, :-1] / survival[:, 1:]
            cumulative_hazard = torch.cumsum(hazard, dim=1)
            # scalar risk score per sample (higher = higher risk)
            risk = torch.sum(cumulative_hazard, 1)
            return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 10,265 | 49.078049 | 371 | py |
SubOmiEmbed | SubOmiEmbed-main/models/networks.py | import torch
import torch.nn as nn
import functools
from torch.nn import init
from torch.optim import lr_scheduler
# Class components
class DownSample(nn.Module):
    """
    SingleConv1D stage followed by max-pooling and dropout.
    The output sequence length = input length // down_ratio.
    """
    def __init__(self, input_chan_num, output_chan_num, down_ratio, kernel_size=9, norm_layer=nn.InstanceNorm1d,
                 leaky_slope=0.2, dropout_p=0):
        """
        Construct a downsampling block.
        Parameters:
            input_chan_num (int)  -- channels of the input tensor
            output_chan_num (int) -- channels of the output tensor
            down_ratio (int)      -- kernel size and stride of the MaxPool1d layer
            kernel_size (int)     -- kernel size of the SingleConv1D block
            norm_layer            -- normalization layer
            leaky_slope (float)   -- negative slope of the LeakyReLU activation
            dropout_p (float)     -- probability of an element to be zeroed in the dropout layer
        """
        super(DownSample, self).__init__()
        # conv (same length) -> pool (length // down_ratio) -> dropout
        stages = [
            SingleConv1D(input_chan_num, output_chan_num, kernel_size, norm_layer, leaky_slope),
            nn.MaxPool1d(down_ratio),
            nn.Dropout(p=dropout_p),
        ]
        # keep the attribute name so state_dict keys stay stable
        self.down_sample = nn.Sequential(*stages)
    def forward(self, x):
        y = self.down_sample(x)
        return y
class UpSample(nn.Module):
    """
    ConvTranspose1d followed by either a SingleConv1D (norm + LeakyReLU) or a
    bare Conv1d tail. The output sequence length = input length * up_ratio.
    """
    def __init__(self, input_chan_num, output_chan_num, up_ratio, kernel_size=9, norm_layer=nn.BatchNorm1d,
                 leaky_slope=0.2, dropout_p=0, attention=True):
        """
        Construct an upsampling block.
        Parameters:
            input_chan_num (int)  -- channels of the input tensor (from the previous
                                     layer, not from a skip connection)
            output_chan_num (int) -- channels of the output tensor
            up_ratio (int)        -- kernel size and stride of the ConvTranspose1d layer
            kernel_size (int)     -- kernel size of the convolution layers
            norm_layer            -- normalization layer
            leaky_slope (float)   -- negative slope of the LeakyReLU activation
            dropout_p (float)     -- probability of an element to be zeroed in the dropout layer
            attention (bool)      -- True: use the normalized/activated path;
                                     False: use the path ending in a plain Conv1d
        """
        super(UpSample, self).__init__()
        self.attention = attention
        # path with normalization + LeakyReLU via SingleConv1D
        relu_stages = [
            nn.Dropout(p=dropout_p),
            nn.ConvTranspose1d(input_chan_num, input_chan_num, kernel_size=up_ratio, stride=up_ratio),
            SingleConv1D(input_chan_num, output_chan_num, kernel_size, norm_layer, leaky_slope),
        ]
        # path ending in a bare convolution (no norm, no activation)
        plain_stages = [
            nn.Dropout(p=dropout_p),
            nn.ConvTranspose1d(input_chan_num, input_chan_num, kernel_size=up_ratio, stride=up_ratio),
            nn.Conv1d(input_chan_num, output_chan_num, kernel_size=kernel_size, padding=kernel_size // 2),
        ]
        # attribute names preserved so state_dict keys stay stable
        self.up_sample = nn.Sequential(*relu_stages)
        self.up_sample_no_relu = nn.Sequential(*plain_stages)
    def forward(self, x):
        branch = self.up_sample if self.attention else self.up_sample_no_relu
        return branch(x)
class OutputConv(nn.Module):
    """
    Final 1x1 convolution mapping feature channels to the omics output channels;
    the sequence length is unchanged.
    """
    def __init__(self, input_chan_num, output_chan_num):
        """
        Construct the output convolution layer.
        Parameters:
            input_chan_num (int)  -- channels of the input tensor
            output_chan_num (int) -- channels of the output omics data
        """
        super(OutputConv, self).__init__()
        layers = [nn.Conv1d(input_chan_num, output_chan_num, kernel_size=1)]
        # attribute name preserved so state_dict keys stay stable
        self.output_conv = nn.Sequential(*layers)
    def forward(self, x):
        y = self.output_conv(x)
        return y
class SingleConv1D(nn.Module):
    """
    Conv1d => norm => LeakyReLU, with 'same' padding so the sequence
    length is preserved.
    """
    def __init__(self, input_chan_num, output_chan_num, kernel_size=9, norm_layer=nn.InstanceNorm1d, leaky_slope=0.2):
        """
        Construct a single convolution block.
        Parameters:
            input_chan_num (int)  -- channels of the input tensor
            output_chan_num (int) -- channels of the output tensor
            kernel_size (int)     -- kernel size of the convolution layer
            norm_layer            -- normalization layer
            leaky_slope (float)   -- negative slope of the LeakyReLU activation
        """
        super(SingleConv1D, self).__init__()
        # InstanceNorm has no affine shift by default, so the conv keeps its bias
        # only in that case; other norms subtract the mean, making a bias redundant.
        norm_cls = norm_layer.func if type(norm_layer) == functools.partial else norm_layer
        use_bias = norm_cls == nn.InstanceNorm1d
        # attribute name preserved so state_dict keys stay stable
        self.single_conv_1d = nn.Sequential(
            nn.Conv1d(input_chan_num, output_chan_num, kernel_size=kernel_size, padding=kernel_size // 2,
                      bias=use_bias),
            norm_layer(output_chan_num),
            nn.LeakyReLU(negative_slope=leaky_slope, inplace=True),
        )
    def forward(self, x):
        return self.single_conv_1d(x)
class FCBlock(nn.Module):
    """
    Linear => norm => dropout => activation, each stage individually optional.
    """
    def __init__(self, input_dim, output_dim, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, activation=True, normalization=True, activation_name='LeakyReLU'):
        """
        Construct a fully-connected block.
        Parameters:
            input_dim (int)       -- dimension of the input tensor
            output_dim (int)      -- dimension of the output tensor
            norm_layer            -- normalization layer
            leaky_slope (float)   -- negative slope of the LeakyReLU activation
            dropout_p (float)     -- probability of an element to be zeroed in the dropout layer
            activation (bool)     -- append an activation function or not
            normalization (bool)  -- append a normalization layer or not
            activation_name (str) -- activation to use: 'LeakyReLU' or 'Tanh'
        Raises:
            NotImplementedError -- for any other activation_name
        """
        super(FCBlock, self).__init__()
        layers = [nn.Linear(input_dim, output_dim)]
        if normalization:
            # FC block doesn't support InstanceNorm1d; substitute BatchNorm1d
            if isinstance(norm_layer, functools.partial) and norm_layer.func == nn.InstanceNorm1d:
                norm_layer = nn.BatchNorm1d
            layers.append(norm_layer(output_dim))
        if 0 < dropout_p <= 1:
            layers.append(nn.Dropout(p=dropout_p))
        if activation:
            requested = activation_name.lower()
            if requested == 'leakyrelu':
                layers.append(nn.LeakyReLU(negative_slope=leaky_slope, inplace=True))
            elif requested == 'tanh':
                layers.append(nn.Tanh())
            else:
                raise NotImplementedError('Activation function [%s] is not implemented' % activation_name)
        # attribute name preserved so state_dict keys stay stable
        self.fc_block = nn.Sequential(*layers)
    def forward(self, x):
        return self.fc_block(x)
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Unflatten(nn.Module):
    """Reshape a flat (batch, channel * dim) tensor to (batch, channel, dim)."""
    def __init__(self, channel, dim):
        """
        Parameters:
            channel (int) -- number of channels in the reshaped tensor
            dim (int)     -- sequence length in the reshaped tensor
        """
        super(Unflatten, self).__init__()
        self.channel = channel
        self.dim = dim
    def forward(self, x):
        target_shape = (x.size(0), self.channel, self.dim)
        return x.view(target_shape)
class Identity(nn.Module):
    """Pass-through module: returns its input unchanged."""
    def forward(self, x):
        return x
# Class for VAE
# ConvVae
class ConvVaeABC(nn.Module):
    """
    One-dimensional convolutional variational autoencoder for a three-omics
    dataset: A = omics_dims[0], B = omics_dims[1], C = omics_dims[2]
    (per the sibling single-omics VAEs in this file: A is gene expression,
    B is DNA methylation, C is miRNA expression).
    Encoder: per-omics Conv1d down-sampling -> concatenation along the sequence
    axis -> shared down-sampling -> FC layers for latent mean and log-variance.
    Decoder: FC from z -> shared up-sampling -> split back into per-omics
    segments -> per-omics up-sampling -> 1x1 output convs, cropped to the
    original omics dimensions.
    """
    def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
                 dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_1A=4, ratio_2A=4, ratio_1C=2, ratio_2C=2, ratio_3=16,
                 latent_dim=256):
        """
        Construct a one dimensional convolution variational autoencoder for multi-omics dataset
        Parameters:
            omics_dims (list)    -- the list of input omics dimensions [A_dim, B_dim, C_dim]
            norm_layer           -- normalization layer
            filter_num (int)     -- the number of filters in the first convolution layer in the VAE
            kernel_size (int)    -- the kernel size of convolution layers
            leaky_slope (float)  -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)    -- probability of an element to be zeroed in a dropout layer
            ratio_1X, ratio_2X (int) -- per-omics down/up-sampling factors (X in {A, B, C})
            ratio_3 (int)        -- shared third-stage down/up-sampling factor
            latent_dim (int)     -- the dimensionality of the latent space
        """
        super(ConvVaeABC, self).__init__()
        A_dim = omics_dims[0]
        B_dim = omics_dims[1]
        C_dim = omics_dims[2]
        # Flattened encoder output size: concatenated per-omics lengths after two
        # down-samplings, divided by ratio_3, times the final channel count (filter_num * 4).
        hidden_dim_1 = (B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C) // ratio_3 * filter_num * 4
        # Decoder input size: hidden_dim_1 rounded up by one extra channel-group,
        # presumably so the decoded length covers all omics after the lossy floor
        # divisions above (surplus is cropped at the output) -- TODO confirm.
        hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
        # Per-omics share of the decoder's level-2 sequence length, proportional
        # to each omics' down-sampled length; decode() uses these to split the
        # shared feature map (C additionally takes one extra slack unit there).
        self.narrow_B = hidden_dim_2 // (4 * filter_num) * ratio_3 * (B_dim // ratio_1B // ratio_2B) // (
                B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
        self.narrow_A = hidden_dim_2 // (4 * filter_num) * ratio_3 * (A_dim // ratio_1A // ratio_2A) // (
                B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
        self.narrow_C = hidden_dim_2 // (4 * filter_num) * ratio_3 * (C_dim // ratio_1C // ratio_2C) // (
                B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
        self.B_dim = B_dim
        self.A_dim = A_dim
        self.C_dim = C_dim
        # ENCODER
        # B 1 -> 8
        self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 8 -> 16
        self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # A 1 -> 8
        self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # A 8 -> 16
        self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # C 1 -> 8
        self.down_sample_1C = DownSample(1, filter_num, down_ratio=ratio_1C, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # C 8 -> 16
        self.down_sample_2C = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2C, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # 16 -> 32 (shared stage on the concatenated feature map)
        self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
                                        norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # Flatten
        self.flatten = Flatten()
        # FC to mean
        self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                               dropout_p=0, activation=False, normalization=False)
        # FC to log_var
        self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                  dropout_p=0, activation=False, normalization=False)
        # DECODER
        # FC from z
        self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                            activation=True)
        # Unflatten
        self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
        # 32 -> 16 (shared stage before the per-omics split)
        self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
                                    norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 16 -> 8
        self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # B 8 -> 1
        self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B Output
        self.output_conv_B = OutputConv(filter_num, 1)
        # A 16 -> 8
        self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # A 8 -> 1
        self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # A Output
        self.output_conv_A = OutputConv(filter_num, 1)
        # C 16 -> 8
        self.up_sample_2C = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2C, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # C 8 -> 1
        self.up_sample_3C = UpSample(filter_num, filter_num, up_ratio=ratio_1C, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # C Output
        self.output_conv_C = OutputConv(filter_num, 1)
    def encode(self, x):
        """
        Encode the input list [A, B, C] (each single-channel, assumed shape
        (N, 1, dim) -- confirm against the dataloader) to latent mean and
        log-variance. Feature maps are concatenated in B, A, C order.
        """
        level_2_B = self.down_sample_1B(x[1])
        level_2_A = self.down_sample_1A(x[0])
        level_2_C = self.down_sample_1C(x[2])
        level_3_B = self.down_sample_2B(level_2_B)
        level_3_A = self.down_sample_2A(level_2_A)
        level_3_C = self.down_sample_2C(level_2_C)
        # concatenate along the sequence axis (dim 2), order: B, A, C
        level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 2)
        level_4 = self.down_sample_3(level_3)
        level_4_flatten = self.flatten(level_4)
        latent_mean = self.fc_mean(level_4_flatten)
        latent_log_var = self.fc_log_var(level_4_flatten)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        # standard VAE reparameterization trick: z = mean + std * eps, eps ~ N(0, I)
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z back to the reconstructed omics list [A, B, C]."""
        level_1 = self.fc_z(z)
        level_1_unflatten = self.unflatten(level_1)
        level_2 = self.up_sample_1(level_1_unflatten)
        # split the shared map back into per-omics segments (same B, A, C order
        # as in encode); the C segment absorbs the extra slack unit (narrow_C + 1)
        level_2_B = level_2.narrow(2, 0, self.narrow_B)
        level_2_A = level_2.narrow(2, self.narrow_B, self.narrow_A)
        level_2_C = level_2.narrow(2, self.narrow_B+self.narrow_A, self.narrow_C+1)
        level_3_B = self.up_sample_2B(level_2_B)
        level_3_A = self.up_sample_2A(level_2_A)
        level_3_C = self.up_sample_2C(level_2_C)
        level_4_B = self.up_sample_3B(level_3_B)
        level_4_A = self.up_sample_3A(level_3_A)
        level_4_C = self.up_sample_3C(level_3_C)
        output_B = self.output_conv_B(level_4_B)
        output_A = self.output_conv_A(level_4_A)
        output_C = self.output_conv_C(level_4_C)
        # crop the (possibly longer) reconstructions to the original dimensions
        recon_B = output_B[:, :, 0:self.B_dim]
        recon_A = output_A[:, :, 0:self.A_dim]
        recon_C = output_C[:, :, 0:self.C_dim]
        return [recon_A, recon_B, recon_C]
    def get_last_encode_layer(self):
        # the FC block producing the latent mean
        return self.fc_mean
    def forward(self, x):
        """Full VAE pass; returns (z, reconstructions, mean, log_var)."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class ConvVaeAB(nn.Module):
    """
    One-dimensional convolutional variational autoencoder for a two-omics
    dataset: A = omics_dims[0], B = omics_dims[1].
    Same encode/concatenate/decode/split scheme as ConvVaeABC, but without
    the C (miRNA) branch.
    """
    def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
                 dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_1A=4, ratio_2A=4, ratio_3=16, latent_dim=256):
        """
        Construct a one dimensional convolution variational autoencoder for multi-omics dataset
        Parameters:
            omics_dims (list)    -- the list of input omics dimensions [A_dim, B_dim]
            norm_layer           -- normalization layer
            filter_num (int)     -- the number of filters in the first convolution layer in the VAE
            kernel_size (int)    -- the kernel size of convolution layers
            leaky_slope (float)  -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)    -- probability of an element to be zeroed in a dropout layer
            ratio_1X, ratio_2X (int) -- per-omics down/up-sampling factors (X in {A, B})
            ratio_3 (int)        -- shared third-stage down/up-sampling factor
            latent_dim (int)     -- the dimensionality of the latent space
        """
        super(ConvVaeAB, self).__init__()
        A_dim = omics_dims[0]
        B_dim = omics_dims[1]
        # Flattened encoder output size after the three down-sampling stages.
        hidden_dim_1 = (B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A) // ratio_3 * filter_num * 4
        # Decoder input size: hidden_dim_1 plus TWO extra channel-groups (the
        # ABC/B/A variants add only one) -- presumably extra slack for the two-way
        # split arithmetic below; confirm.
        hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 2) * filter_num * 4
        # B's proportional share of the decoder's level-2 sequence length;
        # A takes the remainder.
        self.narrow_B = hidden_dim_2 // (4 * filter_num) * ratio_3 * (B_dim // ratio_1B // ratio_2B) // (
                B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A)
        self.narrow_A = hidden_dim_2 // (4 * filter_num) * ratio_3 - self.narrow_B
        self.B_dim = B_dim
        self.A_dim = A_dim
        # ENCODER
        # B 1 -> 8
        self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 8 -> 16
        self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # A 1 -> 8
        self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # A 8 -> 16
        self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # 16 -> 32 (shared stage on the concatenated feature map)
        self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
                                        norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # Flatten
        self.flatten = Flatten()
        # FC to mean
        self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                               dropout_p=0, activation=False, normalization=False)
        # FC to log_var
        self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                  dropout_p=0, activation=False, normalization=False)
        # DECODER
        # FC from z
        self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                            activation=True)
        # Unflatten
        self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
        # 32 -> 16 (shared stage before the per-omics split)
        self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
                                    norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 16 -> 8
        self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # B 8 -> 1
        self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B Output
        self.output_conv_B = OutputConv(filter_num, 1)
        # A 16 -> 8
        self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # A 8 -> 1
        self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # A Output
        self.output_conv_A = OutputConv(filter_num, 1)
    def encode(self, x):
        """
        Encode the input list [A, B] (each single-channel, assumed shape
        (N, 1, dim) -- confirm against the dataloader) to latent mean and
        log-variance. Feature maps are concatenated in B, A order.
        """
        level_2_B = self.down_sample_1B(x[1])
        level_2_A = self.down_sample_1A(x[0])
        level_3_B = self.down_sample_2B(level_2_B)
        level_3_A = self.down_sample_2A(level_2_A)
        # concatenate along the sequence axis (dim 2), order: B, A
        level_3 = torch.cat((level_3_B, level_3_A), 2)
        level_4 = self.down_sample_3(level_3)
        level_4_flatten = self.flatten(level_4)
        latent_mean = self.fc_mean(level_4_flatten)
        latent_log_var = self.fc_log_var(level_4_flatten)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        # standard VAE reparameterization trick: z = mean + std * eps, eps ~ N(0, I)
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z back to the reconstructed omics list [A, B]."""
        level_1 = self.fc_z(z)
        level_1_unflatten = self.unflatten(level_1)
        level_2 = self.up_sample_1(level_1_unflatten)
        # split the shared map back into B and A segments (same order as encode)
        level_2_B = level_2.narrow(2, 0, self.narrow_B)
        level_2_A = level_2.narrow(2, self.narrow_B, self.narrow_A)
        level_3_B = self.up_sample_2B(level_2_B)
        level_3_A = self.up_sample_2A(level_2_A)
        level_4_B = self.up_sample_3B(level_3_B)
        level_4_A = self.up_sample_3A(level_3_A)
        output_B = self.output_conv_B(level_4_B)
        output_A = self.output_conv_A(level_4_A)
        # crop the (possibly longer) reconstructions to the original dimensions
        recon_B = output_B[:, :, 0:self.B_dim]
        recon_A = output_A[:, :, 0:self.A_dim]
        return [recon_A, recon_B]
    def get_last_encode_layer(self):
        # the FC block producing the latent mean
        return self.fc_mean
    def forward(self, x):
        """Full VAE pass; returns (z, reconstructions, mean, log_var)."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class ConvVaeB(nn.Module):
    """
    One-dimensional convolutional variational autoencoder for the DNA
    methylation dataset alone (omics type B, taken from omics_dims[1]).
    """
    def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
                 dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_3=16, latent_dim=256):
        """
        Construct a one dimensional convolution variational autoencoder for DNA methylation dataset
        Parameters:
            omics_dims (list)    -- the list of input omics dimensions (index 1 = B)
            norm_layer           -- normalization layer
            filter_num (int)     -- the number of filters in the first convolution layer in the VAE
            kernel_size (int)    -- the kernel size of convolution layers
            leaky_slope (float)  -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)    -- probability of an element to be zeroed in a dropout layer
            ratio_1B, ratio_2B, ratio_3 (int) -- down/up-sampling factors of the three stages
            latent_dim (int)     -- the dimensionality of the latent space
        """
        super(ConvVaeB, self).__init__()
        B_dim = omics_dims[1]
        # Flattened encoder output size after the three down-sampling stages.
        hidden_dim_1 = B_dim // ratio_1B // ratio_2B // ratio_3 * filter_num * 4
        # Decoder input size: one extra channel-group of slack, presumably so the
        # decoded length is at least B_dim after the floor divisions (the surplus
        # is cropped at the output) -- confirm.
        hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
        self.B_dim = B_dim
        # ENCODER
        # B 1 -> 8
        self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 8 -> 16
        self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # 16 -> 32
        self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
                                        norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # Flatten
        self.flatten = Flatten()
        # FC to mean
        self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                               dropout_p=0, activation=False, normalization=False)
        # FC to log_var
        self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                  dropout_p=0, activation=False, normalization=False)
        # DECODER
        # FC from z
        self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                            activation=True)
        # Unflatten
        self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
        # 32 -> 16
        self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
                                    norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B 16 -> 8
        self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # B 8 -> 1
        self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # B Output
        self.output_conv_B = OutputConv(filter_num, 1)
    def encode(self, x):
        """Encode x[1] (single-channel B omics) to latent mean and log-variance."""
        level_2_B = self.down_sample_1B(x[1])
        level_3_B = self.down_sample_2B(level_2_B)
        level_4 = self.down_sample_3(level_3_B)
        level_4_flatten = self.flatten(level_4)
        latent_mean = self.fc_mean(level_4_flatten)
        latent_log_var = self.fc_log_var(level_4_flatten)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        # standard VAE reparameterization trick: z = mean + std * eps, eps ~ N(0, I)
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z; returns [None, recon_B] to keep the omics list layout."""
        level_1 = self.fc_z(z)
        level_1_unflatten = self.unflatten(level_1)
        level_2 = self.up_sample_1(level_1_unflatten)
        level_3_B = self.up_sample_2B(level_2)
        level_4_B = self.up_sample_3B(level_3_B)
        output_B = self.output_conv_B(level_4_B)
        # crop the (possibly longer) reconstruction to the original dimension
        recon_B = output_B[:, :, 0:self.B_dim]
        return [None, recon_B]
    def get_last_encode_layer(self):
        # the FC block producing the latent mean
        return self.fc_mean
    def forward(self, x):
        """Full VAE pass; returns (z, reconstructions, mean, log_var)."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class ConvVaeA(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for gene expression dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1A=4, ratio_2A=4, ratio_3=16, latent_dim=256):
"""
Construct a one dimensional convolution variational autoencoder for multi-omics dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeA, self).__init__()
A_dim = omics_dims[0]
hidden_dim_1 = A_dim // ratio_1A // ratio_2A // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
self.A_dim = A_dim
# ENCODER
# A 1 -> 8
self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 8 -> 16
self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 16 -> 8
self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 8 -> 1
self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A Output
self.output_conv_A = OutputConv(filter_num, 1)
def encode(self, x):
level_2_A = self.down_sample_1A(x[0])
level_3_A = self.down_sample_2A(level_2_A)
level_4 = self.down_sample_3(level_3_A)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_3_A = self.up_sample_2A(level_2)
level_4_A = self.up_sample_3A(level_3_A)
output_A = self.output_conv_A(level_4_A)
recon_A = output_A[:, :, 0:self.A_dim]
return [recon_A]
    def get_last_encode_layer(self):
        # Expose the mean head (the final encoder layer) for external inspection/hooks.
        return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class ConvVaeC(nn.Module):
    """
    Defines a one dimensional convolution variational autoencoder for miRNA expression dataset
    """
    def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
                 dropout_p=0, ratio_1C=2, ratio_2C=2, ratio_3=16, latent_dim=256):
        """
        Construct a one dimensional convolution variational autoencoder for the miRNA (C) omics type
        Parameters:
            omics_dims (list)       -- the list of input omics dimensions; index 2 is the miRNA dimension
            norm_layer              -- normalization layer
            filter_num (int)        -- the number of filters in the first convolution layer in the VAE
            kernel_size (int)       -- the kernel size of convolution layers
            leaky_slope (float)     -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)       -- probability of an element to be zeroed in a dropout layer
            ratio_1C (int)          -- down/up-sampling ratio of the outermost conv stage
            ratio_2C (int)          -- down/up-sampling ratio of the middle conv stage
            ratio_3 (int)           -- down/up-sampling ratio of the innermost conv stage
            latent_dim (int)        -- the dimensionality of the latent space
        """
        super(ConvVaeC, self).__init__()
        C_dim = omics_dims[2]
        # Flattened width of the innermost encoder feature map (4*filter_num channels).
        # NOTE(review): this assumes DownSample shrinks the length by exactly its
        # down_ratio (floor division) -- confirm against the DownSample implementation.
        hidden_dim_1 = (C_dim // ratio_1C // ratio_2C) // ratio_3 * filter_num * 4
        # Decoder seed width, rounded UP to the next multiple of 4*filter_num so the
        # reconstruction is at least C_dim long before being trimmed in decode().
        hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
        self.C_dim = C_dim
        # ENCODER
        # C 1 -> 8
        self.down_sample_1C = DownSample(1, filter_num, down_ratio=ratio_1C, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # C 8 -> 16
        self.down_sample_2C = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2C, kernel_size=kernel_size,
                                         norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # 16 -> 32
        self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
                                        norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # Flatten
        self.flatten = Flatten()
        # FC to mean
        self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                               dropout_p=0, activation=False, normalization=False)
        # FC to log_var
        self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                  dropout_p=0, activation=False, normalization=False)
        # DECODER
        # FC from z
        self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                            activation=True)
        # Unflatten
        self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
        # 32 -> 16
        self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
                                    norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # C 16 -> 8
        self.up_sample_2C = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2C, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
        # C 8 -> 1
        self.up_sample_3C = UpSample(filter_num, filter_num, up_ratio=ratio_1C, kernel_size=kernel_size,
                                     norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
        # C Output
        self.output_conv_C = OutputConv(filter_num, 1)
    def encode(self, x):
        """Encode the miRNA tensor x[2] into latent mean and log-variance."""
        level_2_C = self.down_sample_1C(x[2])
        level_3_C = self.down_sample_2C(level_2_C)
        level_4 = self.down_sample_3(level_3_C)
        level_4_flatten = self.flatten(level_4)
        latent_mean = self.fc_mean(level_4_flatten)
        latent_log_var = self.fc_log_var(level_4_flatten)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) with the reparameterization trick."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z; returns [None, None, recon_C] to keep the omics-list layout."""
        level_1 = self.fc_z(z)
        level_1_unflatten = self.unflatten(level_1)
        level_2 = self.up_sample_1(level_1_unflatten)
        level_3_C = self.up_sample_2C(level_2)
        level_4_C = self.up_sample_3C(level_3_C)
        output_C = self.output_conv_C(level_4_C)
        # Trim the rounded-up reconstruction back to the original miRNA length.
        recon_C = output_C[:, :, 0:self.C_dim]
        return [None, None, recon_C]
    def get_last_encode_layer(self):
        # The mean head is the final encoder layer.
        return self.fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
# FcSepVae
class FcSepVaeABC(nn.Module):
    """
    Defines a fully-connected variational autoencoder for multi-omics dataset
    DNA methylation input separated by chromosome
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
                 dim_1A=2048, dim_2A=1024, dim_1C=1024, dim_2C=1024, dim_3=512, latent_dim=256):
        """
        Construct a fully-connected variational autoencoder
        Parameters:
            omics_dims (list)       -- the list of input omics dimensions; index 0 is gene expression (A),
                                       index 1 a list of 23 per-chromosome methylation dims (B), index 2 miRNA (C)
            norm_layer              -- normalization layer
            leaky_slope (float)     -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)       -- probability of an element to be zeroed in a dropout layer
            dim_1B..dim_3 (int)     -- hidden-layer widths for the B / A / C branches and the merged layer
            latent_dim (int)        -- the dimensionality of the latent space
        """
        super(FcSepVaeABC, self).__init__()
        self.A_dim = omics_dims[0]
        self.B_dim_list = omics_dims[1]
        self.C_dim = omics_dims[2]
        # Widths cached so decode() can slice the concatenated activations per branch.
        self.dim_1B = dim_1B
        self.dim_2B = dim_2B
        self.dim_2A = dim_2A
        self.dim_2C = dim_2C
        # ENCODER
        # Layer 1
        # One FC block per chromosome for the methylation (B) input.
        self.encode_fc_1B_list = nn.ModuleList()
        for i in range(0, 23):
            self.encode_fc_1B_list.append(
                FCBlock(self.B_dim_list[i], dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                        activation=True))
        self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 2
        self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 3
        # The three omics branches are fused into a single representation here.
        self.encode_fc_3 = FCBlock(dim_2B+dim_2A+dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 4
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                      activation=False, normalization=False)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                         activation=False, normalization=False)
        # DECODER
        # Layer 1
        self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 2
        self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A+dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 3
        self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3C = FCBlock(dim_2C, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 4
        # One output head per chromosome for B; no activation/normalization on outputs.
        self.decode_fc_4B_list = nn.ModuleList()
        for i in range(0, 23):
            self.decode_fc_4B_list.append(
                FCBlock(dim_1B, self.B_dim_list[i], norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                        activation=False, normalization=False))
        self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
        self.decode_fc_4C = FCBlock(dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
    def encode(self, x):
        """Encode [A, B(list of 23), C] inputs into latent mean and log-variance."""
        level_2_B_list = []
        for i in range(0, 23):
            level_2_B_list.append(self.encode_fc_1B_list[i](x[1][i]))
        level_2_B = torch.cat(level_2_B_list, 1)
        level_2_A = self.encode_fc_1A(x[0])
        level_2_C = self.encode_fc_1C(x[2])
        level_3_B = self.encode_fc_2B(level_2_B)
        level_3_A = self.encode_fc_2A(level_2_A)
        level_3_C = self.encode_fc_2C(level_2_C)
        # Order of concatenation (B, A, C) must match the narrow() calls in decode().
        level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 1)
        level_4 = self.encode_fc_3(level_3)
        latent_mean = self.encode_fc_mean(level_4)
        latent_log_var = self.encode_fc_log_var(level_4)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) with the reparameterization trick."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z into [recon_A, recon_B_list, recon_C]."""
        level_1 = self.decode_fc_z(z)
        level_2 = self.decode_fc_2(level_1)
        # Split the shared layer back into the B / A / C branches.
        level_2_B = level_2.narrow(1, 0, self.dim_2B)
        level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
        level_2_C = level_2.narrow(1, self.dim_2B+self.dim_2A, self.dim_2C)
        level_3_B = self.decode_fc_3B(level_2_B)
        level_3_B_list = []
        for i in range(0, 23):
            level_3_B_list.append(level_3_B.narrow(1, self.dim_1B*i, self.dim_1B))
        level_3_A = self.decode_fc_3A(level_2_A)
        level_3_C = self.decode_fc_3C(level_2_C)
        recon_B_list = []
        for i in range(0, 23):
            recon_B_list.append(self.decode_fc_4B_list[i](level_3_B_list[i]))
        recon_A = self.decode_fc_4A(level_3_A)
        recon_C = self.decode_fc_4C(level_3_C)
        return [recon_A, recon_B_list, recon_C]
    def get_last_encode_layer(self):
        # The mean head is the last encoder layer.
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class FcSepVaeAB(nn.Module):
    """
    Defines a fully-connected variational autoencoder for multi-omics dataset
    DNA methylation input separated by chromosome
    Uses only omics types A (gene expression, x[0]) and B (methylation list, x[1]).
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
                 dim_1A=2048, dim_2A=1024, dim_3=512, latent_dim=256):
        """
        Construct a fully-connected variational autoencoder
        Parameters:
            omics_dims (list)       -- the list of input omics dimensions; index 0 is gene expression (A),
                                       index 1 a list of 23 per-chromosome methylation dims (B)
            norm_layer              -- normalization layer
            leaky_slope (float)     -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)       -- probability of an element to be zeroed in a dropout layer
            dim_1B..dim_3 (int)     -- hidden-layer widths for the B / A branches and the merged layer
            latent_dim (int)        -- the dimensionality of the latent space
        """
        super(FcSepVaeAB, self).__init__()
        self.A_dim = omics_dims[0]
        self.B_dim_list = omics_dims[1]
        # Widths cached so decode() can slice the concatenated activations per branch.
        self.dim_1B = dim_1B
        self.dim_2B = dim_2B
        self.dim_2A = dim_2A
        # ENCODER
        # Layer 1
        # One FC block per chromosome for the methylation (B) input.
        self.encode_fc_1B_list = nn.ModuleList()
        for i in range(0, 23):
            self.encode_fc_1B_list.append(
                FCBlock(self.B_dim_list[i], dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                        activation=True))
        self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 2
        self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 3
        # The two omics branches are fused into a single representation here.
        self.encode_fc_3 = FCBlock(dim_2B+dim_2A, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 4
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                      activation=False, normalization=False)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                         activation=False, normalization=False)
        # DECODER
        # Layer 1
        self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 2
        self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 3
        self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 4
        # One output head per chromosome for B; no activation/normalization on outputs.
        self.decode_fc_4B_list = nn.ModuleList()
        for i in range(0, 23):
            self.decode_fc_4B_list.append(
                FCBlock(dim_1B, self.B_dim_list[i], norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                        activation=False, normalization=False))
        self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
    def encode(self, x):
        """Encode [A, B(list of 23)] inputs into latent mean and log-variance."""
        level_2_B_list = []
        for i in range(0, 23):
            level_2_B_list.append(self.encode_fc_1B_list[i](x[1][i]))
        level_2_B = torch.cat(level_2_B_list, 1)
        level_2_A = self.encode_fc_1A(x[0])
        level_3_B = self.encode_fc_2B(level_2_B)
        level_3_A = self.encode_fc_2A(level_2_A)
        # Order of concatenation (B, A) must match the narrow() calls in decode().
        level_3 = torch.cat((level_3_B, level_3_A), 1)
        level_4 = self.encode_fc_3(level_3)
        latent_mean = self.encode_fc_mean(level_4)
        latent_log_var = self.encode_fc_log_var(level_4)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) with the reparameterization trick."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z into [recon_A, recon_B_list]."""
        level_1 = self.decode_fc_z(z)
        level_2 = self.decode_fc_2(level_1)
        # Split the shared layer back into the B / A branches.
        level_2_B = level_2.narrow(1, 0, self.dim_2B)
        level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
        level_3_B = self.decode_fc_3B(level_2_B)
        level_3_B_list = []
        for i in range(0, 23):
            level_3_B_list.append(level_3_B.narrow(1, self.dim_1B*i, self.dim_1B))
        level_3_A = self.decode_fc_3A(level_2_A)
        recon_B_list = []
        for i in range(0, 23):
            recon_B_list.append(self.decode_fc_4B_list[i](level_3_B_list[i]))
        recon_A = self.decode_fc_4A(level_3_A)
        return [recon_A, recon_B_list]
    def get_last_encode_layer(self):
        # The mean head is the last encoder layer.
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class FcSepVaeB(nn.Module):
    """
    Fully-connected variational autoencoder for the DNA-methylation dataset
    (omics type B), with the input split into 23 per-chromosome segments.
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
                 dim_3=512, latent_dim=256):
        """
        Build the symmetric per-chromosome encoder/decoder stacks.
        Parameters:
            omics_dims (list)   -- input omics dimensions; index 1 is a list of 23 chromosome widths
            norm_layer          -- normalization layer passed to every FCBlock
            leaky_slope (float) -- negative slope of the Leaky ReLU activation
            dropout_p (float)   -- dropout probability for the hidden layers
            dim_1B/dim_2B/dim_3 -- hidden-layer widths
            latent_dim (int)    -- dimensionality of the latent space
        """
        super(FcSepVaeB, self).__init__()
        self.B_dim_list = omics_dims[1]
        self.dim_1B = dim_1B  # per-chromosome hidden width, used for slicing in decode()
        common = dict(norm_layer=norm_layer, leaky_slope=leaky_slope)
        # ENCODER: one FC block per chromosome, then 23*dim_1B -> dim_2B -> dim_3 -> heads.
        self.encode_fc_1B_list = nn.ModuleList(
            [FCBlock(self.B_dim_list[i], dim_1B, dropout_p=dropout_p, activation=True, **common)
             for i in range(23)])
        self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_3 = FCBlock(dim_2B, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        # DECODER mirrors the encoder back to 23 per-chromosome outputs.
        self.decode_fc_z = FCBlock(latent_dim, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_2 = FCBlock(dim_3, dim_2B, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_4B_list = nn.ModuleList(
            [FCBlock(dim_1B, self.B_dim_list[i], dropout_p=0, activation=False, normalization=False, **common)
             for i in range(23)])
    def encode(self, x):
        """Encode the 23 per-chromosome tensors in x[1] into latent mean / log-variance."""
        per_chrom = [fc(x[1][i]) for i, fc in enumerate(self.encode_fc_1B_list)]
        h = torch.cat(per_chrom, 1)
        h = self.encode_fc_2B(h)
        h = self.encode_fc_3(h)
        return self.encode_fc_mean(h), self.encode_fc_log_var(h)
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) via the reparameterization trick."""
        sigma = torch.exp(0.5 * log_var)
        return mean + torch.randn_like(sigma) * sigma
    def decode(self, z):
        """Decode z; returns [None, recon_B_list] to keep the omics-list layout."""
        h = self.decode_fc_z(z)
        h = self.decode_fc_2(h)
        h = self.decode_fc_3B(h)
        # Slice the shared layer into 23 dim_1B-wide chromosome chunks.
        chunks = [h[:, i * self.dim_1B:(i + 1) * self.dim_1B] for i in range(23)]
        recon_B_list = [head(chunk) for head, chunk in zip(self.decode_fc_4B_list, chunks)]
        return [None, recon_B_list]
    def get_last_encode_layer(self):
        """Return the mean head (the last encoder layer)."""
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        return z, self.decode(z), mean, log_var
# FcVae
class FcVaeABC(nn.Module):
    """
    Defines a fully-connected variational autoencoder for multi-omics dataset
    (A: gene expression, B: DNA methylation, C: miRNA).
    DNA methylation input not separated by chromosome.
    """
    def __init__(self, param, omics_dims, omics_subset_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=384, dim_2B=256,
                 dim_1A=384, dim_2A=256, dim_1C=384, dim_2C=256, dim_3=256, latent_dim=256):
        """
        Construct a fully-connected variational autoencoder
        Parameters:
            param                   -- experiment options; enc_reduction_factor / dec_reduction_factor
                                       shrink the encoder / decoder hidden widths (integer division)
            omics_dims (list)       -- the list of full input omics dimensions [A, B, C]
            omics_subset_dims       -- optional reduced input dimensions; when not None the first
                                       encoder layer of each branch expects the subset width instead
            norm_layer              -- normalization layer
            leaky_slope (float)     -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)       -- probability of an element to be zeroed in a dropout layer
            dim_1B..dim_3 (int)     -- base hidden-layer widths per omics branch
            latent_dim (int)        -- the dimensionality of the latent space
        """
        super(FcVaeABC, self).__init__()
        if omics_subset_dims is not None:
            self.A_subset_dim = omics_subset_dims[0]
            self.B_subset_dim = omics_subset_dims[1]
            self.C_subset_dim = omics_subset_dims[2]
        # Decoder dimensions (kept on self for slicing in decode())
        self.dim_1A = dim_1A // param.dec_reduction_factor ; self.dim_1B = dim_1B // param.dec_reduction_factor ; self.dim_1C = dim_1C // param.dec_reduction_factor
        self.dim_2A = dim_2A // param.dec_reduction_factor ; self.dim_2B = dim_2B // param.dec_reduction_factor ; self.dim_2C = dim_2C // param.dec_reduction_factor
        # Encoder dimensions
        dim_1A //= param.enc_reduction_factor ; dim_1B //= param.enc_reduction_factor ; dim_1C //= param.enc_reduction_factor
        # BUG FIX: previously dim_2B was divided twice and dim_2A not at all (copy-paste
        # error); each second-layer width is now reduced exactly once, matching line above.
        dim_2A //= param.enc_reduction_factor ; dim_2B //= param.enc_reduction_factor ; dim_2C //= param.enc_reduction_factor
        self.A_dim = omics_dims[0]
        self.B_dim = omics_dims[1]
        self.C_dim = omics_dims[2]
        # ENCODER
        # Layer 1: input width depends on whether a feature subset is used.
        if omics_subset_dims is None:
            self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
            self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
            self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
        else:
            self.encode_fc_1B = FCBlock(self.B_subset_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
            self.encode_fc_1A = FCBlock(self.A_subset_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
            self.encode_fc_1C = FCBlock(self.C_subset_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                        activation=True)
        # Layer 2
        self.encode_fc_2B = FCBlock(dim_1B, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 3: fuse the three omics branches.
        self.encode_fc_3 = FCBlock(dim_2B+dim_2A+dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 4: Gaussian parameter heads (no activation/normalization).
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                      activation=False, normalization=False)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                         activation=False, normalization=False)
        # DECODER (uses the self.dim_* decoder widths; always reconstructs the FULL omics dims)
        # Layer 1
        self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 2
        self.decode_fc_2 = FCBlock(dim_3, self.dim_2B+self.dim_2A+self.dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 3
        self.decode_fc_3B = FCBlock(self.dim_2B, self.dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3A = FCBlock(self.dim_2A, self.dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3C = FCBlock(self.dim_2C, self.dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 4
        self.decode_fc_4B = FCBlock(self.dim_1B, self.B_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
        self.decode_fc_4A = FCBlock(self.dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
        self.decode_fc_4C = FCBlock(self.dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
    def encode(self, x):
        """Encode [A, B, C] inputs into latent mean and log-variance."""
        level_2_B = self.encode_fc_1B(x[1])
        level_2_A = self.encode_fc_1A(x[0])
        level_2_C = self.encode_fc_1C(x[2])
        level_3_B = self.encode_fc_2B(level_2_B)
        level_3_A = self.encode_fc_2A(level_2_A)
        level_3_C = self.encode_fc_2C(level_2_C)
        # Order of concatenation (B, A, C) must match the narrow() calls in decode().
        level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 1)
        level_4 = self.encode_fc_3(level_3)
        latent_mean = self.encode_fc_mean(level_4)
        latent_log_var = self.encode_fc_log_var(level_4)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) with the reparameterization trick."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z into [recon_A, recon_B, recon_C] (full omics widths)."""
        level_1 = self.decode_fc_z(z)
        level_2 = self.decode_fc_2(level_1)
        # Split the shared layer back into the B / A / C branches.
        level_2_B = level_2.narrow(1, 0, self.dim_2B)
        level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
        level_2_C = level_2.narrow(1, self.dim_2B+self.dim_2A, self.dim_2C)
        level_3_B = self.decode_fc_3B(level_2_B)
        level_3_A = self.decode_fc_3A(level_2_A)
        level_3_C = self.decode_fc_3C(level_2_C)
        recon_B = self.decode_fc_4B(level_3_B)
        recon_A = self.decode_fc_4A(level_3_A)
        recon_C = self.decode_fc_4C(level_3_C)
        return [recon_A, recon_B, recon_C]
    def get_last_encode_layer(self):
        # The mean head is the last encoder layer.
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class FcVaeAB(nn.Module):
    """
    Defines a fully-connected variational autoencoder for multi-omics dataset
    DNA methylation input not separated by chromosome
    Uses omics types A (gene expression, x[0]) and B (methylation, x[1]).
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=384, dim_2B=256,
                 dim_1A=384, dim_2A=256, dim_3=256, latent_dim=256):
        """
        Construct a fully-connected variational autoencoder
        Parameters:
            omics_dims (list)       -- the list of input omics dimensions; index 0 is A, index 1 is B
            norm_layer              -- normalization layer
            leaky_slope (float)     -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)       -- probability of an element to be zeroed in a dropout layer
            dim_1B..dim_3 (int)     -- hidden-layer widths for the B / A branches and the merged layer
            latent_dim (int)        -- the dimensionality of the latent space
        """
        super(FcVaeAB, self).__init__()
        self.A_dim = omics_dims[0]
        self.B_dim = omics_dims[1]
        # Widths cached so decode() can slice the concatenated activations per branch.
        self.dim_1B = dim_1B
        self.dim_2B = dim_2B
        self.dim_2A = dim_2A
        # ENCODER
        # Layer 1
        self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 2
        self.encode_fc_2B = FCBlock(dim_1B, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 3
        # The two omics branches are fused into a single representation here.
        self.encode_fc_3 = FCBlock(dim_2B+dim_2A, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 4
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                      activation=False, normalization=False)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                         activation=False, normalization=False)
        # DECODER
        # Layer 1
        self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 2
        self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 3
        self.decode_fc_3B = FCBlock(dim_2B, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 4
        self.decode_fc_4B = FCBlock(dim_1B, self.B_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
        self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)
    def encode(self, x):
        """Encode [A, B] inputs into latent mean and log-variance."""
        level_2_B = self.encode_fc_1B(x[1])
        level_2_A = self.encode_fc_1A(x[0])
        level_3_B = self.encode_fc_2B(level_2_B)
        level_3_A = self.encode_fc_2A(level_2_A)
        # Order of concatenation (B, A) must match the narrow() calls in decode().
        level_3 = torch.cat((level_3_B, level_3_A), 1)
        level_4 = self.encode_fc_3(level_3)
        latent_mean = self.encode_fc_mean(level_4)
        latent_log_var = self.encode_fc_log_var(level_4)
        return latent_mean, latent_log_var
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) with the reparameterization trick."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)
    def decode(self, z):
        """Decode latent z into [recon_A, recon_B]."""
        level_1 = self.decode_fc_z(z)
        level_2 = self.decode_fc_2(level_1)
        # Split the shared layer back into the B / A branches.
        level_2_B = level_2.narrow(1, 0, self.dim_2B)
        level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
        level_3_B = self.decode_fc_3B(level_2_B)
        level_3_A = self.decode_fc_3A(level_2_A)
        recon_B = self.decode_fc_4B(level_3_B)
        recon_A = self.decode_fc_4A(level_3_A)
        return [recon_A, recon_B]
    def get_last_encode_layer(self):
        # The mean head is the last encoder layer.
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
class FcVaeB(nn.Module):
    """
    Fully-connected variational autoencoder for the DNA-methylation dataset
    (omics type B), with the methylation input treated as one flat vector.
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=512, dim_2B=256,
                 dim_3=256, latent_dim=256):
        """
        Build the symmetric encoder/decoder stacks.
        Parameters:
            omics_dims (list)   -- input omics dimensions; index 1 is the methylation width
            norm_layer          -- normalization layer passed to every FCBlock
            leaky_slope (float) -- negative slope of the Leaky ReLU activation
            dropout_p (float)   -- dropout probability for the hidden layers
            dim_1B/dim_2B/dim_3 -- hidden-layer widths
            latent_dim (int)    -- dimensionality of the latent space
        """
        super(FcVaeB, self).__init__()
        self.B_dim = omics_dims[1]
        common = dict(norm_layer=norm_layer, leaky_slope=leaky_slope)
        # ENCODER: B_dim -> dim_1B -> dim_2B -> dim_3 -> (mean, log_var)
        self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_2B = FCBlock(dim_1B, dim_2B, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_3 = FCBlock(dim_2B, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        # DECODER mirrors the encoder: latent -> dim_3 -> dim_2B -> dim_1B -> B_dim
        self.decode_fc_z = FCBlock(latent_dim, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_2 = FCBlock(dim_3, dim_2B, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_3B = FCBlock(dim_2B, dim_1B, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_4B = FCBlock(dim_1B, self.B_dim, dropout_p=0, activation=False, normalization=False, **common)
    def encode(self, x):
        """Encode the methylation tensor x[1] into latent mean and log-variance."""
        h = self.encode_fc_1B(x[1])
        h = self.encode_fc_2B(h)
        h = self.encode_fc_3(h)
        return self.encode_fc_mean(h), self.encode_fc_log_var(h)
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) via the reparameterization trick."""
        sigma = torch.exp(0.5 * log_var)
        return mean + torch.randn_like(sigma) * sigma
    def decode(self, z):
        """Decode z; returns [None, recon_B] to keep the omics-list layout."""
        h = self.decode_fc_z(z)
        h = self.decode_fc_2(h)
        h = self.decode_fc_3B(h)
        return [None, self.decode_fc_4B(h)]
    def get_last_encode_layer(self):
        """Return the mean head (the last encoder layer)."""
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        return z, self.decode(z), mean, log_var
class FcVaeA(nn.Module):
    """
    Fully-connected variational autoencoder for the gene-expression dataset
    (omics type A, read from x[0]).
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1A=1024, dim_2A=1024,
                 dim_3=512, latent_dim=256):
        """
        Build the symmetric encoder/decoder stacks.
        Parameters:
            omics_dims (list)   -- input omics dimensions; index 0 is the gene-expression width
            norm_layer          -- normalization layer passed to every FCBlock
            leaky_slope (float) -- negative slope of the Leaky ReLU activation
            dropout_p (float)   -- dropout probability for the hidden layers
            dim_1A/dim_2A/dim_3 -- hidden-layer widths
            latent_dim (int)    -- dimensionality of the latent space
        """
        super(FcVaeA, self).__init__()
        self.A_dim = omics_dims[0]
        common = dict(norm_layer=norm_layer, leaky_slope=leaky_slope)
        # ENCODER: A_dim -> dim_1A -> dim_2A -> dim_3 -> (mean, log_var)
        self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_2A = FCBlock(dim_1A, dim_2A, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_3 = FCBlock(dim_2A, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, dropout_p=0, activation=False, normalization=False, **common)
        # DECODER mirrors the encoder: latent -> dim_3 -> dim_2A -> dim_1A -> A_dim
        self.decode_fc_z = FCBlock(latent_dim, dim_3, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_2 = FCBlock(dim_3, dim_2A, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_3A = FCBlock(dim_2A, dim_1A, dropout_p=dropout_p, activation=True, **common)
        self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, dropout_p=0, activation=False, normalization=False, **common)
    def encode(self, x):
        """Encode the gene-expression tensor x[0] into latent mean and log-variance."""
        h = self.encode_fc_1A(x[0])
        h = self.encode_fc_2A(h)
        h = self.encode_fc_3(h)
        return self.encode_fc_mean(h), self.encode_fc_log_var(h)
    def reparameterize(self, mean, log_var):
        """Sample z ~ N(mean, exp(log_var)) via the reparameterization trick."""
        sigma = torch.exp(0.5 * log_var)
        return mean + torch.randn_like(sigma) * sigma
    def decode(self, z):
        """Decode z; the reconstruction is returned as a one-element list."""
        h = self.decode_fc_z(z)
        h = self.decode_fc_2(h)
        h = self.decode_fc_3A(h)
        return [self.decode_fc_4A(h)]
    def get_last_encode_layer(self):
        """Return the mean head (the last encoder layer)."""
        return self.encode_fc_mean
    def forward(self, x):
        """Full VAE pass: encode, sample, decode."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        return z, self.decode(z), mean, log_var
class FcVaeC(nn.Module):
    """
    Defines a fully-connected variational autoencoder for the C omics type
    (the third entry of the omics list).
    """
    def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1C=1024, dim_2C=1024, dim_3=512, latent_dim=256):
        """
        Construct a fully-connected variational autoencoder
        Parameters:
            omics_dims (list)    -- the list of input omics dimensions
            norm_layer           -- normalization layer
            leaky_slope (float)  -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)    -- probability of an element to be zeroed in a dropout layer
            latent_dim (int)     -- the dimensionality of the latent space
        """
        super(FcVaeC, self).__init__()
        self.C_dim = omics_dims[2]
        self.dim_2C = dim_2C
        # ENCODER: C_dim -> dim_1C -> dim_2C -> dim_3 -> (mean, log_var)
        # Layer 1
        self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 2
        self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 3
        self.encode_fc_3 = FCBlock(dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 4: two heads, mean and log-variance (no activation/normalization)
        self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                      activation=False, normalization=False)
        self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                         activation=False, normalization=False)
        # DECODER: latent_dim -> dim_3 -> dim_2C -> dim_1C -> C_dim
        # Layer 1
        self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 2
        self.decode_fc_2 = FCBlock(dim_3, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                   activation=True)
        # Layer 3
        self.decode_fc_3C = FCBlock(dim_2C, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                    activation=True)
        # Layer 4: reconstruction head (no activation/normalization/dropout)
        self.decode_fc_4C = FCBlock(dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                    activation=False, normalization=False)

    def encode(self, x):
        """Encode the C omics tensor (x[2]) into (mean, log_var)."""
        level_2_C = self.encode_fc_1C(x[2])
        level_3_C = self.encode_fc_2C(level_2_C)
        level_4 = self.encode_fc_3(level_3_C)
        latent_mean = self.encode_fc_mean(level_4)
        latent_log_var = self.encode_fc_log_var(level_4)
        return latent_mean, latent_log_var

    def reparameterize(self, mean, log_var):
        """Sample z = mean + sigma * eps (the VAE reparameterization trick)."""
        std = torch.exp(0.5 * log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mean)

    def decode(self, z):
        """Decode z; the A and B slots are returned as None for this backbone."""
        level_1 = self.decode_fc_z(z)
        level_2 = self.decode_fc_2(level_1)
        level_3_C = self.decode_fc_3C(level_2)
        recon_C = self.decode_fc_4C(level_3_C)
        return [None, None, recon_C]

    # BUG FIX: the original defined get_last_encode_layer twice back to back;
    # the duplicate (identical) definition has been removed.
    def get_last_encode_layer(self):
        """Return the layer that produces the latent mean."""
        return self.encode_fc_mean

    def forward(self, x):
        """Return (z, reconstruction list, mean, log_var)."""
        mean, log_var = self.encode(x)
        z = self.reparameterize(mean, log_var)
        recon_x = self.decode(z)
        return z, recon_x, mean, log_var
# Class for downstream task
class MultiFcClassifier(nn.Module):
    """
    A multi-layer fully-connected classification head:
    latent_dim -> class_dim_1 -> class_dim_2 (repeated) -> class_num
    """
    def __init__(self, param, class_num=2, latent_dim=256, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
                 class_dim_1=128, class_dim_2=64, layer_num=3):
        """
        Construct a multi-layer fully-connected classifier
        Parameters:
            param               -- experiment parameters; param.down_reduction_factor shrinks the hidden widths
            class_num (int)     -- the number of class
            latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
            norm_layer          -- normalization layer
            leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
            layer_num (int)     -- the layer number of the classifier, >=3
        """
        super(MultiFcClassifier, self).__init__()
        # Shrink the hidden widths by the configured reduction factor
        class_dim_1 = class_dim_1 // param.down_reduction_factor
        class_dim_2 = class_dim_2 // param.down_reduction_factor
        self.input_fc = FCBlock(latent_dim, class_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                dropout_p=dropout_p, activation=True)
        # Hidden stack: layer_num is clamped to >= 3, so at least one block
        hidden_blocks = []
        in_dim = class_dim_1
        use_dropout = True
        for _ in range(max(layer_num, 3) - 2):
            hidden_blocks.append(FCBlock(in_dim, class_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                         dropout_p=use_dropout * dropout_p, activation=True))
            in_dim = class_dim_2
            use_dropout = not use_dropout  # dropout on every other block
        self.mul_fc = nn.Sequential(*hidden_blocks)
        # Output projection: no activation, no normalization, no dropout
        self.output_fc = FCBlock(class_dim_2, class_num, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                 activation=False, normalization=False)

    def forward(self, x):
        """Map a latent vector to unnormalized class scores."""
        return self.output_fc(self.mul_fc(self.input_fc(x)))
class MultiFcRegression(nn.Module):
    """
    A multi-layer fully-connected regression head producing a single scalar:
    latent_dim -> down_dim_1 -> down_dim_2 (repeated) -> 1
    """
    def __init__(self, latent_dim=256, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, down_dim_1=128,
                 down_dim_2=64, layer_num=3):
        """
        Construct a one dimensional multi-layer regression net
        Parameters:
            latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
            norm_layer          -- normalization layer
            leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
            layer_num (int)     -- the layer number of the classifier, >=3
        """
        super(MultiFcRegression, self).__init__()
        self.input_fc = FCBlock(latent_dim, down_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                dropout_p=dropout_p, activation=True)
        # Hidden stack: layer_num is clamped to >= 3, so at least one block
        hidden_blocks = []
        in_dim = down_dim_1
        use_dropout = True
        for _ in range(max(layer_num, 3) - 2):
            hidden_blocks.append(FCBlock(in_dim, down_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                         dropout_p=use_dropout * dropout_p, activation=True))
            in_dim = down_dim_2
            use_dropout = not use_dropout  # dropout on every other block
        self.mul_fc = nn.Sequential(*hidden_blocks)
        # Output projection: no activation, no normalization, no dropout
        self.output_fc = FCBlock(down_dim_2, 1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                 activation=False, normalization=False)

    def forward(self, x):
        """Map a latent vector to a single regression value."""
        return self.output_fc(self.mul_fc(self.input_fc(x)))
class MultiFcSurvival(nn.Module):
    """
    Defines a multi-layer fully-connected survival predictor.
    Outputs one value per discrete time interval.
    """
    def __init__(self, time_num=256, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
                 down_dim_1=512, down_dim_2=256, layer_num=3):
        """
        Construct a multi-layer fully-connected survival predictor
        Parameters:
            time_num (int)      -- the number of time intervals in the model
            latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
            norm_layer          -- normalization layer
            leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
            layer_num (int)     -- the layer number of the classifier, >=3
        """
        super(MultiFcSurvival, self).__init__()
        # Hidden blocks use Tanh (unlike the other heads, which use Leaky ReLU)
        self.input_fc = FCBlock(latent_dim, down_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
                                activation=True, activation_name='Tanh')
        # create a list to store fc blocks
        mul_fc_block = []
        # the block number of the multi-layer fully-connected block should be at least 3
        block_layer_num = max(layer_num, 3)
        input_dim = down_dim_1
        # CLEANUP: the original toggled a dropout_flag here but never used it;
        # dropout is applied to every hidden block (dropout_p passed directly).
        for num in range(0, block_layer_num-2):
            mul_fc_block += [FCBlock(input_dim, down_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
                                     dropout_p=dropout_p, activation=True, activation_name='Tanh')]
            input_dim = down_dim_2
        self.mul_fc = nn.Sequential(*mul_fc_block)
        # the output fully-connected layer of the classifier
        # the output dimension should be the number of time intervals
        self.output_fc = FCBlock(down_dim_2, time_num, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
                                 activation=False, normalization=False)

    def forward(self, x):
        """Map a latent vector to per-time-interval survival outputs."""
        x1 = self.input_fc(x)
        x2 = self.mul_fc(x1)
        y = self.output_fc(x2)
        return y
class MultiFcMultitask(nn.Module):
    """
    Defines a multi-layer fully-connected multitask downstream network
    (survival + classification + regression heads sharing one latent input)
    """
    def __init__(self, class_num=2, time_num=256, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
                 layer_num=3):
        """
        Construct a multi-layer fully-connected multitask downstream network
        Parameters:
            class_num (int)     -- the number of class
            time_num (int)      -- the number of time intervals in the model
            latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
            norm_layer          -- normalization layer
            leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
            layer_num (int)     -- the layer number of the downstream networks, >=3
        """
        super(MultiFcMultitask, self).__init__()
        # survival head gets no normalization (Identity) and a fixed 0.5 dropout
        norm_layer_none = lambda x: Identity()
        self.survival = MultiFcSurvival(time_num, latent_dim, norm_layer=norm_layer_none, leaky_slope=leaky_slope, dropout_p=0.5, layer_num=layer_num)
        # NOTE(review): MultiFcClassifier's first positional parameter is `param`
        # (used for param.down_reduction_factor); here class_num is passed in
        # that slot and latent_dim in the class_num slot, so this call appears
        # inconsistent with the classifier's signature — confirm and fix callers.
        self.classifier = MultiFcClassifier(class_num, latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.2, layer_num=layer_num)
        self.regression = MultiFcRegression(latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.01, layer_num=layer_num)
    def forward(self, x):
        # Run all three heads on the same latent vector
        y_out_sur = self.survival(x)
        y_out_cla = self.classifier(x)
        y_out_reg = self.regression(x)
        return y_out_sur, y_out_cla, y_out_reg
class MultiFcAlltask(nn.Module):
    """
    Defines a multi-layer fully-connected multitask downstream network (all tasks):
    one survival head, (task_num - 2) classification heads, one regression head.
    """
    def __init__(self, class_num, time_num=256, task_num=7, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
                 layer_num=3):
        """
        Construct a multi-layer fully-connected multitask downstream network (all tasks)
        Parameters:
            class_num (list)    -- the list of class numbers
            time_num (int)      -- the number of time intervals in the model
            latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
            norm_layer          -- normalization layer
            leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
            layer_num (int)     -- the layer number of the classifier, >=3
            task_num (int)      -- the number of downstream tasks
        """
        super(MultiFcAlltask, self).__init__()
        # survival head gets no normalization (Identity) and a fixed 0.5 dropout
        norm_layer_none = lambda x: Identity()
        self.survival = MultiFcSurvival(time_num, latent_dim, norm_layer=norm_layer_none, leaky_slope=leaky_slope, dropout_p=0.5, layer_num=layer_num)
        # NOTE(review): MultiFcClassifier's first positional parameter is `param`
        # (used for param.down_reduction_factor); here class_num[i] is passed in
        # that slot, so this call appears inconsistent with the classifier's
        # signature — confirm and fix callers.
        self.classifiers = nn.ModuleList([MultiFcClassifier(class_num[i], latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.2, layer_num=layer_num) for i in range(task_num-2)])
        self.regression = MultiFcRegression(latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.01, layer_num=layer_num)
        self.task_num = task_num
    def forward(self, x):
        # Survival, then one output per classification task, then regression
        y_out_sur = self.survival(x)
        y_out_cla = []
        for i in range(self.task_num - 2):
            y_out_cla.append(self.classifiers[i](x))
        y_out_reg = self.regression(x)
        return y_out_sur, y_out_cla, y_out_reg
# Class for the OmiEmbed combined network
class OmiEmbed(nn.Module):
    """
    Defines the OmiEmbed combined network: a VAE backbone selected by
    (net_VAE, omics_mode) plus a downstream head selected by net_down.
    """
    def __init__(self, net_VAE, net_down, omics_dims, omics_mode='multi_omics', norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9,
                 leaky_slope=0.2, dropout_p=0, latent_dim=128, class_num=2, time_num=256, task_num=7):
        """
        Construct the OmiEmbed combined network
        Parameters:
            net_VAE (str)        -- the backbone of the VAE, default: conv_1d
            net_down (str)       -- the backbone of the downstream task network, default: multi_FC_classifier
            omics_dims (list)    -- the list of input omics dimensions
            omics_mode (str)     -- omics types would like to use in the model
            norm_layer           -- normalization layer
            filter_num (int)     -- the number of filters in the first convolution layer in the VAE
            kernel_size (int)    -- the kernel size of convolution layers
            leaky_slope (float)  -- the negative slope of the Leaky ReLU activation function
            dropout_p (float)    -- probability of an element to be zeroed in a dropout layer
            latent_dim (int)     -- the dimensionality of the latent space
            class_num (int/list) -- the number of classes
            time_num (int)       -- the number of time intervals
            task_num (int)       -- the number of downstream tasks
        """
        super(OmiEmbed, self).__init__()
        # NOTE(review): if omics_mode matches none of the branches below,
        # self.vae silently stays None — no explicit error is raised for an
        # unrecognized omics_mode, only for an unrecognized net_VAE.
        self.vae = None
        if net_VAE == 'conv_1d':
            if omics_mode == 'abc':
                self.vae = ConvVaeABC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'ab':
                self.vae = ConvVaeAB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'b':
                self.vae = ConvVaeB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'a':
                self.vae = ConvVaeA(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'c':
                self.vae = ConvVaeC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif net_VAE == 'fc_sep':
            # fc_sep has no dedicated single-omics A/C variants; it reuses FcVaeA/FcVaeC
            if omics_mode == 'abc':
                self.vae = FcSepVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'ab':
                self.vae = FcSepVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'b':
                self.vae = FcSepVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'a':
                self.vae = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'c':
                self.vae = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif net_VAE == 'fc':
            if omics_mode == 'abc':
                self.vae = FcVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'ab':
                self.vae = FcVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'b':
                self.vae = FcVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'a':
                self.vae = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
            elif omics_mode == 'c':
                self.vae = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        else:
            raise NotImplementedError('VAE model name [%s] is not recognized' % net_VAE)
        self.net_down = net_down
        self.down = None
        if net_down == 'multi_FC_classifier':
            # NOTE(review): MultiFcClassifier's first positional parameter is
            # `param` (used for param.down_reduction_factor); here class_num is
            # passed in that slot — confirm against the classifier's signature.
            self.down = MultiFcClassifier(class_num, latent_dim, norm_layer, leaky_slope, dropout_p)
        elif net_down == 'multi_FC_regression':
            self.down = MultiFcRegression(latent_dim, norm_layer, leaky_slope, dropout_p)
        elif net_down == 'multi_FC_survival':
            self.down = MultiFcSurvival(time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
        elif net_down == 'multi_FC_multitask':
            self.down = MultiFcMultitask(class_num, time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
        elif net_down == 'multi_FC_alltask':
            self.down = MultiFcAlltask(class_num, time_num, task_num, latent_dim, norm_layer, leaky_slope, dropout_p)
        else:
            raise NotImplementedError('Downstream model name [%s] is not recognized' % net_down)
    def get_last_encode_layer(self):
        # Delegate to the VAE backbone (the layer producing the latent mean)
        return self.vae.get_last_encode_layer()
    def forward(self, x):
        """Run the VAE, then feed the latent mean to the downstream head(s)."""
        z, recon_x, mean, log_var = self.vae(x)
        if self.net_down == 'multi_FC_multitask' or self.net_down == 'multi_FC_alltask':
            # multitask heads return (survival, classification, regression) outputs
            y_out_sur, y_out_cla, y_out_reg = self.down(mean)
            return z, recon_x, mean, log_var, y_out_sur, y_out_cla, y_out_reg
        else:
            y_out = self.down(mean)
            return z, recon_x, mean, log_var, y_out
def get_norm_layer(norm_type='batch'):
    """
    Return a constructor for the requested 1D normalization layer.
    Parameters:
        norm_type (str) -- the type of normalization applied to the model; one of: batch | instance | none
    Raises:
        NotImplementedError -- for any other norm_type
    """
    if norm_type == 'batch':
        # learnable affine parameters and running statistics
        return functools.partial(nn.BatchNorm1d, affine=True, track_running_stats=True)
    if norm_type == 'instance':
        # no affine parameters, no running statistics
        return functools.partial(nn.InstanceNorm1d, affine=False, track_running_stats=False)
    if norm_type == 'none':
        # constructor that ignores its argument and returns a pass-through layer
        return lambda x: Identity()
    raise NotImplementedError('normalization method [%s] is not found' % norm_type)
def define_net(net_VAE, net_down, omics_dims, omics_mode='multi_omics', norm_type='batch', filter_num=8, kernel_size=9,
               leaky_slope=0.2, dropout_p=0, latent_dim=256, class_num=2, time_num=256, task_num=7, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """
    Build the complete OmiEmbed network (VAE backbone + downstream head) and
    hand it to <init_net> for device placement and weight initialization.
    Parameters:
        net_VAE (str)       -- the backbone of the VAE, default: conv_1d
        net_down (str)      -- the backbone of the downstream task network, default: multi_FC_classifier
        omics_dims (list)   -- the list of input omics dimensions
        omics_mode (str)    -- omics types would like to use in the model
        norm_type (str)     -- the name of normalization layers used in the network, default: batch
        filter_num (int)    -- the number of filters in the first convolution layer in the VAE
        kernel_size (int)   -- the kernel size of convolution layers
        leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
        dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
        latent_dim (int)    -- the dimensionality of the latent space
        class_num (int)     -- the number of classes
        time_num (int)      -- the number of time intervals
        task_num (int)      -- the number of downstream tasks
        init_type (str)     -- the name of our initialization method
        init_gain (float)   -- scaling factor for normal, xavier and orthogonal initialization methods
        gpu_ids (int list)  -- which GPUs the network runs on: e.g., 0,1
    Returns the initialized OmiEmbed network.
    """
    # Translate the norm name into a layer constructor, then assemble the net.
    norm_layer = get_norm_layer(norm_type=norm_type)
    net = OmiEmbed(net_VAE, net_down, omics_dims, omics_mode, norm_layer, filter_num, kernel_size, leaky_slope,
                   dropout_p, latent_dim, class_num, time_num, task_num)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_VAE(param, net_VAE, omics_subset_dims, omics_dims, omics_mode='multi_omics', norm_type='batch', filter_num=8, kernel_size=9, leaky_slope=0.2, dropout_p=0,
               latent_dim=256, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """
    Create the VAE network
    Parameters:
        param                    -- experiment parameters (only forwarded to FcVaeABC in the 'fc'/'abc' branch below)
        net_VAE (str)            -- the backbone of the VAE, default: conv_1d
        omics_subset_dims        -- subset dimensions (only forwarded to FcVaeABC in the 'fc'/'abc' branch below)
        omics_dims (list)        -- the list of input omics dimensions
        omics_mode (str)         -- omics types would like to use in the model
        norm_type (str)          -- the name of normalization layers used in the network, default: batch
        filter_num (int)         -- the number of filters in the first convolution layer in the VAE
        kernel_size (int)        -- the kernel size of convolution layers
        leaky_slope (float)      -- the negative slope of the Leaky ReLU activation function
        dropout_p (float)        -- probability of an element to be zeroed in a dropout layer
        latent_dim (int)         -- the dimensionality of the latent space
        init_type (str)          -- the name of our initialization method
        init_gain (float)        -- scaling factor for normal, xavier and orthogonal initialization methods
        gpu_ids (int list)       -- which GPUs the network runs on: e.g., 0,1
    Returns a VAE
    The default backbone of the VAE is one dimensional convolutional layer.
    The generator has been initialized by <init_net>.
    """
    # NOTE(review): if omics_mode matches none of the branches of the selected
    # net_VAE, net stays None and init_net(None, ...) will fail downstream —
    # only an unrecognized net_VAE raises explicitly.
    net = None
    # get the normalization layer
    norm_layer = get_norm_layer(norm_type=norm_type)
    if net_VAE == 'conv_1d':
        if omics_mode == 'abc':
            net = ConvVaeABC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
                             latent_dim=latent_dim)
        elif omics_mode == 'ab':
            net = ConvVaeAB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
                            latent_dim=latent_dim)
        elif omics_mode == 'b':
            net = ConvVaeB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
                           latent_dim=latent_dim)
        elif omics_mode == 'a':
            net = ConvVaeA(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
                           latent_dim=latent_dim)
        elif omics_mode == 'c':
            net = ConvVaeC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
                           latent_dim=latent_dim)
    elif net_VAE == 'fc_sep':
        # fc_sep reuses the plain FcVaeA / FcVaeC for single-omics modes
        if omics_mode == 'abc':
            net = FcSepVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'ab':
            net = FcSepVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'b':
            net = FcSepVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'a':
            net = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'c':
            net = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
    elif net_VAE == 'fc':
        if omics_mode == 'abc':
            # NOTE(review): this is the only branch receiving param and
            # omics_subset_dims — presumably the sub-embedding variant; the
            # other branches use the plain constructors. Confirm intent.
            net = FcVaeABC(param, omics_dims, omics_subset_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'ab':
            net = FcVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'b':
            net = FcVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'a':
            net = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
        elif omics_mode == 'c':
            net = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
    else:
        raise NotImplementedError('VAE model name [%s] is not recognized' % net_VAE)
    return init_net(net, init_type, init_gain, gpu_ids)
def define_down(param, net_down, norm_type='batch', leaky_slope=0.2, dropout_p=0, latent_dim=256, class_num=2, time_num=256,
                task_num=7, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """
    Build and initialize the downstream task network.
    Parameters:
        param               -- experiment parameters (forwarded to the classifier head)
        net_down (str)      -- the backbone of the downstream task network, default: multi_FC_classifier
        norm_type (str)     -- the name of normalization layers used in the network, default: batch
        leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
        dropout_p (float)   -- probability of an element to be zeroed in a dropout layer
        latent_dim (int)    -- the dimensionality of the latent space and the input layer of the classifier
        class_num (int)     -- the number of class
        time_num (int)      -- the number of time intervals
        task_num (int)      -- the number of downstream tasks
        init_type (str)     -- the name of our initialization method
        init_gain (float)   -- scaling factor for normal, xavier and orthogonal initialization methods
        gpu_ids (int list)  -- which GPUs the network runs on: e.g., 0,1
    Returns a downstream task network initialized by <init_net>.
    Raises NotImplementedError for an unrecognized net_down.
    """
    norm_layer = get_norm_layer(norm_type=norm_type)
    # Lazily-evaluated factories so only the selected network is constructed.
    factories = {
        'multi_FC_classifier': lambda: MultiFcClassifier(param, class_num, latent_dim, norm_layer, leaky_slope, dropout_p),
        'multi_FC_regression': lambda: MultiFcRegression(latent_dim, norm_layer, leaky_slope, dropout_p),
        'multi_FC_survival': lambda: MultiFcSurvival(time_num, latent_dim, norm_layer, leaky_slope, dropout_p),
        'multi_FC_multitask': lambda: MultiFcMultitask(class_num, time_num, latent_dim, norm_layer, leaky_slope, dropout_p),
        'multi_FC_alltask': lambda: MultiFcAlltask(class_num, time_num, task_num, latent_dim, norm_layer, leaky_slope, dropout_p),
    }
    if net_down not in factories:
        raise NotImplementedError('Downstream model name [%s] is not recognized' % net_down)
    net = factories[net_down]()
    return init_net(net, init_type, init_gain, gpu_ids)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """
    Register a network on CPU/GPU (with multi-GPU support) and initialize its weights.
    Parameters:
        net (nn.Module)    -- the network to be initialized
        init_type (str)    -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
    Return an initialized network.
    """
    if gpu_ids:
        # GPUs requested: CUDA must be present; place the net on the first
        # device, then wrap it for multi-GPU execution.
        assert torch.cuda.is_available()
        net.to(gpu_ids[0])
        net = torch.nn.DataParallel(net, gpu_ids)
    init_weights(net, init_type, init_gain=init_gain)
    return net
def init_weights(net, init_type='normal', init_gain=0.02):
    """
    Initialize the weights of every Conv*/Linear/BatchNorm1d layer in a network.
    Parameters:
        net (nn.Module)   -- the network to be initialized
        init_type (str)   -- normal | xavier_normal | xavier_uniform | kaiming_normal | kaiming_uniform | orthogonal
        init_gain (float) -- scaling factor for normal, xavier and orthogonal.
    """
    def init_func(m):
        layer_name = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in layer_name or 'Linear' in layer_name):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, init_gain)
            elif init_type == 'xavier_normal':
                init.xavier_normal_(m.weight.data, gain=init_gain)
            elif init_type == 'xavier_uniform':
                init.xavier_uniform_(m.weight.data, gain=init_gain)
            elif init_type == 'kaiming_normal':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'kaiming_uniform':
                init.kaiming_uniform_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=init_gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm1d' in layer_name:
            # BatchNorm's weight is a vector, not a matrix: draw it around 1.0
            # regardless of init_type, and zero the bias.
            init.normal_(m.weight.data, 1.0, init_gain)
            init.constant_(m.bias.data, 0.0)
    print('Initialize network with %s' % init_type)
    # recursively apply to the net and all of its submodules
    net.apply(init_func)
def get_scheduler(optimizer, param):
    """
    Return a learning rate scheduler
    Parameters:
        optimizer (opt class) -- the optimizer of the network
        param (params class)  -- param.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
    For 'linear', we keep the same learning rate for the first <param.niter> epochs and linearly decay the rate to zero
    over the next <param.niter_decay> epochs.
    Raises:
        NotImplementedError -- if param.lr_policy is not a supported policy
    """
    if param.lr_policy == 'linear':
        def lambda_rule(epoch):
            # Multiplier decays linearly from 1.0 to 0.0 over the last
            # param.epoch_num_decay epochs of training.
            lr_lambda = 1.0 - max(0, epoch + param.epoch_count - param.epoch_num + param.epoch_num_decay) / float(param.epoch_num_decay + 1)
            return lr_lambda
        # lr_scheduler is imported from torch.optim
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif param.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=param.decay_step_size, gamma=0.1)
    elif param.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif param.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=param.epoch_num, eta_min=0)
    else:
        # BUG FIX: the exception must be raised, not returned, and the policy
        # name interpolated into the message (the original passed it as a
        # second constructor argument).
        raise NotImplementedError('Learning rate policy [%s] is not found' % param.lr_policy)
    return scheduler
# (dataset-join residue removed here: a file-statistics row and repo/path columns
#  were fused with the next file's first import)
# --- SubOmiEmbed-main/models/basic_model.py ---
import os
import torch
import numpy as np
from abc import ABC, abstractmethod
from . import networks
from collections import OrderedDict
class BasicModel(ABC):
"""
This class is an abstract base class for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: Initialize the class, first call BasicModel.__init__(self, param)
-- <modify_commandline_parameters>: Add model-specific parameters, and rewrite default values for existing parameters
-- <set_input>: Unpack input data from the output dictionary of the dataloader
-- <forward>: Get the reconstructed omics data and results for the downstream task
-- <update>: Calculate losses, gradients and update network parameters
"""
    def __init__(self, param):
        """
        Initialize the BaseModel class
        Parameters:
            param -- the parsed experiment options/parameters object
        """
        self.param = param
        self.gpu_ids = param.gpu_ids
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
        self.save_dir = os.path.join(param.checkpoints_dir, param.experiment_name) # save all the checkpoints to save_dir, and this is where to load the models
        self.load_net_dir = os.path.join(param.checkpoints_dir, param.experiment_to_load) # load pretrained networks from certain experiment folder
        self.isTrain = param.isTrain
        self.phase = 'p1' # current training phase identifier
        self.epoch = 1 # current epoch counter
        self.iter = 0 # current iteration counter
        # Improve the performance if the dimensionality and shape of the input data keep the same
        torch.backends.cudnn.benchmark = True
        self.plateau_metric = 0 # used for learning rate policy 'plateau'
        self.loss_names = [] # names of the losses to report; filled by subclasses
        self.model_names = [] # names of sub-networks (stored as attributes prefixed with 'net'); filled by subclasses
        self.metric_names = [] # names of evaluation metrics; filled by subclasses
        self.optimizers = [] # optimizers; filled by subclasses
        self.schedulers = [] # LR schedulers; created in setup()
        # Placeholders expected to be populated by subclasses during forward/cal_losses
        self.latent = None
        self.loss_embed = None
        self.loss_embed_sum = []
        self.loss_down = None
        self.loss_All = None
    @staticmethod
    def modify_commandline_parameters(parser, is_train):
        """
        Add model-specific parameters, and rewrite default values for existing parameters.
        The base implementation returns the parser unchanged; subclasses override
        this to register their own options.
        Parameters:
            parser -- original parameter parser
            is_train (bool) -- whether it is currently training phase or test phase. Use this flag to add or change training-specific or test-specific parameters.
        Returns:
            The modified parser.
        """
        return parser
    @abstractmethod
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader.
        Must be implemented by concrete model subclasses.
        Parameters:
            input_dict (dict): include the data tensor and its label
        """
        pass
    @abstractmethod
    def forward(self):
        """
        Run forward pass.
        Must be implemented by concrete model subclasses.
        """
        pass
    @abstractmethod
    def cal_losses(self):
        """
        Calculate losses.
        Must be implemented by concrete model subclasses.
        """
        pass
    @abstractmethod
    def update(self):
        """
        Calculate losses, gradients and update network weights; called in every training iteration.
        Must be implemented by concrete model subclasses.
        """
        pass
def setup(self, param):
"""
Load and print networks, create schedulers
"""
if self.isTrain:
self.print_networks(param)
# For every optimizer we have a scheduler
self.schedulers = [networks.get_scheduler(optimizer, param) for optimizer in self.optimizers]
# Loading the networks
if not self.isTrain or param.continue_train:
self.load_networks(param.epoch_to_load)
def update_learning_rate(self):
"""
Update learning rates for all the networks
Called at the end of each epoch
"""
lr = self.optimizers[0].param_groups[0]['lr']
for scheduler in self.schedulers:
if self.param.lr_policy == 'plateau':
scheduler.step(self.plateau_metric)
else:
scheduler.step()
return lr
def print_networks(self, param):
"""
Print the total number of parameters in the network and network architecture if detail is true
Save the networks information to the disk
"""
message = '\n----------------------Networks Information----------------------'
for model_name in self.model_names:
if isinstance(model_name, str):
net = getattr(self, 'net' + model_name)
num_params = 0
for parameter in net.parameters():
num_params += parameter.numel()
if param.detail:
message += '\n' + str(net)
message += '\n[Network {:s}] Total number of parameters : {:.3f} M'.format(model_name, num_params / 1e6)
message += '\n----------------------------------------------------------------\n'
# Save the networks information to the disk
net_info_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'net_info.txt')
with open(net_info_filename, 'w') as log_file:
log_file.write(message)
print(message)
def save_networks(self, epoch):
"""
Save all the networks to the disk.
Parameters:
epoch (str) -- current epoch
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '{:s}_net_{:s}.pth'.format(epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
# Use the str to get the attribute aka the network (self.netG / self.netD)
net = getattr(self, 'net' + name)
# If we use multi GPUs and apply the data parallel
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def load_networks(self, epoch):
"""
Load networks at specified epoch from the disk.
Parameters:
epoch (str) -- Which epoch to load
"""
for model_name in self.model_names:
if isinstance(model_name, str):
load_filename = '{:s}_net_{:s}.pth'.format(epoch, model_name)
load_path = os.path.join(self.load_net_dir, load_filename)
# Use the str to get the attribute aka the network (self.netG / self.netD)
net = getattr(self, 'net' + model_name)
# If we use multi GPUs and apply the data parallel
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('Loading the model from %s' % load_path)
state_dict = torch.load(load_path, map_location=self.device)
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
net.load_state_dict(state_dict)
def set_train(self):
"""
Set train mode for networks
"""
for model_name in self.model_names:
if isinstance(model_name, str):
# Use the str to get the attribute aka the network (self.netXXX)
net = getattr(self, 'net' + model_name)
net.train()
self.isTrain = True
def set_eval(self):
"""
Set eval mode for networks
"""
for model_name in self.model_names:
if isinstance(model_name, str):
# Use the str to get the attribute aka the network (self.netG / self.netD)
net = getattr(self, 'net' + model_name)
net.eval()
self.isTrain = False
def test(self):
"""
Forward in testing to get the output tensors
"""
with torch.no_grad():
self.forward()
self.cal_losses()
# if self.param.use_subset_features:
# self.loss_embed_sum = []
# self.loss_down_sum = []
# self.y_out_subset = []
# for subset in range(self.param.subset_num):
# self.subset = subset
# self.forward()
# self.y_out_subset.append(self.y_out)
# self.cal_losses()
# self.loss_embed_sum.append(self.loss_embed)
# self.loss_down_sum.append(self.loss_down)
# self.loss_embed = sum(self.loss_embed_sum) / self.param.subset_num
# self.loss_down = sum(self.loss_down_sum) / self.param.subset_num
# if self.param.agg_method == 'mean':
# self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
# elif self.param.agg_method == 'max':
# self.y_out = torch.max(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'min':
# self.y_out = torch.min(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'sum':
# self.y_out = torch.sum(torch.stack(self.y_out_subset), axis=0)
# else:
# self.forward()
# self.cal_losses()
def init_output_dict(self):
"""
initialize a dictionary for downstream task output
"""
output_dict = OrderedDict()
output_names = []
if self.param.downstream_task == 'classification':
output_names = ['index', 'y_true', 'y_pred', 'y_prob']
elif self.param.downstream_task == 'regression':
output_names = ['index', 'y_true', 'y_pred']
elif self.param.downstream_task == 'survival':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out']
elif self.param.downstream_task == 'multitask' or self.param.downstream_task == 'alltask':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out_sur', 'y_true_cla', 'y_pred_cla',
'y_prob_cla', 'y_true_reg', 'y_pred_reg']
for name in output_names:
output_dict[name] = None
return output_dict
def update_output_dict(self, output_dict):
"""
output_dict (OrderedDict) -- the output dictionary to be updated
"""
down_output = self.get_down_output()
output_names = []
if self.param.downstream_task == 'classification':
output_names = ['index', 'y_true', 'y_pred', 'y_prob']
elif self.param.downstream_task == 'regression':
output_names = ['index', 'y_true', 'y_pred']
elif self.param.downstream_task == 'survival':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out']
elif self.param.downstream_task == 'multitask' or self.param.downstream_task == 'alltask':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out_sur', 'y_true_cla',
'y_pred_cla', 'y_prob_cla', 'y_true_reg', 'y_pred_reg']
for name in output_names:
if output_dict[name] is None:
output_dict[name] = down_output[name]
else:
if self.param.downstream_task == 'alltask' and name in ['y_true_cla', 'y_pred_cla', 'y_prob_cla']:
for i in range(self.param.task_num-2):
output_dict[name][i] = torch.cat((output_dict[name][i], down_output[name][i]))
else:
output_dict[name] = torch.cat((output_dict[name], down_output[name]))
def init_losses_dict(self):
"""
initialize a losses dictionary
"""
losses_dict = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
losses_dict[name] = []
return losses_dict
def update_losses_dict(self, losses_dict, actual_batch_size):
"""
losses_dict (OrderedDict) -- the losses dictionary to be updated
actual_batch_size (int) -- actual batch size for loss normalization
"""
for name in self.loss_names:
if isinstance(name, str):
if self.param.reduction == 'sum':
losses_dict[name].append(float(getattr(self, 'loss_' + name))/actual_batch_size)
elif self.param.reduction == 'mean':
losses_dict[name].append(float(getattr(self, 'loss_' + name)))
def init_metrics_dict(self):
"""
initialize a metrics dictionary
"""
metrics_dict = OrderedDict()
for name in self.metric_names:
if isinstance(name, str):
metrics_dict[name] = None
return metrics_dict
def update_metrics_dict(self, metrics_dict):
"""
metrics_dict (OrderedDict) -- the metrics dictionary to be updated
"""
for name in self.metric_names:
if isinstance(name, str):
metrics_dict[name] = getattr(self, 'metric_' + name)
def init_log_dict(self):
"""
initialize losses and metrics dictionary
"""
output_dict = self.init_output_dict()
losses_dict = self.init_losses_dict()
metrics_dict = self.init_metrics_dict()
return output_dict, losses_dict, metrics_dict
def update_log_dict(self, output_dict, losses_dict, metrics_dict, actual_batch_size):
"""
output_dict (OrderedDict) -- the output dictionary to be updated
losses_dict (OrderedDict) -- the losses dictionary to be updated
metrics_dict (OrderedDict) -- the metrics dictionary to be updated
actual_batch_size (int) -- actual batch size for loss normalization
"""
self.update_output_dict(output_dict)
self.calculate_current_metrics(output_dict)
self.update_losses_dict(losses_dict, actual_batch_size)
self.update_metrics_dict(metrics_dict)
def init_latent_dict(self):
"""
initialize and return an empty latent space array and an empty index array
"""
latent_dict = OrderedDict()
latent_dict['index'] = np.zeros(shape=[0])
latent_dict['latent'] = np.zeros(shape=[0, self.param.latent_space_dim])
return latent_dict
def update_latent_dict(self, latent_dict):
"""
update the latent dict
latent_dict (OrderedDict)
"""
with torch.no_grad():
current_latent_array = self.latent.cpu().numpy()
latent_dict['latent'] = np.concatenate((latent_dict['latent'], current_latent_array))
current_index_array = self.data_index.cpu().numpy()
latent_dict['index'] = np.concatenate((latent_dict['index'], current_index_array))
return latent_dict
| 15,137 | 39.475936 | 166 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_classifier_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
import random
class VaeClassifierModel(VaeBasicModel):
    """
    This class implements the VAE classifier model, using the VAE framework with the classification downstream task.

    Adds a classification head (netDown) on top of the VAE embedding and,
    optionally, trains on per-subset feature slices (param.use_subset_features)
    whose latents/logits are aggregated according to param.agg_method.
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """Register the classification-specific command-line options and return the parser."""
        # changing the default values of parameters to match the vae regression model
        parser.add_argument('--class_num', type=int, default=0,
                            help='the number of classes for the classification task')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_classifier class.

        Parameters:
            param: parsed option object; uses use_subset_features,
                use_subset_identity, agg_method, subset_num and (mutates!)
                latent_space_dim, among others.
        """
        VaeBasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        self.loss_names.append('classifier')
        # specify the metrics you want to print out.
        self.metric_names = ['accuracy']
        # input tensor
        self.label = None
        # output tensor
        self.y_out = None
        self.y_out_subset = []
        if param.use_subset_features:
            if param.use_subset_identity:
                # A one-hot subset identity is appended to each latent vector,
                # so the downstream input dimension grows by subset_num.
                # NOTE: param.latent_space_dim is mutated in place.
                param.latent_space_dim = param.latent_space_dim + param.subset_num
            elif param.agg_method == 'concat':
                # Concatenating per-subset latents multiplies the downstream
                # input dimension by subset_num.
                param.latent_space_dim = param.latent_space_dim * param.subset_num
        # define the network
        self.netDown = networks.define_down(param, param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, param.class_num, None, None, param.init_type,
                                            param.init_gain, self.gpu_ids)
        # define the classification loss
        self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
        self.loss_classifier = None
        self.metric_accuracy = None
        if self.isTrain:
            # Set the optimizer
            self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Down)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader
        Parameters:
            input_dict (dict): include the data tensor, its index and its 'label'.
        """
        VaeBasicModel.set_input(self, input_dict)
        self.label = input_dict['label'].to(self.device)
    def forward(self):
        """
        Encode the input and produce classifier logits in self.y_out.

        With subset features enabled, each subset is encoded separately;
        aggregation then happens either on the per-subset logits (when a
        subset identity is appended) or on the per-subset latents.
        """
        if self.param.use_subset_features:
            self.latent_subset = []
            self.recon_omics_subset = []
            self.y_out_subset = []
            # One-hot identity vector per subset, used to tag latents.
            self.latent_identity = F.one_hot(torch.arange(0,self.param.subset_num).to(self.device))
            for subset in range(self.param.subset_num):
                # self.subset is read by VaeBasicModel.forward to pick the slice.
                self.subset = subset
                VaeBasicModel.forward(self)
                if self.param.use_subset_identity:
                    # Tag the latent with its subset identity and classify it
                    # immediately; logits are aggregated afterwards.
                    self.latent = torch.cat([self.latent, self.latent_identity[subset].repeat(self.latent.shape[0], 1)], dim=1)
                    self.y_out = self.netDown(self.latent)
                    self.y_out_subset.append(self.y_out)
                else:
                    self.latent_subset.append(self.latent)
                self.recon_omics_subset.append(self.recon_omics)
            if self.param.use_subset_identity:
                # Aggregate the per-subset logits.
                if self.param.agg_method == 'mean':
                    self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
                elif self.param.agg_method == 'max':
                    self.y_out = torch.max(torch.stack(self.y_out_subset), axis=0)[0]
                elif self.param.agg_method == 'min':
                    self.y_out = torch.min(torch.stack(self.y_out_subset), axis=0)[0]
                elif self.param.agg_method == 'sum':
                    self.y_out = torch.sum(torch.stack(self.y_out_subset), axis=0)
                elif self.param.agg_method == 'concat':
                    self.y_out = torch.cat(self.y_out_subset, axis=1)
                elif self.param.agg_method == 'random':
                    # Pick one subset's logits at random.
                    self.y_out = self.y_out_subset[random.randrange(0, self.param.subset_num)]
            else:
                # Aggregate the per-subset latents, then classify once.
                if self.param.agg_method == 'mean':
                    self.latent = torch.mean(torch.stack(self.latent_subset), axis=0)
                elif self.param.agg_method == 'max':
                    self.latent = torch.max(torch.stack(self.latent_subset), axis=0)[0]
                elif self.param.agg_method == 'min':
                    self.latent = torch.min(torch.stack(self.latent_subset), axis=0)[0]
                elif self.param.agg_method == 'sum':
                    self.latent = torch.sum(torch.stack(self.latent_subset), axis=0)
                elif self.param.agg_method == 'concat':
                    self.latent = torch.cat(self.latent_subset, axis=1)
                elif self.param.agg_method == 'random':
                    self.latent = self.latent_subset[random.randrange(0, self.param.subset_num)]
                # Get the output tensor
                self.y_out = self.netDown(self.latent)
        else:
            VaeBasicModel.forward(self)
            # Get the output tensor
            self.y_out = self.netDown(self.latent)
    def cal_losses(self):
        """Calculate losses"""
        if self.param.use_subset_features:
            # Embedding loss is the SUM of the per-subset VAE losses;
            # VaeBasicModel.cal_losses reads self.recon_omics, so swap in
            # each subset's reconstruction before calling it.
            self.loss_embed_subset = []
            for subset in range(self.param.subset_num):
                self.recon_omics = self.recon_omics_subset[subset]
                VaeBasicModel.cal_losses(self)
                self.loss_embed_subset.append(self.loss_embed)
            self.loss_embed = sum(self.loss_embed_subset)
        else:
            VaeBasicModel.cal_losses(self)
        # Calculate the classification loss (downstream loss)
        self.loss_classifier = self.lossFuncClass(self.y_out, self.label)
        # LOSS DOWN
        self.loss_down = self.loss_classifier
        # Total objective = weighted embedding loss + downstream loss.
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
    def update(self):
        """Run one optimization step (delegates to the phase-aware base implementation)."""
        VaeBasicModel.update(self)
    def get_down_output(self):
        """
        Get output from downstream task: sample index, true label, predicted
        label (argmax) and class probabilities (softmax over logits).
        """
        with torch.no_grad():
            y_prob = F.softmax(self.y_out, dim=1)
            _, y_pred = torch.max(y_prob, 1)
            index = self.data_index
            y_true = self.label
            return {'index': index, 'y_true': y_true, 'y_pred': y_pred, 'y_prob': y_prob}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics (accuracy over all accumulated predictions).
        """
        self.metric_accuracy = (output_dict['y_true'] == output_dict['y_pred']).sum().item() / len(output_dict['y_true'])
| 9,850 | 44.396313 | 151 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_multitask_model.py | import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeMultitaskModel(VaeBasicModel):
    """
    This class implements the VAE multitasking model, using the VAE framework with the multiple downstream tasks.

    One shared embedding feeds three heads: survival prediction (MTLR),
    classification (cross entropy) and regression (L1/MSE distance).
    """
    @staticmethod
    def modify_commandline_parameters(parser, is_train=True):
        """Register survival / classification / regression options and per-task loss weights; return the parser."""
        # Downstream task network
        parser.set_defaults(net_down='multi_FC_multitask')
        # Survival prediction related
        parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
        parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
        parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
        # Classification related
        parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
        # Regression related
        parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
        parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
        # Loss combined
        parser.add_argument('--k_survival', type=float, default=1,
                            help='weight for the survival loss')
        parser.add_argument('--k_classifier', type=float, default=1,
                            help='weight for the classifier loss')
        parser.add_argument('--k_regression', type=float, default=1,
                            help='weight for the regression loss')
        return parser
    def __init__(self, param):
        """
        Initialize the VAE_multitask class.
        """
        VaeBasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        self.loss_names.extend(['survival', 'classifier', 'regression'])
        # specify the metrics you want to print out.
        self.metric_names = ['accuracy', 'rmse']
        # input tensor
        self.survival_T = None
        self.survival_E = None
        self.y_true = None
        self.label = None
        self.value = None
        # output tensor
        self.y_out_sur = None
        self.y_out_cla = None
        self.y_out_reg = None
        # define the network
        self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, param.class_num, param.time_num, None, param.init_type,
                                            param.init_gain, self.gpu_ids)
        # define the classification loss
        self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
        # define the regression distance loss
        self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
        self.loss_survival = None
        self.loss_classifier = None
        self.loss_regression = None
        self.metric_accuracy = None
        self.metric_rmse = None
        if param.survival_loss == 'MTLR':
            # Lower-triangular helper matrices used by the MTLR loss and by
            # predict_risk.
            self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
            self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
        if self.isTrain:
            # Set the optimizer
            self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Down)
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader
        Parameters:
            input_dict (dict): include the data tensor, its index, and the
                per-task targets (survival_T/E, y_true, label, value).
        """
        VaeBasicModel.set_input(self, input_dict)
        self.survival_T = input_dict['survival_T'].to(self.device)
        self.survival_E = input_dict['survival_E'].to(self.device)
        self.y_true = input_dict['y_true'].to(self.device)
        self.label = input_dict['label'].to(self.device)
        self.value = input_dict['value'].to(self.device)
    def forward(self):
        """Encode the input, then run the shared latent through the three downstream heads."""
        # Get the output tensor
        VaeBasicModel.forward(self)
        self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netDown(self.latent)
    def cal_losses(self):
        """Calculate losses"""
        VaeBasicModel.cal_losses(self)
        # Calculate the survival loss
        if self.param.survival_loss == 'MTLR':
            self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
        # Calculate the classification loss
        self.loss_classifier = self.lossFuncClass(self.y_out_cla, self.label)
        # Calculate the regression loss; targets are scaled down by
        # regression_scale (predictions are scaled back up in get_down_output).
        self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
        # LOSS DOWN: weighted sum of the three task losses
        self.loss_down = self.param.k_survival * self.loss_survival + self.param.k_classifier * self.loss_classifier + self.param.k_regression * self.loss_regression
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
    def update(self):
        """Run one optimization step (delegates to the phase-aware base implementation)."""
        VaeBasicModel.update(self)
    def get_down_output(self):
        """
        Get output from downstream task: survival curves and risk scores,
        classification predictions/probabilities, and rescaled regression
        predictions, keyed per task.
        """
        with torch.no_grad():
            index = self.data_index
            # Survival
            y_true_E = self.survival_E
            y_true_T = self.survival_T
            y_out_sur = self.y_out_sur
            predict = self.predict_risk()
            survival = predict['survival']
            risk = predict['risk']
            # Classification
            y_prob_cla = F.softmax(self.y_out_cla, dim=1)
            _, y_pred_cla = torch.max(y_prob_cla, 1)
            y_true_cla = self.label
            # Regression (undo the training-time scaling)
            y_true_reg = self.value
            y_pred_reg = self.y_out_reg * self.param.regression_scale
            return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
    def calculate_current_metrics(self, output_dict):
        """
        Calculate current metrics: classification accuracy and regression RMSE.
        """
        self.metric_accuracy = (output_dict['y_true_cla'] == output_dict['y_pred_cla']).sum().item() / len(output_dict['y_true_cla'])
        y_true_reg = output_dict['y_true_reg'].cpu().numpy()
        y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
        self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
    def get_tri_matrix(self, dimension_type=1):
        """
        Get tensor of the triangular matrix (lower-triangular ones matrix of
        shape time_num x time_num+1 for type 1, else square time_num+1).
        """
        if dimension_type == 1:
            ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
        else:
            ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
        tri_matrix = torch.tril(ones_matrix)
        return tri_matrix
    def predict_risk(self):
        """
        Predict the density, survival and hazard function, as well as the risk score
        (risk = sum over time of the cumulative hazard), from the MTLR head output.
        """
        if self.param.survival_loss == 'MTLR':
            phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
            # Normalize each row of phi into a probability density over time bins.
            div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
            density = phi / div
            survival = torch.mm(density, self.tri_matrix_2)
            hazard = density[:, :-1] / survival[:, 1:]
            cumulative_hazard = torch.cumsum(hazard, dim=1)
            risk = torch.sum(cumulative_hazard, 1)
        return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 8,142 | 44.238889 | 269 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_basic_model.py | import torch
from .basic_model import BasicModel
from . import networks
from . import losses
class VaeBasicModel(BasicModel):
    """
    This is the basic VAE model class, called by all other VAE son classes.

    Owns the embedding network (netEmbed), its reconstruction/KL losses and
    optimizer, and implements the three-phase update scheme:
    p1 = VAE only, p2 = downstream head only, p3 = both jointly.
    """
    def __init__(self, param):
        """
        Initialize the VAE basic class.

        Parameters:
            param: parsed option object; omics_mode ('a'/'b'/'c'/'ab'/'abc')
                selects which omics types are modelled and which
                reconstruction losses are tracked.
        """
        BasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        if param.omics_mode == 'abc':
            self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
        # NOTE(review): this `if` breaks the elif chain started above; it is
        # harmless only because the omics_mode values are mutually exclusive.
        if param.omics_mode == 'ab':
            self.loss_names = ['recon_A', 'recon_B', 'kl']
        elif param.omics_mode == 'b':
            self.loss_names = ['recon_B', 'kl']
        elif param.omics_mode == 'a':
            self.loss_names = ['recon_A', 'kl']
        elif param.omics_mode == 'c':
            self.loss_names = ['recon_C', 'kl']
        # specify the models you want to save to the disk and load.
        self.model_names = ['Embed', 'Down']
        # input tensor
        self.input_omics = []
        self.data_index = None           # The indexes of input data
        self.input_omics_subsets = []
        # for feature subsetting: per-omics feature count of each subset
        if self.param.use_subset_features:
            self.omics_subset_dims = []
            for i in range(3):
                self.omics_subset_dims.append(param.omics_dims[i] // param.subset_num)
        else:
            self.omics_subset_dims = None
        # output tensor
        self.z = None
        self.recon_omics = None
        self.mean = None
        self.log_var = None
        # define the network
        self.netEmbed = networks.define_VAE(param, param.net_VAE, self.omics_subset_dims, param.omics_dims, param.omics_mode,
                                            param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
                                            param.dropout_p, param.latent_space_dim, param.init_type, param.init_gain,
                                            self.gpu_ids)
        # define the reconstruction loss
        self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
        self.loss_recon_A = None
        self.loss_recon_B = None
        self.loss_recon_C = None
        self.loss_recon = None
        self.loss_kl = None
        if self.isTrain:
            # Set the optimizer
            # netEmbed and netDown can set to different initial learning rate
            self.optimizer_Embed = torch.optim.Adam(self.netEmbed.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
            # optimizer list was already defined in BaseModel
            self.optimizers.append(self.optimizer_Embed)
            # The downstream optimizer is created by the subclass.
            self.optimizer_Down = None
    def set_input(self, input_dict):
        """
        Unpack input data from the output dictionary of the dataloader
        Parameters:
            input_dict (dict): include the data tensor and its index.
        """
        self.input_omics = []
        for i in range(0, 3):
            if i == 1 and self.param.ch_separate:
                # Omics type B is provided split per chromosome (23 tensors).
                input_B = []
                for ch in range(0, 23):
                    input_B.append(input_dict['input_omics'][1][ch].to(self.device))
                self.input_omics.append(input_B)
            else:
                self.input_omics.append(input_dict['input_omics'][i].to(self.device))
        if self.param.use_subset_features:
            # Slice each omics tensor into subset_num contiguous feature
            # blocks along the feature dimension.
            self.input_omics_subsets = []
            for i in range(self.param.subset_num):
                input_subset = []
                for j in range(3):
                    subset_size = self.input_omics[j].shape[1] // self.param.subset_num
                    indices = torch.tensor(range(subset_size * i, subset_size * (i+1))).to(self.device)
                    input_subset.append(torch.index_select(self.input_omics[j], 1, indices))
                self.input_omics_subsets.append(input_subset)
        self.data_index = input_dict['index']
    def forward(self):
        """
        Encode the (possibly subsetted) omics input and set self.latent
        according to the training phase.
        """
        # Get the output tensor
        if self.param.use_subset_features:
            # self.subset is set by the caller before each per-subset pass.
            self.z, self.recon_omics, self.mean, self.log_var = self.netEmbed(self.input_omics_subsets[self.subset])
            # define the latent: for subsets the sampled z is used
            if self.phase == 'p1' or self.phase == 'p3':
                self.latent = self.z
            elif self.phase == 'p2':
                # p2 trains only the downstream head, so detach from the
                # encoder graph.
                self.latent = self.z.detach()
        else:
            self.z, self.recon_omics, self.mean, self.log_var = self.netEmbed(self.input_omics)
            # define the latent: full-feature mode uses the posterior mean
            if self.phase == 'p1' or self.phase == 'p3':
                self.latent = self.mean
            elif self.phase == 'p2':
                self.latent = self.mean.detach()
    def cal_losses(self):
        """Calculate losses"""
        # Calculate the reconstruction loss for A
        if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
        else:
            self.loss_recon_A = 0
        # Calculate the reconstruction loss for B
        if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            if self.param.ch_separate:
                # Concatenate the per-chromosome tensors before comparing.
                recon_omics_B = torch.cat(self.recon_omics[1], -1)
                input_omics_B = torch.cat(self.input_omics[1], -1)
                self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
            else:
                self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
        else:
            self.loss_recon_B = 0
        # Calculate the reconstruction loss for C
        if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
            self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
        else:
            self.loss_recon_C = 0
        # Overall reconstruction loss
        if self.param.reduction == 'sum':
            self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
        elif self.param.reduction == 'mean':
            self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
        # Calculate the kl loss
        self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
        # Calculate the overall vae loss (embedding loss)
        # LOSS EMBED
        self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
    def update(self):
        """
        Run one optimization step according to the current training phase:
        p1 trains the embedding (VAE) only, p2 the downstream head only,
        p3 both jointly on the combined loss.
        """
        if self.phase == 'p1':
            self.forward()
            self.optimizer_Embed.zero_grad()                # Set gradients to zero
            self.cal_losses()                               # Calculate losses
            self.loss_embed.backward()                      # Backpropagation
            self.optimizer_Embed.step()                     # Update weights
        elif self.phase == 'p2':
            self.forward()
            self.optimizer_Down.zero_grad()                 # Set gradients to zero
            self.cal_losses()                               # Calculate losses
            self.loss_down.backward()                       # Backpropagation
            self.optimizer_Down.step()                      # Update weights
        elif self.phase == 'p3':
            self.forward()
            self.optimizer_Embed.zero_grad()                # Set gradients to zero
            self.optimizer_Down.zero_grad()
            self.cal_losses()                               # Calculate losses
            self.loss_All.backward()                        # Backpropagation
            self.optimizer_Embed.step()                     # Update weights
            self.optimizer_Down.step()
| 12,659 | 48.84252 | 153 | py |
SubOmiEmbed | SubOmiEmbed-main/models/vae_multitask_gn_model.py | import torch
import torch.nn as nn
from .basic_model import BasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeMultitaskGNModel(BasicModel):
"""
This class implements the VAE multitasking model with GradNorm, using the VAE framework with the multiple downstream tasks.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# Downstream task network
parser.set_defaults(net_down='multi_FC_multitask')
# Survival prediction related
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
# Classification related
parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
# Regression related
parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
# GradNorm ralated
parser.add_argument('--alpha', type=float, default=1.5, help='the additional hyperparameter for GradNorm')
parser.add_argument('--lr_gn', type=float, default=1e-3, help='the learning rate for GradNorm')
parser.add_argument('--k_survival', type=float, default=1.0, help='initial weight for the survival loss')
parser.add_argument('--k_classifier', type=float, default=1.0, help='initial weight for the classifier loss')
parser.add_argument('--k_regression', type=float, default=1.0, help='initial weight for the regression loss')
return parser
    def __init__(self, param):
        """
        Initialize the VAE_multitask class.

        Parameters:
            param -- the parsed experiment parameters (argparse namespace)
        """
        BasicModel.__init__(self, param)
        # specify the training losses you want to print out.
        # Which reconstruction losses exist depends on the omics types in use.
        if param.omics_mode == 'abc':
            self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
        # NOTE(review): this second `if` restarts the chain; harmless because
        # 'abc' can never equal 'ab', but an `elif` would read more clearly.
        if param.omics_mode == 'ab':
            self.loss_names = ['recon_A', 'recon_B', 'kl']
        elif param.omics_mode == 'b':
            self.loss_names = ['recon_B', 'kl']
        elif param.omics_mode == 'a':
            self.loss_names = ['recon_A', 'kl']
        elif param.omics_mode == 'c':
            self.loss_names = ['recon_C', 'kl']
        self.loss_names.extend(['survival', 'classifier', 'regression', 'gradient', 'w_sur', 'w_cla', 'w_reg'])
        # specify the models you want to save to the disk and load.
        self.model_names = ['All']
        # input tensor
        self.input_omics = []
        self.data_index = None  # The indexes of input data
        self.survival_T = None  # survival times
        self.survival_E = None  # event indicators
        self.y_true = None      # survival targets used by the MTLR loss
        self.label = None       # classification target
        self.value = None       # regression target
        # output tensor
        self.z = None           # sampled latent vector
        self.recon_omics = None
        self.mean = None
        self.log_var = None
        self.y_out_sur = None   # survival head output
        self.y_out_cla = None   # classification head logits
        self.y_out_reg = None   # regression head output
        # specify the metrics you want to print out.
        self.metric_names = ['accuracy', 'rmse']
        # define the network
        self.netAll = networks.define_net(param.net_VAE, param.net_down, param.omics_dims, param.omics_mode,
                                          param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
                                          param.dropout_p, param.latent_space_dim, param.class_num, param.time_num, None,
                                          param.init_type, param.init_gain, self.gpu_ids)
        # define the reconstruction loss
        self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
        # define the classification loss
        self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
        # define the regression distance loss
        self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
        self.loss_recon_A = None
        self.loss_recon_B = None
        self.loss_recon_C = None
        self.loss_recon = None
        self.loss_kl = None
        self.loss_survival = None
        self.loss_classifier = None
        self.loss_regression = None
        self.loss_gradient = 0
        # logged copies of the (scaled) GradNorm task weights
        self.loss_w_sur = None
        self.loss_w_cla = None
        self.loss_w_reg = None
        self.task_losses = None
        self.weighted_losses = None
        self.initial_losses = None  # task losses at the first GradNorm iteration
        self.metric_accuracy = None
        self.metric_rmse = None
        if param.survival_loss == 'MTLR':
            # precomputed lower-triangular matrices used by the MTLR loss and risk prediction
            self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
            self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
        # Weights of multiple downstream tasks
        self.loss_weights = nn.Parameter(torch.ones(3, requires_grad=True, device=self.device))
        if self.isTrain:
            # Set the optimizer
            # the GradNorm task weights get their own learning rate (lr_gn)
            self.optimizer_All = torch.optim.Adam([{'params': self.netAll.parameters(), 'lr': param.lr, 'betas': (param.beta1, 0.999), 'weight_decay': param.weight_decay},
                                                   {'params': self.loss_weights, 'lr': param.lr_gn}])
            self.optimizers.append(self.optimizer_All)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
self.input_omics = []
for i in range(0, 3):
if i == 1 and self.param.ch_separate:
input_B = []
for ch in range(0, 23):
input_B.append(input_dict['input_omics'][1][ch].to(self.device))
self.input_omics.append(input_B)
else:
self.input_omics.append(input_dict['input_omics'][i].to(self.device))
self.data_index = input_dict['index']
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
self.label = input_dict['label'].to(self.device)
self.value = input_dict['value'].to(self.device)
def forward(self):
# Get the output tensor
self.z, self.recon_omics, self.mean, self.log_var, self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netAll(self.input_omics)
# define the latent
self.latent = self.mean
    def cal_losses(self):
        """
        Calculate losses.

        Computes the per-omics reconstruction losses, the KL loss, the VAE
        (embedding) loss, the three downstream task losses, and combines the
        downstream losses with the learnable GradNorm weights into
        self.loss_down and the overall self.loss_All.
        """
        # Calculate the reconstruction loss for A
        if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
        else:
            self.loss_recon_A = 0
        # Calculate the reconstruction loss for B
        if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
            if self.param.ch_separate:
                # omics B is stored per chromosome; concatenate before comparing
                recon_omics_B = torch.cat(self.recon_omics[1], -1)
                input_omics_B = torch.cat(self.input_omics[1], -1)
                self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
            else:
                self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
        else:
            self.loss_recon_B = 0
        # Calculate the reconstruction loss for C
        if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
            self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
        else:
            self.loss_recon_C = 0
        # Overall reconstruction loss
        if self.param.reduction == 'sum':
            self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
        elif self.param.reduction == 'mean':
            self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
        # Calculate the kl loss
        self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
        # Calculate the overall vae loss (embedding loss)
        # LOSS EMBED
        self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
        # Calculate the survival loss
        # NOTE(review): self.loss_survival is only assigned when survival_loss
        # is 'MTLR'; other values would leave it None below — confirm intended.
        if self.param.survival_loss == 'MTLR':
            self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
        # Calculate the classification loss
        self.loss_classifier = self.lossFuncClass(self.y_out_cla, self.label)
        # Calculate the regression loss
        self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
        # Calculate the weighted downstream losses
        # Add initial weights
        self.task_losses = torch.stack([self.param.k_survival * self.loss_survival, self.param.k_classifier * self.loss_classifier, self.param.k_regression * self.loss_regression])
        self.weighted_losses = self.loss_weights * self.task_losses
        # LOSS DOWN
        self.loss_down = self.weighted_losses.sum()
        self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
        # Log the loss weights (scaled by their initial k_* factors)
        self.loss_w_sur = self.loss_weights[0] * self.param.k_survival
        self.loss_w_cla = self.loss_weights[1] * self.param.k_classifier
        self.loss_w_reg = self.loss_weights[2] * self.param.k_regression
    def update(self):
        """
        Run one optimization step for the current training phase.

        Phase 'p1' optimizes only the VAE (embedding) loss, phase 'p2' only
        the weighted downstream loss, and phase 'p3' the joint loss while
        balancing the task weights with GradNorm (Chen et al., 2018).
        """
        if self.phase == 'p1':
            self.forward()
            self.optimizer_All.zero_grad()  # Set gradients to zero
            self.cal_losses()  # Calculate losses
            self.loss_embed.backward()  # Backpropagation
            self.optimizer_All.step()  # Update weights
        elif self.phase == 'p2':
            self.forward()
            self.optimizer_All.zero_grad()  # Set gradients to zero
            self.cal_losses()  # Calculate losses
            self.loss_down.backward()  # Backpropagation
            self.optimizer_All.step()  # Update weights
        elif self.phase == 'p3':
            self.forward()
            self.cal_losses()  # Calculate losses
            self.optimizer_All.zero_grad()  # Set gradients to zero
            # Calculate the GradNorm gradients
            # W: parameters of the last shared encoder layer, against which the
            # per-task gradient norms are measured
            if isinstance(self.netAll, torch.nn.DataParallel):
                W = list(self.netAll.module.get_last_encode_layer().parameters())
            else:
                W = list(self.netAll.get_last_encode_layer().parameters())
            grad_norms = []
            for weight, loss in zip(self.loss_weights, self.task_losses):
                # retain_graph so the later loss_All.backward() can reuse the graph
                grad = torch.autograd.grad(loss, W, retain_graph=True)
                grad_norms.append(torch.norm(weight * grad[0]))
            grad_norms = torch.stack(grad_norms)
            # self.iter presumably counts iterations within this phase
            # (maintained by the training loop / BasicModel) — TODO confirm
            if self.iter == 0:
                self.initial_losses = self.task_losses.detach()
            # Calculate the constant targets
            with torch.no_grad():
                # loss ratios
                loss_ratios = self.task_losses / self.initial_losses
                # inverse training rate
                inverse_train_rates = loss_ratios / loss_ratios.mean()
                constant_terms = grad_norms.mean() * (inverse_train_rates ** self.param.alpha)
            # Calculate the gradient loss
            self.loss_gradient = (grad_norms - constant_terms).abs().sum()
            # Set the gradients of weights
            loss_weights_grad = torch.autograd.grad(self.loss_gradient, self.loss_weights)[0]
            self.loss_All.backward()
            # overwrite the weights' gradients with the GradNorm gradient
            # (loss_All.backward() would otherwise have populated them)
            self.loss_weights.grad = loss_weights_grad
            self.optimizer_All.step()  # Update weights
            # Re-normalize the losses weights so they keep summing to the task count
            with torch.no_grad():
                normalize_coeff = len(self.loss_weights) / self.loss_weights.sum()
                self.loss_weights.data = self.loss_weights.data * normalize_coeff
    def get_down_output(self):
        """
        Get output from downstream task.

        Returns:
            dict with the data indexes, survival outputs (true T/E, survival
            function, risk score, raw head output), classification outputs
            (true labels, predicted labels, class probabilities) and
            regression outputs (true values, rescaled predictions).
        """
        with torch.no_grad():
            index = self.data_index
            # Survival
            y_true_E = self.survival_E
            y_true_T = self.survival_T
            y_out_sur = self.y_out_sur
            predict = self.predict_risk()
            # density = predict['density']
            survival = predict['survival']
            # hazard = predict['hazard']
            risk = predict['risk']
            # Classification
            y_prob_cla = F.softmax(self.y_out_cla, dim=1)
            _, y_pred_cla = torch.max(y_prob_cla, 1)
            y_true_cla = self.label
            # Regression: undo the normalization applied to the target
            y_true_reg = self.value
            y_pred_reg = self.y_out_reg * self.param.regression_scale
            return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy = (output_dict['y_true_cla'] == output_dict['y_pred_cla']).sum().item() / len(output_dict['y_true_cla'])
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
    def predict_risk(self):
        """
        Predict the density, survival and hazard function, as well as the risk score.

        Returns:
            dict with 'density', 'survival', 'hazard' and 'risk' tensors.
            NOTE(review): implicitly returns None when survival_loss is not
            'MTLR' — confirm callers only run with MTLR.
        """
        if self.param.survival_loss == 'MTLR':
            # unnormalized interval scores from the MTLR head
            phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
            # per-sample normalizer broadcast across the time axis
            div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
            density = phi / div
            # survival function = cumulative density through the triangular matrix
            survival = torch.mm(density, self.tri_matrix_2)
            hazard = density[:, :-1] / survival[:, 1:]
            cumulative_hazard = torch.cumsum(hazard, dim=1)
            # scalar risk score per sample: sum of the cumulative hazard
            risk = torch.sum(cumulative_hazard, 1)
            return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 15,071 | 45.091743 | 269 | py |
SubOmiEmbed | SubOmiEmbed-main/util/visualizer.py | import os
import time
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.preprocessing import label_binarize
from util import util
from util import metrics
from torch.utils.tensorboard import SummaryWriter
class Visualizer:
"""
This class print/save logging information
"""
    def __init__(self, param):
        """
        Initialize the Visualizer class.

        Creates the experiment output directory, the plain-text train/test log
        and summary files, and one TensorBoard writer per phase. The
        TensorBoard log directories are cleared on every run.
        """
        self.param = param
        self.output_path = os.path.join(param.checkpoints_dir, param.experiment_name)
        tb_dir = os.path.join(self.output_path, 'tb_log')
        util.mkdir(tb_dir)
        if param.isTrain:
            # Create a logging file to store training losses
            self.train_log_filename = os.path.join(self.output_path, 'train_log.txt')
            with open(self.train_log_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Training Log ({:s}) -----------------------\n'.format(now))
            self.train_summary_filename = os.path.join(self.output_path, 'train_summary.txt')
            with open(self.train_summary_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Training Summary ({:s}) -----------------------\n'.format(now))
            # Create log folder for TensorBoard
            tb_train_dir = os.path.join(self.output_path, 'tb_log', 'train')
            util.mkdir(tb_train_dir)
            # wipe stale event files from previous runs
            util.clear_dir(tb_train_dir)
            # Create TensorBoard writer
            self.train_writer = SummaryWriter(log_dir=tb_train_dir)
        if param.isTest:
            # Create a logging file to store testing metrics
            self.test_log_filename = os.path.join(self.output_path, 'test_log.txt')
            with open(self.test_log_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Testing Log ({:s}) -----------------------\n'.format(now))
            self.test_summary_filename = os.path.join(self.output_path, 'test_summary.txt')
            with open(self.test_summary_filename, 'a') as log_file:
                now = time.strftime('%c')
                log_file.write('----------------------- Testing Summary ({:s}) -----------------------\n'.format(now))
            # Create log folder for TensorBoard
            tb_test_dir = os.path.join(self.output_path, 'tb_log', 'test')
            util.mkdir(tb_test_dir)
            util.clear_dir(tb_test_dir)
            # Create TensorBoard writer
            self.test_writer = SummaryWriter(log_dir=tb_test_dir)
def print_train_log(self, epoch, iteration, losses_dict, metrics_dict, load_time, comp_time, batch_size, dataset_size, with_time=True):
"""
print train log on console and save the message to the disk
Parameters:
epoch (int) -- current epoch
iteration (int) -- current training iteration during this epoch
losses_dict (OrderedDict) -- training losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
load_time (float) -- data loading time per data point (normalized by batch_size)
comp_time (float) -- computational time per data point (normalized by batch_size)
batch_size (int) -- batch size of training
dataset_size (int) -- size of the training dataset
with_time (bool) -- print the running time or not
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
if with_time:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d} Load_t: {:.3f} Comp_t: {:.3f}] '.format(epoch, data_point_covered, load_time, comp_time)
else:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d}]\n'.format(epoch, data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message) # print the message
with open(self.train_log_filename, 'a') as log_file:
log_file.write(message + '\n') # save the message
    def print_train_summary(self, epoch, losses_dict, output_dict, train_time, current_lr):
        """
        print the summary of this training epoch
        Parameters:
            epoch (int)                 -- epoch number of this training model
            losses_dict (OrderedDict)   -- the losses dictionary
            output_dict (OrderedDict)   -- the downstream output dictionary
            train_time (float)          -- time used for training this epoch
            current_lr (float)          -- the learning rate of this epoch

        Writes a tab-separated row to the summary file, logs losses/metrics to
        TensorBoard, and prints a human-readable summary to the console.
        """
        # write_message: tab-separated values for the summary file;
        # print_message: human-readable console output
        write_message = '{:s}\t'.format(str(epoch))
        print_message = '[TRAIN] [Epoch: {:3d}]\n'.format(int(epoch))
        for name, loss in losses_dict.items():
            write_message += '{:.6f}\t'.format(np.mean(loss))
            print_message += name + ': {:.3f} '.format(np.mean(loss))
            self.train_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
        metrics_dict = self.get_epoch_metrics(output_dict)
        for name, metric in metrics_dict.items():
            write_message += '{:.6f}\t'.format(metric)
            print_message += name + ': {:.3f} '.format(metric)
            self.train_writer.add_scalar('metric_'+name, metric, epoch)
        train_time_msg = 'Training time used: {:.3f}s'.format(train_time)
        print_message += '\n' + train_time_msg
        # the timing line goes to the per-iteration log file, not the summary
        with open(self.train_log_filename, 'a') as log_file:
            log_file.write(train_time_msg + '\n')
        current_lr_msg = 'Learning rate for this epoch: {:.7f}'.format(current_lr)
        print_message += '\n' + current_lr_msg
        self.train_writer.add_scalar('lr', current_lr, epoch)
        with open(self.train_summary_filename, 'a') as log_file:
            log_file.write(write_message + '\n')
        print(print_message)
def print_test_log(self, epoch, iteration, losses_dict, metrics_dict, batch_size, dataset_size):
"""
print performance metrics of this iteration on console and save the message to the disk
Parameters:
epoch (int) -- epoch number of this testing model
iteration (int) -- current testing iteration during this epoch
losses_dict (OrderedDict) -- training losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
batch_size (int) -- batch size of testing
dataset_size (int) -- size of the testing dataset
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
message = '[TEST] [Epoch: {:3d} Iter: {:4d}] '.format(int(epoch), data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message)
with open(self.test_log_filename, 'a') as log_file:
log_file.write(message + '\n')
    def print_test_summary(self, epoch, losses_dict, output_dict, test_time):
        """
        print the summary of this testing epoch
        Parameters:
            epoch (int)                 -- epoch number of this testing model
            losses_dict (OrderedDict)   -- the losses dictionary
            output_dict (OrderedDict)   -- the downstream output dictionary
            test_time (float)           -- time used for testing this epoch

        Writes a tab-separated row (metrics only) to the test summary file,
        logs losses/metrics to TensorBoard, and prints a readable summary.
        """
        write_message = '{:s}\t'.format(str(epoch))
        print_message = '[TEST] [Epoch: {:3d}] '.format(int(epoch))
        for name, loss in losses_dict.items():
            # losses are deliberately excluded from the summary file (kept below)
            # write_message += '{:.6f}\t'.format(np.mean(loss))
            print_message += name + ': {:.3f} '.format(np.mean(loss))
            self.test_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
        metrics_dict = self.get_epoch_metrics(output_dict)
        for name, metric in metrics_dict.items():
            write_message += '{:.6f}\t'.format(metric)
            print_message += name + ': {:.3f} '.format(metric)
            self.test_writer.add_scalar('metric_' + name, metric, epoch)
        with open(self.test_summary_filename, 'a') as log_file:
            log_file.write(write_message + '\n')
        test_time_msg = 'Testing time used: {:.3f}s'.format(test_time)
        print_message += '\n' + test_time_msg
        print(print_message)
        with open(self.test_log_filename, 'a') as log_file:
            log_file.write(test_time_msg + '\n')
    def get_epoch_metrics(self, output_dict):
        """
        Get the downstream task metrics for whole epoch
        Parameters:
            output_dict (OrderedDict) -- the output dictionary used to compute the downstream task metrics

        Returns a dict of scalar metrics whose keys depend on
        self.param.downstream_task ('classification', 'regression',
        'survival', 'multitask' or 'alltask').
        NOTE(review): sklearn's `squared=False` keyword used below was
        deprecated in scikit-learn 1.4 and removed in 1.6 — pin the
        dependency or switch to `mean_squared_error(...) ** 0.5`.
        """
        if self.param.downstream_task == 'classification':
            y_true = output_dict['y_true'].cpu().numpy()
            y_true_binary = label_binarize(y_true, classes=range(self.param.class_num))
            y_pred = output_dict['y_pred'].cpu().numpy()
            y_prob = output_dict['y_prob'].cpu().numpy()
            if self.param.class_num == 2:
                # binary case: roc_auc_score expects the positive-class probability
                y_prob = y_prob[:, 1]
            accuracy = sk.metrics.accuracy_score(y_true, y_pred)
            precision = sk.metrics.precision_score(y_true, y_pred, average='macro', zero_division=0)
            recall = sk.metrics.recall_score(y_true, y_pred, average='macro', zero_division=0)
            f1 = sk.metrics.f1_score(y_true, y_pred, average='macro', zero_division=0)
            try:
                auc = sk.metrics.roc_auc_score(y_true_binary, y_prob, multi_class='ovo', average='macro')
            except ValueError:
                # e.g. only one class present in y_true
                auc = -1
                print('ValueError: ROC AUC score is not defined in this case.')
            return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'auc': auc}
        elif self.param.downstream_task == 'regression':
            y_true = output_dict['y_true'].cpu().numpy()
            y_pred = output_dict['y_pred'].cpu().detach().numpy()
            mse = sk.metrics.mean_squared_error(y_true, y_pred)
            rmse = sk.metrics.mean_squared_error(y_true, y_pred, squared=False)
            mae = sk.metrics.mean_absolute_error(y_true, y_pred)
            medae = sk.metrics.median_absolute_error(y_true, y_pred)
            r2 = sk.metrics.r2_score(y_true, y_pred)
            return {'mse': mse, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
        elif self.param.downstream_task == 'survival':
            metrics_start_time = time.time()
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            y_pred_survival = output_dict['survival'].cpu().numpy()
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            try:
                c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
            except ValueError:
                c_index = -1
                print('ValueError: NaNs detected in input when calculating c-index.')
            try:
                ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
            except ValueError:
                ibs = -1
                print('ValueError: NaNs detected in input when calculating integrated brier score.')
            metrics_time = time.time() - metrics_start_time
            print('Metrics computing time: {:.3f}s'.format(metrics_time))
            return {'c-index': c_index, 'ibs': ibs}
        elif self.param.downstream_task == 'multitask':
            metrics_start_time = time.time()
            # Survival
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            y_pred_survival = output_dict['survival'].cpu().numpy()
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            try:
                c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
            except ValueError:
                c_index = -1
                print('ValueError: NaNs detected in input when calculating c-index.')
            try:
                ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
            except ValueError:
                ibs = -1
                print('ValueError: NaNs detected in input when calculating integrated brier score.')
            # Classification
            y_true_cla = output_dict['y_true_cla'].cpu().numpy()
            # NOTE(review): y_true_cla_binary is only needed by the disabled
            # AUC computation below
            y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num))
            y_pred_cla = output_dict['y_pred_cla'].cpu().numpy()
            y_prob_cla = output_dict['y_prob_cla'].cpu().numpy()
            if self.param.class_num == 2:
                y_prob_cla = y_prob_cla[:, 1]
            accuracy = sk.metrics.accuracy_score(y_true_cla, y_pred_cla)
            precision = sk.metrics.precision_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
            recall = sk.metrics.recall_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
            f1 = sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
            '''
            try:
                auc = sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro')
            except ValueError:
                auc = -1
                print('ValueError: ROC AUC score is not defined in this case.')
            '''
            # Regression
            y_true_reg = output_dict['y_true_reg'].cpu().numpy()
            y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
            # mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
            rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
            mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
            medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
            r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
            metrics_time = time.time() - metrics_start_time
            print('Metrics computing time: {:.3f}s'.format(metrics_time))
            return {'c-index': c_index, 'ibs': ibs, 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
        elif self.param.downstream_task == 'alltask':
            metrics_start_time = time.time()
            # Survival
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            y_pred_survival = output_dict['survival'].cpu().numpy()
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            try:
                c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
            except ValueError:
                c_index = -1
                print('ValueError: NaNs detected in input when calculating c-index.')
            try:
                ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
            except ValueError:
                ibs = -1
                print('ValueError: NaNs detected in input when calculating integrated brier score.')
            # Classification: one metric set per classification sub-task
            accuracy = []
            f1 = []
            auc = []
            for i in range(self.param.task_num - 2):
                y_true_cla = output_dict['y_true_cla'][i].cpu().numpy()
                y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num[i]))
                y_pred_cla = output_dict['y_pred_cla'][i].cpu().numpy()
                y_prob_cla = output_dict['y_prob_cla'][i].cpu().numpy()
                if self.param.class_num[i] == 2:
                    y_prob_cla = y_prob_cla[:, 1]
                accuracy.append(sk.metrics.accuracy_score(y_true_cla, y_pred_cla))
                f1.append(sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0))
                try:
                    auc.append(sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro'))
                except ValueError:
                    auc.append(-1)
                    print('ValueError: ROC AUC score is not defined in this case.')
            # Regression
            y_true_reg = output_dict['y_true_reg'].cpu().numpy()
            y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
            # mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
            rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
            # mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
            # medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
            r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
            metrics_time = time.time() - metrics_start_time
            print('Metrics computing time: {:.3f}s'.format(metrics_time))
            # NOTE(review): this return hard-codes exactly five classification
            # sub-tasks (task_num == 7) — it will IndexError for other task_num
            return {'c-index': c_index, 'ibs': ibs, 'accuracy_1': accuracy[0], 'f1_1': f1[0], 'auc_1': auc[0], 'accuracy_2': accuracy[1], 'f1_2': f1[1], 'auc_2': auc[1], 'accuracy_3': accuracy[2], 'f1_3': f1[2], 'auc_3': auc[2], 'accuracy_4': accuracy[3], 'f1_4': f1[3], 'auc_4': auc[3], 'accuracy_5': accuracy[4], 'f1_5': f1[4], 'auc_5': auc[4], 'rmse': rmse, 'r2': r2}
    def save_output_dict(self, output_dict):
        """
        Save the downstream task output to disk
        Parameters:
            output_dict (OrderedDict) -- the downstream task output dictionary to be saved

        Writes TSV files into <output_path>/down_output; which files are
        written depends on self.param.downstream_task.
        """
        down_path = os.path.join(self.output_path, 'down_output')
        util.mkdir(down_path)
        if self.param.downstream_task == 'classification':
            # Prepare files
            index = output_dict['index'].numpy()
            y_true = output_dict['y_true'].cpu().numpy()
            y_pred = output_dict['y_pred'].cpu().numpy()
            y_prob = output_dict['y_prob'].cpu().numpy()
            # map data indexes back to sample names
            sample_list = self.param.sample_list[index]
            # Output files
            y_df = pd.DataFrame({'sample': sample_list, 'y_true': y_true, 'y_pred': y_pred}, index=index)
            y_df_path = os.path.join(down_path, 'y_df.tsv')
            y_df.to_csv(y_df_path, sep='\t')
            prob_df = pd.DataFrame(y_prob, columns=range(self.param.class_num), index=sample_list)
            y_prob_path = os.path.join(down_path, 'y_prob.tsv')
            prob_df.to_csv(y_prob_path, sep='\t')
        elif self.param.downstream_task == 'regression':
            # Prepare files
            index = output_dict['index'].numpy()
            y_true = output_dict['y_true'].cpu().numpy()
            y_pred = np.squeeze(output_dict['y_pred'].cpu().detach().numpy())
            sample_list = self.param.sample_list[index]
            # Output files
            y_df = pd.DataFrame({'sample': sample_list, 'y_true': y_true, 'y_pred': y_pred}, index=index)
            y_df_path = os.path.join(down_path, 'y_df.tsv')
            y_df.to_csv(y_df_path, sep='\t')
        elif self.param.downstream_task == 'survival':
            # Prepare files
            index = output_dict['index'].numpy()
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            survival_function = output_dict['survival'].cpu().numpy()
            y_out = output_dict['y_out'].cpu().numpy()
            sample_list = self.param.sample_list[index]
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            # Output files
            y_df = pd.DataFrame({'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
            y_df_path = os.path.join(down_path, 'y_df.tsv')
            y_df.to_csv(y_df_path, sep='\t')
            survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
            survival_function_path = os.path.join(down_path, 'survival_function.tsv')
            survival_function_df.to_csv(survival_function_path, sep='\t')
            y_out_df = pd.DataFrame(y_out, index=sample_list)
            y_out_path = os.path.join(down_path, 'y_out.tsv')
            y_out_df.to_csv(y_out_path, sep='\t')
        elif self.param.downstream_task == 'multitask':
            # Survival
            index = output_dict['index'].numpy()
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            survival_function = output_dict['survival'].cpu().numpy()
            y_out_sur = output_dict['y_out_sur'].cpu().numpy()
            sample_list = self.param.sample_list[index]
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            y_df_sur = pd.DataFrame(
                {'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
            y_df_sur_path = os.path.join(down_path, 'y_df_survival.tsv')
            y_df_sur.to_csv(y_df_sur_path, sep='\t')
            survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
            survival_function_path = os.path.join(down_path, 'survival_function.tsv')
            survival_function_df.to_csv(survival_function_path, sep='\t')
            y_out_sur_df = pd.DataFrame(y_out_sur, index=sample_list)
            y_out_sur_path = os.path.join(down_path, 'y_out_survival.tsv')
            y_out_sur_df.to_csv(y_out_sur_path, sep='\t')
            # Classification
            y_true_cla = output_dict['y_true_cla'].cpu().numpy()
            y_pred_cla = output_dict['y_pred_cla'].cpu().numpy()
            y_prob_cla = output_dict['y_prob_cla'].cpu().numpy()
            y_df_cla = pd.DataFrame({'sample': sample_list, 'y_true': y_true_cla, 'y_pred': y_pred_cla}, index=index)
            y_df_cla_path = os.path.join(down_path, 'y_df_classification.tsv')
            y_df_cla.to_csv(y_df_cla_path, sep='\t')
            prob_cla_df = pd.DataFrame(y_prob_cla, columns=range(self.param.class_num), index=sample_list)
            y_prob_cla_path = os.path.join(down_path, 'y_prob_classification.tsv')
            prob_cla_df.to_csv(y_prob_cla_path, sep='\t')
            # Regression
            y_true_reg = output_dict['y_true_reg'].cpu().numpy()
            y_pred_reg = np.squeeze(output_dict['y_pred_reg'].cpu().detach().numpy())
            y_df_reg = pd.DataFrame({'sample': sample_list, 'y_true': y_true_reg, 'y_pred': y_pred_reg}, index=index)
            y_df_reg_path = os.path.join(down_path, 'y_df_regression.tsv')
            y_df_reg.to_csv(y_df_reg_path, sep='\t')
        elif self.param.downstream_task == 'alltask':
            # Survival
            index = output_dict['index'].numpy()
            y_true_E = output_dict['y_true_E'].cpu().numpy()
            y_true_T = output_dict['y_true_T'].cpu().numpy()
            y_pred_risk = output_dict['risk'].cpu().numpy()
            survival_function = output_dict['survival'].cpu().numpy()
            y_out_sur = output_dict['y_out_sur'].cpu().numpy()
            sample_list = self.param.sample_list[index]
            time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
            y_df_sur = pd.DataFrame(
                {'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
            y_df_sur_path = os.path.join(down_path, 'y_df_survival.tsv')
            y_df_sur.to_csv(y_df_sur_path, sep='\t')
            survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
            survival_function_path = os.path.join(down_path, 'survival_function.tsv')
            survival_function_df.to_csv(survival_function_path, sep='\t')
            y_out_sur_df = pd.DataFrame(y_out_sur, index=sample_list)
            y_out_sur_path = os.path.join(down_path, 'y_out_survival.tsv')
            y_out_sur_df.to_csv(y_out_sur_path, sep='\t')
            # Classification: one pair of files per classification sub-task
            for i in range(self.param.task_num - 2):
                y_true_cla = output_dict['y_true_cla'][i].cpu().numpy()
                y_pred_cla = output_dict['y_pred_cla'][i].cpu().numpy()
                y_prob_cla = output_dict['y_prob_cla'][i].cpu().numpy()
                y_df_cla = pd.DataFrame({'sample': sample_list, 'y_true': y_true_cla, 'y_pred': y_pred_cla}, index=index)
                y_df_cla_path = os.path.join(down_path, 'y_df_classification_'+str(i+1)+'.tsv')
                y_df_cla.to_csv(y_df_cla_path, sep='\t')
                prob_cla_df = pd.DataFrame(y_prob_cla, columns=range(self.param.class_num[i]), index=sample_list)
                y_prob_cla_path = os.path.join(down_path, 'y_prob_classification_'+str(i+1)+'.tsv')
                prob_cla_df.to_csv(y_prob_cla_path, sep='\t')
            # Regression
            y_true_reg = output_dict['y_true_reg'].cpu().numpy()
            y_pred_reg = np.squeeze(output_dict['y_pred_reg'].cpu().detach().numpy())
            y_df_reg = pd.DataFrame({'sample': sample_list, 'y_true': y_true_reg, 'y_pred': y_pred_reg}, index=index)
            y_df_reg_path = os.path.join(down_path, 'y_df_regression.tsv')
            y_df_reg.to_csv(y_df_reg_path, sep='\t')
def save_latent_space(self, latent_dict, sample_list):
"""
save the latent space matrix to disc
Parameters:
latent_dict (OrderedDict) -- the latent space dictionary
sample_list (ndarray) -- the sample list for the latent matrix
"""
reordered_sample_list = sample_list[latent_dict['index'].astype(int)]
latent_df = pd.DataFrame(latent_dict['latent'], index=reordered_sample_list)
output_path = os.path.join(self.param.checkpoints_dir, self.param.experiment_name, 'latent_space.tsv')
print('Saving the latent space matrix...')
latent_df.to_csv(output_path, sep='\t')
@staticmethod
def print_phase(phase):
"""
print the phase information
Parameters:
phase (int) -- the phase of the training process
"""
if phase == 'p1':
print('PHASE 1: Unsupervised Phase')
elif phase == 'p2':
print('PHASE 2: Supervised Phase')
elif phase == 'p3':
print('PHASE 3: Supervised Phase')
| 27,478 | 49.981447 | 370 | py |
SubOmiEmbed | SubOmiEmbed-main/util/util.py | """
Contain some simple helper functions
"""
import os
import shutil
import torch
import random
import numpy as np
def mkdir(path):
    """
    Create a directory (including missing parents) if it doesn't exist.

    Parameters:
        path(str) -- a directory path we would like to create
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # `if not os.path.exists(path): os.makedirs(path)` pattern, and fails
    # loudly if `path` exists but is a regular file instead of a directory.
    os.makedirs(path, exist_ok=True)
def clear_dir(path):
    """
    Empty out a directory, recreating it if necessary.

    The whole tree at *path* is removed (when it exists) and an empty
    directory is created in its place.

    Parameters:
        path(str) -- a directory path that we would like to delete all files in it
    """
    already_there = os.path.exists(path)
    if already_there:
        # ignore_errors keeps this best-effort: a partially removed tree
        # is still recreated below
        shutil.rmtree(path, ignore_errors=True)
    os.makedirs(path, exist_ok=True)
def setup_seed(seed):
    """
    Seed every random number generator used by the project so that
    experiments are deterministic and reproducible.

    Parameters:
        seed(int) -- the random seed
    """
    # The individual seeding calls are independent of each other
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force cuDNN to select deterministic kernels
    torch.backends.cudnn.deterministic = True
def get_time_points(T_max, time_num, extra_time_percent=0.1):
    """
    Build the discretized time axis used by the MTLR survival model.

    Parameters:
        T_max (float)              -- the largest observed survival time
        time_num (int)             -- number of time intervals
        extra_time_percent (float) -- extra margin beyond T_max, as a fraction

    Returns:
        ndarray of time_num + 1 evenly spaced points running from 0 to
        T_max * (1 + extra_time_percent)
    """
    upper_bound = T_max * (1 + extra_time_percent)
    return np.linspace(0, upper_bound, time_num + 1)
| 1,204 | 20.517857 | 82 | py |
SubOmiEmbed | SubOmiEmbed-main/params/basic_params.py | import time
import argparse
import torch
import os
import models
from util import util
class BasicParams:
    """
    This class define the console parameters

    It registers the command-line options shared by every model (dataset,
    network, loss and misc settings), lets the chosen model add its own
    options via models.get_param_setter, and resolves everything into a
    single namespace through parse().
    """
    def __init__(self):
        """
        Reset the class. Indicates the class hasn't been initialized
        """
        self.initialized = False
        self.isTrain = True
        self.isTest = True
    def initialize(self, parser):
        """
        Define the common console parameters

        Parameters:
            parser (argparse.ArgumentParser) -- the parser to populate

        Returns:
            the same parser with all shared arguments registered
        """
        parser.add_argument('--gpu_ids', type=str, default='0',
                            help='which GPU would like to use: e.g. 0 or 0,1, -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints',
                            help='models, settings and intermediate results are saved in folder in this directory')
        parser.add_argument('--experiment_name', type=str, default='test',
                            help='name of the folder in the checkpoint directory')
        # Dataset parameters
        parser.add_argument('--omics_mode', type=str, default='a',
                            help='omics types would like to use in the model, options: [abc | ab | a | b | c]')
        parser.add_argument('--data_root', type=str, default='./data',
                            help='path to input data')
        parser.add_argument('--batch_size', type=int, default=32,
                            help='input data batch size')
        parser.add_argument('--num_threads', default=0, type=int,
                            help='number of threads for loading data')
        parser.add_argument('--set_pin_memory', action='store_true',
                            help='set pin_memory in the dataloader to increase data loading performance')
        parser.add_argument('--not_stratified', action='store_true',
                            help='do not apply the stratified mode in train/test split if set true')
        parser.add_argument('--use_sample_list', action='store_true',
                            help='provide a subset sample list of the dataset, store in the path data_root/sample_list.tsv, if False use all the samples')
        parser.add_argument('--use_feature_lists', action='store_true',
                            help='provide feature lists of the input omics data, e.g. data_root/feature_list_A.tsv, if False use all the features')
        parser.add_argument('--detect_na', action='store_true',
                            help='detect missing value markers during data loading, stay False can improve the loading performance')
        parser.add_argument('--file_format', type=str, default='tsv',
                            help='file format of the omics data, options: [tsv | csv | hdf]')
        # Model parameters
        parser.add_argument('--model', type=str, default='vae_classifier',
                            help='chooses which model want to use, options: [vae_classifier | vae_regression | vae_survival | vae_multitask]')
        parser.add_argument('--net_VAE', type=str, default='fc_sep',
                            help='specify the backbone of the VAE, default is the one dimensional CNN, options: [conv_1d | fc_sep | fc]')
        parser.add_argument('--net_down', type=str, default='multi_FC_classifier',
                            help='specify the backbone of the downstream task network, default is the multi-layer FC classifier, options: [multi_FC_classifier | multi_FC_regression | multi_FC_survival | multi_FC_multitask]')
        parser.add_argument('--norm_type', type=str, default='batch',
                            help='the type of normalization applied to the model, default to use batch normalization, options: [batch | instance | none ]')
        parser.add_argument('--filter_num', type=int, default=8,
                            help='number of filters in the last convolution layer in the generator')
        parser.add_argument('--conv_k_size', type=int, default=9,
                            help='the kernel size of convolution layer, default kernel size is 9, the kernel is one dimensional.')
        parser.add_argument('--dropout_p', type=float, default=0.2,
                            help='probability of an element to be zeroed in a dropout layer, default is 0 which means no dropout.')
        parser.add_argument('--leaky_slope', type=float, default=0.2,
                            help='the negative slope of the Leaky ReLU activation function')
        parser.add_argument('--latent_space_dim', type=int, default=128,
                            help='the dimensionality of the latent space')
        parser.add_argument('--seed', type=int, default=42,
                            help='random seed')
        parser.add_argument('--init_type', type=str, default='normal',
                            help='choose the method of network initialization, options: [normal | xavier_normal | xavier_uniform | kaiming_normal | kaiming_uniform | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02,
                            help='scaling factor for normal, xavier and orthogonal initialization methods')
        # Feature subsetting
        parser.add_argument('--use_subset_features', action='store_true',
                            help='divide features into subsets, train using each subset, and sum up total reconstruction losses while training with each subset. net_VAE will be set to fc if this argument is provided')
        parser.add_argument("--subset_num", type=int, default=1,
                            help='No of subsets to divide features into')
        parser.add_argument("--agg_method", type=str, default='mean',
                            help='Method to use while aggregating representations from multiple subsets for downstream task, options: [mean | max | min | sum | concat | random]')
        parser.add_argument("--enc_reduction_factor", type=int, default=1,
                            help='the factor by which dimension of encoder hidden layers should be divided')
        parser.add_argument("--dec_reduction_factor", type=int, default=1,
                            help='the factor by which dimension of decoder hidden layers should be divided')
        parser.add_argument("--down_reduction_factor", type=int, default=1,
                            help='the factor by which dimension of downstream hidden layers should be divided')
        parser.add_argument("--use_subset_identity", action='store_true',
                            help='use one hot encoded subset identity as additional input to downstream task.')
        # Loss parameters
        parser.add_argument('--recon_loss', type=str, default='BCE',
                            help='chooses the reconstruction loss function, options: [BCE | MSE | L1]')
        parser.add_argument('--reduction', type=str, default='mean',
                            help='chooses the reduction to apply to the loss function, options: [sum | mean]')
        parser.add_argument('--k_kl', type=float, default=0.01,
                            help='weight for the kl loss')
        parser.add_argument('--k_embed', type=float, default=0.001,
                            help='weight for the embedding loss')
        # Other parameters
        parser.add_argument('--deterministic', action='store_true',
                            help='make the model deterministic for reproduction if set true')
        parser.add_argument('--detail', action='store_true',
                            help='print more detailed information if set true')
        parser.add_argument('--epoch_to_load', type=str, default='latest',
                            help='the epoch number to load, set latest to load latest cached model')
        parser.add_argument('--experiment_to_load', type=str, default='test',
                            help='the experiment to load')
        self.initialized = True  # set the initialized to True after we define the parameters of the project
        return parser
    def get_params(self):
        """
        Initialize our parser with basic parameters once.
        Add additional model-specific parameters.

        Returns:
            the fully parsed argparse namespace (basic + model options)
        """
        if not self.initialized:  # check if this object has been initialized
            # if not create a new parser object
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            # use our method to initialize the parser with the predefined arguments
            parser = self.initialize(parser)
        # get the basic parameters
        param, _ = parser.parse_known_args()
        # modify model-related parser options
        model_name = param.model
        model_param_setter = models.get_param_setter(model_name)
        parser = model_param_setter(parser, self.isTrain)
        # save and return the parser
        self.parser = parser
        return parser.parse_args()
    def print_params(self, param):
        """
        Print welcome words and command line parameters.
        Save the command line parameters in a txt file to the disk

        Parameters:
            param (argparse.Namespace) -- the parsed parameters to report
        """
        message = ''
        message += '\nWelcome to OmiEmbed\nby Xiaoyu Zhang x.zhang18@imperial.ac.uk\n\n'
        message += '-----------------------Running Parameters-----------------------\n'
        for key, value in sorted(vars(param).items()):
            comment = ''
            default = self.parser.get_default(key)
            # annotate every value that differs from its declared default
            if value != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>18}: {:<15}{}\n'.format(str(key), str(value), comment)
        message += '----------------------------------------------------------------\n'
        print(message)
        # Save the running parameters setting in the disk
        experiment_dir = os.path.join(param.checkpoints_dir, param.experiment_name)
        util.mkdir(experiment_dir)
        file_name = os.path.join(experiment_dir, 'cmd_parameters.txt')
        with open(file_name, 'w') as param_file:
            now = time.strftime('%c')
            param_file.write('{:s}\n'.format(now))
            param_file.write(message)
            param_file.write('\n')
    def parse(self):
        """
        Parse the parameters of our project. Set up GPU device. Print the welcome words and list parameters in the console.

        Returns:
            the final argparse.Namespace, augmented with derived fields
            (downstream_task, add_channel, ch_separate, omics_num, gpu_ids)
        """
        param = self.get_params()  # get the parameters to the object param
        param.isTrain = self.isTrain
        param.isTest = self.isTest
        # Feature subsetting is only implemented for the plain FC backbone
        # (see the --use_subset_features help text), so override net_VAE here
        if param.use_subset_features:
            param.net_VAE = 'fc'
        if param.use_subset_identity:
            param.agg_method = 'mean'
        # Print welcome words and command line parameters
        self.print_params(param)
        # Set the internal parameters
        # epoch_num: the total epoch number
        # NOTE(review): epoch_num_p1/p2/p3 are added by the model-specific
        # param setter, not in this class -- confirm every model defines them
        if self.isTrain:
            param.epoch_num = param.epoch_num_p1 + param.epoch_num_p2 + param.epoch_num_p3
        # downstream_task: for the classification task a labels.tsv file is needed, for the regression task a values.tsv file is needed
        if param.model == 'vae_classifier':
            param.downstream_task = 'classification'
        elif param.model == 'vae_regression':
            param.downstream_task = 'regression'
        elif param.model == 'vae_survival':
            param.downstream_task = 'survival'
        elif param.model == 'vae_multitask' or param.model == 'vae_multitask_gn':
            param.downstream_task = 'multitask'
        elif param.model == 'vae_alltask' or param.model == 'vae_alltask_gn':
            param.downstream_task = 'alltask'
        else:
            raise NotImplementedError('Model name [%s] is not recognized' % param.model)
        # add_channel: add one extra dimension of channel for the input data, used for convolution layer
        # ch_separate: separate the DNA methylation matrix base on the chromosome
        if param.net_VAE == 'conv_1d':
            param.add_channel = True
            param.ch_separate = False
        elif param.net_VAE == 'fc_sep':
            param.add_channel = False
            param.ch_separate = True
        elif param.net_VAE == 'fc':
            param.add_channel = False
            param.ch_separate = False
        else:
            raise NotImplementedError('VAE model name [%s] is not recognized' % param.net_VAE)
        # omics_num: the number of omics types
        # (omics_mode is a string such as 'abc', so its length is the count)
        param.omics_num = len(param.omics_mode)
        # Set up GPU
        # gpu_ids is re-parsed from a comma-separated string into a list of
        # non-negative ints; -1 entries (CPU) are dropped
        str_gpu_ids = param.gpu_ids.split(',')
        param.gpu_ids = []
        for str_gpu_id in str_gpu_ids:
            int_gpu_id = int(str_gpu_id)
            if int_gpu_id >= 0:
                param.gpu_ids.append(int_gpu_id)
        if len(param.gpu_ids) > 0:
            torch.cuda.set_device(param.gpu_ids[0])
        self.param = param
        return self.param
| 12,834 | 54.323276 | 224 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/a_dataset.py | import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
import numpy as np
import pandas as pd
import torch
class ADataset(BasicDataset):
    """
    A dataset class for gene expression dataset.
    File should be prepared as '/path/to/data/A.tsv'.
    For each omics file, each columns should be each sample and each row should be each molecular feature.

    Only the A omics is loaded; the B and C slots of input_omics are filled
    with zero placeholders in __getitem__.
    """
    def __init__(self, param):
        """
        Initialize this dataset class.

        Loads the A matrix (and, depending on param.downstream_task, the
        label / target value / survival files) restricted to the selected
        samples and features.
        """
        BasicDataset.__init__(self, param)
        self.omics_dims = []
        # Load data for A
        A_df = load_file(param, 'A')
        # Get the sample list
        if param.use_sample_list:
            sample_list_path = os.path.join(param.data_root, 'sample_list.tsv')  # get the path of sample list
            # '<U32' = unicode strings of up to 32 characters
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
        else:
            self.sample_list = A_df.columns
        # Get the feature list for A
        if param.use_feature_lists:
            feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv')  # get the path of feature list
            feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_A = A_df.index
        A_df = A_df.loc[feature_list_A, self.sample_list]
        self.A_dim = A_df.shape[0]
        self.sample_num = A_df.shape[1]
        A_array = A_df.values
        if self.param.add_channel:
            # Add one dimension for the channel
            A_array = A_array[np.newaxis, :, :]
        self.A_tensor_all = torch.Tensor(A_array)
        self.omics_dims.append(self.A_dim)
        self.class_num = 0
        if param.downstream_task == 'classification':
            # Load labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Get the class number
            self.class_num = len(labels_df.iloc[:, -1].unique())
        elif param.downstream_task == 'regression':
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
        elif param.downstream_task == 'survival':
            # Load survival data (second-to-last column: time T, last column: event E)
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            # NOTE(review): y_true_tensor only exists when survival_loss == 'MTLR',
            # but __getitem__ reads it unconditionally in the survival branch --
            # confirm MTLR is the only supported survival loss here
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
            if param.stratify_label:
                labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array = labels_df.iloc[:, -1].values
        elif param.downstream_task == 'multitask':
            # Load labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Get the class number
            self.class_num = len(labels_df.iloc[:, -1].unique())
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Load survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
        elif param.downstream_task == 'alltask':
            # Load labels for each classification task (labels_1.tsv, labels_2.tsv, ...)
            self.labels_array = []
            self.class_num = []
            for i in range(param.task_num-2):
                labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv')  # get the path of the label
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array.append(labels_df.iloc[:, -1].values)
                # Get the class number
                self.class_num.append(len(labels_df.iloc[:, -1].unique()))
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Load survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
    def __getitem__(self, index):
        """
        Return a data point and its metadata information.
        Returns a dictionary that contains A_tensor, label and index
            input_omics (list)              -- a list of input omics tensor
            label (int)                     -- label of the sample
            index (int)                     -- the index of this data point

        Extra keys depend on param.downstream_task: 'value' (regression),
        'survival_T'/'survival_E'/'y_true' (survival), or all of them
        (multitask / alltask).
        """
        # Get the tensor of A
        if self.param.add_channel:
            A_tensor = self.A_tensor_all[:, :, index]
        else:
            A_tensor = self.A_tensor_all[:, index]
        # Get the tensor of B
        # Single-omics dataset: B and C are zero placeholders so input_omics
        # keeps a fixed 3-slot layout -- presumably ignored downstream; confirm
        if self.param.ch_separate:
            B_tensor = list(np.zeros(23))
        else:
            B_tensor = 0
        # Get the tensor of C
        C_tensor = 0
        if self.param.downstream_task == 'classification':
            # Get label
            label = self.labels_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
        elif self.param.downstream_task == 'regression':
            # Get target value
            value = self.values_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
        elif self.param.downstream_task == 'survival':
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'multitask':
            # Get label
            label = self.labels_array[index]
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value, 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'alltask':
            # Get label for each classification task
            label = []
            for i in range(self.param.task_num - 2):
                label.append(self.labels_array[i][index])
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value, 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        else:
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
    def __len__(self):
        """
        Return the number of data points in the dataset.
        """
        return self.sample_num
| 10,137 | 50.461929 | 184 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/abc_dataset.py | import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class ABCDataset(BasicDataset):
    """
    A dataset class for multi-omics dataset.
    For gene expression data, file should be prepared as '/path/to/data/A.tsv'.
    For DNA methylation data, file should be prepared as '/path/to/data/B.tsv'.
    For miRNA expression data, file should be prepared as '/path/to/data/C.tsv'.
    For each omics file, each columns should be each sample and each row should be each molecular feature.
    """
    def __init__(self, param):
        """
        Initialize this dataset class.

        Loads the A, B and C matrices (and, depending on
        param.downstream_task, the label / target value / survival files)
        restricted to the selected samples and features. The sample list is
        derived from A and reused for B and C.
        """
        BasicDataset.__init__(self, param)
        self.omics_dims = []
        # Load data for A
        A_df = load_file(param, 'A')
        # Get the sample list
        if param.use_sample_list:
            sample_list_path = os.path.join(param.data_root, 'sample_list.tsv')  # get the path of sample list
            # '<U32' = unicode strings of up to 32 characters
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
        else:
            self.sample_list = A_df.columns
        # Get the feature list for A
        if param.use_feature_lists:
            feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv')  # get the path of feature list
            feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_A = A_df.index
        A_df = A_df.loc[feature_list_A, self.sample_list]
        self.A_dim = A_df.shape[0]
        self.sample_num = A_df.shape[1]
        A_array = A_df.values
        if self.param.add_channel:
            # Add one dimension for the channel
            A_array = A_array[np.newaxis, :, :]
        self.A_tensor_all = torch.Tensor(A_array)
        self.omics_dims.append(self.A_dim)
        # Load data for B
        B_df = load_file(param, 'B')
        # Get the feature list for B
        if param.use_feature_lists:
            feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv')  # get the path of feature list
            feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_B = B_df.index
        B_df = B_df.loc[feature_list_B, self.sample_list]
        if param.ch_separate:
            # Split the methylation matrix into 23 per-chromosome blocks;
            # B_tensor_all becomes a list of 23 tensors
            B_df_list, self.B_dim = preprocess.separate_B(B_df)
            self.B_tensor_all = []
            for i in range(0, 23):
                B_array = B_df_list[i].values
                if self.param.add_channel:
                    # Add one dimension for the channel
                    B_array = B_array[np.newaxis, :, :]
                B_tensor_part = torch.Tensor(B_array)
                self.B_tensor_all.append(B_tensor_part)
        else:
            self.B_dim = B_df.shape[0]
            B_array = B_df.values
            if self.param.add_channel:
                # Add one dimension for the channel
                B_array = B_array[np.newaxis, :, :]
            self.B_tensor_all = torch.Tensor(B_array)
        self.omics_dims.append(self.B_dim)
        # Load data for C
        C_df = load_file(param, 'C')
        # Get the feature list for C
        if param.use_feature_lists:
            feature_list_C_path = os.path.join(param.data_root, 'feature_list_C.tsv')  # get the path of feature list
            feature_list_C = np.loadtxt(feature_list_C_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_C = C_df.index
        C_df = C_df.loc[feature_list_C, self.sample_list]
        self.C_dim = C_df.shape[0]
        C_array = C_df.values
        if self.param.add_channel:
            # Add one dimension for the channel
            C_array = C_array[np.newaxis, :, :]
        self.C_tensor_all = torch.Tensor(C_array)
        self.omics_dims.append(self.C_dim)
        self.class_num = 0
        if param.downstream_task == 'classification':
            # Load labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Get the class number
            self.class_num = len(labels_df.iloc[:, -1].unique())
        elif param.downstream_task == 'regression':
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
        elif param.downstream_task == 'survival':
            # Load survival data (second-to-last column: time T, last column: event E)
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            # NOTE(review): y_true_tensor only exists when survival_loss == 'MTLR',
            # but __getitem__ reads it unconditionally in the survival branch --
            # confirm MTLR is the only supported survival loss here
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
            if param.stratify_label:
                labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array = labels_df.iloc[:, -1].values
        elif param.downstream_task == 'multitask':
            # Load labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # get the path of the label
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Get the class number
            self.class_num = len(labels_df.iloc[:, -1].unique())
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Load survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
        elif param.downstream_task == 'alltask':
            # Load labels for each classification task (labels_1.tsv, labels_2.tsv, ...)
            self.labels_array = []
            self.class_num = []
            for i in range(param.task_num-2):
                labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv')  # get the path of the label
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array.append(labels_df.iloc[:, -1].values)
                # Get the class number
                self.class_num.append(len(labels_df.iloc[:, -1].unique()))
            # Load target values
            values_path = os.path.join(param.data_root, 'values.tsv')  # get the path of the target value
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Load survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # get the path of the survival data
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
    def __getitem__(self, index):
        """
        Return a data point and its metadata information.
        Returns a dictionary that contains A_tensor, B_tensor, C_tensor, label and index
            input_omics (list)              -- a list of input omics tensor
            label (int)                     -- label of the sample
            index (int)                     -- the index of this data point

        Extra keys depend on param.downstream_task: 'value' (regression),
        'survival_T'/'survival_E'/'y_true' (survival), or all of them
        (multitask / alltask).
        """
        # Get the tensor of A
        if self.param.add_channel:
            A_tensor = self.A_tensor_all[:, :, index]
        else:
            A_tensor = self.A_tensor_all[:, index]
        # Get the tensor of B
        if self.param.ch_separate:
            B_tensor = []
            for i in range(0, 23):
                if self.param.add_channel:
                    B_tensor_part = self.B_tensor_all[i][:, :, index]
                else:
                    B_tensor_part = self.B_tensor_all[i][:, index]
                B_tensor.append(B_tensor_part)
            # Return a list of tensor
        else:
            if self.param.add_channel:
                B_tensor = self.B_tensor_all[:, :, index]
            else:
                B_tensor = self.B_tensor_all[:, index]
            # Return a tensor
        # Get the tensor of C
        if self.param.add_channel:
            C_tensor = self.C_tensor_all[:, :, index]
        else:
            C_tensor = self.C_tensor_all[:, index]
        if self.param.downstream_task == 'classification':
            # Get label
            label = self.labels_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
        elif self.param.downstream_task == 'regression':
            # Get target value
            value = self.values_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
        elif self.param.downstream_task == 'survival':
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'multitask':
            # Get label
            label = self.labels_array[index]
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'alltask':
            # Get label for each classification task
            label = []
            for i in range(self.param.task_num - 2):
                label.append(self.labels_array[i][index])
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        else:
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
    def __len__(self):
        """
        Return the number of data points in the dataset.
        """
        return self.sample_num
| 13,033 | 48.748092 | 152 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/basic_dataset.py | """
This module implements an abstract base class for datasets. Other datasets can be created from this base class.
"""
import torch.utils.data as data
from abc import ABC, abstractmethod
class BasicDataset(data.Dataset, ABC):
    """
    This class is an abstract base class for datasets.
    To create a subclass, you need to implement the following three functions:
    -- <__init__>: initialize the class, first call BasicDataset.__init__(self, param).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point.
    """
    def __init__(self, param):
        """
        Initialize the class, save the parameters in the class

        Parameters:
            param -- the parsed command-line parameter namespace shared by
                all dataset subclasses
        """
        self.param = param
        # Filled in by subclasses with the ordered sample identifiers
        self.sample_list = None
    @abstractmethod
    def __len__(self):
        """Return the total number of samples in the dataset."""
        return 0
    @abstractmethod
    def __getitem__(self, index):
        """
        Return a data point and its metadata information.
        Parameters:
            index - - a integer for data indexing
        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
        """
        pass
| 1,272 | 31.641026 | 116 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/ab_dataset.py | import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class ABDataset(BasicDataset):
    """
    Dataset class for the two-omics (A + B) setting.

    A is gene expression data, expected at '/path/to/data/A.tsv'; B is DNA
    methylation data, expected at '/path/to/data/B.tsv'.  In every omics
    file each column is one sample and each row is one molecular feature.
    """
    def __init__(self, param):
        """
        Load omics matrices A and B (plus labels / target values / survival
        data, depending on param.downstream_task) and keep them as tensors.
        """
        BasicDataset.__init__(self, param)
        self.omics_dims = []
        # ---- Omics type A (gene expression) ----
        A_df = load_file(param, 'A')
        # Sample list: either an explicit file or simply all columns of A
        if param.use_sample_list:
            sample_list_path = os.path.join(param.data_root, 'sample_list.tsv')  # path of the sample list file
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
        else:
            self.sample_list = A_df.columns
        # Feature list for A: either an explicit file or all rows of A
        if param.use_feature_lists:
            feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv')  # path of the feature list file
            feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_A = A_df.index
        # Restrict A to the selected features/samples (this also fixes their order)
        A_df = A_df.loc[feature_list_A, self.sample_list]
        self.A_dim = A_df.shape[0]
        self.sample_num = A_df.shape[1]
        A_array = A_df.values
        if self.param.add_channel:
            # Add one dimension for the channel
            A_array = A_array[np.newaxis, :, :]
        self.A_tensor_all = torch.Tensor(A_array)
        self.omics_dims.append(self.A_dim)
        # ---- Omics type B (DNA methylation) ----
        B_df = load_file(param, 'B')
        # Feature list for B: either an explicit file or all rows of B
        if param.use_feature_lists:
            feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv')  # path of the feature list file
            feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_B = B_df.index
        B_df = B_df.loc[feature_list_B, self.sample_list]
        if param.ch_separate:
            # Split B into 23 per-chromosome matrices
            B_df_list, self.B_dim = preprocess.separate_B(B_df)
            self.B_tensor_all = []
            for i in range(0, 23):
                B_array = B_df_list[i].values
                if self.param.add_channel:
                    # Add one dimension for the channel
                    B_array = B_array[np.newaxis, :, :]
                B_tensor_part = torch.Tensor(B_array)
                self.B_tensor_all.append(B_tensor_part)
        else:
            self.B_dim = B_df.shape[0]
            B_array = B_df.values
            if self.param.add_channel:
                # Add one dimension for the channel
                B_array = B_array[np.newaxis, :, :]
            self.B_tensor_all = torch.Tensor(B_array)
        self.omics_dims.append(self.B_dim)
        # ---- Downstream-task specific targets ----
        self.class_num = 0
        if param.downstream_task == 'classification':
            # Class labels come from the last column of labels.tsv
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
        elif param.downstream_task == 'regression':
            # Regression targets come from the last column of values.tsv
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
        elif param.downstream_task == 'survival':
            # Survival time T (second-to-last column) and event flag E (last column)
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                # Precompute the discretized survival target matrix
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
            if param.stratify_label:
                # Labels used only for stratified train/test splitting
                labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array = labels_df.iloc[:, -1].values
        elif param.downstream_task == 'multitask':
            # Classification labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
        elif param.downstream_task == 'alltask':
            # One label file per classification sub-task:
            # labels_1.tsv ... labels_{task_num-2}.tsv
            self.labels_array = []
            self.class_num = []
            for i in range(param.task_num-2):
                labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array.append(labels_df.iloc[:, -1].values)
                # Number of distinct classes of this sub-task
                self.class_num.append(len(labels_df.iloc[:, -1].unique()))
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
    def __getitem__(self, index):
        """
        Return one data point and its metadata.

        Always contains:
            input_omics (list) -- [A_tensor, B_tensor, C_tensor]; C is a 0
                                  placeholder because this dataset has no C
            index (int)        -- the index of this data point
        plus task-specific entries (label / value / survival_T / survival_E /
        y_true) depending on param.downstream_task.
        """
        # Slice out sample `index` from A (channel dimension kept if present)
        if self.param.add_channel:
            A_tensor = self.A_tensor_all[:, :, index]
        else:
            A_tensor = self.A_tensor_all[:, index]
        # Slice out sample `index` from B
        if self.param.ch_separate:
            B_tensor = []
            for i in range(0, 23):
                if self.param.add_channel:
                    B_tensor_part = self.B_tensor_all[i][:, :, index]
                else:
                    B_tensor_part = self.B_tensor_all[i][:, index]
                B_tensor.append(B_tensor_part)
            # Here B is a list of per-chromosome tensors
        else:
            if self.param.add_channel:
                B_tensor = self.B_tensor_all[:, :, index]
            else:
                B_tensor = self.B_tensor_all[:, index]
            # Here B is a single tensor
        # C is absent in this dataset; keep a 0 placeholder so downstream
        # code can always index input_omics[2]
        C_tensor = 0
        if self.param.downstream_task == 'classification':
            # Get label
            label = self.labels_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
        elif self.param.downstream_task == 'regression':
            # Get target value
            value = self.values_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
        elif self.param.downstream_task == 'survival':
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'multitask':
            # Get label
            label = self.labels_array[index]
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'alltask':
            # One label per classification sub-task
            label = []
            for i in range(self.param.task_num - 2):
                label.append(self.labels_array[i][index])
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        else:
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
    def __len__(self):
        """
        Return the number of data points (samples) in the dataset.
        """
        return self.sample_num
| 12,076 | 49.112033 | 152 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/c_dataset.py | import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
import numpy as np
import pandas as pd
import torch
class CDataset(BasicDataset):
    """
    Dataset class for the miRNA-expression-only (C) setting.

    The data is expected at '/path/to/data/C.tsv'; each column is one
    sample and each row is one molecular feature.
    """
    def __init__(self, param):
        """
        Load omics matrix C (plus labels / target values / survival data,
        depending on param.downstream_task) and keep it as a tensor.
        """
        BasicDataset.__init__(self, param)
        self.omics_dims = []
        self.omics_dims.append(None)  # First dimension is for gene expression (A)
        self.omics_dims.append(None)  # Second dimension is for DNA methylation (B)
        # ---- Omics type C (miRNA expression) ----
        C_df = load_file(param, 'C')
        # Sample list: either an explicit file or simply all columns of C
        if param.use_sample_list:
            sample_list_path = os.path.join(param.data_root, 'sample_list.tsv')  # path of the sample list file
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
        else:
            self.sample_list = C_df.columns
        # Feature list for C: either an explicit file or all rows of C
        if param.use_feature_lists:
            feature_list_C_path = os.path.join(param.data_root, 'feature_list_C.tsv')  # path of the feature list file
            feature_list_C = np.loadtxt(feature_list_C_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_C = C_df.index
        # Restrict C to the selected features/samples (this also fixes their order)
        C_df = C_df.loc[feature_list_C, self.sample_list]
        self.C_dim = C_df.shape[0]
        self.sample_num = C_df.shape[1]
        C_array = C_df.values
        if self.param.add_channel:
            # Add one dimension for the channel
            C_array = C_array[np.newaxis, :, :]
        self.C_tensor_all = torch.Tensor(C_array)
        self.omics_dims.append(self.C_dim)
        # ---- Downstream-task specific targets ----
        self.class_num = 0
        if param.downstream_task == 'classification':
            # Class labels come from the last column of labels.tsv
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
        elif param.downstream_task == 'regression':
            # Regression targets come from the last column of values.tsv
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
        elif param.downstream_task == 'survival':
            # Survival time T (second-to-last column) and event flag E (last column)
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                # Precompute the discretized survival target matrix
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
            if param.stratify_label:
                # Labels used only for stratified train/test splitting
                labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array = labels_df.iloc[:, -1].values
        elif param.downstream_task == 'multitask':
            # Classification labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
        elif param.downstream_task == 'alltask':
            # One label file per classification sub-task:
            # labels_1.tsv ... labels_{task_num-2}.tsv
            self.labels_array = []
            self.class_num = []
            for i in range(param.task_num-2):
                labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array.append(labels_df.iloc[:, -1].values)
                # Number of distinct classes of this sub-task
                self.class_num.append(len(labels_df.iloc[:, -1].unique()))
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
    def __getitem__(self, index):
        """
        Return one data point and its metadata.

        Always contains:
            input_omics (list) -- [A_tensor, B_tensor, C_tensor]; A and B are
                                  placeholders because this dataset only has C
            index (int)        -- the index of this data point
        plus task-specific entries (label / value / survival_T / survival_E /
        y_true) depending on param.downstream_task.
        """
        # Slice out sample `index` from C (channel dimension kept if present)
        if self.param.add_channel:
            C_tensor = self.C_tensor_all[:, :, index]
        else:
            C_tensor = self.C_tensor_all[:, index]
        # A is absent in this dataset; keep a 0 placeholder
        A_tensor = 0
        # B is absent too; mirror its expected per-chromosome list structure
        # when param.ch_separate is on so downstream code can still iterate it
        if self.param.ch_separate:
            B_tensor = list(np.zeros(23))
        else:
            B_tensor = 0
        if self.param.downstream_task == 'classification':
            # Get label
            label = self.labels_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
        elif self.param.downstream_task == 'regression':
            # Get target value
            value = self.values_array[index]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
        elif self.param.downstream_task == 'survival':
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'multitask':
            # Get label
            label = self.labels_array[index]
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        elif self.param.downstream_task == 'alltask':
            # One label per classification sub-task
            label = []
            for i in range(self.param.task_num - 2):
                label.append(self.labels_array[i][index])
            # Get target value
            value = self.values_array[index]
            # Get survival T and E
            survival_T = self.survival_T_array[index]
            survival_E = self.survival_E_array[index]
            y_true = self.y_true_tensor[index, :]
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
                    'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
        else:
            return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
    def __len__(self):
        """
        Return the number of data points (samples) in the dataset.
        """
        return self.sample_num
| 10,372 | 50.098522 | 152 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/__init__.py | """
This package about data loading and data preprocessing
"""
import os
import torch
import importlib
import numpy as np
import pandas as pd
from util import util
from datasets.basic_dataset import BasicDataset
from datasets.dataloader_prefetch import DataLoaderPrefetch
from torch.utils.data import Subset
from sklearn.model_selection import train_test_split
def find_dataset_using_name(dataset_mode):
    """
    Resolve the dataset class for a given omics mode.

    Imports datasets/<dataset_mode>_dataset.py and returns the class whose
    name equals the mode with underscores removed plus 'dataset'
    (compared case-insensitively) and which subclasses BasicDataset.

    Raises:
        NotImplementedError: if no matching class is found in the module.
    """
    dataset_filename = "datasets." + dataset_mode + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)
    # Change the name format to the expected class name, e.g. 'ab' -> 'abdataset'
    target_dataset_name = dataset_mode.replace('_', '') + 'dataset'
    # Keep the last matching class, mirroring a plain scan of the module dict
    candidates = [obj for name, obj in vars(datasetlib).items()
                  if name.lower() == target_dataset_name.lower()
                  and issubclass(obj, BasicDataset)]
    dataset = candidates[-1] if candidates else None
    if dataset is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BasicDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
    return dataset
def create_dataset(param):
    """
    Instantiate the dataset class selected by param.omics_mode.
    """
    dataset_class = find_dataset_using_name(param.omics_mode)
    # Build an instance of the resolved dataset class
    instance = dataset_class(param)
    print("Dataset [%s] was created" % type(instance).__name__)
    return instance
class CustomDataLoader:
    """
    Thin wrapper that couples a dataset with a prefetching dataloader and
    exposes convenient accessors for the dataset's metadata.
    """
    def __init__(self, dataset, param, shuffle=True, enable_drop_last=False):
        self.dataset = dataset
        self.param = param
        # Drop the last incomplete batch only when requested and when that
        # batch would be too small to spread across the configured GPUs.
        drop_last = bool(
            enable_drop_last
            and len(dataset) % param.batch_size < 3 * len(param.gpu_ids)
        )
        self.dataloader = DataLoaderPrefetch(
            dataset,
            batch_size=param.batch_size,
            shuffle=shuffle,
            num_workers=int(param.num_threads),
            drop_last=drop_last,
            pin_memory=param.set_pin_memory
        )

    def __len__(self):
        """Number of samples in the wrapped dataset."""
        return len(self.dataset)

    def __iter__(self):
        """Yield batches produced by the prefetching dataloader."""
        yield from self.dataloader

    def get_A_dim(self):
        """Dimension of the first input omics data type (A)."""
        return self.dataset.A_dim

    def get_B_dim(self):
        """Dimension of the second input omics data type (B)."""
        return self.dataset.B_dim

    def get_omics_dims(self):
        """List with the dimension of every omics data type."""
        return self.dataset.omics_dims

    def get_class_num(self):
        """Number of classes for the downstream classification task."""
        return self.dataset.class_num

    def get_values_max(self):
        """Maximum regression target value of the dataset."""
        return self.dataset.values_max

    def get_values_min(self):
        """Minimum regression target value of the dataset."""
        return self.dataset.values_min

    def get_survival_T_max(self):
        """Maximum survival time T of the dataset."""
        return self.dataset.survival_T_max

    def get_survival_T_min(self):
        """Minimum survival time T of the dataset."""
        return self.dataset.survival_T_min

    def get_sample_list(self):
        """Sample list of the dataset."""
        return self.dataset.sample_list
def create_single_dataloader(param, shuffle=True, enable_drop_last=False):
    """
    Build one dataset and wrap it in a CustomDataLoader.

    Returns:
        (dataloader, sample_list) for the created dataset.
    """
    single_dataset = create_dataset(param)
    loader = CustomDataLoader(single_dataset, param, shuffle=shuffle,
                              enable_drop_last=enable_drop_last)
    return loader, single_dataset.sample_list
def create_separate_dataloader(param):
    """
    Build the full/train/val/test dataloaders for one experiment.

    The train/test split is random; unless param.not_stratified is set it is
    also stratified on a task-dependent label array.  Whatever indices are
    left over after the split become the validation set.

    Returns:
        (full_dataloader, train_dataloader, val_dataloader, test_dataloader)
    """
    full_dataset = create_dataset(param)
    full_idx = np.arange(len(full_dataset))
    if param.not_stratified:
        train_idx, test_idx = train_test_split(full_idx,
                                               test_size=param.test_ratio,
                                               train_size=param.train_ratio,
                                               shuffle=True)
    else:
        # Choose the array to stratify on, depending on the downstream task
        task = param.downstream_task
        if task == 'classification':
            targets = full_dataset.labels_array
        elif task == 'survival':
            # Stratify on the event flag unless explicit labels are requested
            targets = full_dataset.labels_array if param.stratify_label else full_dataset.survival_E_array
        elif task == 'multitask':
            targets = full_dataset.labels_array
        elif task == 'alltask':
            targets = full_dataset.labels_array[0]
        train_idx, test_idx = train_test_split(full_idx,
                                               test_size=param.test_ratio,
                                               train_size=param.train_ratio,
                                               shuffle=True,
                                               stratify=targets)
    # Everything not assigned to train or test is used for validation
    val_idx = list(set(full_idx) - set(train_idx) - set(test_idx))
    full_dataloader = CustomDataLoader(full_dataset, param)
    train_dataloader = CustomDataLoader(Subset(full_dataset, train_idx), param, enable_drop_last=True)
    val_dataloader = CustomDataLoader(Subset(full_dataset, val_idx), param, shuffle=False)
    test_dataloader = CustomDataLoader(Subset(full_dataset, test_idx), param, shuffle=False)
    return full_dataloader, train_dataloader, val_dataloader, test_dataloader
def load_file(param, file_name):
    """
    Load one data matrix from param.data_root and return it as a DataFrame.

    Parameters:
        param     -- configuration object; uses file_format, data_root and
                     (for text formats) detect_na
        file_name -- base name of the file without extension, e.g. 'A'

    Supported formats: 'tsv', 'csv', 'hdf' and 'npy'.  The 'npy' variant
    additionally loads <file_name>_features.npy and <file_name>_samples.npy
    to rebuild the DataFrame index and columns.

    Raises:
        NotImplementedError: if param.file_format is not one of the above.
    """
    if param.file_format == 'tsv':
        file_path = os.path.join(param.data_root, file_name + '.tsv')
        print('Loading data from ' + file_path)
        df = pd.read_csv(file_path, sep='\t', header=0, index_col=0, na_filter=param.detect_na)
    elif param.file_format == 'csv':
        file_path = os.path.join(param.data_root, file_name + '.csv')
        print('Loading data from ' + file_path)
        df = pd.read_csv(file_path, header=0, index_col=0, na_filter=param.detect_na)
    elif param.file_format == 'hdf':
        file_path = os.path.join(param.data_root, file_name + '.h5')
        print('Loading data from ' + file_path)
        # NOTE(review): header/index_col are forwarded to the underlying HDF
        # reader; confirm the stored format accepts these keyword arguments
        df = pd.read_hdf(file_path, header=0, index_col=0)
    elif param.file_format == 'npy':
        file_path = os.path.join(param.data_root, file_name + '.npy')
        print('Loading data from ' + file_path)
        values = np.load(file_path, allow_pickle=True)
        features_path = os.path.join(param.data_root, file_name + '_features.npy')
        print('Loading features from ' + features_path)
        features = np.load(features_path, allow_pickle=True)
        samples_path = os.path.join(param.data_root, file_name + '_samples.npy')
        print('Loading samples from ' + samples_path)
        samples = np.load(samples_path, allow_pickle=True)
        df = pd.DataFrame(data=values, index=features, columns=samples)
    else:
        # Fixed: the original message read 'File format %s is supported'
        raise NotImplementedError('File format %s is not supported' % param.file_format)
    return df
def get_survival_y_true(param, T, E):
    """
    Build the discretized survival target matrix for the MTLR loss.

    Parameters:
        param -- must provide survival_T_max (use -1 to take max(T)) and
                 time_num (number of discrete time intervals)
        T     -- array of survival times
        E     -- array of event indicators (1 = event observed, 0 = censored)

    Returns:
        Tensor of shape (len(T), time_num + 1).  For an uncensored sample a
        single 1 marks the interval the event falls into; for a censored
        sample every interval from the censoring point onwards is 1.
    """
    # Upper bound of the time axis: either configured or taken from the data
    T_max = T.max() if param.survival_T_max == -1 else param.survival_T_max
    # Discretize the time axis into time_num points
    time_points = util.get_time_points(T_max, param.time_num)
    y_true = []
    for t, e in zip(T, E):
        y_true_i = np.zeros(param.time_num + 1)
        # Index of the time point closest to t (the last point is excluded)
        dist_to_time_points = [abs(t - point) for point in time_points[:-1]]
        time_index = np.argmin(dist_to_time_points)
        if e == 1:
            # Uncensored: the event happened exactly in this interval
            y_true_i[time_index] = 1
        else:
            # Censored: the event happens in this interval or any later one
            y_true_i[time_index:] = 1
        y_true.append(y_true_i)
    return torch.Tensor(y_true)
| 8,346 | 34.219409 | 177 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/dataloader_prefetch.py | from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
class DataLoaderPrefetch(DataLoader):
    """DataLoader whose iterator is wrapped in a BackgroundGenerator, so the
    next batch can be prepared in a background thread while the current one
    is being consumed."""
    def __iter__(self):
        # Delegate to DataLoader's iterator, overlapped via a background thread
        return BackgroundGenerator(super().__iter__())
| 210 | 25.375 | 54 | py |
SubOmiEmbed | SubOmiEmbed-main/datasets/b_dataset.py | import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class BDataset(BasicDataset):
"""
A dataset class for methylation dataset.
DNA methylation data file should be prepared as '/path/to/data/B.tsv'.
For each omics file, each columns should be each sample and each row should be each molecular feature.
"""
    def __init__(self, param):
        """
        Load omics matrix B (plus labels / target values / survival data,
        depending on param.downstream_task) and keep it as tensors.
        """
        BasicDataset.__init__(self, param)
        self.omics_dims = []
        self.omics_dims.append(None)  # First dimension is for gene expression (A)
        # ---- Omics type B (DNA methylation) ----
        B_df = load_file(param, 'B')
        # Sample list: either an explicit file or simply all columns of B
        if param.use_sample_list:
            sample_list_path = os.path.join(param.data_root, 'sample_list.tsv')  # path of the sample list file
            # NOTE(review): the other dataset classes load this with
            # dtype='<U32'; confirm dtype='str' is intended here
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='str')
        else:
            self.sample_list = B_df.columns
        # Feature list for B: either an explicit file or all rows of B
        if param.use_feature_lists:
            feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv')  # path of the feature list file
            feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
        else:
            feature_list_B = B_df.index
        # Restrict B to the selected features/samples (this also fixes their order)
        B_df = B_df.loc[feature_list_B, self.sample_list]
        self.sample_num = B_df.shape[1]
        if param.ch_separate:
            # Split B into 23 per-chromosome matrices
            B_df_list, self.B_dim = preprocess.separate_B(B_df)
            self.B_tensor_all = []
            for i in range(0, 23):
                B_array = B_df_list[i].values
                if self.param.add_channel:
                    # Add one dimension for the channel
                    B_array = B_array[np.newaxis, :, :]
                B_tensor_part = torch.Tensor(B_array)
                self.B_tensor_all.append(B_tensor_part)
        else:
            self.B_dim = B_df.shape[0]
            B_array = B_df.values
            if self.param.add_channel:
                # Add one dimension for the channel
                B_array = B_array[np.newaxis, :, :]
            self.B_tensor_all = torch.Tensor(B_array)
        self.omics_dims.append(self.B_dim)
        # ---- Downstream-task specific targets ----
        self.class_num = 0
        if param.downstream_task == 'classification':
            # Class labels come from the last column of labels.tsv
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
        elif param.downstream_task == 'regression':
            # Regression targets come from the last column of values.tsv
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
        elif param.downstream_task == 'survival':
            # Survival time T (second-to-last column) and event flag E (last column)
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                # Precompute the discretized survival target matrix
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
            if param.stratify_label:
                # Labels used only for stratified train/test splitting
                labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array = labels_df.iloc[:, -1].values
        elif param.downstream_task == 'multitask':
            # Classification labels
            labels_path = os.path.join(param.data_root, 'labels.tsv')  # path of the label file
            labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.labels_array = labels_df.iloc[:, -1].values
            # Number of distinct classes
            self.class_num = len(labels_df.iloc[:, -1].unique())
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
        elif param.downstream_task == 'alltask':
            # One label file per classification sub-task:
            # labels_1.tsv ... labels_{task_num-2}.tsv
            self.labels_array = []
            self.class_num = []
            for i in range(param.task_num-2):
                labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv')  # path of the label file
                labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
                self.labels_array.append(labels_df.iloc[:, -1].values)
                # Number of distinct classes of this sub-task
                self.class_num.append(len(labels_df.iloc[:, -1].unique()))
            # Regression targets
            values_path = os.path.join(param.data_root, 'values.tsv')  # path of the target value file
            values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.values_array = values_df.iloc[:, -1].astype(float).values
            self.values_max = self.values_array.max()
            self.values_min = self.values_array.min()
            # Survival data
            survival_path = os.path.join(param.data_root, 'survival.tsv')  # path of the survival data file
            survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
            self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
            self.survival_E_array = survival_df.iloc[:, -1].values
            self.survival_T_max = self.survival_T_array.max()
            self.survival_T_min = self.survival_T_array.min()
            if param.survival_loss == 'MTLR':
                self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains B_tensor, label and index
input_omics (list) -- a list of input omics tensor
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of B
if self.param.ch_separate:
B_tensor = []
for i in range(0, 23):
if self.param.add_channel:
B_tensor_part = self.B_tensor_all[i][:, :, index]
else:
B_tensor_part = self.B_tensor_all[i][:, index]
B_tensor.append(B_tensor_part)
# Return a list of tensor
else:
if self.param.add_channel:
B_tensor = self.B_tensor_all[:, :, index]
else:
B_tensor = self.B_tensor_all[:, index]
# Return a tensor
# Get the tensor of A
A_tensor = 0
# Get the tensor of C
C_tensor = 0
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
    def __len__(self):
        """
        Return the number of data points in the dataset.
        """
        # sample_num is populated by __init__ when the sample list is loaded
        return self.sample_num
| 11,172 | 49.556561 | 152 | py |
mixstyle-release | mixstyle-release-master/reid/main.py | import sys
import time
import os.path as osp
import argparse
import torch
import torch.nn as nn
import torchreid
from torchreid.utils import (
Logger, check_isfile, set_random_seed, collect_env_info,
resume_from_checkpoint, load_pretrained_weights, compute_model_complexity
)
from default_config import (
imagedata_kwargs, optimizer_kwargs, videodata_kwargs, engine_run_kwargs,
get_default_config, lr_scheduler_kwargs
)
from models.resnet_ms import resnet50_fc512, resnet50_fc512_ms12_a0d2, resnet50_fc512_ms12_a0d1, resnet50_fc512_ms12_a0d3
from models.resnet_ms import resnet50_fc512_ms1_a0d1, resnet50_fc512_ms123_a0d1, resnet50_fc512_ms1234_a0d1, resnet50_fc512_ms23_a0d1, resnet50_fc512_ms14_a0d1
from models.resnet_ms2 import resnet50_fc512_ms12_a0d1_domprior
from models.resnet_db import resnet50_fc512_db12
from models.osnet_ms import osnet_x1_0, osnet_x1_0_ms23_a0d1, osnet_x1_0_ms23_a0d2, osnet_x1_0_ms23_a0d3
from models.osnet_ms2 import osnet_x1_0_ms23_a0d1_domprior
from models.osnet_db import osnet_x1_0_db23
def build_datamanager(cfg):
    """Create the torchreid data manager matching ``cfg.data.type``."""
    manager_cls, kwargs_fn = (
        (torchreid.data.ImageDataManager, imagedata_kwargs)
        if cfg.data.type == 'image'
        else (torchreid.data.VideoDataManager, videodata_kwargs)
    )
    return manager_cls(**kwargs_fn(cfg))
def build_engine(cfg, datamanager, model, optimizer, scheduler):
    """Instantiate the training engine matching cfg.data.type and cfg.loss.name."""
    # Arguments shared by all four engine variants.
    common_kwargs = dict(
        optimizer=optimizer,
        scheduler=scheduler,
        use_gpu=cfg.use_gpu,
        label_smooth=cfg.loss.softmax.label_smooth,
    )
    is_image = cfg.data.type == 'image'
    if cfg.loss.name == 'softmax':
        if is_image:
            engine = torchreid.engine.ImageSoftmaxEngine(
                datamanager, model, **common_kwargs
            )
        else:
            # video softmax additionally configures temporal pooling
            engine = torchreid.engine.VideoSoftmaxEngine(
                datamanager,
                model,
                pooling_method=cfg.video.pooling_method,
                **common_kwargs
            )
    else:
        triplet_kwargs = dict(
            margin=cfg.loss.triplet.margin,
            weight_t=cfg.loss.triplet.weight_t,
            weight_x=cfg.loss.triplet.weight_x,
        )
        engine_cls = (
            torchreid.engine.ImageTripletEngine
            if is_image else torchreid.engine.VideoTripletEngine
        )
        engine = engine_cls(
            datamanager, model, **triplet_kwargs, **common_kwargs
        )
    return engine
def reset_config(cfg, args):
    """Override cfg.data fields from CLI args; falsy args leave cfg untouched."""
    for attr in ('root', 'sources', 'targets', 'transforms'):
        value = getattr(args, attr)
        if value:
            setattr(cfg.data, attr, value)
def main():
    """CLI entry point: parse args, merge config, build data/model/engine, run."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        '--config-file', type=str, default='', help='path to config file'
    )
    parser.add_argument(
        '-s',
        '--sources',
        type=str,
        nargs='+',
        help='source datasets (delimited by space)'
    )
    parser.add_argument(
        '-t',
        '--targets',
        type=str,
        nargs='+',
        help='target datasets (delimited by space)'
    )
    parser.add_argument(
        '--transforms', type=str, nargs='+', help='data augmentation'
    )
    parser.add_argument(
        '--root', type=str, default='', help='path to data root'
    )
    parser.add_argument(
        'opts',
        default=None,
        nargs=argparse.REMAINDER,
        help='Modify config options using the command-line'
    )
    args = parser.parse_args()
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    # Precedence: defaults < YAML config file < named CLI flags < trailing opts.
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)
    # NOTE(review): the timestamp is appended after the '.log' suffix, giving
    # names like 'train.log-2020-01-01-...' — confirm this is intentional.
    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    # Tee all stdout into a log file under the save dir.
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True
    datamanager = build_datamanager(cfg)
    # Map config model names to constructors: baselines, MixStyle variants,
    # domain-prior MixStyle, ablations and DropBlock regularized models.
    model_factory = {
        'resnet50_fc512': resnet50_fc512,
        'osnet_x1_0': osnet_x1_0,
        # mixstyle models
        'resnet50_fc512_ms12_a0d1': resnet50_fc512_ms12_a0d1,
        'resnet50_fc512_ms12_a0d2': resnet50_fc512_ms12_a0d2,
        'resnet50_fc512_ms12_a0d3': resnet50_fc512_ms12_a0d3,
        'resnet50_fc512_ms12_a0d1_domprior': resnet50_fc512_ms12_a0d1_domprior,
        'osnet_x1_0_ms23_a0d1': osnet_x1_0_ms23_a0d1,
        'osnet_x1_0_ms23_a0d2': osnet_x1_0_ms23_a0d2,
        'osnet_x1_0_ms23_a0d3': osnet_x1_0_ms23_a0d3,
        'osnet_x1_0_ms23_a0d1_domprior': osnet_x1_0_ms23_a0d1_domprior,
        # ablation
        'resnet50_fc512_ms1_a0d1': resnet50_fc512_ms1_a0d1,
        'resnet50_fc512_ms123_a0d1': resnet50_fc512_ms123_a0d1,
        'resnet50_fc512_ms1234_a0d1': resnet50_fc512_ms1234_a0d1,
        'resnet50_fc512_ms14_a0d1': resnet50_fc512_ms14_a0d1,
        'resnet50_fc512_ms23_a0d1': resnet50_fc512_ms23_a0d1,
        # dropblock models
        'resnet50_fc512_db12': resnet50_fc512_db12,
        'osnet_x1_0_db23': osnet_x1_0_db23
    }
    print('Building model: {}'.format(cfg.model.name))
    model = model_factory[cfg.model.name](
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu
    )
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width)
    )
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))
    # Optionally warm-start from a checkpoint (weights only).
    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)
    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer, **lr_scheduler_kwargs(cfg)
    )
    # Full resume: restores model/optimizer/scheduler state and start epoch.
    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )
    print(
        'Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type)
    )
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
if __name__ == '__main__':
    main()
| 7,293 | 32.925581 | 159 | py |
mixstyle-release | mixstyle-release-master/reid/models/osnet_db.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .dropblock import DropBlock2D, LinearScheduler
# Public star-import surface of this module. The DropBlock variant
# osnet_x1_0_db23 is defined below and was missing from the original list.
__all__ = [
    'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25',
    'osnet_ibn_x1_0', 'osnet_x1_0_db23'
]
# Google-Drive download links for ImageNet-pretrained OSNet checkpoints,
# keyed by architecture name; consumed by init_pretrained_weights() below.
pretrained_urls = {
    'osnet_x1_0':
    'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
    'osnet_x0_75':
    'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
    'osnet_x0_5':
    'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
    'osnet_x0_25':
    'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
    'osnet_ibn_x1_0':
    'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
    """Convolution followed by normalization and ReLU.

    With ``IN=True`` an affine InstanceNorm2d replaces BatchNorm2d
    (used to gain some style invariance in early layers).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        groups=1,
        IN=False
    ):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=stride, padding=padding, bias=False, groups=groups
        )
        if IN:
            self.bn = nn.InstanceNorm2d(out_channels, affine=True)
        else:
            self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1(nn.Module):
    """Pointwise (1x1) convolution with batch norm and ReLU."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=stride, padding=0, bias=False, groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1Linear(nn.Module):
    """Linear 1x1 projection: conv followed by batch norm, no activation."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=stride, padding=0, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return self.bn(self.conv(x))
class Conv3x3(nn.Module):
    """3x3 convolution (padding 1) with batch norm and ReLU."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 3,
            stride=stride, padding=1, bias=False, groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution.

    A linear 1x1 projection followed by a 3x3 depthwise convolution;
    only the depthwise stage is followed by BN + ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, padding=0, bias=False
        )
        self.conv2 = nn.Conv2d(
            out_channels, out_channels, 3,
            stride=1, padding=1, bias=False, groups=out_channels
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv2(self.conv1(x))))
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
    """A mini-network that generates channel-wise gates conditioned on input tensor.

    Squeeze (global average pool) -> 1x1 fc1 -> ReLU -> 1x1 fc2 -> activation,
    then rescales the input channel-wise (or returns the raw gates when
    ``return_gates=True``).
    """

    def __init__(
        self,
        in_channels,
        num_gates=None,
        return_gates=False,
        gate_activation='sigmoid',
        reduction=16,
        layer_norm=False
    ):
        super(ChannelGate, self).__init__()
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            in_channels,
            in_channels // reduction,
            kernel_size=1,
            bias=True,
            padding=0
        )
        self.norm1 = None
        if layer_norm:
            self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            in_channels // reduction,
            num_gates,
            kernel_size=1,
            bias=True,
            padding=0
        )
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU(inplace=True)
        elif gate_activation == 'linear':
            # identity gate: raw fc2 output is used directly
            self.gate_activation = None
        else:
            raise RuntimeError(
                "Unknown gate activation: {}".format(gate_activation)
            )

    def forward(self, x):
        # Renamed from `input`, which shadowed the builtin of the same name.
        identity = x
        x = self.global_avgpool(x)
        x = self.fc1(x)
        if self.norm1 is not None:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.fc2(x)
        if self.gate_activation is not None:
            x = self.gate_activation(x)
        if self.return_gates:
            return x
        return identity * x
class OSBlock(nn.Module):
    """Omni-scale residual block.

    Four parallel streams with growing receptive fields (1 to 4 stacked
    LightConv3x3), fused by a shared channel gate, then projected back and
    added to a (possibly projected) shortcut.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        IN=False,
        bottleneck_reduction=4,
        **kwargs
    ):
        super(OSBlock, self).__init__()
        mid_channels = out_channels // bottleneck_reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        self.conv2a = LightConv3x3(mid_channels, mid_channels)
        self.conv2b = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2c = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2d = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        self.downsample = None
        if in_channels != out_channels:
            self.downsample = Conv1x1Linear(in_channels, out_channels)
        self.IN = None
        if IN:
            self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        bottleneck = self.conv1(x)
        streams = (
            self.conv2a(bottleneck),
            self.conv2b(bottleneck),
            self.conv2c(bottleneck),
            self.conv2d(bottleneck),
        )
        # unified aggregation gate shared across all scales
        fused = sum(self.gate(s) for s in streams)
        out = self.conv3(fused)
        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut
        if self.IN is not None:
            out = self.IN(out)
        return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
    """Omni-Scale Network.

    Reference:
        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
        - Zhou et al. Learning Generalisable Omni-Scale Representations
          for Person Re-Identification. TPAMI, 2021.

    This variant can apply DropBlock regularization after the stages named
    in ``dropblock_layers`` (e.g. ['conv2', 'conv3']).
    """

    def __init__(
        self,
        num_classes,
        blocks,
        layers,
        channels,
        feature_dim=512,
        loss='softmax',
        IN=False,
        dropblock_layers=None,
        **kwargs
    ):
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(layers)
        assert num_blocks == len(channels) - 1
        self.loss = loss
        # Avoid the mutable-default-argument pitfall; [] was the old default.
        if dropblock_layers is None:
            dropblock_layers = []
        # convolutional backbone
        self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.conv2 = self._make_layer(
            blocks[0],
            layers[0],
            channels[0],
            channels[1],
            reduce_spatial_size=True,
            IN=IN
        )
        self.conv3 = self._make_layer(
            blocks[1],
            layers[1],
            channels[1],
            channels[2],
            reduce_spatial_size=True
        )
        self.conv4 = self._make_layer(
            blocks[2],
            layers[2],
            channels[2],
            channels[3],
            reduce_spatial_size=False
        )
        self.conv5 = Conv1x1(channels[3], channels[3])
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # fully connected embedding layer
        self.fc = self._construct_fc_layer(
            feature_dim, channels[3], dropout_p=None
        )
        # identity classification layer
        self.classifier = nn.Linear(self.feature_dim, num_classes)
        self.dropblock = None
        if dropblock_layers:
            # Linearly ramp drop probability from 0 to 0.1 over 100 forward
            # passes (one scheduler tick per call to featuremaps()).
            self.dropblock = LinearScheduler(
                DropBlock2D(drop_prob=0.1, block_size=7),
                start_value=0.,
                stop_value=0.1,
                nr_steps=100
            )
            # fixed typo: "ater" -> "after"
            print('Insert DropBlock after the following layers: {}'.format(dropblock_layers))
        self.dropblock_layers = dropblock_layers
        self._init_params()

    def _make_layer(
        self,
        block,
        layer,
        in_channels,
        out_channels,
        reduce_spatial_size,
        IN=False
    ):
        """Stack ``layer`` blocks; optionally end with 1x1 conv + 2x2 avg-pool."""
        layers = []
        layers.append(block(in_channels, out_channels, IN=IN))
        for i in range(1, layer):
            layers.append(block(out_channels, out_channels, IN=IN))
        if reduce_spatial_size:
            layers.append(
                nn.Sequential(
                    Conv1x1(out_channels, out_channels),
                    nn.AvgPool2d(2, stride=2)
                )
            )
        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Build the embedding FC; an int is promoted to a one-element list.

        None or a negative value disables the FC head (features are the
        pooled backbone output).
        """
        if fc_dims is None or fc_dims < 0:
            self.feature_dim = input_dim
            return None
        if isinstance(fc_dims, int):
            fc_dims = [fc_dims]
        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim
        self.feature_dim = fc_dims[-1]
        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming init for convs, unit/zero for norms, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        """Backbone forward pass; applies DropBlock after configured stages."""
        if self.dropblock is not None:
            # one scheduler tick per forward pass ramps the drop probability
            self.dropblock.step()
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        if 'conv2' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.conv3(x)
        if 'conv3' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.conv4(x)
        if 'conv4' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.conv5(x)
        return x

    def forward(self, x, return_featuremaps=False):
        """Return features (eval) or logits / (logits, features) (train)."""
        x = self.featuremaps(x)
        if return_featuremaps:
            return x
        v = self.global_avgpool(x)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
    """Initializes model with pretrained weights.

    Downloads (and caches under torch's cache dir) the checkpoint for
    ``key`` from pretrained_urls, then loads every tensor whose name and
    size match the model. Layers that don't match with pretrained layers
    in name or size are kept unchanged.
    """
    import os
    import gdown
    from collections import OrderedDict

    def _get_torch_home():
        # Mirrors torch.hub's cache-dir resolution (TORCH_HOME, then
        # XDG_CACHE_HOME/torch, then ~/.cache/torch).
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
                )
            )
        )
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    # exist_ok replaces the manual errno.EEXIST try/except dance.
    os.makedirs(model_dir, exist_ok=True)
    filename = key + '_imagenet.pth'
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        gdown.download(pretrained_urls[key], cached_file, quiet=False)
    state_dict = torch.load(cached_file)
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers, discarded_layers = [], []
    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:]  # discard the DataParallel 'module.' prefix
        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)
    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)
    if len(matched_layers) == 0:
        warnings.warn(
            'The pretrained weights from "{}" cannot be loaded, '
            'please check the key names manually '
            '(** ignored and continue **)'.format(cached_file)
        )
    else:
        print(
            'Successfully loaded imagenet pretrained weights from "{}"'.
            format(cached_file)
        )
        if len(discarded_layers) > 0:
            print(
                '** The following layers are discarded '
                'due to unmatched keys or layer size: {}'.
                format(discarded_layers)
            )
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at standard width (x1.0), optionally ImageNet-pretrained."""
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
    )
    model = OSNet(num_classes, loss=loss, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x1_0')
    return model
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at medium width (x0.75), optionally ImageNet-pretrained."""
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[48, 192, 288, 384],
    )
    model = OSNet(num_classes, loss=loss, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_75')
    return model
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at tiny width (x0.5), optionally ImageNet-pretrained."""
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[32, 128, 192, 256],
    )
    model = OSNet(num_classes, loss=loss, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_5')
    return model
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at very tiny width (x0.25), optionally ImageNet-pretrained."""
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[16, 64, 96, 128],
    )
    model = OSNet(num_classes, loss=loss, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x0_25')
    return model
def osnet_ibn_x1_0(
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
    """OSNet x1.0 with IBN (instance + batch norm) layers.

    Ref: Pan et al. Two at Once: Enhancing Learning and Generalization
    Capacities via IBN-Net. ECCV, 2018.
    """
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
    )
    model = OSNet(num_classes, loss=loss, IN=True, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_ibn_x1_0')
    return model
"""DropBlock models"""
def osnet_x1_0_db23(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet x1.0 with DropBlock applied after the conv2 and conv3 stages."""
    arch = dict(
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=[64, 256, 384, 512],
        dropblock_layers=['conv2', 'conv3'],
    )
    model = OSNet(num_classes, loss=loss, **arch, **kwargs)
    if pretrained:
        init_pretrained_weights(model, key='osnet_x1_0')
    return model
| 18,266 | 27.676609 | 108 | py |
mixstyle-release | mixstyle-release-master/reid/models/resnet_db.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .dropblock import DropBlock2D, LinearScheduler
# Public star-import surface of this module.
# NOTE(review): 'resnet50_fc512' is not defined in the visible part of this
# file — confirm it exists further down before relying on `import *`.
__all__ = [
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
# torchvision download URLs for ImageNet-pretrained backbone checkpoints,
# keyed by architecture name; consumed by init_pretrained_weights() below.
model_urls = {
    'resnet18':
    'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34':
    'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50':
    'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101':
    'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152':
    'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d':
    'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d':
    'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Bias-free 3x3 convolution; padding equals dilation to keep size."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, bias=False, dilation=dilation
    )
def conv1x1(in_planes, out_planes, stride=1):
    """Bias-free 1x1 (pointwise) convolution."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Two 3x3 convolutions with an identity/projection shortcut (expansion 1)."""
    expansion = 1

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(BasicBlock, self).__init__()
        norm_layer = norm_layer or nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64'
            )
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock"
            )
        # conv1 (and downsample, when given) perform any spatial downsampling
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand bottleneck block (expansion 4)."""
    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(Bottleneck, self).__init__()
        norm_layer = norm_layer or nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # conv2 (and downsample, when given) perform any spatial downsampling
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """Residual network.

    Reference:
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
        - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.

    Public keys:
        - ``resnet18``: ResNet18.
        - ``resnet34``: ResNet34.
        - ``resnet50``: ResNet50.
        - ``resnet101``: ResNet101.
        - ``resnet152``: ResNet152.
        - ``resnext50_32x4d``: ResNeXt50.
        - ``resnext101_32x8d``: ResNeXt101.
        - ``resnet50_fc512``: ResNet50 + FC.

    This variant can apply DropBlock regularization after the stages named
    in ``dropblock_layers`` (e.g. ['layer1', 'layer2']).
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        last_stride=2,
        fc_dims=None,
        dropout_p=None,
        dropblock_layers=None,
        **kwargs
    ):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.loss = loss
        self.feature_dim = 512 * block.expansion
        self.inplanes = 64
        self.dilation = 1
        # Avoid the mutable-default-argument pitfall; [] was the old default.
        if dropblock_layers is None:
            dropblock_layers = []
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".
                format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=2,
            dilate=replace_stride_with_dilation[1]
        )
        # last_stride=1 keeps a larger final feature map (common in re-id)
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=last_stride,
            dilate=replace_stride_with_dilation[2]
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = self._construct_fc_layer(
            fc_dims, 512 * block.expansion, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)
        self.dropblock = None
        if dropblock_layers:
            # Linearly ramp drop probability from 0 to 0.1 over 100 forward
            # passes (one scheduler tick per call to featuremaps()).
            self.dropblock = LinearScheduler(
                DropBlock2D(drop_prob=0.1, block_size=7),
                start_value=0.,
                stop_value=0.1,
                nr_steps=100
            )
            # fixed typo: "ater" -> "after"
            print('Insert DropBlock after the following layers: {}'.format(dropblock_layers))
        self.dropblock_layers = dropblock_layers
        self._init_params()
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of ``blocks`` blocks, with optional dilation."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )
        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups,
                self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer
                )
            )
        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None
        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )
        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim
        self.feature_dim = fc_dims[-1]
        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming init for convs, unit/zero for norms, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        """Backbone forward pass; applies DropBlock after configured stages."""
        if self.dropblock is not None:
            # one scheduler tick per forward pass ramps the drop probability
            self.dropblock.step()
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        if 'layer1' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.layer2(x)
        if 'layer2' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.layer3(x)
        if 'layer3' in self.dropblock_layers:
            x = self.dropblock(x)
        x = self.layer4(x)
        return x

    def forward(self, x):
        """Return features (eval) or logits / (logits, features) (train)."""
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    pretrained = model_zoo.load_url(model_url)
    current = model.state_dict()
    # keep only tensors whose name and shape both match the model
    compatible = {
        name: tensor
        for name, tensor in pretrained.items()
        if name in current and current[name].size() == tensor.size()
    }
    current.update(compatible)
    model.load_state_dict(current)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""
ResNet + FC
"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
"""DropBlock models"""
def resnet50_fc512_db12(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
dropblock_layers=['layer1', 'layer2'],
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
| 16,436 | 27.685864 | 106 | py |
mixstyle-release | mixstyle-release-master/reid/models/osnet_ms.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
# Google Drive links for the ImageNet-pretrained checkpoints that
# ``init_pretrained_weights`` downloads (via gdown) and caches under the
# torch cache directory.
pretrained_urls = {
    'osnet_x1_0':
    'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
    'osnet_x0_75':
    'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
    'osnet_x0_5':
    'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
    'osnet_x0_25':
    'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
    'osnet_ibn_x1_0':
    'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
    """Convolution followed by normalization and ReLU.

    With ``IN=True`` the normalization is affine InstanceNorm (used by the
    IBN variant); otherwise it is BatchNorm.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1, IN=False):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding,
                              bias=False, groups=groups)
        self.bn = (nn.InstanceNorm2d(out_channels, affine=True)
                   if IN else nn.BatchNorm2d(out_channels))
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1(nn.Module):
    """Pointwise (1x1) convolution + batch norm + ReLU."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride,
                              padding=0, bias=False, groups=groups)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1Linear(nn.Module):
    """Pointwise (1x1) convolution + batch norm, with no non-linearity."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 1,
                              stride=stride, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return self.bn(self.conv(x))
class Conv3x3(nn.Module):
    """3x3 convolution (padding 1) + batch norm + ReLU."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, stride=stride,
                              padding=1, bias=False, groups=groups)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution.
    1x1 (linear) + dw 3x3 (nonlinear).
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        # Pointwise projection to the target width. "Linear" here means no
        # norm/activation between the two convolutions.
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, padding=0, bias=False
        )
        # Depthwise 3x3 (groups == out_channels) keeps the op cheap.
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            bias=False,
            groups=out_channels
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.bn(x)
        x = self.relu(x)
        return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
    """A mini-network that generates channel-wise gates conditioned on input tensor.

    Squeeze-and-excitation style: global average pool, then a bottleneck MLP
    (two 1x1 convolutions with a ``reduction``-fold narrowing), then an
    activation that produces one gate value per channel.
    """

    def __init__(
        self,
        in_channels,
        num_gates=None,
        return_gates=False,
        gate_activation='sigmoid',
        reduction=16,
        layer_norm=False
    ):
        super(ChannelGate, self).__init__()
        # Default: one gate per input channel.
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        # Squeeze: collapse each channel's spatial map to a single scalar.
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            in_channels,
            in_channels // reduction,
            kernel_size=1,
            bias=True,
            padding=0
        )
        self.norm1 = None
        if layer_norm:
            self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            in_channels // reduction,
            num_gates,
            kernel_size=1,
            bias=True,
            padding=0
        )
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU(inplace=True)
        elif gate_activation == 'linear':
            # 'linear' means no activation on the gate values.
            self.gate_activation = None
        else:
            raise RuntimeError(
                "Unknown gate activation: {}".format(gate_activation)
            )

    def forward(self, x):
        input = x
        x = self.global_avgpool(x)
        x = self.fc1(x)
        if self.norm1 is not None:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.fc2(x)
        if self.gate_activation is not None:
            x = self.gate_activation(x)
        if self.return_gates:
            # Expose the raw gate tensor (e.g. for inspection).
            return x
        # Reweight the input channels by their gates (broadcast over H, W).
        return input * x
class OSBlock(nn.Module):
    """Omni-scale feature learning block.

    Four parallel streams of stacked LightConv3x3 cover receptive fields of
    3/5/7/9; a shared ChannelGate fuses them before the residual addition.
    """

    def __init__(self, in_channels, out_channels, IN=False,
                 bottleneck_reduction=4, **kwargs):
        super(OSBlock, self).__init__()
        mid_channels = out_channels // bottleneck_reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        # Attribute names (conv2a..conv2d, gate, conv3, downsample, IN) are
        # kept so pretrained state dicts keep loading.
        self.conv2a = LightConv3x3(mid_channels, mid_channels)
        self.conv2b = nn.Sequential(
            *[LightConv3x3(mid_channels, mid_channels) for _ in range(2)]
        )
        self.conv2c = nn.Sequential(
            *[LightConv3x3(mid_channels, mid_channels) for _ in range(3)]
        )
        self.conv2d = nn.Sequential(
            *[LightConv3x3(mid_channels, mid_channels) for _ in range(4)]
        )
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        self.downsample = (Conv1x1Linear(in_channels, out_channels)
                           if in_channels != out_channels else None)
        self.IN = nn.InstanceNorm2d(out_channels, affine=True) if IN else None

    def forward(self, x):
        mid = self.conv1(x)
        # Unified aggregation gate: the same ChannelGate weighs every scale.
        fused = (self.gate(self.conv2a(mid))
                 + self.gate(self.conv2b(mid))
                 + self.gate(self.conv2c(mid))
                 + self.gate(self.conv2d(mid)))
        out = self.conv3(fused)
        residual = x if self.downsample is None else self.downsample(x)
        out = out + residual
        if self.IN is not None:
            out = self.IN(out)
        return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
    """Omni-Scale Network.

    Reference:
        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
        - Zhou et al. Learning Generalisable Omni-Scale Representations
          for Person Re-Identification. TPAMI, 2021.
    """

    def __init__(
        self,
        num_classes,
        blocks,
        layers,
        channels,
        feature_dim=512,
        loss='softmax',
        IN=False,
        mixstyle_layers=[],
        mixstyle_p=0.5,
        mixstyle_alpha=0.3,
        **kwargs
    ):
        # blocks: one block class per stage (conv2..conv4); layers: number of
        # blocks per stage; channels: stem width followed by stage widths.
        # mixstyle_layers: names from {'conv2','conv3','conv4'} after which
        # MixStyle is applied in featuremaps().
        # NOTE(review): mixstyle_layers uses a mutable default; it is only
        # read here, never mutated.
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(layers)
        assert num_blocks == len(channels) - 1
        self.loss = loss
        # convolutional backbone
        self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.conv2 = self._make_layer(
            blocks[0],
            layers[0],
            channels[0],
            channels[1],
            reduce_spatial_size=True,
            IN=IN
        )
        self.conv3 = self._make_layer(
            blocks[1],
            layers[1],
            channels[1],
            channels[2],
            reduce_spatial_size=True
        )
        self.conv4 = self._make_layer(
            blocks[2],
            layers[2],
            channels[2],
            channels[3],
            reduce_spatial_size=False
        )
        self.conv5 = Conv1x1(channels[3], channels[3])
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # fully connected layer (optional embedding head; sets self.feature_dim)
        self.fc = self._construct_fc_layer(
            feature_dim, channels[3], dropout_p=None
        )
        # identity classification layer
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self.mixstyle = None
        if mixstyle_layers:
            # One MixStyle module is shared by every insertion point.
            self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='random')
            print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
        self.mixstyle_layers = mixstyle_layers

        self._init_params()

    def _make_layer(
        self,
        block,
        layer,
        in_channels,
        out_channels,
        reduce_spatial_size,
        IN=False
    ):
        # Stack `layer` blocks; only the first changes the channel count.
        layers = []
        layers.append(block(in_channels, out_channels, IN=IN))
        for i in range(1, layer):
            layers.append(block(out_channels, out_channels, IN=IN))
        if reduce_spatial_size:
            # Transition: 1x1 conv then 2x2 average pooling to halve H and W.
            layers.append(
                nn.Sequential(
                    Conv1x1(out_channels, out_channels),
                    nn.AvgPool2d(2, stride=2)
                )
            )
        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        # Build the optional embedding head; sets self.feature_dim as a side
        # effect (consumed by the classifier).
        # NOTE(review): ``fc_dims < 0`` would raise TypeError for a
        # list/tuple; in this file fc_dims is always an int or None.
        if fc_dims is None or fc_dims < 0:
            self.feature_dim = input_dim
            return None

        if isinstance(fc_dims, int):
            fc_dims = [fc_dims]

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        # He init for convs, unit/zero for norms, N(0, 0.01) for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        # Stem plus stages, applying MixStyle after each configured stage.
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        if 'conv2' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv3(x)
        if 'conv3' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv4(x)
        if 'conv4' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv5(x)
        return x

    def forward(self, x, return_featuremaps=False):
        # Returns: feature maps if requested; embeddings in eval mode;
        # logits ('softmax') or (logits, embeddings) ('triplet') in training.
        x = self.featuremaps(x)
        if return_featuremaps:
            return x
        v = self.global_avgpool(x)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
    """Initializes model with pretrained weights.
    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    import os
    import errno
    import gdown
    from collections import OrderedDict

    def _get_torch_home():
        # Resolve the cache root the same way torch.hub does:
        # $TORCH_HOME, else $XDG_CACHE_HOME/torch, else ~/.cache/torch.
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
                )
            )
        )
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Directory already exists, ignore.
            pass
        else:
            # Unexpected OSError, re-raise.
            raise
    filename = key + '_imagenet.pth'
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file):
        # Download once from Google Drive; later calls reuse the cache.
        gdown.download(pretrained_urls[key], cached_file, quiet=False)
    state_dict = torch.load(cached_file)
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    matched_layers, discarded_layers = [], []
    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:] # discard module.
        # Copy only entries that match the current model in name and shape.
        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)
    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)
    if len(matched_layers) == 0:
        warnings.warn(
            'The pretrained weights from "{}" cannot be loaded, '
            'please check the key names manually '
            '(** ignored and continue **)'.format(cached_file)
        )
    else:
        print(
            'Successfully loaded imagenet pretrained weights from "{}"'.
            format(cached_file)
        )
        if len(discarded_layers) > 0:
            print(
                '** The following layers are discarded '
                'due to unmatched keys or layer size: {}'.
                format(discarded_layers)
            )
##########
# Instantiation
##########


def _make_osnet(key, channels, num_classes, pretrained, loss, IN=False, **kwargs):
    """Shared constructor: every OSNet variant below uses 2-2-2 OSBlocks and
    differs only in stage widths, IN usage, and MixStyle settings (removes
    the copy-pasted boilerplate).

    Args:
        key (str): name in ``pretrained_urls`` used to fetch/cache weights.
        channels (list): stem width followed by the three stage widths.
        num_classes (int): classification head size.
        pretrained (bool): load ImageNet weights when True.
        loss (str): 'softmax' or 'triplet'.
        IN (bool): use InstanceNorm in the early layers (IBN variant).
        **kwargs: forwarded to ``OSNet`` (e.g. mixstyle_layers, mixstyle_alpha).
    """
    model = OSNet(
        num_classes,
        blocks=[OSBlock, OSBlock, OSBlock],
        layers=[2, 2, 2],
        channels=channels,
        loss=loss,
        IN=IN,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(model, key=key)
    return model


def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # standard size (width x1.0)
    return _make_osnet('osnet_x1_0', [64, 256, 384, 512],
                       num_classes, pretrained, loss, **kwargs)


def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # medium size (width x0.75)
    return _make_osnet('osnet_x0_75', [48, 192, 288, 384],
                       num_classes, pretrained, loss, **kwargs)


def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # tiny size (width x0.5)
    return _make_osnet('osnet_x0_5', [32, 128, 192, 256],
                       num_classes, pretrained, loss, **kwargs)


def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # very tiny size (width x0.25)
    return _make_osnet('osnet_x0_25', [16, 64, 96, 128],
                       num_classes, pretrained, loss, **kwargs)


def osnet_ibn_x1_0(
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
    # standard size (width x1.0) + IBN layer
    # Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
    return _make_osnet('osnet_ibn_x1_0', [64, 256, 384, 512],
                       num_classes, pretrained, loss, IN=True, **kwargs)


"""
MixStyle models
"""


def osnet_x1_0_ms23_a0d1(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # width x1.0, MixStyle after conv2/conv3, alpha=0.1
    return _make_osnet('osnet_x1_0', [64, 256, 384, 512],
                       num_classes, pretrained, loss,
                       mixstyle_layers=['conv2', 'conv3'],
                       mixstyle_alpha=0.1, **kwargs)


def osnet_x1_0_ms23_a0d2(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # width x1.0, MixStyle after conv2/conv3, alpha=0.2
    return _make_osnet('osnet_x1_0', [64, 256, 384, 512],
                       num_classes, pretrained, loss,
                       mixstyle_layers=['conv2', 'conv3'],
                       mixstyle_alpha=0.2, **kwargs)


def osnet_x1_0_ms23_a0d3(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    # width x1.0, MixStyle after conv2/conv3, alpha=0.3
    return _make_osnet('osnet_x1_0', [64, 256, 384, 512],
                       num_classes, pretrained, loss,
                       mixstyle_layers=['conv2', 'conv3'],
                       mixstyle_alpha=0.3, **kwargs)
| 19,075 | 27.5142 | 108 | py |
mixstyle-release | mixstyle-release-master/reid/models/resnet_ms.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
# ImageNet checkpoint URLs consumed by ``init_pretrained_weights`` via
# ``torch.utils.model_zoo.load_url``.
model_urls = {
    'resnet18':
    'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34':
    'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50':
    'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101':
    'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152':
    'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d':
    'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d':
    'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Return a bias-free 3x3 convolution; padding equals dilation so the
    spatial size is preserved at stride 1."""
    conv_kwargs = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(in_planes, out_planes, 1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (expansion 1)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 groups=1, base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64'
            )
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock"
            )
        # conv1 (together with the optional downsample) carries the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """1x1-3x3-1x1 bottleneck residual block; output width is
    ``planes * expansion``."""

    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # ResNeXt-style width scaling of the middle 3x3 convolution.
        width = int(planes * (base_width/64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            # Project the shortcut so shapes match before the addition.
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """Residual network with optional MixStyle regularization.

    Reference:
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
        - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.

    Public keys:
        - ``resnet18``: ResNet18.
        - ``resnet34``: ResNet34.
        - ``resnet50``: ResNet50.
        - ``resnet101``: ResNet101.
        - ``resnet152``: ResNet152.
        - ``resnext50_32x4d``: ResNeXt50.
        - ``resnext101_32x8d``: ResNeXt101.
        - ``resnet50_fc512``: ResNet50 + FC.
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        last_stride=2,
        fc_dims=None,
        dropout_p=None,
        mixstyle_layers=[],
        mixstyle_p=0.5,
        mixstyle_alpha=0.3,
        **kwargs
    ):
        # mixstyle_layers: names from {'layer1'..'layer4'} after which
        # MixStyle is applied in featuremaps().
        # NOTE(review): mixstyle_layers uses a mutable default; it is only
        # read here, never mutated.
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.loss = loss
        self.feature_dim = 512 * block.expansion
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".
                format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=2,
            dilate=replace_stride_with_dilation[1]
        )
        # last_stride=1 keeps a larger final feature map (common for re-ID).
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=last_stride,
            dilate=replace_stride_with_dilation[2]
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = self._construct_fc_layer(
            fc_dims, 512 * block.expansion, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self.mixstyle = None
        if mixstyle_layers:
            # One MixStyle module is shared by every insertion point.
            self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='random')
            print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
        self.mixstyle_layers = mixstyle_layers

        self._init_params()

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        # Build one stage of `blocks` residual blocks; only the first block
        # strides and/or projects the shortcut.
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade stride for dilation (keeps spatial resolution).
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups,
                self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer
                )
            )

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        # Side effect: the classifier reads self.feature_dim.
        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        # He init for convs, unit/zero for norms, N(0, 0.01) for linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        # Stem plus stages, applying MixStyle after each configured stage
        # (layer4 included, unlike the DropBlock variant of this backbone).
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        if 'layer1' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer2(x)
        if 'layer2' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer3(x)
        if 'layer3' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer4(x)
        if 'layer4' in self.mixstyle_layers:
            x = self.mixstyle(x)
        return x

    def forward(self, x):
        # Returns embeddings in eval mode; logits ('softmax') or
        # (logits, embeddings) ('triplet') in training.
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.
    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    reference = model_zoo.load_url(model_url)
    current = model.state_dict()
    # Copy only entries matching the current model in both key and shape,
    # so a differently-sized classifier head keeps its fresh init.
    compatible = {
        name: tensor
        for name, tensor in reference.items()
        if name in current and current[name].size() == tensor.size()
    }
    current.update(compatible)
    model.load_state_dict(current)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""
ResNet + FC
"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
"""MixStyle models"""
def resnet50_fc512_ms12_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms12_a0d2(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.2,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms12_a0d3(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.3,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
# more variants in which mixstyle is applied to different layers
def resnet50_fc512_ms1_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms123_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2', 'layer3'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms1234_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2', 'layer3', 'layer4'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms23_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet50-fc512 with MixStyle (alpha=0.1) after layer2 and layer3."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None,
        mixstyle_layers=['layer2', 'layer3'], mixstyle_alpha=0.1, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
def resnet50_fc512_ms14_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet50-fc512 with MixStyle (alpha=0.1) after layer1 and layer4."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None,
        mixstyle_layers=['layer1', 'layer4'], mixstyle_alpha=0.1, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
| 19,818 | 27.516547 | 106 | py |
mixstyle-release | mixstyle-release-master/reid/models/mixstyle.py | import random
from contextlib import contextmanager
import torch
import torch.nn as nn
def deactivate_mixstyle(m):
    """``model.apply`` hook: switch a MixStyle module off.

    Uses ``isinstance`` instead of ``type(m) == MixStyle`` (PEP 8 idiom),
    which also toggles MixStyle subclasses.
    """
    if isinstance(m, MixStyle):
        m.set_activation_status(False)
def activate_mixstyle(m):
    """``model.apply`` hook: switch a MixStyle module on.

    Uses ``isinstance`` instead of ``type(m) == MixStyle`` (PEP 8 idiom),
    which also toggles MixStyle subclasses.
    """
    if isinstance(m, MixStyle):
        m.set_activation_status(True)
def random_mixstyle(m):
    """``model.apply`` hook: set MixStyle to mix with a random batch shuffle.

    Uses ``isinstance`` instead of ``type(m) == MixStyle`` (PEP 8 idiom),
    which also covers MixStyle subclasses.
    """
    if isinstance(m, MixStyle):
        m.update_mix_method('random')
def crossdomain_mixstyle(m):
    """``model.apply`` hook: set MixStyle to mix across batch halves (domains).

    Uses ``isinstance`` instead of ``type(m) == MixStyle`` (PEP 8 idiom),
    which also covers MixStyle subclasses.
    """
    if isinstance(m, MixStyle):
        m.update_mix_method('crossdomain')
@contextmanager
def run_without_mixstyle(model):
    """Context manager that temporarily disables MixStyle in ``model``.

    Assumes MixStyle was initially activated; it is re-activated on exit
    even if the wrapped code raises.
    """
    try:
        model.apply(deactivate_mixstyle)
        yield
    finally:
        model.apply(activate_mixstyle)
@contextmanager
def run_with_mixstyle(model, mix=None):
    """Context manager that temporarily enables MixStyle in ``model``.

    Assumes MixStyle was initially deactivated. If ``mix`` is 'random' or
    'crossdomain' the mixing strategy is updated first; any other value
    leaves the strategy untouched. MixStyle is deactivated again on exit.
    """
    strategy_hooks = {'random': random_mixstyle, 'crossdomain': crossdomain_mixstyle}
    hook = strategy_hooks.get(mix)
    if hook is not None:
        model.apply(hook)

    try:
        model.apply(activate_mixstyle)
        yield
    finally:
        model.apply(deactivate_mixstyle)
class MixStyle(nn.Module):
    """MixStyle: mixes per-instance feature statistics within a mini-batch.

    Reference:
      Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
    """

    def __init__(self, p=0.5, alpha=0.1, eps=1e-6, mix='random'):
        """
        Args:
            p (float): probability of using MixStyle.
            alpha (float): parameter of the Beta distribution.
            eps (float): scaling parameter to avoid numerical issues.
            mix (str): how to mix ('random' or 'crossdomain').
        """
        super().__init__()
        self.p = p
        self.beta = torch.distributions.Beta(alpha, alpha)
        self.eps = eps
        self.alpha = alpha
        self.mix = mix
        self._activated = True

    def __repr__(self):
        return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})'

    def set_activation_status(self, status=True):
        # Toggled externally (e.g. via model.apply hooks) to turn MixStyle on/off.
        self._activated = status

    def update_mix_method(self, mix='random'):
        self.mix = mix

    def forward(self, x):
        """Mix channel-wise mean/std of ``x`` (N, C, H, W) across samples."""
        if not self.training or not self._activated:
            return x

        if random.random() > self.p:
            return x

        B = x.size(0)

        # Per-sample, per-channel statistics; detached so gradients do not
        # flow through the style statistics themselves.
        mu = x.mean(dim=[2, 3], keepdim=True)
        var = x.var(dim=[2, 3], keepdim=True)
        sig = (var + self.eps).sqrt()
        mu, sig = mu.detach(), sig.detach()
        x_normed = (x-mu) / sig

        lmda = self.beta.sample((B, 1, 1, 1))
        lmda = lmda.to(x.device)

        if self.mix == 'random':
            # random shuffle
            perm = torch.randperm(B)

        elif self.mix == 'crossdomain':
            # split into two halves and swap the order
            perm = torch.arange(B - 1, -1, -1)  # inverse index
            perm_b, perm_a = perm.chunk(2)
            # FIX: shuffle each half by its actual length. The previous
            # torch.randperm(B // 2) dropped one index whenever B was odd
            # (chunk() makes the first half one element longer), leaving
            # mu[perm]/sig[perm] shorter than the batch and crashing the
            # broadcast below.
            perm_b = perm_b[torch.randperm(perm_b.size(0))]
            perm_a = perm_a[torch.randperm(perm_a.size(0))]
            perm = torch.cat([perm_b, perm_a], 0)

        else:
            raise NotImplementedError

        mu2, sig2 = mu[perm], sig[perm]
        mu_mix = mu*lmda + mu2 * (1-lmda)
        sig_mix = sig*lmda + sig2 * (1-lmda)

        return x_normed*sig_mix + mu_mix
| 3,127 | 24.430894 | 90 | py |
mixstyle-release | mixstyle-release-master/reid/models/osnet_ms2.py | from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
# Public factory functions exported by this module.
__all__ = [
    'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]

# Google-Drive links for ImageNet-pretrained OSNet checkpoints, keyed by the
# factory name passed as ``key`` to ``init_pretrained_weights``.
pretrained_urls = {
    'osnet_x1_0':
    'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
    'osnet_x0_75':
    'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
    'osnet_x0_5':
    'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
    'osnet_x0_25':
    'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
    'osnet_ibn_x1_0':
    'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
    """Convolution layer (conv + bn + relu)."""

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        padding=0,
        groups=1,
        IN=False
    ):
        super(ConvLayer, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
            groups=groups
        )
        # Instance norm (affine) replaces batch norm when IN is requested.
        self.bn = (
            nn.InstanceNorm2d(out_channels, affine=True)
            if IN else nn.BatchNorm2d(out_channels)
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1(nn.Module):
    """1x1 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv1x1, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            1,
            stride=stride,
            padding=0,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class Conv1x1Linear(nn.Module):
    """1x1 convolution + bn (w/o non-linearity)."""

    def __init__(self, in_channels, out_channels, stride=1):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1, stride=stride, padding=0, bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        # No activation: this layer ends residual branches before the add.
        return self.bn(self.conv(x))
class Conv3x3(nn.Module):
    """3x3 convolution + bn + relu."""

    def __init__(self, in_channels, out_channels, stride=1, groups=1):
        super(Conv3x3, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            3,
            stride=stride,
            padding=1,
            bias=False,
            groups=groups
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
class LightConv3x3(nn.Module):
    """Lightweight 3x3 convolution.

    1x1 (linear) + dw 3x3 (nonlinear).
    """

    def __init__(self, in_channels, out_channels):
        super(LightConv3x3, self).__init__()
        # Pointwise projection, then a depthwise 3x3 (groups == channels).
        self.conv1 = nn.Conv2d(
            in_channels, out_channels, 1, stride=1, padding=0, bias=False
        )
        self.conv2 = nn.Conv2d(
            out_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            bias=False,
            groups=out_channels
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.bn(self.conv2(self.conv1(x))))
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
    """A mini-network that generates channel-wise gates conditioned on input tensor.

    Squeeze (global avg-pool) -> fc1 -> [LayerNorm] -> ReLU -> fc2 -> activation;
    the resulting per-channel gates multiply the input (or are returned directly
    when ``return_gates`` is True).
    """

    def __init__(
        self,
        in_channels,
        num_gates=None,
        return_gates=False,
        gate_activation='sigmoid',
        reduction=16,
        layer_norm=False
    ):
        """
        Args:
            in_channels (int): number of input channels.
            num_gates (int or None): number of gates; defaults to ``in_channels``.
            return_gates (bool): return the gate tensor instead of gated input.
            gate_activation (str): 'sigmoid', 'relu' or 'linear'.
            reduction (int): channel reduction in the bottleneck fc.
            layer_norm (bool): apply LayerNorm after fc1.
        """
        super(ChannelGate, self).__init__()
        if num_gates is None:
            num_gates = in_channels
        self.return_gates = return_gates
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(
            in_channels,
            in_channels // reduction,
            kernel_size=1,
            bias=True,
            padding=0
        )
        self.norm1 = None
        if layer_norm:
            self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(
            in_channels // reduction,
            num_gates,
            kernel_size=1,
            bias=True,
            padding=0
        )
        if gate_activation == 'sigmoid':
            self.gate_activation = nn.Sigmoid()
        elif gate_activation == 'relu':
            self.gate_activation = nn.ReLU(inplace=True)
        elif gate_activation == 'linear':
            self.gate_activation = None
        else:
            raise RuntimeError(
                "Unknown gate activation: {}".format(gate_activation)
            )

    def forward(self, x):
        # Keep a handle on the un-gated features; renamed from ``input``
        # to avoid shadowing the builtin.
        inp = x
        x = self.global_avgpool(x)
        x = self.fc1(x)
        if self.norm1 is not None:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.fc2(x)
        if self.gate_activation is not None:
            x = self.gate_activation(x)
        if self.return_gates:
            return x
        return inp * x
class OSBlock(nn.Module):
    """Omni-scale feature learning block."""

    def __init__(
        self,
        in_channels,
        out_channels,
        IN=False,
        bottleneck_reduction=4,
        **kwargs
    ):
        super(OSBlock, self).__init__()
        mid_channels = out_channels // bottleneck_reduction
        self.conv1 = Conv1x1(in_channels, mid_channels)
        # Four parallel streams of stacked light 3x3 convs (receptive
        # fields 3/5/7/9).
        self.conv2a = LightConv3x3(mid_channels, mid_channels)
        self.conv2b = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2c = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.conv2d = nn.Sequential(
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
            LightConv3x3(mid_channels, mid_channels),
        )
        self.gate = ChannelGate(mid_channels)
        self.conv3 = Conv1x1Linear(mid_channels, out_channels)
        self.downsample = None
        if in_channels != out_channels:
            self.downsample = Conv1x1Linear(in_channels, out_channels)
        self.IN = None
        if IN:
            self.IN = nn.InstanceNorm2d(out_channels, affine=True)

    def forward(self, x):
        mid = self.conv1(x)
        streams = (self.conv2a(mid), self.conv2b(mid),
                   self.conv2c(mid), self.conv2d(mid))
        # Unified aggregation gate: the same gate weighs all four scales.
        fused = sum(self.gate(s) for s in streams)
        out = self.conv3(fused)
        shortcut = x if self.downsample is None else self.downsample(x)
        out = out + shortcut
        if self.IN is not None:
            out = self.IN(out)
        return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
    """Omni-Scale Network.

    Reference:
        - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
        - Zhou et al. Learning Generalisable Omni-Scale Representations
          for Person Re-Identification. TPAMI, 2021.
    """

    def __init__(
        self,
        num_classes,
        blocks,
        layers,
        channels,
        feature_dim=512,
        loss='softmax',
        IN=False,
        mixstyle_layers=(),
        mixstyle_p=0.5,
        mixstyle_alpha=0.3,
        **kwargs
    ):
        """
        Args:
            num_classes (int): number of identities for the classifier.
            blocks (list): block class per stage (e.g. OSBlock).
            layers (list): number of blocks per stage (same length as blocks).
            channels (list): channel widths; one more entry than stages.
            feature_dim (int, list/tuple or None): fc head dimension(s);
                None (or a negative int) disables the fc head.
            loss (str): 'softmax' or 'triplet' (controls forward outputs).
            IN (bool): use instance norm in the stem / stage blocks.
            mixstyle_layers (sequence): subset of {'conv2','conv3','conv4'}
                after which MixStyle is applied; empty disables MixStyle.
                (Default is now an immutable tuple instead of a mutable list.)
            mixstyle_p (float): MixStyle probability.
            mixstyle_alpha (float): MixStyle Beta parameter.
        """
        super(OSNet, self).__init__()
        num_blocks = len(blocks)
        assert num_blocks == len(layers)
        assert num_blocks == len(channels) - 1
        self.loss = loss

        # convolutional backbone
        self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.conv2 = self._make_layer(
            blocks[0],
            layers[0],
            channels[0],
            channels[1],
            reduce_spatial_size=True,
            IN=IN
        )
        self.conv3 = self._make_layer(
            blocks[1],
            layers[1],
            channels[1],
            channels[2],
            reduce_spatial_size=True
        )
        self.conv4 = self._make_layer(
            blocks[2],
            layers[2],
            channels[2],
            channels[3],
            reduce_spatial_size=False
        )
        self.conv5 = Conv1x1(channels[3], channels[3])
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        # fully connected layer (also sets self.feature_dim)
        self.fc = self._construct_fc_layer(
            feature_dim, channels[3], dropout_p=None
        )
        # identity classification layer
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self.mixstyle = None
        if mixstyle_layers:
            # Cross-domain mixing: assumes each half of the batch comes from
            # a different domain (see MixStyle).
            self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='crossdomain')
            print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
        self.mixstyle_layers = mixstyle_layers

        self._init_params()

    def _make_layer(
        self,
        block,
        layer,
        in_channels,
        out_channels,
        reduce_spatial_size,
        IN=False
    ):
        """Stack ``layer`` blocks; optionally end with 1x1 conv + 2x2 avg-pool."""
        layers = []

        layers.append(block(in_channels, out_channels, IN=IN))
        for i in range(1, layer):
            layers.append(block(out_channels, out_channels, IN=IN))

        if reduce_spatial_size:
            layers.append(
                nn.Sequential(
                    Conv1x1(out_channels, out_channels),
                    nn.AvgPool2d(2, stride=2)
                )
            )

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Build the optional fc head and set ``self.feature_dim``.

        Accepts an int, a list/tuple of ints, or None. FIX: the previous
        ``fc_dims < 0`` check raised TypeError when ``fc_dims`` was a
        list/tuple; the negative-means-disabled convention is now only
        applied to ints.
        """
        if fc_dims is None or (isinstance(fc_dims, int) and fc_dims < 0):
            # No fc head: features come straight from the backbone.
            self.feature_dim = input_dim
            return None

        if isinstance(fc_dims, int):
            fc_dims = [fc_dims]

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming init for convs, unit batch norms, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        """Backbone forward pass; MixStyle is applied after configured stages."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.conv2(x)
        if 'conv2' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv3(x)
        if 'conv3' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv4(x)
        if 'conv4' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.conv5(x)
        return x

    def forward(self, x, return_featuremaps=False):
        """Return logits (train/softmax), (logits, feats) (train/triplet) or feats (eval)."""
        x = self.featuremaps(x)
        if return_featuremaps:
            return x
        v = self.global_avgpool(x)
        v = v.view(v.size(0), -1)
        if self.fc is not None:
            v = self.fc(v)
        if not self.training:
            return v
        y = self.classifier(v)
        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
    """Initializes model with pretrained weights.

    Downloads (once, via gdown) the checkpoint for ``key`` from
    ``pretrained_urls`` into the torch cache directory, then copies every
    tensor whose name and size match into ``model``.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    import os
    import errno
    import gdown
    from collections import OrderedDict

    def _get_torch_home():
        # Mirrors torch.hub's cache resolution: $TORCH_HOME, else
        # $XDG_CACHE_HOME/torch, else ~/.cache/torch.
        ENV_TORCH_HOME = 'TORCH_HOME'
        ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
        DEFAULT_CACHE_DIR = '~/.cache'
        torch_home = os.path.expanduser(
            os.getenv(
                ENV_TORCH_HOME,
                os.path.join(
                    os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
                )
            )
        )
        return torch_home

    torch_home = _get_torch_home()
    model_dir = os.path.join(torch_home, 'checkpoints')
    try:
        os.makedirs(model_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            # Directory already exists, ignore.
            pass
        else:
            # Unexpected OSError, re-raise.
            raise
    filename = key + '_imagenet.pth'
    cached_file = os.path.join(model_dir, filename)

    # Download only on cache miss.
    if not os.path.exists(cached_file):
        gdown.download(pretrained_urls[key], cached_file, quiet=False)

    state_dict = torch.load(cached_file)
    model_dict = model.state_dict()
    new_state_dict = OrderedDict()
    # Track which checkpoint tensors were used so we can report mismatches.
    matched_layers, discarded_layers = [], []

    for k, v in state_dict.items():
        if k.startswith('module.'):
            k = k[7:] # discard module.

        # Keep only tensors whose name AND shape match the target model.
        if k in model_dict and model_dict[k].size() == v.size():
            new_state_dict[k] = v
            matched_layers.append(k)
        else:
            discarded_layers.append(k)

    model_dict.update(new_state_dict)
    model.load_state_dict(model_dict)

    if len(matched_layers) == 0:
        # Nothing matched: likely a wrong checkpoint/key; warn but continue.
        warnings.warn(
            'The pretrained weights from "{}" cannot be loaded, '
            'please check the key names manually '
            '(** ignored and continue **)'.format(cached_file)
        )
    else:
        print(
            'Successfully loaded imagenet pretrained weights from "{}"'.
            format(cached_file)
        )
        if len(discarded_layers) > 0:
            print(
                '** The following layers are discarded '
                'due to unmatched keys or layer size: {}'.
                format(discarded_layers)
            )
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at standard width (x1.0)."""
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[64, 256, 384, 512], loss=loss, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x1_0')
    return net
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at medium width (x0.75)."""
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[48, 192, 288, 384], loss=loss, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x0_75')
    return net
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at tiny width (x0.5)."""
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[32, 128, 192, 256], loss=loss, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x0_5')
    return net
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet at very tiny width (x0.25)."""
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[16, 64, 96, 128], loss=loss, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x0_25')
    return net
def osnet_ibn_x1_0(
    num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
    """OSNet x1.0 with IBN (instance-batch normalization) layers.

    Ref: Pan et al. Two at Once: Enhancing Learning and Generalization
    Capacities via IBN-Net. ECCV, 2018.
    """
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[64, 256, 384, 512], loss=loss, IN=True, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_ibn_x1_0')
    return net
"""
MixStyle models
"""
def osnet_x1_0_ms23_a0d1_domprior(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
    """OSNet x1.0 + MixStyle (alpha=0.1) after conv2 and conv3."""
    net = OSNet(
        num_classes, blocks=[OSBlock, OSBlock, OSBlock], layers=[2, 2, 2],
        channels=[64, 256, 384, 512], loss=loss,
        mixstyle_layers=['conv2', 'conv3'], mixstyle_alpha=0.1, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, key='osnet_x1_0')
    return net
| 18,135 | 27.56063 | 108 | py |
mixstyle-release | mixstyle-release-master/reid/models/resnet_ms2.py | """
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
# Public factory functions exported by this module.
__all__ = [
    'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
    'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]

# Official torchvision download URLs for ImageNet-pretrained weights,
# consumed by ``init_pretrained_weights``.
model_urls = {
    'resnet18':
    'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34':
    'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50':
    'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101':
    'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152':
    'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d':
    'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d':
    'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding.

    Bias-free; padding equals dilation so spatial size is kept at stride 1.
    """
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=3, stride=stride,
        padding=dilation, groups=groups, dilation=dilation, bias=False
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free channel projection / downsampling)."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (expansion 1)."""

    expansion = 1

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError(
                'BasicBlock only supports groups=1 and base_width=64'
            )
        if dilation > 1:
            raise NotImplementedError(
                "Dilation > 1 not supported in BasicBlock"
            )
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + shortcut)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck block (expansion 4)."""

    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None
    ):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width/64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + shortcut)
class ResNet(nn.Module):
    """Residual network.

    Reference:
        - He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
        - Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.

    Public keys:
        - ``resnet18``: ResNet18.
        - ``resnet34``: ResNet34.
        - ``resnet50``: ResNet50.
        - ``resnet101``: ResNet101.
        - ``resnet152``: ResNet152.
        - ``resnext50_32x4d``: ResNeXt50.
        - ``resnext101_32x8d``: ResNeXt101.
        - ``resnet50_fc512``: ResNet50 + FC.
    """

    def __init__(
        self,
        num_classes,
        loss,
        block,
        layers,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        last_stride=2,
        fc_dims=None,
        dropout_p=None,
        mixstyle_layers=(),
        mixstyle_p=0.5,
        mixstyle_alpha=0.3,
        **kwargs
    ):
        """
        Args:
            num_classes (int): classifier output size.
            loss (str): 'softmax' or 'triplet' (controls forward outputs).
            block: residual block class (must expose ``expansion``).
            layers (list): number of blocks per stage.
            last_stride (int): stride of layer4 (1 keeps larger feature maps).
            fc_dims (list/tuple or None): optional fc head dimensions.
            dropout_p (float or None): dropout in the fc head.
            mixstyle_layers (sequence): subset of {'layer1'..'layer4'} after
                which MixStyle is applied; empty disables MixStyle.
                (Default is now an immutable tuple instead of a mutable list.)
            mixstyle_p / mixstyle_alpha: MixStyle hyper-parameters.
        """
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.loss = loss
        self.feature_dim = 512 * block.expansion
        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".
                format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = nn.Conv2d(
            3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(
            block,
            128,
            layers[1],
            stride=2,
            dilate=replace_stride_with_dilation[0]
        )
        self.layer3 = self._make_layer(
            block,
            256,
            layers[2],
            stride=2,
            dilate=replace_stride_with_dilation[1]
        )
        self.layer4 = self._make_layer(
            block,
            512,
            layers[3],
            stride=last_stride,
            dilate=replace_stride_with_dilation[2]
        )
        self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = self._construct_fc_layer(
            fc_dims, 512 * block.expansion, dropout_p
        )
        self.classifier = nn.Linear(self.feature_dim, num_classes)

        self.mixstyle = None
        if mixstyle_layers:
            # Cross-domain mixing: assumes each half of the batch comes from
            # a different domain (see MixStyle).
            self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='crossdomain')
            print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
        self.mixstyle_layers = mixstyle_layers

        self._init_params()

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of ``blocks`` blocks."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut matches channels / spatial size.
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes, planes, stride, downsample, self.groups,
                self.base_width, previous_dilation, norm_layer
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer
                )
            )

        return nn.Sequential(*layers)

    def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        """Constructs fully connected layer

        Args:
            fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
            input_dim (int): input dimension
            dropout_p (float): dropout probability, if None, dropout is unused
        """
        if fc_dims is None:
            self.feature_dim = input_dim
            return None

        assert isinstance(
            fc_dims, (list, tuple)
        ), 'fc_dims must be either list or tuple, but got {}'.format(
            type(fc_dims)
        )

        layers = []
        for dim in fc_dims:
            layers.append(nn.Linear(input_dim, dim))
            layers.append(nn.BatchNorm1d(dim))
            layers.append(nn.ReLU(inplace=True))
            if dropout_p is not None:
                layers.append(nn.Dropout(p=dropout_p))
            input_dim = dim

        self.feature_dim = fc_dims[-1]

        return nn.Sequential(*layers)

    def _init_params(self):
        """Kaiming init for convs, unit batch norms, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu'
                )
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def featuremaps(self, x):
        """Backbone forward pass; MixStyle is applied after configured stages."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        if 'layer1' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer2(x)
        if 'layer2' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer3(x)
        if 'layer3' in self.mixstyle_layers:
            x = self.mixstyle(x)
        x = self.layer4(x)
        if 'layer4' in self.mixstyle_layers:
            # Added for parity with the sibling ResNet variants (e.g. the
            # ms1234 configs); a no-op for the configs defined in this file.
            x = self.mixstyle(x)
        return x

    def forward(self, x):
        """Return logits (train/softmax), (logits, feats) (train/triplet) or feats (eval)."""
        f = self.featuremaps(x)
        v = self.global_avgpool(f)
        v = v.view(v.size(0), -1)

        if self.fc is not None:
            v = self.fc(v)

        if not self.training:
            return v

        y = self.classifier(v)

        if self.loss == 'softmax':
            return y
        elif self.loss == 'triplet':
            return y, v
        else:
            raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
    """Initializes model with pretrained weights.

    Layers that don't match with pretrained layers in name or size are kept unchanged.
    """
    current = model.state_dict()
    downloaded = model_zoo.load_url(model_url)
    # Keep only tensors whose name AND shape match the target model.
    compatible = {
        name: tensor
        for name, tensor in downloaded.items()
        if name in current and current[name].size() == tensor.size()
    }
    current.update(compatible)
    model.load_state_dict(current)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-18 (BasicBlock x [2, 2, 2, 2])."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=BasicBlock,
        layers=[2, 2, 2, 2], last_stride=2, fc_dims=None, dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet18'])
    return net
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-34 (BasicBlock x [3, 4, 6, 3])."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=BasicBlock,
        layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet34'])
    return net
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-50 (Bottleneck x [3, 4, 6, 3])."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-101 (Bottleneck x [3, 4, 23, 3])."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 23, 3], last_stride=2, fc_dims=None, dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet101'])
    return net
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-152 (Bottleneck x [3, 8, 36, 3])."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 8, 36, 3], last_stride=2, fc_dims=None, dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet152'])
    return net
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNeXt-50 32x4d (grouped bottlenecks, 32 groups of width 4)."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=2, fc_dims=None, dropout_p=None,
        groups=32, width_per_group=4, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnext50_32x4d'])
    return net
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNeXt-101 32x8d (grouped bottlenecks, 32 groups of width 8)."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 23, 3], last_stride=2, fc_dims=None, dropout_p=None,
        groups=32, width_per_group=8, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnext101_32x8d'])
    return net
"""ResNet + FC"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet-50 with last_stride=1 and a 512-d fc embedding head."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None,
        **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
"""MixStyle models"""
def resnet50_fc512_ms12_a0d1_domprior(num_classes, loss='softmax', pretrained=True, **kwargs):
    """ResNet50-fc512 with MixStyle (alpha=0.1) after layer1 and layer2."""
    net = ResNet(
        num_classes=num_classes, loss=loss, block=Bottleneck,
        layers=[3, 4, 6, 3], last_stride=1, fc_dims=[512], dropout_p=None,
        mixstyle_layers=['layer1', 'layer2'], mixstyle_alpha=0.1, **kwargs
    )
    if pretrained:
        init_pretrained_weights(net, model_urls['resnet50'])
    return net
| 16,298 | 27.898936 | 106 | py |
mixstyle-release | mixstyle-release-master/reid/models/dropblock/dropblock.py | import torch
import torch.nn.functional as F
from torch import nn
class DropBlock2D(nn.Module):
    r"""Randomly zeroes 2D spatial blocks of the input tensor.

    As described in the paper
    `DropBlock: A regularization method for convolutional networks`_ ,
    dropping whole blocks of feature map allows to remove semantic
    information as compared to regular dropout.

    Args:
        drop_prob (float): probability of an element to be dropped.
        block_size (int): size of the block to drop

    Shape:
        - Input: `(N, C, H, W)`
        - Output: `(N, C, H, W)`

    .. _DropBlock: A regularization method for convolutional networks:
       https://arxiv.org/abs/1810.12890
    """

    def __init__(self, drop_prob, block_size):
        super(DropBlock2D, self).__init__()

        self.drop_prob = drop_prob
        self.block_size = block_size

    def forward(self, x):
        # shape: (bsize, channels, height, width)

        assert x.dim() == 4, \
            "Expected input with 4 dimensions (bsize, channels, height, width)"

        if not self.training or self.drop_prob == 0.:
            return x

        # get gamma value (seed probability per spatial location)
        gamma = self._compute_gamma(x)

        # sample mask (shared across channels)
        mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()

        # place mask on input device
        mask = mask.to(x.device)

        # compute block mask (1 = keep, 0 = dropped block)
        block_mask = self._compute_block_mask(mask)

        # apply block mask
        out = x * block_mask[:, None, :, :]

        # Rescale to preserve the expected activation magnitude. FIX: clamp
        # the denominator so a fully-dropped mask yields zeros instead of
        # NaN/inf (block_mask.sum() can be 0); the count is otherwise >= 1,
        # so non-degenerate scaling is unchanged.
        out = out * block_mask.numel() / block_mask.sum().clamp(min=1.0)

        return out

    def _compute_block_mask(self, mask):
        # Dilate each dropped seed into a block_size x block_size square via
        # max-pooling, then invert so 1 means "keep".
        block_mask = F.max_pool2d(input=mask[:, None, :, :],
                                  kernel_size=(self.block_size, self.block_size),
                                  stride=(1, 1),
                                  padding=self.block_size // 2)

        if self.block_size % 2 == 0:
            # Even kernels pad one extra row/column; trim it.
            block_mask = block_mask[:, :, :-1, :-1]

        block_mask = 1 - block_mask.squeeze(1)

        return block_mask

    def _compute_gamma(self, x):
        # Seed probability; normalized by block area (simplified vs. paper).
        return self.drop_prob / (self.block_size ** 2)
class DropBlock3D(DropBlock2D):
    r"""Randomly zeroes 3D spatial blocks of the input tensor.
    An extension to the concept described in the paper
    `DropBlock: A regularization method for convolutional networks`_ ,
    dropping whole blocks of feature map allows to remove semantic
    information as compared to regular dropout.
    Args:
        drop_prob (float): probability of an element to be dropped.
        block_size (int): size of the block to drop
    Shape:
        - Input: `(N, C, D, H, W)`
        - Output: `(N, C, D, H, W)`
    .. _DropBlock: A regularization method for convolutional networks:
       https://arxiv.org/abs/1810.12890
    """
    def __init__(self, drop_prob, block_size):
        # Same state as the 2-D version; only the spatial rank differs.
        super(DropBlock3D, self).__init__(drop_prob, block_size)
    def forward(self, x):
        # shape: (bsize, channels, depth, height, width)
        assert x.dim() == 5, \
            "Expected input with 5 dimensions (bsize, channels, depth, height, width)"
        if not self.training or self.drop_prob == 0.:
            return x
        else:
            # get gamma value (seed probability per voxel)
            gamma = self._compute_gamma(x)
            # sample mask of block centers, one per sample (no channel dim)
            mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()
            # place mask on input device
            mask = mask.to(x.device)
            # compute block mask (1 = keep, 0 = drop)
            block_mask = self._compute_block_mask(mask)
            # apply block mask; broadcast over the channel dimension
            out = x * block_mask[:, None, :, :, :]
            # scale output so the expected activation magnitude is preserved
            out = out * block_mask.numel() / block_mask.sum()
            return out
    def _compute_block_mask(self, mask):
        # Dilate each seed voxel into a cubic block via stride-1 max pooling.
        block_mask = F.max_pool3d(input=mask[:, None, :, :, :],
                                  kernel_size=(self.block_size, self.block_size, self.block_size),
                                  stride=(1, 1, 1),
                                  padding=self.block_size // 2)
        # Even block sizes over-pad by one plane/row/column; trim them off.
        if self.block_size % 2 == 0:
            block_mask = block_mask[:, :, :-1, :-1, :-1]
        block_mask = 1 - block_mask.squeeze(1)
        return block_mask
    def _compute_gamma(self, x):
        # Spread drop_prob over the volume of one cubic block.
        return self.drop_prob / (self.block_size ** 3)
| 4,440 | 29.210884 | 98 | py |
mixstyle-release | mixstyle-release-master/reid/models/dropblock/scheduler.py | import numpy as np
from torch import nn
class LinearScheduler(nn.Module):
    """Wrap a DropBlock layer and linearly ramp its ``drop_prob``.

    ``step()`` must be called externally (e.g. once per iteration); the
    wrapped layer's ``drop_prob`` walks from ``start_value`` to
    ``stop_value`` over ``nr_steps`` calls and then stays at the last value.
    """

    def __init__(self, dropblock, start_value, stop_value, nr_steps):
        super(LinearScheduler, self).__init__()
        self.dropblock = dropblock
        self.i = 0
        # Precomputed linear schedule of drop probabilities.
        self.drop_values = np.linspace(start_value, stop_value, nr_steps)

    def forward(self, x):
        # Pure pass-through: scheduling only mutates dropblock.drop_prob.
        return self.dropblock(x)

    def step(self):
        # Advance one step; no-op once the schedule is exhausted.
        if self.i >= len(self.drop_values):
            return
        self.dropblock.drop_prob = self.drop_values[self.i]
        self.i += 1
| 546 | 26.35 | 88 | py |
mixstyle-release | mixstyle-release-master/imcls/vis.py | import argparse
import torch
import os.path as osp
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def normalize(feature):
    """L2-normalize each row of a 2-D feature matrix.

    A tiny epsilon guards against division by zero for all-zero rows.
    """
    row_norms = np.sqrt((feature**2).sum(1, keepdims=True))
    return feature / (row_norms + 1e-12)
def main():
    """Load saved embeddings, reduce them to 2-D, and plot them by domain.

    Expects ``src`` to be a torch-saved dict with keys 'embed', 'domain'
    and 'dnames'. The 2-D embedding is cached next to the plot output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('src', type=str, default='', help='path to source file')
    parser.add_argument('--dst', type=str, default='', help='destination directory')
    # Bug fix: the help text previously read 'tnse' (typo) for 'tsne'.
    parser.add_argument('--method', type=str, default='tsne', help='tsne, pca or none')
    args = parser.parse_args()
    # Default destination: alongside the source file.
    if not args.dst:
        args.dst = osp.dirname(args.src)
    print('Loading file from "{}"'.format(args.src))
    file = torch.load(args.src)
    embed = file['embed']
    domain = file['domain']
    dnames = file['dnames']
    #dim = embed.shape[1] // 2
    #embed = embed[:, dim:]
    #domain = file['label']
    #dnames = ['dog', 'elephant', 'giraffe', 'guitar', 'horse', 'house', 'person']
    nd_src = len(dnames)  # NOTE: only used by the commented-out labeling code
    embed = normalize(embed)
    print('Loaded features with shape {}'.format(embed.shape))
    # Cache the 2-D embedding so reruns skip the expensive reduction.
    embed2d_path = osp.join(args.dst, 'embed2d_' + args.method + '.pt')
    if osp.exists(embed2d_path):
        embed2d = torch.load(embed2d_path)
        print('Loaded embed2d from "{}"'.format(embed2d_path))
    else:
        if args.method == 'tsne':
            print('Dimension reduction with t-SNE (dim=2) ...')
            tsne = TSNE(
                n_components=2, metric='euclidean', verbose=1,
                perplexity=50, n_iter=1000, learning_rate=200.
            )
            embed2d = tsne.fit_transform(embed)
            torch.save(embed2d, embed2d_path)
            print('Saved embed2d to "{}"'.format(embed2d_path))
        elif args.method == 'pca':
            print('Dimension reduction with PCA (dim=2) ...')
            pca = PCA(n_components=2)
            embed2d = pca.fit_transform(embed)
            torch.save(embed2d, embed2d_path)
            print('Saved embed2d to "{}"'.format(embed2d_path))
        elif args.method == 'none':
            # the original embedding is 2-D
            embed2d = embed
    avai_domains = list(set(domain.tolist()))
    avai_domains.sort()
    print('Plotting ...')
    SIZE = 3
    COLORS = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    LEGEND_MS = 3
    fig, ax = plt.subplots()
    # One scatter call per domain so each gets its own color and legend entry.
    for d in avai_domains:
        d = int(d)
        e = embed2d[domain == d]
        """
        label = '$D_{}$'.format(str(d + 1))
        if d < nd_src:
            label += ' ($\mathcal{S}$)'
        else:
            label += ' ($\mathcal{N}$)'
        """
        label = dnames[d]
        ax.scatter(
            e[:, 0],
            e[:, 1],
            s=SIZE,
            c=COLORS[d],
            edgecolors='none',
            label=label,
            alpha=1,
            rasterized=False
        )
    #ax.legend(loc='upper left', fontsize=10, markerscale=LEGEND_MS)
    ax.legend(fontsize=10, markerscale=LEGEND_MS)
    ax.set_xticks([])
    ax.set_yticks([])
    #LIM = 22
    #ax.set_xlim(-LIM, LIM)
    #ax.set_ylim(-LIM, LIM)
    figname = 'embed.pdf'
    fig.savefig(osp.join(args.dst, figname), bbox_inches='tight')
    plt.close()
# Script entry point.
if __name__ == '__main__':
    main()
| 3,338 | 26.368852 | 87 | py |
mixstyle-release | mixstyle-release-master/imcls/train.py | import argparse
import copy
import torch
from dassl.utils import setup_logger, set_random_seed, collect_env_info
from dassl.config import get_cfg_default
from dassl.engine import build_trainer
# custom
from yacs.config import CfgNode as CN
import datasets.ssdg_pacs
import datasets.ssdg_officehome
import datasets.msda_pacs
import trainers.vanilla2
import trainers.semimixstyle
def print_args(args, cfg):
    """Pretty-print the parsed CLI arguments followed by the full config."""
    print('***************')
    print('** Arguments **')
    print('***************')
    # Arguments are listed alphabetically for reproducible logs.
    for key in sorted(args.__dict__):
        print('{}: {}'.format(key, args.__dict__[key]))
    print('************')
    print('** Config **')
    print('************')
    print(cfg)
def reset_cfg(cfg, args):
    """Override selected config fields from CLI arguments.

    Only truthy argument values take effect, so empty strings, ``None``
    and ``0`` leave the corresponding config entry untouched.
    """
    overrides = (
        (args.root, cfg.DATASET, 'ROOT'),
        (args.output_dir, cfg, 'OUTPUT_DIR'),
        (args.resume, cfg, 'RESUME'),
        (args.seed, cfg, 'SEED'),
        (args.source_domains, cfg.DATASET, 'SOURCE_DOMAINS'),
        (args.target_domains, cfg.DATASET, 'TARGET_DOMAINS'),
        (args.transforms, cfg.INPUT, 'TRANSFORMS'),
        (args.trainer, cfg.TRAINER, 'NAME'),
        (args.backbone, cfg.MODEL.BACKBONE, 'NAME'),
        (args.head, cfg.MODEL.HEAD, 'NAME'),
    )
    for value, node, attr in overrides:
        if value:
            setattr(node, attr, value)
def extend_cfg(cfg):
    """Register trainer-specific config nodes before merging config files."""
    # Here you can extend the existing cfg variables by adding new ones
    cfg.TRAINER.VANILLA2 = CN()
    cfg.TRAINER.VANILLA2.MIX = 'random'  # random or crossdomain
    cfg.TRAINER.SEMIMIXSTYLE = CN()
    cfg.TRAINER.SEMIMIXSTYLE.WEIGHT_U = 1.  # weight on the unlabeled loss
    cfg.TRAINER.SEMIMIXSTYLE.CONF_THRE = 0.95  # confidence threshold
    cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS = ()
    cfg.TRAINER.SEMIMIXSTYLE.MS_LABELED = False  # apply mixstyle to labeled data
    cfg.TRAINER.SEMIMIXSTYLE.MIX = 'random'  # random or crossdomain
def setup_cfg(args):
    """Build the frozen config: defaults -> extensions -> files -> CLI opts.

    Merge order matters: dataset file, then main config file, then the
    free-form ``opts`` list override everything before the cfg is frozen.
    """
    cfg = get_cfg_default()
    extend_cfg(cfg)
    reset_cfg(cfg, args)
    if args.dataset_config_file:
        cfg.merge_from_file(args.dataset_config_file)
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    return cfg
def main(args):
    """Entry point: configure, build the trainer, then vis/eval/train."""
    cfg = setup_cfg(args)
    # A negative SEED (the default) means "no fixed seed".
    if cfg.SEED >= 0:
        print('Setting fixed seed: {}'.format(cfg.SEED))
        set_random_seed(cfg.SEED)
    setup_logger(cfg.OUTPUT_DIR)
    if torch.cuda.is_available() and cfg.USE_CUDA:
        torch.backends.cudnn.benchmark = True
    print_args(args, cfg)
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    trainer = build_trainer(cfg)
    # Visualization and eval-only modes load a checkpoint and return early.
    if args.vis:
        trainer.load_model(args.model_dir, epoch=args.load_epoch)
        trainer.vis()
        return
    if args.eval_only:
        trainer.load_model(args.model_dir, epoch=args.load_epoch)
        trainer.test()
        return
    if not args.no_train:
        trainer.train()
# CLI definition; parses arguments and delegates to main().
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, default='', help='path to dataset')
    parser.add_argument(
        '--output-dir', type=str, default='', help='output directory'
    )
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='checkpoint directory (from which the training resumes)'
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='only positive value enables a fixed seed'
    )
    parser.add_argument(
        '--source-domains',
        type=str,
        nargs='+',
        help='source domains for DA/DG'
    )
    parser.add_argument(
        '--target-domains',
        type=str,
        nargs='+',
        help='target domains for DA/DG'
    )
    parser.add_argument(
        '--transforms', type=str, nargs='+', help='data augmentation methods'
    )
    parser.add_argument(
        '--config-file', type=str, default='', help='path to config file'
    )
    parser.add_argument(
        '--dataset-config-file',
        type=str,
        default='',
        help='path to config file for dataset setup'
    )
    parser.add_argument(
        '--trainer', type=str, default='', help='name of trainer'
    )
    parser.add_argument(
        '--backbone', type=str, default='', help='name of CNN backbone'
    )
    parser.add_argument('--head', type=str, default='', help='name of head')
    parser.add_argument(
        '--eval-only', action='store_true', help='evaluation only'
    )
    parser.add_argument(
        '--model-dir',
        type=str,
        default='',
        help='load model from this directory for eval-only mode'
    )
    parser.add_argument(
        '--load-epoch',
        type=int,
        help='load model weights at this epoch for evaluation'
    )
    parser.add_argument(
        '--no-train', action='store_true', help='do not call trainer.train()'
    )
    parser.add_argument('--vis', action='store_true', help='visualization')
    # Remaining tokens become free-form cfg overrides (KEY VALUE pairs).
    parser.add_argument(
        'opts',
        default=None,
        nargs=argparse.REMAINDER,
        help='modify config options using the command-line'
    )
    args = parser.parse_args()
    main(args)
| 5,312 | 26.386598 | 80 | py |
mixstyle-release | mixstyle-release-master/imcls/trainers/semimixstyle.py | import torch
from torch.nn import functional as F
from dassl.data import DataManager
from dassl.engine import TRAINER_REGISTRY, TrainerXU
from dassl.metrics import compute_accuracy
from dassl.data.transforms import build_transform
from dassl.modeling.ops import deactivate_mixstyle, run_with_mixstyle
@TRAINER_REGISTRY.register()
class SemiMixStyle(TrainerXU):
    """FixMatch-style semi-supervised trainer combined with MixStyle.

    Labeled data give a supervised cross-entropy loss. Unlabeled data are
    pseudo-labeled from the weakly augmented view (``img``) and trained on
    the strongly augmented view (``img2``); MixStyle is kept deactivated by
    default and re-enabled per forward pass via ``run_with_mixstyle``.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        # Bug fix: random_mixstyle / crossdomain_mixstyle were referenced
        # below but never imported at module level (only deactivate_mixstyle
        # and run_with_mixstyle are), causing a NameError. Import them
        # locally so this fix is self-contained.
        from dassl.modeling.ops import random_mixstyle, crossdomain_mixstyle
        self.weight_u = cfg.TRAINER.SEMIMIXSTYLE.WEIGHT_U
        self.conf_thre = cfg.TRAINER.SEMIMIXSTYLE.CONF_THRE
        self.ms_labeled = cfg.TRAINER.SEMIMIXSTYLE.MS_LABELED
        mix = cfg.TRAINER.SEMIMIXSTYLE.MIX
        if mix == 'random':
            self.model.apply(random_mixstyle)
            print('MixStyle: random mixing')
        elif mix == 'crossdomain':
            self.model.apply(crossdomain_mixstyle)
            print('MixStyle: cross-domain mixing')
        else:
            raise NotImplementedError
        # MixStyle stays off unless explicitly enabled in forward_backward().
        self.model.apply(deactivate_mixstyle)

    def check_cfg(self, cfg):
        # Strong augmentations are mandatory for the unlabeled branch.
        assert len(cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS) > 0

    def build_data_loader(self):
        """Build data loaders with an extra strong-augmentation pipeline."""
        cfg = self.cfg
        tfm_train = build_transform(cfg, is_train=True)
        custom_tfm_train = [tfm_train]
        choices = cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS
        tfm_train_strong = build_transform(cfg, is_train=True, choices=choices)
        custom_tfm_train += [tfm_train_strong]
        dm = DataManager(self.cfg, custom_tfm_train=custom_tfm_train)
        self.train_loader_x = dm.train_loader_x
        self.train_loader_u = dm.train_loader_u
        self.val_loader = dm.val_loader
        self.test_loader = dm.test_loader
        self.num_classes = dm.num_classes
        self.num_source_domains = dm.num_source_domains
        self.lab2cname = dm.lab2cname

    def assess_y_pred_quality(self, y_pred, y_true, mask):
        """Report pseudo-label accuracy (thresholded and raw) and keep rate."""
        n_masked_correct = (y_pred.eq(y_true).float() * mask).sum()
        acc_thre = n_masked_correct / (mask.sum() + 1e-5)
        acc_raw = y_pred.eq(y_true).sum() / y_pred.numel()  # raw accuracy
        keep_rate = mask.sum() / mask.numel()
        output = {
            'acc_thre': acc_thre,
            'acc_raw': acc_raw,
            'keep_rate': keep_rate
        }
        return output

    def forward_backward(self, batch_x, batch_u):
        """One optimization step over a labeled and an unlabeled batch."""
        parsed_data = self.parse_batch_train(batch_x, batch_u)
        input_x, input_x2, label_x, input_u, input_u2, label_u = parsed_data
        # Labeled images also join the unlabeled branch (FixMatch-style).
        input_u = torch.cat([input_x, input_u], 0)
        input_u2 = torch.cat([input_x2, input_u2], 0)
        n_x = input_x.size(0)
        # Generate pseudo labels from the weakly augmented view.
        with torch.no_grad():
            output_u = F.softmax(self.model(input_u), 1)
            max_prob, label_u_pred = output_u.max(1)
            mask_u = (max_prob >= self.conf_thre).float()
        # Evaluate pseudo labels' accuracy (true-unlabeled part only).
        y_u_pred_stats = self.assess_y_pred_quality(
            label_u_pred[n_x:], label_u, mask_u[n_x:]
        )
        # Supervised loss (optionally with MixStyle on the labeled pass).
        if self.ms_labeled:
            with run_with_mixstyle(self.model, mix='random'):
                output_x = self.model(input_x)
            loss_x = F.cross_entropy(output_x, label_x)
        else:
            output_x = self.model(input_x)
            loss_x = F.cross_entropy(output_x, label_x)
        # Unsupervised loss on the strongly augmented view, masked by
        # the confidence threshold.
        with run_with_mixstyle(self.model, mix='crossdomain'):
            output_u = self.model(input_u2)
        loss_u = F.cross_entropy(output_u, label_u_pred, reduction='none')
        loss_u = (loss_u * mask_u).mean()
        loss = loss_x + loss_u * self.weight_u
        self.model_backward_and_update(loss)
        loss_summary = {
            'loss_x': loss_x.item(),
            'acc_x': compute_accuracy(output_x, label_x)[0].item(),
            'loss_u': loss_u.item(),
            'y_u_pred_acc_raw': y_u_pred_stats['acc_raw'],
            'y_u_pred_acc_thre': y_u_pred_stats['acc_thre'],
            'y_u_pred_keep': y_u_pred_stats['keep_rate']
        }
        if (self.batch_idx + 1) == self.num_batches:
            self.update_lr()
        return loss_summary

    def parse_batch_train(self, batch_x, batch_u):
        """Move the weak/strong views and labels of both batches to device."""
        input_x = batch_x['img']
        input_x2 = batch_x['img2']
        label_x = batch_x['label']
        input_u = batch_u['img']
        input_u2 = batch_u['img2']
        # label_u is used only for evaluating pseudo labels' accuracy
        label_u = batch_u['label']
        input_x = input_x.to(self.device)
        input_x2 = input_x2.to(self.device)
        label_x = label_x.to(self.device)
        input_u = input_u.to(self.device)
        input_u2 = input_u2.to(self.device)
        label_u = label_u.to(self.device)
        return input_x, input_x2, label_x, input_u, input_u2, label_u
| 4,835 | 35.360902 | 79 | py |
mixstyle-release | mixstyle-release-master/imcls/trainers/vanilla2.py | import torch
from torch.nn import functional as F
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.metrics import compute_accuracy
from dassl.modeling.ops import random_mixstyle, crossdomain_mixstyle
@TRAINER_REGISTRY.register()
class Vanilla2(TrainerX):
    """Vanilla baseline.

    Slightly modified for mixstyle.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        mix = cfg.TRAINER.VANILLA2.MIX
        if mix == 'random':
            self.model.apply(random_mixstyle)
            print('MixStyle: random mixing')
        elif mix == 'crossdomain':
            self.model.apply(crossdomain_mixstyle)
            print('MixStyle: cross-domain mixing')
        else:
            raise NotImplementedError

    def forward_backward(self, batch):
        """One supervised optimization step; returns loss/accuracy summary."""
        input, label = self.parse_batch_train(batch)
        output = self.model(input)
        loss = F.cross_entropy(output, label)
        self.model_backward_and_update(loss)
        loss_summary = {
            'loss': loss.item(),
            'acc': compute_accuracy(output, label)[0].item()
        }
        if (self.batch_idx + 1) == self.num_batches:
            self.update_lr()
        return loss_summary

    def parse_batch_train(self, batch):
        """Move image tensor and labels of a batch to the trainer device."""
        input = batch['img']
        label = batch['label']
        input = input.to(self.device)
        label = label.to(self.device)
        return input, label

    @torch.no_grad()
    def vis(self):
        """Extract per-sample features and save them for visualization.

        NOTE(review): feature extraction is a placeholder — the
        ``raise NotImplementedError`` below fires on the first batch until
        a model that outputs features/style statistics is plugged in.
        """
        # Bug fix: np and osp were used below but never imported in this
        # module; import them locally so the method works once implemented.
        import numpy as np
        import os.path as osp
        self.set_model_mode('eval')
        output_dir = self.cfg.OUTPUT_DIR
        source_domains = self.cfg.DATASET.SOURCE_DOMAINS
        print('Source domains:', source_domains)
        out_embed = []
        out_domain = []
        out_label = []
        split = self.cfg.TEST.SPLIT
        data_loader = self.val_loader if split == 'val' else self.test_loader
        print('Extracting style features')
        for batch_idx, batch in enumerate(data_loader):
            input = batch['img'].to(self.device)
            label = batch['label']
            domain = batch['domain']
            impath = batch['impath']
            # model should directly output features or style statistics
            raise NotImplementedError
            output = self.model(input)
            output = output.cpu().numpy()
            out_embed.append(output)
            out_domain.append(domain.numpy())
            out_label.append(label.numpy())  # CLASS LABEL
            print('processed batch-{}'.format(batch_idx + 1))
        out_embed = np.concatenate(out_embed, axis=0)
        out_domain = np.concatenate(out_domain, axis=0)
        out_label = np.concatenate(out_label, axis=0)
        print('shape of feature matrix:', out_embed.shape)
        out = {
            'embed': out_embed,
            'domain': out_domain,
            'dnames': source_domains,
            'label': out_label
        }
        out_path = osp.join(output_dir, 'embed.pt')
        torch.save(out, out_path)
        print('File saved to "{}"'.format(out_path))
| 3,011 | 29.424242 | 77 | py |
scipy | scipy-main/dev.py | #! /usr/bin/env python3
'''
Developer CLI: building (meson), tests, benchmark, etc.
This file contains tasks definitions for doit (https://pydoit.org).
And also a CLI interface using click (https://click.palletsprojects.com).
The CLI is ideal for project contributors while,
doit interface is better suited for authoring the development tasks.
REQUIREMENTS:
--------------
- see environment.yml: doit, pydevtool, click, rich-click
# USAGE:
## 1 - click API
Commands can added using default Click API. i.e.
```
@cli.command()
@click.argument('extra_argv', nargs=-1)
@click.pass_obj
def python(ctx_obj, extra_argv):
"""Start a Python shell with PYTHONPATH set"""
```
## 2 - class based Click command definition
`CliGroup` provides an alternative class based API to create Click commands.
Just use the `cls_cmd` decorator. And define a `run()` method
```
@cli.cls_cmd('test')
class Test():
"""Run tests"""
@classmethod
def run(cls):
print('Running tests...')
```
- Command may make use a Click.Group context defining a `ctx` class attribute
- Command options are also define as class attributes
```
@cli.cls_cmd('test')
class Test():
"""Run tests"""
ctx = CONTEXT
verbose = Option(
['--verbose', '-v'], default=False, is_flag=True, help="verbosity")
@classmethod
def run(cls, **kwargs): # kwargs contains options from class and CONTEXT
print('Running tests...')
```
## 3 - class based interface can be run as a doit task by subclassing from Task
- Extra doit task metadata can be defined as class attribute TASK_META.
- `run()` method will be used as python-action by task
```
@cli.cls_cmd('test')
class Test(Task): # Task base class, doit will create a task
"""Run tests"""
ctx = CONTEXT
TASK_META = {
'task_dep': ['build'],
}
@classmethod
def run(cls, **kwargs):
pass
```
## 4 - doit tasks with cmd-action "shell" or dynamic metadata
Define method `task_meta()` instead of `run()`:
```
@cli.cls_cmd('refguide-check')
class RefguideCheck(Task):
@classmethod
def task_meta(cls, **kwargs):
return {
```
'''
import os
import subprocess
import sys
import warnings
import shutil
import json
import datetime
import time
import platform
import importlib.util
import errno
import contextlib
from sysconfig import get_path
import math
import traceback
from concurrent.futures.process import _MAX_WINDOWS_WORKERS
# distutils is required to infer meson install path
# if this needs to be replaced for Python 3.12 support and there's no
# stdlib alternative, use CmdAction and the hack discussed in gh-16058
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from distutils import dist
from distutils.command.install import INSTALL_SCHEMES
from pathlib import Path
from collections import namedtuple
from types import ModuleType as new_module
from dataclasses import dataclass
import click
from click import Option, Argument
from doit.cmd_base import ModuleTaskLoader
from doit.reporter import ZeroReporter
from doit.exceptions import TaskError
from doit.api import run_tasks
from pydevtool.cli import UnifiedContext, CliGroup, Task
from rich.console import Console
from rich.panel import Panel
from rich.theme import Theme
from rich_click import rich_click
# Global doit configuration shared by all tasks.
DOIT_CONFIG = {
    'verbosity': 2,
    'minversion': '0.36.0',
}
# Rich console theme used when echoing shell commands.
console_theme = Theme({
    "cmd": "italic gray50",
})
# Windows terminals may not render emoji; fall back to plain ">".
if sys.platform == 'win32':
    class EMOJI:
        cmd = ">"
else:
    class EMOJI:
        cmd = ":computer:"
# rich-click presentation settings for the CLI help output.
rich_click.STYLE_ERRORS_SUGGESTION = "yellow italic"
rich_click.SHOW_ARGUMENTS = True
rich_click.GROUP_ARGUMENTS_OPTIONS = False
rich_click.SHOW_METAVARS_COLUMN = True
rich_click.USE_MARKDOWN = True
# Grouping of options per command in `--help`.
rich_click.OPTION_GROUPS = {
    "dev.py": [
        {
            "name": "Options",
            "options": [
                "--help", "--build-dir", "--no-build", "--install-prefix"],
        },
    ],
    "dev.py test": [
        {
            "name": "Options",
            "options": ["--help", "--verbose", "--parallel", "--coverage",
                        "--durations"],
        },
        {
            "name": "Options: test selection",
            "options": ["--submodule", "--tests", "--mode"],
        },
    ],
}
# Grouping of subcommands shown in the top-level help.
rich_click.COMMAND_GROUPS = {
    "dev.py": [
        {
            "name": "build & testing",
            "commands": ["build", "test"],
        },
        {
            "name": "static checkers",
            "commands": ["lint", "mypy"],
        },
        {
            "name": "environments",
            "commands": ["shell", "python", "ipython"],
        },
        {
            "name": "documentation",
            "commands": ["doc", "refguide-check"],
        },
        {
            "name": "release",
            "commands": ["notes", "authors"],
        },
        {
            "name": "benchmarking",
            "commands": ["bench"],
        },
    ]
}
class ErrorOnlyReporter(ZeroReporter):
    desc = """Report errors only"""

    def runtime_error(self, msg):
        """Print a runtime error message in red."""
        console = Console()
        # Bug fix: the message was previously printed as the literal string
        # "msg" ('console.print("[red bold] msg")') instead of the actual
        # error text passed in.
        console.print(f"[red bold] {msg}")

    def add_failure(self, task, fail_info):
        """Print a task failure, with its traceback in a bordered panel."""
        console = Console()
        if isinstance(fail_info, TaskError):
            console.print(f'[red]Task Error - {task.name}'
                          f' => {fail_info.message}')
        if fail_info.traceback:
            console.print(Panel(
                "".join(fail_info.traceback),
                title=f"{task.name}",
                subtitle=fail_info.message,
                border_style="red",
            ))
# Options shared by every dev.py subcommand (build/install locations).
CONTEXT = UnifiedContext({
    'build_dir': Option(
        ['--build-dir'], metavar='BUILD_DIR',
        default='build', show_default=True,
        help=':wrench: Relative path to the build directory.'),
    'no_build': Option(
        ["--no-build", "-n"], default=False, is_flag=True,
        help=(":wrench: Do not build the project"
              " (note event python only modification require build).")),
    'install_prefix': Option(
        ['--install-prefix'], default=None, metavar='INSTALL_DIR',
        help=(":wrench: Relative path to the install directory."
              " Default is <build-dir>-install.")),
})
def run_doit_task(tasks):
    """Execute the given doit tasks programmatically.

    :param tasks: (dict) task_name -> {options}
    """
    task_loader = ModuleTaskLoader(globals())
    # Keep output quiet except for errors.
    extra = {'GLOBAL': {'verbosity': 2, 'reporter': ErrorOnlyReporter}}
    return run_tasks(task_loader, tasks, extra_config=extra)
# Click group wired to the shared context and the doit task runner.
class CLI(CliGroup):
    context = CONTEXT
    run_doit_task = run_doit_task
@click.group(cls=CLI)
@click.pass_context
def cli(ctx, **kwargs):
    """Developer Tool for SciPy

    \bCommands that require a built/installed instance are marked with :wrench:.

    \b**python dev.py --build-dir my-build test -s stats**

    """ # noqa: E501
    # Propagate shared options (build dir, prefix, ...) into the context.
    CLI.update_context(ctx, kwargs)
# Importable package name and sentinel files marking the repository root.
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'meson.build']
@dataclass
class Dirs:
    """
    root:
        Directory where scr, build config and tools are located
        (and this file)
    build:
        Directory where build output files (i.e. *.o) are saved
    install:
        Directory where .so from build and .py from src are put together.
    site:
        Directory where the built SciPy version was installed.
        This is a custom prefix, followed by a relative path matching
        the one the system would use for the site-packages of the active
        Python interpreter.
    """
    # all paths are absolute
    root: Path
    build: Path
    installed: Path
    site: Path  # <install>/lib/python<version>/site-packages
    def __init__(self, args=None):
        """:params args: object like Context(build_dir, install_prefix)"""
        self.root = Path(__file__).parent.absolute()
        if not args:
            # Without args only `root` is usable; other fields stay unset.
            return
        self.build = Path(args.build_dir).resolve()
        if args.install_prefix:
            self.installed = Path(args.install_prefix).resolve()
        else:
            # Default install prefix sits next to the build dir.
            self.installed = self.build.parent / (self.build.stem + "-install")
        if sys.platform == 'win32' and sys.version_info < (3, 10):
            # Work around a pathlib bug; these must be absolute paths
            self.build = Path(os.path.abspath(self.build))
            self.installed = Path(os.path.abspath(self.installed))
        # relative path for site-package with py version
        # i.e. 'lib/python3.10/site-packages'
        self.site = self.get_site_packages()
    def add_sys_path(self):
        """Add site dir to sys.path / PYTHONPATH"""
        site_dir = str(self.site)
        sys.path.insert(0, site_dir)
        os.environ['PYTHONPATH'] = \
            os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))
    def get_site_packages(self):
        """
        Depending on whether we have debian python or not,
        return dist_packages path or site_packages path.
        """
        if 'deb_system' in INSTALL_SCHEMES:
            # debian patched python in use
            install_cmd = dist.Distribution().get_command_obj('install')
            install_cmd.select_scheme('deb_system')
            install_cmd.finalize_options()
            plat_path = Path(install_cmd.install_platlib)
        else:
            plat_path = Path(get_path('platlib'))
        # Re-root the interpreter's platlib path under our custom prefix.
        return self.installed / plat_path.relative_to(sys.exec_prefix)
@contextlib.contextmanager
def working_dir(new_dir):
    """Temporarily change the process working directory to ``new_dir``."""
    previous_dir = os.getcwd()
    try:
        os.chdir(new_dir)
        yield
    finally:
        # Always restore the original directory, even on error.
        os.chdir(previous_dir)
def import_module_from_path(mod_name, mod_path):
    """Import and return the module named ``mod_name`` from file ``mod_path``."""
    module_spec = importlib.util.spec_from_file_location(mod_name, mod_path)
    module = importlib.util.module_from_spec(module_spec)
    # Execute the module body so its attributes are populated.
    module_spec.loader.exec_module(module)
    return module
def get_test_runner(project_module):
    """Return (test runner, version, module dir) for an importable project.

    The project is imported by name; its ``test`` attribute is the runner
    (scipy._lib._testutils:PytestTester in SciPy's case).
    """
    __import__(project_module)
    mod = sys.modules[project_module]
    runner = mod.test
    version = mod.__version__
    # Absolute directory containing the imported package/module.
    mod_dir = os.path.abspath(os.path.join(os.path.dirname(mod.__file__)))
    return runner, version, mod_dir
############
@cli.cls_cmd('build')
class Build(Task):
""":wrench: Build & install package on path.
\b
```python
Examples:
$ python dev.py build --asan ;
ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true
LD_PRELOAD=$(gcc --print-file-name=libasan.so)
python dev.py test -v -t
./scipy/ndimage/tests/test_morphology.py -- -s
```
"""
ctx = CONTEXT
werror = Option(
['--werror'], default=False, is_flag=True,
help="Treat warnings as errors")
gcov = Option(
['--gcov'], default=False, is_flag=True,
help="enable C code coverage via gcov (requires GCC)."
"gcov output goes to build/**/*.gc*")
asan = Option(
['--asan'], default=False, is_flag=True,
help=("Build and run with AddressSanitizer support. "
"Note: the build system doesn't check whether "
"the project is already compiled with ASan. "
"If not, you need to do a clean build (delete "
"build and build-install directories)."))
debug = Option(
['--debug', '-d'], default=False, is_flag=True, help="Debug build")
parallel = Option(
['--parallel', '-j'], default=None, metavar='N_JOBS',
help=("Number of parallel jobs for building. "
"This defaults to the number of available physical CPU cores"))
setup_args = Option(
['--setup-args', '-C'], default=[], multiple=True,
help=("Pass along one or more arguments to `meson setup` "
"Repeat the `-C` in case of multiple arguments."))
show_build_log = Option(
['--show-build-log'], default=False, is_flag=True,
help="Show build output rather than using a log file")
win_cp_openblas = Option(
['--win-cp-openblas'], default=False, is_flag=True,
help=("If set, and on Windows, copy OpenBLAS lib to install directory "
"after meson install. "
"Note: this argument may be removed in the future once a "
"`site.cfg`-like mechanism to select BLAS/LAPACK libraries is "
"implemented for Meson"))
@classmethod
def setup_build(cls, dirs, args):
"""
Setting up meson-build
"""
for fn in PROJECT_ROOT_FILES:
if not (dirs.root / fn).exists():
print("To build the project, run dev.py in "
"git checkout or unpacked source")
sys.exit(1)
env = dict(os.environ)
cmd = ["meson", "setup", dirs.build, "--prefix", dirs.installed]
build_dir = dirs.build
run_dir = Path()
if build_dir.exists() and not (build_dir / 'meson-info').exists():
if list(build_dir.iterdir()):
raise RuntimeError("Can't build into non-empty directory "
f"'{build_dir.absolute()}'")
if sys.platform == "cygwin":
# Cygwin only has netlib lapack, but can link against
# OpenBLAS rather than netlib blas at runtime. There is
# no libopenblas-devel to enable linking against
# openblas-specific functions or OpenBLAS Lapack
cmd.extend(["-Dlapack=lapack", "-Dblas=blas"])
build_options_file = (
build_dir / "meson-info" / "intro-buildoptions.json")
if build_options_file.exists():
with open(build_options_file) as f:
build_options = json.load(f)
installdir = None
for option in build_options:
if option["name"] == "prefix":
installdir = option["value"]
break
if installdir != str(dirs.installed):
run_dir = build_dir
cmd = ["meson", "setup", "--reconfigure",
"--prefix", str(dirs.installed)]
else:
return
if args.werror:
cmd += ["--werror"]
if args.gcov:
cmd += ['-Db_coverage=true']
if args.asan:
cmd += ['-Db_sanitize=address,undefined']
if args.setup_args:
cmd += [str(arg) for arg in args.setup_args]
# Setting up meson build
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
ret = subprocess.call(cmd, env=env, cwd=run_dir)
if ret == 0:
print("Meson build setup OK")
else:
print("Meson build setup failed!")
sys.exit(1)
return env
@classmethod
def build_project(cls, dirs, args, env):
"""
Build a dev version of the project.
"""
cmd = ["ninja", "-C", str(dirs.build)]
if args.parallel is None:
# Use number of physical cores rather than ninja's default of 2N+2,
# to avoid out of memory issues (see gh-17941 and gh-18443)
n_cores = cpu_count(only_physical_cores=True)
cmd += [f"-j{n_cores}"]
else:
cmd += ["-j", str(args.parallel)]
# Building with ninja-backend
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
ret = subprocess.call(cmd, env=env, cwd=dirs.root)
if ret == 0:
print("Build OK")
else:
print("Build failed!")
sys.exit(1)
@classmethod
def install_project(cls, dirs, args):
"""
Installs the project after building.
"""
if dirs.installed.exists():
non_empty = len(os.listdir(dirs.installed))
if non_empty and not dirs.site.exists():
raise RuntimeError("Can't install in non-empty directory: "
f"'{dirs.installed}'")
cmd = ["meson", "install", "-C", args.build_dir, "--only-changed"]
log_filename = dirs.root / 'meson-install.log'
start_time = datetime.datetime.now()
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
if args.show_build_log:
ret = subprocess.call(cmd, cwd=dirs.root)
else:
print("Installing, see meson-install.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, stdout=log, stderr=log,
cwd=dirs.root)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
elapsed = datetime.datetime.now() - start_time
print(" ... installation in progress ({} "
"elapsed)".format(elapsed))
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
elapsed = datetime.datetime.now() - start_time
if ret != 0:
if not args.show_build_log:
with open(log_filename) as f:
print(f.read())
print(f"Installation failed! ({elapsed} elapsed)")
sys.exit(1)
# ignore everything in the install directory.
with open(dirs.installed / ".gitignore", "w") as f:
f.write("*")
if sys.platform == "cygwin":
rebase_cmd = ["/usr/bin/rebase", "--database", "--oblivious"]
rebase_cmd.extend(Path(dirs.installed).glob("**/*.dll"))
subprocess.check_call(rebase_cmd)
print("Installation OK")
return
@classmethod
def copy_openblas(cls, dirs):
"""
Copies OpenBLAS DLL to the SciPy install dir, and also overwrites the
default `_distributor_init.py` file with the one
we use for wheels uploaded to PyPI so that DLL gets loaded.
Assumes pkg-config is installed and aware of OpenBLAS.
The "dirs" parameter is typically a "Dirs" object with the
structure as the following, say, if dev.py is run from the
folder "repo":
dirs = Dirs(
root=WindowsPath('C:/.../repo'),
build=WindowsPath('C:/.../repo/build'),
installed=WindowsPath('C:/.../repo/build-install'),
site=WindowsPath('C:/.../repo/build-install/Lib/site-packages'
)
"""
# Get OpenBLAS lib path from pkg-config
cmd = ['pkg-config', '--variable', 'libdir', 'openblas']
result = subprocess.run(cmd, capture_output=True, text=True)
# pkg-config does not return any meaningful error message if fails
if result.returncode != 0:
print('"pkg-config --variable libdir openblas" '
'command did not manage to find OpenBLAS '
'succesfully. Try running manually on the '
'command prompt for more information.')
print("OpenBLAS copy failed!")
sys.exit(result.returncode)
# Skip the drive letter of the path -> /c to get Windows drive
# to be appended correctly to avoid "C:\c\..." from stdout.
openblas_lib_path = Path(result.stdout.strip()[2:]).resolve()
if not openblas_lib_path.stem == 'lib':
raise RuntimeError('"pkg-config --variable libdir openblas" '
'command did not return a path ending with'
' "lib" folder. Instead it returned '
f'"{openblas_lib_path}"')
# Look in bin subdirectory for OpenBLAS binaries.
bin_path = openblas_lib_path.parent / 'bin'
# Locate, make output .libs directory in Scipy install directory.
scipy_path = dirs.site / 'scipy'
libs_path = scipy_path / '.libs'
libs_path.mkdir(exist_ok=True)
# Copy DLL files from OpenBLAS install to scipy install .libs subdir.
for dll_fn in bin_path.glob('*.dll'):
out_fname = libs_path / dll_fn.name
print(f'Copying {dll_fn} ----> {out_fname}')
out_fname.write_bytes(dll_fn.read_bytes())
# Write _distributor_init.py to scipy install dir;
# this ensures the .libs file is on the DLL search path at run-time,
# so OpenBLAS gets found
openblas_support = import_module_from_path(
'openblas_support',
dirs.root / 'tools' / 'openblas_support.py'
)
openblas_support.make_init(scipy_path)
print('OpenBLAS copied')
    @classmethod
    def run(cls, add_path=False, **kwargs):
        """Build and install the project, optionally exposing the install dir.

        Parameters
        ----------
        add_path : bool
            When True, prepend the site-packages install directory to
            ``sys.path`` after the build, so the fresh build is importable
            from the current process.
        **kwargs
            CLI option values; merged with the persisted context defaults.
        """
        # Merge persisted context options (build dir, flags, ...) into kwargs.
        kwargs.update(cls.ctx.get(kwargs))
        # Wrap the options in a namedtuple for attribute-style access.
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        cls.console = Console(theme=console_theme)
        dirs = Dirs(args)
        if args.no_build:
            print("Skipping build")
        else:
            env = cls.setup_build(dirs, args)
            cls.build_project(dirs, args, env)
            cls.install_project(dirs, args)
            # On Windows, optionally bundle the OpenBLAS DLLs with the install.
            if args.win_cp_openblas and platform.system() == 'Windows':
                cls.copy_openblas(dirs)
        # add site to sys.path
        if add_path:
            dirs.add_sys_path()
@cli.cls_cmd('test')
class Test(Task):
    """:wrench: Run tests.
    \b
    ```python
    Examples:
    $ python dev.py test -s {SAMPLE_SUBMODULE}
    $ python dev.py test -t scipy.optimize.tests.test_minimize_constrained
    $ python dev.py test -s cluster -m full --durations 20
    $ python dev.py test -s stats -- --tb=line # `--` passes next args to pytest
    ```
    """ # noqa: E501
    ctx = CONTEXT
    verbose = Option(
        ['--verbose', '-v'], default=False, is_flag=True,
        help="more verbosity")
    # removed doctests as currently not supported by _lib/_testutils.py
    # doctests = Option(['--doctests'], default=False)
    coverage = Option(
        ['--coverage', '-c'], default=False, is_flag=True,
        help=("report coverage of project code. "
              "HTML output goes under build/coverage"))
    durations = Option(
        ['--durations', '-d'], default=None, metavar="NUM_TESTS",
        help="Show timing for the given number of slowest tests"
    )
    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='MODULE_NAME',
        help="Submodule whose tests to run (cluster, constants, ...)")
    tests = Option(
        ['--tests', '-t'], default=None, multiple=True, metavar='TESTS',
        help='Specify tests to run')
    mode = Option(
        ['--mode', '-m'], default='fast', metavar='MODE', show_default=True,
        help=("'fast', 'full', or something that could be passed to "
              "`pytest -m` as a marker expression"))
    parallel = Option(
        ['--parallel', '-j'], default=1, metavar='N_JOBS',
        help="Number of parallel jobs for testing"
    )
    array_api_backend = Option(
        ['--array-api-backend', '-b'], default=None, metavar='ARRAY_BACKEND',
        multiple=True,
        help=(
            "Array API backend ('all', 'numpy', 'pytorch', 'cupy', 'numpy.array_api')."
        )
    )
    # Argument can't have `help=`; used to consume all of `-- arg1 arg2 arg3`
    pytest_args = Argument(
        ['pytest_args'], nargs=-1, metavar='PYTEST-ARGS', required=False
    )
    TASK_META = {
        'task_dep': ['build'],
    }
    @classmethod
    def scipy_tests(cls, args, pytest_args):
        """Run the test suite via SciPy's pytest runner.

        Parameters
        ----------
        args : namedtuple
            Parsed CLI options (verbose, coverage, durations, submodule,
            tests, mode, parallel, array_api_backend, plus build options).
        pytest_args : tuple of str
            Extra arguments forwarded verbatim to pytest.
        """
        dirs = Dirs(args)
        dirs.add_sys_path()
        print(f"SciPy from development installed path at: {dirs.site}")
        # FIXME: support pos-args with doit
        extra_argv = pytest_args[:] if pytest_args else []
        if extra_argv and extra_argv[0] == '--':
            # Drop the `--` separator; everything after it goes to pytest.
            extra_argv = extra_argv[1:]
        if args.coverage:
            dst_dir = dirs.root / args.build_dir / 'coverage'
            fn = dst_dir / 'coverage_html.js'
            if dst_dir.is_dir() and fn.is_file():
                # Remove a stale HTML coverage report before regenerating it.
                shutil.rmtree(dst_dir)
            extra_argv += ['--cov-report=html:' + str(dst_dir)]
            shutil.copyfile(dirs.root / '.coveragerc',
                            dirs.site / '.coveragerc')
        if args.durations:
            extra_argv += ['--durations', args.durations]
        # convert options to test selection
        if args.submodule:
            tests = [PROJECT_MODULE + "." + args.submodule]
        elif args.tests:
            tests = args.tests
        else:
            tests = None
        if len(args.array_api_backend) != 0:
            # Communicate the backend selection to conftest.py via env var.
            os.environ['SCIPY_ARRAY_API'] = json.dumps(list(args.array_api_backend))
        runner, version, mod_path = get_test_runner(PROJECT_MODULE)
        # FIXME: changing CWD is not a good practice
        with working_dir(dirs.site):
            print("Running tests for {} version:{}, installed at:{}".format(
                PROJECT_MODULE, version, mod_path))
            # runner verbosity - convert bool to int
            verbose = int(args.verbose) + 1
            result = runner(  # scipy._lib._testutils:PytestTester
                args.mode,
                verbose=verbose,
                extra_argv=extra_argv,
                doctests=False,
                coverage=args.coverage,
                tests=tests,
                parallel=args.parallel)
        return result
    @classmethod
    def run(cls, pytest_args, **kwargs):
        """run unit-tests"""
        # Merge persisted context options into the CLI kwargs.
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        return cls.scipy_tests(args, pytest_args)
@cli.cls_cmd('bench')
class Bench(Task):
    """:wrench: Run benchmarks.
    \b
    ```python
    Examples:
    $ python dev.py bench -t integrate.SolveBVP
    $ python dev.py bench -t linalg.Norm
    $ python dev.py bench --compare main
    ```
    """
    ctx = CONTEXT
    TASK_META = {
        'task_dep': ['build'],
    }
    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='SUBMODULE',
        help="Submodule whose tests to run (cluster, constants, ...)")
    tests = Option(
        ['--tests', '-t'], default=None, multiple=True,
        metavar='TESTS', help='Specify tests to run')
    compare = Option(
        ['--compare', '-c'], default=None, metavar='COMPARE', multiple=True,
        help=(
            "Compare benchmark results of current HEAD to BEFORE. "
            "Use an additional --bench COMMIT to override HEAD with COMMIT. "
            "Note that you need to commit your changes first!"))
    @staticmethod
    def run_asv(dirs, cmd):
        """Run an ``asv`` command from the benchmarks/ directory.

        Sets up ccache lookup paths, pins BLAS/LAPACK thread counts to 1,
        and applies a best-effort memory rlimit before running `cmd`.
        Returns the subprocess exit code (1 when asv is not installed).
        """
        EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
                      '/usr/local/lib/ccache', '/usr/local/lib/f90cache']
        bench_dir = dirs.root / 'benchmarks'
        sys.path.insert(0, str(bench_dir))
        # Always use ccache, if installed
        env = dict(os.environ)
        env['PATH'] = os.pathsep.join(EXTRA_PATH +
                                      env.get('PATH', '').split(os.pathsep))
        # Control BLAS/LAPACK threads
        env['OPENBLAS_NUM_THREADS'] = '1'
        env['MKL_NUM_THREADS'] = '1'
        # Limit memory usage (best-effort; ignore when unsupported)
        from benchmarks.common import set_mem_rlimit
        try:
            set_mem_rlimit()
        except (ImportError, RuntimeError):
            pass
        try:
            return subprocess.call(cmd, env=env, cwd=bench_dir)
        except OSError as err:
            if err.errno == errno.ENOENT:
                # asv binary not found on PATH: explain how to get it.
                cmd_str = " ".join(cmd)
                print(f"Error when running '{cmd_str}': {err}\n")
                print("You need to install Airspeed Velocity "
                      "(https://airspeed-velocity.github.io/asv/)")
                print("to run Scipy benchmarks")
                return 1
            raise
    @classmethod
    def scipy_bench(cls, args):
        """Run (or compare) the asv benchmark suite; exits the process."""
        dirs = Dirs(args)
        dirs.add_sys_path()
        print(f"SciPy from development installed path at: {dirs.site}")
        with working_dir(dirs.site):
            runner, version, mod_path = get_test_runner(PROJECT_MODULE)
            extra_argv = []
            if args.tests:
                extra_argv.append(args.tests)
            if args.submodule:
                extra_argv.append([args.submodule])
            # Translate each selection into an `--bench <expr>` pair.
            bench_args = []
            for a in extra_argv:
                bench_args.extend(['--bench', ' '.join(str(x) for x in a)])
            if not args.compare:
                print("Running benchmarks for Scipy version %s at %s"
                      % (version, mod_path))
                cmd = ['asv', 'run', '--dry-run', '--show-stderr',
                       '--python=same'] + bench_args
                retval = cls.run_asv(dirs, cmd)
                sys.exit(retval)
            else:
                if len(args.compare) == 1:
                    commit_a = args.compare[0]
                    commit_b = 'HEAD'
                elif len(args.compare) == 2:
                    commit_a, commit_b = args.compare
                else:
                    print("Too many commits to compare benchmarks for")
                    # BUG FIX: bail out here. Previously execution fell
                    # through with commit_a/commit_b undefined, crashing
                    # with a NameError instead of a clean error exit.
                    sys.exit(1)
                # Check for uncommitted files
                if commit_b == 'HEAD':
                    r1 = subprocess.call(['git', 'diff-index', '--quiet',
                                          '--cached', 'HEAD'])
                    r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                    if r1 != 0 or r2 != 0:
                        print("*" * 80)
                        print("WARNING: you have uncommitted changes --- "
                              "these will NOT be benchmarked!")
                        print("*" * 80)
                # Fix commit ids (HEAD is local to current repo)
                p = subprocess.Popen(['git', 'rev-parse', commit_b],
                                     stdout=subprocess.PIPE)
                out, err = p.communicate()
                commit_b = out.strip()
                p = subprocess.Popen(['git', 'rev-parse', commit_a],
                                     stdout=subprocess.PIPE)
                out, err = p.communicate()
                commit_a = out.strip()
                # Flag regressions beyond a 5% factor.
                cmd_compare = [
                    'asv', 'continuous', '--show-stderr', '--factor', '1.05',
                    commit_a, commit_b
                ] + bench_args
                cls.run_asv(dirs, cmd_compare)
                sys.exit(1)
    @classmethod
    def run(cls, **kwargs):
        """run benchmark"""
        # Merge persisted context options into the CLI kwargs.
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        cls.scipy_bench(args)
###################
# linters
def emit_cmdstr(cmd):
    """Echo the command about to be executed to stdout.

    Note: cannot use this in the below tasks (yet), because as is these
    command strings are always echoed to the console, even if the command
    isn't run (but for example the `build` command is run).
    """
    # The [cmd] tag selects the theme's command styling (typically italics)
    # so the echoed command stands out from other stdout content.
    Console(theme=console_theme).print(f"{EMOJI.cmd} [cmd] {cmd}")
def task_lint():
    """doit task: lint only the diff against main, with stricter rules."""
    # emit_cmdstr(os.path.join('tools', 'lint.py') + ' --diff-against main')
    lint_script = Dirs().root / 'tools' / 'lint.py'
    return {
        'basename': 'lint',
        'actions': [f'{lint_script} --diff-against=main'],
        'doc': 'Lint only files modified since last commit (stricter rules)',
    }
def task_unicode_check():
    """doit task: scan the source tree for disallowed Unicode characters."""
    # emit_cmdstr(os.path.join('tools', 'unicode-check.py'))
    check_script = str(Dirs().root / 'tools' / 'unicode-check.py')
    return dict(
        basename='unicode-check',
        actions=[check_script],
        doc='Check for disallowed Unicode characters in the SciPy Python '
            'and Cython source code.',
    )
def task_check_test_name():
    """doit task: verify test names follow the pattern pytest collects."""
    # emit_cmdstr(os.path.join('tools', 'check_test_name.py'))
    check_script = str(Dirs().root / "tools" / "check_test_name.py")
    return dict(
        basename="check-testname",
        actions=[check_script],
        doc="Check tests are correctly named so that pytest runs them.",
    )
@cli.cls_cmd('lint')
class Lint():
    """:dash: Run linter on modified files and check for
    disallowed Unicode characters and possibly-invalid test names."""
    def run():
        # Dispatch all three checks through doit; each task takes no options.
        selected = {name: {} for name in ('lint',
                                          'unicode-check',
                                          'check-testname')}
        run_doit_task(selected)
@cli.cls_cmd('mypy')
class Mypy(Task):
    """:wrench: Run mypy on the codebase."""
    ctx = CONTEXT
    TASK_META = {
        'task_dep': ['build'],
    }
    @classmethod
    def run(cls, **kwargs):
        """Type-check the installed package; return True when mypy is clean.

        Raises
        ------
        RuntimeError
            If mypy is not importable in the current environment.
        """
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        dirs = Dirs(args)
        try:
            import mypy.api
        except ImportError as e:
            raise RuntimeError(
                "Mypy not found. Please install it by running "
                "pip install -r mypy_requirements.txt from the repo root"
            ) from e
        config = dirs.root / "mypy.ini"
        check_path = PROJECT_MODULE
        with working_dir(dirs.site):
            # By default mypy won't color the output since it isn't being
            # invoked from a tty.
            os.environ['MYPY_FORCE_COLOR'] = '1'
            # Change to the site directory to make sure mypy doesn't pick
            # up any type stubs in the source tree.
            emit_cmdstr(f"mypy.api.run --config-file {config} {check_path}")
            report, errors, status = mypy.api.run([
                "--config-file",
                str(config),
                check_path,
            ])
        print(report, end='')
        print(errors, end='', file=sys.stderr)
        # mypy exit status 0 means no type errors were found.
        return status == 0
##########################################
# DOC
@cli.cls_cmd('doc')
class Doc(Task):
    """:wrench: Build documentation.
    TARGETS: Sphinx build targets [default: 'html']
    """
    ctx = CONTEXT
    args = Argument(['args'], nargs=-1, metavar='TARGETS', required=False)
    list_targets = Option(
        ['--list-targets', '-t'], default=False, is_flag=True,
        help='List doc targets',
    )
    parallel = Option(
        ['--parallel', '-j'], default=1, metavar='N_JOBS',
        help="Number of parallel jobs"
    )
    no_cache = Option(
        ['--no-cache'], default=False, is_flag=True,
        help="Forces a full rebuild of the docs. Note that this may be " + \
             "needed in order to make docstring changes in C/Cython files " + \
             "show up."
    )
    @classmethod
    def task_meta(cls, list_targets, parallel, no_cache, args, **kwargs):
        """Build the doit task dict that drives the Sphinx Makefile in doc/."""
        if list_targets:  # list MAKE targets, remove default target
            task_dep = []
            targets = ''
        else:
            task_dep = ['build']
            targets = ' '.join(args) if args else 'html'
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        build_args = Args(**kwargs)
        dirs = Dirs(build_args)
        # Pass our interpreter to make so the docs build with the same Python.
        make_params = [f'PYTHON="{sys.executable}"']
        if parallel or no_cache:
            # -j parallelizes Sphinx; -E forces a full (cache-free) rebuild.
            sphinxopts = ""
            if parallel:
                sphinxopts += f"-j{parallel} "
            if no_cache:
                sphinxopts += "-E"
            make_params.append(f'SPHINXOPTS="{sphinxopts}"')
        # Environment variables needed for notebooks
        # See gh-17322
        make_params.append('SQLALCHEMY_SILENCE_UBER_WARNING=1')
        make_params.append('JUPYTER_PLATFORM_DIRS=1')
        return {
            'actions': [
                # move to doc/ so local scipy does not get imported
                (f'cd doc; env PYTHONPATH="{dirs.site}" '
                 f'make {" ".join(make_params)} {targets}'),
            ],
            'task_dep': task_dep,
            'io': {'capture': False},
        }
@cli.cls_cmd('refguide-check')
class RefguideCheck(Task):
    """:wrench: Run refguide check."""
    ctx = CONTEXT
    submodule = Option(
        ['--submodule', '-s'], default=None, metavar='SUBMODULE',
        help="Submodule whose tests to run (cluster, constants, ...)")
    verbose = Option(
        ['--verbose', '-v'], default=False, is_flag=True, help="verbosity")
    @classmethod
    def task_meta(cls, **kwargs):
        """Build the doit task dict that runs tools/refguide_check.py."""
        kwargs.update(cls.ctx.get())
        Args = namedtuple('Args', [k for k in kwargs.keys()])
        args = Args(**kwargs)
        dirs = Dirs(args)
        cmd = [f'{sys.executable}',
               str(dirs.root / 'tools' / 'refguide_check.py'),
               '--doctests']
        if args.verbose:
            cmd += ['-vvv']
        if args.submodule:
            # Restrict the check to a single submodule when requested.
            cmd += [args.submodule]
        cmd_str = ' '.join(cmd)
        return {
            # PYTHONPATH points at the dev install so the built scipy is used.
            'actions': [f'env PYTHONPATH={dirs.site} {cmd_str}'],
            'task_dep': ['build'],
            'io': {'capture': False},
        }
##########################################
# ENVS
@cli.cls_cmd('python')
class Python():
    """:wrench: Start a Python shell with PYTHONPATH set."""
    ctx = CONTEXT
    pythonpath = Option(
        ['--pythonpath', '-p'], metavar='PYTHONPATH', default=None,
        help='Paths to prepend to PYTHONPATH')
    extra_argv = Argument(
        ['extra_argv'], nargs=-1, metavar='ARGS', required=False)
    @classmethod
    def _setup(cls, pythonpath, **kwargs):
        """Build/install the project and prepend user paths to sys.path."""
        vals = Build.opt_defaults()
        vals.update(kwargs)
        Build.run(add_path=True, **vals)
        if pythonpath:
            # Insert in reverse so the user's ordering survives insert(0).
            for p in reversed(pythonpath.split(os.pathsep)):
                sys.path.insert(0, p)
    @classmethod
    def run(cls, pythonpath, extra_argv=None, **kwargs):
        """Run a script in-process when args are given, else drop into a REPL."""
        cls._setup(pythonpath, **kwargs)
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0]) as f:
                script = f.read()
            # Emulate `python script.py`: fresh __main__ module namespace.
            sys.modules['__main__'] = new_module('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec(script, ns)
        else:
            import code
            code.interact()
@cli.cls_cmd('ipython')
class Ipython(Python):
    """:wrench: Start IPython shell with PYTHONPATH set."""
    ctx = CONTEXT
    pythonpath = Python.pythonpath
    @classmethod
    def run(cls, pythonpath, **kwargs):
        """Build/install, then embed an IPython shell with an empty namespace."""
        cls._setup(pythonpath, **kwargs)
        import IPython
        IPython.embed(user_ns={})
@cli.cls_cmd('shell')
class Shell(Python):
    """:wrench: Start Unix shell with PYTHONPATH set."""
    ctx = CONTEXT
    pythonpath = Python.pythonpath
    extra_argv = Python.extra_argv
    @classmethod
    def run(cls, pythonpath, extra_argv, **kwargs):
        """Build/install, then replace this process with the user's shell."""
        cls._setup(pythonpath, **kwargs)
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        # execv replaces the current process on success and raises on
        # failure, so the exit below is only a defensive fallback.
        os.execv(shell, [shell] + list(extra_argv))
        sys.exit(1)
@cli.command()
@click.argument('version_args', nargs=2)
@click.pass_obj
def notes(ctx_obj, version_args):
    """:ledger: Release notes and log generation.
    \b
    ```python
    Example:
    $ python dev.py notes v1.7.0 v1.8.0
    ```
    """
    if version_args:
        sys.argv = version_args
    log_start = sys.argv[0]
    log_end = sys.argv[1]
    cmd = f"python tools/write_release_and_log.py {log_start} {log_end}"
    click.echo(cmd)
    try:
        # Fix: with shell=True the command must be a single string, not a
        # list — wrapping it in a list is platform-dependent (on POSIX the
        # extra elements become arguments to /bin/sh itself).
        subprocess.run(cmd, check=True, shell=True)
    except subprocess.CalledProcessError:
        print('Error caught: Incorrect log start or log end version')
@cli.command()
@click.argument('revision_args', nargs=2)
@click.pass_obj
def authors(ctx_obj, revision_args):
    """:ledger: Generate list of authors who contributed within revision
    interval.
    \b
    ```python
    Example:
    $ python dev.py authors v1.7.0 v1.8.0
    ```
    """
    if revision_args:
        sys.argv = revision_args
    start_revision = sys.argv[0]
    end_revision = sys.argv[1]
    cmd = f"python tools/authors.py {start_revision}..{end_revision}"
    click.echo(cmd)
    try:
        # Fix: with shell=True the command must be a single string, not a
        # list — wrapping it in a list is platform-dependent (on POSIX the
        # extra elements become arguments to /bin/sh itself).
        subprocess.run(cmd, check=True, shell=True)
    except subprocess.CalledProcessError:
        print('Error caught: Incorrect revision start or revision end')
# The following CPU core count functions were taken from loky/backend/context.py
# See https://github.com/joblib/loky
# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
# None means "not computed yet"; populated lazily by _count_physical_cores().
physical_cores_cache = None
def cpu_count(only_physical_cores=False):
    """Return the number of CPUs the current process can use.

    The result is the minimum of:

    * the system CPU count (``multiprocessing.cpu_count``);
    * the CPU affinity mask of the current process, where available;
    * any Linux Cgroup CPU bandwidth quota (as set by docker and similar
      container orchestration systems);
    * the ``LOKY_MAX_CPU_COUNT`` environment variable, if defined.

    With ``only_physical_cores=True``, the number of physical cores is
    returned instead of logical (hyperthreading / SMT) cores — unless the
    usable core count is already restricted by affinity, Cgroups or
    ``LOKY_MAX_CPU_COUNT``. If the physical count cannot be determined,
    the logical count is returned instead.

    On Windows the result cannot exceed 61 (60 for Python < 3.10), see
    https://bugs.python.org/issue26903. The result is always >= 1.
    """
    # os.cpu_count() is documented as possibly returning None.
    logical = os.cpu_count() or 1
    if sys.platform == "win32":
        # Using more than 61 CPUs on Windows triggers an OS-level error
        # (https://bugs.python.org/issue26903). Going beyond would require
        # processor-group support, which does not look easy.
        logical = min(logical, _MAX_WINDOWS_WORKERS)
    user_limit = _cpu_count_user(logical)
    aggregate = max(min(logical, user_limit), 1)
    if not only_physical_cores:
        return aggregate
    if user_limit < logical:
        # Respect user setting
        return max(user_limit, 1)
    physical, exception = _count_physical_cores()
    if physical != "not found":
        return physical
    # Physical-core detection failed: fall back to the aggregate count.
    if exception is not None:
        # warns only the first time
        warnings.warn(
            "Could not find the number of physical cores for the "
            f"following reason:\n{exception}\n"
            "Returning the number of logical cores instead. You can "
            "silence this warning by setting LOKY_MAX_CPU_COUNT to "
            "the number of cores you want to use."
        )
        traceback.print_tb(exception.__traceback__)
    return aggregate
def _cpu_count_cgroup(os_cpu_count):
    """Apply any Linux Cgroup CPU bandwidth quota to *os_cpu_count*.

    Returns ``ceil(quota / period)`` when an active quota is found,
    otherwise *os_cpu_count* unchanged.
    """
    # Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
    v2_file = "/sys/fs/cgroup/cpu.max"
    v1_quota_file = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    v1_period_file = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
    if os.path.exists(v2_file):
        # cgroup v2: quota and period live in one file, e.g. "max 100000"
        # https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
        with open(v2_file) as fh:
            cpu_quota_us, cpu_period_us = fh.read().strip().split()
    elif os.path.exists(v1_quota_file) and os.path.exists(v1_period_file):
        # cgroup v1: one value per file
        # https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
        with open(v1_quota_file) as fh:
            cpu_quota_us = fh.read().strip()
        with open(v1_period_file) as fh:
            cpu_period_us = fh.read().strip()
    else:
        # No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
        cpu_quota_us = "max"
        cpu_period_us = 100_000  # unused, for consistency with default values
    if cpu_quota_us == "max":
        # No active Cgroup quota on a Cgroup-capable platform
        return os_cpu_count
    quota = int(cpu_quota_us)
    period = int(cpu_period_us)
    if quota > 0 and period > 0:
        return math.ceil(quota / period)
    # A negative quota is a valid way to disable the Cgroup CPU
    # bandwidth limit, so fall back to the unrestricted count.
    return os_cpu_count  # pragma: no cover
def _cpu_count_affinity(os_cpu_count):
    """Return the CPU count allowed by the process affinity mask, falling
    back to *os_cpu_count* when affinity cannot be inspected."""
    # Number of available CPUs given affinity settings
    if hasattr(os, "sched_getaffinity"):
        try:
            return len(os.sched_getaffinity(0))
        except NotImplementedError:
            pass
    # On PyPy and possibly other platforms, os.sched_getaffinity does not exist
    # or raises NotImplementedError, let's try with the psutil if installed.
    try:
        import psutil
        p = psutil.Process()
        if hasattr(p, "cpu_affinity"):
            return len(p.cpu_affinity())
    except ImportError:  # pragma: no cover
        if (
            sys.platform == "linux"
            and os.environ.get("LOKY_MAX_CPU_COUNT") is None
        ):
            # PyPy does not implement os.sched_getaffinity on Linux which
            # can cause severe oversubscription problems. Better warn the
            # user in this particularly pathological case which can wreak
            # havoc, typically on CI workers.
            warnings.warn(
                "Failed to inspect CPU affinity constraints on this system. "
                "Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
            )
    # This can happen for platforms that do not implement any kind of CPU
    # affinity such as macOS-based platforms.
    return os_cpu_count
def _cpu_count_user(os_cpu_count):
    """Number of user defined available CPUs"""
    # Gather every user-controllable restriction and take the tightest one.
    limits = (
        _cpu_count_affinity(os_cpu_count),
        _cpu_count_cgroup(os_cpu_count),
        # User defined soft-limit passed as a loky specific environment
        # variable.
        int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count)),
    )
    return min(limits)
def _count_physical_cores():
    """Return a tuple (number of physical cores, exception)

    If the number of physical cores is found, exception is set to None.
    If it has not been found, return ("not found", exception).

    The number of physical cores is cached to avoid repeating subprocess calls.
    """
    # Serve from the module-level cache when already computed once.
    global physical_cores_cache
    if physical_cores_cache is not None:
        return physical_cores_cache, None
    # Not cached yet: probe with a platform-specific command.
    exception = None
    try:
        if sys.platform == "linux":
            proc = subprocess.run(
                "lscpu --parse=core".split(), capture_output=True, text=True
            )
            # One output line per logical CPU (plus '#' comment lines);
            # the number of distinct core ids is the physical core count.
            core_ids = {
                line for line in proc.stdout.splitlines()
                if not line.startswith("#")
            }
            count = len(core_ids)
        elif sys.platform == "win32":
            proc = subprocess.run(
                "wmic CPU Get NumberOfCores /Format:csv".split(),
                capture_output=True,
                text=True,
            )
            # CSV rows look like "Node,NumberOfCores"; sum the per-socket
            # core counts, skipping blanks and the header row.
            per_socket = [
                line.split(",")[1]
                for line in proc.stdout.splitlines()
                if (line and line != "Node,NumberOfCores")
            ]
            count = sum(map(int, per_socket))
        elif sys.platform == "darwin":
            proc = subprocess.run(
                "sysctl -n hw.physicalcpu".split(),
                capture_output=True,
                text=True,
            )
            count = int(proc.stdout)
        else:
            raise NotImplementedError(f"unsupported platform: {sys.platform}")
        # A result below 1 means detection produced garbage.
        if count < 1:
            raise ValueError(f"found {count} physical cores < 1")
    except Exception as e:
        exception = e
        count = "not found"
    # Put the result in cache
    physical_cores_cache = count
    return count, exception
# Script entry point: dispatch to the click/doit-based CLI defined above.
if __name__ == '__main__':
    cli()
| 50,071 | 33.085773 | 87 | py |
scipy | scipy-main/scipy/conftest.py | # Pytest customization
import json
import os
import warnings
import numpy as np
import numpy.array_api
import numpy.testing as npt
import pytest
from scipy._lib._fpumode import get_fpu_mode
from scipy._lib._testutils import FPUModeChangeWarning
from scipy._lib import _pep440
from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE
def pytest_configure(config):
    """Register SciPy's custom pytest markers (plus a `timeout` fallback)."""
    custom_markers = [
        "slow: Tests that are very slow.",
        "xslow: mark test as extremely slow (not run unless explicitly requested)",
        "xfail_on_32bit: mark test as failing on 32-bit platforms",
    ]
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)
    try:
        import pytest_timeout  # noqa:F401
    except Exception:
        # Without the pytest-timeout plugin installed, declare the marker
        # ourselves so `@pytest.mark.timeout(...)` is not an unknown marker.
        config.addinivalue_line(
            "markers", 'timeout: mark a test for a non-default timeout')
def _get_mark(item, name):
    """Fetch marker `name` from a collected test item across pytest versions.

    pytest >= 3.6 renamed ``get_marker`` to ``get_closest_marker``; use
    whichever accessor the installed pytest provides.
    """
    is_modern = _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0")
    getter = item.get_closest_marker if is_modern else item.get_marker
    return getter(name)
def pytest_runtest_setup(item):
    """Per-test setup hook: honor xslow/xfail_on_32bit markers and cap
    BLAS thread usage when running under pytest-xdist."""
    mark = _get_mark(item, "xslow")
    if mark is not None:
        # xslow tests only run when SCIPY_XSLOW is set to a truthy integer.
        try:
            v = int(os.environ.get('SCIPY_XSLOW', '0'))
        except ValueError:
            v = False
        if not v:
            pytest.skip("very slow test; set environment variable SCIPY_XSLOW=1 to run it")
    mark = _get_mark(item, 'xfail_on_32bit')
    if mark is not None and np.intp(0).itemsize < 8:
        # intp smaller than 8 bytes identifies a 32-bit platform.
        pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}')
    # Older versions of threadpoolctl have an issue that may lead to this
    # warning being emitted, see gh-14441
    with npt.suppress_warnings() as sup:
        sup.filter(pytest.PytestUnraisableExceptionWarning)
        try:
            from threadpoolctl import threadpool_limits
            HAS_THREADPOOLCTL = True
        except Exception:  # observed in gh-14441: (ImportError, AttributeError)
            # Optional dependency only. All exceptions are caught, for robustness
            HAS_THREADPOOLCTL = False
        if HAS_THREADPOOLCTL:
            # Set the number of openmp threads based on the number of workers
            # xdist is using to prevent oversubscription. Simplified version of what
            # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper
            # functions)
            try:
                xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT'])
            except KeyError:
                # raises when pytest-xdist is not installed
                return
            if not os.getenv('OMP_NUM_THREADS'):
                max_openmp_threads = os.cpu_count() // 2  # use nr of physical cores
                threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1)
                try:
                    threadpool_limits(threads_per_worker, user_api='blas')
                except Exception:
                    # May raise AttributeError for older versions of OpenBLAS.
                    # Catch any error for robustness.
                    return
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU mode was not changed during the test.
    """
    # Record the FPU control word before the test body runs...
    old_mode = get_fpu_mode()
    yield
    # ...and compare afterwards; a difference means the test leaked FPU state.
    new_mode = get_fpu_mode()
    if old_mode != new_mode:
        warnings.warn("FPU mode changed from {:#x} to {:#x} during "
                      "the test".format(old_mode, new_mode),
                      category=FPUModeChangeWarning, stacklevel=0)
# Array API backend handling
# Map of backend name -> array namespace module; numpy is always available.
xp_available_backends = {'numpy': np}
if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str):
    # fill the dict of backends with available libraries
    xp_available_backends.update({'numpy.array_api': numpy.array_api})
    try:
        import torch  # type: ignore[import]
        xp_available_backends.update({'pytorch': torch})
        # can use `mps` or `cpu`
        torch.set_default_device(SCIPY_DEVICE)
    except ImportError:
        pass
    try:
        import cupy  # type: ignore[import]
        xp_available_backends.update({'cupy': cupy})
    except ImportError:
        pass
    # by default, use all available backends
    if SCIPY_ARRAY_API.lower() not in ("1", "true"):
        # SCIPY_ARRAY_API may hold a JSON list of backend names.
        SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API)
        if 'all' in SCIPY_ARRAY_API_:
            pass  # same as True
        else:
            # only select a subset of backend by filtering out the dict
            try:
                xp_available_backends = {
                    backend: xp_available_backends[backend]
                    for backend in SCIPY_ARRAY_API_
                }
            except KeyError:
                msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}"
                raise ValueError(msg)
    if 'cupy' in xp_available_backends:
        # CuPy implies CUDA regardless of the configured SCIPY_DEVICE.
        SCIPY_DEVICE = 'cuda'
# Parametrize a test over every selected array namespace.
array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values())
# Skip when Array API mode is enabled at all.
skip_if_array_api = pytest.mark.skipif(
    SCIPY_ARRAY_API,
    reason="do not run with Array API on",
)
# Skip when Array API mode is enabled on a non-CPU device.
skip_if_array_api_gpu = pytest.mark.skipif(
    SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu',
    reason="do not run with Array API on and not on CPU",
)
def skip_if_array_api_backend(backend):
    """Decorator factory: skip a test when it runs on Array API `backend`.

    Parameters
    ----------
    backend : str
        Module name of the array namespace to skip (compared against
        ``xp.__name__``), e.g. ``'cupy'``.

    Returns
    -------
    callable
        A decorator wrapping the test function; the wrapper calls
        ``pytest.skip`` when the ``xp`` fixture matches `backend` and
        delegates to the original test otherwise.
    """
    # Local import: only needed for the wrapping machinery below.
    import functools

    def wrapper(func):
        reason = (
            f"do not run with Array API backend: {backend}"
        )
        # method gets there as a function so we cannot use inspect.ismethod
        if '.' in func.__qualname__:
            # Fix: functools.wraps preserves __name__/__doc__ and any
            # pytest marks stored on the wrapped test's attributes, which
            # the bare wrappers previously discarded.
            @functools.wraps(func)
            def wrapped(self, *args, xp, **kwargs):
                if xp.__name__ == backend:
                    pytest.skip(reason=reason)
                return func(self, *args, xp, **kwargs)
        else:
            @functools.wraps(func)
            def wrapped(*args, xp, **kwargs):  # type: ignore[misc]
                if xp.__name__ == backend:
                    pytest.skip(reason=reason)
                return func(*args, xp, **kwargs)
        return wrapped
    return wrapper
| 5,991 | 33.436782 | 91 | py |
scipy | scipy-main/scipy/linalg/_basic.py | #
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from warnings import warn
from itertools import product
import numpy as np
from numpy import atleast_1d, atleast_2d
from .lapack import get_lapack_funcs, _compute_lwork
from ._misc import LinAlgError, _datacopied, LinAlgWarning
from ._decomp import _asarray_validated
from . import _decomp, _decomp_svd
from ._solve_toeplitz import levinson
from ._cythonized_array_utils import find_det_from_lu
from scipy._lib.deprecation import _NoValue
# Public API of this module.
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
           'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
           'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz']
# The numpy facilities for type-casting checks are too slow for small sized
# arrays and eat away the time budget for the checkups. Here we set a
# precomputed dict container of the numpy.can_cast() table.
# It can be used to determine quickly what a dtype can be cast to LAPACK
# compatible types, i.e., 'float32, float64, complex64, complex128'.
# Then it can be checked via "casting_dict[arr.dtype.char]"
lapack_cast_dict = {x: ''.join([y for y in 'fdFD' if np.can_cast(x, y)])
                    for x in np.typecodes['All']}
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, lower=False, overwrite_a=False,
          overwrite_b=False, check_finite=True, assume_a='gen',
          transposed=False):
    """
    Solves the linear equation set ``a @ x == b`` for the unknown ``x``
    for square `a` matrix.
    If the data matrix is known to be a particular type then supplying the
    corresponding string to ``assume_a`` key chooses the dedicated solver.
    The available options are
    ===================  ========
     generic matrix       'gen'
     symmetric            'sym'
     hermitian            'her'
     positive definite    'pos'
    ===================  ========
    If omitted, ``'gen'`` is the default structure.
    The datatype of the arrays define which solver is called regardless
    of the values. In other words, even when the complex array entries have
    precisely zero imaginary parts, the complex solver will be called based
    on the data type of the array.
    Parameters
    ----------
    a : (N, N) array_like
        Square input data
    b : (N, NRHS) array_like
        Input data for the right hand side.
    lower : bool, default: False
        Ignored if ``assume_a == 'gen'`` (the default). If True, the
        calculation uses only the data in the lower triangle of `a`;
        entries above the diagonal are ignored. If False (default), the
        calculation uses only the data in the upper triangle of `a`; entries
        below the diagonal are ignored.
    overwrite_a : bool, default: False
        Allow overwriting data in `a` (may enhance performance).
    overwrite_b : bool, default: False
        Allow overwriting data in `b` (may enhance performance).
    check_finite : bool, default: True
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    assume_a : str, {'gen', 'sym', 'her', 'pos'}
        Valid entries are explained above.
    transposed : bool, default: False
        If True, solve ``a.T @ x == b``. Raises `NotImplementedError`
        for complex `a`.
    Returns
    -------
    x : (N, NRHS) ndarray
        The solution array.
    Raises
    ------
    ValueError
        If size mismatches detected or input a is not square.
    LinAlgError
        If the matrix is singular.
    LinAlgWarning
        If an ill-conditioned input a is detected.
    NotImplementedError
        If transposed is True and input a is a complex matrix.
    Notes
    -----
    If the input b matrix is a 1-D array with N elements, when supplied
    together with an NxN input a, it is assumed as a valid column vector
    despite the apparent size mismatch. This is compatible with the
    numpy.dot() behavior and the returned result is still 1-D array.
    The generic, symmetric, Hermitian and positive definite solutions are
    obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
    LAPACK respectively.
    Examples
    --------
    Given `a` and `b`, solve for `x`:
    >>> import numpy as np
    >>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
    >>> b = np.array([2, 4, -1])
    >>> from scipy import linalg
    >>> x = linalg.solve(a, b)
    >>> x
    array([ 2., -2.,  9.])
    >>> np.dot(a, x) == b
    array([ True,  True,  True], dtype=bool)
    """
    # Flags for 1-D or N-D right-hand side
    b_is_1D = False
    a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
    b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
    n = a1.shape[0]
    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[0] != a1.shape[1]:
        raise ValueError('Input a needs to be a square matrix.')
    if n != b1.shape[0]:
        # Last chance to catch 1x1 scalar a and 1-D b arrays
        if not (n == 1 and b1.size != 0):
            raise ValueError('Input b has to have same number of rows as '
                             'input a')
    # accommodate empty arrays
    if b1.size == 0:
        return np.asfortranarray(b1.copy())
    # regularize 1-D b arrays to 2D
    if b1.ndim == 1:
        if n == 1:
            b1 = b1[None, :]
        else:
            b1 = b1[:, None]
        b_is_1D = True
    # Validate the structure keyword before dispatching to a LAPACK driver.
    if assume_a not in ('gen', 'sym', 'her', 'pos'):
        raise ValueError('{} is not a recognized matrix structure'
                         ''.format(assume_a))
    # for a real matrix, describe it as "symmetric", not "hermitian"
    # (lapack doesn't know what to do with real hermitian matrices)
    if assume_a == 'her' and not np.iscomplexobj(a1):
        assume_a = 'sym'
    # Get the correct lamch function.
    # The LAMCH functions only exists for S and D
    # So for complex values we have to convert to real/double.
    if a1.dtype.char in 'fF':  # single precision
        lamch = get_lapack_funcs('lamch', dtype='f')
    else:
        lamch = get_lapack_funcs('lamch', dtype='d')
    # Currently we do not have the other forms of the norm calculators
    #   lansy, lanpo, lanhe.
    # However, in any case they only reduce computations slightly...
    lange = get_lapack_funcs('lange', (a1,))
    # Since the I-norm and 1-norm are the same for symmetric matrices
    # we can collect them all in this one call
    # Note however, that when issuing 'gen' and form!='none', then
    # the I-norm should be used
    if transposed:
        trans = 1
        norm = 'I'
        if np.iscomplexobj(a1):
            raise NotImplementedError('scipy.linalg.solve can currently '
                                      'not solve a^T x = b or a^H x = b '
                                      'for complex matrices.')
    else:
        trans = 0
        norm = '1'
    # Matrix norm feeds the ?gecon/?sycon/... rcond estimate further down.
    anorm = lange(norm, a1)
    # Generalized case 'gesv'
    if assume_a == 'gen':
        gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
                                               (a1, b1))
        lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
        _solve_check(n, info)
        x, info = getrs(lu, ipvt, b1,
                        trans=trans, overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = gecon(lu, anorm, norm=norm)
    # Hermitian case 'hesv'
    elif assume_a == 'her':
        hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
                                                 'hesv_lwork'), (a1, b1))
        lwork = _compute_lwork(hesv_lw, n, lower)
        lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = hecon(lu, ipvt, anorm)
    # Symmetric case 'sysv'
    elif assume_a == 'sym':
        sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
                                                 'sysv_lwork'), (a1, b1))
        lwork = _compute_lwork(sysv_lw, n, lower)
        lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
                                 lower=lower,
                                 overwrite_a=overwrite_a,
                                 overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = sycon(lu, ipvt, anorm)
    # Positive definite case 'posv'
    else:
        pocon, posv = get_lapack_funcs(('pocon', 'posv'),
                                       (a1, b1))
        lu, x, info = posv(a1, b1, lower=lower,
                           overwrite_a=overwrite_a,
                           overwrite_b=overwrite_b)
        _solve_check(n, info)
        rcond, info = pocon(lu, anorm)
    # Final check also emits the ill-conditioning warning if rcond is tiny.
    _solve_check(n, info, lamch, rcond)
    # Restore the 1-D shape if b originally came in as a vector.
    if b_is_1D:
        x = x.ravel()
    return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
                     overwrite_b=False, check_finite=True):
    """
    Solve the equation ``a x = b`` for `x`, assuming `a` is triangular.

    Parameters
    ----------
    a : (M, M) array_like
        A triangular matrix.
    b : (M,) or (M, N) array_like
        Right-hand side matrix in ``a x = b``.
    trans : {0, 1, 2, 'N', 'T', 'C'}, optional
        Type of system to solve: ``0``/``'N'`` for ``a x = b``,
        ``1``/``'T'`` for ``a^T x = b``, ``2``/``'C'`` for ``a^H x = b``.
    lower : bool, optional
        Use only data contained in the lower triangle of `a`.
        Default is to use the upper triangle.
    unit_diagonal : bool, optional
        If True, the diagonal elements of `a` are assumed to be 1 and
        are not referenced.
    overwrite_b : bool, optional
        Allow overwriting data in `b` (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, N) ndarray
        Solution to the system ``a x = b``; shape matches that of `b`.

    Raises
    ------
    LinAlgError
        If `a` is singular.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    a_arr = _asarray_validated(a, check_finite=check_finite)
    b_arr = _asarray_validated(b, check_finite=check_finite)
    # Shape validation: a must be square and conformable with b.
    if a_arr.ndim != 2 or a_arr.shape[0] != a_arr.shape[1]:
        raise ValueError('expected square matrix')
    if a_arr.shape[0] != b_arr.shape[0]:
        raise ValueError(f'shapes of a {a_arr.shape} and b {b_arr.shape} '
                         'are incompatible')
    overwrite_b = overwrite_b or _datacopied(b_arr, b)
    # Normalize the string spellings of trans to the integer codes.
    trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
    trtrs, = get_lapack_funcs(('trtrs',), (a_arr, b_arr))
    if a_arr.flags.f_contiguous or trans == 2:
        x, info = trtrs(a_arr, b_arr, overwrite_b=overwrite_b, lower=lower,
                        trans=trans, unitdiag=unit_diagonal)
    else:
        # trtrs expects Fortran ordering, so for C-ordered input solve the
        # transposed system instead (flip both the triangle and trans).
        x, info = trtrs(a_arr.T, b_arr, overwrite_b=overwrite_b,
                        lower=not lower, trans=not trans,
                        unitdiag=unit_diagonal)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal trtrs' %
                         (-info))
    if info > 0:
        raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
                          (info-1))
    return x
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
                 check_finite=True):
    """
    Solve the equation a x = b for x, assuming a is banded matrix.

    The matrix a is stored in `ab` using the matrix diagonal ordered form::

        ab[u + i - j, j] == a[i,j]

    Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::

        *    a01  a12  a23  a34  a45
        a00  a11  a22  a33  a44  a55
        a10  a21  a32  a43  a54   *
        a20  a31  a42  a53   *    *

    Parameters
    ----------
    (l, u) : (integer, integer)
        Number of non-zero lower and upper diagonals
    ab : (`l` + `u` + 1, M) array_like
        Banded matrix
    b : (M,) or (M, K) array_like
        Right-hand side
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance)
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system a x = b. Returned shape depends on the
        shape of `b`.

    Examples
    --------
    Solve the banded system a x = b, where::

            [5  2 -1  0  0]       [0]
            [1  4  2 -1  0]       [1]
        a = [0  1  3  2 -1]   b = [2]
            [0  0  1  2  2]       [2]
            [0  0  0  1  1]       [3]

    There is one nonzero diagonal below the main diagonal (l = 1), and
    two above (u = 2). The diagonal banded form of the matrix is::

             [*  * -1 -1 -1]
        ab = [*  2  2  2  2]
             [5  4  3  2  1]
             [1  1  1  1  *]

    >>> import numpy as np
    >>> from scipy.linalg import solve_banded
    >>> ab = np.array([[0,  0, -1, -1, -1],
    ...                [0,  2,  2,  2,  2],
    ...                [5,  4,  3,  2,  1],
    ...                [1,  1,  1,  1,  0]])
    >>> b = np.array([0, 1, 2, 2, 3])
    >>> x = solve_banded((1, 2), ab, b)
    >>> x
    array([-2.37288136,  3.93220339, -4.        ,  4.3559322 , -1.3559322 ])
    """
    a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
    b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
    # Validate shapes.
    if a1.shape[-1] != b1.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    (nlower, nupper) = l_and_u
    if nlower + nupper + 1 != a1.shape[0]:
        # Bug fix: format the *validated* array's shape. The original
        # formatted `ab.shape[0]` on the raw argument, which raised
        # AttributeError (masking this ValueError) when `ab` was a list.
        raise ValueError("invalid values for the number of lower and upper "
                         "diagonals: l+u+1 (%d) does not equal ab.shape[0] "
                         "(%d)" % (nlower + nupper + 1, a1.shape[0]))
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if a1.shape[-1] == 1:
        # 1x1 system: plain (element-wise) division by the diagonal entry.
        b2 = np.array(b1, copy=(not overwrite_b))
        b2 /= a1[1, 0]
        return b2
    if nlower == nupper == 1:
        # Tridiagonal system: use the dedicated ?gtsv driver.
        overwrite_ab = overwrite_ab or _datacopied(a1, ab)
        gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
        du = a1[0, 1:]
        d = a1[1, :]
        dl = a1[2, :-1]
        du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
                                   overwrite_ab, overwrite_b)
    else:
        # General band: ?gbsv needs `l` extra rows of workspace on top of
        # the band for the LU fill-in, so copy into an enlarged array.
        gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
        a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
        a2[nlower:, :] = a1
        lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
                                overwrite_b=overwrite_b)
    if info == 0:
        return x
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError('illegal value in %d-th argument of internal '
                     'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
                  check_finite=True):
    """
    Solve ``a x = b`` where ``a`` is a Hermitian positive-definite banded
    matrix.

    Uses Thomas' Algorithm, which is more efficient than standard LU
    factorization, but should only be used for Hermitian positive-definite
    matrices.

    The matrix ``a`` is stored in `ab` either in lower diagonal or upper
    diagonal ordered form::

        ab[u + i - j, j] == a[i,j]        (if upper form; i <= j)
        ab[    i - j, j] == a[i,j]        (if lower form; i >= j)

    where ``u`` is the number of non-zero off-diagonals; cells that fall
    outside the matrix are not referenced.

    Parameters
    ----------
    ab : (``u`` + 1, M) array_like
        Banded matrix.
    b : (M,) or (M, K) array_like
        Right-hand side.
    overwrite_ab : bool, optional
        Discard data in `ab` (may enhance performance).
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance).
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form.)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``a x = b``; shape matches that of `b`.

    Notes
    -----
    For a non-positive-definite banded matrix, use `solve_banded` instead.
    """
    band = _asarray_validated(ab, check_finite=check_finite)
    rhs = _asarray_validated(b, check_finite=check_finite)
    # Shapes must be conformable along the matrix dimension.
    if band.shape[-1] != rhs.shape[0]:
        raise ValueError("shapes of ab and b are not compatible.")
    overwrite_b = overwrite_b or _datacopied(rhs, b)
    overwrite_ab = overwrite_ab or _datacopied(band, ab)
    if band.shape[0] == 2:
        # Tridiagonal: dedicated ?ptsv driver on the (real) main diagonal
        # and the single off-diagonal.
        ptsv, = get_lapack_funcs(('ptsv',), (band, rhs))
        if lower:
            main_diag = band[0, :].real
            off_diag = band[1, :-1]
        else:
            main_diag = band[1, :].real
            # Upper storage holds the superdiagonal; ptsv wants its
            # conjugate (the subdiagonal of the Hermitian matrix).
            off_diag = band[0, 1:].conj()
        main_diag, du, x, info = ptsv(main_diag, off_diag, rhs,
                                      overwrite_ab, overwrite_ab,
                                      overwrite_b)
    else:
        # Wider band: Cholesky-based ?pbsv driver.
        pbsv, = get_lapack_funcs(('pbsv',), (band, rhs))
        c, x, info = pbsv(band, rhs, lower=lower, overwrite_ab=overwrite_ab,
                          overwrite_b=overwrite_b)
    if info > 0:
        raise LinAlgError("%dth leading minor not positive definite" % info)
    if info < 0:
        raise ValueError('illegal value in %dth argument of internal '
                         'pbsv' % -info)
    return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
    """Solve a Toeplitz system using Levinson Recursion.

    The Toeplitz matrix has constant diagonals, with ``c`` as its first
    column and ``r`` as its first row. If ``r`` is not given,
    ``r == conjugate(c)`` is assumed.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Both are
        flattened to 1-D. ``r[0]`` is ignored; the first row of the Toeplitz
        matrix is ``[c[0], r[1:]]``. If only ``c`` is supplied and ``c[0]``
        is real, the Toeplitz matrix is Hermitian.
    b : (M,) or (M, K) array_like
        Right-hand side in ``T x = b``.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (result entirely NaNs) if the inputs do contain infinities
        or NaNs.

    Returns
    -------
    x : (M,) or (M, K) ndarray
        The solution to the system ``T x = b``; shape matches that of `b`.

    See Also
    --------
    toeplitz : Toeplitz matrix

    Notes
    -----
    Levinson-Durbin recursion is faster than generic least-squares methods,
    but can be less numerically stable.
    """
    # If numerical stability of this algorithm is a problem, a future
    # developer might consider implementing other O(N^2) Toeplitz solvers,
    # such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
    r, c, b, dtype, b_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, b, check_finite, keep_b_shape=True)
    if b is None:
        raise ValueError('illegal value, `b` is a required argument')
    # One flat array of matrix entries: reversed r[1:] followed by c.
    vals = np.concatenate((r[-1:0:-1], c))
    if b.ndim == 1:
        return levinson(vals, np.ascontiguousarray(b))[0]
    # Solve each right-hand-side column independently, then restore the
    # caller's original b shape.
    columns = [levinson(vals, np.ascontiguousarray(b[:, k]))[0]
               for k in range(b.shape[1])]
    return np.column_stack(columns).reshape(*b_shape)
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError(f"'{aname}axis' entry is out of bounds")
def solve_circulant(c, b, singular='raise', tol=None,
                    caxis=-1, baxis=0, outaxis=0):
    """Solve C x = b for x, where C is a circulant matrix.

    `C` is the circulant matrix associated with the vector `c`. The system
    is solved by division in Fourier space::

        x = ifft(fft(b) / fft(c))

    For a large vector `c`, this is *much* faster than solving the system
    with the full circulant matrix.

    Parameters
    ----------
    c : array_like
        The coefficients of the circulant matrix.
    b : array_like
        Right-hand side matrix in ``a x = b``.
    singular : str, optional
        Controls how a near-singular circulant matrix is handled: with
        ``"raise"`` (the default) a `LinAlgError` is raised; with
        ``"lstsq"`` the least-squares solution is returned.
    tol : float, optional
        Eigenvalues of the circulant matrix whose absolute value is at most
        `tol` mark it as near singular. If not given::

            tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps

        where ``abs_eigs`` are the absolute values of the eigenvalues.
    caxis : int
        Axis of `c` holding the vectors of circulant coefficients when `c`
        has dimension greater than 1.
    baxis : int
        Axis of `b` holding the right-hand-side vectors when `b` has
        dimension greater than 1.
    outaxis : int
        Axis of the result holding the solution vectors when `c` or `b`
        are multidimensional.

    Returns
    -------
    x : ndarray
        Solution to the system ``C x = b``.

    Raises
    ------
    LinAlgError
        If the circulant matrix associated with `c` is near singular.

    See Also
    --------
    circulant : circulant matrix

    Notes
    -----
    For 1-D `c` of length ``m`` and `b` of shape ``(m, ...)``,
    ``solve_circulant(c, b)`` matches ``solve(circulant(c), b)``.

    .. versionadded:: 0.16.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import solve_circulant
    >>> solve_circulant(np.array([2, 2, 4]), np.array([1, 2, 3]))
    array([ 0.75, -0.25,  0.25])
    """
    c = np.atleast_1d(c)
    nc = _get_axis_len("c", c, caxis)
    b = np.atleast_1d(b)
    nb = _get_axis_len("b", b, baxis)
    if nc != nb:
        raise ValueError(f'Shapes of c {c.shape} and b {b.shape} '
                         'are incompatible')
    # Eigenvalues of each circulant matrix: the DFT of its coefficients.
    spec_c = np.fft.fft(np.moveaxis(c, caxis, -1), axis=-1)
    abs_spec = np.abs(spec_c)
    if tol is None:
        # Same default tolerance as np.linalg.matrix_rank.
        tol = abs_spec.max(axis=-1) * nc * np.finfo(np.float64).eps
        if tol.shape != ():
            tol.shape = tol.shape + (1,)
        else:
            tol = np.atleast_1d(tol)
    near_zeros = abs_spec <= tol
    is_near_singular = np.any(near_zeros)
    if is_near_singular:
        if singular == 'raise':
            raise LinAlgError("near singular circulant matrix.")
        # 'lstsq': replace the tiny eigenvalues by 1 so the division below
        # is safe; the affected quotient entries are zeroed afterwards.
        spec_c[near_zeros] = 1
    spec_b = np.fft.fft(np.moveaxis(b, baxis, -1), axis=-1)
    quotient = spec_b / spec_c
    if is_near_singular:
        # Broadcast `near_zeros` (shaped like c's spectrum) against b's
        # shape so the masked entries of the quotient are set to zero,
        # yielding the minimum-norm least-squares solution.
        mask = np.ones_like(b, dtype=bool) & near_zeros
        quotient[mask] = 0
    x = np.fft.ifft(quotient, axis=-1)
    if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
        x = x.real
    if outaxis != -1:
        x = np.moveaxis(x, -1, outaxis)
    return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
    """
    Compute the inverse of a matrix.

    Parameters
    ----------
    a : array_like
        Square matrix to be inverted.
    overwrite_a : bool, optional
        Discard data in `a` (may improve performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    ainv : ndarray
        Inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is singular.
    ValueError
        If `a` is not square, or not 2D.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> linalg.inv(a)
    array([[-2. ,  1. ],
           [ 1.5, -0.5]])
    """
    a_arr = _asarray_validated(a, check_finite=check_finite)
    if a_arr.ndim != 2 or a_arr.shape[0] != a_arr.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or _datacopied(a_arr, a)
    # Invert via an LU factorization (?getrf) followed by ?getri.
    getrf, getri, getri_lwork = get_lapack_funcs(
        ('getrf', 'getri', 'getri_lwork'), (a_arr,))
    lu, piv, info = getrf(a_arr, overwrite_a=overwrite_a)
    if info == 0:
        lwork = _compute_lwork(getri_lwork, a_arr.shape[0])
        # Pad the workspace slightly: the minimal lwork has been observed
        # to SEGFAULT in some LAPACK ?getri builds (possibly a LAPACK bug),
        # e.g. when benchmarking a 500x500 inverse.
        lwork = int(1.01 * lwork)
        inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
    if info > 0:
        raise LinAlgError("singular matrix")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal '
                         'getrf|getri' % -info)
    return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
    """
    Compute the determinant of a matrix
    The determinant is a scalar that is a function of the associated square
    matrix coefficients. The determinant value is zero for singular matrices.
    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.
    overwrite_a : bool, optional
        Allow overwriting data in a (may enhance performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    det : (...) float or complex
        Determinant of `a`. For stacked arrays, a scalar is returned for each
        (m, m) slice in the last two dimensions of the input. For example, an
        input of shape (p, q, m, m) will produce a result of shape (p, q). If
        all dimensions are 1 a scalar is returned regardless of ndim.
    Notes
    -----
    The determinant is computed by performing an LU factorization of the
    input with LAPACK routine 'getrf', and then calculating the product of
    diagonal entries of the U factor.
    Even if the input array is single precision (float32 or complex64), the
    result will be returned in double precision (float64 or complex128) to
    prevent overflows.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])  # A singular matrix
    >>> linalg.det(a)
    0.0
    >>> b = np.array([[0,2,3], [4,5,6], [7,8,9]])
    >>> linalg.det(b)
    3.0
    >>> # An array with the shape (3, 2, 2, 2)
    >>> c = np.array([[[[1., 2.], [3., 4.]],
    ...                [[5., 6.], [7., 8.]]],
    ...               [[[9., 10.], [11., 12.]],
    ...                [[13., 14.], [15., 16.]]],
    ...               [[[17., 18.], [19., 20.]],
    ...                [[21., 22.], [23., 24.]]]])
    >>> linalg.det(c)  # The resulting shape is (3, 2)
    array([[-2., -2.],
           [-2., -2.],
           [-2., -2.]])
    >>> linalg.det(c[0, 0])  # Confirm the (0, 0) slice, [[1, 2], [3, 4]]
    -2.0
    """
    # The goal is to end up with a writable contiguous array to pass to Cython
    # First we check and make arrays.
    a1 = np.asarray_chkfinite(a) if check_finite else np.asarray(a)
    if a1.ndim < 2:
        raise ValueError('The input array must be at least two-dimensional.')
    if a1.shape[-1] != a1.shape[-2]:
        raise ValueError('Last 2 dimensions of the array must be square'
                         f' but received shape {a1.shape}.')
    # Also check if dtype is LAPACK compatible
    if a1.dtype.char not in 'fdFD':
        dtype_char = lapack_cast_dict[a1.dtype.char]
        if not dtype_char:  # No casting possible
            raise TypeError(f'The dtype "{a1.dtype.name}" cannot be cast '
                            'to float(32, 64) or complex(64, 128).')
        # astype makes a fresh copy, so it is always safe to scratch it below
        a1 = a1.astype(dtype_char[0])  # makes a copy, free to scratch
        overwrite_a = True
    # Empty array has determinant 1 because math.
    if min(*a1.shape) == 0:
        if a1.ndim == 2:
            return np.float64(1.)
        else:
            return np.ones(shape=a1.shape[:-2], dtype=np.float64)
    # Scalar case
    if a1.shape[-2:] == (1, 1):
        # Either ndarray with spurious singletons or a single element
        if max(*a1.shape) > 1:
            temp = np.squeeze(a1)
            # Single-precision results are promoted to double (see Notes)
            if a1.dtype.char in 'dD':
                return temp
            else:
                return (temp.astype('d') if a1.dtype.char == 'f' else
                        temp.astype('D'))
        else:
            return (np.float64(a1.item()) if a1.dtype.char in 'fd' else
                    np.complex128(a1.item()))
    # Then check overwrite permission
    if not _datacopied(a1, a):  # "a" still alive through "a1"
        if not overwrite_a:
            # Data belongs to "a" so make a copy
            a1 = a1.copy(order='C')
        #  else: Do nothing we'll use "a" if possible
    # else: a1 has its own data thus free to scratch
    # Then layout checks, might happen that overwrite is allowed but original
    # array was read-only or non-C-contiguous.
    # (find_det_from_lu factorizes in place, hence the writable requirement.)
    if not (a1.flags['C_CONTIGUOUS'] and a1.flags['WRITEABLE']):
        a1 = a1.copy(order='C')
    if a1.ndim == 2:
        det = find_det_from_lu(a1)
        # Convert float, complex to to NumPy scalars
        return (np.float64(det) if np.isrealobj(det) else np.complex128(det))
    # loop over the stacked array, and avoid overflows for single precision
    # Cf. np.linalg.det(np.diag([1e+38, 1e+38]).astype(np.float32))
    dtype_char = a1.dtype.char
    if dtype_char in 'fF':
        dtype_char = 'd' if dtype_char.islower() else 'D'
    det = np.empty(a1.shape[:-2], dtype=dtype_char)
    # One determinant per (m, m) slice of the stacked input.
    for ind in product(*[range(x) for x in a1.shape[:-2]]):
        det[ind] = find_det_from_lu(a1[ind])
    return det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
          check_finite=True, lapack_driver=None):
    """
    Compute least-squares solution to equation Ax = b.

    Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.

    Parameters
    ----------
    a : (M, N) array_like
        Left-hand side array
    b : (M,) or (M, K) array_like
        Right hand side array
    cond : float, optional
        Cutoff for 'small' singular values; used to determine effective
        rank of a. Singular values smaller than
        ``cond * largest_singular_value`` are considered zero.
    overwrite_a : bool, optional
        Discard data in `a` (may enhance performance). Default is False.
    overwrite_b : bool, optional
        Discard data in `b` (may enhance performance). Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    lapack_driver : str, optional
        Which LAPACK driver is used to solve the least-squares problem.
        Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
        (``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
        faster on many problems. ``'gelss'`` was used historically. It is
        generally slow but uses less memory.

        .. versionadded:: 0.17.0

    Returns
    -------
    x : (N,) or (N, K) ndarray
        Least-squares solution.
    residues : (K,) ndarray or float
        Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
        ``ndim(A) == n`` (returns a scalar if ``b`` is 1-D). Otherwise a
        (0,)-shaped array is returned.
    rank : int
        Effective rank of `a`.
    s : (min(M, N),) ndarray or None
        Singular values of `a`. The condition number of ``a`` is
        ``s[0] / s[-1]``.

    Raises
    ------
    LinAlgError
        If computation does not converge.
    ValueError
        When parameters are not compatible.

    See Also
    --------
    scipy.optimize.nnls : linear least squares with non-negativity constraint

    Notes
    -----
    When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
    array and `s` is always ``None``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import lstsq
    >>> import matplotlib.pyplot as plt

    Suppose we have the following data:

    >>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
    >>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])

    We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
    to this data. We first form the "design matrix" M, with a constant
    column of 1s and a column containing ``x**2``:

    >>> M = x[:, np.newaxis]**[0, 2]
    >>> M
    array([[  1.  ,   1.  ],
           [  1.  ,   6.25],
           [  1.  ,  12.25],
           [  1.  ,  16.  ],
           [  1.  ,  25.  ],
           [  1.  ,  49.  ],
           [  1.  ,  72.25]])

    We want to find the least-squares solution to ``M.dot(p) = y``,
    where ``p`` is a vector with length 2 that holds the parameters
    ``a`` and ``b``.

    >>> p, res, rnk, s = lstsq(M, y)
    >>> p
    array([ 0.20925829,  0.12013861])

    Plot the data and the fitted curve.

    >>> plt.plot(x, y, 'o', label='data')
    >>> xx = np.linspace(0, 9, 101)
    >>> yy = p[0] + p[1]*xx**2
    >>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
    >>> plt.xlabel('x')
    >>> plt.ylabel('y')
    >>> plt.legend(framealpha=1, shadow=True)
    >>> plt.grid(alpha=0.25)
    >>> plt.show()

    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    b1 = _asarray_validated(b, check_finite=check_finite)
    if len(a1.shape) != 2:
        raise ValueError('Input array a should be 2D')
    m, n = a1.shape
    if len(b1.shape) == 2:
        nrhs = b1.shape[1]
    else:
        nrhs = 1
    if m != b1.shape[0]:
        raise ValueError('Shape mismatch: a and b should have the same number'
                         ' of rows ({} != {}).'.format(m, b1.shape[0]))
    if m == 0 or n == 0:  # Zero-sized problem, confuses LAPACK
        x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
        if n == 0:
            # With no columns the residual is just |b|^2 per RHS column.
            residues = np.linalg.norm(b1, axis=0)**2
        else:
            residues = np.empty((0,))
        return x, residues, 0, np.empty((0,))

    # Resolve the driver: a missing keyword falls back to the module default
    # stored as a function attribute (see assignment below the function).
    driver = lapack_driver
    if driver is None:
        driver = lstsq.default_lapack_driver
    if driver not in ('gelsd', 'gelsy', 'gelss'):
        raise ValueError('LAPACK driver "%s" is not found' % driver)

    lapack_func, lapack_lwork = get_lapack_funcs((driver,
                                                  '%s_lwork' % driver),
                                                 (a1, b1))
    real_data = lapack_func.dtype.kind == 'f'

    if m < n:
        # need to extend b matrix as it will be filled with
        # a larger solution matrix
        if len(b1.shape) == 2:
            b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
            b2[:m, :] = b1
        else:
            b2 = np.zeros(n, dtype=lapack_func.dtype)
            b2[:m] = b1
        b1 = b2

    overwrite_a = overwrite_a or _datacopied(a1, a)
    overwrite_b = overwrite_b or _datacopied(b1, b)

    if cond is None:
        cond = np.finfo(lapack_func.dtype).eps

    if driver in ('gelss', 'gelsd'):
        if driver == 'gelss':
            lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
            v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
                                                    overwrite_a=overwrite_a,
                                                    overwrite_b=overwrite_b)
        elif driver == 'gelsd':
            if real_data:
                lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork,
                                               iwork, cond, False, False)
            else:  # complex data
                lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
                                                     nrhs, cond)
                x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
                                               cond, False, False)
        if info > 0:
            raise LinAlgError("SVD did not converge in Linear Least Squares")
        if info < 0:
            # BUGFIX: report the *resolved* driver name. The `lapack_driver`
            # argument is None when the caller relied on the default, which
            # used to produce a message mentioning "internal None".
            raise ValueError('illegal value in %d-th argument of internal %s'
                             % (-info, driver))
        resids = np.asarray([], dtype=x.dtype)
        if m > n:
            # LAPACK returns the solution in the first n rows; when `a` has
            # full rank the trailing rows hold the residual components.
            x1 = x[:n]
            if rank == n:
                resids = np.sum(np.abs(x[n:])**2, axis=0)
            x = x1
        return x, resids, rank, s

    elif driver == 'gelsy':
        lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
        jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
        v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
                                          lwork, False, False)
        if info < 0:
            raise ValueError("illegal value in %d-th argument of internal "
                             "gelsy" % -info)
        if m > n:
            x1 = x[:n]
            x = x1
        # gelsy does not compute singular values nor residuals.
        return x, np.array([], x.dtype), rank, None


lstsq.default_lapack_driver = 'gelsd'
def pinv(a, atol=None, rtol=None, return_rank=False, check_finite=True,
         cond=_NoValue, rcond=_NoValue):
    """Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The pseudo-inverse is assembled from the economy-size singular value
    decomposition of `a`. Singular values no larger than
    ``atol + rtol * max(s)`` are considered insignificant and the
    corresponding columns/rows are dropped.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    atol : float, optional
        Absolute threshold term, default value is 0.

        .. versionadded:: 1.7.0
    rtol : float, optional
        Relative threshold term, default value is ``max(M, N) * eps``
        where ``eps`` is the machine precision value of the datatype
        of `a`.

        .. versionadded:: 1.7.0
    return_rank : bool, optional
        If True, also return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.
    cond, rcond : float, optional
        Deprecated aliases; when given (and `atol`/`rtol` are not), they
        behave as ``atol`` with ``rtol=0``. `atol`/`rtol` take precedence
        if supplied.

        .. deprecated:: 1.7.0
            Use ``atol`` and ``rtol`` instead; these keywords will be
            removed in SciPy 1.13.0.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is
        True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    See Also
    --------
    pinvh : Moore-Penrose pseudoinverse of a hermitian matrix.
    """
    a = _asarray_validated(a, check_finite=check_finite)
    u, s, vh = _decomp_svd.svd(a, full_matrices=False, check_finite=False)
    prec_char = u.dtype.char.lower()
    s_max = np.max(s)

    if rcond is not _NoValue or cond is not _NoValue:
        warn('Use of the "cond" and "rcond" keywords are deprecated and '
             'will be removed in SciPy 1.13.0. Use "atol" and '
             '"rtol" keywords instead', DeprecationWarning, stacklevel=2)

    # Legacy keywords take effect only when both atol and rtol are absent;
    # rcond wins over cond when both are supplied.
    if atol is None and rtol is None:
        if rcond not in (_NoValue, None):
            atol, rtol = rcond, 0.
        elif cond not in (_NoValue, None):
            atol, rtol = cond, 0.

    if atol is None:
        atol = 0.
    if rtol is None:
        rtol = max(a.shape) * np.finfo(prec_char).eps
    if atol < 0. or rtol < 0.:
        raise ValueError("atol and rtol values must be positive.")

    cutoff = atol + s_max * rtol
    rank = np.sum(s > cutoff)

    # Keep the significant singular triplets and fold 1/s into u so that
    # B = V @ diag(1/s) @ U^H is formed with a single matmul.
    u = u[:, :rank]
    u /= s[:rank]
    B = (u @ vh[:rank]).conj().T

    return (B, rank) if return_rank else B
def pinvh(a, atol=None, rtol=None, lower=True, return_rank=False,
          check_finite=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.

    The inverse is built from the eigendecomposition of the real
    symmetric / complex Hermitian matrix `a`. Eigenvalues whose magnitude
    is no larger than ``atol + rtol * max(|eigenvalues|)`` are treated as
    zero and excluded.

    Parameters
    ----------
    a : (N, N) array_like
        Real symmetric or complex hermitian matrix to be pseudo-inverted.
    atol : float, optional
        Absolute threshold term, default value is 0.

        .. versionadded:: 1.7.0
    rtol : float, optional
        Relative threshold term, default value is ``N * eps`` where
        ``eps`` is the machine precision value of the datatype of `a`.

        .. versionadded:: 1.7.0
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    return_rank : bool, optional
        If True, also return the effective rank of the matrix.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    B : (N, N) ndarray
        The pseudo-inverse of matrix `a`.
    rank : int
        The effective rank of the matrix. Returned if `return_rank` is
        True.

    Raises
    ------
    LinAlgError
        If eigenvalue algorithm does not converge.

    See Also
    --------
    pinv : Moore-Penrose pseudoinverse of a matrix.
    """
    a = _asarray_validated(a, check_finite=check_finite)
    eigvals, eigvecs = _decomp.eigh(a, lower=lower, check_finite=False)
    prec_char = eigvecs.dtype.char.lower()

    abs_tol = 0. if atol is None else atol
    rel_tol = (max(a.shape) * np.finfo(prec_char).eps
               if rtol is None else rtol)
    if abs_tol < 0. or rel_tol < 0.:
        raise ValueError("atol and rtol values must be positive.")

    cutoff = abs_tol + np.max(np.abs(eigvals)) * rel_tol
    keep = np.abs(eigvals) > cutoff

    # B = U_k @ diag(1/lambda_k) @ U_k^H over the retained eigenpairs.
    inv_eigvals = 1.0 / eigvals[keep]
    kept_vecs = eigvecs[:, keep]
    B = (kept_vecs * inv_eigvals) @ kept_vecs.conj().T

    if return_rank:
        return B, len(inv_eigvals)
    return B
def matrix_balance(A, permute=True, scale=True, separate=False,
                   overwrite_a=False):
    """
    Compute a diagonal similarity transformation for row/column balancing.

    The balancing tries to equalize the row and column 1-norms by applying
    a similarity transformation such that the magnitude variation of the
    matrix entries is reflected to the scaling matrices.

    Moreover, if enabled, the matrix is first permuted to isolate the upper
    triangular parts of the matrix and, again if scaling is also enabled,
    only the remaining subblocks are subjected to scaling.

    The balanced matrix satisfies the following equality

    .. math::

        B = T^{-1} A T

    The scaling coefficients are approximated to the nearest power of 2
    to avoid round-off errors.

    Parameters
    ----------
    A : (n, n) array_like
        Square data matrix for the balancing.
    permute : bool, optional
        The selector to define whether permutation of A is also performed
        prior to scaling.
    scale : bool, optional
        The selector to turn on and off the scaling. If False, the matrix
        will not be scaled.
    separate : bool, optional
        This switches from returning a full matrix of the transformation
        to a tuple of two separate 1-D permutation and scaling arrays.
    overwrite_a : bool, optional
        This is passed to xGEBAL directly. Essentially, overwrites the result
        to the data. It might increase the space efficiency. See LAPACK manual
        for details. This is False by default.

    Returns
    -------
    B : (n, n) ndarray
        Balanced matrix
    T : (n, n) ndarray
        A possibly permuted diagonal matrix whose nonzero entries are
        integer powers of 2 to avoid numerical truncation errors.
    scale, perm : (n,) ndarray
        If ``separate`` keyword is set to True then instead of the array
        ``T`` above, the scaling and the permutation vectors are given
        separately as a tuple without allocating the full array ``T``.

    Notes
    -----
    This algorithm is particularly useful for eigenvalue and matrix
    decompositions and in many cases it is already called by various
    LAPACK routines.

    The algorithm is based on the well-known technique of [1]_ and has
    been modified to account for special cases. See [2]_ for details
    which have been implemented since LAPACK v3.5.0. Before this version
    there are corner cases where balancing can actually worsen the
    conditioning. See [3]_ for such examples.

    The code is a wrapper around LAPACK's xGEBAL routine family for matrix
    balancing.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] B.N. Parlett and C. Reinsch, "Balancing a Matrix for
       Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
       Vol.13(4), 1969, :doi:`10.1007/BF02165404`
    .. [2] R. James, J. Langou, B.R. Lowery, "On matrix balancing and
       eigenvector computation", 2014, :arxiv:`1401.5766`
    .. [3] D.S. Watkins. A case where balancing is harmful.
       Electron. Trans. Numer. Anal, Vol.23, 2006.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import linalg
    >>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])

    >>> y, permscale = linalg.matrix_balance(x)
    >>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
    array([ 3.66666667,  0.4995005 ,  0.91312162])

    >>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
    array([ 1.2       ,  1.27041742,  0.92658316])  # may vary

    >>> permscale  # only powers of 2 (0.5 == 2^(-1))
    array([[ 0.5,  0. ,  0. ],  # may vary
           [ 0. ,  1. ,  0. ],
           [ 0. ,  0. ,  1. ]])
    """
    A = np.atleast_2d(_asarray_validated(A, check_finite=True))
    if not np.equal(*A.shape):
        raise ValueError('The data matrix for balancing should be square.')
    # NOTE: ('gebal') is a plain string, not a 1-tuple; get_lapack_funcs
    # accepts a single name as well as a sequence of names.
    gebal = get_lapack_funcs(('gebal'), (A,))
    # xGEBAL returns the balanced matrix B, the index range [lo, hi] that
    # was actually balanced, and ``ps`` which packs both the scale factors
    # (inside [lo, hi]) and the permutation indices (outside that range).
    B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
                                overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('xGEBAL exited with the internal error '
                         '"illegal value in argument number {}.". See '
                         'LAPACK documentation for the xGEBAL error codes.'
                         ''.format(-info))
    # Separate the permutations from the scalings and then convert to int
    scaling = np.ones_like(ps, dtype=float)
    scaling[lo:hi+1] = ps[lo:hi+1]
    # gebal uses 1-indexing
    ps = ps.astype(int, copy=False) - 1
    n = A.shape[0]
    perm = np.arange(n)
    # LAPACK permutes with the ordering n --> hi, then 0--> lo
    # Replay the recorded swaps to reconstruct the full permutation vector.
    if hi < n:
        for ind, x in enumerate(ps[hi+1:][::-1], 1):
            if n-ind == x:
                continue
            perm[[x, n-ind]] = perm[[n-ind, x]]
    if lo > 0:
        for ind, x in enumerate(ps[:lo]):
            if ind == x:
                continue
            perm[[x, ind]] = perm[[ind, x]]
    if separate:
        return B, (scaling, perm)
    # get the inverse permutation
    iperm = np.empty_like(perm)
    iperm[perm] = np.arange(n)
    # Build the dense transformation matrix T = P @ diag(scaling).
    return B, np.diag(scaling)[iperm, :]
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
enforce_square=True):
"""Validate arguments and format inputs for toeplitz functions
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
keep_b_shape : bool
Whether to convert a (M,) dimensional b into a (M, 1) dimensional
matrix.
enforce_square : bool, optional
If True (default), this verifies that the Toeplitz matrix is square.
Returns
-------
r : array
1d array corresponding to the first row of the Toeplitz matrix.
c: array
1d array corresponding to the first column of the Toeplitz matrix.
b: array
(M,), (M, 1) or (M, K) dimensional array, post validation,
corresponding to ``b``.
dtype: numpy datatype
``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
otherwise, it is ``np.float``.
b_shape: tuple
Shape of ``b`` after passing it through ``_asarray_validated``.
"""
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
if b is None:
raise ValueError('`b` must be an array, not None.')
b = _asarray_validated(b, check_finite=check_finite)
b_shape = b.shape
is_not_square = r.shape[0] != c.shape[0]
if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
raise ValueError('Incompatible dimensions.')
is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
dtype = np.complex128 if is_cmplx else np.double
r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
if b.ndim == 1 and not keep_b_shape:
b = b.reshape(-1, 1)
elif b.ndim != 1:
b = b.reshape(b.shape[0], -1)
return r, c, b, dtype, b_shape
def matmul_toeplitz(c_or_cr, x, check_finite=False, workers=None):
    """Efficient Toeplitz Matrix-Matrix Multiplication using FFT.

    Compute ``T @ x`` where ``T`` is the Toeplitz matrix with constant
    diagonals, first column ``c`` and first row ``r``. If ``r`` is not
    given, ``r == conjugate(c)`` is assumed. The Toeplitz matrix is never
    formed explicitly: it is embedded in a circulant matrix and the
    product is evaluated with FFTs, which makes this routine suitable for
    very large Toeplitz matrices.

    Parameters
    ----------
    c_or_cr : array_like or tuple of (array_like, array_like)
        The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever
        the actual shape of each, it will be converted to a 1-D array.
        ``r[0]`` is ignored; the first row of the Toeplitz matrix is
        ``[c[0], r[1:]]``.
    x : (M,) or (M, K) array_like
        Matrix with which to multiply.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result in
        problems (result entirely NaNs) if the inputs do contain
        infinities or NaNs.
    workers : int, optional
        To pass to scipy.fft.fft and ifft. Maximum number of workers to
        use for parallel computation. If negative, the value wraps around
        from ``os.cpu_count()``.

    Returns
    -------
    T @ x : (M,) or (M, K) ndarray
        The result of the matrix multiplication ``T @ x``. Shape of
        return matches shape of `x`.

    See Also
    --------
    toeplitz : Toeplitz matrix
    solve_toeplitz : Solve a Toeplitz system using Levinson Recursion

    Notes
    -----
    Because the computation is based on the FFT, integer inputs will
    result in floating point outputs. This is unlike NumPy's `matmul`,
    which preserves the data type of the input.

    .. versionadded:: 1.6.0
    """
    from ..fft import fft, ifft, rfft, irfft

    r, c, x, dtype, x_shape = _validate_args_for_toeplitz_ops(
        c_or_cr, x, check_finite, keep_b_shape=False, enforce_square=False)
    _, n_rhs = x.shape
    n_rows = len(c)
    n_cols = len(r)
    # Length of the first column of the circulant embedding of T:
    # c followed by the reversed tail of r.
    p = n_rows + n_cols - 1
    embedded_col = np.concatenate((c, r[-1:0:-1]))

    if np.iscomplexobj(embedded_col) or np.iscomplexobj(x):
        circ_hat = fft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
        x_hat = fft(x, n=p, axis=0, workers=workers)
        product = ifft(circ_hat * x_hat, axis=0,
                       workers=workers)[:n_rows, :]
    else:
        # Real inputs; using rfft is faster
        circ_hat = rfft(embedded_col, axis=0, workers=workers).reshape(-1, 1)
        x_hat = rfft(x, n=p, axis=0, workers=workers)
        product = irfft(circ_hat * x_hat, axis=0,
                        workers=workers, n=p)[:n_rows, :]

    # Mirror the dimensionality of the original right-hand side.
    if len(x_shape) == 1:
        return product.reshape(n_rows)
    return product.reshape(n_rows, n_rhs)
| 69,470 | 34.939472 | 79 | py |
scipy | scipy-main/scipy/_lib/setup.py | import os
def check_boost_submodule():
    """Raise if the `boost` git submodule has not been checked out."""
    from scipy._lib._boost_utils import _boost_dir

    marker = _boost_dir(ret_path=True).parent / 'README.md'
    if not os.path.exists(marker):
        raise RuntimeError("Missing the `boost` submodule! Run `git submodule "
                           "update --init` to fix this.")
def check_highs_submodule():
    """Raise if the `highs` git submodule has not been checked out."""
    from scipy._lib._highs_utils import _highs_dir

    marker = _highs_dir() / 'README.md'
    if not os.path.exists(marker):
        raise RuntimeError("Missing the `highs` submodule! Run `git submodule "
                           "update --init` to fix this.")
def build_clib_pre_build_hook(cmd, ext):
    """Append the compiler's C++ standard flag to ``ext`` before building."""
    from scipy._build_utils.compiler_helper import get_cxx_std_flag

    # setdefault returns the (possibly fresh) list stored in ext, so the
    # append below mutates the extension's own argument list.
    extra_args = ext.setdefault('extra_compiler_args', [])
    std_flag = get_cxx_std_flag(cmd.compiler)
    if std_flag is not None:
        extra_args.append(std_flag)
def configuration(parent_package='',top_path=None):
    """Build the numpy.distutils configuration for the ``scipy._lib`` subpackage."""
    from numpy.distutils.misc_util import Configuration
    from scipy._lib._boost_utils import _boost_dir
    # Fail early if either vendored git submodule is missing.
    check_boost_submodule()
    check_highs_submodule()
    config = Configuration('_lib', parent_package, top_path)
    config.add_data_files('tests/*.py')
    # Deal with array_api_compat git submodule
    config.add_data_files('array_api_compat/array_api_compat/*.py')
    config.add_data_files('array_api_compat/array_api_compat/common/*.py')
    config.add_data_files('array_api_compat/array_api_compat/cupy/*.py')
    config.add_data_files('array_api_compat/array_api_compat/numpy/*.py')
    config.add_data_files('array_api_compat/array_api_compat/torch/*.py')
    # Shared C headers live under _lib/src.
    include_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src'))
    depends = [os.path.join(include_dir, 'ccallback.h')]
    config.add_extension("_ccallback_c",
                         sources=["_ccallback_c.c"],
                         depends=depends,
                         include_dirs=[include_dir])
    config.add_extension("_test_ccallback",
                         sources=["src/_test_ccallback.c"],
                         depends=depends,
                         include_dirs=[include_dir])
    config.add_extension("_fpumode",
                         sources=["_fpumode.c"])
    def get_messagestream_config(ext, build_dir):
        # Generate a header file containing defines
        config_cmd = config.get_config_cmd()
        defines = []
        if config_cmd.check_func('open_memstream', decl=True, call=True):
            defines.append(('HAVE_OPEN_MEMSTREAM', '1'))
        target = os.path.join(os.path.dirname(__file__), 'src',
                              'messagestream_config.h')
        with open(target, 'w') as f:
            for name, value in defines:
                f.write(f'#define {name} {value}\n')
    depends = [os.path.join(include_dir, 'messagestream.h')]
    # The callable in `sources` runs at build time to emit the generated
    # messagestream_config.h header before the C sources are compiled.
    config.add_extension("messagestream",
                         sources=["messagestream.c"] + [get_messagestream_config],
                         depends=depends,
                         include_dirs=[include_dir])
    config.add_extension("_test_deprecation_call",
                         sources=["_test_deprecation_call.c"],
                         include_dirs=[include_dir])
    config.add_extension("_test_deprecation_def",
                         sources=["_test_deprecation_def.c"],
                         include_dirs=[include_dir])
    config.add_subpackage('_uarray')
    # ensure Boost was checked out and builds
    config.add_library(
        'test_boost_build',
        sources=['tests/test_boost_build.cpp'],
        include_dirs=_boost_dir(),
        language='c++',
        _pre_build_hook=build_clib_pre_build_hook)
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone via numpy.distutils.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 3,817 | 35.711538 | 82 | py |
scipy | scipy-main/scipy/_lib/tests/test_array_api.py | import numpy as np
from numpy.testing import assert_equal
import pytest
from scipy.conftest import array_api_compatible
from scipy._lib._array_api import (
_GLOBAL_CONFIG, array_namespace, as_xparray,
)
if not _GLOBAL_CONFIG["SCIPY_ARRAY_API"]:
    # Without the opt-in flag the array-API code paths are inactive, so
    # none of these tests are meaningful; skip the whole module.
    pytest.skip(
        "Array API test; set environment variable SCIPY_ARRAY_API=1 to run it",
        allow_module_level=True
    )
def to_numpy(array, xp):
    """Convert `array` into a NumPy ndarray on the CPU. From sklearn."""
    namespace = xp.__name__
    if namespace in {"array_api_compat.torch", "torch"}:
        # Torch tensors may live on an accelerator; move to host first.
        return array.cpu().numpy()
    if namespace == "cupy.array_api":
        return array._array.get()
    if namespace in {"array_api_compat.cupy", "cupy"}:  # pragma: nocover
        return array.get()
    # Anything else is assumed to be directly convertible.
    return np.asarray(array)
def test_array_namespace():
    # The resolved namespace should be the numpy compat wrapper whether or
    # not the global array-API switch is enabled.
    a = np.array([0, 1, 2])
    b = np.array([0, 1, 2])
    assert 'array_api_compat.numpy' in array_namespace(a, b).__name__
    _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = False
    assert 'array_api_compat.numpy' in array_namespace(a, b).__name__
    _GLOBAL_CONFIG["SCIPY_ARRAY_API"] = True
@array_api_compatible
def test_asarray(xp):
    # Converting a Python list and an ndarray must give the same contents.
    expected = np.array([0, 1, 2])
    from_list = as_xparray([0, 1, 2], xp=xp)
    from_ndarray = as_xparray(np.arange(3), xp=xp)
    assert_equal(from_list, expected)
    assert_equal(from_ndarray, expected)
@array_api_compatible
def test_to_numpy(xp):
    # Round-tripping through to_numpy must yield a host ndarray.
    converted = to_numpy(xp.asarray([0, 1, 2]), xp=xp)
    assert isinstance(converted, np.ndarray)
@pytest.mark.filterwarnings("ignore: the matrix subclass")
def test_raises():
    # Masked arrays are rejected outright.
    with pytest.raises(TypeError,
                       match="'numpy.ma.MaskedArray' are not supported"):
        array_namespace(np.ma.array(1), np.array(1))
    # So are numpy.matrix instances.
    with pytest.raises(TypeError, match="'numpy.matrix' are not supported"):
        array_namespace(np.array(1), np.matrix(1))
    # Plain Python objects are not array-API arrays at all.
    with pytest.raises(TypeError, match="Only support Array API"):
        array_namespace([0, 1, 2])
    with pytest.raises(TypeError, match="Only support Array API"):
        array_namespace(1)
| 2,051 | 26.72973 | 79 | py |
scipy | scipy-main/doc/source/conf.py | import math
import os
from os.path import relpath, dirname
import re
import sys
import warnings
from datetime import date
from docutils import nodes
from docutils.parsers.rst import Directive
import matplotlib
import matplotlib.pyplot as plt
from numpydoc.docscrape_sphinx import SphinxDocString
from sphinx.util import inspect
import scipy
from scipy._lib._util import _rng_html_rewrite
# Workaround for sphinx-doc/sphinx#6573
# ua._Function should not be treated as an attribute
import scipy._lib.uarray as ua
from scipy.stats._distn_infrastructure import rv_generic # noqa: E402
from scipy.stats._multivariate import multi_rv_generic # noqa: E402
old_isdesc = inspect.isdescriptor
# Wrap the descriptor predicate so uarray ``_Function`` objects are
# documented as attributes rather than descriptors (see the sphinx-doc
# issue referenced in the import section above).
inspect.isdescriptor = (lambda obj: old_isdesc(obj)
                        and not isinstance(obj, ua._Function))
# Currently required to build scipy.fft docs
os.environ['_SCIPY_BUILDING_DOC'] = 'True'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import numpydoc.docscrape as np_docscrape  # noqa:E402
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.intersphinx',
    'numpydoc',
    'sphinx_design',
    'scipyoptdoc',
    'doi_role',
    'matplotlib.sphinxext.plot_directive',
    'myst_nb',
]
# Do some matplotlib config in case users have a matplotlibrc that will break
# things
matplotlib.use('agg')
plt.ioff()
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# General substitutions.
project = 'SciPy'
copyright = '2008-%s, The SciPy community' % date.today().year
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
version = re.sub(r'\.dev.*$', r'.dev', scipy.__version__)
release = version
# On CircleCI branch builds (other than main), label the docs with the
# branch name instead of the version string.
if os.environ.get('CIRCLE_JOB', False) and \
        os.environ.get('CIRCLE_BRANCH', '') != 'main':
    version = os.environ['CIRCLE_BRANCH']
    release = version
print(f"{project} (VERSION {version})")
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# Ensure all our internal links work
nitpicky = True
nitpick_ignore = [
# This ignores errors for classes (OptimizeResults, sparse.dok_matrix)
# which inherit methods from `dict`. missing references to builtins get
# ignored by default (see https://github.com/sphinx-doc/sphinx/pull/7254),
# but that fix doesn't work for inherited methods.
("py:class", "a shallow copy of D"),
("py:class", "a set-like object providing a view on D's keys"),
("py:class", "a set-like object providing a view on D's items"),
("py:class", "an object providing a view on D's values"),
("py:class", "None. Remove all items from D."),
("py:class", "(k, v), remove and return some (key, value) pair as a"),
("py:class", "None. Update D from dict/iterable E and F."),
("py:class", "v, remove specified key and return the corresponding value."),
]
exclude_patterns = [ # glob-style
]
# be strict about warnings in our examples, we should write clean code
# (exceptions permitted for pedagogical purposes below)
warnings.resetwarnings()
warnings.filterwarnings('error')
# allow these and show them
warnings.filterwarnings('default', module='sphinx') # internal warnings
# global weird ones that can be safely ignored
for key in (
r"OpenSSL\.rand is deprecated", # OpenSSL package in linkcheck
r"distutils Version", # distutils
):
warnings.filterwarnings( # deal with other modules having bad imports
'ignore', message=".*" + key, category=DeprecationWarning)
warnings.filterwarnings( # matplotlib<->pyparsing issue
'ignore', message="Exception creating Regex for oneOf.*",
category=SyntaxWarning)
# warnings in examples (mostly) that we allow
# TODO: eventually these should be eliminated!
for key in (
'invalid escape sequence', # numpydoc 0.8 has some bad escape chars
'The integral is probably divergent', # stats.mielke example
'underflow encountered in square', # signal.filtfilt underflow
'underflow encountered in multiply', # scipy.spatial.HalfspaceIntersection
    'underflow encountered in nextafter',  # tutorial/interpolate.rst
# stats.skewnorm, stats.norminvgauss, stats.gaussian_kde,
# tutorial/stats.rst (twice):
'underflow encountered in exp',
):
warnings.filterwarnings(
'once', message='.*' + key)
# docutils warnings when using notebooks (see gh-17322)
# these will hopefully be removed in the near future
for key in (
r"The frontend.OptionParser class will be replaced",
r"The frontend.Option class will be removed",
):
warnings.filterwarnings('ignore', message=key, category=DeprecationWarning)
warnings.filterwarnings(
'ignore',
message=r'.*is obsoleted by Node.findall()',
category=PendingDeprecationWarning,
)
warnings.filterwarnings(
'ignore',
message=r'There is no current event loop',
category=DeprecationWarning,
)
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
html_theme = 'pydata_sphinx_theme'
html_logo = '_static/logo.svg'
html_favicon = '_static/favicon.ico'
html_theme_options = {
"github_url": "https://github.com/scipy/scipy",
"twitter_url": "https://twitter.com/SciPy_team",
"navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"],
"switcher": {
"json_url": "https://scipy.github.io/devdocs/_static/version_switcher.json",
"version_match": version,
}
}
if 'dev' in version:
html_theme_options["switcher"]["version_match"] = "development"
if 'versionwarning' in tags: # noqa
# Specific to docs.scipy.org deployment.
# See https://github.com/scipy/docs.scipy.org/blob/main/_static/versionwarning.js_t
src = ('var script = document.createElement("script");\n'
'script.type = "text/javascript";\n'
'script.src = "/doc/_static/versionwarning.js";\n'
'document.head.appendChild(script);')
html_context = {
'VERSIONCHECK_JS': src
}
html_js_files = ['versioncheck.js']
html_title = f"{project} v{version} Manual"
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_css_files = [
"scipy.css",
]
# html_additional_pages = {
# 'index': 'indexcontent.html',
# }
html_additional_pages = {}
html_use_modindex = True
html_domain_indices = False
html_copy_source = False
html_file_suffix = '.html'
htmlhelp_basename = 'scipy'
mathjax_path = "scipy-mathjax/MathJax.js?config=scipy-mathjax"
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'neps': ('https://numpy.org/neps', None),
'matplotlib': ('https://matplotlib.org/stable', None),
'asv': ('https://asv.readthedocs.io/en/stable/', None),
'statsmodels': ('https://www.statsmodels.org/stable', None),
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Generate plots for example sections
numpydoc_use_plots = True
np_docscrape.ClassDoc.extra_public_methods = [ # should match class.rst
'__call__', '__mul__', '__getitem__', '__len__',
]
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
autosummary_generate = True
# maps functions with a name same as a class name that is indistinguishable
# Ex: scipy.signal.czt and scipy.signal.CZT or scipy.odr.odr and scipy.odr.ODR
# Otherwise, the stubs are overwritten when the name is same for
# OS (like MacOS) which has a filesystem that ignores the case
# See https://github.com/sphinx-doc/sphinx/pull/7927
autosummary_filename_map = {
"scipy.odr.odr": "odr-function",
"scipy.signal.czt": "czt-function",
}
# -----------------------------------------------------------------------------
# Autodoc
# -----------------------------------------------------------------------------
autodoc_default_options = {
'inherited-members': None,
}
autodoc_typehints = 'none'
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Matplotlib plot_directive options
#------------------------------------------------------------------------------
plot_pre_code = """
import warnings
for key in (
'lsim2 is deprecated', # Deprecation of scipy.signal.lsim2
'impulse2 is deprecated', # Deprecation of scipy.signal.impulse2
'step2 is deprecated', # Deprecation of scipy.signal.step2
'interp2d` is deprecated', # Deprecation of scipy.interpolate.interp2d
'scipy.misc', # scipy.misc deprecated in v1.10.0; use scipy.datasets
'kurtosistest only valid', # intentionally "bad" excample in docstring
):
warnings.filterwarnings(action='ignore', message='.*' + key + '.*')
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96)]
plot_html_show_formats = False
plot_html_show_source_link = False
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Notebook tutorials with MyST-NB
# -----------------------------------------------------------------------------
nb_execution_mode = "auto"
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
# Not the same as from sphinx.util import inspect and needed here
import inspect # noqa: E402
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object.

    Hook for ``sphinx.ext.linkcode``: given a documented Python object
    (identified by ``info['module']`` and ``info['fullname']``), return the
    GitHub URL of the source lines implementing it, or None when no link
    can be constructed.
    """
    if domain != 'py':
        return None
    modname = info['module']
    fullname = info['fullname']
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    # Walk the dotted attribute path down from the module object.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except Exception:
            return None
    # Use the original function object if it is wrapped.
    while hasattr(obj, "__wrapped__"):
        obj = obj.__wrapped__
    # SciPy's distributions are instances of *_gen. Point to this
    # class since it contains the implementation of all the methods.
    if isinstance(obj, (rv_generic, multi_rv_generic)):
        obj = obj.__class__
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        # Fall back to the source file of the object's module when the
        # object itself has no locatable source.
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except Exception:
            fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    # Build a "#L<start>-L<end>" fragment when the line span is known.
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""
    # Make the source path relative to the scipy package root and use
    # forward slashes, as required for URLs.
    startdir = os.path.abspath(os.path.join(dirname(scipy.__file__), '..'))
    fn = relpath(fn, start=startdir).replace(os.path.sep, '/')
    if fn.startswith('scipy/'):
        # Dev builds with a "+<hash>" suffix link to that exact commit;
        # other dev builds link to 'main'; releases link to the version tag.
        m = re.match(r'^.*dev0\+([a-f0-9]+)$', scipy.__version__)
        base_url = "https://github.com/scipy/scipy/blob"
        if m:
            return f"{base_url}/{m.group(1)}/{fn}{linespec}"
        elif 'dev' in scipy.__version__:
            return f"{base_url}/main/{fn}{linespec}"
        else:
            return f"{base_url}/v{scipy.__version__}/{fn}{linespec}"
    else:
        # Source file is not inside the scipy package; no link.
        return None
# Override numpydoc's logic for rendering examples that contain ``rng``.
SphinxDocString._str_examples = _rng_html_rewrite(
SphinxDocString._str_examples
)
class LegacyDirective(Directive):
    """
    Adapted from docutils/parsers/rst/directives/admonitions.py

    Uses a default text if the directive does not have contents. If it does,
    the default text is concatenated to the contents.
    """
    has_content = True
    node_class = nodes.admonition
    optional_arguments = 1

    def run(self):
        """Build and return the admonition node for one ``legacy`` use."""
        try:
            obj = self.arguments[0]
        except IndexError:
            # Argument is empty; use default text
            obj = "submodule"
        text = (f"This {obj} is considered legacy and will no longer receive "
                "updates. This could also mean it will be removed in future "
                "SciPy versions.")
        try:
            # Prepend the boilerplate to the user-provided first line.
            self.content[0] = text+" "+self.content[0]
        except IndexError:
            # Content is empty; use the default text
            source, lineno = self.state_machine.get_source_and_line(
                self.lineno
            )
            self.content.append(
                text,
                source=source,
                offset=lineno
            )
        text = '\n'.join(self.content)
        # Create the admonition node, to be populated by `nested_parse`
        admonition_node = self.node_class(rawsource=text)
        # Set custom title
        title_text = "Legacy"
        textnodes, _ = self.state.inline_text(title_text, self.lineno)
        title = nodes.title(title_text, '', *textnodes)
        # Set up admonition node
        admonition_node += title
        # Select custom class for CSS styling
        admonition_node['classes'] = ['admonition-legacy']
        # Parse the directive contents
        self.state.nested_parse(self.content, self.content_offset,
                                admonition_node)
        return [admonition_node]
def setup(app):
    """Sphinx extension entry point: register the ``legacy`` directive."""
    app.add_directive("legacy", LegacyDirective)
| 16,583 | 31.839604 | 87 | py |
MENET | MENET-master/light/utils/transforms.py | # -*- coding: utf-8 -*-
# @File : derain_wgan_tf/transforms.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc : @ sumihui : refer to pytorch
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import numpy as np
from PIL import Image
class Compose(object):
    """Chain several transforms into one callable.

    Args:
        transforms (list of ``Transform`` objects): callables applied in
            order; each receives the previous one's output.

    Example:
        >> transforms.Compose([
        >>     transforms.FiveCrop(10),
        >>     lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])
        >> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        # Thread the image through every transform in order.
        result = img
        for transform in self.transforms:
            result = transform(result)
        return result

    def __repr__(self):
        # One line per contained transform, wrapped in "Name( ... )".
        body = ''.join('\n {0}'.format(t) for t in self.transforms)
        return '{0}({1}\n)'.format(self.__class__.__name__, body)
class FiveCrop(object):
    """Crop the given PIL Image into four corners and the central crop.

    .. Note::
        This transform returns a tuple of images and there may be a mismatch in the number of
        inputs and targets your Dataset returns. See below for an example of how to deal with
        this.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an ``int``
            instead of sequence like (h, w), a square crop of size (size, size) is made.
        horizontal_flip (bool): Whether use horizontal flipping or not

    Example:
        >> transform = Compose([
        >>     FiveCrop(size),  # this is a list of PIL Images
        >>     lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])  # returns a 4D ndarray
        >> ])
        >> # In your test loop you can do the following:
        >> input, target = batch  # input is a 5d tensor, target is 2d
        >> bs, ncrops, h, w, c = input.size()
        >> result = model(input.reshape(-1, h, w, c))  # fuse batch size and ncrops
    """

    def __init__(self, size, horizontal_flip=False):
        self.size = size
        if isinstance(size, int):
            # Normalize an int into a square (h, w) pair.
            self.size = (size, size)
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size
        # When True, __call__ also returns the five crops of the mirrored image.
        self.horizontal_flip = horizontal_flip

    def __call__(self, img):
        """
        :param img: (PIL Image). Image to be cropped.
        :return: tuple of 5 crops (10 when ``horizontal_flip`` is set).
        """
        if not isinstance(img, Image.Image):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        crops = self.five_crop(img)
        if self.horizontal_flip:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            crops = crops + self.five_crop(img)
        return crops

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)

    def five_crop(self, img):
        """Crop the given PIL Image into four corners and the central crop.

        :param img: (PIL Image). Image to be cropped.
        :raises ValueError: if the requested crop is larger than the image.
        Returns:
            tuple: tuple (tl, tr, bl, br, center) corresponding top left,
            top right, bottom left, bottom right and center crop.
        """
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size,
                                                                                          (h, w)))
        # PIL crop boxes are (left, upper, right, lower).
        tl = img.crop((0, 0, crop_w, crop_h))
        tr = img.crop((w - crop_w, 0, w, crop_h))
        bl = img.crop((0, h - crop_h, crop_w, h))
        br = img.crop((w - crop_w, h - crop_h, w, h))
        center = self.center_crop(img)
        return (tl, tr, bl, br, center)

    def center_crop(self, img):
        """
        :param img: (PIL Image). Image to crop centrally.
        :return: PIL Image: Cropped image.
        """
        w, h = img.size
        th, tw = self.size  # Height/Width of the cropped image.
        i = int(round((h - th) / 2.))  # Upper pixel coordinate.
        j = int(round((w - tw) / 2.))  # Left pixel coordinate.
        return img.crop((j, i, j + tw, i + th))
class ToArray(object):
    """Convert a ``PIL Image`` to a ``numpy.ndarray``.

    The image (H x W x C) is converted to a uint8 ndarray of the same
    shape. Note: no rescaling is performed — values stay in [0, 255].
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to numpy.ndarray.

        Returns:
            numpy.ndarray: uint8 array with the image's pixel values.
        """
        return np.asarray(pic, "uint8")  # note: 2019/05/29 uint8

    def __repr__(self):
        return type(self).__name__ + '()'
| 5,204 | 33.932886 | 106 | py |
MENET | MENET-master/heavy/utils/transforms.py | # -*- coding: utf-8 -*-
# @File : derain_wgan_tf/transforms.py
# @Info : @ TSMC-SIGGRAPH, 2019/5/29
# @Desc : @ sumihui : refer to pytorch
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import numpy as np
from PIL import Image
class Compose(object):
    """Composes several transforms together.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose;
            each is called with the previous transform's output.

    Example:
        >> transforms.Compose([
        >>     transforms.FiveCrop(10),
        >>     lambda crops: np.stack([transforms.ToArray(crop) for crop in crops])
        >> ])
    """

    def __init__(self, transforms):
        # Callables applied in order by __call__.
        self.transforms = transforms

    def __call__(self, img):
        # Pipe the image through every transform in sequence.
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self):
        # Multi-line repr listing each contained transform.
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += ' {0}'.format(t)
        format_string += '\n)'
        return format_string
class FiveCrop(object):
    """Produce the four corner crops and the center crop of a PIL Image.

    .. Note::
        The result is a *tuple* of PIL Images, so the number of outputs per
        input differs from one — downstream code must account for that
        (e.g. stack the crops into a 4D array).

    Args:
        size (sequence or int): Desired output size (h, w) of each crop; an
            ``int`` yields a square (size, size) crop.
        horizontal_flip (bool): When True, also return the five crops of
            the horizontally mirrored image (ten crops total).
    """

    def __init__(self, size, horizontal_flip=False):
        if isinstance(size, int):
            self.size = (size, size)
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size
        self.horizontal_flip = horizontal_flip

    def __call__(self, img):
        """Return a tuple of 5 crops (10 with ``horizontal_flip``).

        :param img: (PIL Image). Image to be cropped.
        """
        if not isinstance(img, Image.Image):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
        crops = self.five_crop(img)
        if self.horizontal_flip:
            mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
            crops = crops + self.five_crop(mirrored)
        return crops

    def __repr__(self):
        return '{0}(size={1})'.format(self.__class__.__name__, self.size)

    def five_crop(self, img):
        """Return (tl, tr, bl, br, center) crops of ``img``.

        :raises ValueError: when the crop size exceeds the image size.
        """
        w, h = img.size
        crop_h, crop_w = self.size
        if crop_w > w or crop_h > h:
            raise ValueError("Requested crop size {} is bigger than input size {}".format(self.size,
                                                                                          (h, w)))
        # PIL crop boxes are (left, upper, right, lower).
        corner_boxes = (
            (0, 0, crop_w, crop_h),            # top left
            (w - crop_w, 0, w, crop_h),        # top right
            (0, h - crop_h, crop_w, h),        # bottom left
            (w - crop_w, h - crop_h, w, h),    # bottom right
        )
        tl, tr, bl, br = (img.crop(box) for box in corner_boxes)
        return (tl, tr, bl, br, self.center_crop(img))

    def center_crop(self, img):
        """Return the central crop of ``img`` as a PIL Image."""
        w, h = img.size
        th, tw = self.size
        top = int(round((h - th) / 2.))   # upper pixel coordinate
        left = int(round((w - tw) / 2.))  # left pixel coordinate
        return img.crop((left, top, left + tw, top + th))
class ToArray(object):
    """Convert a ``PIL Image`` to ``numpy.ndarray``.

    Converts a PIL Image (H x W x C) to a numpy.ndarray of shape
    (H x W x C). Note: no rescaling is performed — the result is uint8
    with values in [0, 255] (the earlier docstring's claim of a
    [0.0, 1.0] range did not match the implementation).
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to numpy.ndarray.

        Returns:
            numpy.ndarray: Converted image (uint8, values in [0, 255]).
        """
        return np.asarray(pic, "uint8")  # note: 2019/05/29 uint8

    def __repr__(self):
        return self.__class__.__name__ + '()'
| 5,204 | 33.932886 | 106 | py |
ARFlow | ARFlow-master/inference.py | import imageio
import argparse
import numpy as np
import matplotlib.pyplot as plt
import torch
from easydict import EasyDict
from torchvision import transforms
from transforms import sep_transforms
from utils.flow_utils import flow_to_image, resize_flow
from utils.torch_utils import restore_model
from models.pwclite import PWCLite
class TestHelper():
    """Inference wrapper: loads a pretrained PWCLite model and runs it on
    lists of frames after resizing and normalization."""

    def __init__(self, cfg):
        self.cfg = EasyDict(cfg)
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
            "cpu")
        self.model = self.init_model()
        # Resize to the test shape, convert to tensor, scale pixels by 1/255.
        self.input_transform = transforms.Compose([
            sep_transforms.Zoom(*self.cfg.test_shape),
            sep_transforms.ArrayToTensor(),
            transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
        ])

    def init_model(self):
        """Build PWCLite, move it to the device, restore the pretrained
        weights and switch to eval mode."""
        model = PWCLite(self.cfg.model)
        # print('Number of parameters: {}'.format(model.num_parameters()))
        model = model.to(self.device)
        model = restore_model(model, self.cfg.pretrained_model)
        model.eval()
        return model

    def run(self, imgs):
        """Run the model on a list of HxWxC images.

        The transformed frames are concatenated along the channel axis and
        fed as one input; returns the model's output dict (e.g. 'flows_fw').
        """
        imgs = [self.input_transform(img).unsqueeze(0) for img in imgs]
        img_pair = torch.cat(imgs, 1).to(self.device)
        return self.model(img_pair)
if __name__ == '__main__':
    # CLI: run optical-flow inference on a list of images and display the
    # color-coded flow field predicted between the frames.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', default='checkpoints/KITTI15/pwclite_ar.tar')
    parser.add_argument('-s', '--test_shape', default=[384, 640], type=int, nargs=2)
    parser.add_argument('-i', '--img_list', nargs='+',
                        default=['examples/img1.png', 'examples/img2.png'])
    args = parser.parse_args()

    cfg = {
        'model': {
            'upsample': True,
            'n_frames': len(args.img_list),
            'reduce_dense': True
        },
        'pretrained_model': args.model,
        'test_shape': args.test_shape,
    }
    ts = TestHelper(cfg)

    imgs = [imageio.imread(img).astype(np.float32) for img in args.img_list]
    h, w = imgs[0].shape[:2]

    # Forward flow at network resolution, resized back to the input size.
    flow_12 = ts.run(imgs)['flows_fw'][0]
    flow_12 = resize_flow(flow_12, (h, w))
    # CHW tensor -> HWC numpy array for visualization.
    np_flow_12 = flow_12[0].detach().cpu().numpy().transpose([1, 2, 0])

    vis_flow = flow_to_image(np_flow_12)

    fig = plt.figure()
    plt.imshow(vis_flow)
    plt.show()
| 2,310 | 29.813333 | 90 | py |
ARFlow | ARFlow-master/basic_train.py | import torch
from utils.torch_utils import init_seed
from datasets.get_dataset import get_dataset
from models.get_model import get_model
from losses.get_loss import get_loss
from trainer.get_trainer import get_trainer
def main(cfg, _log):
    """Training entry point: build datasets, loaders, model, loss and
    trainer from ``cfg``, then run training.

    Args:
        cfg: experiment configuration providing ``seed``, ``train``
            (batch_size, workers, epoch_size, valid_size), ``model``,
            ``loss``, ``trainer`` and ``save_root``.
        _log: logger used for progress messages.
    """
    init_seed(cfg.seed)
    _log.info("=> fetching img pairs.")
    train_set, valid_set = get_dataset(cfg)
    _log.info('{} samples found, {} train samples and {} test samples '.format(
        len(valid_set) + len(train_set),
        len(train_set),
        len(valid_set)))
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=cfg.train.batch_size,
        num_workers=cfg.train.workers, pin_memory=True, shuffle=True)
    # Cap validation batch size independently of the training batch size.
    max_test_batch = 4
    if type(valid_set) is torch.utils.data.ConcatDataset:
        # One loader per concatenated validation set; total size is summed.
        valid_loader = [torch.utils.data.DataLoader(
            s, batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True, shuffle=False) for s in valid_set.datasets]
        valid_size = sum([len(l) for l in valid_loader])
    else:
        valid_loader = torch.utils.data.DataLoader(
            valid_set, batch_size=min(max_test_batch, cfg.train.batch_size),
            num_workers=min(4, cfg.train.workers),
            pin_memory=True, shuffle=False)
        valid_size = len(valid_loader)
    # 0 means "use the full loader"; in all cases clamp to what's available.
    if cfg.train.epoch_size == 0:
        cfg.train.epoch_size = len(train_loader)
    if cfg.train.valid_size == 0:
        cfg.train.valid_size = valid_size
    cfg.train.epoch_size = min(cfg.train.epoch_size, len(train_loader))
    cfg.train.valid_size = min(cfg.train.valid_size, valid_size)
    model = get_model(cfg.model)
    loss = get_loss(cfg.loss)
    trainer = get_trainer(cfg.trainer)(
        train_loader, valid_loader, model, loss, _log, cfg.save_root, cfg.train)
    trainer.train()
| 1,854 | 34.673077 | 80 | py |
ARFlow | ARFlow-master/trainer/base_trainer.py | import torch
import numpy as np
from abc import abstractmethod
from tensorboardX import SummaryWriter
from utils.torch_utils import bias_parameters, weight_parameters, \
load_checkpoint, save_checkpoint, AdamW
class BaseTrainer:
    """
    Base class for all trainers.

    Handles device selection, model/optimizer setup, checkpoint loading and
    saving, and the outer epoch loop. Subclasses implement
    ``_run_one_epoch`` and ``_validate_with_gt``.
    """

    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        self._log = _log
        self.cfg = config
        self.save_root = save_root
        self.summary_writer = SummaryWriter(str(save_root))
        self.train_loader, self.valid_loader = train_loader, valid_loader
        self.device, self.device_ids = self._prepare_device(config['n_gpu'])
        self.model = self._init_model(model)
        self.optimizer = self._create_optimizer()
        self.loss_func = loss_func
        # Best validation error seen so far, and epoch/iteration counters
        # updated during training.
        self.best_error = np.inf
        self.i_epoch = 0
        self.i_iter = 0

    @abstractmethod
    def _run_one_epoch(self):
        # Subclasses implement one full training epoch here.
        ...

    @abstractmethod
    def _validate_with_gt(self):
        # Subclasses implement ground-truth validation; must return
        # (errors, error_names) sequences of equal length.
        ...

    def train(self):
        """Run ``epoch_num`` epochs, validating every ``val_epoch_size``."""
        for epoch in range(self.cfg.epoch_num):
            self._run_one_epoch()
            if self.i_epoch % self.cfg.val_epoch_size == 0:
                errors, error_names = self._validate_with_gt()
                valid_res = ' '.join(
                    '{}: {:.2f}'.format(*t) for t in zip(error_names, errors))
                self._log.info(' * Epoch {} '.format(self.i_epoch) + valid_res)

    def _init_model(self, model):
        """Move the model to the device, load pretrained weights (or
        initialize from scratch), and wrap it in DataParallel."""
        model = model.to(self.device)
        if self.cfg.pretrained_model:
            self._log.info("=> using pre-trained weights {}.".format(
                self.cfg.pretrained_model))
            epoch, weights = load_checkpoint(self.cfg.pretrained_model)
            from collections import OrderedDict
            new_weights = OrderedDict()
            model_keys = list(model.state_dict().keys())
            weight_keys = list(weights.keys())
            # NOTE(review): checkpoint tensors are matched to parameters
            # *positionally*, not by name — this silently assumes the
            # checkpoint lists parameters in the same order as the model.
            for a, b in zip(model_keys, weight_keys):
                new_weights[a] = weights[b]
            weights = new_weights
            model.load_state_dict(weights)
        else:
            self._log.info("=> Train from scratch.")
            model.init_weights()
        model = torch.nn.DataParallel(model, device_ids=self.device_ids)
        return model

    def _create_optimizer(self):
        """Create an Adam or AdamW optimizer with separate weight-decay
        settings for biases and weights."""
        self._log.info('=> setting Adam solver')
        param_groups = [
            {'params': bias_parameters(self.model.module),
             'weight_decay': self.cfg.bias_decay},
            {'params': weight_parameters(self.model.module),
             'weight_decay': self.cfg.weight_decay}]
        if self.cfg.optim == 'adamw':
            optimizer = AdamW(param_groups, self.cfg.lr,
                              betas=(self.cfg.momentum, self.cfg.beta))
        elif self.cfg.optim == 'adam':
            optimizer = torch.optim.Adam(param_groups, self.cfg.lr,
                                         betas=(self.cfg.momentum, self.cfg.beta),
                                         eps=1e-7)
        else:
            raise NotImplementedError(self.cfg.optim)
        return optimizer

    def _prepare_device(self, n_gpu_use):
        """
        setup GPU device if available, move model into configured device

        Returns (device, list_of_gpu_ids); clamps the request to the GPUs
        actually present and falls back to CPU when none are available.
        """
        n_gpu = torch.cuda.device_count()
        if n_gpu_use > 0 and n_gpu == 0:
            self._log.warning("Warning: There\'s no GPU available on this machine,"
                              "training will be performed on CPU.")
            n_gpu_use = 0
        if n_gpu_use > n_gpu:
            self._log.warning(
                "Warning: The number of GPU\'s configured to use is {}, "
                "but only {} are available.".format(n_gpu_use, n_gpu))
            n_gpu_use = n_gpu
        device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
        list_ids = list(range(n_gpu_use))
        return device, list_ids

    def save_model(self, error, name):
        """Checkpoint the model state, updating and flagging the best
        (lowest-error) checkpoint."""
        is_best = error < self.best_error
        if is_best:
            self.best_error = error
        models = {'epoch': self.i_epoch,
                  'state_dict': self.model.module.state_dict()}
        save_checkpoint(self.save_root, models, name, is_best)
| 4,244 | 34.672269 | 83 | py |
ARFlow | ARFlow-master/trainer/kitti_trainer_ar.py | import time
import torch
import numpy as np
from copy import deepcopy
from .base_trainer import BaseTrainer
from utils.flow_utils import load_flow, evaluate_flow
from utils.misc_utils import AverageMeter
from transforms.ar_transforms.sp_transfroms import RandomAffineFlow
from transforms.ar_transforms.oc_transforms import run_slic_pt, random_crop
class TrainFramework(BaseTrainer):
    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        """Set up the base trainer plus the spatial-transform module used
        for the augmentation-regularization passes."""
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)
        # Random affine transform applied to (images, flow, mask) triples;
        # runs on the same device as the model.
        self.sp_transform = RandomAffineFlow(
            self.cfg.st_cfg, addnoise=self.cfg.st_cfg.add_noise).to(self.device)
    def _run_one_epoch(self):
        """Train for one epoch: a photometric/smoothness pass plus optional
        augmentation-regularization passes (spatial transform and
        occlusion/crop) whose predictions are penalized against the
        first-pass flow."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()

        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean', 'l_atst', 'l_ot']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)

        self.model.train()
        end = time.time()

        if 'stage1' in self.cfg:
            # Switch loss settings once the stage-1 epoch is reached.
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)

        for i_step, data in enumerate(self.train_loader):
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'].to(self.device), data['img2'].to(self.device)
            img_pair = torch.cat([img1, img2], 1)

            # measure data loading time
            am_data_time.update(time.time() - end)

            # run 1st pass: forward + backward flow and unsupervised loss
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)

            # First-pass full-resolution forward flow, used as the
            # (detached) pseudo label for the augmentation passes.
            flow_ori = res_dict['flows_fw'][0].detach()

            if self.cfg.run_atst:
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # construct augment sample
                noc_ori = self.loss_func.pyramid_occu_mask1[0]  # non-occluded region
                s = {'imgs': [img1, img2], 'flows_f': [flow_ori], 'masks_f': [noc_ori]}
                st_res = self.sp_transform(deepcopy(s)) if self.cfg.run_st else deepcopy(s)
                flow_t, noc_t = st_res['flows_f'][0], st_res['masks_f'][0]

                # run 2nd pass on the transformed sample
                img_pair = torch.cat(st_res['imgs'], 1)
                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                if not self.cfg.mask_st:
                    noc_t = torch.ones_like(noc_t)
                # robust penalty (|.| + eps)^q, averaged over the
                # non-occluded region
                l_atst = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_atst = (l_atst * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_atst
            else:
                l_atst = torch.zeros_like(loss)

            if self.cfg.run_ot:
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # run 3rd pass: occlusion-style augmentation
                img_pair = torch.cat([img1, img2], 1)

                # random crop images
                img_pair, flow_t, occ_t = random_crop(img_pair, flow_ori, 1 - noc_ori,
                                                      self.cfg.ot_size)

                # slic 200, random select 8~16 superpixels and fill them
                # with noise to simulate occlusions in the second frame
                if self.cfg.ot_slic:
                    img2 = img_pair[:, 3:]
                    seg_mask = run_slic_pt(img2, n_seg=200,
                                           compact=self.cfg.ot_compact, rd_select=[8, 16],
                                           fast=self.cfg.ot_fast).type_as(img2)  # Nx1xHxW
                    noise = torch.rand(img2.size()).type_as(img2)
                    img2 = img2 * (1 - seg_mask) + noise * seg_mask
                    img_pair[:, 3:] = img2

                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                noc_t = 1 - occ_t
                l_ot = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_ot = (l_ot * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_ot
            else:
                l_ot = torch.zeros_like(loss)

            # update meters
            key_meters.update(
                [loss.item(), l_ph.item(), l_sm.item(), flow_mean.item(),
                 l_atst.item(), l_ot.item()],
                img_pair.size(0))

            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()

            # manual loss scaling: scale the loss up before backward and
            # divide the gradients back down afterwards
            scaled_loss = 1024. * loss
            scaled_loss.backward()

            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)

            self.optimizer.step()

            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()

            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)

            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)

            self.i_iter += 1
        self.i_epoch += 1
@torch.no_grad()
def _validate_with_gt(self):
    """Evaluate the current model on the KITTI validation loader(s).

    For each validation set, compares predicted forward flow against
    ground-truth flow read from disk ('flow_occ' / 'flow_noc' paths) and
    accumulates EPE / noc / occ / F1 metrics.

    Returns:
        (all_error_avgs, all_error_names): flat lists of average metric
        values and their '{metric}_{set_index}' names across all sets.
    """
    batch_time = AverageMeter()

    # Allow a single loader to be passed; normalize to a list of loaders.
    if type(self.valid_loader) is not list:
        self.valid_loader = [self.valid_loader]

    # only use the first GPU to run validation, multiple GPUs might raise error.
    # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
    self.model = self.model.module
    self.model.eval()

    end = time.time()
    all_error_names = []
    all_error_avgs = []

    n_step = 0
    for i_set, loader in enumerate(self.valid_loader):
        error_names = ['EPE', 'E_noc', 'E_occ', 'F1_all']
        error_meters = AverageMeter(i=len(error_names))
        for i_step, data in enumerate(loader):
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)

            # Ground truth is loaded from file paths carried in the batch;
            # load_flow returns (flow, valid_mask) per sample.
            res = list(map(load_flow, data['flow_occ']))
            gt_flows, occ_masks = [r[0] for r in res], [r[1] for r in res]
            res = list(map(load_flow, data['flow_noc']))
            _, noc_masks = [r[0] for r in res], [r[1] for r in res]

            # Pack flow + both masks into one HxWx4 array per sample, the
            # layout evaluate_flow expects for KITTI-style metrics.
            gt_flows = [np.concatenate([flow, occ_mask, noc_mask], axis=2) for
                        flow, occ_mask, noc_mask in
                        zip(gt_flows, occ_masks, noc_masks)]

            # compute output
            flows = self.model(img_pair)['flows_fw']
            # NCHW tensor -> NHWC numpy for the evaluation helper.
            pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])

            es = evaluate_flow(gt_flows, pred_flows)
            error_meters.update([l.item() for l in es], img_pair.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                    i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                    map('{:.2f}'.format, error_meters.avg)))

            # Cap validation length per set.
            if i_step > self.cfg.valid_size:
                break
        n_step += len(loader)

        # write error to tf board.
        for value, name in zip(error_meters.avg, error_names):
            self.summary_writer.add_scalar(
                'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)

        all_error_avgs.extend(error_meters.avg)
        all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])

    # Re-wrap for multi-GPU training after single-GPU validation.
    self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
    # In order to reduce the space occupied during debugging,
    # only the model with more than cfg.save_iter iterations will be saved.
    if self.i_iter > self.cfg.save_iter:
        self.save_model(all_error_avgs[0], name='KITTI_Flow')

    return all_error_avgs, all_error_names
| 8,755 | 40.49763 | 91 | py |
ARFlow | ARFlow-master/trainer/sintel_trainer.py | import time
import torch
from .base_trainer import BaseTrainer
from utils.flow_utils import evaluate_flow
from utils.misc_utils import AverageMeter
class TrainFramework(BaseTrainer):
    """Unsupervised optical-flow training/validation loop for MPI-Sintel.

    Trains with the photometric + smoothness loss returned by
    ``self.loss_func`` on concatenated forward/backward flow predictions,
    and validates against ground-truth flow using end-point error (EPE).
    """

    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)

    def _run_one_epoch(self):
        """Run one training epoch, advancing ``self.i_iter`` / ``self.i_epoch``."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()

        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)

        self.model.train()
        end = time.time()

        # Optional curriculum: swap in stage-1 loss settings at the
        # configured epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)

        for i_step, data in enumerate(self.train_loader):
            # Cap the number of batches per epoch.
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)

            # measure data loading time
            am_data_time.update(time.time() - end)

            # compute output
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            # Pair forward/backward flow at every pyramid level for the loss.
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)

            # update meters
            key_meters.update([loss.item(), l_ph.item(), l_sm.item(), flow_mean.item()],
                              img_pair.size(0))

            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()

            # Manual loss scaling (x1024): backprop the scaled loss, then
            # divide the factor back out of every gradient below.
            scaled_loss = 1024. * loss
            scaled_loss.backward()

            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)

            self.optimizer.step()

            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()

            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)

            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)

            self.i_iter += 1
        self.i_epoch += 1

    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate EPE on the Sintel validation loader(s).

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            '{metric}_{set_index}' names across all validation sets.
        """
        batch_time = AverageMeter()

        # Allow a single loader to be passed; normalize to a list.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]

        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()

        end = time.time()
        all_error_names = []
        all_error_avgs = []

        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)
                # NCHW -> NHWC numpy, the layout evaluate_flow expects.
                gt_flows = data['target']['flow'].numpy().transpose([0, 2, 3, 1])

                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])

                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))

                # Cap validation length per set.
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)

            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)

            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])

        # Re-wrap for multi-GPU training after single-GPU validation.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            # Ranking metric: sum of EPE over the first two validation sets
            # (assumes at least two sets, e.g. Sintel clean + final).
            self.save_model(all_error_avgs[0] + all_error_avgs[1], name='Sintel')

        return all_error_avgs, all_error_names
| 5,445 | 37.9 | 89 | py |
ARFlow | ARFlow-master/trainer/kitti_trainer.py | import time
import torch
import numpy as np
from .base_trainer import BaseTrainer
from utils.flow_utils import load_flow, evaluate_flow
from utils.misc_utils import AverageMeter
class TrainFramework(BaseTrainer):
    """Unsupervised optical-flow training/validation loop for KITTI.

    The training epoch is identical to the Sintel trainer; validation reads
    KITTI ground truth (occ/noc maps) from disk and reports
    EPE / E_noc / E_occ / F1_all.
    """

    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)

    def _run_one_epoch(self):
        """Run one training epoch, advancing ``self.i_iter`` / ``self.i_epoch``."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()

        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)

        self.model.train()
        end = time.time()

        # Optional curriculum: swap in stage-1 loss settings at the
        # configured epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)

        for i_step, data in enumerate(self.train_loader):
            # Cap the number of batches per epoch.
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'], data['img2']
            img_pair = torch.cat([img1, img2], 1).to(self.device)

            # measure data loading time
            am_data_time.update(time.time() - end)

            # compute output
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            # Pair forward/backward flow at every pyramid level for the loss.
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)

            # update meters
            key_meters.update([loss.item(), l_ph.item(), l_sm.item(), flow_mean.item()],
                              img_pair.size(0))

            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()

            # Manual loss scaling (x1024): backprop the scaled loss, then
            # divide the factor back out of every gradient below.
            scaled_loss = 1024. * loss
            scaled_loss.backward()

            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)

            self.optimizer.step()

            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()

            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)

            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)

            self.i_iter += 1
        self.i_epoch += 1

    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate KITTI flow metrics on the validation loader(s).

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            '{metric}_{set_index}' names across all validation sets.
        """
        batch_time = AverageMeter()

        # Allow a single loader to be passed; normalize to a list.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]

        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()

        end = time.time()
        all_error_names = []
        all_error_avgs = []

        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE', 'E_noc', 'E_occ', 'F1_all']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)

                # Ground truth loaded from file paths carried in the batch;
                # load_flow returns (flow, valid_mask) per sample.
                res = list(map(load_flow, data['flow_occ']))
                gt_flows, occ_masks = [r[0] for r in res], [r[1] for r in res]
                res = list(map(load_flow, data['flow_noc']))
                _, noc_masks = [r[0] for r in res], [r[1] for r in res]

                # Pack flow + both masks into one HxWx4 array per sample,
                # the layout evaluate_flow expects for KITTI metrics.
                gt_flows = [np.concatenate([flow, occ_mask, noc_mask], axis=2) for
                            flow, occ_mask, noc_mask in
                            zip(gt_flows, occ_masks, noc_masks)]

                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])

                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))

                # Cap validation length per set.
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)

            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)

            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])

        # Re-wrap for multi-GPU training after single-GPU validation.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            # Ranking metric: overall EPE of the first validation set.
            self.save_model(all_error_avgs[0], name='KITTI_Flow')

        return all_error_avgs, all_error_names
| 5,884 | 38.496644 | 89 | py |
ARFlow | ARFlow-master/trainer/sintel_trainer_ar.py | import time
import torch
from copy import deepcopy
from .base_trainer import BaseTrainer
from utils.flow_utils import evaluate_flow
from utils.misc_utils import AverageMeter
from transforms.ar_transforms.sp_transfroms import RandomAffineFlow
from transforms.ar_transforms.oc_transforms import run_slic_pt, random_crop
class TrainFramework(BaseTrainer):
    """Sintel trainer with augmentation-regularization (AR) passes.

    On top of the plain photometric/smoothness training pass, optionally
    runs a 2nd pass on spatially-transformed inputs (``run_atst``, loss
    ``l_atst``) and a 3rd pass on cropped/occlusion-hallucinated inputs
    (``run_ot``, loss ``l_ot``), each supervised by the 1st-pass flow.
    """

    def __init__(self, train_loader, valid_loader, model, loss_func,
                 _log, save_root, config):
        super(TrainFramework, self).__init__(
            train_loader, valid_loader, model, loss_func, _log, save_root, config)

        # Spatial transform used to build the self-supervised 2nd pass.
        self.sp_transform = RandomAffineFlow(
            self.cfg.st_cfg, addnoise=self.cfg.st_cfg.add_noise).to(self.device)

    def _run_one_epoch(self):
        """Run one training epoch (up to three forward passes per batch)."""
        am_batch_time = AverageMeter()
        am_data_time = AverageMeter()

        key_meter_names = ['Loss', 'l_ph', 'l_sm', 'flow_mean', 'l_atst', 'l_ot']
        key_meters = AverageMeter(i=len(key_meter_names), precision=4)

        self.model.train()
        end = time.time()

        # Optional curriculum: swap in stage-1 loss settings at the
        # configured epoch.
        if 'stage1' in self.cfg:
            if self.i_epoch == self.cfg.stage1.epoch:
                self.loss_func.cfg.update(self.cfg.stage1.loss)

        for i_step, data in enumerate(self.train_loader):
            # Cap the number of batches per epoch.
            if i_step > self.cfg.epoch_size:
                break
            # read data to device
            img1, img2 = data['img1'].to(self.device), data['img2'].to(self.device)
            img_pair = torch.cat([img1, img2], 1)

            # measure data loading time
            am_data_time.update(time.time() - end)

            # run 1st pass
            res_dict = self.model(img_pair, with_bk=True)
            flows_12, flows_21 = res_dict['flows_fw'], res_dict['flows_bw']
            flows = [torch.cat([flo12, flo21], 1) for flo12, flo21 in
                     zip(flows_12, flows_21)]
            loss, l_ph, l_sm, flow_mean = self.loss_func(flows, img_pair)

            # Detached 1st-pass flow acts as the pseudo label for the
            # augmentation passes below.
            flow_ori = res_dict['flows_fw'][0].detach()

            if self.cfg.run_atst:
                # 'img*_ph' are the non-photometric-augmented copies.
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)

                # construct augment sample
                noc_ori = self.loss_func.pyramid_occu_mask1[0]  # non-occluded region
                s = {'imgs': [img1, img2], 'flows_f': [flow_ori], 'masks_f': [noc_ori]}
                st_res = self.sp_transform(deepcopy(s)) if self.cfg.run_st else deepcopy(s)
                flow_t, noc_t = st_res['flows_f'][0], st_res['masks_f'][0]

                # run 2nd pass
                img_pair = torch.cat(st_res['imgs'], 1)
                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                if not self.cfg.mask_st:
                    # Disable occlusion masking of the AR loss.
                    noc_t = torch.ones_like(noc_t)
                # Robust (generalized Charbonnier) penalty on the residual.
                l_atst = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_atst = (l_atst * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_atst
            else:
                l_atst = torch.zeros_like(loss)

            if self.cfg.run_ot:
                # NOTE(review): noc_ori is only assigned inside the run_atst
                # branch; run_ot=True with run_atst=False would raise
                # NameError here — confirm configs always enable both.
                img1, img2 = data['img1_ph'].to(self.device), data['img2_ph'].to(
                    self.device)
                # run 3rd pass
                img_pair = torch.cat([img1, img2], 1)

                # random crop images
                img_pair, flow_t, occ_t = random_crop(img_pair, flow_ori, 1 - noc_ori,
                                                      self.cfg.ot_size)

                # slic 200, random select 8~16
                if self.cfg.ot_slic:
                    # Hallucinate occlusions: replace randomly selected
                    # superpixels of the second frame with noise.
                    img2 = img_pair[:, 3:]
                    seg_mask = run_slic_pt(img2, n_seg=200,
                                           compact=self.cfg.ot_compact, rd_select=[8, 16],
                                           fast=self.cfg.ot_fast).type_as(img2)  # Nx1xHxW
                    noise = torch.rand(img2.size()).type_as(img2)
                    img2 = img2 * (1 - seg_mask) + noise * seg_mask
                    img_pair[:, 3:] = img2

                flow_t_pred = self.model(img_pair, with_bk=False)['flows_fw'][0]

                noc_t = 1 - occ_t
                l_ot = ((flow_t_pred - flow_t).abs() + self.cfg.ar_eps) ** self.cfg.ar_q
                l_ot = (l_ot * noc_t).mean() / (noc_t.mean() + 1e-7)

                loss += self.cfg.w_ar * l_ot
            else:
                l_ot = torch.zeros_like(loss)

            # update meters
            key_meters.update(
                [loss.item(), l_ph.item(), l_sm.item(), flow_mean.item(),
                 l_atst.item(), l_ot.item()],
                img_pair.size(0))

            # compute gradient and do optimization step
            self.optimizer.zero_grad()
            # loss.backward()

            # Manual loss scaling (x1024): backprop the scaled loss, then
            # divide the factor back out of every gradient below.
            scaled_loss = 1024. * loss
            scaled_loss.backward()

            for param in [p for p in self.model.parameters() if p.requires_grad]:
                param.grad.data.mul_(1. / 1024)

            self.optimizer.step()

            # measure elapsed time
            am_batch_time.update(time.time() - end)
            end = time.time()

            if self.i_iter % self.cfg.record_freq == 0:
                for v, name in zip(key_meters.val, key_meter_names):
                    self.summary_writer.add_scalar('Train_' + name, v, self.i_iter)

            if self.i_iter % self.cfg.print_freq == 0:
                istr = '{}:{:04d}/{:04d}'.format(
                    self.i_epoch, i_step, self.cfg.epoch_size) + \
                       ' Time {} Data {}'.format(am_batch_time, am_data_time) + \
                       ' Info {}'.format(key_meters)
                self._log.info(istr)

            self.i_iter += 1
        self.i_epoch += 1

    @torch.no_grad()
    def _validate_with_gt(self):
        """Evaluate EPE on the Sintel validation loader(s).

        Returns:
            (all_error_avgs, all_error_names): averaged metric values and
            '{metric}_{set_index}' names across all validation sets.
        """
        batch_time = AverageMeter()

        # Allow a single loader to be passed; normalize to a list.
        if type(self.valid_loader) is not list:
            self.valid_loader = [self.valid_loader]

        # only use the first GPU to run validation, multiple GPUs might raise error.
        # https://github.com/Eromera/erfnet_pytorch/issues/2#issuecomment-486142360
        self.model = self.model.module
        self.model.eval()

        end = time.time()
        all_error_names = []
        all_error_avgs = []

        n_step = 0
        for i_set, loader in enumerate(self.valid_loader):
            error_names = ['EPE']
            error_meters = AverageMeter(i=len(error_names))
            for i_step, data in enumerate(loader):
                img1, img2 = data['img1'], data['img2']
                img_pair = torch.cat([img1, img2], 1).to(self.device)
                # NCHW -> NHWC numpy, the layout evaluate_flow expects.
                gt_flows = data['target']['flow'].numpy().transpose([0, 2, 3, 1])

                # compute output
                flows = self.model(img_pair)['flows_fw']
                pred_flows = flows[0].detach().cpu().numpy().transpose([0, 2, 3, 1])

                es = evaluate_flow(gt_flows, pred_flows)
                error_meters.update([l.item() for l in es], img_pair.size(0))

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if i_step % self.cfg.print_freq == 0 or i_step == len(loader) - 1:
                    self._log.info('Test: {0}[{1}/{2}]\t Time {3}\t '.format(
                        i_set, i_step, self.cfg.valid_size, batch_time) + ' '.join(
                        map('{:.2f}'.format, error_meters.avg)))

                # Cap validation length per set.
                if i_step > self.cfg.valid_size:
                    break
            n_step += len(loader)

            # write error to tf board.
            for value, name in zip(error_meters.avg, error_names):
                self.summary_writer.add_scalar(
                    'Valid_{}_{}'.format(name, i_set), value, self.i_epoch)

            all_error_avgs.extend(error_meters.avg)
            all_error_names.extend(['{}_{}'.format(name, i_set) for name in error_names])

        # Re-wrap for multi-GPU training after single-GPU validation.
        self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)
        # In order to reduce the space occupied during debugging,
        # only the model with more than cfg.save_iter iterations will be saved.
        if self.i_iter > self.cfg.save_iter:
            # Ranking metric: sum of EPE over the first two validation sets
            # (assumes at least two sets, e.g. Sintel clean + final).
            self.save_model(all_error_avgs[0] + all_error_avgs[1], name='Sintel')

        return all_error_avgs, all_error_names
| 8,316 | 40.173267 | 91 | py |
ARFlow | ARFlow-master/models/pwclite.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.warp_utils import flow_warp
from .correlation_package.correlation import Correlation
# from .correlation_native import Correlation
def conv(in_planes, out_planes, kernel_size=3, stride=1, dilation=1, isReLU=True):
    """Build a Conv2d layer, optionally followed by LeakyReLU(0.1).

    Padding is chosen as ``((kernel_size - 1) * dilation) // 2`` so spatial
    size is preserved when ``stride == 1``.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: square kernel size (default 3).
        stride: convolution stride (default 1).
        dilation: convolution dilation (default 1).
        isReLU: if True, append a LeakyReLU(0.1, inplace) activation.

    Returns:
        nn.Sequential containing the Conv2d and, if requested, the activation.
    """
    # Build the layer list once instead of duplicating the Conv2d
    # construction in both branches (original had two copies).
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                        dilation=dilation,
                        padding=((kernel_size - 1) * dilation) // 2, bias=True)]
    if isReLU:
        layers.append(nn.LeakyReLU(0.1, inplace=True))
    return nn.Sequential(*layers)
class FeatureExtractor(nn.Module):
    """Convolutional feature pyramid encoder.

    Each level halves the spatial resolution (stride-2 conv) and is followed
    by a stride-1 refinement conv. ``forward`` returns the per-level features
    from coarsest to finest (reversed build order).
    """

    def __init__(self, num_chs):
        """Args:
            num_chs: channel counts per level, e.g. [3, 16, 32, ...]; level i
                maps num_chs[i] -> num_chs[i + 1] channels.
        """
        super(FeatureExtractor, self).__init__()
        self.num_chs = num_chs
        self.convs = nn.ModuleList()
        # Fix: drop the unused enumerate() index the original carried.
        for ch_in, ch_out in zip(num_chs[:-1], num_chs[1:]):
            layer = nn.Sequential(
                conv(ch_in, ch_out, stride=2),
                conv(ch_out, ch_out)
            )
            self.convs.append(layer)

    def forward(self, x):
        """Return the feature pyramid of ``x``, coarsest level first."""
        feature_pyramid = []
        # Fix: loop variable renamed from 'conv', which shadowed the
        # module-level conv() helper used in __init__.
        for layer in self.convs:
            x = layer(x)
            feature_pyramid.append(x)

        return feature_pyramid[::-1]
class FlowEstimatorDense(nn.Module):
    """Densely-connected flow decoder head.

    Each conv's output is concatenated with its own input (DenseNet-style),
    so channel width grows by the conv's output channels at every step.
    ``feat_dim`` exposes the width of the final feature map consumed by the
    context network.
    """

    def __init__(self, ch_in):
        # ch_in: channels of the concatenated correlation/feature/flow input.
        super(FlowEstimatorDense, self).__init__()
        self.conv1 = conv(ch_in, 128)
        self.conv2 = conv(ch_in + 128, 128)
        self.conv3 = conv(ch_in + 256, 96)
        self.conv4 = conv(ch_in + 352, 64)
        self.conv5 = conv(ch_in + 416, 32)
        # Width of x5 below: ch_in + 128 + 128 + 96 + 64 + 32.
        self.feat_dim = ch_in + 448
        self.conv_last = conv(ch_in + 448, 2, isReLU=False)

    def forward(self, x):
        """Return (features, flow): the last dense feature map and a 2-channel flow."""
        x1 = torch.cat([self.conv1(x), x], dim=1)
        x2 = torch.cat([self.conv2(x1), x1], dim=1)
        x3 = torch.cat([self.conv3(x2), x2], dim=1)
        x4 = torch.cat([self.conv4(x3), x3], dim=1)
        x5 = torch.cat([self.conv5(x4), x4], dim=1)
        x_out = self.conv_last(x5)
        return x5, x_out
class FlowEstimatorReduce(nn.Module):
    # can reduce 25% of training time.
    """Lighter flow decoder head.

    Unlike FlowEstimatorDense, each conv only sees the previous one or two
    feature maps instead of the full dense concatenation, cutting channel
    width (feat_dim is a fixed 32).
    """

    def __init__(self, ch_in):
        # ch_in: channels of the concatenated correlation/feature/flow input.
        super(FlowEstimatorReduce, self).__init__()
        self.conv1 = conv(ch_in, 128)
        self.conv2 = conv(128, 128)
        self.conv3 = conv(128 + 128, 96)
        self.conv4 = conv(128 + 96, 64)
        self.conv5 = conv(96 + 64, 32)
        self.feat_dim = 32
        self.predict_flow = conv(64 + 32, 2, isReLU=False)

    def forward(self, x):
        """Return (features, flow): the 32-channel feature map and a 2-channel flow."""
        x1 = self.conv1(x)
        x2 = self.conv2(x1)
        x3 = self.conv3(torch.cat([x1, x2], dim=1))
        x4 = self.conv4(torch.cat([x2, x3], dim=1))
        x5 = self.conv5(torch.cat([x3, x4], dim=1))
        flow = self.predict_flow(torch.cat([x4, x5], dim=1))
        return x5, flow
class ContextNetwork(nn.Module):
    """Dilated-convolution refinement network.

    Takes the concatenated decoder features + current flow and predicts a
    2-channel residual flow. Increasing dilations (1,2,4,8,16) grow the
    receptive field without downsampling.
    """

    def __init__(self, ch_in):
        super(ContextNetwork, self).__init__()

        self.convs = nn.Sequential(
            conv(ch_in, 128, 3, 1, 1),
            conv(128, 128, 3, 1, 2),
            conv(128, 128, 3, 1, 4),
            conv(128, 96, 3, 1, 8),
            conv(96, 64, 3, 1, 16),
            conv(64, 32, 3, 1, 1),
            conv(32, 2, isReLU=False)
        )

    def forward(self, x):
        """Return the 2-channel residual flow for input features ``x``."""
        return self.convs(x)
class PWCLite(nn.Module):
    """Lightweight PWC-Net variant supporting 2-, 3- and 5-frame input.

    A shared feature pyramid feeds a coarse-to-fine estimation loop:
    warp -> correlation -> flow estimator -> context refinement at each
    pyramid level down to ``output_level``.
    """

    def __init__(self, cfg):
        """Args:
            cfg: config with attributes ``upsample``, ``n_frames`` and
                ``reduce_dense`` (choice of flow-estimator head).
        """
        super(PWCLite, self).__init__()
        self.search_range = 4
        self.num_chs = [3, 16, 32, 64, 96, 128, 192]
        self.output_level = 4
        self.num_levels = 7
        self.leakyRELU = nn.LeakyReLU(0.1, inplace=True)

        self.feature_pyramid_extractor = FeatureExtractor(self.num_chs)
        self.upsample = cfg.upsample
        self.n_frames = cfg.n_frames
        self.reduce_dense = cfg.reduce_dense

        self.corr = Correlation(pad_size=self.search_range, kernel_size=1,
                                max_displacement=self.search_range, stride1=1,
                                stride2=1, corr_multiply=1)

        # (2 * 4 + 1)^2 = 81 correlation channels per frame pair.
        self.dim_corr = (self.search_range * 2 + 1) ** 2
        self.num_ch_in = 32 + (self.dim_corr + 2) * (self.n_frames - 1)

        if self.reduce_dense:
            self.flow_estimators = FlowEstimatorReduce(self.num_ch_in)
        else:
            self.flow_estimators = FlowEstimatorDense(self.num_ch_in)

        self.context_networks = ContextNetwork(
            (self.flow_estimators.feat_dim + 2) * (self.n_frames - 1))

        # Per-level 1x1 convs reducing pyramid features to 32 channels.
        self.conv_1x1 = nn.ModuleList([conv(192, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(128, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(96, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(64, 32, kernel_size=1, stride=1, dilation=1),
                                       conv(32, 32, kernel_size=1, stride=1, dilation=1)])

    def num_parameters(self):
        """Return the number of trainable parameter elements."""
        return sum(
            [p.data.nelement() if p.requires_grad else 0 for p in self.parameters()])

    def init_weights(self):
        """Kaiming-initialize all conv layers, zero their biases.

        BUG FIX: the original iterated ``self.named_modules()``, which yields
        ``(name, module)`` tuples, so ``isinstance(layer, nn.Conv2d)`` was
        always False and no weight was ever initialized. Iterate
        ``self.modules()`` to get the module objects themselves.
        """
        for layer in self.modules():
            if isinstance(layer, nn.Conv2d):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)

            elif isinstance(layer, nn.ConvTranspose2d):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, 0)

    def forward_2_frames(self, x1_pyramid, x2_pyramid):
        """Coarse-to-fine flow from frame 1 to frame 2.

        Returns a list of flow maps, finest level first.
        """
        # outputs
        flows = []

        # init
        b_size, _, h_x1, w_x1, = x1_pyramid[0].size()
        init_dtype = x1_pyramid[0].dtype
        init_device = x1_pyramid[0].device
        flow = torch.zeros(b_size, 2, h_x1, w_x1, dtype=init_dtype,
                           device=init_device).float()

        for l, (x1, x2) in enumerate(zip(x1_pyramid, x2_pyramid)):
            # warping
            if l == 0:
                x2_warp = x2
            else:
                # Upsample previous-level flow; magnitude doubles with
                # resolution, hence the * 2.
                flow = F.interpolate(flow * 2, scale_factor=2,
                                     mode='bilinear', align_corners=True)
                x2_warp = flow_warp(x2, flow)

            # correlation
            out_corr = self.corr(x1, x2_warp)
            out_corr_relu = self.leakyRELU(out_corr)

            # concat and estimate flow
            x1_1by1 = self.conv_1x1[l](x1)
            x_intm, flow_res = self.flow_estimators(
                torch.cat([out_corr_relu, x1_1by1, flow], dim=1))
            flow = flow + flow_res

            flow_fine = self.context_networks(torch.cat([x_intm, flow], dim=1))
            flow = flow + flow_fine

            flows.append(flow)

            # upsampling or post-processing
            if l == self.output_level:
                break
        if self.upsample:
            # Bring the estimated flows to input resolution (x4).
            flows = [F.interpolate(flow * 4, scale_factor=4,
                                   mode='bilinear', align_corners=True) for flow in flows]
        return flows[::-1]

    def forward_3_frames(self, x0_pyramid, x1_pyramid, x2_pyramid):
        """Joint coarse-to-fine estimation of flow 1->0 and 1->2.

        The 4-channel flow tensor holds [flow_10 | flow_12]; each direction
        is estimated with the other direction's (negated) flow as extra
        context. Returns (flows_10, flows_12), finest level first.
        """
        # outputs
        flows = []

        # init
        b_size, _, h_x1, w_x1, = x1_pyramid[0].size()
        init_dtype = x1_pyramid[0].dtype
        init_device = x1_pyramid[0].device
        flow = torch.zeros(b_size, 4, h_x1, w_x1, dtype=init_dtype,
                           device=init_device).float()

        for l, (x0, x1, x2) in enumerate(zip(x0_pyramid, x1_pyramid, x2_pyramid)):
            # warping
            if l == 0:
                x0_warp = x0
                x2_warp = x2
            else:
                flow = F.interpolate(flow * 2, scale_factor=2,
                                     mode='bilinear', align_corners=True)
                x0_warp = flow_warp(x0, flow[:, :2])
                x2_warp = flow_warp(x2, flow[:, 2:])

            # correlation
            corr_10, corr_12 = self.corr(x1, x0_warp), self.corr(x1, x2_warp)
            corr_relu_10, corr_relu_12 = self.leakyRELU(corr_10), self.leakyRELU(corr_12)

            # concat and estimate flow
            x1_1by1 = self.conv_1x1[l](x1)
            feat_10 = [x1_1by1, corr_relu_10, corr_relu_12, flow[:, :2], -flow[:, 2:]]
            feat_12 = [x1_1by1, corr_relu_12, corr_relu_10, flow[:, 2:], -flow[:, :2]]
            x_intm_10, flow_res_10 = self.flow_estimators(torch.cat(feat_10, dim=1))
            x_intm_12, flow_res_12 = self.flow_estimators(torch.cat(feat_12, dim=1))
            flow_res = torch.cat([flow_res_10, flow_res_12], dim=1)
            flow = flow + flow_res

            feat_10 = [x_intm_10, x_intm_12, flow[:, :2], -flow[:, 2:]]
            feat_12 = [x_intm_12, x_intm_10, flow[:, 2:], -flow[:, :2]]
            flow_res_10 = self.context_networks(torch.cat(feat_10, dim=1))
            flow_res_12 = self.context_networks(torch.cat(feat_12, dim=1))
            flow_res = torch.cat([flow_res_10, flow_res_12], dim=1)
            flow = flow + flow_res

            flows.append(flow)

            if l == self.output_level:
                break

        if self.upsample:
            flows = [F.interpolate(flow * 4, scale_factor=4,
                                   mode='bilinear', align_corners=True) for flow in flows]
        flows_10 = [flo[:, :2] for flo in flows[::-1]]
        flows_12 = [flo[:, 2:] for flo in flows[::-1]]
        return flows_10, flows_12

    def forward(self, x, with_bk=False):
        """Dispatch on the number of frames packed into ``x``.

        Args:
            x: (N, 3*n_frames, H, W) tensor of concatenated RGB frames.
            with_bk: also compute backward flow (2-frame / 5-frame modes).

        Returns:
            dict with 'flows_fw' (and 'flows_bw' when available).
        """
        n_frames = x.size(1) / 3

        imgs = [x[:, 3 * i: 3 * i + 3] for i in range(int(n_frames))]
        # Append the raw image as the finest "feature" level.
        x = [self.feature_pyramid_extractor(img) + [img] for img in imgs]

        res_dict = {}
        if n_frames == 2:
            res_dict['flows_fw'] = self.forward_2_frames(x[0], x[1])
            if with_bk:
                res_dict['flows_bw'] = self.forward_2_frames(x[1], x[0])
        elif n_frames == 3:
            flows_10, flows_12 = self.forward_3_frames(x[0], x[1], x[2])
            res_dict['flows_fw'], res_dict['flows_bw'] = flows_12, flows_10
        elif n_frames == 5:
            flows_10, flows_12 = self.forward_3_frames(x[0], x[1], x[2])
            flows_21, flows_23 = self.forward_3_frames(x[1], x[2], x[3])
            res_dict['flows_fw'] = [flows_12, flows_23]
            if with_bk:
                flows_32, flows_34 = self.forward_3_frames(x[2], x[3], x[4])
                res_dict['flows_bw'] = [flows_21, flows_32]
        else:
            raise NotImplementedError
        return res_dict
| 10,680 | 36.742049 | 90 | py |
ARFlow | ARFlow-master/models/correlation_native.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class Correlation(nn.Module):
    """Pure-PyTorch cost-volume layer (drop-in stand-in for the CUDA op).

    For every integer shift (dy, dx) within +/- max_displacement, computes
    the channel-mean of the elementwise product of x1 with the shifted
    (zero-padded) x2, and stacks the (2d+1)^2 results along the channel dim.
    """

    def __init__(self, max_displacement=4, *args, **kwargs):
        # Extra args/kwargs are accepted (and ignored) so the signature is
        # call-compatible with the CUDA Correlation module.
        super(Correlation, self).__init__()
        self.max_displacement = max_displacement
        self.output_dim = 2 * self.max_displacement + 1
        self.pad_size = self.max_displacement

    def forward(self, x1, x2):
        """Return the (N, (2d+1)^2, H, W) cost volume of x1 against x2."""
        _, _, height, width = x1.size()
        padded = F.pad(x2, [self.pad_size] * 4)
        # Row-major shift order (dy outer, dx inner) matches the channel
        # layout of the CUDA implementation.
        costs = [
            torch.mean(x1 * padded[:, :, dy:(dy + height), dx:(dx + width)],
                       1, keepdim=True)
            for dy in range(self.output_dim)
            for dx in range(self.output_dim)
        ]
        return torch.cat(costs, 1)
if __name__ == '__main__':
    # Benchmark + numerical check of the pure-PyTorch Correlation against
    # the CUDA extension, on randomly sized inputs.
    import time
    import random
    from correlation_package.correlation import Correlation as Correlation_cuda

    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    corr1 = Correlation(max_displacement=4, kernel_size=1, stride1=1,
                        stride2=1, corr_multiply=1).to(device)
    corr2 = Correlation_cuda(pad_size=4, kernel_size=1, max_displacement=4, stride1=1,
                             stride2=1, corr_multiply=1)

    t1_sum = 0
    t2_sum = 0
    for i in range(50):
        C = random.choice([128, 256])
        H = random.choice([128, 256])  # , 512
        W = random.choice([64, 128])  # , 256
        x1 = torch.randn(4, C, H, W, requires_grad=True).to(device)
        x2 = torch.randn(4, C, H, W).to(device)

        end = time.time()
        y2 = corr2(x1, x2)
        t2_f = time.time() - end

        end = time.time()
        y2.sum().backward()
        t2_b = time.time() - end

        end = time.time()
        y1 = corr1(x1, x2)
        t1_f = time.time() - end

        end = time.time()
        y1.sum().backward()
        t1_b = time.time() - end

        # Implementations must agree numerically.
        assert torch.allclose(y1, y2, atol=1e-7)

        # NOTE(review): labels say "ms" but seconds are multiplied by 100,
        # not 1000 — values printed are off by 10x; confirm intended.
        print('Forward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t1_f * 100, t2_f * 100))
        print(
            'Backward: cuda: {:.3f}ms, pytorch: {:.3f}ms'.format(t1_b * 100, t2_b * 100))
        # Skip the first 3 iterations (warm-up) in the totals.
        if i < 3:
            continue
        t1_sum += t1_b + t1_f
        t2_sum += t2_b + t2_f
    print('cuda: {:.3f}s, pytorch: {:.3f}s'.format(t1_sum, t2_sum))
    ...
| 2,336 | 28.961538 | 90 | py |
ARFlow | ARFlow-master/models/correlation_package/correlation.py | import torch
from torch.nn.modules.module import Module
from torch.autograd import Function
import correlation_cuda
class CorrelationFunction(Function):
    """Autograd wrapper around the correlation_cuda extension.

    NOTE(review): this is the legacy autograd.Function style (instance
    ``__init__`` + non-static forward/backward, used by instantiating and
    calling the Function). Recent PyTorch versions only support static
    ``forward(ctx, ...)`` / ``backward(ctx, ...)`` with ``.apply`` — confirm
    the torch version this project pins.
    """

    def __init__(self, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):
        super(CorrelationFunction, self).__init__()
        self.pad_size = pad_size
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride1 = stride1
        self.stride2 = stride2
        self.corr_multiply = corr_multiply
        # self.out_channel = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)

    def forward(self, input1, input2):
        """Run the CUDA kernel; saves inputs for backward."""
        self.save_for_backward(input1, input2)

        with torch.cuda.device_of(input1):
            # rbot1/rbot2 are scratch buffers allocated by the kernel.
            rbot1 = input1.new()
            rbot2 = input2.new()
            output = input1.new()

            correlation_cuda.forward(input1, input2, rbot1, rbot2, output,
                self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)

        return output

    def backward(self, grad_output):
        """Compute input gradients via the CUDA kernel."""
        input1, input2 = self.saved_tensors

        with torch.cuda.device_of(input1):
            rbot1 = input1.new()
            rbot2 = input2.new()

            grad_input1 = input1.new()
            grad_input2 = input2.new()

            correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,
                self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)

        return grad_input1, grad_input2
class Correlation(Module):
    """nn.Module front-end for the CUDA correlation op.

    Stores the correlation hyper-parameters and builds a fresh
    CorrelationFunction per forward call (legacy autograd pattern — see the
    note on CorrelationFunction).
    """

    def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
        super(Correlation, self).__init__()
        self.pad_size = pad_size
        self.kernel_size = kernel_size
        self.max_displacement = max_displacement
        self.stride1 = stride1
        self.stride2 = stride2
        self.corr_multiply = corr_multiply

    def forward(self, input1, input2):
        """Return the cost volume of input1 against input2."""
        result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement, self.stride1, self.stride2, self.corr_multiply)(input1, input2)

        return result
| 2,265 | 34.968254 | 156 | py |
ARFlow | ARFlow-master/models/correlation_package/setup.py | #!/usr/bin/env python3
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
# Host-compiler flags for the C++ part of the extension.
cxx_args = ['-std=c++11']

# CUDA codegen targets: SASS for sm_50..sm_61 plus PTX for compute_61
# (forward compatibility on newer GPUs).
nvcc_args = [
    '-gencode', 'arch=compute_50,code=sm_50',
    '-gencode', 'arch=compute_52,code=sm_52',
    '-gencode', 'arch=compute_60,code=sm_60',
    '-gencode', 'arch=compute_61,code=sm_61',
    '-gencode', 'arch=compute_61,code=compute_61',
    # Pin the host compiler used by nvcc.
    '-ccbin', '/usr/bin/gcc'
]

# Build the 'correlation_cuda' extension module from the C++ binding and
# the CUDA kernel. Note the hard-coded CUDA 9.0 path below.
setup(
    name='correlation_cuda',
    ext_modules=[
        CUDAExtension('correlation_cuda', [
            'correlation_cuda.cc',
            'correlation_cuda_kernel.cu'
        ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args, 'cuda-path': ['/usr/local/cuda-9.0']})
    ],
    cmdclass={
        'build_ext': BuildExtension
    })
| 813 | 26.133333 | 105 | py |
ARFlow | ARFlow-master/datasets/get_dataset.py | import copy
from torchvision import transforms
from torch.utils.data import ConcatDataset
from transforms.co_transforms import get_co_transforms
from transforms.ar_transforms.ap_transforms import get_ap_transforms
from transforms import sep_transforms
from datasets.flow_datasets import SintelRaw, Sintel
from datasets.flow_datasets import KITTIRawFile, KITTIFlow, KITTIFlowMV
def get_dataset(all_cfg):
    """Build the (train_set, valid_set) pair selected by ``all_cfg.data.type``.

    Supported types: 'Sintel_Flow', 'Sintel_Raw', 'KITTI_Raw', 'KITTI_MV'.

    Raises:
        NotImplementedError: for any other ``cfg.type``.
    """
    cfg = all_cfg.data

    # Base per-image transform: HWC float array -> CHW tensor scaled to [0, 1].
    input_transform = transforms.Compose([
        sep_transforms.ArrayToTensor(),
        transforms.Normalize(mean=[0, 0, 0], std=[255, 255, 255]),
    ])

    co_transform = get_co_transforms(aug_args=all_cfg.data_aug)

    def _zoomed_transform(shape):
        # Fresh copy of the base transform with a Zoom to `shape` prepended.
        t = copy.deepcopy(input_transform)
        t.transforms.insert(0, sep_transforms.Zoom(*shape))
        return t

    def _sintel_valid_set():
        # Validation on Sintel: clean + final passes, sharing one zoomed transform.
        valid_transform = _zoomed_transform(cfg.test_shape)
        sets = [Sintel(cfg.root_sintel, n_frames=cfg.val_n_frames, type=ds_type,
                       split='training', subsplit=cfg.val_subsplit,
                       transform=valid_transform,
                       target_transform={'flow': sep_transforms.ArrayToTensor()})
                for ds_type in ('clean', 'final')]
        return ConcatDataset(sets)

    def _kitti_valid_set():
        # Validation on KITTI: 2015 + 2012 benchmarks, sharing one zoomed transform.
        valid_transform = _zoomed_transform(cfg.test_shape)
        sets = [KITTIFlow(root, n_frames=cfg.val_n_frames, transform=valid_transform)
                for root in (cfg.root_kitti15, cfg.root_kitti12)]
        return ConcatDataset(sets)

    if cfg.type == 'Sintel_Flow':
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_sets = [Sintel(cfg.root_sintel, n_frames=cfg.train_n_frames, type=ds_type,
                             split='training', subsplit=cfg.train_subsplit,
                             with_flow=False,
                             ap_transform=ap_transform,
                             transform=input_transform,
                             co_transform=co_transform)
                      for ds_type in ('clean', 'final')]
        train_set = ConcatDataset(train_sets)
        valid_set = _sintel_valid_set()
    elif cfg.type == 'Sintel_Raw':
        train_set = SintelRaw(cfg.root_sintel_raw, n_frames=cfg.train_n_frames,
                              transform=input_transform, co_transform=co_transform)
        valid_set = _sintel_valid_set()
    elif cfg.type == 'KITTI_Raw':
        train_transform = _zoomed_transform(cfg.train_shape)
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_set = KITTIRawFile(
            cfg.root,
            cfg.train_file,
            cfg.train_n_frames,
            transform=train_transform,
            ap_transform=ap_transform,
            co_transform=co_transform  # unsupervised: no target here
        )
        valid_set = _kitti_valid_set()
    elif cfg.type == 'KITTI_MV':
        train_transform = _zoomed_transform(cfg.train_shape)
        root_flow = cfg.root_kitti15 if cfg.train_15 else cfg.root_kitti12
        ap_transform = get_ap_transforms(cfg.at_cfg) if cfg.run_at else None
        train_set = KITTIFlowMV(
            root_flow,
            cfg.train_n_frames,
            transform=train_transform,
            ap_transform=ap_transform,
            co_transform=co_transform  # unsupervised: no target here
        )
        valid_set = _kitti_valid_set()
    else:
        raise NotImplementedError(cfg.type)
    return train_set, valid_set
ARFlow | ARFlow-master/datasets/flow_datasets.py | import imageio
import numpy as np
import random
from path import Path
from abc import abstractmethod, ABCMeta
from torch.utils.data import Dataset
from utils.flow_utils import load_flow
class ImgSeqDataset(Dataset, metaclass=ABCMeta):
    """Abstract base for image-sequence datasets.

    Subclasses implement :meth:`collect_samples`, returning a list of dicts
    with key 'imgs' (image paths relative to ``root``) and optional
    'flow' / 'mask' target paths.
    """
    def __init__(self, root, n_frames, input_transform=None, co_transform=None,
                 target_transform=None, ap_transform=None):
        self.root = Path(root)
        self.n_frames = n_frames  # number of consecutive images per sample
        self.input_transform = input_transform  # per-image transform
        self.co_transform = co_transform  # joint geometric transform over all images
        self.ap_transform = ap_transform  # appearance transform for '_ph' copies
        self.target_transform = target_transform  # dict: target key -> transform
        self.samples = self.collect_samples()
    @abstractmethod
    def collect_samples(self):
        pass
    def _load_sample(self, s):
        """Read the images (and optional flow/mask targets) of one sample dict."""
        images = s['imgs']
        images = [imageio.imread(self.root / p).astype(np.float32) for p in images]
        target = {}
        if 'flow' in s:
            target['flow'] = load_flow(self.root / s['flow'])
        if 'mask' in s:
            # 0~255 HxWx1
            mask = imageio.imread(self.root / s['mask']).astype(np.float32) / 255.
            if len(mask.shape) == 3:
                mask = mask[:, :, 0]
            target['mask'] = np.expand_dims(mask, -1)
        return images, target
    def __len__(self):
        return len(self.samples)
    def __getitem__(self, idx):
        """Return {'img1': ..., 'img2': ..., ['imgN_ph': ...,] 'target': {...}}."""
        images, target = self._load_sample(self.samples[idx])
        if self.co_transform is not None:
            # In unsupervised learning, there is no need to change target with image
            images, _ = self.co_transform(images, {})
        if self.input_transform is not None:
            images = [self.input_transform(i) for i in images]
        data = {'img{}'.format(i + 1): p for i, p in enumerate(images)}
        if self.ap_transform is not None:
            # Appearance-transformed clones stored alongside the originals.
            imgs_ph = self.ap_transform(
                [data['img{}'.format(i + 1)].clone() for i in range(self.n_frames)])
            for i in range(self.n_frames):
                data['img{}_ph'.format(i + 1)] = imgs_ph[i]
        if self.target_transform is not None:
            for key in self.target_transform.keys():
                target[key] = self.target_transform[key](target[key])
        data['target'] = target
        return data
class SintelRaw(ImgSeqDataset):
    """Unlabeled Sintel raw-movie frames: every window of ``n_frames``
    consecutive images inside a scene directory is one sample."""

    def __init__(self, root, n_frames=2, transform=None, co_transform=None):
        super(SintelRaw, self).__init__(root, n_frames, input_transform=transform,
                                        co_transform=co_transform)

    def collect_samples(self):
        collected = []
        for scene in self.root.dirs():
            frames = sorted(scene.files('*.png'))
            for start in range(len(frames) - self.n_frames + 1):
                window = frames[start:start + self.n_frames]
                collected.append(
                    {'imgs': [self.root.relpathto(p) for p in window]})
        return collected
class Sintel(ImgSeqDataset):
    """MPI-Sintel dataset (clean or final pass), optionally with GT flow.

    ``subsplit`` selects an unofficial train/val partition of the official
    'training' split; 'trainval' keeps every scene.
    """
    def __init__(self, root, n_frames=2, type='clean', split='training',
                 subsplit='trainval', with_flow=True, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None, ):
        self.dataset_type = type  # rendering pass: 'clean' or 'final'
        self.with_flow = with_flow
        self.split = split
        self.subsplit = subsplit
        self.training_scene = ['alley_1', 'ambush_4', 'ambush_6', 'ambush_7', 'bamboo_2',
                               'bandage_2', 'cave_2', 'market_2', 'market_5', 'shaman_2',
                               'sleeping_2', 'temple_3']  # Unofficial train-val split
        root = Path(root) / split
        super(Sintel, self).__init__(root, n_frames, input_transform=transform,
                                     target_transform=target_transform,
                                     co_transform=co_transform, ap_transform=ap_transform)
    def collect_samples(self):
        """Enumerate samples by walking the flow directory scene by scene."""
        img_dir = self.root / Path(self.dataset_type)
        flow_dir = self.root / 'flow'
        assert img_dir.isdir() and flow_dir.isdir()
        samples = []
        for flow_map in sorted((self.root / flow_dir).glob('*/*.flo')):
            info = flow_map.splitall()
            scene, filename = info[-2:]
            fid = int(filename[-8:-4])  # frame id parsed from 'frame_XXXX.flo'
            if self.split == 'training' and self.subsplit != 'trainval':
                # Keep only scenes belonging to the requested subsplit.
                if self.subsplit == 'train' and scene not in self.training_scene:
                    continue
                if self.subsplit == 'val' and scene in self.training_scene:
                    continue
            s = {'imgs': [img_dir / scene / 'frame_{:04d}.png'.format(fid + i) for i in
                          range(self.n_frames)]}
            try:
                assert all([p.isfile() for p in s['imgs']])
                if self.with_flow:
                    if self.n_frames == 3:
                        # for img1 img2 img3, only flow_23 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid + 1)
                    elif self.n_frames == 2:
                        # for img1 img2, flow_12 will be evaluated
                        s['flow'] = flow_dir / scene / 'frame_{:04d}.flo'.format(fid)
                    else:
                        raise NotImplementedError(
                            'n_frames {} with flow or mask'.format(self.n_frames))
                if self.with_flow:
                    assert s['flow'].isfile()
            except AssertionError:
                # Skip windows whose images or flow file are missing.
                print('Incomplete sample for: {}'.format(s['imgs'][0]))
                continue
            samples.append(s)
        return samples
class KITTIRawFile(ImgSeqDataset):
    """KITTI raw-data sequences listed in a split file; each line of the file
    holds the (root-relative) paths of ``n_frames`` consecutive images."""

    def __init__(self, root, sp_file, n_frames=2, ap_transform=None,
                 transform=None, target_transform=None, co_transform=None):
        self.sp_file = sp_file  # path of the split file listing the samples
        super(KITTIRawFile, self).__init__(root, n_frames,
                                           input_transform=transform,
                                           target_transform=target_transform,
                                           co_transform=co_transform,
                                           ap_transform=ap_transform)

    def collect_samples(self):
        collected = []
        with open(self.sp_file, 'r') as split_file:
            for line in split_file:
                tokens = line.split()
                collected.append(
                    {'imgs': [tokens[k] for k in range(self.n_frames)]})
        return collected
class KITTIFlowMV(ImgSeqDataset):
    """
    This dataset is used for unsupervised training only.

    Builds n_frames windows from the KITTI multi-view extensions of both
    cameras (image_2 / image_3) of every annotated sequence.
    """
    def __init__(self, root, n_frames=2,
                 transform=None, co_transform=None, ap_transform=None, ):
        super(KITTIFlowMV, self).__init__(root, n_frames,
                                          input_transform=transform,
                                          co_transform=co_transform,
                                          ap_transform=ap_transform)
    def collect_samples(self):
        """Collect sliding windows of n_frames images per sequence and camera."""
        flow_occ_dir = 'flow_' + 'occ'
        assert (self.root / flow_occ_dir).isdir()
        img_l_dir, img_r_dir = 'image_2', 'image_3'  # left / right camera folders
        assert (self.root / img_l_dir).isdir() and (self.root / img_r_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]  # strip '_10.png' -> sequence id
            for img_dir in [img_l_dir, img_r_dir]:
                img_list = (self.root / img_dir).files('*{}*.png'.format(root_filename))
                img_list.sort()
                for st in range(0, len(img_list) - self.n_frames + 1):
                    seq = img_list[st:st + self.n_frames]
                    sample = {}
                    sample['imgs'] = []
                    for i, file in enumerate(seq):
                        frame_id = int(file[-6:-4])
                        # Windows touching frames 09-12 are dropped; presumably
                        # to exclude the annotated pair 10/11 and its immediate
                        # neighbours from training -- TODO confirm.
                        if 12 >= frame_id >= 9:
                            break
                        sample['imgs'].append(self.root.relpathto(file))
                    if len(sample['imgs']) == self.n_frames:
                        samples.append(sample)
        return samples
class KITTIFlow(ImgSeqDataset):
    """
    This dataset is used for validation only, so all files about target are stored as
    file filepath and there is no transform about target.
    """
    def __init__(self, root, n_frames=2, transform=None):
        super(KITTIFlow, self).__init__(root, n_frames, input_transform=transform)
    def __getitem__(self, idx):
        """Return transformed input frames plus GT-flow *paths* and metadata."""
        s = self.samples[idx]
        # img 1 2 for 2 frames, img 0 1 2 for 3 frames.
        st = 1 if self.n_frames == 2 else 0
        ed = st + self.n_frames
        imgs = [s['img{}'.format(i)] for i in range(st, ed)]
        inputs = [imageio.imread(self.root / p).astype(np.float32) for p in imgs]
        raw_size = inputs[0].shape[:2]  # original (H, W) before any transform
        data = {
            'flow_occ': self.root / s['flow_occ'],
            'flow_noc': self.root / s['flow_noc'],
        }
        data.update({  # for test set
            'im_shape': raw_size,
            'img1_path': self.root / s['img1'],
        })
        if self.input_transform is not None:
            inputs = [self.input_transform(i) for i in inputs]
        data.update({'img{}'.format(i + 1): inputs[i] for i in range(self.n_frames)})
        return data
    def collect_samples(self):
        '''Will search in training folder for folders 'flow_noc' or 'flow_occ'
        and 'colored_0' (KITTI 2012) or 'image_2' (KITTI 2015) '''
        flow_occ_dir = 'flow_' + 'occ'
        flow_noc_dir = 'flow_' + 'noc'
        assert (self.root / flow_occ_dir).isdir()
        img_dir = 'image_2'
        assert (self.root / img_dir).isdir()
        samples = []
        for flow_map in sorted((self.root / flow_occ_dir).glob('*.png')):
            flow_map = flow_map.basename()
            root_filename = flow_map[:-7]  # strip '_10.png'
            flow_occ_map = flow_occ_dir + '/' + flow_map
            flow_noc_map = flow_noc_dir + '/' + flow_map
            s = {'flow_occ': flow_occ_map, 'flow_noc': flow_noc_map}
            img1 = img_dir + '/' + root_filename + '_10.png'
            img2 = img_dir + '/' + root_filename + '_11.png'
            assert (self.root / img1).isfile() and (self.root / img2).isfile()
            s.update({'img1': img1, 'img2': img2})
            if self.n_frames == 3:
                # Optional preceding frame for 3-frame evaluation.
                img0 = img_dir + '/' + root_filename + '_09.png'
                assert (self.root / img0).isfile()
                s.update({'img0': img0})
            samples.append(s)
        return samples
| 10,692 | 38.3125 | 90 | py |
ARFlow | ARFlow-master/utils/warp_utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import inspect
def mesh_grid(B, H, W):
    """Build a B x 2 x H x W grid of integer pixel coordinates.

    Channel 0 holds x (column) indices, channel 1 holds y (row) indices.
    """
    cols = torch.arange(W).view(1, 1, W).expand(B, H, W)  # BHW, value = x
    rows = torch.arange(H).view(1, H, 1).expand(B, H, W)  # BHW, value = y
    return torch.stack((cols, rows), dim=1)  # B2HW
def norm_grid(v_grid):
    """Rescale a B2HW pixel-coordinate grid to [-1, 1] and reorder it to
    BHW2, the layout expected by ``grid_sample``."""
    _, _, H, W = v_grid.size()
    normed = torch.zeros_like(v_grid)
    # Map x from [0, W-1] and y from [0, H-1] onto [-1, 1].
    normed[:, 0, :, :] = 2.0 * v_grid[:, 0, :, :] / (W - 1) - 1.0
    normed[:, 1, :, :] = 2.0 * v_grid[:, 1, :, :] / (H - 1) - 1.0
    return normed.permute(0, 2, 3, 1)  # BHW2
def get_corresponding_map(data):
    """
    :param data: unnormalized coordinates Bx2xHxW
    :return: Bx1xHxW

    Splats a bilinear weight of 1 from every source pixel onto the four
    integer neighbours of its target coordinate; the result counts how much
    "mass" lands on each target pixel (a range/coverage map).
    """
    B, _, H, W = data.size()
    # x = data[:, 0, :, :].view(B, -1).clamp(0, W - 1)  # BxN (N=H*W)
    # y = data[:, 1, :, :].view(B, -1).clamp(0, H - 1)
    x = data[:, 0, :, :].view(B, -1)  # BxN (N=H*W)
    y = data[:, 1, :, :].view(B, -1)
    # invalid = (x < 0) | (x > W - 1) | (y < 0) | (y > H - 1)   # BxN
    # invalid = invalid.repeat([1, 4])
    # NOTE: despite the names, x1/y1 hold the *floor* coordinates and x0/y0
    # hold floor+1 (the ceil side); *_floor / *_ceil are the clamped versions.
    x1 = torch.floor(x)
    x_floor = x1.clamp(0, W - 1)
    y1 = torch.floor(y)
    y_floor = y1.clamp(0, H - 1)
    x0 = x1 + 1
    x_ceil = x0.clamp(0, W - 1)
    y0 = y1 + 1
    y_ceil = y0.clamp(0, H - 1)
    # A neighbour is invalid when clamping moved it (it fell outside the image).
    x_ceil_out = x0 != x_ceil
    y_ceil_out = y0 != y_ceil
    x_floor_out = x1 != x_floor
    y_floor_out = y1 != y_floor
    invalid = torch.cat([x_ceil_out | y_ceil_out,
                         x_ceil_out | y_floor_out,
                         x_floor_out | y_ceil_out,
                         x_floor_out | y_floor_out], dim=1)
    # encode coordinates, since the scatter function can only index along one axis
    corresponding_map = torch.zeros(B, H * W).type_as(data)
    indices = torch.cat([x_ceil + y_ceil * W,
                         x_ceil + y_floor * W,
                         x_floor + y_ceil * W,
                         x_floor + y_floor * W], 1).long()  # BxN (N=4*H*W)
    # Bilinear weight of each of the four neighbours.
    values = torch.cat([(1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_ceil)),
                        (1 - torch.abs(x - x_ceil)) * (1 - torch.abs(y - y_floor)),
                        (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_ceil)),
                        (1 - torch.abs(x - x_floor)) * (1 - torch.abs(y - y_floor))],
                       1)
    # values = torch.ones_like(values)
    values[invalid] = 0
    corresponding_map.scatter_add_(1, indices, values)
    # decode coordinates
    corresponding_map = corresponding_map.view(B, H, W)
    return corresponding_map.unsqueeze(1)
def flow_warp(x, flow12, pad='border', mode='bilinear'):
    """Backward-warp ``x`` by ``flow12`` (B2HW): output(p) = x(p + flow12(p))."""
    B, _, H, W = x.size()
    sample_grid = norm_grid(mesh_grid(B, H, W).type_as(x) + flow12)  # BHW2 in [-1, 1]
    kwargs = {'mode': mode, 'padding_mode': pad}
    # `align_corners` only exists on newer torch; pass it explicitly when supported.
    if 'align_corners' in inspect.getfullargspec(F.grid_sample).args:
        kwargs['align_corners'] = True
    return F.grid_sample(x, sample_grid, **kwargs)
def get_occu_mask_bidirection(flow12, flow21, scale=0.01, bias=0.5):
    """Forward-backward consistency occlusion mask (1 = occluded).

    A pixel is flagged when |flow12 + warp(flow21)|^2 exceeds a threshold
    that grows with the squared flow magnitudes: scale * mag + bias.
    """
    flow21_warped = flow_warp(flow21, flow12, pad='zeros')
    fb_sum = flow12 + flow21_warped  # ~0 where forward and backward flow agree
    mag = (flow12 * flow12).sum(1, keepdim=True) + \
          (flow21_warped * flow21_warped).sum(1, keepdim=True)
    threshold = scale * mag + bias
    occluded = (fb_sum * fb_sum).sum(1, keepdim=True) > threshold
    return occluded.float()
def get_occu_mask_backward(flow21, th=0.2):
    """Range-map occlusion mask (1 = occluded): pixels that (almost) nothing
    in the second frame maps back onto are marked as occluded."""
    B, _, H, W = flow21.size()
    base = mesh_grid(B, H, W).type_as(flow21)  # B2HW
    coverage = get_corresponding_map(base + flow21)  # B1HW splat-coverage map
    return (coverage.clamp(min=0., max=1.) < th).float()
| 3,850 | 33.079646 | 106 | py |
ARFlow | ARFlow-master/utils/flow_utils.py | import torch
import cv2
import numpy as np
from matplotlib.colors import hsv_to_rgb
def load_flow(path):
    """Load an optical-flow field from a Middlebury .flo or KITTI 16-bit .png.

    Returns an (H, W, 2) float32 array for .flo files, or a tuple
    (flow, valid_mask) for KITTI .png files, where valid_mask is (H, W, 1).
    """
    if path.endswith('.png'):
        # for KITTI which uses 16bit PNG images
        # see 'https://github.com/ClementPinard/FlowNetPytorch/blob/master/datasets/KITTI.py'
        # The -1 is here to specify not to change the image depth (16bit), and is compatible
        # with both OpenCV2 and OpenCV3
        raw = cv2.imread(path, -1)
        flow = raw[:, :, 2:0:-1].astype(np.float32)  # BGR read: channels (v, u) -> (u, v)
        invalid = (raw[:, :, 0] == 0)  # first channel == 0 marks invalid pixels
        flow = flow - 32768  # undo the fixed-point offset ...
        flow = flow / 64  # ... and scaling of the KITTI encoding
        flow[np.abs(flow) < 1e-10] = 1e-10  # avoid exact zeros downstream
        flow[invalid, :] = 0
        return flow, np.expand_dims(raw[:, :, 0], 2)
    else:
        with open(path, 'rb') as f:
            magic = np.fromfile(f, np.float32, count=1)
            assert (202021.25 == magic), 'Magic number incorrect. Invalid .flo file'
            # The .flo header stores width first, then height.
            width = np.fromfile(f, np.int32, count=1)[0]
            height = np.fromfile(f, np.int32, count=1)[0]
            raw = np.fromfile(f, np.float32, count=2 * width * height)
            # Data is row-major: (height, width, 2) with bands (u, v).
            return np.resize(raw, (height, width, 2))
def flow_to_image(flow, max_flow=256):
    """Render an (H, W, 2) flow field as an RGB uint8 image via an HSV
    encoding: hue = flow direction, saturation = scaled magnitude."""
    if max_flow is not None:
        max_flow = max(max_flow, 1.)
    else:
        max_flow = np.max(flow)
    n = 8  # magnitude-to-saturation scaling factor
    u = flow[:, :, 0]
    v = flow[:, :, 1]
    magnitude = np.sqrt(np.square(u) + np.square(v))
    hue = np.mod(np.arctan2(v, u) / (2 * np.pi) + 1, 1)
    saturation = np.clip(magnitude * n / max_flow, a_min=0, a_max=1)
    value = np.clip(n - saturation, a_min=0, a_max=1)
    rgb = hsv_to_rgb(np.stack([hue, saturation, value], 2))
    return (rgb * 255).astype(np.uint8)
def resize_flow(flow, new_shape):
    """Bilinearly resize a B2HW flow field to ``new_shape`` = (H, W), rescaling
    the u/v components so vectors stay correct at the new resolution."""
    _, _, old_h, old_w = flow.shape
    new_h, new_w = new_shape
    flow = torch.nn.functional.interpolate(flow, (new_h, new_w),
                                           mode='bilinear', align_corners=True)
    flow[:, 0] /= old_w / float(new_w)  # rescale u (horizontal)
    flow[:, 1] /= old_h / float(new_h)  # rescale v (vertical)
    return flow
def evaluate_flow(gt_flows, pred_flows, moving_masks=None):
    """Compute average end-point error (EPE) and KITTI outlier rates.

    Args:
        gt_flows: list of HxWx2 (dense GT, e.g. Sintel) or HxWx4 arrays
            (KITTI: u, v, valid mask, non-occluded mask).
        pred_flows: list of hxwx2 predicted flows (any resolution; rescaled
            and resized to the GT resolution before comparison).
        moving_masks: optional list of HxW {0,1} masks of moving regions
            (only used for 4-channel GT).

    Returns:
        [epe] for 2-channel GT, otherwise
        [epe, epe_noc, epe_occ, outlier_rate(, epe_moving, epe_static)].
    """
    # credit "undepthflow/eval/evaluate_flow.py"
    def calculate_error_rate(epe_map, gt_flow, mask):
        # KITTI outlier definition: EPE > 3 px AND > 5% of the GT magnitude.
        bad_pixels = np.logical_and(
            epe_map * mask > 3,
            epe_map * mask / np.maximum(
                np.sqrt(np.sum(np.square(gt_flow), axis=2)), 1e-10) > 0.05)
        return bad_pixels.sum() / mask.sum() * 100.

    error, error_noc, error_occ, error_move, error_static, error_rate = \
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    error_move_rate, error_static_rate = 0.0, 0.0
    B = len(gt_flows)
    for i, (gt_flow, pred_flow) in enumerate(zip(gt_flows, pred_flows)):
        H, W = gt_flow.shape[:2]
        h, w = pred_flow.shape[:2]
        # Rescale the predicted vectors to GT units, then resize the map itself.
        pred_flow = np.copy(pred_flow)
        pred_flow[:, :, 0] = pred_flow[:, :, 0] / w * W
        pred_flow[:, :, 1] = pred_flow[:, :, 1] / h * H
        flo_pred = cv2.resize(pred_flow, (W, H), interpolation=cv2.INTER_LINEAR)
        epe_map = np.sqrt(
            np.sum(np.square(flo_pred[:, :, :2] - gt_flow[:, :, :2]),
                   axis=2))
        if gt_flow.shape[-1] == 2:
            # Dense GT: plain mean EPE.
            error += np.mean(epe_map)
        elif gt_flow.shape[-1] == 4:
            # KITTI GT: channel 2 = valid (all) mask, channel 3 = non-occluded mask.
            error += np.sum(epe_map * gt_flow[:, :, 2]) / np.sum(gt_flow[:, :, 2])
            noc_mask = gt_flow[:, :, -1]
            error_noc += np.sum(epe_map * noc_mask) / np.sum(noc_mask)
            error_occ += np.sum(epe_map * (gt_flow[:, :, 2] - noc_mask)) / max(
                np.sum(gt_flow[:, :, 2] - noc_mask), 1.0)
            error_rate += calculate_error_rate(epe_map, gt_flow[:, :, 0:2],
                                               gt_flow[:, :, 2])
            if moving_masks is not None:
                move_mask = moving_masks[i]
                error_move_rate += calculate_error_rate(
                    epe_map, gt_flow[:, :, 0:2], gt_flow[:, :, 2] * move_mask)
                error_static_rate += calculate_error_rate(
                    epe_map, gt_flow[:, :, 0:2],
                    gt_flow[:, :, 2] * (1.0 - move_mask))
                error_move += np.sum(epe_map * gt_flow[:, :, 2] *
                                     move_mask) / np.sum(gt_flow[:, :, 2] *
                                                         move_mask)
                error_static += np.sum(epe_map * gt_flow[:, :, 2] * (
                        1.0 - move_mask)) / np.sum(gt_flow[:, :, 2] *
                                                   (1.0 - move_mask))
    if gt_flows[0].shape[-1] == 4:
        res = [error / B, error_noc / B, error_occ / B, error_rate / B]
        if moving_masks is not None:
            res += [error_move / B, error_static / B]
        return res
    else:
        return [error / B]
ARFlow | ARFlow-master/utils/torch_utils.py | import torch
import shutil
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import numbers
import random
import math
from torch.optim import Optimizer
def init_seed(seed):
    """Seed torch (CPU and all GPUs), numpy and the stdlib RNG for
    reproducible runs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only builds
def weight_parameters(module):
    """Collect every parameter of ``module`` whose name contains 'weight'."""
    selected = []
    for name, param in module.named_parameters():
        if 'weight' in name:
            selected.append(param)
    return selected
def bias_parameters(module):
    """Collect every parameter of ``module`` whose name contains 'bias'."""
    return [tensor for param_name, tensor in module.named_parameters()
            if 'bias' in param_name]
def load_checkpoint(model_path):
    """Load a checkpoint file and return ``(epoch, state_dict)``.

    ``epoch`` is ``None`` when the checkpoint stores no 'epoch' entry; the
    weights may be stored either under 'state_dict' or at the top level.
    """
    payload = torch.load(model_path)
    epoch = payload.pop('epoch') if 'epoch' in payload else None
    state_dict = payload['state_dict'] if 'state_dict' in payload else payload
    return epoch, state_dict
def save_checkpoint(save_path, states, file_prefixes, is_best, filename='ckpt.pth.tar'):
    """Save one state (str prefix) or several (sequence of prefixes) under
    ``save_path``; when ``is_best``, also copy each to '<prefix>_model_best.pth.tar'."""
    def _save_one(state, prefix):
        target = save_path / '{}_{}'.format(prefix, filename)
        torch.save(state, target)
        if is_best:
            shutil.copyfile(target,
                            save_path / '{}_model_best.pth.tar'.format(prefix))

    if isinstance(file_prefixes, str):
        _save_one(states, file_prefixes)
    else:
        for prefix, state in zip(file_prefixes, states):
            _save_one(state, prefix)
def restore_model(model, pretrained_file):
    """Load weights from ``pretrained_file`` into ``model``, matched by name.

    Tolerates partial checkpoints: keys missing from the checkpoint keep the
    model's current values, and checkpoint-only keys are dropped; both cases
    are reported with printed warnings.
    """
    epoch, weights = load_checkpoint(pretrained_file)
    model_keys = set(model.state_dict().keys())
    weight_keys = set(weights.keys())
    # load weights by name
    weights_not_in_model = sorted(list(weight_keys - model_keys))
    model_not_in_weights = sorted(list(model_keys - weight_keys))
    if len(model_not_in_weights):
        print('Warning: There are weights in model but not in pre-trained.')
        for key in (model_not_in_weights):
            print(key)
            # Fall back to the model's current value for the missing key.
            weights[key] = model.state_dict()[key]
    if len(weights_not_in_model):
        print('Warning: There are pre-trained weights not in model.')
        for key in (weights_not_in_model):
            print(key)
        from collections import OrderedDict
        new_weights = OrderedDict()
        # Rebuild the dict keeping only the keys the model actually has.
        for key in model_keys:
            new_weights[key] = weights[key]
        weights = new_weights
    model.load_state_dict(weights)
    return model
class AdamW(Optimizer):
    """Implements AdamW algorithm.

    It has been proposed in `Fixing Weight Decay Regularization in Adam`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)

    .. Fixing Weight Decay Regularization in Adam:
    https://arxiv.org/abs/1711.05101
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        super(AdamW, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'AdamW does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                # according to the paper, this penalty should come after the bias correction
                # if group['weight_decay'] != 0:
                #     grad = grad.add(group['weight_decay'], p.data)

                # Decay the first and second moment running average coefficient.
                # Keyword alpha/value overloads are used: the positional
                # scalar-first forms (add_(1 - beta1, grad), ...) are
                # deprecated and removed in newer torch releases.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(exp_avg, denom, value=-step_size)

                if group['weight_decay'] != 0:
                    # Decoupled weight decay: p <- p - weight_decay * p.
                    # NOTE(review): unlike torch.optim.AdamW, the decay is not
                    # scaled by the learning rate here -- confirm this is intended.
                    p.data.add_(p.data, alpha=-group['weight_decay'])

        return loss
| 5,676 | 34.04321 | 102 | py |
ARFlow | ARFlow-master/transforms/sep_transforms.py | import numpy as np
import torch
# from scipy.misc import imresize
from skimage.transform import resize as imresize
class ArrayToTensor(object):
    """Convert an (H, W, C) numpy.ndarray into a (C, H, W) torch.FloatTensor."""

    def __call__(self, array):
        assert isinstance(array, np.ndarray)
        chw = np.transpose(array, (2, 0, 1))  # HWC -> CHW
        return torch.from_numpy(chw).float()
class Zoom(object):
    """Resize an (H, W, C) image to (new_h, new_w); no-op when the image
    already has the target size."""

    def __init__(self, new_h, new_w):
        self.new_h = new_h
        self.new_w = new_w

    def __call__(self, image):
        h, w, _ = image.shape
        if (h, w) == (self.new_h, self.new_w):
            return image  # already the right size -- return unchanged
        return imresize(image, (self.new_h, self.new_w))
| 832 | 26.766667 | 91 | py |
ARFlow | ARFlow-master/transforms/ar_transforms/interpolation.py | ## Portions of Code from, copyright 2018 Jochen Gast
from __future__ import absolute_import, division, print_function
import torch
from torch import nn
import torch.nn.functional as tf
def _bchw2bhwc(tensor):
return tensor.transpose(1,2).transpose(2,3)
def _bhwc2bchw(tensor):
return tensor.transpose(2,3).transpose(1,2)
class Meshgrid(nn.Module):
    """Lazily build (and cache) HxW coordinate grids ``xx`` / ``yy``.

    The grids are registered as buffers so they follow the module's
    device/dtype, and are rebuilt only when the requested size changes.
    """
    def __init__(self):
        super(Meshgrid, self).__init__()
        self.width = 0
        self.height = 0
        self.register_buffer("xx", torch.zeros(1,1))
        self.register_buffer("yy", torch.zeros(1,1))
        self.register_buffer("rangex", torch.zeros(1,1))
        self.register_buffer("rangey", torch.zeros(1,1))
    def _compute_meshgrid(self, width, height):
        # arange(..., out=...) resizes the registered buffers in place.
        torch.arange(0, width, out=self.rangex)
        torch.arange(0, height, out=self.rangey)
        self.xx = self.rangex.repeat(height, 1).contiguous()  # HxW, value = x
        self.yy = self.rangey.repeat(width, 1).t().contiguous()  # HxW, value = y
    def forward(self, width, height):
        # Rebuild only on a size change; otherwise reuse the cached grids.
        if self.width != width or self.height != height:
            self._compute_meshgrid(width=width, height=height)
            self.width = width
            self.height = height
        return self.xx, self.yy
class BatchSub2Ind(nn.Module):
    """Convert per-batch (row, col) subscripts into flat indices, MATLAB
    sub2ind-style, offsetting each batch element by height*width."""
    def __init__(self):
        super(BatchSub2Ind, self).__init__()
        self.register_buffer("_offsets", torch.LongTensor())
    def forward(self, shape, row_sub, col_sub, out=None):
        batch_size = row_sub.size(0)
        height, width = shape
        ind = row_sub*width + col_sub  # flat index within a single image
        torch.arange(batch_size, out=self._offsets)
        self._offsets *= (height*width)  # per-batch offset into the flattened BHW
        if out is None:
            return torch.add(ind, self._offsets.view(-1,1,1))
        else:
            # Write the result into the caller-provided buffer instead.
            torch.add(ind, self._offsets.view(-1,1,1), out=out)
class Interp2(nn.Module):
    """Bilinear interpolation of a BCHW tensor at query coordinates.

    ``forward(v, xq, yq)`` samples ``v`` at (possibly fractional) pixel
    coordinates ``xq``/``yq``. With ``clamp=True`` queries are clamped into
    the image (NOTE: in place, mutating the caller's tensors); otherwise
    out-of-bounds queries yield zeros. Intermediate results live in
    registered buffers to avoid reallocation across calls.
    """
    def __init__(self, clamp=False):
        super(Interp2, self).__init__()
        self._clamp = clamp
        self._batch_sub2ind = BatchSub2Ind()
        self.register_buffer("_x0", torch.LongTensor())
        self.register_buffer("_x1", torch.LongTensor())
        self.register_buffer("_y0", torch.LongTensor())
        self.register_buffer("_y1", torch.LongTensor())
        self.register_buffer("_i00", torch.LongTensor())
        self.register_buffer("_i01", torch.LongTensor())
        self.register_buffer("_i10", torch.LongTensor())
        self.register_buffer("_i11", torch.LongTensor())
        self.register_buffer("_v00", torch.FloatTensor())
        self.register_buffer("_v01", torch.FloatTensor())
        self.register_buffer("_v10", torch.FloatTensor())
        self.register_buffer("_v11", torch.FloatTensor())
        self.register_buffer("_x", torch.FloatTensor())
        self.register_buffer("_y", torch.FloatTensor())
    def forward(self, v, xq, yq):
        batch_size, channels, height, width = v.size()
        # clamp if wanted
        if self._clamp:
            xq.clamp_(0, width - 1)
            yq.clamp_(0, height - 1)
        # ------------------------------------------------------------------
        # Find neighbors
        #
        # x0 = torch.floor(xq).long(), x0.clamp_(0, width - 1)
        # x1 = x0 + 1, x1.clamp_(0, width - 1)
        # y0 = torch.floor(yq).long(), y0.clamp_(0, height - 1)
        # y1 = y0 + 1, y1.clamp_(0, height - 1)
        #
        # ------------------------------------------------------------------
        self._x0 = torch.floor(xq).long().clamp(0, width - 1)
        self._y0 = torch.floor(yq).long().clamp(0, height - 1)
        self._x1 = torch.add(self._x0, 1).clamp(0, width - 1)
        self._y1 = torch.add(self._y0, 1).clamp(0, height - 1)
        # batch_sub2ind: flat indices of the four neighbours of every query
        self._batch_sub2ind([height, width], self._y0, self._x0, out=self._i00)
        self._batch_sub2ind([height, width], self._y0, self._x1, out=self._i01)
        self._batch_sub2ind([height, width], self._y1, self._x0, out=self._i10)
        self._batch_sub2ind([height, width], self._y1, self._x1, out=self._i11)
        # reshape: gather the neighbour values from the flattened BHWxC view
        v_flat = _bchw2bhwc(v).contiguous().view(-1, channels)
        torch.index_select(v_flat, dim=0, index=self._i00.view(-1), out=self._v00)
        torch.index_select(v_flat, dim=0, index=self._i01.view(-1), out=self._v01)
        torch.index_select(v_flat, dim=0, index=self._i10.view(-1), out=self._v10)
        torch.index_select(v_flat, dim=0, index=self._i11.view(-1), out=self._v11)
        # local_coords: fractional offset of each query inside its cell
        torch.add(xq, - self._x0.float(), out=self._x)
        torch.add(yq, - self._y0.float(), out=self._y)
        # weights: standard bilinear interpolation weights
        w00 = torch.unsqueeze((1.0 - self._y) * (1.0 - self._x), dim=1)
        w01 = torch.unsqueeze((1.0 - self._y) * self._x, dim=1)
        w10 = torch.unsqueeze(self._y * (1.0 - self._x), dim=1)
        w11 = torch.unsqueeze(self._y * self._x, dim=1)
        def _reshape(u):
            return _bhwc2bchw(u.view(batch_size, height, width, channels))
        # values: weighted sum of the four gathered neighbour values
        values = _reshape(self._v00)*w00 + _reshape(self._v01)*w01 \
                 + _reshape(self._v10)*w10 + _reshape(self._v11)*w11
        if self._clamp:
            return values
        else:
            # find_invalid: queries outside the image
            invalid = ((xq < 0) | (xq >= width) | (yq < 0) | (yq >= height)).unsqueeze(dim=1).float()
            # maskout invalid: zero out samples taken outside the image
            transformed = invalid * torch.zeros_like(values) + (1.0 - invalid)*values
            return transformed
def resize2D(inputs, size_targets, mode="bilinear"):
    """Resize a BCHW tensor to ``size_targets`` = [H, W].

    Returns ``inputs`` unchanged when the size already matches; downscales
    with adaptive average pooling and upscales with ``F.interpolate``
    (the deprecated ``F.upsample`` is avoided).
    """
    size_inputs = [inputs.size(2), inputs.size(3)]
    if size_inputs == size_targets:
        return inputs  # nothing to do
    # NOTE(review): lexicographic list comparison (kept from the original);
    # a mixed up/down resize still takes the pooling branch -- confirm intent.
    if size_targets < size_inputs:
        return tf.adaptive_avg_pool2d(inputs, size_targets)  # downscaling
    return tf.interpolate(inputs, size=size_targets, mode=mode)  # upsampling
def resize2D_as(inputs, output_as, mode="bilinear"):
    """Resize ``inputs`` (BCHW) to the spatial size of ``output_as``."""
    size_targets = [output_as.size(2), output_as.size(3)]
    return resize2D(inputs, size_targets, mode=mode)
ARFlow | ARFlow-master/transforms/ar_transforms/sp_transfroms.py | # Part of the code from https://github.com/visinf/irr/blob/master/augmentations.py
import torch
import torch.nn as nn
from transforms.ar_transforms.interpolation import Interp2
from transforms.ar_transforms.interpolation import Meshgrid
import numpy as np
def denormalize_coords(xx, yy, width, height):
    """ scale indices from [-1, 1] to [0, width/height] """
    px = 0.5 * (width - 1.0) * (xx.float() + 1.0)
    py = 0.5 * (height - 1.0) * (yy.float() + 1.0)
    return px, py
def normalize_coords(xx, yy, width, height):
    """ scale indices from [0, width/height] to [-1, 1] """
    nx = (2.0 / (width - 1.0)) * xx.float() - 1.0
    ny = (2.0 / (height - 1.0)) * yy.float() - 1.0
    return nx, ny
def apply_transform_to_params(theta0, theta_transform):
    """Compose two 2x3 affine transforms given as Bx6 parameter rows
    [a1 a2 a3 a4 a5 a6], returning the parameters of
    ``theta_transform`` applied after ``theta0``."""
    a1, a2, a3, a4, a5, a6 = (theta0[:, k] for k in range(6))
    b1, b2, b3, b4, b5, b6 = (theta_transform[:, k] for k in range(6))
    # First output row of the composed affine matrix.
    c1 = a1 * b1 + a4 * b2
    c2 = a2 * b1 + a5 * b2
    c3 = b3 + a3 * b1 + a6 * b2
    # Second output row of the composed affine matrix.
    c4 = a1 * b4 + a4 * b5
    c5 = a2 * b4 + a5 * b5
    c6 = b6 + a3 * b4 + a6 * b5
    return torch.stack([c1, c2, c3, c4, c5, c6], dim=1)
class _IdentityParams(nn.Module):
def __init__(self):
super(_IdentityParams, self).__init__()
self._batch_size = 0
self.register_buffer("_o", torch.FloatTensor())
self.register_buffer("_i", torch.FloatTensor())
def _update(self, batch_size):
torch.zeros([batch_size, 1], out=self._o)
torch.ones([batch_size, 1], out=self._i)
return torch.cat([self._i, self._o, self._o, self._o, self._i, self._o], dim=1)
def forward(self, batch_size):
if self._batch_size != batch_size:
self._identity_params = self._update(batch_size)
self._batch_size = batch_size
return self._identity_params
class RandomMirror(nn.Module):
    """Randomly mirror affine parameter vectors horizontally (and, when
    ``vertical`` is set, also vertically) by flipping the signs of the
    corresponding coefficients; the same per-batch Bernoulli(p) draw is
    applied to every theta in the list."""
    def __init__(self, vertical=True, p=0.5):
        super(RandomMirror, self).__init__()
        self._batch_size = 0
        self._p = p  # mirror probability per batch element
        self._vertical = vertical
        self.register_buffer("_mirror_probs", torch.FloatTensor())
    def update_probs(self, batch_size):
        # Cache a [B, 1] tensor filled with the mirror probability.
        torch.ones([batch_size, 1], out=self._mirror_probs)
        self._mirror_probs *= self._p
    def forward(self, theta_list):
        batch_size = theta_list[0].size(0)
        if batch_size != self._batch_size:
            self.update_probs(batch_size)
            self._batch_size = batch_size
        # apply random sign to a1 a2 a3 (these are the guys responsible for x)
        sign = torch.sign(2.0 * torch.bernoulli(self._mirror_probs) - 1.0)
        i = torch.ones_like(sign)
        horizontal_mirror = torch.cat([sign, sign, sign, i, i, i], dim=1)
        theta_list = [theta * horizontal_mirror for theta in theta_list]
        # apply random sign to a4 a5 a6 (these are the guys responsible for y)
        if self._vertical:
            sign = torch.sign(2.0 * torch.bernoulli(self._mirror_probs) - 1.0)
            vertical_mirror = torch.cat([i, i, i, sign, sign, sign], dim=1)
            theta_list = [theta * vertical_mirror for theta in theta_list]
        return theta_list
class RandomAffineFlow(nn.Module):
    """Spatial augmentation: samples random affine transforms per frame and
    applies them consistently to images, forward flows and occlusion masks.

    Transform parameters are re-sampled until the warped image corners stay
    inside the frame (see find_invalid), so no out-of-border sampling occurs.
    """
    def __init__(self, cfg, addnoise=True):
        super(RandomAffineFlow, self).__init__()
        self.cfg = cfg
        self._interp2 = Interp2(clamp=False)
        self._flow_interp2 = Interp2(clamp=False)
        self._meshgrid = Meshgrid()
        self._identity = _IdentityParams()
        # When hflip is disabled, RandomMirror(p=1) always draws sign +1,
        # i.e. mirroring is effectively turned off.
        self._random_mirror = RandomMirror(cfg.vflip) if cfg.hflip else RandomMirror(p=1)
        self._addnoise = addnoise
        self.register_buffer("_noise1", torch.FloatTensor())
        self.register_buffer("_noise2", torch.FloatTensor())
        # The four image corners in normalized [-1, 1] coords (used by find_invalid).
        self.register_buffer("_xbounds", torch.FloatTensor([-1, -1, 1, 1]))
        self.register_buffer("_ybounds", torch.FloatTensor([-1, 1, -1, 1]))
        self.register_buffer("_x", torch.IntTensor(1))
        self.register_buffer("_y", torch.IntTensor(1))
    def inverse_transform_coords(self, width, height, thetas, offset_x=None,
                                 offset_y=None):
        """Push the pixel grid (plus optional flow offsets) through `thetas`;
        returns pixel-space query coordinates (xq, yq)."""
        xx, yy = self._meshgrid(width=width, height=height)
        xx = torch.unsqueeze(xx, dim=0).float()
        yy = torch.unsqueeze(yy, dim=0).float()
        if offset_x is not None:
            xx = xx + offset_x
        if offset_y is not None:
            yy = yy + offset_y
        a1 = thetas[:, 0].contiguous().view(-1, 1, 1)
        a2 = thetas[:, 1].contiguous().view(-1, 1, 1)
        a3 = thetas[:, 2].contiguous().view(-1, 1, 1)
        a4 = thetas[:, 3].contiguous().view(-1, 1, 1)
        a5 = thetas[:, 4].contiguous().view(-1, 1, 1)
        a6 = thetas[:, 5].contiguous().view(-1, 1, 1)
        # The affine map operates in normalized [-1, 1] coordinates.
        xx, yy = normalize_coords(xx, yy, width=width, height=height)
        xq = a1 * xx + a2 * yy + a3
        yq = a4 * xx + a5 * yy + a6
        xq, yq = denormalize_coords(xq, yq, width=width, height=height)
        return xq, yq
    def transform_coords(self, width, height, thetas):
        """Push the pixel grid through the INVERSE of `thetas` (2x2 part is
        inverted analytically); returns pixel-space sampling coordinates."""
        xx1, yy1 = self._meshgrid(width=width, height=height)
        xx, yy = normalize_coords(xx1, yy1, width=width, height=height)
        def _unsqueeze12(u):
            return torch.unsqueeze(torch.unsqueeze(u, dim=1), dim=1)
        a1 = _unsqueeze12(thetas[:, 0])
        a2 = _unsqueeze12(thetas[:, 1])
        a3 = _unsqueeze12(thetas[:, 2])
        a4 = _unsqueeze12(thetas[:, 3])
        a5 = _unsqueeze12(thetas[:, 4])
        a6 = _unsqueeze12(thetas[:, 5])
        #
        # Closed-form inverse of the 2x2 linear part (z is its determinant).
        z = a1 * a5 - a2 * a4
        b1 = a5 / z
        b2 = - a2 / z
        b4 = - a4 / z
        b5 = a1 / z
        #
        xhat = xx - a3
        yhat = yy - a6
        xq = b1 * xhat + b2 * yhat
        yq = b4 * xhat + b5 * yhat
        xq, yq = denormalize_coords(xq, yq, width=width, height=height)
        return xq, yq
    def find_invalid(self, width, height, thetas):
        """Return a (N,1) byte mask: 1 where the inverse-transformed image
        corners fall outside [0, width) x [0, height)."""
        x = self._xbounds
        y = self._ybounds
        #
        a1 = torch.unsqueeze(thetas[:, 0], dim=1)
        a2 = torch.unsqueeze(thetas[:, 1], dim=1)
        a3 = torch.unsqueeze(thetas[:, 2], dim=1)
        a4 = torch.unsqueeze(thetas[:, 3], dim=1)
        a5 = torch.unsqueeze(thetas[:, 4], dim=1)
        a6 = torch.unsqueeze(thetas[:, 5], dim=1)
        #
        # Same analytic 2x2 inverse as in transform_coords.
        z = a1 * a5 - a2 * a4
        b1 = a5 / z
        b2 = - a2 / z
        b4 = - a4 / z
        b5 = a1 / z
        #
        xhat = x - a3
        yhat = y - a6
        xq = b1 * xhat + b2 * yhat
        yq = b4 * xhat + b5 * yhat
        xq, yq = denormalize_coords(xq, yq, width=width, height=height)
        #
        invalid = (
            (xq < 0) | (yq < 0) | (xq >= width) | (yq >= height)
        ).sum(dim=1, keepdim=True) > 0
        return invalid
    def apply_random_transforms_to_params(self,
                                          theta0,
                                          max_translate,
                                          min_zoom, max_zoom,
                                          min_squeeze, max_squeeze,
                                          min_rotate, max_rotate,
                                          validate_size=None):
        """Sample random zoom/squeeze/translation/rotation on top of theta0,
        re-sampling per-example until every transform keeps the image corners
        in bounds (rejection sampling against find_invalid)."""
        # Presumably halved because translations act in normalized [-1, 1]
        # coordinates, where the full width spans 2 units — TODO confirm.
        max_translate *= 0.5
        batch_size = theta0.size(0)
        height, width = validate_size
        # collect valid params here
        thetas = torch.zeros_like(theta0)
        zoom = theta0.new(batch_size, 1).zero_()
        squeeze = torch.zeros_like(zoom)
        tx = torch.zeros_like(zoom)
        ty = torch.zeros_like(zoom)
        phi = torch.zeros_like(zoom)
        invalid = torch.ones_like(zoom).byte()
        while invalid.sum() > 0:
            # random sampling
            zoom.uniform_(min_zoom, max_zoom)
            squeeze.uniform_(min_squeeze, max_squeeze)
            tx.uniform_(-max_translate, max_translate)
            ty.uniform_(-max_translate, max_translate)
            phi.uniform_(min_rotate, max_rotate)
            # construct affine parameters
            sx = zoom * squeeze
            sy = zoom / squeeze
            sin_phi = torch.sin(phi)
            cos_phi = torch.cos(phi)
            b1 = cos_phi * sx
            b2 = sin_phi * sy
            b3 = tx
            b4 = - sin_phi * sx
            b5 = cos_phi * sy
            b6 = ty
            theta_transform = torch.cat([b1, b2, b3, b4, b5, b6], dim=1)
            theta_try = apply_transform_to_params(theta0, theta_transform)
            # Keep previously valid rows; replace only still-invalid ones.
            # NOTE(review): `(1 - invalid)` relies on byte-tensor arithmetic —
            # verify against the pinned torch version (bool tensors reject it).
            thetas = invalid.float() * theta_try + (1 - invalid).float() * thetas
            # compute new invalid ones
            invalid = self.find_invalid(width=width, height=height, thetas=thetas)
        # here we should have good thetas within borders
        return thetas
    def transform_image(self, images, thetas):
        """Warp an image batch by the affine `thetas` via bilinear sampling."""
        batch_size, channels, height, width = images.size()
        xq, yq = self.transform_coords(width=width, height=height, thetas=thetas)
        transformed = self._interp2(images, xq, yq)
        return transformed
    def transform_flow(self, flow, theta1, theta2):
        """Recompute a forward flow (frame1 -> frame2) after frame1 is warped
        by theta1 and frame2 by theta2, then resample it onto frame1's grid."""
        batch_size, channels, height, width = flow.size()
        u = flow[:, 0, :, :]
        v = flow[:, 1, :, :]
        # inverse transform coords
        x0, y0 = self.inverse_transform_coords(
            width=width, height=height, thetas=theta1)
        x1, y1 = self.inverse_transform_coords(
            width=width, height=height, thetas=theta2, offset_x=u, offset_y=v)
        # subtract and create new flow
        u = x1 - x0
        v = y1 - y0
        new_flow = torch.stack([u, v], dim=1)
        # transform coords
        xq, yq = self.transform_coords(width=width, height=height, thetas=theta1)
        # interp2
        transformed = self._flow_interp2(new_flow, xq, yq)
        return transformed
    def forward(self, data):
        """Augment data['imgs'] / data['flows_f'] / data['masks_f'] in a
        geometrically consistent way; optionally add clipped Gaussian noise."""
        # 01234 flow 12 21 23 32
        imgs = data['imgs']
        flows_f = data['flows_f']
        masks_f = data['masks_f']
        batch_size, _, height, width = imgs[0].size()
        # identity = no transform
        theta0 = self._identity(batch_size)
        # global transform
        theta_list = [self.apply_random_transforms_to_params(
            theta0,
            max_translate=self.cfg.trans[0],
            min_zoom=self.cfg.zoom[0], max_zoom=self.cfg.zoom[1],
            min_squeeze=self.cfg.squeeze[0], max_squeeze=self.cfg.squeeze[1],
            min_rotate=self.cfg.rotate[0], max_rotate=self.cfg.rotate[1],
            validate_size=[height, width])
        ]
        # relative transform
        for i in range(len(imgs) - 1):
            theta_list.append(
                self.apply_random_transforms_to_params(
                    theta_list[-1],
                    max_translate=self.cfg.trans[1],
                    min_zoom=self.cfg.zoom[2], max_zoom=self.cfg.zoom[3],
                    min_squeeze=self.cfg.squeeze[2], max_squeeze=self.cfg.squeeze[3],
                    # NOTE(review): max_rotate uses rotate[2] while zoom/squeeze
                    # use indices [2],[3] — looks like a typo for rotate[3]
                    # (as written, relative rotation is constant). Confirm.
                    min_rotate=self.cfg.rotate[2], max_rotate=self.cfg.rotate[2],
                    validate_size=[height, width])
            )
        # random flip images
        theta_list = self._random_mirror(theta_list)
        # 01234
        imgs = [self.transform_image(im, theta) for im, theta in zip(imgs, theta_list)]
        if len(imgs) > 2:
            theta_list = theta_list[1:-1]
        # 12 23
        flows_f = [self.transform_flow(flo, theta1, theta2) for flo, theta1, theta2 in
                   zip(flows_f, theta_list[:-1], theta_list[1:])]
        masks_f = [self.transform_image(mask, theta) for mask, theta in
                   zip(masks_f, theta_list)]
        if self._addnoise:
            # One noise std per batch, drawn uniformly; images clipped to [0, 1].
            stddev = np.random.uniform(0.0, 0.04)
            for im in imgs:
                noise = torch.zeros_like(im)
                noise.normal_(std=stddev)
                im.add_(noise)
                im.clamp_(0.0, 1.0)
        data['imgs'] = imgs
        data['flows_f'] = flows_f
        data['masks_f'] = masks_f
        return data
| 12,154 | 34.437318 | 89 | py |
ARFlow | ARFlow-master/transforms/ar_transforms/ap_transforms.py | import numpy as np
import torch
from torchvision import transforms as tf
from PIL import ImageFilter
def get_ap_transforms(cfg):
    """Build the appearance (photometric) augmentation pipeline from `cfg`.

    PIL-space transforms (color jitter, Gaussian blur) run before ToTensor;
    gamma correction runs on tensors afterwards. Each transform in this module
    draws its random parameters once per call and applies them to every image
    in the input list, so all frames of a sample stay photometrically consistent.
    """
    transforms = [ToPILImage()]
    if cfg.cj:
        transforms.append(ColorJitter(brightness=cfg.cj_bri,
                                      contrast=cfg.cj_con,
                                      saturation=cfg.cj_sat,
                                      hue=cfg.cj_hue))
    if cfg.gblur:
        transforms.append(RandomGaussianBlur(0.5, 3))
    transforms.append(ToTensor())
    if cfg.gamma:
        transforms.append(RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True))
    return tf.Compose(transforms)
# from https://github.com/visinf/irr/blob/master/datasets/transforms.py
class ToPILImage(tf.ToPILImage):
    """List-aware wrapper: converts every tensor in the list to a PIL image."""
    def __call__(self, imgs):
        convert = super(ToPILImage, self).__call__
        return [convert(im) for im in imgs]
class ColorJitter(tf.ColorJitter):
    """List-aware jitter: one random transform is sampled per call and applied
    to every image, keeping a frame pair photometrically consistent."""
    def __call__(self, imgs):
        jitter = self.get_params(self.brightness, self.contrast,
                                 self.saturation, self.hue)
        out = []
        for im in imgs:
            out.append(jitter(im))
        return out
class ToTensor(tf.ToTensor):
    """List-aware wrapper: converts every PIL image in the list to a tensor."""
    def __call__(self, imgs):
        to_tensor = super(ToTensor, self).__call__
        return [to_tensor(im) for im in imgs]
class RandomGamma():
    """Applies one randomly drawn gamma correction to every image in a list."""

    def __init__(self, min_gamma=0.7, max_gamma=1.5, clip_image=False):
        self._min_gamma = min_gamma
        self._max_gamma = max_gamma
        self._clip_image = clip_image

    @staticmethod
    def get_params(min_gamma, max_gamma):
        # Single gamma per call, shared by all images.
        return np.random.uniform(min_gamma, max_gamma)

    @staticmethod
    def adjust_gamma(image, gamma, clip_image):
        out = image ** gamma
        if clip_image:
            out.clamp_(0.0, 1.0)
        return out

    def __call__(self, imgs):
        gamma = self.get_params(self._min_gamma, self._max_gamma)
        adjusted = []
        for im in imgs:
            adjusted.append(self.adjust_gamma(im, gamma, self._clip_image))
        return adjusted
class RandomGaussianBlur():
    """With probability p, blurs all images in the list with one shared
    Gaussian radius drawn uniformly from [0, max_k_sz)."""

    def __init__(self, p, max_k_sz):
        self.p = p
        self.max_k_sz = max_k_sz

    def __call__(self, imgs):
        if np.random.random() >= self.p:
            return imgs
        radius = np.random.uniform(0, self.max_k_sz)
        blur = ImageFilter.GaussianBlur(radius)
        return [im.filter(blur) for im in imgs]
| 2,275 | 30.611111 | 85 | py |
ARFlow | ARFlow-master/transforms/ar_transforms/oc_transforms.py | import numpy as np
import torch
# from skimage.color import rgb2yuv
import cv2
from fast_slic.avx2 import SlicAvx2 as Slic
from skimage.segmentation import slic as sk_slic
def run_slic_pt(img_batch, n_seg=200, compact=10, rd_select=(8, 16), fast=True): # Nx1xHxW
    """Run SLIC superpixel segmentation on an image batch, optionally keeping
    a random subset of segments as a binary mask.

    :param img_batch: Nx3xHxW float32 tensor in [0, 1]
    :param n_seg: target number of superpixels
    :param compact: SLIC compactness parameter
    :param rd_select: (low, high) range for how many segments to keep;
        None returns the raw label map instead of a binary union
    :param fast: use fast_slic on LAB images instead of skimage's slic
    :return: Nx1xHxW tensor of the same dtype as the input
    """
    B = img_batch.size(0)
    dtype = img_batch.type()
    # To NHWC uint8 numpy, one array per batch element.
    img_batch = np.split(
        img_batch.detach().cpu().numpy().transpose([0, 2, 3, 1]), B, axis=0)
    out = []
    if fast:
        fast_slic = Slic(num_components=n_seg, compactness=compact, min_size_factor=0.8)
    for img in img_batch:
        img = np.copy((img * 255).squeeze(0).astype(np.uint8), order='C')
        if fast:
            img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
            seg = fast_slic.iterate(img)
        else:
            # Bug fix: the slow path previously hard-coded n_segments=200 and
            # compactness=10, silently ignoring the function arguments.
            seg = sk_slic(img, n_segments=n_seg, compactness=compact)
        if rd_select is not None:
            # Union of a random subset of segment ids -> binary mask.
            n_select = np.random.randint(rd_select[0], rd_select[1])
            select_list = np.random.choice(range(0, np.max(seg) + 1), n_select,
                                           replace=False)
            seg = np.bitwise_or.reduce([seg == seg_id for seg_id in select_list])
        out.append(seg)
    x_out = torch.tensor(np.stack(out)).type(dtype).unsqueeze(1)
    return x_out
def random_crop(img, flow, occ_mask, crop_sz):
    """Crop the same random window out of an image batch, its flow and its mask.

    :param img: Nx6xHxW image-pair batch
    :param flow: Nx2xHxW flow batch
    :param occ_mask: Nx1xHxW occlusion-mask batch
    :param crop_sz: (crop_height, crop_width)
    :return: the three inputs cropped to crop_sz (unchanged if already that size)
    """
    _, _, h, w = img.size()
    c_h, c_w = crop_sz
    if c_h == h and c_w == w:
        return img, flow, occ_mask
    # randint's upper bound is exclusive, so +1 lets the crop touch the
    # right/bottom border. This also fixes a crash (randint(0, 0) raises)
    # when the crop equals the image size along exactly one dimension.
    x1 = np.random.randint(0, w - c_w + 1)
    y1 = np.random.randint(0, h - c_h + 1)
    img = img[:, :, y1:y1 + c_h, x1: x1 + c_w]
    flow = flow[:, :, y1:y1 + c_h, x1: x1 + c_w]
    occ_mask = occ_mask[:, :, y1:y1 + c_h, x1: x1 + c_w]
    return img, flow, occ_mask
| 1,952 | 29.046154 | 91 | py |
ARFlow | ARFlow-master/losses/flow_loss.py | import torch.nn as nn
import torch.nn.functional as F
from .loss_blocks import SSIM, smooth_grad_1st, smooth_grad_2nd, TernaryLoss
from utils.warp_utils import flow_warp
from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward
class unFlowLoss(nn.modules.Module):
    """Unsupervised optical-flow loss: occlusion-masked photometric terms
    (L1 / SSIM / ternary) plus edge-aware smoothness, summed over the pyramid."""
    def __init__(self, cfg):
        super(unFlowLoss, self).__init__()
        self.cfg = cfg
    def loss_photomatric(self, im1_scaled, im1_recons, occu_mask1):
        """Weighted photometric loss over the non-occluded region, normalized
        by the mean mask value so sparser masks are not trivially cheaper."""
        loss = []
        if self.cfg.w_l1 > 0:
            loss += [self.cfg.w_l1 * (im1_scaled - im1_recons).abs() * occu_mask1]
        if self.cfg.w_ssim > 0:
            loss += [self.cfg.w_ssim * SSIM(im1_recons * occu_mask1,
                                            im1_scaled * occu_mask1)]
        if self.cfg.w_ternary > 0:
            loss += [self.cfg.w_ternary * TernaryLoss(im1_recons * occu_mask1,
                                                      im1_scaled * occu_mask1)]
        return sum([l.mean() for l in loss]) / occu_mask1.mean()
    def loss_smooth(self, flow, im1_scaled):
        """Edge-aware smoothness; 2nd-order if cfg.smooth_2nd, else 1st-order."""
        if 'smooth_2nd' in self.cfg and self.cfg.smooth_2nd:
            func_smooth = smooth_grad_2nd
        else:
            func_smooth = smooth_grad_1st
        loss = []
        loss += [func_smooth(flow, im1_scaled, self.cfg.alpha)]
        return sum([l.mean() for l in loss])
    def forward(self, output, target):
        """
        :param output: Multi-scale forward/backward flows n * [B x 4 x h x w]
        :param target: image pairs Nx6xHxW
        :return: (total_loss, warp_loss, smooth_loss, mean |flow| at full scale)
        """
        pyramid_flows = output
        im1_origin = target[:, :3]
        im2_origin = target[:, 3:]
        pyramid_smooth_losses = []
        pyramid_warp_losses = []
        self.pyramid_occu_mask1 = []
        self.pyramid_occu_mask2 = []
        s = 1.
        for i, flow in enumerate(pyramid_flows):
            # Skip levels with zero weight entirely.
            if self.cfg.w_scales[i] == 0:
                pyramid_warp_losses.append(0)
                pyramid_smooth_losses.append(0)
                continue
            b, _, h, w = flow.size()
            # resize images to match the size of layer
            im1_scaled = F.interpolate(im1_origin, (h, w), mode='area')
            im2_scaled = F.interpolate(im2_origin, (h, w), mode='area')
            # Warp each image toward the other using the respective flow half.
            im1_recons = flow_warp(im2_scaled, flow[:, :2], pad=self.cfg.warp_pad)
            im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad=self.cfg.warp_pad)
            if i == 0:
                # Occlusion masks are estimated once at the finest level...
                if self.cfg.occ_from_back:
                    occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], th=0.2)
                    occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], th=0.2)
                else:
                    occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:])
                    occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2])
            else:
                # ...and downsampled (nearest) for the coarser levels.
                occu_mask1 = F.interpolate(self.pyramid_occu_mask1[0],
                                           (h, w), mode='nearest')
                occu_mask2 = F.interpolate(self.pyramid_occu_mask2[0],
                                           (h, w), mode='nearest')
            self.pyramid_occu_mask1.append(occu_mask1)
            self.pyramid_occu_mask2.append(occu_mask2)
            loss_warp = self.loss_photomatric(im1_scaled, im1_recons, occu_mask1)
            if i == 0:
                # Flow is normalized by the finest-level short side before
                # the smoothness term.
                s = min(h, w)
            loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled)
            if self.cfg.with_bk:
                # Symmetric backward losses, averaged with the forward ones.
                loss_warp += self.loss_photomatric(im2_scaled, im2_recons,
                                                   occu_mask2)
                loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled)
                loss_warp /= 2.
                loss_smooth /= 2.
            pyramid_warp_losses.append(loss_warp)
            pyramid_smooth_losses.append(loss_smooth)
        pyramid_warp_losses = [l * w for l, w in
                               zip(pyramid_warp_losses, self.cfg.w_scales)]
        pyramid_smooth_losses = [l * w for l, w in
                                 zip(pyramid_smooth_losses, self.cfg.w_sm_scales)]
        warp_loss = sum(pyramid_warp_losses)
        smooth_loss = self.cfg.w_smooth * sum(pyramid_smooth_losses)
        total_loss = warp_loss + smooth_loss
        return total_loss, warp_loss, smooth_loss, pyramid_flows[0].abs().mean()
| 4,395 | 37.226087 | 88 | py |
ARFlow | ARFlow-master/losses/loss_blocks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
# Crecit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
    """Soft census-transform distance between an image and its warped version.

    Returns an Nx1xHxW distance map, zeroed on the border of width
    `max_distance` where the census patch is incomplete.
    """
    patch_size = 2 * max_distance + 1

    def _to_gray(image):
        # Standard RGB -> luma weights.
        r, g, b = image[:, 0, :, :], image[:, 1, :, :], image[:, 2, :, :]
        return (r * 0.2989 + g * 0.5870 + b * 0.1140).unsqueeze(1)

    def _census(image):
        intensities = _to_gray(image) * 255
        n = patch_size * patch_size
        # One-hot conv kernels extract each neighbor within the patch.
        kernel = torch.eye(n).view((n, 1, patch_size, patch_size)).type_as(im)
        neighbors = F.conv2d(intensities, kernel, padding=max_distance)
        diff = neighbors - intensities
        # Soft sign normalization of the neighbor differences.
        return diff / torch.sqrt(0.81 + diff * diff)

    def _soft_hamming(t1, t2):
        sq = (t1 - t2) ** 2
        # Mean (not sum) of the robust per-channel distances.
        return torch.mean(sq / (0.1 + sq), 1, keepdim=True)

    def _border_mask(t, pad):
        n, _, h, w = t.size()
        core = torch.ones(n, 1, h - 2 * pad, w - 2 * pad).type_as(t)
        return F.pad(core, [pad] * 4)

    dist = _soft_hamming(_census(im), _census(im_warp))
    return dist * _border_mask(im, max_distance)
def SSIM(x, y, md=1):
    """Per-pixel structural dissimilarity, (1 - SSIM) / 2, clamped to [0, 1].

    Local statistics are computed over (2*md+1)-sized patches with no padding,
    so the output's spatial size shrinks by 2*md.
    """
    patch_size = 2 * md + 1
    C1, C2 = 0.01 ** 2, 0.03 ** 2
    pool = nn.AvgPool2d(patch_size, 1, 0)
    mu_x = pool(x)
    mu_y = pool(y)
    var_x = pool(x * x) - mu_x ** 2
    var_y = pool(y * y) - mu_y ** 2
    cov_xy = pool(x * y) - mu_x * mu_y
    numerator = (2 * mu_x * mu_y + C1) * (2 * cov_xy + C2)
    denominator = (mu_x ** 2 + mu_y ** 2 + C1) * (var_x + var_y + C2)
    return torch.clamp((1 - numerator / denominator) / 2, 0, 1)
def gradient(data):
    """Forward differences of an NCHW tensor along width (dx) and height (dy)."""
    dx = data[:, :, :, 1:] - data[:, :, :, :-1]
    dy = data[:, :, 1:] - data[:, :, :-1]
    return dx, dy
def smooth_grad_1st(flo, image, alpha):
    """Edge-aware first-order flow smoothness: flow gradients are penalized
    less where the image has strong gradients (weights decay with exp)."""
    img_dx, img_dy = gradient(image)
    w_x = torch.exp(-alpha * torch.abs(img_dx).mean(1, keepdim=True))
    w_y = torch.exp(-alpha * torch.abs(img_dy).mean(1, keepdim=True))
    flo_dx, flo_dy = gradient(flo)
    loss_x = w_x * flo_dx.abs() / 2.
    loss_y = w_y * flo_dy.abs() / 2
    return loss_x.mean() / 2. + loss_y.mean() / 2.
def smooth_grad_2nd(flo, image, alpha):
    """Edge-aware second-order flow smoothness.

    Penalizes the pure second derivatives of the flow (dx2, dy2), weighted
    down where the image itself has strong gradients.
    """
    img_dx, img_dy = gradient(image)
    weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
    weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
    dx, dy = gradient(flo)
    # Only the pure second derivatives are used; the mixed terms (dxdy, dydx)
    # were previously computed and discarded.
    dx2, _ = gradient(dx)
    _, dy2 = gradient(dy)
    # Weights are cropped to match the second-difference spatial extents.
    loss_x = weights_x[:, :, :, 1:] * dx2.abs()
    loss_y = weights_y[:, :, 1:, :] * dy2.abs()
    return loss_x.mean() / 2. + loss_y.mean() / 2.
| 3,197 | 30.98 | 87 | py |
myriad | myriad-main/run.py | # (c) 2021 Nikolaus Howe
import numpy as np
import random
from jax.config import config
from myriad.experiments.e2e_sysid import run_endtoend
from myriad.experiments.mle_sysid import run_mle_sysid
from myriad.experiments.node_e2e_sysid import run_node_endtoend
from myriad.experiments.node_mle_sysid import run_node_mle_sysid
from myriad.useful_scripts import run_setup, run_trajectory_opt, load_node_and_plan
from myriad.probing_numerical_instability import probe, special_probe
from myriad.utils import integrate_time_independent, yield_minibatches
config.update("jax_enable_x64", True)
run_buddy = False
def main():
  """Entry point: set up hparams/config, seed RNGs, and run the selected
  experiment (the inactive experiments below are toggled by un-commenting)."""
  #########
  # Setup #
  #########
  hp, cfg = run_setup()
  random.seed(hp.seed)
  np.random.seed(hp.seed)
  if run_buddy:
    # Optional experiment-buddy deployment (cluster runs / sweeps).
    # random.seed(hp.seed)
    # np.random.seed(hp.seed)
    import experiment_buddy
    experiment_buddy.register(hp.__dict__)
    # tensorboard = experiment_buddy.deploy(host='mila', sweep_yaml="sweep.yaml")
    tensorboard = experiment_buddy.deploy(host='mila', sweep_yaml="")
    # tensorboard = experiment_buddy.deploy(host='', sweep_yaml='')
  ########################################
  # Probing Systems' Numerical Stability #
  ########################################
  # for st in SystemType:
  #   if st in [SystemType.SIMPLECASE, SystemType.INVASIVEPLANT]:
  #     continue
  #   print("system", st)
  #   hp.system = st
  #   probe(hp, cfg)
  # probe(hp, cfg)
  # special_probe(hp, cfg)
  ###########################################
  # Trajectory optimization with true model #
  ###########################################
  run_trajectory_opt(hp, cfg, save_as='traj_opt_example.pdf')
  ######################
  # MLE model learning #
  ######################
  # Parametric, MLE
  # run_mle_sysid(hp, cfg)
  # NODE, MLE
  # run_node_mle_sysid(hp, cfg)
  #############################
  # End to end model learning #
  #############################
  # Parametric, end-to-end
  # run_endtoend(hp, cfg)
  # NODE, end-to-end
  # run_node_endtoend(hp, cfg)
  ###############
  # Noise study #
  ###############
  # study_noise(hp, cfg, experiment_string='mle_sysid')
  # study_noise(hp, cfg, experiment_string='node_mle_sysid')
  ##################
  # Dynamics study #
  ##################
  # study_vector_field(hp, cfg, 'mle', 0)
  # study_vector_field(hp, cfg, 'e2e', 0, file_extension='pdf')
if __name__ == '__main__':
main()
| 2,407 | 26.363636 | 83 | py |
myriad | myriad-main/tests/tests.py | # (c) Nikolaus Howe 2021
from scipy.integrate import odeint
import jax.numpy as jnp
import numpy as np
import sys
import unittest
from run import run_trajectory_opt
from myriad.config import IntegrationMethod, NLPSolverType, OptimizerType, QuadratureRule, SystemType
from myriad.custom_types import State, Control, Timestep, States
from myriad.useful_scripts import run_setup
from myriad.utils import integrate
hp, cfg = run_setup(sys.argv, gin_path='../source/gin-configs/default.gin')
class BasicTests(unittest.TestCase):
  """Sanity checks for Myriad's numerical-integration utilities."""
  def test_integrate(self):
    """Compare our RK4 `integrate` against scipy's odeint on dx/dt = x."""
    # Perform integration using odeint
    def f(t: Timestep, state: State) -> States:
      return state
    y0 = jnp.array([1.])
    t = [0., 1.]
    result_odeint = odeint(f, y0, t, tfirst=True)
    # Perform integration using our 'integrate'
    N = 100
    t = jnp.linspace(0., 1., N)
    h = t[1]
    def f_wrapper(state: State, control: Control, time: Timestep) -> States:
      del control
      return f(time, state)
    _, found_states = integrate(f_wrapper, y0, t, h, N - 1, t, integration_method=IntegrationMethod.RK4)
    # Check that we get similar enough results.
    # Bug fix: the failure message previously had the two values swapped
    # (result_odeint is scipy's answer, found_states is ours).
    np.testing.assert_almost_equal(result_odeint[-1], found_states[-1], decimal=6,
                                   err_msg=f'our integrator gave {found_states[-1]}, '
                                           f'but it should have given {result_odeint[-1]}',
                                   verbose=True)
class OptimizerTests(unittest.TestCase):
  """End-to-end checks that each optimizer configuration solves SIMPLECASE."""

  def _solve(self, **overrides):
    """Reset the shared hparams to the common baseline, apply per-test
    overrides, and run trajectory optimization.

    Note: `hp` is module-level shared state; each test sets exactly the
    fields the original hand-written tests set, so behavior is unchanged.
    """
    global hp, cfg
    hp.seed = 42
    hp.system = SystemType.SIMPLECASE
    hp.nlpsolver = NLPSolverType.SLSQP
    hp.max_iter = 1000
    for field, value in overrides.items():
      setattr(hp, field, value)
    run_trajectory_opt(hp, cfg)

  def test_single_shooting(self):
    self._solve(optimizer=OptimizerType.SHOOTING,
                integration_method=IntegrationMethod.HEUN,
                intervals=1, controls_per_interval=50)

  def test_multiple_shooting(self):
    self._solve(optimizer=OptimizerType.SHOOTING,
                integration_method=IntegrationMethod.HEUN,
                intervals=20, controls_per_interval=3)

  def test_dense_multiple_shooting(self):
    self._solve(optimizer=OptimizerType.SHOOTING,
                integration_method=IntegrationMethod.HEUN,
                intervals=50, controls_per_interval=1)

  def test_trapezoidal_collocation(self):
    self._solve(optimizer=OptimizerType.COLLOCATION,
                integration_method=IntegrationMethod.HEUN,
                quadrature_rule=QuadratureRule.TRAPEZOIDAL,
                intervals=50, controls_per_interval=1)

  def test_hermite_simpson_collocation(self):
    self._solve(optimizer=OptimizerType.COLLOCATION,
                integration_method=IntegrationMethod.RK4,
                quadrature_rule=QuadratureRule.HERMITE_SIMPSON,
                intervals=50, controls_per_interval=1)
class IntegrationMethodTests(unittest.TestCase):
  """Checks that dense single shooting works with every integration scheme."""

  def _solve(self, integration_method):
    """Solve SIMPLECASE by dense single shooting with the given integrator.
    All other fields match the original hand-written tests exactly."""
    global hp, cfg
    hp.seed = 42
    hp.system = SystemType.SIMPLECASE
    hp.optimizer = OptimizerType.SHOOTING
    hp.nlpsolver = NLPSolverType.SLSQP
    hp.integration_method = integration_method
    hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
    hp.max_iter = 1000
    hp.intervals = 50
    hp.controls_per_interval = 1
    run_trajectory_opt(hp, cfg)

  def test_euler(self):
    self._solve(IntegrationMethod.EULER)

  def test_heun(self):
    self._solve(IntegrationMethod.HEUN)

  def test_midpoint(self):
    self._solve(IntegrationMethod.MIDPOINT)

  def test_RK4(self):
    self._solve(IntegrationMethod.RK4)
class QuadratureRuleTests(unittest.TestCase):
  """Checks collocation with each quadrature rule (paired with its integrator)."""

  def _solve(self, quadrature_rule, integration_method):
    """Solve SIMPLECASE by collocation with the given rule/integrator pair."""
    global hp, cfg
    hp.seed = 42
    hp.system = SystemType.SIMPLECASE
    hp.optimizer = OptimizerType.COLLOCATION
    hp.nlpsolver = NLPSolverType.SLSQP
    hp.integration_method = integration_method
    hp.quadrature_rule = quadrature_rule
    hp.max_iter = 1000
    hp.intervals = 50
    hp.controls_per_interval = 1
    run_trajectory_opt(hp, cfg)

  def test_trapozoidal(self):
    # (name kept as-is for test-id stability, typo and all)
    self._solve(QuadratureRule.TRAPEZOIDAL, IntegrationMethod.HEUN)

  def test_hermite_simpson(self):
    self._solve(QuadratureRule.HERMITE_SIMPSON, IntegrationMethod.RK4)
class NLPSolverTests(unittest.TestCase):
  """Checks dense single shooting with each NLP solver backend."""

  def _solve(self, nlpsolver, system=SystemType.PENDULUM):
    """Solve `system` by dense single shooting with the given NLP solver.
    Extragradient uses SIMPLECASE, matching the original hand-written test."""
    hp.seed = 42
    hp.system = system
    hp.optimizer = OptimizerType.SHOOTING
    hp.nlpsolver = nlpsolver
    hp.integration_method = IntegrationMethod.HEUN
    hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
    hp.max_iter = 1000
    hp.intervals = 50
    hp.controls_per_interval = 1
    run_trajectory_opt(hp, cfg)

  def test_ipopt(self):
    self._solve(NLPSolverType.IPOPT)

  def test_slsqp(self):
    self._solve(NLPSolverType.SLSQP)

  def test_trust_constr(self):
    self._solve(NLPSolverType.TRUST)

  def test_extragradient(self):
    self._solve(NLPSolverType.EXTRAGRADIENT, system=SystemType.SIMPLECASE)
if __name__ == '__main__':
unittest.main()
| 7,521 | 26.654412 | 104 | py |
myriad | myriad-main/tests/test_smoke.py | import random
import unittest
import jax
import numpy as np
from myriad.config import Config, SystemType, HParams, OptimizerType
from myriad.trajectory_optimizers import get_optimizer
from myriad.systems import IndirectFHCS
from myriad.plotting import plot_result
# import os
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# Hyper-parameter presets for each trajectory-optimization approach exercised
# by the smoke test; each value dict is unpacked into HParams(**...).
approaches = {
  'fbsm': {'optimizer': OptimizerType.FBSM, 'fbsm_intervals': 1000},
  'single_shooting': {'optimizer': OptimizerType.SHOOTING, 'intervals': 1, 'controls_per_interval': 90},
  'multiple_shooting_3_controls': {'optimizer': OptimizerType.SHOOTING, 'intervals': 30, 'controls_per_interval': 3},
  'multiple_shooting_1_control': {'optimizer': OptimizerType.SHOOTING, 'intervals': 90, 'controls_per_interval': 1},
  'collocation': {'optimizer': OptimizerType.COLLOCATION, 'intervals': 90, 'controls_per_interval': 1}
}
# Test that experiments run without raising exceptions
class SmokeTest(unittest.TestCase):
def setUp(self):
jax.config.update("jax_enable_x64", True)
def test_smoke(self):
for system in SystemType:
for approach in approaches:
hp = HParams(system=system, **approaches[approach]) # unpack the hparams for this approach
# TODO: add adjoint dynamics to those systems, so that FBSM can be used
# (FBSM doesn't work on environments without adjoint dynamics)
if hp.optimizer == OptimizerType.FBSM and not issubclass(system.value, IndirectFHCS):
continue
# Invasive plant is a discrete system, so it only works with FBSM
if hp.system == SystemType.INVASIVEPLANT:
continue
with self.subTest(system=hp.system, optimizer=hp.optimizer):
cfg = Config(verbose=True)
random.seed(hp.seed)
np.random.seed(hp.seed)
_system = hp.system()
optimizer = get_optimizer(hp, cfg, _system)
print("calling optimizer", hp.optimizer.name)
results = optimizer.solve()
print("solution", results[0].shape)
print("now for plotting")
# Plot the solution, using system-specific plotting where present
# plot_solution = getattr(_system, "plot_solution", None)
# if callable(plot_solution):
# print("using custom plotting")
# plot_solution(*results)
# else:
print("using default plotting")
plot_result(results, hp, save_as=approach+hp.system.name+"_test")
if __name__=='__main__':
unittest.main()
| 2,495 | 36.253731 | 117 | py |
myriad | myriad-main/myriad/custom_types.py | # (c) Nikolaus Howe 2021
import jax.numpy as jnp
from typing import Callable, Mapping, Optional, Union
# Type aliases used throughout Myriad for readability. Most arrays are JAX
# ndarrays; scalar-like quantities may be plain floats or 0-d arrays.
Batch = jnp.ndarray
Control = Union[float, jnp.ndarray]
Controls = jnp.ndarray
Cost = float
Dataset = jnp.ndarray
Defect = jnp.ndarray
# Parameter / solution mappings: string keys to floats or arrays.
DParams = Mapping[str, Union[float, jnp.ndarray]]
DState = Union[float, jnp.ndarray]
DStates = jnp.ndarray
Epoch = int
Params = Mapping[str, Union[float, jnp.ndarray]]
Solution = Mapping[str, Union[float, jnp.ndarray]]
State = Union[float, jnp.ndarray]
States = jnp.ndarray
Timestep = int
# Callable signatures: cost and dynamics functions of (state, control[, time]).
CostFun = Callable[[State, Control, Optional[Timestep]], Cost]
DynamicsFun = Callable[[State, Control, Optional[Timestep]], DState]
| 663 | 25.56 | 68 | py |
myriad | myriad-main/myriad/plotting.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnchoredText
from typing import Dict, Optional, Tuple
from myriad.config import SystemType, IntegrationMethod, OptimizerType, HParams
from myriad.systems import state_descriptions, control_descriptions
from myriad.systems import get_name
def plot_losses(hp, path_to_csv, save_as=None):
  """Plot train/validation loss curves from a CSV of (epoch, train, val) rows.

  Consecutive training runs in the CSV restart their epoch counter at 0;
  the loop below stitches them into one monotone epoch axis and marks each
  restart with a dashed vertical line. Saving to .pgf/.pdf switches to the
  pgf backend with LaTeX fonts.
  """
  etv = np.genfromtxt(path_to_csv, delimiter=',')
  if len(etv) == 10000: # TODO: remove except for ne2e
    print("clipping to 9999")
    etv = etv[:-1]
  epochs = etv[:, 0]
  train = etv[:, 1]
  val = etv[:, 2]
  if save_as is not None and save_as.endswith(('pgf', 'pdf')):
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
      "pgf.texsystem": "pdflatex",
      'font.family': 'serif',
      'text.usetex': True,
      'pgf.rcfonts': False,
    })
  title = get_name(hp)
  print("title is", title)
  plt.figure(figsize=(4.5, 3.5))
  fixed_epochs = []
  transitions = []
  offset = 0
  previous = 0
  for i, epoch in enumerate(epochs):
    # A zero after the first row means a new training run started.
    if i > 0 and epoch == 0:
      offset += previous
      transitions.append(epoch + offset)
    fixed_epochs.append(epoch + offset)
    previous = epoch
  plt.plot(fixed_epochs, train, label='train loss')
  plt.plot(fixed_epochs, val, label='validation loss')
  if title is not None:
    plt.title(title)
  for transition in transitions:
    plt.axvline(transition, linestyle='dashed', color='grey')
  plt.ylabel('loss')
  plt.xlabel('epoch')
  plt.grid()
  plt.legend()
  plt.tight_layout()
  plt.yscale('log')
  if save_as is not None:
    plt.savefig(save_as, bbox_inches='tight')
    plt.close()
  else:
    plt.show()
def plot_result(result, hp, save_as=None):
  """Unpack an optimizer result tuple (states, controls[, adjoints]) and
  forward it to `plot` for the system's default visualization."""
  if len(result) == 3:
    states, controls, adjoints = result
    data = {'adj': adjoints, 'x': states, 'u': controls}
  else:
    states, controls = result
    data = {'x': states, 'u': controls}
  plot(hp, hp.system(), data, save_as=save_as)
def plot(hp, system,
         data: Dict[str, jnp.ndarray],
         labels: Optional[Dict[str, str]] = None,
         styles: Optional[Dict[str, str]] = None,
         widths: Optional[Dict[str, float]] = None,
         title: Optional[str] = None,
         save_as: Optional[str] = None,
         figsize: Optional[Tuple[float, float]] = None) -> None:
  """Plot state / control (and optionally adjoint) trajectories.

  Args:
    hp: hyperparameters; selects per-system axis descriptions and the title.
    system: the control system (provides the horizon `T`; the discrete
      INVASIVEPLANT system plots itself via `system.plot_solution`).
    data: dict with 'x' (states) and 'u' (controls); optional keys:
      'adj' (adjoint), 'other_x'/'other_u' (comparison trajectories),
      'cost'/'defect' and 'other_cost'/'other_defect' (scalar annotations).
    labels: per-key label suffixes; defaults to empty strings.
    styles: per-key matplotlib line styles; defaults to solid lines.
    widths: per-key line widths; defaults to 1.
    title: unused — the title is re-derived from `hp` below (parameter kept
      for backward compatibility with existing callers).
    save_as: output path; a name ending in 'pgf'/'pdf' switches to the pgf
      backend. If None, the figure is shown interactively.
    figsize: optional figure-size override.
  """
  if save_as is not None and save_as.endswith(('pgf', 'pdf')):  # comment out for the cluster
    matplotlib.use("pgf")
    matplotlib.rcParams.update({
      "pgf.texsystem": "pdflatex",
      'font.family': 'serif',
      'text.usetex': True,
      'pgf.rcfonts': False,
    })
  if styles is None:
    styles = {}
    for name in data:
      styles[name] = '-'
  if widths is None:
    widths = {}
    for name in data:
      widths[name] = 1.
  # FIX: `labels` used to be dereferenced unconditionally below, so calling
  # without labels (as `plot_result` does) raised a TypeError. Default every
  # entry to an empty suffix; 'adj' is deliberately left out so the
  # 'Adjoint' fallback label in the adjoint panel still applies.
  if labels is None:
    labels = {name: '' for name in data if name != 'adj'}
  # Separate plotting for the discrete-time system
  if hp.system == SystemType.INVASIVEPLANT:
    system.plot_solution(data['x'], data['u'], data['adj'])
    return
  if figsize is not None:
    plt.figure(figsize=figsize)
  else:
    plt.figure(figsize=(4, 4))
  # FIX: the adjoint panel is drawn as subplot index 3, so a 3-row grid is
  # needed whenever 'adj' is present (the previous hard-coded 2 made
  # `plt.subplot(2, 1, 3)` raise for adjoint-carrying callers).
  num_subplots = 3 if 'adj' in data else 2
  title = get_name(hp)
  if title is not None:
    plt.suptitle(title)
  ts_x = jnp.linspace(0, system.T, data['x'].shape[0])
  ts_u = jnp.linspace(0, system.T, data['u'].shape[0])
  # --- States panel: plot exactly the state columns each system declares ---
  plt.subplot(num_subplots, 1, 1)
  if hp.system in state_descriptions:
    for idx, x_i in enumerate(data['x'].T):
      if idx in state_descriptions[hp.system][0]:
        plt.plot(ts_x, x_i, styles['x'], lw=widths['x'],
                 label=state_descriptions[hp.system][1][idx] + labels['x'])
        if 'other_x' in data:
          plt.plot(jnp.linspace(0, system.T, data['other_x'][:, idx].shape[0]),
                   data['other_x'][:, idx], styles['other_x'], lw=widths['other_x'],
                   label=state_descriptions[hp.system][1][idx] + labels['other_x'])
  else:
    plt.plot(ts_x, data['x'], styles['x'], lw=widths['x'], label=labels['x'])
    if 'other_x' in data:
      plt.plot(jnp.linspace(0, system.T, data['other_x'].shape[0]),
               data['other_x'], styles['other_x'], lw=widths['other_x'], label=labels['other_x'])
  plt.ylabel("state (x)")
  plt.grid()
  plt.legend(loc="upper left")
  # --- Controls panel (same selection logic as the states panel) ---
  ax = plt.subplot(num_subplots, 1, 2)
  if hp.system in control_descriptions:
    for idx, u_i in enumerate(data['u'].T):
      if idx in control_descriptions[hp.system][0]:
        plt.plot(ts_u, u_i, styles['u'], lw=widths['u'], label=control_descriptions[hp.system][1][idx] + labels['u'])
        if 'other_u' in data and data['other_u'] is not None:
          plt.plot(jnp.linspace(0, system.T, data['other_u'][:, idx].shape[0]),
                   data['other_u'][:, idx], styles['other_u'], lw=widths['other_u'],
                   label=control_descriptions[hp.system][1][idx] + labels['other_u'])
  else:
    plt.plot(ts_u, data['u'], styles['u'], lw=widths['u'], label=labels['u'])
    if 'other_u' in data:
      plt.plot(jnp.linspace(0, system.T, data['other_u'].shape[0]),
               data['other_u'], styles['other_u'], lw=widths['other_u'], label=labels['other_u'])
  plt.ylabel("control (u)")
  plt.grid()
  plt.legend(loc="upper left")
  # --- Cost / defect annotation box, anchored on the controls axes ---
  if 'cost' in data and 'other_cost' not in data:
    cost_text = f"Cost: {data['cost']:.2f}"
    if 'defect' in data and data['defect'] is not None:
      for i, d in enumerate(data['defect']):
        if i == 0:
          cost_text += f"\nDefect: {d:.2f}"
        else:
          cost_text += f" {d:.2f}"
    at = AnchoredText(cost_text,
                      prop=dict(size=10), frameon=False,
                      loc='upper right',
                      )
    at.txt._text.set_bbox(dict(facecolor="#FFFFFF", edgecolor="#DBDBDB", alpha=0.7))
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at, )
  elif 'cost' in data and 'other_cost' in data:
    # Two costs: the optimal (reference) one and the achieved one.
    cost_text = f"Optimal cost: {data['cost']:.2f}"
    if 'defect' in data and data['defect'] is not None:
      for i, d in enumerate(data['defect']):
        if i == 0:
          cost_text += f"\nOptimal defect: {d:.2f}"
        else:
          cost_text += f" {d:.2f}"
    cost_text += f"\nAchieved cost: {data['other_cost']:.2f}"
    if 'other_defect' in data and data['other_defect'] is not None:
      for i, d in enumerate(data['other_defect']):
        if i == 0:
          cost_text += f"\nAchieved defect: {d:.2f}"
        else:
          cost_text += f" {d:.2f}"
    at = AnchoredText(cost_text,
                      prop=dict(size=10), frameon=False,
                      loc='upper right',
                      )
    at.txt._text.set_bbox(dict(facecolor="#FFFFFF", edgecolor="#DBDBDB", alpha=0.7))
    at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
    ax.add_artist(at, )
  # --- Optional adjoint panel ---
  if 'adj' in data:
    ts_adj = jnp.linspace(0, system.T, data['adj'].shape[0])
    plt.subplot(num_subplots, 1, 3)
    if labels is not None and 'adj' in labels:
      plt.plot(ts_adj, data['adj'], label=labels['adj'])
    else:
      plt.plot(ts_adj, data['adj'], label='Adjoint')
    plt.ylabel("adjoint (lambda)")
    plt.legend(loc="upper left")
  plt.xlabel('time (s)')
  plt.tight_layout()
  if save_as:
    plt.savefig(save_as, bbox_inches='tight')
    plt.close()
  else:
    plt.show()
if __name__ == "__main__":
  hp = HParams()
  path_to_csv = f'../losses/{hp.system.name}/1_1_1'
  # FIX: plot_losses requires `hp` as its first argument (it derives the plot
  # title from it); the previous calls omitted it and raised a TypeError.
  plot_losses(hp, path_to_csv, save_as=f'../plots/{hp.system.name}/1_1_1/{hp.system.name}_train.pdf')
  plot_losses(hp, path_to_csv, save_as=f'../plots/{hp.system.name}/1_1_1/{hp.system.name}_train.pgf')
| 8,641 | 32.496124 | 125 | py |
myriad | myriad-main/myriad/probing_numerical_instability.py | # (c) 2021 Nikolaus Howe
import numpy as np
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import pickle as pkl
from jax import lax
from typing import Callable, Tuple
from myriad.custom_types import State, States, Control, Controls, DState
from myriad.utils import integrate, integrate_time_independent, integrate_time_independent_in_parallel
from myriad.config import HParams, Config, IntegrationMethod
################
# INSTRUCTIONS #
################
# Place me at the same level as "run.py",
# and run me as:
# for st in SystemType:
# if st in [SystemType.SIMPLECASE, SystemType.INVASIVEPLANT]:
# continue
# print("system", st)
# hp.system = st
# run_trajectory_opt(hp, cfg)
def nice_scan(f, init, xs, length=None):
  """Debugging stand-in for `jax.lax.scan` that plots the carry at every step.

  Runs the scan body `f` eagerly in Python, scatter-plotting every component
  of the carried state (and showing the figure) at each step, so numerical
  blow-ups can be watched as they develop.
  """
  if xs is None:
    xs = [None] * length
  outputs = []
  carry = init
  for step_input in xs:
    # Visualize each component of the current carry at this step index.
    for component in carry.T:
      plt.plot(step_input, component, 'o', color='red')
    if step_input == 49:
      print("xx", step_input, carry)
    plt.show()
    carry, out = f(carry, step_input)
    outputs.append(out)
  return carry, np.stack(outputs)
def testing_integrate_time_independent(
        dynamics_t: Callable[[State, Control], DState],  # dynamics function
        x_0: State,  # starting state
        interval_us: Controls,  # controls
        h: float,  # step size
        N: int,  # steps
        integration_method: IntegrationMethod  # allows user to choose int method
) -> Tuple[State, States]:
  """Debug variant of `myriad.utils.integrate_time_independent`.

  Identical stepping logic, but the scan is executed through `nice_scan`
  (plotting the carry at every step) instead of `jax.lax.scan`, and the
  control trajectory is plotted up front.

  Returns:
    (final state, trajectory of N+1 states including `x_0`).
  """
  # QUESTION: do we want to keep the mid-controls as decision variables for RK4,
  # or move to simply taking the average between the edge ones?
  # @jit
  def rk4_step(x, u1, u2, u3):
    k1 = dynamics_t(x, u1)
    k2 = dynamics_t(x + h * k1 / 2, u2)
    # NOTE: k3 reuses the midpoint control u2 — classical RK4 evaluates
    # both middle stages at the interval midpoint.
    k3 = dynamics_t(x + h * k2 / 2, u2)
    k4 = dynamics_t(x + h * k3, u3)
    return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
  # @jit
  def heun_step(x, u1, u2):
    k1 = dynamics_t(x, u1)
    k2 = dynamics_t(x + h * k1, u2)
    return x + h / 2 * (k1 + k2)
  # @jit
  def midpoint_step(x, u1, u2):
    x_mid = x + h * dynamics_t(x, u1)
    u_mid = (u1 + u2) / 2
    return x + h * dynamics_t(x_mid, u_mid)
  # @jit
  def euler_step(x, u):
    return x + h * dynamics_t(x, u)
  def fn(carried_state, idx):
    # Dispatch one integration step; `idx` indexes the control grid
    # (RK4 uses a doubled grid: controls 2*idx, 2*idx+1, 2*idx+2).
    if integration_method == IntegrationMethod.EULER:
      one_step_forward = euler_step(carried_state, interval_us[idx])
    elif integration_method == IntegrationMethod.HEUN:
      one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1])
    elif integration_method == IntegrationMethod.MIDPOINT:
      one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1])
    elif integration_method == IntegrationMethod.RK4:
      one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
                                  interval_us[2 * idx + 2])
    else:
      print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
      raise KeyError
    return one_step_forward, one_step_forward  # (carry, y)
  # x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
  plt.plot(interval_us, color='blue')
  x_T, all_next_states = nice_scan(fn, x_0, jnp.arange(N))
  return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
def probe(hp: HParams, cfg: Config):
  """Sample random control trajectories, roll them out, and plot the states.

  Diagnostic helper: draws uniform-random controls within the system's
  control bounds, integrates the dynamics from (optionally perturbed) start
  states, aborts loudly if any trajectory reaches non-finite values, and
  plots every state dimension of every trajectory.
  """
  hp.key, subkey = jax.random.split(hp.key)
  system = hp.system()
  # Generate |total dataset size| control trajectories
  total_size = hp.train_size + hp.val_size + hp.test_size
  state_size = system.x_0.shape[0]
  control_size = system.bounds.shape[0] - state_size
  # Bounds rows are laid out as [state bounds; control bounds].
  u_lower = system.bounds[state_size:, 0]
  u_upper = system.bounds[state_size:, 1]
  x_lower = system.bounds[:state_size, 0]
  x_upper = system.bounds[:state_size, 1]
  if jnp.isinf(u_lower).any() or jnp.isinf(u_upper).any():
    raise Exception("infinite control bounds, aborting")
  # if jnp.isinf(x_lower).any() or jnp.isinf(x_upper).any():
  #   raise Exception("infinite state bounds, aborting")
  spread = (u_upper - u_lower) * hp.sample_spread  # NOTE(review): computed but unused here
  all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, control_size),
                              minval=u_lower, maxval=u_upper)
  # Generate the start states
  start_states = system.x_0[jnp.newaxis].repeat(total_size, axis=0)
  # Generate the states from applying the chosen controls
  if hp.start_spread > 0.:
    hp.key, subkey = jax.random.split(hp.key)
    start_states += jax.random.normal(subkey,
                                      shape=start_states.shape) * hp.start_spread  # TODO: explore different spreads
    start_states = jnp.clip(start_states, a_min=x_lower, a_max=x_upper)
  # Generate the corresponding state trajectories
  _, all_xs = integrate_time_independent_in_parallel(system.dynamics, start_states,
                                                     all_us, hp.stepsize, hp.num_steps,
                                                     hp.integration_method)
  print("the shape of the generated us is", all_us.shape)
  print("the shape of the generated xs is", all_xs.shape)
  # print("an example is", all_xs[-1])
  # Abort (with the offending inputs printed) on the first exploding rollout.
  for i, xs in enumerate(all_xs):
    if not jnp.isfinite(xs).all():
      print("there was an infinity encountered")
      print("us", all_us[i])
      print("start state", start_states[i])
      raise SystemExit
  plt.close()
  # Overlay every state dimension of every trajectory on one figure.
  for i, xs in enumerate(all_xs):
    # print("xs is of shape", xs.shape)
    for j, state in enumerate(xs.T):
      plt.plot(state)
      # break
    # plt.plot(xs)
  # plt.legend()
  plt.show()
  # plt.savefig(f'cool_{hp.system.name}.pdf')
  plt.close()
def special_probe(hp, cfg):
  """Scratch diagnostic for CARTPOLE: load a pickled training set and step
  through its first control trajectory under hand-picked dynamics parameters,
  visualizing every integration step.

  NOTE(review): everything after the first `raise SystemExit` is unreachable
  leftover scratch code — it even references an undefined name `us`.
  """
  # CARTPOLE
  key = jax.random.PRNGKey(42)
  key, subkey = jax.random.split(key)
  system = hp.system()
  hp.key, subkey = jax.random.split(hp.key)
  file_path = 't_set'
  train_set = pkl.load(open(file_path, 'rb'))
  print("train set", train_set)
  # First trajectory, split into its state and control columns.
  first_xs = train_set[0, :, :hp.state_size]
  first_us = train_set[0, :, hp.state_size:]
  # Hand-picked cartpole parameters (gravity, cart mass, pole mass, pole length).
  given_params = {
    'g': 15,
    'm1': 1.0,
    'm2': 0.1,
    'length': 1.0
  }
  def dynamics(x, u):
    return system.parametrized_dynamics(given_params, x, u)
  print("first xs", first_xs.shape)
  print("first us", first_us.shape)
  start = first_xs[0]
  print("start", start.shape)
  # Integrate via the plotting/debug integrator so each step is visualized.
  _, xs = testing_integrate_time_independent(dynamics, start,
                                             first_us, hp.stepsize, hp.num_steps,
                                             hp.integration_method)
  #####################
  # train_xs = train_set[:, :, :hp.state_size]
  # train_us = train_set[:, :, hp.state_size:]
  # start_xs = train_xs[:, 0, :]
  # # if cfg.verbose:
  # #   print("train xs", train_xs.shape)
  # #   print("train us", train_us.shape)
  # #   print("start train xs", start_xs.shape)
  #
  # _, predicted_states = integrate_time_independent_in_parallel(
  #   dynamics, start_xs, train_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
  # )
  ############################3
  print("us", first_us)
  print("resulting xs", xs)
  raise SystemExit
  # Unreachable scratch code below (kept as-is); `us` is undefined here.
  # print("us", us)
  # print("xs", xs)
  u = us[21]
  uu = us[22]
  uuu = us[23]
  x = jnp.array([-2.68629165])
  xx = jnp.array([-7.63482766])
  def heun_step(x, u1, u2):
    k1 = system.dynamics(x, u1)
    print("k1", k1)
    k2 = system.dynamics(x + hp.stepsize * k1, u2)
    print("k2", k2)
    return x + hp.stepsize / 2 * (k1 + k2)
  print("step", heun_step(xx, uu, uuu))
| 7,453 | 30.451477 | 116 | py |
myriad | myriad-main/myriad/utils.py | # (c) 2021 Nikolaus Howe
from __future__ import annotations
import jax
import jax.numpy as jnp
import numpy as np
import time
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
from myriad.config import HParams, Config
from jax import jit, lax, vmap
from typing import Callable, Optional, Tuple, Dict
from myriad.config import Config, HParams, IntegrationMethod, SamplingApproach
from myriad.systems import FiniteHorizonControlSystem
from myriad.custom_types import Control, Controls, Dataset, DState, State, States, Cost, Timestep
def integrate(
        dynamics_t: Callable[[State, Control, Timestep], DState],  # dynamics function
        x_0: State,  # starting state
        interval_us: Controls,  # controls
        h: float,  # step size
        N: int,  # steps
        ts: jnp.ndarray,  # times
        integration_method: IntegrationMethod  # allows user to choose interpolation for controls
) -> Tuple[State, States]:
  """Fixed-step numerical integration of time-dependent dynamics.

  Steps `N` times from `x_0` with step size `h`, reading per-step controls
  from `interval_us` (EULER uses one control per step; HEUN/MIDPOINT also
  read the next control; RK4 uses a doubled grid — controls 2*i, 2*i+1 and
  2*i+2) and per-step times from `ts`.

  Returns:
    (final state, trajectory of N+1 states including `x_0`).
  """
  # QUESTION: do we want to keep this interpolation for rk4, or move to linear?
  @jit
  def rk4_step(x, u1, u2, u3, t):
    k1 = dynamics_t(x, u1, t)
    k2 = dynamics_t(x + h * k1 / 2, u2, t + h / 2)
    # k3 reuses the midpoint control u2 — classical RK4 evaluates both
    # middle stages at the interval midpoint.
    k3 = dynamics_t(x + h * k2 / 2, u2, t + h / 2)
    k4 = dynamics_t(x + h * k3, u3, t + h)
    return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
  @jit
  def heun_step(x, u1, u2, t):
    k1 = dynamics_t(x, u1, t)
    k2 = dynamics_t(x + h * k1, u2, t + h)
    return x + h / 2 * (k1 + k2)
  @jit
  def midpoint_step(x, u1, u2, t):
    x_mid = x + h * dynamics_t(x, u1, t)
    u_mid = (u1 + u2) / 2
    return x + h * dynamics_t(x_mid, u_mid, t + h / 2)
  @jit
  def euler_step(x, u, t):
    return x + h * dynamics_t(x, u, t)
  def fn(carried_state, idx):
    # Scan body: dispatch one step of the chosen method at control index `idx`.
    if integration_method == IntegrationMethod.EULER:
      one_step_forward = euler_step(carried_state, interval_us[idx], ts[idx])
    elif integration_method == IntegrationMethod.HEUN:
      one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1], ts[idx])
    elif integration_method == IntegrationMethod.MIDPOINT:
      one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1], ts[idx])
    elif integration_method == IntegrationMethod.RK4:
      one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
                                  interval_us[2 * idx + 2], ts[idx])
    else:
      print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
      raise KeyError
    return one_step_forward, one_step_forward  # (carry, y)
  x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
  return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
# Used for the augmented state cost calculation
# Vectorized `integrate`: batches over start states, control trajectories and
# time grids (each along axis 0); the dynamics function, step size, step
# count and integration method are shared across the batch.
integrate_in_parallel = vmap(integrate, in_axes=(None, 0, 0, None, None, 0, None))  # , static_argnums=(0, 5, 6)
def integrate_time_independent(
        dynamics_t: Callable[[State, Control], DState],  # dynamics function
        x_0: State,  # starting state
        interval_us: Controls,  # controls
        h: float,  # step size
        N: int,  # steps
        integration_method: IntegrationMethod  # allows user to choose int method
) -> Tuple[State, States]:
  """Fixed-step numerical integration of time-independent dynamics.

  Same stepping schemes as `integrate`, but the dynamics take no time
  argument. RK4 reads controls from a doubled grid (indices 2*i, 2*i+1,
  2*i+2); the other methods use indices i (and i+1 where needed).

  Returns:
    (final state, trajectory of N+1 states including `x_0`).
  """
  # QUESTION: do we want to keep the mid-controls as decision variables for RK4,
  # or move to simply taking the average between the edge ones?
  @jit
  def rk4_step(x, u1, u2, u3):
    k1 = dynamics_t(x, u1)
    k2 = dynamics_t(x + h * k1 / 2, u2)
    # k3 reuses the midpoint control u2 (both middle RK4 stages sit at the
    # interval midpoint).
    k3 = dynamics_t(x + h * k2 / 2, u2)
    k4 = dynamics_t(x + h * k3, u3)
    return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
  @jit
  def heun_step(x, u1, u2):
    k1 = dynamics_t(x, u1)
    k2 = dynamics_t(x + h * k1, u2)
    return x + h / 2 * (k1 + k2)
  @jit
  def midpoint_step(x, u1, u2):
    x_mid = x + h * dynamics_t(x, u1)
    u_mid = (u1 + u2) / 2
    return x + h * dynamics_t(x_mid, u_mid)
  @jit
  def euler_step(x, u):
    return x + h * dynamics_t(x, u)
  def fn(carried_state, idx):
    # Scan body: dispatch one step of the chosen method at control index `idx`.
    if integration_method == IntegrationMethod.EULER:
      one_step_forward = euler_step(carried_state, interval_us[idx])
    elif integration_method == IntegrationMethod.HEUN:
      one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1])
    elif integration_method == IntegrationMethod.MIDPOINT:
      one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1])
    elif integration_method == IntegrationMethod.RK4:
      one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
                                  interval_us[2 * idx + 2])
    else:
      print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
      raise KeyError
    return one_step_forward, one_step_forward  # (carry, y)
  x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
  return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
# Vectorized `integrate_time_independent`: batches over start states and
# control trajectories (axis 0 each); the remaining arguments are shared.
integrate_time_independent_in_parallel = vmap(integrate_time_independent, in_axes=(None, 0, 0, None, None, None))
# Used for the adjoint integration
def integrate_fbsm(
    dynamics_t: Callable[[State, Control, Optional[jnp.ndarray], Optional[jnp.ndarray]],
                         jnp.ndarray],  # dynamics function
    x_0: jnp.ndarray,  # starting state
    u: jnp.ndarray,  # controls
    h: float,  # step size  # is negative in backward mode
    N: int,  # steps
    v: Optional[jnp.ndarray] = None,
    t: Optional[jnp.ndarray] = None,
    discrete: bool = False,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
  """
  Implementation of the Runge-Kutta 4th order method for ODE solving, adapted for the FBSM method.
  Specifically, it can either perform a numerical integration in a forward sweep over the state variables,
  or a backward sweep to integrate over the adjoint variables (signalled by a negative step size `h`).
  Args:
    dynamics_t: (Callable) -- The dynamics (ODEs) to integrate
    x_0: The initial value to begin integration
    u: (jnp.ndarray) -- A guess over a costate variable.
    h: (float) -- The step size for the numerical integration
    N: (int) -- The number of steps for the numerical integration
    v: (jnp.ndarray, optional) -- Another costate variable, if needed
    t: (jnp.ndarray, optional) -- The time variable, for time-dependent dynamics
    discrete: (bool, optional) -- Perform direct calculation instead of integration if facing a discrete system.
  Returns:
    final_state, trajectory : Tuple[jnp.ndarray, jnp.array] -- The final value of the integrated variable and the complete trajectory
  """
  @jit
  def rk4_step(x_t1, u, u_next, v, v_next, t):
    # Controls/costates between grid points are approximated by the average
    # of the two neighbouring grid values.
    u_convex_approx = (u + u_next) / 2
    v_convex_approx = (v + v_next) / 2
    k1 = dynamics_t(x_t1, u, v, t)
    k2 = dynamics_t(x_t1 + h * k1 / 2, u_convex_approx, v_convex_approx, t + h / 2)
    k3 = dynamics_t(x_t1 + h * k2 / 2, u_convex_approx, v_convex_approx, t + h / 2)
    k4 = dynamics_t(x_t1 + h * k3, u_next, v_next, t + h)
    return x_t1 + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
  # Placeholders keep the scan body's signature uniform when v / t are unused.
  if v is None:
    v = jnp.empty_like(u)
  if t is None:
    t = jnp.empty_like(u)
  direction = int(jnp.sign(h))  # +1: forward (state) sweep; -1: backward (adjoint) sweep
  if discrete:
    if direction >= 0:
      fn = lambda x_t, idx: [dynamics_t(x_t, u[idx], v[idx], t[idx])] * 2
    else:
      fn = lambda x_t, idx: [dynamics_t(x_t, u[idx], v[idx - 1], t[idx - 1])] * 2
  else:
    fn = lambda x_t, idx: [rk4_step(x_t, u[idx], u[idx + direction], v[idx], v[idx + direction], t[idx])] * 2
  if direction >= 0:
    x_T, ys = lax.scan(fn, x_0, jnp.arange(N))
    return x_T, jnp.concatenate((x_0[None], ys))
  else:
    # Backward sweep: scan indices N..1, then flip the outputs to time order.
    x_T, ys = lax.scan(fn, x_0, jnp.arange(N, 0, -1))
    return x_T, jnp.concatenate((jnp.flipud(ys), x_0[None]))
# First, get the optimal controls and resulting trajectory using the true system model.
# Then, replace the model dynamics with the trained neural network,
# and use that to find the "optimal" controls according to the NODE model.
# Finally get the resulting true state trajectory coming from those suboptimal controls.
# def plan_with_model(node: NeuralODE, regularize: bool = False) -> Controls:
# apply_net = lambda x, u: node.net.apply(node.params, jnp.append(x, u)) # use nonlocal net and params
#
# # Replace system dynamics, but remember it to restore later
# # old_dynamics = node.system.dynamics
# # node.system.dynamics = apply_net
#
# objective = functools.partial(node.optimizer.objective, custom_dynamics=apply_net)
# constraints = functools.partial(node.optimizer.constraints, custom_dynamics=apply_net)
#
# opt_inputs = {
# 'objective': objective,
# 'guess': node.optimizer.guess,
# 'constraints': constraints,
# 'bounds': node.optimizer.bounds,
# 'unravel': node.optimizer.unravel
# }
#
# _, u = solve(node.hp, node.cfg, opt_inputs)
#
# # Restore system dynamics
# # node.system.dynamics = old_dynamics
#
# return u.squeeze() # this is necessary for later broadcasting
def plan_with_node_model(node: NeuralODE) -> Tuple[States, Controls]:
  """Plan a trajectory using the node's learned network as the system dynamics.

  Temporarily swaps the true dynamics for the neural network, runs the
  trajectory optimizer, then restores the original dynamics. The controls
  are returned un-squeezed so vector-valued controls broadcast correctly
  downstream.
  """
  def learned_dynamics(x, u):
    return node.net.apply(node.params, jnp.append(x, u))
  original_dynamics = node.system.dynamics
  node.system.dynamics = learned_dynamics
  solution = node.optimizer.solve()
  node.system.dynamics = original_dynamics
  return solution['x'], solution['u']
# Find the optimal trajectory according to the learned model
def get_optimal_node_trajectory(node: NeuralODE) -> Tuple[States, Controls]:
  """Plan controls with the learned model, then roll them out on the true dynamics."""
  _, planned_controls = plan_with_node_model(node)
  _, resulting_states = integrate_time_independent(
    node.system.dynamics, node.system.x_0, planned_controls,
    node.stepsize, node.num_steps, node.hp.integration_method)
  return resulting_states, planned_controls
# TODO: make the start state default to the system start state
def get_state_trajectory_and_cost(hp: HParams, system: FiniteHorizonControlSystem,
                                  start_state: State, us: Controls) -> Tuple[States, Cost]:
  """Roll out controls `us` from `start_state` and accumulate the running cost.

  The instantaneous cost is appended to the state as an extra dimension, so
  a single integration produces both the state trajectory and the integrated
  cost; the terminal cost (if the system defines one) is added at the end.

  Args:
    hp: hyperparameters (step counts and integration method).
    system: the control system providing dynamics, cost, and horizon `T`.
    start_state: state to begin the rollout from.
    us: control trajectory on the integration grid.

  Returns:
    (states over the horizon, total scalar cost).
  """
  @jax.jit
  def augmented_dynamics(x_and_c: jnp.ndarray, u: Control, t: Timestep) -> jnp.ndarray:
    # Last component carries the accumulated cost; its derivative is the
    # instantaneous cost.
    x, c = x_and_c[:-1], x_and_c[-1]
    return jnp.append(system.dynamics(x, u), system.cost(x, u, t))
  num_steps = hp.intervals * hp.controls_per_interval
  step_size = system.T / num_steps
  times = jnp.linspace(0., system.T, num=num_steps + 1)
  starting_x_and_cost = jnp.append(start_state, 0.)
  _, state_and_cost = integrate(
    augmented_dynamics, starting_x_and_cost, us,
    step_size, num_steps, times, hp.integration_method)
  # Split the augmented trajectory back into states and accumulated cost.
  states = state_and_cost[:, :-1]
  last_augmented_state = state_and_cost[-1]
  cost = last_augmented_state[-1]
  if system.terminal_cost:
    cost += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
  return states, cost
def smooth(curve: jnp.ndarray, its: int) -> jnp.ndarray:
  """Apply `its` passes of a 5-tap Gaussian blur along the time axis.

  `curve` has shape (num_trajectories, num_timesteps, num_dims); each
  dimension of each trajectory is edge-padded and convolved independently.
  """
  smoothed = np.array(curve)
  blur = np.array([0.15286624, 0.22292994, 0.24840764, 0.22292994, 0.15286624])  # Gaussian blur
  for _ in range(its):
    for traj_idx in range(smoothed.shape[0]):
      for dim_idx in range(smoothed.shape[2]):
        padded = np.pad(smoothed[traj_idx, :, dim_idx], (2, 2), 'edge')
        smoothed[traj_idx, :, dim_idx] = np.convolve(padded, blur, mode='valid')
  return jnp.array(smoothed)
def get_defect(system: FiniteHorizonControlSystem, learned_xs: States) -> Optional[jnp.array]:
  """Return the terminal-state residuals, or None if the system has no target.

  For every constrained terminal-state dimension (entries of `system.x_T`
  that are not None), the residual is achieved value minus target.
  """
  if system.x_T is None:
    return None
  final_state = learned_xs[-1]
  residuals = [achieved - target
               for achieved, target in zip(final_state, system.x_T)
               if target is not None]
  return jnp.array(residuals)
def generate_dataset(hp: HParams, cfg: Config,
                     given_us: Optional[Controls] = None) -> Dataset:
  """Generate a dataset of (state, control) trajectories for system identification.

  Controls are sampled according to `hp.sampling_approach` (random walk,
  uniform, or Gaussian noise around `given_us`), optionally smoothed, then
  rolled out from (optionally perturbed) start states; the observed states
  are noised and clipped to the state bounds.

  Args:
    hp: hyperparameters (dataset sizes, sampling approach, spreads, noise).
    cfg: experiment config; only `cfg.verbose` is read here.
    given_us: control trajectory to sample around for the *_OPTIMAL
      approaches; falls back to uniform sampling when absent.

  Returns:
    Array of shape (total_size, num_steps + 1, state_size + control_size)
    with states and controls concatenated along the last axis.
  """
  system = hp.system()
  hp.key, subkey = jax.random.split(hp.key)
  # Generate |total dataset size| control trajectories
  total_size = hp.train_size + hp.val_size + hp.test_size
  # TODO: fix what happens in case of infinite bounds
  u_lower = system.bounds[hp.state_size:, 0]
  u_upper = system.bounds[hp.state_size:, 1]
  x_lower = system.bounds[:hp.state_size, 0]
  x_upper = system.bounds[:hp.state_size, 1]
  if jnp.isinf(u_lower).any() or jnp.isinf(u_upper).any():
    raise Exception("infinite control bounds, aborting")
  if jnp.isinf(x_lower).any() or jnp.isinf(x_upper).any():
    raise Exception("infinite state bounds, aborting")
  # Per-step perturbation scale, as a fraction of the control range.
  spread = (u_upper - u_lower) * hp.sample_spread
  ########################
  # RANDOM WALK CONTROLS #
  ########################
  if hp.sampling_approach == SamplingApproach.RANDOM_WALK:
    # Make all the first states
    all_start_us = np.random.uniform(u_lower, u_upper, (total_size, 1, hp.control_size))
    all_us = all_start_us
    # Each subsequent control is the previous one plus clipped Gaussian noise.
    for i in range(hp.num_steps):
      next_us = np.random.normal(0, spread, (total_size, 1, hp.control_size))
      rightmost_us = all_us[:, -1:, :]
      together = np.clip(next_us + rightmost_us, u_lower, u_upper)
      all_us = np.concatenate((all_us, together), axis=1)
  # elif hp.sampling_approach == SamplingApproach.RANDOM_GRID:
  #   single_ascending_controls = np.linspace(u_lower, u_upper, hp.num_steps + 1)
  #   parallel_ascending_controls = single_ascending_controls[np.newaxis].repeat(total_size)
  #   assert parallel_ascending_controls.shape == ()
  # NOTE: we could also generate data by exhaustively considering every combination
  # of state-control pair up to some discretization. This might just solve
  # the problem. Unfortunately, curse of dimensionality is real.
  # IDEA: let's try doing this on the CANCERTREATMENT domain, and see whether
  # this is enough to help neural ODE figure out what is going on
  # at the very start of planning
  ###########################
  # UNIFORM RANDOM CONTROLS #
  ###########################
  elif hp.sampling_approach == SamplingApproach.UNIFORM:
    all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
                                minval=u_lower, maxval=u_upper) * 0.75  # TODO
    # TODO: make sure having added control size everywhere didn't break things
  #########################
  # AROUND GIVEN CONTROLS #
  #########################
  elif hp.sampling_approach == SamplingApproach.TRUE_OPTIMAL or hp.sampling_approach == SamplingApproach.CURRENT_OPTIMAL:
    if given_us is None:
      print("Since you didn't provide any controls, we'll use a uniform random guess")
      all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
                                  minval=u_lower, maxval=u_upper) * 0.75  # TODO
      # raise Exception("If sampling around a control trajectory, need to provide that trajectory.")
    else:
      # Gaussian perturbations of the provided trajectory, clipped to bounds.
      noise = jax.random.normal(key=subkey, shape=(total_size, hp.num_steps + 1, hp.control_size)) \
              * (u_upper - u_lower) * hp.sample_spread
      all_us = jnp.clip(given_us[jnp.newaxis].repeat(total_size, axis=0).squeeze() + noise.squeeze(), a_min=u_lower,
                        a_max=u_upper)
  else:
    raise Exception("Unknown sampling approach, please choose among", SamplingApproach.__dict__['_member_names_'])
  print("initial controls shape", all_us.shape)
  # Smooth the controls if so desired
  if hp.to_smooth:
    start = time.time()
    all_us = smooth(all_us, 2)
    end = time.time()
    print(f"smoothing took {end - start}s")
  # TODO: I really dislike having to have this line below. Is there no way to remove it?
  # Make the controls guess smaller so our dynamics don't explode
  # all_us *= 0.1
  # Generate the start states
  start_states = system.x_0[jnp.newaxis].repeat(total_size, axis=0)
  # Generate the states from applying the chosen controls
  if hp.start_spread > 0.:
    hp.key, subkey = jax.random.split(hp.key)
    start_states += jax.random.normal(subkey,
                                      shape=start_states.shape) * hp.start_spread  # TODO: explore different spreads
    start_states = jnp.clip(start_states, a_min=x_lower, a_max=x_upper)
  # Generate the corresponding state trajectories
  _, all_xs = integrate_time_independent_in_parallel(system.dynamics, start_states,
                                                     all_us, hp.stepsize, hp.num_steps,
                                                     hp.integration_method)
  # Noise up the state observations
  hp.key, subkey = jax.random.split(hp.key)
  all_xs += jax.random.normal(subkey, shape=all_xs.shape) * (x_upper - x_lower) * hp.noise_level
  all_xs = jnp.clip(all_xs, a_min=x_lower, a_max=x_upper)
  # Stack the states and controls together
  xs_and_us = jnp.concatenate((all_xs, all_us), axis=2)
  if cfg.verbose:
    print("Generating training control trajectories between bounds:")
    print("  u lower", u_lower)
    print("  u upper", u_upper)
    print("of shapes:")
    print("  xs shape", all_xs.shape)
    print("  us shape", all_us.shape)
    print("  together", xs_and_us.shape)
  assert np.isfinite(xs_and_us).all()
  return xs_and_us
def yield_minibatches(hp: HParams, total_size: int, dataset: Dataset) -> iter:
  """Shuffle `dataset` and yield up to `total_size` rows in minibatches.

  The final minibatch may be smaller than `hp.minibatch_size` when
  `total_size` is not an exact multiple of it.
  """
  assert total_size <= dataset.shape[0]
  shuffled = np.random.permutation(dataset)
  batch = hp.minibatch_size
  num_batches = -(-total_size // batch)  # ceiling division
  for start in range(0, num_batches * batch, batch):
    stop = min(start + batch, total_size)
    yield shuffled[start:stop]
def sample_x_init(hp: HParams, n_batch: int = 1) -> np.ndarray:
  """Draw `n_batch` initial states uniformly within the system's state bounds.

  Samples over the full (state + control) bounds rows, then keeps only the
  leading state columns.
  """
  system = hp.system()
  lower, upper = system.bounds[:, 0], system.bounds[:, 1]
  samples = np.random.uniform(lower, upper, (n_batch, hp.state_size + hp.control_size))
  states = samples[:, :hp.state_size]
  assert np.isfinite(states).all()
  return states
| 18,600 | 39.088362 | 133 | py |
myriad | myriad-main/myriad/useful_scripts.py | # (c) 2021 Nikolaus Howe
from __future__ import annotations
import jax.numpy as jnp
import numpy as np
import pickle as pkl
import simple_parsing
from jax.flatten_util import ravel_pytree
from jax.config import config
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config
from myriad.custom_types import Cost, Defect, Optional
from myriad.neural_ode.create_node import NeuralODE
from myriad.trajectory_optimizers import get_optimizer
from myriad.utils import get_defect, integrate_time_independent, get_state_trajectory_and_cost, plan_with_node_model
from myriad.plotting import plot
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.config import OptimizerType
# Run JAX in double precision (its default is float32).
config.update("jax_enable_x64", True)
def run_trajectory_opt(hp: HParams, cfg: Config, save_as: str = None,
                       params_path: str = None) -> Tuple[Cost, Optional[Defect]]:
  """Solve the trajectory-optimization problem for `hp.system` and score it.

  Builds the system (optionally with pickled parameters from `params_path`),
  runs the configured optimizer, re-integrates the solved controls on the
  default ("true") system to obtain the achieved cost and terminal defect,
  and optionally plots the result.

  Args:
    hp: hyperparameters selecting system, optimizer and discretization.
    cfg: experiment config (plotting flags).
    save_as: plot filename (placed under plots/<system>/trajectory_opt/).
    params_path: optional pickle of keyword parameters for the system.

  Returns:
    (achieved cost, terminal defect or None).
  """
  plot_path = f'plots/{hp.system.name}/trajectory_opt/'
  Path(plot_path).mkdir(parents=True, exist_ok=True)
  if save_as is not None:
    save_as = plot_path + save_as
  if params_path is not None:
    params = pkl.load(open(params_path, 'rb'))
    system = hp.system(**params)
    print("loaded params:", params)
  else:
    system = hp.system()
    print("made default system")
  optimizer = get_optimizer(hp, cfg, system)
  solution = optimizer.solve()
  x = solution['x']
  u = solution['u']
  if optimizer.require_adj:
    adj = solution['adj']
  # Score the solved controls on the default (unparametrized) system.
  true_system = hp.system()
  opt_x, c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, u)
  defect = get_defect(true_system, opt_x)
  if cfg.plot:
    if cfg.pretty_plotting:
      plot(hp, true_system,
           data={'x': opt_x, 'u': u, 'cost': c, 'defect': defect},
           labels={'x': '', 'u': ''},
           styles={'x': '-', 'u': '-'},
           widths={'x': 2, 'u': 2},
           save_as=save_as)
    else:  # We also want to plot the state trajectory we got from the solver
      if optimizer.require_adj:
        plot(hp, true_system,
             data={'x': x, 'u': u, 'adj': adj, 'other_x': opt_x, 'cost': c, 'defect': defect},
             labels={'x': ' (from solver)',
                     'u': 'Controls from solver',
                     'adj': 'Adjoint from solver',
                     'other_x': ' (integrated)'},
             save_as=save_as)
      else:
        plot(hp, true_system,
             data={'x': x, 'u': u, 'other_x': opt_x, 'cost': c, 'defect': defect},
             labels={'x': ' (from solver)',
                     'u': 'Controls from solver',
                     'other_x': ' (from integrating dynamics)'},
             save_as=save_as)
  return c, defect
def run_node_trajectory_opt(hp: HParams, cfg: Config, save_as: str = None,
                            params_path: str = None) -> Tuple[Cost, Optional[Defect]]:
  """Plan with a saved neural-ODE dynamics model, then evaluate on the true system.

  Loads NODE parameters from `params_path`, solves the OCP on the learned
  dynamics, and integrates the planned controls through the true system to
  obtain the realized cost and terminal defect.
  """
  real_system = hp.system()
  neural_ode = NeuralODE(hp, cfg)
  neural_ode.load_params(params_path)
  surrogate = NodeSystem(neural_ode, real_system)
  surrogate_optimizer = get_optimizer(hp, cfg, surrogate)
  planned = surrogate_optimizer.solve_with_params(neural_ode.params)
  planned_us = planned['u']
  # Realized trajectory and cost under the *true* dynamics
  realized_xs, realized_cost = get_state_trajectory_and_cost(
    hp, real_system, real_system.x_0, planned_us)
  terminal_defect = get_defect(real_system, realized_xs)
  if cfg.plot:
    plot(hp, real_system,
         data={'x': realized_xs, 'u': planned_us,
               'cost': realized_cost, 'defect': terminal_defect},
         labels={'x': '', 'u': ''},
         styles={'x': '-', 'u': '-'},
         save_as=save_as)
  return realized_cost, terminal_defect
def run_setup():
  """Parse experiment hyperparameters and configuration from the command line.

  Also seeds numpy's RNG with the parsed seed for reproducibility.

  Returns:
    (hp, cfg): the parsed HParams and Config dataclasses.
  """
  arg_parser = simple_parsing.ArgumentParser()
  arg_parser.add_arguments(HParams, dest="hparams")
  arg_parser.add_arguments(Config, dest="config")
  parsed = arg_parser.parse_args()
  hparams, run_config = parsed.hparams, parsed.config
  print(hparams)
  print(run_config)
  # Seed for reproducibility
  np.random.seed(hparams.seed)
  return hparams, run_config
def plot_zero_control_dynamics(hp, cfg):
  """Integrate the system under all-zero controls and plot the trajectory.

  Also prints the optimizer's objective value and constraint-violation norm
  for that zero-control trajectory (skipped for FBSM — see the guard below).
  """
  system = hp.system()
  optimizer = get_optimizer(hp, cfg, system)
  num_steps = hp.intervals * hp.controls_per_interval
  stepsize = system.T / num_steps
  # One control per step boundary, hence num_steps + 1 entries
  # NOTE(review): assumes a scalar control — confirm for multi-input systems
  zero_us = jnp.zeros((num_steps + 1,))
  _, opt_x = integrate_time_independent(system.dynamics, system.x_0, zero_us,
                                        stepsize, num_steps, hp.integration_method)
  plot(hp, system,
       data={'x': opt_x, 'u': zero_us},
       labels={'x': 'Integrated state',
               'u': 'Zero controls'})
  # Flatten (states, controls) into the optimizer's decision-variable layout
  xs_and_us, unused_unravel = ravel_pytree((opt_x, zero_us))
  if hp.optimizer != OptimizerType.FBSM:
    print("control cost from optimizer", optimizer.objective(xs_and_us))
    print('constraint violations from optimizer', jnp.linalg.norm(optimizer.constraints(xs_and_us)))
# Plot the given control and state trajectory. Also plot the state
# trajectory which occurs when using the neural net for dynamics.
# If "optimal", do the same things as above but using the true
# optimal controls and corresponding true state trajectory.
# "extra_u" is just a way to plot an extra control trajectory.
def plot_trajectory(node: NeuralODE,
                    optimal: bool = False,
                    x: jnp.ndarray = None,
                    u: jnp.ndarray = None,
                    validation: bool = False,
                    title: str = None,
                    save_as: str = None) -> None:
  """Plot a (state, control) trajectory alongside the state trajectory the
  node's learned dynamics predict under the same controls.

  If `x`/`u` are omitted, the last trajectory of the train (or validation,
  when `validation=True`) dataset is used. If `optimal=True`, the stored
  true-optimal trajectory overrides `x` and `u`.
  """
  if validation:
    dset = node.validation_data
  else:
    dset = node.train_data
  # Default to the last trajectory in the chosen dataset; the dataset packs
  # states in the first `state_size` features and controls in the rest
  if x is None:
    x: jnp.ndarray = dset[-1, :, :node.hp.state_size]
  if u is None:
    u: jnp.ndarray = dset[-1, :, node.hp.state_size:]
  apply_net = lambda x, u: node.net.apply(node.params, jnp.concatenate((x, u), axis=0))  # use nonlocal net and params
  if optimal:
    x = node.true_opt_xs
    u = node.true_opt_us
  # Get states when using those controls
  _, predicted_states = integrate_time_independent(apply_net, x[0], u,
                                                   node.stepsize, node.num_steps,
                                                   node.hp.integration_method)
  # Get the true integrated cost of these controls
  _, control_cost = get_state_trajectory_and_cost(node.hp, node.system, x[0], u)
  # If there is a final state, also report the defect
  defect = None
  if node.system.x_T is not None:
    defect = []
    # x_T may constrain only some state dimensions (None entries are free)
    for i, s in enumerate(predicted_states[-1]):
      if node.system.x_T[i] is not None:
        defect.append(s - node.system.x_T[i])
    defect = np.array(defect)
  # Plot
  plot(hp=node.hp,
       system=node.system,
       data={'x': x, 'u': u, 'other_x': predicted_states, 'cost': control_cost,
             'defect': defect},
       labels={'x': ' (true)', 'u': '', 'other_x': ' (predicted)'},
       title=title, save_as=save_as)
# Plan with the model. Plot the controls from planning and corresponding true state trajectory.
# Compare it with the true optimal controls and corresponding state trajectory.
def plan_and_plot(node: NeuralODE, title: str = None, save_as: str = None) -> None:
  """Plan with the node's learned model, then plot the planned controls and
  their true realized state trajectory against the true optimal ones.

  Side effect: when the realized cost improves on `node.best_guess_us_cost`,
  updates the node's best-guess controls and warm-starts the optimizer with
  the new plan.
  """
  planned_x, planned_us = plan_with_node_model(node)
  # Realized trajectory/cost of the planned controls under the true dynamics
  xs, cost = get_state_trajectory_and_cost(node.hp, node.system, node.system.x_0, planned_us)
  # If this is the best cost so far, update the best guess for us
  # TODO: I don't think this is the place to do this... where is better?
  if node.best_guess_us_cost is None or cost < node.best_guess_us_cost:
    print("updating best us with a cost of", cost)
    node.best_guess_us = planned_us
    node.best_guess_us_cost = cost
    # Warm-start the optimizer with the flattened (states, controls) plan
    new_guess, _ = ravel_pytree((planned_x, planned_us))
    node.optimizer.guess = new_guess
  # If there is a final state, also report the defect
  opt_defect = None
  defect = None
  if node.system.x_T is not None:
    opt_defect = node.true_opt_xs[-1] - node.system.x_T
    defect = xs[-1] - node.system.x_T
  plot(hp=node.hp,
       system=node.system,
       data={'x': node.true_opt_xs, 'u': node.true_opt_us, 'other_x': xs, 'other_u': planned_us,
             'cost': node.true_opt_cost, 'defect': opt_defect, 'other_cost': cost, 'other_defect': defect},
       labels={'x': ' (true)',
               'u': ' (true)',
               'other_x': ' (planned)',
               'other_u': ' (planned)'},
       title=title,
       save_as=save_as)
##########################
# Test E2E Node planning #
##########################
def load_node_and_plan(hp, cfg):
  """For each saved end-to-end NODE parameter checkpoint, plan with the
  learned dynamics and plot the result against the true optimal trajectory.

  Checkpoints are expected at params/<system>/e2e_node/<epoch>.p for epochs
  3000..10000 in steps of 50; missing checkpoints are skipped. Planned
  controls are always evaluated by integrating the *true* dynamics.
  """
  params_path = f'params/{hp.system.name}/e2e_node/'
  # Fix: plots were previously written into the params directory
  # (plots_path mistakenly started with 'params/')
  plots_path = f'plots/{hp.system.name}/e2e_node/'
  Path(params_path).mkdir(parents=True, exist_ok=True)
  Path(plots_path).mkdir(parents=True, exist_ok=True)
  # Checkpoints saved every 50 epochs
  params_names = [f'{i * 50}.p' for i in range(60, 201)]
  plots_names = [f'{i * 50}_epochs.png' for i in range(60, 201)]
  node = NeuralODE(hp, cfg, mle=False)
  true_system = hp.system()  # use the default params here
  true_optimizer = get_optimizer(hp, cfg, true_system)
  node_system = NodeSystem(node=node, true_system=true_system)
  node_optimizer = get_optimizer(hp, cfg, node_system)
  # Reference solution: solve the OCP on the true system once
  true_solution = true_optimizer.solve()
  true_opt_us = true_solution['u']
  _, true_opt_xs = integrate_time_independent(
    true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
  for i, params_name in enumerate(params_names):
    try:
      node.load_params(params_path + params_name)
      print("loaded params")
      solution = node_optimizer.solve_with_params(node.params)
      solved_us = solution['u']
      # Evaluate the planned controls on the true dynamics
      _, integrated_xs = integrate_time_independent(
        true_system.dynamics, true_system.x_0, solved_us, hp.stepsize, hp.num_steps, hp.integration_method)
      plot(hp, true_system,
           data={'x': true_opt_xs,
                 'other_x': integrated_xs,
                 'u': true_opt_us,
                 'other_u': solved_us},
           labels={'x': ' (optimal)',
                   'other_x': ' (learned)',
                   'u': ' (optimal)',
                   'other_u': ' (learned)'},
           styles={'x': '.',
                   'other_x': '-',
                   'u': '.',
                   'other_u': '-'},
           save_as=plots_path + plots_names[i])
    except FileNotFoundError:  # unused exception binding removed
      print("unable to find the params, so we'll skip")
| 11,496 | 36.087097 | 118 | py |
myriad | myriad-main/myriad/config.py | # (c) 2021 Nikolaus Howe
from typing import Tuple
import jax
from dataclasses import dataclass
from enum import Enum
from myriad.systems import SystemType
class OptimizerType(Enum):
  """Parser argument. Optimizing strategy used to solve the OCP"""
  # _settings_ = NoAlias
  COLLOCATION = "COLLOCATION"  # direct collocation (trapezoidal / Hermite-Simpson)
  SHOOTING = "SHOOTING"  # direct multiple shooting
  FBSM = "FBSM"  # indirect method; presumably "forward-backward sweep" — confirm
class SamplingApproach(Enum):
  """How control trajectories are sampled when generating sysid datasets."""
  UNIFORM = 'UNIFORM'  # i.i.d. uniform controls within bounds
  TRUE_OPTIMAL = 'TRUE_OPTIMAL'  # Gaussian noise around the true optimal controls
  RANDOM_WALK = 'RANDOM_WALK'  # clipped Gaussian random walk over time steps
  CURRENT_OPTIMAL = 'CURRENT_OPTIMAL'  # TODO: current optimal is broken at the moment, because we're not
  # TODO: the guess around which we are sampling
  # RANDOM_GRID = 'RANDOM_GRID'
  # This ^ isn't implemented yet. It's unclear how helpful it would be
  # FULL_GRID = 'FULL_GRID'
  # We're not doing the FULL GRID anymore because it breaks the idea of generating trajectories.
  # But it would be interesting to compare performance against, since in some sense this is the
  # theoretical best. I wonder how resilient it would be to noise though.
  # ENDTOEND = "ENDTOEND"
  # Another one we should try to implement
class NLPSolverType(Enum):
  """Backend nonlinear-programming solver used by the trajectory optimizer."""
  SLSQP = "SLSQP"  # Scipy's SLSQP
  TRUST = "TRUST"  # Scipy's trust-constr
  IPOPT = "IPOPT"  # ipopt
  # INEXACTNEWTON="INEXACTNEWTON"
  EXTRAGRADIENT = "EXTRAGRADIENT"  # an extragradient-based solver
class IntegrationMethod(Enum):
  """Fixed-step ODE integration scheme for rolling out dynamics."""
  # NOTE(review): member values name control interpolation ("CONSTANT",
  # "LINEAR") rather than the scheme — presumably historical; confirm before
  # relying on .value anywhere.
  EULER = "CONSTANT"
  HEUN = "LINEAR"
  MIDPOINT = "MIDPOINT"
  RK4 = "RK4"
class QuadratureRule(Enum):
  """Quadrature rule used to approximate the running-cost integral."""
  TRAPEZOIDAL = "TRAPEZOIDAL"
  HERMITE_SIMPSON = "HERMITE_SIMPSON"
# Hyperparameters which change experiment results
@dataclass(eq=True, frozen=False)  # mutable: __post_init__ adds derived fields
class HParams:
  """The hyperparameters of the experiment. Modifying these should change the results"""
  seed: int = 2019  # RNG seed (numpy and jax)
  system: SystemType = SystemType.CANCERTREATMENT  # environment to control
  optimizer: OptimizerType = OptimizerType.SHOOTING  # trajectory-optimization scheme
  nlpsolver: NLPSolverType = NLPSolverType.IPOPT  # backend NLP solver
  integration_method: IntegrationMethod = IntegrationMethod.HEUN
  quadrature_rule: QuadratureRule = QuadratureRule.TRAPEZOIDAL
  max_iter: int = 1000  # maxiter for NLP solver (usually 1000)
  intervals: int = 1  # used by COLLOCATION and SHOOTING
  controls_per_interval: int = 100  # used by SHOOTING
  fbsm_intervals: int = 1000  # used by FBSM
  # --- dataset generation for system identification ---
  sampling_approach: SamplingApproach = SamplingApproach.RANDOM_WALK
  train_size: int = 100  # num trajectories per dataset
  val_size: int = 3
  test_size: int = 3
  sample_spread: float = 0.05  # control-sampling spread (fraction of control range)
  start_spread: float = 0.1  # start-state perturbation scale
  noise_level: float = 0.01 * 0.  # observation noise scale; currently disabled (== 0.0)
  to_smooth: bool = False  # smooth sampled control trajectories
  # --- neural-ODE training ---
  learning_rate: float = 0.001
  minibatch_size: int = 16
  num_epochs: int = 10_001
  num_experiments: int = 1  # num datasets
  loss_recording_frequency: int = 10  # 1000
  plot_progress_frequency: int = 10  # 10_000
  early_stop_threshold: int = 30  # 30_000 # 70 for cartpole, 1 for cancertreatment
  early_stop_check_frequency: int = 20  # 1000
  hidden_layers: Tuple[int, int] = (50, 50)  # (100, 100)
  # --- end-to-end / extragradient settings ---
  num_unrolled: int = 5
  eta_x: float = 1e-1
  eta_lmbda: float = 1e-3
  adam_lr: float = 1e-4
  def __post_init__(self):
    """Derive step counts, sizes and the PRNG key from the chosen system."""
    # Collocation places exactly one control per interval
    if self.optimizer == OptimizerType.COLLOCATION:
      self.controls_per_interval = 1
    # NOTE(review): presumably extragradient needs more iterations to converge
    if self.nlpsolver == NLPSolverType.EXTRAGRADIENT:
      self.max_iter *= 10
    # For convenience, record number of steps and stepsize
    system = self.system()
    self.num_steps = self.intervals * self.controls_per_interval
    self.stepsize = system.T / self.num_steps
    self.key = jax.random.PRNGKey(self.seed)
    self.state_size = system.x_0.shape[0]
    # bounds stacks state rows then control rows, so the remainder is controls
    self.control_size = system.bounds.shape[0] - self.state_size
    # Fix the minibatch size if we're working with small datasets
    self.minibatch_size = min([self.minibatch_size, self.train_size, self.val_size, self.test_size])
@dataclass(eq=True, frozen=False)
class Config:
  """Secondary configurations that should not change experiment results
  and should be largely used for debugging"""
  verbose: bool = True
  """Verbose mode; default to `True`"""
  jit: bool = True
  """Enable [`@jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions) compilation; default to `True`"""
  plot: bool = True
  """Plot progress during (and results after) the experiment; default to `True`"""
  pretty_plotting: bool = True
  """Only plot the true trajectory, ignoring the solver state output"""
  load_params_if_saved: bool = True
  figsize: Tuple[float, float] = (8, 6)  # default matplotlib figure size
  file_extension: str = 'png'  # plot format: pdf, pgf, png
| 4,564 | 34.115385 | 150 | py |
myriad | myriad-main/myriad/study_scripts.py | # (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import pickle as pkl
from jax.config import config
from pathlib import Path
from myriad.defaults import param_guesses
from myriad.neural_ode.create_node import NeuralODE
from myriad.experiments.mle_sysid import run_mle_sysid
from myriad.experiments.node_mle_sysid import run_node_mle_sysid
from myriad.trajectory_optimizers import get_optimizer
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.systems import get_name
from myriad.useful_scripts import run_trajectory_opt, run_node_trajectory_opt
from myriad.utils import get_state_trajectory_and_cost
config.update("jax_enable_x64", True)
###############
# Noise study #
###############
def study_noise(hp, cfg, experiment_string='mle_sysid'):
  """Study how observation noise in the sysid dataset affects planning quality.

  Runs system identification at several noise levels, re-plans with each
  identified model, pickles the resulting costs/defects, and saves a
  cost-vs-noise plot (PGF). Finally plans once with the default parameter
  guess for comparison.

  Side effects: mutates hp (num_experiments, noise_level) and switches
  matplotlib to the "pgf" backend for the rest of the process.
  """
  # Parametric, ML
  noise_levels = [0.0, 0.001, 0.01, 0.1, 0.2, 0.5, 1., 2., 5.]
  param_path = f'params/{hp.system.name}/{experiment_string}/'
  plot_path = f'plots/{hp.system.name}/{experiment_string}/'
  hp.num_experiments = 1
  # Run the sysid
  for noise_level in noise_levels:
    hp.noise_level = noise_level
    if experiment_string == 'mle_sysid':
      run_mle_sysid(hp, cfg)
    elif experiment_string == 'node_mle_sysid':
      run_node_mle_sysid(hp, cfg)
    else:
      raise Exception("Didn't recognize experiment string")
  # Make the loss vs noise plot
  costs = []
  defects = []
  for noise_level in noise_levels:
    # NOTE(review): '10_3_3' presumably encodes train/val/test sizes used when
    # the params were saved — confirm it matches hp.{train,val,test}_size
    param_name = f'noise_{noise_level}_smoothed_{hp.to_smooth}_10_3_3'
    if experiment_string == 'mle_sysid':
      c, d = run_trajectory_opt(hp, cfg, params_path=param_path + param_name + '.p')
    elif experiment_string == 'node_mle_sysid':
      c, d = run_node_trajectory_opt(hp, cfg, params_path=param_path + param_name + '_exp_0.p')
    else:
      raise Exception("Unknown experiment string")
    costs.append(c)
    defects.append(d)
  # Persist results so the plot can be regenerated without rerunning
  cd_path = f'costs_and_defects/{hp.system.name}/{experiment_string}/'
  Path(cd_path).mkdir(parents=True, exist_ok=True)
  pkl.dump(noise_levels, open(cd_path + 'noise_levels', 'wb'))
  pkl.dump(costs, open(cd_path + 'costs', 'wb'))
  pkl.dump(defects, open(cd_path + 'defects', 'wb'))
  # Switch to the LaTeX-friendly PGF backend for the summary figure
  matplotlib.use("pgf")
  matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
  })
  plt.rcParams["figure.figsize"] = (3.7, 3.1)
  # Get the cost of the truly optimal trajectory
  system = hp.system()
  optimizer = get_optimizer(hp, cfg, system)
  solution = optimizer.solve()
  _, optimal_cost = get_state_trajectory_and_cost(hp, system, system.x_0, solution['u'])
  nl = pkl.load(open(cd_path + 'noise_levels', 'rb'))
  c = pkl.load(open(cd_path + 'costs', 'rb'))
  d = pkl.load(open(cd_path + 'defects', 'rb'))
  plt.plot(nl, c)
  plt.xlabel('noise level')
  plt.ylabel('cost')
  # Dashed reference line: cost of the true optimal trajectory
  plt.axhline(optimal_cost, color='grey', linestyle='dashed')
  plt.xlim(0, 5)
  plt.grid()
  if d[0] is not None:
    plt.plot(nl, d)
  title = get_name(hp)
  if title is not None:
    plt.suptitle(title)
  plt.savefig(plot_path + 'aanoise_study.pgf', bbox_inches='tight')
  plt.close()
  # Baseline: plan with the default parameter guess instead of identified params
  params_path = f'params/{hp.system.name}/{experiment_string}/guess.p'
  pkl.dump(param_guesses[hp.system], open(params_path, 'wb'))
  c, d = run_trajectory_opt(hp, cfg, params_path=params_path)
  print("c, d", c, d)
def load_system_and_us(hp, cfg, experiment_string, experiment_number):
  """Return a system, its dynamics function, and reference optimal controls.

  experiment_string:
    None  -> the true system and its true dynamics;
    'mle' or 'e2e' -> a NodeSystem wrapping saved NODE params, and its
      learned (parametrized) dynamics.

  NOTE(review): the returned `us` always come from solving the OCP on the
  *true* system — the learned-model solve is commented out below; confirm
  this is intended for the vector-field study.
  """
  system = hp.system()
  optimizer = get_optimizer(hp, cfg, system)
  solution = optimizer.solve()
  us = solution['u']
  if experiment_string is None:
    pass
    learned_dynamics = system.dynamics  # "learned" is just the true dynamics here
  elif experiment_string == 'mle' or experiment_string == 'e2e':
    params_path = f'params/{hp.system.name}/node_{experiment_string}_sysid/'
    if experiment_string == 'mle':
      # MLE checkpoints are keyed by noise/smoothing/dataset sizes/experiment
      params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
                    f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.p'
    else:
      params_name = 'node_e2e.p'
    node = NeuralODE(hp, cfg)
    node.load_params(params_path + params_name)
    print("loaded params", params_path + params_name)
    system = NodeSystem(node, node.system)
    def learned_dynamics(x, u, t):
      # Evaluate the neural-ODE dynamics with the loaded parameters
      return system.parametrized_dynamics(node.params, x, u, t)
  else:
    raise Exception("Didn't recognize the experiment string")
  return system, learned_dynamics, us
def study_vector_field(hp, cfg, experiment_string=None, experiment_number=0, title=''):
  """Plot the dynamics vector field (true vs learned) over time x state,
  overlaid with the true optimal state trajectory, and save as a figure.

  NOTE(review): the `title` parameter is unconditionally overwritten below,
  so callers cannot actually set a custom title; confirm whether that is
  intended. Only scalar-state systems are plottable this way (1-D y axis).

  Side effect: switches matplotlib to the "pgf" backend for the process.
  """
  matplotlib.use("pgf")
  matplotlib.rcParams.update({
    "pgf.texsystem": "pdflatex",
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
  })
  plt.rcParams["figure.figsize"] = (4, 3.3)
  # Arrow-grid resolution: time (horizontal) x state (vertical)
  num_horizontal_arrows = 21
  num_vertical_arrows = 15
  system, true_dynamics, us = load_system_and_us(hp, cfg, None, 0)
  _, learned_dynamics, _ = load_system_and_us(hp, cfg, experiment_string, 0)
  opt_xs, c = get_state_trajectory_and_cost(hp, system, system.x_0, us)
  # TODO: maybe want to also plot the integrated trajectory
  ts_x = jnp.linspace(0, system.T, opt_xs.shape[0])
  ts_u = jnp.linspace(0, system.T, us.shape[0])
  state_bounds = system.bounds[:hp.state_size].squeeze()
  plt.figure(figsize=(4, 4))
  if experiment_string is None:
    title = 'True Dynamics'
  else:
    title = 'Learned Dynamics'
  plus_title = get_name(hp)
  plt.suptitle(title + ' – ' + plus_title)
  # Grid of (time, state) points at which to draw dynamics arrows
  xs, ys = jnp.meshgrid(jnp.linspace(0, system.T, num_horizontal_arrows),
                        jnp.linspace(state_bounds[0], state_bounds[1], num_vertical_arrows))
  xs = xs.flatten()
  ys = ys.flatten()
  # Interpolate the optimal controls onto the arrow-grid times
  times_to_evaluate_at = jnp.linspace(0, system.T, num_horizontal_arrows)
  interpolated_us = jnp.interp(times_to_evaluate_at, ts_u, us.flatten())
  all_true_dynamics = []
  all_learned_dynamics = []
  for i, y in enumerate(ys):
    # i % num_horizontal_arrows picks the control at this grid column's time
    all_true_dynamics.append(true_dynamics(y, interpolated_us[i % num_horizontal_arrows],
                                           0))  # dynamics are time independent, so can put 0 here
    all_learned_dynamics.append(learned_dynamics(y, interpolated_us[i % num_horizontal_arrows], 0))
  vec_true_y = jnp.array(all_true_dynamics)
  vec_learned_y = jnp.array(all_learned_dynamics)
  # Horizontal component is constant: arrows show dx/dt along the time axis
  vec_x = jnp.ones_like(vec_true_y)
  plt.quiver(xs, ys, vec_x, vec_true_y, angles='xy', width=0.003, alpha=0.9, color='blue', label='True Dynamics')
  plt.quiver(xs, ys, vec_x, vec_learned_y, angles='xy', width=0.003, alpha=0.9, color='orange',
             label='Learned Dynamics')
  # Also plot the true dynamics
  plt.plot(ts_x, opt_xs, label='True Trajectory', lw=1, ls='--', c='grey')
  plt.grid()
  plt.ylim(state_bounds)
  plt.xlim((0., system.T))
  plt.legend(loc='upper right', fontsize=8, title_fontsize=10)
  plt.ylabel('state (x)')
  if experiment_string is None:
    plot_path = f'plots/{hp.system.name}/true/'
  else:
    plot_path = f'plots/{hp.system.name}/node_{experiment_string}_sysid/'
  Path(plot_path).mkdir(parents=True, exist_ok=True)
  plt.tight_layout()
  plt.savefig(plot_path + f'{hp.system.name}_{experiment_string}_vector_study.{cfg.file_extension}',
              bbox_inches='tight')
  plt.close()
| 7,920 | 32.706383 | 113 | py |
myriad | myriad-main/myriad/__init__.py | """
This library implements in [JAX](https://github.com/google/jax) various real-world environments,
neural ODEs for system identification, and trajectory optimizers for solving the optimal control problem.
"""
# from .config import *
# from .nlp_solvers import *
# from .trajectory_optimizers import *
# from .plotting import *
# from .utils import *
# Exclude from documentation: pdoc reads this mapping and omits any key set
# to False (repeated inherited `require_adj` attributes and internal modules).
__pdoc__ = {'trajectory_optimizers.IndirectMethodOptimizer.require_adj': False,
            'trajectory_optimizers.TrajectoryOptimizer.require_adj': False,
            'trajectory_optimizers.TrapezoidalCollocationOptimizer.require_adj': False,
            'trajectory_optimizers.HermiteSimpsonCollocationOptimizer.require_adj': False,
            'trajectory_optimizers.MultipleShootingOptimizer.require_adj': False,
            'trajectory_optimizers.IndirectMethodOptimizer.solve': False,
            'custom_types': False,
            'defaults': False,
            'probing_numerical_instability': False,
            'study_scripts': False,
            }
| 1,043 | 40.76 | 105 | py |
myriad | myriad-main/myriad/neural_ode/data_generators.py | # # (c) 2021 Nikolaus Howe
# from __future__ import annotations # for nicer typing
#
# import typing
#
# if typing.TYPE_CHECKING:
# pass
# import jax
# import jax.numpy as jnp
# import numpy as np
# import time
#
# from typing import Optional
#
# from myriad.config import Config, HParams, SamplingApproach
# from myriad.custom_types import Controls, Dataset
# from myriad.utils import integrate_time_independent_in_parallel, smooth
#
#
# def generate_dataset(hp: HParams, cfg: Config,
# given_us: Optional[Controls] = None) -> Dataset:
# system = hp.system()
# hp.key, subkey = jax.random.split(hp.key)
#
# # Generate |total dataset size| control trajectories
# total_size = hp.train_size + hp.val_size + hp.test_size
#
# # TODO: fix what happens in case of infinite bounds
# u_lower = system.bounds[hp.state_size:, 0]
# u_upper = system.bounds[hp.state_size:, 1]
# x_lower = system.bounds[:hp.state_size, 0]
# x_upper = system.bounds[:hp.state_size, 1]
# if jnp.isinf(u_lower).any() or jnp.isinf(u_upper).any():
# raise Exception("infinite control bounds, aborting")
# if jnp.isinf(x_lower).any() or jnp.isinf(x_upper).any():
# raise Exception("infinite state bounds, aborting")
#
# spread = (u_upper - u_lower) * hp.sample_spread
#
# ########################
# # RANDOM WALK CONTROLS #
# ########################
# if hp.sampling_approach == SamplingApproach.RANDOM_WALK:
# # Make all the first states
# all_start_us = np.random.uniform(u_lower, u_upper, (total_size, 1, hp.control_size))
# all_us = all_start_us
#
# for i in range(hp.num_steps):
# next_us = np.random.normal(0, spread, (total_size, 1, hp.control_size))
# rightmost_us = all_us[:, -1:, :]
# together = np.clip(next_us + rightmost_us, u_lower, u_upper)
# all_us = np.concatenate((all_us, together), axis=1)
#
# # elif hp.sampling_approach == SamplingApproach.RANDOM_GRID:
# # single_ascending_controls = np.linspace(u_lower, u_upper, hp.num_steps + 1)
# # parallel_ascending_controls = single_ascending_controls[np.newaxis].repeat(total_size)
# # assert parallel_ascending_controls.shape == ()
# # NOTE: we could also generate data by exhaustively considering every combination
# # of state-control pair up to some discretization. This might just solve
# # the problem. Unfortunately, curse of dimensionality is real.
# # IDEA: let's try doing this on the CANCERTREATMENT domain, and see whether
# # this is enough to help neural ODE figure out what is going on
# # at the very start of planning
#
# ###########################
# # UNIFORM RANDOM CONTROLS #
# ###########################
# elif hp.sampling_approach == SamplingApproach.UNIFORM:
# all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
# minval=u_lower, maxval=u_upper) * 0.75 # TODO
# # TODO: make sure having added control size everywhere didn't break things
# #########################
# # AROUND GIVEN CONTROLS #
# #########################
# elif hp.sampling_approach == SamplingApproach.TRUE_OPTIMAL or hp.sampling_approach == SamplingApproach.CURRENT_OPTIMAL:
# if given_us is None:
# print("Since you didn't provide any controls, we'll use a uniform random guess")
# all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
# minval=u_lower, maxval=u_upper) * 0.75 # TODO
# # raise Exception("If sampling around a control trajectory, need to provide that trajectory.")
#
# else:
# noise = jax.random.normal(key=subkey, shape=(total_size, hp.num_steps + 1, hp.control_size)) \
# * (u_upper - u_lower) * hp.sample_spread
# all_us = jnp.clip(given_us[jnp.newaxis].repeat(total_size, axis=0).squeeze() + noise.squeeze(), a_min=u_lower,
# a_max=u_upper)
#
# else:
# raise Exception("Unknown sampling approach, please choose among", SamplingApproach.__dict__['_member_names_'])
#
# print("initial controls shape", all_us.shape)
#
# # Smooth the controls if so desired
# if hp.to_smooth:
# start = time.time()
# all_us = smooth(all_us, 2)
# end = time.time()
# print(f"smoothing took {end - start}s")
#
# # TODO: I really dislike having to have this line below. Is there no way to remove it?
# # Make the controls guess smaller so our dynamics don't explode
# # all_us *= 0.1
#
# # Generate the start states
# start_states = system.x_0[jnp.newaxis].repeat(total_size, axis=0)
#
# # Generate the states from applying the chosen controls
# if hp.start_spread > 0.:
# hp.key, subkey = jax.random.split(hp.key)
# start_states += jax.random.normal(subkey,
# shape=start_states.shape) * hp.start_spread # TODO: explore different spreads
# start_states = jnp.clip(start_states, a_min=x_lower, a_max=x_upper)
#
# # Generate the corresponding state trajectories
# _, all_xs = integrate_time_independent_in_parallel(system.dynamics, start_states,
# all_us, hp.stepsize, hp.num_steps,
# hp.integration_method)
#
# # Noise up the state observations
# hp.key, subkey = jax.random.split(hp.key)
# all_xs += jax.random.normal(subkey, shape=all_xs.shape) * (x_upper - x_lower) * hp.noise_level
# all_xs = jnp.clip(all_xs, a_min=x_lower, a_max=x_upper)
#
# # Stack the states and controls together
# xs_and_us = jnp.concatenate((all_xs, all_us), axis=2)
#
# if cfg.verbose:
# print("Generating training control trajectories between bounds:")
# print(" u lower", u_lower)
# print(" u upper", u_upper)
# print("of shapes:")
# print(" xs shape", all_xs.shape)
# print(" us shape", all_us.shape)
# print(" together", xs_and_us.shape)
#
# assert np.isfinite(xs_and_us).all()
# return xs_and_us
#
#
# def yield_minibatches(hp: HParams, total_size: int, dataset: Dataset) -> iter:
# assert total_size <= dataset.shape[0]
#
# tmp_dataset = np.random.permutation(dataset)
# num_minibatches = total_size // hp.minibatch_size + (1 if total_size % hp.minibatch_size > 0 else 0)
#
# for i in range(num_minibatches):
# n = np.minimum((i + 1) * hp.minibatch_size, total_size) - i * hp.minibatch_size
# yield tmp_dataset[i * hp.minibatch_size: i * hp.minibatch_size + n]
#
#
# def sample_x_init(hp: HParams, n_batch: int = 1) -> np.ndarray:
# s = hp.system()
# res = np.random.uniform(s.bounds[:, 0], s.bounds[:, 1], (n_batch, hp.state_size + hp.control_size))
# res = res[:, :hp.state_size]
# assert np.isfinite(res).all()
# return res
#
#
# if __name__ == "__main__":
# hp = HParams()
# cfg = Config()
# dset = generate_dataset(hp, cfg)
# # dset = np.random.rand(100, 5)
# # hp = HParams()
# # for e in yield_minibatches(hp, 91, dset):
# # print(e.shape)
# # pass
# # print(SamplingApproach.__dict__['_member_names_'])
# # hp = HParams()
# # n_batch = 10
# # res = sample_x_init(hp, n_batch)
# # print(res.shape)
# #
# # s = hp.system()
# # lower = s.bounds[:, 0]
# # upper = s.bounds[:, 1]
# # res2 = np.random.uniform(s.bounds[:, 0],
# # s.bounds[:, 1],
# # (n_batch,
# # hp.state_size + hp.control_size)) # keeping as is, though doesn't match our cartpole limits
# # res2 = res2[:, :hp.state_size]
# # print(res2.shape)
#
# # TODO: make a data generator, but with the optimal trajectories instead of random controls
# # def populate_data(hp: HParams, cfg: Config, system_params,
# # n_train, n_val, n_test, seed=0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# # np.random.seed(seed)
# # n_data = n_train + n_val + n_test
# # x_init = sample_x_init(hp=hp, n_batch=n_data)
# #
# # system_params = {"x_0": x_init, **system_params}
# #
# # system = hp.system(system_params)
# # optimizer = get_optimizer(hp, cfg, system)
# # solution = optimizer.solve()
# #
# # x = solution['x']
# # u = solution['u']
# # if hp.order == IntegrationOrder.QUADRATIC and hp.optimizer == OptimizerType.COLLOCATION:
# # x_mid = solution['x_mid']
# # u_mid = solution['u_mid']
# # if optimizer.require_adj:
# # adj = solution['adj']
# #
# # num_steps = hp.intervals * hp.controls_per_interval
# # stepsize = system.T / num_steps
# #
# # print("the shapes of x and u are", x.shape, u.shape)
# #
# # #########
# #
# # tau = np.cat((x, u), dim=2).transpose(0, 1)
# # print("tau is", tau.shape)
# # print("now splitting into train, val, and test")
# #
# # train_data = tau[:n_train]
# # val_data = tau[n_train:n_train + n_val]
# # test_data = tau[n_train + n_val:]
# #
# # return train_data, val_data, test_data
| 8,950 | 39.502262 | 124 | py |
myriad | myriad-main/myriad/neural_ode/node_training.py | # (c) Nikolaus Howe 2021
from __future__ import annotations
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
from jax.flatten_util import ravel_pytree
from tqdm import trange
from typing import Callable, Optional, Tuple
from myriad.custom_types import Batch, Controls, Cost, Epoch
from myriad.useful_scripts import plan_and_plot, plot_trajectory
from myriad.utils import integrate_time_independent, integrate_time_independent_in_parallel, plan_with_node_model, \
yield_minibatches
# Perform node.hp.num_epochs of minibatched gradient descent.
# Store losses in the "losses" dict. Return the termination epoch.
def train(node: NeuralODE,
          start_epoch: Epoch = 0,
          also_record_planning_loss: bool = False,
          save_as: Optional[str] = None,
          extension: Optional[str] = 'png') -> Epoch:
  """Train the node's dynamics network by minibatched gradient descent.

  Runs up to node.hp.num_epochs epochs, periodically recording losses into
  node.losses, plotting progress, and early-stopping on validation loss.
  On exit, node.params holds the best-validation parameters seen.

  Returns the epoch at which training terminated.
  """
  # Multi-step prediction MSE: integrate the net's dynamics from each
  # trajectory's start state under the recorded controls, compare to the
  # recorded states.
  @jax.jit
  def loss(params: hk.Params, minibatch: Batch) -> Cost:
    # assert jnp.isfinite(minibatch)  # had to comment out because of jitting
    @jax.jit
    def apply_net(x, u):
      net_input = jnp.append(x, u)
      return node.net.apply(params, net_input)
    # Extract controls and true state trajectory
    controls = minibatch[:, :, node.hp.state_size:]
    true_states = minibatch[:, :, :node.hp.state_size]
    # Extract starting states
    start_states = true_states[:, 0, :]
    # Use neural net to predict state trajectory
    _, predicted_states = integrate_time_independent_in_parallel(apply_net, start_states,
                                                                 controls, node.stepsize, node.num_steps,
                                                                 node.hp.integration_method)
    return jnp.mean((predicted_states - true_states) * (predicted_states - true_states))  # MSE
  # Gradient descent on the loss function in scope
  @jax.jit
  def update(params: hk.Params, opt_state: optax.OptState, minibatch: Batch) -> Tuple[hk.Params, optax.OptState]:
    grads = jax.grad(loss)(params, minibatch)
    updates, opt_state = node.opt.update(grads, opt_state)
    new_params = optax.apply_updates(params, updates)
    return new_params, opt_state
  best_val_loss = 10e10  # sentinel: any real validation loss beats this
  best_params = None
  epoch = None
  count = 0  # epochs elapsed without validation improvement
  print("check loss frequency is", node.hp.loss_recording_frequency)
  for epoch in trange(node.hp.num_epochs):
    # "Overall epoch" counts training points seen across experiments
    overall_epoch = start_epoch + epoch * node.hp.train_size
    if epoch % node.hp.loss_recording_frequency == 0:
      # As a side-effect, this function also fills the loss lists
      train_loss, validation_loss = calculate_losses(node, loss, overall_epoch, also_record_planning_loss)
      print(epoch, train_loss, validation_loss)
    # Plot progress so far
    if epoch % node.hp.plot_progress_frequency == 0:
      if node.cfg.plot and save_as is not None:
        print("saving progress plot :)")
        plot_progress(node, overall_epoch, save_as, extension)
    # Early stopping
    # NOTE(review): relies on a recording epoch preceding every check epoch
    # so validation_loss is bound here (true for defaults 10 and 20) — confirm
    # if the frequencies change.
    if epoch % node.hp.early_stop_check_frequency == 0:
      if count >= node.hp.early_stop_threshold:
        print(f"Stopping early at epoch {epoch}. Threshold was {node.hp.early_stop_threshold} epochs.")
        break
      # Update early stopping counts/values
      if validation_loss >= best_val_loss:
        count += node.hp.early_stop_check_frequency
      else:
        best_val_loss = validation_loss
        best_params = hk.data_structures.to_immutable_dict(node.params)
        count = 0
    # Descend on entire dataset, in minibatches
    # NOTE: when we add new data to the train set, we still only use the same
    # number of minibatches to count as an "epoch"
    for mb in yield_minibatches(node.hp, node.hp.train_size, node.train_data):
      node.params, node.opt_state = update(node.params, node.opt_state, mb)
  # Save the best params (epoch 0 always records and checks, so best_params
  # has been set at least once before we get here)
  node.params = best_params
  if epoch and node.cfg.verbose:
    print("Trained for {} epochs on dataset of size {}".format(epoch, node.hp.train_size))
  return epoch
def calculate_losses(node: NeuralODE,
                     loss_fn: Callable[[hk.Params, Batch], float],
                     overall_epoch: int,
                     also_record_planning_losses: bool = False) -> Tuple[Cost, Cost]:
  """Measure current train/validation losses and record them on `node.losses`.

  Also records the loss on the true optimal trajectory and, when requested,
  the planning cost / constraint violation / divergences from the optimal
  controls and states. Returns the (train_loss, validation_loss) pair.
  """
  records = node.losses
  # Timestamp this measurement with the overall training-point count
  records['ts'].append(overall_epoch)
  train_mb = next(yield_minibatches(node.hp, node.hp.train_size, node.train_data))
  train_loss = loss_fn(node.params, train_mb)
  records['train_loss'].append(train_loss)
  val_mb = next(yield_minibatches(node.hp, node.hp.val_size, node.validation_data))
  val_loss = loss_fn(node.params, val_mb)
  records['validation_loss'].append(val_loss)
  # Loss on the true optimal trajectory (with a batch dimension prepended)
  records['loss_on_opt'].append(loss_fn(node.params, node.true_x_and_u_opt[jnp.newaxis]))
  if also_record_planning_losses:
    control_cost, defect, u = calculate_planning_loss(node)
    records['control_costs'].append(control_cost)
    if defect is not None:
      records['constraint_violation'].append(defect)
    # How far the planned controls/states are from the known optimal ones
    records['divergence_from_optimal_us'].append(divergence_from_optimal_us(node, u))
    records['divergence_from_optimal_xs'].append(divergence_from_optimal_xs(node, u))
  return train_loss, val_loss
def calculate_planning_loss(node: NeuralODE) -> Tuple[Cost, Optional[Cost], Controls]:
  """Plan with the learned model, then score the plan under the true dynamics.

  Returns (objective cost, terminal-constraint violation or None, planned controls).
  """
  # Controls chosen by planning with the learned (NODE) model
  _, us = plan_with_node_model(node)
  # Roll those controls through the *true* system dynamics
  _, states = integrate_time_independent(node.system.dynamics, node.system.x_0, us,
                                         node.stepsize, node.num_steps,
                                         node.hp.integration_method)
  # The objective only consumes states at shooting-interval boundaries
  boundary_states = states[::node.hp.controls_per_interval]
  flat_vars, _ = ravel_pytree((boundary_states, us))
  objective_cost = node.optimizer.objective(flat_vars)
  # Terminal-constraint violation (norm of the final-state gap), if a target exists
  violation = None
  if node.system.x_T is not None:
    violation = jnp.linalg.norm(node.system.x_T - states[-1])
  return objective_cost, violation, us
# TODO: jit
# This is the "outer" loss of the problem, one of the main things we care about.
# Another "outer" loss, which gives a more RL flavour,
# is the integral cost of applying the controls in the true dynamics,
# plus the final constraint violation (if present) when applying those controls in the true dynamics.
def divergence_from_optimal_us(node: NeuralODE, us: Controls) -> Cost:
  """Mean squared error between `us` and the precomputed optimal controls."""
  assert len(us) == len(node.true_opt_us)
  diff = us - node.true_opt_us
  return jnp.mean(diff * diff)
def divergence_from_optimal_xs(node: NeuralODE, us: Controls) -> Cost:
  """Mean squared error between the state trajectory that `us` induces under
  the true dynamics and the precomputed optimal state trajectory."""
  _, xs = integrate_time_independent(node.system.dynamics, node.system.x_0, us,
                                     node.stepsize, node.num_steps,
                                     node.hp.integration_method)
  assert len(xs) == len(node.true_opt_xs)
  diff = xs - node.true_opt_xs
  return jnp.mean(diff * diff)
def plot_progress(node, trained_for, save_as, extension, also_plan=False):
  """Save snapshot plots of the model's current predictions.

  Plots predictions on the optimal, a training, and a validation trajectory;
  when `also_plan` is set, additionally plots the result of planning with the
  learned model. Output paths are `save_as` + epoch count + a suffix.
  """
  base = save_as + str(trained_for)
  plot_trajectory(node,
                  optimal=True,
                  title="Prediction on optimal trajectory after {} epochs".format(trained_for),
                  save_as=base + f"_im_opt.{extension}")
  plot_trajectory(node,
                  optimal=False,
                  title="Prediction on train trajectory after {} epochs".format(trained_for),
                  save_as=base + f"_im_train_rand.{extension}")
  plot_trajectory(node,
                  optimal=False,
                  validation=True,
                  title="Prediction on validation trajectory after {} epochs".format(trained_for),
                  save_as=base + f"_im_val_rand.{extension}")
  if also_plan:
    # Also exercise the learned model as a planner
    plan_and_plot(node,
                  title="Planning after {} epochs".format(trained_for),
                  save_as=base + f"_plan.{extension}")
| 8,523 | 41.40796 | 116 | py |
myriad | myriad-main/myriad/neural_ode/create_node.py | # (c) 2021 Nikolaus Howe
from pathlib import Path
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import pickle as pkl
from dataclasses import dataclass
from jax import config
from typing import Optional
from myriad.config import HParams, Config, SamplingApproach
from myriad.trajectory_optimizers import get_optimizer
from myriad.utils import get_state_trajectory_and_cost, generate_dataset, yield_minibatches
config.update("jax_enable_x64", True)
def make_empty_losses():
  """Return a fresh loss-history dict: one empty list per tracked metric.

  A new dict (with new lists) is built on every call, so histories of
  different NeuralODE instances never share state.
  """
  metrics = ('ts', 'train_loss', 'validation_loss', 'loss_on_opt',
             'control_costs', 'constraint_violation',
             'divergence_from_optimal_us', 'divergence_from_optimal_xs')
  return {metric: [] for metric in metrics}
##############################
# Neural ODE for opt control #
##############################
@dataclass
class NeuralODE(object):
  """A neural ODE surrogate model of a controlled dynamical system.

  On construction (see `__post_init__`), this loads or computes the true
  optimal trajectories for the configured system, generates train/validation/
  test datasets, and initializes a Haiku MLP plus an optax Adam optimizer.
  """
  hp: HParams                                # hyperparameters (system, sizes, learning rate, ...)
  cfg: Config                                # extra configuration (verbosity, plotting, ...)
  key: jnp.ndarray = jax.random.PRNGKey(42)  # PRNG key; split on each use
  mle: bool = True                           # if True, load/compute the true optimal trajectories
  dataset: Optional[jnp.ndarray] = None      # NOTE(review): appears unused in this class — confirm
  def __post_init__(self) -> None:
    """Set up system, optimal trajectories, datasets, network, and optimizer."""
    self.system = self.hp.system()
    self.num_steps = self.hp.intervals * self.hp.controls_per_interval
    self.stepsize = self.system.T / self.num_steps # Segment length
    # Get the true optimal controls and corresponding trajectory
    # real_solver = self.hp.nlpsolver
    # self.hp.nlpsolver = NLPSolverType.SLSQP
    if self.mle:
      # Try to load the optimal trajectories. If they don't exist, solve for them ourselves.
      opt_path = f'datasets/{self.hp.system.name}/optimal_trajectories/'
      Path(opt_path).mkdir(parents=True, exist_ok=True)
      # Cache key: every setting that changes the optimal solution
      opt_name = f'{self.hp.intervals}_{self.hp.controls_per_interval}_{self.hp.optimizer.name}_' \
                 f'{self.hp.integration_method.name}_{self.hp.quadrature_rule.name}'
      try:
        self.true_opt_us = jnp.array(pkl.load(open(f'{opt_path + opt_name}_us', 'rb')))
        self.true_opt_xs = jnp.array(pkl.load(open(f'{opt_path + opt_name}_xs', 'rb')))
      except FileNotFoundError as e:
        # Cache miss: solve the trajectory-optimization problem and cache the result
        print("Didn't find pre-saved optimal trajectories, so calculating our own.")
        self.optimizer = get_optimizer(self.hp, self.cfg, self.system)
        self.optimal_solution = self.optimizer.solve()
        self.true_opt_us = self.optimal_solution['u']
        self.true_opt_xs = self.optimal_solution['x']
        pkl.dump(self.true_opt_us, open(f'{opt_path + opt_name}_us', 'wb'))
        pkl.dump(self.true_opt_xs, open(f'{opt_path + opt_name}_xs', 'wb'))
      # TODO: think about quadratic case
      # _, self.true_opt_xs = self.integrate(self.true_opt_us)
      # print("getting state traj and cost")
      # Re-integrate the optimal controls through the true dynamics; this
      # overwrites the loaded/solved states and also yields the true cost.
      self.true_opt_xs, self.true_opt_cost = get_state_trajectory_and_cost(
        self.hp, self.system, self.system.x_0, self.true_opt_us)
      # Concatenate along the feature axis: one row per timestep of [x, u]
      self.true_x_and_u_opt = jnp.concatenate([self.true_opt_xs, self.true_opt_us], axis=1)
      # self.hp.nlpsolver = real_solver
    # Create a best guess which we'll update as we plan
    self.best_guess_us = None
    self.best_guess_us_cost = None
    # Record the important info about this node (used to name saved artifacts)
    self.info = f"{self.hp.learning_rate}" \
                f"_{self.hp.train_size}" \
                f"_{self.hp.val_size}" \
                f"_{self.hp.test_size}" \
                f"_start_spread_{self.hp.start_spread}" \
                f"_{self.hp.minibatch_size}" \
                f"_({'_'.join(str(layer) for layer in self.hp.hidden_layers)})" \
                f"_{self.hp.sample_spread}" \
                f"_{self.hp.noise_level}"
    # Generate the (initial) dataset
    # NOTE(review): attribute is named `full_data` here, but `augment_datasets`
    # writes `full_dataset` and `load_dataset` writes `all_data` — confirm which
    # name downstream code actually reads.
    self.train_data, self.validation_data, self.test_data, self.full_data = self.make_datasets(first_time=True)
    # Initialize the parameters and optimizer state
    self.net = hk.without_apply_rng(hk.transform(self.net_fn))
    mb = next(yield_minibatches(self.hp, self.hp.train_size, self.train_data))
    print("node: params initialized with: ", mb[1, 1, :].shape)
    self.key, subkey = jax.random.split(self.key) # Always update the NODE's key
    self.params = self.net.init(subkey, mb[1, 1, :])
    self.opt = optax.adam(self.hp.learning_rate)
    self.opt_state = self.opt.init(self.params)
    self.losses = make_empty_losses()
    if self.cfg.verbose:
      print("node: minibatches are of shape", mb.shape)
      print("node: initialized network weights")
  # The neural net for the neural ode: a small and simple MLP
  def net_fn(self, x_and_u: jnp.array) -> jnp.array:
    """MLP with sigmoid activations; output size matches the state dimension."""
    the_layers = []
    for layer_size in self.hp.hidden_layers:
      the_layers.append(hk.Linear(layer_size))
      the_layers.append(jax.nn.sigmoid)
    the_layers.append(hk.Linear(len(self.system.x_0)))
    mlp = hk.Sequential(the_layers)
    return mlp(x_and_u) # will automatically broadcast over minibatches
  def save_params(self, filename: str) -> None:
    """Pickle the current network parameters to `filename`."""
    pkl.dump(self.params, open(filename, 'wb'))
  def load_params(self, params_pickle: str) -> None:
    """Load pickled parameters, migrating old Haiku layer names if needed.

    Raises FileNotFoundError if `params_pickle` does not exist.
    """
    try:
      temp_params = hk.data_structures.to_mutable_dict(pkl.load(open(params_pickle, 'rb')))
      print("loaded node params from file")
      # Older checkpoints used nested module names ('linear/~/linear');
      # rename them to the flat 'linear_N' scheme used by the current net.
      if 'linear/~/linear' in temp_params:
        temp_params['linear_1'] = temp_params['linear/~/linear']
        del temp_params['linear/~/linear']
      if 'linear/~/linear/~/linear' in temp_params:
        temp_params['linear_2'] = temp_params['linear/~/linear/~/linear']
        del temp_params['linear/~/linear/~/linear']
      self.params = hk.data_structures.to_immutable_dict(temp_params)
    except FileNotFoundError as e:
      raise e
  def load_dataset(self, file_path: str) -> None:
    """Load a pickled dataset and split it into train/validation/test.

    Raises FileNotFoundError if `file_path` does not exist.
    """
    try:
      dataset = pkl.load(open(file_path, 'rb'))
      dataset = jnp.array(dataset)
      self.train_data = dataset[:self.hp.train_size]
      self.validation_data = dataset[self.hp.train_size:self.hp.train_size + self.hp.val_size]
      self.test_data = dataset[self.hp.train_size + self.hp.val_size:]
      # NOTE(review): stores `all_data`, while __post_init__ stores `full_data`
      # and augment_datasets stores `full_dataset` — likely the same concept.
      self.all_data = dataset
    except FileNotFoundError as e:
      raise e
  def make_datasets(self, first_time=False):
    """Generate trajectories and split them into train/validation/test sets.

    When `first_time` is False, the new splits are appended to the existing
    ones, growing the datasets in place. Returns
    (train_data, validation_data, test_data, all_data) where `all_data` is
    only the newly generated batch.
    """
    # Generate the new data
    if self.hp.sampling_approach == SamplingApproach.CURRENT_OPTIMAL and self.best_guess_us is not None:
      all_data = generate_dataset(self.hp, self.cfg, given_us=self.best_guess_us)
    else:
      all_data = generate_dataset(self.hp, self.cfg)
    # Split the new data
    train_data = all_data[:self.hp.train_size]
    validation_data = all_data[self.hp.train_size:self.hp.train_size + self.hp.val_size]
    test_data = all_data[self.hp.train_size + self.hp.val_size:]
    # If not first time, add the new data to our existing dataset
    if not first_time:
      train_data = jnp.concatenate((self.train_data, train_data), axis=0)
      validation_data = jnp.concatenate((self.validation_data, validation_data), axis=0)
      test_data = jnp.concatenate((self.test_data, test_data), axis=0)
    if self.cfg.verbose:
      print("Generated training trajectories of shape", train_data.shape)
      print("Generated validation trajectories of shape", validation_data.shape)
      print("Generated test trajectories of shape", test_data.shape)
    return train_data, validation_data, test_data, all_data
  def augment_datasets(self):
    """Grow the train/validation/test sets with freshly generated trajectories."""
    # NOTE(review): writes `full_dataset`, whereas __post_init__ wrote
    # `full_data` — confirm intended attribute name.
    self.train_data, self.validation_data, self.test_data, self.full_dataset = self.make_datasets(first_time=False)
if __name__ == "__main__":
  # Smoke test: build a NeuralODE with default hyperparameters and print it.
  # NOTE(review): constructing NeuralODE triggers __post_init__, which may
  # solve for (or load cached) optimal trajectories and generate datasets.
  hp = HParams()
  cfg = Config()
  my_node = NeuralODE(hp, cfg)
  print("my_node", my_node)
| 7,428 | 40.044199 | 115 | py |
myriad | myriad-main/myriad/nlp_solvers/__init__.py | # (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import time
from cyipopt import minimize_ipopt
from scipy.optimize import minimize
from typing import Dict
from myriad.config import Config, HParams, NLPSolverType
from myriad.defaults import learning_rates
from myriad.utils import get_state_trajectory_and_cost
### Import your new nlp solver here ###
from myriad.nlp_solvers.extra_gradient import extra_gradient
def solve(hp: HParams, cfg: Config, opt_dict: Dict) -> Dict[str, jnp.ndarray]:
  """
  Use the solver indicated in the hyper-parameters to solve the constrained optimization problem.
  Args:
    hp: the hyperparameters
    cfg: the extra hyperparameters
    opt_dict: everything needed for the solve (objective, constraints, bounds, guess, unravel)
  Returns
    A dictionary with the optimal controls and corresponding states
    (and for quadratic interpolation schemes, the midpoints too)
  Raises:
    ValueError: if `hp.nlpsolver` is not a recognized NLPSolverType
  """
  def maybe_jit(f):
    # jit-compile objective/constraint functions only when requested
    return jax.jit(f) if cfg.jit else f
  _t1 = time.time()
  opt_inputs = {
    'fun': maybe_jit(opt_dict['objective']),
    'x0': opt_dict['guess'],
    'constraints': ({
      'type': 'eq',
      'fun': maybe_jit(opt_dict['constraints']),
      'jac': maybe_jit(jax.jacrev(opt_dict['constraints'])),
    }),
    'bounds': opt_dict['bounds'],
    'jac': maybe_jit(jax.grad(opt_dict['objective'])),
    'options': {"maxiter": hp.max_iter}
  }
  ### Add new nlp solvers to this list ###
  if hp.nlpsolver == NLPSolverType.EXTRAGRADIENT:
    opt_inputs['method'] = 'exgd'
    # Some systems have tuned extragradient learning rates; merge them in
    if hp.system in learning_rates:
      opt_inputs['options'] = {**opt_inputs['options'], **learning_rates[hp.system]}
    solution = extra_gradient(**opt_inputs)
  elif hp.nlpsolver == NLPSolverType.SLSQP:
    opt_inputs['method'] = 'SLSQP'
    solution = minimize(**opt_inputs)
  elif hp.nlpsolver == NLPSolverType.TRUST:
    opt_inputs['method'] = 'trust-constr'
    solution = minimize(**opt_inputs)
  elif hp.nlpsolver == NLPSolverType.IPOPT:
    opt_inputs['method'] = 'ipopt'
    solution = minimize_ipopt(**opt_inputs)
  else:
    # Raise with the message attached instead of print-then-bare-raise,
    # so callers (and logs) see which solvers are available.
    raise ValueError("Unknown NLP solver. Please choose among "
                     f"{list(NLPSolverType.__members__.keys())}")
  _t2 = time.time()
  if cfg.verbose:
    print('Solver exited with success:', solution['success'])
    print(f'Completed in {_t2 - _t1} seconds.')
  # Unravel the flat solution vector once: (states, controls)
  opt_xs, opt_us = opt_dict['unravel'](solution['x'])
  system = hp.system()
  # Sanity check: re-integrate the returned controls through the true dynamics
  opt_x, c = get_state_trajectory_and_cost(hp, system, system.x_0, opt_us)
  print('Cost given by solver:', solution['fun'])
  print("Cost given by integrating the control trajectory:", c)
  if system.x_T is not None:
    achieved_last_state = opt_x[-1]
    desired_last_state = system.x_T
    # Only state components with a specified target contribute to the defect
    defect = [achieved_last_state[i] - el
              for i, el in enumerate(desired_last_state)
              if el is not None]
    print("Defect:", defect)
  # Equality-constraint multipliers, when the chosen solver exposes them
  lmbda = None
  if hp.nlpsolver == NLPSolverType.IPOPT:
    lmbda = solution.info['mult_g']
  elif hp.nlpsolver in (NLPSolverType.TRUST, NLPSolverType.EXTRAGRADIENT):
    lmbda = solution['v']
  # print("the full solution was", solution)
  # raise SystemExit
  results = {'x': opt_xs,
             'u': opt_us,
             'xs_and_us': solution['x'],
             'cost': solution['fun']}
  if lmbda is not None:
    results['lambda'] = lmbda
  return results
| 3,500 | 34.363636 | 110 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.