| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
SpinalNet
|
SpinalNet-master/MNIST/Arch2_EMNIST_Digits.py
|
# -*- coding: utf-8 -*-
"""
This script contains the SpinalNet Arch2 for EMNIST digits.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 200
batch_size_train = 64
batch_size_test = 1000
momentum = 0.5
log_interval = 5000
first_HL = 50
prob = 0.5
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.RandomPerspective(),
torchvision.transforms.RandomRotation(10, fill=(0,)),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL)
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_6 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_7 = nn.Linear(160 + first_HL, first_HL) #added
self.fcp = nn.Linear(720, first_HL)
self.fcp_1 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_2 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_3 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_4 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_5 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_6 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp_7 = nn.Linear(720 + first_HL, first_HL) #added
self.fcp2 = nn.Linear(392, first_HL)
self.fcp2_1 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_2 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_3 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_4 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_5 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_6 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_7 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_8 = nn.Linear(392 + first_HL, first_HL) #added
self.fcp2_9 = nn.Linear(392 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*26, 50) # changed first_HL from second_HL
self.fc3 = nn.Linear(50, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x_real=x.view(-1, 28*28)
x = self.conv1(x)
x = F.relu(F.max_pool2d(x, 2))
x_conv1 = x.view(-1, 1440)
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
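# First spinal branch: the 320-dim conv2 feature vector is split into two
# 160-dim halves that are fed alternately; each hidden layer (width first_HL)
# also receives the previous hidden layer's output, except fc1, which sees
# only the first half.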
half_width = 160
x1 = x[:, 0:half_width]
x1 = F.relu(self.fc1(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fc1_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fc1_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fc1_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fc1_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fc1_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fc1_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fc1_7(x8))
x0 = torch.cat([x1, x2], dim=1)
x0 = torch.cat([x0, x3], dim=1)
x0 = torch.cat([x0, x4], dim=1)
x0 = torch.cat([x0, x5], dim=1)
x0 = torch.cat([x0, x6], dim=1)
x0 = torch.cat([x0, x7], dim=1)
x0 = torch.cat([x0, x8], dim=1)
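# Second spinal branch: the same alternating pattern applied to the 1440-dim
# conv1 feature map (x_conv1), split into two 720-dim halves.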
x = x_conv1
half_width =720
x1 = x[:, 0:half_width]
x1 = F.relu(self.fcp(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fcp_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fcp_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fcp_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fcp_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fcp_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fcp_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fcp_7(x8))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = torch.cat([x, x7], dim=1)
x = torch.cat([x, x8], dim=1)
x0 = torch.cat([x, x0], dim=1)
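# Third spinal branch: the raw 784-dim pixel vector (x_real) is split into two
# 392-dim halves and processed by ten hidden layers; all 26 hidden outputs
# (8 + 8 + 10, each of width first_HL) are concatenated before fc2.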
x = x_real
half_width =392
x1 = x[:, 0:half_width]
x1 = F.relu(self.fcp2(x1))
x2= torch.cat([ x[:,half_width:half_width*2], x1], dim=1)
x2 = F.relu(self.fcp2_1(x2))
x3= torch.cat([ x[:,0:half_width], x2], dim=1)
x3 = F.relu(self.fcp2_2(x3))
x4= torch.cat([ x[:,half_width:half_width*2], x3], dim=1)
x4 = F.relu(self.fcp2_3(x4))
x5= torch.cat([ x[:,0:half_width], x4], dim=1)
x5 = F.relu(self.fcp2_4(x5))
x6= torch.cat([ x[:,half_width:half_width*2], x5], dim=1)
x6 = F.relu(self.fcp2_5(x6))
x7= torch.cat([ x[:,0:half_width], x6], dim=1)
x7 = F.relu(self.fcp2_6(x7))
x8= torch.cat([ x[:,half_width:half_width*2], x7], dim=1)
x8 = F.relu(self.fcp2_7(x8))
x9= torch.cat([ x[:,0:half_width], x8], dim=1)
x9 = F.relu(self.fcp2_8(x9))
x10= torch.cat([ x[:,half_width:half_width*2], x9], dim=1)
x10 = F.relu(self.fcp2_9(x10))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = torch.cat([x, x7], dim=1)
x = torch.cat([x, x8], dim=1)
x = torch.cat([x, x9], dim=1)
x = torch.cat([x, x10], dim=1)
x = torch.cat([x, x0], dim=1)
x = F.relu(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1) # specify the class dimension explicitly
return x
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(random_seed, epoch, max_accuracy):
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # size_average is deprecated
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
accuracy = 100. * correct / len(test_loader.dataset)
if accuracy> max_accuracy:
max_accuracy = accuracy
if epoch%5==0:
print('Seed: {:.0f}, Epoch: {:.0f}; Test: Avg. loss: {:.4f}, Accuracy: {}/{}, Max Accuracy = ({:.2f}%)'.format(
random_seed, epoch,
test_loss, correct, len(test_loader.dataset),
max_accuracy))
return max_accuracy
for random_seed in range(2):
max_accuracy = 0
learning_rate =0.1
torch.manual_seed(random_seed)
#test(random_seed)
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
for epoch in range(1, n_epochs + 1):
train(epoch)
max_accuracy2 = test(random_seed,epoch, max_accuracy)
if max_accuracy == max_accuracy2:
learning_rate = learning_rate*.8
optimizer.param_groups[0]['lr'] = learning_rate # apply the decay; reassigning the variable alone does not change the optimizer
else:
max_accuracy = max_accuracy2
#workbook.close()
| 9,736
| 32.926829
| 117
|
py
|
SpinalNet
|
SpinalNet-master/MNIST/SpinalNet_EMNIST_Digits.py
|
# -*- coding: utf-8 -*-
"""
This script contains the SpinalNet EMNIST digits code.
It provides better performance for the same number of epochs.
@author: Dipu
"""
import torch
import torchvision
n_epochs = 8
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
first_HL =10
torch.backends.cudnn.enabled = False
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.EMNIST('/files/', split='digits', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,))
])),
batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)
print(example_data.shape)
import matplotlib.pyplot as plt
fig = plt.figure()
for i in range(6):
plt.subplot(2,3,i+1)
plt.tight_layout()
plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
plt.title("Ground Truth: {}".format(example_targets[i]))
plt.xticks([])
plt.yticks([])
fig
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(160, first_HL) #changed from 16 to 8
self.fc1_1 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_4 = nn.Linear(160 + first_HL, first_HL) #added
self.fc1_5 = nn.Linear(160 + first_HL, first_HL) #added
self.fc2 = nn.Linear(first_HL*6, 10) # changed first_HL from second_HL
#self.fc1 = nn.Linear(320, 50)
#self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
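# Spinal fully-connected part: six hidden layers of width first_HL, each fed
# one 160-dim half of the 320-dim feature vector together with the previous
# layer's output (fc1 sees only the first half); their outputs are
# concatenated for the classifier fc2.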
x1 = x[:, 0:160]
x1 = F.relu(self.fc1(x1))
x2= torch.cat([ x[:,160:320], x1], dim=1)
x2 = F.relu(self.fc1_1(x2))
x3= torch.cat([ x[:,0:160], x2], dim=1)
x3 = F.relu(self.fc1_2(x3))
x4= torch.cat([ x[:,160:320], x3], dim=1)
x4 = F.relu(self.fc1_3(x4))
x5= torch.cat([ x[:,0:160], x4], dim=1)
x5 = F.relu(self.fc1_4(x5))
x6= torch.cat([ x[:,160:320], x5], dim=1)
x6 = F.relu(self.fc1_5(x6))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = torch.cat([x, x5], dim=1)
x = torch.cat([x, x6], dim=1)
x = self.fc2(x)
#x = F.relu(self.fc1(x))
#x = F.dropout(x, training=self.training)
#x = self.fc2(x)
return F.log_softmax(x, dim=1) # specify the class dimension explicitly
network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate,
momentum=momentum)
train_losses = []
train_counter = []
test_losses = []
test_counter = [i*len(train_loader.dataset) for i in range(n_epochs + 1)]
def train(epoch):
network.train()
for batch_idx, (data, target) in enumerate(train_loader):
optimizer.zero_grad()
output = network(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
train_losses.append(loss.item())
train_counter.append(
(batch_idx*64) + ((epoch-1)*len(train_loader.dataset)))
def test():
network.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
output = network(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # size_average is deprecated
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).sum()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test()
for epoch in range(1, n_epochs + 1):
train(epoch)
test()
#%%
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter, test_losses, color='red')
plt.legend(['Train Loss', 'Test Loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log likelihood loss')
fig
#%%
| 5,407
| 30.08046
| 84
|
py
|
SpinalNet
|
SpinalNet-master/Customizable Model/spinalnettorch.py
|
# Customizable SpinalNet. Supports up to 30 layers.
import torch
import torch.nn as nn
import numpy as np
class SpinalNet(nn.Module):
def __init__(self, Input_Size, Number_of_Split, HL_width, number_HL, Output_Size, Activation_Function):
super(SpinalNet, self).__init__()
Splitted_Input_Size = int(np.round(Input_Size/Number_of_Split))
self.lru = Activation_Function
self.fc1 = nn.Linear(Splitted_Input_Size, HL_width)
if number_HL>1:
self.fc2 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>2:
self.fc3 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>3:
self.fc4 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>4:
self.fc5 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>5:
self.fc6 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>6:
self.fc7 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>7:
self.fc8 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>8:
self.fc9 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>9:
self.fc10 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>10:
self.fc11 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>11:
self.fc12 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>12:
self.fc13 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>13:
self.fc14 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>14:
self.fc15 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>15:
self.fc16 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>16:
self.fc17 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>17:
self.fc18 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>18:
self.fc19 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>19:
self.fc20 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>20:
self.fc21 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>21:
self.fc22 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>22:
self.fc23 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>23:
self.fc24 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>24:
self.fc25 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>25:
self.fc26 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>26:
self.fc27 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>27:
self.fc28 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>28:
self.fc29 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
if number_HL>29:
self.fc30 = nn.Linear(Splitted_Input_Size+HL_width, HL_width)
self.fcx = nn.Linear(HL_width*number_HL, Output_Size)
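# Each hidden layer receives one input split plus the previous hidden layer's
# output; the output layer fcx sees the concatenation of all hidden outputs.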
def forward(self, x):
x_all =x
Splitted_Input_Size = self.fc1.in_features
HL_width = self.fc2.in_features - self.fc1.in_features
number_HL = int(np.round(self.fcx.in_features/HL_width))
length_x_all = number_HL*Splitted_Input_Size
while x_all.size(dim=1) < length_x_all:
x_all = torch.cat([x_all, x],dim=1)
x = self.lru(self.fc1(x_all[:,0:Splitted_Input_Size]))
x_out = x
counter1 = 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc2(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc3(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc4(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc5(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc6(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc7(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc8(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc9(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc10(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc11(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc12(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc13(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc14(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc15(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc16(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc17(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc18(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc19(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc20(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc21(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc22(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc23(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc24(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc25(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc26(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc27(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc28(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc29(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
counter1 = counter1 + 1
if number_HL>counter1:
x_from_all = x_all[:,Splitted_Input_Size* counter1:Splitted_Input_Size*(counter1+1)]
x = self.lru(self.fc30(torch.cat([x_from_all, x], dim=1)))
x_out = torch.cat([x_out, x], dim=1)
#print("Size before output layer:",x_out.size(dim=1))
x = self.fcx(x_out)
return x
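# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration added here, not part of the original
# file). The hyper-parameter values below are assumptions chosen only to show
# the call signature; note that forward() reads self.fc2 to recover HL_width,
# so number_HL must be at least 2.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = SpinalNet(Input_Size=784, Number_of_Split=4, HL_width=20,
                      number_HL=8, Output_Size=10,
                      Activation_Function=nn.ReLU())
    dummy = torch.randn(2, 784)      # a batch of two flattened 28x28 inputs
    logits = model(dummy)
    print(logits.shape)              # expected: torch.Size([2, 10])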
| 12,469
| 46.414449
| 107
|
py
|
SpinalNet
|
SpinalNet-master/CIFAR-100/CNN_dropout_Default_and_SpinalFC_CIFAR100.py
|
# -*- coding: utf-8 -*-
"""
This script contains the default and Spinal CNN dropout code for CIFAR-100.
This code trains both NNs as two separate models.
The code is adapted from:
https://zhenye-na.github.io/2018/09/28/pytorch-cnn-cifar10.html
This code gradually decreases the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(1)
random.seed(1)
Half_width =2048
layer_width = 256
# Image preprocessing modules
transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32),
transforms.ToTensor()])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transforms.ToTensor())
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=trainset,
batch_size=200,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=testset,
batch_size=200,
shuffle=False)
class CNN(nn.Module):
"""CNN."""
def __init__(self):
"""CNN Builder."""
super(CNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_layer = nn.Sequential(
nn.Dropout(p=0.1),
nn.Linear(4096, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, 512),
nn.ReLU(inplace=True),
nn.Dropout(p=0.1),
nn.Linear(512, 100)
)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
# fc layer
x = self.fc_layer(x)
return x
# 3x3 convolution
class SpinalCNN(nn.Module):
"""CNN."""
def __init__(self):
"""CNN Builder."""
super(SpinalCNN, self).__init__()
self.conv_layer = nn.Sequential(
# Conv Layer block 1
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Conv Layer block 2
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Dropout2d(p=0.05),
# Conv Layer block 3
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_out = nn.Sequential(
nn.Dropout(p=0.1), nn.Linear(layer_width*4, 100)
)
def forward(self, x):
"""Perform forward."""
# conv layers
x = self.conv_layer(x)
# flatten
x = x.view(x.size(0), -1)
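# Spinal fully-connected head: the 4096-dim conv output is split into two
# 2048-dim halves (Half_width); each spinal layer receives one half plus the
# previous layer's output, and the four outputs are concatenated for fc_out.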
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
# fc layer
x = self.fc_out(x)
return x
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = CNN().to(device)
model2 = SpinalCNN().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
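# Print once per epoch: with 50,000 CIFAR-100 training images and batch
# size 200, batch index 249 is the last batch of the epoch.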
if i == 249:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
if best_accuracy1> correct1 / total1:
curr_lr1 = curr_lr1/3
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % '.format(100 * correct1 / total1))
else:
best_accuracy1 = correct1 / total1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
if best_accuracy2> correct2 / total2:
curr_lr2 = curr_lr2/3
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % '.format(100 * correct2 / total2))
else:
best_accuracy2 = correct2 / total2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 9,248
| 29.22549
| 99
|
py
|
SpinalNet
|
SpinalNet-master/CIFAR-100/ResNet_Default_and_SpinalFC_CIFAR100.py
|
# -*- coding: utf-8 -*-
"""
This script contains the default and Spinal ResNet code for CIFAR-100.
This code trains both NNs as two separate models.
There is an option of choosing ResNet18(), ResNet34(), SpinalResNet18(), or
SpinalResNet34().
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 160
learning_rate = 0.001
torch.manual_seed(0)
random.seed(0)
first_HL = 512
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR100(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 kernel size with padding convolutional layer in ResNet BasicBlock."""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
class BasicBlock(nn.Module):
"""Basic Block of ReseNet."""
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
"""Basic Block of ReseNet Builder."""
super(BasicBlock, self).__init__()
# First conv3x3 layer
self.conv1 = conv3x3(in_channels, out_channels, stride)
# Batch Normalization
self.bn1 = nn.BatchNorm2d(num_features=out_channels)
# ReLU Activation Function
self.relu = nn.ReLU(inplace=True)
# Second conv3x3 layer
self.conv2 = conv3x3(out_channels, out_channels)
# Batch Normalization
self.bn2 = nn.BatchNorm2d(num_features=out_channels)
# downsample for `residual`
self.downsample = downsample
self.stride = stride
def forward(self, x):
"""Forward Pass of Basic Block."""
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
class SpinalResNet(nn.Module):
"""Residual Neural Network."""
def __init__(self, block, duplicates, num_classes=100):
"""Residual Neural Network Builder."""
super(SpinalResNet, self).__init__()
self.in_channels = 32
self.conv1 = conv3x3(in_channels=3, out_channels=32)
self.bn = nn.BatchNorm2d(num_features=32)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout2d(p=0.02)
# block of Basic Blocks
self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=2)
#self.fc_layer = nn.Linear(256, num_classes)
self.fc1 = nn.Linear(256, first_HL) #changed from 16 to 8
self.fc1_1 = nn.Linear(256 + first_HL, first_HL) #added
self.fc1_2 = nn.Linear(256 + first_HL, first_HL) #added
self.fc1_3 = nn.Linear(256 + first_HL, first_HL) #added
self.fc_layer = nn.Linear(first_HL*4, num_classes)
# initialize weights
# self.apply(initialize_weights)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_block(self, block, duplicates, out_channels, stride=1):
"""
Create Block in ResNet.
Args:
block: BasicBlock
duplicates: number of BasicBlock
out_channels: out channels of the block
Returns:
nn.Sequential(*layers)
"""
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3x3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(num_features=out_channels)
)
layers = []
layers.append(
block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for _ in range(1, duplicates):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward pass of ResNet."""
out = self.conv1(x)
out = self.bn(out)
out = self.relu(out)
out = self.dropout(out)
# Stacked Basic Blocks
out = self.conv2_x(out)
out = self.conv3_x(out)
out = self.conv4_x(out)
out = self.conv5_x(out)
out1 = self.maxpool2(out)
#print('out1',out1.shape)
out2 = out1[:,:,0,0]
#print('out2',out2.shape)
out2 = out2.view(out2.size(0),-1)
#print('out2',out2.shape)
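# The 2x2 pooled map out1 provides four 256-dim spatial vectors; each one
# feeds a spinal layer together with the previous layer's output.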
x1 = out1[:,:,0,0]
x1 = self.relu(self.fc1(x1))
x2= torch.cat([ out1[:,:,0,1], x1], dim=1)
x2 = self.relu(self.fc1_1(x2))
x3= torch.cat([ out1[:,:,1,0], x2], dim=1)
x3 = self.relu(self.fc1_2(x3))
x4= torch.cat([ out1[:,:,1,1], x3], dim=1)
x4 = self.relu(self.fc1_3(x4))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
out = torch.cat([x, x4], dim=1)
out = self.fc_layer(out)
return out
class ResNet(nn.Module):
"""Residual Neural Network."""
def __init__(self, block, duplicates, num_classes=100):
"""Residual Neural Network Builder."""
super(ResNet, self).__init__()
self.in_channels = 32
self.conv1 = conv3x3(in_channels=3, out_channels=32)
self.bn = nn.BatchNorm2d(num_features=32)
self.relu = nn.ReLU(inplace=True)
self.dropout = nn.Dropout2d(p=0.02)
# block of Basic Blocks
self.conv2_x = self._make_block(block, duplicates[0], out_channels=32)
self.conv3_x = self._make_block(block, duplicates[1], out_channels=64, stride=2)
self.conv4_x = self._make_block(block, duplicates[2], out_channels=128, stride=2)
self.conv5_x = self._make_block(block, duplicates[3], out_channels=256, stride=2)
self.maxpool = nn.MaxPool2d(kernel_size=4, stride=1)
#self.maxpool2 = nn.MaxPool2d(kernel_size=2, stride=1)
self.fc_layer = nn.Linear(256, num_classes)
# initialize weights
# self.apply(initialize_weights)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode='fan_out')
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def _make_block(self, block, duplicates, out_channels, stride=1):
"""
Create Block in ResNet.
Args:
block: BasicBlock
duplicates: number of BasicBlock
out_channels: out channels of the block
Returns:
nn.Sequential(*layers)
"""
downsample = None
if (stride != 1) or (self.in_channels != out_channels):
downsample = nn.Sequential(
conv3x3(self.in_channels, out_channels, stride=stride),
nn.BatchNorm2d(num_features=out_channels)
)
layers = []
layers.append(
block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels
for _ in range(1, duplicates):
layers.append(block(out_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, x):
"""Forward pass of ResNet."""
out = self.conv1(x)
out = self.bn(out)
out = self.relu(out)
out = self.dropout(out)
# Stacked Basic Blocks
out = self.conv2_x(out)
out = self.conv3_x(out)
out = self.conv4_x(out)
out = self.conv5_x(out)
out = self.maxpool(out)
out = out.view(out.size(0), -1)
out = self.fc_layer(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2]).to(device)
def SpinalResNet18():
return SpinalResNet(BasicBlock, [2,2,2,2]).to(device)
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3]).to(device)
def SpinalResNet34():
return SpinalResNet(BasicBlock, [3, 4, 6, 3]).to(device)
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = ResNet34().to(device)
model2 = SpinalResNet34().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
#%%
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
# if i == 249:
# print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
# .format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
# print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
# .format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
if best_accuracy1> correct1 / total1:
curr_lr1 = learning_rate*np.random.rand()**5 # np.asscalar was removed from NumPy
update_lr(optimizer1, curr_lr1)
print('Epoch :{} Accuracy NN: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
if best_accuracy2> correct2 / total2:
curr_lr2 = learning_rate*np.random.rand()**5 # np.asscalar was removed from NumPy
update_lr(optimizer2, curr_lr2)
print('Epoch :{} Accuracy SpinalNet: ({:.2f}%), Maximum Accuracy: {:.2f}%'.format(epoch,
100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 13,588
| 30.025114
| 101
|
py
|
SpinalNet
|
SpinalNet-master/CIFAR-100/VGG_Default_and_SpinalFC_CIFAR_100.py
|
# -*- coding: utf-8 -*-
"""
This script contains the default and Spinal VGG code for CIFAR-100.
This code trains both NNs as two separate models.
There is an option of choosing a NN among:
vgg11_bn(), vgg13_bn(), vgg16_bn(), vgg19_bn() and
Spinalvgg11_bn(), Spinalvgg13_bn(), Spinalvgg16_bn(), Spinalvgg19_bn()
This code randomly changes the learning rate to get a good result.
@author: Dipu
"""
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import random
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = 'cpu'
# Hyper-parameters
num_epochs = 200
learning_rate = 0.0001
Half_width =256
layer_width=512
torch.manual_seed(0)
random.seed(0)
# Image preprocessing modules
# Normalize training set together with augmentation
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# Normalize test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])
])
# CIFAR-100 dataset
trainset = torchvision.datasets.CIFAR100(root='./data',
train=True,
download=True,
transform=transform_train)
train_loader = torch.utils.data.DataLoader(
trainset, batch_size=200, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR100(root='./data',
train=False,
download=True,
transform=transform_test)
test_loader = torch.utils.data.DataLoader(
testset, batch_size=200, shuffle=False, num_workers=0)
def conv3x3(in_channels, out_channels, stride=1):
"""3x3 kernel size with padding convolutional layer in ResNet BasicBlock."""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
cfg = {
'A' : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B' : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E' : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
}
class VGG(nn.Module):
def __init__(self, features, num_class=100):
super().__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, num_class)
)
def forward(self, x):
output = self.features(x)
output = output.view(output.size()[0], -1)
output = self.classifier(output)
return output
class SpinalVGG(nn.Module):
def __init__(self, features, num_class=100):
super().__init__()
self.features = features
self.fc_spinal_layer1 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer2 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer3 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_spinal_layer4 = nn.Sequential(
nn.Dropout(), nn.Linear(Half_width + layer_width, layer_width),
nn.ReLU(inplace=True),
)
self.fc_out = nn.Sequential(
nn.Dropout(), nn.Linear(layer_width*4, num_class)
)
def forward(self, x):
output = self.features(x)
output = output.view(output.size()[0], -1)
x = output
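# The flattened VGG feature vector is 512-dim (1x1x512 after five poolings on
# a 32x32 input); Half_width = 256 splits it into the two halves consumed by
# the four spinal layers below.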
x1 = self.fc_spinal_layer1(x[:, 0:Half_width])
x2 = self.fc_spinal_layer2(torch.cat([ x[:,Half_width:2*Half_width], x1], dim=1))
x3 = self.fc_spinal_layer3(torch.cat([ x[:,0:Half_width], x2], dim=1))
x4 = self.fc_spinal_layer4(torch.cat([ x[:,Half_width:2*Half_width], x3], dim=1))
x = torch.cat([x1, x2], dim=1)
x = torch.cat([x, x3], dim=1)
x = torch.cat([x, x4], dim=1)
x = self.fc_out(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
input_channel = 3
for l in cfg:
if l == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
continue
layers += [nn.Conv2d(input_channel, l, kernel_size=3, padding=1)]
if batch_norm:
layers += [nn.BatchNorm2d(l)]
layers += [nn.ReLU(inplace=True)]
input_channel = l
return nn.Sequential(*layers)
def vgg11_bn():
return VGG(make_layers(cfg['A'], batch_norm=True))
def vgg13_bn():
return VGG(make_layers(cfg['B'], batch_norm=True))
def vgg16_bn():
return VGG(make_layers(cfg['D'], batch_norm=True))
def vgg19_bn():
return VGG(make_layers(cfg['E'], batch_norm=True))
def Spinalvgg11_bn():
return SpinalVGG(make_layers(cfg['A'], batch_norm=True))
def Spinalvgg13_bn():
return SpinalVGG(make_layers(cfg['B'], batch_norm=True))
def Spinalvgg16_bn():
return SpinalVGG(make_layers(cfg['D'], batch_norm=True))
def Spinalvgg19_bn():
return SpinalVGG(make_layers(cfg['E'], batch_norm=True))
# For updating learning rate
def update_lr(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# Train the model
total_step = len(train_loader)
curr_lr1 = learning_rate
curr_lr2 = learning_rate
model1 = vgg19_bn().to(device)
model2 = Spinalvgg19_bn().to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer1 = torch.optim.Adam(model1.parameters(), lr=learning_rate)
optimizer2 = torch.optim.Adam(model2.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
best_accuracy1 = 0
best_accuracy2 =0
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
# Forward pass
outputs = model1(images)
loss1 = criterion(outputs, labels)
# Backward and optimize
optimizer1.zero_grad()
loss1.backward()
optimizer1.step()
outputs = model2(images)
loss2 = criterion(outputs, labels)
# Backward and optimize
optimizer2.zero_grad()
loss2.backward()
optimizer2.step()
if i == 249:
print ("Ordinary Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss1.item()))
print ("Spinal Epoch [{}/{}], Step [{}/{}] Loss: {:.4f}"
.format(epoch+1, num_epochs, i+1, total_step, loss2.item()))
# Test the model
model1.eval()
model2.eval()
with torch.no_grad():
correct1 = 0
total1 = 0
correct2 = 0
total2 = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model1(images)
_, predicted = torch.max(outputs.data, 1)
total1 += labels.size(0)
correct1 += (predicted == labels).sum().item()
outputs = model2(images)
_, predicted = torch.max(outputs.data, 1)
total2 += labels.size(0)
correct2 += (predicted == labels).sum().item()
if best_accuracy1> correct1 / total1:
curr_lr1 = learning_rate*np.random.rand()**3 # np.asscalar was removed from NumPy
update_lr(optimizer1, curr_lr1)
print('Test Accuracy of NN: {} % Best: {} %'.format(100 * correct1 / total1, 100*best_accuracy1))
else:
best_accuracy1 = correct1 / total1
net_opt1 = model1
print('Test Accuracy of NN: {} % (improvement)'.format(100 * correct1 / total1))
if best_accuracy2> correct2 / total2:
curr_lr2 = learning_rate*np.random.rand()**3 # np.asscalar was removed from NumPy
update_lr(optimizer2, curr_lr2)
print('Test Accuracy of SpinalNet: {} % Best: {} %'.format(100 * correct2 / total2, 100*best_accuracy2))
else:
best_accuracy2 = correct2 / total2
net_opt2 = model2
print('Test Accuracy of SpinalNet: {} % (improvement)'.format(100 * correct2 / total2))
model1.train()
model2.train()
| 9,281
| 28.845659
| 116
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/test.py
|
"""
Separated testing for OmiEmbed
"""
import time
from util import util
from params.test_params import TestParams
from datasets import create_single_dataloader
from models import create_model
from util.visualizer import Visualizer
if __name__ == '__main__':
# Get testing parameter
param = TestParams().parse()
if param.deterministic:
util.setup_seed(param.seed)
# Dataset related
dataloader, sample_list = create_single_dataloader(param, shuffle=False) # No shuffle for testing
print('The size of testing set is {}'.format(len(dataloader)))
# Get sample list for the dataset
param.sample_list = dataloader.get_sample_list()
# Get the dimension of input omics data
param.omics_dims = dataloader.get_omics_dims()
if param.downstream_task == 'classification' or param.downstream_task == 'multitask':
# Get the number of classes for the classification task
if param.class_num == 0:
param.class_num = dataloader.get_class_num()
print('The number of classes: {}'.format(param.class_num))
if param.downstream_task == 'regression' or param.downstream_task == 'multitask':
# Get the range of the target values
values_min = dataloader.get_values_min()
values_max = dataloader.get_values_max()
if param.regression_scale == 1:
param.regression_scale = values_max
print('The range of the target values is [{}, {}]'.format(values_min, values_max))
if param.downstream_task == 'survival' or param.downstream_task == 'multitask':
# Get the range of T
survival_T_min = dataloader.get_survival_T_min()
survival_T_max = dataloader.get_survival_T_max()
if param.survival_T_max == -1:
param.survival_T_max = survival_T_max
print('The range of survival T is [{}, {}]'.format(survival_T_min, survival_T_max))
# Model related
model = create_model(param) # Create a model given param.model and other parameters
model.setup(param) # Regular setup for the model: load and print networks, create schedulers
visualizer = Visualizer(param) # Create a visualizer to print results
# TESTING
model.set_eval()
test_start_time = time.time() # Start time of testing
output_dict, losses_dict, metrics_dict = model.init_log_dict() # Initialize the log dictionaries
if param.save_latent_space:
latent_dict = model.init_latent_dict()
# Start testing loop
for i, data in enumerate(dataloader):
dataset_size = len(dataloader)
actual_batch_size = len(data['index'])
model.set_input(data) # Unpack input data from the output dictionary of the dataloader
model.test() # Run forward to get the output tensors
model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size) # Update the log dictionaries
if param.save_latent_space:
latent_dict = model.update_latent_dict(latent_dict) # Update the latent space array
if i % param.print_freq == 0: # Print testing log
visualizer.print_test_log(param.epoch_to_load, i, losses_dict, metrics_dict, param.batch_size, dataset_size)
test_time = time.time() - test_start_time
visualizer.print_test_summary(param.epoch_to_load, losses_dict, output_dict, test_time)
visualizer.save_output_dict(output_dict)
if param.save_latent_space:
visualizer.save_latent_space(latent_dict, sample_list)
| 3,489
| 46.808219
| 120
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/train.py
|
"""
Separated training for OmiEmbed
"""
import time
import warnings
from util import util
from params.train_params import TrainParams
from datasets import create_single_dataloader
from models import create_model
from util.visualizer import Visualizer
if __name__ == "__main__":
warnings.filterwarnings('ignore')
# Get parameters
param = TrainParams().parse()
if param.deterministic:
util.setup_seed(param.seed)
# Dataset related
dataloader, sample_list = create_single_dataloader(param, enable_drop_last=True)
print('The size of training set is {}'.format(len(dataloader)))
# Get the dimension of input omics data
param.omics_dims = dataloader.get_omics_dims()
if param.downstream_task in ['classification', 'multitask', 'alltask']:
# Get the number of classes for the classification task
if param.class_num == 0:
param.class_num = dataloader.get_class_num()
if param.downstream_task != 'alltask':
print('The number of classes: {}'.format(param.class_num))
if param.downstream_task in ['regression', 'multitask', 'alltask']:
# Get the range of the target values
values_min = dataloader.get_values_min()
values_max = dataloader.get_values_max()
if param.regression_scale == 1:
param.regression_scale = values_max
print('The range of the target values is [{}, {}]'.format(values_min, values_max))
if param.downstream_task in ['survival', 'multitask', 'alltask']:
# Get the range of T
survival_T_min = dataloader.get_survival_T_min()
survival_T_max = dataloader.get_survival_T_max()
if param.survival_T_max == -1:
param.survival_T_max = survival_T_max
print('The range of survival T is [{}, {}]'.format(survival_T_min, survival_T_max))
# Model related
model = create_model(param) # Create a model given param.model and other parameters
model.setup(param) # Regular setup for the model: load and print networks, create schedulers
visualizer = Visualizer(param) # Create a visualizer to print results
# Start the epoch loop
visualizer.print_phase(model.phase)
for epoch in range(param.epoch_count, param.epoch_num + 1): # outer loop for different epochs
epoch_start_time = time.time() # Start time of this epoch
model.epoch = epoch
# TRAINING
model.set_train() # Set train mode for training
iter_load_start_time = time.time() # Start time of data loading for this iteration
output_dict, losses_dict, metrics_dict = model.init_log_dict() # Initialize the log dictionaries
if epoch == param.epoch_num_p1 + 1:
model.phase = 'p2' # Change to supervised phase
visualizer.print_phase(model.phase)
if epoch == param.epoch_num_p1 + param.epoch_num_p2 + 1:
model.phase = 'p3' # Change to supervised phase
visualizer.print_phase(model.phase)
if param.save_latent_space and epoch == param.epoch_num:
latent_dict = model.init_latent_dict()
# Start training loop
for i, data in enumerate(dataloader): # Inner loop for iterations within one epoch
model.iter = i
dataset_size = len(dataloader)
actual_batch_size = len(data['index'])
iter_start_time = time.time() # Timer for computation per iteration
if i % param.print_freq == 0:
load_time = iter_start_time - iter_load_start_time # Data loading time for this iteration
model.set_input(data) # Unpack input data from the output dictionary of the dataloader
model.update() # Calculate losses, gradients and update network parameters
model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size) # Update the log dictionaries
if param.save_latent_space and epoch == param.epoch_num:
latent_dict = model.update_latent_dict(latent_dict) # Update the latent space array
if i % param.print_freq == 0: # Print training losses and save logging information to the disk
comp_time = time.time() - iter_start_time # Computational time for this iteration
visualizer.print_train_log(epoch, i, losses_dict, metrics_dict, load_time, comp_time, param.batch_size, dataset_size)
iter_load_start_time = time.time()
# Model saving
if param.save_model:
if param.save_epoch_freq == -1: # Only save networks during last epoch
if epoch == param.epoch_num:
print('Saving the model at the end of epoch {:d}'.format(epoch))
model.save_networks(str(epoch))
elif epoch % param.save_epoch_freq == 0: # Save the networks every <save_epoch_freq> epochs
print('Saving the model at the end of epoch {:d}'.format(epoch))
# model.save_networks('latest')
model.save_networks(str(epoch))
train_time = time.time() - epoch_start_time
current_lr = model.update_learning_rate() # update learning rates at the end of each epoch
visualizer.print_train_summary(epoch, losses_dict, output_dict, train_time, current_lr)
if param.save_latent_space and epoch == param.epoch_num:
visualizer.save_latent_space(latent_dict, sample_list)
| 5,827
| 55.038462
| 146
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/train_test.py
|
"""
Training and testing for OmiEmbed
"""
import time
import warnings
import numpy as np
import torch
from util import util
from params.train_test_params import TrainTestParams
from datasets import create_separate_dataloader
from models import create_model
from util.visualizer import Visualizer
if __name__ == "__main__":
warnings.filterwarnings('ignore')
full_start_time = time.time()
# Get parameters
param = TrainTestParams().parse()
if param.deterministic:
util.setup_seed(param.seed)
# Dataset related
full_dataloader, train_dataloader, val_dataloader, test_dataloader = create_separate_dataloader(param)
print('The size of training set is {}'.format(len(train_dataloader)))
# Get sample list for the dataset
param.sample_list = full_dataloader.get_sample_list()
# Get the dimension of input omics data
param.omics_dims = full_dataloader.get_omics_dims()
if param.downstream_task in ['classification', 'multitask', 'alltask']:
# Get the number of classes for the classification task
if param.class_num == 0:
param.class_num = full_dataloader.get_class_num()
if param.downstream_task != 'alltask':
print('The number of classes: {}'.format(param.class_num))
if param.downstream_task in ['regression', 'multitask', 'alltask']:
# Get the range of the target values
values_min = full_dataloader.get_values_min()
values_max = full_dataloader.get_values_max()
if param.regression_scale == 1:
param.regression_scale = values_max
print('The range of the target values is [{}, {}]'.format(values_min, values_max))
if param.downstream_task in ['survival', 'multitask', 'alltask']:
# Get the range of T
survival_T_min = full_dataloader.get_survival_T_min()
survival_T_max = full_dataloader.get_survival_T_max()
if param.survival_T_max == -1:
param.survival_T_max = survival_T_max
print('The range of survival T is [{}, {}]'.format(survival_T_min, survival_T_max))
# Model related
model = create_model(param) # Create a model given param.model and other parameters
model.setup(param) # Regular setup for the model: load and print networks, create schedulers
visualizer = Visualizer(param) # Create a visualizer to print results
# Start the epoch loop
visualizer.print_phase(model.phase)
for epoch in range(param.epoch_count, param.epoch_num + 1): # outer loop for different epochs
epoch_start_time = time.time() # Start time of this epoch
model.epoch = epoch
# TRAINING
model.set_train() # Set train mode for training
iter_load_start_time = time.time() # Start time of data loading for this iteration
output_dict, losses_dict, metrics_dict = model.init_log_dict() # Initialize the log dictionaries
if epoch == param.epoch_num_p1 + 1:
model.phase = 'p2' # Change to supervised phase
visualizer.print_phase(model.phase)
if epoch == param.epoch_num_p1 + param.epoch_num_p2 + 1:
model.phase = 'p3' # Change to the final joint training phase
visualizer.print_phase(model.phase)
# Start training loop
for i, data in enumerate(train_dataloader): # Inner loop for different iteration within one epoch
model.iter = i
dataset_size = len(train_dataloader)
actual_batch_size = len(data['index'])
iter_start_time = time.time() # Timer for computation per iteration
if i % param.print_freq == 0:
load_time = iter_start_time - iter_load_start_time # Data loading time for this iteration
model.set_input(data) # Unpack input data from the output dictionary of the dataloader
model.update() # Calculate losses, gradients and update network parameters
model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size) # Update the log dictionaries
if i % param.print_freq == 0: # Print training losses and save logging information to the disk
comp_time = time.time() - iter_start_time # Computational time for this iteration
visualizer.print_train_log(epoch, i, losses_dict, metrics_dict, load_time, comp_time, param.batch_size, dataset_size)
iter_load_start_time = time.time()
# Model saving
if param.save_model:
if param.save_epoch_freq == -1: # Only save networks during last epoch
if epoch == param.epoch_num:
print('Saving the model at the end of epoch {:d}'.format(epoch))
model.save_networks(str(epoch))
elif epoch % param.save_epoch_freq == 0: # Save both the generator and the discriminator every <save_epoch_freq> epochs
print('Saving the model at the end of epoch {:d}'.format(epoch))
# model.save_networks('latest')
model.save_networks(str(epoch))
train_time = time.time() - epoch_start_time
current_lr = model.update_learning_rate() # update learning rates at the end of each epoch
visualizer.print_train_summary(epoch, losses_dict, output_dict, train_time, current_lr)
# TESTING
model.set_eval() # Set eval mode for testing
test_start_time = time.time() # Start time of testing
output_dict, losses_dict, metrics_dict = model.init_log_dict() # Initialize the log dictionaries
# Start testing loop
for i, data in enumerate(test_dataloader):
dataset_size = len(test_dataloader)
actual_batch_size = len(data['index'])
model.set_input(data) # Unpack input data from the output dictionary of the dataloader
model.test() # Run forward to get the output tensors
model.update_log_dict(output_dict, losses_dict, metrics_dict, actual_batch_size) # Update the log dictionaries
if i % param.print_freq == 0: # Print testing log
visualizer.print_test_log(epoch, i, losses_dict, metrics_dict, param.batch_size, dataset_size)
test_time = time.time() - test_start_time
visualizer.print_test_summary(epoch, losses_dict, output_dict, test_time)
if epoch == param.epoch_num:
visualizer.save_output_dict(output_dict)
full_time = time.time() - full_start_time
print('Full running time: {:.3f}s'.format(full_time))
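# Illustrative sketch (not part of the original file): how the epoch counter in the
# loop above maps onto the three training phases, assuming epoch_num_p1 and
# epoch_num_p2 are the per-phase epoch budgets used by the phase switches.
def _phase_for_epoch(epoch, epoch_num_p1, epoch_num_p2):
    if epoch <= epoch_num_p1:
        return 'p1'      # unsupervised embedding phase
    elif epoch <= epoch_num_p1 + epoch_num_p2:
        return 'p2'      # supervised downstream phase
    else:
        return 'p3'      # joint phase: embedding and downstream trained together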
| 7,049 | 54.952381 | 146 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/vae_survival_model.py |
import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeSurvivalModel(VaeBasicModel):
"""
This class implements the VAE survival model, using the VAE framework with the survival prediction downstream task.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# changing the default values of parameters to match the vae survival prediction model
parser.set_defaults(net_down='multi_FC_survival')
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
parser.add_argument('--stratify_label', action='store_true', help='load extra label for stratified dataset separation')
return parser
def __init__(self, param):
"""
Initialize the VAE_survival class.
"""
VaeBasicModel.__init__(self, param)
# specify the training losses you want to print out.
self.loss_names.append('survival')
# specify the metrics you want to print out.
self.metric_names = []
# input tensor
self.survival_T = None
self.survival_E = None
self.y_true = None
# output tensor
self.y_out = None
# define the network
self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
param.latent_space_dim, None, param.time_num, None, param.init_type,
param.init_gain, self.gpu_ids)
self.loss_survival = None
if param.survival_loss == 'MTLR':
self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
if self.isTrain:
# Set the optimizer
self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Down)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
VaeBasicModel.set_input(self, input_dict)
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
def forward(self):
VaeBasicModel.forward(self)
# Get the output tensor
self.y_out = self.netDown(self.latent)
def cal_losses(self):
"""Calculate losses"""
VaeBasicModel.cal_losses(self)
# Calculate the survival loss (downstream loss)
if self.param.survival_loss == 'MTLR':
self.loss_survival = losses.MTLR_survival_loss(self.y_out, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
# LOSS DOWN
self.loss_down = self.loss_survival
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
def update(self):
VaeBasicModel.update(self)
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
y_true_E = self.survival_E
y_true_T = self.survival_T
y_out = self.y_out
predict = self.predict_risk()
# density = predict['density']
survival = predict['survival']
# hazard = predict['hazard']
risk = predict['risk']
return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out': y_out}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
pass
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
def predict_risk(self):
"""
Predict the density, survival and hazard function, as well as the risk score
"""
if self.param.survival_loss == 'MTLR':
phi = torch.exp(torch.mm(self.y_out, self.tri_matrix_1))
div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
density = phi / div
survival = torch.mm(density, self.tri_matrix_2)
hazard = density[:, :-1] / survival[:, 1:]
cumulative_hazard = torch.cumsum(hazard, dim=1)
risk = torch.sum(cumulative_hazard, 1)
return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
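# Shape notes for predict_risk (illustrative, assuming time_num = K):
# y_out is (N, K) raw MTLR scores; tri_matrix_1 is (K, K + 1), so phi, density and
# survival are (N, K + 1); hazard is (N, K), i.e. the interval density divided by the
# survival function shifted by one interval; risk is (N,), the summed cumulative
# hazard used to rank samples by predicted risk.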
| 5,390 | 38.933333 | 151 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/losses.py |
import torch
import torch.nn as nn
def get_loss_func(loss_name, reduction='mean'):
"""
Return the loss function.
Parameters:
loss_name (str) -- the name of the loss function: BCE | MSE | L1 | CE
reduction (str) -- the reduction method applied to the loss function: sum | mean
"""
if loss_name == 'BCE':
return nn.BCEWithLogitsLoss(reduction=reduction)
elif loss_name == 'MSE':
return nn.MSELoss(reduction=reduction)
elif loss_name == 'L1':
return nn.L1Loss(reduction=reduction)
elif loss_name == 'CE':
return nn.CrossEntropyLoss(reduction=reduction)
else:
raise NotImplementedError('Loss function %s is not found' % loss_name)
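# Illustrative usage sketch (not part of the original file): get_loss_func returns a
# standard torch.nn loss module, so it can be called directly on prediction / target
# tensors. The tensors below are dummy values chosen only to make the example concrete.
def _get_loss_func_example():
    loss_fn = get_loss_func('MSE', reduction='mean')
    pred = torch.zeros(4, 10)
    target = torch.ones(4, 10)
    return loss_fn(pred, target)  # mean squared error of 0 vs 1 -> tensor(1.)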
def kl_loss(mean, log_var, reduction='mean'):
part_loss = 1 + log_var - mean.pow(2) - log_var.exp()
if reduction == 'mean':
loss = -0.5 * torch.mean(part_loss)
else:
loss = -0.5 * torch.sum(part_loss)
return loss
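# Quick sanity check (illustrative, not part of the original file): a posterior that
# exactly matches the N(0, I) prior (mean 0, log_var 0) has zero KL divergence,
# so kl_loss returns 0 for all-zero inputs.
def _kl_loss_example():
    mean = torch.zeros(4, 8)
    log_var = torch.zeros(4, 8)
    return kl_loss(mean, log_var, reduction='mean')  # -> tensor(0.)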
def MTLR_survival_loss(y_pred, y_true, E, tri_matrix, reduction='mean'):
"""
Compute the MTLR survival loss
"""
# Get censored index and uncensored index
censor_idx = []
uncensor_idx = []
for i in range(len(E)):
# If this is an uncensored data point
if E[i] == 1:
# Add to uncensored index list
uncensor_idx.append(i)
else:
# Add to censored index list
censor_idx.append(i)
# Separate y_true and y_pred
y_pred_censor = y_pred[censor_idx]
y_true_censor = y_true[censor_idx]
y_pred_uncensor = y_pred[uncensor_idx]
y_true_uncensor = y_true[uncensor_idx]
# Calculate likelihood for censored datapoint
phi_censor = torch.exp(torch.mm(y_pred_censor, tri_matrix))
reduc_phi_censor = torch.sum(phi_censor * y_true_censor, dim=1)
# Calculate likelihood for uncensored datapoint
phi_uncensor = torch.exp(torch.mm(y_pred_uncensor, tri_matrix))
reduc_phi_uncensor = torch.sum(phi_uncensor * y_true_uncensor, dim=1)
# Likelihood normalisation
z_censor = torch.exp(torch.mm(y_pred_censor, tri_matrix))
reduc_z_censor = torch.sum(z_censor, dim=1)
z_uncensor = torch.exp(torch.mm(y_pred_uncensor, tri_matrix))
reduc_z_uncensor = torch.sum(z_uncensor, dim=1)
# MTLR loss
loss = - (torch.sum(torch.log(reduc_phi_censor)) + torch.sum(torch.log(reduc_phi_uncensor)) - torch.sum(torch.log(reduc_z_censor)) - torch.sum(torch.log(reduc_z_uncensor)))
if reduction == 'mean':
loss = loss / E.shape[0]
return loss
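# Illustrative shape sketch (not part of the original file): with K time intervals,
# y_pred is (N, K), y_true is a 0/1 target matrix of shape (N, K + 1), E is the event
# indicator (1 = uncensored) and tri_matrix is the (K, K + 1) lower-triangular matrix
# built by get_tri_matrix(dimension_type=1) in the model classes. All values below
# are dummies chosen only to make the shapes concrete.
def _mtlr_survival_loss_example():
    K, N = 4, 3
    tri_matrix = torch.tril(torch.ones(K, K + 1))
    y_pred = torch.zeros(N, K)        # dummy downstream network output
    y_true = torch.zeros(N, K + 1)
    y_true[:, 2] = 1                  # event placed in the third interval for every sample
    E = torch.ones(N)                 # all samples treated as uncensored
    return MTLR_survival_loss(y_pred, y_true, E, tri_matrix, reduction='mean')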
| 2,558 | 32.671053 | 176 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/vae_alltask_gn_model.py |
import torch
import torch.nn as nn
from .basic_model import BasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskGNModel(BasicModel):
"""
This class implements the VAE multitasking model with GradNorm (all tasks), using the VAE framework with the multiple downstream tasks.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# Downstream task network
parser.set_defaults(net_down='multi_FC_alltask')
# Survival prediction related
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
# Classification related
parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
# Regression related
parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
# GradNorm related
parser.add_argument('--alpha', type=float, default=1.5, help='the additional hyperparameter for GradNorm')
parser.add_argument('--lr_gn', type=float, default=1e-3, help='the learning rate for GradNorm')
parser.add_argument('--k_survival', type=float, default=1.0, help='initial weight for the survival loss')
parser.add_argument('--k_classifier', type=float, default=1.0, help='initial weight for the classifier loss')
parser.add_argument('--k_regression', type=float, default=1.0, help='initial weight for the regression loss')
# Number of tasks
parser.add_argument('--task_num', type=int, default=7, help='the number of downstream tasks')
return parser
def __init__(self, param):
"""
Initialize the VAE_multitask class.
"""
BasicModel.__init__(self, param)
# specify the training losses you want to print out.
if param.omics_mode == 'abc':
self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
if param.omics_mode == 'ab':
self.loss_names = ['recon_A', 'recon_B', 'kl']
elif param.omics_mode == 'b':
self.loss_names = ['recon_B', 'kl']
elif param.omics_mode == 'a':
self.loss_names = ['recon_A', 'kl']
elif param.omics_mode == 'c':
self.loss_names = ['recon_C', 'kl']
self.loss_names.extend(['survival', 'classifier_1', 'classifier_2', 'classifier_3', 'classifier_4', 'classifier_5', 'regression', 'gradient', 'w_sur', 'w_cla_1', 'w_cla_2', 'w_cla_3', 'w_cla_4', 'w_cla_5', 'w_reg'])
# specify the models you want to save to the disk and load.
self.model_names = ['All']
# input tensor
self.input_omics = []
self.data_index = None # The indexes of input data
self.survival_T = None
self.survival_E = None
self.y_true = None
self.label = None
self.value = None
# output tensor
self.z = None
self.recon_omics = None
self.mean = None
self.log_var = None
self.y_out_sur = None
self.y_out_cla = None
self.y_out_reg = None
# specify the metrics you want to print out.
self.metric_names = ['accuracy_1', 'accuracy_2', 'accuracy_3', 'accuracy_4', 'accuracy_5', 'rmse']
# define the network
self.netAll = networks.define_net(param.net_VAE, param.net_down, param.omics_dims, param.omics_mode,
param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
param.dropout_p, param.latent_space_dim, param.class_num, param.time_num, param.task_num,
param.init_type, param.init_gain, self.gpu_ids)
# define the reconstruction loss
self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
# define the classification loss
self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
# define the regression distance loss
self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
self.loss_recon_A = None
self.loss_recon_B = None
self.loss_recon_C = None
self.loss_recon = None
self.loss_kl = None
self.loss_survival = None
self.loss_classifier_1 = None
self.loss_classifier_2 = None
self.loss_classifier_3 = None
self.loss_classifier_4 = None
self.loss_classifier_5 = None
self.loss_regression = None
self.loss_gradient = 0
self.loss_w_sur = None
self.loss_w_cla_1 = None
self.loss_w_cla_2 = None
self.loss_w_cla_3 = None
self.loss_w_cla_4 = None
self.loss_w_cla_5 = None
self.loss_w_reg = None
self.task_losses = None
self.weighted_losses = None
self.initial_losses = None
self.metric_accuracy_1 = None
self.metric_accuracy_2 = None
self.metric_accuracy_3 = None
self.metric_accuracy_4 = None
self.metric_accuracy_5 = None
self.metric_rmse = None
if param.survival_loss == 'MTLR':
self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
# Weights of multiple downstream tasks
self.loss_weights = nn.Parameter(torch.ones(param.task_num, requires_grad=True, device=self.device))
if self.isTrain:
# Set the optimizer
self.optimizer_All = torch.optim.Adam([{'params': self.netAll.parameters(), 'lr': param.lr, 'betas': (param.beta1, 0.999), 'weight_decay': param.weight_decay},
{'params': self.loss_weights, 'lr': param.lr_gn}])
self.optimizers.append(self.optimizer_All)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
self.input_omics = []
for i in range(0, 3):
if i == 1 and self.param.ch_separate:
input_B = []
for ch in range(0, 23):
input_B.append(input_dict['input_omics'][1][ch].to(self.device))
self.input_omics.append(input_B)
else:
self.input_omics.append(input_dict['input_omics'][i].to(self.device))
self.data_index = input_dict['index']
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
self.label = []
for i in range(self.param.task_num - 2):
self.label.append(input_dict['label'][i].to(self.device))
self.value = input_dict['value'].to(self.device)
def forward(self):
# Get the output tensor
self.z, self.recon_omics, self.mean, self.log_var, self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netAll(self.input_omics)
# define the latent
self.latent = self.mean
def cal_losses(self):
"""Calculate losses"""
# Calculate the reconstruction loss for A
if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
else:
self.loss_recon_A = 0
# Calculate the reconstruction loss for B
if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
if self.param.ch_separate:
recon_omics_B = torch.cat(self.recon_omics[1], -1)
input_omics_B = torch.cat(self.input_omics[1], -1)
self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
else:
self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
else:
self.loss_recon_B = 0
# Calculate the reconstruction loss for C
if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
else:
self.loss_recon_C = 0
# Overall reconstruction loss
if self.param.reduction == 'sum':
self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
elif self.param.reduction == 'mean':
self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
# Calculate the kl loss
self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
# Calculate the overall vae loss (embedding loss)
# LOSS EMBED
self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
# Calculate the survival loss
if self.param.survival_loss == 'MTLR':
self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
# Calculate the classification loss
self.loss_classifier_1 = self.lossFuncClass(self.y_out_cla[0], self.label[0])
self.loss_classifier_2 = self.lossFuncClass(self.y_out_cla[1], self.label[1])
self.loss_classifier_3 = self.lossFuncClass(self.y_out_cla[2], self.label[2])
self.loss_classifier_4 = self.lossFuncClass(self.y_out_cla[3], self.label[3])
self.loss_classifier_5 = self.lossFuncClass(self.y_out_cla[4], self.label[4])
# Calculate the regression loss
self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
# Calculate the weighted downstream losses
# Add initial weights
self.task_losses = torch.stack([self.param.k_survival * self.loss_survival, self.param.k_classifier * self.loss_classifier_1, self.param.k_classifier * self.loss_classifier_2, self.param.k_classifier * self.loss_classifier_3, self.param.k_classifier * self.loss_classifier_4, self.param.k_classifier * self.loss_classifier_5, self.param.k_regression * self.loss_regression])
self.weighted_losses = self.loss_weights * self.task_losses
# LOSS DOWN
self.loss_down = self.weighted_losses.sum()
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
# Log the loss weights
self.loss_w_sur = self.loss_weights[0] * self.param.k_survival
self.loss_w_cla_1 = self.loss_weights[1] * self.param.k_classifier
self.loss_w_cla_2 = self.loss_weights[2] * self.param.k_classifier
self.loss_w_cla_3 = self.loss_weights[3] * self.param.k_classifier
self.loss_w_cla_4 = self.loss_weights[4] * self.param.k_classifier
self.loss_w_cla_5 = self.loss_weights[5] * self.param.k_classifier
self.loss_w_reg = self.loss_weights[6] * self.param.k_regression
def update(self):
if self.phase == 'p1':
self.forward()
self.optimizer_All.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_embed.backward() # Backpropagation
self.optimizer_All.step() # Update weights
elif self.phase == 'p2':
self.forward()
self.optimizer_All.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_down.backward() # Backpropagation
self.optimizer_All.step() # Update weights
elif self.phase == 'p3':
self.forward()
self.cal_losses() # Calculate losses
self.optimizer_All.zero_grad() # Set gradients to zero
# Calculate the GradNorm gradients
if isinstance(self.netAll, torch.nn.DataParallel):
W = list(self.netAll.module.get_last_encode_layer().parameters())
else:
W = list(self.netAll.get_last_encode_layer().parameters())
grad_norms = []
for weight, loss in zip(self.loss_weights, self.task_losses):
grad = torch.autograd.grad(loss, W, retain_graph=True)
grad_norms.append(torch.norm(weight * grad[0]))
grad_norms = torch.stack(grad_norms)
if self.iter == 0:
self.initial_losses = self.task_losses.detach()
# Calculate the constant targets
with torch.no_grad():
# loss ratios
loss_ratios = self.task_losses / self.initial_losses
# inverse training rate
inverse_train_rates = loss_ratios / loss_ratios.mean()
constant_terms = grad_norms.mean() * (inverse_train_rates ** self.param.alpha)
# Calculate the gradient loss
self.loss_gradient = (grad_norms - constant_terms).abs().sum()
# Set the gradients of weights
loss_weights_grad = torch.autograd.grad(self.loss_gradient, self.loss_weights)[0]
self.loss_All.backward()
self.loss_weights.grad = loss_weights_grad
self.optimizer_All.step() # Update weights
# Re-normalize the losses weights
with torch.no_grad():
normalize_coeff = len(self.loss_weights) / self.loss_weights.sum()
self.loss_weights.data = self.loss_weights.data * normalize_coeff
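# GradNorm recap for the 'p3' branch above (illustrative comments, not part of the
# original file): loss_ratio_i = L_i(t) / L_i(0), r_i = loss_ratio_i / mean_j(loss_ratio_j),
# target_i = mean_j(||w_j * grad_j||) * r_i ** alpha, and the gradient loss is
# sum_i |(||w_i * grad_i|| - target_i)|. Its gradient with respect to the task weights
# is written into self.loss_weights.grad, and the weights are then renormalised so
# that they keep summing to the number of tasks.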
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
# Survival
y_true_E = self.survival_E
y_true_T = self.survival_T
y_out_sur = self.y_out_sur
predict = self.predict_risk()
# density = predict['density']
survival = predict['survival']
# hazard = predict['hazard']
risk = predict['risk']
# Classification
y_prob_cla = []
y_pred_cla = []
y_true_cla = []
for i in range(self.param.task_num - 2):
y_prob_cla.append(F.softmax(self.y_out_cla[i], dim=1))
_, y_pred_cla_i = torch.max(y_prob_cla[i], 1)
y_pred_cla.append(y_pred_cla_i)
y_true_cla.append(self.label[i])
# Regression
y_true_reg = self.value
y_pred_reg = self.y_out_reg * self.param.regression_scale
return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk,
'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla,
'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy_1 = (output_dict['y_true_cla'][0] == output_dict['y_pred_cla'][0]).sum().item() / len(
output_dict['y_true_cla'][0])
self.metric_accuracy_2 = (output_dict['y_true_cla'][1] == output_dict['y_pred_cla'][1]).sum().item() / len(
output_dict['y_true_cla'][1])
self.metric_accuracy_3 = (output_dict['y_true_cla'][2] == output_dict['y_pred_cla'][2]).sum().item() / len(
output_dict['y_true_cla'][2])
self.metric_accuracy_4 = (output_dict['y_true_cla'][3] == output_dict['y_pred_cla'][3]).sum().item() / len(
output_dict['y_true_cla'][3])
self.metric_accuracy_5 = (output_dict['y_true_cla'][4] == output_dict['y_pred_cla'][4]).sum().item() / len(
output_dict['y_true_cla'][4])
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
def predict_risk(self):
"""
Predict the density, survival and hazard function, as well as the risk score
"""
if self.param.survival_loss == 'MTLR':
phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
density = phi / div
survival = torch.mm(density, self.tri_matrix_2)
hazard = density[:, :-1] / survival[:, 1:]
cumulative_hazard = torch.cumsum(hazard, dim=1)
risk = torch.sum(cumulative_hazard, 1)
return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 17,700 | 47.231608 | 382 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/vae_regression_model.py |
import torch
from sklearn import metrics
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
class VaeRegressionModel(VaeBasicModel):
"""
This class implements the VAE regression model, using the VAE framework with the regression downstream task.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# changing the default values of parameters to match the vae regression model
parser.set_defaults(net_down='multi_FC_regression', not_stratified=True)
parser.add_argument('--regression_scale', type=int, default=1,
help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1',
help='choose the distance loss for regression task, options: [MSE | L1]')
return parser
def __init__(self, param):
"""
Initialize the VAE_regression class.
"""
VaeBasicModel.__init__(self, param)
# specify the training losses you want to print out.
self.loss_names.append('distance')
# specify the metrics you want to print out.
self.metric_names = ['rmse']
# input tensor
self.value = None
# output tensor
self.y_out = None
# define the network
self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
param.latent_space_dim, None, None, None, param.init_type,
param.init_gain, self.gpu_ids)
# define the distance loss
self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
self.loss_distance = None
self.metric_rmse = None
if self.isTrain:
# Set the optimizer
self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Down)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
VaeBasicModel.set_input(self, input_dict)
self.value = input_dict['value'].to(self.device)
def forward(self):
VaeBasicModel.forward(self)
# Get the output tensor
self.y_out = self.netDown(self.latent)
def cal_losses(self):
"""Calculate losses"""
VaeBasicModel.cal_losses(self)
# Calculate the regression distance loss (downstream loss)
self.loss_distance = self.lossFuncDist(self.y_out.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
# LOSS DOWN
self.loss_down = self.loss_distance
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
def update(self):
VaeBasicModel.update(self)
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
y_true = self.value
y_pred = self.y_out * self.param.regression_scale
return {'index': index, 'y_true': y_true, 'y_pred': y_pred}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
y_true = output_dict['y_true'].cpu().numpy()
y_pred = output_dict['y_pred'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true, y_pred, squared=False)
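# Note (illustrative, not part of the original file): during training the regression
# target is divided by param.regression_scale (train_test.py replaces the default value
# of 1 with the maximum target value), and get_down_output multiplies the prediction
# back by the same factor, so the network regresses a value normalised to roughly [0, 1].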
| 3,793 | 37.323232 | 152 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/vae_alltask_model.py |
import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeAlltaskModel(VaeBasicModel):
"""
This class implements the VAE multitasking model with all downstream tasks (5 classifiers + 1 regressor + 1 survival predictor), using the VAE framework with the multiple downstream tasks.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# Downstream task network
parser.set_defaults(net_down='multi_FC_alltask')
# Survival prediction related
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
# Classification related
parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
# Regression related
parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
# Loss combined
parser.add_argument('--k_survival', type=float, default=1,
help='weight for the survival loss')
parser.add_argument('--k_classifier', type=float, default=1,
help='weight for the classifier loss')
parser.add_argument('--k_regression', type=float, default=1,
help='weight for the regression loss')
# Number of tasks
parser.add_argument('--task_num', type=int, default=7,
help='the number of downstream tasks')
return parser
def __init__(self, param):
"""
Initialize the VAE_multitask class.
"""
VaeBasicModel.__init__(self, param)
# specify the training losses you want to print out.
self.loss_names.extend(['survival', 'classifier_1', 'classifier_2', 'classifier_3', 'classifier_4', 'classifier_5', 'regression'])
# specify the metrics you want to print out.
self.metric_names = ['accuracy_1', 'accuracy_2', 'accuracy_3', 'accuracy_4', 'accuracy_5', 'rmse']
# input tensor
self.survival_T = None
self.survival_E = None
self.y_true = None
self.label = None
self.value = None
# output tensor
self.y_out_sur = None
self.y_out_cla = None
self.y_out_reg = None
# define the network
self.netDown = networks.define_down(param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
param.latent_space_dim, param.class_num, param.time_num, param.task_num, param.init_type,
param.init_gain, self.gpu_ids)
# define the classification loss
self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
# define the regression distance loss
self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
self.loss_survival = None
self.loss_classifier_1 = None
self.loss_classifier_2 = None
self.loss_classifier_3 = None
self.loss_classifier_4 = None
self.loss_classifier_5 = None
self.loss_regression = None
self.metric_accuracy_1 = None
self.metric_accuracy_2 = None
self.metric_accuracy_3 = None
self.metric_accuracy_4 = None
self.metric_accuracy_5 = None
self.metric_rmse = None
if param.survival_loss == 'MTLR':
self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
if self.isTrain:
# Set the optimizer
self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Down)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
VaeBasicModel.set_input(self, input_dict)
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
self.label = []
for i in range(self.param.task_num-2):
self.label.append(input_dict['label'][i].to(self.device))
self.value = input_dict['value'].to(self.device)
def forward(self):
# Get the output tensor
VaeBasicModel.forward(self)
self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netDown(self.latent)
def cal_losses(self):
"""Calculate losses"""
VaeBasicModel.cal_losses(self)
# Calculate the survival loss
if self.param.survival_loss == 'MTLR':
self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
# Calculate the classification loss
self.loss_classifier_1 = self.lossFuncClass(self.y_out_cla[0], self.label[0])
self.loss_classifier_2 = self.lossFuncClass(self.y_out_cla[1], self.label[1])
self.loss_classifier_3 = self.lossFuncClass(self.y_out_cla[2], self.label[2])
self.loss_classifier_4 = self.lossFuncClass(self.y_out_cla[3], self.label[3])
self.loss_classifier_5 = self.lossFuncClass(self.y_out_cla[4], self.label[4])
# Calculate the regression loss
self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
# LOSS DOWN
self.loss_down = self.param.k_survival * self.loss_survival + self.param.k_classifier * self.loss_classifier_1 + self.param.k_classifier * self.loss_classifier_2 + self.param.k_classifier * self.loss_classifier_3 + self.param.k_classifier * self.loss_classifier_4 + self.param.k_classifier * self.loss_classifier_5 + self.param.k_regression * self.loss_regression
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
def update(self):
VaeBasicModel.update(self)
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
# Survival
y_true_E = self.survival_E
y_true_T = self.survival_T
y_out_sur = self.y_out_sur
predict = self.predict_risk()
# density = predict['density']
survival = predict['survival']
# hazard = predict['hazard']
risk = predict['risk']
# Classification
y_prob_cla = []
y_pred_cla = []
y_true_cla = []
for i in range(self.param.task_num-2):
y_prob_cla.append(F.softmax(self.y_out_cla[i], dim=1))
_, y_pred_cla_i = torch.max(y_prob_cla[i], 1)
y_pred_cla.append(y_pred_cla_i)
y_true_cla.append(self.label[i])
# Regression
y_true_reg = self.value
y_pred_reg = self.y_out_reg * self.param.regression_scale
return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy_1 = (output_dict['y_true_cla'][0] == output_dict['y_pred_cla'][0]).sum().item() / len(output_dict['y_true_cla'][0])
self.metric_accuracy_2 = (output_dict['y_true_cla'][1] == output_dict['y_pred_cla'][1]).sum().item() / len(output_dict['y_true_cla'][1])
self.metric_accuracy_3 = (output_dict['y_true_cla'][2] == output_dict['y_pred_cla'][2]).sum().item() / len(output_dict['y_true_cla'][2])
self.metric_accuracy_4 = (output_dict['y_true_cla'][3] == output_dict['y_pred_cla'][3]).sum().item() / len(output_dict['y_true_cla'][3])
self.metric_accuracy_5 = (output_dict['y_true_cla'][4] == output_dict['y_pred_cla'][4]).sum().item() / len(output_dict['y_true_cla'][4])
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
def predict_risk(self):
"""
Predict the density, survival and hazard function, as well as the risk score
"""
if self.param.survival_loss == 'MTLR':
phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
density = phi / div
survival = torch.mm(density, self.tri_matrix_2)
hazard = density[:, :-1] / survival[:, 1:]
cumulative_hazard = torch.cumsum(hazard, dim=1)
risk = torch.sum(cumulative_hazard, 1)
return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
| 10,265 | 49.078049 | 371 | py |
| SubOmiEmbed | SubOmiEmbed-main/models/networks.py |
import torch
import torch.nn as nn
import functools
from torch.nn import init
from torch.optim import lr_scheduler
# Class components
class DownSample(nn.Module):
"""
SingleConv1D module + MaxPool
The output dimension = input dimension // down_ratio
"""
def __init__(self, input_chan_num, output_chan_num, down_ratio, kernel_size=9, norm_layer=nn.InstanceNorm1d,
leaky_slope=0.2, dropout_p=0):
"""
Construct a downsampling block
Parameters:
input_chan_num (int) -- the number of channels of the input tensor
output_chan_num (int) -- the number of channels of the output tensor
down_ratio (int) -- the kernel size and stride of the MaxPool1d layer
kernel_size (int) -- the kernel size of the DoubleConv1D block
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
"""
super(DownSample, self).__init__()
self.down_sample = nn.Sequential(
SingleConv1D(input_chan_num, output_chan_num, kernel_size, norm_layer, leaky_slope),
nn.MaxPool1d(down_ratio),
nn.Dropout(p=dropout_p)
)
def forward(self, x):
return self.down_sample(x)
class UpSample(nn.Module):
"""
ConvTranspose1d + SingleConv1D
The output dimension = input dimension * up_ratio
"""
def __init__(self, input_chan_num, output_chan_num, up_ratio, kernel_size=9, norm_layer=nn.BatchNorm1d,
leaky_slope=0.2, dropout_p=0, attention=True):
"""
Construct a upsampling block
Parameters:
input_chan_num (int) -- the number of channels of the input tensor (the tensor obtained from the last layer, not the tensor from the skip-connection mechanism)
output_chan_num (int) -- the number of channels of the output tensor
up_ratio (int) -- the kernel size and stride of the ConvTranspose1d layer
kernel_size (int) -- the kernel size of the DoubleConv1D block
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
attention (bool) -- whether to apply normalization and activation after the transposed convolution
"""
super(UpSample, self).__init__()
self.attention = attention
self.up_sample = nn.Sequential(
nn.Dropout(p=dropout_p),
nn.ConvTranspose1d(input_chan_num, input_chan_num, kernel_size=up_ratio, stride=up_ratio),
SingleConv1D(input_chan_num, output_chan_num, kernel_size, norm_layer, leaky_slope)
)
self.up_sample_no_relu = nn.Sequential(
nn.Dropout(p=dropout_p),
nn.ConvTranspose1d(input_chan_num, input_chan_num, kernel_size=up_ratio, stride=up_ratio),
nn.Conv1d(input_chan_num, output_chan_num, kernel_size=kernel_size, padding=kernel_size // 2)
)
def forward(self, x):
if self.attention:
return self.up_sample(x)
else:
return self.up_sample_no_relu(x)
class OutputConv(nn.Module):
"""
Output convolution layer
"""
def __init__(self, input_chan_num, output_chan_num):
"""
Construct the output convolution layer
Parameters:
input_chan_num (int) -- the number of channels of the input tensor
output_chan_num (int) -- the number of channels of the output omics data
"""
super(OutputConv, self).__init__()
self.output_conv = nn.Sequential(
nn.Conv1d(input_chan_num, output_chan_num, kernel_size=1),
)
def forward(self, x):
return self.output_conv(x)
class SingleConv1D(nn.Module):
"""
Convolution1D => Norm1D => LeakyReLU
The omics data dimension stays the same during this process
"""
def __init__(self, input_chan_num, output_chan_num, kernel_size=9, norm_layer=nn.InstanceNorm1d, leaky_slope=0.2):
"""
Construct a single convolution block
Parameters:
input_chan_num (int) -- the number of channels of the input tensor
output_chan_num (int) -- the number of channels of the output tensor
kernel_size (int) -- the kernel size of the convolution layer
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
"""
super(SingleConv1D, self).__init__()
# Only if the norm method is instance norm we use bias for the corresponding conv layer
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm1d
else:
use_bias = norm_layer == nn.InstanceNorm1d
self.single_conv_1d = nn.Sequential(
nn.Conv1d(input_chan_num, output_chan_num, kernel_size=kernel_size, padding=kernel_size // 2,
bias=use_bias),
norm_layer(output_chan_num),
nn.LeakyReLU(negative_slope=leaky_slope, inplace=True)
)
def forward(self, x):
return self.single_conv_1d(x)
class FCBlock(nn.Module):
"""
Linear => Norm1D => LeakyReLU
"""
def __init__(self, input_dim, output_dim, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, activation=True, normalization=True, activation_name='LeakyReLU'):
"""
Construct a fully-connected block
Parameters:
input_dim (int) -- the dimension of the input tensor
output_dim (int) -- the dimension of the output tensor
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
activation (bool) -- need activation or not
normalization (bool) -- need normalization or not
activation_name (str) -- name of the activation function used in the FC block
"""
super(FCBlock, self).__init__()
# Linear
self.fc_block = [nn.Linear(input_dim, output_dim)]
# Norm
if normalization:
# FC block doesn't support InstanceNorm1d
if isinstance(norm_layer, functools.partial) and norm_layer.func == nn.InstanceNorm1d:
norm_layer = nn.BatchNorm1d
self.fc_block.append(norm_layer(output_dim))
# Dropout
if 0 < dropout_p <= 1:
self.fc_block.append(nn.Dropout(p=dropout_p))
# LeakyReLU
if activation:
if activation_name.lower() == 'leakyrelu':
self.fc_block.append(nn.LeakyReLU(negative_slope=leaky_slope, inplace=True))
elif activation_name.lower() == 'tanh':
self.fc_block.append(nn.Tanh())
else:
raise NotImplementedError('Activation function [%s] is not implemented' % activation_name)
self.fc_block = nn.Sequential(*self.fc_block)
def forward(self, x):
y = self.fc_block(x)
return y
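# Illustrative usage (not part of the original file): an FCBlock is Linear -> norm ->
# optional dropout -> activation, mapping (batch, input_dim) to (batch, output_dim).
# The sizes below are dummy values.
def _fc_block_example():
    block = FCBlock(128, 64, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0.1)
    x = torch.randn(8, 128)
    return block(x).shape  # -> torch.Size([8, 64])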
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Unflatten(nn.Module):
def __init__(self, channel, dim):
super(Unflatten, self).__init__()
self.channel = channel
self.dim = dim
def forward(self, x):
return x.view(x.size(0), self.channel, self.dim)
class Identity(nn.Module):
def forward(self, x):
return x
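# Illustrative shape check (not part of the original file): Flatten and Unflatten are
# inverse reshapes used around the fully-connected bottleneck of the conv VAEs below.
def _flatten_unflatten_example():
    x = torch.randn(2, 32, 10)          # (batch, channels, length)
    flat = Flatten()(x)                 # -> (2, 320)
    restored = Unflatten(32, 10)(flat)  # -> (2, 32, 10)
    return flat.shape, restored.shape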
# Class for VAE
# ConvVae
class ConvVaeABC(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for multi-omics dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_1A=4, ratio_2A=4, ratio_1C=2, ratio_2C=2, ratio_3=16,
latent_dim=256):
"""
Construct a one dimensional convolution variational autoencoder for multi-omics dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeABC, self).__init__()
A_dim = omics_dims[0]
B_dim = omics_dims[1]
C_dim = omics_dims[2]
hidden_dim_1 = (B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C) // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
self.narrow_B = hidden_dim_2 // (4 * filter_num) * ratio_3 * (B_dim // ratio_1B // ratio_2B) // (
B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
self.narrow_A = hidden_dim_2 // (4 * filter_num) * ratio_3 * (A_dim // ratio_1A // ratio_2A) // (
B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
self.narrow_C = hidden_dim_2 // (4 * filter_num) * ratio_3 * (C_dim // ratio_1C // ratio_2C) // (
B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A + C_dim // ratio_1C // ratio_2C)
self.B_dim = B_dim
self.A_dim = A_dim
self.C_dim = C_dim
# ENCODER
# B 1 -> 8
self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 8 -> 16
self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 1 -> 8
self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 8 -> 16
self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# C 1 -> 8
self.down_sample_1C = DownSample(1, filter_num, down_ratio=ratio_1C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# C 8 -> 16
self.down_sample_2C = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 16 -> 8
self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# B 8 -> 1
self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B Output
self.output_conv_B = OutputConv(filter_num, 1)
# A 16 -> 8
self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 8 -> 1
self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A Output
self.output_conv_A = OutputConv(filter_num, 1)
# C 16 -> 8
self.up_sample_2C = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# C 8 -> 1
self.up_sample_3C = UpSample(filter_num, filter_num, up_ratio=ratio_1C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# C Output
self.output_conv_C = OutputConv(filter_num, 1)
def encode(self, x):
level_2_B = self.down_sample_1B(x[1])
level_2_A = self.down_sample_1A(x[0])
level_2_C = self.down_sample_1C(x[2])
level_3_B = self.down_sample_2B(level_2_B)
level_3_A = self.down_sample_2A(level_2_A)
level_3_C = self.down_sample_2C(level_2_C)
level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 2)
level_4 = self.down_sample_3(level_3)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
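# Reparameterization trick (illustrative note): z = mean + eps * std with
# eps ~ N(0, I), so sampling stays differentiable with respect to mean and
# log_var because the randomness lives entirely in eps.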
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_2_B = level_2.narrow(2, 0, self.narrow_B)
level_2_A = level_2.narrow(2, self.narrow_B, self.narrow_A)
level_2_C = level_2.narrow(2, self.narrow_B+self.narrow_A, self.narrow_C+1)
level_3_B = self.up_sample_2B(level_2_B)
level_3_A = self.up_sample_2A(level_2_A)
level_3_C = self.up_sample_2C(level_2_C)
level_4_B = self.up_sample_3B(level_3_B)
level_4_A = self.up_sample_3A(level_3_A)
level_4_C = self.up_sample_3C(level_3_C)
output_B = self.output_conv_B(level_4_B)
output_A = self.output_conv_A(level_4_A)
output_C = self.output_conv_C(level_4_C)
recon_B = output_B[:, :, 0:self.B_dim]
recon_A = output_A[:, :, 0:self.A_dim]
recon_C = output_C[:, :, 0:self.C_dim]
return [recon_A, recon_B, recon_C]
def get_last_encode_layer(self):
return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class ConvVaeAB(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for multi-omics dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_1A=4, ratio_2A=4, ratio_3=16, latent_dim=256):
"""
Construct a one dimensional convolution variational autoencoder for multi-omics dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeAB, self).__init__()
A_dim = omics_dims[0]
B_dim = omics_dims[1]
hidden_dim_1 = (B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A) // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 2) * filter_num * 4
self.narrow_B = hidden_dim_2 // (4 * filter_num) * ratio_3 * (B_dim // ratio_1B // ratio_2B) // (
B_dim // ratio_1B // ratio_2B + A_dim // ratio_1A // ratio_2A)
self.narrow_A = hidden_dim_2 // (4 * filter_num) * ratio_3 - self.narrow_B
self.B_dim = B_dim
self.A_dim = A_dim
# ENCODER
# B 1 -> 8
self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 8 -> 16
self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 1 -> 8
self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 8 -> 16
self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 16 -> 8
self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# B 8 -> 1
self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B Output
self.output_conv_B = OutputConv(filter_num, 1)
# A 16 -> 8
self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 8 -> 1
self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A Output
self.output_conv_A = OutputConv(filter_num, 1)
def encode(self, x):
level_2_B = self.down_sample_1B(x[1])
level_2_A = self.down_sample_1A(x[0])
level_3_B = self.down_sample_2B(level_2_B)
level_3_A = self.down_sample_2A(level_2_A)
level_3 = torch.cat((level_3_B, level_3_A), 2)
level_4 = self.down_sample_3(level_3)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_2_B = level_2.narrow(2, 0, self.narrow_B)
level_2_A = level_2.narrow(2, self.narrow_B, self.narrow_A)
level_3_B = self.up_sample_2B(level_2_B)
level_3_A = self.up_sample_2A(level_2_A)
level_4_B = self.up_sample_3B(level_3_B)
level_4_A = self.up_sample_3A(level_3_A)
output_B = self.output_conv_B(level_4_B)
output_A = self.output_conv_A(level_4_A)
recon_B = output_B[:, :, 0:self.B_dim]
recon_A = output_A[:, :, 0:self.A_dim]
return [recon_A, recon_B]
def get_last_encode_layer(self):
return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class ConvVaeB(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for DNA methylation dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1B=16, ratio_2B=16, ratio_3=16, latent_dim=256):
"""
Construct a one dimensional convolution variational autoencoder for DNA methylation dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeB, self).__init__()
B_dim = omics_dims[1]
hidden_dim_1 = B_dim // ratio_1B // ratio_2B // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
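        # Worked example with made-up numbers: for filter_num = 8 and the default ratios
        # 16/16/16, a hypothetical B_dim of 300000 gives
        #     hidden_dim_1 = 300000 // 16 // 16 // 16 * 8 * 4 = 73 * 32 = 2336
        #     hidden_dim_2 = (2336 // 32 + 1) * 32            = 74 * 32 = 2368
        # hidden_dim_1 is the flattened encoder feature size fed to fc_mean / fc_log_var
        # (assuming each DownSample shrinks the length by exactly its down_ratio), and
        # hidden_dim_2 rounds up to the next multiple of filter_num * 4 so the decoder's
        # Unflatten yields an integer sequence length per channel.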
self.B_dim = B_dim
# ENCODER
# B 1 -> 8
self.down_sample_1B = DownSample(1, filter_num, down_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 8 -> 16
self.down_sample_2B = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B 16 -> 8
self.up_sample_2B = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# B 8 -> 1
self.up_sample_3B = UpSample(filter_num, filter_num, up_ratio=ratio_1B, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# B Output
self.output_conv_B = OutputConv(filter_num, 1)
def encode(self, x):
level_2_B = self.down_sample_1B(x[1])
level_3_B = self.down_sample_2B(level_2_B)
level_4 = self.down_sample_3(level_3_B)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_3_B = self.up_sample_2B(level_2)
level_4_B = self.up_sample_3B(level_3_B)
output_B = self.output_conv_B(level_4_B)
recon_B = output_B[:, :, 0:self.B_dim]
return [None, recon_B]
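    # Note: decode() in each VAE returns a list aligned with the omics ordering
    # [A (gene expression), B (DNA methylation), C (miRNA expression)]; entries for
    # omics types the particular VAE does not model are left as None, so here only
    # the methylation reconstruction is filled in.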
def get_last_encode_layer(self):
return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class ConvVaeA(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for gene expression dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1A=4, ratio_2A=4, ratio_3=16, latent_dim=256):
"""
        Construct a one dimensional convolution variational autoencoder for gene expression dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeA, self).__init__()
A_dim = omics_dims[0]
hidden_dim_1 = A_dim // ratio_1A // ratio_2A // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
self.A_dim = A_dim
# ENCODER
# A 1 -> 8
self.down_sample_1A = DownSample(1, filter_num, down_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 8 -> 16
self.down_sample_2A = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A 16 -> 8
self.up_sample_2A = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# A 8 -> 1
self.up_sample_3A = UpSample(filter_num, filter_num, up_ratio=ratio_1A, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# A Output
self.output_conv_A = OutputConv(filter_num, 1)
def encode(self, x):
level_2_A = self.down_sample_1A(x[0])
level_3_A = self.down_sample_2A(level_2_A)
level_4 = self.down_sample_3(level_3_A)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_3_A = self.up_sample_2A(level_2)
level_4_A = self.up_sample_3A(level_3_A)
output_A = self.output_conv_A(level_4_A)
recon_A = output_A[:, :, 0:self.A_dim]
return [recon_A]
def get_last_encode_layer(self):
return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class ConvVaeC(nn.Module):
"""
Defines a one dimensional convolution variational autoencoder for miRNA expression dataset
"""
def __init__(self, omics_dims, norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9, leaky_slope=0.2,
dropout_p=0, ratio_1C=2, ratio_2C=2, ratio_3=16, latent_dim=256):
"""
        Construct a one dimensional convolution variational autoencoder for miRNA expression dataset
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(ConvVaeC, self).__init__()
C_dim = omics_dims[2]
hidden_dim_1 = (C_dim // ratio_1C // ratio_2C) // ratio_3 * filter_num * 4
hidden_dim_2 = (hidden_dim_1 // (filter_num * 4) + 1) * filter_num * 4
self.C_dim = C_dim
# ENCODER
# C 1 -> 8
self.down_sample_1C = DownSample(1, filter_num, down_ratio=ratio_1C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# C 8 -> 16
self.down_sample_2C = DownSample(filter_num, filter_num * 2, down_ratio=ratio_2C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# 16 -> 32
self.down_sample_3 = DownSample(filter_num * 2, filter_num * 4, down_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# Flatten
self.flatten = Flatten()
# FC to mean
self.fc_mean = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# FC to log_var
self.fc_log_var = FCBlock(hidden_dim_1, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# FC from z
self.fc_z = FCBlock(latent_dim, hidden_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=True)
# Unflatten
self.unflatten = Unflatten(filter_num * 4, hidden_dim_2 // (4 * filter_num))
# 32 -> 16
self.up_sample_1 = UpSample(filter_num * 4, filter_num * 2, up_ratio=ratio_3, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# C 16 -> 8
self.up_sample_2C = UpSample(filter_num * 2, filter_num, up_ratio=ratio_2C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p)
# C 8 -> 1
self.up_sample_3C = UpSample(filter_num, filter_num, up_ratio=ratio_1C, kernel_size=kernel_size,
norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0)
# C Output
self.output_conv_C = OutputConv(filter_num, 1)
def encode(self, x):
level_2_C = self.down_sample_1C(x[2])
level_3_C = self.down_sample_2C(level_2_C)
level_4 = self.down_sample_3(level_3_C)
level_4_flatten = self.flatten(level_4)
latent_mean = self.fc_mean(level_4_flatten)
latent_log_var = self.fc_log_var(level_4_flatten)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.fc_z(z)
level_1_unflatten = self.unflatten(level_1)
level_2 = self.up_sample_1(level_1_unflatten)
level_3_C = self.up_sample_2C(level_2)
level_4_C = self.up_sample_3C(level_3_C)
output_C = self.output_conv_C(level_4_C)
recon_C = output_C[:, :, 0:self.C_dim]
return [None, None, recon_C]
def get_last_encode_layer(self):
return self.fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
# FcSepVae
class FcSepVaeABC(nn.Module):
"""
Defines a fully-connected variational autoencoder for multi-omics dataset
DNA methylation input separated by chromosome
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
dim_1A=2048, dim_2A=1024, dim_1C=1024, dim_2C=1024, dim_3=512, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcSepVaeABC, self).__init__()
self.A_dim = omics_dims[0]
self.B_dim_list = omics_dims[1]
self.C_dim = omics_dims[2]
self.dim_1B = dim_1B
self.dim_2B = dim_2B
self.dim_2A = dim_2A
self.dim_2C = dim_2C
# ENCODER
# Layer 1
self.encode_fc_1B_list = nn.ModuleList()
for i in range(0, 23):
self.encode_fc_1B_list.append(
FCBlock(self.B_dim_list[i], dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True))
self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B+dim_2A+dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A+dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3C = FCBlock(dim_2C, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B_list = nn.ModuleList()
for i in range(0, 23):
self.decode_fc_4B_list.append(
FCBlock(dim_1B, self.B_dim_list[i], norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False))
self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.decode_fc_4C = FCBlock(dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_B_list = []
for i in range(0, 23):
level_2_B_list.append(self.encode_fc_1B_list[i](x[1][i]))
level_2_B = torch.cat(level_2_B_list, 1)
level_2_A = self.encode_fc_1A(x[0])
level_2_C = self.encode_fc_1C(x[2])
level_3_B = self.encode_fc_2B(level_2_B)
level_3_A = self.encode_fc_2A(level_2_A)
level_3_C = self.encode_fc_2C(level_2_C)
level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 1)
level_4 = self.encode_fc_3(level_3)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_2_B = level_2.narrow(1, 0, self.dim_2B)
level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
level_2_C = level_2.narrow(1, self.dim_2B+self.dim_2A, self.dim_2C)
level_3_B = self.decode_fc_3B(level_2_B)
level_3_B_list = []
for i in range(0, 23):
level_3_B_list.append(level_3_B.narrow(1, self.dim_1B*i, self.dim_1B))
level_3_A = self.decode_fc_3A(level_2_A)
level_3_C = self.decode_fc_3C(level_2_C)
recon_B_list = []
for i in range(0, 23):
recon_B_list.append(self.decode_fc_4B_list[i](level_3_B_list[i]))
recon_A = self.decode_fc_4A(level_3_A)
recon_C = self.decode_fc_4C(level_3_C)
return [recon_A, recon_B_list, recon_C]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
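# Illustrative usage sketch for the chromosome-separated ("fc_sep") VAEs (not executed;
# all sizes below are hypothetical). In this family x[1] and omics_dims[1] are lists of 23
# per-chromosome DNA methylation blocks rather than a single tensor.
#
#   B_dim_list = [1200] * 23                          # hypothetical per-chromosome widths
#   vae = FcSepVaeABC([4096, B_dim_list, 800])        # [A_dim, B_dim_list, C_dim]
#   x = [torch.randn(16, 4096),                       # gene expression (A)
#        [torch.randn(16, d) for d in B_dim_list],    # 23 methylation blocks (B)
#        torch.randn(16, 800)]                        # miRNA expression (C)
#   z, recon_x, mean, log_var = vae(x)                # recon_x = [recon_A, recon_B_list, recon_C]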
class FcSepVaeAB(nn.Module):
"""
Defines a fully-connected variational autoencoder for multi-omics dataset
DNA methylation input separated by chromosome
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
dim_1A=2048, dim_2A=1024, dim_3=512, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcSepVaeAB, self).__init__()
self.A_dim = omics_dims[0]
self.B_dim_list = omics_dims[1]
self.dim_1B = dim_1B
self.dim_2B = dim_2B
self.dim_2A = dim_2A
# ENCODER
# Layer 1
self.encode_fc_1B_list = nn.ModuleList()
for i in range(0, 23):
self.encode_fc_1B_list.append(
FCBlock(self.B_dim_list[i], dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True))
self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B+dim_2A, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B_list = nn.ModuleList()
for i in range(0, 23):
self.decode_fc_4B_list.append(
FCBlock(dim_1B, self.B_dim_list[i], norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False))
self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_B_list = []
for i in range(0, 23):
level_2_B_list.append(self.encode_fc_1B_list[i](x[1][i]))
level_2_B = torch.cat(level_2_B_list, 1)
level_2_A = self.encode_fc_1A(x[0])
level_3_B = self.encode_fc_2B(level_2_B)
level_3_A = self.encode_fc_2A(level_2_A)
level_3 = torch.cat((level_3_B, level_3_A), 1)
level_4 = self.encode_fc_3(level_3)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_2_B = level_2.narrow(1, 0, self.dim_2B)
level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
level_3_B = self.decode_fc_3B(level_2_B)
level_3_B_list = []
for i in range(0, 23):
level_3_B_list.append(level_3_B.narrow(1, self.dim_1B*i, self.dim_1B))
level_3_A = self.decode_fc_3A(level_2_A)
recon_B_list = []
for i in range(0, 23):
recon_B_list.append(self.decode_fc_4B_list[i](level_3_B_list[i]))
recon_A = self.decode_fc_4A(level_3_A)
return [recon_A, recon_B_list]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class FcSepVaeB(nn.Module):
"""
Defines a fully-connected variational autoencoder for DNA methylation dataset
DNA methylation input separated by chromosome
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=128, dim_2B=1024,
dim_3=512, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcSepVaeB, self).__init__()
self.B_dim_list = omics_dims[1]
self.dim_1B = dim_1B
# ENCODER
# Layer 1
self.encode_fc_1B_list = nn.ModuleList()
for i in range(0, 23):
self.encode_fc_1B_list.append(
FCBlock(self.B_dim_list[i], dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True))
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B*23, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(dim_2B, dim_1B*23, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B_list = nn.ModuleList()
for i in range(0, 23):
self.decode_fc_4B_list.append(
FCBlock(dim_1B, self.B_dim_list[i], norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False))
def encode(self, x):
level_2_B_list = []
for i in range(0, 23):
level_2_B_list.append(self.encode_fc_1B_list[i](x[1][i]))
level_2_B = torch.cat(level_2_B_list, 1)
level_3_B = self.encode_fc_2B(level_2_B)
level_4 = self.encode_fc_3(level_3_B)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_3_B = self.decode_fc_3B(level_2)
level_3_B_list = []
for i in range(0, 23):
level_3_B_list.append(level_3_B.narrow(1, self.dim_1B*i, self.dim_1B))
recon_B_list = []
for i in range(0, 23):
recon_B_list.append(self.decode_fc_4B_list[i](level_3_B_list[i]))
return [None, recon_B_list]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
# FcVae
class FcVaeABC(nn.Module):
"""
Defines a fully-connected variational autoencoder for multi-omics dataset
DNA methylation input not separated by chromosome
"""
def __init__(self, param, omics_dims, omics_subset_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=384, dim_2B=256,
dim_1A=384, dim_2A=256, dim_1C=384, dim_2C=256, dim_3=256, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
            param                    -- the parameter object; its enc_reduction_factor and dec_reduction_factor shrink the encoder and decoder layer widths
            omics_dims (list)        -- the list of input omics dimensions
            omics_subset_dims (list) -- optional per-omics subset dimensions; when given, the first encoder layer reads the subset inputs instead of the full omics_dims (pass None to disable)
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcVaeABC, self).__init__()
if omics_subset_dims is not None:
self.A_subset_dim = omics_subset_dims[0]
self.B_subset_dim = omics_subset_dims[1]
self.C_subset_dim = omics_subset_dims[2]
# Decoder dimensions
self.dim_1A = dim_1A // param.dec_reduction_factor ; self.dim_1B = dim_1B // param.dec_reduction_factor ; self.dim_1C = dim_1C // param.dec_reduction_factor
self.dim_2A = dim_2A // param.dec_reduction_factor ; self.dim_2B = dim_2B // param.dec_reduction_factor ; self.dim_2C = dim_2C // param.dec_reduction_factor
# Encoder dimensions
dim_1A //= param.enc_reduction_factor ; dim_1B //= param.enc_reduction_factor ; dim_1C //= param.enc_reduction_factor
        dim_2A //= param.enc_reduction_factor ; dim_2B //= param.enc_reduction_factor ; dim_2C //= param.enc_reduction_factor
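        # Worked example with made-up numbers: with the defaults dim_1A = 384 and
        # dim_2A = 256, a hypothetical param having enc_reduction_factor = 2 and
        # dec_reduction_factor = 1 gives encoder widths 192 / 128 on the A branch while
        # the decoder keeps the full 384 / 256 (stored above as self.dim_1A / self.dim_2A),
        # so the encoder and decoder can be slimmed independently.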
self.A_dim = omics_dims[0]
self.B_dim = omics_dims[1]
self.C_dim = omics_dims[2]
# ENCODER
# Layer 1
if omics_subset_dims is None:
self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
else:
self.encode_fc_1B = FCBlock(self.B_subset_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1A = FCBlock(self.A_subset_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1C = FCBlock(self.C_subset_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B+dim_2A+dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, self.dim_2B+self.dim_2A+self.dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(self.dim_2B, self.dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3A = FCBlock(self.dim_2A, self.dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3C = FCBlock(self.dim_2C, self.dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B = FCBlock(self.dim_1B, self.B_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.decode_fc_4A = FCBlock(self.dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.decode_fc_4C = FCBlock(self.dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_B = self.encode_fc_1B(x[1])
level_2_A = self.encode_fc_1A(x[0])
level_2_C = self.encode_fc_1C(x[2])
level_3_B = self.encode_fc_2B(level_2_B)
level_3_A = self.encode_fc_2A(level_2_A)
level_3_C = self.encode_fc_2C(level_2_C)
level_3 = torch.cat((level_3_B, level_3_A, level_3_C), 1)
level_4 = self.encode_fc_3(level_3)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_2_B = level_2.narrow(1, 0, self.dim_2B)
level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
level_2_C = level_2.narrow(1, self.dim_2B+self.dim_2A, self.dim_2C)
level_3_B = self.decode_fc_3B(level_2_B)
level_3_A = self.decode_fc_3A(level_2_A)
level_3_C = self.decode_fc_3C(level_2_C)
recon_B = self.decode_fc_4B(level_3_B)
recon_A = self.decode_fc_4A(level_3_A)
recon_C = self.decode_fc_4C(level_3_C)
return [recon_A, recon_B, recon_C]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
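# Illustrative construction sketch for FcVaeABC (not executed; every value is a stand-in).
# The param argument only needs the two reduction-factor attributes read in __init__, so a
# simple namespace object is enough for experimentation.
#
#   from types import SimpleNamespace
#   param = SimpleNamespace(enc_reduction_factor=1, dec_reduction_factor=1)
#   vae = FcVaeABC(param, [4096, 20000, 800], omics_subset_dims=None)
#   x = [torch.randn(16, 4096), torch.randn(16, 20000), torch.randn(16, 800)]
#   z, recon_x, mean, log_var = vae(x)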
class FcVaeAB(nn.Module):
"""
Defines a fully-connected variational autoencoder for multi-omics dataset
DNA methylation input not separated by chromosome
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=384, dim_2B=256,
dim_1A=384, dim_2A=256, dim_3=256, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcVaeAB, self).__init__()
self.A_dim = omics_dims[0]
self.B_dim = omics_dims[1]
self.dim_1B = dim_1B
self.dim_2B = dim_2B
self.dim_2A = dim_2A
# ENCODER
# Layer 1
self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B+dim_2A, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2B+dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(dim_2B, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B = FCBlock(dim_1B, self.B_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_B = self.encode_fc_1B(x[1])
level_2_A = self.encode_fc_1A(x[0])
level_3_B = self.encode_fc_2B(level_2_B)
level_3_A = self.encode_fc_2A(level_2_A)
level_3 = torch.cat((level_3_B, level_3_A), 1)
level_4 = self.encode_fc_3(level_3)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_2_B = level_2.narrow(1, 0, self.dim_2B)
level_2_A = level_2.narrow(1, self.dim_2B, self.dim_2A)
level_3_B = self.decode_fc_3B(level_2_B)
level_3_A = self.decode_fc_3A(level_2_A)
recon_B = self.decode_fc_4B(level_3_B)
recon_A = self.decode_fc_4A(level_3_A)
return [recon_A, recon_B]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class FcVaeB(nn.Module):
"""
Defines a fully-connected variational autoencoder for DNA methylation dataset
DNA methylation input not separated by chromosome
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1B=512, dim_2B=256,
dim_3=256, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcVaeB, self).__init__()
self.B_dim = omics_dims[1]
# ENCODER
# Layer 1
self.encode_fc_1B = FCBlock(self.B_dim, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2B = FCBlock(dim_1B, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2B, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=0, activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3B = FCBlock(dim_2B, dim_1B, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4B = FCBlock(dim_1B, self.B_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_B = self.encode_fc_1B(x[1])
level_3 = self.encode_fc_2B(level_2_B)
level_4 = self.encode_fc_3(level_3)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_3_B = self.decode_fc_3B(level_2)
recon_B = self.decode_fc_4B(level_3_B)
return [None, recon_B]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class FcVaeA(nn.Module):
"""
Defines a fully-connected variational autoencoder for gene expression dataset
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1A=1024, dim_2A=1024,
dim_3=512, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcVaeA, self).__init__()
self.A_dim = omics_dims[0]
# ENCODER
# Layer 1
self.encode_fc_1A = FCBlock(self.A_dim, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2A = FCBlock(dim_1A, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2A, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3A = FCBlock(dim_2A, dim_1A, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4A = FCBlock(dim_1A, self.A_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_A = self.encode_fc_1A(x[0])
level_3_A = self.encode_fc_2A(level_2_A)
level_4 = self.encode_fc_3(level_3_A)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_3_A = self.decode_fc_3A(level_2)
recon_A = self.decode_fc_4A(level_3_A)
return [recon_A]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
class FcVaeC(nn.Module):
"""
    Defines a fully-connected variational autoencoder for miRNA expression dataset
"""
def __init__(self, omics_dims, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, dim_1C=1024, dim_2C=1024, dim_3=512, latent_dim=256):
"""
Construct a fully-connected variational autoencoder
Parameters:
omics_dims (list) -- the list of input omics dimensions
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
"""
super(FcVaeC, self).__init__()
self.C_dim = omics_dims[2]
self.dim_2C = dim_2C
# ENCODER
# Layer 1
self.encode_fc_1C = FCBlock(self.C_dim, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.encode_fc_2C = FCBlock(dim_1C, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.encode_fc_3 = FCBlock(dim_2C, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.encode_fc_mean = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
self.encode_fc_log_var = FCBlock(dim_3, latent_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
# DECODER
# Layer 1
self.decode_fc_z = FCBlock(latent_dim, dim_3, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 2
self.decode_fc_2 = FCBlock(dim_3, dim_2C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 3
self.decode_fc_3C = FCBlock(dim_2C, dim_1C, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# Layer 4
self.decode_fc_4C = FCBlock(dim_1C, self.C_dim, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def encode(self, x):
level_2_C = self.encode_fc_1C(x[2])
level_3_C = self.encode_fc_2C(level_2_C)
level_4 = self.encode_fc_3(level_3_C)
latent_mean = self.encode_fc_mean(level_4)
latent_log_var = self.encode_fc_log_var(level_4)
return latent_mean, latent_log_var
def reparameterize(self, mean, log_var):
std = torch.exp(0.5 * log_var)
eps = torch.randn_like(std)
return eps.mul(std).add_(mean)
def decode(self, z):
level_1 = self.decode_fc_z(z)
level_2 = self.decode_fc_2(level_1)
level_3_C = self.decode_fc_3C(level_2)
recon_C = self.decode_fc_4C(level_3_C)
return [None, None, recon_C]
def get_last_encode_layer(self):
return self.encode_fc_mean
def forward(self, x):
mean, log_var = self.encode(x)
z = self.reparameterize(mean, log_var)
recon_x = self.decode(z)
return z, recon_x, mean, log_var
# Class for downstream task
class MultiFcClassifier(nn.Module):
"""
Defines a multi-layer fully-connected classifier
"""
def __init__(self, param, class_num=2, latent_dim=256, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
class_dim_1=128, class_dim_2=64, layer_num=3):
"""
Construct a multi-layer fully-connected classifier
Parameters:
            param                    -- the parameter object; its down_reduction_factor shrinks the classifier layer widths
            class_num (int)          -- the number of classes
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
layer_num (int) -- the layer number of the classifier, >=3
"""
super(MultiFcClassifier, self).__init__()
class_dim_1 = class_dim_1 // param.down_reduction_factor
class_dim_2 = class_dim_2 // param.down_reduction_factor
self.input_fc = FCBlock(latent_dim, class_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# create a list to store fc blocks
mul_fc_block = []
# the block number of the multi-layer fully-connected block should be at least 3
block_layer_num = max(layer_num, 3)
input_dim = class_dim_1
dropout_flag = True
for num in range(0, block_layer_num-2):
mul_fc_block += [FCBlock(input_dim, class_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=dropout_flag*dropout_p, activation=True)]
input_dim = class_dim_2
# dropout for every other layer
dropout_flag = not dropout_flag
self.mul_fc = nn.Sequential(*mul_fc_block)
# the output fully-connected layer of the classifier
self.output_fc = FCBlock(class_dim_2, class_num, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def forward(self, x):
x1 = self.input_fc(x)
x2 = self.mul_fc(x1)
y = self.output_fc(x2)
return y
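# Illustrative sketch (not executed; values are stand-ins): the classifier maps the latent
# code through latent_dim -> class_dim_1 -> class_dim_2 (repeated as needed) -> class_num
# and returns raw logits, since the output FCBlock is built without activation or
# normalization.
#
#   from types import SimpleNamespace
#   clf = MultiFcClassifier(SimpleNamespace(down_reduction_factor=1), class_num=2, latent_dim=256)
#   logits = clf(torch.randn(16, 256))                # -> (16, 2)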
class MultiFcRegression(nn.Module):
"""
Defines a multi-layer fully-connected regression net
"""
def __init__(self, latent_dim=256, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0, down_dim_1=128,
down_dim_2=64, layer_num=3):
"""
        Construct a multi-layer fully-connected regression net
Parameters:
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
layer_num (int) -- the layer number of the classifier, >=3
"""
super(MultiFcRegression, self).__init__()
self.input_fc = FCBlock(latent_dim, down_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True)
# create a list to store fc blocks
mul_fc_block = []
# the block number of the multi-layer fully-connected block should be at least 3
block_layer_num = max(layer_num, 3)
input_dim = down_dim_1
dropout_flag = True
for num in range(0, block_layer_num-2):
mul_fc_block += [FCBlock(input_dim, down_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=dropout_flag*dropout_p, activation=True)]
input_dim = down_dim_2
# dropout for every other layer
dropout_flag = not dropout_flag
self.mul_fc = nn.Sequential(*mul_fc_block)
        # the output fully-connected layer of the regression net
self.output_fc = FCBlock(down_dim_2, 1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def forward(self, x):
x1 = self.input_fc(x)
x2 = self.mul_fc(x1)
y = self.output_fc(x2)
return y
class MultiFcSurvival(nn.Module):
"""
Defines a multi-layer fully-connected survival predictor
"""
def __init__(self, time_num=256, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
down_dim_1=512, down_dim_2=256, layer_num=3):
"""
Construct a multi-layer fully-connected survival predictor
Parameters:
time_num (int) -- the number of time intervals in the model
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
layer_num (int) -- the layer number of the classifier, >=3
"""
super(MultiFcSurvival, self).__init__()
self.input_fc = FCBlock(latent_dim, down_dim_1, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=dropout_p,
activation=True, activation_name='Tanh')
# create a list to store fc blocks
mul_fc_block = []
# the block number of the multi-layer fully-connected block should be at least 3
block_layer_num = max(layer_num, 3)
input_dim = down_dim_1
dropout_flag = True
for num in range(0, block_layer_num-2):
mul_fc_block += [FCBlock(input_dim, down_dim_2, norm_layer=norm_layer, leaky_slope=leaky_slope,
dropout_p=dropout_p, activation=True, activation_name='Tanh')]
input_dim = down_dim_2
# dropout for every other layer
dropout_flag = not dropout_flag
self.mul_fc = nn.Sequential(*mul_fc_block)
        # the output fully-connected layer of the survival predictor
# the output dimension should be the number of time intervals
self.output_fc = FCBlock(down_dim_2, time_num, norm_layer=norm_layer, leaky_slope=leaky_slope, dropout_p=0,
activation=False, normalization=False)
def forward(self, x):
x1 = self.input_fc(x)
x2 = self.mul_fc(x1)
y = self.output_fc(x2)
return y
class MultiFcMultitask(nn.Module):
"""
Defines a multi-layer fully-connected multitask downstream network
"""
def __init__(self, class_num=2, time_num=256, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
layer_num=3):
"""
Construct a multi-layer fully-connected multitask downstream network
Parameters:
            class_num (int)          -- the number of classes
time_num (int) -- the number of time intervals in the model
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
layer_num (int) -- the layer number of the downstream networks, >=3
"""
super(MultiFcMultitask, self).__init__()
norm_layer_none = lambda x: Identity()
self.survival = MultiFcSurvival(time_num, latent_dim, norm_layer=norm_layer_none, leaky_slope=leaky_slope, dropout_p=0.5, layer_num=layer_num)
self.classifier = MultiFcClassifier(class_num, latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.2, layer_num=layer_num)
self.regression = MultiFcRegression(latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.01, layer_num=layer_num)
def forward(self, x):
y_out_sur = self.survival(x)
y_out_cla = self.classifier(x)
y_out_reg = self.regression(x)
return y_out_sur, y_out_cla, y_out_reg
class MultiFcAlltask(nn.Module):
"""
Defines a multi-layer fully-connected multitask downstream network (all tasks)
"""
def __init__(self, class_num, time_num=256, task_num=7, latent_dim=128, norm_layer=nn.BatchNorm1d, leaky_slope=0.2, dropout_p=0,
layer_num=3):
"""
Construct a multi-layer fully-connected multitask downstream network (all tasks)
Parameters:
class_num (list) -- the list of class numbers
time_num (int) -- the number of time intervals in the model
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
norm_layer -- normalization layer
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
layer_num (int) -- the layer number of the classifier, >=3
task_num (int) -- the number of downstream tasks
"""
super(MultiFcAlltask, self).__init__()
norm_layer_none = lambda x: Identity()
self.survival = MultiFcSurvival(time_num, latent_dim, norm_layer=norm_layer_none, leaky_slope=leaky_slope, dropout_p=0.5, layer_num=layer_num)
self.classifiers = nn.ModuleList([MultiFcClassifier(class_num[i], latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.2, layer_num=layer_num) for i in range(task_num-2)])
self.regression = MultiFcRegression(latent_dim, norm_layer=nn.BatchNorm1d, leaky_slope=leaky_slope, dropout_p=0.01, layer_num=layer_num)
self.task_num = task_num
def forward(self, x):
y_out_sur = self.survival(x)
y_out_cla = []
for i in range(self.task_num - 2):
y_out_cla.append(self.classifiers[i](x))
y_out_reg = self.regression(x)
return y_out_sur, y_out_cla, y_out_reg
# Class for the OmiEmbed combined network
class OmiEmbed(nn.Module):
"""
Defines the OmiEmbed combined network
"""
def __init__(self, net_VAE, net_down, omics_dims, omics_mode='multi_omics', norm_layer=nn.InstanceNorm1d, filter_num=8, kernel_size=9,
leaky_slope=0.2, dropout_p=0, latent_dim=128, class_num=2, time_num=256, task_num=7):
"""
Construct the OmiEmbed combined network
Parameters:
net_VAE (str) -- the backbone of the VAE, default: conv_1d
net_down (str) -- the backbone of the downstream task network, default: multi_FC_classifier
omics_dims (list) -- the list of input omics dimensions
            omics_mode (str)         -- the omics types to use in the model
norm_layer -- normalization layer
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
class_num (int/list) -- the number of classes
time_num (int) -- the number of time intervals
task_num (int) -- the number of downstream tasks
"""
super(OmiEmbed, self).__init__()
self.vae = None
if net_VAE == 'conv_1d':
if omics_mode == 'abc':
self.vae = ConvVaeABC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'ab':
self.vae = ConvVaeAB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'b':
self.vae = ConvVaeB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'a':
self.vae = ConvVaeA(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'c':
self.vae = ConvVaeC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p, latent_dim=latent_dim)
elif net_VAE == 'fc_sep':
if omics_mode == 'abc':
self.vae = FcSepVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'ab':
self.vae = FcSepVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'b':
self.vae = FcSepVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'a':
self.vae = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'c':
self.vae = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif net_VAE == 'fc':
if omics_mode == 'abc':
self.vae = FcVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'ab':
self.vae = FcVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'b':
self.vae = FcVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'a':
self.vae = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'c':
self.vae = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
else:
raise NotImplementedError('VAE model name [%s] is not recognized' % net_VAE)
self.net_down = net_down
self.down = None
if net_down == 'multi_FC_classifier':
self.down = MultiFcClassifier(class_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_regression':
self.down = MultiFcRegression(latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_survival':
self.down = MultiFcSurvival(time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_multitask':
self.down = MultiFcMultitask(class_num, time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_alltask':
self.down = MultiFcAlltask(class_num, time_num, task_num, latent_dim, norm_layer, leaky_slope, dropout_p)
else:
raise NotImplementedError('Downstream model name [%s] is not recognized' % net_down)
def get_last_encode_layer(self):
return self.vae.get_last_encode_layer()
def forward(self, x):
z, recon_x, mean, log_var = self.vae(x)
if self.net_down == 'multi_FC_multitask' or self.net_down == 'multi_FC_alltask':
y_out_sur, y_out_cla, y_out_reg = self.down(mean)
return z, recon_x, mean, log_var, y_out_sur, y_out_cla, y_out_reg
else:
y_out = self.down(mean)
return z, recon_x, mean, log_var, y_out
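# Illustrative end-to-end sketch of the combined network (not executed; dimensions are
# hypothetical). Note that the downstream head consumes the latent mean rather than the
# sampled z. define_net below performs the same construction plus weight initialization
# and GPU placement via init_net.
#
#   omics_dims = [4096, 20000]                        # hypothetical [A_dim, B_dim]
#   net = OmiEmbed('fc', 'multi_FC_regression', omics_dims, omics_mode='ab',
#                  norm_layer=get_norm_layer('batch'), latent_dim=128)
#   x = [torch.randn(16, 4096), torch.randn(16, 20000)]
#   z, recon_x, mean, log_var, y_out = net(x)         # y_out: (16, 1) regression output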
def get_norm_layer(norm_type='batch'):
"""
Return a normalization layer
Parameters:
norm_type (str) -- the type of normalization applied to the model, default to use batch normalization, options: [batch | instance | none ]
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm1d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm1d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = lambda x: Identity()
else:
raise NotImplementedError('normalization method [%s] is not found' % norm_type)
return norm_layer
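# Example: the returned value is a constructor that still expects the feature dimension,
# e.g. get_norm_layer('batch')(256) builds nn.BatchNorm1d(256, affine=True,
# track_running_stats=True), while get_norm_layer('none')(256) simply returns an Identity().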
def define_net(net_VAE, net_down, omics_dims, omics_mode='multi_omics', norm_type='batch', filter_num=8, kernel_size=9,
leaky_slope=0.2, dropout_p=0, latent_dim=256, class_num=2, time_num=256, task_num=7, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""
Create the OmiEmbed network
Parameters:
net_VAE (str) -- the backbone of the VAE, default: conv_1d
net_down (str) -- the backbone of the downstream task network, default: multi_FC_classifier
omics_dims (list) -- the list of input omics dimensions
        omics_mode (str)         -- the omics types to use in the model
norm_type (str) -- the name of normalization layers used in the network, default: batch
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
class_num (int) -- the number of classes
time_num (int) -- the number of time intervals
task_num (int) -- the number of downstream tasks
        init_type (str) -- the name of the initialization method
init_gain (float) -- scaling factor for normal, xavier and orthogonal initialization methods
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1
Returns the OmiEmbed network
The network has been initialized by <init_net>.
"""
net = None
# get the normalization layer
norm_layer = get_norm_layer(norm_type=norm_type)
net = OmiEmbed(net_VAE, net_down, omics_dims, omics_mode, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim, class_num, time_num, task_num)
return init_net(net, init_type, init_gain, gpu_ids)
def define_VAE(param, net_VAE, omics_subset_dims, omics_dims, omics_mode='multi_omics', norm_type='batch', filter_num=8, kernel_size=9, leaky_slope=0.2, dropout_p=0,
latent_dim=256, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""
Create the VAE network
    Parameters:
        param -- the object storing all experiment parameters (used by the subset-aware fully-connected VAE)
        net_VAE (str) -- the backbone of the VAE, default: conv_1d
        omics_subset_dims (list) -- the per-subset input dimensions when feature subsetting is used (None otherwise)
        omics_dims (list) -- the list of input omics dimensions
        omics_mode (str) -- the omics types to be used in the model
norm_type (str) -- the name of normalization layers used in the network, default: batch
filter_num (int) -- the number of filters in the first convolution layer in the VAE
kernel_size (int) -- the kernel size of convolution layers
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space
        init_type (str) -- the name of the initialization method
init_gain (float) -- scaling factor for normal, xavier and orthogonal initialization methods
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1
Returns a VAE
    The default backbone of the VAE is a one-dimensional convolutional layer.
    The network has been initialized by <init_net>.
"""
net = None
# get the normalization layer
norm_layer = get_norm_layer(norm_type=norm_type)
if net_VAE == 'conv_1d':
if omics_mode == 'abc':
net = ConvVaeABC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim=latent_dim)
elif omics_mode == 'ab':
net = ConvVaeAB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim=latent_dim)
elif omics_mode == 'b':
net = ConvVaeB(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim=latent_dim)
elif omics_mode == 'a':
net = ConvVaeA(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim=latent_dim)
elif omics_mode == 'c':
net = ConvVaeC(omics_dims, norm_layer, filter_num, kernel_size, leaky_slope, dropout_p,
latent_dim=latent_dim)
elif net_VAE == 'fc_sep':
if omics_mode == 'abc':
net = FcSepVaeABC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'ab':
net = FcSepVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'b':
net = FcSepVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'a':
net = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'c':
net = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif net_VAE == 'fc':
if omics_mode == 'abc':
net = FcVaeABC(param, omics_dims, omics_subset_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'ab':
net = FcVaeAB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'b':
net = FcVaeB(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'a':
net = FcVaeA(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
elif omics_mode == 'c':
net = FcVaeC(omics_dims, norm_layer, leaky_slope, dropout_p, latent_dim=latent_dim)
else:
raise NotImplementedError('VAE model name [%s] is not recognized' % net_VAE)
return init_net(net, init_type, init_gain, gpu_ids)
def define_down(param, net_down, norm_type='batch', leaky_slope=0.2, dropout_p=0, latent_dim=256, class_num=2, time_num=256,
task_num=7, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""
Create the downstream task network
    Parameters:
        param -- the object storing all experiment parameters (passed through to the downstream classifier)
        net_down (str) -- the backbone of the downstream task network, default: multi_FC_classifier
        norm_type (str) -- the name of normalization layers used in the network, default: batch
leaky_slope (float) -- the negative slope of the Leaky ReLU activation function
dropout_p (float) -- probability of an element to be zeroed in a dropout layer
latent_dim (int) -- the dimensionality of the latent space and the input layer of the classifier
        class_num (int) -- the number of classes
time_num (int) -- the number of time intervals
task_num (int) -- the number of downstream tasks
        init_type (str) -- the name of the initialization method
init_gain (float) -- scaling factor for normal, xavier and orthogonal initialization methods
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1
Returns a downstream task network
The default downstream task network is a multi-layer fully-connected classifier.
    The network has been initialized by <init_net>.
"""
net = None
# get the normalization layer
norm_layer = get_norm_layer(norm_type=norm_type)
if net_down == 'multi_FC_classifier':
net = MultiFcClassifier(param, class_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_regression':
net = MultiFcRegression(latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_survival':
net = MultiFcSurvival(time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_multitask':
net = MultiFcMultitask(class_num, time_num, latent_dim, norm_layer, leaky_slope, dropout_p)
elif net_down == 'multi_FC_alltask':
net = MultiFcAlltask(class_num, time_num, task_num, latent_dim, norm_layer, leaky_slope, dropout_p)
else:
raise NotImplementedError('Downstream model name [%s] is not recognized' % net_down)
return init_net(net, init_type, init_gain, gpu_ids)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""
Initialize a network:
1. register CPU/GPU device (with multi-GPU support);
2. initialize the network weights
Parameters:
net (nn.Module) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
# multi-GPUs
net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type, init_gain=init_gain)
return net
def init_weights(net, init_type='normal', init_gain=0.02):
"""
Initialize network weights.
Parameters:
net (nn.Module) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier_normal | xavier_uniform | kaiming_normal | kaiming_uniform | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
"""
# define the initialization function
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier_normal':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'xavier_uniform':
init.xavier_uniform_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming_normal':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'kaiming_uniform':
init.kaiming_uniform_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm1d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('Initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
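# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): applying init_weights
# to a tiny throwaway network; the layer sizes are arbitrary and only show that Linear
# weights are re-initialized in place and biases are zeroed.
def _example_init_weights():
    tiny_net = nn.Sequential(nn.Linear(16, 8), nn.LeakyReLU(0.2), nn.Linear(8, 2))
    init_weights(tiny_net, init_type='xavier_normal', init_gain=0.02)
    return tiny_net[0].bias.detach().abs().max()    # tensor(0.) after initialization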
def get_scheduler(optimizer, param):
"""
Return a learning rate scheduler
Parameters:
optimizer (opt class) -- the optimizer of the network
param (params class) -- param.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <param.niter> epochs and linearly decay the rate to zero
over the next <param.niter_decay> epochs.
"""
if param.lr_policy == 'linear':
def lambda_rule(epoch):
lr_lambda = 1.0 - max(0, epoch + param.epoch_count - param.epoch_num + param.epoch_num_decay) / float(param.epoch_num_decay + 1)
return lr_lambda
# lr_scheduler is imported from torch.optim
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif param.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=param.decay_step_size, gamma=0.1)
elif param.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif param.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=param.epoch_num, eta_min=0)
else:
        raise NotImplementedError('Learning rate policy [%s] is not found' % param.lr_policy)
return scheduler
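# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): the decay factor
# produced by the 'linear' policy. The values epoch_count=1, epoch_num=100 and
# epoch_num_decay=50 are example assumptions: the factor stays at 1.0 for the first
# 50 epochs and then decays linearly towards 0 over the remaining epochs.
def _example_linear_lr_factor():
    epoch_count, epoch_num, epoch_num_decay = 1, 100, 50
    factors = [1.0 - max(0, epoch + epoch_count - epoch_num + epoch_num_decay) / float(epoch_num_decay + 1)
               for epoch in range(epoch_num)]
    # Around the switch point (epochs 48..51) the factors are approximately [1.0, 1.0, 0.980, 0.961]
    return [round(f, 3) for f in factors[48:52]]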
| 107,411 | 46.131198 | 202 | py | SubOmiEmbed | SubOmiEmbed-main/models/basic_model.py |
import os
import torch
import numpy as np
from abc import ABC, abstractmethod
from . import networks
from collections import OrderedDict
class BasicModel(ABC):
"""
This class is an abstract base class for models.
    To create a subclass, you need to implement the following functions:
        -- <__init__>: Initialize the class, first call BasicModel.__init__(self, param)
        -- <modify_commandline_parameters>: Add model-specific parameters, and rewrite default values for existing parameters
        -- <set_input>: Unpack input data from the output dictionary of the dataloader
        -- <forward>: Get the reconstructed omics data and results for the downstream task
        -- <cal_losses>: Calculate the losses
        -- <update>: Calculate losses, gradients and update network parameters
"""
def __init__(self, param):
"""
        Initialize the BasicModel class
"""
self.param = param
self.gpu_ids = param.gpu_ids
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(param.checkpoints_dir, param.experiment_name) # save all the checkpoints to save_dir, and this is where to load the models
self.load_net_dir = os.path.join(param.checkpoints_dir, param.experiment_to_load) # load pretrained networks from certain experiment folder
self.isTrain = param.isTrain
self.phase = 'p1'
self.epoch = 1
self.iter = 0
        # Improve performance when the dimensionality and shape of the input data stay the same
torch.backends.cudnn.benchmark = True
self.plateau_metric = 0 # used for learning rate policy 'plateau'
self.loss_names = []
self.model_names = []
self.metric_names = []
self.optimizers = []
self.schedulers = []
self.latent = None
self.loss_embed = None
self.loss_embed_sum = []
self.loss_down = None
self.loss_All = None
@staticmethod
def modify_commandline_parameters(parser, is_train):
"""
Add model-specific parameters, and rewrite default values for existing parameters.
Parameters:
parser -- original parameter parser
is_train (bool) -- whether it is currently training phase or test phase. Use this flag to add or change training-specific or test-specific parameters.
Returns:
The modified parser.
"""
return parser
@abstractmethod
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its label
"""
pass
@abstractmethod
def forward(self):
"""
Run forward pass
"""
pass
@abstractmethod
def cal_losses(self):
"""
Calculate losses
"""
pass
@abstractmethod
def update(self):
"""
Calculate losses, gradients and update network weights; called in every training iteration
"""
pass
def setup(self, param):
"""
Load and print networks, create schedulers
"""
if self.isTrain:
self.print_networks(param)
# For every optimizer we have a scheduler
self.schedulers = [networks.get_scheduler(optimizer, param) for optimizer in self.optimizers]
# Loading the networks
if not self.isTrain or param.continue_train:
self.load_networks(param.epoch_to_load)
def update_learning_rate(self):
"""
Update learning rates for all the networks
Called at the end of each epoch
"""
lr = self.optimizers[0].param_groups[0]['lr']
for scheduler in self.schedulers:
if self.param.lr_policy == 'plateau':
scheduler.step(self.plateau_metric)
else:
scheduler.step()
return lr
def print_networks(self, param):
"""
        Print the total number of parameters in the networks, and the network architecture if param.detail is true
Save the networks information to the disk
"""
message = '\n----------------------Networks Information----------------------'
for model_name in self.model_names:
if isinstance(model_name, str):
net = getattr(self, 'net' + model_name)
num_params = 0
for parameter in net.parameters():
num_params += parameter.numel()
if param.detail:
message += '\n' + str(net)
message += '\n[Network {:s}] Total number of parameters : {:.3f} M'.format(model_name, num_params / 1e6)
message += '\n----------------------------------------------------------------\n'
# Save the networks information to the disk
net_info_filename = os.path.join(param.checkpoints_dir, param.experiment_name, 'net_info.txt')
with open(net_info_filename, 'w') as log_file:
log_file.write(message)
print(message)
def save_networks(self, epoch):
"""
Save all the networks to the disk.
Parameters:
epoch (str) -- current epoch
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '{:s}_net_{:s}.pth'.format(epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
                # Use the str to get the attribute aka the network (e.g. self.netEmbed / self.netDown)
net = getattr(self, 'net' + name)
# If we use multi GPUs and apply the data parallel
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def load_networks(self, epoch):
"""
Load networks at specified epoch from the disk.
Parameters:
epoch (str) -- Which epoch to load
"""
for model_name in self.model_names:
if isinstance(model_name, str):
load_filename = '{:s}_net_{:s}.pth'.format(epoch, model_name)
load_path = os.path.join(self.load_net_dir, load_filename)
                # Use the str to get the attribute aka the network (e.g. self.netEmbed / self.netDown)
net = getattr(self, 'net' + model_name)
# If we use multi GPUs and apply the data parallel
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('Loading the model from %s' % load_path)
state_dict = torch.load(load_path, map_location=self.device)
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
net.load_state_dict(state_dict)
def set_train(self):
"""
Set train mode for networks
"""
for model_name in self.model_names:
if isinstance(model_name, str):
# Use the str to get the attribute aka the network (self.netXXX)
net = getattr(self, 'net' + model_name)
net.train()
self.isTrain = True
def set_eval(self):
"""
Set eval mode for networks
"""
for model_name in self.model_names:
if isinstance(model_name, str):
                # Use the str to get the attribute aka the network (e.g. self.netEmbed / self.netDown)
net = getattr(self, 'net' + model_name)
net.eval()
self.isTrain = False
def test(self):
"""
Forward in testing to get the output tensors
"""
with torch.no_grad():
self.forward()
self.cal_losses()
# if self.param.use_subset_features:
# self.loss_embed_sum = []
# self.loss_down_sum = []
# self.y_out_subset = []
# for subset in range(self.param.subset_num):
# self.subset = subset
# self.forward()
# self.y_out_subset.append(self.y_out)
# self.cal_losses()
# self.loss_embed_sum.append(self.loss_embed)
# self.loss_down_sum.append(self.loss_down)
# self.loss_embed = sum(self.loss_embed_sum) / self.param.subset_num
# self.loss_down = sum(self.loss_down_sum) / self.param.subset_num
# if self.param.agg_method == 'mean':
# self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
# elif self.param.agg_method == 'max':
# self.y_out = torch.max(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'min':
# self.y_out = torch.min(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'sum':
# self.y_out = torch.sum(torch.stack(self.y_out_subset), axis=0)
# else:
# self.forward()
# self.cal_losses()
def init_output_dict(self):
"""
initialize a dictionary for downstream task output
"""
output_dict = OrderedDict()
output_names = []
if self.param.downstream_task == 'classification':
output_names = ['index', 'y_true', 'y_pred', 'y_prob']
elif self.param.downstream_task == 'regression':
output_names = ['index', 'y_true', 'y_pred']
elif self.param.downstream_task == 'survival':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out']
elif self.param.downstream_task == 'multitask' or self.param.downstream_task == 'alltask':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out_sur', 'y_true_cla', 'y_pred_cla',
'y_prob_cla', 'y_true_reg', 'y_pred_reg']
for name in output_names:
output_dict[name] = None
return output_dict
def update_output_dict(self, output_dict):
"""
output_dict (OrderedDict) -- the output dictionary to be updated
"""
down_output = self.get_down_output()
output_names = []
if self.param.downstream_task == 'classification':
output_names = ['index', 'y_true', 'y_pred', 'y_prob']
elif self.param.downstream_task == 'regression':
output_names = ['index', 'y_true', 'y_pred']
elif self.param.downstream_task == 'survival':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out']
elif self.param.downstream_task == 'multitask' or self.param.downstream_task == 'alltask':
output_names = ['index', 'y_true_E', 'y_true_T', 'survival', 'risk', 'y_out_sur', 'y_true_cla',
'y_pred_cla', 'y_prob_cla', 'y_true_reg', 'y_pred_reg']
for name in output_names:
if output_dict[name] is None:
output_dict[name] = down_output[name]
else:
if self.param.downstream_task == 'alltask' and name in ['y_true_cla', 'y_pred_cla', 'y_prob_cla']:
for i in range(self.param.task_num-2):
output_dict[name][i] = torch.cat((output_dict[name][i], down_output[name][i]))
else:
output_dict[name] = torch.cat((output_dict[name], down_output[name]))
def init_losses_dict(self):
"""
initialize a losses dictionary
"""
losses_dict = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
losses_dict[name] = []
return losses_dict
def update_losses_dict(self, losses_dict, actual_batch_size):
"""
losses_dict (OrderedDict) -- the losses dictionary to be updated
actual_batch_size (int) -- actual batch size for loss normalization
"""
for name in self.loss_names:
if isinstance(name, str):
if self.param.reduction == 'sum':
losses_dict[name].append(float(getattr(self, 'loss_' + name))/actual_batch_size)
elif self.param.reduction == 'mean':
losses_dict[name].append(float(getattr(self, 'loss_' + name)))
def init_metrics_dict(self):
"""
initialize a metrics dictionary
"""
metrics_dict = OrderedDict()
for name in self.metric_names:
if isinstance(name, str):
metrics_dict[name] = None
return metrics_dict
def update_metrics_dict(self, metrics_dict):
"""
metrics_dict (OrderedDict) -- the metrics dictionary to be updated
"""
for name in self.metric_names:
if isinstance(name, str):
metrics_dict[name] = getattr(self, 'metric_' + name)
def init_log_dict(self):
"""
initialize losses and metrics dictionary
"""
output_dict = self.init_output_dict()
losses_dict = self.init_losses_dict()
metrics_dict = self.init_metrics_dict()
return output_dict, losses_dict, metrics_dict
def update_log_dict(self, output_dict, losses_dict, metrics_dict, actual_batch_size):
"""
output_dict (OrderedDict) -- the output dictionary to be updated
losses_dict (OrderedDict) -- the losses dictionary to be updated
metrics_dict (OrderedDict) -- the metrics dictionary to be updated
actual_batch_size (int) -- actual batch size for loss normalization
"""
self.update_output_dict(output_dict)
self.calculate_current_metrics(output_dict)
self.update_losses_dict(losses_dict, actual_batch_size)
self.update_metrics_dict(metrics_dict)
def init_latent_dict(self):
"""
initialize and return an empty latent space array and an empty index array
"""
latent_dict = OrderedDict()
latent_dict['index'] = np.zeros(shape=[0])
latent_dict['latent'] = np.zeros(shape=[0, self.param.latent_space_dim])
return latent_dict
def update_latent_dict(self, latent_dict):
"""
update the latent dict
latent_dict (OrderedDict)
"""
with torch.no_grad():
current_latent_array = self.latent.cpu().numpy()
latent_dict['latent'] = np.concatenate((latent_dict['latent'], current_latent_array))
current_index_array = self.data_index.cpu().numpy()
latent_dict['index'] = np.concatenate((latent_dict['index'], current_index_array))
return latent_dict
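# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): the checkpoint
# file-name convention shared by save_networks and load_networks above. The epoch label
# 'latest' is an example value; 'Embed' is one of the model names used by the VAE models.
def _example_checkpoint_filename(epoch='latest', model_name='Embed'):
    return '{:s}_net_{:s}.pth'.format(epoch, model_name)    # 'latest_net_Embed.pth'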
| 15,137 | 39.475936 | 166 | py | SubOmiEmbed | SubOmiEmbed-main/models/vae_classifier_model.py |
import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
import random
class VaeClassifierModel(VaeBasicModel):
"""
This class implements the VAE classifier model, using the VAE framework with the classification downstream task.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
        # add model-specific parameters for the VAE classifier model
parser.add_argument('--class_num', type=int, default=0,
help='the number of classes for the classification task')
return parser
def __init__(self, param):
"""
Initialize the VAE_classifier class.
"""
VaeBasicModel.__init__(self, param)
# specify the training losses you want to print out.
self.loss_names.append('classifier')
# specify the metrics you want to print out.
self.metric_names = ['accuracy']
# input tensor
self.label = None
# output tensor
self.y_out = None
self.y_out_subset = []
if param.use_subset_features:
if param.use_subset_identity:
param.latent_space_dim = param.latent_space_dim + param.subset_num
elif param.agg_method == 'concat':
# if param.use_subset_identity:
# param.latent_space_dim = (param.latent_space_dim + param.subset_num) * param.subset_num
# else:
param.latent_space_dim = param.latent_space_dim * param.subset_num
# define the network
self.netDown = networks.define_down(param, param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
param.latent_space_dim, param.class_num, None, None, param.init_type,
param.init_gain, self.gpu_ids)
# define the classification loss
self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
self.loss_classifier = None
self.metric_accuracy = None
if self.isTrain:
# Set the optimizer
self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Down)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
VaeBasicModel.set_input(self, input_dict)
self.label = input_dict['label'].to(self.device)
def forward(self):
# if self.param.use_subset_features:
# self.latent_subset = []
# self.recon_omics_subset = []
# self.latent_identity = F.one_hot(torch.arange(0,self.param.subset_num).to(self.device))
# for subset in range(self.param.subset_num):
# self.subset = subset
# VaeBasicModel.forward(self)
# if self.param.use_subset_identity:
# self.latent_subset.append(torch.cat([self.latent, self.latent_identity[subset].repeat(self.latent.shape[0], 1)], dim=1))
# else:
# self.latent_subset.append(self.latent)
# self.recon_omics_subset.append(self.recon_omics)
# if self.param.agg_method == 'mean':
# self.latent = torch.mean(torch.stack(self.latent_subset), axis=0)
# elif self.param.agg_method == 'max':
# self.latent = torch.max(torch.stack(self.latent_subset), axis=0)[0]
# elif self.param.agg_method == 'min':
# self.latent = torch.min(torch.stack(self.latent_subset), axis=0)[0]
# elif self.param.agg_method == 'sum':
# self.latent = torch.sum(torch.stack(self.latent_subset), axis=0)
# elif self.param.agg_method == 'concat':
# self.latent = torch.cat(self.latent_subset, axis=1)
if self.param.use_subset_features:
self.latent_subset = []
self.recon_omics_subset = []
self.y_out_subset = []
self.latent_identity = F.one_hot(torch.arange(0,self.param.subset_num).to(self.device))
for subset in range(self.param.subset_num):
self.subset = subset
VaeBasicModel.forward(self)
if self.param.use_subset_identity:
self.latent = torch.cat([self.latent, self.latent_identity[subset].repeat(self.latent.shape[0], 1)], dim=1)
self.y_out = self.netDown(self.latent)
self.y_out_subset.append(self.y_out)
else:
self.latent_subset.append(self.latent)
self.recon_omics_subset.append(self.recon_omics)
if self.param.use_subset_identity:
# self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
if self.param.agg_method == 'mean':
self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
elif self.param.agg_method == 'max':
self.y_out = torch.max(torch.stack(self.y_out_subset), axis=0)[0]
elif self.param.agg_method == 'min':
self.y_out = torch.min(torch.stack(self.y_out_subset), axis=0)[0]
elif self.param.agg_method == 'sum':
self.y_out = torch.sum(torch.stack(self.y_out_subset), axis=0)
elif self.param.agg_method == 'concat':
self.y_out = torch.cat(self.y_out_subset, axis=1)
elif self.param.agg_method == 'random':
self.y_out = self.y_out_subset[random.randrange(0, self.param.subset_num)]
else:
if self.param.agg_method == 'mean':
self.latent = torch.mean(torch.stack(self.latent_subset), axis=0)
elif self.param.agg_method == 'max':
self.latent = torch.max(torch.stack(self.latent_subset), axis=0)[0]
elif self.param.agg_method == 'min':
self.latent = torch.min(torch.stack(self.latent_subset), axis=0)[0]
elif self.param.agg_method == 'sum':
self.latent = torch.sum(torch.stack(self.latent_subset), axis=0)
elif self.param.agg_method == 'concat':
self.latent = torch.cat(self.latent_subset, axis=1)
elif self.param.agg_method == 'random':
self.latent = self.latent_subset[random.randrange(0, self.param.subset_num)]
# Get the output tensor
self.y_out = self.netDown(self.latent)
else:
VaeBasicModel.forward(self)
# Get the output tensor
self.y_out = self.netDown(self.latent)
# if self.param.use_subset_features:
# VaeBasicModel.forward(self)
# # Get the output tensor
# self.y_out = self.netDown(self.latent)
# # if self.isTrain:
# # VaeBasicModel.forward(self)
# # # Get the output tensor
# # self.y_out = self.netDown(self.latent)
# # else:
# # self.y_out_subset = []
# # for subset in range(self.param.subset_num):
# # self.subset = subset
# # VaeBasicModel.forward(self)
# # # Get the output tensor
# # self.y_out_subset.append(self.netDown(self.latent))
# # self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
# else:
# VaeBasicModel.forward(self)
# # Get the output tensor
# self.y_out = self.netDown(self.latent)
def cal_losses(self):
"""Calculate losses"""
if self.param.use_subset_features:
self.loss_embed_subset = []
for subset in range(self.param.subset_num):
self.recon_omics = self.recon_omics_subset[subset]
VaeBasicModel.cal_losses(self)
self.loss_embed_subset.append(self.loss_embed)
self.loss_embed = sum(self.loss_embed_subset)
else:
VaeBasicModel.cal_losses(self)
# Calculate the classification loss (downstream loss)
self.loss_classifier = self.lossFuncClass(self.y_out, self.label)
# LOSS DOWN
self.loss_down = self.loss_classifier
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
# VaeBasicModel.cal_losses(self)
# # Calculate the classification loss (downstream loss)
# self.loss_classifier = self.lossFuncClass(self.y_out, self.label)
# # LOSS DOWN
# self.loss_down = self.loss_classifier
# self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
def update(self):
VaeBasicModel.update(self)
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
y_prob = F.softmax(self.y_out, dim=1)
_, y_pred = torch.max(y_prob, 1)
index = self.data_index
y_true = self.label
return {'index': index, 'y_true': y_true, 'y_pred': y_pred, 'y_prob': y_prob}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy = (output_dict['y_true'] == output_dict['y_pred']).sum().item() / len(output_dict['y_true'])
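# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): how the per-subset
# outputs collected in forward() are aggregated. Three dummy subset logits of shape (2, 3)
# stand in for self.y_out_subset; dim= is equivalent to the axis= keyword used above, and
# the 'random' option simply picks one subset output at random.
def _example_subset_aggregation():
    subset_outputs = [torch.randn(2, 3) for _ in range(3)]
    stacked = torch.stack(subset_outputs)                     # shape (3, 2, 3)
    aggregated = {
        'mean': torch.mean(stacked, dim=0),                   # (2, 3)
        'max': torch.max(stacked, dim=0)[0],                  # (2, 3)
        'min': torch.min(stacked, dim=0)[0],                  # (2, 3)
        'sum': torch.sum(stacked, dim=0),                     # (2, 3)
        'concat': torch.cat(subset_outputs, dim=1),           # (2, 9)
        'random': subset_outputs[random.randrange(0, 3)],     # (2, 3)
    }
    return {name: out.shape for name, out in aggregated.items()}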
| 9,850 | 44.396313 | 151 | py | SubOmiEmbed | SubOmiEmbed-main/models/__init__.py |
"""
This package contains modules related to objective functions, optimizations, and network architectures.
"""
import importlib
from models.basic_model import BasicModel
def find_model_using_name(model_name):
"""
Import the module with certain name
"""
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
# instantiate the model class
model = None
# Change the name format to corresponding class name
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BasicModel):
model = cls
if model is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
return model
def get_param_setter(model_name):
"""Return the static method <modify_commandline_options> of the model class."""
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_parameters
def create_model(param):
"""
Create a model given the parameters
"""
model = find_model_using_name(param.model)
# Initialize the model
instance = model(param)
print('Model [%s] was created' % type(instance).__name__)
return instance
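# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): the naming convention
# expected by find_model_using_name. A model name such as 'vae_classifier' (the value of
# param.model) resolves to the module models/vae_classifier_model.py and to a class whose
# lower-cased name matches 'vaeclassifiermodel' (i.e. VaeClassifierModel).
def _example_model_name_resolution(model_name='vae_classifier'):
    model_filename = "models." + model_name + "_model"
    target_model_name = model_name.replace('_', '') + 'model'
    return model_filename, target_model_name    # ('models.vae_classifier_model', 'vaeclassifiermodel')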
| 1,425 | 30 | 170 | py | SubOmiEmbed | SubOmiEmbed-main/models/vae_multitask_model.py |
import torch
from .vae_basic_model import VaeBasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeMultitaskModel(VaeBasicModel):
"""
    This class implements the VAE multitask model, using the VAE framework with multiple downstream tasks.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# Downstream task network
parser.set_defaults(net_down='multi_FC_multitask')
# Survival prediction related
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
# Classification related
parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
# Regression related
parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
# Loss combined
parser.add_argument('--k_survival', type=float, default=1,
help='weight for the survival loss')
parser.add_argument('--k_classifier', type=float, default=1,
help='weight for the classifier loss')
parser.add_argument('--k_regression', type=float, default=1,
help='weight for the regression loss')
return parser
def __init__(self, param):
"""
Initialize the VAE_multitask class.
"""
VaeBasicModel.__init__(self, param)
# specify the training losses you want to print out.
self.loss_names.extend(['survival', 'classifier', 'regression'])
# specify the metrics you want to print out.
self.metric_names = ['accuracy', 'rmse']
# input tensor
self.survival_T = None
self.survival_E = None
self.y_true = None
self.label = None
self.value = None
# output tensor
self.y_out_sur = None
self.y_out_cla = None
self.y_out_reg = None
# define the network
        self.netDown = networks.define_down(param, param.net_down, param.norm_type, param.leaky_slope, param.dropout_p,
                                            param.latent_space_dim, param.class_num, param.time_num, None, param.init_type,
                                            param.init_gain, self.gpu_ids)
# define the classification loss
self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
# define the regression distance loss
self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
self.loss_survival = None
self.loss_classifier = None
self.loss_regression = None
self.metric_accuracy = None
self.metric_rmse = None
if param.survival_loss == 'MTLR':
self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
if self.isTrain:
# Set the optimizer
self.optimizer_Down = torch.optim.Adam(self.netDown.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Down)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
VaeBasicModel.set_input(self, input_dict)
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
self.label = input_dict['label'].to(self.device)
self.value = input_dict['value'].to(self.device)
def forward(self):
# Get the output tensor
VaeBasicModel.forward(self)
self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netDown(self.latent)
def cal_losses(self):
"""Calculate losses"""
VaeBasicModel.cal_losses(self)
# Calculate the survival loss
if self.param.survival_loss == 'MTLR':
self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
# Calculate the classification loss
self.loss_classifier = self.lossFuncClass(self.y_out_cla, self.label)
# Calculate the regression loss
self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
# LOSS DOWN
self.loss_down = self.param.k_survival * self.loss_survival + self.param.k_classifier * self.loss_classifier + self.param.k_regression * self.loss_regression
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
def update(self):
VaeBasicModel.update(self)
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
# Survival
y_true_E = self.survival_E
y_true_T = self.survival_T
y_out_sur = self.y_out_sur
predict = self.predict_risk()
# density = predict['density']
survival = predict['survival']
# hazard = predict['hazard']
risk = predict['risk']
# Classification
y_prob_cla = F.softmax(self.y_out_cla, dim=1)
_, y_pred_cla = torch.max(y_prob_cla, 1)
y_true_cla = self.label
# Regression
y_true_reg = self.value
y_pred_reg = self.y_out_reg * self.param.regression_scale
return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy = (output_dict['y_true_cla'] == output_dict['y_pred_cla']).sum().item() / len(output_dict['y_true_cla'])
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
def predict_risk(self):
"""
Predict the density, survival and hazard function, as well as the risk score
"""
if self.param.survival_loss == 'MTLR':
phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
density = phi / div
survival = torch.mm(density, self.tri_matrix_2)
hazard = density[:, :-1] / survival[:, 1:]
cumulative_hazard = torch.cumsum(hazard, dim=1)
risk = torch.sum(cumulative_hazard, 1)
return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
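# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): the MTLR post-processing
# in predict_risk on a toy setup with time_num = 3, so there are 3 logits per sample and the
# triangular matrices have shapes (3, 4) and (4, 4) as in get_tri_matrix above. Dividing phi by
# its row sum is equivalent to the repeat_interleave normalization used in predict_risk.
def _example_mtlr_risk():
    time_num = 3
    y_out_sur = torch.zeros(1, time_num)                        # one sample with neutral logits
    tri_1 = torch.tril(torch.ones(time_num, time_num + 1))      # as get_tri_matrix(dimension_type=1)
    tri_2 = torch.tril(torch.ones(time_num + 1, time_num + 1))  # as get_tri_matrix(dimension_type=2)
    phi = torch.exp(torch.mm(y_out_sur, tri_1))                 # unnormalized interval scores, shape (1, 4)
    density = phi / phi.sum(dim=1, keepdim=True)                # probability of each time interval
    survival = torch.mm(density, tri_2)                         # survival probability at each time point
    hazard = density[:, :-1] / survival[:, 1:]                  # discrete hazard
    risk = torch.cumsum(hazard, dim=1).sum(dim=1)               # one risk score per sample
    return density, survival, risk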
| 8,142 | 44.238889 | 269 | py | SubOmiEmbed | SubOmiEmbed-main/models/vae_basic_model.py |
import torch
from .basic_model import BasicModel
from . import networks
from . import losses
class VaeBasicModel(BasicModel):
"""
    This is the basic VAE model class, inherited by all the other VAE model classes.
"""
def __init__(self, param):
"""
Initialize the VAE basic class.
"""
BasicModel.__init__(self, param)
# specify the training losses you want to print out.
if param.omics_mode == 'abc':
self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
        elif param.omics_mode == 'ab':
self.loss_names = ['recon_A', 'recon_B', 'kl']
elif param.omics_mode == 'b':
self.loss_names = ['recon_B', 'kl']
elif param.omics_mode == 'a':
self.loss_names = ['recon_A', 'kl']
elif param.omics_mode == 'c':
self.loss_names = ['recon_C', 'kl']
# specify the models you want to save to the disk and load.
self.model_names = ['Embed', 'Down']
# input tensor
self.input_omics = []
self.data_index = None # The indexes of input data
self.input_omics_subsets = []
# for feature subsetting
if self.param.use_subset_features:
self.omics_subset_dims = []
for i in range(3):
self.omics_subset_dims.append(param.omics_dims[i] // param.subset_num)
else:
self.omics_subset_dims = None
# output tensor
self.z = None
self.recon_omics = None
self.mean = None
self.log_var = None
# define the network
self.netEmbed = networks.define_VAE(param, param.net_VAE, self.omics_subset_dims, param.omics_dims, param.omics_mode,
param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
param.dropout_p, param.latent_space_dim, param.init_type, param.init_gain,
self.gpu_ids)
# define the reconstruction loss
self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
self.loss_recon_A = None
self.loss_recon_B = None
self.loss_recon_C = None
self.loss_recon = None
self.loss_kl = None
if self.isTrain:
# Set the optimizer
            # netEmbed and netDown can be set to different initial learning rates
self.optimizer_Embed = torch.optim.Adam(self.netEmbed.parameters(), lr=param.lr, betas=(param.beta1, 0.999), weight_decay=param.weight_decay)
# optimizer list was already defined in BaseModel
self.optimizers.append(self.optimizer_Embed)
self.optimizer_Down = None
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
self.input_omics = []
for i in range(0, 3):
if i == 1 and self.param.ch_separate:
input_B = []
for ch in range(0, 23):
input_B.append(input_dict['input_omics'][1][ch].to(self.device))
self.input_omics.append(input_B)
else:
self.input_omics.append(input_dict['input_omics'][i].to(self.device))
if self.param.use_subset_features:
self.input_omics_subsets = []
for i in range(self.param.subset_num):
input_subset = []
for j in range(3):
subset_size = self.input_omics[j].shape[1] // self.param.subset_num
indices = torch.tensor(range(subset_size * i, subset_size * (i+1))).to(self.device)
# mask = torch.zeros_like(self.input_omics[j])
# mask[:, indices] = 1
# input_subset.append(mask * self.input_omics[j])
input_subset.append(torch.index_select(self.input_omics[j], 1, indices))
self.input_omics_subsets.append(input_subset)
self.data_index = input_dict['index']
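        # Note (illustrative, hypothetical numbers): with subset_num = 4 and, say, 1000 features
        # in omics type j, subset_size is 250 and subset i keeps the contiguous columns
        # [250*i, 250*(i+1)); any remainder beyond subset_size * subset_num is left out of
        # every subset.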
def forward(self):
# Get the output tensor
if self.param.use_subset_features:
self.z, self.recon_omics, self.mean, self.log_var = self.netEmbed(self.input_omics_subsets[self.subset])
# define the latent
if self.phase == 'p1' or self.phase == 'p3':
# self.latent = self.mean
self.latent = self.z
elif self.phase == 'p2':
# self.latent = self.mean.detach()
self.latent = self.z.detach()
else:
self.z, self.recon_omics, self.mean, self.log_var = self.netEmbed(self.input_omics)
# define the latent
if self.phase == 'p1' or self.phase == 'p3':
self.latent = self.mean
elif self.phase == 'p2':
self.latent = self.mean.detach()
def cal_losses(self):
"""Calculate losses"""
# Calculate the reconstruction loss for A
if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
else:
self.loss_recon_A = 0
# Calculate the reconstruction loss for B
if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
if self.param.ch_separate:
recon_omics_B = torch.cat(self.recon_omics[1], -1)
input_omics_B = torch.cat(self.input_omics[1], -1)
self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
else:
self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
else:
self.loss_recon_B = 0
# Calculate the reconstruction loss for C
if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
else:
self.loss_recon_C = 0
# Overall reconstruction loss
if self.param.reduction == 'sum':
self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
elif self.param.reduction == 'mean':
self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
# Calculate the kl loss
self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
# Calculate the overall vae loss (embedding loss)
# LOSS EMBED
self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
# if not self.isTrain:
# self.loss_embed_sum.append(self.loss_embed)
def update(self):
if self.phase == 'p1':
self.forward()
self.optimizer_Embed.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_embed.backward() # Backpropagation
self.optimizer_Embed.step() # Update weights
elif self.phase == 'p2':
self.forward()
self.optimizer_Down.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_down.backward() # Backpropagation
self.optimizer_Down.step() # Update weights
elif self.phase == 'p3':
self.forward()
self.optimizer_Embed.zero_grad() # Set gradients to zero
self.optimizer_Down.zero_grad()
self.cal_losses() # Calculate losses
self.loss_All.backward() # Backpropagation
self.optimizer_Embed.step() # Update weights
self.optimizer_Down.step()
# if self.param.use_subset_features:
# self.loss_embed_sum = []
# self.loss_down_sum = []
# self.y_out_subset = []
# if self.phase == 'p1':
# for subset in range(self.param.subset_num):
# self.subset = subset
# self.forward()
# self.y_out_subset.append(self.y_out)
# self.optimizer_Embed.zero_grad() # Set gradients to zero
# self.cal_losses() # Calculate losses
# self.loss_embed_sum.append(self.loss_embed)
# self.loss_down_sum.append(self.loss_down)
# self.loss_embed.backward() # Backpropagation
# self.optimizer_Embed.step() # Update weights
# elif self.phase == 'p2':
# for subset in range(self.param.subset_num):
# self.subset = subset
# self.forward()
# self.y_out_subset.append(self.y_out)
# self.optimizer_Down.zero_grad() # Set gradients to zero
# self.cal_losses() # Calculate losses
# self.loss_embed_sum.append(self.loss_embed)
# self.loss_down_sum.append(self.loss_down)
# self.loss_down.backward() # Backpropagation
# self.optimizer_Down.step() # Update weights
# elif self.phase == 'p3':
# for subset in range(self.param.subset_num):
# self.subset = subset
# self.forward()
# self.y_out_subset.append(self.y_out)
# self.optimizer_Embed.zero_grad() # Set gradients to zero
# self.optimizer_Down.zero_grad()
# self.cal_losses() # Calculate losses
# self.loss_embed_sum.append(self.loss_embed)
# self.loss_down_sum.append(self.loss_down)
# self.loss_All.backward() # Backpropagation
# self.optimizer_Embed.step() # Update weights
# self.optimizer_Down.step()
# self.loss_embed = sum(self.loss_embed_sum) / self.param.subset_num
# self.loss_down = sum(self.loss_down_sum) / self.param.subset_num
# if self.param.agg_method == 'mean':
# self.y_out = torch.mean(torch.stack(self.y_out_subset), axis=0)
# elif self.param.agg_method == 'max':
# self.y_out = torch.max(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'min':
# self.y_out = torch.min(torch.stack(self.y_out_subset), axis=0)[0]
# elif self.param.agg_method == 'sum':
# self.y_out = torch.sum(torch.stack(self.y_out_subset), axis=0)
# else:
# if self.phase == 'p1':
# self.forward()
# self.optimizer_Embed.zero_grad() # Set gradients to zero
# self.cal_losses() # Calculate losses
# self.loss_embed.backward() # Backpropagation
# self.optimizer_Embed.step() # Update weights
# elif self.phase == 'p2':
# self.forward()
# self.optimizer_Down.zero_grad() # Set gradients to zero
# self.cal_losses() # Calculate losses
# self.loss_down.backward() # Backpropagation
# self.optimizer_Down.step() # Update weights
# elif self.phase == 'p3':
# self.forward()
# self.optimizer_Embed.zero_grad() # Set gradients to zero
# self.optimizer_Down.zero_grad()
# self.cal_losses() # Calculate losses
# self.loss_All.backward() # Backpropagation
# self.optimizer_Embed.step() # Update weights
# self.optimizer_Down.step()
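# Illustrative sketch (hypothetical, not part of the SubOmiEmbed code): the standard VAE KL term
# that losses.kl_loss is assumed to implement, written out so the combination
# loss_embed = loss_recon + k_kl * loss_kl in cal_losses above is easy to follow.
def _example_kl_term():
    mean = torch.zeros(4, 8)        # dummy posterior means
    log_var = torch.zeros(4, 8)     # dummy posterior log-variances
    kl = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp())
    return kl                       # tensor(0.) when the posterior equals the standard normal prior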
| 12,659 | 48.84252 | 153 | py | SubOmiEmbed | SubOmiEmbed-main/models/vae_multitask_gn_model.py |
import torch
import torch.nn as nn
from .basic_model import BasicModel
from . import networks
from . import losses
from torch.nn import functional as F
from sklearn import metrics
class VaeMultitaskGNModel(BasicModel):
"""
    This class implements the VAE multitask model with GradNorm, using the VAE framework with multiple downstream tasks.
"""
@staticmethod
def modify_commandline_parameters(parser, is_train=True):
# Downstream task network
parser.set_defaults(net_down='multi_FC_multitask')
# Survival prediction related
parser.add_argument('--survival_loss', type=str, default='MTLR', help='choose the survival loss')
parser.add_argument('--survival_T_max', type=float, default=-1, help='maximum T value for survival prediction task')
parser.add_argument('--time_num', type=int, default=256, help='number of time intervals in the survival model')
# Classification related
parser.add_argument('--class_num', type=int, default=0, help='the number of classes for the classification task')
# Regression related
parser.add_argument('--regression_scale', type=int, default=1, help='normalization scale for y in regression task')
parser.add_argument('--dist_loss', type=str, default='L1', help='choose the distance loss for regression task, options: [MSE | L1]')
        # GradNorm related
parser.add_argument('--alpha', type=float, default=1.5, help='the additional hyperparameter for GradNorm')
parser.add_argument('--lr_gn', type=float, default=1e-3, help='the learning rate for GradNorm')
parser.add_argument('--k_survival', type=float, default=1.0, help='initial weight for the survival loss')
parser.add_argument('--k_classifier', type=float, default=1.0, help='initial weight for the classifier loss')
parser.add_argument('--k_regression', type=float, default=1.0, help='initial weight for the regression loss')
return parser
def __init__(self, param):
"""
Initialize the VAE_multitask class.
"""
BasicModel.__init__(self, param)
# specify the training losses you want to print out.
if param.omics_mode == 'abc':
self.loss_names = ['recon_A', 'recon_B', 'recon_C', 'kl']
        elif param.omics_mode == 'ab':
self.loss_names = ['recon_A', 'recon_B', 'kl']
elif param.omics_mode == 'b':
self.loss_names = ['recon_B', 'kl']
elif param.omics_mode == 'a':
self.loss_names = ['recon_A', 'kl']
elif param.omics_mode == 'c':
self.loss_names = ['recon_C', 'kl']
self.loss_names.extend(['survival', 'classifier', 'regression', 'gradient', 'w_sur', 'w_cla', 'w_reg'])
# specify the models you want to save to the disk and load.
self.model_names = ['All']
# input tensor
self.input_omics = []
self.data_index = None # The indexes of input data
self.survival_T = None
self.survival_E = None
self.y_true = None
self.label = None
self.value = None
# output tensor
self.z = None
self.recon_omics = None
self.mean = None
self.log_var = None
self.y_out_sur = None
self.y_out_cla = None
self.y_out_reg = None
# specify the metrics you want to print out.
self.metric_names = ['accuracy', 'rmse']
# define the network
self.netAll = networks.define_net(param.net_VAE, param.net_down, param.omics_dims, param.omics_mode,
param.norm_type, param.filter_num, param.conv_k_size, param.leaky_slope,
param.dropout_p, param.latent_space_dim, param.class_num, param.time_num, None,
param.init_type, param.init_gain, self.gpu_ids)
# define the reconstruction loss
self.lossFuncRecon = losses.get_loss_func(param.recon_loss, param.reduction)
# define the classification loss
self.lossFuncClass = losses.get_loss_func('CE', param.reduction)
# define the regression distance loss
self.lossFuncDist = losses.get_loss_func(param.dist_loss, param.reduction)
self.loss_recon_A = None
self.loss_recon_B = None
self.loss_recon_C = None
self.loss_recon = None
self.loss_kl = None
self.loss_survival = None
self.loss_classifier = None
self.loss_regression = None
self.loss_gradient = 0
self.loss_w_sur = None
self.loss_w_cla = None
self.loss_w_reg = None
self.task_losses = None
self.weighted_losses = None
self.initial_losses = None
self.metric_accuracy = None
self.metric_rmse = None
if param.survival_loss == 'MTLR':
self.tri_matrix_1 = self.get_tri_matrix(dimension_type=1)
self.tri_matrix_2 = self.get_tri_matrix(dimension_type=2)
# Weights of multiple downstream tasks
self.loss_weights = nn.Parameter(torch.ones(3, requires_grad=True, device=self.device))
if self.isTrain:
# Set the optimizer
self.optimizer_All = torch.optim.Adam([{'params': self.netAll.parameters(), 'lr': param.lr, 'betas': (param.beta1, 0.999), 'weight_decay': param.weight_decay},
{'params': self.loss_weights, 'lr': param.lr_gn}])
self.optimizers.append(self.optimizer_All)
def set_input(self, input_dict):
"""
Unpack input data from the output dictionary of the dataloader
Parameters:
input_dict (dict): include the data tensor and its index.
"""
self.input_omics = []
for i in range(0, 3):
if i == 1 and self.param.ch_separate:
input_B = []
for ch in range(0, 23):
input_B.append(input_dict['input_omics'][1][ch].to(self.device))
self.input_omics.append(input_B)
else:
self.input_omics.append(input_dict['input_omics'][i].to(self.device))
self.data_index = input_dict['index']
self.survival_T = input_dict['survival_T'].to(self.device)
self.survival_E = input_dict['survival_E'].to(self.device)
self.y_true = input_dict['y_true'].to(self.device)
self.label = input_dict['label'].to(self.device)
self.value = input_dict['value'].to(self.device)
def forward(self):
# Get the output tensor
self.z, self.recon_omics, self.mean, self.log_var, self.y_out_sur, self.y_out_cla, self.y_out_reg = self.netAll(self.input_omics)
# define the latent
self.latent = self.mean
def cal_losses(self):
"""Calculate losses"""
# Calculate the reconstruction loss for A
if self.param.omics_mode == 'a' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
self.loss_recon_A = self.lossFuncRecon(self.recon_omics[0], self.input_omics[0])
else:
self.loss_recon_A = 0
# Calculate the reconstruction loss for B
if self.param.omics_mode == 'b' or self.param.omics_mode == 'ab' or self.param.omics_mode == 'abc':
if self.param.ch_separate:
recon_omics_B = torch.cat(self.recon_omics[1], -1)
input_omics_B = torch.cat(self.input_omics[1], -1)
self.loss_recon_B = self.lossFuncRecon(recon_omics_B, input_omics_B)
else:
self.loss_recon_B = self.lossFuncRecon(self.recon_omics[1], self.input_omics[1])
else:
self.loss_recon_B = 0
# Calculate the reconstruction loss for C
if self.param.omics_mode == 'c' or self.param.omics_mode == 'abc':
self.loss_recon_C = self.lossFuncRecon(self.recon_omics[2], self.input_omics[2])
else:
self.loss_recon_C = 0
# Overall reconstruction loss
if self.param.reduction == 'sum':
self.loss_recon = self.loss_recon_A + self.loss_recon_B + self.loss_recon_C
elif self.param.reduction == 'mean':
self.loss_recon = (self.loss_recon_A + self.loss_recon_B + self.loss_recon_C) / self.param.omics_num
# Calculate the kl loss
self.loss_kl = losses.kl_loss(self.mean, self.log_var, self.param.reduction)
# Calculate the overall vae loss (embedding loss)
# LOSS EMBED
self.loss_embed = self.loss_recon + self.param.k_kl * self.loss_kl
# Calculate the survival loss
if self.param.survival_loss == 'MTLR':
self.loss_survival = losses.MTLR_survival_loss(self.y_out_sur, self.y_true, self.survival_E, self.tri_matrix_1, self.param.reduction)
# Calculate the classification loss
self.loss_classifier = self.lossFuncClass(self.y_out_cla, self.label)
# Calculate the regression loss
self.loss_regression = self.lossFuncDist(self.y_out_reg.squeeze().type(torch.float32), (self.value / self.param.regression_scale).type(torch.float32))
# Calculate the weighted downstream losses
# Add initial weights
self.task_losses = torch.stack([self.param.k_survival * self.loss_survival, self.param.k_classifier * self.loss_classifier, self.param.k_regression * self.loss_regression])
self.weighted_losses = self.loss_weights * self.task_losses
# LOSS DOWN
self.loss_down = self.weighted_losses.sum()
self.loss_All = self.param.k_embed * self.loss_embed + self.loss_down
# Log the loss weights
self.loss_w_sur = self.loss_weights[0] * self.param.k_survival
self.loss_w_cla = self.loss_weights[1] * self.param.k_classifier
self.loss_w_reg = self.loss_weights[2] * self.param.k_regression
def update(self):
if self.phase == 'p1':
self.forward()
self.optimizer_All.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_embed.backward() # Backpropagation
self.optimizer_All.step() # Update weights
elif self.phase == 'p2':
self.forward()
self.optimizer_All.zero_grad() # Set gradients to zero
self.cal_losses() # Calculate losses
self.loss_down.backward() # Backpropagation
self.optimizer_All.step() # Update weights
elif self.phase == 'p3':
self.forward()
self.cal_losses() # Calculate losses
self.optimizer_All.zero_grad() # Set gradients to zero
# Calculate the GradNorm gradients
if isinstance(self.netAll, torch.nn.DataParallel):
W = list(self.netAll.module.get_last_encode_layer().parameters())
else:
W = list(self.netAll.get_last_encode_layer().parameters())
grad_norms = []
for weight, loss in zip(self.loss_weights, self.task_losses):
grad = torch.autograd.grad(loss, W, retain_graph=True)
grad_norms.append(torch.norm(weight * grad[0]))
grad_norms = torch.stack(grad_norms)
if self.iter == 0:
self.initial_losses = self.task_losses.detach()
# Calculate the constant targets
with torch.no_grad():
# loss ratios
loss_ratios = self.task_losses / self.initial_losses
# inverse training rate
inverse_train_rates = loss_ratios / loss_ratios.mean()
constant_terms = grad_norms.mean() * (inverse_train_rates ** self.param.alpha)
# Calculate the gradient loss
self.loss_gradient = (grad_norms - constant_terms).abs().sum()
# Set the gradients of weights
loss_weights_grad = torch.autograd.grad(self.loss_gradient, self.loss_weights)[0]
self.loss_All.backward()
self.loss_weights.grad = loss_weights_grad
self.optimizer_All.step() # Update weights
# Re-normalize the losses weights
with torch.no_grad():
normalize_coeff = len(self.loss_weights) / self.loss_weights.sum()
self.loss_weights.data = self.loss_weights.data * normalize_coeff
def get_down_output(self):
"""
Get output from downstream task
"""
with torch.no_grad():
index = self.data_index
# Survival
y_true_E = self.survival_E
y_true_T = self.survival_T
y_out_sur = self.y_out_sur
predict = self.predict_risk()
# density = predict['density']
survival = predict['survival']
# hazard = predict['hazard']
risk = predict['risk']
# Classification
y_prob_cla = F.softmax(self.y_out_cla, dim=1)
_, y_pred_cla = torch.max(y_prob_cla, 1)
y_true_cla = self.label
# Regression
y_true_reg = self.value
y_pred_reg = self.y_out_reg * self.param.regression_scale
return {'index': index, 'y_true_E': y_true_E, 'y_true_T': y_true_T, 'survival': survival, 'risk': risk, 'y_out_sur': y_out_sur, 'y_true_cla': y_true_cla, 'y_pred_cla': y_pred_cla, 'y_prob_cla': y_prob_cla, 'y_true_reg': y_true_reg, 'y_pred_reg': y_pred_reg}
def calculate_current_metrics(self, output_dict):
"""
Calculate current metrics
"""
self.metric_accuracy = (output_dict['y_true_cla'] == output_dict['y_pred_cla']).sum().item() / len(output_dict['y_true_cla'])
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
self.metric_rmse = metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
def get_tri_matrix(self, dimension_type=1):
"""
Get tensor of the triangular matrix
"""
if dimension_type == 1:
ones_matrix = torch.ones(self.param.time_num, self.param.time_num + 1, device=self.device)
else:
ones_matrix = torch.ones(self.param.time_num + 1, self.param.time_num + 1, device=self.device)
tri_matrix = torch.tril(ones_matrix)
return tri_matrix
def predict_risk(self):
"""
Predict the density, survival and hazard function, as well as the risk score
"""
if self.param.survival_loss == 'MTLR':
phi = torch.exp(torch.mm(self.y_out_sur, self.tri_matrix_1))
div = torch.repeat_interleave(torch.sum(phi, 1).reshape(-1, 1), phi.shape[1], dim=1)
density = phi / div
survival = torch.mm(density, self.tri_matrix_2)
hazard = density[:, :-1] / survival[:, 1:]
cumulative_hazard = torch.cumsum(hazard, dim=1)
risk = torch.sum(cumulative_hazard, 1)
return {'density': density, 'survival': survival, 'hazard': hazard, 'risk': risk}
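# Minimal numeric sketch of the MTLR transform implemented in predict_risk() above.
# Illustrative only: the batch size, time_num and the all-zero network output are assumed
# values, not taken from the training pipeline; the snippet only needs torch and can be
# copied and run on its own.
if __name__ == '__main__':
    import torch
    time_num = 4
    y_out_sur = torch.zeros(2, time_num)                        # (batch, time_num) network output
    tri_1 = torch.tril(torch.ones(time_num, time_num + 1))      # tri_matrix_1
    tri_2 = torch.tril(torch.ones(time_num + 1, time_num + 1))  # tri_matrix_2
    phi = torch.exp(torch.mm(y_out_sur, tri_1))
    density = phi / phi.sum(dim=1, keepdim=True)                # each row sums to 1 -> 0.2 per bin here
    survival = torch.mm(density, tri_2)                         # S(t_0) = 1 -> [1.0, 0.8, 0.6, 0.4, 0.2]
    print(density, survival)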
| 15,071 | 45.091743 | 269 | py |
| SubOmiEmbed | SubOmiEmbed-main/util/visualizer.py |
import os
import time
import numpy as np
import pandas as pd
import sklearn as sk
from sklearn.preprocessing import label_binarize
from util import util
from util import metrics
from torch.utils.tensorboard import SummaryWriter
class Visualizer:
"""
This class print/save logging information
"""
def __init__(self, param):
"""
Initialize the Visualizer class
"""
self.param = param
self.output_path = os.path.join(param.checkpoints_dir, param.experiment_name)
tb_dir = os.path.join(self.output_path, 'tb_log')
util.mkdir(tb_dir)
if param.isTrain:
# Create a logging file to store training losses
self.train_log_filename = os.path.join(self.output_path, 'train_log.txt')
with open(self.train_log_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Training Log ({:s}) -----------------------\n'.format(now))
self.train_summary_filename = os.path.join(self.output_path, 'train_summary.txt')
with open(self.train_summary_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Training Summary ({:s}) -----------------------\n'.format(now))
# Create log folder for TensorBoard
tb_train_dir = os.path.join(self.output_path, 'tb_log', 'train')
util.mkdir(tb_train_dir)
util.clear_dir(tb_train_dir)
# Create TensorBoard writer
self.train_writer = SummaryWriter(log_dir=tb_train_dir)
if param.isTest:
# Create a logging file to store testing metrics
self.test_log_filename = os.path.join(self.output_path, 'test_log.txt')
with open(self.test_log_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Testing Log ({:s}) -----------------------\n'.format(now))
self.test_summary_filename = os.path.join(self.output_path, 'test_summary.txt')
with open(self.test_summary_filename, 'a') as log_file:
now = time.strftime('%c')
log_file.write('----------------------- Testing Summary ({:s}) -----------------------\n'.format(now))
# Create log folder for TensorBoard
tb_test_dir = os.path.join(self.output_path, 'tb_log', 'test')
util.mkdir(tb_test_dir)
util.clear_dir(tb_test_dir)
# Create TensorBoard writer
self.test_writer = SummaryWriter(log_dir=tb_test_dir)
def print_train_log(self, epoch, iteration, losses_dict, metrics_dict, load_time, comp_time, batch_size, dataset_size, with_time=True):
"""
print train log on console and save the message to the disk
Parameters:
epoch (int) -- current epoch
iteration (int) -- current training iteration during this epoch
losses_dict (OrderedDict) -- training losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
load_time (float) -- data loading time per data point (normalized by batch_size)
comp_time (float) -- computational time per data point (normalized by batch_size)
batch_size (int) -- batch size of training
dataset_size (int) -- size of the training dataset
with_time (bool) -- print the running time or not
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
if with_time:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d} Load_t: {:.3f} Comp_t: {:.3f}] '.format(epoch, data_point_covered, load_time, comp_time)
else:
message = '[TRAIN] [Epoch: {:3d} Iter: {:4d}]\n'.format(epoch, data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message) # print the message
with open(self.train_log_filename, 'a') as log_file:
log_file.write(message + '\n') # save the message
def print_train_summary(self, epoch, losses_dict, output_dict, train_time, current_lr):
"""
print the summary of this training epoch
Parameters:
epoch (int) -- epoch number of this training model
losses_dict (OrderedDict) -- the losses dictionary
output_dict (OrderedDict) -- the downstream output dictionary
train_time (float) -- time used for training this epoch
current_lr (float) -- the learning rate of this epoch
"""
write_message = '{:s}\t'.format(str(epoch))
print_message = '[TRAIN] [Epoch: {:3d}]\n'.format(int(epoch))
for name, loss in losses_dict.items():
write_message += '{:.6f}\t'.format(np.mean(loss))
print_message += name + ': {:.3f} '.format(np.mean(loss))
self.train_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
metrics_dict = self.get_epoch_metrics(output_dict)
for name, metric in metrics_dict.items():
write_message += '{:.6f}\t'.format(metric)
print_message += name + ': {:.3f} '.format(metric)
self.train_writer.add_scalar('metric_'+name, metric, epoch)
train_time_msg = 'Training time used: {:.3f}s'.format(train_time)
print_message += '\n' + train_time_msg
with open(self.train_log_filename, 'a') as log_file:
log_file.write(train_time_msg + '\n')
current_lr_msg = 'Learning rate for this epoch: {:.7f}'.format(current_lr)
print_message += '\n' + current_lr_msg
self.train_writer.add_scalar('lr', current_lr, epoch)
with open(self.train_summary_filename, 'a') as log_file:
log_file.write(write_message + '\n')
print(print_message)
def print_test_log(self, epoch, iteration, losses_dict, metrics_dict, batch_size, dataset_size):
"""
print performance metrics of this iteration on console and save the message to the disk
Parameters:
epoch (int) -- epoch number of this testing model
iteration (int) -- current testing iteration during this epoch
losses_dict (OrderedDict) -- training losses stored in the ordered dict
metrics_dict (OrderedDict) -- metrics stored in the ordered dict
batch_size (int) -- batch size of testing
dataset_size (int) -- size of the testing dataset
"""
data_point_covered = min((iteration + 1) * batch_size, dataset_size)
message = '[TEST] [Epoch: {:3d} Iter: {:4d}] '.format(int(epoch), data_point_covered)
for name, loss in losses_dict.items():
message += '{:s}: {:.3f} '.format(name, loss[-1])
for name, metric in metrics_dict.items():
message += '{:s}: {:.3f} '.format(name, metric)
print(message)
with open(self.test_log_filename, 'a') as log_file:
log_file.write(message + '\n')
def print_test_summary(self, epoch, losses_dict, output_dict, test_time):
"""
print the summary of this testing epoch
Parameters:
epoch (int) -- epoch number of this testing model
losses_dict (OrderedDict) -- the losses dictionary
output_dict (OrderedDict) -- the downstream output dictionary
test_time (float) -- time used for testing this epoch
"""
write_message = '{:s}\t'.format(str(epoch))
print_message = '[TEST] [Epoch: {:3d}] '.format(int(epoch))
for name, loss in losses_dict.items():
# write_message += '{:.6f}\t'.format(np.mean(loss))
print_message += name + ': {:.3f} '.format(np.mean(loss))
self.test_writer.add_scalar('loss_'+name, np.mean(loss), epoch)
metrics_dict = self.get_epoch_metrics(output_dict)
for name, metric in metrics_dict.items():
write_message += '{:.6f}\t'.format(metric)
print_message += name + ': {:.3f} '.format(metric)
self.test_writer.add_scalar('metric_' + name, metric, epoch)
with open(self.test_summary_filename, 'a') as log_file:
log_file.write(write_message + '\n')
test_time_msg = 'Testing time used: {:.3f}s'.format(test_time)
print_message += '\n' + test_time_msg
print(print_message)
with open(self.test_log_filename, 'a') as log_file:
log_file.write(test_time_msg + '\n')
def get_epoch_metrics(self, output_dict):
"""
Get the downstream task metrics for whole epoch
Parameters:
output_dict (OrderedDict) -- the output dictionary used to compute the downstream task metrics
"""
if self.param.downstream_task == 'classification':
y_true = output_dict['y_true'].cpu().numpy()
y_true_binary = label_binarize(y_true, classes=range(self.param.class_num))
y_pred = output_dict['y_pred'].cpu().numpy()
y_prob = output_dict['y_prob'].cpu().numpy()
if self.param.class_num == 2:
y_prob = y_prob[:, 1]
accuracy = sk.metrics.accuracy_score(y_true, y_pred)
precision = sk.metrics.precision_score(y_true, y_pred, average='macro', zero_division=0)
recall = sk.metrics.recall_score(y_true, y_pred, average='macro', zero_division=0)
f1 = sk.metrics.f1_score(y_true, y_pred, average='macro', zero_division=0)
try:
auc = sk.metrics.roc_auc_score(y_true_binary, y_prob, multi_class='ovo', average='macro')
except ValueError:
auc = -1
print('ValueError: ROC AUC score is not defined in this case.')
return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'auc': auc}
elif self.param.downstream_task == 'regression':
y_true = output_dict['y_true'].cpu().numpy()
y_pred = output_dict['y_pred'].cpu().detach().numpy()
mse = sk.metrics.mean_squared_error(y_true, y_pred)
rmse = sk.metrics.mean_squared_error(y_true, y_pred, squared=False)
mae = sk.metrics.mean_absolute_error(y_true, y_pred)
medae = sk.metrics.median_absolute_error(y_true, y_pred)
r2 = sk.metrics.r2_score(y_true, y_pred)
return {'mse': mse, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
elif self.param.downstream_task == 'survival':
metrics_start_time = time.time()
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs}
elif self.param.downstream_task == 'multitask':
metrics_start_time = time.time()
# Survival
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
# Classification
y_true_cla = output_dict['y_true_cla'].cpu().numpy()
y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num))
y_pred_cla = output_dict['y_pred_cla'].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'].cpu().numpy()
if self.param.class_num == 2:
y_prob_cla = y_prob_cla[:, 1]
accuracy = sk.metrics.accuracy_score(y_true_cla, y_pred_cla)
precision = sk.metrics.precision_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
recall = sk.metrics.recall_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
f1 = sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0)
'''
try:
auc = sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro')
except ValueError:
auc = -1
print('ValueError: ROC AUC score is not defined in this case.')
'''
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
# mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs, 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'rmse': rmse, 'mae': mae, 'medae': medae, 'r2': r2}
elif self.param.downstream_task == 'alltask':
metrics_start_time = time.time()
# Survival
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
y_pred_survival = output_dict['survival'].cpu().numpy()
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
try:
c_index = metrics.c_index(y_true_T, y_true_E, y_pred_risk)
except ValueError:
c_index = -1
print('ValueError: NaNs detected in input when calculating c-index.')
try:
ibs = metrics.ibs(y_true_T, y_true_E, y_pred_survival, time_points)
except ValueError:
ibs = -1
print('ValueError: NaNs detected in input when calculating integrated brier score.')
# Classification
accuracy = []
f1 = []
auc = []
for i in range(self.param.task_num - 2):
y_true_cla = output_dict['y_true_cla'][i].cpu().numpy()
y_true_cla_binary = label_binarize(y_true_cla, classes=range(self.param.class_num[i]))
y_pred_cla = output_dict['y_pred_cla'][i].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'][i].cpu().numpy()
if self.param.class_num[i] == 2:
y_prob_cla = y_prob_cla[:, 1]
accuracy.append(sk.metrics.accuracy_score(y_true_cla, y_pred_cla))
f1.append(sk.metrics.f1_score(y_true_cla, y_pred_cla, average='macro', zero_division=0))
try:
auc.append(sk.metrics.roc_auc_score(y_true_cla_binary, y_prob_cla, multi_class='ovo', average='macro'))
except ValueError:
auc.append(-1)
print('ValueError: ROC AUC score is not defined in this case.')
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = output_dict['y_pred_reg'].cpu().detach().numpy()
# mse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg)
rmse = sk.metrics.mean_squared_error(y_true_reg, y_pred_reg, squared=False)
# mae = sk.metrics.mean_absolute_error(y_true_reg, y_pred_reg)
# medae = sk.metrics.median_absolute_error(y_true_reg, y_pred_reg)
r2 = sk.metrics.r2_score(y_true_reg, y_pred_reg)
metrics_time = time.time() - metrics_start_time
print('Metrics computing time: {:.3f}s'.format(metrics_time))
return {'c-index': c_index, 'ibs': ibs, 'accuracy_1': accuracy[0], 'f1_1': f1[0], 'auc_1': auc[0], 'accuracy_2': accuracy[1], 'f1_2': f1[1], 'auc_2': auc[1], 'accuracy_3': accuracy[2], 'f1_3': f1[2], 'auc_3': auc[2], 'accuracy_4': accuracy[3], 'f1_4': f1[3], 'auc_4': auc[3], 'accuracy_5': accuracy[4], 'f1_5': f1[4], 'auc_5': auc[4], 'rmse': rmse, 'r2': r2}
def save_output_dict(self, output_dict):
"""
Save the downstream task output to disk
Parameters:
output_dict (OrderedDict) -- the downstream task output dictionary to be saved
"""
down_path = os.path.join(self.output_path, 'down_output')
util.mkdir(down_path)
if self.param.downstream_task == 'classification':
# Prepare files
index = output_dict['index'].numpy()
y_true = output_dict['y_true'].cpu().numpy()
y_pred = output_dict['y_pred'].cpu().numpy()
y_prob = output_dict['y_prob'].cpu().numpy()
sample_list = self.param.sample_list[index]
# Output files
y_df = pd.DataFrame({'sample': sample_list, 'y_true': y_true, 'y_pred': y_pred}, index=index)
y_df_path = os.path.join(down_path, 'y_df.tsv')
y_df.to_csv(y_df_path, sep='\t')
prob_df = pd.DataFrame(y_prob, columns=range(self.param.class_num), index=sample_list)
y_prob_path = os.path.join(down_path, 'y_prob.tsv')
prob_df.to_csv(y_prob_path, sep='\t')
elif self.param.downstream_task == 'regression':
# Prepare files
index = output_dict['index'].numpy()
y_true = output_dict['y_true'].cpu().numpy()
y_pred = np.squeeze(output_dict['y_pred'].cpu().detach().numpy())
sample_list = self.param.sample_list[index]
# Output files
y_df = pd.DataFrame({'sample': sample_list, 'y_true': y_true, 'y_pred': y_pred}, index=index)
y_df_path = os.path.join(down_path, 'y_df.tsv')
y_df.to_csv(y_df_path, sep='\t')
elif self.param.downstream_task == 'survival':
# Prepare files
index = output_dict['index'].numpy()
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
survival_function = output_dict['survival'].cpu().numpy()
y_out = output_dict['y_out'].cpu().numpy()
sample_list = self.param.sample_list[index]
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
# Output files
y_df = pd.DataFrame({'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
y_df_path = os.path.join(down_path, 'y_df.tsv')
y_df.to_csv(y_df_path, sep='\t')
survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
survival_function_path = os.path.join(down_path, 'survival_function.tsv')
survival_function_df.to_csv(survival_function_path, sep='\t')
y_out_df = pd.DataFrame(y_out, index=sample_list)
y_out_path = os.path.join(down_path, 'y_out.tsv')
y_out_df.to_csv(y_out_path, sep='\t')
elif self.param.downstream_task == 'multitask':
# Survival
index = output_dict['index'].numpy()
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
survival_function = output_dict['survival'].cpu().numpy()
y_out_sur = output_dict['y_out_sur'].cpu().numpy()
sample_list = self.param.sample_list[index]
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
y_df_sur = pd.DataFrame(
{'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
y_df_sur_path = os.path.join(down_path, 'y_df_survival.tsv')
y_df_sur.to_csv(y_df_sur_path, sep='\t')
survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
survival_function_path = os.path.join(down_path, 'survival_function.tsv')
survival_function_df.to_csv(survival_function_path, sep='\t')
y_out_sur_df = pd.DataFrame(y_out_sur, index=sample_list)
y_out_sur_path = os.path.join(down_path, 'y_out_survival.tsv')
y_out_sur_df.to_csv(y_out_sur_path, sep='\t')
# Classification
y_true_cla = output_dict['y_true_cla'].cpu().numpy()
y_pred_cla = output_dict['y_pred_cla'].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'].cpu().numpy()
y_df_cla = pd.DataFrame({'sample': sample_list, 'y_true': y_true_cla, 'y_pred': y_pred_cla}, index=index)
y_df_cla_path = os.path.join(down_path, 'y_df_classification.tsv')
y_df_cla.to_csv(y_df_cla_path, sep='\t')
prob_cla_df = pd.DataFrame(y_prob_cla, columns=range(self.param.class_num), index=sample_list)
y_prob_cla_path = os.path.join(down_path, 'y_prob_classification.tsv')
prob_cla_df.to_csv(y_prob_cla_path, sep='\t')
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = np.squeeze(output_dict['y_pred_reg'].cpu().detach().numpy())
y_df_reg = pd.DataFrame({'sample': sample_list, 'y_true': y_true_reg, 'y_pred': y_pred_reg}, index=index)
y_df_reg_path = os.path.join(down_path, 'y_df_regression.tsv')
y_df_reg.to_csv(y_df_reg_path, sep='\t')
elif self.param.downstream_task == 'alltask':
# Survival
index = output_dict['index'].numpy()
y_true_E = output_dict['y_true_E'].cpu().numpy()
y_true_T = output_dict['y_true_T'].cpu().numpy()
y_pred_risk = output_dict['risk'].cpu().numpy()
survival_function = output_dict['survival'].cpu().numpy()
y_out_sur = output_dict['y_out_sur'].cpu().numpy()
sample_list = self.param.sample_list[index]
time_points = util.get_time_points(self.param.survival_T_max, self.param.time_num)
y_df_sur = pd.DataFrame(
{'sample': sample_list, 'true_T': y_true_T, 'true_E': y_true_E, 'pred_risk': y_pred_risk}, index=index)
y_df_sur_path = os.path.join(down_path, 'y_df_survival.tsv')
y_df_sur.to_csv(y_df_sur_path, sep='\t')
survival_function_df = pd.DataFrame(survival_function, columns=time_points, index=sample_list)
survival_function_path = os.path.join(down_path, 'survival_function.tsv')
survival_function_df.to_csv(survival_function_path, sep='\t')
y_out_sur_df = pd.DataFrame(y_out_sur, index=sample_list)
y_out_sur_path = os.path.join(down_path, 'y_out_survival.tsv')
y_out_sur_df.to_csv(y_out_sur_path, sep='\t')
# Classification
for i in range(self.param.task_num - 2):
y_true_cla = output_dict['y_true_cla'][i].cpu().numpy()
y_pred_cla = output_dict['y_pred_cla'][i].cpu().numpy()
y_prob_cla = output_dict['y_prob_cla'][i].cpu().numpy()
y_df_cla = pd.DataFrame({'sample': sample_list, 'y_true': y_true_cla, 'y_pred': y_pred_cla}, index=index)
y_df_cla_path = os.path.join(down_path, 'y_df_classification_'+str(i+1)+'.tsv')
y_df_cla.to_csv(y_df_cla_path, sep='\t')
prob_cla_df = pd.DataFrame(y_prob_cla, columns=range(self.param.class_num[i]), index=sample_list)
y_prob_cla_path = os.path.join(down_path, 'y_prob_classification_'+str(i+1)+'.tsv')
prob_cla_df.to_csv(y_prob_cla_path, sep='\t')
# Regression
y_true_reg = output_dict['y_true_reg'].cpu().numpy()
y_pred_reg = np.squeeze(output_dict['y_pred_reg'].cpu().detach().numpy())
y_df_reg = pd.DataFrame({'sample': sample_list, 'y_true': y_true_reg, 'y_pred': y_pred_reg}, index=index)
y_df_reg_path = os.path.join(down_path, 'y_df_regression.tsv')
y_df_reg.to_csv(y_df_reg_path, sep='\t')
def save_latent_space(self, latent_dict, sample_list):
"""
        save the latent space matrix to disk
Parameters:
latent_dict (OrderedDict) -- the latent space dictionary
sample_list (ndarray) -- the sample list for the latent matrix
"""
reordered_sample_list = sample_list[latent_dict['index'].astype(int)]
latent_df = pd.DataFrame(latent_dict['latent'], index=reordered_sample_list)
output_path = os.path.join(self.param.checkpoints_dir, self.param.experiment_name, 'latent_space.tsv')
print('Saving the latent space matrix...')
latent_df.to_csv(output_path, sep='\t')
@staticmethod
def print_phase(phase):
"""
print the phase information
Parameters:
            phase (str) -- the phase of the training process ('p1', 'p2' or 'p3')
"""
if phase == 'p1':
print('PHASE 1: Unsupervised Phase')
elif phase == 'p2':
print('PHASE 2: Supervised Phase')
elif phase == 'p3':
print('PHASE 3: Supervised Phase')
| 27,478 | 49.981447 | 370 | py |
| SubOmiEmbed | SubOmiEmbed-main/util/util.py |
"""
Contain some simple helper functions
"""
import os
import shutil
import torch
import random
import numpy as np
def mkdir(path):
"""
    Create an empty directory on the disk if it doesn't exist
Parameters:
path(str) -- a directory path we would like to create
"""
if not os.path.exists(path):
os.makedirs(path)
def clear_dir(path):
"""
    Delete all files in a path
    Parameters:
        path(str) -- a directory path in which we would like to delete all files
"""
if os.path.exists(path):
shutil.rmtree(path, ignore_errors=True)
os.makedirs(path, exist_ok=True)
def setup_seed(seed):
"""
    Set up the seed to make the experiments deterministic
Parameters:
seed(int) -- the random seed
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def get_time_points(T_max, time_num, extra_time_percent=0.1):
"""
Get time points for the MTLR model
"""
# Get time points in the time axis
time_points = np.linspace(0, T_max * (1 + extra_time_percent), time_num + 1)
return time_points
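# Minimal usage sketch of the helpers above (illustrative values only, not part of the original module).
if __name__ == '__main__':
    setup_seed(42)
    print(get_time_points(T_max=10, time_num=5))  # -> [ 0.   2.2  4.4  6.6  8.8 11. ]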
| 1,204 | 20.517857 | 82 | py |
| SubOmiEmbed | SubOmiEmbed-main/util/metrics.py |
"""
Contain some metrics
"""
import numpy as np
# from lifelines.utils import concordance_index
# from pysurvival.utils._metrics import _concordance_index
from sksurv.metrics import concordance_index_censored
from sksurv.metrics import integrated_brier_score
def c_index(true_T, true_E, pred_risk, include_ties=True):
"""
Calculate c-index for survival prediction downstream task
"""
    # Order true_T, true_E and pred_risk in descending order according to true_T
order = np.argsort(-true_T)
true_T = true_T[order]
true_E = true_E[order]
pred_risk = pred_risk[order]
# Calculating the c-index
# result = concordance_index(true_T, -pred_risk, true_E)
# result = _concordance_index(pred_risk, true_T, true_E, include_ties)[0]
result = concordance_index_censored(true_E.astype(bool), true_T, pred_risk)[0]
return result
def ibs(true_T, true_E, pred_survival, time_points):
"""
Calculate integrated brier score for survival prediction downstream task
"""
true_E_bool = true_E.astype(bool)
true = np.array([(true_E_bool[i], true_T[i]) for i in range(len(true_E))], dtype=[('event', np.bool_), ('time', np.float32)])
# time points must be within the range of T
min_T = true_T.min()
max_T = true_T.max()
valid_index = []
for i in range(len(time_points)):
if min_T <= time_points[i] <= max_T:
valid_index.append(i)
time_points = time_points[valid_index]
pred_survival = pred_survival[:, valid_index]
result = integrated_brier_score(true, true, pred_survival, time_points)
return result
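# Toy example for c_index (illustrative data only): the shortest survival time gets the
# highest predicted risk, so every comparable pair is concordant and the c-index is 1.0.
# ibs() is called the same way but additionally needs an (n_samples, n_times) survival
# probability matrix and the corresponding time points.
if __name__ == '__main__':
    true_T = np.array([5.0, 10.0, 15.0], dtype=np.float32)
    true_E = np.array([1, 1, 0])
    pred_risk = np.array([0.9, 0.5, 0.1])
    print(c_index(true_T, true_E, pred_risk))  # -> 1.0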
| 1,617 | 31.36 | 129 | py |
| SubOmiEmbed | SubOmiEmbed-main/util/__init__.py | 0 | 0 | 0 | py |
| SubOmiEmbed | SubOmiEmbed-main/util/preprocess.py |
"""
Contain some omics data preprocess functions
"""
import pandas as pd
def separate_B(B_df_single):
"""
Separate the DNA methylation dataframe into subsets according to their targeting chromosomes
Parameters:
B_df_single(DataFrame) -- a dataframe that contains the single DNA methylation matrix
Return:
        B_df_list(list) -- a list with 23 subset dataframes
        B_dim_list(list) -- the dims of each chromosome
"""
anno = pd.read_csv('./anno/B_anno.csv', dtype={'CHR': str}, index_col=0)
anno_contain = anno.loc[B_df_single.index, :]
    print('Separating B according to the targeting chromosome...')
B_df_list, B_dim_list = [], []
ch_id = list(range(1, 23))
ch_id.append('X')
for ch in ch_id:
ch_index = anno_contain[anno_contain.CHR == str(ch)].index
ch_df = B_df_single.loc[ch_index, :]
B_df_list.append(ch_df)
B_dim_list.append(len(ch_df))
return B_df_list, B_dim_list
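# Usage sketch (illustrative; it assumes ./anno/B_anno.csv is present and that B.tsv holds
# the probe-by-sample DNA methylation matrix):
#   B_df = pd.read_csv('B.tsv', sep='\t', index_col=0)
#   B_df_list, B_dim_list = separate_B(B_df)
#   # B_df_list[0] contains the chromosome-1 probes, ..., B_df_list[22] the chromosome-X probes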
| 967 | 30.225806 | 96 | py |
| SubOmiEmbed | SubOmiEmbed-main/params/train_test_params.py |
from .basic_params import BasicParams
class TrainTestParams(BasicParams):
"""
    This class is a child class of BasicParams.
    This class includes parameters for training & testing and parameters inherited from the parent class.
"""
def initialize(self, parser):
parser = BasicParams.initialize(self, parser)
# Training parameters
parser.add_argument('--epoch_num_p1', type=int, default=50,
help='epoch number for phase 1')
parser.add_argument('--epoch_num_p2', type=int, default=50,
help='epoch number for phase 2')
parser.add_argument('--epoch_num_p3', type=int, default=100,
help='epoch number for phase 3')
parser.add_argument('--lr', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('--beta1', type=float, default=0.5,
help='momentum term of adam')
parser.add_argument('--lr_policy', type=str, default='linear',
help='The learning rate policy for the scheduler. [linear | step | plateau | cosine]')
parser.add_argument('--epoch_count', type=int, default=1,
                            help='the starting epoch count, default starts from 1')
parser.add_argument('--epoch_num_decay', type=int, default=50,
help='Number of epoch to linearly decay learning rate to zero (lr_policy == linear)')
parser.add_argument('--decay_step_size', type=int, default=50,
help='The original learning rate multiply by a gamma every decay_step_size epoch (lr_policy == step)')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay (L2 penalty)')
# Network saving and loading parameters
parser.add_argument('--continue_train', action='store_true',
help='load the latest model and continue training')
parser.add_argument('--save_model', action='store_true',
help='save the model during training')
parser.add_argument('--save_epoch_freq', type=int, default=-1,
help='frequency of saving checkpoints at the end of epochs, -1 means only save the last epoch')
# Logging and visualization
parser.add_argument('--print_freq', type=int, default=1,
help='frequency of showing results on console')
# Dataset parameters
parser.add_argument('--train_ratio', type=float, default=0.8,
help='ratio of training set in the full dataset')
parser.add_argument('--test_ratio', type=float, default=0.2,
help='ratio of testing set in the full dataset')
self.isTrain = True
self.isTest = True
return parser
| 2,954 | 52.727273 | 130 | py |
| SubOmiEmbed | SubOmiEmbed-main/params/basic_params.py |
import time
import argparse
import torch
import os
import models
from util import util
class BasicParams:
"""
    This class defines the console parameters
"""
def __init__(self):
"""
Reset the class. Indicates the class hasn't been initialized
"""
self.initialized = False
self.isTrain = True
self.isTest = True
def initialize(self, parser):
"""
Define the common console parameters
"""
parser.add_argument('--gpu_ids', type=str, default='0',
                            help='which GPU(s) to use: e.g. 0 or 0,1; -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints',
                            help='models, settings and intermediate results are saved in a folder in this directory')
parser.add_argument('--experiment_name', type=str, default='test',
help='name of the folder in the checkpoint directory')
# Dataset parameters
parser.add_argument('--omics_mode', type=str, default='a',
                            help='omics types to use in the model, options: [abc | ab | a | b | c]')
parser.add_argument('--data_root', type=str, default='./data',
help='path to input data')
parser.add_argument('--batch_size', type=int, default=32,
help='input data batch size')
parser.add_argument('--num_threads', default=0, type=int,
help='number of threads for loading data')
parser.add_argument('--set_pin_memory', action='store_true',
help='set pin_memory in the dataloader to increase data loading performance')
parser.add_argument('--not_stratified', action='store_true',
help='do not apply the stratified mode in train/test split if set true')
parser.add_argument('--use_sample_list', action='store_true',
help='provide a subset sample list of the dataset, store in the path data_root/sample_list.tsv, if False use all the samples')
parser.add_argument('--use_feature_lists', action='store_true',
help='provide feature lists of the input omics data, e.g. data_root/feature_list_A.tsv, if False use all the features')
parser.add_argument('--detect_na', action='store_true',
                            help='detect missing value markers during data loading, keeping this False can improve the loading performance')
parser.add_argument('--file_format', type=str, default='tsv',
help='file format of the omics data, options: [tsv | csv | hdf]')
# Model parameters
parser.add_argument('--model', type=str, default='vae_classifier',
help='chooses which model want to use, options: [vae_classifier | vae_regression | vae_survival | vae_multitask]')
parser.add_argument('--net_VAE', type=str, default='fc_sep',
                            help='specify the backbone of the VAE, default is the separate fully-connected network (fc_sep), options: [conv_1d | fc_sep | fc]')
parser.add_argument('--net_down', type=str, default='multi_FC_classifier',
help='specify the backbone of the downstream task network, default is the multi-layer FC classifier, options: [multi_FC_classifier | multi_FC_regression | multi_FC_survival | multi_FC_multitask]')
parser.add_argument('--norm_type', type=str, default='batch',
                            help='the type of normalization applied to the model, default is batch normalization, options: [batch | instance | none]')
parser.add_argument('--filter_num', type=int, default=8,
help='number of filters in the last convolution layer in the generator')
parser.add_argument('--conv_k_size', type=int, default=9,
help='the kernel size of convolution layer, default kernel size is 9, the kernel is one dimensional.')
parser.add_argument('--dropout_p', type=float, default=0.2,
                            help='probability of an element to be zeroed in a dropout layer, 0 means no dropout.')
parser.add_argument('--leaky_slope', type=float, default=0.2,
help='the negative slope of the Leaky ReLU activation function')
parser.add_argument('--latent_space_dim', type=int, default=128,
help='the dimensionality of the latent space')
parser.add_argument('--seed', type=int, default=42,
help='random seed')
parser.add_argument('--init_type', type=str, default='normal',
help='choose the method of network initialization, options: [normal | xavier_normal | xavier_uniform | kaiming_normal | kaiming_uniform | orthogonal]')
parser.add_argument('--init_gain', type=float, default=0.02,
help='scaling factor for normal, xavier and orthogonal initialization methods')
# Feature subsetting
parser.add_argument('--use_subset_features', action='store_true',
help='divide features into subsets, train using each subset, and sum up total reconstruction losses while training with each subset. net_VAE will be set to fc if this argument is provided')
parser.add_argument("--subset_num", type=int, default=1,
help='No of subsets to divide features into')
parser.add_argument("--agg_method", type=str, default='mean',
help='Method to use while aggregating representations from multiple subsets for downstream task, options: [mean | max | min | sum | concat | random]')
parser.add_argument("--enc_reduction_factor", type=int, default=1,
help='the factor by which dimension of encoder hidden layers should be divided')
parser.add_argument("--dec_reduction_factor", type=int, default=1,
help='the factor by which dimension of decoder hidden layers should be divided')
parser.add_argument("--down_reduction_factor", type=int, default=1,
help='the factor by which dimension of downstream hidden layers should be divided')
parser.add_argument("--use_subset_identity", action='store_true',
help='use one hot encoded subset identity as additional input to downstream task.')
# Loss parameters
parser.add_argument('--recon_loss', type=str, default='BCE',
help='chooses the reconstruction loss function, options: [BCE | MSE | L1]')
parser.add_argument('--reduction', type=str, default='mean',
help='chooses the reduction to apply to the loss function, options: [sum | mean]')
parser.add_argument('--k_kl', type=float, default=0.01,
help='weight for the kl loss')
parser.add_argument('--k_embed', type=float, default=0.001,
help='weight for the embedding loss')
# Other parameters
parser.add_argument('--deterministic', action='store_true',
help='make the model deterministic for reproduction if set true')
parser.add_argument('--detail', action='store_true',
help='print more detailed information if set true')
parser.add_argument('--epoch_to_load', type=str, default='latest',
help='the epoch number to load, set latest to load latest cached model')
parser.add_argument('--experiment_to_load', type=str, default='test',
help='the experiment to load')
self.initialized = True # set the initialized to True after we define the parameters of the project
return parser
def get_params(self):
"""
Initialize our parser with basic parameters once.
Add additional model-specific parameters.
"""
if not self.initialized: # check if this object has been initialized
# if not create a new parser object
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# use our method to initialize the parser with the predefined arguments
parser = self.initialize(parser)
# get the basic parameters
param, _ = parser.parse_known_args()
# modify model-related parser options
model_name = param.model
model_param_setter = models.get_param_setter(model_name)
parser = model_param_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
return parser.parse_args()
def print_params(self, param):
"""
Print welcome words and command line parameters.
Save the command line parameters in a txt file to the disk
"""
message = ''
message += '\nWelcome to OmiEmbed\nby Xiaoyu Zhang x.zhang18@imperial.ac.uk\n\n'
message += '-----------------------Running Parameters-----------------------\n'
for key, value in sorted(vars(param).items()):
comment = ''
default = self.parser.get_default(key)
if value != default:
comment = '\t[default: %s]' % str(default)
message += '{:>18}: {:<15}{}\n'.format(str(key), str(value), comment)
message += '----------------------------------------------------------------\n'
print(message)
# Save the running parameters setting in the disk
experiment_dir = os.path.join(param.checkpoints_dir, param.experiment_name)
util.mkdir(experiment_dir)
file_name = os.path.join(experiment_dir, 'cmd_parameters.txt')
with open(file_name, 'w') as param_file:
now = time.strftime('%c')
param_file.write('{:s}\n'.format(now))
param_file.write(message)
param_file.write('\n')
def parse(self):
"""
Parse the parameters of our project. Set up GPU device. Print the welcome words and list parameters in the console.
"""
param = self.get_params() # get the parameters to the object param
param.isTrain = self.isTrain
param.isTest = self.isTest
if param.use_subset_features:
param.net_VAE = 'fc'
if param.use_subset_identity:
param.agg_method = 'mean'
# Print welcome words and command line parameters
self.print_params(param)
# Set the internal parameters
# epoch_num: the total epoch number
if self.isTrain:
param.epoch_num = param.epoch_num_p1 + param.epoch_num_p2 + param.epoch_num_p3
# downstream_task: for the classification task a labels.tsv file is needed, for the regression task a values.tsv file is needed
if param.model == 'vae_classifier':
param.downstream_task = 'classification'
elif param.model == 'vae_regression':
param.downstream_task = 'regression'
elif param.model == 'vae_survival':
param.downstream_task = 'survival'
elif param.model == 'vae_multitask' or param.model == 'vae_multitask_gn':
param.downstream_task = 'multitask'
elif param.model == 'vae_alltask' or param.model == 'vae_alltask_gn':
param.downstream_task = 'alltask'
else:
raise NotImplementedError('Model name [%s] is not recognized' % param.model)
# add_channel: add one extra dimension of channel for the input data, used for convolution layer
# ch_separate: separate the DNA methylation matrix base on the chromosome
if param.net_VAE == 'conv_1d':
param.add_channel = True
param.ch_separate = False
elif param.net_VAE == 'fc_sep':
param.add_channel = False
param.ch_separate = True
elif param.net_VAE == 'fc':
param.add_channel = False
param.ch_separate = False
else:
raise NotImplementedError('VAE model name [%s] is not recognized' % param.net_VAE)
# omics_num: the number of omics types
param.omics_num = len(param.omics_mode)
# Set up GPU
str_gpu_ids = param.gpu_ids.split(',')
param.gpu_ids = []
for str_gpu_id in str_gpu_ids:
int_gpu_id = int(str_gpu_id)
if int_gpu_id >= 0:
param.gpu_ids.append(int_gpu_id)
if len(param.gpu_ids) > 0:
torch.cuda.set_device(param.gpu_ids[0])
self.param = param
return self.param
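# Typical entry-point usage (sketch; the concrete child classes are defined in
# params/train_params.py, params/train_test_params.py and params/test_params.py):
#   from params.train_params import TrainParams
#   param = TrainParams().parse()   # builds the parser, prints and saves the settings
#   print(param.downstream_task, param.gpu_ids)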
| 12,834 | 54.323276 | 224 | py |
| SubOmiEmbed | SubOmiEmbed-main/params/train_params.py |
from .basic_params import BasicParams
class TrainParams(BasicParams):
"""
    This class is a child class of BasicParams.
    This class includes parameters for training and parameters inherited from the parent class.
"""
def initialize(self, parser):
parser = BasicParams.initialize(self, parser)
# Training parameters
parser.add_argument('--epoch_num_p1', type=int, default=50,
help='epoch number for phase 1')
parser.add_argument('--epoch_num_p2', type=int, default=50,
help='epoch number for phase 2')
parser.add_argument('--epoch_num_p3', type=int, default=100,
help='epoch number for phase 3')
parser.add_argument('--lr', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('--beta1', type=float, default=0.5,
help='momentum term of adam')
parser.add_argument('--lr_policy', type=str, default='linear',
help='The learning rate policy for the scheduler. [linear | step | plateau | cosine]')
parser.add_argument('--epoch_count', type=int, default=1,
                            help='the starting epoch count, default starts from 1')
parser.add_argument('--epoch_num_decay', type=int, default=50,
help='Number of epoch to linearly decay learning rate to zero (lr_policy == linear)')
parser.add_argument('--decay_step_size', type=int, default=50,
help='The original learning rate multiply by a gamma every decay_step_size epoch (lr_policy == step)')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay (L2 penalty)')
# Network saving and loading parameters
parser.add_argument('--continue_train', action='store_true',
help='load the latest model and continue training')
parser.add_argument('--save_model', action='store_true',
help='save the model during training')
parser.add_argument('--save_epoch_freq', type=int, default=-1,
help='frequency of saving checkpoints at the end of epochs, -1 means only save the last epoch')
# Logging and visualization
parser.add_argument('--print_freq', type=int, default=1,
help='frequency of showing results on console')
parser.add_argument('--save_latent_space', action='store_true',
                            help='save the latent space of input data to disk')
self.isTrain = True
self.isTest = False
return parser
| 2,769 | 53.313725 | 130 | py |
| SubOmiEmbed | SubOmiEmbed-main/params/test_params.py |
from .basic_params import BasicParams
class TestParams(BasicParams):
"""
    This class is a child class of BasicParams.
    This class includes parameters for testing and parameters inherited from the parent class.
"""
def initialize(self, parser):
parser = BasicParams.initialize(self, parser)
# Testing parameters
        parser.add_argument('--save_latent_space', action='store_true', help='save the latent space of input data to disk')
# Logging and visualization
parser.add_argument('--print_freq', type=int, default=1,
help='frequency of showing results on console')
self.isTrain = False
self.isTest = True
return parser
| 727 | 32.090909 | 123 | py |
| SubOmiEmbed | SubOmiEmbed-main/params/__init__.py | 0 | 0 | 0 | py |
| SubOmiEmbed | SubOmiEmbed-main/datasets/a_dataset.py |
import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
import numpy as np
import pandas as pd
import torch
class ADataset(BasicDataset):
"""
    A dataset class for the gene expression dataset.
    The file should be prepared as '/path/to/data/A.tsv'.
    In each omics file, each column should be a sample and each row should be a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
# Load data for A
A_df = load_file(param, 'A')
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = A_df.columns
# Get the feature list for A
if param.use_feature_lists:
feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv') # get the path of feature list
feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
else:
feature_list_A = A_df.index
A_df = A_df.loc[feature_list_A, self.sample_list]
self.A_dim = A_df.shape[0]
self.sample_num = A_df.shape[1]
A_array = A_df.values
if self.param.add_channel:
# Add one dimension for the channel
A_array = A_array[np.newaxis, :, :]
self.A_tensor_all = torch.Tensor(A_array)
self.omics_dims.append(self.A_dim)
self.class_num = 0
if param.downstream_task == 'classification':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
elif param.downstream_task == 'regression':
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
elif param.downstream_task == 'survival':
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
if param.stratify_label:
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
elif param.downstream_task == 'multitask':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
elif param.downstream_task == 'alltask':
# Load labels
self.labels_array = []
self.class_num = []
for i in range(param.task_num-2):
labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array.append(labels_df.iloc[:, -1].values)
# Get the class number
self.class_num.append(len(labels_df.iloc[:, -1].unique()))
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains A_tensor, label and index
input_omics (list) -- a list of input omics tensor
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of A
if self.param.add_channel:
A_tensor = self.A_tensor_all[:, :, index]
else:
A_tensor = self.A_tensor_all[:, index]
# Get the tensor of B
if self.param.ch_separate:
B_tensor = list(np.zeros(23))
else:
B_tensor = 0
# Get the tensor of C
C_tensor = 0
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value, 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value, 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
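# Usage sketch (illustrative; in the project the dataset is normally built through its own
# dataloader wrapper, so the direct construction below is only for clarity):
#   dataset = ADataset(param)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=param.batch_size, shuffle=True)
#   batch = next(iter(loader))
#   A_batch = batch['input_omics'][0]   # gene expression tensor for this mini-batch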
| 10,137 | 50.461929 | 184 | py |
| SubOmiEmbed | SubOmiEmbed-main/datasets/abc_dataset.py |
import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class ABCDataset(BasicDataset):
"""
    A dataset class for the multi-omics dataset.
    For gene expression data, the file should be prepared as '/path/to/data/A.tsv'.
    For DNA methylation data, the file should be prepared as '/path/to/data/B.tsv'.
    For miRNA expression data, the file should be prepared as '/path/to/data/C.tsv'.
    In each omics file, each column should be a sample and each row should be a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
# Load data for A
A_df = load_file(param, 'A')
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = A_df.columns
# Get the feature list for A
if param.use_feature_lists:
feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv') # get the path of feature list
feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
else:
feature_list_A = A_df.index
A_df = A_df.loc[feature_list_A, self.sample_list]
self.A_dim = A_df.shape[0]
self.sample_num = A_df.shape[1]
A_array = A_df.values
if self.param.add_channel:
# Add one dimension for the channel
A_array = A_array[np.newaxis, :, :]
self.A_tensor_all = torch.Tensor(A_array)
self.omics_dims.append(self.A_dim)
# Load data for B
B_df = load_file(param, 'B')
# Get the feature list for B
if param.use_feature_lists:
feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv') # get the path of feature list
feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
else:
feature_list_B = B_df.index
B_df = B_df.loc[feature_list_B, self.sample_list]
if param.ch_separate:
B_df_list, self.B_dim = preprocess.separate_B(B_df)
self.B_tensor_all = []
for i in range(0, 23):
B_array = B_df_list[i].values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
B_tensor_part = torch.Tensor(B_array)
self.B_tensor_all.append(B_tensor_part)
else:
self.B_dim = B_df.shape[0]
B_array = B_df.values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
self.B_tensor_all = torch.Tensor(B_array)
self.omics_dims.append(self.B_dim)
# Load data for C
C_df = load_file(param, 'C')
# Get the feature list for C
if param.use_feature_lists:
feature_list_C_path = os.path.join(param.data_root, 'feature_list_C.tsv') # get the path of feature list
feature_list_C = np.loadtxt(feature_list_C_path, delimiter='\t', dtype='<U32')
else:
feature_list_C = C_df.index
C_df = C_df.loc[feature_list_C, self.sample_list]
self.C_dim = C_df.shape[0]
C_array = C_df.values
if self.param.add_channel:
# Add one dimension for the channel
C_array = C_array[np.newaxis, :, :]
self.C_tensor_all = torch.Tensor(C_array)
self.omics_dims.append(self.C_dim)
self.class_num = 0
if param.downstream_task == 'classification':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
elif param.downstream_task == 'regression':
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
elif param.downstream_task == 'survival':
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
if param.stratify_label:
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
elif param.downstream_task == 'multitask':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
elif param.downstream_task == 'alltask':
# Load labels
self.labels_array = []
self.class_num = []
for i in range(param.task_num-2):
labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array.append(labels_df.iloc[:, -1].values)
# Get the class number
self.class_num.append(len(labels_df.iloc[:, -1].unique()))
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains A_tensor, B_tensor, C_tensor, label and index
            input_omics (list) -- a list of input omics tensors
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of A
if self.param.add_channel:
A_tensor = self.A_tensor_all[:, :, index]
else:
A_tensor = self.A_tensor_all[:, index]
# Get the tensor of B
if self.param.ch_separate:
B_tensor = []
for i in range(0, 23):
if self.param.add_channel:
B_tensor_part = self.B_tensor_all[i][:, :, index]
else:
B_tensor_part = self.B_tensor_all[i][:, index]
B_tensor.append(B_tensor_part)
            # Return a list of tensors
else:
if self.param.add_channel:
B_tensor = self.B_tensor_all[:, :, index]
else:
B_tensor = self.B_tensor_all[:, index]
# Return a tensor
# Get the tensor of C
if self.param.add_channel:
C_tensor = self.C_tensor_all[:, :, index]
else:
C_tensor = self.C_tensor_all[:, index]
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
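# Indexing sketch (illustrative note, not part of the original file): with
# param.add_channel enabled, each omics tensor is stored as
# (1, feature_dim, sample_num), so sample i is the slice tensor_all[:, :, i];
# without the channel dimension it is simply tensor_all[:, i].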
| 13,033
| 48.748092
| 152
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/basic_dataset.py
|
"""
This module implements an abstract base class for datasets. Other datasets can be created from this base class.
"""
import torch.utils.data as data
from abc import ABC, abstractmethod
class BasicDataset(data.Dataset, ABC):
"""
This class is an abstract base class for datasets.
To create a subclass, you need to implement the following three functions:
-- <__init__>: initialize the class, first call BasicDataset.__init__(self, param).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point.
"""
def __init__(self, param):
"""
        Initialize the class and save the parameters in the class
"""
self.param = param
self.sample_list = None
@abstractmethod
def __len__(self):
"""Return the total number of samples in the dataset."""
return 0
@abstractmethod
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Parameters:
            index -- an integer for data indexing
Returns:
a dictionary of data with their names. It usually contains the data itself and its metadata information.
"""
pass
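# A minimal usage sketch (hypothetical; not part of the original file) showing
# how a concrete subclass only needs the three methods documented above.
if __name__ == '__main__':
    class _ToyDataset(BasicDataset):
        """Toy subclass used only for this demonstration."""
        def __init__(self, param):
            BasicDataset.__init__(self, param)
            self.sample_list = ['sample_1', 'sample_2']
        def __len__(self):
            return len(self.sample_list)
        def __getitem__(self, index):
            return {'sample_id': self.sample_list[index], 'index': index}
    demo = _ToyDataset(param=None)
    print(len(demo), demo[0])  # -> 2 {'sample_id': 'sample_1', 'index': 0}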
| 1,272
| 31.641026
| 116
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/ab_dataset.py
|
import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class ABDataset(BasicDataset):
"""
    A dataset class for the multi-omics dataset.
    For gene expression data, the file should be prepared as '/path/to/data/A.tsv'.
    For DNA methylation data, the file should be prepared as '/path/to/data/B.tsv'.
    In each omics file, each column should be a sample and each row should be a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
# Load data for A
A_df = load_file(param, 'A')
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = A_df.columns
# Get the feature list for A
if param.use_feature_lists:
feature_list_A_path = os.path.join(param.data_root, 'feature_list_A.tsv') # get the path of feature list
feature_list_A = np.loadtxt(feature_list_A_path, delimiter='\t', dtype='<U32')
else:
feature_list_A = A_df.index
A_df = A_df.loc[feature_list_A, self.sample_list]
self.A_dim = A_df.shape[0]
self.sample_num = A_df.shape[1]
A_array = A_df.values
if self.param.add_channel:
# Add one dimension for the channel
A_array = A_array[np.newaxis, :, :]
self.A_tensor_all = torch.Tensor(A_array)
self.omics_dims.append(self.A_dim)
# Load data for B
B_df = load_file(param, 'B')
# Get the feature list for B
if param.use_feature_lists:
feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv') # get the path of feature list
feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
else:
feature_list_B = B_df.index
B_df = B_df.loc[feature_list_B, self.sample_list]
if param.ch_separate:
B_df_list, self.B_dim = preprocess.separate_B(B_df)
self.B_tensor_all = []
for i in range(0, 23):
B_array = B_df_list[i].values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
B_tensor_part = torch.Tensor(B_array)
self.B_tensor_all.append(B_tensor_part)
else:
self.B_dim = B_df.shape[0]
B_array = B_df.values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
self.B_tensor_all = torch.Tensor(B_array)
self.omics_dims.append(self.B_dim)
self.class_num = 0
if param.downstream_task == 'classification':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
elif param.downstream_task == 'regression':
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
elif param.downstream_task == 'survival':
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
if param.stratify_label:
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
elif param.downstream_task == 'multitask':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
elif param.downstream_task == 'alltask':
# Load labels
self.labels_array = []
self.class_num = []
for i in range(param.task_num-2):
labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array.append(labels_df.iloc[:, -1].values)
# Get the class number
self.class_num.append(len(labels_df.iloc[:, -1].unique()))
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains A_tensor, B_tensor, C_tensor, label and index
            input_omics (list) -- a list of input omics tensors
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of A
if self.param.add_channel:
A_tensor = self.A_tensor_all[:, :, index]
else:
A_tensor = self.A_tensor_all[:, index]
# Get the tensor of B
if self.param.ch_separate:
B_tensor = []
for i in range(0, 23):
if self.param.add_channel:
B_tensor_part = self.B_tensor_all[i][:, :, index]
else:
B_tensor_part = self.B_tensor_all[i][:, index]
B_tensor.append(B_tensor_part)
            # Return a list of tensors
else:
if self.param.add_channel:
B_tensor = self.B_tensor_all[:, :, index]
else:
B_tensor = self.B_tensor_all[:, index]
# Return a tensor
        # Get the tensor of C
        # C is not loaded by this dataset; a zero placeholder keeps the
        # input_omics layout [A, B, C] consistent across dataset classes.
        C_tensor = 0
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
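# Note (illustrative, not part of the original file): when param.ch_separate
# is set, B is kept as a list of 23 tensors produced by preprocess.separate_B,
# and __getitem__ returns a matching list of 23 per-sample slices; otherwise
# B is a single tensor, mirroring the handling of A.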
| 12,076
| 49.112033
| 152
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/c_dataset.py
|
import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
import numpy as np
import pandas as pd
import torch
class CDataset(BasicDataset):
"""
    A dataset class for the miRNA expression dataset.
    The file should be prepared as '/path/to/data/C.tsv'.
    In each omics file, each column should be a sample and each row should be a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
self.omics_dims.append(None) # First dimension is for gene expression (A)
self.omics_dims.append(None) # Second dimension is for DNA methylation (B)
# Load data for C
C_df = load_file(param, 'C')
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = C_df.columns
# Get the feature list for C
if param.use_feature_lists:
feature_list_C_path = os.path.join(param.data_root, 'feature_list_C.tsv') # get the path of feature list
feature_list_C = np.loadtxt(feature_list_C_path, delimiter='\t', dtype='<U32')
else:
feature_list_C = C_df.index
C_df = C_df.loc[feature_list_C, self.sample_list]
self.C_dim = C_df.shape[0]
self.sample_num = C_df.shape[1]
C_array = C_df.values
if self.param.add_channel:
# Add one dimension for the channel
C_array = C_array[np.newaxis, :, :]
self.C_tensor_all = torch.Tensor(C_array)
self.omics_dims.append(self.C_dim)
self.class_num = 0
if param.downstream_task == 'classification':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
elif param.downstream_task == 'regression':
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
elif param.downstream_task == 'survival':
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
if param.stratify_label:
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
elif param.downstream_task == 'multitask':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
elif param.downstream_task == 'alltask':
# Load labels
self.labels_array = []
self.class_num = []
for i in range(param.task_num-2):
labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array.append(labels_df.iloc[:, -1].values)
# Get the class number
self.class_num.append(len(labels_df.iloc[:, -1].unique()))
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains C_tensor, label and index
            input_omics (list) -- a list of input omics tensors
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of C
if self.param.add_channel:
C_tensor = self.C_tensor_all[:, :, index]
else:
C_tensor = self.C_tensor_all[:, index]
        # Get the tensor of A
        # A is not loaded by this dataset; a zero placeholder keeps the
        # input_omics layout [A, B, C] consistent across dataset classes.
        A_tensor = 0
        # Get the tensor of B
        # B is not loaded either; the placeholder mirrors the ch_separate layout.
        if self.param.ch_separate:
            B_tensor = list(np.zeros(23))
        else:
            B_tensor = 0
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
| 10,372
| 50.098522
| 152
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/__init__.py
|
"""
This package handles data loading and data preprocessing
"""
import os
import torch
import importlib
import numpy as np
import pandas as pd
from util import util
from datasets.basic_dataset import BasicDataset
from datasets.dataloader_prefetch import DataLoaderPrefetch
from torch.utils.data import Subset
from sklearn.model_selection import train_test_split
def find_dataset_using_name(dataset_mode):
"""
    Get the dataset class of a certain mode
"""
dataset_filename = "datasets." + dataset_mode + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
    # Find the dataset class by name
dataset = None
# Change the name format to corresponding class name
target_dataset_name = dataset_mode.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BasicDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BasicDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def create_dataset(param):
"""
Create a dataset given the parameters.
"""
dataset_class = find_dataset_using_name(param.omics_mode)
# Get an instance of this dataset class
dataset = dataset_class(param)
print("Dataset [%s] was created" % type(dataset).__name__)
return dataset
class CustomDataLoader:
"""
    Create a dataloader for a certain dataset.
"""
def __init__(self, dataset, param, shuffle=True, enable_drop_last=False):
self.dataset = dataset
self.param = param
drop_last = False
if enable_drop_last:
if len(dataset) % param.batch_size < 3*len(param.gpu_ids):
drop_last = True
# Create dataloader for this dataset
self.dataloader = DataLoaderPrefetch(
dataset,
batch_size=param.batch_size,
shuffle=shuffle,
num_workers=int(param.num_threads),
drop_last=drop_last,
pin_memory=param.set_pin_memory
)
def __len__(self):
"""Return the number of data in the dataset"""
return len(self.dataset)
    def __iter__(self):
        """Yield batches of data"""
        for data in self.dataloader:
            yield data
def get_A_dim(self):
"""Return the dimension of first input omics data type"""
return self.dataset.A_dim
def get_B_dim(self):
"""Return the dimension of second input omics data type"""
return self.dataset.B_dim
def get_omics_dims(self):
"""Return a list of omics dimensions"""
return self.dataset.omics_dims
def get_class_num(self):
"""Return the number of classes for the downstream classification task"""
return self.dataset.class_num
def get_values_max(self):
"""Return the maximum target value of the dataset"""
return self.dataset.values_max
def get_values_min(self):
"""Return the minimum target value of the dataset"""
return self.dataset.values_min
def get_survival_T_max(self):
"""Return the maximum T of the dataset"""
return self.dataset.survival_T_max
def get_survival_T_min(self):
"""Return the minimum T of the dataset"""
return self.dataset.survival_T_min
def get_sample_list(self):
"""Return the sample list of the dataset"""
return self.dataset.sample_list
def create_single_dataloader(param, shuffle=True, enable_drop_last=False):
"""
Create a single dataloader
"""
dataset = create_dataset(param)
dataloader = CustomDataLoader(dataset, param, shuffle=shuffle, enable_drop_last=enable_drop_last)
sample_list = dataset.sample_list
return dataloader, sample_list
def create_separate_dataloader(param):
"""
Create set of dataloader (train, val, test).
"""
full_dataset = create_dataset(param)
full_size = len(full_dataset)
full_idx = np.arange(full_size)
if param.not_stratified:
train_idx, test_idx = train_test_split(full_idx,
test_size=param.test_ratio,
train_size=param.train_ratio,
shuffle=True)
else:
if param.downstream_task == 'classification':
targets = full_dataset.labels_array
elif param.downstream_task == 'survival':
targets = full_dataset.survival_E_array
if param.stratify_label:
targets = full_dataset.labels_array
elif param.downstream_task == 'multitask':
targets = full_dataset.labels_array
elif param.downstream_task == 'alltask':
targets = full_dataset.labels_array[0]
train_idx, test_idx = train_test_split(full_idx,
test_size=param.test_ratio,
train_size=param.train_ratio,
shuffle=True,
stratify=targets)
val_idx = list(set(full_idx) - set(train_idx) - set(test_idx))
train_dataset = Subset(full_dataset, train_idx)
val_dataset = Subset(full_dataset, val_idx)
test_dataset = Subset(full_dataset, test_idx)
full_dataloader = CustomDataLoader(full_dataset, param)
train_dataloader = CustomDataLoader(train_dataset, param, enable_drop_last=True)
val_dataloader = CustomDataLoader(val_dataset, param, shuffle=False)
test_dataloader = CustomDataLoader(test_dataset, param, shuffle=False)
return full_dataloader, train_dataloader, val_dataloader, test_dataloader
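# Split sketch (illustrative numbers, not part of the original file): with
# 100 samples, train_ratio=0.8 and test_ratio=0.1, train_test_split yields
# 80 train and 10 test indices, and the remaining 10 indices form the
# validation set via the set difference above.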
def load_file(param, file_name):
"""
Load data according to the format.
"""
if param.file_format == 'tsv':
file_path = os.path.join(param.data_root, file_name + '.tsv')
print('Loading data from ' + file_path)
df = pd.read_csv(file_path, sep='\t', header=0, index_col=0, na_filter=param.detect_na)
elif param.file_format == 'csv':
file_path = os.path.join(param.data_root, file_name + '.csv')
print('Loading data from ' + file_path)
df = pd.read_csv(file_path, header=0, index_col=0, na_filter=param.detect_na)
elif param.file_format == 'hdf':
file_path = os.path.join(param.data_root, file_name + '.h5')
print('Loading data from ' + file_path)
        # pd.read_hdf takes no header/index_col arguments; the index and the
        # columns are stored inside the HDF5 file itself.
        df = pd.read_hdf(file_path)
elif param.file_format == 'npy':
file_path = os.path.join(param.data_root, file_name + '.npy')
print('Loading data from ' + file_path)
values = np.load(file_path, allow_pickle=True)
features_path = os.path.join(param.data_root, file_name + '_features.npy')
print('Loading features from ' + features_path)
features = np.load(features_path, allow_pickle=True)
samples_path = os.path.join(param.data_root, file_name + '_samples.npy')
print('Loading samples from ' + samples_path)
samples = np.load(samples_path, allow_pickle=True)
df = pd.DataFrame(data=values, index=features, columns=samples)
else:
        raise NotImplementedError('File format %s is not supported' % param.file_format)
return df
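# Layout sketch for the 'npy' format (illustrative names, not part of the
# original file): loading omics 'A' expects three aligned arrays that are
# assembled into one features-by-samples DataFrame:
#   A.npy          -- 2-D value matrix (features x samples)
#   A_features.npy -- row index (feature names)
#   A_samples.npy  -- column index (sample IDs)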
def get_survival_y_true(param, T, E):
"""
Get y_true for survival prediction based on T and E
"""
# Get T_max
if param.survival_T_max == -1:
T_max = T.max()
else:
T_max = param.survival_T_max
# Get time points
time_points = util.get_time_points(T_max, param.time_num)
# Get the y_true
y_true = []
    for t, e in zip(T, E):
        y_true_i = np.zeros(param.time_num + 1)
        dist_to_time_points = [abs(t - point) for point in time_points[:-1]]
        time_index = np.argmin(dist_to_time_points)
        if e == 1:
            # uncensored data point: one-hot at the event time bin
            y_true_i[time_index] = 1
        else:
            # censored data point: ones from the censoring time bin onwards
            y_true_i[time_index:] = 1
        y_true.append(y_true_i)
y_true = torch.Tensor(y_true)
return y_true
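# Usage sketch (hypothetical values; not part of the original file): with
# time_num=4, an uncensored sample (e == 1) yields a one-hot row at its event
# time bin, while a censored sample (e == 0) yields ones from its bin onwards.
if __name__ == '__main__':
    from types import SimpleNamespace
    demo_param = SimpleNamespace(survival_T_max=-1, time_num=4)
    demo_T = np.array([100.0, 350.0])
    demo_E = np.array([1, 0])
    print(get_survival_y_true(demo_param, demo_T, demo_E))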
| 8,346
| 34.219409
| 177
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/dataloader_prefetch.py
|
from torch.utils.data import DataLoader
from prefetch_generator import BackgroundGenerator
class DataLoaderPrefetch(DataLoader):
def __iter__(self):
return BackgroundGenerator(super().__iter__())
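# Usage sketch (illustrative, not part of the original file): this class is a
# drop-in replacement for torch.utils.data.DataLoader that prefetches batches
# in a background thread, overlapping data loading with computation, e.g.
#   loader = DataLoaderPrefetch(my_dataset, batch_size=32, num_workers=4)
#   for batch in loader:
#       ...  # the next batch is already being prepared in the background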
| 210
| 25.375
| 54
|
py
|
SubOmiEmbed
|
SubOmiEmbed-main/datasets/b_dataset.py
|
import os.path
from datasets import load_file
from datasets import get_survival_y_true
from datasets.basic_dataset import BasicDataset
from util import preprocess
import numpy as np
import pandas as pd
import torch
class BDataset(BasicDataset):
"""
    A dataset class for the DNA methylation dataset.
    The DNA methylation data file should be prepared as '/path/to/data/B.tsv'.
    In each omics file, each column should be a sample and each row should be a molecular feature.
"""
def __init__(self, param):
"""
Initialize this dataset class.
"""
BasicDataset.__init__(self, param)
self.omics_dims = []
self.omics_dims.append(None) # First dimension is for gene expression (A)
# Load data for B
B_df = load_file(param, 'B')
# Get the sample list
if param.use_sample_list:
sample_list_path = os.path.join(param.data_root, 'sample_list.tsv') # get the path of sample list
            self.sample_list = np.loadtxt(sample_list_path, delimiter='\t', dtype='<U32')
else:
self.sample_list = B_df.columns
# Get the feature list for B
if param.use_feature_lists:
feature_list_B_path = os.path.join(param.data_root, 'feature_list_B.tsv') # get the path of feature list
feature_list_B = np.loadtxt(feature_list_B_path, delimiter='\t', dtype='<U32')
else:
feature_list_B = B_df.index
B_df = B_df.loc[feature_list_B, self.sample_list]
self.sample_num = B_df.shape[1]
if param.ch_separate:
B_df_list, self.B_dim = preprocess.separate_B(B_df)
self.B_tensor_all = []
for i in range(0, 23):
B_array = B_df_list[i].values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
B_tensor_part = torch.Tensor(B_array)
self.B_tensor_all.append(B_tensor_part)
else:
self.B_dim = B_df.shape[0]
B_array = B_df.values
if self.param.add_channel:
# Add one dimension for the channel
B_array = B_array[np.newaxis, :, :]
self.B_tensor_all = torch.Tensor(B_array)
self.omics_dims.append(self.B_dim)
self.class_num = 0
if param.downstream_task == 'classification':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
elif param.downstream_task == 'regression':
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
elif param.downstream_task == 'survival':
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
if param.stratify_label:
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
elif param.downstream_task == 'multitask':
# Load labels
labels_path = os.path.join(param.data_root, 'labels.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array = labels_df.iloc[:, -1].values
# Get the class number
self.class_num = len(labels_df.iloc[:, -1].unique())
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
elif param.downstream_task == 'alltask':
# Load labels
self.labels_array = []
self.class_num = []
for i in range(param.task_num-2):
labels_path = os.path.join(param.data_root, 'labels_'+str(i+1)+'.tsv') # get the path of the label
labels_df = pd.read_csv(labels_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.labels_array.append(labels_df.iloc[:, -1].values)
# Get the class number
self.class_num.append(len(labels_df.iloc[:, -1].unique()))
# Load target values
values_path = os.path.join(param.data_root, 'values.tsv') # get the path of the target value
values_df = pd.read_csv(values_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.values_array = values_df.iloc[:, -1].astype(float).values
self.values_max = self.values_array.max()
self.values_min = self.values_array.min()
# Load survival data
survival_path = os.path.join(param.data_root, 'survival.tsv') # get the path of the survival data
survival_df = pd.read_csv(survival_path, sep='\t', header=0, index_col=0).loc[self.sample_list, :]
self.survival_T_array = survival_df.iloc[:, -2].astype(float).values
self.survival_E_array = survival_df.iloc[:, -1].values
self.survival_T_max = self.survival_T_array.max()
self.survival_T_min = self.survival_T_array.min()
if param.survival_loss == 'MTLR':
self.y_true_tensor = get_survival_y_true(param, self.survival_T_array, self.survival_E_array)
def __getitem__(self, index):
"""
Return a data point and its metadata information.
Returns a dictionary that contains B_tensor, label and index
            input_omics (list) -- a list of input omics tensors
label (int) -- label of the sample
index (int) -- the index of this data point
"""
# Get the tensor of B
if self.param.ch_separate:
B_tensor = []
for i in range(0, 23):
if self.param.add_channel:
B_tensor_part = self.B_tensor_all[i][:, :, index]
else:
B_tensor_part = self.B_tensor_all[i][:, index]
B_tensor.append(B_tensor_part)
            # Return a list of tensors
else:
if self.param.add_channel:
B_tensor = self.B_tensor_all[:, :, index]
else:
B_tensor = self.B_tensor_all[:, index]
# Return a tensor
        # Get the tensor of A
        # A is not loaded by this dataset; a zero placeholder keeps the
        # input_omics layout [A, B, C] consistent across dataset classes.
        A_tensor = 0
        # Get the tensor of C
        # C is not loaded either; same placeholder convention.
        C_tensor = 0
if self.param.downstream_task == 'classification':
# Get label
label = self.labels_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'index': index}
elif self.param.downstream_task == 'regression':
# Get target value
value = self.values_array[index]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'value': value, 'index': index}
elif self.param.downstream_task == 'survival':
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'multitask':
# Get label
label = self.labels_array[index]
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
elif self.param.downstream_task == 'alltask':
# Get label
label = []
for i in range(self.param.task_num - 2):
label.append(self.labels_array[i][index])
# Get target value
value = self.values_array[index]
# Get survival T and E
survival_T = self.survival_T_array[index]
survival_E = self.survival_E_array[index]
y_true = self.y_true_tensor[index, :]
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'label': label, 'value': value,
'survival_T': survival_T, 'survival_E': survival_E, 'y_true': y_true, 'index': index}
else:
return {'input_omics': [A_tensor, B_tensor, C_tensor], 'index': index}
def __len__(self):
"""
Return the number of data points in the dataset.
"""
return self.sample_num
| 11,172
| 49.556561
| 152
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/default_config.py
|
from yacs.config import CfgNode as CN
def get_default_config():
cfg = CN()
# model
cfg.model = CN()
cfg.model.name = 'resnet50'
cfg.model.pretrained = True # automatically load pretrained model weights if available
cfg.model.load_weights = '' # path to model weights
cfg.model.resume = '' # path to checkpoint for resume training
# data
cfg.data = CN()
cfg.data.type = 'image'
cfg.data.root = 'reid-data'
cfg.data.sources = ['market1501']
cfg.data.targets = ['market1501']
cfg.data.workers = 4 # number of data loading workers
cfg.data.split_id = 0 # split index
cfg.data.height = 256 # image height
cfg.data.width = 128 # image width
cfg.data.combineall = False # combine train, query and gallery for training
cfg.data.transforms = ['random_flip'] # data augmentation
cfg.data.k_tfm = 1 # number of times to apply augmentation to an image independently
cfg.data.norm_mean = [0.485, 0.456, 0.406] # default is imagenet mean
cfg.data.norm_std = [0.229, 0.224, 0.225] # default is imagenet std
cfg.data.save_dir = 'log' # path to save log
cfg.data.load_train_targets = False # load training set from target dataset
# specific datasets
cfg.market1501 = CN()
cfg.market1501.use_500k_distractors = False # add 500k distractors to the gallery set for market1501
cfg.cuhk03 = CN()
cfg.cuhk03.labeled_images = False # use labeled images, if False, use detected images
cfg.cuhk03.classic_split = False # use classic split by Li et al. CVPR14
cfg.cuhk03.use_metric_cuhk03 = False # use cuhk03's metric for evaluation
# sampler
cfg.sampler = CN()
cfg.sampler.train_sampler = 'RandomSampler' # sampler for source train loader
cfg.sampler.train_sampler_t = 'RandomSampler' # sampler for target train loader
cfg.sampler.num_instances = 4 # number of instances per identity for RandomIdentitySampler
cfg.sampler.num_cams = 1 # number of cameras to sample in a batch (for RandomDomainSampler)
# video reid setting
cfg.video = CN()
cfg.video.seq_len = 15 # number of images to sample in a tracklet
cfg.video.sample_method = 'evenly' # how to sample images from a tracklet
cfg.video.pooling_method = 'avg' # how to pool features over a tracklet
# train
cfg.train = CN()
cfg.train.optim = 'adam'
cfg.train.lr = 0.0003
cfg.train.weight_decay = 5e-4
cfg.train.max_epoch = 60
cfg.train.start_epoch = 0
cfg.train.batch_size = 32
cfg.train.fixbase_epoch = 0 # number of epochs to fix base layers
cfg.train.open_layers = [
'classifier'
] # layers for training while keeping others frozen
cfg.train.staged_lr = False # set different lr to different layers
cfg.train.new_layers = ['classifier'] # newly added layers with default lr
cfg.train.base_lr_mult = 0.1 # learning rate multiplier for base layers
cfg.train.lr_scheduler = 'single_step'
cfg.train.stepsize = [20] # stepsize to decay learning rate
cfg.train.gamma = 0.1 # learning rate decay multiplier
cfg.train.print_freq = 20 # print frequency
cfg.train.seed = 1 # random seed
# optimizer
cfg.sgd = CN()
cfg.sgd.momentum = 0.9 # momentum factor for sgd and rmsprop
cfg.sgd.dampening = 0. # dampening for momentum
cfg.sgd.nesterov = False # Nesterov momentum
cfg.rmsprop = CN()
cfg.rmsprop.alpha = 0.99 # smoothing constant
cfg.adam = CN()
cfg.adam.beta1 = 0.9 # exponential decay rate for first moment
cfg.adam.beta2 = 0.999 # exponential decay rate for second moment
# loss
cfg.loss = CN()
cfg.loss.name = 'softmax'
cfg.loss.softmax = CN()
cfg.loss.softmax.label_smooth = True # use label smoothing regularizer
cfg.loss.triplet = CN()
cfg.loss.triplet.margin = 0.3 # distance margin
cfg.loss.triplet.weight_t = 1. # weight to balance hard triplet loss
cfg.loss.triplet.weight_x = 0. # weight to balance cross entropy loss
# test
cfg.test = CN()
cfg.test.batch_size = 100
cfg.test.dist_metric = 'euclidean' # distance metric, ['euclidean', 'cosine']
cfg.test.normalize_feature = False # normalize feature vectors before computing distance
cfg.test.ranks = [1, 5, 10, 20] # cmc ranks
cfg.test.evaluate = False # test only
cfg.test.eval_freq = -1 # evaluation frequency (-1 means to only test after training)
cfg.test.start_eval = 0 # start to evaluate after a specific epoch
cfg.test.rerank = False # use person re-ranking
cfg.test.visrank = False # visualize ranked results (only available when cfg.test.evaluate=True)
cfg.test.visrank_topk = 10 # top-k ranks to visualize
return cfg
def imagedata_kwargs(cfg):
return {
'root': cfg.data.root,
'sources': cfg.data.sources,
'targets': cfg.data.targets,
'height': cfg.data.height,
'width': cfg.data.width,
'transforms': cfg.data.transforms,
'k_tfm': cfg.data.k_tfm,
'norm_mean': cfg.data.norm_mean,
'norm_std': cfg.data.norm_std,
'use_gpu': cfg.use_gpu,
'split_id': cfg.data.split_id,
'combineall': cfg.data.combineall,
'load_train_targets': cfg.data.load_train_targets,
'batch_size_train': cfg.train.batch_size,
'batch_size_test': cfg.test.batch_size,
'workers': cfg.data.workers,
'num_instances': cfg.sampler.num_instances,
'num_cams': cfg.sampler.num_cams,
'train_sampler': cfg.sampler.train_sampler,
'train_sampler_t': cfg.sampler.train_sampler_t,
# image
'cuhk03_labeled': cfg.cuhk03.labeled_images,
'cuhk03_classic_split': cfg.cuhk03.classic_split,
'market1501_500k': cfg.market1501.use_500k_distractors,
}
def videodata_kwargs(cfg):
return {
'root': cfg.data.root,
'sources': cfg.data.sources,
'targets': cfg.data.targets,
'height': cfg.data.height,
'width': cfg.data.width,
'transforms': cfg.data.transforms,
'norm_mean': cfg.data.norm_mean,
'norm_std': cfg.data.norm_std,
'use_gpu': cfg.use_gpu,
'split_id': cfg.data.split_id,
'combineall': cfg.data.combineall,
'batch_size_train': cfg.train.batch_size,
'batch_size_test': cfg.test.batch_size,
'workers': cfg.data.workers,
'num_instances': cfg.sampler.num_instances,
'num_cams': cfg.sampler.num_cams,
'train_sampler': cfg.sampler.train_sampler,
# video
'seq_len': cfg.video.seq_len,
'sample_method': cfg.video.sample_method
}
def optimizer_kwargs(cfg):
return {
'optim': cfg.train.optim,
'lr': cfg.train.lr,
'weight_decay': cfg.train.weight_decay,
'momentum': cfg.sgd.momentum,
'sgd_dampening': cfg.sgd.dampening,
'sgd_nesterov': cfg.sgd.nesterov,
'rmsprop_alpha': cfg.rmsprop.alpha,
'adam_beta1': cfg.adam.beta1,
'adam_beta2': cfg.adam.beta2,
'staged_lr': cfg.train.staged_lr,
'new_layers': cfg.train.new_layers,
'base_lr_mult': cfg.train.base_lr_mult
}
def lr_scheduler_kwargs(cfg):
return {
'lr_scheduler': cfg.train.lr_scheduler,
'stepsize': cfg.train.stepsize,
'gamma': cfg.train.gamma,
'max_epoch': cfg.train.max_epoch
}
def engine_run_kwargs(cfg):
return {
'save_dir': cfg.data.save_dir,
'max_epoch': cfg.train.max_epoch,
'start_epoch': cfg.train.start_epoch,
'fixbase_epoch': cfg.train.fixbase_epoch,
'open_layers': cfg.train.open_layers,
'start_eval': cfg.test.start_eval,
'eval_freq': cfg.test.eval_freq,
'test_only': cfg.test.evaluate,
'print_freq': cfg.train.print_freq,
'dist_metric': cfg.test.dist_metric,
'normalize_feature': cfg.test.normalize_feature,
'visrank': cfg.test.visrank,
'visrank_topk': cfg.test.visrank_topk,
'use_metric_cuhk03': cfg.cuhk03.use_metric_cuhk03,
'ranks': cfg.test.ranks,
'rerank': cfg.test.rerank
}
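# Usage sketch (hypothetical override values; not part of the original file):
# build the default config and override options the same way main.py does.
if __name__ == '__main__':
    cfg = get_default_config()
    cfg.use_gpu = False  # main.py normally derives this from torch.cuda
    cfg.merge_from_list(['train.max_epoch', '1', 'train.batch_size', '8'])
    print(cfg.train.max_epoch, cfg.train.batch_size)  # -> 1 8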
| 8,128
| 37.709524
| 104
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/main.py
|
import sys
import time
import os.path as osp
import argparse
import torch
import torch.nn as nn
import torchreid
from torchreid.utils import (
Logger, check_isfile, set_random_seed, collect_env_info,
resume_from_checkpoint, load_pretrained_weights, compute_model_complexity
)
from default_config import (
imagedata_kwargs, optimizer_kwargs, videodata_kwargs, engine_run_kwargs,
get_default_config, lr_scheduler_kwargs
)
from models.resnet_ms import resnet50_fc512, resnet50_fc512_ms12_a0d2, resnet50_fc512_ms12_a0d1, resnet50_fc512_ms12_a0d3
from models.resnet_ms import resnet50_fc512_ms1_a0d1, resnet50_fc512_ms123_a0d1, resnet50_fc512_ms1234_a0d1, resnet50_fc512_ms23_a0d1, resnet50_fc512_ms14_a0d1
from models.resnet_ms2 import resnet50_fc512_ms12_a0d1_domprior
from models.resnet_db import resnet50_fc512_db12
from models.osnet_ms import osnet_x1_0, osnet_x1_0_ms23_a0d1, osnet_x1_0_ms23_a0d2, osnet_x1_0_ms23_a0d3
from models.osnet_ms2 import osnet_x1_0_ms23_a0d1_domprior
from models.osnet_db import osnet_x1_0_db23
def build_datamanager(cfg):
if cfg.data.type == 'image':
return torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))
else:
return torchreid.data.VideoDataManager(**videodata_kwargs(cfg))
def build_engine(cfg, datamanager, model, optimizer, scheduler):
if cfg.data.type == 'image':
if cfg.loss.name == 'softmax':
engine = torchreid.engine.ImageSoftmaxEngine(
datamanager,
model,
optimizer=optimizer,
scheduler=scheduler,
use_gpu=cfg.use_gpu,
label_smooth=cfg.loss.softmax.label_smooth
)
else:
engine = torchreid.engine.ImageTripletEngine(
datamanager,
model,
optimizer=optimizer,
margin=cfg.loss.triplet.margin,
weight_t=cfg.loss.triplet.weight_t,
weight_x=cfg.loss.triplet.weight_x,
scheduler=scheduler,
use_gpu=cfg.use_gpu,
label_smooth=cfg.loss.softmax.label_smooth
)
else:
if cfg.loss.name == 'softmax':
engine = torchreid.engine.VideoSoftmaxEngine(
datamanager,
model,
optimizer=optimizer,
scheduler=scheduler,
use_gpu=cfg.use_gpu,
label_smooth=cfg.loss.softmax.label_smooth,
pooling_method=cfg.video.pooling_method
)
else:
engine = torchreid.engine.VideoTripletEngine(
datamanager,
model,
optimizer=optimizer,
margin=cfg.loss.triplet.margin,
weight_t=cfg.loss.triplet.weight_t,
weight_x=cfg.loss.triplet.weight_x,
scheduler=scheduler,
use_gpu=cfg.use_gpu,
label_smooth=cfg.loss.softmax.label_smooth
)
return engine
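# Selection sketch (illustrative note, not part of the original file): the
# engine is picked from (cfg.data.type, cfg.loss.name); any non-'image' data
# type falls through to the video engines:
#   ('image', 'softmax') -> ImageSoftmaxEngine
#   ('image', other)     -> ImageTripletEngine
#   ('video', 'softmax') -> VideoSoftmaxEngine
#   ('video', other)     -> VideoTripletEngine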
def reset_config(cfg, args):
if args.root:
cfg.data.root = args.root
if args.sources:
cfg.data.sources = args.sources
if args.targets:
cfg.data.targets = args.targets
if args.transforms:
cfg.data.transforms = args.transforms
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--config-file', type=str, default='', help='path to config file'
)
parser.add_argument(
'-s',
'--sources',
type=str,
nargs='+',
help='source datasets (delimited by space)'
)
parser.add_argument(
'-t',
'--targets',
type=str,
nargs='+',
help='target datasets (delimited by space)'
)
parser.add_argument(
'--transforms', type=str, nargs='+', help='data augmentation'
)
parser.add_argument(
'--root', type=str, default='', help='path to data root'
)
parser.add_argument(
'opts',
default=None,
nargs=argparse.REMAINDER,
help='Modify config options using the command-line'
)
args = parser.parse_args()
cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
if args.config_file:
cfg.merge_from_file(args.config_file)
reset_config(cfg, args)
cfg.merge_from_list(args.opts)
set_random_seed(cfg.train.seed)
log_name = 'test.log' if cfg.test.evaluate else 'train.log'
log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
print('Show configuration\n{}\n'.format(cfg))
print('Collecting env info ...')
print('** System info **\n{}\n'.format(collect_env_info()))
if cfg.use_gpu:
torch.backends.cudnn.benchmark = True
datamanager = build_datamanager(cfg)
model_factory = {
'resnet50_fc512': resnet50_fc512,
'osnet_x1_0': osnet_x1_0,
# mixstyle models
'resnet50_fc512_ms12_a0d1': resnet50_fc512_ms12_a0d1,
'resnet50_fc512_ms12_a0d2': resnet50_fc512_ms12_a0d2,
'resnet50_fc512_ms12_a0d3': resnet50_fc512_ms12_a0d3,
'resnet50_fc512_ms12_a0d1_domprior': resnet50_fc512_ms12_a0d1_domprior,
'osnet_x1_0_ms23_a0d1': osnet_x1_0_ms23_a0d1,
'osnet_x1_0_ms23_a0d2': osnet_x1_0_ms23_a0d2,
'osnet_x1_0_ms23_a0d3': osnet_x1_0_ms23_a0d3,
'osnet_x1_0_ms23_a0d1_domprior': osnet_x1_0_ms23_a0d1_domprior,
# ablation
'resnet50_fc512_ms1_a0d1': resnet50_fc512_ms1_a0d1,
'resnet50_fc512_ms123_a0d1': resnet50_fc512_ms123_a0d1,
'resnet50_fc512_ms1234_a0d1': resnet50_fc512_ms1234_a0d1,
'resnet50_fc512_ms14_a0d1': resnet50_fc512_ms14_a0d1,
'resnet50_fc512_ms23_a0d1': resnet50_fc512_ms23_a0d1,
# dropblock models
'resnet50_fc512_db12': resnet50_fc512_db12,
'osnet_x1_0_db23': osnet_x1_0_db23
}
print('Building model: {}'.format(cfg.model.name))
model = model_factory[cfg.model.name](
num_classes=datamanager.num_train_pids,
loss=cfg.loss.name,
pretrained=cfg.model.pretrained,
use_gpu=cfg.use_gpu
)
num_params, flops = compute_model_complexity(
model, (1, 3, cfg.data.height, cfg.data.width)
)
print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))
if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
load_pretrained_weights(model, cfg.model.load_weights)
if cfg.use_gpu:
model = nn.DataParallel(model).cuda()
optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
scheduler = torchreid.optim.build_lr_scheduler(
optimizer, **lr_scheduler_kwargs(cfg)
)
if cfg.model.resume and check_isfile(cfg.model.resume):
cfg.train.start_epoch = resume_from_checkpoint(
cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
)
print(
'Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type)
)
engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
engine.run(**engine_run_kwargs(cfg))
if __name__ == '__main__':
main()
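# Example invocation (illustrative paths and options; adjust to your setup):
#   python main.py --config-file <your_config>.yaml \
#       -s market1501 -t dukemtmcreid --transforms random_flip \
#       model.name resnet50_fc512_ms12_a0d1 train.max_epoch 60
# Trailing KEY VALUE pairs are consumed by cfg.merge_from_list via 'opts'.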
| 7,293
| 32.925581
| 159
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/osnet_db.py
|
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .dropblock import DropBlock2D, LinearScheduler
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
'osnet_x0_75':
'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
'osnet_x0_5':
'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
'osnet_x0_25':
'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
'osnet_ibn_x1_0':
'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
        inp = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
        return inp * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
self,
in_channels,
out_channels,
IN=False,
bottleneck_reduction=4,
**kwargs
):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2a = LightConv3x3(mid_channels, mid_channels)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = None
if IN:
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
if self.IN is not None:
out = self.IN(out)
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. TPAMI, 2021.
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
IN=False,
dropblock_layers=[],
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0],
layers[0],
channels[0],
channels[1],
reduce_spatial_size=True,
IN=IN
)
self.conv3 = self._make_layer(
blocks[1],
layers[1],
channels[1],
channels[2],
reduce_spatial_size=True
)
self.conv4 = self._make_layer(
blocks[2],
layers[2],
channels[2],
channels[3],
reduce_spatial_size=False
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.dropblock = None
if dropblock_layers:
self.dropblock = LinearScheduler(
DropBlock2D(drop_prob=0.1, block_size=7),
start_value=0.,
stop_value=0.1,
nr_steps=100
)
            print('Insert DropBlock after the following layers: {}'.format(dropblock_layers))
self.dropblock_layers = dropblock_layers
self._init_params()
def _make_layer(
self,
block,
layer,
in_channels,
out_channels,
reduce_spatial_size,
IN=False
):
layers = []
layers.append(block(in_channels, out_channels, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels),
nn.AvgPool2d(2, stride=2)
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        if fc_dims is None or (isinstance(fc_dims, int) and fc_dims < 0):
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
if self.dropblock is not None:
self.dropblock.step()
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
if 'conv2' in self.dropblock_layers:
x = self.dropblock(x)
x = self.conv3(x)
if 'conv3' in self.dropblock_layers:
x = self.dropblock(x)
x = self.conv4(x)
if 'conv4' in self.dropblock_layers:
x = self.dropblock(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# medium size (width x0.75)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[48, 192, 288, 384],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_75')
return model
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# tiny size (width x0.5)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[32, 128, 192, 256],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_5')
return model
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# very tiny size (width x0.25)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[16, 64, 96, 128],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_25')
return model
def osnet_ibn_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
# standard size (width x1.0) + IBN layer
# Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ibn_x1_0')
return model
"""DropBlock models"""
def osnet_x1_0_db23(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
dropblock_layers=['conv2', 'conv3'],
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
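# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file).
# It shows one plausible way to exercise the DropBlock variant defined
# above; pretrained=False skips the Google Drive download. Note that
# featuremaps() calls self.dropblock.step() on every forward pass, so the
# LinearScheduler ramps drop_prob from 0.0 to 0.1 over the first 100
# forward calls.
if __name__ == '__main__':
    import torch
    model = osnet_x1_0_db23(num_classes=751, pretrained=False)
    model.train()
    x = torch.randn(4, 3, 256, 128)  # a typical person re-id input size
    logits = model(x)                # (4, 751) under the default softmax loss
    model.eval()
    with torch.no_grad():
        feats = model(x)             # (4, 512) feature vectors at test time
    print(logits.shape, feats.shape)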
| 18,266
| 27.676609
| 108
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/resnet_db.py
|
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .dropblock import DropBlock2D, LinearScheduler
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
model_urls = {
'resnet18':
'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34':
'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50':
'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101':
'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152':
'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d':
'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d':
'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64'
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width/64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
Public keys:
- ``resnet18``: ResNet18.
- ``resnet34``: ResNet34.
- ``resnet50``: ResNet50.
- ``resnet101``: ResNet101.
- ``resnet152``: ResNet152.
- ``resnext50_32x4d``: ResNeXt50.
- ``resnext101_32x8d``: ResNeXt101.
- ``resnet50_fc512``: ResNet50 + FC.
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
last_stride=2,
fc_dims=None,
dropout_p=None,
dropblock_layers=[],
**kwargs
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.loss = loss
self.feature_dim = 512 * block.expansion
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".
format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block,
512,
layers[3],
stride=last_stride,
dilate=replace_stride_with_dilation[2]
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, 512 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.dropblock = None
if dropblock_layers:
self.dropblock = LinearScheduler(
DropBlock2D(drop_prob=0.1, block_size=7),
start_value=0.,
stop_value=0.1,
nr_steps=100
)
            print('Insert DropBlock after the following layers: {}'.format(dropblock_layers))
self.dropblock_layers = dropblock_layers
self._init_params()
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
if self.dropblock is not None:
self.dropblock.step()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if 'layer1' in self.dropblock_layers:
x = self.dropblock(x)
x = self.layer2(x)
if 'layer2' in self.dropblock_layers:
x = self.dropblock(x)
x = self.layer3(x)
if 'layer3' in self.dropblock_layers:
x = self.dropblock(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""
ResNet + FC
"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
"""DropBlock models"""
def resnet50_fc512_db12(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
dropblock_layers=['layer1', 'layer2'],
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
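# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file).
# resnet50_fc512_db12 shares one DropBlock scheduler between layer1 and
# layer2. With pretrained=True, init_pretrained_weights keeps any layer
# whose name or size is absent from the torchvision checkpoint (here the
# fc and classifier heads) at its random initialisation.
if __name__ == '__main__':
    import torch
    model = resnet50_fc512_db12(num_classes=751, pretrained=False)
    model.train()
    y = model(torch.randn(2, 3, 256, 128))   # logits, shape (2, 751)
    model.eval()
    v = model(torch.randn(2, 3, 256, 128))   # 512-d features, shape (2, 512)
    print(y.shape, v.shape)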
| 16,436
| 27.685864
| 106
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/osnet_ms.py
|
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
'osnet_x0_75':
'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
'osnet_x0_5':
'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
'osnet_x0_25':
'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
'osnet_ibn_x1_0':
'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
        inp = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
        return inp * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
self,
in_channels,
out_channels,
IN=False,
bottleneck_reduction=4,
**kwargs
):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2a = LightConv3x3(mid_channels, mid_channels)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = None
if IN:
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
if self.IN is not None:
out = self.IN(out)
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. TPAMI, 2021.
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
IN=False,
mixstyle_layers=[],
mixstyle_p=0.5,
mixstyle_alpha=0.3,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0],
layers[0],
channels[0],
channels[1],
reduce_spatial_size=True,
IN=IN
)
self.conv3 = self._make_layer(
blocks[1],
layers[1],
channels[1],
channels[2],
reduce_spatial_size=True
)
self.conv4 = self._make_layer(
blocks[2],
layers[2],
channels[2],
channels[3],
reduce_spatial_size=False
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.mixstyle = None
if mixstyle_layers:
self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='random')
print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
self.mixstyle_layers = mixstyle_layers
self._init_params()
def _make_layer(
self,
block,
layer,
in_channels,
out_channels,
reduce_spatial_size,
IN=False
):
layers = []
layers.append(block(in_channels, out_channels, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels),
nn.AvgPool2d(2, stride=2)
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
        if fc_dims is None or (isinstance(fc_dims, int) and fc_dims < 0):
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
if 'conv2' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv3(x)
if 'conv3' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv4(x)
if 'conv4' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# medium size (width x0.75)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[48, 192, 288, 384],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_75')
return model
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# tiny size (width x0.5)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[32, 128, 192, 256],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_5')
return model
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# very tiny size (width x0.25)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[16, 64, 96, 128],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_25')
return model
def osnet_ibn_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
# standard size (width x1.0) + IBN layer
# Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ibn_x1_0')
return model
"""
MixStyle models
"""
def osnet_x1_0_ms23_a0d1(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
mixstyle_layers=['conv2', 'conv3'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x1_0_ms23_a0d2(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
mixstyle_layers=['conv2', 'conv3'],
mixstyle_alpha=0.2,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x1_0_ms23_a0d3(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
mixstyle_layers=['conv2', 'conv3'],
mixstyle_alpha=0.3,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
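# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file).
# The suffix in osnet_x1_0_ms23_a0dX encodes the insertion points (after
# conv2 and conv3) and the Beta-distribution alpha (0.1/0.2/0.3). MixStyle
# fires only in training mode, and then with probability p=0.5 per forward,
# so evaluation outputs are deterministic.
if __name__ == '__main__':
    import torch
    model = osnet_x1_0_ms23_a0d1(num_classes=751, pretrained=False)
    model.train()
    logits = model(torch.randn(8, 3, 256, 128))  # styles mixed within the batch
    model.eval()
    feats = model(torch.randn(8, 3, 256, 128))   # MixStyle is a no-op here
    print(logits.shape, feats.shape)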
| 19,075
| 27.5142
| 108
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/resnet_ms.py
|
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
model_urls = {
'resnet18':
'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34':
'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50':
'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101':
'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152':
'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d':
'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d':
'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64'
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width/64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
Public keys:
- ``resnet18``: ResNet18.
- ``resnet34``: ResNet34.
- ``resnet50``: ResNet50.
- ``resnet101``: ResNet101.
- ``resnet152``: ResNet152.
- ``resnext50_32x4d``: ResNeXt50.
- ``resnext101_32x8d``: ResNeXt101.
- ``resnet50_fc512``: ResNet50 + FC.
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
last_stride=2,
fc_dims=None,
dropout_p=None,
mixstyle_layers=[],
mixstyle_p=0.5,
mixstyle_alpha=0.3,
**kwargs
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.loss = loss
self.feature_dim = 512 * block.expansion
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".
format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block,
512,
layers[3],
stride=last_stride,
dilate=replace_stride_with_dilation[2]
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, 512 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.mixstyle = None
if mixstyle_layers:
self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='random')
print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
self.mixstyle_layers = mixstyle_layers
self._init_params()
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if 'layer1' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer2(x)
if 'layer2' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer3(x)
if 'layer3' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer4(x)
if 'layer4' in self.mixstyle_layers:
x = self.mixstyle(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""
ResNet + FC
"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
"""MixStyle models"""
def resnet50_fc512_ms12_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms12_a0d2(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.2,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms12_a0d3(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.3,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
# more variants in which mixstyle is applied to different layers
def resnet50_fc512_ms1_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms123_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2', 'layer3'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms1234_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2', 'layer3', 'layer4'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms23_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer2', 'layer3'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet50_fc512_ms14_a0d1(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer4'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
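# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file).
# Besides train()/eval(), MixStyle can be toggled explicitly through the
# helpers in .mixstyle, which is handy for ablating it on a model that
# stays in training mode. The relative import assumes this module is run
# in its package context (e.g. python -m reid.models.resnet_ms).
if __name__ == '__main__':
    import torch
    from .mixstyle import run_without_mixstyle
    model = resnet50_fc512_ms12_a0d1(num_classes=751, pretrained=False)
    model.train()
    x = torch.randn(8, 3, 256, 128)
    y_mixed = model(x)              # MixStyle active after layer1 and layer2
    with run_without_mixstyle(model):
        y_plain = model(x)          # same forward pass, MixStyle disabled
    print(y_mixed.shape, y_plain.shape)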
| 19,818
| 27.516547
| 106
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/mixstyle.py
|
import random
from contextlib import contextmanager
import torch
import torch.nn as nn
def deactivate_mixstyle(m):
if type(m) == MixStyle:
m.set_activation_status(False)
def activate_mixstyle(m):
if type(m) == MixStyle:
m.set_activation_status(True)
def random_mixstyle(m):
if type(m) == MixStyle:
m.update_mix_method('random')
def crossdomain_mixstyle(m):
if type(m) == MixStyle:
m.update_mix_method('crossdomain')
@contextmanager
def run_without_mixstyle(model):
# Assume MixStyle was initially activated
try:
model.apply(deactivate_mixstyle)
yield
finally:
model.apply(activate_mixstyle)
@contextmanager
def run_with_mixstyle(model, mix=None):
# Assume MixStyle was initially deactivated
if mix == 'random':
model.apply(random_mixstyle)
elif mix == 'crossdomain':
model.apply(crossdomain_mixstyle)
try:
model.apply(activate_mixstyle)
yield
finally:
model.apply(deactivate_mixstyle)
class MixStyle(nn.Module):
"""MixStyle.
Reference:
Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
"""
def __init__(self, p=0.5, alpha=0.1, eps=1e-6, mix='random'):
"""
Args:
p (float): probability of using MixStyle.
alpha (float): parameter of the Beta distribution.
eps (float): scaling parameter to avoid numerical issues.
mix (str): how to mix.
"""
super().__init__()
self.p = p
self.beta = torch.distributions.Beta(alpha, alpha)
self.eps = eps
self.alpha = alpha
self.mix = mix
self._activated = True
def __repr__(self):
return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps}, mix={self.mix})'
def set_activation_status(self, status=True):
self._activated = status
def update_mix_method(self, mix='random'):
self.mix = mix
def forward(self, x):
if not self.training or not self._activated:
return x
if random.random() > self.p:
return x
B = x.size(0)
mu = x.mean(dim=[2, 3], keepdim=True)
var = x.var(dim=[2, 3], keepdim=True)
sig = (var + self.eps).sqrt()
mu, sig = mu.detach(), sig.detach()
x_normed = (x-mu) / sig
lmda = self.beta.sample((B, 1, 1, 1))
lmda = lmda.to(x.device)
if self.mix == 'random':
# random shuffle
perm = torch.randperm(B)
elif self.mix == 'crossdomain':
# split into two halves and swap the order
perm = torch.arange(B - 1, -1, -1) # inverse index
perm_b, perm_a = perm.chunk(2)
perm_b = perm_b[torch.randperm(B // 2)]
perm_a = perm_a[torch.randperm(B // 2)]
perm = torch.cat([perm_b, perm_a], 0)
else:
raise NotImplementedError
mu2, sig2 = mu[perm], sig[perm]
mu_mix = mu*lmda + mu2 * (1-lmda)
sig_mix = sig*lmda + sig2 * (1-lmda)
return x_normed*sig_mix + mu_mix
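# ----------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original file).
# MixStyle normalises each instance by its own channel statistics, then
# re-applies a convex combination of its own (mu, sig) and those of a
# permuted partner, with the mixing weight drawn from Beta(alpha, alpha);
# feature shape is preserved and the layer is the identity outside training.
if __name__ == '__main__':
    import torch
    torch.manual_seed(0)
    ms = MixStyle(p=1.0, alpha=0.1, mix='random')  # p=1.0 so mixing always fires
    ms.train()
    x = torch.randn(4, 16, 8, 8)
    out = ms(x)
    print(out.shape)                 # torch.Size([4, 16, 8, 8]): shape preserved
    ms.eval()
    assert torch.equal(ms(x), x)     # identity in eval mode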
| 3,127
| 24.430894
| 90
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/osnet_ms2.py
|
from __future__ import division, absolute_import
import warnings
import torch
from torch import nn
from torch.nn import functional as F
from .mixstyle import MixStyle
__all__ = [
'osnet_x1_0', 'osnet_x0_75', 'osnet_x0_5', 'osnet_x0_25', 'osnet_ibn_x1_0'
]
pretrained_urls = {
'osnet_x1_0':
'https://drive.google.com/uc?id=1LaG1EJpHrxdAxKnSCJ_i0u-nbxSAeiFY',
'osnet_x0_75':
'https://drive.google.com/uc?id=1uwA9fElHOk3ZogwbeY5GkLI6QPTX70Hq',
'osnet_x0_5':
'https://drive.google.com/uc?id=16DGLbZukvVYgINws8u8deSaOqjybZ83i',
'osnet_x0_25':
'https://drive.google.com/uc?id=1rb8UN5ZzPKRc_xvtHlyDh-cSz88YX9hs',
'osnet_ibn_x1_0':
'https://drive.google.com/uc?id=1sr90V6irlYYDd4_4ISU2iruoRG8J__6l'
}
##########
# Basic layers
##########
class ConvLayer(nn.Module):
"""Convolution layer (conv + bn + relu)."""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
groups=1,
IN=False
):
super(ConvLayer, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
bias=False,
groups=groups
)
if IN:
self.bn = nn.InstanceNorm2d(out_channels, affine=True)
else:
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1(nn.Module):
"""1x1 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv1x1, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
1,
stride=stride,
padding=0,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Conv1x1Linear(nn.Module):
"""1x1 convolution + bn (w/o non-linearity)."""
def __init__(self, in_channels, out_channels, stride=1):
super(Conv1x1Linear, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, 1, stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Conv3x3(nn.Module):
"""3x3 convolution + bn + relu."""
def __init__(self, in_channels, out_channels, stride=1, groups=1):
super(Conv3x3, self).__init__()
self.conv = nn.Conv2d(
in_channels,
out_channels,
3,
stride=stride,
padding=1,
bias=False,
groups=groups
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class LightConv3x3(nn.Module):
"""Lightweight 3x3 convolution.
1x1 (linear) + dw 3x3 (nonlinear).
"""
def __init__(self, in_channels, out_channels):
super(LightConv3x3, self).__init__()
self.conv1 = nn.Conv2d(
in_channels, out_channels, 1, stride=1, padding=0, bias=False
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
3,
stride=1,
padding=1,
bias=False,
groups=out_channels
)
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.bn(x)
x = self.relu(x)
return x
##########
# Building blocks for omni-scale feature learning
##########
class ChannelGate(nn.Module):
"""A mini-network that generates channel-wise gates conditioned on input tensor."""
def __init__(
self,
in_channels,
num_gates=None,
return_gates=False,
gate_activation='sigmoid',
reduction=16,
layer_norm=False
):
super(ChannelGate, self).__init__()
if num_gates is None:
num_gates = in_channels
self.return_gates = return_gates
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(
in_channels,
in_channels // reduction,
kernel_size=1,
bias=True,
padding=0
)
self.norm1 = None
if layer_norm:
self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(
in_channels // reduction,
num_gates,
kernel_size=1,
bias=True,
padding=0
)
if gate_activation == 'sigmoid':
self.gate_activation = nn.Sigmoid()
elif gate_activation == 'relu':
self.gate_activation = nn.ReLU(inplace=True)
elif gate_activation == 'linear':
self.gate_activation = None
else:
raise RuntimeError(
"Unknown gate activation: {}".format(gate_activation)
)
def forward(self, x):
input = x
x = self.global_avgpool(x)
x = self.fc1(x)
if self.norm1 is not None:
x = self.norm1(x)
x = self.relu(x)
x = self.fc2(x)
if self.gate_activation is not None:
x = self.gate_activation(x)
if self.return_gates:
return x
return input * x
class OSBlock(nn.Module):
"""Omni-scale feature learning block."""
def __init__(
self,
in_channels,
out_channels,
IN=False,
bottleneck_reduction=4,
**kwargs
):
super(OSBlock, self).__init__()
mid_channels = out_channels // bottleneck_reduction
self.conv1 = Conv1x1(in_channels, mid_channels)
self.conv2a = LightConv3x3(mid_channels, mid_channels)
self.conv2b = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2c = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.conv2d = nn.Sequential(
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
LightConv3x3(mid_channels, mid_channels),
)
self.gate = ChannelGate(mid_channels)
self.conv3 = Conv1x1Linear(mid_channels, out_channels)
self.downsample = None
if in_channels != out_channels:
self.downsample = Conv1x1Linear(in_channels, out_channels)
self.IN = None
if IN:
self.IN = nn.InstanceNorm2d(out_channels, affine=True)
def forward(self, x):
identity = x
x1 = self.conv1(x)
x2a = self.conv2a(x1)
x2b = self.conv2b(x1)
x2c = self.conv2c(x1)
x2d = self.conv2d(x1)
x2 = self.gate(x2a) + self.gate(x2b) + self.gate(x2c) + self.gate(x2d)
x3 = self.conv3(x2)
if self.downsample is not None:
identity = self.downsample(identity)
out = x3 + identity
if self.IN is not None:
out = self.IN(out)
return F.relu(out)
##########
# Network architecture
##########
class OSNet(nn.Module):
"""Omni-Scale Network.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. TPAMI, 2021.
"""
def __init__(
self,
num_classes,
blocks,
layers,
channels,
feature_dim=512,
loss='softmax',
IN=False,
mixstyle_layers=[],
mixstyle_p=0.5,
mixstyle_alpha=0.3,
**kwargs
):
super(OSNet, self).__init__()
num_blocks = len(blocks)
assert num_blocks == len(layers)
assert num_blocks == len(channels) - 1
self.loss = loss
# convolutional backbone
self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.conv2 = self._make_layer(
blocks[0],
layers[0],
channels[0],
channels[1],
reduce_spatial_size=True,
IN=IN
)
self.conv3 = self._make_layer(
blocks[1],
layers[1],
channels[1],
channels[2],
reduce_spatial_size=True
)
self.conv4 = self._make_layer(
blocks[2],
layers[2],
channels[2],
channels[3],
reduce_spatial_size=False
)
self.conv5 = Conv1x1(channels[3], channels[3])
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
# fully connected layer
self.fc = self._construct_fc_layer(
feature_dim, channels[3], dropout_p=None
)
# identity classification layer
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.mixstyle = None
if mixstyle_layers:
self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='crossdomain')
print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
self.mixstyle_layers = mixstyle_layers
self._init_params()
def _make_layer(
self,
block,
layer,
in_channels,
out_channels,
reduce_spatial_size,
IN=False
):
layers = []
layers.append(block(in_channels, out_channels, IN=IN))
for i in range(1, layer):
layers.append(block(out_channels, out_channels, IN=IN))
if reduce_spatial_size:
layers.append(
nn.Sequential(
Conv1x1(out_channels, out_channels),
nn.AvgPool2d(2, stride=2)
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
if fc_dims is None or fc_dims < 0:
self.feature_dim = input_dim
return None
if isinstance(fc_dims, int):
fc_dims = [fc_dims]
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.maxpool(x)
x = self.conv2(x)
if 'conv2' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv3(x)
if 'conv3' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv4(x)
if 'conv4' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.conv5(x)
return x
def forward(self, x, return_featuremaps=False):
x = self.featuremaps(x)
if return_featuremaps:
return x
v = self.global_avgpool(x)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, key=''):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from collections import OrderedDict
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# Directory already exists, ignore.
pass
else:
# Unexpected OSError, re-raise.
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file, quiet=False)
state_dict = torch.load(cached_file)
model_dict = model.state_dict()
new_state_dict = OrderedDict()
matched_layers, discarded_layers = [], []
for k, v in state_dict.items():
if k.startswith('module.'):
k = k[7:] # discard module.
if k in model_dict and model_dict[k].size() == v.size():
new_state_dict[k] = v
matched_layers.append(k)
else:
discarded_layers.append(k)
model_dict.update(new_state_dict)
model.load_state_dict(model_dict)
if len(matched_layers) == 0:
warnings.warn(
'The pretrained weights from "{}" cannot be loaded, '
'please check the key names manually '
'(** ignored and continue **)'.format(cached_file)
)
else:
print(
'Successfully loaded imagenet pretrained weights from "{}"'.
format(cached_file)
)
if len(discarded_layers) > 0:
print(
'** The following layers are discarded '
'due to unmatched keys or layer size: {}'.
format(discarded_layers)
)
##########
# Instantiation
##########
def osnet_x1_0(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
def osnet_x0_75(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# medium size (width x0.75)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[48, 192, 288, 384],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_75')
return model
def osnet_x0_5(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# tiny size (width x0.5)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[32, 128, 192, 256],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_5')
return model
def osnet_x0_25(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# very tiny size (width x0.25)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[16, 64, 96, 128],
loss=loss,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x0_25')
return model
def osnet_ibn_x1_0(
num_classes=1000, pretrained=True, loss='softmax', **kwargs
):
# standard size (width x1.0) + IBN layer
# Ref: Pan et al. Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net. ECCV, 2018.
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
IN=True,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_ibn_x1_0')
return model
"""
MixStyle models
"""
def osnet_x1_0_ms23_a0d1_domprior(num_classes=1000, pretrained=True, loss='softmax', **kwargs):
# standard size (width x1.0)
model = OSNet(
num_classes,
blocks=[OSBlock, OSBlock, OSBlock],
layers=[2, 2, 2],
channels=[64, 256, 384, 512],
loss=loss,
mixstyle_layers=['conv2', 'conv3'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, key='osnet_x1_0')
return model
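# --- Usage sketch added for illustration; not part of the original file. num_classes=751
# and the 256x128 input size are assumptions (typical for re-id), and pretrained=False
# skips the Google-Drive download. Because of the relative import of MixStyle above,
# run this file as a module (e.g. python -m reid.models.osnet_ms2) from the repo root.
if __name__ == '__main__':
    model = osnet_x1_0_ms23_a0d1_domprior(num_classes=751, pretrained=False)
    images = torch.randn(4, 3, 256, 128)
    model.train()
    logits = model(images)  # (4, 751) class scores; MixStyle may be applied (p=0.5)
    model.eval()
    feats = model(images)  # (4, 512) embeddings; MixStyle is bypassed at test time
    print(logits.shape, feats.shape)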
| 18,135
| 27.56063
| 108
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/resnet_ms2.py
|
"""
Code source: https://github.com/pytorch/vision
"""
from __future__ import division, absolute_import
import torch.utils.model_zoo as model_zoo
from torch import nn
from .mixstyle import MixStyle
__all__ = [
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152',
'resnext50_32x4d', 'resnext101_32x8d', 'resnet50_fc512'
]
model_urls = {
'resnet18':
'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34':
'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50':
'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101':
'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152':
'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d':
'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d':
'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(
in_planes, out_planes, kernel_size=1, stride=stride, bias=False
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError(
'BasicBlock only supports groups=1 and base_width=64'
)
if dilation > 1:
raise NotImplementedError(
"Dilation > 1 not supported in BasicBlock"
)
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width/64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
"""Residual network.
Reference:
- He et al. Deep Residual Learning for Image Recognition. CVPR 2016.
- Xie et al. Aggregated Residual Transformations for Deep Neural Networks. CVPR 2017.
Public keys:
- ``resnet18``: ResNet18.
- ``resnet34``: ResNet34.
- ``resnet50``: ResNet50.
- ``resnet101``: ResNet101.
- ``resnet152``: ResNet152.
- ``resnext50_32x4d``: ResNeXt50.
- ``resnext101_32x8d``: ResNeXt101.
- ``resnet50_fc512``: ResNet50 + FC.
"""
def __init__(
self,
num_classes,
loss,
block,
layers,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
last_stride=2,
fc_dims=None,
dropout_p=None,
mixstyle_layers=[],
mixstyle_p=0.5,
mixstyle_alpha=0.3,
**kwargs
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.loss = loss
self.feature_dim = 512 * block.expansion
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".
format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block,
512,
layers[3],
stride=last_stride,
dilate=replace_stride_with_dilation[2]
)
self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = self._construct_fc_layer(
fc_dims, 512 * block.expansion, dropout_p
)
self.classifier = nn.Linear(self.feature_dim, num_classes)
self.mixstyle = None
if mixstyle_layers:
self.mixstyle = MixStyle(p=mixstyle_p, alpha=mixstyle_alpha, mix='crossdomain')
print('Insert MixStyle after the following layers: {}'.format(mixstyle_layers))
self.mixstyle_layers = mixstyle_layers
self._init_params()
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer
)
)
return nn.Sequential(*layers)
def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):
"""Constructs fully connected layer
Args:
fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed
input_dim (int): input dimension
dropout_p (float): dropout probability, if None, dropout is unused
"""
if fc_dims is None:
self.feature_dim = input_dim
return None
assert isinstance(
fc_dims, (list, tuple)
), 'fc_dims must be either list or tuple, but got {}'.format(
type(fc_dims)
)
layers = []
for dim in fc_dims:
layers.append(nn.Linear(input_dim, dim))
layers.append(nn.BatchNorm1d(dim))
layers.append(nn.ReLU(inplace=True))
if dropout_p is not None:
layers.append(nn.Dropout(p=dropout_p))
input_dim = dim
self.feature_dim = fc_dims[-1]
return nn.Sequential(*layers)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu'
)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def featuremaps(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
if 'layer1' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer2(x)
if 'layer2' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer3(x)
if 'layer3' in self.mixstyle_layers:
x = self.mixstyle(x)
x = self.layer4(x)
return x
def forward(self, x):
f = self.featuremaps(x)
v = self.global_avgpool(f)
v = v.view(v.size(0), -1)
if self.fc is not None:
v = self.fc(v)
if not self.training:
return v
y = self.classifier(v)
if self.loss == 'softmax':
return y
elif self.loss == 'triplet':
return y, v
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
def init_pretrained_weights(model, model_url):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
pretrain_dict = model_zoo.load_url(model_url)
model_dict = model.state_dict()
pretrain_dict = {
k: v
for k, v in pretrain_dict.items()
if k in model_dict and model_dict[k].size() == v.size()
}
model_dict.update(pretrain_dict)
model.load_state_dict(model_dict)
"""ResNet"""
def resnet18(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[2, 2, 2, 2],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet18'])
return model
def resnet34(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=BasicBlock,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet34'])
return model
def resnet50(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
def resnet101(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet101'])
return model
def resnet152(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 8, 36, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet152'])
return model
"""ResNeXt"""
def resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=4,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext50_32x4d'])
return model
def resnext101_32x8d(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 23, 3],
last_stride=2,
fc_dims=None,
dropout_p=None,
groups=32,
width_per_group=8,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnext101_32x8d'])
return model
"""ResNet + FC"""
def resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
"""MixStyle models"""
def resnet50_fc512_ms12_a0d1_domprior(num_classes, loss='softmax', pretrained=True, **kwargs):
model = ResNet(
num_classes=num_classes,
loss=loss,
block=Bottleneck,
layers=[3, 4, 6, 3],
last_stride=1,
fc_dims=[512],
dropout_p=None,
mixstyle_layers=['layer1', 'layer2'],
mixstyle_alpha=0.1,
**kwargs
)
if pretrained:
init_pretrained_weights(model, model_urls['resnet50'])
return model
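# --- Usage sketch added for illustration; not part of the original file. Input size and
# num_classes are assumptions; pretrained=False skips the ImageNet download. MixStyle is
# inserted after layer1 and layer2 but only takes effect in training mode; in eval mode
# the model returns the 512-d feature from the fc_dims=[512] head.
if __name__ == '__main__':
    import torch
    model = resnet50_fc512_ms12_a0d1_domprior(num_classes=751, pretrained=False)
    model.eval()
    feats = model(torch.randn(4, 3, 256, 128))
    print(feats.shape)  # torch.Size([4, 512])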
| 16,298
| 27.898936
| 106
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/__init__.py
| 0
| 0
| 0
|
py
|
|
mixstyle-release
|
mixstyle-release-master/reid/models/dropblock/dropblock.py
|
import torch
import torch.nn.functional as F
from torch import nn
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
    dropping whole blocks of a feature map removes more semantic
    information than regular dropout does.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()
# place mask on input device
mask = mask.to(x.device)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool2d(input=mask[:, None, :, :],
kernel_size=(self.block_size, self.block_size),
stride=(1, 1),
padding=self.block_size // 2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size ** 2)
class DropBlock3D(DropBlock2D):
r"""Randomly zeroes 3D spatial blocks of the input tensor.
An extension to the concept described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
    dropping whole blocks of a feature map removes more semantic
    information than regular dropout does.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, D, H, W)`
- Output: `(N, C, D, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock3D, self).__init__(drop_prob, block_size)
def forward(self, x):
# shape: (bsize, channels, depth, height, width)
assert x.dim() == 5, \
"Expected input with 5 dimensions (bsize, channels, depth, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()
# place mask on input device
mask = mask.to(x.device)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool3d(input=mask[:, None, :, :, :],
kernel_size=(self.block_size, self.block_size, self.block_size),
stride=(1, 1, 1),
padding=self.block_size // 2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size ** 3)
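# --- Usage sketch added for illustration; not part of the original file. The numbers are
# arbitrary assumptions: with drop_prob=0.1 and block_size=3, gamma = 0.1 / 9 controls how
# many 3x3 blocks are zeroed, and the surviving activations are rescaled to compensate.
if __name__ == '__main__':
    drop = DropBlock2D(drop_prob=0.1, block_size=3)
    drop.train()
    x = torch.ones(2, 8, 16, 16)
    out = drop(x)
    print(out.shape, 'fraction zeroed: {:.3f}'.format((out == 0).float().mean().item()))
    drop.eval()
    assert torch.equal(drop(x), x)  # identity at test time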
| 4,440
| 29.210884
| 98
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/dropblock/scheduler.py
|
import numpy as np
from torch import nn
class LinearScheduler(nn.Module):
def __init__(self, dropblock, start_value, stop_value, nr_steps):
super(LinearScheduler, self).__init__()
self.dropblock = dropblock
self.i = 0
self.drop_values = np.linspace(start=start_value, stop=stop_value, num=nr_steps)
def forward(self, x):
return self.dropblock(x)
def step(self):
if self.i < len(self.drop_values):
self.dropblock.drop_prob = self.drop_values[self.i]
self.i += 1
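# --- Usage sketch added for illustration; not part of the original file. A trivial stand-in
# module keeps the sketch self-contained; in the repo the wrapped module would be a
# DropBlock2D instance. step() writes the next value of np.linspace(start_value, stop_value,
# nr_steps) into dropblock.drop_prob.
if __name__ == '__main__':
    class _Stub(nn.Module):
        def __init__(self):
            super().__init__()
            self.drop_prob = 0.0
        def forward(self, x):
            return x
    sched = LinearScheduler(_Stub(), start_value=0.0, stop_value=0.1, nr_steps=5)
    for _ in range(5):
        sched.step()
        print(sched.dropblock.drop_prob)  # 0.0, 0.025, 0.05, 0.075, 0.1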
| 546
| 26.35
| 88
|
py
|
mixstyle-release
|
mixstyle-release-master/reid/models/dropblock/__init__.py
|
from .dropblock import DropBlock2D, DropBlock3D
from .scheduler import LinearScheduler
__all__ = ['DropBlock2D', 'DropBlock3D', 'LinearScheduler']
| 148
| 28.8
| 59
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/vis.py
|
import argparse
import torch
import os.path as osp
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
def normalize(feature):
norm = np.sqrt((feature**2).sum(1, keepdims=True))
return feature / (norm + 1e-12)
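# (Added illustration, not in the original file.) Row-wise L2 normalization, e.g.
# normalize(np.array([[3., 4.]])) -> [[0.6, 0.8]] (up to the 1e-12 stabiliser).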
def main():
parser = argparse.ArgumentParser()
parser.add_argument('src', type=str, default='', help='path to source file')
parser.add_argument('--dst', type=str, default='', help='destination directory')
    parser.add_argument('--method', type=str, default='tsne', help='tsne, pca or none')
args = parser.parse_args()
if not args.dst:
args.dst = osp.dirname(args.src)
print('Loading file from "{}"'.format(args.src))
file = torch.load(args.src)
embed = file['embed']
domain = file['domain']
dnames = file['dnames']
#dim = embed.shape[1] // 2
#embed = embed[:, dim:]
#domain = file['label']
#dnames = ['dog', 'elephant', 'giraffe', 'guitar', 'horse', 'house', 'person']
nd_src = len(dnames)
embed = normalize(embed)
print('Loaded features with shape {}'.format(embed.shape))
embed2d_path = osp.join(args.dst, 'embed2d_' + args.method + '.pt')
if osp.exists(embed2d_path):
embed2d = torch.load(embed2d_path)
print('Loaded embed2d from "{}"'.format(embed2d_path))
else:
if args.method == 'tsne':
print('Dimension reduction with t-SNE (dim=2) ...')
tsne = TSNE(
n_components=2, metric='euclidean', verbose=1,
perplexity=50, n_iter=1000, learning_rate=200.
)
embed2d = tsne.fit_transform(embed)
torch.save(embed2d, embed2d_path)
print('Saved embed2d to "{}"'.format(embed2d_path))
elif args.method == 'pca':
print('Dimension reduction with PCA (dim=2) ...')
pca = PCA(n_components=2)
embed2d = pca.fit_transform(embed)
torch.save(embed2d, embed2d_path)
print('Saved embed2d to "{}"'.format(embed2d_path))
elif args.method == 'none':
# the original embedding is 2-D
embed2d = embed
avai_domains = list(set(domain.tolist()))
avai_domains.sort()
print('Plotting ...')
SIZE = 3
COLORS = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
LEGEND_MS = 3
fig, ax = plt.subplots()
for d in avai_domains:
d = int(d)
e = embed2d[domain == d]
"""
label = '$D_{}$'.format(str(d + 1))
if d < nd_src:
label += ' ($\mathcal{S}$)'
else:
label += ' ($\mathcal{N}$)'
"""
label = dnames[d]
ax.scatter(
e[:, 0],
e[:, 1],
s=SIZE,
c=COLORS[d],
edgecolors='none',
label=label,
alpha=1,
rasterized=False
)
#ax.legend(loc='upper left', fontsize=10, markerscale=LEGEND_MS)
ax.legend(fontsize=10, markerscale=LEGEND_MS)
ax.set_xticks([])
ax.set_yticks([])
#LIM = 22
#ax.set_xlim(-LIM, LIM)
#ax.set_ylim(-LIM, LIM)
figname = 'embed.pdf'
fig.savefig(osp.join(args.dst, figname), bbox_inches='tight')
plt.close()
if __name__ == '__main__':
main()
| 3,338
| 26.368852
| 87
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/parse_test_res.py
|
"""
Goal
---
1. Read test results from log.txt files
2. Compute mean and std across different folders (seeds)
Usage
---
Assume the output files are saved under output/my_experiment,
which contains results of different seeds, e.g.,
my_experiment/
seed1/
log.txt
seed2/
log.txt
seed3/
log.txt
Run the following command from the root directory:
$ python tools/parse_test_res.py output/my_experiment
Add --ci95 to the command if you want to report the 95% confidence
interval instead of the standard deviation:
$ python tools/parse_test_res.py output/my_experiment --ci95
If my_experiment/ has the following structure,
my_experiment/
exp-1/
seed1/
log.txt
...
seed2/
log.txt
...
seed3/
log.txt
...
exp-2/
...
exp-3/
...
Run
$ python tools/parse_test_res.py output/my_experiment --multi-exp
"""
import re
import numpy as np
import os.path as osp
import argparse
from collections import OrderedDict, defaultdict
from dassl.utils import check_isfile, listdir_nohidden
def compute_ci95(res):
return 1.96 * np.std(res) / np.sqrt(len(res))
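# (Added illustration, not in the original file.) Half-width of the 95% confidence interval
# under a normal approximation; e.g. for res = [80.0, 82.0, 84.0], np.std(res) ~= 1.633 and
# compute_ci95(res) ~= 1.96 * 1.633 / sqrt(3) ~= 1.85.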
def parse_function(*metrics, directory='', args=None, end_signal=None):
print(f'Parsing files in {directory}')
subdirs = listdir_nohidden(directory, sort=True)
outputs = []
for subdir in subdirs:
fpath = osp.join(directory, subdir, 'log.txt')
assert check_isfile(fpath)
good_to_go = False
output = OrderedDict()
with open(fpath, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
if line == end_signal:
good_to_go = True
for metric in metrics:
match = metric['regex'].search(line)
if match and good_to_go:
if 'file' not in output:
output['file'] = fpath
num = float(match.group(1))
name = metric['name']
output[name] = num
if output:
outputs.append(output)
assert len(outputs) > 0, f'Nothing found in {directory}'
metrics_results = defaultdict(list)
for output in outputs:
msg = ''
for key, value in output.items():
if isinstance(value, float):
msg += f'{key}: {value:.2f}%. '
else:
msg += f'{key}: {value}. '
if key != 'file':
metrics_results[key].append(value)
print(msg)
output_results = OrderedDict()
print('===')
print(f'Summary of directory: {directory}')
for key, values in metrics_results.items():
avg = np.mean(values)
std = compute_ci95(values) if args.ci95 else np.std(values)
print(f'* {key}: {avg:.2f}% +- {std:.2f}%')
output_results[key] = avg
print('===')
return output_results
def main(args, end_signal):
metric1 = {
'name': 'accuracy',
'regex': re.compile(r'\* accuracy: ([\.\deE+-]+)%')
}
metric2 = {
'name': 'error',
'regex': re.compile(r'\* error: ([\.\deE+-]+)%')
}
if args.multi_exp:
final_results = defaultdict(list)
for directory in listdir_nohidden(args.directory, sort=True):
directory = osp.join(args.directory, directory)
results = parse_function(
metric1,
metric2,
directory=directory,
args=args,
end_signal=end_signal
)
for key, value in results.items():
final_results[key].append(value)
print('Average performance')
for key, values in final_results.items():
avg = np.mean(values)
print(f'* {key}: {avg:.2f}%')
else:
parse_function(
metric1,
metric2,
directory=args.directory,
args=args,
end_signal=end_signal
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help='path to directory')
parser.add_argument(
'--ci95',
action='store_true',
        help='compute 95%% confidence interval'  # %% renders as a literal % in argparse help
)
parser.add_argument(
'--test-log', action='store_true', help='parse test-only logs'
)
parser.add_argument(
'--multi-exp', action='store_true', help='parse multiple experiments'
)
args = parser.parse_args()
end_signal = 'Finished training'
if args.test_log:
end_signal = '=> result'
main(args, end_signal)
| 4,752
| 24.148148
| 77
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/train.py
|
import argparse
import copy
import torch
from dassl.utils import setup_logger, set_random_seed, collect_env_info
from dassl.config import get_cfg_default
from dassl.engine import build_trainer
# custom
from yacs.config import CfgNode as CN
import datasets.ssdg_pacs
import datasets.ssdg_officehome
import datasets.msda_pacs
import trainers.vanilla2
import trainers.semimixstyle
def print_args(args, cfg):
print('***************')
print('** Arguments **')
print('***************')
optkeys = list(args.__dict__.keys())
optkeys.sort()
for key in optkeys:
print('{}: {}'.format(key, args.__dict__[key]))
print('************')
print('** Config **')
print('************')
print(cfg)
def reset_cfg(cfg, args):
if args.root:
cfg.DATASET.ROOT = args.root
if args.output_dir:
cfg.OUTPUT_DIR = args.output_dir
if args.resume:
cfg.RESUME = args.resume
if args.seed:
cfg.SEED = args.seed
if args.source_domains:
cfg.DATASET.SOURCE_DOMAINS = args.source_domains
if args.target_domains:
cfg.DATASET.TARGET_DOMAINS = args.target_domains
if args.transforms:
cfg.INPUT.TRANSFORMS = args.transforms
if args.trainer:
cfg.TRAINER.NAME = args.trainer
if args.backbone:
cfg.MODEL.BACKBONE.NAME = args.backbone
if args.head:
cfg.MODEL.HEAD.NAME = args.head
def extend_cfg(cfg):
# Here you can extend the existing cfg variables by adding new ones
cfg.TRAINER.VANILLA2 = CN()
cfg.TRAINER.VANILLA2.MIX = 'random' # random or crossdomain
cfg.TRAINER.SEMIMIXSTYLE = CN()
cfg.TRAINER.SEMIMIXSTYLE.WEIGHT_U = 1. # weight on the unlabeled loss
cfg.TRAINER.SEMIMIXSTYLE.CONF_THRE = 0.95 # confidence threshold
cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS = ()
cfg.TRAINER.SEMIMIXSTYLE.MS_LABELED = False # apply mixstyle to labeled data
cfg.TRAINER.SEMIMIXSTYLE.MIX = 'random' # random or crossdomain
def setup_cfg(args):
cfg = get_cfg_default()
extend_cfg(cfg)
reset_cfg(cfg, args)
if args.dataset_config_file:
cfg.merge_from_file(args.dataset_config_file)
if args.config_file:
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def main(args):
cfg = setup_cfg(args)
if cfg.SEED >= 0:
print('Setting fixed seed: {}'.format(cfg.SEED))
set_random_seed(cfg.SEED)
setup_logger(cfg.OUTPUT_DIR)
if torch.cuda.is_available() and cfg.USE_CUDA:
torch.backends.cudnn.benchmark = True
print_args(args, cfg)
print('Collecting env info ...')
print('** System info **\n{}\n'.format(collect_env_info()))
trainer = build_trainer(cfg)
if args.vis:
trainer.load_model(args.model_dir, epoch=args.load_epoch)
trainer.vis()
return
if args.eval_only:
trainer.load_model(args.model_dir, epoch=args.load_epoch)
trainer.test()
return
if not args.no_train:
trainer.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='', help='path to dataset')
parser.add_argument(
'--output-dir', type=str, default='', help='output directory'
)
parser.add_argument(
'--resume',
type=str,
default='',
help='checkpoint directory (from which the training resumes)'
)
parser.add_argument(
'--seed',
type=int,
default=-1,
help='only positive value enables a fixed seed'
)
parser.add_argument(
'--source-domains',
type=str,
nargs='+',
help='source domains for DA/DG'
)
parser.add_argument(
'--target-domains',
type=str,
nargs='+',
help='target domains for DA/DG'
)
parser.add_argument(
'--transforms', type=str, nargs='+', help='data augmentation methods'
)
parser.add_argument(
'--config-file', type=str, default='', help='path to config file'
)
parser.add_argument(
'--dataset-config-file',
type=str,
default='',
help='path to config file for dataset setup'
)
parser.add_argument(
'--trainer', type=str, default='', help='name of trainer'
)
parser.add_argument(
'--backbone', type=str, default='', help='name of CNN backbone'
)
parser.add_argument('--head', type=str, default='', help='name of head')
parser.add_argument(
'--eval-only', action='store_true', help='evaluation only'
)
parser.add_argument(
'--model-dir',
type=str,
default='',
help='load model from this directory for eval-only mode'
)
parser.add_argument(
'--load-epoch',
type=int,
help='load model weights at this epoch for evaluation'
)
parser.add_argument(
'--no-train', action='store_true', help='do not call trainer.train()'
)
parser.add_argument('--vis', action='store_true', help='visualization')
parser.add_argument(
'opts',
default=None,
nargs=argparse.REMAINDER,
help='modify config options using the command-line'
)
args = parser.parse_args()
main(args)
| 5,312
| 26.386598
| 80
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/datasets/ssdg_officehome.py
|
import os.path as osp
import glob
import random
from dassl.utils import listdir_nohidden
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
from dassl.utils import mkdir_if_missing
from .ssdg_pacs import SSDGPACS
@DATASET_REGISTRY.register()
class SSDGOfficeHome(DatasetBase):
"""Office-Home.
Statistics:
- 4 domains: Art, Clipart, Product, Real world.
- 65 categories.
Reference:
- Venkateswara et al. Deep Hashing Network for Unsupervised
Domain Adaptation. CVPR 2017.
- Zhou et al. Semi-Supervised Domain Generalization with
Stochastic StyleMatch. ArXiv preprint, 2021.
"""
dataset_dir = 'office_home_dg'
domains = ['art', 'clipart', 'product', 'real_world']
data_url = 'https://drive.google.com/uc?id=1gkbf_KaxoBws-GWT3XIPZ7BnkqbAxIFa'
def __init__(self, cfg):
root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = osp.join(root, self.dataset_dir)
self.split_ssdg_dir = osp.join(self.dataset_dir, 'splits_ssdg')
mkdir_if_missing(self.split_ssdg_dir)
if not osp.exists(self.dataset_dir):
dst = osp.join(root, 'office_home_dg.zip')
self.download_data(self.data_url, dst, from_gdrive=True)
self.check_input_domains(
cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS
)
seed = cfg.SEED
num_labeled = cfg.DATASET.NUM_LABELED
src_domains = cfg.DATASET.SOURCE_DOMAINS
tgt_domain = cfg.DATASET.TARGET_DOMAINS[0]
split_ssdg_path = osp.join(self.split_ssdg_dir, f'{tgt_domain}_nlab{num_labeled}_seed{seed}.json')
if not osp.exists(split_ssdg_path):
train_x, train_u = self._read_data_train(cfg.DATASET.SOURCE_DOMAINS, 'train', num_labeled)
SSDGPACS.write_json_train(split_ssdg_path, src_domains, self.dataset_dir, train_x, train_u)
else:
train_x, train_u = SSDGPACS.read_json_train(split_ssdg_path, src_domains, self.dataset_dir)
val = self._read_data_test(cfg.DATASET.SOURCE_DOMAINS, 'val')
test = self._read_data_test(cfg.DATASET.TARGET_DOMAINS, 'all')
if cfg.DATASET.ALL_AS_UNLABELED:
train_u = train_u + train_x
super().__init__(train_x=train_x, train_u=train_u, val=val, test=test)
def _read_data_train(self, input_domains, split, num_labeled):
items_x, items_u = [], []
num_labeled_per_class = None
num_domains = len(input_domains)
for domain, dname in enumerate(input_domains):
path = osp.join(self.dataset_dir, dname, split)
folders = listdir_nohidden(path, sort=True)
if num_labeled_per_class is None:
num_labeled_per_class = num_labeled / (num_domains * len(folders))
for label, folder in enumerate(folders):
impaths = glob.glob(osp.join(path, folder, '*.jpg'))
assert len(impaths) >= num_labeled_per_class
random.shuffle(impaths)
for i, impath in enumerate(impaths):
item = Datum(impath=impath, label=label, domain=domain)
if (i + 1) <= num_labeled_per_class:
items_x.append(item)
else:
items_u.append(item)
return items_x, items_u
def _read_data_test(self, input_domains, split):
def _load_data_from_directory(directory):
folders = listdir_nohidden(directory, sort=True)
folders.sort()
items_ = []
for label, folder in enumerate(folders):
impaths = glob.glob(osp.join(directory, folder, '*.jpg'))
for impath in impaths:
items_.append((impath, label))
return items_
items = []
for domain, dname in enumerate(input_domains):
if split == 'all':
train_dir = osp.join(self.dataset_dir, dname, 'train')
impath_label_list = _load_data_from_directory(train_dir)
val_dir = osp.join(self.dataset_dir, dname, 'val')
impath_label_list += _load_data_from_directory(val_dir)
else:
split_dir = osp.join(self.dataset_dir, dname, split)
impath_label_list = _load_data_from_directory(split_dir)
for impath, label in impath_label_list:
item = Datum(impath=impath, label=label, domain=domain)
items.append(item)
return items
| 4,583
| 36.884298
| 106
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/datasets/ssdg_pacs.py
|
import os.path as osp
import random
from collections import defaultdict
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
from dassl.utils import mkdir_if_missing, read_json, write_json
@DATASET_REGISTRY.register()
class SSDGPACS(DatasetBase):
"""PACS.
Statistics:
- 4 domains: Photo (1,670), Art (2,048), Cartoon
(2,344), Sketch (3,929).
- 7 categories: dog, elephant, giraffe, guitar, horse,
house and person.
Reference:
- Li et al. Deeper, broader and artier domain generalization.
ICCV 2017.
- Zhou et al. Semi-Supervised Domain Generalization with
Stochastic StyleMatch. ArXiv preprint, 2021.
"""
dataset_dir = 'pacs'
domains = ['art_painting', 'cartoon', 'photo', 'sketch']
data_url = 'https://drive.google.com/uc?id=1m4X4fROCCXMO0lRLrr6Zz9Vb3974NWhE'
# the following images contain errors and should be ignored
_error_paths = ['sketch/dog/n02103406_4068-1.png']
def __init__(self, cfg):
root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = osp.join(root, self.dataset_dir)
self.image_dir = osp.join(self.dataset_dir, 'images')
self.split_dir = osp.join(self.dataset_dir, 'splits')
self.split_ssdg_dir = osp.join(self.dataset_dir, 'splits_ssdg')
mkdir_if_missing(self.split_ssdg_dir)
if not osp.exists(self.dataset_dir):
dst = osp.join(root, 'pacs.zip')
self.download_data(self.data_url, dst, from_gdrive=True)
self.check_input_domains(
cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS
)
seed = cfg.SEED
num_labeled = cfg.DATASET.NUM_LABELED
src_domains = cfg.DATASET.SOURCE_DOMAINS
tgt_domain = cfg.DATASET.TARGET_DOMAINS[0]
split_ssdg_path = osp.join(self.split_ssdg_dir, f'{tgt_domain}_nlab{num_labeled}_seed{seed}.json')
if not osp.exists(split_ssdg_path):
train_x, train_u = self._read_data_train(cfg.DATASET.SOURCE_DOMAINS, num_labeled)
self.write_json_train(split_ssdg_path, src_domains, self.image_dir, train_x, train_u)
else:
train_x, train_u = self.read_json_train(split_ssdg_path, src_domains, self.image_dir)
val = self._read_data_test(cfg.DATASET.SOURCE_DOMAINS, 'crossval')
test = self._read_data_test(cfg.DATASET.TARGET_DOMAINS, 'all')
if cfg.DATASET.ALL_AS_UNLABELED:
train_u = train_u + train_x
super().__init__(train_x=train_x, train_u=train_u, val=val, test=test)
@staticmethod
def read_json_train(filepath, src_domains, image_dir):
"""
The latest office_home_dg dataset's class folders have
been changed to only contain the class names, e.g.,
000_Alarm_Clock/ is changed to Alarm_Clock/.
"""
def _convert_to_datums(items):
out = []
for impath, label, dname in items:
if dname not in src_domains:
continue
domain = src_domains.index(dname)
impath2 = osp.join(image_dir, impath)
if not osp.exists(impath2):
impath = impath.split('/')
if impath[-2].startswith('0'):
impath[-2] = impath[-2][4:]
impath = '/'.join(impath)
impath2 = osp.join(image_dir, impath)
item = Datum(impath=impath2, label=int(label), domain=domain)
out.append(item)
return out
print(f'Reading split from "{filepath}"')
split = read_json(filepath)
train_x = _convert_to_datums(split['train_x'])
train_u = _convert_to_datums(split['train_u'])
return train_x, train_u
@staticmethod
def write_json_train(filepath, src_domains, image_dir, train_x, train_u):
def _convert_to_list(items):
out = []
for item in items:
impath = item.impath
label = item.label
domain = item.domain
dname = src_domains[domain]
impath = impath.replace(image_dir, '')
if impath.startswith('/'):
impath = impath[1:]
out.append((impath, label, dname))
return out
train_x = _convert_to_list(train_x)
train_u = _convert_to_list(train_u)
output = {
'train_x': train_x,
'train_u': train_u
}
write_json(output, filepath)
print(f'Saved the split to "{filepath}"')
def _read_data_train(self, input_domains, num_labeled):
num_labeled_per_class = None
num_domains = len(input_domains)
items_x, items_u = [], []
for domain, dname in enumerate(input_domains):
file = osp.join(
self.split_dir, dname + '_train_kfold.txt'
)
impath_label_list = self._read_split_pacs(file)
impath_label_dict = defaultdict(list)
for impath, label in impath_label_list:
impath_label_dict[label].append((impath, label))
labels = list(impath_label_dict.keys())
if num_labeled_per_class is None:
num_labeled_per_class = num_labeled / (num_domains * len(labels))
for label in labels:
pairs = impath_label_dict[label]
assert len(pairs) >= num_labeled_per_class
random.shuffle(pairs)
for i, (impath, label) in enumerate(pairs):
item = Datum(impath=impath, label=label, domain=domain)
if (i + 1) <= num_labeled_per_class:
items_x.append(item)
else:
items_u.append(item)
return items_x, items_u
def _read_data_test(self, input_domains, split):
items = []
for domain, dname in enumerate(input_domains):
if split == 'all':
file_train = osp.join(
self.split_dir, dname + '_train_kfold.txt'
)
impath_label_list = self._read_split_pacs(file_train)
file_val = osp.join(
self.split_dir, dname + '_crossval_kfold.txt'
)
impath_label_list += self._read_split_pacs(file_val)
else:
file = osp.join(
self.split_dir, dname + '_' + split + '_kfold.txt'
)
impath_label_list = self._read_split_pacs(file)
for impath, label in impath_label_list:
item = Datum(impath=impath, label=label, domain=domain)
items.append(item)
return items
def _read_split_pacs(self, split_file):
items = []
with open(split_file, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
impath, label = line.split(' ')
if impath in self._error_paths:
continue
impath = osp.join(self.image_dir, impath)
label = int(label) - 1
items.append((impath, label))
return items
| 7,401
| 36.01
| 106
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
mixstyle-release
|
mixstyle-release-master/imcls/datasets/msda_pacs.py
|
import os.path as osp
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
@DATASET_REGISTRY.register()
class MSDAPACS(DatasetBase):
"""PACS.
Modified for multi-source domain adaptation.
Statistics:
- 4 domains: Photo (1,670), Art (2,048), Cartoon
(2,344), Sketch (3,929).
- 7 categories: dog, elephant, giraffe, guitar, horse,
house and person.
Reference:
- Li et al. Deeper, broader and artier domain generalization.
ICCV 2017.
"""
dataset_dir = 'pacs'
domains = ['art_painting', 'cartoon', 'photo', 'sketch']
data_url = 'https://drive.google.com/uc?id=1m4X4fROCCXMO0lRLrr6Zz9Vb3974NWhE'
# the following images contain errors and should be ignored
_error_paths = ['sketch/dog/n02103406_4068-1.png']
def __init__(self, cfg):
root = osp.abspath(osp.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = osp.join(root, self.dataset_dir)
self.image_dir = osp.join(self.dataset_dir, 'images')
self.split_dir = osp.join(self.dataset_dir, 'splits')
if not osp.exists(self.dataset_dir):
dst = osp.join(root, 'pacs.zip')
self.download_data(self.data_url, dst, from_gdrive=True)
self.check_input_domains(
cfg.DATASET.SOURCE_DOMAINS, cfg.DATASET.TARGET_DOMAINS
)
train_x = self._read_data(cfg.DATASET.SOURCE_DOMAINS, 'train')
train_u = self._read_data(cfg.DATASET.TARGET_DOMAINS, 'train')
val = self._read_data(cfg.DATASET.SOURCE_DOMAINS, 'crossval')
test = self._read_data(cfg.DATASET.TARGET_DOMAINS, 'crossval')
super().__init__(train_x=train_x, train_u=train_u, val=val, test=test)
def _read_data(self, input_domains, split):
items = []
for domain, dname in enumerate(input_domains):
if split == 'all':
file_train = osp.join(
self.split_dir, dname + '_train_kfold.txt'
)
impath_label_list = self._read_split_pacs(file_train)
file_val = osp.join(
self.split_dir, dname + '_crossval_kfold.txt'
)
impath_label_list += self._read_split_pacs(file_val)
else:
file = osp.join(
self.split_dir, dname + '_' + split + '_kfold.txt'
)
impath_label_list = self._read_split_pacs(file)
for impath, label in impath_label_list:
item = Datum(impath=impath, label=label, domain=domain)
items.append(item)
return items
def _read_split_pacs(self, split_file):
items = []
with open(split_file, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
impath, label = line.split(' ')
if impath in self._error_paths:
continue
impath = osp.join(self.image_dir, impath)
label = int(label) - 1
items.append((impath, label))
return items
| 3,145
| 33.955556
| 81
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/trainers/semimixstyle.py
|
import torch
from torch.nn import functional as F
from dassl.data import DataManager
from dassl.engine import TRAINER_REGISTRY, TrainerXU
from dassl.metrics import compute_accuracy
from dassl.data.transforms import build_transform
from dassl.modeling.ops import (
    random_mixstyle, crossdomain_mixstyle, deactivate_mixstyle, run_with_mixstyle
)
@TRAINER_REGISTRY.register()
class SemiMixStyle(TrainerXU):
def __init__(self, cfg):
super().__init__(cfg)
self.weight_u = cfg.TRAINER.SEMIMIXSTYLE.WEIGHT_U
self.conf_thre = cfg.TRAINER.SEMIMIXSTYLE.CONF_THRE
self.ms_labeled = cfg.TRAINER.SEMIMIXSTYLE.MS_LABELED
mix = cfg.TRAINER.SEMIMIXSTYLE.MIX
if mix == 'random':
self.model.apply(random_mixstyle)
print('MixStyle: random mixing')
elif mix == 'crossdomain':
self.model.apply(crossdomain_mixstyle)
print('MixStyle: cross-domain mixing')
else:
raise NotImplementedError
self.model.apply(deactivate_mixstyle)
def check_cfg(self, cfg):
assert len(cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS) > 0
def build_data_loader(self):
cfg = self.cfg
tfm_train = build_transform(cfg, is_train=True)
custom_tfm_train = [tfm_train]
choices = cfg.TRAINER.SEMIMIXSTYLE.STRONG_TRANSFORMS
tfm_train_strong = build_transform(cfg, is_train=True, choices=choices)
custom_tfm_train += [tfm_train_strong]
dm = DataManager(self.cfg, custom_tfm_train=custom_tfm_train)
self.train_loader_x = dm.train_loader_x
self.train_loader_u = dm.train_loader_u
self.val_loader = dm.val_loader
self.test_loader = dm.test_loader
self.num_classes = dm.num_classes
self.num_source_domains = dm.num_source_domains
self.lab2cname = dm.lab2cname
def assess_y_pred_quality(self, y_pred, y_true, mask):
n_masked_correct = (y_pred.eq(y_true).float() * mask).sum()
acc_thre = n_masked_correct / (mask.sum() + 1e-5)
acc_raw = y_pred.eq(y_true).sum() / y_pred.numel() # raw accuracy
keep_rate = mask.sum() / mask.numel()
output = {
'acc_thre': acc_thre,
'acc_raw': acc_raw,
'keep_rate': keep_rate
}
return output
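    # (Added illustration, not in the original file.) For example, with y_pred=[1, 2, 3],
    # y_true=[1, 2, 0] and mask=[1, 1, 0]: acc_thre ~= 1.0 (both confident predictions are
    # correct), acc_raw ~= 0.667, keep_rate ~= 0.667.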
def forward_backward(self, batch_x, batch_u):
parsed_data = self.parse_batch_train(batch_x, batch_u)
input_x, input_x2, label_x, input_u, input_u2, label_u = parsed_data
input_u = torch.cat([input_x, input_u], 0)
input_u2 = torch.cat([input_x2, input_u2], 0)
n_x = input_x.size(0)
# Generate pseudo labels
with torch.no_grad():
output_u = F.softmax(self.model(input_u), 1)
max_prob, label_u_pred = output_u.max(1)
mask_u = (max_prob >= self.conf_thre).float()
# Evaluate pseudo labels' accuracy
y_u_pred_stats = self.assess_y_pred_quality(
label_u_pred[n_x:], label_u, mask_u[n_x:]
)
# Supervised loss
if self.ms_labeled:
with run_with_mixstyle(self.model, mix='random'):
output_x = self.model(input_x)
loss_x = F.cross_entropy(output_x, label_x)
else:
output_x = self.model(input_x)
loss_x = F.cross_entropy(output_x, label_x)
# Unsupervised loss
with run_with_mixstyle(self.model, mix='crossdomain'):
output_u = self.model(input_u2)
loss_u = F.cross_entropy(output_u, label_u_pred, reduction='none')
loss_u = (loss_u * mask_u).mean()
loss = loss_x + loss_u * self.weight_u
self.model_backward_and_update(loss)
loss_summary = {
'loss_x': loss_x.item(),
'acc_x': compute_accuracy(output_x, label_x)[0].item(),
'loss_u': loss_u.item(),
'y_u_pred_acc_raw': y_u_pred_stats['acc_raw'],
'y_u_pred_acc_thre': y_u_pred_stats['acc_thre'],
'y_u_pred_keep': y_u_pred_stats['keep_rate']
}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return loss_summary
def parse_batch_train(self, batch_x, batch_u):
input_x = batch_x['img']
input_x2 = batch_x['img2']
label_x = batch_x['label']
input_u = batch_u['img']
input_u2 = batch_u['img2']
# label_u is used only for evaluating pseudo labels' accuracy
label_u = batch_u['label']
input_x = input_x.to(self.device)
input_x2 = input_x2.to(self.device)
label_x = label_x.to(self.device)
input_u = input_u.to(self.device)
input_u2 = input_u2.to(self.device)
label_u = label_u.to(self.device)
return input_x, input_x2, label_x, input_u, input_u2, label_u
| 4,835
| 35.360902
| 79
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/trainers/vanilla2.py
|
import os.path as osp
import numpy as np
import torch
from torch.nn import functional as F
from dassl.engine import TRAINER_REGISTRY, TrainerX
from dassl.metrics import compute_accuracy
from dassl.modeling.ops import random_mixstyle, crossdomain_mixstyle
@TRAINER_REGISTRY.register()
class Vanilla2(TrainerX):
"""Vanilla baseline.
Slightly modified for mixstyle.
"""
def __init__(self, cfg):
super().__init__(cfg)
mix = cfg.TRAINER.VANILLA2.MIX
if mix == 'random':
self.model.apply(random_mixstyle)
print('MixStyle: random mixing')
elif mix == 'crossdomain':
self.model.apply(crossdomain_mixstyle)
print('MixStyle: cross-domain mixing')
else:
raise NotImplementedError
def forward_backward(self, batch):
input, label = self.parse_batch_train(batch)
output = self.model(input)
loss = F.cross_entropy(output, label)
self.model_backward_and_update(loss)
loss_summary = {
'loss': loss.item(),
'acc': compute_accuracy(output, label)[0].item()
}
if (self.batch_idx + 1) == self.num_batches:
self.update_lr()
return loss_summary
def parse_batch_train(self, batch):
input = batch['img']
label = batch['label']
input = input.to(self.device)
label = label.to(self.device)
return input, label
@torch.no_grad()
def vis(self):
self.set_model_mode('eval')
output_dir = self.cfg.OUTPUT_DIR
source_domains = self.cfg.DATASET.SOURCE_DOMAINS
print('Source domains:', source_domains)
out_embed = []
out_domain = []
out_label = []
split = self.cfg.TEST.SPLIT
data_loader = self.val_loader if split == 'val' else self.test_loader
print('Extracting style features')
for batch_idx, batch in enumerate(data_loader):
input = batch['img'].to(self.device)
label = batch['label']
domain = batch['domain']
impath = batch['impath']
            # NOTE: unimplemented -- the model must be modified to directly
            # output features or style statistics before this loop can run
            raise NotImplementedError
output = self.model(input)
output = output.cpu().numpy()
out_embed.append(output)
out_domain.append(domain.numpy())
out_label.append(label.numpy()) # CLASS LABEL
print('processed batch-{}'.format(batch_idx + 1))
out_embed = np.concatenate(out_embed, axis=0)
out_domain = np.concatenate(out_domain, axis=0)
out_label = np.concatenate(out_label, axis=0)
print('shape of feature matrix:', out_embed.shape)
out = {
'embed': out_embed,
'domain': out_domain,
'dnames': source_domains,
'label': out_label
}
out_path = osp.join(output_dir, 'embed.pt')
torch.save(out, out_path)
print('File saved to "{}"'.format(out_path))
| 3,011
| 29.424242
| 77
|
py
|
mixstyle-release
|
mixstyle-release-master/imcls/trainers/__init__.py
| 0
| 0
| 0
|
py
|
|
mixstyle-release
|
mixstyle-release-master/rl/setup.py
|
from setuptools import setup, find_packages
setup(
name='coinrun',
packages=find_packages(),
version='0.0.1',
)
| 125
| 14.75
| 43
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/plots.py
|
import tensorflow as tf
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set()
sns.set_style("ticks")
params = {'legend.fontsize': 10, 'legend.handlelength': 2,
'font.size': 10}
plt.rcParams.update(params)
def movingaverage(values, window):
weights = np.repeat(1.0, window)/window
sma = np.convolve(values, weights, 'valid')
return sma
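# Example (hypothetical values): a window of 2 averages adjacent pairs:
# >>> movingaverage(np.array([1.0, 2.0, 3.0, 4.0]), 2)
# array([1.5, 2.5, 3.5])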
# window_size is used for the moving average
window_size = 2000
xlims = None
max_step = 200000000
tag = "rew_mean"
ylims_test = (6, 9)
ylims_train = (4, 10)
path = "./tb_log/{}/"
save_dir = 'figures/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
LEGEND_FONT = 12
##### Main Train/Test Plot
plotname = "Paper_Coinrun_Main.pdf"
plotname_kl = "Paper_Coinrun_KL.pdf"
experiments = {
"Baseline": [
'l2wd_da_run1_{}', 'l2wd_da_run2_{}', 'l2wd_da_run3_{}'
],
"Baseline + MixStyle": [
'l2wd_da_ms_a0d1_run1_{}', 'l2wd_da_ms_a0d1_run2_{}', 'l2wd_da_ms_a0d1_run3_{}'
], # alpha=0.1
"IBAC-SNI": [
'ibac_sni_lmda0.5_run1_{}', 'ibac_sni_lmda0.5_run2_{}', 'ibac_sni_lmda0.5_run3_{}'
],
"IBAC-SNI + MixStyle": [
'ibac_sni_lmda0.5_ms_a0d1_run1_{}', 'ibac_sni_lmda0.5_ms_a0d1_run2_{}', 'ibac_sni_lmda0.5_ms_a0d1_run3_{}'
], # alpha=0.1
}
##### Alpha Ablation Plot (commented out)
# plotname = "Paper_Coinrun_Alpha.pdf"
# plotname_kl = "Paper_Coinrun_KL.pdf"
# experiments = {
# "Baseline + MixStyle ($\\alpha=0.1$)": [
# 'l2wd_da_ms_a0d1_run1_{}', 'l2wd_da_ms_a0d1_run2_{}', 'l2wd_da_ms_a0d1_run3_{}'
# ], # alpha=0.1
# "Baseline + MixStyle ($\\alpha=0.2$)": [
# 'l2wd_da_ms_a0d2_run1_{}', 'l2wd_da_ms_a0d2_run2_{}', 'l2wd_da_ms_a0d2_run3_{}'
# ], # alpha=0.2
# "Baseline + MixStyle ($\\alpha=0.3$)": [
# 'l2wd_da_ms_run1_{}', 'l2wd_da_ms_run2_{}', 'l2wd_da_ms_run3_{}'
# ], # alpha=0.3
# ###
# "IBAC-SNI + MixStyle ($\\alpha=0.1$)": [
# 'ibac_sni_lmda0.5_ms_a0d1_run1_{}', 'ibac_sni_lmda0.5_ms_a0d1_run2_{}', 'ibac_sni_lmda0.5_ms_a0d1_run3_{}'
# ], # alpha=0.1
# "IBAC-SNI + MixStyle ($\\alpha=0.2$)": [
# 'ibac_sni_lmda0.5_ms_a0d2_run1_{}', 'ibac_sni_lmda0.5_ms_a0d2_run2_{}', 'ibac_sni_lmda0.5_ms_a0d2_run3_{}'
# ], # alpha=0.2
# "IBAC-SNI + MixStyle ($\\alpha=0.3$)": [
# 'ibac_sni_lmda0.5_ms_run1_{}', 'ibac_sni_lmda0.5_ms_run2_{}', 'ibac_sni_lmda0.5_ms_run3_{}'
# ], # alpha=0.3
# }
fig_main, ax_main = plt.subplots(1, 1) # train performance
fig_main2, ax_main2 = plt.subplots(1, 1) # test performance
fig_main3, ax_main3 = plt.subplots(1, 1) # generalization gap
fig_approxkl, ax_approxkl = plt.subplots(1, 1)
palette = sns.color_palette()
for key_idx, key in enumerate(experiments):
results = {}
for ending, marker in zip([0, 1], ['--', '-']):
all_steps = []
all_values = []
all_approxkl_run = []
all_approxkl_train = []
for idx in range(len(experiments[key])):
dirname = experiments[key][idx].format(ending)
print(dirname)
steps = []
values = []
approxkl_run = []
approxkl_train = []
modified_path = path.format(dirname)
for filename in os.listdir(modified_path):
if not filename.startswith('events'):
continue
try:
for e in tf.train.summary_iterator(modified_path + filename):
for v in e.summary.value:
if v.tag == 'rew_mean' and e.step <= max_step:
steps.append(e.step)
values.append(v.simple_value)
if ending == 0:
if v.tag == 'approxkl_run' and e.step <= max_step:
approxkl_run.append(v.simple_value)
elif v.tag == 'approxkl_train' and e.step <= max_step:
approxkl_train.append(v.simple_value)
                except Exception:
                    # skip corrupted or truncated event files
                    pass
# print(e)
steps = np.array(steps)[window_size//2:-window_size//2] * 3
values = movingaverage(np.array(values), window_size)
min_len = min(steps.shape[0], values.shape[0])
values, steps = values[:min_len], steps[:min_len]
if len(approxkl_run) == 0:
approxkl_run = np.zeros(min_len)
approxkl_train = np.zeros(min_len)
else:
approxkl_run = movingaverage(np.array(approxkl_run), window_size)[:min_len]
approxkl_train = movingaverage(np.array(approxkl_train), window_size)[:min_len]
all_steps.append(steps)
all_values.append(values)
all_approxkl_run.append(approxkl_run)
all_approxkl_train.append(approxkl_train)
min_length = np.inf
for steps, values in zip(all_steps, all_values):
min_length = min(min_length, steps.shape[0])
min_length = min(min_length, values.shape[0])
new_all_steps = []
new_all_values = []
new_all_approxkl_run = []
new_all_approxkl_train = []
for steps, values, approxkl_run, approxkl_train in zip(all_steps, all_values, all_approxkl_run, all_approxkl_train):
new_all_steps.append(steps[:min_length])
new_all_values.append(values[:min_length])
new_all_approxkl_run.append(approxkl_run[:min_length])
new_all_approxkl_train.append(approxkl_train[:min_length])
all_steps = np.stack(new_all_steps)
all_values = np.stack(new_all_values)
all_approxkl_run = np.stack(new_all_approxkl_run)
all_approxkl_train = np.stack(new_all_approxkl_train)
mean = np.mean(all_values, 0)[::10]
std = np.std(all_values, 0)[::10]
steps = all_steps[0][::10]
results[ending] = all_values
# label = key if ending == 0 else None
label = key
if ending == 0:
ax = ax_main
else:
ax = ax_main2
# ax.plot(steps, mean, label=label, linestyle=marker, color=palette[key_idx])
ax.plot(steps, mean, label=label, color=palette[key_idx])
ax.fill_between(steps, mean+std, mean-std, alpha=0.5, color=palette[key_idx])
if ending == 0 and all_approxkl_run[0,0] != 0:
mean = np.mean(all_approxkl_run, 0)[::10]
std = np.std(all_approxkl_run, 0)[::10]
ax_approxkl.plot(steps, mean, label=key + " (det)", color=palette[key_idx])
ax_approxkl.fill_between(steps, mean+std, mean-std, alpha=0.5, color=palette[key_idx])
mean = np.mean(all_approxkl_train, 0)[::10]
std = np.std(all_approxkl_train, 0)[::10]
ax_approxkl.plot(steps, mean, label=key + " (stoch)", linestyle='--', color=palette[key_idx])
ax_approxkl.fill_between(steps, mean+std, mean-std, alpha=0.5, color=palette[key_idx])
elif ending == 0 and all_approxkl_train[0,0] != 0:
mean = np.mean(all_approxkl_train, 0)[::10]
std = np.std(all_approxkl_train, 0)[::10]
ax_approxkl.plot(steps, mean, label=key, linestyle='-.', color=palette[key_idx])
ax_approxkl.fill_between(steps, mean+std, mean-std, alpha=0.5, color=palette[key_idx])
gen_gap = results[0] - results[1]
mean = np.mean(gen_gap, 0)[::10]
std = np.std(gen_gap, 0)[::10]
ax_main3.plot(steps, mean, label=label, color=palette[key_idx])
ax_main3.fill_between(steps, mean+std, mean-std, alpha=0.5, color=palette[key_idx])
ax_main.legend(loc='upper left', fontsize=LEGEND_FONT)
ax_main2.legend(loc='upper left', fontsize=LEGEND_FONT)
ax_main3.legend(loc='upper right', fontsize=LEGEND_FONT)
ax_main2.set_ylim(*ylims_test)
# if plotname == "Dropout_on_Plain.pdf":
# ax_main2.set_ylim(5, 7.1)
ax_main.set_ylim(*ylims_train)
ax_main.set_xlabel("Frames")
ax_main2.set_xlabel("Frames")
ax_main3.set_xlabel("Frames")
ax_main.set_ylabel("Return")
ax_main2.set_ylabel("Return")
ax_main3.set_ylabel("Generalization gap")
# if xlims is not None:
# ax_main.set_xlim(*xlims)
fig_main.savefig(os.path.join(save_dir, "Train_"+plotname), bbox_inches='tight')
fig_main2.savefig(os.path.join(save_dir, "Test_"+plotname), bbox_inches='tight')
fig_main3.savefig(os.path.join(save_dir, "Gap_"+plotname), bbox_inches='tight')
ax_approxkl.legend(loc='upper right')
ax_approxkl.set_xlabel("Frames")
ax_approxkl.set_ylabel("Approx KL")
fig_approxkl.savefig(os.path.join(save_dir, plotname_kl), bbox_inches='tight')
| 8,656
| 39.265116
| 124
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/create_gif.py
|
import imageio
import os
images = []
# for filename in os.listdir(modified_path):
filenames = []
for filename in os.listdir('./images'):
if filename.startswith('img_'):
filenames.append(filename)
sorted_files = [None] * len(filenames)
for filename in filenames:
nr = int(filename[4:-4])
sorted_files[nr-1] = filename
print(sorted_files)
images = []
for filename in sorted_files:
images.append(imageio.imread(filename))
imageio.mimsave('coinrun.gif', images, fps=25)
| 495
| 20.565217
| 46
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/create_saliency.py
|
"""
Load an agent trained with train_agent.py and
"""
import time
import tensorflow as tf
import numpy as np
from coinrun import setup_utils
import coinrun.main_utils as utils
from coinrun.config import Config
from coinrun import config
from coinrun import policies, wrappers
import imageio
import sys
# Imports for saliency (tensorflow and numpy are already imported above):
import PIL.Image
from matplotlib import pylab as P
import pickle
import os
slim = tf.contrib.slim
import saliency
titlesize = 30
# Boilerplate methods.
def ShowImage(im, title='', ax=None):
if ax is None:
P.figure()
P.axis('off')
# im = ((im + 1) * 127.5).astype(np.uint8)
P.imshow(im)
P.title(title, fontsize=titlesize)
def ShowGrayscaleImage(im, title='', ax=None):
if ax is None:
P.figure()
P.axis('off')
P.imshow(im, cmap=P.cm.gray, vmin=0, vmax=1)
P.title(title, fontsize=titlesize)
def ShowDivergingImage(grad, title='', percentile=99, ax=None):
if ax is None:
fig, ax = P.subplots()
else:
fig = ax.figure
P.axis('off')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
im = ax.imshow(grad, cmap=P.cm.coolwarm, vmin=-1, vmax=1)
fig.colorbar(im, cax=cax, orientation='vertical')
P.title(title)
def LoadImage(file_path):
im = PIL.Image.open(file_path)
im = np.asarray(im)
return im / 127.5 - 1.0
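# Example (worked arithmetic): LoadImage maps uint8 pixels into [-1, 1];
# a pixel of 255 becomes 255/127.5 - 1 = 1.0 and a pixel of 0 becomes -1.0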
# First one is None for the original image
models = [None, '0322_plain', None,
'0401_l2a_l2w_uda', '0401_l2w_uda', '0401_l2w',
None, '0327_l2a1e4', '0401_l2a1e4_noUda',
]
names = {
'0322_plain': 'No Regularization',
# '0322_plain_all': 'L2W + UDA + BN',
'0401_l2w_uda': 'L2W + UDA',
'0401_l2w': 'L2W',
'0401_l2a_l2w_uda': 'L2W + L2A + UDA',
'0327_l2a1e4': 'L2A + UDA',
'0401_l2a1e4_noUda': 'L2A',
# '0405__vib_l2w_uda_nn': 'VIB(modified) + L2W + UDA'
}
ROWS = 3
COLS = 3
UPSCALE_FACTOR = 10
# regular code:
mpi_print = utils.mpi_print
def create_act_model(sess, env, nenvs):
ob_space = env.observation_space
ac_space = env.action_space
policy = policies.get_policy()
act = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
return act
def enjoy_env_sess():
# utils.setup_mpi_gpus()
# setup_utils.setup_and_load({'restore_id': collecting_model})
directory = './images/'
directory_saliency = "./images_saliency"
def create_saliency(model_idx, sess):
graph = tf.get_default_graph()
env = utils.make_general_env(1)
env = wrappers.add_final_wrappers(env)
agent = create_act_model(sess, env, 1)
action_selector = tf.placeholder(tf.int32)
gradient_saliency = saliency.GradientSaliency(graph, sess, agent.pd.logits[0][action_selector], agent.X)
sess.run(tf.global_variables_initializer())
# setup_utils.restore_file(models[model_idx])
try:
loaded_params = utils.load_params_for_scope(sess, 'model')
if not loaded_params:
print('NO SAVED PARAMS LOADED')
except AssertionError as e:
models[model_idx] = None
return [None]*3
return agent, gradient_saliency, action_selector
orig_images_low = []
orig_images_high = []
filenames = []
print("Loading files...")
for idx, filename in enumerate(os.listdir(directory)):
if len(filename) > 15 or os.path.isdir(os.path.join(directory, filename)):
continue
print('.', end='')
img = imageio.imread(os.path.join(directory, filename))
img = img.astype(np.float32)
if filename.startswith('img_') and len(filename) < 15:
filenames.append(filename)
list_to_append = orig_images_low
if filename.startswith('imgL_') and len(filename) < 15:
list_to_append = orig_images_high
list_to_append.append(img)
    list_of_images_lists = []  # aligned with `models`; entries stay None for skipped models
list_of_vmax_lists = []
for idx, model_name in enumerate(models):
if model_name is None:
list_of_images_lists.append(None)
list_of_vmax_lists.append(None)
continue
model_images = []
vmaxs = []
config.Config = config.ConfigSingle()
setup_utils.setup_and_load(use_cmd_line_args=False, restore_id=model_name, replay=True)
print("\nComputing saliency for Model {}\{}: {}...".format(idx, len(models)-1, names[model_name]))
with tf.Session() as sess:
agent, gradient_saliency, action_selector = create_saliency(idx, sess)
for img in orig_images_low:
print('.', end=''); sys.stdout.flush()
action, values, state, _ = agent.step(np.expand_dims(img, 0), agent.initial_state, False)
s_vanilla_mask_3d = gradient_saliency.GetSmoothedMask(img, feed_dict={'model/is_training_:0': False, action_selector: action[0]})
s_vanilla_mask_grayscale, vmax = saliency.VisualizeImageGrayscale(s_vanilla_mask_3d)
model_images.append(s_vanilla_mask_grayscale)
vmaxs.append(vmax)
list_of_images_lists.append(model_images)
list_of_vmax_lists.append(vmaxs)
print("\nMaking pretty images..")
for idx, filename in enumerate(filenames):
print('.', end=''); sys.stdout.flush()
P.figure(figsize=(COLS * UPSCALE_FACTOR, ROWS * UPSCALE_FACTOR))
ShowImage(orig_images_high[idx]/255, title="Original", ax=P.subplot(ROWS, COLS, 1))
for row in range(ROWS):
for col in range(COLS):
model_idx = col + row * COLS
if models[model_idx] is None:
continue
ShowGrayscaleImage(
list_of_images_lists[model_idx][idx],
title=names[models[model_idx]] + " Vmax: {:.2E}".format(list_of_vmax_lists[model_idx][idx]),
ax=P.subplot(ROWS, COLS, model_idx+1))
P.savefig(os.path.join(directory_saliency, filename[:-4]+"_saliency.png"))
P.close()
print("\nDone")
def main():
enjoy_env_sess()
if __name__ == '__main__':
main()
| 6,228
| 30.301508
| 145
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/tb_utils.py
|
import tensorflow as tf
from mpi4py import MPI
from coinrun.config import Config
import numpy as np
def clean_tb_dir():
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == 0:
if tf.gfile.Exists(Config.TB_DIR):
tf.gfile.DeleteRecursively(Config.TB_DIR)
tf.gfile.MakeDirs(Config.TB_DIR)
comm.Barrier()
class TB_Writer(object):
def __init__(self, sess):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# clean_tb_dir()
tb_writer = tf.summary.FileWriter(Config.TB_DIR + '/' + Config.RUN_ID + '_' + str(rank), sess.graph)
total_steps = [0]
# should_log = (rank == 0 or Config.LOG_ALL_MPI)
# I use rank 1 for testing
should_log = (rank in [0, 1])
if should_log:
hyperparams = np.array(Config.get_arg_text())
hyperparams_tensor = tf.constant(hyperparams)
summary_op = tf.summary.text("hyperparameters info", hyperparams_tensor)
summary = sess.run(summary_op)
tb_writer.add_summary(summary)
def add_summary(_merged, interval=1):
if should_log:
total_steps[0] += 1
if total_steps[0] % interval == 0:
tb_writer.add_summary(_merged, total_steps[0])
tb_writer.flush()
tuples = []
def make_scalar_graph(name):
if name.startswith("info_loss"):
with tf.variable_scope("info_loss"):
scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32)
else:
scalar_ph = tf.placeholder(name='scalar_' + name, dtype=tf.float32)
scalar_summary = tf.summary.scalar(name, scalar_ph)
merged = tf.summary.merge([scalar_summary])
tuples.append((scalar_ph, merged))
# Gives index of name
name_dict = {}
curr_name_idx = [0]
def log_scalar(x, name, step=-1):
            if name not in name_dict:
name_dict[name] = curr_name_idx[0]
# tf_name = (name + '_' + Config.RUN_ID) if curr_name_idx[0] == 0 else name
tf_name = name
make_scalar_graph(tf_name)
curr_name_idx[0] += 1
idx = name_dict[name]
scalar_ph, merged = tuples[idx]
if should_log:
if step == -1:
step = total_steps[0]
total_steps[0] += 1
_merged = sess.run(merged, {scalar_ph: x})
tb_writer.add_summary(_merged, step)
tb_writer.flush()
self.add_summary = add_summary
self.log_scalar = log_scalar
| 2,740
| 30.147727
| 108
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/train_agent.py
|
"""
Train an agent using a PPO2 based on OpenAI Baselines.
"""
import time
from mpi4py import MPI
import tensorflow as tf
from baselines.common import set_global_seeds
import coinrun.main_utils as utils
from coinrun import setup_utils, policies, wrappers, ppo2
from coinrun.config import Config
def main():
args = setup_utils.setup_and_load()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
seed = int(time.time()) % 10000
set_global_seeds(seed * 100 + rank)
utils.setup_mpi_gpus()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # pylint: disable=E1101
nenvs = Config.NUM_ENVS
total_timesteps = int(160e6)
if Config.LONG_TRAINING:
total_timesteps = int(200e6)
elif Config.SHORT_TRAINING:
total_timesteps = int(120e6)
save_interval = args.save_interval
env = utils.make_general_env(nenvs, seed=rank)
with tf.Session(config=config):
env = wrappers.add_final_wrappers(env)
policy = policies.get_policy()
ppo2.learn(policy=policy,
env=env,
save_interval=save_interval,
nsteps=Config.NUM_STEPS,
nminibatches=Config.NUM_MINIBATCHES,
lam=0.95,
gamma=Config.GAMMA,
noptepochs=Config.PPO_EPOCHS,
log_interval=1,
ent_coef=Config.ENTROPY_COEFF,
lr=lambda f : f * Config.LEARNING_RATE,
cliprange=lambda f : f * 0.2,
total_timesteps=total_timesteps)
if __name__ == '__main__':
main()
| 1,651
| 27
| 66
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/test_coinrun.py
|
from coinrun import random_agent
def test_coinrun():
random_agent.random_agent(num_envs=16, max_steps=100)
if __name__ == '__main__':
test_coinrun()
| 159
| 19
| 57
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/coinrunenv.py
|
"""
Python interface to the CoinRun shared library using ctypes.
On import, this will attempt to build the shared library.
"""
import os
import atexit
import random
import sys
from ctypes import c_int, c_char_p, c_float, c_bool
import gym
import gym.spaces
import numpy as np
import numpy.ctypeslib as npct
from baselines.common.vec_env import VecEnv
from baselines import logger
from coinrun.config import Config
from mpi4py import MPI
from baselines.common import mpi_util
# if the environment is crashing, try using the debug build to get
# a readable stack trace
DEBUG = False
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
game_versions = {
'standard': 1000,
'platform': 1001,
'maze': 1002,
}
def build():
lrank, _lsize = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
if lrank == 0:
dirname = os.path.dirname(__file__)
if len(dirname):
make_cmd = "QT_SELECT=5 make -C %s" % dirname
else:
make_cmd = "QT_SELECT=5 make"
r = os.system(make_cmd)
if r != 0:
logger.error('coinrun: make failed')
sys.exit(1)
MPI.COMM_WORLD.barrier()
build()
if DEBUG:
lib_path = '.build-debug/coinrun_cpp_d'
else:
lib_path = '.build-release/coinrun_cpp'
lib = npct.load_library(lib_path, os.path.dirname(__file__))
lib.init.argtypes = [c_int]
lib.get_NUM_ACTIONS.restype = c_int
lib.get_RES_W.restype = c_int
lib.get_RES_H.restype = c_int
lib.get_VIDEORES.restype = c_int
lib.vec_create.argtypes = [
c_int, # game_type
c_int, # nenvs
c_int, # lump_n
c_bool, # want_hires_render
c_float, # default_zoom
]
lib.vec_create.restype = c_int
lib.vec_close.argtypes = [c_int]
lib.vec_step_async_discrete.argtypes = [c_int, npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_args.argtypes = [npct.ndpointer(dtype=np.int32, ndim=1)]
lib.initialize_set_monitor_dir.argtypes = [c_char_p, c_int]
lib.vec_wait.argtypes = [
c_int,
npct.ndpointer(dtype=np.uint8, ndim=4), # normal rgb
npct.ndpointer(dtype=np.uint8, ndim=4), # larger rgb for render()
npct.ndpointer(dtype=np.float32, ndim=1), # rew
npct.ndpointer(dtype=np.bool, ndim=1), # done
]
already_inited = False
def init_args_and_threads(cpu_count=4,
monitor_csv_policy='all',
rand_seed=None):
"""
Perform one-time global init for the CoinRun library. This must be called
before creating an instance of CoinRunVecEnv. You should not
call this multiple times from the same process.
"""
os.environ['COINRUN_RESOURCES_PATH'] = os.path.join(SCRIPT_DIR, 'assets')
is_high_difficulty = Config.HIGH_DIFFICULTY
if rand_seed is None:
rand_seed = random.SystemRandom().randint(0, 1000000000)
    # ensure different MPI processes get different seeds (just in case the SystemRandom implementation is poor)
mpi_rank, mpi_size = mpi_util.get_local_rank_size(MPI.COMM_WORLD)
rand_seed = rand_seed - rand_seed % mpi_size + mpi_rank
int_args = np.array([int(is_high_difficulty), Config.NUM_LEVELS, int(Config.PAINT_VEL_INFO), Config.USE_DATA_AUGMENTATION, game_versions[Config.GAME_TYPE], Config.SET_SEED, rand_seed]).astype(np.int32)
lib.initialize_args(int_args)
lib.initialize_set_monitor_dir(logger.get_dir().encode('utf-8'), {'off': 0, 'first_env': 1, 'all': 2}[monitor_csv_policy])
global already_inited
if already_inited:
return
lib.init(cpu_count)
already_inited = True
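# Usage sketch (assumes Config has been initialized; setup_utils.setup_and_load()
# calls init_args_and_threads(4) for you, see setup_utils.py):
# >>> from coinrun import setup_utils, make
# >>> setup_utils.setup_and_load(use_cmd_line_args=False)
# >>> env = make('standard', num_envs=4)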
@atexit.register
def shutdown():
global already_inited
if not already_inited:
return
lib.coinrun_shutdown()
class CoinRunVecEnv(VecEnv):
"""
    This is the CoinRun VecEnv; all CoinRun environments are just instances
    of this class with different values for `game_type`.
    `game_type`: int game type to create, see `enum GameType` in `coinrun.cpp`
`num_envs`: number of environments to create in this VecEnv
`lump_n`: only used when the environment creates `monitor.csv` files
`default_zoom`: controls how much of the level the agent can see
"""
def __init__(self, game_type, num_envs, lump_n=0, default_zoom=5.0):
self.metadata = {'render.modes': []}
self.reward_range = (-float('inf'), float('inf'))
self.NUM_ACTIONS = lib.get_NUM_ACTIONS()
self.RES_W = lib.get_RES_W()
self.RES_H = lib.get_RES_H()
self.VIDEORES = lib.get_VIDEORES()
self.buf_rew = np.zeros([num_envs], dtype=np.float32)
self.buf_done = np.zeros([num_envs], dtype=np.bool)
self.buf_rgb = np.zeros([num_envs, self.RES_H, self.RES_W, 3], dtype=np.uint8)
self.hires_render = Config.IS_HIGH_RES
if self.hires_render:
self.buf_render_rgb = np.zeros([num_envs, self.VIDEORES, self.VIDEORES, 3], dtype=np.uint8)
else:
self.buf_render_rgb = np.zeros([1, 1, 1, 1], dtype=np.uint8)
num_channels = 1 if Config.USE_BLACK_WHITE else 3
obs_space = gym.spaces.Box(0, 255, shape=[self.RES_H, self.RES_W, num_channels], dtype=np.uint8)
super().__init__(
num_envs=num_envs,
observation_space=obs_space,
action_space=gym.spaces.Discrete(self.NUM_ACTIONS),
)
self.handle = lib.vec_create(
game_versions[game_type],
self.num_envs,
lump_n,
self.hires_render,
default_zoom)
self.dummy_info = [{} for _ in range(num_envs)]
def __del__(self):
if hasattr(self, 'handle'):
lib.vec_close(self.handle)
self.handle = 0
def close(self):
lib.vec_close(self.handle)
self.handle = 0
def reset(self):
print("CoinRun ignores resets")
obs, _, _, _ = self.step_wait()
return obs
def get_images(self):
if self.hires_render:
return self.buf_render_rgb
else:
return self.buf_rgb
def step_async(self, actions):
assert actions.dtype in [np.int32, np.int64]
actions = actions.astype(np.int32)
lib.vec_step_async_discrete(self.handle, actions)
def step_wait(self):
self.buf_rew = np.zeros_like(self.buf_rew)
self.buf_done = np.zeros_like(self.buf_done)
lib.vec_wait(
self.handle,
self.buf_rgb,
self.buf_render_rgb,
self.buf_rew,
self.buf_done)
obs_frames = self.buf_rgb
if Config.USE_BLACK_WHITE:
obs_frames = np.mean(obs_frames, axis=-1).astype(np.uint8)[...,None]
return obs_frames, self.buf_rew, self.buf_done, self.dummy_info
def make(env_id, num_envs, **kwargs):
assert env_id in game_versions, 'cannot find environment "%s", maybe you mean one of %s' % (env_id, list(game_versions.keys()))
return CoinRunVecEnv(env_id, num_envs, **kwargs)
| 6,996
| 31.09633
| 205
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/ppo2.py
|
"""
This is a copy of PPO from openai/baselines (https://github.com/openai/baselines/blob/52255beda5f5c8760b0ae1f676aa656bb1a61f80/baselines/ppo2/ppo2.py) with some minor changes.
"""
import time
import datetime
import joblib
import numpy as np
import tensorflow as tf
from collections import deque
from mpi4py import MPI
from coinrun.tb_utils import TB_Writer
import coinrun.main_utils as utils
from coinrun.utils import AverageMeter
from coinrun.config import Config
mpi_print = utils.mpi_print
from baselines.common.runners import AbstractEnvRunner
from baselines.common.tf_util import initialize
from baselines.common.mpi_util import sync_from_root
from tensorflow.python.ops.ragged.ragged_util import repeat
class MpiAdamOptimizer(tf.train.AdamOptimizer):
"""Adam optimizer that averages gradients across mpi processes."""
def __init__(self, comm, **kwargs):
self.comm = comm
self.train_frac = 1.0 - Config.get_test_frac()
tf.train.AdamOptimizer.__init__(self, **kwargs)
def compute_gradients(self, loss, var_list, **kwargs):
grads_and_vars = tf.train.AdamOptimizer.compute_gradients(self, loss, var_list, **kwargs)
grads_and_vars = [(g, v) for g, v in grads_and_vars if g is not None]
flat_grad = tf.concat([tf.reshape(g, (-1,)) for g, v in grads_and_vars], axis=0)
if Config.is_test_rank():
flat_grad = tf.zeros_like(flat_grad)
shapes = [v.shape.as_list() for g, v in grads_and_vars]
sizes = [int(np.prod(s)) for s in shapes]
num_tasks = self.comm.Get_size()
buf = np.zeros(sum(sizes), np.float32)
def _collect_grads(flat_grad):
self.comm.Allreduce(flat_grad, buf, op=MPI.SUM)
np.divide(buf, float(num_tasks) * self.train_frac, out=buf)
return buf
avg_flat_grad = tf.py_func(_collect_grads, [flat_grad], tf.float32)
avg_flat_grad.set_shape(flat_grad.shape)
avg_grads = tf.split(avg_flat_grad, sizes, axis=0)
avg_grads_and_vars = [(tf.reshape(g, v.shape), v)
for g, (_, v) in zip(avg_grads, grads_and_vars)]
return avg_grads_and_vars
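    # Sketch of the scaling (hypothetical setup): with 4 MPI ranks and --test
    # enabled, test ranks send zero gradients and the Allreduce sum is divided
    # by num_tasks * train_frac = 4 * 0.5 = 2, i.e. the gradient is averaged
    # over the ranks assumed to be training.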
class Model(object):
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm):
sess = tf.get_default_session()
train_model = policy(sess, ob_space, ac_space, nbatch_train, nsteps)
norm_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
act_model = policy(sess, ob_space, ac_space, nbatch_act, 1)
A = train_model.pdtype.sample_placeholder([None])
ADV = tf.placeholder(tf.float32, [None])
R = tf.placeholder(tf.float32, [None])
OLDNEGLOGPAC = tf.placeholder(tf.float32, [None])
OLDVPRED = tf.placeholder(tf.float32, [None])
LR = tf.placeholder(tf.float32, [])
CLIPRANGE = tf.placeholder(tf.float32, [])
# VF loss
        vpred = train_model.vf_train # Same as vf_run for SNI and default, but noisy for SNI2 while the bootstrap is not
vpredclipped = OLDVPRED + tf.clip_by_value(train_model.vf_train - OLDVPRED, - CLIPRANGE, CLIPRANGE)
vf_losses1 = tf.square(vpred - R)
vf_losses2 = tf.square(vpredclipped - R)
vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
neglogpac_train = train_model.pd_train.neglogp(A)
ratio_train = tf.exp(OLDNEGLOGPAC - neglogpac_train)
pg_losses_train = -ADV * ratio_train
pg_losses2_train = -ADV * tf.clip_by_value(ratio_train, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss = tf.reduce_mean(tf.maximum(pg_losses_train, pg_losses2_train))
approxkl_train = .5 * tf.reduce_mean(tf.square(neglogpac_train - OLDNEGLOGPAC))
clipfrac_train = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio_train - 1.0), CLIPRANGE)))
if Config.BETA >= 0:
entropy = tf.reduce_mean(train_model.pd_train._components_distribution.entropy())
else:
entropy = tf.reduce_mean(train_model.pd_train.entropy())
# Add entropy and policy loss for the samples as well
if Config.SNI or Config.SNI2:
neglogpac_run = train_model.pd_run.neglogp(A)
ratio_run = tf.exp(OLDNEGLOGPAC - neglogpac_run)
pg_losses_run = -ADV * ratio_run
pg_losses2_run = -ADV * tf.clip_by_value(ratio_run, 1.0 - CLIPRANGE, 1.0 + CLIPRANGE)
pg_loss += tf.reduce_mean(tf.maximum(pg_losses_run, pg_losses2_run))
pg_loss /= 2.
entropy += tf.reduce_mean(train_model.pd_run.entropy())
entropy /= 2.
approxkl_run = .5 * tf.reduce_mean(tf.square(neglogpac_run - OLDNEGLOGPAC))
clipfrac_run = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio_run - 1.0), CLIPRANGE)))
else:
approxkl_run = tf.constant(0.)
clipfrac_run = tf.constant(0.)
params = tf.trainable_variables()
weight_params = [v for v in params if '/b' not in v.name]
total_num_params = 0
for p in params:
shape = p.get_shape().as_list()
num_params = np.prod(shape)
mpi_print('param', p, num_params)
total_num_params += num_params
mpi_print('total num params:', total_num_params)
l2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in weight_params])
        # The first occurrence should be in the train_model
if Config.BETA >= 0:
info_loss = tf.get_collection(
key="INFO_LOSS",
scope="model/info_loss"
)
beta = Config.BETA
elif Config.BETA_L2A >= 0:
info_loss = tf.get_collection(
key="INFO_LOSS_L2A",
scope="model/info_loss"
)
beta = Config.BETA_L2A
else:
info_loss = [tf.constant(0.)]
beta = 0
print(info_loss)
assert len(info_loss) == 1
info_loss = info_loss[0]
loss = pg_loss - entropy * ent_coef + vf_loss * vf_coef + l2_loss * Config.L2_WEIGHT + beta * info_loss
if Config.SYNC_FROM_ROOT:
trainer = MpiAdamOptimizer(MPI.COMM_WORLD, learning_rate=LR, epsilon=1e-5)
else:
trainer = tf.train.AdamOptimizer(learning_rate=LR, epsilon=1e-5)
grads_and_var = trainer.compute_gradients(loss, params)
grads, var = zip(*grads_and_var)
if max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, max_grad_norm)
grads_and_var = list(zip(grads, var))
_train = trainer.apply_gradients(grads_and_var)
def train(lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
advs = returns - values
adv_mean = np.mean(advs, axis=0, keepdims=True)
adv_std = np.std(advs, axis=0, keepdims=True)
advs = (advs - adv_mean) / (adv_std + 1e-8)
td_map = {train_model.X:obs, A:actions, ADV:advs, R:returns, LR:lr,
CLIPRANGE:cliprange, OLDNEGLOGPAC:neglogpacs, OLDVPRED:values}
if states is not None:
td_map[train_model.S] = states
td_map[train_model.M] = masks
return sess.run(
[pg_loss, vf_loss, entropy, approxkl_train, clipfrac_train, approxkl_run, clipfrac_run, l2_loss, info_loss, _train],
td_map
)[:-1]
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl_train', 'clipfrac_train', 'approxkl_run', 'clipfrac_run', 'l2_loss', 'info_loss_cv']
def save(save_path):
ps = sess.run(params)
joblib.dump(ps, save_path)
def load(load_path):
loaded_params = joblib.load(load_path)
restores = []
for p, loaded_p in zip(params, loaded_params):
restores.append(p.assign(loaded_p))
sess.run(restores)
self.train = train
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.value = act_model.value
self.initial_state = act_model.initial_state
self.save = save
self.load = load
if Config.SYNC_FROM_ROOT:
if MPI.COMM_WORLD.Get_rank() == 0:
initialize()
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
sync_from_root(sess, global_variables) #pylint: disable=E1101
else:
initialize()
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, nsteps, gamma, lam):
super().__init__(env=env, model=model, nsteps=nsteps)
self.lam = lam
self.gamma = gamma
def run(self, update_frac):
# Here, we init the lists that will contain the mb of experiences
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [],[],[],[],[],[]
mb_states = self.states
epinfos = []
# For n in range number of steps
for _ in range(self.nsteps):
            # Given observations, get action, value and neglogpacs
            # We already have self.obs because the Runner superclass runs self.obs[:] = env.reset() on init
actions, values, self.states, neglogpacs = self.model.step(self.obs, update_frac, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
            # Take actions in env and look at the results
            # Infos contains a ton of useful information
self.obs[:], rewards, self.dones, infos = self.env.step(actions)
for info in infos:
maybeepinfo = info.get('episode')
if maybeepinfo: epinfos.append(maybeepinfo)
mb_rewards.append(rewards)
        # batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, update_frac, self.states, self.dones)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
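        # Generalized Advantage Estimation, computed backwards in time:
        #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_{t+1}) - V(s_t)
        #   A_t     = delta_t + gamma * lam * (1 - done_{t+1}) * A_{t+1}
        # and the returns R_t = A_t + V(s_t) serve as value-function targets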
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t+1]
nextvalues = mb_values[t+1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return (*map(sf01, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs)),
mb_states, epinfos)
def sf01(arr):
"""
swap and then flatten axes 0 and 1
"""
s = arr.shape
return arr.swapaxes(0, 1).reshape(s[0] * s[1], *s[2:])
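# Example (hypothetical shapes): an (nsteps, nenvs, ...) rollout such as
# (256, 32, 84, 84, 3) becomes (256 * 32, 84, 84, 3) = (8192, 84, 84, 3),
# with each environment's trajectory laid out contiguously after the swap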
def constfn(val):
def f(_):
return val
return f
def learn(*, policy, env, nsteps, total_timesteps, ent_coef, lr,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mpi_size = comm.Get_size()
sess = tf.get_default_session()
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
nenvs = env.num_envs
ob_space = env.observation_space
ac_space = env.action_space
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
utils.load_all_params(sess)
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
epinfobuf10 = deque(maxlen=10)
epinfobuf100 = deque(maxlen=100)
tfirststart = time.time()
active_ep_buf = epinfobuf100
nupdates = total_timesteps//nbatch
mean_rewards = []
datapoints = []
run_t_total = 0
train_t_total = 0
can_save = True
checkpoints = [32, 64]
saved_key_checkpoints = [False] * len(checkpoints)
if Config.SYNC_FROM_ROOT and rank != 0:
can_save = False
def save_model(base_name=None):
base_dict = {'datapoints': datapoints}
utils.save_params_in_scopes(sess, ['model'], Config.get_save_file(base_name=base_name), base_dict)
    # For logging purposes, allow restoring of the update counter
start_update = 0
if Config.RESTORE_STEP is not None:
start_update = Config.RESTORE_STEP // nbatch
tb_writer = TB_Writer(sess)
meter_update = AverageMeter()
print('Total number of updates:', nupdates)
for update in range(start_update+1, nupdates+1):
assert nbatch % nminibatches == 0
nbatch_train = nbatch // nminibatches
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lrnow = lr(frac)
cliprangenow = cliprange(frac)
mpi_print('collecting rollouts...')
run_tstart = time.time()
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run(update_frac=update/nupdates)
epinfobuf10.extend(epinfos)
epinfobuf100.extend(epinfos)
run_elapsed = time.time() - run_tstart
run_t_total += run_elapsed
mpi_print('rollouts complete')
mblossvals = []
mpi_print('updating parameters...')
train_tstart = time.time()
inds = np.arange(nbatch)
for _ in range(noptepochs):
np.random.shuffle(inds)
for start in range(0, nbatch, nbatch_train):
sess.run([model.train_model.train_dropout_assign_ops])
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
# update the dropout mask
sess.run([model.train_model.train_dropout_assign_ops])
sess.run([model.train_model.run_dropout_assign_ops])
train_elapsed = time.time() - train_tstart
train_t_total += train_elapsed
mpi_print('update complete')
# Update meter reading
meter_update.update(time.time() - run_tstart)
lossvals = np.mean(mblossvals, axis=0)
tnow = time.time()
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
step = update*nbatch
rew_mean_10 = utils.process_ep_buf(active_ep_buf, tb_writer=tb_writer, suffix='', step=step)
ep_len_mean = np.nanmean([epinfo['l'] for epinfo in active_ep_buf])
mpi_print('\n----', update)
mean_rewards.append(rew_mean_10)
datapoints.append([step, rew_mean_10])
tb_writer.log_scalar(ep_len_mean, 'ep_len_mean', step=step)
tb_writer.log_scalar(fps, 'fps', step=step)
mpi_print('time_elapsed', tnow - tfirststart, run_t_total, train_t_total)
mpi_print('timesteps', update*nsteps, total_timesteps)
mpi_print('eplenmean', ep_len_mean)
mpi_print('eprew', rew_mean_10)
mpi_print('fps', fps)
mpi_print('total_timesteps', update*nbatch)
mpi_print([epinfo['r'] for epinfo in epinfobuf10])
# Print running time and estimated arrival time
eta_seconds = (nupdates - update) * meter_update.avg
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
mpi_print('average update time {:.4f}'.format(meter_update.avg))
mpi_print('approx eta', eta)
if len(mblossvals):
for (lossval, lossname) in zip(lossvals, model.loss_names):
mpi_print(lossname, lossval)
tb_writer.log_scalar(lossval, lossname, step=step)
mpi_print('----\n')
if can_save:
if save_interval and (update % save_interval == 0):
save_model()
for j, checkpoint in enumerate(checkpoints):
if (not saved_key_checkpoints[j]) and (step >= (checkpoint * 1e6)):
saved_key_checkpoints[j] = True
save_model(str(checkpoint) + 'M')
save_model()
env.close()
return mean_rewards
| 17,289
| 37.59375
| 175
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/setup_utils.py
|
from coinrun.config import Config
import os
import joblib
def load_for_setup_if_necessary():
print("Restoring from ID: {}".format(Config.RESTORE_ID))
restore_file(Config.RESTORE_ID)
def restore_file(restore_id, load_key='default'):
if restore_id is not None:
load_file = Config.get_load_filename(restore_id=restore_id)
filepath = file_to_path(load_file)
load_data = joblib.load(filepath)
Config.set_load_data(load_data, load_key=load_key)
restored_args = load_data['args']
sub_dict = {}
res_keys = Config.RES_KEYS
for key in res_keys:
if key in restored_args:
sub_dict[key] = restored_args[key]
else:
print('warning key %s not restored' % key)
Config.parse_args_dict(sub_dict)
from coinrun.coinrunenv import init_args_and_threads
init_args_and_threads(4)
def setup_and_load(use_cmd_line_args=True, **kwargs):
"""
Initialize the global config using command line options, defaulting to the values in `config.py`.
`use_cmd_line_args`: set to False to ignore command line arguments passed to the program
`**kwargs`: override the defaults from `config.py` with these values
"""
args = Config.initialize_args(use_cmd_line_args=use_cmd_line_args, **kwargs)
load_for_setup_if_necessary()
return args
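# Usage sketch (hypothetical overrides; any key defined in config.py can be passed):
# >>> from coinrun import setup_utils
# >>> setup_utils.setup_and_load(use_cmd_line_args=False, num_envs=16, run_id='demo')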
def file_to_path(filename):
return os.path.join(Config.WORKDIR, filename)
| 1,466
| 30.212766
| 101
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/utils.py
|
import os
import sys
import time
import os.path as osp
import errno
def mkdir_if_missing(dirname):
"""Create dirname if it is missing."""
if not osp.exists(dirname):
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
class Logger(object):
"""Write console output to external text file.
Imported from `<https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py>`_
Args:
fpath (str): directory to save logging file.
Examples::
>>> import sys
>>> import os.path as osp
>>> save_dir = 'output/experiment-1'
>>> log_name = 'train.log'
>>> sys.stdout = Logger(osp.join(save_dir, log_name))
"""
def __init__(self, fpath=None):
self.console = sys.stdout
self.file = None
if fpath is not None:
mkdir_if_missing(osp.dirname(fpath))
self.file = open(fpath, 'w')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def setup_logger(output=None):
if output is None:
return
if output.endswith('.txt') or output.endswith('.log'):
fpath = output
else:
fpath = osp.join(output, 'log.txt')
fpath += time.strftime('-%Y-%m-%d-%H-%M-%S')
sys.stdout = Logger(fpath)
class AverageMeter(object):
"""Compute and store the average and current value.
Examples::
>>> # 1. Initialize a meter to record loss
>>> losses = AverageMeter()
>>> # 2. Update meter after every mini-batch update
>>> losses.update(loss_value, batch_size)
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
| 2,424
| 22.095238
| 90
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/main_utils.py
|
import tensorflow as tf
import os
import joblib
import numpy as np
from mpi4py import MPI
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
from coinrun.config import Config
from coinrun import setup_utils, wrappers
import platform
def make_general_env(num_env, seed=0, use_sub_proc=True):
from coinrun import coinrunenv
env = coinrunenv.make(Config.GAME_TYPE, num_env)
if Config.FRAME_STACK > 1:
env = VecFrameStack(env, Config.FRAME_STACK)
epsilon = Config.EPSILON_GREEDY
if epsilon > 0:
env = wrappers.EpsilonGreedyWrapper(env, epsilon)
return env
def file_to_path(filename):
return setup_utils.file_to_path(filename)
def load_all_params(sess):
load_params_for_scope(sess, 'model')
def load_params_for_scope(sess, scope, load_key='default'):
load_data = Config.get_load_data(load_key)
if load_data is None:
return False
params_dict = load_data['params']
if scope in params_dict:
print('Loading saved file for scope', scope)
loaded_params = params_dict[scope]
loaded_params, params = get_savable_params(loaded_params, scope, keep_heads=True)
restore_params(sess, loaded_params, params)
return True
def get_savable_params(loaded_params, scope, keep_heads=False):
params = tf.trainable_variables(scope)
filtered_params = []
filtered_loaded = []
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
for p, loaded_p in zip(params, loaded_params):
keep = True
if any((scope + '/' + x) in p.name for x in ['v','pi']):
keep = keep_heads
if keep:
filtered_params.append(p)
filtered_loaded.append(loaded_p)
else:
print('drop', p)
return filtered_loaded, filtered_params
def restore_params(sess, loaded_params, params):
if len(loaded_params) != len(params):
print('param mismatch', len(loaded_params), len(params))
assert(False)
restores = []
for p, loaded_p in zip(params, loaded_params):
print('restoring', p)
restores.append(p.assign(loaded_p))
sess.run(restores)
def save_params_in_scopes(sess, scopes, filename, base_dict=None):
data_dict = {}
if base_dict is not None:
data_dict.update(base_dict)
save_path = file_to_path(filename)
data_dict['args'] = Config.get_args_dict()
param_dict = {}
for scope in scopes:
params = tf.trainable_variables(scope)
if len(params) > 0:
print('saving scope', scope, filename)
ps = sess.run(params)
param_dict[scope] = ps
data_dict['params'] = param_dict
joblib.dump(data_dict, save_path)
def setup_mpi_gpus():
if 'RCALL_NUM_GPU' not in os.environ:
return
num_gpus = int(os.environ['RCALL_NUM_GPU'])
node_id = platform.node()
nodes = MPI.COMM_WORLD.allgather(node_id)
local_rank = len([n for n in nodes[:MPI.COMM_WORLD.Get_rank()] if n == node_id])
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus + Config.GPU_OFFSET)
# os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
def is_mpi_root():
return MPI.COMM_WORLD.Get_rank() == 0
def tf_initialize(sess):
sess.run(tf.initialize_all_variables())
sync_from_root(sess)
def sync_from_root(sess, vars=None):
if vars is None:
vars = tf.trainable_variables()
if Config.SYNC_FROM_ROOT:
rank = MPI.COMM_WORLD.Get_rank()
print('sync from root', rank)
for var in vars:
if rank == 0:
MPI.COMM_WORLD.bcast(sess.run(var))
else:
sess.run(tf.assign(var, MPI.COMM_WORLD.bcast(None)))
def mpi_average(values):
return mpi_average_comm(values, MPI.COMM_WORLD)
def mpi_average_comm(values, comm):
size = comm.size
x = np.array(values)
buf = np.zeros_like(x)
comm.Allreduce(x, buf, op=MPI.SUM)
buf = buf / size
return buf
def mpi_average_train_test(values):
return mpi_average_comm(values, Config.TRAIN_TEST_COMM)
def mpi_print(*args):
rank = MPI.COMM_WORLD.Get_rank()
if rank == 0:
print(*args)
def process_ep_buf(epinfobuf, tb_writer=None, suffix='', step=0):
rewards = [epinfo['r'] for epinfo in epinfobuf]
rew_mean = np.nanmean(rewards)
if Config.SYNC_FROM_ROOT:
rew_mean = mpi_average_train_test([rew_mean])[0]
if tb_writer is not None:
tb_writer.log_scalar(rew_mean, 'rew_mean' + suffix, step)
aux_dicts = []
if len(epinfobuf) > 0 and 'aux_dict' in epinfobuf[0]:
aux_dicts = [epinfo['aux_dict'] for epinfo in epinfobuf]
if len(aux_dicts) > 0:
keys = aux_dicts[0].keys()
for key in keys:
sub_rews = [aux_dict[key] for aux_dict in aux_dicts]
sub_rew = np.nanmean(sub_rews)
if tb_writer is not None:
tb_writer.log_scalar(sub_rew, key, step)
return rew_mean
| 5,094
| 25.957672
| 89
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/random_agent.py
|
import numpy as np
from coinrun import setup_utils, make
def random_agent(num_envs=1, max_steps=100000):
setup_utils.setup_and_load(use_cmd_line_args=False)
env = make('standard', num_envs=num_envs)
for step in range(max_steps):
acts = np.array([env.action_space.sample() for _ in range(env.num_envs)])
_obs, rews, _dones, _infos = env.step(acts)
print("step", step, "rews", rews)
env.close()
if __name__ == '__main__':
random_agent()
| 482
| 29.1875
| 81
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/wrappers.py
|
import gym
import numpy as np
class EpsilonGreedyWrapper(gym.Wrapper):
"""
Wrapper to perform a random action each step instead of the requested action,
with the provided probability.
"""
def __init__(self, env, prob=0.05):
gym.Wrapper.__init__(self, env)
self.prob = prob
self.num_envs = env.num_envs
def reset(self):
return self.env.reset()
def step(self, action):
        if np.random.uniform() < self.prob:
action = np.random.randint(self.env.action_space.n, size=self.num_envs)
return self.env.step(action)
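    # Note: the uniform draw happens once per step, so when it fires the
    # requested actions of *all* environments are replaced with random ones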
class EpisodeRewardWrapper(gym.Wrapper):
def __init__(self, env):
env.metadata = {'render.modes': []}
env.reward_range = (-float('inf'), float('inf'))
nenvs = env.num_envs
self.num_envs = nenvs
super(EpisodeRewardWrapper, self).__init__(env)
self.aux_rewards = None
self.num_aux_rews = None
def reset(**kwargs):
self.rewards = np.zeros(nenvs)
self.lengths = np.zeros(nenvs)
self.aux_rewards = None
self.long_aux_rewards = None
return self.env.reset(**kwargs)
def step(action):
obs, rew, done, infos = self.env.step(action)
if self.aux_rewards is None:
info = infos[0]
if 'aux_rew' in info:
self.num_aux_rews = len(infos[0]['aux_rew'])
else:
self.num_aux_rews = 0
self.aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.long_aux_rewards = np.zeros((nenvs, self.num_aux_rews), dtype=np.float32)
self.rewards += rew
self.lengths += 1
use_aux = self.num_aux_rews > 0
if use_aux:
for i, info in enumerate(infos):
self.aux_rewards[i,:] += info['aux_rew']
self.long_aux_rewards[i,:] += info['aux_rew']
for i, d in enumerate(done):
if d:
epinfo = {'r': round(self.rewards[i], 6), 'l': self.lengths[i], 't': 0}
aux_dict = {}
for nr in range(self.num_aux_rews):
aux_dict['aux_' + str(nr)] = self.aux_rewards[i,nr]
if 'ale.lives' in infos[i]:
game_over_rew = np.nan
is_game_over = infos[i]['ale.lives'] == 0
if is_game_over:
game_over_rew = self.long_aux_rewards[i,0]
self.long_aux_rewards[i,:] = 0
aux_dict['game_over_rew'] = game_over_rew
epinfo['aux_dict'] = aux_dict
infos[i]['episode'] = epinfo
self.rewards[i] = 0
self.lengths[i] = 0
self.aux_rewards[i,:] = 0
return obs, rew, done, infos
self.reset = reset
self.step = step
def add_final_wrappers(env):
env = EpisodeRewardWrapper(env)
return env
| 3,145
| 30.148515
| 94
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/config.py
|
from mpi4py import MPI
import argparse
import os
class ConfigSingle(object):
"""
A global config object that can be initialized from command line arguments or
keyword arguments.
"""
def __init__(self):
self.WORKDIR = './saved_models'
self.TB_DIR = './tb_log'
if not os.path.exists(self.WORKDIR):
os.makedirs(self.WORKDIR, exist_ok=True)
self.LOG_ALL_MPI = True
self.SYNC_FROM_ROOT = True
arg_keys = []
bool_keys = []
type_keys = []
### MixStyle
# Apply MixStyle
bool_keys.append(('mixstyle', 'mixstyle'))
# Alpha value of Beta distribution in MixStyle
type_keys.append(('alpha', 'alpha', float, 0.3))
        # If there are 5 values and the last one is True, it gets saved & loaded (although I'm not sure that's working properly)
### Only for test_vars and enjoy:
# Helper for the case that we need to replay
bool_keys.append(('replay', 'replay'))
# Probability to save each image when running coinrun.enjoy.
type_keys.append(('save-images', 'save_images', float, 0))
# The runid whose parameters and settings you want to load.
type_keys.append(('residd', 'restore_idd', str, None))
        # Use SNI (Selective Noise Injection)
        bool_keys.append(('sni', 'sni', True))
        # Use SNI2 (a variant of Selective Noise Injection)
        bool_keys.append(('sni2', 'sni2', True))
        ### Others:
        # Use the long training schedule (200M timesteps)
        bool_keys.append(('long', 'long_training'))
        # Use the short training schedule (120M timesteps)
        bool_keys.append(('short', 'short_training'))
        # Beta value for Info-loss KL divergence. -1 leaves this loss term out. 0 will probably diverge
        type_keys.append(('b', 'beta', float, -1., True))
        # Number of samples to draw
        type_keys.append(('nr-samples', 'nr_samples', int, 1, True))
# Beta value for Info-loss L2 on Activations. -1 leaves this loss term out. 0 will probably diverge
type_keys.append(('bl2a', 'beta_l2a', float, -1., True))
        # One in every `test_ratio` MPI ranks acts as a test process. Defaults to 4
        type_keys.append(('test-ratio', 'test_ratio', int, 4, False))
# Deactivated because not needed. To re-activate, uncomment line in main_utils.py as well
# GPU offset: Use RCALL_NUM_GPU, starting from this value
type_keys.append(('gpu-offset', 'gpu_offset', int, 0, False))
# The runid, used to determine the name for save files.
type_keys.append(('runid', 'run_id', str, 'tmp'))
# The runid whose parameters and settings you want to load.
type_keys.append(('resid', 'restore_id', str, None))
# Restore number of updates
type_keys.append(('resstep', 'restore_step', int, None))
# The game to be played.
# One of {'standard', 'platform', 'maze'} (for CoinRun, CoinRun-Platforms, Random-Mazes)
type_keys.append(('gamet', 'game_type', str, 'standard', True))
# The convolutional architecture to use
# One of {'nature', 'impala', 'impalalarge'}
type_keys.append(('arch', 'architecture', str, 'impala', True))
# Should the model include an LSTM
type_keys.append(('lstm', 'use_lstm', int, 0, True))
# The number of parallel environments to run
type_keys.append(('ne', 'num_envs', int, 32, True))
# The number of levels in the training set.
# If NUM_LEVELS = 0, the training set is unbounded. All level seeds will be randomly generated.
# Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper.
type_keys.append(('nlev', 'num_levels', int, 0, True))
# Provided as a seed for training set generation.
        # If SET_SEED = -1, this seed is not used and level seeds will be drawn from the range [0, NUM_LEVELS).
# Use SET_SEED = -1 and NUM_LEVELS = 500 to train with the same levels in the paper.
# NOTE: This value must and will be saved, in order to use the same training set for evaluation and/or visualization.
type_keys.append(('set-seed', 'set_seed', int, -1, True))
# PPO Hyperparameters
type_keys.append(('ns', 'num_steps', int, 256))
type_keys.append(('nmb', 'num_minibatches', int, 8))
type_keys.append(('ppoeps', 'ppo_epochs', int, 3))
type_keys.append(('ent', 'entropy_coeff', float, .01))
type_keys.append(('lr', 'learning_rate', float, 5e-4))
type_keys.append(('gamma', 'gamma', float, 0.999))
# Should the agent's velocity be painted in the upper left corner of observations.
# 1/0 means True/False
# PAINT_VEL_INFO = -1 uses smart defaulting -- will default to 1 if GAME_TYPE is 'standard' (CoinRun), 0 otherwise
type_keys.append(('pvi', 'paint_vel_info', int, -1, True))
# Should batch normalization be used after each convolutional layer
# 1/0 means True/False
# This code only supports training-mode batch normalization (normalizing with statistics of the current batch).
# In practice, we found this is nearly as effective as tracking the moving average of the statistics.
# NOTE: Only applies to IMPALA and IMPALA-Large architectures
type_keys.append(('norm', 'use_batch_norm', int, 0, True))
# What dropout probability to use
type_keys.append(('dropout', 'dropout', float, 0.0, True))
# Use OPENAI version of dropout
bool_keys.append(('openai', 'openai', True))
# # What dropout probability to use
# type_keys.append(('dropout-openai', 'dropout_openai', float, 0.0, True))
# Should data augmentation be used
# 1/0 means True/False
type_keys.append(('uda', 'use_data_augmentation', int, 0))
# The l2 penalty to use during training
type_keys.append(('l2', 'l2_weight', float, 0.0))
# The probability the agent's action is replaced with a random action
type_keys.append(('eps', 'epsilon_greedy', float, 0.0))
# The number of frames to stack for each observation.
# No frame stack is necessary if PAINT_VEL_INFO = 1
type_keys.append(('fs', 'frame_stack', int, 1, True))
# Should observations be transformed to grayscale
# 1/0 means True/False
type_keys.append(('ubw', 'use_black_white', int, 0, True))
# Overwrite the latest save file after this many updates
type_keys.append(('si', 'save_interval', int, 10))
# The number of evaluation environments to use
type_keys.append(('num-eval', 'num_eval', int, 20, False))
# The number of episodes to evaluate with each evaluation environment
type_keys.append(('rep', 'rep', int, 1))
        # Should half the workers act solely as test workers for evaluation
        # These workers will run on test levels and not contribute to training
bool_keys.append(('test', 'test'))
# Perform evaluation with all levels sampled from the training set
bool_keys.append(('train-eval', 'train_eval'))
# Perform evaluation with all levels sampled from the test set (unseen levels of high difficulty)
bool_keys.append(('test-eval', 'test_eval'))
# Only generate high difficulty levels
bool_keys.append(('highd', 'high_difficulty'))
# Use high resolution images for rendering
bool_keys.append(('hres', 'is_high_res'))
self.RES_KEYS = []
for tk in type_keys:
arg_keys.append(self.process_field(tk[1]))
if (len(tk) > 4) and tk[4]:
self.RES_KEYS.append(tk[1])
for bk in bool_keys:
arg_keys.append(bk[1])
if (len(bk) > 2) and bk[2]:
self.RES_KEYS.append(bk[1])
self.arg_keys = arg_keys
self.bool_keys = bool_keys
self.type_keys = type_keys
self.load_data = {}
self.args_dict = {}
# print("Rank {} is a test rank: {}".format(MPI.COMM_WORLD.Get_rank(), self.is_test_rank()))
def is_test_rank(self):
if self.TEST:
rank = MPI.COMM_WORLD.Get_rank()
return rank % self.TEST_RATIO == 1
return False
def get_test_frac(self):
return .5 if self.TEST else 0
def get_load_data(self, load_key='default'):
        if load_key not in self.load_data:
return None
return self.load_data[load_key]
def set_load_data(self, ld, load_key='default'):
self.load_data[load_key] = ld
def process_field(self, name):
return name.replace('-','_')
def deprocess_field(self, name):
return name.replace('_','-')
def parse_all_args(self, args):
assert isinstance(args, argparse.Namespace), 'expected argparse.Namespace object'
update_dict = vars(args)
self.parse_args_dict(update_dict)
def parse_args_dict(self, update_dict):
self.args_dict.update(update_dict)
for ak in self.args_dict:
val = self.args_dict[ak]
if isinstance(val, str):
val = self.process_field(val)
setattr(self, ak.upper(), val)
self.compute_args_dependencies()
def compute_args_dependencies(self):
if self.is_test_rank():
self.NUM_LEVELS = 0
self.USE_DATA_AUGMENTATION = 0
self.EPSILON_GREEDY = 0
self.HIGH_DIFFICULTY = 1
if self.PAINT_VEL_INFO < 0:
if self.GAME_TYPE == 'standard':
self.PAINT_VEL_INFO = 1
else:
self.PAINT_VEL_INFO = 0
if self.TEST_EVAL:
self.NUM_LEVELS = 0
self.HIGH_DIFFICULTY = 1
self.TRAIN_TEST_COMM = MPI.COMM_WORLD.Split(1 if self.is_test_rank() else 0, 0)
def get_load_filename(self, base_name=None, restore_id=None):
if restore_id is None:
restore_id = Config.RESTORE_ID
if restore_id is None:
return None
filename = Config.get_save_file_for_rank(0, self.process_field(restore_id), base_name=base_name)
return filename
def get_save_path(self, runid=None):
return self.WORKDIR + self.get_save_file(runid)
def get_save_file_for_rank(self, rank, runid=None, base_name=None):
if runid is None:
runid = self.RUN_ID
extra = ''
if base_name is not None:
extra = '_' + base_name
return 'sav_' + runid + extra + '_' + str(rank)
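# Illustration (example values, not from the original source): rank 0 with
# RUN_ID 'myrun' and no base_name yields 'sav_myrun_0'; base_name 'best'
# would yield 'sav_myrun_best_0'.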
def get_save_file(self, runid=None, base_name=None):
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
return self.get_save_file_for_rank(rank, runid, base_name=base_name)
def get_arg_text(self):
arg_strs = []
for key in self.args_dict:
arg_strs.append(key + '=' + str(self.args_dict[key]))
return arg_strs
def get_args_dict(self):
_args_dict = {}
_args_dict.update(self.args_dict)
return _args_dict
def initialize_args(self, use_cmd_line_args=True, **kwargs):
default_args = {}
for tk in self.type_keys:
default_args[self.process_field(tk[1])] = tk[3]
for bk in self.bool_keys:
default_args[bk[1]] = False
default_args.update(kwargs)
# print("Default args: {}".format(default_args))
parser = argparse.ArgumentParser()
for tk in self.type_keys:
parser.add_argument('-' + tk[0], '--' + self.deprocess_field(tk[1]), type=tk[2], default=default_args[tk[1]])
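# Illustration (not in the original source): a type_key such as
# ('lr', 'learning_rate', float, 5e-4) becomes the command-line option
# -lr / --learning-rate, parsed as float, with default 5e-4 unless overridden
# through the kwargs above.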
for bk in self.bool_keys:
parser.add_argument('--' + bk[0], dest=bk[1], action='store_true')
bk_kwargs = {bk[1]: default_args[bk[1]]}
parser.set_defaults(**bk_kwargs)
if use_cmd_line_args:
args = parser.parse_args()
else:
args = parser.parse_args(args=[])
self.parse_all_args(args)
return args
Config = ConfigSingle()
| 12,414
| 35.514706
| 129
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/policies.py
|
import sys
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from baselines.common.distributions import make_pdtype, _matching_fc
from baselines.common.input import observation_input
ds = tf.contrib.distributions
from coinrun.config import Config
def mixstyle_layer(x, alpha=0.3, eps=1e-6):
# x: (batch, height, width, channel)
mu, var = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
sig = tf.math.sqrt(var + eps)
mu = tf.stop_gradient(mu)
sig = tf.stop_gradient(sig)
x_normed = (x - mu) / sig
idxs = tf.range(start=0, limit=tf.shape(x)[0], dtype=tf.int32)
shuffled_idxs = tf.random.shuffle(idxs)
mu2 = tf.gather(mu, shuffled_idxs)
sig2 = tf.gather(sig, shuffled_idxs)
lmda = tf.distributions.Beta(alpha, alpha).sample([tf.shape(x)[0], 1, 1, 1])
mu_mix = mu * lmda + mu2 * (1 - lmda)
sig_mix = sig * lmda + sig2 * (1 - lmda)
x_out = x_normed * sig_mix + mu_mix
return x_out
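# A minimal NumPy reference (hypothetical helper, not part of the original file)
# showing what mixstyle_layer computes for one pair of feature maps: normalize a
# map with its own spatial statistics, then de-normalize with statistics mixed by
# a Beta-sampled weight lmda.
def _mixstyle_numpy_reference(x1, x2, lmda, eps=1e-6):
    import numpy as np
    # x1, x2: arrays of shape (height, width, channels); lmda: scalar in [0, 1]
    mu1 = x1.mean(axis=(0, 1), keepdims=True)
    sig1 = np.sqrt(x1.var(axis=(0, 1), keepdims=True) + eps)
    mu2 = x2.mean(axis=(0, 1), keepdims=True)
    sig2 = np.sqrt(x2.var(axis=(0, 1), keepdims=True) + eps)
    x1_normed = (x1 - mu1) / sig1
    mu_mix = mu1 * lmda + mu2 * (1 - lmda)
    sig_mix = sig1 * lmda + sig2 * (1 - lmda)
    return x1_normed * sig_mix + mu_mix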
def impala_cnn(images, depths=[16, 32, 32]):
use_batch_norm = Config.USE_BATCH_NORM == 1
slow_dropout_assign_ops = []
fast_dropout_assign_ops = []
def dropout_openai(out, rate, name):
out_shape = out.get_shape().as_list()
var_name = 'mask_{}'.format(name)
batch_seed_shape = out_shape[1:]
batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)
batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))
dout_assign_ops = [batch_seed_assign]
curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - rate))
curr_mask = curr_mask * (1.0 / (1.0 - rate))
out = out * curr_mask
return out, dout_assign_ops
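# Note on the mechanism above (descriptive comment added for clarity): the mask is
# derived from a non-trainable variable, so the same dropout pattern is reused
# across forward passes until the returned assign op is executed to resample it.
# CnnPolicy below keeps the 'fast' assign ops for training and the 'slow' ones for
# rollouts ("Train is updated for each gradient update, slow is only updated once
# per batch").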
def conv_layer(out, depth):
out = tf.layers.conv2d(out, depth, 3, padding='same')
if use_batch_norm:
out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)
return out
def residual_block(inputs):
depth = inputs.get_shape()[-1].value
out = tf.nn.relu(inputs)
out = conv_layer(out, depth)
out = tf.nn.relu(out)
out = conv_layer(out, depth)
return out + inputs
def conv_sequence(inputs, depth):
out = conv_layer(inputs, depth)
out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
out = residual_block(out)
out = residual_block(out)
return out
if Config.MIXSTYLE:
print('Do MixStyle')
out = images
for nr, depth in enumerate(depths):
out = conv_sequence(out, depth)
if nr in [0, 1] and Config.MIXSTYLE:
#prob = tf.random_uniform([], minval=0, maxval=1)
#do_mixstyle = tf.less(prob, 0.5) # do mixstyle with a probability
#out = tf.cond(do_mixstyle, lambda: mixstyle_layer(out, alpha=Config.ALPHA), lambda: out)
out = mixstyle_layer(out, alpha=Config.ALPHA)
out = tf.layers.flatten(out)
out = tf.nn.relu(out)
if Config.BETA >= 0:
print("Creating VIB layer")
params = tf.layers.dense(out, 256*2)
mu, rho = params[:, :256], params[:, 256:]
encoding = ds.NormalWithSoftplusScale(mu, rho - 5.0)
with tf.variable_scope("info_loss"):
prior = ds.Normal(0.0, 1.0)
info_loss = tf.reduce_sum(tf.reduce_mean(
ds.kl_divergence(encoding, prior), 0)) / np.log(2)
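# The KL divergence to the unit-Gaussian prior is averaged over the batch and
# summed over the 256 latent dimensions; dividing by np.log(2) expresses the
# information penalty in bits rather than nats.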
# info_loss = tf.identity(info_loss, name="info_loss")
tf.add_to_collection("INFO_LOSS", info_loss)
# info_loss = tf.Print(info_loss, [info_loss])
with tf.control_dependencies([info_loss]):
batch_size = tf.shape(out)[0]
# batch_size = tf.Print(batch_size, [tf.shape(out)])
out = tf.reshape(
encoding.sample(Config.NR_SAMPLES),
shape=(batch_size * Config.NR_SAMPLES, 256))
out_mean = mu
elif Config.BETA_L2A >= 0:
print("Creating L2A regularized layer")
out = tf.layers.dense(out, 256)
with tf.variable_scope("info_loss"):
info_loss = tf.reduce_sum(tf.reduce_mean(tf.square(out), 0))
tf.add_to_collection("INFO_LOSS_L2A", info_loss)
with tf.control_dependencies([info_loss]):
out = tf.identity(out)
out_mean = out
# elif Config.DROPOUT > 0:
# print("Creating Dropout layer")
# out_mean = tf.layers.dense(out, 256)
# out = tf.nn.dropout(out_mean, rate=Config.DROPOUT)
elif Config.DROPOUT > 0:
print("Creating Dropout layer")
latent = tf.layers.dense(out, 256)
out, fast_dropout_assign_ops = dropout_openai(latent, rate=Config.DROPOUT, name='fast')
out_mean, slow_dropout_assign_ops = dropout_openai(latent, rate=Config.DROPOUT, name='slow')
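# Both outputs share the same dense features: 'out' applies the 'fast' dropout
# mask and 'out_mean' the 'slow' one; per the comments in CnnPolicy, the fast mask
# is meant to be resampled every gradient update and the slow mask once per batch.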
else:
out = tf.layers.dense(out, 256)
out_mean = out
out = tf.identity(out, name="bottleneck_layer")
tf.add_to_collection("BOTTLENECK_LAYER", out)
out = tf.nn.relu(out)
out_mean = tf.nn.relu(out_mean)
return out, out_mean, slow_dropout_assign_ops, fast_dropout_assign_ops
def choose_cnn(images):
arch = Config.ARCHITECTURE
scaled_images = tf.cast(images, tf.float32) / 255.
if arch == 'nature':
raise NotImplementedError()
out = nature_cnn(scaled_images)
elif arch == 'impala':
return impala_cnn(scaled_images)
elif arch == 'impalalarge':
return impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])
else:
assert(False)
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613
self.pdtype = make_pdtype(ac_space)
# So that I can compute the saliency map
if Config.REPLAY:
X = tf.placeholder(shape=(nbatch,) + ob_space.shape, dtype=np.float32, name='Ob')
processed_x = X
else:
X, processed_x = observation_input(ob_space, nbatch)
with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
h, h_vf, slow_dropout_assign_ops, fast_dropout_assigned_ops = choose_cnn(processed_x)
self.train_dropout_assign_ops = fast_dropout_assigned_ops
self.run_dropout_assign_ops = slow_dropout_assign_ops
# Noisy policy and value function for train
if Config.BETA >= 0:
pdparam = _matching_fc(h, 'pi', ac_space.n, init_scale=1.0, init_bias=0)
pdparam = tf.reshape(pdparam, shape=(Config.NR_SAMPLES, -1, ac_space.n))
pdparam = tf.transpose(pdparam, perm=[1,0,2])
dists = ds.Categorical(logits=pdparam)
self.pd_train = ds.MixtureSameFamily(
mixture_distribution=ds.Categorical(probs=[1./Config.NR_SAMPLES]*Config.NR_SAMPLES),
components_distribution=dists)
self.pd_train.neglogp = lambda a: - self.pd_train.log_prob(a)
self.vf_train = tf.reduce_mean(tf.reshape(fc(h, 'v', 1), shape=(Config.NR_SAMPLES, -1, 1)), 0)[:, 0]
else:
self.pd_train, _ = self.pdtype.pdfromlatent(h, init_scale=0.01)
self.vf_train = fc(h, 'v', 1)[:, 0]
if Config.SNI:
assert Config.DROPOUT == 0
assert not Config.OPENAI
# Used with VIB: Noiseless pd_run and _both_ value functions
print("Activating SNI (includes VF)")
# Use deterministic value function for both as VIB for regression seems like a bad idea
self.vf_run = self.vf_train = fc(h_vf, 'v', 1)[:, 0]
# Have a deterministic run policy based on the mean
self.pd_run, _ = self.pdtype.pdfromlatent(h_vf, init_scale=0.01)
elif Config.SNI2:
assert not Config.OPENAI
# Used with Dropout instead of OPENAI modifier
# 'RUN' versions are updated slowly, train versions updated faster, gradients are mixed
print("Activating SNI2")
# Deterministic bootstrap value... doesn't really matter but this is more consistent
self.vf_run = fc(h_vf, 'v', 1)[:, 0]
# Run policy based on slow changing latent
self.pd_run, _ = self.pdtype.pdfromlatent(h_vf, init_scale=0.01)
# Train is updated for each gradient update, slow is only updated once per batch
elif Config.OPENAI:
# Completely overwrite train versions as everything changes slowly
# Train version is same as run version, both of which are slow
self.pd_run, _ = self.pdtype.pdfromlatent(h_vf, init_scale=0.01)
self.pd_train = self.pd_run
self.vf_run = self.vf_train = fc(h_vf, 'v', 1)[:, 0]
# Stochastic version is never used, so can set to ignore
self.train_dropout_assign_ops = []
else:
# Plain Dropout version: Only fast updates / stochastic latent for VIB
self.pd_run = self.pd_train
self.vf_run = self.vf_train
# For Dropout: Always change layer, so slow layer is never used
self.run_dropout_assign_ops = []
# Used in step
a0_run = self.pd_run.sample()
neglogp0_run = self.pd_run.neglogp(a0_run)
self.initial_state = None
def step(ob, update_frac, *_args, **_kwargs):
if Config.REPLAY:
ob = ob.astype(np.float32)
a, v, neglogp = sess.run([a0_run, self.vf_run, neglogp0_run], {X: ob})
return a, v, self.initial_state, neglogp
def value(ob, update_frac, *_args, **_kwargs):
return sess.run(self.vf_run, {X: ob})
self.X = X
self.processed_x = processed_x
self.step = step
self.value = value
def get_policy():
use_lstm = Config.USE_LSTM
if use_lstm == 1:
raise NotImplementedError()
policy = LstmPolicy
elif use_lstm == 0:
policy = CnnPolicy
else:
assert(False)
return policy
| 10,335
| 39.375
| 150
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/interactive.py
|
"""
Run a CoinRun environment in a window where you can interact with it using the keyboard
"""
from coinrun.coinrunenv import lib
from coinrun import setup_utils
def main():
setup_utils.setup_and_load(paint_vel_info=0)
print("""Control with arrow keys,
F1, F2 -- switch resolution,
F5, F6, F7, F8 -- zoom,
F9 -- switch reconstruction target picture,
F10 -- switch lasers
""")
lib.test_main_loop()
if __name__ == '__main__':
main()
| 457
| 20.809524
| 87
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/__init__.py
|
from .coinrunenv import init_args_and_threads
from .coinrunenv import make
__all__ = [
'init_args_and_threads',
'make'
]
| 134
| 15.875
| 45
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/enjoy.py
|
"""
Load an agent trained with train_agent.py and run it in the environment, either rendering it or evaluating it depending on the config.
"""
import time
import tensorflow as tf
import numpy as np
import os
from coinrun import setup_utils
import coinrun.main_utils as utils
from coinrun.config import Config
from coinrun import policies, wrappers
mpi_print = utils.mpi_print
def create_act_model(sess, env, nenvs):
ob_space = env.observation_space
ac_space = env.action_space
policy = policies.get_policy()
act = policy(sess, ob_space, ac_space, nenvs, 1, reuse=False)
return act
def enjoy_env_sess(sess):
should_render = True
should_eval = Config.TRAIN_EVAL or Config.TEST_EVAL
rep_count = Config.REP
if should_eval:
env = utils.make_general_env(Config.NUM_EVAL)
should_render = False
else:
env = utils.make_general_env(1)
env = wrappers.add_final_wrappers(env)
if should_render:
from gym.envs.classic_control import rendering
nenvs = env.num_envs
agent = create_act_model(sess, env, nenvs)
sess.run(tf.global_variables_initializer())
loaded_params = utils.load_params_for_scope(sess, 'model')
if not loaded_params:
print('NO SAVED PARAMS LOADED')
obs = env.reset()
t_step = 0
if should_render:
viewer = rendering.SimpleImageViewer()
should_render_obs = not Config.IS_HIGH_RES
def maybe_render(info=None):
if should_render and not should_render_obs:
env.render()
maybe_render()
scores = np.array([0] * nenvs)
score_counts = np.array([0] * nenvs)
curr_rews = np.zeros((nenvs, 3))
def should_continue():
if should_eval:
return np.sum(score_counts) < rep_count * nenvs
return True
state = agent.initial_state
done = np.zeros(nenvs)
iteration = 0
import scipy.misc
while should_continue():
iteration += 1
action, values, state, _ = agent.step(obs, state, done)
obs, rew, done, info = env.step(action)
rnd = np.random.random()
directory = "./images/"
if Config.SAVE_IMAGES > rnd:
print(env.env.get_images().shape)
scipy.misc.imsave(directory + 'imgL_{}.png'.format(iteration), env.env.get_images()[0])
if Config.SAVE_IMAGES > rnd:
scipy.misc.imsave(directory + 'img_{}.png'.format(iteration), obs[0])
if should_render and should_render_obs:
if np.shape(obs)[-1] % 3 == 0:
ob_frame = obs[0,:,:,-3:]
else:
ob_frame = obs[0,:,:,-1]
ob_frame = np.stack([ob_frame] * 3, axis=2)
viewer.imshow(ob_frame)
curr_rews[:,0] += rew
for i, d in enumerate(done):
if d:
if score_counts[i] < rep_count:
score_counts[i] += 1
if 'episode' in info[i]:
scores[i] += info[i].get('episode')['r']
if t_step % 100 == 0:
mpi_print('t', t_step, values[0], done[0], rew[0], curr_rews[0], np.shape(obs))
maybe_render(info[0])
t_step += 1
if should_render:
time.sleep(.02)
if done[0]:
if should_render:
mpi_print('ep_rew', curr_rews)
curr_rews[:] = 0
result = 0
if should_eval:
mean_score = np.mean(scores) / rep_count
max_idx = np.argmax(scores)
mpi_print('scores', scores / rep_count)
print('mean_score', mean_score)
mpi_print('max idx', max_idx)
mpi_mean_score = utils.mpi_average([mean_score])
mpi_print('mpi_mean', mpi_mean_score)
result = mean_score
return result
def main():
utils.setup_mpi_gpus()
setup_utils.setup_and_load()
with tf.Session() as sess:
enjoy_env_sess(sess)
if __name__ == '__main__':
main()
| 3,856
| 24.543046
| 99
|
py
|
mixstyle-release
|
mixstyle-release-master/rl/coinrun/OldPlots.py
|
# plotname = "VIB_repeats_{}".format(ending)
# experiments = {
# '0424_vibnn12e4_l2w_uda_{}': "L2W + VIB-SNI (1e-4) + UDA",
# '0424_vibnn12e4_{}': "VIB-SNI (1e-4)",
# '0501_0_vibnn12e4_uda_{}': "VIB-SNI (1e-4) + UDA",
# '0501_1_vibnn12e4_uda_{}': "VIB-SNI (1e-4) + UDA",
# '0501_0_vibnn12e4_l2w_uda_{}': 'L2W + VIB-SNI (1e-4) + UDA',
# '0501_1_vibnn12e4_l2w_uda_{}': 'L2W + VIB-SNI (1e-4) + UDA',
# '0501_0_l2w_uda_{}': 'L2W + UDA',
# '0501_1_l2w_uda_{}': 'L2W + UDA',
# }
# plotname = "L2AvsVIB_v2_{}".format(ending)
# experiments = {
# '0424_vibnn12e4_l2w_uda_{}': "L2W + P-VIB_12 (1e-4) + UDA",
# '0424_vibnn1e4_l2w_uda_{}': "L2W + P-VIB_1 (1e-4) + UDA",
# '0419_vibnn_l2w_uda_{}': 'L2W + VIB (1e-4) + UDA',
# '0416_l2a_l2w_uda_{}': 'L2W + L2A (1e-4) + UDA',
# '0416_l2a_{}': 'L2A (1e-4)',
# '0424_vibnn12e4_{}': 'P-VIB_12 (1e-4)'
# }
#
# experiments = {
# '0416_l2a_l2w_uda_{}': 'L2W + L2A (1e-4) + UDA',
# '0419_l2a3_l2w_uda_{}': 'L2W + L2A (1e-3) + UDA',
# '0419_l2a2_l2w_uda_{}': 'L2W + L2A (1e-2) + UDA',
# '0419_vibnn_l2w_uda_{}': 'L2W + VIB (1e-4) + UDA',
# '0419_vibnn3_l2w_uda_{}': 'L2W + VIB (1e-3) + UDA',
# '0419_vibnn2_l2w_uda_{}': 'L2W + VIB (1e-2) + UDA',
# }
# experiments = {
# '0416_l2a_{}': 'L2A',
# '0416_l2w_{}': 'L2W',
# '0416_l2a_uda_{}': 'L2A + UDA',
# '0416_l2w_uda_{}': 'L2W + UDA',
# '0416_l2a_l2w_uda_{}': 'L2W + L2A + UDA',
# '0419_vibnn_{}': 'VIBNN'
# }
# plotname = "Orig_vs_modified_vib_{}".format(ending)
# experiments = {
# '0322_plain_{}': 'No Regularization',
# '0322_plain_all_{}': 'L2W + UDA + BN',
# '0401_l2a1e4_noUda_{}': 'L2A',
# '0401_l2w_{}': 'L2W',
# '0327_l2a1e4_{}': 'L2A + UDA',
# '0401_l2w_uda_{}': 'L2W + UDA',
# '0401_l2a_l2w_uda_{}': 'L2W + L2A + UDA',
# '0405__vib_l2w_uda_2_{}': 'VIB(orig) + L2W + UDA',
# '0405__vib_l2w_uda_nn_{}': 'VIB(modified) + L2W + UDA',
# '0405__vib_l2w_uda_nvfn_{}': 'VIB(modified + nnvf) + L2W + UDA'
# }
# experiments = {
# '0327_l2a1e2_{}': 'L2A (1e2)',
# '0327_l2a1e3_{}': 'L2A (1e3)',
# '0327_l2a1e4_{}': 'L2A (1e4)',
# '0327_l2a1e5_{}': 'L2A (1e5)',
# }
# experiments = {
# '0322_beta1e4_uda_{}': 'VIB (1e4)',
# '0322_beta3e4_uda_{}': 'VIB (3e4)',
# '0322_betacv_1e_8_{}': 'VIB-CV (1e8)',
# }
# experiments = {
# '0416_l2w_uda_{}': 'L2W + UDA',
# '0416_l2a_uda_{}': 'L2A + UDA'
# }
| 2,420
| 31.28
| 66
|
py
|
scipy
|
scipy-main/setup.py
|
#!/usr/bin/env python
"""SciPy: Scientific Library for Python
SciPy (pronounced "Sigh Pie") is open-source software for mathematics,
science, and engineering. The SciPy library
depends on NumPy, which provides convenient and fast N-dimensional
array manipulation. The SciPy library is built to work with NumPy
arrays, and provides many user-friendly and efficient numerical
routines such as routines for numerical integration and optimization.
Together, they run on all popular operating systems, are quick to
install, and are free of charge. NumPy and SciPy are easy to use,
but powerful enough to be depended upon by some of the world's
leading scientists and engineers. If you need to manipulate
numbers on a computer and display or publish the results,
give SciPy a try!
"""
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
import warnings
import sysconfig
from tools.version_utils import write_version_py, get_version_info
from tools.version_utils import IS_RELEASE_BRANCH
import importlib
if sys.version_info[:2] < (3, 9):
raise RuntimeError("Python version >= 3.9 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: 3.11
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# scipy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__SCIPY_SETUP__ = True
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule %s missing' % p)
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: %s' % line)
class concat_license_files():
"""Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
NumPy gh-13447). This makes GitHub state correctly how SciPy is licensed.
"""
def __init__(self):
self.f1 = 'LICENSE.txt'
self.f2 = 'LICENSES_bundled.txt'
def __enter__(self):
"""Concatenate files and remove LICENSES_bundled.txt"""
with open(self.f1, 'r') as f1:
self.bsd_text = f1.read()
with open(self.f1, 'a') as f1:
with open(self.f2, 'r') as f2:
self.bundled_text = f2.read()
f1.write('\n\n')
f1.write(self.bundled_text)
def __exit__(self, exception_type, exception_value, traceback):
"""Restore content of both files"""
with open(self.f1, 'w') as f:
f.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
with concat_license_files():
sdist.run(self)
def get_build_ext_override():
"""
Custom build_ext command to tweak extension building.
"""
from numpy.distutils.command.build_ext import build_ext as npy_build_ext
if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)):
try:
import pythran
from pythran.dist import PythranBuildExt
except ImportError:
BaseBuildExt = npy_build_ext
else:
BaseBuildExt = PythranBuildExt[npy_build_ext]
_pep440 = importlib.import_module('scipy._lib._pep440')
if _pep440.parse(pythran.__version__) < _pep440.Version('0.11.0'):
raise RuntimeError("The installed `pythran` is too old, >= "
"0.11.0 is needed, {} detected. Please "
"upgrade Pythran, or use `export "
"SCIPY_USE_PYTHRAN=0`.".format(
pythran.__version__))
else:
BaseBuildExt = npy_build_ext
class build_ext(BaseBuildExt):
def finalize_options(self):
super().finalize_options()
# Disable distutils parallel build, due to race conditions
# in numpy.distutils (Numpy issue gh-15957)
if self.parallel:
print("NOTE: -j build option not supported. Set NPY_NUM_BUILD_JOBS=4 "
"for parallel build.")
self.parallel = None
def build_extension(self, ext):
# When compiling with GNU compilers, use a version script to
# hide symbols during linking.
if self.__is_using_gnu_linker(ext):
export_symbols = self.get_export_symbols(ext)
text = '{global: %s; local: *; };' % (';'.join(export_symbols),)
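# For illustration (symbol name hypothetical): with export_symbols of
# ['PyInit__foo'] the generated map file contains
# '{global: PyInit__foo; local: *; };', i.e. only that symbol is exported and
# everything else is hidden at link time.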
script_fn = os.path.join(self.build_temp, 'link-version-{}.map'.format(ext.name))
with open(script_fn, 'w') as f:
f.write(text)
# line below fixes gh-8680
ext.extra_link_args = [arg for arg in ext.extra_link_args if not "version-script" in arg]
ext.extra_link_args.append('-Wl,--version-script=' + script_fn)
# Allow late configuration
hooks = getattr(ext, '_pre_build_hook', ())
_run_pre_build_hooks(hooks, (self, ext))
super().build_extension(ext)
def __is_using_gnu_linker(self, ext):
if not sys.platform.startswith('linux'):
return False
# Fortran compilation with gfortran uses it also for
# linking. For the C compiler, we detect gcc in a similar
# way as distutils does it in
# UnixCCompiler.runtime_library_dir_option
if ext.language == 'f90':
is_gcc = (self._f90_compiler.compiler_type in ('gnu', 'gnu95'))
elif ext.language == 'f77':
is_gcc = (self._f77_compiler.compiler_type in ('gnu', 'gnu95'))
else:
is_gcc = False
if self.compiler.compiler_type == 'unix':
cc = sysconfig.get_config_var("CC")
if not cc:
cc = ""
compiler_name = os.path.basename(cc.split(" ")[0])
is_gcc = "gcc" in compiler_name or "g++" in compiler_name
return is_gcc and sysconfig.get_config_var('GNULD') == 'yes'
return build_ext
def get_build_clib_override():
"""
Custom build_clib command to tweak library building.
"""
from numpy.distutils.command.build_clib import build_clib as old_build_clib
class build_clib(old_build_clib):
def finalize_options(self):
super().finalize_options()
# Disable parallelization (see build_ext above)
self.parallel = None
def build_a_library(self, build_info, lib_name, libraries):
# Allow late configuration
hooks = build_info.get('_pre_build_hook', ())
_run_pre_build_hooks(hooks, (self, build_info))
old_build_clib.build_a_library(self, build_info, lib_name, libraries)
return build_clib
def _run_pre_build_hooks(hooks, args):
"""Call a sequence of pre-build hooks, if any"""
if hooks is None:
hooks = ()
elif not hasattr(hooks, '__iter__'):
hooks = (hooks,)
for hook in hooks:
hook(*args)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'scipy'],
cwd=cwd)
if p != 0:
# Could be due to a too old pip version and build isolation, check that
try:
# Note, pip may not be installed or not have been used
import pip
except (ImportError, ModuleNotFoundError):
raise RuntimeError("Running cythonize failed!")
else:
_pep440 = importlib.import_module('scipy._lib._pep440')
if _pep440.parse(pip.__version__) < _pep440.Version('18.0.0'):
raise RuntimeError("Cython not found or too old. Possibly due "
"to `pip` being too old, found version {}, "
"needed is >= 18.0.0.".format(
pip.__version__))
else:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
Return a boolean value for whether or not to run the build or not (avoid
parsing Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: for reliable uninstall behaviour and dependency installation
and uninstallation, please use pip instead of using
`setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install scipy` (last SciPy release on PyPI)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
SciPy-specific help
-------------------
To install SciPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest SciPy release
from PyPI, use `pip install scipy`.
For help with build/installation issues, please ask on the
scipy-user mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/scipy/scipy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python dev.py test` (to build and test)
- `python dev.py --no-build` (to test installed scipy)
- `>>> scipy.test()` (run tests for installed scipy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>` instead.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
build_sphinx="`setup.py build_sphinx` is not supported, see doc/README.md",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
warnings.warn("Unrecognized setuptools command ('{}'), proceeding with "
"generating Cython sources and expanding templates".format(
' '.join(sys.argv[1:])))
return True
def check_setuppy_command():
run_build = parse_setuppy_commands()
if run_build:
try:
pkgname = 'numpy'
import numpy
pkgname = 'pybind11'
import pybind11
except ImportError as exc: # We do not have our build deps installed
print(textwrap.dedent(
"""Error: '%s' must be installed before running the build.
"""
% (pkgname,)))
sys.exit(1)
return run_build
def configuration(parent_package='', top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
if sys.platform == "darwin":
msg = ('No BLAS/LAPACK libraries found. '
'Note: Accelerate is no longer supported.')
else:
msg = 'No BLAS/LAPACK libraries found.'
msg += ("\n"
"To build Scipy from sources, BLAS & LAPACK libraries "
"need to be installed.\n"
"See site.cfg.example in the Scipy source directory and\n"
"https://docs.scipy.org/doc/scipy/dev/contributor/building.html "
"for details.")
raise NotFoundError(msg)
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('scipy')
config.add_data_files(('scipy', '*.txt'))
config.get_version('scipy/version.py')
return config
def setup_package():
# In maintenance branch, change np_maxversion to N+3 if numpy is at N
# Update here, in pyproject.toml, and in scipy/__init__.py
# Rationale: SciPy builds without deprecation warnings with N; deprecations
# in N+1 will turn into errors in N+3
# For Python versions, if releases is (e.g.) <=3.9.x, set bound to 3.10
np_minversion = '1.22.4'
np_maxversion = '9.9.99'
python_minversion = '3.9'
python_maxversion = '3.11'
if IS_RELEASE_BRANCH:
req_np = 'numpy>={},<{}'.format(np_minversion, np_maxversion)
req_py = '>={},<{}'.format(python_minversion, python_maxversion)
else:
req_np = 'numpy>={}'.format(np_minversion)
req_py = '>={}'.format(python_minversion)
# Rewrite the version file every time
write_version_py('.')
cmdclass = {'sdist': sdist_checked}
metadata = dict(
name='scipy',
maintainer="SciPy Developers",
maintainer_email="scipy-dev@python.org",
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
url="https://www.scipy.org",
download_url="https://github.com/scipy/scipy/releases",
project_urls={
"Bug Tracker": "https://github.com/scipy/scipy/issues",
"Documentation": "https://docs.scipy.org/doc/scipy/reference/",
"Source Code": "https://github.com/scipy/scipy",
},
license='BSD',
cmdclass=cmdclass,
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
install_requires=[req_np],
python_requires=req_py,
zip_safe=False,
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = check_setuppy_command()
# Disable OSX Accelerate, it has too old LAPACK
os.environ['ACCELERATE'] = 'None'
# This import is here because it needs to be done before importing setup()
# from numpy.distutils, but after the MANIFEST removing and sdist import
# higher up in this file.
from setuptools import setup
if run_build:
from numpy.distutils.core import setup
# Customize extension building
cmdclass['build_ext'] = get_build_ext_override()
cmdclass['build_clib'] = get_build_clib_override()
if not 'sdist' in sys.argv:
# Generate Cython sources, unless we're creating an sdist
# Cython is a build dependency, and shipping generated .c files
# can cause problems (see gh-14199)
generate_cython()
metadata['configuration'] = configuration
else:
# Don't import numpy here - non-build actions are required to succeed
# without NumPy for example when pip is used to install Scipy when
# NumPy is not yet present in the system.
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info('.')[0]
setup(**metadata)
if __name__ == '__main__':
setup_package()
| 20,342
| 37.166979
| 109
|
py
|
scipy
|
scipy-main/dev.py
|
#! /usr/bin/env python3
'''
Developer CLI: building (meson), tests, benchmark, etc.
This file contains task definitions for doit (https://pydoit.org).
It also contains a CLI interface using click (https://click.palletsprojects.com).
The CLI is ideal for project contributors, while the
doit interface is better suited for authoring the development tasks.
REQUIREMENTS:
--------------
- see environment.yml: doit, pydevtool, click, rich-click
# USAGE:
## 1 - click API
Commands can be added using the default Click API, i.e.
```
@cli.command()
@click.argument('extra_argv', nargs=-1)
@click.pass_obj
def python(ctx_obj, extra_argv):
"""Start a Python shell with PYTHONPATH set"""
```
## 2 - class based Click command definition
`CliGroup` provides an alternative class based API to create Click commands.
Just use the `cls_cmd` decorator and define a `run()` method
```
@cli.cls_cmd('test')
class Test():
"""Run tests"""
@classmethod
def run(cls):
print('Running tests...')
```
- A command may make use of a Click.Group context by defining a `ctx` class attribute
- Command options are also defined as class attributes
```
@cli.cls_cmd('test')
class Test():
"""Run tests"""
ctx = CONTEXT
verbose = Option(
['--verbose', '-v'], default=False, is_flag=True, help="verbosity")
@classmethod
def run(cls, **kwargs): # kwargs contains options from class and CONTEXT
print('Running tests...')
```
## 3 - class based interface can be run as a doit task by subclassing from Task
- Extra doit task metadata can be defined as the class attribute TASK_META.
- The `run()` method will be used as the python-action for the task
```
@cli.cls_cmd('test')
class Test(Task): # Task base class, doit will create a task
"""Run tests"""
ctx = CONTEXT
TASK_META = {
'task_dep': ['build'],
}
@classmethod
def run(cls, **kwargs):
pass
```
## 4 - doit tasks with cmd-action "shell" or dynamic metadata
Define method `task_meta()` instead of `run()`:
```
@cli.cls_cmd('refguide-check')
class RefguideCheck(Task):
@classmethod
def task_meta(cls, **kwargs):
return {
```
'''
import os
import subprocess
import sys
import warnings
import shutil
import json
import datetime
import time
import platform
import importlib.util
import errno
import contextlib
from sysconfig import get_path
import math
import traceback
from concurrent.futures.process import _MAX_WINDOWS_WORKERS
# distutils is required to infer meson install path
# if this needs to be replaced for Python 3.12 support and there's no
# stdlib alternative, use CmdAction and the hack discussed in gh-16058
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from distutils import dist
from distutils.command.install import INSTALL_SCHEMES
from pathlib import Path
from collections import namedtuple
from types import ModuleType as new_module
from dataclasses import dataclass
import click
from click import Option, Argument
from doit.cmd_base import ModuleTaskLoader
from doit.reporter import ZeroReporter
from doit.exceptions import TaskError
from doit.api import run_tasks
from pydevtool.cli import UnifiedContext, CliGroup, Task
from rich.console import Console
from rich.panel import Panel
from rich.theme import Theme
from rich_click import rich_click
DOIT_CONFIG = {
'verbosity': 2,
'minversion': '0.36.0',
}
console_theme = Theme({
"cmd": "italic gray50",
})
if sys.platform == 'win32':
class EMOJI:
cmd = ">"
else:
class EMOJI:
cmd = ":computer:"
rich_click.STYLE_ERRORS_SUGGESTION = "yellow italic"
rich_click.SHOW_ARGUMENTS = True
rich_click.GROUP_ARGUMENTS_OPTIONS = False
rich_click.SHOW_METAVARS_COLUMN = True
rich_click.USE_MARKDOWN = True
rich_click.OPTION_GROUPS = {
"dev.py": [
{
"name": "Options",
"options": [
"--help", "--build-dir", "--no-build", "--install-prefix"],
},
],
"dev.py test": [
{
"name": "Options",
"options": ["--help", "--verbose", "--parallel", "--coverage",
"--durations"],
},
{
"name": "Options: test selection",
"options": ["--submodule", "--tests", "--mode"],
},
],
}
rich_click.COMMAND_GROUPS = {
"dev.py": [
{
"name": "build & testing",
"commands": ["build", "test"],
},
{
"name": "static checkers",
"commands": ["lint", "mypy"],
},
{
"name": "environments",
"commands": ["shell", "python", "ipython"],
},
{
"name": "documentation",
"commands": ["doc", "refguide-check"],
},
{
"name": "release",
"commands": ["notes", "authors"],
},
{
"name": "benchmarking",
"commands": ["bench"],
},
]
}
class ErrorOnlyReporter(ZeroReporter):
desc = """Report errors only"""
def runtime_error(self, msg):
console = Console()
console.print("[red bold] msg")
def add_failure(self, task, fail_info):
console = Console()
if isinstance(fail_info, TaskError):
console.print(f'[red]Task Error - {task.name}'
f' => {fail_info.message}')
if fail_info.traceback:
console.print(Panel(
"".join(fail_info.traceback),
title=f"{task.name}",
subtitle=fail_info.message,
border_style="red",
))
CONTEXT = UnifiedContext({
'build_dir': Option(
['--build-dir'], metavar='BUILD_DIR',
default='build', show_default=True,
help=':wrench: Relative path to the build directory.'),
'no_build': Option(
["--no-build", "-n"], default=False, is_flag=True,
help=(":wrench: Do not build the project"
" (note event python only modification require build).")),
'install_prefix': Option(
['--install-prefix'], default=None, metavar='INSTALL_DIR',
help=(":wrench: Relative path to the install directory."
" Default is <build-dir>-install.")),
})
def run_doit_task(tasks):
"""
:param tasks: (dict) task_name -> {options}
"""
loader = ModuleTaskLoader(globals())
doit_config = {
'verbosity': 2,
'reporter': ErrorOnlyReporter,
}
return run_tasks(loader, tasks, extra_config={'GLOBAL': doit_config})
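# Usage sketch (mirrors the Lint command at the bottom of this file):
# run_doit_task({'lint': {}}) runs the 'lint' task with its default options.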
class CLI(CliGroup):
context = CONTEXT
run_doit_task = run_doit_task
@click.group(cls=CLI)
@click.pass_context
def cli(ctx, **kwargs):
"""Developer Tool for SciPy
\bCommands that require a built/installed instance are marked with :wrench:.
\b**python dev.py --build-dir my-build test -s stats**
""" # noqa: E501
CLI.update_context(ctx, kwargs)
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'meson.build']
@dataclass
class Dirs:
"""
root:
Directory where src, build config and tools are located
(and this file)
build:
Directory where build output files (i.e. *.o) are saved
install:
Directory where .so from build and .py from src are put together.
site:
Directory where the built SciPy version was installed.
This is a custom prefix, followed by a relative path matching
the one the system would use for the site-packages of the active
Python interpreter.
"""
# all paths are absolute
root: Path
build: Path
installed: Path
site: Path # <install>/lib/python<version>/site-packages
def __init__(self, args=None):
""":params args: object like Context(build_dir, install_prefix)"""
self.root = Path(__file__).parent.absolute()
if not args:
return
self.build = Path(args.build_dir).resolve()
if args.install_prefix:
self.installed = Path(args.install_prefix).resolve()
else:
self.installed = self.build.parent / (self.build.stem + "-install")
if sys.platform == 'win32' and sys.version_info < (3, 10):
# Work around a pathlib bug; these must be absolute paths
self.build = Path(os.path.abspath(self.build))
self.installed = Path(os.path.abspath(self.installed))
# relative path for site-package with py version
# i.e. 'lib/python3.10/site-packages'
self.site = self.get_site_packages()
def add_sys_path(self):
"""Add site dir to sys.path / PYTHONPATH"""
site_dir = str(self.site)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = \
os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))
def get_site_packages(self):
"""
Depending on whether we have debian python or not,
return dist_packages path or site_packages path.
"""
if 'deb_system' in INSTALL_SCHEMES:
# debian patched python in use
install_cmd = dist.Distribution().get_command_obj('install')
install_cmd.select_scheme('deb_system')
install_cmd.finalize_options()
plat_path = Path(install_cmd.install_platlib)
else:
plat_path = Path(get_path('platlib'))
return self.installed / plat_path.relative_to(sys.exec_prefix)
@contextlib.contextmanager
def working_dir(new_dir):
current_dir = os.getcwd()
try:
os.chdir(new_dir)
yield
finally:
os.chdir(current_dir)
def import_module_from_path(mod_name, mod_path):
"""Import module with name `mod_name` from file path `mod_path`"""
spec = importlib.util.spec_from_file_location(mod_name, mod_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
def get_test_runner(project_module):
"""
get Test Runner from locally installed/built project
"""
__import__(project_module)
# scipy._lib._testutils:PytestTester
test = sys.modules[project_module].test
version = sys.modules[project_module].__version__
mod_path = sys.modules[project_module].__file__
mod_path = os.path.abspath(os.path.join(os.path.dirname(mod_path)))
return test, version, mod_path
############
@cli.cls_cmd('build')
class Build(Task):
""":wrench: Build & install package on path.
\b
```python
Examples:
$ python dev.py build --asan ;
ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true
LD_PRELOAD=$(gcc --print-file-name=libasan.so)
python dev.py test -v -t
./scipy/ndimage/tests/test_morphology.py -- -s
```
"""
ctx = CONTEXT
werror = Option(
['--werror'], default=False, is_flag=True,
help="Treat warnings as errors")
gcov = Option(
['--gcov'], default=False, is_flag=True,
help="enable C code coverage via gcov (requires GCC)."
"gcov output goes to build/**/*.gc*")
asan = Option(
['--asan'], default=False, is_flag=True,
help=("Build and run with AddressSanitizer support. "
"Note: the build system doesn't check whether "
"the project is already compiled with ASan. "
"If not, you need to do a clean build (delete "
"build and build-install directories)."))
debug = Option(
['--debug', '-d'], default=False, is_flag=True, help="Debug build")
parallel = Option(
['--parallel', '-j'], default=None, metavar='N_JOBS',
help=("Number of parallel jobs for building. "
"This defaults to the number of available physical CPU cores"))
setup_args = Option(
['--setup-args', '-C'], default=[], multiple=True,
help=("Pass along one or more arguments to `meson setup` "
"Repeat the `-C` in case of multiple arguments."))
show_build_log = Option(
['--show-build-log'], default=False, is_flag=True,
help="Show build output rather than using a log file")
win_cp_openblas = Option(
['--win-cp-openblas'], default=False, is_flag=True,
help=("If set, and on Windows, copy OpenBLAS lib to install directory "
"after meson install. "
"Note: this argument may be removed in the future once a "
"`site.cfg`-like mechanism to select BLAS/LAPACK libraries is "
"implemented for Meson"))
@classmethod
def setup_build(cls, dirs, args):
"""
Setting up meson-build
"""
for fn in PROJECT_ROOT_FILES:
if not (dirs.root / fn).exists():
print("To build the project, run dev.py in "
"git checkout or unpacked source")
sys.exit(1)
env = dict(os.environ)
cmd = ["meson", "setup", dirs.build, "--prefix", dirs.installed]
build_dir = dirs.build
run_dir = Path()
if build_dir.exists() and not (build_dir / 'meson-info').exists():
if list(build_dir.iterdir()):
raise RuntimeError("Can't build into non-empty directory "
f"'{build_dir.absolute()}'")
if sys.platform == "cygwin":
# Cygwin only has netlib lapack, but can link against
# OpenBLAS rather than netlib blas at runtime. There is
# no libopenblas-devel to enable linking against
# openblas-specific functions or OpenBLAS Lapack
cmd.extend(["-Dlapack=lapack", "-Dblas=blas"])
build_options_file = (
build_dir / "meson-info" / "intro-buildoptions.json")
if build_options_file.exists():
with open(build_options_file) as f:
build_options = json.load(f)
installdir = None
for option in build_options:
if option["name"] == "prefix":
installdir = option["value"]
break
if installdir != str(dirs.installed):
run_dir = build_dir
cmd = ["meson", "setup", "--reconfigure",
"--prefix", str(dirs.installed)]
else:
return
if args.werror:
cmd += ["--werror"]
if args.gcov:
cmd += ['-Db_coverage=true']
if args.asan:
cmd += ['-Db_sanitize=address,undefined']
if args.setup_args:
cmd += [str(arg) for arg in args.setup_args]
# Setting up meson build
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
ret = subprocess.call(cmd, env=env, cwd=run_dir)
if ret == 0:
print("Meson build setup OK")
else:
print("Meson build setup failed!")
sys.exit(1)
return env
@classmethod
def build_project(cls, dirs, args, env):
"""
Build a dev version of the project.
"""
cmd = ["ninja", "-C", str(dirs.build)]
if args.parallel is None:
# Use number of physical cores rather than ninja's default of 2N+2,
# to avoid out of memory issues (see gh-17941 and gh-18443)
n_cores = cpu_count(only_physical_cores=True)
cmd += [f"-j{n_cores}"]
else:
cmd += ["-j", str(args.parallel)]
# Building with ninja-backend
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
ret = subprocess.call(cmd, env=env, cwd=dirs.root)
if ret == 0:
print("Build OK")
else:
print("Build failed!")
sys.exit(1)
@classmethod
def install_project(cls, dirs, args):
"""
Installs the project after building.
"""
if dirs.installed.exists():
non_empty = len(os.listdir(dirs.installed))
if non_empty and not dirs.site.exists():
raise RuntimeError("Can't install in non-empty directory: "
f"'{dirs.installed}'")
cmd = ["meson", "install", "-C", args.build_dir, "--only-changed"]
log_filename = dirs.root / 'meson-install.log'
start_time = datetime.datetime.now()
cmd_str = ' '.join([str(p) for p in cmd])
cls.console.print(f"{EMOJI.cmd} [cmd] {cmd_str}")
if args.show_build_log:
ret = subprocess.call(cmd, cwd=dirs.root)
else:
print("Installing, see meson-install.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, stdout=log, stderr=log,
cwd=dirs.root)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments to kill a hanging
# process accurately if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
elapsed = datetime.datetime.now() - start_time
print(" ... installation in progress ({} "
"elapsed)".format(elapsed))
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
elapsed = datetime.datetime.now() - start_time
if ret != 0:
if not args.show_build_log:
with open(log_filename) as f:
print(f.read())
print(f"Installation failed! ({elapsed} elapsed)")
sys.exit(1)
# ignore everything in the install directory.
with open(dirs.installed / ".gitignore", "w") as f:
f.write("*")
if sys.platform == "cygwin":
rebase_cmd = ["/usr/bin/rebase", "--database", "--oblivious"]
rebase_cmd.extend(Path(dirs.installed).glob("**/*.dll"))
subprocess.check_call(rebase_cmd)
print("Installation OK")
return
@classmethod
def copy_openblas(cls, dirs):
"""
Copies OpenBLAS DLL to the SciPy install dir, and also overwrites the
default `_distributor_init.py` file with the one
we use for wheels uploaded to PyPI so that DLL gets loaded.
Assumes pkg-config is installed and aware of OpenBLAS.
The "dirs" parameter is typically a "Dirs" object with the
structure as the following, say, if dev.py is run from the
folder "repo":
dirs = Dirs(
root=WindowsPath('C:/.../repo'),
build=WindowsPath('C:/.../repo/build'),
installed=WindowsPath('C:/.../repo/build-install'),
site=WindowsPath('C:/.../repo/build-install/Lib/site-packages'
)
"""
# Get OpenBLAS lib path from pkg-config
cmd = ['pkg-config', '--variable', 'libdir', 'openblas']
result = subprocess.run(cmd, capture_output=True, text=True)
# pkg-config does not return any meaningful error message if fails
if result.returncode != 0:
print('"pkg-config --variable libdir openblas" '
'command did not manage to find OpenBLAS '
'successfully. Try running manually on the '
'command prompt for more information.')
print("OpenBLAS copy failed!")
sys.exit(result.returncode)
# Skip the drive letter of the path -> /c to get Windows drive
# to be appended correctly to avoid "C:\c\..." from stdout.
openblas_lib_path = Path(result.stdout.strip()[2:]).resolve()
if not openblas_lib_path.stem == 'lib':
raise RuntimeError('"pkg-config --variable libdir openblas" '
'command did not return a path ending with'
' "lib" folder. Instead it returned '
f'"{openblas_lib_path}"')
# Look in bin subdirectory for OpenBLAS binaries.
bin_path = openblas_lib_path.parent / 'bin'
# Locate, make output .libs directory in Scipy install directory.
scipy_path = dirs.site / 'scipy'
libs_path = scipy_path / '.libs'
libs_path.mkdir(exist_ok=True)
# Copy DLL files from OpenBLAS install to scipy install .libs subdir.
for dll_fn in bin_path.glob('*.dll'):
out_fname = libs_path / dll_fn.name
print(f'Copying {dll_fn} ----> {out_fname}')
out_fname.write_bytes(dll_fn.read_bytes())
# Write _distributor_init.py to scipy install dir;
# this ensures the .libs file is on the DLL search path at run-time,
# so OpenBLAS gets found
openblas_support = import_module_from_path(
'openblas_support',
dirs.root / 'tools' / 'openblas_support.py'
)
openblas_support.make_init(scipy_path)
print('OpenBLAS copied')
@classmethod
def run(cls, add_path=False, **kwargs):
kwargs.update(cls.ctx.get(kwargs))
Args = namedtuple('Args', [k for k in kwargs.keys()])
args = Args(**kwargs)
cls.console = Console(theme=console_theme)
dirs = Dirs(args)
if args.no_build:
print("Skipping build")
else:
env = cls.setup_build(dirs, args)
cls.build_project(dirs, args, env)
cls.install_project(dirs, args)
if args.win_cp_openblas and platform.system() == 'Windows':
cls.copy_openblas(dirs)
# add site to sys.path
if add_path:
dirs.add_sys_path()
@cli.cls_cmd('test')
class Test(Task):
""":wrench: Run tests.
\b
```python
Examples:
$ python dev.py test -s {SAMPLE_SUBMODULE}
$ python dev.py test -t scipy.optimize.tests.test_minimize_constrained
$ python dev.py test -s cluster -m full --durations 20
$ python dev.py test -s stats -- --tb=line # `--` passes next args to pytest
$ python dev.py test -b numpy -b pytorch -s cluster
```
""" # noqa: E501
ctx = CONTEXT
verbose = Option(
['--verbose', '-v'], default=False, is_flag=True,
help="more verbosity")
# removed doctests as currently not supported by _lib/_testutils.py
# doctests = Option(['--doctests'], default=False)
coverage = Option(
['--coverage', '-c'], default=False, is_flag=True,
help=("report coverage of project code. "
"HTML output goes under build/coverage"))
durations = Option(
['--durations', '-d'], default=None, metavar="NUM_TESTS",
help="Show timing for the given number of slowest tests"
)
submodule = Option(
['--submodule', '-s'], default=None, metavar='MODULE_NAME',
help="Submodule whose tests to run (cluster, constants, ...)")
tests = Option(
['--tests', '-t'], default=None, multiple=True, metavar='TESTS',
help='Specify tests to run')
mode = Option(
['--mode', '-m'], default='fast', metavar='MODE', show_default=True,
help=("'fast', 'full', or something that could be passed to "
"`pytest -m` as a marker expression"))
parallel = Option(
['--parallel', '-j'], default=1, metavar='N_JOBS',
help="Number of parallel jobs for testing"
)
array_api_backend = Option(
['--array-api-backend', '-b'], default=None, metavar='ARRAY_BACKEND',
multiple=True,
help=(
"Array API backend ('all', 'numpy', 'pytorch', 'cupy', 'numpy.array_api')."
)
)
# Argument can't have `help=`; used to consume all of `-- arg1 arg2 arg3`
pytest_args = Argument(
['pytest_args'], nargs=-1, metavar='PYTEST-ARGS', required=False
)
TASK_META = {
'task_dep': ['build'],
}
@classmethod
def scipy_tests(cls, args, pytest_args):
dirs = Dirs(args)
dirs.add_sys_path()
print(f"SciPy from development installed path at: {dirs.site}")
# FIXME: support pos-args with doit
extra_argv = pytest_args[:] if pytest_args else []
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.coverage:
dst_dir = dirs.root / args.build_dir / 'coverage'
fn = dst_dir / 'coverage_html.js'
if dst_dir.is_dir() and fn.is_file():
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + str(dst_dir)]
shutil.copyfile(dirs.root / '.coveragerc',
dirs.site / '.coveragerc')
if args.durations:
extra_argv += ['--durations', args.durations]
# convert options to test selection
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
if len(args.array_api_backend) != 0:
os.environ['SCIPY_ARRAY_API'] = json.dumps(list(args.array_api_backend))
runner, version, mod_path = get_test_runner(PROJECT_MODULE)
# FIXME: changing CWD is not a good practice
with working_dir(dirs.site):
print("Running tests for {} version:{}, installed at:{}".format(
PROJECT_MODULE, version, mod_path))
# runner verbosity - convert bool to int
verbose = int(args.verbose) + 1
result = runner( # scipy._lib._testutils:PytestTester
args.mode,
verbose=verbose,
extra_argv=extra_argv,
doctests=False,
coverage=args.coverage,
tests=tests,
parallel=args.parallel)
return result
@classmethod
def run(cls, pytest_args, **kwargs):
"""run unit-tests"""
kwargs.update(cls.ctx.get())
Args = namedtuple('Args', [k for k in kwargs.keys()])
args = Args(**kwargs)
return cls.scipy_tests(args, pytest_args)
@cli.cls_cmd('bench')
class Bench(Task):
""":wrench: Run benchmarks.
\b
```python
Examples:
$ python dev.py bench -t integrate.SolveBVP
$ python dev.py bench -t linalg.Norm
$ python dev.py bench --compare main
```
"""
ctx = CONTEXT
TASK_META = {
'task_dep': ['build'],
}
submodule = Option(
['--submodule', '-s'], default=None, metavar='SUBMODULE',
help="Submodule whose tests to run (cluster, constants, ...)")
tests = Option(
['--tests', '-t'], default=None, multiple=True,
metavar='TESTS', help='Specify tests to run')
compare = Option(
['--compare', '-c'], default=None, metavar='COMPARE', multiple=True,
help=(
"Compare benchmark results of current HEAD to BEFORE. "
"Use an additional --bench COMMIT to override HEAD with COMMIT. "
"Note that you need to commit your changes first!"))
@staticmethod
def run_asv(dirs, cmd):
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
bench_dir = dirs.root / 'benchmarks'
sys.path.insert(0, str(bench_dir))
# Always use ccache, if installed
env = dict(os.environ)
env['PATH'] = os.pathsep.join(EXTRA_PATH +
env.get('PATH', '').split(os.pathsep))
# Control BLAS/LAPACK threads
env['OPENBLAS_NUM_THREADS'] = '1'
env['MKL_NUM_THREADS'] = '1'
# Limit memory usage
from benchmarks.common import set_mem_rlimit
try:
set_mem_rlimit()
except (ImportError, RuntimeError):
pass
try:
return subprocess.call(cmd, env=env, cwd=bench_dir)
except OSError as err:
if err.errno == errno.ENOENT:
cmd_str = " ".join(cmd)
print(f"Error when running '{cmd_str}': {err}\n")
print("You need to install Airspeed Velocity "
"(https://airspeed-velocity.github.io/asv/)")
print("to run Scipy benchmarks")
return 1
raise
@classmethod
def scipy_bench(cls, args):
dirs = Dirs(args)
dirs.add_sys_path()
print(f"SciPy from development installed path at: {dirs.site}")
with working_dir(dirs.site):
runner, version, mod_path = get_test_runner(PROJECT_MODULE)
extra_argv = []
if args.tests:
extra_argv.append(args.tests)
if args.submodule:
extra_argv.append([args.submodule])
bench_args = []
for a in extra_argv:
bench_args.extend(['--bench', ' '.join(str(x) for x in a)])
if not args.compare:
print("Running benchmarks for Scipy version %s at %s"
% (version, mod_path))
cmd = ['asv', 'run', '--dry-run', '--show-stderr',
'--python=same'] + bench_args
retval = cls.run_asv(dirs, cmd)
sys.exit(retval)
else:
if len(args.compare) == 1:
commit_a = args.compare[0]
commit_b = 'HEAD'
elif len(args.compare) == 2:
commit_a, commit_b = args.compare
else:
print("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*" * 80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*" * 80)
# Fix commit ids (HEAD is local to current repo)
p = subprocess.Popen(['git', 'rev-parse', commit_b],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_b = out.strip()
p = subprocess.Popen(['git', 'rev-parse', commit_a],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_a = out.strip()
cmd_compare = [
'asv', 'continuous', '--show-stderr', '--factor', '1.05',
commit_a, commit_b
] + bench_args
cls.run_asv(dirs, cmd_compare)
sys.exit(1)
@classmethod
def run(cls, **kwargs):
"""run benchmark"""
kwargs.update(cls.ctx.get())
Args = namedtuple('Args', [k for k in kwargs.keys()])
args = Args(**kwargs)
cls.scipy_bench(args)
###################
# linters
def emit_cmdstr(cmd):
"""Print the command that's being run to stdout
Note: cannot use this in the below tasks (yet), because as is these command
strings are always echoed to the console, even if the command isn't run
(but for example the `build` command is run).
"""
console = Console(theme=console_theme)
# The [cmd] square brackets controls the font styling, typically in italics
# to differentiate it from other stdout content
console.print(f"{EMOJI.cmd} [cmd] {cmd}")
def task_lint():
# Lint just the diff since branching off of main using a
# stricter configuration.
# emit_cmdstr(os.path.join('tools', 'lint.py') + ' --diff-against main')
return {
'basename': 'lint',
'actions': [str(Dirs().root / 'tools' / 'lint.py') +
' --diff-against=main'],
'doc': 'Lint only files modified since last commit (stricter rules)',
}
def task_unicode_check():
# emit_cmdstr(os.path.join('tools', 'unicode-check.py'))
return {
'basename': 'unicode-check',
'actions': [str(Dirs().root / 'tools' / 'unicode-check.py')],
'doc': 'Check for disallowed Unicode characters in the SciPy Python '
'and Cython source code.',
}
def task_check_test_name():
# emit_cmdstr(os.path.join('tools', 'check_test_name.py'))
return {
"basename": "check-testname",
"actions": [str(Dirs().root / "tools" / "check_test_name.py")],
"doc": "Check tests are correctly named so that pytest runs them."
}
@cli.cls_cmd('lint')
class Lint():
""":dash: Run linter on modified files and check for
disallowed Unicode characters and possibly-invalid test names."""
def run():
run_doit_task({
'lint': {},
'unicode-check': {},
'check-testname': {},
})
@cli.cls_cmd('mypy')
class Mypy(Task):
""":wrench: Run mypy on the codebase."""
ctx = CONTEXT
TASK_META = {
'task_dep': ['build'],
}
@classmethod
def run(cls, **kwargs):
kwargs.update(cls.ctx.get())
Args = namedtuple('Args', [k for k in kwargs.keys()])
args = Args(**kwargs)
dirs = Dirs(args)
try:
import mypy.api
except ImportError as e:
raise RuntimeError(
"Mypy not found. Please install it by running "
"pip install -r mypy_requirements.txt from the repo root"
) from e
config = dirs.root / "mypy.ini"
check_path = PROJECT_MODULE
with working_dir(dirs.site):
# By default mypy won't color the output since it isn't being
# invoked from a tty.
os.environ['MYPY_FORCE_COLOR'] = '1'
# Change to the site directory to make sure mypy doesn't pick
# up any type stubs in the source tree.
emit_cmdstr(f"mypy.api.run --config-file {config} {check_path}")
report, errors, status = mypy.api.run([
"--config-file",
str(config),
check_path,
])
print(report, end='')
print(errors, end='', file=sys.stderr)
return status == 0
##########################################
# DOC
@cli.cls_cmd('doc')
class Doc(Task):
""":wrench: Build documentation.
TARGETS: Sphinx build targets [default: 'html']
"""
ctx = CONTEXT
args = Argument(['args'], nargs=-1, metavar='TARGETS', required=False)
list_targets = Option(
['--list-targets', '-t'], default=False, is_flag=True,
help='List doc targets',
)
parallel = Option(
['--parallel', '-j'], default=1, metavar='N_JOBS',
help="Number of parallel jobs"
)
no_cache = Option(
['--no-cache'], default=False, is_flag=True,
help="Forces a full rebuild of the docs. Note that this may be " + \
"needed in order to make docstring changes in C/Cython files " + \
"show up."
)
@classmethod
def task_meta(cls, list_targets, parallel, no_cache, args, **kwargs):
if list_targets: # list MAKE targets, remove default target
task_dep = []
targets = ''
else:
task_dep = ['build']
targets = ' '.join(args) if args else 'html'
kwargs.update(cls.ctx.get())
Args = namedtuple('Args', [k for k in kwargs.keys()])
build_args = Args(**kwargs)
dirs = Dirs(build_args)
make_params = [f'PYTHON="{sys.executable}"']
if parallel or no_cache:
sphinxopts = ""
if parallel:
sphinxopts += f"-j{parallel} "
if no_cache:
sphinxopts += "-E"
make_params.append(f'SPHINXOPTS="{sphinxopts}"')
# Environment variables needed for notebooks
# See gh-17322
make_params.append('SQLALCHEMY_SILENCE_UBER_WARNING=1')
make_params.append('JUPYTER_PLATFORM_DIRS=1')
return {
'actions': [
# move to doc/ so local scipy does not get imported
(f'cd doc; env PYTHONPATH="{dirs.site}" '
f'make {" ".join(make_params)} {targets}'),
],
'task_dep': task_dep,
'io': {'capture': False},
}
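# Hedged invocation examples (illustrative; 'html' is the documented default
# target, other targets are forwarded verbatim to the Sphinx Makefile):
#
#   $ python dev.py doc                     # builds the default 'html' target
#   $ python dev.py doc -j 4 --no-cache     # parallel, full rebuild (-E)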
@cli.cls_cmd('refguide-check')
class RefguideCheck(Task):
""":wrench: Run refguide check."""
ctx = CONTEXT
submodule = Option(
['--submodule', '-s'], default=None, metavar='SUBMODULE',
help="Submodule whose tests to run (cluster, constants, ...)")
verbose = Option(
['--verbose', '-v'], default=False, is_flag=True, help="verbosity")
@classmethod
def task_meta(cls, **kwargs):
kwargs.update(cls.ctx.get())
Args = namedtuple('Args', [k for k in kwargs.keys()])
args = Args(**kwargs)
dirs = Dirs(args)
cmd = [f'{sys.executable}',
str(dirs.root / 'tools' / 'refguide_check.py'),
'--doctests']
if args.verbose:
cmd += ['-vvv']
if args.submodule:
cmd += [args.submodule]
cmd_str = ' '.join(cmd)
return {
'actions': [f'env PYTHONPATH={dirs.site} {cmd_str}'],
'task_dep': ['build'],
'io': {'capture': False},
}
##########################################
# ENVS
@cli.cls_cmd('python')
class Python():
""":wrench: Start a Python shell with PYTHONPATH set."""
ctx = CONTEXT
pythonpath = Option(
['--pythonpath', '-p'], metavar='PYTHONPATH', default=None,
help='Paths to prepend to PYTHONPATH')
extra_argv = Argument(
['extra_argv'], nargs=-1, metavar='ARGS', required=False)
@classmethod
def _setup(cls, pythonpath, **kwargs):
vals = Build.opt_defaults()
vals.update(kwargs)
Build.run(add_path=True, **vals)
if pythonpath:
for p in reversed(pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
@classmethod
def run(cls, pythonpath, extra_argv=None, **kwargs):
cls._setup(pythonpath, **kwargs)
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0]) as f:
script = f.read()
sys.modules['__main__'] = new_module('__main__')
ns = dict(__name__='__main__', __file__=extra_argv[0])
exec(script, ns)
else:
import code
code.interact()
@cli.cls_cmd('ipython')
class Ipython(Python):
""":wrench: Start IPython shell with PYTHONPATH set."""
ctx = CONTEXT
pythonpath = Python.pythonpath
@classmethod
def run(cls, pythonpath, **kwargs):
cls._setup(pythonpath, **kwargs)
import IPython
IPython.embed(user_ns={})
@cli.cls_cmd('shell')
class Shell(Python):
""":wrench: Start Unix shell with PYTHONPATH set."""
ctx = CONTEXT
pythonpath = Python.pythonpath
extra_argv = Python.extra_argv
@classmethod
def run(cls, pythonpath, extra_argv, **kwargs):
cls._setup(pythonpath, **kwargs)
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + list(extra_argv))
sys.exit(1)
@cli.command()
@click.argument('version_args', nargs=2)
@click.pass_obj
def notes(ctx_obj, version_args):
""":ledger: Release notes and log generation.
\b
```python
Example:
$ python dev.py notes v1.7.0 v1.8.0
```
"""
if version_args:
sys.argv = version_args
log_start = sys.argv[0]
log_end = sys.argv[1]
cmd = f"python tools/write_release_and_log.py {log_start} {log_end}"
click.echo(cmd)
try:
subprocess.run([cmd], check=True, shell=True)
except subprocess.CalledProcessError:
print('Error caught: Incorrect log start or log end version')
@cli.command()
@click.argument('revision_args', nargs=2)
@click.pass_obj
def authors(ctx_obj, revision_args):
""":ledger: Generate list of authors who contributed within revision
interval.
\b
```python
Example:
$ python dev.py authors v1.7.0 v1.8.0
```
"""
if revision_args:
sys.argv = revision_args
start_revision = sys.argv[0]
end_revision = sys.argv[1]
cmd = f"python tools/authors.py {start_revision}..{end_revision}"
click.echo(cmd)
try:
subprocess.run([cmd], check=True, shell=True)
except subprocess.CalledProcessError:
print('Error caught: Incorrect revision start or revision end')
# The following CPU core count functions were taken from loky/backend/context.py
# See https://github.com/joblib/loky
# Cache for the number of physical cores to avoid repeating subprocess calls.
# It should not change during the lifetime of the program.
physical_cores_cache = None
def cpu_count(only_physical_cores=False):
"""Return the number of CPUs the current process can use.
The returned number of CPUs accounts for:
* the number of CPUs in the system, as given by
``multiprocessing.cpu_count``;
* the CPU affinity settings of the current process
(available on some Unix systems);
* Cgroup CPU bandwidth limit (available on Linux only, typically
set by docker and similar container orchestration systems);
* the value of the LOKY_MAX_CPU_COUNT environment variable if defined.
and is given as the minimum of these constraints.
If ``only_physical_cores`` is True, return the number of physical cores
instead of the number of logical cores (hyperthreading / SMT). Note that
this option is not enforced if the number of usable cores is controlled in
any other way such as: process affinity, Cgroup restricted CPU bandwidth
or the LOKY_MAX_CPU_COUNT environment variable. If the number of physical
cores is not found, return the number of logical cores.
Note that on Windows, the returned number of CPUs cannot exceed 61 (or 60 for
Python < 3.10), see:
https://bugs.python.org/issue26903.
It is also always greater than or equal to 1.
"""
# Note: os.cpu_count() is allowed to return None in its docstring
os_cpu_count = os.cpu_count() or 1
if sys.platform == "win32":
# On Windows, attempting to use more than 61 CPUs would result in a
# OS-level error. See https://bugs.python.org/issue26903. According to
# https://learn.microsoft.com/en-us/windows/win32/procthread/processor-groups
# it might be possible to go beyond with a lot of extra work but this
# does not look easy.
os_cpu_count = min(os_cpu_count, _MAX_WINDOWS_WORKERS)
cpu_count_user = _cpu_count_user(os_cpu_count)
aggregate_cpu_count = max(min(os_cpu_count, cpu_count_user), 1)
if not only_physical_cores:
return aggregate_cpu_count
if cpu_count_user < os_cpu_count:
# Respect user setting
return max(cpu_count_user, 1)
cpu_count_physical, exception = _count_physical_cores()
if cpu_count_physical != "not found":
return cpu_count_physical
# Fallback to default behavior
if exception is not None:
# warns only the first time
warnings.warn(
"Could not find the number of physical cores for the "
f"following reason:\n{exception}\n"
"Returning the number of logical cores instead. You can "
"silence this warning by setting LOKY_MAX_CPU_COUNT to "
"the number of cores you want to use."
)
traceback.print_tb(exception.__traceback__)
return aggregate_cpu_count
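# Hedged usage sketch (illustrative only; the printed values are hypothetical
# and depend on the host and its limits):
#
#   >>> cpu_count()                          # min(system, affinity, cgroup, LOKY)
#   8
#   >>> cpu_count(only_physical_cores=True)  # physical cores, if detectable
#   4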
def _cpu_count_cgroup(os_cpu_count):
# Cgroup CPU bandwidth limit available in Linux since 2.6 kernel
cpu_max_fname = "/sys/fs/cgroup/cpu.max"
cfs_quota_fname = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
cfs_period_fname = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
if os.path.exists(cpu_max_fname):
# cgroup v2
# https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html
with open(cpu_max_fname) as fh:
cpu_quota_us, cpu_period_us = fh.read().strip().split()
elif os.path.exists(cfs_quota_fname) and os.path.exists(cfs_period_fname):
# cgroup v1
# https://www.kernel.org/doc/html/latest/scheduler/sched-bwc.html#management
with open(cfs_quota_fname) as fh:
cpu_quota_us = fh.read().strip()
with open(cfs_period_fname) as fh:
cpu_period_us = fh.read().strip()
else:
# No Cgroup CPU bandwidth limit (e.g. non-Linux platform)
cpu_quota_us = "max"
cpu_period_us = 100_000 # unused, for consistency with default values
if cpu_quota_us == "max":
# No active Cgroup quota on a Cgroup-capable platform
return os_cpu_count
else:
cpu_quota_us = int(cpu_quota_us)
cpu_period_us = int(cpu_period_us)
if cpu_quota_us > 0 and cpu_period_us > 0:
return math.ceil(cpu_quota_us / cpu_period_us)
else: # pragma: no cover
# Setting a negative cpu_quota_us value is a valid way to disable
# cgroup CPU bandwidth limits
return os_cpu_count
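# Worked example (assumed file contents, for illustration): a cgroup v2
# cpu.max of "150000 100000" grants 150ms of CPU time per 100ms period,
# so this helper reports math.ceil(150000 / 100000) == 2 usable CPUs.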
def _cpu_count_affinity(os_cpu_count):
# Number of available CPUs given affinity settings
if hasattr(os, "sched_getaffinity"):
try:
return len(os.sched_getaffinity(0))
except NotImplementedError:
pass
# On PyPy and possibly other platforms, os.sched_getaffinity does not exist
# or raises NotImplementedError, let's try with the psutil if installed.
try:
import psutil
p = psutil.Process()
if hasattr(p, "cpu_affinity"):
return len(p.cpu_affinity())
except ImportError: # pragma: no cover
if (
sys.platform == "linux"
and os.environ.get("LOKY_MAX_CPU_COUNT") is None
):
# PyPy does not implement os.sched_getaffinity on Linux which
# can cause severe oversubscription problems. Better warn the
# user in this particularly pathological case which can wreck
# havoc, typically on CI workers.
warnings.warn(
"Failed to inspect CPU affinity constraints on this system. "
"Please install psutil or explictly set LOKY_MAX_CPU_COUNT."
)
# This can happen on platforms that do not implement any kind of CPU
# affinity, such as macOS-based platforms.
return os_cpu_count
def _cpu_count_user(os_cpu_count):
"""Number of user defined available CPUs"""
cpu_count_affinity = _cpu_count_affinity(os_cpu_count)
cpu_count_cgroup = _cpu_count_cgroup(os_cpu_count)
# User defined soft-limit passed as a loky specific environment variable.
cpu_count_loky = int(os.environ.get("LOKY_MAX_CPU_COUNT", os_cpu_count))
return min(cpu_count_affinity, cpu_count_cgroup, cpu_count_loky)
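# Worked example (hypothetical limits): with 16 OS CPUs, an affinity mask of
# 8 CPUs, no cgroup quota, and LOKY_MAX_CPU_COUNT=4, the result is
# min(8, 16, 4) == 4.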
def _count_physical_cores():
"""Return a tuple (number of physical cores, exception)
If the number of physical cores is found, exception is set to None.
If it has not been found, return ("not found", exception).
The number of physical cores is cached to avoid repeating subprocess calls.
"""
exception = None
# First check if the value is cached
global physical_cores_cache
if physical_cores_cache is not None:
return physical_cores_cache, exception
# Not cached yet, find it
try:
if sys.platform == "linux":
cpu_info = subprocess.run(
"lscpu --parse=core".split(), capture_output=True, text=True
)
cpu_info = cpu_info.stdout.splitlines()
cpu_info = {line for line in cpu_info if not line.startswith("#")}
cpu_count_physical = len(cpu_info)
elif sys.platform == "win32":
cpu_info = subprocess.run(
"wmic CPU Get NumberOfCores /Format:csv".split(),
capture_output=True,
text=True,
)
cpu_info = cpu_info.stdout.splitlines()
cpu_info = [
l.split(",")[1]
for l in cpu_info
if (l and l != "Node,NumberOfCores")
]
cpu_count_physical = sum(map(int, cpu_info))
elif sys.platform == "darwin":
cpu_info = subprocess.run(
"sysctl -n hw.physicalcpu".split(),
capture_output=True,
text=True,
)
cpu_info = cpu_info.stdout
cpu_count_physical = int(cpu_info)
else:
raise NotImplementedError(f"unsupported platform: {sys.platform}")
# if cpu_count_physical < 1, we did not find a valid value
if cpu_count_physical < 1:
raise ValueError(f"found {cpu_count_physical} physical cores < 1")
except Exception as e:
exception = e
cpu_count_physical = "not found"
# Put the result in cache
physical_cores_cache = cpu_count_physical
return cpu_count_physical, exception
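# Hedged usage sketch (illustrative; values are hypothetical):
#
#   >>> _count_physical_cores()
#   (4, None)          # found 4 physical cores
#   >>> _count_physical_cores()
#   (4, None)          # second call is served from physical_cores_cache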
if __name__ == '__main__':
cli()
| 50,071
| 33.085773
| 87
|
py
|
scipy
|
scipy-main/tools/openblas_support.py
|
import glob
import os
import platform
import sysconfig
import sys
import shutil
import tarfile
import textwrap
import time
import zipfile
from tempfile import mkstemp, gettempdir
from urllib.request import urlopen, Request
from urllib.error import HTTPError
OPENBLAS_V = '0.3.21.dev'
OPENBLAS_LONG = 'v0.3.20-571-g3dec11c6'
BASE_LOC = 'https://anaconda.org/multibuild-wheels-staging/openblas-libs'
BASEURL = f'{BASE_LOC}/{OPENBLAS_LONG}/download'
SUPPORTED_PLATFORMS = [
'linux-aarch64',
'linux-x86_64',
'musllinux-x86_64',
'linux-i686',
'linux-ppc64le',
'linux-s390x',
'win-amd64',
'win-32',
'macosx-x86_64',
'macosx-arm64',
]
IS_32BIT = sys.maxsize < 2**32
def get_plat():
plat = sysconfig.get_platform()
plat_split = plat.split("-")
arch = plat_split[-1]
if arch == "win32":
plat = "win-32"
elif arch in ["universal2", "intel"]:
plat = f"macosx-{platform.uname().machine}"
elif len(plat_split) > 2:
plat = f"{plat_split[0]}-{arch}"
assert plat in SUPPORTED_PLATFORMS, f'invalid platform {plat}'
return plat
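# Hedged examples (hypothetical hosts): sysconfig.get_platform() values map
# as 'linux-x86_64' -> 'linux-x86_64', 'macosx-11.0-arm64' -> 'macosx-arm64',
# and 'win32' -> 'win-32'; anything outside SUPPORTED_PLATFORMS trips the
# assert.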
def get_ilp64():
if os.environ.get("NPY_USE_BLAS_ILP64", "0") == "0":
return None
if IS_32BIT:
raise RuntimeError("NPY_USE_BLAS_ILP64 set on 32-bit arch")
return "64_"
def get_manylinux(arch):
if arch == 'i686':
default = '2010'
else:
default = '2014'
ml_ver = os.environ.get("MB_ML_VER", default)
# XXX For PEP 600 this can be a glibc version
assert ml_ver in ('2010', '2014', '_2_24'), f'invalid MB_ML_VER {ml_ver}'
suffix = f'manylinux{ml_ver}_{arch}.tar.gz'
return suffix
def get_musllinux(arch):
musl_ver = "1_1"
suffix = f'musllinux_{musl_ver}_{arch}.tar.gz'
return suffix
def get_linux(arch):
# The best way of figuring out whether manylinux or musllinux is to look
# at the packaging tags. If packaging isn't installed (it's not by default),
# fall back to sysconfig (which may be flakier).
try:
from packaging.tags import sys_tags
tags = list(sys_tags())
plat = tags[0].platform
except ImportError:
# fallback to sysconfig for figuring out if you're using musl
plat = 'manylinux'
# value could be None
v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
if 'musl' in v:
plat = 'musllinux'
if 'manylinux' in plat:
return get_manylinux(arch)
elif 'musllinux' in plat:
return get_musllinux(arch)
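# Hedged example (assumed environment): on a glibc x86_64 host with the
# `packaging` module available, get_linux('x86_64') goes through
# get_manylinux() and yields e.g. 'manylinux2014_x86_64.tar.gz'; on a musl
# libc host it would yield 'musllinux_1_1_x86_64.tar.gz' instead.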
def download_openblas(target, plat, ilp64):
osname, arch = plat.split("-")
fnsuffix = {None: "", "64_": "64_"}[ilp64]
filename = ''
headers = {'User-Agent':
('Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 ; '
'(KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3')}
suffix = None
if osname == "linux":
suffix = get_linux(arch)
typ = 'tar.gz'
elif plat == 'macosx-x86_64':
suffix = 'macosx_10_9_x86_64-gf_c469a42.tar.gz'
typ = 'tar.gz'
elif plat == 'macosx-arm64':
suffix = 'macosx_11_0_arm64-gf_5272328.tar.gz'
typ = 'tar.gz'
elif osname == 'win':
if plat == "win-32":
suffix = 'win32-gcc_8_3_0.zip'
else:
suffix = 'win_amd64-gcc_10_3_0.zip'
typ = 'zip'
if not suffix:
return None
filename = f'{BASEURL}/openblas{fnsuffix}-{OPENBLAS_LONG}-{suffix}'
req = Request(url=filename, headers=headers)
response = None
for attempt in range(3):
try:
time.sleep(1)
response = urlopen(req)
break
except HTTPError:
print(f'Could not download "{filename}"', file=sys.stderr)
# Give up only after the final attempt, so the loop actually retries
if attempt == 2:
raise
length = response.getheader('content-length')
if response.status != 200:
print(f'Could not download "{filename}"', file=sys.stderr)
return None
print(f"Downloading {length} from {filename}", file=sys.stderr)
data = response.read()
print("Saving to file", file=sys.stderr)
with open(target, 'wb') as fid:
fid.write(data)
return typ
def setup_openblas(plat=get_plat(), ilp64=get_ilp64()):
'''
Download and setup an openblas library for building. If successful,
the configuration script will find it automatically.
Returns
-------
msg : str
path to extracted files on success, otherwise indicates what went wrong
To determine success, do ``os.path.exists(msg)``
'''
_, tmp = mkstemp()
if not plat:
raise ValueError('unknown platform')
typ = download_openblas(tmp, plat, ilp64)
if not typ:
return ''
osname, arch = plat.split("-")
if osname == 'win':
if typ != 'zip':
return f'expecting to download zipfile on windows, not {typ}'
return unpack_windows_zip(tmp)
else:
if typ != 'tar.gz':
return f'expecting to download tar.gz, not {typ}'
return unpack_targz(tmp)
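# Hedged usage sketch (illustrative; the returned path is hypothetical):
#
#   >>> msg = setup_openblas()
#   >>> os.path.exists(msg)   # per the docstring, this is the success check
#   True
#   >>> msg
#   '/tmp/openblas'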
def unpack_windows_zip(fname):
with zipfile.ZipFile(fname, 'r') as zf:
# Get the openblas.a file, but not openblas.dll.a nor openblas.dev.a
lib = [x for x in zf.namelist() if OPENBLAS_LONG in x and
x.endswith('a') and not x.endswith('dll.a') and
not x.endswith('dev.a')]
if not lib:
return 'could not find libopenblas_%s*.a ' \
'in downloaded zipfile' % OPENBLAS_LONG
if get_ilp64() is None:
target = os.path.join(gettempdir(), 'openblas.a')
else:
target = os.path.join(gettempdir(), 'openblas64_.a')
with open(target, 'wb') as fid:
fid.write(zf.read(lib[0]))
return target
def unpack_targz(fname):
target = os.path.join(gettempdir(), 'openblas')
if not os.path.exists(target):
os.mkdir(target)
with tarfile.open(fname, 'r') as zf:
# Strip common prefix from paths when unpacking
prefix = os.path.commonpath(zf.getnames())
extract_tarfile_to(zf, target, prefix)
return target
def extract_tarfile_to(tarfileobj, target_path, archive_path):
"""Extract TarFile contents under archive_path/ to target_path/"""
target_path = os.path.abspath(target_path)
def get_members():
for member in tarfileobj.getmembers():
if archive_path:
norm_path = os.path.normpath(member.name)
if norm_path.startswith(archive_path + os.path.sep):
member.name = norm_path[len(archive_path)+1:]
else:
continue
dst_path = os.path.abspath(os.path.join(target_path, member.name))
if os.path.commonpath([target_path, dst_path]) != target_path:
# Path not under target_path, probably contains ../
continue
yield member
tarfileobj.extractall(target_path, members=get_members())
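# Worked example (hypothetical member names): with archive_path='pkg-1.0',
# the member 'pkg-1.0/lib/libopenblas.a' is renamed to 'lib/libopenblas.a'
# and extracted; 'other/file' is skipped, as is any member whose normalized
# destination would escape target_path (e.g. via '../').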
def make_init(dirname):
'''
Create a _distributor_init.py file for OpenBlas
'''
with open(os.path.join(dirname, '_distributor_init.py'), 'w') as fid:
fid.write(textwrap.dedent("""
'''
Helper to preload windows dlls to prevent dll not found errors.
Once a DLL is preloaded, its namespace is made available to any
subsequent DLL. This file originated in the numpy-wheels repo,
and is created as part of the scripts that build the wheel.
'''
import os
import glob
if os.name == 'nt':
# convention for storing / loading the DLL from
# numpy/.libs/, if present
try:
from ctypes import WinDLL
basedir = os.path.dirname(__file__)
except Exception:
pass
else:
libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
DLL_filenames = []
if os.path.isdir(libs_dir):
for filename in glob.glob(os.path.join(libs_dir,
'*openblas*dll')):
# NOTE: would it change behavior to load ALL
# DLLs at this path vs. the name restriction?
WinDLL(os.path.abspath(filename))
DLL_filenames.append(filename)
if len(DLL_filenames) > 1:
import warnings
warnings.warn("loaded more than 1 DLL from .libs:"
"\\n%s" % "\\n".join(DLL_filenames),
stacklevel=1)
"""))
def test_setup(plats):
'''
Make sure all the downloadable files exist and can be opened
'''
def items():
""" yields all combinations of arch, ilp64
"""
for plat in plats:
yield plat, None
osname, arch = plat.split("-")
if arch not in ('i686', 'arm64', '32'):
yield plat, '64_'
if osname == "linux" and arch in ('i686', 'x86_64'):
oldval = os.environ.get('MB_ML_VER', None)
os.environ['MB_ML_VER'] = '1'
yield plat, None
# Once we create x86_64 and i686 manylinux2014 wheels...
# os.environ['MB_ML_VER'] = '2014'
# yield arch, None, False
if oldval:
os.environ['MB_ML_VER'] = oldval
else:
os.environ.pop('MB_ML_VER')
errs = []
for plat, ilp64 in items():
osname, _ = plat.split("-")
if plat not in plats:
continue
target = None
try:
try:
target = setup_openblas(plat, ilp64)
except Exception as e:
print(f'Could not setup {plat} with ilp64 {ilp64}:')
print(e)
errs.append(e)
continue
if not target:
raise RuntimeError(f'Could not setup {plat}')
print(target)
if osname == 'win':
if not target.endswith('.a'):
raise RuntimeError("Not .a extracted!")
else:
files = glob.glob(os.path.join(target, "lib", "*.a"))
if not files:
raise RuntimeError("No lib/*.a unpacked!")
finally:
if target is not None:
if os.path.isfile(target):
os.unlink(target)
else:
shutil.rmtree(target)
if errs:
raise errs[0]
def test_version(expected_version, ilp64=get_ilp64()):
"""
Assert that expected OpenBLAS version is
actually available via SciPy
"""
import scipy
import scipy.linalg
import ctypes
dll = ctypes.CDLL(scipy.linalg.cython_blas.__file__)
if ilp64 == "64_":
get_config = dll.openblas_get_config64_
else:
get_config = dll.openblas_get_config
get_config.restype = ctypes.c_char_p
res = get_config()
print('OpenBLAS get_config returned', str(res))
if not expected_version:
expected_version = OPENBLAS_V
check_str = b'OpenBLAS %s' % expected_version.encode()
print(check_str)
assert check_str in res, f'{expected_version} not found in {res}'
if ilp64:
assert b"USE64BITINT" in res
else:
assert b"USE64BITINT" not in res
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Download and expand an OpenBLAS archive for this '
'architecture')
parser.add_argument('--test', nargs='*', default=None,
help='Test different architectures. "all", or any of '
f'{SUPPORTED_PLATFORMS}')
parser.add_argument('--write-init', nargs=1,
metavar='OUT_SCIPY_DIR',
help='Write distribution init to named dir')
parser.add_argument('--check_version', nargs='?', default='',
help='Check provided OpenBLAS version string '
'against available OpenBLAS')
args = parser.parse_args()
if args.check_version != '':
test_version(args.check_version)
elif args.write_init:
make_init(args.write_init[0])
elif args.test is None:
print(setup_openblas())
else:
if len(args.test) == 0 or 'all' in args.test:
test_setup(SUPPORTED_PLATFORMS)
else:
test_setup(args.test)
| 12,716
| 32.465789
| 81
|
py
|
scipy
|
scipy-main/tools/check_test_name.py
|
#!/usr/bin/env python
"""
MIT License
Copyright (c) 2020 Marco Gorelli
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Check that test names start with `test`, and that test classes start with
`Test`.
"""
from __future__ import annotations
import ast
import os
from pathlib import Path
import sys
from typing import Iterator, Sequence
import itertools
PRAGMA = "# skip name check"
def _find_names(node: ast.Module) -> Iterator[str]:
for _node in ast.walk(node):
if isinstance(_node, ast.Name):
yield _node.id
elif isinstance(_node, ast.Attribute):
yield _node.attr
def _is_fixture(node: ast.expr) -> bool:
if isinstance(node, ast.Call):
node = node.func
return (
isinstance(node, ast.Attribute)
and node.attr == "fixture"
and isinstance(node.value, ast.Name)
and node.value.id == "pytest"
)
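# Hedged example (illustrative): both decorator spellings below are
# recognized as fixtures, so the decorated functions are exempt from the
# test-name check:
#
#   @pytest.fixture
#   def data(): ...
#
#   @pytest.fixture(scope="module")
#   def data(): ...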
def is_misnamed_test_func(
node: ast.expr | ast.stmt, names: Sequence[str], line: str
) -> bool:
return (
isinstance(node, ast.FunctionDef)
and not node.name.startswith("test")
and names.count(node.name) == 0
and not any(
_is_fixture(decorator) for decorator in node.decorator_list
)
and PRAGMA not in line
and node.name
not in ("teardown_method", "setup_method",
"teardown_class", "setup_class",
"setup_module", "teardown_module")
)
def is_misnamed_test_class(
node: ast.expr | ast.stmt, names: Sequence[str], line: str
) -> bool:
return (
isinstance(node, ast.ClassDef)
and not node.name.startswith("Test")
and names.count(node.name) == 0
and PRAGMA not in line
# Some of the KDTreeTest use a decorator to setup tests so these are
# actually fine
and "KDTreeTest" not in [
decorator.id for decorator in node.decorator_list
]
)
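# Hedged example (illustrative): appending the pragma on the definition line
# exempts a deliberately non-conforming name from both checks:
#
#   def regression_helper():  # skip name check
#       ...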
def main(content: str, file: str) -> int:
lines = content.splitlines()
tree = ast.parse(content)
names = list(_find_names(tree))
ret = 0
for node in tree.body:
if is_misnamed_test_func(node, names, lines[node.lineno - 1]):
print(
f"{file}:{node.lineno}:{node.col_offset} "
f"found test function '{node.name}' which does not start with"
" 'test'"
)
ret = 1
elif is_misnamed_test_class(node, names, lines[node.lineno - 1]):
print(
f"{file}:{node.lineno}:{node.col_offset} "
f"found test class '{node.name}' which does not start with"
" 'Test'"
)
ret = 1
if (
isinstance(node, ast.ClassDef)
and names.count(node.name) == 0
and PRAGMA not in lines[node.lineno - 1]
):
for _node in node.body:
if is_misnamed_test_func(_node, names,
lines[_node.lineno - 1]):
# It could be that this function is used somewhere by the
# parent class. For example, there might be a base class
# with
#
# class Foo:
# def foo(self):
# assert 1+1==2
# def test_foo(self):
# self.foo()
#
# and then some subclass overwrites `foo`. So, we check
# that `self.foo` doesn't appear in any of the test
# classes. Note some false negatives might get through,
# but that's OK. This check is good enough that it has helped
# identify several examples of tests not being run.
should_continue = False
for _file in itertools.chain(
Path("scipy").rglob("**/tests/**/test*.py"),
["scipy/_lib/_testutils.py"],
):
with open(os.path.join(_file)) as fd:
_content = fd.read()
if f"self.{_node.name}" in _content:
should_continue = True
break
if should_continue:
continue
print(
f"{file}:{_node.lineno}:{_node.col_offset} "
f"found test function '{_node.name}' which does not "
"start with 'test'"
)
ret = 1
return ret
if __name__ == "__main__":
ret = 0
path = Path("scipy").rglob("**/tests/**/test*.py")
for file in path:
filename = os.path.basename(file)
with open(file, encoding="utf-8") as fd:
content = fd.read()
ret |= main(content, file)
sys.exit(ret)
| 5,952
| 34.017647
| 78
|
py
|
scipy
|
scipy-main/tools/gh_lists.py
|
#!/usr/bin/env python3
# -*- encoding:utf-8 -*-
"""
gh_lists.py MILESTONE
Functions for Github API requests.
"""
import os
import re
import sys
import json
import collections
import argparse
import datetime
import time
from urllib.request import urlopen, Request, HTTPError
Issue = collections.namedtuple('Issue', ('id', 'title', 'url'))
def main():
p = argparse.ArgumentParser(usage=__doc__.lstrip())
p.add_argument('--project', default='scipy/scipy')
p.add_argument('milestone')
args = p.parse_args()
getter = CachedGet('gh_cache.json', GithubGet())
try:
milestones = get_milestones(getter, args.project)
if args.milestone not in milestones:
msg = "Milestone {0} not available. Available milestones: {1}"
msg = msg.format(args.milestone, u", ".join(sorted(milestones)))
p.error(msg)
issues = get_issues(getter, args.project, args.milestone)
issues.sort()
finally:
getter.save()
prs = [x for x in issues if u'/pull/' in x.url]
issues = [x for x in issues if x not in prs]
def print_list(title, items):
print()
print(title)
print("-"*len(title))
print()
for issue in items:
msg = u"* `#{0} <{1}>`__: {2}"
# sanitize whitespace, `, and *
title = re.sub(u"\\s+", u" ", issue.title.strip())
title = title.replace(u'`', u'\\`').replace(u'*', u'\\*')
if len(title) > 60:
remainder = re.sub(u"\\s.*$", u"...", title[60:])
if len(remainder) > 20:
title = title[:80] + u"..."
else:
title = title[:60] + remainder
msg = msg.format(issue.id, issue.url, title)
print(msg)
print()
msg = u"Issues closed for {0}".format(args.milestone)
print_list(msg, issues)
msg = u"Pull requests for {0}".format(args.milestone)
print_list(msg, prs)
return 0
def get_milestones(getter, project):
url = "https://api.github.com/repos/{project}/milestones".format(project=project)
data = getter.get(url)
milestones = {}
for ms in data:
milestones[ms[u'title']] = ms[u'number']
return milestones
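# Hedged usage sketch (hypothetical API payload): get_milestones(getter,
# 'scipy/scipy') returns a title -> number mapping such as {'1.11.0': 42},
# which get_issues() below uses to build the milestone query.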
def get_issues(getter, project, milestone):
milestones = get_milestones(getter, project)
mid = milestones[milestone]
url = "https://api.github.com/repos/{project}/issues?milestone={mid}&state=closed&sort=created&direction=asc"
url = url.format(project=project, mid=mid)
data = getter.get(url)
issues = []
# data contains both PR and issue data
for issue_data in data:
# don't include PRs that were closed instead
# of merged
if "pull" in issue_data[u'html_url']:
merge_status = issue_data[u'pull_request'][u'merged_at']
if merge_status is None:
continue
issues.append(Issue(issue_data[u'number'],
issue_data[u'title'],
issue_data[u'html_url']))
return issues
class CachedGet:
def __init__(self, filename, getter):
self._getter = getter
self.filename = filename
if os.path.isfile(filename):
print("[gh_lists] using {0} as cache (remove it if you want fresh data)".format(filename),
file=sys.stderr)
with open(filename, 'r', encoding='utf-8') as f:
self.cache = json.load(f)
else:
self.cache = {}
def get(self, url):
if url not in self.cache:
data = self._getter.get_multipage(url)
self.cache[url] = data
return data
else:
print("[gh_lists] (cached):", url, file=sys.stderr, flush=True)
return self.cache[url]
def save(self):
tmp = self.filename + ".new"
with open(tmp, 'w', encoding='utf-8') as f:
json.dump(self.cache, f)
os.rename(tmp, self.filename)
class GithubGet:
def __init__(self, auth=False):
self.headers = {'User-Agent': 'gh_lists.py',
'Accept': 'application/vnd.github.v3+json'}
if auth:
self.authenticate()
req = self.urlopen('https://api.github.com/rate_limit')
try:
if req.getcode() != 200:
raise RuntimeError("could not query the GitHub rate limit")
info = json.loads(req.read().decode('utf-8'))
finally:
req.close()
self.ratelimit_remaining = int(info['rate']['remaining'])
self.ratelimit_reset = float(info['rate']['reset'])
def authenticate(self):
print("Input a Github API access token.\n"
"Personal tokens can be created at https://github.com/settings/tokens\n"
"This script does not require any permissions (so don't give it any).",
file=sys.stderr, flush=True)
print("Access token: ", file=sys.stderr, end='', flush=True)
token = input()
self.headers['Authorization'] = 'token {0}'.format(token.strip())
def urlopen(self, url, auth=None):
assert url.startswith('https://')
req = Request(url, headers=self.headers)
return urlopen(req, timeout=60)
def get_multipage(self, url):
data = []
while url:
page_data, info, next_url = self.get(url)
data += page_data
url = next_url
return data
def get(self, url):
while True:
# Wait until rate limit
while self.ratelimit_remaining == 0 and self.ratelimit_reset > time.time():
s = self.ratelimit_reset + 5 - time.time()
if s <= 0:
break
print("[gh_lists] rate limit exceeded: waiting until {0} ({1} s remaining)".format(
datetime.datetime.fromtimestamp(self.ratelimit_reset).strftime('%Y-%m-%d %H:%M:%S'),
int(s)),
file=sys.stderr, flush=True)
time.sleep(min(5*60, s))
# Get page
print("[gh_lists] get:", url, file=sys.stderr, flush=True)
try:
req = self.urlopen(url)
try:
code = req.getcode()
info = req.info()
data = json.loads(req.read().decode('utf-8'))
finally:
req.close()
except HTTPError as err:
code = err.getcode()
info = err.info()
data = None
if code not in (200, 403):
raise RuntimeError("unexpected HTTP status {0} for {1}".format(code, url))
# Parse reply
next_url = None
if 'Link' in info:
m = re.search('<([^<>]*)>; rel="next"', info['Link'])
if m:
next_url = m.group(1)
# Update rate limit info
if 'X-RateLimit-Remaining' in info:
self.ratelimit_remaining = int(info['X-RateLimit-Remaining'])
if 'X-RateLimit-Reset' in info:
self.ratelimit_reset = float(info['X-RateLimit-Reset'])
# Deal with rate limit exceeded
if code != 200 or data is None:
if self.ratelimit_remaining == 0:
continue
else:
raise RuntimeError("failed to fetch {0} (HTTP {1})".format(url, code))
# Done.
return data, info, next_url
if __name__ == "__main__":
sys.exit(main())
| 7,508
| 30.953191
| 113
|
py
|
scipy
|
scipy-main/tools/unicode-check.py
|
#!/usr/bin/env python
import re
from itertools import chain
from glob import iglob
import sys
import argparse
# The set of Unicode code points greater than 127 that we
# allow in the source code.
latin1_letters = set(chr(cp) for cp in range(192, 256))
box_drawing_chars = set(chr(cp) for cp in range(0x2500, 0x2580))
extra_symbols = set(['®', 'ő', 'λ', 'π', 'ω', '∫', '≠', '≥', '≤', 'μ'])
allowed = latin1_letters | box_drawing_chars | extra_symbols
def unicode_check(showall=False):
"""
If showall is True, all non-ASCII characters are displayed.
"""
# File encoding regular expression from PEP-263.
encoding_pat = re.compile("^[ \t\f]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")
nbad = 0
for name in chain(iglob('scipy/**/*.py', recursive=True),
iglob('scipy/**/*.pyx', recursive=True),
iglob('scipy/**/*.px[di]', recursive=True)):
# Read the file as bytes, and check for any bytes greater than 127.
with open(name, 'rb') as f:
content = f.read()
if len(content) == 0:
continue
if max(content) > 127:
# There is at least one non-ASCII character in the file.
# Check the first two lines for an encoding comment.
lines = content.splitlines()
for line in lines[:2]:
match = re.match(encoding_pat,
line.decode(encoding='latin-1'))
if match:
break
# If an explicit encoding was given in a comment, use
# that to decode the contents. Otherwise use UTF-8.
if match:
encoding = match[1]
file_enc_msg = f"(explicit encoding '{encoding}')"
else:
encoding = 'utf-8'
file_enc_msg = "(no explicit encoding; utf-8 assumed)"
content = content.decode(encoding=encoding)
out = []
for n, line in enumerate(content.splitlines()):
for pos, char in enumerate(line):
cp = ord(char)
if cp > 127:
msg = (f"... line {n+1}, position {pos+1}: "
f"character '{char}', code point U+{cp:04X}")
if showall:
out.append(msg)
else:
if char not in allowed:
out.append(msg)
if len(out) > 0:
nbad += 1
print(f"{name} {file_enc_msg}")
for msg in out:
print(msg)
return nbad
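# Hedged usage sketch (illustrative): the return value is the number of
# offending files, so the __main__ block below exits non-zero iff any file
# contains a disallowed character:
#
#   >>> unicode_check(showall=False)   # hypothetical clean tree
#   0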
if __name__ == "__main__":
descr = ('Check for disallowed Unicode characters in the SciPy Python '
'and Cython source code.')
parser = argparse.ArgumentParser(description=descr)
parser.add_argument('--showall', action='store_true',
help=('Show non-ASCII Unicode characters from all '
'files.'))
args = parser.parse_args()
sys.exit(unicode_check(args.showall) > 0)
| 3,136
| 36.795181
| 79
|
py
|
scipy
|
scipy-main/tools/refguide_summaries.py
|
#!/usr/bin/env python
"""Generate function summaries for the refguide. For example, if the
__init__ file of a submodule contains:
.. autosummary::
:toctree: generated/
foo
foobar
Then it will modify the __init__ file to contain (*)
.. autosummary::
:toctree: generated/
foo -- First line of the documentation of `foo`.
foobar -- First line of the documentation of `foobar`.
If there is already text after the function definitions it will be
overwritten, i.e.
.. autosummary::
:toctree: generated/
foo -- Blah blah blah.
foobar -- Blabbity blabbity.
will also become (*).
"""
import os
import argparse
import importlib
import re
EXCEPTIONS = {
'jn': ('Bessel function of the first kind of real order and '
'complex argument')
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("module",
help="module to add summaries to")
parser.add_argument("--dry-run",
help="print __init__ file instead of overwriting",
action="store_true")
args = parser.parse_args()
filename = os.path.join(os.path.dirname(__file__), '..', 'scipy',
args.module, '__init__.py')
module = importlib.import_module('scipy.' + args.module)
fnew = []
with open(filename, 'r') as f:
line = f.readline()
while line:
if '.. autosummary::' in line:
fnew.append(line.rstrip())
fnew.append(f.readline().rstrip()) # :toctree: generated/
fnew.append(f.readline().rstrip()) # blank line
line = f.readline()
summaries = []
maxlen = 0
while line.strip():
func = line.split('--')[0].strip()
ufunc = '[+]' not in line
if len(func) > maxlen:
maxlen = len(func)
if func in EXCEPTIONS.keys():
summary = [EXCEPTIONS[func]]
else:
summary = []
doc = getattr(module, func).__doc__.split('\n')
i = 0 if doc[0].strip() else 1
while True:
if re.match(func + r'\(.*\)', doc[i].strip()):
# ufunc docstrings contain the signature
i += 2
else:
break
while i < len(doc) and doc[i].strip():
summary.append(doc[i].lstrip())
i += 1
summary = ' '.join([x.lstrip() for x in summary])
summary = '[+]' + summary if not ufunc else summary
summaries.append((func, summary))
line = f.readline()
for (func, summary) in summaries:
spaces = ' '*(maxlen - len(func) + 1)
fnew.append(' ' + func + spaces + '-- ' + summary)
fnew.append(line.rstrip())
else:
fnew.append(line.rstrip())
line = f.readline()
if args.dry_run:
print('\n'.join(fnew))
else:
with open(filename, 'w') as f:
f.write('\n'.join(fnew))
f.write('\n')
if __name__ == "__main__":
main()
| 3,456
| 30.144144
| 74
|
py
|
scipy
|
scipy-main/tools/check_installation.py
|
"""
Script for checking if all the test files are installed after building.
Examples::
$ python check_installation.py install_directory_name
install_directory_name:
the relative path to the directory where SciPy is installed after
building and running `meson install`.
Notes
=====
The script will stop on encountering the first missing file in the install
dir; it will not give a full listing. This should be okay, because the script
is meant for use in CI, so it's not like many files will be missing at once.
"""
import os
import glob
import sys
CUR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
ROOT_DIR = os.path.dirname(CUR_DIR)
SCIPY_DIR = os.path.join(ROOT_DIR, 'scipy')
# Files whose installation path will be different from original one
changed_installed_path = {
'scipy/_build_utils/tests/test_scipy_version.py':
'scipy/_lib/tests/test_scipy_version.py'
}
# We do not want the following tests to be checked
exception_list_test_files = [
"_lib/array_api_compat/tests/test_common.py",
"_lib/array_api_compat/tests/test_isdtype.py",
"_lib/array_api_compat/tests/test_vendoring.py",
"_lib/array_api_compat/tests/test_array_namespace.py",
]
def main(install_dir):
INSTALLED_DIR = os.path.join(ROOT_DIR, install_dir)
if not os.path.exists(INSTALLED_DIR):
raise ValueError(f"Provided install dir {INSTALLED_DIR} does not exist")
scipy_test_files = get_test_files(SCIPY_DIR)
installed_test_files = get_test_files(INSTALLED_DIR)
# Check test files detected in repo are installed
for test_file in scipy_test_files.keys():
if test_file in exception_list_test_files:
continue
if test_file not in installed_test_files.keys():
raise Exception("%s is not installed" % scipy_test_files[test_file])
print("----------- All the test files were installed --------------")
scipy_pyi_files = get_pyi_files(SCIPY_DIR)
installed_pyi_files = get_pyi_files(INSTALLED_DIR)
# Check *.pyi files detected in repo are installed
for pyi_file in scipy_pyi_files.keys():
if pyi_file not in installed_pyi_files.keys():
raise Exception("%s is not installed" % scipy_pyi_files[pyi_file])
print("----------- All the .pyi files were installed --------------")
def get_suffix_path(current_path, levels=1):
current_new = current_path
for _ in range(levels + 1):
current_new = os.path.dirname(current_new)
return os.path.relpath(current_path, current_new)
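# Worked example (hypothetical path): get_suffix_path('a/b/c/test_x.py', 2)
# climbs three parents ('a/b/c' -> 'a/b' -> 'a') and returns the relative
# suffix 'b/c/test_x.py'.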
def get_test_files(dir):
test_files = dict()
for path in glob.glob(f'{dir}/**/test_*.py', recursive=True):
suffix_path = get_suffix_path(path, 3)
suffix_path = changed_installed_path.get(suffix_path, suffix_path)
if "highspy" not in suffix_path:
test_files[suffix_path] = path
return test_files
def get_pyi_files(dir):
pyi_files = dict()
for path in glob.glob(f'{dir}/**/*.pyi', recursive=True):
suffix_path = get_suffix_path(path, 2)
pyi_files[suffix_path] = path
return pyi_files
if __name__ == '__main__':
if not len(sys.argv) == 2:
raise ValueError("Incorrect number of input arguments, need "
"check_installation.py relpath/to/installed/scipy")
install_dir = sys.argv[1]
main(install_dir)
| 3,388
| 29.809091
| 80
|
py
|
scipy
|
scipy-main/tools/ninjatracing.py
|
# Copyright 2018 Nico Weber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts one (or several) .ninja_log files into chrome's about:tracing format.
If clang -ftime-trace .json files are found adjacent to generated files they
are embedded.
Usage:
ninja -C $BUILDDIR
python ninjatracing.py $BUILDDIR/.ninja_log > trace.json
Then load trace.json into Chrome or into https://ui.perfetto.dev/ to see
the profiling results.
"""
import json
import os
import optparse
import re
import sys
class Target:
"""Represents a single line read for a .ninja_log file. Start and end times
are milliseconds."""
def __init__(self, start, end):
self.start = int(start)
self.end = int(end)
self.targets = []
def read_targets(log, show_all):
"""Reads all targets from the .ninja_log file object |log|, sorted by
end time (descending)."""
header = log.readline()
m = re.search(r'^# ninja log v(\d+)\n$', header)
assert m, "unrecognized ninja log version %r" % header
version = int(m.group(1))
assert 5 <= version <= 6, "unsupported ninja log version %d" % version
if version == 6:
# Skip header line
next(log)
targets = {}
last_end_seen = 0
for line in log:
start, end, _, name, cmdhash = line.strip().split('\t') # Ignore restat.
if not show_all and int(end) < last_end_seen:
# An earlier time stamp means that this step is the first in a new
# build, possibly an incremental build. Throw away the previous data
# so that this new build will be displayed independently.
targets = {}
last_end_seen = int(end)
targets.setdefault(cmdhash, Target(start, end)).targets.append(name)
return sorted(targets.values(), key=lambda job: job.end, reverse=True)
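# Hedged example (hypothetical log contents): a v5 .ninja_log line such as
#
#   "1200\t3400\t0\tfoo.o\tdeadbeef"
#
# becomes Target(start=1200, end=3400) with .targets == ['foo.o']; lines
# sharing the same command hash are merged into a single Target.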
class Threads:
"""Tries to reconstruct the parallelism from a .ninja_log"""
def __init__(self):
self.workers = [] # Maps thread id to time that thread is occupied for.
def alloc(self, target):
"""Places target in an available thread, or adds a new thread."""
for worker in range(len(self.workers)):
if self.workers[worker] >= target.end:
self.workers[worker] = target.start
return worker
self.workers.append(target.start)
return len(self.workers) - 1
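# Worked example (illustrative): targets arrive sorted by end time,
# descending, and each worker slot stores the start time of the job last
# placed in it. A target fits slot i only if workers[i] >= target.end, i.e.
# the slot's current job starts no earlier than this one finishes.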
def read_events(trace, options):
"""Reads all events from time-trace json file |trace|."""
trace_data = json.load(trace)
def include_event(event, options):
"""Only include events if they are complete events, are longer than
granularity, and are not totals."""
return ((event['ph'] == 'X') and
(event['dur'] >= options['granularity']) and
(not event['name'].startswith('Total')))
return [x for x in trace_data['traceEvents'] if include_event(x, options)]
def trace_to_dicts(target, trace, options, pid, tid):
"""Read a file-like object |trace| containing -ftime-trace data and yields
about:tracing dict per eligible event in that log."""
for event in read_events(trace, options):
# Check if any event duration is greater than the duration from ninja.
ninja_time = (target.end - target.start) * 1000
if event['dur'] > ninja_time:
print("Inconsistent timing found (clang time > ninja time). Please"
" ensure that timings are from consistent builds.")
sys.exit(1)
# Set tid and pid from ninja log.
event['pid'] = pid
event['tid'] = tid
# Offset trace time stamp by ninja start time.
event['ts'] += (target.start * 1000)
yield event
def embed_time_trace(ninja_log_dir, target, pid, tid, options):
"""Produce time trace output for the specified ninja target. Expects
time-trace file to be in .json file named based on .o file."""
for t in target.targets:
o_path = os.path.join(ninja_log_dir, t)
json_trace_path = os.path.splitext(o_path)[0] + '.json'
try:
with open(json_trace_path, 'r') as trace:
for time_trace_event in trace_to_dicts(target, trace, options,
pid, tid):
yield time_trace_event
except IOError:
pass
def log_to_dicts(log, pid, options):
"""Reads a file-like object |log| containing a .ninja_log, and yields one
about:tracing dict per command found in the log."""
threads = Threads()
for target in read_targets(log, options['showall']):
tid = threads.alloc(target)
yield {
'name': ', '.join(target.targets), 'cat': 'targets',
'ph': 'X', 'ts': (target.start * 1000),
'dur': ((target.end - target.start) * 1000),
'pid': pid, 'tid': tid, 'args': {},
}
if options.get('embed_time_trace', False):
# Add time-trace information into the ninja trace.
try:
ninja_log_dir = os.path.dirname(log.name)
except AttributeError:
continue
for time_trace in embed_time_trace(ninja_log_dir, target, pid,
tid, options):
yield time_trace
def main(argv):
usage = __doc__
parser = optparse.OptionParser(usage)
parser.add_option('-a', '--showall', action='store_true', dest='showall',
default=False,
help='report on last build step for all outputs. Default '
'is to report just on the last (possibly incremental) '
'build')
parser.add_option('-g', '--granularity', type='int', default=50000,
dest='granularity',
help='minimum length time-trace event to embed in '
'microseconds. Default: %default')
parser.add_option('-e', '--embed-time-trace', action='store_true',
default=False, dest='embed_time_trace',
help='embed clang -ftime-trace json file found adjacent '
'to a target file')
(options, args) = parser.parse_args()
if len(args) == 0:
print('Must specify at least one .ninja_log file')
parser.print_help()
return 1
entries = []
for pid, log_file in enumerate(args):
with open(log_file, 'r') as log:
entries += list(log_to_dicts(log, pid, vars(options)))
json.dump(entries, sys.stdout)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 7,126
| 36.314136
| 81
|
py
|
scipy
|
scipy-main/tools/cythonize.py
|
"""cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'scipy'.
The number of parallel Cython processes is controlled by the
environment variable SCIPY_NUM_CYTHONIZE_JOBS. If not set, determined
from the number of CPUs.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script thinks that the pyx files have changed relative to the C files
by comparing hashes stored in a database file.
Simple script to invoke Cython (and Tempita) on all .pyx (.pyx.in)
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
For now, this script should be run by developers when changing Cython files
only, and the resulting C files checked in, so that end-users (and Python-only
developers) do not get the Cython/Tempita dependencies.
Originally written by Dag Sverre Seljebotn, and copied here from:
https://raw.github.com/dagss/private-scipy-refactor/cythonize/cythonize.py
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
import os
import re
import sys
import hashlib
import subprocess
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool, Lock
from os.path import dirname, join
HASH_FILE = 'cythonize.dat'
DEFAULT_ROOT = 'scipy'
#
# Rules
#
def process_pyx(fromfile, tofile, cwd):
try:
from Cython.Compiler.Version import version as cython_version
from scipy._lib import _pep440
# Try to find pyproject.toml
pyproject_toml = join(dirname(__file__), '..', 'pyproject.toml')
if not os.path.exists(pyproject_toml):
# This exception is ignored in the except block
# change to warning?
raise ImportError('Could not find pyproject.toml file.')
# Try to find the minimum version from pyproject.toml
# by checking the line that mentions "cython"
with open(pyproject_toml, mode='r', encoding='utf-8') as pt:
for line in pt:
if "cython" not in line.lower():
continue
# parse a string such as 'Cython>=x.y.z,<=a.b.c'
# Assume always >= and not a singleton >
# hence split at the first "=" and discard "Cython>" part
line = ''.join(line.split('=', 1)[1:])
# Check also if there is upper bound specified
if ',<' in line:
split_on = ",<=" if ",<=" in line else ",<"
min_req_ver, max_req_ver = line.split(split_on)
# Discard the trailing part from max required version
max_req_ver = max_req_ver.split('"')[0]
else:
min_req_ver = line.split('"')[0]
break
else:
# This exception is ignored in the except block below
# change to warning?
raise ImportError("Could not parse any Cython specification"
" in pyproject.toml file.")
# Note: we only check lower bound, for upper bound we rely on pip
# respecting pyproject.toml. Reason: we want to be able to build/test
# with more recent Cython locally or on main, upper bound is for
# sdist in a release.
if _pep440.parse(cython_version) < _pep440.Version(min_req_ver):
raise Exception(f'Building SciPy requires Cython >= {min_req_ver}'
f', found {cython_version}')
except ImportError:
pass
flags = ['--fast-fail', '-3']
if tofile.endswith('.cxx'):
flags += ['--cplus']
try:
try:
r = subprocess.call(['cython'] + flags + ["-o", tofile, fromfile],
cwd=cwd)
if r != 0:
raise Exception('Cython system call failed:\n'
f' cython {" ".join(flags)} -o {tofile}'
f' {fromfile}')
except OSError as e:
# There are ways of installing Cython that don't result in a
# cython executable on the path, see gh-2397.
py_command = 'import sys;'
py_command += ('from Cython.Compiler.Main '
'import setuptools_main as main;')
py_command += 'sys.exit(main())'
r = subprocess.call([sys.executable, '-c'] + [py_command] +
flags + ["-o", tofile, fromfile], cwd=cwd)
if r != 0:
raise Exception("Cython either isn't installed or "
"it failed.") from e
except OSError as e:
raise OSError('Cython needs to be installed') from e
def process_tempita_pyx(fromfile, tofile, cwd):
try:
try:
from Cython import Tempita as tempita
except ImportError:
import tempita
except ImportError as e:
raise Exception('Building SciPy requires Tempita: '
'pip install --user Tempita') from e
with open(os.path.join(cwd, fromfile), mode='r', encoding='utf-8') as f_in:
template = f_in.read()
pyxcontent = tempita.sub(template)
assert fromfile.endswith('.pyx.in')
pyxfile = fromfile[:-len('.in')]
with open(os.path.join(cwd, pyxfile), "w", encoding='utf8') as f_out:
f_out.write(pyxcontent)
process_pyx(pyxfile, tofile, cwd)
rules = {
# fromext : function
'.pyx': process_pyx,
'.pyx.in': process_tempita_pyx
}
#
# Hash db
#
def load_hashes(filename):
# Return { filename : (sha1 of input, sha1 of output) }
if os.path.isfile(filename):
hashes = {}
with open(filename, mode='r', encoding='utf-8') as f:
for line in f:
filename, inhash, outhash = line.split()
if outhash == "None":
outhash = None
hashes[filename] = (inhash, outhash)
else:
hashes = {}
return hashes
def save_hashes(hash_db, filename):
with open(filename, 'w', encoding='utf-8') as f:
for key, value in sorted(hash_db.items()):
f.write(f"{key} {value[0]} {value[1]}\n")
def sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, "rb") as f:
h.update(f.read())
return h.hexdigest()
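# Hedged example of the hash-db layout (hypothetical digests): each line of
# cythonize.dat is "<path> <sha1-of-input> <sha1-of-output>", e.g.
#
#   scipy/foo/_bar.pyx 3f2a... 9c1d...
#
# load_hashes() parses this back into {path: (inhash, outhash)}, mapping the
# literal string "None" back to None.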
#
# Main program
#
def normpath(path):
path = path.replace(os.sep, '/')
if path.startswith('./'):
path = path[2:]
return path
def get_hash(frompath, topath):
from_hash = sha1_of_file(frompath)
if topath:
to_hash = sha1_of_file(topath) if os.path.exists(topath) else None
else:
to_hash = None
return (from_hash, to_hash)
def get_cython_dependencies(fullfrompath):
fullfromdir = os.path.dirname(fullfrompath)
deps = set()
with open(fullfrompath, mode='r', encoding='utf-8') as f:
pxipattern = re.compile(r'include "([a-zA-Z0-9_]+\.pxi)"')
pxdpattern1 = re.compile(r'from \. cimport ([a-zA-Z0-9_]+)')
pxdpattern2 = re.compile(r'from \.([a-zA-Z0-9_]+) cimport')
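# Illustrative lines these patterns are meant to catch (hypothetical names):
#   include "blas_helpers.pxi"          -> pxipattern
#   from . cimport cython_blas          -> pxdpattern1
#   from ._helpers cimport some_func    -> pxdpattern2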
for line in f:
m = pxipattern.match(line)
if m:
deps.add(os.path.join(fullfromdir, m.group(1)))
m = pxdpattern1.match(line)
if m:
deps.add(os.path.join(fullfromdir, m.group(1) + '.pxd'))
m = pxdpattern2.match(line)
if m:
deps.add(os.path.join(fullfromdir, m.group(1) + '.pxd'))
return list(deps)
def process(path, fromfile, tofile, processor_function, hash_db,
dep_hashes, lock):
with lock:
fullfrompath = os.path.join(path, fromfile)
fulltopath = os.path.join(path, tofile)
current_hash = get_hash(fullfrompath, fulltopath)
if current_hash == hash_db.get(normpath(fullfrompath), None):
file_changed = False
else:
file_changed = True
deps_changed = False
deps = get_cython_dependencies(fullfrompath)
for dep in deps:
dep_hash = get_hash(dep, None)
if dep_hash == hash_db.get(normpath(dep), None):
continue
else:
dep_hashes[normpath(dep)] = dep_hash
deps_changed = True
if not file_changed and not deps_changed:
print('%s has not changed' % fullfrompath)
sys.stdout.flush()
return
print('Processing %s' % fullfrompath)
sys.stdout.flush()
processor_function(fromfile, tofile, cwd=path)
with lock:
# changed target file, recompute hash
current_hash = get_hash(fullfrompath, fulltopath)
# store hash in db
hash_db[normpath(fullfrompath)] = current_hash
def process_generate_pyx(path, lock):
with lock:
print('Running {}'.format(path))
ret = subprocess.call([sys.executable, path])
with lock:
if ret != 0:
raise RuntimeError("Running {} failed".format(path))
def find_process_files(root_dir):
lock = Lock()
try:
num_proc = int(os.environ.get('SCIPY_NUM_CYTHONIZE_JOBS', cpu_count()))
pool = Pool(processes=num_proc)
except ImportError:
# Allow building (single-threaded) on GNU/Hurd, which does not
# support semaphores so Pool cannot initialize.
pool = type('', (), {'imap_unordered': lambda self, func,
iterable: map(func, iterable)})()
except ValueError:
pool = Pool()
hash_db = load_hashes(HASH_FILE)
# Keep changed pxi/pxd hashes in a separate dict until the end
# because if we update hash_db and multiple files include the same
# .pxi file the changes won't be detected.
dep_hashes = {}
# Run any _generate_pyx.py scripts
jobs = []
for cur_dir, dirs, files in os.walk(root_dir):
generate_pyx = os.path.join(cur_dir, '_generate_pyx.py')
if os.path.exists(generate_pyx):
jobs.append(generate_pyx)
for result in pool.imap_unordered(lambda fn:
process_generate_pyx(fn, lock), jobs):
pass
# Process pyx files
jobs = []
m_pattern = br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$"
for cur_dir, dirs, files in os.walk(root_dir):
for filename in files:
in_file = os.path.join(cur_dir, filename + ".in")
if filename.endswith('.pyx') and os.path.isfile(in_file):
continue
for fromext, function in rules.items():
if filename.endswith(fromext):
toext = ".c"
with open(os.path.join(cur_dir, filename), 'rb') as f:
data = f.read()
m = re.search(m_pattern, data, re.I | re.M)
if m:
toext = ".cxx"
fromfile = filename
tofile = filename[:-len(fromext)] + toext
jobs.append((cur_dir, fromfile, tofile, function,
hash_db, dep_hashes, lock))
for result in pool.imap_unordered(lambda args: process(*args), jobs):
pass
hash_db.update(dep_hashes)
save_hashes(hash_db, HASH_FILE)
def main():
try:
root_dir = sys.argv[1]
except IndexError:
root_dir = DEFAULT_ROOT
find_process_files(root_dir)
if __name__ == '__main__':
main()
| 11648
| 31.90678
| 79
|
py
|
scipy
|
scipy-main/tools/authors.py
|
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
List the authors who contributed within a given revision interval::
python tools/authors.py REV1..REV2
`REVx` being a commit hash.
To change the name mapping, edit .mailmap on the top-level of the
repository.
"""
# Author: Pauli Virtanen <pav@iki.fi>. This script is in the public domain.
import optparse
import re
import sys
import os
import io
import subprocess
import collections
stdout_b = sys.stdout.buffer
MAILMAP_FILE = os.path.join(os.path.dirname(__file__), "..", ".mailmap")
def main():
p = optparse.OptionParser(__doc__.strip())
p.add_option("-d", "--debug", action="store_true",
help="print debug output")
p.add_option("-n", "--new", action="store_true",
help="print debug output")
options, args = p.parse_args()
if len(args) != 1:
p.error("invalid number of arguments")
try:
rev1, rev2 = args[0].split('..')
except ValueError:
p.error("argument is not a revision range")
NAME_MAP = load_name_map(MAILMAP_FILE)
# Analyze log data
all_authors = set()
authors = collections.Counter()
def analyze_line(line, names, disp=False):
line = line.strip().decode('utf-8')
# Check the commit author name
m = re.match(u'^@@@([^@]*)@@@', line)
if m:
name = m.group(1)
line = line[m.end():]
name = NAME_MAP.get(name, name)
if disp:
if name not in names:
stdout_b.write((" - Author: %s\n" % name).encode('utf-8'))
names.update((name,))
# Look for "thanks to" messages in the commit log
m = re.search(r'([Tt]hanks to|[Cc]ourtesy of|Co-authored-by:) ([A-Z][A-Za-z]*? [A-Z][A-Za-z]*? [A-Z][A-Za-z]*|[A-Z][A-Za-z]*? [A-Z]\. [A-Z][A-Za-z]*|[A-Z][A-Za-z ]*? [A-Z][A-Za-z]*|[a-z0-9]+)($|\.| )', line)
if m:
name = m.group(2)
if name not in (u'this',):
if disp:
stdout_b.write(" - Log : %s\n" % line.strip().encode('utf-8'))
name = NAME_MAP.get(name, name)
names.update((name,))
line = line[m.end():].strip()
line = re.sub(r'^(and|, and|, ) ', u'Thanks to ', line)
analyze_line(line.encode('utf-8'), names)
# Find all authors before the named range
for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b',
f'{rev1}'):
analyze_line(line, all_authors)
# Find authors in the named range
for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b',
f'{rev1}..{rev2}'):
analyze_line(line, authors, disp=options.debug)
# Sort
def name_key(fullname):
m = re.search(u' [a-z ]*[A-Za-z-]+$', fullname)
if m:
forename = fullname[:m.start()].strip()
surname = fullname[m.start():].strip()
else:
forename = ""
surname = fullname.strip()
if surname.startswith(u'van der '):
surname = surname[8:]
if surname.startswith(u'de '):
surname = surname[3:]
if surname.startswith(u'von '):
surname = surname[4:]
return (surname.lower(), forename.lower())
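# Illustrative (hypothetical name): name_key("Jane van der Doe") drops the
# "van der " particle and returns ("doe", "jane"), so authors sort by surname.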
# generate set of all new authors
if vars(options)['new']:
new_authors = set(authors.keys()).difference(all_authors)
n_authors = list(new_authors)
n_authors.sort(key=name_key)
# Print some empty lines to separate
stdout_b.write(("\n\n").encode('utf-8'))
for author in n_authors:
stdout_b.write(("- %s\n" % author).encode('utf-8'))
# return for early exit so we only print new authors
return
try:
authors.pop('GitHub')
except KeyError:
pass
# Order by name. Could order by count with authors.most_common()
authors = sorted(authors.items(), key=lambda i: name_key(i[0]))
# Print
stdout_b.write(b"""
Authors
=======
""")
for author, count in authors:
# remove @ if only GH handle is available
author_clean = author.strip('@')
if author in all_authors:
stdout_b.write((f"* {author_clean} ({count})\n").encode('utf-8'))
else:
stdout_b.write((f"* {author_clean} ({count}) +\n").encode('utf-8'))
stdout_b.write(("""
A total of %(count)d people contributed to this release.
People with a "+" by their names contributed a patch for the first time.
This list of names is automatically generated, and may not be fully complete.
""" % dict(count=len(authors))).encode('utf-8'))
stdout_b.write(("\nNOTE: Check this list manually! It is automatically generated "
"and some names\n may be missing.\n").encode('utf-8'))
def load_name_map(filename):
name_map = {}
with io.open(filename, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith(u"#") or not line:
continue
m = re.match(r'^(.*?)\s*<(.*?)>(.*?)\s*<(.*?)>\s*$', line)
if not m:
print("Invalid line in .mailmap: '{!r}'".format(line), file=sys.stderr)
sys.exit(1)
new_name = m.group(1).strip()
old_name = m.group(3).strip()
if old_name and new_name:
name_map[old_name] = new_name
return name_map
#------------------------------------------------------------------------------
# Communicating with Git
#------------------------------------------------------------------------------
class Cmd:
executable = None
def __init__(self, executable):
self.executable = executable
def _call(self, command, args, kw, repository=None, call=False):
cmd = [self.executable, command] + list(args)
cwd = None
if repository is not None:
cwd = os.getcwd()
os.chdir(repository)
try:
if call:
return subprocess.call(cmd, **kw)
else:
return subprocess.Popen(cmd, **kw)
finally:
if cwd is not None:
os.chdir(cwd)
def __call__(self, command, *a, **kw):
ret = self._call(command, a, {}, call=True, **kw)
if ret != 0:
raise RuntimeError("%s failed" % self.executable)
def pipe(self, command, *a, **kw):
stdin = kw.pop('stdin', None)
p = self._call(command, a, dict(stdin=stdin, stdout=subprocess.PIPE),
call=False, **kw)
return p.stdout
def read(self, command, *a, **kw):
p = self._call(command, a, dict(stdout=subprocess.PIPE),
call=False, **kw)
out, err = p.communicate()
if p.returncode != 0:
raise RuntimeError("%s failed" % self.executable)
return out
def readlines(self, command, *a, **kw):
out = self.read(command, *a, **kw)
return out.rstrip("\n").split("\n")
def test(self, command, *a, **kw):
ret = self._call(command, a, dict(stdout=subprocess.PIPE,
stderr=subprocess.PIPE),
call=True, **kw)
return (ret == 0)
git = Cmd("git")
#------------------------------------------------------------------------------
if __name__ == "__main__":
main()
| 7474
| 30.540084
| 215
|
py
|
scipy
|
scipy-main/tools/refguide_check.py
|
#!/usr/bin/env python3
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a Scipy submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python3 refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python3 refguide_check.py --doctests optimize
"""
import copy
import doctest
import glob
import inspect
import io
import os
import re
import shutil
import sys
import tempfile
import warnings
from argparse import ArgumentParser
from contextlib import contextmanager, redirect_stderr
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
import docutils.core
import numpy as np
from docutils.parsers.rst import directives
from numpydoc.docscrape_sphinx import get_doc_object
from numpydoc.docscrape import NumpyDocString # noqa
from scipy.stats._distr_params import distcont, distdiscrete # noqa
from scipy import stats # noqa
# Enable specific Sphinx directives
from sphinx.directives.other import SeeAlso, Only
directives.register_directive('seealso', SeeAlso)
directives.register_directive('only', Only)
BASE_MODULE = "scipy"
PUBLIC_SUBMODULES = [
'cluster',
'cluster.hierarchy',
'cluster.vq',
'constants',
'datasets',
'fft',
'fftpack',
'fftpack.convolve',
'integrate',
'interpolate',
'io',
'io.arff',
'io.matlab',
'io.wavfile',
'linalg',
'linalg.blas',
'linalg.lapack',
'linalg.interpolative',
'misc',
'ndimage',
'odr',
'optimize',
'signal',
'signal.windows',
'sparse',
'sparse.csgraph',
'sparse.linalg',
'spatial',
'spatial.distance',
'spatial.transform',
'special',
'stats',
'stats.mstats',
'stats.contingency',
'stats.qmc',
'stats.sampling'
]
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {
'fftpack.convolve': 'fftpack',
'io.wavfile': 'io',
'io.arff': 'io',
}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([
'scipy.stats.kstwobign', # inaccurate cdf or ppf
'scipy.stats.levy_stable',
'scipy.special.sinc', # comes from numpy
'scipy.fft.fftfreq',
'scipy.fft.rfftfreq',
'scipy.fft.fftshift',
'scipy.fft.ifftshift',
'scipy.fftpack.fftfreq',
'scipy.fftpack.fftshift',
'scipy.fftpack.ifftshift',
'scipy.integrate.trapezoid',
'scipy.linalg.LinAlgError',
'scipy.optimize.show_options',
'io.rst', # XXX: need to figure out how to deal w/ mat files
'scipy.signal.bspline',
'scipy.signal.cubic',
'scipy.signal.quadratic',
])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = [
r'scipy\.sparse\.csgraph',
r'scipy\.sparse\.linalg',
r'scipy\.linalg\.blas\.[sdczi].*',
r'scipy\.linalg\.lapack\.[sdczi].*',
]
# these names are not required to be in an autosummary:: listing
# despite being in ALL
REFGUIDE_AUTOSUMMARY_SKIPLIST = [
r'scipy\.special\..*_roots', # old aliases for scipy.special.*_roots
r'scipy\.special\.jn', # alias for jv
r'scipy\.ndimage\.sum', # alias for sum_labels
r'scipy\.integrate\.simps', # alias for simpson
r'scipy\.integrate\.trapz', # alias for trapezoid
r'scipy\.integrate\.cumtrapz', # alias for cumulative_trapezoid
r'scipy\.linalg\.solve_lyapunov', # deprecated name
r'scipy\.stats\.contingency\.chi2_contingency',
r'scipy\.stats\.contingency\.expected_freq',
r'scipy\.stats\.contingency\.margins',
r'scipy\.stats\.reciprocal', # alias for lognormal
r'scipy\.stats\.trapz', # alias for trapezoid
]
# deprecated windows in scipy.signal namespace
for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
'kaiser', 'nuttall', 'parzen', 'triang', 'tukey'):
REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
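# For example (illustrative entries), lines such as
#    linprog -- Linear programming
# or
# .. function:: fresnel
# would be picked up by the patterns below.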
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
if re.match(pat, module_name + '.' + name):
break
else:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg":None})
except DeprecationWarning:
return True
except Exception:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = only_ref.intersection(deprecated)
only_ref = only_ref.difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
output += "\nThis issue can be fixed by adding these objects to\n"
output += "the function listing in __init__.py for this module\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
output += "\nThis issue should likely be fixed by removing these objects\n"
output += "from the function listing in __init__.py for this module\n"
output += "or adding them to __all__.\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data', 'legacy',
'obj', 'versionadded', 'versionchanged', 'module', 'class', 'meth',
'ref', 'func', 'toctree', 'moduleauthor', 'deprecated',
'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except Exception:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search(".*?([\x00-\x09\x0b-\x1f]).*", text)
if m:
msg = ("Docstring contains a non-printable character "
f"{m.group(1)!r} in the line\n\n{m.group(0)!r}\n\n"
"Maybe forgot r\"\"\"?")
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'masked_array': np.ma.masked_array,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float32': np.float32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf,}
def try_convert_namedtuple(got):
# suppose that "got" is something like MoodResult(statistic=10, pvalue=0.1).
# Then convert it to the tuple (10, 0.1), so that we can later compare tuples.
num = got.count('=')
if num == 0:
# not a namedtuple, bail out
return got
regex = (r'[\w\d_]+\(' +
', '.join([r'[\w\d_]+=(.+)']*num) +
r'\)')
grp = re.findall(regex, " ".join(got.split()))
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return got_again
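# Illustrative: try_convert_namedtuple("MoodResult(statistic=10, pvalue=0.1)")
# returns the string "(10, 0.1)", which the caller can then eval and compare.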
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
self._had_unexpected_error = False
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
# Ignore name errors after failing due to an unexpected exception
exception_type = exc_info[0]
if self._had_unexpected_error and exception_type is NameError:
return
self._had_unexpected_error = True
self._report_item_name(out)
return super().report_unexpected_exception(
out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile(r'at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
'# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
'.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except Exception:
# Maybe we're printing a numpy array? This produces invalid python
# code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
# values. So, reinsert commas and retry.
# TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
# (2) n-dim arrays with n > 1
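# Illustrative: "[ 1.  2.]" and "[1. 2.]" both become "1., 2." after the
# comma re-insertion below and can then be eval-ed and compared numerically.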
s_want = want.strip()
s_got = got.strip()
cond = (s_want.startswith("[") and s_want.endswith("]") and
s_got.startswith("[") and s_got.endswith("]"))
if cond:
s_want = ", ".join(s_want[1:-1].split())
s_got = ", ".join(s_got[1:-1].split())
return self.check_output(s_want, s_got, optionflags)
# maybe we are dealing with masked arrays?
# their repr uses '--' for masked values and this is invalid syntax
# If so, replace '--' by nans (they are masked anyway) and retry
if 'masked_array' in want or 'masked_array' in got:
s_want = want.replace('--', 'nan')
s_got = got.replace('--', 'nan')
return self.check_output(s_want, s_got, optionflags)
if "=" not in want and "=" not in got:
# if we're here, want and got cannot be eval-ed (hence cannot
# be converted to numpy objects), they are not namedtuples
# (those must have at least one '=' sign).
# Thus they should have compared equal with vanilla doctest.
# Since they did not, it's an error.
return False
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is something like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
got_again = try_convert_namedtuple(got)
want_again = try_convert_namedtuple(want)
except Exception:
return False
else:
return self.check_output(want_again, got_again, optionflags)
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
# heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
# numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = io.StringIO(newline='')
success = True
# Redirect stderr to the stdout or output
tmp_stderr = sys.stdout if doctest_warnings else output
from scipy._lib._util import _fixed_default_rng
@contextmanager
def temp_cwd():
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
try:
os.chdir(tmpdir)
yield tmpdir
finally:
os.chdir(cwd)
shutil.rmtree(tmpdir)
# Run tests, trying to restore global state afterward
cwd = os.getcwd()
with np.errstate(), np.printoptions(), temp_cwd(), \
redirect_stderr(tmp_stderr), \
_fixed_default_rng():
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=output.write)
if fails > 0:
success = False
output.seek(0)
return success, output.read()
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except Exception:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery,
which is borrowed from stdlib's doctest.testfile (see
https://github.com/python-git/python/blob/master/Lib/doctest.py).
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
refguide can be signalled to skip testing code by adding
``#doctest: +SKIP`` to the end of the line. If the output varies or is
random, add ``# may vary`` or ``# random`` to the comment. For example:
>>> plt.plot(...) # doctest: +SKIP
>>> random.randint(0,10)
5 # random
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
with open(fname, encoding='utf-8') as f:
text = f.read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
# `part` looks like a good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, output))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def check_dist_keyword_names():
# Look for collisions between names of distribution shape parameters and
# keywords of distribution methods. See gh-5982.
distnames = set(distdata[0] for distdata in distcont + distdiscrete)
mod_results = []
for distname in distnames:
dist = getattr(stats, distname)
method_members = inspect.getmembers(dist, predicate=inspect.ismethod)
method_names = [method[0] for method in method_members
if not method[0].startswith('_')]
for methodname in method_names:
method = getattr(dist, methodname)
try:
params = NumpyDocString(method.__doc__)['Parameters']
except TypeError:
result = (f'stats.{distname}.{methodname}', False,
"Method parameters are not documented properly.")
mod_results.append(result)
continue
if not dist.shapes: # can't have collision if there are no shapes
continue
shape_names = dist.shapes.split(', ')
param_names1 = set(param.name for param in params)
param_names2 = set(inspect.signature(method).parameters)
param_names = param_names1.union(param_names2)
# # Disabling this check in this PR;
# # these discrepancies are a separate issue.
# no_doc_params = {'args', 'kwds', 'kwargs'} # no need to document
# undoc_params = param_names2 - param_names1 - no_doc_params
# if undoc_params:
# result = (f'stats.{distname}.{methodname}', False,
# f'Parameter(s) {undoc_params} are not documented.')
# mod_results.append(result)
# continue
intersection = param_names.intersection(shape_names)
if intersection:
message = ("Distribution/method keyword collision: "
f"{intersection} ")
result = (f'stats.{distname}.{methodname}', False, message)
else:
result = (f'stats.{distname}.{methodname}', True, '')
mod_results.append(result)
return mod_results
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*', help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true", help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-tutorial", action="store_true",
help="Skip running doctests in the tutorial.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_tutorial = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
prefix = BASE_MODULE + '.'
if not submodule_name.startswith(prefix):
module_name = prefix + submodule_name
else:
module_name = submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_tutorial:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
if module.__name__ == 'scipy.stats':
mod_results += check_dist_keyword_names()
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_tutorial:
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
for filename in sorted(glob.glob(tut_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
dots=dots, doctest_warnings=args.doctest_warnings)
def scratch():
pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, tut_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
| 35211
| 32.187559
| 101
|
py
|
scipy
|
scipy-main/tools/download-wheels.py
|
#!/usr/bin/env python
"""
Download SciPy wheels from Anaconda staging area.
"""
import os
import re
import shutil
import argparse
import urllib
import urllib.request
import urllib3
from bs4 import BeautifulSoup
__version__ = '0.1'
# Edit these for other projects.
STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/scipy'
PREFIX = 'scipy'
def http_manager():
"""
Return a urllib3 http request manager, leveraging
proxy settings when available.
"""
proxy_dict = urllib.request.getproxies()
if 'http' in proxy_dict:
http = urllib3.ProxyManager(proxy_dict['http'])
elif 'all' in proxy_dict:
http = urllib3.ProxyManager(proxy_dict['all'])
else:
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
return http
def get_wheel_names(version):
""" Get wheel names from Anaconda HTML directory.
This looks in the Anaconda multibuild-wheels-staging page and
parses the HTML to get all the wheel names for a release version.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
"""
http = http_manager()
tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$")
index_url = f"{STAGING_URL}/files"
index_html = http.request('GET', index_url)
soup = BeautifulSoup(index_html.data, 'html.parser')
return soup.findAll(text=tmpl)
def download_wheels(version, wheelhouse):
"""Download release wheels.
The release wheels for the given SciPy version are downloaded
into the given directory.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
wheelhouse : str
Directory in which to download the wheels.
"""
http = http_manager()
wheel_names = get_wheel_names(version)
for i, wheel_name in enumerate(wheel_names):
wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}"
wheel_path = os.path.join(wheelhouse, wheel_name)
with open(wheel_path, 'wb') as f:
with http.request('GET', wheel_url, preload_content=False,) as r:
print(f"{i + 1:<4}{wheel_name}")
shutil.copyfileobj(r, f)
print(f"\nTotal files downloaded: {len(wheel_names)}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"version",
help="SciPy version to download.")
parser.add_argument(
"-w", "--wheelhouse",
default=os.path.join(os.getcwd(), "release", "installers"),
help="Directory in which to store downloaded wheels\n"
"[defaults to <cwd>/release/installers]")
args = parser.parse_args()
wheelhouse = os.path.expanduser(args.wheelhouse)
if not os.path.isdir(wheelhouse):
raise RuntimeError(
f"{wheelhouse} wheelhouse directory is not present."
" Perhaps you need to use the '-w' flag to specify one.")
download_wheels(args.version, wheelhouse)
| 2978
| 27.92233
| 77
|
py
|
scipy
|
scipy-main/tools/generate_f2pymod.py
|
"""
Process f2py template files (`filename.pyf.src` -> `filename.pyf`)
Usage: python generate_pyf.py filename.pyf.src -o filename.pyf
"""
import os
import sys
import re
import subprocess
import argparse
# START OF CODE VENDORED FROM `numpy.distutils.from_template`
#############################################################
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace(r'\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
def find_and_remove_repl_patterns(astr):
names = find_repl_patterns(astr)
astr = re.subn(named_re, '', astr)[0]
return astr, names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace(r'\>', '@rightarrow@')
substr = substr.replace(r'\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = ''
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
writestr += cleanedstr
names.update(defs)
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+\.src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
with open(source) as fid:
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
# END OF CODE VENDORED FROM `numpy.distutils.from_template`
###########################################################
def main():
parser = argparse.ArgumentParser()
parser.add_argument("infile", type=str,
help="Path to the input file")
parser.add_argument("-o", "--outdir", type=str,
help="Path to the output directory")
args = parser.parse_args()
if not args.infile.endswith(('.pyf', '.pyf.src', '.f.src')):
raise ValueError(f"Input file has unknown extension: {args.infile}")
outdir_abs = os.path.join(os.getcwd(), args.outdir)
# Write out the .pyf/.f file
if args.infile.endswith(('.pyf.src', '.f.src')):
code = process_file(args.infile)
fname_pyf = os.path.join(args.outdir,
os.path.splitext(os.path.split(args.infile)[1])[0])
with open(fname_pyf, 'w') as f:
f.write(code)
else:
fname_pyf = args.infile
# Now invoke f2py to generate the C API module file
if args.infile.endswith(('.pyf.src', '.pyf')):
p = subprocess.Popen([sys.executable, '-m', 'numpy.f2py', fname_pyf,
'--build-dir', outdir_abs], #'--quiet'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.getcwd())
out, err = p.communicate()
if not (p.returncode == 0):
raise RuntimeError(f"Writing {args.outfile} with f2py failed!\n"
f"{out}\n"
r"{err}")
if __name__ == "__main__":
main()
| 9372
| 30.989761
| 94
|
py
|
scipy
|
scipy-main/tools/write_release_and_log.py
|
"""
Standalone script for writing release doc and logs::
python tools/write_release_and_log.py <LOG_START> <LOG_END>
Example::
python tools/write_release_and_log.py v1.7.0 v1.8.0
Needs to be run from the root of the repository.
"""
import os
import sys
import subprocess
from hashlib import md5
from hashlib import sha256
from pathlib import Path
sys.path.insert(0, os.path.dirname(__file__))
sys.path.insert(1, os.path.join(os.path.dirname(__file__), 'tools'))
try:
version_utils = __import__("version_utils")
FULLVERSION = version_utils.VERSION
# This is duplicated from setup.py
if os.path.exists('.git'):
GIT_REVISION, _ = version_utils.git_version(
os.path.join(os.path.dirname(__file__), '..'))
else:
GIT_REVISION = "Unknown"
if not version_utils.ISRELEASED:
if GIT_REVISION == "Unknown":
FULLVERSION += '.dev0+Unknown'
else:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
finally:
sys.path.pop(1)
sys.path.pop(0)
try:
# Ensure sensible file permissions
os.umask(0o022)
except AttributeError:
# No umask on non-posix
pass
def get_latest_release_doc(path):
"""
Method to pick the file from 'doc/release' with the highest
release number (e.g., `1.9.0-notes.rst`).
"""
file_paths = os.listdir(path)
file_paths.sort(key=lambda x: list(map(int, (x.split('-')[0].split('.')))))
return os.path.join(path, file_paths[-1])
# ----------------------------
# Release notes and Changelog
# ----------------------------
def compute_md5(idirs):
released = os.listdir(idirs)
checksums = []
for fn in sorted(released):
fn_updated = os.path.join("release", fn)
with open(fn_updated, 'rb') as f:
m = md5(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def compute_sha256(idirs):
# better checksum so gpg signed README.txt containing the sums can be used
# to verify the binaries instead of signing all binaries
released = os.listdir(idirs)
checksums = []
for fn in sorted(released):
fn_updated = os.path.join("release", fn)
with open(fn_updated, 'rb') as f:
m = sha256(f.read())
checksums.append('%s %s' % (m.hexdigest(), os.path.basename(fn)))
return checksums
def write_release_task(filename='NOTES.txt'):
idirs = Path('release')
source = Path(get_latest_release_doc('doc/source/release'))
target = Path(filename)
if target.exists():
target.unlink()
tmp_target = Path(filename + '.txt')
os.system(f'cp {source} {tmp_target}')
with open(str(tmp_target), 'a') as ftarget:
ftarget.writelines("""
Checksums
=========
MD5
~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_md5(idirs)])
ftarget.writelines("""
SHA256
~~~~~~
""")
ftarget.writelines(['%s\n' % c for c in compute_sha256(idirs)])
print("Release README generated successfully")
def write_log_task(filename='Changelog'):
st = subprocess.Popen(
['git', 'log', '%s..%s' % (LOG_START, LOG_END)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = st.communicate()
if not st.returncode == 0:
raise RuntimeError("%s failed" % str(error))
out = output.decode()
with open(filename, 'w') as a:
a.writelines(out)
print("Release logs generated successfully")
def main():
"""
Checks whether the release directory is present
and calls the methods to generate the release notes and changelog.
"""
if not os.path.exists("release"):
os.makedirs("release")
write_release_task(os.path.join("release", 'README'))
write_log_task(os.path.join("release", 'Changelog'))
if __name__ == '__main__':
if len(sys.argv) == 3:
LOG_START = str(sys.argv[1])
LOG_END = str(sys.argv[2])
else:
print("invalid number of arguments, please add LOG_START and LOG_END")
main()
| 4045
| 25.103226
| 79
|
py
|
scipy
|
scipy-main/tools/lint.py
|
#!/usr/bin/env python
import os
import sys
import subprocess
from argparse import ArgumentParser
CONFIG = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'lint.toml',
)
def rev_list(branch, num_commits):
"""List commits in reverse chronological order.
Only the first `num_commits` are shown.
"""
res = subprocess.run(
[
'git',
'rev-list',
'--max-count',
f'{num_commits}',
'--first-parent',
branch
],
stdout=subprocess.PIPE,
encoding='utf-8',
)
res.check_returncode()
return res.stdout.rstrip('\n').split('\n')
def find_branch_point(branch):
"""Find when the current branch split off from the given branch.
It is based off of this Stackoverflow post:
https://stackoverflow.com/questions/1527234/finding-a-branch-point-with-git#4991675
"""
branch_commits = rev_list('HEAD', 1000)
main_commits = set(rev_list(branch, 1000))
for branch_commit in branch_commits:
if branch_commit in main_commits:
return branch_commit
# If a branch split off over 1000 commits ago we will fail to find
# the ancestor.
raise RuntimeError(
'Failed to find a common ancestor in the last 1000 commits'
)
def diff_files(sha):
"""Find the diff since the given SHA."""
res = subprocess.run(
['git', 'diff', '--name-only', '--diff-filter=ACMR', '-z', sha, '--',
'*.py', '*.pyx', '*.pxd', '*.pxi'],
stdout=subprocess.PIPE,
encoding='utf-8'
)
res.check_returncode()
return [f for f in res.stdout.split('\0') if f]
def run_ruff(files, fix):
if not files:
return 0, ""
args = ['--fix', '--exit-non-zero-on-fix'] if fix else []
res = subprocess.run(
['ruff', f'--config={CONFIG}'] + args + list(files),
stdout=subprocess.PIPE,
encoding='utf-8'
)
return res.returncode, res.stdout
def run_cython_lint(files):
if not files:
return 0, ""
res = subprocess.run(
['cython-lint', '--no-pycodestyle'] + list(files),
stdout=subprocess.PIPE,
encoding='utf-8'
)
return res.returncode, res.stdout
def main():
parser = ArgumentParser(description="Also see `pre-commit-hook.py` which "
"lints all files staged in git.")
# In Python 3.9, can use: argparse.BooleanOptionalAction
parser.add_argument("--fix", action='store_true',
help='Attempt to fix linting violations')
parser.add_argument("--diff-against", dest='branch',
type=str, default=None,
help="Diff against "
"this branch and lint modified files. Use either "
"`--diff-against` or `--files`, but not both.")
parser.add_argument("--files", nargs='*',
help="Lint these files or directories; "
"use **/*.py to lint all files")
args = parser.parse_args()
if not ((args.files is None) ^ (args.branch is None)):
print('Specify either `--diff-against` or `--files`. Aborting.')
sys.exit(1)
if args.branch:
branch_point = find_branch_point(args.branch)
files = diff_files(branch_point)
else:
files = args.files
cython_exts = ('.pyx', '.pxd', '.pxi')
cython_files = {f for f in files if any(f.endswith(ext) for ext in cython_exts)}
other_files = set(files) - cython_files
rc_cy, errors = run_cython_lint(cython_files)
if errors:
print(errors)
rc, errors = run_ruff(other_files, fix=args.fix)
if errors:
print(errors)
if rc == 0 and rc_cy != 0:
rc = rc_cy
sys.exit(rc)
if __name__ == '__main__':
main()
| 3870
| 26.848921
| 87
|
py
|
scipy
|
scipy-main/tools/pre-commit-hook.py
|
#!/usr/bin/env python
#
# Pre-commit linting hook.
#
# Install from root of repository with:
#
# cp tools/pre-commit-hook.py .git/hooks/pre-commit
import subprocess
import sys
import os
# Run lint.py from the scipy source tree
linters = [
'../../tools/lint.py',
'tools/lint.py',
'lint.py' # in case pre-commit hook is run from tools dir
]
linter = [f for f in linters if os.path.exists(f)][0]
# names of files that were staged
# add '*.pxd', '*.pxi' once cython-lint supports it
p = subprocess.run(['git', 'diff',
'--cached', '--name-only', '-z',
'--diff-filter=ACMR',
'--', '*.py', '*.pyx'],
capture_output=True, check=True)
files = p.stdout.decode(sys.getfilesystemencoding()).split('\0')
files = [f for f in files if f]
# create a temporary copy of what would get committed, without unstaged
# modifications (e.g., only certain changes in a file may have been committed)
git_dir = os.environ.get('GIT_DIR', '.git')
work_dir = os.path.join(git_dir, '.pre-commit-work_dir')
p = subprocess.run(['git', 'write-tree'], capture_output=True, check=True)
tree_hash = p.stdout.decode('ascii').split('\n')[0]
p = subprocess.run(['git', 'commit-tree', '-p', 'HEAD',
tree_hash, '-m', '...'], capture_output=True, check=True)
fake_commit = p.stdout.decode('ascii').split('\n')[0]
if not os.path.isdir(work_dir):
subprocess.run(['git', 'clone', '-qns', git_dir, work_dir])
subprocess.run(['git', 'reset', '--quiet', '--hard', 'HEAD'],
env={}, cwd=work_dir, check=True)
subprocess.run(['git', 'checkout', '-q', fake_commit],
env={}, cwd=work_dir, check=True)
subprocess.run(['git', 'reset', '--quiet', '--hard', fake_commit],
env={}, cwd=work_dir, check=True)
if '--fix' in sys.argv:
print('Running linter to fix errors...')
p = subprocess.run([linter, '--fix', '--files'] + files)
# Discover which files were modified
p = subprocess.run([linter, '--fix', '--files'] + files, cwd=work_dir)
p = subprocess.run(['git', 'diff', '--name-only', '--', '*.py', '*.pyx'],
capture_output=True, check=True, cwd=work_dir)
files = p.stdout.decode(sys.getfilesystemencoding()).split('\0')
files = [f for f in files if f]
if files:
print('The following files were modified:')
print()
print('\n'.join(files))
else:
print('No files were modified.\n')
print('Please remember to `git add` modified files.')
sys.exit(p.returncode)
p = subprocess.run([linter, '--files'] + files, cwd=work_dir)
if p.returncode != 0:
print('!! Linting failed; please fix errors, `git add` files, and re-commit.')
print()
print('Some errors may be fixable automatically by running:')
print()
print(' ./tools/pre-commit-hook.py --fix')
sys.exit(p.returncode)
| 2,915 | 31.764045 | 82 | py |
| scipy | scipy-main/tools/version_utils.py |
import os
import subprocess
import argparse
MAJOR = 1
MINOR = 12
MICRO = 0
ISRELEASED = False
IS_RELEASE_BRANCH = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
def get_version_info(source_root):
# Adding the git rev number needs to be done inside
# write_version_py(), otherwise the import of scipy.version messes
# up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists(os.path.join(source_root, '.git')):
GIT_REVISION, COMMIT_COUNT = git_version(source_root)
elif os.path.exists('scipy/version.py'):
# must be a source distribution, use existing version file
# load it as a separate module to not load scipy/__init__.py
import runpy
ns = runpy.run_path('scipy/version.py')
GIT_REVISION = ns['git_revision']
        COMMIT_COUNT = ns['commit_count']
else:
GIT_REVISION = "Unknown"
COMMIT_COUNT = "Unknown"
if not ISRELEASED:
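        # e.g. FULLVERSION becomes something like '1.12.0.dev0+1234.1a2b3c4'
        # (the commit count and short hash shown here are only illustrative)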
FULLVERSION += '.dev0+' + COMMIT_COUNT + '.' + GIT_REVISION
return FULLVERSION, GIT_REVISION, COMMIT_COUNT
def write_version_py(source_root, filename='scipy/version.py'):
cnt = """\
# THIS FILE IS GENERATED DURING THE SCIPY BUILD
# See tools/version_utils.py for details
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
commit_count = '%(commit_count)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION, COMMIT_COUNT = get_version_info(source_root)
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'commit_count': COMMIT_COUNT,
'isrelease': str(ISRELEASED)})
finally:
a.close()
# Return the git revision as a string
def git_version(cwd):
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
env=env, cwd=cwd).communicate()[0]
return out
try:
git_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
git_dir = os.path.join(git_dir, ".git")
out = _minimal_ext_cmd(['git',
'--git-dir',
git_dir,
'rev-parse',
'HEAD'])
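        # keep only the 7-character short form of the revision hash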
GIT_REVISION = out.strip().decode('ascii')[:7]
# We need a version number that's regularly incrementing for newer commits,
# so the sort order in a wheelhouse of nightly builds is correct (see
# https://github.com/MacPython/scipy-wheels/issues/114). It should also be
# a reproducible version number, so don't rely on date/time but base it on
# commit history. This gives the commit count since the previous branch
# point from the current branch (assuming a full `git clone`, it may be
# less if `--depth` was used - commonly the default in CI):
prev_version_tag = '^v{}.{}.0'.format(MAJOR, MINOR - 2)
out = _minimal_ext_cmd(['git', '--git-dir', git_dir,
'rev-list', 'HEAD', prev_version_tag,
'--count'])
COMMIT_COUNT = out.strip().decode('ascii')
COMMIT_COUNT = '0' if not COMMIT_COUNT else COMMIT_COUNT
except OSError:
GIT_REVISION = "Unknown"
COMMIT_COUNT = "Unknown"
return GIT_REVISION, COMMIT_COUNT
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--source-root", type=str, default='.',
help="Relative path to the root of the source directory")
args = parser.parse_args()
write_version_py(args.source_root)
| 4,136 | 33.475 | 83 | py |