repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
end-to-end-asd | end-to-end-asd-main/experiment_config.py | import torch.nn as nn
import torch.optim as optim
import models.graph_models as g3d
# Input/output locations for the EASEE R3D-18 experiment.
# NOTE: paths are machine-specific absolute paths — adjust per deployment.
EASEE_R3D_18_inputs = {
    # input files: augmented AVA-ActiveSpeaker annotation CSVs
    'csv_train_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_train_augmented.csv',
    'csv_val_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_val_augmented.csv',
    'csv_test_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_test_augmented.csv',
    # Data config: per-entity audio wavs and face-crop directories
    'audio_dir': '/Dataset/ava_active_speaker/instance_wavs_time/',
    'video_dir': '/Dataset/ava_active_speaker/instance_crops_time/',
    'models_out': '/home/alcazajl/Models/ASC2/tan3d/',  # save directory
    # Pretrained Weights: Kinetics-pretrained 3D ResNet-18 checkpoint
    'video_pretrain_weights': '/home/alcazajl/Models/Pretrained/R3D/r3d18_K_200ep.pth'
}
# Input/output locations for the EASEE R3D-50 experiment.
# Same layout as EASEE_R3D_18_inputs, but pointing at the R3D-50 checkpoint.
EASEE_R3D_50_inputs = {
    # input files: augmented AVA-ActiveSpeaker annotation CSVs
    'csv_train_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_train_augmented.csv',
    'csv_val_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_val_augmented.csv',
    'csv_test_full': '/Dataset/ava_active_speaker/csv/ava_activespeaker_test_augmented.csv',
    # Data config: per-entity audio wavs and face-crop directories
    'audio_dir': '/Dataset/ava_active_speaker/instance_wavs_time/',
    'video_dir': '/Dataset/ava_active_speaker/instance_crops_time/',
    'models_out': '/home/alcazajl/Models/ASC2/tan3d/',  # save directory
    # Pretrained Weights: Kinetics-pretrained 3D ResNet-50 checkpoint
    'video_pretrain_weights': '/home/alcazajl/Models/Pretrained/R3D/r3d50_K_200ep.pth'
}
# Architecture + optimization hyper-parameters for the R3D-18 4-level GCN model.
EASEE_R3D_18_4lvl_params = {
    # Net Arch: factory that builds the two-stream R3D-18 + 4-level graph head
    'backbone': g3d.R3D18_4lvlGCN,
    # Optimization config
    'optimizer': optim.Adam,
    'criterion': nn.CrossEntropyLoss(),
    'learning_rate': 3e-4,
    'epochs': 15,
    'gamma': 0.1,  # LR decay factor
    # Batch Config
    'batch_size': 17,
    'threads': 8  # dataloader worker processes
}
# Architecture + optimization hyper-parameters for the R3D-50 4-level GCN model.
# Identical to the R3D-18 settings except for the backbone factory.
EASEE_R3D_50_4lvl_params = {
    # Net Arch: factory that builds the two-stream R3D-50 + 4-level graph head
    'backbone': g3d.R3D50_4lvlGCN,
    # Optimization config
    'optimizer': optim.Adam,
    'criterion': nn.CrossEntropyLoss(),
    'learning_rate': 3e-4,
    'epochs': 15,
    'gamma': 0.1,  # LR decay factor
    # Batch Config
    'batch_size': 17,
    'threads': 8  # dataloader worker processes
}
| 2,020 | 27.871429 | 94 | py |
end-to-end-asd | end-to-end-asd-main/easee_R3D50.py | import os
import torch
import experiment_config as exp_conf
import util.custom_transforms as ct
from torchvision import transforms
from torch_geometric.loader import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from ez_io.logging import setup_optim_outputs
from datasets.graph_datasets import IndependentGraphDatasetETE3D
from models.graph_layouts import get_spatial_connection_pattern
from models.graph_layouts import get_temporal_connection_pattern
from optimization.optimization_amp import optimize_easee
from util.command_line import unpack_command_line_args, get_default_arg_parser
if __name__ == '__main__':
    # Parse command line args.
    # NOTE: lr_arg is unpacked but unused — the learning rate comes from the
    # config dict, matching the R3D-18 entry point.
    command_line_args = get_default_arg_parser().parse_args()
    lr_arg, frames_per_clip, ctx_size, n_clips, strd, img_size = unpack_command_line_args(command_line_args)

    # Graph-head connection patterns (spatial and temporal edge lists)
    scp = get_spatial_connection_pattern(ctx_size, n_clips)
    tcp = get_temporal_connection_pattern(ctx_size, n_clips)

    # BUGFIX: this is the R3D-50 entry point, so select the R3D-50 config
    # dicts (this file previously pointed at the R3D-18 ones). Also read the
    # net/optimization keys ('backbone', 'criterion', 'optimizer',
    # 'learning_rate') from opt_config — they are defined in the *_params
    # dict, not in the *_inputs dict, so the old lookups raised KeyError.
    # This mirrors easee_R3D18.py exactly.
    opt_config = exp_conf.EASEE_R3D_50_4lvl_params
    easee_config = exp_conf.EASEE_R3D_50_inputs

    # Data transforms: resize then normalize (see util.custom_transforms)
    image_size = (img_size, img_size)
    video_train_transform = transforms.Compose([transforms.Resize(image_size), ct.video_train])
    video_val_transform = transforms.Compose([transforms.Resize(image_size), ct.video_val])

    # Output config: model name encodes the sampling hyper-parameters
    model_name = 'easee_R3D_50' + \
                 '_clip' + str(frames_per_clip) + \
                 '_ctx' + str(ctx_size) + \
                 '_len' + str(n_clips) + \
                 '_str' + str(strd)
    log, target_models = setup_optim_outputs(easee_config['models_out'],
                                             opt_config, model_name)

    # Create network and offload to GPU when available
    pretrain_weightds_path = easee_config['video_pretrain_weights']
    ez_net = opt_config['backbone'](pretrain_weightds_path)

    has_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if has_cuda else 'cpu')
    print('Cuda info ', has_cuda, device)
    ez_net.to(device)

    # Optimization config
    criterion = opt_config['criterion']
    optimizer = opt_config['optimizer'](ez_net.parameters(), lr=opt_config['learning_rate'])
    scheduler = MultiStepLR(optimizer, milestones=[6, 8], gamma=0.1)

    # Data paths (per-split subdirectories)
    video_train_path = os.path.join(easee_config['video_dir'], 'train')
    audio_train_path = os.path.join(easee_config['audio_dir'], 'train')
    video_val_path = os.path.join(easee_config['video_dir'], 'val')
    audio_val_path = os.path.join(easee_config['audio_dir'], 'val')

    # Datasets and loaders
    d_train = IndependentGraphDatasetETE3D(audio_train_path,
                                           video_train_path,
                                           easee_config['csv_train_full'],
                                           n_clips,
                                           strd,
                                           ctx_size,
                                           frames_per_clip,
                                           scp, tcp,
                                           video_train_transform,
                                           do_video_augment=True,
                                           crop_ratio=0.95)
    d_val = IndependentGraphDatasetETE3D(audio_val_path,
                                         video_val_path,
                                         easee_config['csv_val_full'],
                                         n_clips,
                                         strd,
                                         ctx_size,
                                         frames_per_clip,
                                         scp, tcp,
                                         video_val_transform,
                                         do_video_augment=False)
    dl_train = DataLoader(d_train, batch_size=opt_config['batch_size'],
                          shuffle=True, num_workers=opt_config['threads'],
                          pin_memory=True)
    dl_val = DataLoader(d_val, batch_size=opt_config['batch_size'],
                        shuffle=True, num_workers=opt_config['threads'],
                        pin_memory=True)

    # Optimization loop
    model = optimize_easee(ez_net, dl_train, dl_val, device,
                           criterion, optimizer, scheduler,
                           num_epochs=opt_config['epochs'],
                           spatial_ctx_size=ctx_size,
                           time_len=n_clips,
                           models_out=target_models, log=log)
| 4,566 | 43.77451 | 108 | py |
end-to-end-asd | end-to-end-asd-main/easee_R3D18.py | import os
import sys
import torch
import experiment_config as exp_conf
import util.custom_transforms as ct
from torchvision import transforms
from torch_geometric.loader import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from ez_io.logging import setup_optim_outputs
from datasets.graph_datasets import IndependentGraphDatasetETE3D
from models.graph_layouts import get_spatial_connection_pattern
from models.graph_layouts import get_temporal_connection_pattern
from optimization.optimization_amp import optimize_easee
from util.command_line import unpack_command_line_args, get_default_arg_parser
if __name__ == '__main__':
    # Parse command line args.
    # NOTE: lr_arg is unpacked but unused — the learning rate is taken from
    # the config dict below.
    command_line_args = get_default_arg_parser().parse_args()
    lr_arg, frames_per_clip, ctx_size, n_clips, strd, img_size = unpack_command_line_args(command_line_args)

    # Graph Head Connection pattern (spatial and temporal edge lists)
    scp = get_spatial_connection_pattern(ctx_size, n_clips)
    tcp = get_temporal_connection_pattern(ctx_size, n_clips)

    # Experiment configuration: hyper-parameters + data locations
    opt_config = exp_conf.EASEE_R3D_18_4lvl_params
    easee_config = exp_conf.EASEE_R3D_18_inputs

    # Data Transforms: resize then normalize (see util.custom_transforms)
    image_size = (img_size, img_size)
    video_train_transform = transforms.Compose([transforms.Resize(image_size), ct.video_train])
    video_val_transform = transforms.Compose([transforms.Resize(image_size), ct.video_val])

    # output config: model name encodes the sampling hyper-parameters
    model_name = 'easee_R3D_18' + \
                 '_clip' + str(frames_per_clip) + \
                 '_ctx' + str(ctx_size) + \
                 '_len' + str(n_clips) + \
                 '_str' + str(strd)
    log, target_models = setup_optim_outputs(easee_config['models_out'],
                                             opt_config, model_name)

    # Create Network and offload to GPU (when available)
    pretrain_weightds_path = easee_config['video_pretrain_weights']
    ez_net = opt_config['backbone'](pretrain_weightds_path)

    has_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if has_cuda else 'cpu')
    print('Cuda info ', has_cuda, device)
    ez_net.to(device)

    # Optimization config
    criterion = opt_config['criterion']
    optimizer = opt_config['optimizer'](ez_net.parameters(), lr=opt_config['learning_rate'])
    scheduler = MultiStepLR(optimizer, milestones=[6, 8], gamma=0.1)

    # Data Paths (per-split subdirectories)
    video_train_path = os.path.join(easee_config['video_dir'], 'train')
    audio_train_path = os.path.join(easee_config['audio_dir'], 'train')
    video_val_path = os.path.join(easee_config['video_dir'], 'val')
    audio_val_path = os.path.join(easee_config['audio_dir'], 'val')

    # Dataloaders
    d_train = IndependentGraphDatasetETE3D(audio_train_path,
                                           video_train_path,
                                           easee_config['csv_train_full'],
                                           n_clips,
                                           strd,
                                           ctx_size,
                                           frames_per_clip,
                                           scp, tcp,
                                           video_train_transform,
                                           do_video_augment=True,
                                           crop_ratio=0.95)
    d_val = IndependentGraphDatasetETE3D(audio_val_path,
                                         video_val_path,
                                         easee_config['csv_val_full'],
                                         n_clips,
                                         strd,
                                         ctx_size,
                                         frames_per_clip,
                                         scp, tcp,
                                         video_val_transform,
                                         do_video_augment=False)
    dl_train = DataLoader(d_train, batch_size=opt_config['batch_size'],
                          shuffle=True, num_workers=opt_config['threads'],
                          pin_memory=True)
    dl_val = DataLoader(d_val, batch_size=opt_config['batch_size'],
                        shuffle=True, num_workers=opt_config['threads'],
                        pin_memory=True)

    # Optimization loop
    model = optimize_easee(ez_net, dl_train, dl_val, device,
                           criterion, optimizer, scheduler,
                           num_epochs=opt_config['epochs'],
                           spatial_ctx_size=ctx_size,
                           time_len=n_clips,
                           models_out=target_models, log=log)
| 4,585 | 43.524272 | 108 | py |
end-to-end-asd | end-to-end-asd-main/models/shared_3d.py | import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_inplanes():
    """Per-stage base channel widths of the 3D ResNet: 64, 128, 256, 512."""
    return [64 * (2 ** stage) for stage in range(4)]
def conv3x3x3(in_planes, out_planes, stride=1):
    """3x3x3 convolution, padding 1, bias-free (a BatchNorm follows it)."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
def conv1x1x1(in_planes, out_planes, stride=1):
    """1x1x1 (pointwise) convolution, bias-free (a BatchNorm follows it)."""
    return nn.Conv3d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock3D(nn.Module):
    """Two-conv residual block for 3D ResNets (expansion factor 1).

    Layout: conv3x3x3(stride) -> BN -> ReLU -> conv3x3x3 -> BN, added to the
    (optionally downsampled) input, then a final ReLU.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        # First conv carries the stride; both are bias-free since BN follows.
        self.conv1 = conv3x3x3(in_planes, planes, stride)
        self.bn1 = nn.BatchNorm3d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3x3(planes, planes)
        self.bn2 = nn.BatchNorm3d(planes)
        # `downsample` adapts the shortcut when shape/stride changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out = out + shortcut
        return self.relu(out)
class Bottleneck3D(nn.Module):
    """Bottleneck residual block for 3D ResNets (expansion factor 4).

    Layout: 1x1x1 reduce -> 3x3x3 (strided) -> 1x1x1 expand, each followed by
    BN; the sum with the (optionally downsampled) input gets a final ReLU.
    """

    expansion = 4

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = conv1x1x1(in_planes, planes)          # channel reduce
        self.bn1 = nn.BatchNorm3d(planes)
        self.conv2 = conv3x3x3(planes, planes, stride)     # spatial, strided
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = conv1x1x1(planes, planes * self.expansion)  # expand x4
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out = out + shortcut
        return self.relu(out)
| 2,293 | 23.666667 | 73 | py |
end-to-end-asd | end-to-end-asd-main/models/shared_2d.py | import torch.nn as nn
from torch import Tensor
from typing import Type, Any, Callable, Union, List, Optional
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding (padding tracks dilation), bias-free."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """1x1 (pointwise) convolution, bias-free."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
class BasicBlock2D(nn.Module):
    """torchvision-style 2D ResNet basic block (two 3x3 convs, expansion 1)."""

    expansion: int = 1

    def __init__(self, inplanes: int, planes: int, stride: int = 1,
                 downsample: Optional[nn.Module] = None, groups: int = 1,
                 base_width: int = 64, dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super(BasicBlock2D, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # The basic block has no inner width scaling — reject ResNeXt args.
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")

        # conv1 (and the shortcut's downsample) carry the stride when != 1.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        out = out + shortcut
        return self.relu(out)
class Bottleneck2D(nn.Module):
    """torchvision-style 2D bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Following torchvision's "ResNet v1.5", the stride is applied in conv2.
    """

    expansion: int = 4

    def __init__(self, inplanes: int, planes: int, stride: int = 1,
                 downsample: Optional[nn.Module] = None, groups: int = 1,
                 base_width: int = 64, dilation: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None) -> None:
        super(Bottleneck2D, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Inner width scales with base_width and groups (ResNeXt support).
        width = int(planes * (base_width / 64.)) * groups

        # conv2 (and the shortcut's downsample) carry the stride when != 1.
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: Tensor) -> Tensor:
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out = out + shortcut
        return self.relu(out)
end-to-end-asd | end-to-end-asd-main/models/graph_models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parameter
from functools import partial

from torch_geometric.nn import EdgeConv

from models.graph_layouts import generate_av_mask
from models.shared_2d import BasicBlock2D, conv1x1
from models.shared_3d import BasicBlock3D, Bottleneck3D, conv1x1x1, get_inplanes
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
}
class LinearPathPreact(nn.Module):
    """Pre-activation two-layer MLP used inside EdgeConv: (BN -> ReLU -> FC) x 2.

    Both linear layers are bias-free; the preceding BatchNorms provide the
    affine shift instead.
    """

    def __init__(self, in_channels, hidden_channels):
        super(LinearPathPreact, self).__init__()
        # Layer 1 (BN normalizes the *input* width — pre-activation order)
        self.fc1 = nn.Linear(in_channels, hidden_channels, bias=False)
        self.bn1 = nn.BatchNorm1d(in_channels)
        # Layer 2
        self.fc2 = nn.Linear(hidden_channels, hidden_channels, bias=False)
        self.bn2 = nn.BatchNorm1d(hidden_channels)
        # Shared activation
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.fc1(self.relu(self.bn1(x)))
        return self.fc2(self.relu(self.bn2(hidden)))
class GraphTwoStreamResNet3D(nn.Module):
    """Two-stream audio-visual network with an EdgeConv graph head.

    The audio stream is a 2D ResNet over single-channel spectrograms; the
    video stream is a 3D ResNet over face-crop clips. Both are reduced to
    128-d node features, interleaved into one graph tensor, refined by four
    EdgeConv layers, and classified into 2 classes (speaking / not speaking).
    Two auxiliary 2-way heads supervise the per-modality features.

    Args:
        args_2d: (block, layers, zero_init_residual, groups, width_per_group,
                  replace_stride_with_dilation, norm_layer) for the audio net.
        args_3d: (block, layers, block_inplanes, n_input_channels,
                  conv1_t_size, conv1_t_stride, no_max_pool, shortcut_type,
                  widen_factor) for the video net.
    """

    def __init__(self, args_2d, args_3d):
        super().__init__()
        (block_2d, layers_2d, zero_init_residual, groups_2d, width_per_group,
         replace_stride_with_dilation, norm_layer_2d) = args_2d
        (block_3d, layers_3d, block_inplanes_3d, n_input_channels, conv1_t_size,
         conv1_t_stride, no_max_pool, shortcut_type, widen_factor) = args_3d
        # NOTE: zero_init_residual is accepted for interface parity but unused.

        # Global Args
        if norm_layer_2d is None:
            norm_layer_2d = nn.BatchNorm2d
        self._norm_layer_2d = norm_layer_2d

        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))

        # Audio stream: 2D ResNet over a 1-channel spectrogram
        self.inplanes_2d = 64
        self.dilation_2d = 1
        self.groups_2d = groups_2d
        self.base_width = width_per_group
        self.audio_conv1 = nn.Conv2d(1, self.inplanes_2d, kernel_size=7, stride=2, padding=3,
                                     bias=False)
        self.a_bn1 = norm_layer_2d(self.inplanes_2d)
        self.a_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.a_layer1 = self._make_layer_2D(block_2d, 64, layers_2d[0])
        self.a_layer2 = self._make_layer_2D(block_2d, 128, layers_2d[1], stride=2,
                                            dilate=replace_stride_with_dilation[0])
        self.a_layer3 = self._make_layer_2D(block_2d, 256, layers_2d[2], stride=2,
                                            dilate=replace_stride_with_dilation[1])
        self.a_layer4 = self._make_layer_2D(block_2d, 512, layers_2d[3], stride=2,
                                            dilate=replace_stride_with_dilation[2])
        self.a_avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Kept for checkpoint compatibility; not used in this class's forward.
        self.fc_128_a = nn.Linear(512 * block_2d.expansion, 128)

        # Video Stream: 3D ResNet over RGB clips
        block_inplanes = [int(x * widen_factor) for x in block_inplanes_3d]
        self.in_planes_3d = block_inplanes[0]
        self.no_max_pool = no_max_pool
        self.v_conv1 = nn.Conv3d(n_input_channels, self.in_planes_3d,
                                 kernel_size=(conv1_t_size, 7, 7),
                                 stride=(conv1_t_stride, 2, 2),
                                 padding=(conv1_t_size // 2, 3, 3),
                                 bias=False)
        self.v_bn1 = nn.BatchNorm3d(self.in_planes_3d)
        self.v_maxpool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(2, 2, 2), padding=(0, 1, 1))
        self.v_layer1 = self._make_layer_3D(block_3d, block_inplanes[0], layers_3d[0],
                                            shortcut_type)
        self.v_layer2 = self._make_layer_3D(block_3d, block_inplanes[1], layers_3d[1],
                                            shortcut_type, stride=2)
        self.v_layer3 = self._make_layer_3D(block_3d, block_inplanes[2], layers_3d[2],
                                            shortcut_type, stride=2)
        self.v_layer4 = self._make_layer_3D(block_3d, block_inplanes[3], layers_3d[3],
                                            shortcut_type, stride=2)
        self.v_avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))
        # Kept for checkpoint compatibility; not used in this class's forward.
        self.fc_128_v = nn.Linear(512 * block_3d.expansion, 128)

        # Shared activation
        self.relu = nn.ReLU(inplace=True)

        # Dim reduction into the common 128-d node space
        self.reduction_a = nn.Linear(512 * block_2d.expansion, 128)
        self.reduction_v = nn.Linear(512 * block_3d.expansion, 128)

        # Auxiliary per-modality supervision heads
        self.fc_aux_a = nn.Linear(128, 2)
        self.fc_aux_v = nn.Linear(128, 2)

        # Graph Net: 4-layer EdgeConv stack + final classifier
        self.edge1 = EdgeConv(LinearPathPreact(128 * 2, 64))
        self.edge2 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.edge3 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.edge4 = EdgeConv(LinearPathPreact(64 * 2, 64))
        self.fc = nn.Linear(64, 2)

        # Kaiming init for convs, constant init for norms (both streams)
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer_2D(self, block, planes, blocks, stride=1, dilate=False):
        """Build one 2D ResNet stage of `blocks` blocks (torchvision layout)."""
        norm_layer = self._norm_layer_2d
        downsample = None
        previous_dilation = self.dilation_2d
        if dilate:
            self.dilation_2d *= stride
            stride = 1
        if stride != 1 or self.inplanes_2d != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes_2d, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes_2d, planes, stride, downsample, self.groups_2d,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes_2d = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes_2d, planes, groups=self.groups_2d,
                                base_width=self.base_width, dilation=self.dilation_2d,
                                norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _downsample_basic_block(self, x, planes, stride):
        """Parameter-free 'A' shortcut: stride-pool, then zero-pad channels.

        BUGFIX: `F` (torch.nn.functional) was used here without being
        imported — the module now imports it. Also builds the zero padding
        directly on x's device/dtype instead of the `.data`/`.cuda()` dance,
        which only handled float32 CUDA tensors.
        """
        out = F.avg_pool3d(x, kernel_size=1, stride=stride)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),
                                out.size(3), out.size(4),
                                device=out.device, dtype=out.dtype)
        out = torch.cat([out, zero_pads], dim=1)
        return out

    def _make_layer_3D(self, block, planes, blocks, shortcut_type, stride=1):
        """Build one 3D ResNet stage; shortcut 'A' pads, otherwise projects."""
        downsample = None
        if stride != 1 or self.in_planes_3d != planes * block.expansion:
            if shortcut_type == 'A':
                downsample = partial(self._downsample_basic_block,
                                     planes=planes * block.expansion,
                                     stride=stride)
            else:
                downsample = nn.Sequential(
                    conv1x1x1(self.in_planes_3d, planes
                              * block.expansion, stride),
                    nn.BatchNorm3d(planes * block.expansion))

        layers = []
        layers.append(block(in_planes=self.in_planes_3d, planes=planes,
                            stride=stride, downsample=downsample))
        self.in_planes_3d = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.in_planes_3d, planes))
        return nn.Sequential(*layers)

    def forward_audio(self, a, audio_size):
        """Audio stream: spectrogram -> 512*expansion feature vector."""
        # Keep one channel and trim padding to the true spectrogram extent.
        a = torch.unsqueeze(a[:, 0, 0, :audio_size[1], :audio_size[2]], dim=1)
        a = self.audio_conv1(a)
        a = self.a_bn1(a)
        a = self.relu(a)
        a = self.a_maxpool(a)

        a = self.a_layer1(a)
        a = self.a_layer2(a)
        a = self.a_layer3(a)
        a = self.a_layer4(a)

        a = self.a_avgpool(a)
        a = a.reshape(a.size(0), -1)
        return a

    def forward_video(self, v):
        """Video stream: clip tensor -> 512*expansion feature vector."""
        v = self.v_conv1(v)
        v = self.v_bn1(v)
        v = self.relu(v)
        if not self.no_max_pool:
            v = self.v_maxpool(v)

        v = self.v_layer1(v)
        v = self.v_layer2(v)
        v = self.v_layer3(v)
        v = self.v_layer4(v)

        v = self.v_avgpool(v)
        v = v.reshape(v.size(0), -1)
        return v

    def forward(self, data, ctx_size, audio_size):
        """Return (graph logits, aux audio logits, aux video logits)."""
        x, edge_index, _ = data.x, data.edge_index, data.batch

        # Indexing masks separating audio nodes from video nodes
        audio_mask, video_mask = generate_av_mask(ctx_size, x.size(0))

        # Initial conv forward, per stream
        audio_feats = self.forward_audio(x[audio_mask], audio_size)
        video_feats = self.forward_video(x[video_mask])

        # Dim reduction into the shared node space
        audio_feats = self.relu(self.reduction_a(audio_feats))
        video_feats = self.relu(self.reduction_v(video_feats))

        # Rebuild interleaved tensor.
        # BUGFIX: use .device rather than .get_device() — the latter returns
        # -1 for CPU tensors, which is not a valid device argument.
        graph_feats = torch.zeros(
            (x.size(0), 128), device=audio_feats.device, dtype=audio_feats.dtype)
        graph_feats[audio_mask] = audio_feats
        graph_feats[video_mask] = video_feats

        # Aux supervision
        audio_out = self.fc_aux_a(graph_feats[audio_mask])
        video_out = self.fc_aux_v(graph_feats[video_mask])

        # Graph Stream
        graph_feats = self.edge1(graph_feats, edge_index)
        graph_feats = self.edge2(graph_feats, edge_index)
        graph_feats = self.edge3(graph_feats, edge_index)
        graph_feats = self.edge4(graph_feats, edge_index)

        return self.fc(graph_feats), audio_out, video_out
class GraphTwoStreamResNet3DTwoGraphs4LVLRes(GraphTwoStreamResNet3D):
    """Variant with separate spatial/temporal EdgeConv stacks and residual links.

    Expects `data.edge_index` to hold two edge lists: index 0 the spatial
    graph, index 1 the temporal graph. Each of the 4 levels applies a spatial
    EdgeConv followed by a temporal one; levels 2-4 add a residual connection
    to the previous level's output.
    """

    def __init__(self, args_2d, args_3d, filter_size):
        super().__init__(args_2d, args_3d)
        self.edge_spatial_1 = EdgeConv(LinearPathPreact(128 * 2, filter_size))
        self.edge_spatial_2 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_spatial_3 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_spatial_4 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))

        self.edge_temporal_1 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_2 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_3 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))
        self.edge_temporal_4 = EdgeConv(LinearPathPreact(filter_size * 2, filter_size))

        self.fc = nn.Linear(filter_size, 2)

        # Discard the parent's single-stream EdgeConv stack: assigning None
        # replaces the registered submodules, so their parameters are not
        # stored, trained, or saved with this model.
        self.edge1 = None
        self.edge2 = None
        self.edge3 = None
        self.edge4 = None

    def forward(self, data, ctx_size, audio_size):
        """Return (graph logits, aux audio logits, aux video logits)."""
        x, joint_edge_index, _ = data.x, data.edge_index, data.batch
        spatial_edge_index = joint_edge_index[0]
        temporal_edge_index = joint_edge_index[1]

        # Indexing masks separating audio nodes from video nodes
        audio_mask, video_mask = generate_av_mask(ctx_size, x.size(0))

        # Initial conv forward, per stream
        audio_feats = self.forward_audio(x[audio_mask], audio_size)
        video_feats = self.forward_video(x[video_mask])

        # Dim reduction into the shared node space
        audio_feats = self.relu(self.reduction_a(audio_feats))
        video_feats = self.relu(self.reduction_v(video_feats))

        # Rebuild interleaved tensor.
        # BUGFIX: use .device rather than .get_device() — the latter returns
        # -1 for CPU tensors, which is not a valid device argument.
        graph_feats = torch.zeros((x.size(0), 128), device=audio_feats.device, dtype=audio_feats.dtype)
        graph_feats[audio_mask] = audio_feats
        graph_feats[video_mask] = video_feats

        # Aux supervision
        audio_out = self.fc_aux_a(graph_feats[audio_mask])
        video_out = self.fc_aux_v(graph_feats[video_mask])

        # Level 1: spatial then temporal
        graph_feats_1s = self.edge_spatial_1(graph_feats, spatial_edge_index)
        graph_feats_1st = self.edge_temporal_1(graph_feats_1s, temporal_edge_index)

        # Level 2 (+ residual from level 1)
        graph_feats_2s = self.edge_spatial_2(graph_feats_1st, spatial_edge_index)
        graph_feats_2st = self.edge_temporal_2(graph_feats_2s, temporal_edge_index)
        graph_feats_2st = graph_feats_2st + graph_feats_1st

        # Level 3 (+ residual from level 2)
        graph_feats_3s = self.edge_spatial_3(graph_feats_2st, spatial_edge_index)
        graph_feats_3st = self.edge_temporal_3(graph_feats_3s, temporal_edge_index)
        graph_feats_3st = graph_feats_3st + graph_feats_2st

        # Level 4 (+ residual from level 3)
        graph_feats_4s = self.edge_spatial_4(graph_feats_3st, spatial_edge_index)
        graph_feats_4st = self.edge_temporal_4(graph_feats_4s, temporal_edge_index)
        graph_feats_4st = graph_feats_4st + graph_feats_3st

        return self.fc(graph_feats_4st), audio_out, video_out
def _load_video_weights_into_model(model, ws_file):
resnet_state_dict = torch.load(ws_file)['state_dict']
own_state = model.state_dict()
for name, param in resnet_state_dict.items():
if 'v_'+name in own_state:
own_state['v_'+name].copy_(param)
else:
print('No video assignation for ', name)
print('loaded video ws')
return
def _load_audio_weights_into_model(model, arch2d, progress):
    """Initialize the audio (2D) stream from an ImageNet ResNet checkpoint.

    Checkpoint entries `<name>` are copied into model state entries
    `a_<name>`; unmatched entries are reported and skipped. The pretrained
    3-channel RGB stem is averaged into the single-channel spectrogram stem.
    """
    resnet_state_dict = load_state_dict_from_url(
        model_urls[arch2d], progress=progress)

    own_state = model.state_dict()
    for name, param in resnet_state_dict.items():
        target_name = 'a_' + name
        if target_name in own_state:
            own_state[target_name].copy_(param)
        else:
            print('No audio assignation for ', name)

    # Audio initial Ws: mean over the RGB axis -> 1-channel conv stem
    conv1_weights = resnet_state_dict['conv1.weight']
    avgWs = torch.mean(conv1_weights, dim=1, keepdim=True)
    own_state['audio_conv1.weight'].copy_(avgWs)

    print('loaded audio ws')
    return
def R3D18_4lvlGCN(pretrained_weigths, filter_size=128):
    """Build the two-stream R3D-18 model with the 4-level dual-graph head.

    Audio stream starts from ImageNet ResNet-18; video stream from the
    Kinetics R3D-18 checkpoint at `pretrained_weigths`.
    """
    args_2d = (BasicBlock2D, [2, 2, 2, 2], False, 1, 64, None, None)
    args_3d = (BasicBlock3D, [2, 2, 2, 2], get_inplanes(), 3, 7, 1, False, 'B', 1.0)

    model = GraphTwoStreamResNet3DTwoGraphs4LVLRes(args_2d, args_3d, filter_size)
    _load_audio_weights_into_model(model, 'resnet18', True)
    _load_video_weights_into_model(model, pretrained_weigths)
    return model
def R3D50_4lvlGCN(pretrained_weigths, filter_size=128):
    """Build the two-stream R3D-50 model with the 4-level dual-graph head.

    The video stream uses Bottleneck3D blocks (ResNet-50 layout) loaded from
    the Kinetics checkpoint at `pretrained_weigths`; the audio stream is
    still an ImageNet ResNet-18.
    """
    args_2d = (BasicBlock2D, [2, 2, 2, 2], False, 1, 64, None, None)
    args_3d = (Bottleneck3D, [3, 4, 6, 3], get_inplanes(), 3, 7, 1, False, 'B', 1.0)

    model = GraphTwoStreamResNet3DTwoGraphs4LVLRes(args_2d, args_3d, filter_size)
    _load_audio_weights_into_model(model, 'resnet18', True)
    _load_video_weights_into_model(model, pretrained_weigths)
    return model
| 15,486 | 39.225974 | 146 | py |
end-to-end-asd | end-to-end-asd-main/util/custom_transforms.py | from torchvision import transforms
# Per-channel statistics shared by the train and val pipelines.
# NOTE(review): presumably computed on the AVA-ActiveSpeaker face crops —
# confirm before reuse elsewhere.
_CHANNEL_MEAN = (0.3729, 0.2850, 0.2439)
_CHANNEL_STD = (0.2286, 0.2008, 0.1911)

# PIL -> float tensor in [0, 1], then channel-wise normalization.
video_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(_CHANNEL_MEAN, _CHANNEL_STD),
])

video_val = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(_CHANNEL_MEAN, _CHANNEL_STD),
])
end-to-end-asd | end-to-end-asd-main/util/augmentations.py | import random
from PIL import Image
from torchvision.transforms import RandomCrop
from torchvision.transforms.functional import hflip
def video_temporal_crop(video_data, crop_ratio):
    """Augment a clip: coin-flip horizontal mirror, then one shared random crop.

    The crop window is sampled on the middle frame and applied identically to
    every frame, keeping the clip temporally aligned. `crop_ratio` is the
    lower bound of the sampled crop scale.
    """
    # random flip (applied to the whole clip at once)
    if random.getrandbits(1):
        video_data = [frame.transpose(Image.FLIP_LEFT_RIGHT) for frame in video_data]

    # random crop, parameters sampled from the middle frame
    mid_frame = video_data[int(len(video_data) / 2)]
    width, height = mid_frame.size
    scale = random.uniform(crop_ratio, 1)
    top, left, crop_h, crop_w = RandomCrop.get_params(
        mid_frame, output_size=(int(height * scale), int(width * scale)))

    return [frame.crop(box=(left, top, left + crop_w, top + crop_h))
            for frame in video_data]
def video_flip(video_data, crop_ratio):
    """Randomly mirror a whole clip horizontally (50% chance).

    `crop_ratio` is unused; it is kept so the function is interchangeable
    with `video_temporal_crop`.
    """
    if random.getrandbits(1):
        return [hflip(frame) for frame in video_data]
    return video_data
| 814 | 27.103448 | 98 | py |
end-to-end-asd | end-to-end-asd-main/datasets/graph_datasets.py | import os
import math
import torch
import random
import numpy as np
import ez_io.io_ava as io
import util.clip_utils as cu
from torch_geometric.data import Data, Dataset
from ez_io.file_util import csv_to_list, postprocess_speech_label, postprocess_entity_label
from util.augmentations import video_temporal_crop, video_corner_crop
class GraphContextualDataset(Dataset):
    """Base dataset caching per-video entity/speech metadata for graph sampling.

    In-memory layout built by `_cache_entity_data`:
      entity_data[video_id][entity_id] -> list of (entity_id, timestamp, entity_label)
      speech_data[video_id][timestamp] -> 1 if anyone speaks at that timestamp
      ts_to_entity[video_id][timestamp] -> entity_ids visible at that timestamp
      entity_list -> sorted list of (video_id, entity_id) pairs
    """

    def __init__(self):
        # In memory data
        self.entity_data = {}
        self.speech_data = {}
        self.ts_to_entity = {}
        self.entity_list = []

    def get_speaker_context(self, video_id, target_entity_id, center_ts, ctx_len):
        """Return exactly `ctx_len` entity ids co-occurring at `center_ts`.

        The target entity is always placed at index 0; remaining slots hold
        shuffled context entities, padded by resampling when fewer than
        `ctx_len` are available.
        """
        # Get context and exclude self
        context_entities = list(self.ts_to_entity[video_id][center_ts])
        random.shuffle(context_entities)
        context_entities.remove(target_entity_id)

        if not context_entities:
            # no other entities in the scene: remember you must include self
            context_entities.insert(0, target_entity_id)
            while len(context_entities) < ctx_len:  # self is context
                context_entities.append(random.choice(context_entities))
        elif len(context_entities) < ctx_len:
            context_entities.insert(0, target_entity_id)  # make sure self is at 0
            while len(context_entities) < ctx_len:
                # pad by resampling real context entities (skip self at 0)
                context_entities.append(random.choice(context_entities[1:]))
        else:
            context_entities.insert(0, target_entity_id)  # make sure self is at 0
            context_entities = context_entities[:ctx_len]

        return context_entities

    def search_ts_in_meta_data(self, entity_metadata, ts):
        """Return the index of timestamp `ts` in `entity_metadata`; raise if absent."""
        for idx, em in enumerate(entity_metadata):
            if em[1] == ts:
                return idx
        raise Exception('Bad Context')

    def _cache_entity_data(self, csv_file_path):
        """Parse the annotation CSV into entity_data/speech_data.

        Returns the set of (video_id, entity_id) pairs encountered.
        """
        entity_set = set()

        csv_data = csv_to_list(csv_file_path)
        csv_data.pop(0)  # CSV header
        for csv_row in csv_data:
            video_id = csv_row[0]
            entity_id = csv_row[-3]
            timestamp = csv_row[1]

            speech_label = postprocess_speech_label(csv_row[-2])
            entity_label = postprocess_entity_label(csv_row[-2])
            minimal_entity_data = (entity_id, timestamp, entity_label)

            # Store minimal entity data
            if video_id not in self.entity_data.keys():
                self.entity_data[video_id] = {}
            if entity_id not in self.entity_data[video_id].keys():
                self.entity_data[video_id][entity_id] = []
                entity_set.add((video_id, entity_id))
            self.entity_data[video_id][entity_id].append(minimal_entity_data)

            # Store speech meta-data
            if video_id not in self.speech_data.keys():
                self.speech_data[video_id] = {}
            if timestamp not in self.speech_data[video_id].keys():
                self.speech_data[video_id][timestamp] = speech_label

            # Max operation yields if someone is speaking.
            new_speech_label = max(
                self.speech_data[video_id][timestamp], speech_label)
            self.speech_data[video_id][timestamp] = new_speech_label

        return entity_set

    def _entity_list_postprocessing(self, entity_set):
        """Prune entities with missing/incomplete data on disk and build the
        timestamp -> entities index.

        NOTE: reads `self.video_root`, which must be set by the subclass
        before this is called.
        """
        print('Initial', len(entity_set))

        # filter out entities with no crop directory on disk
        print('video_root', self.video_root)
        all_disk_data = set(os.listdir(self.video_root))
        for video_id, entity_id in entity_set.copy():
            if entity_id not in all_disk_data:
                entity_set.remove((video_id, entity_id))
        print('Pruned not in disk', len(entity_set))

        # filter out entities whose crop count mismatches the CSV metadata
        for video_id, entity_id in entity_set.copy():
            dir = os.path.join(self.video_root, entity_id)
            if len(os.listdir(dir)) != len(self.entity_data[video_id][entity_id]):
                entity_set.remove((video_id, entity_id))
        print('Pruned not complete', len(entity_set))

        self.entity_list = sorted(list(entity_set))

        # Allocate simultaneous entities (per-timestamp co-occurrence index)
        for video_id, entity_id in entity_set:
            if video_id not in self.ts_to_entity.keys():
                self.ts_to_entity[video_id] = {}

            ent_min_data = self.entity_data[video_id][entity_id]
            for ed in ent_min_data:
                timestamp = ed[1]
                if timestamp not in self.ts_to_entity[video_id].keys():
                    self.ts_to_entity[video_id][timestamp] = []
                self.ts_to_entity[video_id][timestamp].append(entity_id)
class GraphDatasetETE(GraphContextualDataset):
    def __init__(self, audio_root, video_root, csv_file_path,
                 context_size, clip_lenght, connection_pattern,
                 video_transform=None, do_video_augment=False,
                 crop_ratio=0.8, norm_audio=False):
        """End-to-end graph dataset over per-entity audio/video crops.

        Args:
            audio_root, video_root: directories with per-entity wavs / crops.
            csv_file_path: annotation CSV (augmented AVA format).
            context_size: number of speakers sampled per timestamp.
            clip_lenght: frames per clip (original spelling kept — callers
                pass it positionally).
            connection_pattern: dict with 'src'/'dst' edge index lists.
            video_transform: per-frame transform applied to each clip frame.
            do_video_augment: enable random flip/crop with `crop_ratio`.
            norm_audio: whether to normalize audio clips.
        """
        super().__init__()
        # Data directories
        self.audio_root = audio_root
        self.video_root = video_root

        # Post-processing
        self.crop_ratio = crop_ratio
        self.video_transform = video_transform
        self.do_video_augment = do_video_augment

        # Graph Layout
        self.context_size = context_size

        # Node config
        self.norm_audio = norm_audio
        self.half_clip_length = math.floor(clip_lenght / 2)

        # Cache data (also prunes entities missing on disk)
        entity_set = self._cache_entity_data(csv_file_path)
        self._entity_list_postprocessing(entity_set)

        # Edge Config
        src_edges = connection_pattern['src']
        dst_edges = connection_pattern['dst']
        self.batch_edges = torch.tensor([src_edges, dst_edges], dtype=torch.long)

        # Replicate entity list — doubles the samples seen per epoch
        self.entity_list.extend(self.entity_list)

        self.avg_time = []
def __len__(self):
return int(len(self.entity_list)/1)
def get_audio_size(self,):
video_id, entity_id = self.entity_list[0]
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
mid_index = random.randint(0, len(entity_metadata)-1)
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index, self.half_clip_length)
audio_data = io.load_a_clip_from_metadata(clip_meta_data, self.video_root, self.audio_root, audio_offset)
return np.float32(audio_data).shape
def _get_scene_video_data(self, video_id, entity_id, mid_index):
orginal_entity_metadata = self.entity_data[video_id][entity_id]
time_ent = orginal_entity_metadata[mid_index][1]
context = self.get_speaker_context(
video_id, entity_id, time_ent, self.context_size)
video_data = []
targets = []
for ctx_entity in context:
entity_metadata = self.entity_data[video_id][ctx_entity]
ts_idx = self.search_ts_in_meta_data(entity_metadata, time_ent)
target_ctx = int(entity_metadata[ts_idx][-1])
clip_meta_data = cu.generate_clip_meta(entity_metadata, ts_idx, self.half_clip_length)
video_data.append(io.load_v_clip_from_metadata(clip_meta_data, self.video_root))
targets.append(target_ctx)
if self.do_video_augment:
video_data = [video_temporal_crop(vd, self.crop_ratio) for vd in video_data]
if self.video_transform is not None:
for vd_idx, vd in enumerate(video_data):
tensor_vd = [self.video_transform(f) for f in vd]
video_data[vd_idx] = tensor_vd
video_data = [torch.cat(vd, dim=0) for vd in video_data]
return video_data, targets
def _get_audio_data(self, video_id, entity_id, mid_index):
entity_metadata = self.entity_data[video_id][entity_id]
audio_offset = float(entity_metadata[0][1])
midone = entity_metadata[mid_index]
target_audio = self.speech_data[video_id][midone[1]]
clip_meta_data = cu.generate_clip_meta(entity_metadata, mid_index, self.half_clip_length)
audio_data = io.load_a_clip_from_metadata(clip_meta_data, self.video_root, self.audio_root, audio_offset)
return np.float32(audio_data), target_audio
def __getitem__(self, index):
video_id, entity_id = self.entity_list[index]
target_entity_metadata = self.entity_data[video_id][entity_id]
target_index = random.randint(0, len(target_entity_metadata)-1)
# get av data
video_data, target_v = self._get_scene_video_data(video_id, entity_id, target_index)
audio_data, target_a = self._get_audio_data(video_id, entity_id, target_index)
if self.norm_audio:
audio_data = (audio_data+3.777757875102366)/186.4988690376491
# Fill targets
target_set = []
target_set.append(target_a)
for tv in target_v:
target_set.append(tv)
# Feature data
feature_set = torch.zeros((len(video_data)+1, video_data[0].size(0), video_data[0].size(1), video_data[0].size(2)))
audio_data = torch.from_numpy(audio_data)
feature_set[0, 0, :audio_data.size(
1), :audio_data.size(2)] = audio_data
for i in range(self.context_size):
feature_set[i+1, ...] = video_data[i]
return Data(x=feature_set, edge_index=self.batch_edges, y=torch.tensor(target_set))
class IndependentGraphDatasetETE3D(GraphDatasetETE):
    """Spatio-temporal variant of :class:`GraphDatasetETE`.

    Builds ``graph_time_steps`` consecutive spatial graphs (strided in time
    around a random center timestamp) and returns them stacked in a single
    ``Data`` object, with separate spatial and temporal edge index tensors.
    """

    def __init__(self, audio_root, video_root, csv_file_path,
                 graph_time_steps, stride, context_size, clip_lenght,
                 spatial_connection_pattern, temporal_connection_pattern,
                 video_transform=None, do_video_augment=False, crop_ratio=0.95,
                 norm_audio=False):
        super().__init__(audio_root, video_root, csv_file_path,
                         context_size, clip_lenght,
                         spatial_connection_pattern, video_transform,
                         do_video_augment, crop_ratio, norm_audio)
        # Superclass Edge Config is replaced by the two edge sets below.
        self.batch_edges = None
        spatial_src_edges = spatial_connection_pattern['src']
        spatial_dst_edges = spatial_connection_pattern['dst']
        self.spatial_batch_edges = torch.tensor(
            [spatial_src_edges, spatial_dst_edges], dtype=torch.long)
        temporal_src_edges = temporal_connection_pattern['src']
        temporal_dst_edges = temporal_connection_pattern['dst']
        self.temporal_batch_edges = torch.tensor(
            [temporal_src_edges, temporal_dst_edges], dtype=torch.long)
        # Temporal graph layout: number of time steps and stride between them.
        self.graph_time_steps = graph_time_steps
        self.stride = stride

    def _get_scene_video_data(self, video_id, entity_id, mid_index, cache):
        """Like the parent method, but loads clips through a shared ``cache``
        (frames reused across time steps) and stacks frames along a new time
        axis (dim=1) instead of concatenating channels.
        """
        orginal_entity_metadata = self.entity_data[video_id][entity_id]
        time_ent = orginal_entity_metadata[mid_index][1]
        context = self.get_speaker_context(video_id, entity_id, time_ent, self.context_size)
        video_data = []
        targets = []
        for ctx_entity in context:
            entity_metadata = self.entity_data[video_id][ctx_entity]
            ts_idx = self.search_ts_in_meta_data(entity_metadata, time_ent)
            target_ctx = int(entity_metadata[ts_idx][-1])
            clip_meta_data = cu.generate_clip_meta(entity_metadata, ts_idx, self.half_clip_length)
            video_data.append(io.load_v_clip_from_metadata_cache(clip_meta_data, self.video_root, cache))
            targets.append(target_ctx)
        if self.video_transform is not None:
            for vd_idx, vd in enumerate(video_data):
                tensor_vd = [self.video_transform(f) for f in vd]
                video_data[vd_idx] = tensor_vd
        if self.do_video_augment:
            video_data = [video_corner_crop(
                vd, self.crop_ratio) for vd in video_data]
        # Stack frames into (C, T, H, W) per entity.
        video_data = [torch.stack(vd, dim=1) for vd in video_data]
        return video_data, targets

    def _get_time_context(self, entity_data, target_index):
        """Return ``graph_time_steps`` timestamps centered on ``target_index``,
        spaced by ``stride`` and clamped to the entity's valid range.
        """
        all_ts = [ed[1] for ed in entity_data]
        center_ts = entity_data[target_index][1]
        # Timestamps appear to be stored as strings — hence the str() lookup.
        center_ts_idx = all_ts.index(str(center_ts))
        half_time_steps = int(self.graph_time_steps/2)
        start = center_ts_idx-(half_time_steps*self.stride)
        end = center_ts_idx+((half_time_steps+1)*self.stride)
        selected_ts_idx = list(range(start, end, self.stride))
        selected_ts = []
        for i, idx in enumerate(selected_ts_idx):
            # Clamp out-of-range indices to the first/last timestamp.
            if idx < 0:
                idx = 0
            if idx >= len(all_ts):
                idx = len(all_ts)-1
            selected_ts.append(all_ts[idx])
        return selected_ts

    def __getitem__(self, index):
        """Build the stacked spatio-temporal graph for one target entity."""
        video_id, entity_id = self.entity_list[index]
        target_entity_metadata = self.entity_data[video_id][entity_id]
        center_index = random.randint(0, len(target_entity_metadata)-1)
        time_context = self._get_time_context(
            target_entity_metadata, center_index)
        feature_set = None
        target_set = []
        all_ts = [ted[1] for ted in target_entity_metadata]
        # One audio node + context_size video nodes per time step.
        nodes_per_graph = self.context_size+1
        cache = {}
        for graph_idx, tc in enumerate(time_context):
            target_index = all_ts.index(tc)
            # get av data
            video_data, target_v = self._get_scene_video_data(video_id, entity_id, target_index, cache)
            audio_data, target_a = self._get_audio_data(video_id, entity_id, target_index)
            # Fill targets
            target_set.append(target_a)
            for tv in target_v:
                target_set.append(tv)
            # Allocate lazily, now that the clip tensor sizes are known.
            if feature_set is None:
                feature_set = torch.zeros(nodes_per_graph * (self.graph_time_steps), video_data[0].size(0), video_data[0].size(1), video_data[0].size(2), video_data[0].size(3))
            # Fill in: audio goes to node 0 of this time step (zero-padded).
            graph_offset = graph_idx*nodes_per_graph
            audio_data = torch.from_numpy(audio_data)
            feature_set[graph_offset, 0, 0, :audio_data.size(1), :audio_data.size(2)] = audio_data
            for i in range(self.context_size):
                feature_set[graph_offset + (i+1), ...] = video_data[i]
        return Data(x=feature_set, edge_index=(self.spatial_batch_edges, self.temporal_batch_edges), y=torch.tensor(target_set))
| 14,316 | 40.259366 | 176 | py |
end-to-end-asd | end-to-end-asd-main/optimization/losses.py | import torch
import torch.nn as nn
class assignation_loss_audio(torch.nn.Module):
    """Penalty tying the audio ground truth to the most confident video node.

    Nodes are grouped per graph of ``graph_size`` entries, where index 0 is
    the audio node and the rest are video nodes. For each graph the highest
    positive-class probability among the video nodes is penalized for
    disagreeing with the audio label: a positive audio label penalizes a low
    best prediction, a negative one penalizes a high best prediction.
    """

    def __init__(self, graph_size):
        super().__init__()
        self.graph_size = graph_size
        self.softmax_layer = torch.nn.Softmax(dim=1)

    def forward(self, outputs, audio_targets):
        # Positive-class probability per node, grouped per graph.
        positive = self.softmax_layer(outputs)[:, 1].view((-1, self.graph_size))
        # Drop the audio node (column 0); keep only the video nodes.
        video_probs = positive[:, 1:]
        best_video, _ = torch.max(video_probs, dim=1)
        # gt == 1 -> penalize (1 - best); gt == 0 -> penalize best.
        penalty = audio_targets * (audio_targets - best_video) \
            + (1 - audio_targets) * best_video
        return torch.mean(penalty)
end-to-end-asd | end-to-end-asd-main/optimization/optimization_amp.py | import os
import torch
from torch.cuda.amp import autocast
from models.graph_layouts import generate_av_mask
from sklearn.metrics import average_precision_score
from models.graph_layouts import generate_temporal_video_center_mask, generate_temporal_video_mask
def optimize_easee(model, dataloader_train, data_loader_val,
                   device, criterion, optimizer, scheduler,
                   num_epochs, spatial_ctx_size, time_len,
                   a_weight=0.2, v_weight=0.5, models_out=None,
                   log=None):
    """Full training loop for the EASEE model.

    Runs ``num_epochs`` epochs of AMP training followed by validation,
    stepping the LR scheduler once per epoch. Checkpoints are written for
    the last 10 epochs only (when ``models_out`` is given) and per-epoch
    metrics are appended to ``log`` (when given). Returns the trained model.

    ``a_weight``/``v_weight`` weight the auxiliary audio/video losses.
    """
    for epoch in range(num_epochs):
        print()
        print('Epoch {}/{}'.format(epoch+1, num_epochs))
        print('-' * 10)
        outs_train = _train_model_amp_avl(model, dataloader_train, optimizer,
                                          criterion, device, spatial_ctx_size, time_len,
                                          a_weight, v_weight)
        outs_val = _test_model_graph_losses(model, data_loader_val, criterion,
                                            device, spatial_ctx_size, time_len)
        scheduler.step()
        train_loss, ta_loss, tv_loss, train_ap = outs_train
        val_loss, va_loss, vv_loss, val_ap, val_tap, val_cap = outs_val
        if models_out is not None and epoch > num_epochs-10:  # just save last 10 epochs
            model_target = os.path.join(models_out, str(epoch+1)+'.pth')
            print('save model to ', model_target)
            torch.save(model.state_dict(), model_target)
        if log is not None:
            log.writeDataLog([epoch+1, train_loss, ta_loss, tv_loss,
                              train_ap, val_loss, va_loss, vv_loss, val_ap, val_tap, val_cap])
    return model
def _train_model_amp_avl(model, dataloader, optimizer, criterion,
                         device, ctx_size, time_len, a_weight,
                         v_weight):
    """One training epoch with mixed precision and auxiliary AV losses.

    The total loss is ``a_weight*audio_aux + v_weight*video_aux + graph``.
    Returns (graph loss, audio loss, video loss, video mAP); per-frame,
    temporal and center-frame APs are printed but only the first is returned.
    """
    model.train()
    softmax_layer = torch.nn.Softmax(dim=1)
    pred_lst = []
    label_lst = []
    pred_time_lst = []
    label_time_lst = []
    pred_center_lst = []
    label_center_lst = []
    running_loss_g = 0.0
    running_loss_a = 0.0
    running_loss_v = 0.0
    audio_size = dataloader.dataset.get_audio_size()
    scaler = torch.cuda.amp.GradScaler(enabled=True)
    # Iterate over data
    for idx, dl in enumerate(dataloader):
        print('\t Train iter {:d}/{:d} {:.4f}'.format(idx, len(dataloader), running_loss_g/(idx+1)), end='\r')
        graph_data = dl
        graph_data = graph_data.to(device)
        targets = graph_data.y
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
            # TODO inneficient here
            # Masks select audio vs video nodes within the flattened batch.
            audio_mask, video_mask = generate_av_mask(ctx_size, graph_data.x.size(0))
            temporal_video_mask = generate_temporal_video_mask(ctx_size, graph_data.x.size(0))
            center_mask = generate_temporal_video_center_mask(ctx_size, graph_data.x.size(0), time_len)
            with autocast(True):
                outputs, audio_out, video_out = model(graph_data, ctx_size, audio_size)
                aux_loss_a = criterion(audio_out, targets[audio_mask])
                aux_loss_v = criterion(video_out, targets[video_mask])
                loss_graph = criterion(outputs, targets)
                loss = a_weight*aux_loss_a + v_weight*aux_loss_v + loss_graph
            # Standard AMP pattern: scale, step, update.
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        with torch.set_grad_enabled(False):
            label_lst.extend(targets[video_mask].cpu().numpy().tolist())
            pred_lst.extend(softmax_layer(outputs[video_mask]).cpu().numpy()[:, 1].tolist())
            label_time_lst.extend(targets[temporal_video_mask].cpu().numpy().tolist())
            pred_time_lst.extend(softmax_layer(outputs[temporal_video_mask]).cpu().numpy()[:, 1].tolist())
            label_center_lst.extend(targets[center_mask].cpu().numpy().tolist())
            pred_center_lst.extend(softmax_layer(outputs[center_mask]).cpu().numpy()[:, 1].tolist())
        # statistics
        running_loss_g += loss_graph.item()
        running_loss_a += aux_loss_a.item()
        running_loss_v += aux_loss_v.item()
        # NOTE(review): skips the final batch, yet losses below are divided by
        # the full len(dataloader) — averages are slightly underestimated.
        if idx == len(dataloader)-2:
            break
    epoch_loss_g = running_loss_g / len(dataloader)
    epoch_loss_a = running_loss_a / len(dataloader)
    epoch_loss_v = running_loss_v / len(dataloader)
    epoch_ap = average_precision_score(label_lst, pred_lst)
    epoch_time_ap = average_precision_score(label_time_lst, pred_time_lst)
    epoch_center_ap = average_precision_score(label_center_lst, pred_center_lst)
    print('Train Graph Loss: {:.4f}, Audio Loss: {:.4f}, Video Loss: {:.4f}, VmAP: {:.4f}, TVmAP: {:.4f}, CVmAP: {:.4f}'.format(
        epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap))
    return epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap
def _test_model_graph_losses(model, dataloader, criterion, device, ctx_size, time_len):
    """One validation pass mirroring ``_train_model_amp_avl`` without updates.

    Returns (graph loss, audio loss, video loss, video mAP, temporal video
    mAP, center-frame video mAP).
    """
    model.eval()
    softmax_layer = torch.nn.Softmax(dim=1)
    pred_lst = []
    label_lst = []
    pred_time_lst = []
    label_time_lst = []
    pred_center_lst = []
    label_center_lst = []
    running_loss_g = 0.0
    running_loss_a = 0.0
    running_loss_v = 0.0
    audio_size = dataloader.dataset.get_audio_size()
    # Iterate over data
    for idx, dl in enumerate(dataloader):
        print('\t Val iter {:d}/{:d} {:.4f}'.format(idx,
                                                    len(dataloader), running_loss_g/(idx+1)), end='\r')
        graph_data = dl
        graph_data = graph_data.to(device)
        targets = graph_data.y
        with torch.set_grad_enabled(False):
            # TODO inneficient here
            audio_mask, video_mask = generate_av_mask(
                ctx_size, graph_data.x.size(0))
            temporal_video_mask = generate_temporal_video_mask(
                ctx_size, graph_data.x.size(0))
            center_mask = generate_temporal_video_center_mask(
                ctx_size, graph_data.x.size(0), time_len)
            outputs, audio_out, video_out = model(
                graph_data, ctx_size, audio_size)
            loss_graph = criterion(outputs, targets)
            aux_loss_a = criterion(audio_out, targets[audio_mask])
            aux_loss_v = criterion(video_out, targets[video_mask])
            label_lst.extend(targets[video_mask].cpu().numpy().tolist())
            pred_lst.extend(softmax_layer(
                outputs[video_mask]).cpu().numpy()[:, 1].tolist())
            label_time_lst.extend(
                targets[temporal_video_mask].cpu().numpy().tolist())
            pred_time_lst.extend(softmax_layer(
                outputs[temporal_video_mask]).cpu().numpy()[:, 1].tolist())
            label_center_lst.extend(
                targets[center_mask].cpu().numpy().tolist())
            pred_center_lst.extend(softmax_layer(
                outputs[center_mask]).cpu().numpy()[:, 1].tolist())
        # statistics
        running_loss_g += loss_graph.item()
        running_loss_a += aux_loss_a.item()
        running_loss_v += aux_loss_v.item()
        # NOTE(review): final batch skipped but divisor is len(dataloader).
        if idx == len(dataloader)-2:
            break
    epoch_loss_g = running_loss_g / len(dataloader)
    epoch_loss_a = running_loss_a / len(dataloader)
    epoch_loss_v = running_loss_v / len(dataloader)
    epoch_ap = average_precision_score(label_lst, pred_lst)
    epoch_time_ap = average_precision_score(label_time_lst, pred_time_lst)
    epoch_center_ap = average_precision_score(
        label_center_lst, pred_center_lst)
    print('Val Graph Loss: {:.4f}, Audio Loss: {:.4f}, Video Loss: {:.4f}, VmAP: {:.4f}, TVmAP: {:.4f}, CVmAP: {:.4f}'.format(
        epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap))
    return epoch_loss_g, epoch_loss_a, epoch_loss_v, epoch_ap, epoch_time_ap, epoch_center_ap
| 7,919 | 39.615385 | 128 | py |
JEMPP | JEMPP-master/eval_jempp.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import utils
import torch as t, torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torchvision as tv, torchvision.transforms as tr
import sys
import argparse
import numpy as np
from ExpUtils import *
from models.jem_models import F, CCF
from utils import plot, Hamiltonian
# Sampling
from tqdm import tqdm
t.backends.cudnn.benchmark = True
t.backends.cudnn.enabled = True
seed = 1
im_sz = 32
n_ch = 3
n_classes = 10
correct = 0
print = wlog
def init_random(bs):
    """Return `bs` CIFAR-shaped images (3x32x32) drawn i.i.d. from U(-1, 1)."""
    buf = t.FloatTensor(bs, 3, 32, 32)
    return buf.uniform_(-1, 1)
conditionals = []
def init_from_centers(args):
    """Build an 'informative' replay buffer from per-class Gaussian fits.

    Loads precomputed class means/covariances from '<dataset>_mean.pt' /
    '<dataset>_cov.pt', creates one MultivariateNormal per class (appended to
    the module-global ``conditionals``), and returns a clamped buffer with
    ``buffer_size // n_classes`` samples per class.
    """
    global conditionals
    from torch.distributions.multivariate_normal import MultivariateNormal
    bs = args.buffer_size
    if args.dataset == 'svhn':
        size = [3, 28, 28]
    else:
        size = [3, 32, 32]
    # The test split shares the train split's fitted statistics.
    if args.dataset == 'cifar_test':
        args.dataset = 'cifar10'
    centers = t.load('%s_mean.pt' % args.dataset)
    covs = t.load('%s_cov.pt' % args.dataset)
    buffer = []
    for i in range(args.n_classes):
        mean = centers[i].to(args.device)
        cov = covs[i].to(args.device)
        # Jitter the covariance diagonal for numerical stability.
        dist = MultivariateNormal(mean, covariance_matrix=cov + 1e-4 * t.eye(int(np.prod(size))).to(args.device))
        buffer.append(dist.sample((bs // args.n_classes, )).view([bs // args.n_classes] + size).cpu())
        conditionals.append(dist)
    return t.clamp(t.cat(buffer), -1, 1)
def init_inform(args, bs):
    """Sample `bs` images from randomly-picked per-class Gaussians.

    Relies on the module-global ``conditionals`` list being populated by
    ``init_from_centers`` beforehand. Output is clamped to [-1, 1] on CPU.
    """
    global conditionals
    channels, height, width = 3, 32, 32
    batch = t.zeros(bs, channels, height, width)
    for idx in range(bs):
        class_dist = conditionals[np.random.randint(args.n_classes)]
        batch[idx] = class_dist.sample().view([channels, height, width])
    return t.clamp(batch, -1, 1).cpu()
def sample_p_0(device, replay_buffer, bs, y=None):
    """Draw SGLD starting points, mixing replay-buffer and fresh samples.

    With probability ``args.reinit_freq`` per sample, a fresh init (uniform
    noise, or an informed Gaussian draw when ``args.init == 'i'``) replaces
    the buffer entry. Returns (samples on ``device``, buffer indices used) —
    the indices let the caller write the chain results back to the buffer.
    Reads module globals ``args`` and ``n_classes``.
    """
    if len(replay_buffer) == 0:
        return init_random(bs), []
    # With class conditioning, the buffer is partitioned per class.
    buffer_size = len(replay_buffer) if y is None else len(replay_buffer) // n_classes
    if buffer_size > bs:
        inds = t.randint(0, buffer_size, (bs,))
    else:
        inds = t.arange(0, bs)
    # if cond, convert inds to class conditional inds
    if y is not None:
        inds = y.cpu() * buffer_size + inds
        assert not args.uncond, "Can't drawn conditional samples without giving me y"
    buffer_samples = replay_buffer[inds]
    if args.init == 'i':
        random_samples = init_inform(args, bs)
    else:
        random_samples = init_random(bs)
    # Per-sample Bernoulli(reinit_freq) mask broadcast over C,H,W.
    choose_random = (t.rand(bs) < args.reinit_freq).float()[:, None, None, None]
    samples = choose_random * random_samples + (1 - choose_random) * buffer_samples
    return samples.to(device), inds
def sample_q(f, replay_buffer, y=None, n_steps=10, in_steps=10, args=None):
    """Run ``n_steps`` of (P)SGLD on inits from the replay buffer and return
    the final samples, writing them back into the buffer.

    When ``in_steps > 0``, each outer step additionally runs ``in_steps``
    inner updates driven by a Hamiltonian built from the network's first
    layer (the JEM++ YOPO-style inner loop); ``args.sgld_lr`` optionally
    mixes in the plain outer SGLD gradient first. ``eps = 1`` enables
    clamping of both the gradient steps and the samples to [-1, 1].
    """
    # f.eval()
    # get batch size
    bs = args.batch_size if y is None else y.size(0)
    # generate initial samples and buffer inds of those samples (if buffer is used)
    init_sample, buffer_inds = sample_p_0(args.device, replay_buffer, bs=bs, y=y)
    x_k = t.autograd.Variable(init_sample, requires_grad=True).to(args.device)
    # sgld
    if args.in_steps > 0:
        Hamiltonian_func = Hamiltonian(f.f.layer_one)
    eps = 1
    for it in range(n_steps):
        energies = f(x_k, y=y)
        e_x = energies.sum()
        # wgrad = f.f.conv1.weight.grad
        eta = t.autograd.grad(e_x, [x_k], retain_graph=True)[0]
        # e_x.backward(retain_graph=True)
        # eta = x_k.grad.detach()
        # f.f.conv1.weight.grad = wgrad
        if in_steps > 0:
            # Momentum-like term: gradient of the energy w.r.t. the first
            # layer's output, captured during the forward pass above.
            p = 1.0 * f.f.layer_one_out.grad
            p = p.detach()
            tmp_inp = x_k.data
            tmp_inp.requires_grad_()
            if args.sgld_lr > 0:
                # if in_steps == 0: use SGLD other than PYLD
                # if in_steps != 0: combine outter and inner gradients
                # default 0
                if eps > 0:
                    eta = t.clamp(eta, -eps, eps)
                tmp_inp = x_k + eta * args.sgld_lr
                if eps > 0:
                    tmp_inp = t.clamp(tmp_inp, -1, 1)
            for i in range(in_steps):
                H = Hamiltonian_func(tmp_inp, p)
                eta_grad = t.autograd.grad(H, [tmp_inp], only_inputs=True, retain_graph=True)[0]
                if eps > 0:
                    eta_step = t.clamp(eta_grad, -eps, eps)
                else:
                    eta_step = eta_grad * args.pyld_lr
                tmp_inp.data = tmp_inp.data + eta_step
                if eps > 0:
                    tmp_inp = t.clamp(tmp_inp, -1, 1)
            x_k.data = tmp_inp.data
        if args.sgld_std > 0.0:
            # Langevin noise term.
            x_k.data += args.sgld_std * t.randn_like(x_k)
    # Restore training mode (sampling may be interleaved with training).
    f.train()
    final_samples = x_k.detach()
    # update replay buffer
    if len(replay_buffer) > 0:
        replay_buffer[buffer_inds] = final_samples.cpu()
    return final_samples
def uncond_samples(f, args, device, save=True):
    """Run SGLD chains over a replay buffer and return the final buffer.

    When ``args.init == 'i'`` the buffer is seeded from per-class Gaussian
    fits (informative init); otherwise from uniform noise. Sample grids are
    plotted every ``args.print_every`` steps when ``save`` is set.
    """
    if args.init == 'i':
        # Single call: init_from_centers both populates the global
        # `conditionals` list and returns the informed buffer. The original
        # called it twice, duplicating every class distribution in
        # `conditionals` and doubling the covariance setup work.
        replay_buffer = init_from_centers(args)
    else:
        replay_buffer = t.FloatTensor(args.buffer_size, 3, 32, 32).uniform_(-1, 1)
    for i in range(args.n_sample_steps):
        samples = sample_q(f, replay_buffer, y=None, n_steps=args.n_steps, in_steps=args.in_steps, args=args)
        if i % args.print_every == 0 and save:
            plot('{}/samples_{}.png'.format(args.save_dir, i), samples)
        print(i)
    return replay_buffer
def cond_samples(f, replay_buffer, args, device, fresh=False):
    """Classify buffer samples with ``f`` and plot class-sorted sample grids.

    Each of the 100 output grids contains 10 samples per predicted class,
    one row per class. When ``fresh`` is set, a new buffer is generated
    from scratch first.
    """
    if fresh:
        replay_buffer = uncond_samples(f, args, device, save=True)
    n_it = replay_buffer.size(0) // 100
    all_y = []
    # Label every buffer image with the classifier's argmax prediction.
    for i in range(n_it):
        x = replay_buffer[i * 100: (i + 1) * 100].to(device)
        y = f.classify(x).max(1)[1]
        all_y.append(y)
    all_y = t.cat(all_y, 0)
    # Generalized from the hard-coded 10 classes so checkpoints trained with
    # a different args.n_classes work too (matches cond_fid).
    each_class = [replay_buffer[all_y == l] for l in range(args.n_classes)]
    print([len(c) for c in each_class])
    for i in range(100):
        this_im = []
        for l in range(args.n_classes):
            this_l = each_class[l][i * 10: (i + 1) * 10]
            this_im.append(this_l)
        this_im = t.cat(this_im, 0)
        if this_im.size(0) > 0:
            plot('{}/samples_{}.png'.format(args.save_dir, i), this_im)
        print(i)
def best_samples(f, replay_buffer, args, device, fresh=False):
    """Select and plot the top-k samples per predicted class.

    Samples are ranked per class by log p(x) (logsumexp of the logits);
    ``args.ratio`` controls k (a fraction when |ratio| < 1, a count
    otherwise) and its sign controls whether the largest or smallest
    scores are kept. Writes one 'topk_<class>.png' grid per class.
    """
    # Local square-grid plot helper (shadows the module-level `plot` import).
    sqrt = lambda x: int(t.sqrt(t.Tensor([x])))
    plot = lambda p, x: tv.utils.save_image(t.clamp(x, -1, 1), p, normalize=True, nrow=sqrt(x.size(0)))
    if fresh:
        replay_buffer = uncond_samples(f, args, device, save=True)
    n_it = replay_buffer.size(0) // 100
    all_y = []
    all_px = []
    probs = []
    with t.no_grad():
        for i in tqdm(range(n_it)):
            x = replay_buffer[i * 100: (i + 1) * 100].to(device)
            logits = f.classify(x)
            y = logits.max(1)[1]
            # log p(x) up to a constant: logsumexp over class logits.
            px = logits.logsumexp(1)
            prob = nn.Softmax(dim=1)(logits).max(1)[0]
            all_y.append(y)
            probs.append(prob)
            all_px.append(px)
    all_y = t.cat(all_y, 0)
    probs = t.cat(probs, 0)
    print(probs.min().item())
    print((probs < 0).sum().item())
    all_px = t.cat(all_px, 0)
    print("%f %f %f" % (probs.mean().item(), probs.max().item(), probs.min().item()))
    each_class = [replay_buffer[all_y == l] for l in range(10)]
    each_class_probs = [probs[all_y == l] for l in range(10)]
    each_class_px = [all_px[all_y == l] for l in range(10)]
    print([len(c) for c in each_class])
    new_buffer = []
    ratio = abs(args.ratio)
    for c in range(10):
        each_probs = each_class_probs[c]
        # Rank by log p(x); the p(y|x) alternative is kept for reference.
        each_metric = each_class_px[c]
        # each_metric = each_class_probs[c]
        if ratio < 1:
            topk = int(len(each_probs) * ratio)
        else:
            topk = int(ratio)
        topk = min(topk, len(each_probs))
        # The original if/else branches here were byte-identical; a single
        # topk call with largest=(args.ratio > 0) preserves behavior.
        topks = t.topk(each_metric, topk, largest=args.ratio > 0)
        index_list = topks[1]
        print('P(x) min %.3f max %.3f' % (-each_metric[index_list].max().item(), -each_metric[index_list].min().item()))
        print('Prob(y|x) max %.3f min %.3f' % (each_probs[index_list].max().item(), each_probs[index_list].min().item()))
        images = each_class[c][index_list]
        new_buffer.append(images)
        plot('{}/topk_{}.png'.format(args.save_dir, c), images)
    replay_buffer = t.cat(new_buffer, 0)
    print(replay_buffer.shape)
def cond_fid(f, replay_buffer, args, device, ratio=0.1):
    """Compute FID on the most confident buffer samples of each class.

    Buffer images are labeled by ``f``'s argmax prediction; per class, the
    top-``ratio`` (fraction if < 1, else a count) by max softmax confidence
    are kept, and FID is evaluated on the filtered buffer.
    """
    n_it = replay_buffer.size(0) // 100
    all_y = []
    probs = []
    with t.no_grad():
        for i in tqdm(range(n_it)):
            x = replay_buffer[i * 100: (i + 1) * 100].to(device)
            logits = f.classify(x)
            y = logits.max(1)[1]
            prob = nn.Softmax(dim=1)(logits).max(1)[0]
            all_y.append(y)
            probs.append(prob)
    all_y = t.cat(all_y, 0)
    probs = t.cat(probs, 0)
    each_class = [replay_buffer[all_y == l] for l in range(args.n_classes)]
    each_class_probs = [probs[all_y == l] for l in range(args.n_classes)]
    print([len(c) for c in each_class])
    new_buffer = []
    for c in range(args.n_classes):
        each_probs = each_class_probs[c]
        if ratio < 1:
            topk = int(len(each_probs) * ratio)
        else:
            topk = int(ratio)
        topk = min(topk, len(each_probs))
        topks = t.topk(each_probs, topk)
        index_list = topks[1]
        images = each_class[c][index_list]
        new_buffer.append(images)
    replay_buffer = t.cat(new_buffer, 0)
    print(replay_buffer.shape)
    from Task.eval_buffer import eval_fid
    fid = eval_fid(f, replay_buffer, args)
    return fid
def logp_hist(f, args, device):
    """Plot overlaid histograms of a per-sample score across datasets.

    The score is chosen by ``args.score_fn``: 'px' uses the scalar energy
    f(x), 'py' the max softmax probability, anything else the max logit.
    Histograms for every name in ``args.datasets`` are overlaid and saved
    to '<save_dir>/jem_<args.datasets[1]>_logp.pdf' (so at least two
    dataset names are expected — NOTE(review): confirm callers).
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    sns.set()
    plt.switch_backend('agg')

    # (The original defined unused `sample` and `grad_norm` helpers here;
    # they were never referenced and have been removed.)
    def score_fn(x):
        if args.score_fn == "px":
            return f(x).detach().cpu()
        elif args.score_fn == "py":
            # Explicit dim=1 (softmax over classes); the implicit-dim form
            # is deprecated and resolves to dim=1 for 2-D logits anyway.
            return nn.Softmax(dim=1)(f.classify(x)).max(1)[0].detach().cpu()
        else:
            return f.classify(x).max(1)[0].detach().cpu()

    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5)),
         lambda x: x + args.sigma * t.randn_like(x)]
    )
    # Lazy factories: only the datasets actually requested are instantiated
    # (the original eagerly built — and downloaded — all four).
    dataset_factories = {
        "cifar10": lambda: tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False),
        "svhn": lambda: tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test"),
        "cifar100": lambda: tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False),
        "celeba": lambda: tv.datasets.CelebA(root="../data", download=True, split="test",
                                             transform=tr.Compose([tr.Resize(32),
                                                                   tr.ToTensor(),
                                                                   tr.Normalize((.5, .5, .5), (.5, .5, .5)),
                                                                   lambda x: x + args.sigma * t.randn_like(x)]))
    }
    score_dict = {}
    num_workers = 0 if args.debug else 4
    for dataset_name in args.datasets:
        print(dataset_name)
        dataset = dataset_factories[dataset_name]()
        dataloader = DataLoader(dataset, batch_size=100, shuffle=True, num_workers=num_workers, drop_last=False)
        this_scores = []
        for x, _ in dataloader:
            x = x.to(device)
            scores = score_fn(x)
            this_scores.extend(scores.numpy())
        score_dict[dataset_name] = this_scores

    colors = ['green', 'red']
    for i, (name, scores) in enumerate(score_dict.items()):
        # Cycle colors so more than two datasets no longer raises IndexError.
        plt.hist(scores, label=name, bins=100, alpha=.5, color=colors[i % len(colors)])
    plt.legend(loc='upper left')
    plt.xticks([])
    plt.yticks([])
    plt.savefig(args.save_dir + "/jem_%s_logp.pdf" % args.datasets[1], bbox_inches='tight', pad_inches=0.0)
def OODAUC(f, args, device):
    """Out-of-distribution detection AUROC: CIFAR-10 test vs args.ood_dataset.

    Scores every sample with ``args.score_fn`` ('px' energy, 'py' max softmax
    probability, else negative input-gradient norm) and prints the ROC AUC
    with in-distribution labeled 1 and OOD labeled 0.
    """
    print("OOD Evaluation")

    def grad_norm(x):
        # L2 norm of d f(x)/dx per sample, used by the 'pxgrad' score.
        x_k = t.autograd.Variable(x, requires_grad=True)
        f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0]
        grad = f_prime.view(x.size(0), -1)
        return grad.norm(p=2, dim=1)

    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5)),
         lambda x: x + args.sigma * t.randn_like(x)]
    )
    num_workers = 0 if args.debug else 4
    dset_real = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False)
    dload_real = DataLoader(dset_real, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    if args.ood_dataset == "svhn":
        dset_fake = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test")
    elif args.ood_dataset == "cifar_100":
        dset_fake = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False)
    elif args.ood_dataset == "celeba":
        # NOTE(review): hard-coded cluster path; portable only on that host.
        dset_fake = tv.datasets.ImageFolder(root="/scratch/gobi1/gwohl/CelebA/splits",
                                            transform=tr.Compose([tr.Resize(32),
                                                                  tr.ToTensor(),
                                                                  tr.Normalize((.5, .5, .5), (.5, .5, .5)),
                                                                  lambda x: x + args.sigma * t.randn_like(x)]))
    else:
        # 'cifar_interp' and any other value fall back to CIFAR-10 itself.
        dset_fake = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False)
    dload_fake = DataLoader(dset_fake, batch_size=100, shuffle=True, num_workers=num_workers, drop_last=False)
    print(len(dload_real), len(dload_fake))
    real_scores = []
    print("Real scores...")

    def score_fn(x):
        if args.score_fn == "px":
            return f(x).detach().cpu()
        elif args.score_fn == "py":
            # Explicit dim=1 instead of the deprecated implicit-dim Softmax
            # (which resolves to dim=1 for 2-D logits, so behavior is kept).
            return nn.Softmax(dim=1)(f.classify(x)).max(1)[0].detach().cpu()
        else:
            return -grad_norm(x).detach().cpu()

    for x, _ in dload_real:
        x = x.to(device)
        scores = score_fn(x)
        real_scores.append(scores.numpy())
    fake_scores = []
    print("Fake scores...")
    if args.ood_dataset == "cifar_interp":
        # Score midpoints between consecutive batches (interpolated images).
        last_batch = None
        for i, (x, _) in enumerate(dload_fake):
            x = x.to(device)
            if i > 0:
                x_mix = (x + last_batch) / 2 + args.sigma * t.randn_like(x)
                scores = score_fn(x_mix)
                fake_scores.append(scores.numpy())
            last_batch = x
    else:
        for i, (x, _) in enumerate(dload_fake):
            x = x.to(device)
            scores = score_fn(x)
            fake_scores.append(scores.numpy())
    real_scores = np.concatenate(real_scores)
    fake_scores = np.concatenate(fake_scores)
    real_labels = np.ones_like(real_scores)
    fake_labels = np.zeros_like(fake_scores)
    import sklearn.metrics
    scores = np.concatenate([real_scores, fake_scores])
    labels = np.concatenate([real_labels, fake_labels])
    score = sklearn.metrics.roc_auc_score(labels, scores)
    print(score)
def test_clf(f, args, device):
    """Evaluate classification accuracy on the dataset named by args.dataset.

    Inputs get Gaussian noise of std args.sigma; when args.n_steps > 0 each
    batch is first refined with that many gradient-ascent steps on f(x).
    Saves per-sample losses/corrects/probabilities to '<save_dir>/vals.pt'
    and returns the mean accuracy (a float in [0, 1]).
    """
    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5)),
         lambda x: x + t.randn_like(x) * args.sigma]
    )

    def sample(x, n_steps=args.n_steps):
        """Refine x with n_steps of noisy gradient ascent on the energy."""
        x_k = t.autograd.Variable(x.clone(), requires_grad=True)
        # sgld
        for k in range(n_steps):
            f_prime = t.autograd.grad(f(x_k).sum(), [x_k], retain_graph=True)[0]
            x_k.data += f_prime + 1e-2 * t.randn_like(x_k)
        final_samples = x_k.detach()
        return final_samples

    if args.dataset == "cifar_train":
        dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=True)
    elif args.dataset == "cifar_test":
        dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False)
    elif args.dataset == "cifar100_train":
        dset = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=True)
    elif args.dataset == "cifar100_test":
        dset = tv.datasets.CIFAR100(root="../data", transform=transform_test, download=True, train=False)
    elif args.dataset == "svhn_train":
        dset = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="train")
    elif args.dataset == "svhn_test":
        dset = tv.datasets.SVHN(root="../data", transform=transform_test, download=True, split="test")
    else:
        # Unknown names fall back to CIFAR-10 test.
        dset = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False)
    num_workers = 0 if args.debug else 4
    dload = DataLoader(dset, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    corrects, losses, pys, preds = [], [], [], []
    for x_p_d, y_p_d in tqdm(dload):
        x_p_d, y_p_d = x_p_d.to(device), y_p_d.to(device)
        if args.n_steps > 0:
            x_p_d = sample(x_p_d)
        logits = f.classify(x_p_d)
        py = nn.Softmax(dim=1)(f.classify(x_p_d)).max(1)[0].detach().cpu().numpy()
        loss = nn.CrossEntropyLoss(reduction='none')(logits, y_p_d).cpu().detach().numpy()
        losses.extend(loss)
        correct = (logits.max(1)[1] == y_p_d).float().cpu().numpy()
        corrects.extend(correct)
        pys.extend(py)
        preds.extend(logits.max(1)[1].cpu().numpy())
    loss = np.mean(losses)
    correct = np.mean(corrects)
    t.save({"losses": losses, "corrects": corrects, "pys": pys}, os.path.join(args.save_dir, "vals.pt"))
    print('loss %.5g, accuracy: %g%%' % (loss, correct * 100))
    return correct
def calibration(f, args, device):
    """Compute the expected calibration error (ECE, 20 bins) on CIFAR-10 test
    and draw a reliability diagram.

    Uses the module-global ``correct`` (set by ``test_clf`` in ``main``)
    for the diagram's accuracy title.
    """
    from Task.calibration import reliability_diagrams
    from Task.calibration import ECELoss
    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5)),
         lambda x: x + args.sigma * t.randn_like(x)]
    )
    num_workers = 0 if args.debug else 4
    dset_real = tv.datasets.CIFAR10(root="../data", transform=transform_test, download=True, train=False)
    dload_real = DataLoader(dset_real, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    f.eval()
    real_scores = []
    labels = []
    pred = []
    ece_com = ECELoss(20)
    logits_l = []
    for x, y in dload_real:
        x = x.to(device)
        labels.append(y.numpy())
        logits = f.classify(x)
        logits_l.append(logits.detach())
        scores = nn.Softmax(dim=1)(logits).max(dim=1)[0].detach().cpu()
        preds = nn.Softmax(dim=1)(logits).argmax(dim=1).detach().cpu()
        real_scores.append(scores.numpy())
        pred.append(preds.numpy())
    # Use the file's `t` alias — the original referenced bare `torch`, which
    # was only in scope via the `from ExpUtils import *` star import.
    logits_l = t.cat(logits_l)
    temps = t.LongTensor(np.concatenate(labels))
    ece = ece_com(logits_l, temps.to(device)).item()
    print("On Calibration of Modern Neural Networks code result:", ece)
    real_scores = np.concatenate(real_scores)
    labels = np.concatenate(labels)
    pred = np.concatenate(pred)
    print(len(real_scores))
    reliability_diagrams(list(pred), list(labels), list(real_scores), bin_size=0.05, title="Accuracy: %.2f%%" % (100.0 * correct), args=args)
def main(args):
    """Evaluation entry point: restore a checkpointed EBM and dispatch to the
    requested evaluation mode (`args.eval`)."""
    global correct
    set_file_logger(logger, args)
    args.save_dir = args.dir_path
    print(args.dir_path)
    # Seed CPU and (if present) GPU RNGs for reproducible evaluation.
    t.manual_seed(seed)
    if t.cuda.is_available():
        t.cuda.manual_seed_all(seed)
    device = t.device('cuda' if t.cuda.is_available() else 'cpu')
    args.device = device
    # Unconditional EBM -> F, class-conditional -> CCF.
    model_cls = F if args.uncond else CCF
    f = model_cls(args.depth, args.width, args.norm, n_classes=args.n_classes, model=args.model)
    print(f"loading model from {args.load_path}")
    # Restore weights and the persistent SGLD replay buffer.
    ckpt_dict = t.load(args.load_path)
    f.load_state_dict(ckpt_dict["model_state_dict"])
    replay_buffer = ckpt_dict["replay_buffer"]
    f = f.to(device)
    f.eval()
    mode = args.eval
    if mode == "OOD":
        OODAUC(f, args, device)
    elif mode == "cali":
        # calibration() reads the module-level `correct` for its plot title.
        correct = test_clf(f, args, device)
        calibration(f, args, device)
    elif mode == "test_clf":
        test_clf(f, args, device)
    elif mode == "cond_samples":
        cond_samples(f, replay_buffer, args, device, args.fresh_samples)
    elif mode == "fid":
        cond_fid(f, replay_buffer, args, device, ratio=args.ratio)
    elif mode == "uncond_samples":
        uncond_samples(f, args, device)
    elif mode == "logp_hist":
        logp_hist(f, args, device)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("LDA Energy Based Models")
    # Evaluation mode dispatched in main().
    parser.add_argument("--eval", default="OOD", type=str,
                        choices=["uncond_samples", "cond_samples", "best_samples", "logp_hist", "OOD", "test_clf", "fid", "cali"])
    parser.add_argument("--score_fn", default="px", type=str,
                        choices=["px", "py", "pxgrad"], help="For OODAUC, chooses what score function we use.")
    parser.add_argument("--ood_dataset", default="svhn", type=str,
                        choices=["svhn", "cifar_interp", "cifar_100", "celeba"],
                        help="Chooses which dataset to compare against for OOD")
    parser.add_argument("--dataset", default="cifar_test", type=str,
                        choices=["cifar_train", "cifar_test", "svhn_test", "svhn_train", "cifar100_test"],
                        help="Dataset to use when running test_clf for classification accuracy")
    parser.add_argument("--datasets", nargs="+", type=str, default=[],
                        help="The datasets you wanna use to generate a log p(x) histogram")
    # optimization
    parser.add_argument("--batch_size", type=int, default=64)
    # regularization
    parser.add_argument("--sigma", type=float, default=3e-2)
    # network
    parser.add_argument("--norm", type=str, default="batch", choices=[None, "none", "norm", "batch", "instance", "layer", "act"])
    parser.add_argument("--init", type=str, default='i', help='r random, i inform')
    # EBM specific
    parser.add_argument("--n_steps", type=int, default=0)
    parser.add_argument("--in_steps", type=int, default=5, help="number of steps of SGLD per iteration, 100 works for short-run, 20 works for PCD")
    parser.add_argument("--in_lr", type=float, default=0.01)
    parser.add_argument("--width", type=int, default=10)
    parser.add_argument("--depth", type=int, default=28)
    parser.add_argument("--uncond", action="store_true")
    parser.add_argument("--buffer_size", type=int, default=0)
    parser.add_argument("--reinit_freq", type=float, default=.0)
    parser.add_argument("--sgld_lr", type=float, default=1.0)
    parser.add_argument("--sgld_std", type=float, default=1e-2)
    parser.add_argument("--model", type=str, default='yopo')
    parser.add_argument("--ratio", type=int, default=100)
    # logging + evaluation
    parser.add_argument("--save_dir", type=str, default='jem_eval')
    parser.add_argument("--print_every", type=int, default=100)
    parser.add_argument("--n_sample_steps", type=int, default=100)
    parser.add_argument("--n_images", type=int, default=60000)
    parser.add_argument("--load_path", type=str, default=None)
    parser.add_argument("--print_to_log", action="store_true")
    parser.add_argument("--fresh_samples", action="store_true",
                        help="If set, then we generate a new replay buffer from scratch for conditional sampling,"
                             "Will be much slower.")
    parser.add_argument("--gpu-id", type=str, default="")
    args = parser.parse_args()
    auto_select_gpu(args)
    init_debug(args)
    run_time = time.strftime('%m%d%H%M%S', time.localtime(time.time()))
    if args.save_dir == 'jem_eval':
        # by default to eval the model
        # Derive the run directory from the checkpoint path + eval mode + timestamp.
        args.dir_path = args.load_path + "_eval_%s_%s" % (args.eval, run_time)
    # NOTE(review): when --save_dir is overridden, args.dir_path is never set,
    # yet main() reads it unconditionally — confirm intended usage.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    # n_classes is inferred from the dataset name (cifar100 -> 100, else 10).
    args.n_classes = 100 if "cifar100" in args.dataset else 10
    main(args)
    print(args.save_dir)
| 25,406 | 37.730183 | 147 | py |
JEMPP | JEMPP-master/train_jempp.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch as t
import torch.nn as nn
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from ExpUtils import *
from utils import eval_classification, Hamiltonian, checkpoint, get_data, set_bn_train, set_bn_eval, plot
from models.jem_models import get_model_and_buffer
# Limit intra-op CPU threads; enable the cudnn autotuner (fixed-size batches).
t.set_num_threads(2)
t.backends.cudnn.benchmark = True
t.backends.cudnn.enabled = True
# Default RNG seed (the actual run seed comes from args.seed in main()).
seed = 1
# Trace of inner-loop values, cleared by sample_q().
inner_his = []
# Per-class MultivariateNormal distributions, populated by init_from_centers().
conditionals = []
def init_random(args, bs):
    """Draw `bs` fresh images, each sampled from a randomly chosen per-class
    Gaussian in the module-level `conditionals`, clamped to [-1, 1] on CPU."""
    global conditionals
    shape = [3, 32, 32]
    batch = t.zeros(bs, *shape)
    for row in range(bs):
        cls = np.random.randint(args.n_classes)
        batch[row] = conditionals[cls].sample().view(shape)
    return t.clamp(batch, -1, 1).cpu()
def sample_p_0(replay_buffer, bs, y=None):
    """Pick `bs` SGLD starting points: mostly replay-buffer entries, with a
    `reinit_freq` fraction replaced by fresh Gaussian draws.

    Returns (samples on args.device, buffer indices used).  Relies on the
    module-level `args`.  With an empty buffer every sample is fresh.
    """
    if not len(replay_buffer):
        return init_random(args, bs), []
    # A class-conditional buffer is laid out as n_classes contiguous segments.
    per_slot = len(replay_buffer) if y is None else len(replay_buffer) // args.n_classes
    inds = t.randint(0, per_slot, (bs,))
    if y is not None:
        inds = y.cpu() * per_slot + inds
    from_buffer = replay_buffer[inds]
    fresh = init_random(args, bs)
    # Bernoulli mask broadcast over (C, H, W): 1 -> fresh draw, 0 -> buffer entry.
    mask = (t.rand(bs) < args.reinit_freq).float()[:, None, None, None]
    mixed = mask * fresh + (1 - mask) * from_buffer
    return mixed.to(args.device), inds
def sample_q(f, replay_buffer, y=None, n_steps=10, in_steps=10, args=None, save=True):
    """this func takes in replay_buffer now so we have the option to sample from
    scratch (i.e. replay_buffer==[]). See test_wrn_ebm.py for example.

    Runs n_steps of outer SGLD; when in_steps > 0 each outer step additionally
    runs a YOPO-style inner loop that only differentiates through the first
    layer via the Hamiltonian H(x, p).  Returns the detached final samples and,
    when `save` is set, writes them back into the replay buffer.
    """
    global inner_his
    inner_his = []
    # Batch norm uses train status
    # f.eval()
    # get batch size
    bs = args.batch_size if y is None else y.size(0)
    # generate initial samples and buffer inds of those samples (if buffer is used)
    init_sample, buffer_inds = sample_p_0(replay_buffer, bs=bs, y=y)
    x_k = t.autograd.Variable(init_sample, requires_grad=True)
    # sgld
    if in_steps > 0:
        # Hamiltonian over the network's first layer output (YOPO trick).
        Hamiltonian_func = Hamiltonian(f.f.layer_one)
    eps = args.eps
    if args.pyld_lr <= 0:
        # Non-positive inner lr disables the inner (PYLD) loop entirely.
        in_steps = 0
    for it in range(n_steps):
        energies = f(x_k, y=y)
        e_x = energies.sum()
        # wgrad = f.f.conv1.weight.grad
        # Full gradient of the energy w.r.t. the input for the outer step.
        eta = t.autograd.grad(e_x, [x_k], retain_graph=True)[0]
        # e_x.backward(retain_graph=True)
        # eta = x_k.grad.detach()
        # f.f.conv1.weight.grad = wgrad
        if in_steps > 0:
            # p is the gradient w.r.t. the first-layer activations; it is held
            # fixed during the inner loop below.
            p = 1.0 * f.f.layer_one_out.grad
            p = p.detach()
            # NOTE(review): this flags x_k's underlying storage as requiring
            # grad before the inner loop — presumably intentional; confirm.
            tmp_inp = x_k.data
            tmp_inp.requires_grad_()
            if args.sgld_lr > 0:
                # if in_steps == 0: use SGLD other than PYLD
                # if in_steps != 0: combine outter and inner gradients
                # default 0
                tmp_inp = x_k + t.clamp(eta, -eps, eps) * args.sgld_lr
                tmp_inp = t.clamp(tmp_inp, -1, 1)
            for i in range(in_steps):
                # Inner update only backprops through the first layer (cheap).
                H = Hamiltonian_func(tmp_inp, p)
                eta_grad = t.autograd.grad(H, [tmp_inp], only_inputs=True, retain_graph=True)[0]
                eta_step = t.clamp(eta_grad, -eps, eps) * args.pyld_lr
                tmp_inp.data = tmp_inp.data + eta_step
                tmp_inp = t.clamp(tmp_inp, -1, 1)
            x_k.data = tmp_inp.data
        if args.sgld_std > 0.0:
            # Langevin noise term.
            x_k.data += args.sgld_std * t.randn_like(x_k)
    if in_steps > 0:
        # Populate first-layer gradients for the subsequent training step.
        loss = -1.0 * Hamiltonian_func(x_k.data, p)
        loss.backward()
    f.train()
    final_samples = x_k.detach()
    # update replay buffer
    if len(replay_buffer) > 0 and save:
        replay_buffer[buffer_inds] = final_samples.cpu()
    return final_samples
def category_mean(dload_train, args):
    """Compute per-class pixel mean and covariance over the training set and
    cache them to '<dataset>_mean.pt' / '<dataset>_cov.pt'.

    These files are consumed by init_from_centers() to build the per-class
    Gaussians used for informative buffer initialization.
    """
    import time
    start = time.time()
    # All supported 32x32 datasets share this shape (the old per-dataset
    # branches were identical).
    size = [3, 32, 32]
    centers = t.zeros([args.n_classes, int(np.prod(size))])
    covs = t.zeros([args.n_classes, int(np.prod(size)), int(np.prod(size))])
    im_test, targ_test = [], []
    for im, targ in dload_train:
        im_test.append(im)
        targ_test.append(targ)
    im_test, targ_test = t.cat(im_test), t.cat(targ_test)
    for i in range(args.n_classes):
        imc = im_test[targ_test == i]
        imc = imc.view(len(imc), -1)
        mean = imc.mean(dim=0)
        sub = imc - mean.unsqueeze(dim=0)
        # Biased empirical covariance (divide by N).
        cov = sub.t() @ sub / len(imc)
        centers[i] = mean
        covs[i] = cov
    print(time.time() - start)
    t.save(centers, '%s_mean.pt' % args.dataset)
    # BUG FIX: was `t.load(covs, ...)`, which raises and never writes the file
    # that init_from_centers() later t.load()s.
    t.save(covs, '%s_cov.pt' % args.dataset)
def init_from_centers(args):
    """Build one Gaussian per class from the cached mean/cov files, register the
    distributions in the module-level `conditionals`, and return an initial
    replay buffer of args.buffer_size samples clamped to [-1, 1]."""
    global conditionals
    from torch.distributions.multivariate_normal import MultivariateNormal
    bs = args.buffer_size
    size = [3, 64, 64] if args.dataset == 'tinyimagenet' else [3, 32, 32]
    centers = t.load('%s_mean.pt' % args.dataset)
    covs = t.load('%s_cov.pt' % args.dataset)
    n_per_class = bs // args.n_classes
    # Diagonal jitter keeps the covariance positive-definite.
    jitter = 1e-4 * t.eye(int(np.prod(size))).to(args.device)
    chunks = []
    for cls in range(args.n_classes):
        dist = MultivariateNormal(centers[cls].to(args.device),
                                  covariance_matrix=covs[cls].to(args.device) + jitter)
        chunks.append(dist.sample((n_per_class,)).view([n_per_class] + size).cpu())
        conditionals.append(dist)
    return t.clamp(t.cat(chunks), -1, 1)
def main(args):
    """Train a JEM++ model: jointly maximize log p(x) (SGLD/PYLD sampling
    against a replay buffer) and log p(y|x) (cross-entropy), with periodic
    evaluation and checkpointing."""
    np.random.seed(args.seed)
    t.manual_seed(args.seed)
    if t.cuda.is_available():
        t.cuda.manual_seed_all(args.seed)
    device = t.device('cuda' if t.cuda.is_available() else 'cpu')
    args.device = device
    # datasets
    dload_train, dload_train_labeled, dload_valid, dload_test = get_data(args)
    # for dataset centers
    # if not os.path.isfile('%s_cov.pt' % args.dataset):
    #     category_mean(dload_train, args)
    f, replay_buffer = get_model_and_buffer(args, device)
    # Informative (per-class Gaussian) buffer init; also fills `conditionals`.
    if args.p_x_weight > 0:
        replay_buffer = init_from_centers(args)
    # optimizer
    params = f.class_output.parameters() if args.clf_only else f.parameters()
    if args.optimizer == "adam":
        optim = t.optim.Adam(params, lr=args.lr, betas=[.9, .999], weight_decay=args.weight_decay)
    else:
        optim = t.optim.SGD(params, lr=args.lr, momentum=.9, weight_decay=args.weight_decay)
    best_valid_acc = 0.0
    cur_iter = 0
    # trace learning rate
    new_lr = args.lr
    n_steps = args.n_steps
    in_steps = args.in_steps
    for epoch in range(args.n_epochs):
        # Step-decay the learning rate at the configured epochs.
        if epoch in args.decay_epochs:
            for param_group in optim.param_groups:
                new_lr = param_group['lr'] * args.decay_rate
                param_group['lr'] = new_lr
            print("Decaying lr to {}".format(new_lr))
        for i, (x_p_d, _) in tqdm(enumerate(dload_train)):
            # Linear warmup for the first warmup_iters iterations.
            if cur_iter <= args.warmup_iters:
                lr = args.lr * cur_iter / float(args.warmup_iters)
                for param_group in optim.param_groups:
                    param_group['lr'] = lr
            x_p_d = x_p_d.to(device)
            x_lab, y_lab = dload_train_labeled.__next__()
            x_lab, y_lab = x_lab.to(device), y_lab.to(device)
            L = 0.
            if args.p_x_weight > 0:  # maximize log p(x)
                # args.plc controls when/how f(x_p_d) is evaluated relative to
                # sampling (before: alltrain1; after: alltrain2; BN frozen: eval).
                if args.plc == 'alltrain1':
                    fp_all = f(x_p_d)
                    fp = fp_all.mean()
                if args.class_cond_p_x_sample:
                    assert not args.uncond, "can only draw class-conditional samples if EBM is class-cond"
                    y_q = t.randint(0, args.n_classes, (args.batch_size,)).to(device)
                    x_q = sample_q(f, replay_buffer, y=y_q, n_steps=n_steps, in_steps=in_steps, args=args)
                else:
                    x_q = sample_q(f, replay_buffer, n_steps=n_steps, in_steps=in_steps, args=args)  # sample from log-sumexp
                if args.plc == 'eval':
                    f.apply(set_bn_eval)
                    fp_all = f(x_p_d)
                    fp = fp_all.mean()
                if args.plc == 'alltrain2':
                    fp_all = f(x_p_d)
                    fp = fp_all.mean()
                fq_all = f(x_q)
                fq = fq_all.mean()
                # Contrastive-divergence-style objective: raise energy gap fp - fq.
                l_p_x = -(fp - fq)
                if args.plc == 'eval':
                    f.apply(set_bn_train)
                if cur_iter % args.print_every == 0:
                    print('{} P(x) | {}:{:>d} f(x_p_d)={:>9.4f} f(x_q)={:>9.4f} d={:>9.4f}'.format(args.pid, epoch, i, fp, fq, fp - fq))
                L += args.p_x_weight * l_p_x
            if args.p_y_given_x_weight > 0:  # maximize log p(y | x)
                logits = f.classify(x_lab)
                l_p_y_given_x = nn.CrossEntropyLoss()(logits, y_lab)
                if cur_iter % args.print_every == 0:
                    acc = (logits.max(1)[1] == y_lab).float().mean()
                    print('{} P(y|x) {}:{:>d} loss={:>9.4f}, acc={:>9.4f}'.format(args.pid, epoch, cur_iter, l_p_y_given_x.item(), acc.item()))
                L += args.p_y_given_x_weight * l_p_y_given_x
            if args.p_x_y_weight > 0:  # maximize log p(x, y)
                assert not args.uncond, "this objective can only be trained for class-conditional EBM DUUUUUUUUHHHH!!!"
                x_q_lab = sample_q(f, replay_buffer, y=y_lab, n_steps=n_steps, in_steps=in_steps, args=args)
                fp, fq = f(x_lab, y_lab).mean(), f(x_q_lab, y_lab).mean()
                l_p_x_y = -(fp - fq)
                if cur_iter % args.print_every == 0:
                    print('P(x, y) | {}:{:>d} f(x_p_d)={:>9.4f} f(x_q)={:>9.4f} d={:>9.4f}'.format(epoch, i, fp, fq, fp - fq))
                L += args.p_x_y_weight * l_p_x_y
            # break if the loss diverged...easier for poppa to run experiments this way
            if L.abs().item() > 1e8:
                print("BAD BOIIIIIIIIII")
                print("min {:>4.3f} max {:>5.3f}".format(x_q.min().item(), x_q.max().item()))
                plot('{}/diverge_{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q)
                return
            optim.zero_grad()
            L.backward()
            optim.step()
            cur_iter += 1
            # Periodically dump sample grids for visual inspection.
            if cur_iter % args.print_every == 0 and args.p_x_weight > 0:
                if not args.plot_cond:
                    if args.class_cond_p_x_sample:
                        assert not args.uncond, "can only draw class-conditional samples if EBM is class-cond"
                        y_q = t.randint(0, args.n_classes, (args.batch_size,)).to(device)
                        x_q = sample_q(f, replay_buffer, y=y_q, n_steps=n_steps, in_steps=in_steps, args=args)
                    else:
                        x_q = sample_q(f, replay_buffer, n_steps=n_steps, in_steps=in_steps, args=args)
                    plot('{}/samples/x_q_{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q)
                if args.plot_cond:  # generate class-conditional samples
                    y = t.arange(0, args.n_classes)[None].repeat(args.n_classes, 1).transpose(1, 0).contiguous().view(-1).to(device)
                    x_q_y = sample_q(f, replay_buffer, y=y, n_steps=n_steps, in_steps=in_steps, args=args)
                    plot('{}/samples/x_q_y{}_{:>06d}.png'.format(args.save_dir, epoch, i), x_q_y)
        if epoch % args.ckpt_every == 0 and args.p_x_weight > 0:
            checkpoint(f, replay_buffer, f'ckpt_{epoch}.pt', args, device)
        if epoch % args.eval_every == 0 and (args.p_y_given_x_weight > 0 or args.p_x_y_weight > 0):
            f.eval()
            with t.no_grad():
                # Model selection on the validation split; test acc is informational.
                correct, loss = eval_classification(f, dload_valid, 'Valid', epoch, args, wlog)
                if args.dataset != 'tinyimagenet':
                    t_c, _ = eval_classification(f, dload_test, 'Test', epoch, args, wlog)
                if correct > best_valid_acc:
                    best_valid_acc = correct
                    print("Epoch {} Best Valid!: {}".format(epoch, correct))
                    checkpoint(f, replay_buffer, "best_valid_ckpt.pt", args, device)
            f.train()
        checkpoint(f, replay_buffer, "last_ckpt.pt", args, device)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("LDA Energy Based Models")
    parser.add_argument("--dataset", type=str, default="cifar10", choices=["cifar10", "svhn", "cifar100", 'tinyimagenet'])
    parser.add_argument("--data_root", type=str, default="../data")
    # optimization
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--decay_epochs", nargs="+", type=int, default=[60, 90, 120, 135], help="decay learning rate by decay_rate at these epochs")
    parser.add_argument("--decay_rate", type=float, default=.2, help="learning rate decay multiplier")
    parser.add_argument("--clf_only", action="store_true", help="If set, then only train the classifier")
    parser.add_argument("--labels_per_class", type=int, default=-1,
                        help="number of labeled examples per class, if zero then use all labels")
    parser.add_argument("--optimizer", choices=["adam", "sgd"], default="adam")
    parser.add_argument("--batch_size", type=int, default=64)
    parser.add_argument("--n_epochs", type=int, default=150)
    parser.add_argument("--warmup_iters", type=int, default=-1,
                        help="number of iters to linearly increase learning rate, if -1 then no warmmup")
    # loss weighting
    parser.add_argument("--p_x_weight", type=float, default=1.)
    parser.add_argument("--p_y_given_x_weight", type=float, default=1.)
    parser.add_argument("--p_x_y_weight", type=float, default=0.)
    # regularization
    parser.add_argument("--dropout_rate", type=float, default=0.0)
    parser.add_argument("--sigma", type=float, default=3e-2,
                        help="stddev of gaussian noise to add to input, .03 works but .1 is more stable")
    parser.add_argument("--weight_decay", type=float, default=4e-4)
    # network
    parser.add_argument("--norm", type=str, default=None, choices=[None, "none", "batch", "instance", "layer", "act"], help="norm to add to weights, none works fine")
    # EBM specific
    parser.add_argument("--n_steps", type=int, default=10, help="number of steps of SGLD per iteration, 20 works for PCD")
    parser.add_argument("--in_steps", type=int, default=5, help="number of steps of SGLD per iteration, 100 works for short-run, 20 works for PCD")
    parser.add_argument("--width", type=int, default=10, help="WRN width parameter")
    parser.add_argument("--depth", type=int, default=28, help="WRN depth parameter")
    parser.add_argument("--uncond", action="store_true", help="If set, then the EBM is unconditional")
    parser.add_argument("--class_cond_p_x_sample", action="store_true",
                        help="If set we sample from p(y)p(x|y), othewise sample from p(x),"
                             "Sample quality higher if set, but classification accuracy better if not.")
    parser.add_argument("--buffer_size", type=int, default=10000)
    parser.add_argument("--reinit_freq", type=float, default=0.05)
    # SGLD or PYLD
    parser.add_argument("--sgld_lr", type=float, default=0.0)
    parser.add_argument("--sgld_std", type=float, default=0)
    parser.add_argument("--pyld_lr", type=float, default=0.2)
    # logging + evaluation
    parser.add_argument("--save_dir", type=str, default='./experiment')
    parser.add_argument("--dir_path", type=str, default='./experiment')
    parser.add_argument("--log_dir", type=str, default='./runs')
    parser.add_argument("--log_arg", type=str, default='JEMPP-n_steps-in_steps-pyld_lr-norm-plc')
    parser.add_argument("--ckpt_every", type=int, default=10, help="Epochs between checkpoint save")
    parser.add_argument("--eval_every", type=int, default=1, help="Epochs between evaluation")
    parser.add_argument("--print_every", type=int, default=100, help="Iterations between print")
    parser.add_argument("--load_path", type=str, default=None)
    parser.add_argument("--print_to_log", action="store_true", help="If true, directs std-out to log file")
    parser.add_argument("--plot_cond", action="store_true", help="If set, save class-conditional samples")
    parser.add_argument("--plot_uncond", action="store_true", help="If set, save unconditional samples")
    parser.add_argument("--n_valid", type=int, default=5000)
    parser.add_argument("--plc", type=str, default="alltrain1", help="alltrain1, alltrain2, eval")
    parser.add_argument("--eps", type=float, default=1, help="eps bound")
    parser.add_argument("--model", type=str, default='yopo')
    parser.add_argument("--novis", action="store_true", help="")
    parser.add_argument("--debug", action="store_true", help="")
    parser.add_argument("--exp_name", type=str, default="JEMPP", help="exp name, for description")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--gpu-id", type=str, default="0")
    args = parser.parse_args()
    # init_env sets args.dir_path (run directory), GPU selection, logging, n_classes.
    init_env(args, logger)
    args.save_dir = args.dir_path
    # NOTE(review): makedirs without exist_ok raises if the samples dir already
    # exists (e.g. on restart) — confirm whether that is intentional.
    os.makedirs('{}/samples'.format(args.dir_path))
    # Route all subsequent print() calls through the experiment logger.
    print = wlog
    print(args.dir_path)
    main(args)
    print(args.dir_path)
| 17,931 | 43.606965 | 166 | py |
JEMPP | JEMPP-master/utils.py | import os
import torch
import torch as t
import torch.nn as nn
import torchvision as tv
import torchvision.transforms as tr
from torch.utils.data import DataLoader, Dataset
import numpy as np
from torch.nn.modules.loss import _Loss
from ExpUtils import AverageMeter
class Hamiltonian(_Loss):
    """Inner-product 'Hamiltonian' used by the YOPO-style inner SGLD loop:
    H(x, p) = sum(layer(x) * p), where `layer` is the network's first layer.
    """

    def __init__(self, layer, reg_cof=1e-4):
        super(Hamiltonian, self).__init__()
        self.layer = layer
        # Honor the constructor argument; the old code hard-wired 0 and
        # silently ignored `reg_cof`.  The coefficient is currently unused in
        # forward() (the regularization term below is commented out), so this
        # does not change behavior.
        self.reg_cof = reg_cof

    def forward(self, x, p):
        y = self.layer(x)
        H = torch.sum(y * p)
        # H = H - self.reg_cof * l2
        return H
def sqrt(x):
    """Truncated integer square root of x, computed via torch."""
    return int(t.Tensor([x]).sqrt())
def plot(p, x):
    """Save image batch `x` (clamped to [-1, 1]) as a roughly square grid at path `p`."""
    clamped = t.clamp(x, -1, 1)
    return tv.utils.save_image(clamped, p, normalize=True, nrow=sqrt(x.size(0)))
def makedirs(dirname):
    """Create `dirname` (including parents) if it does not already exist.

    Uses exist_ok=True, which removes the check-then-create race the old
    exists()/makedirs() pair had under concurrent launches.
    """
    os.makedirs(dirname, exist_ok=True)
def save_checkpoint(state, save, epoch):
    """Serialize `state` to '<save>/checkpt-<epoch:04d>.pth', creating `save` if needed.

    exist_ok=True avoids the check-then-create race of the old
    exists()/makedirs() pair.
    """
    os.makedirs(save, exist_ok=True)
    filename = os.path.join(save, 'checkpt-%04d.pth' % epoch)
    torch.save(state, filename)
class DataSubset(Dataset):
    """A view onto `base_dataset` restricted to the index list `inds`.

    When `inds` is None, `size` indices are drawn uniformly without replacement.
    """

    def __init__(self, base_dataset, inds=None, size=-1):
        self.base_dataset = base_dataset
        if inds is None:
            inds = np.random.choice(list(range(len(base_dataset))), size, replace=False)
        self.inds = inds

    def __getitem__(self, index):
        return self.base_dataset[self.inds[index]]

    def __len__(self):
        return len(self.inds)
def cycle(loader):
    """Yield items from `loader` forever, restarting iteration on exhaustion.

    Unlike itertools.cycle this re-iterates the loader on each pass, so a
    shuffling DataLoader reshuffles every epoch.
    """
    while True:
        yield from loader
def init_random(args, bs, im_sz=32, n_ch=3):
    """Uniform noise images in [-1, 1] with shape (bs, n_ch, im_sz, im_sz); `args` is unused."""
    noise = t.FloatTensor(bs, n_ch, im_sz, im_sz)
    return noise.uniform_(-1, 1)
def get_data(args):
    """Build train / labeled-train / valid / test dataloaders for args.dataset.

    Training transforms use reflect-pad random crops (plus horizontal flips for
    non-SVHN data) and additive Gaussian input noise of std args.sigma; eval
    transforms only normalize to [-1, 1].  The labeled-train loader is wrapped
    in cycle() so it can be drawn from indefinitely.
    """
    if args.dataset == "svhn":
        # No horizontal flip for SVHN (digits are orientation-sensitive).
        transform_train = tr.Compose(
            [tr.Pad(4, padding_mode="reflect"),
             tr.RandomCrop(32),
             tr.ToTensor(),
             tr.Normalize((.5, .5, .5), (.5, .5, .5)),
             lambda x: x + args.sigma * t.randn_like(x)]
        )
    else:
        transform_train = tr.Compose(
            [tr.Pad(4, padding_mode="reflect"),
             tr.RandomCrop(32),
             tr.RandomHorizontalFlip(),
             tr.ToTensor(),
             tr.Normalize((.5, .5, .5), (.5, .5, .5)),
             lambda x: x + args.sigma * t.randn_like(x)]
        )
    transform_test = tr.Compose(
        [tr.ToTensor(),
         tr.Normalize((.5, .5, .5), (.5, .5, .5))]
    )

    def dataset_fn(train, transform):
        # Factory so train/labeled/valid splits can share download logic.
        if args.dataset == "cifar10":
            return tv.datasets.CIFAR10(root=args.data_root, transform=transform, download=True, train=train)
        elif args.dataset == "cifar100":
            return tv.datasets.CIFAR100(root=args.data_root, transform=transform, download=True, train=train)
        else:
            return tv.datasets.SVHN(root=args.data_root, transform=transform, download=True, split="train" if train else "test")

    # get all training inds
    full_train = dataset_fn(True, transform_train)
    all_inds = list(range(len(full_train)))
    # set seed
    # Fixed split seed so the validation split is stable across runs.
    np.random.seed(1234)
    # shuffle
    np.random.shuffle(all_inds)
    # seperate out validation set
    if args.n_valid > args.n_classes:
        valid_inds, train_inds = all_inds[:args.n_valid], all_inds[args.n_valid:]
    else:
        valid_inds, train_inds = [], all_inds
    train_inds = np.array(train_inds)
    train_labeled_inds = []
    other_inds = []
    if args.labels_per_class > 0:
        # Semi-supervised mode: keep only labels_per_class labeled examples per class.
        train_labels = np.array([full_train[ind][1] for ind in train_inds])  # to speed up
        for i in range(args.n_classes):
            print(i)
            train_labeled_inds.extend(train_inds[train_labels == i][:args.labels_per_class])
            other_inds.extend(train_inds[train_labels == i][args.labels_per_class:])
    else:
        train_labeled_inds = train_inds
    dset_train = DataSubset(dataset_fn(True, transform_train), inds=train_inds)
    dset_train_labeled = DataSubset(dataset_fn(True, transform_train), inds=train_labeled_inds)
    # Validation uses the *test* transform (no augmentation/noise).
    dset_valid = DataSubset(dataset_fn(True, transform_test), inds=valid_inds)
    num_workers = 0 if args.debug else 4
    dload_train = DataLoader(dset_train, batch_size=args.batch_size, shuffle=True, num_workers=num_workers, drop_last=True)
    dload_train_labeled = DataLoader(dset_train_labeled, batch_size=args.batch_size, shuffle=True, num_workers=num_workers, drop_last=True)
    dload_train_labeled = cycle(dload_train_labeled)
    dset_test = dataset_fn(False, transform_test)
    dload_valid = DataLoader(dset_valid, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    dload_test = DataLoader(dset_test, batch_size=100, shuffle=False, num_workers=num_workers, drop_last=False)
    return dload_train, dload_train_labeled, dload_valid, dload_test
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Returns a list of 1-element tensors (percentages), one per entry in `topk`.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): view() raises a RuntimeError on
            # non-contiguous tensors in newer PyTorch (the well-known
            # pytorch/examples fix for this exact snippet).
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def checkpoint(f, buffer, tag, args, device):
    """Save `f`'s weights plus the replay buffer under args.save_dir/tag.

    The model is moved to CPU for serialization and restored to `device` after.
    """
    f.cpu()
    payload = {
        "model_state_dict": f.state_dict(),
        "replay_buffer": buffer,
    }
    t.save(payload, os.path.join(args.save_dir, tag))
    f.to(device)
def set_bn_eval(m):
    """Module.apply hook: switch BatchNorm layers (only) into eval mode."""
    is_bn = isinstance(m, nn.modules.batchnorm._BatchNorm)
    if is_bn:
        m.eval()
def set_bn_train(m):
    """Module.apply hook: switch BatchNorm layers (only) back into train mode."""
    is_bn = isinstance(m, nn.modules.batchnorm._BatchNorm)
    if is_bn:
        m.train()
def eval_classification(f, dload, set_name, epoch, args=None, wlog=None):
    """Run classifier `f` over `dload`; return (accuracy, mean cross-entropy loss).

    Datasets with >= 200 classes are reported as top-1/top-5 via AverageMeter;
    otherwise plain top-1.  Progress is printed through `wlog` when given, and
    scalars go to args.writer when args.vis is set.
    """
    corrects, losses = [], []
    if args.n_classes >= 200:
        top1 = AverageMeter('Acc@1', ':6.2f')
        top5 = AverageMeter('Acc@5', ':6.2f')
    for x, y in dload:
        x, y = x.to(args.device), y.to(args.device)
        logits = f.classify(x)
        loss = nn.CrossEntropyLoss(reduction='none')(logits, y).detach().cpu().numpy()
        losses.extend(loss)
        if args.n_classes >= 200:
            acc1, acc5 = accuracy(logits, y, topk=(1, 5))
            top1.update(acc1[0], x.size(0))
            top5.update(acc5[0], x.size(0))
        else:
            # Fixed: the per-batch correctness used to be computed and extended
            # twice per batch (copy-paste duplication); the mean was unaffected
            # but the work and the unused list on the top-5 path were wasted.
            correct = (logits.max(1)[1] == y).float().cpu().numpy()
            corrects.extend(correct)
    loss = np.mean(losses)
    my_print = wlog if wlog else print
    if args.n_classes >= 200:
        correct = top1.avg
        my_print("Epoch %d, %s loss %.5f, top1 acc %.4f, top5 acc %.4f" % (epoch, set_name, loss, top1.avg, top5.avg))
    else:
        correct = np.mean(corrects)
        my_print("Epoch %d, %s loss %.5f, acc %.4f" % (epoch, set_name, loss, correct))
    if args.vis:
        args.writer.add_scalar('%s/Loss' % set_name, loss, epoch)
        if args.n_classes >= 200:
            args.writer.add_scalar('%s/Acc_1' % set_name, top1.avg, epoch)
            args.writer.add_scalar('%s/Acc_5' % set_name, top5.avg, epoch)
        else:
            args.writer.add_scalar('%s/Accuracy' % set_name, correct, epoch)
    return correct, loss
| 7,385 | 32.572727 | 139 | py |
JEMPP | JEMPP-master/ExpUtils.py | import os
import sys
import json
import time
import socket
import shutil
import signal
import logging
from functools import partial
import torch
import numpy as np
import tensorboardX as tbX
import matplotlib.pyplot as plt
# Root logger: timestamped messages including source file and line number.
logging.basicConfig(level=logging.INFO, format="%(asctime)s: %(filename)s[%(lineno)d]: %(message)s", datefmt="%m-%d %H:%M:%S")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Project-wide logging shorthand used in place of print() throughout the code.
wlog = logger.info
def init_env(args, exp_logger):
    """One-stop experiment setup: debug detection, GPU selection, run directory,
    file logging, tensorboard writer, and dataset-dependent n_classes."""
    # 1. debug -> num_workers
    init_debug(args)
    args.vis = not args.novis
    args.hostname = socket.gethostname().split('.')[0]
    # 2. select gpu
    auto_select_gpu(args)
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
    args.dir_path = form_dir_path(args.exp_name, args)
    set_file_logger(exp_logger, args)
    init_logger_board(args)
    # Class count follows the dataset; default is 10 (cifar10 / svhn).
    class_counts = {"cifar100": 100, "tinyimagenet": 200}
    args.n_classes = class_counts.get(args.dataset, 10)
def init_debug(args):
    """Set args.debug = True when a debugger/tracer is attached.

    DataLoader workers must be 0 under a debugger, so callers key num_workers
    off args.debug.
    """
    # verify the debug mode
    trace_fn = getattr(sys, 'gettrace', None)
    if trace_fn is None:
        print('No sys.gettrace')
        args.debug = False
        return
    if trace_fn():
        print('Hmm, Big Debugger is watching me')
        args.debug = True
    else:
        args.debug = False
def auto_select_gpu(args):
    """Pick the least-loaded GPU via GPUtil and store its id in args.gpu_id.

    No-op when the user already supplied --gpu-id.  Returns without setting
    anything when no GPUs are visible; raises when all GPUs are too loaded.
    """
    if args.gpu_id:
        return
    try:
        import GPUtil
    except ImportError:
        wlog("please install GPUtil for automatically selecting GPU")
        # NOTE(review): falls back to GPU '1' — looks arbitrary; confirm
        # this default is intended rather than '0'.
        args.gpu_id = '1'
        return
    if len(GPUtil.getGPUs()) == 0:
        return
    # Least-loaded first; accept GPUs under 70% load and 90% memory use.
    id_list = GPUtil.getAvailable(order="load", maxLoad=0.7, maxMemory=0.9, limit=8)
    if len(id_list) == 0:
        print("GPU memory is not enough for predicted usage")
        raise NotImplementedError
    args.gpu_id = str(id_list[0])
def init_logger_board(args):
    """Attach a tensorboardX SummaryWriter at args.dir_path when visualization is enabled."""
    enabled = 'vis' in vars(args) and args.vis
    if enabled:
        args.writer = tbX.SummaryWriter(log_dir=args.dir_path)
def vlog(writer, cur_iter, set_name, wlog=None, verbose=True, **kwargs):
    """Push every keyword metric to tensorboard as '<set_name>/<Metric>'.

    When verbose is False, also emit a compact one-line summary of the known
    metric names via `wlog` (or print).
    """
    for key, value in kwargs.items():
        writer.add_scalar('%s/%s' % (set_name, key.capitalize()), value, cur_iter)
    my_print = wlog if wlog else print
    if not verbose:
        summary_line = "%d " % cur_iter
        summary_line += ','.join("%s: %.4f" % (k, kwargs[k]) for k in ['loss', 'acc', 'acc1', 'acc5'] if k in kwargs)
        my_print(summary_line)
def set_file_logger(exp_logger, args):
    """Persist run metadata: dump args to para.json, attach a FileHandler that
    writes exp.log under args.dir_path, archive the launching script, and
    install signal handlers for graceful run-dir cleanup.
    """
    # Just use "logger" above
    # use tensorboard + this function to substitute ExpSaver
    args_dict = vars(args)
    dir_path = args.dir_path
    if not os.path.isdir(dir_path):
        os.makedirs(dir_path)
    # Full reproducibility record of the CLI configuration.
    with open(os.path.join(dir_path, "para.json"), "w") as fp:
        json.dump(args_dict, fp, indent=4, sort_keys=True)
    logfile = os.path.join(dir_path, "exp.log")
    fh = logging.FileHandler(logfile, mode='w')
    fh.setLevel(logging.INFO)
    formatter = logging.Formatter("%(asctime)s - %(filename)s[line:%(lineno)d]: %(message)s")
    fh.setFormatter(formatter)
    exp_logger.addHandler(fh)
    # Keep a copy of the entry-point script alongside the logs.
    copy_script_to_folder(sys.argv[0], args.dir_path)
    # Windows has no SIGQUIT; the handlers rename/delete the run dir on exit.
    if os.name != 'nt':
        signal.signal(signal.SIGQUIT, partial(rename_quit_handler, args))
        signal.signal(signal.SIGTERM, partial(delete_quit_handler, args))
def list_args(args):
    """Print every arg as an assignable 'args.name = value' line, sorted by name; strings are quoted."""
    for name, value in sorted(vars(args).items()):
        rendered = '"%s"' % value if isinstance(value, str) else value
        print("args.%s = %s" % (name, rendered))
def form_dir_path(task, args):
    """Build the unique run-directory path for this experiment.

    Layout: <base>/<task>/<dataset>/<time>@<marker>@<pid>, where <marker> is
    derived from the dash-separated arg names in args.log_arg.

    Params:
        task: the name of your experiment/research.
        args: the namespace of argparse.
    requires:
        --dataset: always need a dataset.
        --log-arg: the details shown in the name of your directory where logs are.
        --log-dir: the directory to save logs, default is ~/project/runs.
    """
    args.pid = os.getpid()
    args_dict = vars(args)
    # Tolerate callers that never defined these two options.
    if "log_dir" not in args_dict:
        args.log_dir = ""
    if "log_arg" not in args_dict:
        args.log_arg = ""
    run_time = time.strftime('%m%d%H%M%S', time.localtime(time.time()))
    if args.debug:
        task += '-debug'
    # Render each requested arg: unknown -> the name itself, str -> its value,
    # everything else -> "name=value".
    pieces = []
    for key in args.log_arg.split("-"):
        val = args_dict.get(key, None)
        if val is None:
            pieces.append(str(key))
        elif isinstance(val, str):
            pieces.append(str(val))
        else:
            pieces.append("%s=%s" % (key, str(val)))
    args.exp_marker = marker = "-".join(pieces)
    run_name = "%s/%s/%s@%s@%d" % (task, args.dataset, run_time, marker, os.getpid())
    base_dir = args.log_dir if args.log_dir else os.path.join(os.environ['HOME'], 'project/runs')
    return os.path.join(base_dir, run_name)
def summary(data):
    """Log basic statistics (shape, extrema, means) of an ndarray or tensor via wlog."""
    assert isinstance(data, np.ndarray) or isinstance(data, torch.Tensor)
    n_points, n_pixels = data.shape[0], np.prod(data.shape[1:])
    wlog("shape: %s, num of points: %d, pixels: %d" % (str(data.shape), n_points, n_pixels))
    wlog("max: %g, min %g" % (data.max(), data.min()))
    wlog("mean: %g" % data.mean())
    wlog("mean of abs: %g" % np.abs(data).mean())
    wlog("mean of square sum: %g" % (data ** 2).mean())
def remove_outliers(x, outlier_constant=1.5):
    """Drop points outside the Tukey IQR fence [Q1 - c*IQR, Q3 + c*IQR]."""
    arr = np.array(x)
    q3 = np.percentile(arr, 75)
    q1 = np.percentile(arr, 25)
    margin = (q3 - q1) * outlier_constant
    low, high = q1 - margin, q3 + margin
    return arr[np.where((arr >= low) & (arr <= high))]
def vis_step(writer, step, dicts):
    """
    Add several curves.
    """
    for tag, value in dicts.items():
        writer.add_scalar(tag, value, step)
def copy_script_to_folder(caller_path, folder):
    '''Copy the launching script into `folder` for reproducibility.

    Uses os.path.basename instead of splitting on '/', which breaks on
    Windows-style paths.
    '''
    script_filename = os.path.basename(caller_path)
    script_relative_path = os.path.join(folder, script_filename)
    shutil.copy(caller_path, script_relative_path)
def time_string():
    '''Current GMT time rendered as "[YYYY-MM-DD HH:MM:SS]".'''
    fmt = '%Y-%m-%d %X'
    return '[{}]'.format(time.strftime(fmt, time.gmtime(time.time())))
def convert_secs2time(epoch_time):
    """Split a duration in seconds into whole (hours, minutes, seconds)."""
    hours = int(epoch_time / 3600)
    remainder = epoch_time - 3600 * hours
    minutes = int(remainder / 60)
    seconds = int(remainder - 60 * minutes)
    return hours, minutes, seconds
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
    def __init__(self, total_epoch):
        # All state lives in reset(); __init__ just delegates.
        self.reset(total_epoch)
def reset(self, total_epoch):
assert total_epoch > 0
self.total_epoch = total_epoch
self.current_epoch = 0
self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_losses = self.epoch_losses - 1
self.epoch_accuracy= np.zeros((self.total_epoch, 2), dtype=np.float32) # [epoch, train/val]
self.epoch_accuracy= self.epoch_accuracy
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
assert idx >= 0 and idx < self.total_epoch, 'total_epoch : {} , but update with the {} index'.format(self.total_epoch, idx)
self.epoch_losses [idx, 0] = train_loss
self.epoch_losses [idx, 1] = val_loss
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
return self.max_accuracy(False) == val_acc
def max_accuracy(self, istrain):
if self.current_epoch <= 0: return 0
if istrain: return self.epoch_accuracy[:self.current_epoch, 0].max()
else: return self.epoch_accuracy[:self.current_epoch, 1].max()
def plot_curve(self, save_path):
title = 'the accuracy/loss curve of train/val'
dpi = 80
width, height = 1200, 800
legend_fontsize = 10
scale_distance = 48.8
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 100)
interval_y = 5
interval_x = 5
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 100 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=20)
plt.xlabel('the training epoch', fontsize=16)
plt.ylabel('accuracy', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0]
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='train-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1]
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='valid-accuracy', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis*50, color='g', linestyle=':', label='train-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis*50, color='y', linestyle=':', label='valid-loss-x50', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
print ('---- save figure {} into {}'.format(title, save_path))
plt.close(fig)
class AverageMeter:
    """Tracks the most recent value and the running average of a series."""
    def __init__(self, name='', fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
def plotting(exp_dir):
    """Render loss and accuracy curves from `exp_dir`/log.pkl into
    loss.png and acc.png inside the same directory.

    The pickled dict must contain 'train_loss', 'test_loss', 'train_acc'
    and 'test_acc' sequences (one entry per evaluation step). Relies on
    module-level `pickle`, `os`, `np` and `plt`.
    """
    # Load the training log dictionary:
    train_dict = pickle.load(open(os.path.join(exp_dir, 'log.pkl'), 'rb'))
    ###########################################################
    # Make the vanilla train and test loss per epoch plot #
    ###########################################################
    plt.plot(np.asarray(train_dict['train_loss']), label='train_loss')
    plt.plot(np.asarray(train_dict['test_loss']), label='test_loss')
    # plt.ylim(0,2000)
    plt.xlabel('evaluation step')
    plt.ylabel('metrics')
    plt.tight_layout()
    plt.legend(loc='upper right')
    plt.savefig(os.path.join(exp_dir, 'loss.png' ))
    plt.clf()  # reuse the same figure for the accuracy plot
    # accuracy
    plt.plot(np.asarray(train_dict['train_acc']), label='train_acc')
    plt.plot(np.asarray(train_dict['test_acc']), label='test_acc')
    # plt.ylim(0,2000)
    plt.xlabel('evaluation step')
    plt.ylabel('metrics')
    plt.tight_layout()
    plt.legend(loc='upper right')
    plt.savefig(os.path.join(exp_dir, 'acc.png'))
    plt.clf()
def get_axis(axarr, H, W, i, j):
    """Return the subplot axis at grid position (i, j).

    Handles matplotlib's convention where plt.subplots returns a bare
    axis for a 1x1 grid and a 1-D array for a single row or column.
    """
    if H == 1 and W == 1:
        return axarr
    if H == 1 or W == 1:
        return axarr[max(i, j)]
    return axarr[i][j]
def show_image_row(xlist, ylist=None, fontsize=12, size=(2.5, 2.5), tlist=None, filename=None):
    """Display an H x W grid of images.

    xlist: H rows of W CHW image tensors (permuted to HWC for imshow).
    ylist: optional per-row labels, drawn on the first column only.
    tlist: optional per-cell titles (same H x W nesting as xlist).
    filename: if given, the figure is also saved there.
    """
    H, W = len(xlist), len(xlist[0])
    fig, axarr = plt.subplots(H, W, figsize=(size[0] * W, size[1] * H))
    for w in range(W):
        for h in range(H):
            ax = get_axis(axarr, H, W, h, w)
            ax.imshow(xlist[h][w].permute(1, 2, 0))
            # Hide all tick marks and labels.
            ax.xaxis.set_ticks([])
            ax.yaxis.set_ticks([])
            ax.xaxis.set_ticklabels([])
            ax.yaxis.set_ticklabels([])
            if ylist and w == 0:
                ax.set_ylabel(ylist[h], fontsize=fontsize)
            if tlist:
                ax.set_title(tlist[h][w], fontsize=fontsize)
    if filename is not None:
        plt.savefig(filename, bbox_inches='tight')
    plt.show()
def delete_quit_handler(g_var, signal, frame):
    """Signal handler: remove the experiment directory, then exit cleanly."""
    target = g_var.dir_path
    shutil.rmtree(target)
    sys.exit(0)
def rename_quit_handler(g_var, signal, frame):
    """Signal handler: mark the experiment directory as stopped, then exit."""
    src = g_var.dir_path
    os.rename(src, src + "_stop")
    sys.exit(0)
# ---- JEMPP-master/Task/data.py ----
from tensorflow.python.platform import flags
from tensorflow.contrib.data.python.ops import batching
import tensorflow as tf
import json
from torch.utils.data import Dataset
import pickle
import os.path as osp
import os
import numpy as np
import time
from scipy.misc import imread, imresize
from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder
from torchvision import transforms
from Task.imagenet_preprocessing import ImagenetPreprocessor
import torch
import torchvision
FLAGS = flags.FLAGS
# Dataset Options
flags.DEFINE_string('dsprites_path',
                    '/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz',
                    'path to dsprites characters')
# NOTE(review): several help strings below ('whether cutoff should always in
# image', 'path to imagenet images' on load_path, 'size of cutout' on gpu-id)
# look copy-pasted from unrelated flags -- confirm intended descriptions.
flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image')
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_type', 'npy', 'npy or png')
flags.DEFINE_bool('single', False, 'single ')
flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single')
# Data augmentation options
# NOTE(review): the cutout flags are commented out, but cutout() below still
# reads FLAGS.cutout_mask_size / cutout_prob / cutout_inside -- they must be
# defined elsewhere before cutout() is used.
# flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image')
# flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
# flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
# flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')
flags.DEFINE_string('eval', '', '')
flags.DEFINE_string('init', '', '')
flags.DEFINE_string('norm', '', '')
flags.DEFINE_string('n_steps', '', '')
flags.DEFINE_string('reinit_freq', '', '')
flags.DEFINE_string('print_every', '', '')
flags.DEFINE_string('n_sample_steps', '', '')
flags.DEFINE_integer('gpu-id', 16, 'size of cutout')
def cutout(mask_color=(0, 0, 0)):
    """Build a cutout-augmentation closure that blanks a random square patch.

    NOTE(review): reads FLAGS.cutout_mask_size / cutout_prob / cutout_inside,
    whose DEFINE_* lines are commented out above -- they must be registered
    elsewhere or calling this raises. Confirm before re-enabling.
    """
    mask_size_half = FLAGS.cutout_mask_size // 2
    offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0
    def _cutout(image):
        """Blank one random square of `image` with `mask_color` (copy-on-write)."""
        image = np.asarray(image).copy()
        # Apply cutout only with probability FLAGS.cutout_prob.
        if np.random.random() > FLAGS.cutout_prob:
            return image
        h, w = image.shape[:2]
        if FLAGS.cutout_inside:
            # Restrict centers so the whole mask stays inside the image.
            cxmin, cxmax = mask_size_half, w + offset - mask_size_half
            cymin, cymax = mask_size_half, h + offset - mask_size_half
        else:
            cxmin, cxmax = 0, w + offset
            cymin, cymax = 0, h + offset
        cx = np.random.randint(cxmin, cxmax)
        cy = np.random.randint(cymin, cymax)
        xmin = cx - mask_size_half
        ymin = cy - mask_size_half
        xmax = xmin + FLAGS.cutout_mask_size
        ymax = ymin + FLAGS.cutout_mask_size
        # Clip the mask to the image bounds.
        xmin = max(0, xmin)
        ymin = max(0, ymin)
        xmax = min(w, xmax)
        ymax = min(h, ymax)
        # NOTE(review): h, w come from shape[:2] (HWC order) but the fill below
        # indexes [C, y, x] (CHW order) -- one of the two looks wrong; confirm
        # the expected image layout.
        image[:, ymin:ymax, xmin:xmax] = np.array(mask_color)[:, None, None]
        return image
    return _cutout
class TFImagenetLoader(Dataset):
    """Iterable loader that streams ImageNet TFRecords through a TF session.

    Each __next__ yields (uniform-noise init batch, image batch, one-hot
    labels) as torch tensors. `idx`/`num_workers` select this worker's
    contiguous shard of the train split. Images are 128x128, scaled to
    [0, rescale], with dequantization noise added inside the TF graph.
    Uses TF1-era APIs (tf.contrib, sessions, initializable iterators).
    """
    def __init__(self, split, batchsize, idx, num_workers, rescale=1):
        IMAGENET_NUM_TRAIN_IMAGES = 1281167
        IMAGENET_NUM_VAL_IMAGES = 50000
        self.rescale = rescale
        if split == "train":
            # Shard the train records evenly across workers.
            im_length = IMAGENET_NUM_TRAIN_IMAGES
            records_to_skip = im_length * idx // num_workers
            records_to_read = im_length * (idx + 1) // num_workers - records_to_skip
        else:
            im_length = IMAGENET_NUM_VAL_IMAGES
        self.curr_sample = 0
        # index.json maps each record file to the number of records it holds.
        index_path = osp.join(FLAGS.imagenet_datadir, 'index.json')
        with open(index_path) as f:
            metadata = json.load(f)
            counts = metadata['record_counts']
        if split == 'train':
            # Walk record files in order until this worker's shard is covered.
            file_names = list(sorted([x for x in counts.keys() if x.startswith('train')]))
            result_records_to_skip = None
            files = []
            for filename in file_names:
                records_in_file = counts[filename]
                if records_to_skip >= records_in_file:
                    records_to_skip -= records_in_file
                    continue
                elif records_to_read > 0:
                    if result_records_to_skip is None:
                        # Record the number to skip in the first file
                        result_records_to_skip = records_to_skip
                    files.append(filename)
                    records_to_read -= (records_in_file - records_to_skip)
                    records_to_skip = 0
                else:
                    break
        else:
            files = list(sorted([x for x in counts.keys() if x.startswith('validation')]))
        files = [osp.join(FLAGS.imagenet_datadir, x) for x in files]
        preprocess_function = ImagenetPreprocessor(128, dtype=tf.float32, train=False).parse_and_preprocess
        # Build the tf.data pipeline: filenames -> records -> shuffled,
        # repeated, decoded batches.
        ds = tf.data.TFRecordDataset.from_generator(lambda: files, output_types=tf.string)
        # NOTE(review): ds.apply(tf.data.TFRecordDataset) is an unusual way to
        # map filenames to records -- confirm against the TF version in use.
        ds = ds.apply(tf.data.TFRecordDataset)
        ds = ds.take(im_length)
        ds = ds.prefetch(buffer_size=FLAGS.batch_size)
        ds = ds.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=10000))
        ds = ds.apply(batching.map_and_batch(map_func=preprocess_function, batch_size=FLAGS.batch_size, num_parallel_batches=4))
        ds = ds.prefetch(buffer_size=2)
        ds_iterator = ds.make_initializable_iterator()
        labels, images = ds_iterator.get_next()
        # Rescale to [0, 1] and add uniform dequantization noise in-graph.
        self.images = tf.clip_by_value(images / 256 + tf.random_uniform(tf.shape(images), 0, 1. / 256), 0.0, 1.0)
        self.labels = labels
        # CPU-only session: the loader never needs GPU memory.
        config = tf.ConfigProto(device_count={'GPU': 0})
        sess = tf.Session(config=config)
        sess.run(ds_iterator.initializer)
        self.im_length = im_length // batchsize
        self.sess = sess
    def __next__(self):
        """Fetch one batch; returns (noise init, images, one-hot labels) as torch tensors."""
        self.curr_sample += 1
        sess = self.sess
        im_corrupt = np.random.uniform(0, self.rescale, size=(FLAGS.batch_size, 128, 128, 3))
        label, im = sess.run([self.labels, self.images])
        im = im * self.rescale
        # Labels are 1-based in the records; shift to 0-based for one-hot.
        label = np.eye(1000)[label.squeeze() - 1]
        im, im_corrupt, label = torch.from_numpy(im), torch.from_numpy(im_corrupt), torch.from_numpy(label)
        return im_corrupt, im, label
    def __iter__(self):
        return self
    def __len__(self):
        # Number of batches per pass over this worker's shard.
        return self.im_length
class CelebA(Dataset):
    """CelebA faces resized to 32x32 floats in [0, 1].

    Returns (corrupt_init, image, label) where label is the constant 1.
    NOTE(review): scipy.misc.imread/imresize require an old SciPy (< 1.2);
    confirm the pinned environment.
    """
    def __init__(self):
        self.path = "/root/data/img_align_celeba"
        self.ims = os.listdir(self.path)
        self.ims = [osp.join(self.path, im) for im in self.ims]
        # Bug fix: __getitem__ reads `self.FLAGS`, which was never assigned,
        # so every access raised AttributeError. Store the module-level
        # flags object, matching the other dataset wrappers in this file.
        self.FLAGS = FLAGS
    def __len__(self):
        return len(self.ims)
    def __getitem__(self, index):
        FLAGS = self.FLAGS
        FLAGS.single = False  # forced off, so the branch below never fires
        label = 1  # unconditional data: constant label
        if FLAGS.single:
            index = 0
        path = self.ims[index]
        im = imread(path)
        im = imresize(im, (32, 32))
        image_size = 32
        im = im / 255.
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0, 1, size=(image_size, image_size, 3))
        return im_corrupt, im, label
class Cifar10(Dataset):
    """CIFAR-10 wrapper returning (random_init, dequantized_image, one_hot_label).

    Images are HWC floats scaled to [0, rescale] with optional uniform
    dequantization noise. With full=True the train and test splits are
    concatenated (test indices follow train indices).
    """
    def __init__(
        self, FLAGS,
        train=True,
        full=False,
        augment=False,
        noise=True,
        rescale=1.0):
        if augment:
            # Standard CIFAR augmentation: pad-crop + horizontal flip.
            transform_list = [
                torchvision.transforms.RandomCrop(32, padding=4),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
            ]
            # if FLAGS.cutout:
            # transform_list.append(cutout())
            transform = transforms.Compose(transform_list)
        else:
            transform = transforms.ToTensor()
        self.FLAGS = FLAGS
        self.full = full
        self.data = CIFAR10(
            "../data/dataset/cifar10",
            transform=transform,
            train=train,
            download=True)
        self.test_data = CIFAR10(
            "../data/dataset/cifar10",
            transform=transform,
            train=False,
            download=True)
        self.one_hot_map = np.eye(10)
        self.noise = noise
        self.rescale = rescale
    def __len__(self):
        if self.full:
            return len(self.data) + len(self.test_data)
        else:
            return len(self.data)
    def __getitem__(self, index):
        FLAGS = self.FLAGS
        FLAGS.single = False  # forced off: the else-branch below is dead code
        if not FLAGS.single:
            if self.full:
                # Indices past the train split fall through to the test split.
                if index >= len(self.data):
                    im, label = self.test_data[index - len(self.data)]
                else:
                    im, label = self.data[index]
            else:
                im, label = self.data[index]
        else:
            im, label = self.data[0]
        im = np.transpose(im, (1, 2, 0)).numpy()  # CHW tensor -> HWC array
        image_size = 32
        label = self.one_hot_map[label]
        # Map [0,1] onto the 255/256 grid, then optionally dequantize.
        im = im * 255 / 256
        if self.noise:
            im = im * self.rescale + \
                np.random.uniform(0, self.rescale * 1 / 256., im.shape)
        # Re-seed with wall-clock time -- presumably to decorrelate
        # DataLoader worker RNG streams; TODO confirm.
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        FLAGS.datasource = 'random'  # forced: the 'default' branch is unreachable
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, self.rescale, (image_size, image_size, 3))
        return im_corrupt, im, label
class Cifar100(Dataset):
    """CIFAR-100 wrapper returning (random_init, dequantized_image, one_hot_label).

    Images are HWC floats in [0, 1] with uniform dequantization noise.
    """
    def __init__(self, FLAGS, train=True, augment=False):
        if augment:
            # Standard CIFAR augmentation: pad-crop + horizontal flip.
            transform_list = [
                torchvision.transforms.RandomCrop(32, padding=4),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
            ]
            # if FLAGS.cutout:
            # transform_list.append(cutout())
            transform = transforms.Compose(transform_list)
        else:
            transform = transforms.ToTensor()
        self.FLAGS = FLAGS
        self.data = CIFAR100(
            os.path.join(os.environ['HOME'], 'project/research/data/dataset', "cifar100"),
            transform=transform,
            train=train,
            download=True)
        self.one_hot_map = np.eye(100)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        FLAGS = self.FLAGS
        FLAGS.single = False  # forced off: the else-branch below is dead code
        if not FLAGS.single:
            im, label = self.data[index]
        else:
            im, label = self.data[0]
        im = np.transpose(im, (1, 2, 0)).numpy()  # CHW tensor -> HWC array
        image_size = 32
        label = self.one_hot_map[label]
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)  # dequantization noise
        # Re-seed with wall-clock time -- presumably to decorrelate
        # DataLoader worker RNG streams; TODO confirm.
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        FLAGS.datasource = 'random'  # forced: the 'default' branch is unreachable
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, 1.0, (image_size, image_size, 3))
        return im_corrupt, im, label
class Svhn(Dataset):
    """SVHN wrapper returning (random_init, dequantized_image, one_hot_label).

    Images are HWC floats in [0, 1] with uniform dequantization noise.
    """
    def __init__(self, FLAGS, train=True, augment=False):
        transform = transforms.ToTensor()
        self.FLAGS = FLAGS
        # Bug fix: the `train` argument used to be ignored (torchvision's SVHN
        # defaults to split='train'); select the split explicitly.
        self.data = SVHN(os.path.join(os.environ['HOME'], 'project/research/data/dataset', "svhn"),
                         split='train' if train else 'test',
                         transform=transform, download=True)
        self.one_hot_map = np.eye(10)
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        FLAGS = self.FLAGS
        FLAGS.single = False  # forced off: the else-branch below is dead code
        if not FLAGS.single:
            im, label = self.data[index]
        else:
            # Bug fix: this branch previously unpacked into `em`, leaving `im`
            # unbound and crashing with NameError when FLAGS.single was set.
            im, label = self.data[0]
        im = np.transpose(im, (1, 2, 0)).numpy()  # CHW tensor -> HWC array
        image_size = 32
        label = self.one_hot_map[label]
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)  # dequantization noise
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        FLAGS.datasource = 'random'  # forced: the 'default' branch is unreachable
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, 1.0, (image_size, image_size, 3))
        return im_corrupt, im, label
class Mnist(Dataset):
    """MNIST wrapper returning (random_init, dequantized_image, one_hot_label).

    Images are 28x28 floats in [0, rescale] with uniform dequantization noise.
    """
    def __init__(self, train=True, rescale=1.0):
        self.data = MNIST(
            "/root/mnist",
            transform=transforms.ToTensor(),
            download=True, train=train)
        self.labels = np.eye(10)
        self.rescale = rescale
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        # Bug fix: `self.FLAGS` is never assigned in __init__, so the old
        # `FLAGS = self.FLAGS` raised AttributeError on every access. Use the
        # instance attribute when a caller has set it, else the module-level
        # flags object.
        FLAGS = getattr(self, 'FLAGS', globals()['FLAGS'])
        FLAGS.single = False
        im, label = self.data[index]
        label = self.labels[label]
        im = im.squeeze()
        # Map [0,1] onto the 255/256 grid, add dequantization noise, rescale.
        im = im.numpy() / 256 * 255 + np.random.uniform(0, 1. / 256, (28, 28))
        im = im * self.rescale
        image_size = 28
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(0, self.rescale, (28, 28))
        return im_corrupt, im, label
class DSprites(Dataset):
    """dSprites wrapper that restricts the dataset to a latent-factor subset.

    The d*_only flags fix all latent factors except one and tile the few
    remaining images to enlarge the epoch; the cond_* constructor arguments
    select which latent column is exposed as the label.
    """
    def __init__(
        self,
        cond_size=False,
        cond_shape=False,
        cond_pos=False,
        cond_rot=False):
        dat = np.load(FLAGS.dsprites_path)
        # Latent columns: 1=shape, 2=scale, 3=orientation, 4=posX, 5=posY
        # -- inferred from the slices below; TODO confirm against the
        # dsprites archive metadata.
        if FLAGS.dshape_only:
            # Vary shape only; all other factors pinned.
            l = dat['latents_values']
            mask = (l[:, 4] == 16 / 31) & (l[:, 5] == 16 /
                                           31) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = self.label[:, 1:2]
        elif FLAGS.dpos_only:
            # Vary position only.
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            mask = (l[:, 1] == 1) & (
                l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = self.label[:, 4:] + 0.5
        elif FLAGS.dsize_only:
            # Vary scale only.
            l = dat['latents_values']
            # mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
            mask = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16 /
                                                   31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (10000, 1))
            self.label = (self.label[:, 2:3])
        elif FLAGS.drot_only:
            # Vary rotation only; label is the (cos, sin) of the angle.
            l = dat['latents_values']
            mask = (l[:, 2] == 0.5) & (l[:, 4] == 16 /
                                       31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
            self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
            self.label = np.tile(dat['latents_values'][mask], (100, 1))
            self.label = (self.label[:, 3:4])
            self.label = np.concatenate(
                [np.cos(self.label), np.sin(self.label)], axis=1)
        elif FLAGS.dsprites_restrict:
            l = dat['latents_values']
            mask = (l[:, 1] == 1) & (l[:, 3] == 0 * np.pi / 39)
            self.data = dat['imgs'][mask]
            self.label = dat['latents_values'][mask]
        else:
            # Full dataset; label column chosen by the cond_* kwargs.
            self.data = dat['imgs']
            self.label = dat['latents_values']
            if cond_size:
                self.label = self.label[:, 2:3]
            elif cond_shape:
                self.label = self.label[:, 1:2]
            elif cond_pos:
                self.label = self.label[:, 4:]
            elif cond_rot:
                self.label = self.label[:, 3:4]
                self.label = np.concatenate(
                    [np.cos(self.label), np.sin(self.label)], axis=1)
            else:
                self.label = self.label[:, 1:2]
        self.identity = np.eye(3)  # one-hot map for the 3 shape classes
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, index):
        # NOTE(review): self.FLAGS is never assigned in __init__ (which uses
        # the module-level FLAGS), so this raises AttributeError unless a
        # caller sets it externally -- confirm.
        FLAGS = self.FLAGS
        FLAGS.single = False
        im = self.data[index]
        image_size = 64
        # NOTE(review): cond_size/cond_pos/cond_rot are __init__ keyword
        # arguments, not flags registered in this module -- FLAGS.cond_* only
        # exists if defined elsewhere.
        if not (
                FLAGS.dpos_only or FLAGS.dsize_only) and (
                not FLAGS.cond_size) and (
                not FLAGS.cond_pos) and (
                not FLAGS.cond_rot) and (
                not FLAGS.drot_only):
            # Shape labels are 1-based; convert to one-hot.
            label = self.identity[self.label[index].astype(
                np.int32) - 1].squeeze()
        else:
            label = self.label[index]
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
        elif FLAGS.datasource == 'random':
            im_corrupt = 0.5 + 0.5 * np.random.randn(image_size, image_size)
        return im_corrupt, im, label
class Imagenet(Dataset):
    """Downsampled (32x32) ImageNet loaded from pickled batch files.

    Train mode concatenates train_data_batch_1..10; eval mode loads
    val_data. Returns (random_init, dequantized_image, one_hot_label).
    """
    def __init__(self, train=True, augment=False):
        if train:
            for i in range(1, 11):
                f = pickle.load(
                    open(
                        osp.join(
                            FLAGS.imagenet_path,
                            'train_data_batch_{}'.format(i)),
                        'rb'))
                if i == 1:
                    labels = f['labels']
                    data = f['data']
                else:
                    # Append this batch to the running arrays.
                    labels.extend(f['labels'])
                    data = np.vstack((data, f['data']))
        else:
            f = pickle.load(
                open(
                    osp.join(
                        FLAGS.imagenet_path,
                        'val_data'),
                    'rb'))
            labels = f['labels']
            data = f['data']
        self.labels = labels
        self.data = data
        self.one_hot_map = np.eye(1000)
    def __len__(self):
        return self.data.shape[0]
    def __getitem__(self, index):
        # NOTE(review): self.FLAGS is never assigned in __init__ (which uses
        # the module-level FLAGS), so this raises AttributeError unless a
        # caller sets it externally -- confirm.
        FLAGS = self.FLAGS
        FLAGS.single = False
        if not FLAGS.single:
            im, label = self.data[index], self.labels[index]
        else:
            im, label = self.data[0], self.labels[0]
        label -= 1  # labels are stored 1-based
        im = im.reshape((3, 32, 32)) / 255
        im = im.transpose((1, 2, 0))  # CHW -> HWC
        image_size = 32
        label = self.one_hot_map[label]
        im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)  # dequantization noise
        np.random.seed((index + int(time.time() * 1e7)) % 2**32)
        if FLAGS.datasource == 'default':
            im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
        elif FLAGS.datasource == 'random':
            im_corrupt = np.random.uniform(
                0.0, 1.0, (image_size, image_size, 3))
        return im_corrupt, im, label
class Textures(Dataset):
    """Describable-Textures images cropped to the top-left 32x32 patch.

    The folder is traversed twice per epoch (len is doubled and indices
    wrap). Returns (image, image, label) with dequantization noise.
    """
    def __init__(self, train=True, augment=False):
        self.dataset = ImageFolder("/mnt/nfs/yilundu/data/dtd/images")
    def __len__(self):
        return 2 * len(self.dataset)
    def __getitem__(self, index):
        wrapped = index % len(self.dataset)
        image, label = self.dataset[wrapped]
        patch = np.array(image)[:32, :32] / 255
        patch = patch + np.random.uniform(-1 / 512, 1 / 512, patch.shape)
        return patch, patch, label
# ---- JEMPP-master/Task/calibration.py ----
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def expected_calibration_error(predictions, truths, confidences, bin_size=0.1, title='demo'):
    """Expected Calibration Error: the data-fraction-weighted sum of
    |avg_confidence - accuracy| over equal-width confidence bins."""
    bin_uppers = np.arange(bin_size, 1 + bin_size, bin_size)
    ece = 0
    for upper in bin_uppers:
        acc, frac, mean_conf = compute_accuracy(upper - bin_size, upper, confidences, predictions, truths)
        ece += abs(mean_conf - acc) * frac
    return ece
def reliability_diagrams(predictions, truths, confidences, bin_size=0.1, title='demo', args=None):
    """Plot reliability diagrams (estimated vs. empirical probability).

    Prints per-bin statistics and the ECE, shows a line-style and a
    bar-style diagram, and saves the latter next to args.load_path when
    that is set. `confidences` must be a list (random.sample needs a
    sequence). Relies on module-level `np`, `plt` and `random`.
    """
    upper_bounds = np.arange(bin_size, 1 + bin_size, bin_size)
    accs = []
    # Compute empirical probability for each bin
    conf_x = []
    ece = 0
    for conf_thresh in upper_bounds:
        acc, perc_pred, avg_conf = compute_accuracy(conf_thresh - bin_size, conf_thresh, confidences, predictions, truths)
        conf_x.append(avg_conf)
        accs.append(acc)
        temp = abs(avg_conf - acc) * perc_pred
        print('m %.2f, B_m %d, acc(B_m) %.4f, conf = %.4f, |B_m||acc(B_m) - conf(B_m)|/n = %.5f' % (conf_thresh, int(perc_pred * len(predictions)), acc, avg_conf, temp))
        ece += temp
    # Produce error bars for each bin
    # NOTE(review): a single iteration, and random.sample draws WITHOUT
    # replacement (a permutation), so this is not a true bootstrap -- confirm
    # whether the error bars are meaningful.
    upper_bound_to_bootstrap_est = {x: [] for x in upper_bounds}
    for i in range(1):
        # Generate bootstrap
        boot_strap_outcomes = []
        boot_strap_confs = random.sample(confidences, len(confidences))
        # Simulate outcomes: each sample is "correct" with prob = its confidence.
        for samp_conf in boot_strap_confs:
            correct = 0
            if random.random() < samp_conf:
                correct = 1
            boot_strap_outcomes.append(correct)
        # Compute error frequency in each bin
        for upper_bound in upper_bounds:
            conf_thresh_upper = upper_bound
            conf_thresh_lower = upper_bound - bin_size
            filtered_tuples = [x for x in zip(boot_strap_outcomes, boot_strap_confs) if x[1] > conf_thresh_lower and x[1] <= conf_thresh_upper]
            correct = len([x for x in filtered_tuples if x[0] == 1])
            acc = float(correct) / len(filtered_tuples) if len(filtered_tuples) > 0 else 0
            upper_bound_to_bootstrap_est[upper_bound].append(acc)
    # 95% interval endpoints of the simulated per-bin accuracies.
    upper_bound_to_bootstrap_upper_bar = {}
    upper_bound_to_bootstrap_lower_bar = {}
    for upper_bound, freqs in upper_bound_to_bootstrap_est.items():
        top_95_quintile_i = int(0.975 * len(freqs))
        lower_5_quintile_i = int(0.025 * len(freqs))
        upper_bar = sorted(freqs)[top_95_quintile_i]
        lower_bar = sorted(freqs)[lower_5_quintile_i]
        upper_bound_to_bootstrap_upper_bar[upper_bound] = upper_bar
        upper_bound_to_bootstrap_lower_bar[upper_bound] = lower_bar
    upper_bars = []
    lower_bars = []
    for i, upper_bound in enumerate(upper_bounds):
        if upper_bound_to_bootstrap_upper_bar[upper_bound] == 0:
            upper_bars.append(0)
            lower_bars.append(0)
        else:
            # The error bar arguments need to be the distance from the data point, not the y-value
            upper_bars.append(abs(conf_x[i] - upper_bound_to_bootstrap_upper_bar[upper_bound]))
            lower_bars.append(abs(conf_x[i] - upper_bound_to_bootstrap_lower_bar[upper_bound]))
    # sns.set(font_scale=2)
    fig, ax = plt.subplots()
    ax.errorbar(conf_x, conf_x, label="Perfect classifier calibration")
    # Keep only bins whose simulated interval is non-degenerate.
    new_conf_x = []
    new_accs = []
    for i, bars in enumerate(zip(lower_bars, upper_bars)):
        if bars[0] == 0 and bars[1] == 0:
            continue
        new_conf_x.append(conf_x[i])
        new_accs.append(accs[i])
    print("ECE: %g" % ece)
    # Line-style reliability diagram.
    ax.plot(new_conf_x, new_accs, '-o', label="Accuracy", color="red")
    ax.set_ylim([0, 1])
    ax.set_xlim([0, 1])
    plt.title(title + " ECE: %.2f%%" % (ece * 100))
    plt.ylabel('Empirical probability')
    plt.xlabel('Estimated probability')
    plt.show()
    plt.close()
    # Bar-style reliability diagram (optionally saved to disk).
    fig, ax = plt.subplots()
    ax.errorbar([0, 1], [0, 1], label="Perfect classifier calibration")
    # ax.plot(new_conf_x, new_accs, '-o', label="Accuracy", color="black")
    ax.bar(upper_bounds - 0.025, accs, width=bin_size, label="Accuracy", color="red", edgecolor='gray', align='center')
    ax.set_ylim([0, 1])
    ax.set_xlim([0, 1])
    plt.title(title + " ECE: %.1f%%" % (ece * 100), fontsize=20)
    plt.ylabel('Empirical probability', fontsize=20)
    plt.xlabel('Estimated probability', fontsize=16)
    # fig.savefig("reliability.tif", format='tif', bbox_inches='tight', dpi=1200)
    if args is not None and args.load_path:
        fig.savefig(args.load_path + "_calibration.png")
        # fig.savefig(args.load_path + "_calibration.eps", format='eps', bbox_inches='tight', dpi=1200)
    plt.show()
    plt.close()
def compute_accuracy(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
    """Accuracy, data fraction, and mean confidence over the predictions whose
    confidence lies in (conf_thresh_lower, conf_thresh_upper]."""
    in_bin = [(p, t, c) for p, t, c in zip(pred, true, conf)
              if conf_thresh_lower < c <= conf_thresh_upper]
    if not in_bin:
        return 0, 0, 0
    n_correct = len([1 for p, t, _ in in_bin if p == t])
    avg_conf = sum(c for _, _, c in in_bin) / len(in_bin)
    accuracy = float(n_correct) / len(in_bin)
    perc_of_data = float(len(in_bin)) / len(conf)
    return accuracy, perc_of_data, avg_conf
def compute_accuracy2(conf_thresh_lower, conf_thresh_upper, conf, pred, true):
    """Per-class (balanced) accuracy over the predictions whose confidence lies
    in (conf_thresh_lower, conf_thresh_upper], plus the fraction of data in the
    bin and its mean confidence.

    Prints a per-class breakdown as a side effect. Returns (0, 0, 0) when the
    bin is empty.
    """
    # Bug fix: class labels are 0-based, so `max(true)` undercounted the number
    # of classes by one and the highest class was skipped entirely.
    num_classes = max(true) + 1
    filtered_tuples = [x for x in zip(pred, true, conf) if x[2] > conf_thresh_lower and x[2] <= conf_thresh_upper]
    if len(filtered_tuples) < 1:
        return 0, 0, 0
    else:
        acc = []
        for i in range(num_classes):
            predict = len([x for x in filtered_tuples if x[0] == i])
            category = len([x for x in filtered_tuples if x[1] == i])
            correct = len([x for x in filtered_tuples if x[0] == i and x[0] == x[1]])
            # Classes absent from this bin contribute 0 accuracy.
            if category == 0:
                accuracy = 0
            else:
                accuracy = float(correct) / category
            acc.append(accuracy)
            print("category %d: predict num: %d, ground truth num: %d, correct: %d, %.4f" % (i, predict, category, correct, accuracy))
        avg_conf = sum([x[2] for x in filtered_tuples]) / len(filtered_tuples)
        perc_of_data = float(len(filtered_tuples)) / len(conf)
        accuracy = sum(acc) / num_classes
        return accuracy, perc_of_data, avg_conf
class ECELoss(nn.Module):
    """
    Expected Calibration Error of a model.

    The input to this loss is the logits of a model, NOT the softmax scores.
    Confidences are split into equally-sized interval bins; in each bin the
    gap |avg_confidence_in_bin - accuracy_in_bin| is computed, and the bins'
    gaps are averaged weighted by the number of samples they contain.
    See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
    "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI.
    2015.
    """
    def __init__(self, n_bins=15):
        """
        n_bins (int): number of confidence interval bins
        """
        super(ECELoss, self).__init__()
        edges = torch.linspace(0, 1, n_bins + 1)
        self.bin_lowers = edges[:-1]
        self.bin_uppers = edges[1:]
    def forward(self, logits, labels):
        probs = F.softmax(logits, dim=1)
        confidences, predictions = probs.max(1)
        correct = predictions.eq(labels)
        ece = torch.zeros(1, device=logits.device)
        for lo, hi in zip(self.bin_lowers, self.bin_uppers):
            # Samples whose confidence falls into (lo, hi].
            in_bin = confidences.gt(lo.item()) * confidences.le(hi.item())
            weight = in_bin.float().mean()
            if weight.item() > 0:
                gap = torch.abs(confidences[in_bin].mean() - correct[in_bin].float().mean())
                ece = ece + gap * weight
        return ece
# ---- JEMPP-master/Task/eval_buffer.py ----
import os
import torch as t
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
def norm_ip(img, min, max):
    """Clamp `img` to [min, max] and rescale it to [0, 1] (epsilon-guarded)."""
    clamped = t.clamp(img, min=min, max=max)
    rescaled = (clamped + -min) / (max - min + 1e-5)
    return rescaled
def eval_fid(f, replay_buffer, args):
    """Compute the FID between replay-buffer samples and the real test set.

    replay_buffer: a tensor of images in [-1, 1], or a list/tuple whose first
    element is such a tensor. args must carry `dataset` and `batch_size`.
    Prints and returns the FID.
    """
    # NOTE(review): get_inception_score is imported but never used here.
    from Task.inception import get_inception_score
    from Task.fid import get_fid_score
    if isinstance(replay_buffer, list):
        images = replay_buffer[0]
    elif isinstance(replay_buffer, tuple):
        images = replay_buffer[0]
    else:
        images = replay_buffer
    # Map buffer samples from [-1, 1] to HWC uint8-range arrays.
    feed_imgs = []
    for i, img in enumerate(images):
        n_img = norm_ip(img, -1, 1)
        new_img = n_img.cpu().numpy().transpose(1, 2, 0) * 255
        feed_imgs.append(new_img)
    feed_imgs = np.stack(feed_imgs)
    # Select the matching real dataset (check cifar100 before cifar!).
    if 'cifar100' in args.dataset:
        from Task.data import Cifar100
        test_dataset = Cifar100(args, augment=False)
    elif 'cifar' in args.dataset:
        from Task.data import Cifar10
        test_dataset = Cifar10(args, full=True, noise=False)
    elif 'svhn' in args.dataset:
        from Task.data import Svhn
        test_dataset = Svhn(args, augment=False)
    else:
        assert False, 'dataset %s' % args.dataset
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=0, shuffle=True, drop_last=False)
    test_ims = []
    def rescale_im(im):
        # [0, 1] floats -> [0, 255] uint8.
        return np.clip(im * 256, 0, 255).astype(np.uint8)
    for data_corrupt, data, label_gt in tqdm(test_dataloader):
        data = data.numpy()
        test_ims.extend(list(rescale_im(data)))
    # FID score
    # n = min(len(images), len(test_ims))
    fid = get_fid_score(feed_imgs, test_ims)
    print("FID of score {}".format(fid))
    return fid
# ---- JEMPP-master/models/jem_models.py ----
import torch as t
import torch.nn as nn
from models import wideresnet
import models
from models import wideresnet_yopo
im_sz = 32  # default square image resolution (init_random uses 64 for tinyimagenet)
n_ch = 3  # number of image channels (RGB)
class F(nn.Module):
    """Wide-ResNet backbone with two heads: a scalar energy head and an
    n_classes logit head (JEM-style energy-based classifier).
    """
    def __init__(self, depth=28, width=2, norm=None, dropout_rate=0.0, n_classes=10, model='wrn', args=None):
        super(F, self).__init__()
        # default, wrn; 'yopo' selects the YOPO wide-resnet variant
        self.norm = norm
        if model == 'yopo':
            self.f = wideresnet_yopo.Wide_ResNet(depth, width, norm=norm, dropout_rate=dropout_rate)
        else:
            self.f = wideresnet.Wide_ResNet(depth, width, norm=norm, dropout_rate=dropout_rate)
        self.energy_output = nn.Linear(self.f.last_dim, 1)
        self.class_output = nn.Linear(self.f.last_dim, n_classes)
    def feature(self, x):
        """Penultimate feature vector of the backbone."""
        penult_z = self.f(x, feature=True)
        return penult_z
    def forward(self, x, y=None):
        """Scalar energy for each input (y is accepted but ignored here)."""
        penult_z = self.f(x, feature=True)
        return self.energy_output(penult_z).squeeze()
    def classify(self, x):
        """Class logits for each input."""
        penult_z = self.f(x, feature=True)
        output = self.class_output(penult_z).squeeze()
        return output
class CCF(F):
    """Class-conditional energy model: the energy comes from the class
    logits (logsumexp over classes, or the chosen class's logit when y is
    given) instead of the separate energy head.
    """
    def __init__(self, depth=28, width=2, norm=None, dropout_rate=0.0, n_classes=10, model='wrn', args=None):
        super(CCF, self).__init__(depth, width, norm=norm, dropout_rate=dropout_rate, n_classes=n_classes, model=model, args=args)
    def forward(self, x, y=None):
        logits = self.classify(x)
        if y is None:
            v = logits.logsumexp(1)
            # print("log sum exp", v)
            return v
        else:
            # Energy of the given label: gather its logit column.
            return t.gather(logits, 1, y[:, None])
def init_random(args, bs):
    """Draw `bs` uniform [-1, 1] images sized for args.dataset
    (64px for tinyimagenet, 32px otherwise; n_ch channels)."""
    side = 64 if args.dataset == 'tinyimagenet' else 32
    return t.FloatTensor(bs, n_ch, side, side).uniform_(-1, 1)
def get_model_and_buffer(args, device):
    """Build the (un)conditional energy model and its replay buffer.

    Loads weights and buffer from args.load_path when given, otherwise
    initialises a fresh uniform-noise buffer of args.buffer_size samples.
    Returns (model_on_device, replay_buffer).
    """
    model_cls = F if args.uncond else CCF
    f = model_cls(args.depth, args.width, args.norm, dropout_rate=args.dropout_rate, n_classes=args.n_classes, model=args.model)
    if not args.uncond:
        # Conditional buffers are split evenly across classes.
        assert args.buffer_size % args.n_classes == 0, "Buffer size must be divisible by args.n_classes"
    if args.load_path is None:
        # make replay buffer
        replay_buffer = init_random(args, args.buffer_size)
    else:
        print(f"loading model from {args.load_path}")
        ckpt_dict = t.load(args.load_path)
        f.load_state_dict(ckpt_dict["model_state_dict"])
        replay_buffer = ckpt_dict["replay_buffer"]
    f = f.to(device)
    return f, replay_buffer
# ---- JEMPP-master/models/norms.py ----
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
class ConditionalInstanceNorm2dPlus(nn.Module):
    """Class-conditional "InstanceNorm++".

    On top of plain (affine-free) instance norm, the per-channel means --
    themselves normalized across channels -- are re-injected, scaled by a
    learned class-conditional `alpha`, alongside the usual conditional
    gamma scale and (optionally) beta shift.
    """
    def __init__(self, num_features, num_classes, bias=True):
        super().__init__()
        self.num_features = num_features
        self.bias = bias
        self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False)
        if bias:
            # Per class: gamma and alpha (scales), then beta (shift).
            self.embed = nn.Embedding(num_classes, num_features * 3)
            self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02)  # Initialise scale at N(1, 0.02)
            self.embed.weight.data[:, 2 * num_features:].zero_()  # Initialise bias at 0
        else:
            self.embed = nn.Embedding(num_classes, 2 * num_features)
            self.embed.weight.data.normal_(1, 0.02)
    def forward(self, x, y):
        # Per-sample per-channel means, then normalize them across channels.
        means = torch.mean(x, dim=(2, 3))
        m = torch.mean(means, dim=-1, keepdim=True)
        v = torch.var(means, dim=-1, keepdim=True)
        means = (means - m) / (torch.sqrt(v + 1e-5))
        h = self.instance_norm(x)
        if self.bias:
            gamma, alpha, beta = self.embed(y).chunk(3, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1)
        else:
            gamma, alpha = self.embed(y).chunk(2, dim=-1)
            h = h + means[..., None, None] * alpha[..., None, None]
            out = gamma.view(-1, self.num_features, 1, 1) * h
        return out
class ConditionalActNorm(nn.Module):
    """Class-conditional ActNorm: per-class scale/bias looked up from an embedding.

    On the first forward pass the embedding is data-dependently initialized so
    every class starts from the transform that whitens that batch per channel.
    """
    def __init__(self, num_features, num_classes):
        super().__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        # Row y holds [scale (num_features) | bias (num_features)] for class y.
        self.embed = nn.Embedding(num_classes, num_features * 2)
        self.embed.weight.data.zero_()
        # NOTE(review): plain attribute, not a registered buffer, so this flag is
        # not saved in state_dict — a reloaded model re-initializes on its first batch.
        self.init = False
    def forward(self, x, y):
        if self.init:
            scale, bias = self.embed(y).chunk(2, dim=-1)
            return x * scale[:, :, None, None] + bias[:, :, None, None]
        else:
            # Data-dependent init from the first batch's per-channel statistics.
            m, v = torch.mean(x, dim=(0, 2, 3)), torch.var(x, dim=(0, 2, 3))
            std = torch.sqrt(v + 1e-5)
            scale_init = 1. / std
            bias_init = -1. * m / std
            # Every class starts from the same whitening transform.
            self.embed.weight.data[:, :self.num_features] = scale_init[None].repeat(self.num_classes, 1)
            self.embed.weight.data[:, self.num_features:] = bias_init[None].repeat(self.num_classes, 1)
            self.init = True
            return self(x, y)  # re-dispatch through the initialized path
logabs = lambda x: torch.log(torch.abs(x))
class ActNorm(nn.Module):
    """Activation normalization (Glow-style): per-channel affine transform with
    data-dependent initialization on the first batch, then trained normally.
    """
    def __init__(self, in_channel, logdet=True):
        super().__init__()
        self.loc = nn.Parameter(torch.zeros(1, in_channel, 1, 1))
        self.scale = nn.Parameter(torch.ones(1, in_channel, 1, 1))
        # Buffer (not parameter) so the "already initialized" flag survives checkpoints.
        self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
        self.logdet = logdet
    def initialize(self, input):
        # Per-channel mean/std over batch and spatial dims; no grad through init.
        with torch.no_grad():
            flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
            mean = (
                flatten.mean(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            std = (
                flatten.std(1)
                .unsqueeze(1)
                .unsqueeze(2)
                .unsqueeze(3)
                .permute(1, 0, 2, 3)
            )
            # Chosen so the first batch comes out ~zero-mean/unit-variance.
            self.loc.data.copy_(-mean)
            self.scale.data.copy_(1 / (std + 1e-6))
    def forward(self, input):
        _, _, height, width = input.shape
        if self.initialized.item() == 0:
            self.initialize(input)
            self.initialized.fill_(1)
        log_abs = logabs(self.scale)
        # log|det J| of the per-pixel affine map: H * W * sum_c log|scale_c|.
        logdet = height * width * torch.sum(log_abs)
        if self.logdet:
            # Returns a (output, logdet) pair when logdet tracking is enabled.
            return self.scale * (input + self.loc), logdet
        else:
            return self.scale * (input + self.loc)
    def reverse(self, output):
        # Exact inverse of the forward affine transform.
        return output / self.scale - self.loc
class ContinuousConditionalActNorm(nn.Module):
    """ActNorm whose per-channel scale/bias are predicted from a continuous
    scalar condition by a small MLP (instead of a per-class embedding)."""
    def __init__(self, num_features, num_classes):
        super().__init__()
        del num_classes  # unused; kept for signature parity with the class-conditional variants
        self.num_features = num_features
        mlp_layers = [
            nn.Linear(1, 256),
            nn.ELU(inplace=True),
            nn.Linear(256, 256),
            nn.ELU(inplace=True),
            nn.Linear(256, self.num_features * 2),
        ]
        self.embed = nn.Sequential(*mlp_layers)
    def forward(self, x, y):
        params = self.embed(y.unsqueeze(-1))
        scale, bias = params.chunk(2, dim=-1)
        return x * scale[:, :, None, None] + bias[:, :, None, None]
class Identity(nn.Module):
    """No-op module; stands in for a norm layer when normalization is disabled."""
    def __init__(self):
        super(Identity, self).__init__()
    def forward(self, x):
        # Pass the input through unchanged.
        return x
def get_norm(n_filters, norm):
    """Map a normalization name to a torch module for ``n_filters`` channels.

    Recognized names: None/'none', 'batch', 'instance', 'layer', 'act'.
    """
    if norm is None or norm.lower() == 'none':
        return Identity()
    if norm == "batch":
        return nn.BatchNorm2d(n_filters, momentum=0.9)
    if norm == "instance":
        return nn.InstanceNorm2d(n_filters, affine=True)
    if norm == "layer":
        return nn.GroupNorm(1, n_filters)
    if norm == "act":
        return ActNorm(n_filters, False)
    # NOTE(review): unrecognized names silently become a no-op — confirm intended.
    return Identity()
| 5,894 | 33.881657 | 107 | py |
JEMPP | JEMPP-master/models/wideresnet_yopo.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
from .norms import get_norm, Identity
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and a bias term."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3,
                     stride=stride, padding=1, bias=True)
def conv_init(m):
    """Weight-init hook for ``module.apply``: Xavier-init conv weights with
    gain sqrt(2) and zero biases; unit scale / zero shift for BatchNorm.

    Fixed to use the in-place ``xavier_uniform_`` / ``constant_`` initializers;
    the underscore-less variants are deprecated and removed in modern PyTorch.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Pre-activation WRN basic block: two (norm -> LeakyReLU -> conv) stages
    plus a 1x1 projection shortcut when the residual path changes shape."""
    def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None, leak=.2):
        super(wide_basic, self).__init__()
        self.lrelu = nn.LeakyReLU(leak)
        self.bn1 = get_norm(in_planes, norm)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        # Dropout is a no-op module when the rate is zero.
        self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=dropout_rate)
        self.bn2 = get_norm(planes, norm)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Shapes differ: project the skip connection with a strided 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        h = self.dropout(self.conv1(self.lrelu(self.bn1(x))))
        h = self.conv2(self.lrelu(self.bn2(h)))
        return h + self.shortcut(x)
class Wide_ResNet(nn.Module):
    """Wide ResNet feature extractor, YOPO variant.

    Keeps a handle on the first conv's output (``layer_one_out``) with
    gradients retained, and groups all later layers in ``other_layers`` —
    structure used by the YOPO training scheme to split first-layer and
    remaining-network gradient computation.  Returns pooled features
    (no classifier head; the final linear layer is commented out).
    """
    def __init__(self, depth, widen_factor, num_classes=10, input_channels=3,
                 sum_pool=False, norm=None, leak=.2, dropout_rate=0.0):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        self.sum_pool = sum_pool
        self.norm = norm
        self.lrelu = nn.LeakyReLU(leak)
        self.n_classes = num_classes
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
        k = widen_factor
        print('| Wide-Resnet %dx%d yopo' % (depth, k))
        nStages = [16, 16 * k, 32 * k, 64 * k]
        self.layer_one_out = None
        self.conv1 = conv3x3(input_channels, nStages[0])
        # YOPO splits the net into the first layer and everything else.
        self.layer_one = self.conv1
        self.other_layers = nn.ModuleList()
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.other_layers.append(self.layer1)
        self.other_layers.append(self.layer2)
        self.other_layers.append(self.layer3)
        self.bn1 = get_norm(nStages[3], self.norm)
        self.other_layers.append(self.bn1)
        self.last_dim = nStages[3]
        # self.linear = nn.Linear(nStages[3], num_classes)
        # self.other_layers.append(self.linear)
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        # First block of each stage carries the stride; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride, norm=self.norm))
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x, vx=None, feature=True):
        # ``vx``/``feature`` are unused here; kept for interface compatibility.
        out = self.conv1(x)
        # for YOPO
        # Keep the first-layer activation and retain its gradient so YOPO can
        # read/inject gradients at this cut point.
        self.layer_one_out = out
        self.layer_one_out.requires_grad_()
        self.layer_one_out.retain_grad()
        # for YOPO
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.lrelu(self.bn1(out))
        if self.sum_pool:
            out = out.view(out.size(0), out.size(1), -1).sum(2)
        else:
            # assumes 32x32 inputs (8x8 feature map at this depth) — TODO confirm
            out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        return out
| 4,536 | 35.58871 | 98 | py |
JEMPP | JEMPP-master/models/wideresnet.py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from .norms import get_norm, Identity
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
    """Biased 3x3 convolution, padding 1 (spatial size preserved at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
def conv_init(m):
    """Weight-init hook for ``module.apply``: Xavier-init conv weights with
    gain sqrt(2) and zero biases; unit scale / zero shift for BatchNorm.

    Fixed to use the in-place ``xavier_uniform_`` / ``constant_`` initializers;
    the underscore-less variants are deprecated and removed in modern PyTorch.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        init.constant_(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant_(m.weight, 1)
        init.constant_(m.bias, 0)
class wide_basic(nn.Module):
    """Pre-activation WRN basic block: two (norm -> LeakyReLU -> conv) stages
    with optional dropout after the first conv, plus a 1x1 projection shortcut
    whenever stride or channel count changes.
    """
    def __init__(self, in_planes, planes, dropout_rate, stride=1, norm=None, leak=.2):
        super(wide_basic, self).__init__()
        self.norm = norm
        self.lrelu = nn.LeakyReLU(leak)
        self.bn1 = get_norm(in_planes, norm)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        # Dropout degenerates to a no-op module at rate 0.
        self.dropout = Identity() if dropout_rate == 0.0 else nn.Dropout(p=dropout_rate)
        self.bn2 = get_norm(planes, norm)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            # Residual path changes shape: project the skip with a strided 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
            )
    def forward(self, x):
        out = self.bn1(x)
        out = self.dropout(self.conv1(self.lrelu(out)))
        out = self.bn2(out)
        out = self.conv2(self.lrelu(out))
        out += self.shortcut(x)
        return out
class Wide_ResNet(nn.Module):
    """Wide ResNet (depth = 6n+4, width k) feature extractor.

    ``forward`` returns pooled penultimate features; the classifier head
    (``self.linear``) is commented out in ``__init__``.
    """
    def __init__(self, depth, widen_factor, num_classes=10, input_channels=3,
                 sum_pool=False, norm=None, leak=.2, dropout_rate=0.0):
        super(Wide_ResNet, self).__init__()
        self.leak = leak
        self.in_planes = 16
        self.sum_pool = sum_pool
        self.norm = norm
        self.lrelu = nn.LeakyReLU(leak)
        self.n_classes = num_classes
        assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6
        k = widen_factor
        print('| Wide-Resnet %dx%d' % (depth, k))
        nStages = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(input_channels, nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1, leak=leak)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2, leak=leak)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2, leak=leak)
        self.bn1 = get_norm(nStages[3], self.norm)
        self.last_dim = nStages[3]
        # self.linear = nn.Linear(nStages[3], num_classes)
    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, leak=0.2):
        # First block of each stage carries the stride; the rest use stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, dropout_rate, stride, leak=leak, norm=self.norm))
            self.in_planes = planes
        return nn.Sequential(*layers)
    def forward(self, x, logits=False):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.lrelu(self.bn1(out))
        if self.sum_pool:
            out = out.view(out.size(0), out.size(1), -1).sum(2)
        else:
            if self.n_classes > 100:
                # Larger-label datasets may use other input sizes; pool adaptively.
                out = F.adaptive_avg_pool2d(out, 1)
            else:
                out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        if logits:
            # NOTE(review): self.linear is commented out in __init__, so
            # logits=True would raise AttributeError — dead/broken branch.
            out = self.linear(out)
        return out
| 4,325 | 35.974359 | 105 | py |
DiT | DiT-main/sample.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Sample new images from a pre-trained DiT.
"""
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
from torchvision.utils import save_image
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL
from download import find_model
from models import DiT_models
import argparse
def main(args):
    """Sample a grid of class-conditional images from a pre-trained DiT with
    classifier-free guidance and save them to ``sample.png``."""
    # Setup PyTorch:
    torch.manual_seed(args.seed)
    torch.set_grad_enabled(False)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if args.ckpt is None:
        assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
        assert args.image_size in [256, 512]
        assert args.num_classes == 1000
    # Load model:
    # Latents are 8x downsampled relative to pixel space.
    latent_size = args.image_size // 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    ).to(device)
    # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
    ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
    state_dict = find_model(ckpt_path)
    model.load_state_dict(state_dict)
    model.eval()  # important!
    diffusion = create_diffusion(str(args.num_sampling_steps))
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    # Labels to condition the model with (feel free to change):
    class_labels = [207, 360, 387, 974, 88, 979, 417, 279]
    # Create sampling noise (4 latent channels):
    n = len(class_labels)
    z = torch.randn(n, 4, latent_size, latent_size, device=device)
    y = torch.tensor(class_labels, device=device)
    # Setup classifier-free guidance: duplicate the batch, pairing each real
    # label with the null label 1000 (== num_classes) for the unconditional pass.
    z = torch.cat([z, z], 0)
    y_null = torch.tensor([1000] * n, device=device)
    y = torch.cat([y, y_null], 0)
    model_kwargs = dict(y=y, cfg_scale=args.cfg_scale)
    # Sample images:
    samples = diffusion.p_sample_loop(
        model.forward_with_cfg, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=True, device=device
    )
    samples, _ = samples.chunk(2, dim=0)  # Remove null class samples
    # 0.18215 is the latent scaling factor — presumably the SD VAE's; confirm against training.
    samples = vae.decode(samples / 0.18215).sample
    # Save and display images:
    save_image(samples, "sample.png", nrow=4, normalize=True, value_range=(-1, 1))
if __name__ == "__main__":
    # CLI entry point: parse sampling options and run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="mse")
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--cfg-scale", type=float, default=4.0)
    parser.add_argument("--num-sampling-steps", type=int, default=250)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--ckpt", type=str, default=None,
                        help="Optional path to a DiT checkpoint (default: auto-download a pre-trained DiT-XL/2 model).")
    args = parser.parse_args()
    main(args)
| 3,269 | 37.928571 | 120 | py |
DiT | DiT-main/download.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Functions for downloading pre-trained DiT models
"""
from torchvision.datasets.utils import download_url
import torch
import os
pretrained_models = {'DiT-XL-2-512x512.pt', 'DiT-XL-2-256x256.pt'}
def find_model(model_name):
    """
    Finds a pre-trained DiT model, downloading it if necessary. Alternatively, loads a model from a local path.
    """
    if model_name in pretrained_models:
        # One of the published checkpoints: fetch (or reuse) the cached copy.
        return download_model(model_name)
    # Otherwise treat the name as a filesystem path to a custom checkpoint.
    assert os.path.isfile(model_name), f'Could not find DiT checkpoint at {model_name}'
    checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)
    # train.py checkpoints bundle several state dicts; prefer the EMA weights.
    return checkpoint["ema"] if "ema" in checkpoint else checkpoint
def download_model(model_name):
    """
    Downloads a pre-trained DiT model from the web.

    Skips the download when a cached copy already exists under
    ``pretrained_models/``; returns the loaded checkpoint object.
    """
    assert model_name in pretrained_models
    local_path = f'pretrained_models/{model_name}'
    if not os.path.isfile(local_path):
        os.makedirs('pretrained_models', exist_ok=True)
        web_path = f'https://dl.fbaipublicfiles.com/DiT/models/{model_name}'
        download_url(web_path, 'pretrained_models')
    # map_location keeps all tensors on CPU regardless of where they were saved.
    model = torch.load(local_path, map_location=lambda storage, loc: storage)
    return model
if __name__ == "__main__":
    # Download all DiT checkpoints
    # (running this module directly pre-fetches every published checkpoint).
    for model in pretrained_models:
        download_model(model)
    print('Done.')
| 1,713 | 32.607843 | 111 | py |
DiT | DiT-main/sample_ddp.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Samples a large number of images from a pre-trained DiT model using DDP.
Subsequently saves a .npz file that can be used to compute FID and other
evaluation metrics via the ADM repo: https://github.com/openai/guided-diffusion/tree/main/evaluations
For a simple single-GPU/CPU sampling script, see sample.py.
"""
import torch
import torch.distributed as dist
from models import DiT_models
from download import find_model
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL
from tqdm import tqdm
import os
from PIL import Image
import numpy as np
import math
import argparse
def create_npz_from_sample_folder(sample_dir, num=50_000):
    """
    Builds a single .npz file from a folder of .png samples.

    Expects files named 000000.png .. {num-1:06d}.png; returns the .npz path.
    """
    images = [
        np.asarray(Image.open(f"{sample_dir}/{i:06d}.png")).astype(np.uint8)
        for i in tqdm(range(num), desc="Building .npz file from samples")
    ]
    stacked = np.stack(images)
    # All samples must be RGB and share one resolution.
    assert stacked.shape == (num, stacked.shape[1], stacked.shape[2], 3)
    npz_path = f"{sample_dir}.npz"
    np.savez(npz_path, arr_0=stacked)
    print(f"Saved .npz file to {npz_path} [shape={stacked.shape}].")
    return npz_path
def main(args):
    """
    Run sampling.

    Each DDP rank samples its share of ``num_fid_samples`` images with a
    rank-distinct seed, writes them as .png files, and rank 0 finally packs
    everything into one .npz for FID evaluation.
    """
    torch.backends.cuda.matmul.allow_tf32 = args.tf32  # True: fast but may lead to some small numerical differences
    assert torch.cuda.is_available(), "Sampling with DDP requires at least one GPU. sample.py supports CPU-only usage"
    torch.set_grad_enabled(False)
    # Setup DDP:
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    # Distinct seed per rank so ranks draw different noise/labels.
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
    if args.ckpt is None:
        assert args.model == "DiT-XL/2", "Only DiT-XL/2 models are available for auto-download."
        assert args.image_size in [256, 512]
        assert args.num_classes == 1000
    # Load model:
    # Latents are 8x downsampled relative to pixel space.
    latent_size = args.image_size // 8
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    ).to(device)
    # Auto-download a pre-trained model or load a custom DiT checkpoint from train.py:
    ckpt_path = args.ckpt or f"DiT-XL-2-{args.image_size}x{args.image_size}.pt"
    state_dict = find_model(ckpt_path)
    model.load_state_dict(state_dict)
    model.eval()  # important!
    diffusion = create_diffusion(str(args.num_sampling_steps))
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    assert args.cfg_scale >= 1.0, "In almost all cases, cfg_scale be >= 1.0"
    # cfg_scale == 1.0 is equivalent to no guidance, so skip the doubled batch.
    using_cfg = args.cfg_scale > 1.0
    # Create folder to save samples:
    model_string_name = args.model.replace("/", "-")
    ckpt_string_name = os.path.basename(args.ckpt).replace(".pt", "") if args.ckpt else "pretrained"
    folder_name = f"{model_string_name}-{ckpt_string_name}-size-{args.image_size}-vae-{args.vae}-" \
                  f"cfg-{args.cfg_scale}-seed-{args.global_seed}"
    sample_folder_dir = f"{args.sample_dir}/{folder_name}"
    if rank == 0:
        os.makedirs(sample_folder_dir, exist_ok=True)
        print(f"Saving .png samples at {sample_folder_dir}")
    dist.barrier()
    # Figure out how many samples we need to generate on each GPU and how many iterations we need to run:
    n = args.per_proc_batch_size
    global_batch_size = n * dist.get_world_size()
    # To make things evenly-divisible, we'll sample a bit more than we need and then discard the extra samples:
    total_samples = int(math.ceil(args.num_fid_samples / global_batch_size) * global_batch_size)
    if rank == 0:
        print(f"Total number of images that will be sampled: {total_samples}")
    assert total_samples % dist.get_world_size() == 0, "total_samples must be divisible by world_size"
    samples_needed_this_gpu = int(total_samples // dist.get_world_size())
    assert samples_needed_this_gpu % n == 0, "samples_needed_this_gpu must be divisible by the per-GPU batch size"
    iterations = int(samples_needed_this_gpu // n)
    pbar = range(iterations)
    pbar = tqdm(pbar) if rank == 0 else pbar
    total = 0
    for _ in pbar:
        # Sample inputs:
        z = torch.randn(n, model.in_channels, latent_size, latent_size, device=device)
        y = torch.randint(0, args.num_classes, (n,), device=device)
        # Setup classifier-free guidance: pair each label with the null label
        # 1000 (== num_classes) for the unconditional half of the batch.
        if using_cfg:
            z = torch.cat([z, z], 0)
            y_null = torch.tensor([1000] * n, device=device)
            y = torch.cat([y, y_null], 0)
            model_kwargs = dict(y=y, cfg_scale=args.cfg_scale)
            sample_fn = model.forward_with_cfg
        else:
            model_kwargs = dict(y=y)
            sample_fn = model.forward
        # Sample images:
        samples = diffusion.p_sample_loop(
            sample_fn, z.shape, z, clip_denoised=False, model_kwargs=model_kwargs, progress=False, device=device
        )
        if using_cfg:
            samples, _ = samples.chunk(2, dim=0)  # Remove null class samples
        # 0.18215 is the latent scaling factor — presumably the SD VAE's; confirm against training.
        samples = vae.decode(samples / 0.18215).sample
        # Map from [-1, 1] floats to uint8 HWC images on the CPU.
        samples = torch.clamp(127.5 * samples + 128.0, 0, 255).permute(0, 2, 3, 1).to("cpu", dtype=torch.uint8).numpy()
        # Save samples to disk as individual .png files
        for i, sample in enumerate(samples):
            # Interleaved global index so ranks never write the same file.
            index = i * dist.get_world_size() + rank + total
            Image.fromarray(sample).save(f"{sample_folder_dir}/{index:06d}.png")
        total += global_batch_size
    # Make sure all processes have finished saving their samples before attempting to convert to .npz
    dist.barrier()
    if rank == 0:
        create_npz_from_sample_folder(sample_folder_dir, args.num_fid_samples)
        print("Done.")
    dist.barrier()
    dist.destroy_process_group()
if __name__ == "__main__":
    # CLI entry point: parse DDP sampling options and run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="ema")
    parser.add_argument("--sample-dir", type=str, default="samples")
    parser.add_argument("--per-proc-batch-size", type=int, default=32)
    parser.add_argument("--num-fid-samples", type=int, default=50_000)
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--cfg-scale", type=float, default=1.5)
    parser.add_argument("--num-sampling-steps", type=int, default=250)
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--tf32", action=argparse.BooleanOptionalAction, default=True,
                        help="By default, use TF32 matmuls. This massively accelerates sampling on Ampere GPUs.")
    parser.add_argument("--ckpt", type=str, default=None,
                        help="Optional path to a DiT checkpoint (default: auto-download a pre-trained DiT-XL/2 model).")
    args = parser.parse_args()
    main(args)
| 7,411 | 43.383234 | 120 | py |
DiT | DiT-main/models.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
import torch
import torch.nn as nn
import numpy as np
import math
from timm.models.vision_transformer import PatchEmbed, Attention, Mlp
def modulate(x, shift, scale):
    """AdaLN modulation: apply a per-sample scale and shift across the token dim."""
    scaled = x * (1 + scale.unsqueeze(1))
    return scaled + shift.unsqueeze(1)
#################################################################################
# Embedding Layers for Timesteps and Class Labels #
#################################################################################
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )
        self.frequency_embedding_size = frequency_embedding_size
    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.
        :param t: a 1-D Tensor of N indices, one per batch element.
                  These may be fractional.
        :param dim: the dimension of the output.
        :param max_period: controls the minimum frequency of the embeddings.
        :return: an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        exponents = torch.arange(start=0, end=half, dtype=torch.float32) / half
        freqs = torch.exp(-math.log(max_period) * exponents).to(device=t.device)
        phases = t[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
        if dim % 2:
            # Odd dim: pad with one zero column.
            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
        return embedding
    def forward(self, t):
        freq_emb = self.timestep_embedding(t, self.frequency_embedding_size)
        return self.mlp(freq_emb)
class LabelEmbedder(nn.Module):
    """
    Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance.
    """
    def __init__(self, num_classes, hidden_size, dropout_prob):
        super().__init__()
        # When dropout is enabled, reserve one extra row (index == num_classes)
        # as the "null" embedding used for the unconditional pass.
        use_cfg_embedding = dropout_prob > 0
        self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size)
        self.num_classes = num_classes
        self.dropout_prob = dropout_prob
    def token_drop(self, labels, force_drop_ids=None):
        """
        Drops labels to enable classifier-free guidance.
        """
        if force_drop_ids is not None:
            drop_ids = force_drop_ids == 1
        else:
            drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob
        # Dropped labels are replaced by the null index (num_classes).
        return torch.where(drop_ids, self.num_classes, labels)
    def forward(self, labels, train, force_drop_ids=None):
        dropout_enabled = self.dropout_prob > 0
        if (train and dropout_enabled) or (force_drop_ids is not None):
            labels = self.token_drop(labels, force_drop_ids)
        return self.embedding_table(labels)
#################################################################################
# Core DiT Model #
#################################################################################
class DiTBlock(nn.Module):
    """
    A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning.

    The conditioning vector ``c`` is mapped to six per-sample vectors:
    shift/scale/gate for the attention branch and for the MLP branch.
    """
    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, **block_kwargs):
        super().__init__()
        # elementwise_affine=False: scale/shift come from adaLN_modulation instead.
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.attn = Attention(hidden_size, num_heads=num_heads, qkv_bias=True, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True)
        )
    def forward(self, x, c):
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=1)
        # Gated residual branches; gates start at zero via initialize_weights.
        x = x + gate_msa.unsqueeze(1) * self.attn(modulate(self.norm1(x), shift_msa, scale_msa))
        x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2(x), shift_mlp, scale_mlp))
        return x
class FinalLayer(nn.Module):
    """Final DiT layer: one last adaLN modulation followed by a linear
    projection from hidden tokens to patch_size**2 * out_channels values."""
    def __init__(self, hidden_size, patch_size, out_channels):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 2 * hidden_size, bias=True)
        )
    def forward(self, x, c):
        # Only shift/scale here — no gate, unlike the interior blocks.
        shift, scale = self.adaLN_modulation(c).chunk(2, dim=1)
        modulated = modulate(self.norm_final(x), shift, scale)
        return self.linear(modulated)
class DiT(nn.Module):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
num_classes=1000,
learn_sigma=True,
):
super().__init__()
self.learn_sigma = learn_sigma
self.in_channels = in_channels
self.out_channels = in_channels * 2 if learn_sigma else in_channels
self.patch_size = patch_size
self.num_heads = num_heads
self.x_embedder = PatchEmbed(input_size, patch_size, in_channels, hidden_size, bias=True)
self.t_embedder = TimestepEmbedder(hidden_size)
self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob)
num_patches = self.x_embedder.num_patches
# Will use fixed sin-cos embedding:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, hidden_size), requires_grad=False)
self.blocks = nn.ModuleList([
DiTBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio) for _ in range(depth)
])
self.final_layer = FinalLayer(hidden_size, patch_size, self.out_channels)
self.initialize_weights()
def initialize_weights(self):
# Initialize transformer layers:
def _basic_init(module):
if isinstance(module, nn.Linear):
torch.nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
self.apply(_basic_init)
# Initialize (and freeze) pos_embed by sin-cos embedding:
pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.x_embedder.num_patches ** 0.5))
self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float().unsqueeze(0))
# Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
w = self.x_embedder.proj.weight.data
nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
nn.init.constant_(self.x_embedder.proj.bias, 0)
# Initialize label embedding table:
nn.init.normal_(self.y_embedder.embedding_table.weight, std=0.02)
# Initialize timestep embedding MLP:
nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
# Zero-out adaLN modulation layers in DiT blocks:
for block in self.blocks:
nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
# Zero-out output layers:
nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
nn.init.constant_(self.final_layer.linear.weight, 0)
nn.init.constant_(self.final_layer.linear.bias, 0)
def unpatchify(self, x):
"""
x: (N, T, patch_size**2 * C)
imgs: (N, H, W, C)
"""
c = self.out_channels
p = self.x_embedder.patch_size[0]
h = w = int(x.shape[1] ** 0.5)
assert h * w == x.shape[1]
x = x.reshape(shape=(x.shape[0], h, w, p, p, c))
x = torch.einsum('nhwpqc->nchpwq', x)
imgs = x.reshape(shape=(x.shape[0], c, h * p, h * p))
return imgs
def forward(self, x, t, y):
"""
Forward pass of DiT.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N,) tensor of class labels
"""
x = self.x_embedder(x) + self.pos_embed # (N, T, D), where T = H * W / patch_size ** 2
t = self.t_embedder(t) # (N, D)
y = self.y_embedder(y, self.training) # (N, D)
c = t + y # (N, D)
for block in self.blocks:
x = block(x, c) # (N, T, D)
x = self.final_layer(x, c) # (N, T, patch_size ** 2 * out_channels)
x = self.unpatchify(x) # (N, out_channels, H, W)
return x
def forward_with_cfg(self, x, t, y, cfg_scale):
    """
    Forward pass of DiT that batches the conditional and unconditional passes
    for classifier-free guidance. The first half of x is duplicated so one
    forward call produces both branches.
    """
    # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb
    half = x[: len(x) // 2]
    combined = torch.cat([half, half], dim=0)
    model_out = self.forward(combined, t, y)
    # For exact reproducibility reasons, guidance is applied to only the first
    # three channels by default. The standard approach applies it to all
    # channels (use self.in_channels instead of 3 for that).
    eps, rest = model_out[:, :3], model_out[:, 3:]
    cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
    guided_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)
    return torch.cat([torch.cat([guided_eps, guided_eps], dim=0), rest], dim=1)
#################################################################################
# Sine/Cosine Positional Embedding Functions #
#################################################################################
# https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0):
    """
    Build a 2D sine/cosine positional embedding for a square token grid.
    grid_size: int height and width of the grid
    return:
    pos_embed: [grid_size*grid_size, embed_dim], optionally prefixed with
    `extra_tokens` zero rows when cls_token is True.
    """
    coords_h = np.arange(grid_size, dtype=np.float32)
    coords_w = np.arange(grid_size, dtype=np.float32)
    # Note: w varies first here, matching the MAE reference implementation.
    mesh = np.meshgrid(coords_w, coords_h)
    grid = np.stack(mesh, axis=0).reshape([2, 1, grid_size, grid_size])
    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
    if cls_token and extra_tokens > 0:
        prefix = np.zeros([extra_tokens, embed_dim])
        pos_embed = np.concatenate([prefix, pos_embed], axis=0)
    return pos_embed
def get_2d_sincos_pos_embed_from_grid(embed_dim, grid):
    """Encode a (2, ...) coordinate grid, half the channels per axis."""
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    emb_h = get_1d_sincos_pos_embed_from_grid(half, grid[0])  # (H*W, D/2)
    emb_w = get_1d_sincos_pos_embed_from_grid(half, grid[1])  # (H*W, D/2)
    return np.concatenate([emb_h, emb_w], axis=1)  # (H*W, D)
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
    """
    embed_dim: output dimension for each position (must be even)
    pos: array of positions to be encoded; flattened to (M,)
    out: (M, embed_dim), sin features first, then cos features
    """
    assert embed_dim % 2 == 0
    half = embed_dim // 2
    # Geometric frequency ladder 10000^(-k / half), k = 0..half-1.
    freqs = 1.0 / 10000 ** (np.arange(half, dtype=np.float64) / half)  # (D/2,)
    angles = np.outer(pos.reshape(-1), freqs)  # (M, D/2), outer product
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)  # (M, D)
#################################################################################
# DiT Configs #
#################################################################################
# Constructors for the DiT sizes from the paper. The letter is the model
# scale (S/B/L/XL) and the suffix is the patch size, e.g. DiT-XL/2 uses
# 2x2 patches. Extra kwargs (input_size, num_classes, ...) are forwarded
# to the DiT constructor.
def DiT_XL_2(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=2, num_heads=16, **kwargs)
def DiT_XL_4(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=4, num_heads=16, **kwargs)
def DiT_XL_8(**kwargs):
    return DiT(depth=28, hidden_size=1152, patch_size=8, num_heads=16, **kwargs)
def DiT_L_2(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=2, num_heads=16, **kwargs)
def DiT_L_4(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=4, num_heads=16, **kwargs)
def DiT_L_8(**kwargs):
    return DiT(depth=24, hidden_size=1024, patch_size=8, num_heads=16, **kwargs)
def DiT_B_2(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=2, num_heads=12, **kwargs)
def DiT_B_4(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=4, num_heads=12, **kwargs)
def DiT_B_8(**kwargs):
    return DiT(depth=12, hidden_size=768, patch_size=8, num_heads=12, **kwargs)
def DiT_S_2(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=2, num_heads=6, **kwargs)
def DiT_S_4(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=4, num_heads=6, **kwargs)
def DiT_S_8(**kwargs):
    return DiT(depth=12, hidden_size=384, patch_size=8, num_heads=6, **kwargs)
# Registry mapping CLI-style names (e.g. "DiT-XL/2") to the constructors above.
DiT_models = {
    'DiT-XL/2': DiT_XL_2, 'DiT-XL/4': DiT_XL_4, 'DiT-XL/8': DiT_XL_8,
    'DiT-L/2': DiT_L_2, 'DiT-L/4': DiT_L_4, 'DiT-L/8': DiT_L_8,
    'DiT-B/2': DiT_B_2, 'DiT-B/4': DiT_B_4, 'DiT-B/8': DiT_B_8,
    'DiT-S/2': DiT_S_2, 'DiT-S/4': DiT_S_4, 'DiT-S/8': DiT_S_8,
}
| 14,995 | 39.420485 | 113 | py |
DiT | DiT-main/train.py | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
A minimal training script for DiT using PyTorch DDP.
"""
import torch
# the first flag below was False when we tested this script but True makes A100 training a lot faster:
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import ImageFolder
from torchvision import transforms
import numpy as np
from collections import OrderedDict
from PIL import Image
from copy import deepcopy
from glob import glob
from time import time
import argparse
import logging
import os
from models import DiT_models
from diffusion import create_diffusion
from diffusers.models import AutoencoderKL
#################################################################################
# Training Helper Functions #
#################################################################################
@torch.no_grad()
def update_ema(ema_model, model, decay=0.9999):
    """
    Step the EMA model towards the current model:
    ema = decay * ema + (1 - decay) * param, applied in place per parameter.
    """
    ema_named = dict(ema_model.named_parameters())
    # TODO: Consider applying only to params that require_grad to avoid small numerical changes of pos_embed
    for name, param in model.named_parameters():
        ema_named[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
    """
    Toggle gradient tracking for every parameter of `model`.
    """
    for param in model.parameters():
        param.requires_grad = flag
def cleanup():
    """
    End DDP training.

    Tears down the default process group created by dist.init_process_group;
    call once per rank after the training loop finishes.
    """
    dist.destroy_process_group()
def create_logger(logging_dir):
    """
    Create a logger that writes to a log file and stdout on rank 0,
    and a silent (NullHandler) logger on every other rank.
    """
    if dist.get_rank() != 0:
        # Non-zero ranks get a dummy logger so logging calls are no-ops.
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.NullHandler())
        return logger
    logging.basicConfig(
        level=logging.INFO,
        format='[\033[34m%(asctime)s\033[0m] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")]
    )
    return logging.getLogger(__name__)
def center_crop_arr(pil_image, image_size):
    """
    Center cropping implementation from ADM.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    Repeatedly halves the image with BOX resampling while it is still at least
    2x the target, then BICUBIC-resizes so the short side equals image_size,
    and finally center-crops to (image_size, image_size).
    """
    while min(*pil_image.size) >= 2 * image_size:
        halved = tuple(side // 2 for side in pil_image.size)
        pil_image = pil_image.resize(halved, resample=Image.BOX)
    ratio = image_size / min(*pil_image.size)
    scaled = tuple(round(side * ratio) for side in pil_image.size)
    pil_image = pil_image.resize(scaled, resample=Image.BICUBIC)
    arr = np.array(pil_image)
    top = (arr.shape[0] - image_size) // 2
    left = (arr.shape[1] - image_size) // 2
    return Image.fromarray(arr[top: top + image_size, left: left + image_size])
#################################################################################
# Training Loop #
#################################################################################
def main(args):
    """
    Trains a new DiT model.

    Runs one DDP process per GPU: sets up the process group, an experiment
    folder on rank 0, the DiT/EMA/VAE models, the ImageFolder data pipeline,
    and the diffusion training loop with periodic logging and checkpointing.
    """
    assert torch.cuda.is_available(), "Training currently requires at least one GPU."
    # Setup DDP:
    dist.init_process_group("nccl")
    # NOTE(review): the f-string below has no placeholders; the f prefix is redundant.
    assert args.global_batch_size % dist.get_world_size() == 0, f"Batch size must be divisible by world size."
    rank = dist.get_rank()
    device = rank % torch.cuda.device_count()
    # Per-rank seed: reproducible, but each process draws different data/noise.
    seed = args.global_seed * dist.get_world_size() + rank
    torch.manual_seed(seed)
    torch.cuda.set_device(device)
    print(f"Starting rank={rank}, seed={seed}, world_size={dist.get_world_size()}.")
    # Setup an experiment folder (rank 0 only; other ranks get a null logger):
    if rank == 0:
        os.makedirs(args.results_dir, exist_ok=True)  # Make results folder (holds all experiment subfolders)
        experiment_index = len(glob(f"{args.results_dir}/*"))
        model_string_name = args.model.replace("/", "-")  # e.g., DiT-XL/2 --> DiT-XL-2 (for naming folders)
        experiment_dir = f"{args.results_dir}/{experiment_index:03d}-{model_string_name}"  # Create an experiment folder
        checkpoint_dir = f"{experiment_dir}/checkpoints"  # Stores saved model checkpoints
        os.makedirs(checkpoint_dir, exist_ok=True)
        logger = create_logger(experiment_dir)
        logger.info(f"Experiment directory created at {experiment_dir}")
    else:
        logger = create_logger(None)
    # Create model:
    assert args.image_size % 8 == 0, "Image size must be divisible by 8 (for the VAE encoder)."
    latent_size = args.image_size // 8  # the VAE downsamples 8x; DiT operates on latents of this size
    model = DiT_models[args.model](
        input_size=latent_size,
        num_classes=args.num_classes
    )
    # Note that parameter initialization is done within the DiT constructor
    ema = deepcopy(model).to(device)  # Create an EMA of the model for use after training
    requires_grad(ema, False)
    model = DDP(model.to(device), device_ids=[rank])
    diffusion = create_diffusion(timestep_respacing="")  # default: 1000 steps, linear noise schedule
    vae = AutoencoderKL.from_pretrained(f"stabilityai/sd-vae-ft-{args.vae}").to(device)
    logger.info(f"DiT Parameters: {sum(p.numel() for p in model.parameters()):,}")
    # Setup optimizer (we used default Adam betas=(0.9, 0.999) and a constant learning rate of 1e-4 in our paper):
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0)
    # Setup data: center-crop + random flip, then normalize to [-1, 1] for the VAE.
    transform = transforms.Compose([
        transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, args.image_size)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)
    ])
    dataset = ImageFolder(args.data_path, transform=transform)
    sampler = DistributedSampler(
        dataset,
        num_replicas=dist.get_world_size(),
        rank=rank,
        shuffle=True,
        seed=args.global_seed
    )
    loader = DataLoader(
        dataset,
        batch_size=int(args.global_batch_size // dist.get_world_size()),
        shuffle=False,
        sampler=sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=True
    )
    logger.info(f"Dataset contains {len(dataset):,} images ({args.data_path})")
    # Prepare models for training:
    update_ema(ema, model.module, decay=0)  # Ensure EMA is initialized with synced weights
    model.train()  # important! This enables embedding dropout for classifier-free guidance
    ema.eval()  # EMA model should always be in eval mode
    # Variables for monitoring/logging purposes:
    train_steps = 0
    log_steps = 0
    running_loss = 0
    start_time = time()
    logger.info(f"Training for {args.epochs} epochs...")
    for epoch in range(args.epochs):
        sampler.set_epoch(epoch)  # reshuffle the distributed shards every epoch
        logger.info(f"Beginning epoch {epoch}...")
        for x, y in loader:
            x = x.to(device)
            y = y.to(device)
            with torch.no_grad():
                # Map input images to latent space + normalize latents:
                x = vae.encode(x).latent_dist.sample().mul_(0.18215)
            t = torch.randint(0, diffusion.num_timesteps, (x.shape[0],), device=device)
            model_kwargs = dict(y=y)
            loss_dict = diffusion.training_losses(model, x, t, model_kwargs)
            loss = loss_dict["loss"].mean()
            opt.zero_grad()
            loss.backward()
            opt.step()
            update_ema(ema, model.module)
            # Log loss values:
            running_loss += loss.item()
            log_steps += 1
            train_steps += 1
            if train_steps % args.log_every == 0:
                # Measure training speed:
                torch.cuda.synchronize()
                end_time = time()
                steps_per_sec = log_steps / (end_time - start_time)
                # Reduce loss history over all processes:
                avg_loss = torch.tensor(running_loss / log_steps, device=device)
                dist.all_reduce(avg_loss, op=dist.ReduceOp.SUM)
                avg_loss = avg_loss.item() / dist.get_world_size()
                logger.info(f"(step={train_steps:07d}) Train Loss: {avg_loss:.4f}, Train Steps/Sec: {steps_per_sec:.2f}")
                # Reset monitoring variables:
                running_loss = 0
                log_steps = 0
                start_time = time()
            # Save DiT checkpoint (rank 0 writes; all ranks sync on the barrier):
            if train_steps % args.ckpt_every == 0 and train_steps > 0:
                if rank == 0:
                    checkpoint = {
                        "model": model.module.state_dict(),
                        "ema": ema.state_dict(),
                        "opt": opt.state_dict(),
                        "args": args
                    }
                    checkpoint_path = f"{checkpoint_dir}/{train_steps:07d}.pt"
                    torch.save(checkpoint, checkpoint_path)
                    logger.info(f"Saved checkpoint to {checkpoint_path}")
                dist.barrier()
    model.eval()  # important! This disables randomized embedding dropout
    # do any sampling/FID calculation/etc. with ema (or model) in eval mode ...
    logger.info("Done!")
    cleanup()
if __name__ == "__main__":
    # Default args here will train DiT-XL/2 with the hyperparameters we used in our paper (except training iters).
    parser = argparse.ArgumentParser()
    parser.add_argument("--data-path", type=str, required=True)  # root of an ImageFolder-style dataset
    parser.add_argument("--results-dir", type=str, default="results")
    parser.add_argument("--model", type=str, choices=list(DiT_models.keys()), default="DiT-XL/2")
    parser.add_argument("--image-size", type=int, choices=[256, 512], default=256)
    parser.add_argument("--num-classes", type=int, default=1000)
    parser.add_argument("--epochs", type=int, default=1400)
    parser.add_argument("--global-batch-size", type=int, default=256)  # summed over all DDP ranks
    parser.add_argument("--global-seed", type=int, default=0)
    parser.add_argument("--vae", type=str, choices=["ema", "mse"], default="ema")  # Choice doesn't affect training
    parser.add_argument("--num-workers", type=int, default=4)
    parser.add_argument("--log-every", type=int, default=100)  # steps between loss logs
    parser.add_argument("--ckpt-every", type=int, default=50_000)  # steps between checkpoints
    args = parser.parse_args()
    main(args)
| 10,949 | 39.555556 | 132 | py |
DiT | DiT-main/diffusion/timestep_sampler.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.
    :param name: the name of the sampler ("uniform" or "loss-second-moment").
    :param diffusion: the diffusion object to sample for.
    :raises NotImplementedError: for any unrecognized name.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
    """
    A distribution over timesteps in the diffusion process, intended to reduce
    variance of the objective.
    By default, samplers perform unbiased importance sampling, in which the
    objective's mean is unchanged. Subclasses may override sample() to change
    how the resampled terms are reweighted, allowing for actual changes in
    the objective.
    """
    @abstractmethod
    def weights(self):
        """
        Return a numpy array with one weight per diffusion step.
        The weights needn't be normalized, but must be positive.
        """
    def sample(self, batch_size, device):
        """
        Importance-sample timesteps for a batch.
        :param batch_size: the number of timesteps.
        :param device: the torch device to save to.
        :return: a tuple (timesteps, weights):
                 - timesteps: a tensor of timestep indices.
                 - weights: a tensor of weights to scale the resulting losses.
        """
        raw = self.weights()
        probs = raw / np.sum(raw)
        idx = np.random.choice(len(probs), size=(batch_size,), p=probs)
        timesteps = th.from_numpy(idx).long().to(device)
        # 1 / (N * p) keeps the importance-sampled loss estimate unbiased.
        inv_probs = 1 / (len(probs) * probs[idx])
        weights = th.from_numpy(inv_probs).float().to(device)
        return timesteps, weights
class UniformSampler(ScheduleSampler):
    """A ScheduleSampler that weights every diffusion step equally."""
    def __init__(self, diffusion):
        self.diffusion = diffusion
        self._uniform = np.ones([diffusion.num_timesteps])
    def weights(self):
        return self._uniform
class LossAwareSampler(ScheduleSampler):
    # Base class for samplers that reweight timesteps based on observed losses,
    # synchronized across all DDP ranks.
    def update_with_local_losses(self, local_ts, local_losses):
        """
        Update the reweighting using losses from a model.
        Call this method from each rank with a batch of timesteps and the
        corresponding losses for each of those timesteps.
        This method will perform synchronization to make sure all of the ranks
        maintain the exact same reweighting.
        :param local_ts: an integer Tensor of timesteps.
        :param local_losses: a 1D Tensor of losses.
        """
        # First gather each rank's batch size so results can be un-padded later.
        batch_sizes = [
            th.tensor([0], dtype=th.int32, device=local_ts.device)
            for _ in range(dist.get_world_size())
        ]
        dist.all_gather(
            batch_sizes,
            th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
        )
        # Pad all_gather batches to be the maximum batch size.
        batch_sizes = [x.item() for x in batch_sizes]
        max_bs = max(batch_sizes)
        # NOTE(review): local_ts/local_losses are gathered without padding below;
        # this presumably relies on equal per-rank batch sizes in practice — verify.
        timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
        loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
        dist.all_gather(timestep_batches, local_ts)
        dist.all_gather(loss_batches, local_losses)
        # Flatten the gathered tensors, dropping any padded tail per rank.
        timesteps = [
            x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
        ]
        losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
        self.update_with_all_losses(timesteps, losses)
    @abstractmethod
    def update_with_all_losses(self, ts, losses):
        """
        Update the reweighting using losses from a model.
        Sub-classes should override this method to update the reweighting
        using losses from the model.
        This method directly updates the reweighting without synchronizing
        between workers. It is called by update_with_local_losses from all
        ranks with identical arguments. Thus, it should have deterministic
        behavior to maintain state across workers.
        :param ts: a list of int timesteps.
        :param losses: a list of float losses, one per timestep.
        """
class LossSecondMomentResampler(LossAwareSampler):
    """
    Importance-sample timesteps proportionally to the second moment of the
    most recent losses at each timestep, mixed with a small uniform
    probability to keep every timestep reachable.
    """
    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        # Rolling window of the last `history_per_term` losses per timestep.
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use an explicit integer dtype instead.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
    def weights(self):
        """Return per-timestep sampling weights (uniform until warmed up)."""
        if not self._warmed_up():
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        # RMS of the recent losses per timestep, normalized, then mixed with
        # a uniform floor of `uniform_prob` total mass.
        weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights
    def update_with_all_losses(self, ts, losses):
        """Record new losses, evicting the oldest entry once a window is full."""
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1
    def _warmed_up(self):
        # True once every timestep has a full loss history.
        return (self._loss_counts == self.history_per_term).all()
| 6,013 | 38.827815 | 108 | py |
DiT | DiT-main/diffusion/gaussian_diffusion.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import math
import numpy as np
import torch as th
import enum
from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
    """
    Average over every dimension except the leading batch dimension.
    """
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
class ModelMeanType(enum.Enum):
    """
    Which type of output the model predicts.
    """
    PREVIOUS_X = enum.auto()  # the model predicts x_{t-1}
    START_X = enum.auto()  # the model predicts x_0
    EPSILON = enum.auto()  # the model predicts epsilon (the added noise)
class ModelVarType(enum.Enum):
    """
    What is used as the model's output variance.
    The LEARNED_RANGE option has been added to allow the model to predict
    values between FIXED_SMALL and FIXED_LARGE, making its job easier.
    """
    LEARNED = enum.auto()  # model outputs the log-variance directly
    FIXED_SMALL = enum.auto()  # use the clipped posterior variance
    FIXED_LARGE = enum.auto()  # use beta_t as the variance
    LEARNED_RANGE = enum.auto()  # model outputs a fraction between the fixed min/max
class LossType(enum.Enum):
    """Which training objective the diffusion process optimizes."""
    MSE = enum.auto()  # use raw MSE loss (and KL when learning variances)
    RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
    KL = enum.auto()  # use the variational lower-bound
    RESCALED_KL = enum.auto()  # like KL, but rescale to estimate the full VLB
    def is_vb(self):
        """True when this loss is a variational-bound objective."""
        return self in (LossType.KL, LossType.RESCALED_KL)
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
warmup_time = int(num_diffusion_timesteps * warmup_frac)
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
    """
    This is the deprecated API for creating beta schedules.
    See get_named_beta_schedule() for the new library of schedules.
    :raises NotImplementedError: for an unrecognized schedule name.
    """
    n = num_diffusion_timesteps
    if beta_schedule == "quad":
        # Linear in sqrt-space, then squared.
        betas = np.linspace(beta_start ** 0.5, beta_end ** 0.5, n, dtype=np.float64) ** 2
    elif beta_schedule == "linear":
        betas = np.linspace(beta_start, beta_end, n, dtype=np.float64)
    elif beta_schedule == "warmup10":
        betas = _warmup_beta(beta_start, beta_end, n, 0.1)
    elif beta_schedule == "warmup50":
        betas = _warmup_beta(beta_start, beta_end, n, 0.5)
    elif beta_schedule == "const":
        betas = np.full(n, beta_end, dtype=np.float64)
    elif beta_schedule == "jsd":
        # 1/T, 1/(T-1), 1/(T-2), ..., 1
        betas = 1.0 / np.linspace(n, 1, n, dtype=np.float64)
    else:
        raise NotImplementedError(beta_schedule)
    assert betas.shape == (n,)
    return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
    """
    Get a pre-defined beta schedule for the given name.
    The beta schedule library consists of beta schedules which remain similar
    in the limit of num_diffusion_timesteps. Beta schedules may be added, but
    should not be removed or changed once they are committed, to maintain
    backwards compatibility.
    """
    if schedule_name == "linear":
        # Linear schedule from Ho et al, rescaled to work for any number of steps.
        scale = 1000 / num_diffusion_timesteps
        return get_beta_schedule(
            "linear",
            beta_start=scale * 0.0001,
            beta_end=scale * 0.02,
            num_diffusion_timesteps=num_diffusion_timesteps,
        )
    if schedule_name == "squaredcos_cap_v2":
        return betas_for_alpha_bar(
            num_diffusion_timesteps,
            lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
        )
    raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].
    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    steps = num_diffusion_timesteps
    return np.array(
        [
            min(1 - alpha_bar((i + 1) / steps) / alpha_bar(i / steps), max_beta)
            for i in range(steps)
        ]
    )
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Original ported from this codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type
):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
) if len(self.posterior_variance) > 1 else np.array([])
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, **model_kwargs)
if isinstance(model_output, tuple):
model_output, extra = model_output
else:
extra = None
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
"extra": extra,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
    def ddim_sample(
        self,
        model,
        x,
        t,
        clip_denoised=True,
        denoised_fn=None,
        cond_fn=None,
        model_kwargs=None,
        eta=0.0,
    ):
        """
        Sample x_{t-1} from the model using DDIM.
        Same usage as p_sample().

        :param eta: scales the per-step stochastic noise; eta == 0 (the
            default) makes sigma == 0 and the step fully deterministic.
        """
        out = self.p_mean_variance(
            model,
            x,
            t,
            clip_denoised=clip_denoised,
            denoised_fn=denoised_fn,
            model_kwargs=model_kwargs,
        )
        if cond_fn is not None:
            out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
        # Usually our model outputs epsilon, but we re-derive it
        # in case we used x_start or x_prev prediction.
        eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
        alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
        alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
        # Per-step noise scale; proportional to eta, so eta == 0 => sigma == 0.
        sigma = (
            eta
            * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
            * th.sqrt(1 - alpha_bar / alpha_bar_prev)
        )
        # Equation 12.
        noise = th.randn_like(x)
        # Deterministic part of the DDIM update: move toward pred_xstart while
        # keeping a sqrt(1 - alpha_bar_prev - sigma^2)-weighted eps component.
        mean_pred = (
            out["pred_xstart"] * th.sqrt(alpha_bar_prev)
            + th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
        )
        nonzero_mask = (
            (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
        )  # no noise when t == 0
        sample = mean_pred + nonzero_mask * sigma * noise
        return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
    def _vb_terms_bpd(
        self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
    ):
        """
        Get a term for the variational lower-bound.
        The resulting units are bits (rather than nats, as one might expect).
        This allows for comparison to other papers.
        :return: a dict with the following keys:
                 - 'output': a shape [N] tensor of NLLs or KLs.
                 - 'pred_xstart': the x_0 predictions.
        """
        # True posterior q(x_{t-1} | x_t, x_0) moments.
        true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
            x_start=x_start, x_t=x_t, t=t
        )
        # Model's predicted p(x_{t-1} | x_t) moments.
        out = self.p_mean_variance(
            model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
        )
        kl = normal_kl(
            true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
        )
        # Divide by log(2) to convert nats -> bits.
        kl = mean_flat(kl) / np.log(2.0)
        decoder_nll = -discretized_gaussian_log_likelihood(
            x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
        )
        assert decoder_nll.shape == x_start.shape
        decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
        # At the first timestep return the decoder NLL,
        # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
        output = th.where((t == 0), decoder_nll, kl)
        return {"output": output, "pred_xstart": out["pred_xstart"]}
    def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
        """
        Compute training losses for a single timestep.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param t: a batch of timestep indices.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :param noise: if specified, the specific Gaussian noise to try to remove.
        :return: a dict with the key "loss" containing a tensor of shape [N].
                 Some mean or variance settings may also have other keys.
        """
        if model_kwargs is None:
            model_kwargs = {}
        if noise is None:
            noise = th.randn_like(x_start)
        # Diffuse x_start forward to timestep t with the (possibly provided) noise.
        x_t = self.q_sample(x_start, t, noise=noise)
        terms = {}
        if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
            # Pure variational-bound training: the loss is the VLB term itself.
            terms["loss"] = self._vb_terms_bpd(
                model=model,
                x_start=x_start,
                x_t=x_t,
                t=t,
                clip_denoised=False,
                model_kwargs=model_kwargs,
            )["output"]
            if self.loss_type == LossType.RESCALED_KL:
                terms["loss"] *= self.num_timesteps
        elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
            model_output = model(x_t, t, **model_kwargs)
            if self.model_var_type in [
                ModelVarType.LEARNED,
                ModelVarType.LEARNED_RANGE,
            ]:
                # Model predicts both mean and variance: output has 2C channels.
                B, C = x_t.shape[:2]
                assert model_output.shape == (B, C * 2, *x_t.shape[2:])
                model_output, model_var_values = th.split(model_output, C, dim=1)
                # Learn the variance using the variational bound, but don't let
                # it affect our mean prediction.
                frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
                terms["vb"] = self._vb_terms_bpd(
                    # The lambda ignores its arguments and replays frozen_out,
                    # so the VB term only backprops into the variance values.
                    model=lambda *args, r=frozen_out: r,
                    x_start=x_start,
                    x_t=x_t,
                    t=t,
                    clip_denoised=False,
                )["output"]
                if self.loss_type == LossType.RESCALED_MSE:
                    # Divide by 1000 for equivalence with initial implementation.
                    # Without a factor of 1/1000, the VB term hurts the MSE term.
                    terms["vb"] *= self.num_timesteps / 1000.0
            # Pick the regression target matching the model's mean parameterization.
            target = {
                ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
                    x_start=x_start, x_t=x_t, t=t
                )[0],
                ModelMeanType.START_X: x_start,
                ModelMeanType.EPSILON: noise,
            }[self.model_mean_type]
            assert model_output.shape == target.shape == x_start.shape
            terms["mse"] = mean_flat((target - model_output) ** 2)
            if "vb" in terms:
                terms["loss"] = terms["mse"] + terms["vb"]
            else:
                terms["loss"] = terms["mse"]
        else:
            raise NotImplementedError(self.loss_type)
        return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
        """
        Compute the entire variational lower-bound, measured in bits-per-dim,
        as well as other related quantities.
        :param model: the model to evaluate loss on.
        :param x_start: the [N x C x ...] tensor of inputs.
        :param clip_denoised: if True, clip denoised samples.
        :param model_kwargs: if not None, a dict of extra keyword arguments to
            pass to the model. This can be used for conditioning.
        :return: a dict containing the following keys:
                 - total_bpd: the total variational lower-bound, per batch element.
                 - prior_bpd: the prior term in the lower-bound.
                 - vb: an [N x T] tensor of terms in the lower-bound.
                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.
        """
        device = x_start.device
        batch_size = x_start.shape[0]
        vb = []
        xstart_mse = []
        mse = []
        # Walk timesteps from T-1 down to 0, scoring one VLB term per step.
        for t in list(range(self.num_timesteps))[::-1]:
            t_batch = th.tensor([t] * batch_size, device=device)
            noise = th.randn_like(x_start)
            # Fresh forward diffusion sample at this timestep.
            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
            # Calculate VLB term at the current timestep
            with th.no_grad():
                out = self._vb_terms_bpd(
                    model,
                    x_start=x_start,
                    x_t=x_t,
                    t=t_batch,
                    clip_denoised=clip_denoised,
                    model_kwargs=model_kwargs,
                )
            vb.append(out["output"])
            xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
            # Also track the MSE of the implied eps against the true noise.
            eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
            mse.append(mean_flat((eps - noise) ** 2))
        # Stack per-timestep results into [N x T] tensors (index 0 == t = T-1).
        vb = th.stack(vb, dim=1)
        xstart_mse = th.stack(xstart_mse, dim=1)
        mse = th.stack(mse, dim=1)
        prior_bpd = self._prior_bpd(x_start)
        total_bpd = vb.sum(dim=1) + prior_bpd
        return {
            "total_bpd": total_bpd,
            "prior_bpd": prior_bpd,
            "vb": vb,
            "xstart_mse": xstart_mse,
            "mse": mse,
        }
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res + th.zeros(broadcast_shape, device=timesteps.device)
| 34,326 | 38.275744 | 129 | py |
DiT | DiT-main/diffusion/diffusion_utils.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import torch as th
import numpy as np
def normal_kl(mean1, logvar1, mean2, logvar2):
    """
    Compute the KL divergence between two gaussians.
    Shapes are automatically broadcasted, so batches can be compared to
    scalars, among other use cases.
    """
    # Find any tensor argument so scalars can be moved onto its device/dtype.
    anchor = None
    for candidate in (mean1, logvar1, mean2, logvar2):
        if isinstance(candidate, th.Tensor):
            anchor = candidate
            break
    assert anchor is not None, "at least one argument must be a Tensor"
    # Force variances to be Tensors. Broadcasting helps convert scalars to
    # Tensors, but it does not work for th.exp().
    if not isinstance(logvar1, th.Tensor):
        logvar1 = th.tensor(logvar1).to(anchor)
    if not isinstance(logvar2, th.Tensor):
        logvar2 = th.tensor(logvar2).to(anchor)
    return 0.5 * (
        -1.0
        + logvar2
        - logvar1
        + th.exp(logvar1 - logvar2)
        + ((mean1 - mean2) ** 2) * th.exp(-logvar2)
    )
def approx_standard_normal_cdf(x):
    """
    A fast approximation of the cumulative distribution function of the
    standard normal (tanh-based cubic approximation).
    """
    cubic = x + 0.044715 * th.pow(x, 3)
    return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * cubic))
def continuous_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a continuous Gaussian distribution.
    :param x: the targets
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    centered_x = x - means
    inv_stdv = th.exp(-log_scales)
    # Standardize x, then score it under a unit normal.
    normalized_x = centered_x * inv_stdv
    # NOTE(review): this returns log N(normalized_x; 0, 1) without subtracting
    # log_scales, i.e. the change-of-variables (Jacobian) term for the density
    # of x itself is omitted — confirm callers expect this un-normalized form.
    log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x)
    return log_probs
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
    """
    Compute the log-likelihood of a Gaussian distribution discretizing to a
    given image.
    :param x: the target images. It is assumed that this was uint8 values,
              rescaled to the range [-1, 1].
    :param means: the Gaussian mean Tensor.
    :param log_scales: the Gaussian log stddev Tensor.
    :return: a tensor like x of log probabilities (in nats).
    """
    assert x.shape == means.shape == log_scales.shape
    # Each uint8 bucket spans 2/255 in [-1, 1]; integrate the Gaussian over
    # the half-bucket on either side of x via the CDF approximation.
    inv_stdv = th.exp(-log_scales)
    delta = x - means
    cdf_upper = approx_standard_normal_cdf(inv_stdv * (delta + 1.0 / 255.0))
    cdf_lower = approx_standard_normal_cdf(inv_stdv * (delta - 1.0 / 255.0))
    # Clamp before log for numerical stability near 0 and 1.
    log_cdf_upper = th.log(cdf_upper.clamp(min=1e-12))
    log_upper_tail = th.log((1.0 - cdf_lower).clamp(min=1e-12))
    log_bucket = th.log((cdf_upper - cdf_lower).clamp(min=1e-12))
    # The extreme buckets absorb the full tail mass below/above them.
    log_probs = th.where(
        x < -0.999,
        log_cdf_upper,
        th.where(x > 0.999, log_upper_tail, log_bucket),
    )
    assert log_probs.shape == x.shape
    return log_probs
| 3,189 | 34.842697 | 108 | py |
DiT | DiT-main/diffusion/respace.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
    """
    Create a list of timesteps to use from an original diffusion process,
    given the number of timesteps we want to take from equally-sized portions
    of the original process.
    For example, if there's 300 timesteps and the section counts are [10,15,20]
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.
    If the stride is a string starting with "ddim", then the fixed striding
    from the DDIM paper is used, and only one section is allowed.
    :param num_timesteps: the number of diffusion steps in the original
                          process to divide up.
    :param section_counts: either a list of numbers, or a string containing
                           comma-separated numbers, indicating the step count
                           per section. As a special case, use "ddimN" where N
                           is a number of steps to use the striding from the
                           DDIM paper.
    :return: a set of diffusion steps from the original process to use.
    """
    if isinstance(section_counts, str):
        if section_counts.startswith("ddim"):
            desired_count = int(section_counts[len("ddim") :])
            # Search for an integer stride that yields exactly desired_count steps.
            for i in range(1, num_timesteps):
                if len(range(0, num_timesteps, i)) == desired_count:
                    return set(range(0, num_timesteps, i))
            # Bug fix: the message previously reported num_timesteps instead
            # of the step count that was actually requested.
            raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
            )
        section_counts = [int(x) for x in section_counts.split(",")]
    size_per = num_timesteps // len(section_counts)
    extra = num_timesteps % len(section_counts)
    start_idx = 0
    all_steps = []
    for i, section_count in enumerate(section_counts):
        # The first `extra` sections absorb one additional original timestep
        # each, so the sections tile num_timesteps exactly.
        size = size_per + (1 if i < extra else 0)
        if size < section_count:
            raise ValueError(
                f"cannot divide section of {size} steps into {section_count}"
            )
        if section_count <= 1:
            frac_stride = 1
        else:
            # Fractional stride so the section's first and last original steps
            # are both retained.
            frac_stride = (size - 1) / (section_count - 1)
        cur_idx = 0.0
        taken_steps = []
        for _ in range(section_count):
            taken_steps.append(start_idx + round(cur_idx))
            cur_idx += frac_stride
        all_steps += taken_steps
        start_idx += size
    return set(all_steps)
class SpacedDiffusion(GaussianDiffusion):
    """
    A diffusion process which can skip steps in a base diffusion process.
    :param use_timesteps: a collection (sequence or set) of timesteps from the
                          original diffusion process to retain.
    :param kwargs: the kwargs to create the base diffusion process.
    """
    def __init__(self, use_timesteps, **kwargs):
        self.use_timesteps = set(use_timesteps)
        # Maps each retained (new) timestep index to its original index.
        self.timestep_map = []
        self.original_num_steps = len(kwargs["betas"])
        base_diffusion = GaussianDiffusion(**kwargs)  # pylint: disable=missing-kwoa
        # Recompute betas for the retained steps so the spaced process has the
        # same cumulative alpha products as the base process at those steps:
        # 1 - beta_new = alpha_bar[i] / alpha_bar[previous retained step].
        last_alpha_cumprod = 1.0
        new_betas = []
        for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
            if i in self.use_timesteps:
                new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
                last_alpha_cumprod = alpha_cumprod
                self.timestep_map.append(i)
        kwargs["betas"] = np.array(new_betas)
        super().__init__(**kwargs)
    def p_mean_variance(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        # Wrap the model so it receives original-schedule timestep indices.
        return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
    def training_losses(
        self, model, *args, **kwargs
    ):  # pylint: disable=signature-differs
        return super().training_losses(self._wrap_model(model), *args, **kwargs)
    def condition_mean(self, cond_fn, *args, **kwargs):
        return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
    def condition_score(self, cond_fn, *args, **kwargs):
        return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
    def _wrap_model(self, model):
        # Idempotent: don't double-wrap an already wrapped model.
        if isinstance(model, _WrappedModel):
            return model
        return _WrappedModel(
            model, self.timestep_map, self.original_num_steps
        )
    def _scale_timesteps(self, t):
        # Scaling is done by the wrapped model.
        return t
class _WrappedModel:
def __init__(self, model, timestep_map, original_num_steps):
self.model = model
self.timestep_map = timestep_map
# self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
# if self.rescale_timesteps:
# new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
| 5,485 | 41.2 | 108 | py |
FATE | FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_binary.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """
    Build and run a horizontal (homo) federated NN binary-classification
    pipeline on the breast_homo guest/host tables, then evaluate the model.

    :param config: path to a job-config YAML, or an already-loaded config object.
    :param namespace: unused here; kept for the example-runner interface.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
    # Uploaded FATE tables for each party (horizontal split: same features).
    train_data_0 = {"name": "breast_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "breast_homo_host", "namespace": "experiment"}
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
    # Both parties parse labeled, dense-format data.
    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")
    # Logistic-regression-style net: 30 input features -> 1 sigmoid output.
    model = nn.Sequential(
        nn.Linear(30, 1),
        nn.Sigmoid()
    )
    loss = nn.BCELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
    # FedAvg trainer: aggregate every epoch, validate every epoch.
    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1),
                          torch_seed=100
                          )
    # Wire components: reader -> transform -> homo NN -> evaluation.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0'), data=Data(data=nn_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # CLI entry point: optionally override the default job-config path.
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    cli_args = parser.parse_args()
    if cli_args.config is None:
        main()
    else:
        main(cli_args.config)
| 3,330 | 34.817204 | 120 | py |
FATE | FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_regression.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """
    Build and run a horizontal (homo) federated NN regression pipeline on the
    student_homo guest/host tables, then run regression evaluation.

    :param config: path to a job-config YAML, or an already-loaded config object.
    :param namespace: unused here; kept for the example-runner interface.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
    # Uploaded FATE tables for each party (horizontal split: same features).
    train_data_0 = {"name": "student_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "student_homo_host", "namespace": "experiment"}
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
    # Both parties parse labeled, dense-format data.
    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")
    # Linear regression net: 13 input features -> 1 output, no activation.
    model = nn.Sequential(
        nn.Linear(13, 1)
    )
    loss = nn.MSELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
    # FedAvg trainer: aggregate every epoch, validate every epoch.
    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1),
                          torch_seed=100
                          )
    # Wire components: reader -> transform -> homo NN -> regression evaluation.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0', eval_type='regression'), data=Data(data=nn_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # CLI entry point: optionally override the default job-config path.
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    cli_args = parser.parse_args()
    if cli_args.config is None:
        main()
    else:
        main(cli_args.config)
| 3,333 | 35.637363 | 120 | py |
FATE | FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_train_multi.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam, DatasetParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """
    Build and run a horizontal (homo) federated NN multi-class pipeline on the
    vehicle_scale guest/host tables, then run multi-class evaluation.

    :param config: path to a job-config YAML, or an already-loaded config object.
    :param namespace: unused here; kept for the example-runner interface.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
    # Uploaded FATE tables for each party (horizontal split: same features).
    train_data_0 = {"name": "vehicle_scale_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "vehicle_scale_homo_host", "namespace": "experiment"}
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
    # Both parties parse labeled, dense-format data.
    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")
    # 18 input features -> 4 class scores.
    model = nn.Sequential(
        nn.Linear(18, 4),
        nn.Softmax(dim=1)  # actually cross-entropy loss does the softmax
    )
    loss = nn.CrossEntropyLoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=50, batch_size=128,
                                               validation_freqs=1),
                          # reshape and set label to long for CrossEntropyLoss
                          dataset=DatasetParam(dataset_name='table', flatten_label=True, label_dtype='long'),
                          torch_seed=100
                          )
    # Wire components: reader -> transform -> homo NN -> multi-class evaluation.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0', eval_type='multi'), data=Data(data=nn_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # CLI entry point: optionally override the default job-config path.
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    cli_args = parser.parse_args()
    if cli_args.config is None:
        main()
    else:
        main(cli_args.config)
| 3,628 | 37.2 | 120 | py |
FATE | FATE-master/examples/pipeline/homo_nn/pipeline_homo_nn_aggregate_n_epoch.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook
# pipeline
from pipeline.backend.pipeline import PipeLine
from pipeline.component import Reader, DataTransform, HomoNN, Evaluation
from pipeline.component.nn import TrainerParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """Build and fit a homo (horizontal) NN pipeline on the breast_homo tables.

    A one-layer torch model (Linear + Sigmoid) is trained with the FedAvg
    trainer; aggregate_every_n_epoch=5 means parties aggregate once every
    5 local epochs -- NOTE(review): confirm exact semantics in TrainerParam docs.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: unused in this script; kept for runner signature compatibility
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]
    # homo training declares an arbiter role in addition to guest/host
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host, arbiter=arbiter)
    train_data_0 = {"name": "breast_homo_guest", "namespace": "experiment"}
    train_data_1 = {"name": "breast_homo_host", "namespace": "experiment"}
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=train_data_0)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=train_data_1)
    # both sides hold labels in the homo setting
    data_transform_0 = DataTransform(name='data_transform_0')
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=True, output_format="dense")
    # model/loss/optimizer are plain torch objects; fate_torch_hook makes them serializable
    model = nn.Sequential(
        nn.Linear(30, 1),
        nn.Sigmoid()
    )
    loss = nn.BCELoss()
    optimizer = t.optim.Adam(model.parameters(), lr=0.01)
    nn_component = HomoNN(name='nn_0',
                          model=model,
                          loss=loss,
                          optimizer=optimizer,
                          trainer=TrainerParam(trainer_name='fedavg_trainer', epochs=20, batch_size=128,
                                               validation_freqs=1, aggregate_every_n_epoch=5),
                          torch_seed=100
                          )
    # wire the DAG: reader -> transform -> homo NN -> evaluation
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(nn_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(Evaluation(name='eval_0'), data=Data(data=nn_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 3,357 | 35.107527 | 120 | py |
FATE | FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Train a hetero FTL model (plain mode) on NUS-WIDE, then run prediction.

    After fitting, the data-transform and FTL components are deployed and a
    second pipeline re-uses them to score the same reader output.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: suffix appended to the data namespace ("experiment{namespace}")
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # only the guest side carries labels; host provides features only
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    # batch_size=-1: presumably full-batch training -- confirm in HeteroFTL docs
    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='plain')
    # add a 32-unit sigmoid dense layer to the FTL network
    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))
    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))
    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))
    pipeline.compile()
    pipeline.fit()
    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, hetero_ftl_0])
    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 3,842 | 37.818182 | 103 | py |
FATE | FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-encrypted.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and fit a hetero FTL pipeline (encrypted mode) on the NUS-WIDE data.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: suffix appended to the data namespace ("experiment{namespace}")
    """
    # Load the job configuration when a YAML path was supplied.
    if isinstance(config, str):
        config = load_job_config(config)
    guest = config.parties.guest[0]
    host = config.parties.host[0]

    guest_table = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_table = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_table)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_table)

    # Only the guest side carries labels; the host contributes features only.
    data_transform_0 = DataTransform(name="data_transform_0")
    guest_transform = data_transform_0.get_party_instance(role='guest', party_id=guest)
    guest_transform.component_param(with_label=True, output_format="dense")
    host_transform = data_transform_0.get_party_instance(role='host', party_id=host)
    host_transform.component_param(with_label=False)

    # FTL component running in encrypted mode.
    ftl_component = HeteroFTL(name='hetero_ftl_0', epochs=10, alpha=1, batch_size=-1, mode='encrypted')
    hidden_layer = Dense(units=32, activation='sigmoid',
                         kernel_initializer=initializers.RandomNormal(stddev=1.0),
                         bias_initializer=initializers.Zeros())
    ftl_component.add_nn_layer(hidden_layer)
    ftl_component.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    # Wire the DAG: reader -> transform -> FTL -> evaluation.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(ftl_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=ftl_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 3,286 | 38.60241 | 103 | py |
FATE | FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-communication-efficient.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Train a hetero FTL model with the communication-efficient variant.

    communication_efficient=True with local_round=5 makes each party run
    several local rounds between exchanges -- NOTE(review): exact semantics
    come from the HeteroFTL component docs; confirm there.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: suffix appended to the data namespace ("experiment{namespace}")
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # only the guest side carries labels; host provides features only
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10,
                             alpha=1,
                             batch_size=-1,
                             mode='plain',
                             communication_efficient=True,
                             local_round=5)
    # add a 32-unit sigmoid dense layer to the FTL network
    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))
    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))
    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
    # wire the DAG: reader -> transform -> FTL -> evaluation
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 3,471 | 38.908046 | 103 | py |
FATE | FATE-master/examples/pipeline/hetero_ftl/pipeline-hetero-ftl-plain.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and fit a hetero FTL pipeline (plain mode) on the NUS-WIDE data.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: suffix appended to the data namespace ("experiment{namespace}")
    """
    # Load the job configuration when a YAML path was supplied.
    if isinstance(config, str):
        config = load_job_config(config)
    guest = config.parties.guest[0]
    host = config.parties.host[0]

    guest_table = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_table = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_table)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_table)

    # Only the guest side carries labels; the host contributes features only.
    data_transform_0 = DataTransform(name="data_transform_0")
    guest_transform = data_transform_0.get_party_instance(role='guest', party_id=guest)
    guest_transform.component_param(with_label=True, output_format="dense")
    host_transform = data_transform_0.get_party_instance(role='host', party_id=host)
    host_transform.component_param(with_label=False)

    # FTL component running without encryption (plain mode).
    ftl_component = HeteroFTL(name='hetero_ftl_0', epochs=10, alpha=1, batch_size=-1, mode='plain')
    hidden_layer = Dense(units=32, activation='sigmoid',
                         kernel_initializer=initializers.RandomNormal(stddev=1.0),
                         bias_initializer=initializers.Zeros())
    ftl_component.add_nn_layer(hidden_layer)
    ftl_component.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    # Wire the DAG: reader -> transform -> FTL -> evaluation.
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(ftl_component, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=ftl_component.output.data))
    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 3,282 | 38.554217 | 103 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-binary-selective-bp.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """Train a hetero (vertical) NN with selective backpropagation enabled.

    selector_param={"method": "relative"} switches on sample selection during
    training -- NOTE(review): semantics of the "relative" selector are defined
    by the HeteroNN component; confirm in its docs.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: unused in this script; kept for runner signature compatibility
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # only the guest side holds labels in the hetero setting
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    # intersect sample IDs shared by guest and host before training
    intersection_0 = Intersection(name="intersection_0")
    hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=5,
                           interactive_layer_lr=0.01, batch_size=128, validation_freqs=1, task_type='classification',
                           selector_param={"method": "relative"})
    guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
    host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
    # define model
    # guest bottom: 10 input features -> 4 hidden units
    guest_bottom = t.nn.Sequential(
        nn.Linear(10, 4),
        nn.ReLU(),
        nn.Dropout(p=0.2)
    )
    # guest top: 4 interactive-layer outputs -> 1 sigmoid score
    guest_top = t.nn.Sequential(
        nn.Linear(4, 1),
        nn.Sigmoid()
    )
    # host bottom: 20 input features -> 4 hidden units
    host_bottom = t.nn.Sequential(
        nn.Linear(20, 4),
        nn.ReLU(),
        nn.Dropout(p=0.2)
    )
    # use interactive layer after fate_torch_hook
    # add drop out in this layer
    interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=1, dropout=0.2)
    guest_nn_0.add_top_model(guest_top)
    guest_nn_0.add_bottom_model(guest_bottom)
    host_nn_0.add_bottom_model(host_bottom)
    optimizer = t.optim.Adam(lr=0.01)  # you can initialize optimizer without parameters after fate_torch_hook
    loss = t.nn.BCELoss()
    hetero_nn_0.set_interactive_layer(interactive_layer)
    hetero_nn_0.compile(optimizer=optimizer, loss=loss)
    evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
    # define components IO
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 4,307 | 35.201681 | 117 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-binary-drop-out.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """Train a hetero (vertical) NN with dropout in both bottoms and the
    interactive layer.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: unused in this script; kept for runner signature compatibility
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # only the guest side holds labels in the hetero setting
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    # intersect sample IDs shared by guest and host before training
    intersection_0 = Intersection(name="intersection_0")
    # batch_size=-1: presumably full-batch training -- confirm in HeteroNN docs
    hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=10,
                           interactive_layer_lr=0.01, batch_size=-1, validation_freqs=1, task_type='classification')
    guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
    host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
    # define model
    # guest bottom: 10 input features -> 4 hidden units
    guest_bottom = t.nn.Sequential(
        nn.Linear(10, 4),
        nn.ReLU(),
        nn.Dropout(p=0.2)
    )
    # guest top: 4 interactive-layer outputs -> 1 sigmoid score
    guest_top = t.nn.Sequential(
        nn.Linear(4, 1),
        nn.Sigmoid()
    )
    # host bottom: 20 input features -> 4 hidden units
    host_bottom = t.nn.Sequential(
        nn.Linear(20, 4),
        nn.ReLU(),
        nn.Dropout(p=0.2)
    )
    # use interactive layer after fate_torch_hook
    # add drop out in this layer
    interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=1, dropout=0.2)
    guest_nn_0.add_top_model(guest_top)
    guest_nn_0.add_bottom_model(guest_bottom)
    host_nn_0.add_bottom_model(host_bottom)
    optimizer = t.optim.Adam(lr=0.01)  # you can initialize optimizer without parameters after fate_torch_hook
    loss = t.nn.BCELoss()
    hetero_nn_0.set_interactive_layer(interactive_layer)
    hetero_nn_0.compile(optimizer=optimizer, loss=loss)
    evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
    # define components IO
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 4,241 | 34.949153 | 116 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-with-early-stop.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """Train a hetero NN with an EarlyStopping callback and a validation set.

    The same tables are read twice (reader_0 for training, reader_1 for
    validation). Per callback_param, training uses AUC as the only monitored
    metric and stops after 2 rounds without improvement.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: unused in this script; kept for runner signature compatibility
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    # validation re-reads the same tables as training in this demo
    guest_val_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_val_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    reader_1 = Reader(name="reader_1")
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_val_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_val_data)
    # only the guest side holds labels in the hetero setting
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    data_transform_1 = DataTransform(name="data_transform_1")
    data_transform_1.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_1.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    # separate ID intersections for the train and validation streams
    intersection_0 = Intersection(name="intersection_0")
    intersection_1 = Intersection(name="intersection_1")
    hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=100,
                           interactive_layer_lr=0.01, batch_size=-1, task_type='classification',
                           callback_param={
                               "callbacks": ["EarlyStopping"],
                               "validation_freqs": 1,
                               "early_stopping_rounds": 2,
                               "use_first_metric_only": True,
                               "metrics": ["AUC"]
                           }
                           )
    guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
    host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
    # define model
    # guest bottom: 10 input features -> 2 hidden units
    guest_bottom = t.nn.Sequential(
        nn.Linear(10, 2),
        nn.ReLU()
    )
    # guest top: 2 interactive-layer outputs -> 1 sigmoid score
    guest_top = t.nn.Sequential(
        nn.Linear(2, 1),
        nn.Sigmoid()
    )
    # host bottom: 20 input features -> 2 hidden units
    host_bottom = t.nn.Sequential(
        nn.Linear(20, 2),
        nn.ReLU()
    )
    # use interactive layer after fate_torch_hook
    interactive_layer = t.nn.InteractiveLayer(out_dim=2, guest_dim=2, host_dim=2, host_num=1)
    guest_nn_0.add_top_model(guest_top)
    guest_nn_0.add_bottom_model(guest_bottom)
    host_nn_0.add_bottom_model(host_bottom)
    optimizer = t.optim.Adam(lr=0.01)  # you can initialize optimizer without parameters after fate_torch_hook
    loss = t.nn.BCELoss()
    hetero_nn_0.set_interactive_layer(interactive_layer)
    hetero_nn_0.compile(optimizer=optimizer, loss=loss)
    evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
    # define components IO
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(data_transform_1, data=Data(data=reader_1.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(intersection_1, data=Data(data=data_transform_1.output.data))
    pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data,
                                                  validate_data=intersection_1.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 5,533 | 39.101449 | 110 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-binary.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
    """Train a hetero (vertical) NN for binary classification on breast_hetero.

    :param config: path to a job-config YAML, or an already-loaded config object
    :param namespace: unused in this script; kept for runner signature compatibility
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    # only the guest side holds labels in the hetero setting
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
    # intersect sample IDs shared by guest and host before training
    intersection_0 = Intersection(name="intersection_0")
    # batch_size=-1: presumably full-batch training -- confirm in HeteroNN docs
    hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=20,
                           interactive_layer_lr=0.01, batch_size=-1, validation_freqs=1, task_type='classification')
    guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
    host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
    # define model
    # guest bottom: 10 input features -> 4 hidden units
    guest_bottom = t.nn.Sequential(
        nn.Linear(10, 4),
        nn.ReLU()
    )
    # guest top: 4 interactive-layer outputs -> 1 sigmoid score
    guest_top = t.nn.Sequential(
        nn.Linear(4, 1),
        nn.Sigmoid()
    )
    # host bottom: 20 input features -> 4 hidden units
    host_bottom = t.nn.Sequential(
        nn.Linear(20, 4),
        nn.ReLU()
    )
    # use interactive layer after fate_torch_hook
    interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=1, dropout=0.2)
    guest_nn_0.add_top_model(guest_top)
    guest_nn_0.add_bottom_model(guest_bottom)
    host_nn_0.add_bottom_model(host_bottom)
    optimizer = t.optim.Adam(lr=0.01)  # you can initialize optimizer without parameters after fate_torch_hook
    loss = t.nn.BCELoss()
    hetero_nn_0.set_interactive_layer(interactive_layer)
    hetero_nn_0.compile(optimizer=optimizer, loss=loss)
    evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
    # define components IO
    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
    pipeline.compile()
    pipeline.fit()
    print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
    # Command-line entry point: optionally override the default config path.
    arg_parser = argparse.ArgumentParser("PIPELINE DEMO")
    arg_parser.add_argument("-config", type=str, help="config file")
    parsed_args = arg_parser.parse_args()
    if parsed_args.config is None:
        main()
    else:
        main(parsed_args.config)
| 4,154 | 35.130435 | 116 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-binary-coae.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from collections import OrderedDict
import torch as t
from torch import nn
from torch import optim
from pipeline import fate_torch as ft
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
# This is important: fate_torch_hook modifies the torch modules in place so
# that Sequential models defined below can be parsed by the pipeline.
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "breast_hetero_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
# define network structure in torch style #
# define guest model
Linear = nn.Linear
ReLU = nn.ReLU
guest_bottom_a = Linear(10, 8, True)
seq = nn.Sequential(
OrderedDict([
('layer_0', guest_bottom_a),
('relu_0', ReLU())
])
)
seq2 = nn.Sequential(
ReLU(),
Linear(8, 2, True),
nn.Softmax(dim=1) # to use coae in binary task, output unit is 2, and need use softmax to compute probability
) # so that we can compute loss using fake labels and 2-dim outputs
# define host model
seq3 = nn.Sequential(
Linear(20, 8, True),
ReLU()
)
# use interactive layer after fate_torch_hook
interactive_layer = t.nn.InteractiveLayer(out_dim=8, guest_dim=8, host_dim=8, host_num=1)
# loss fun
ce_loss_fn = nn.CrossEntropyLoss()
# optimizer, after fate torch hook optimizer can be created without parameters
opt: ft.optim.Adam = optim.Adam(lr=0.01)
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=30, floating_point_precision=None,
interactive_layer_lr=0.1, batch_size=-1, early_stop="diff",
coae_param={'enable': True, 'epoch': 100})
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
guest_nn_0.add_bottom_model(seq)
guest_nn_0.add_top_model(seq2)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
host_nn_0.add_bottom_model(seq3)
hetero_nn_0.set_interactive_layer(interactive_layer)
hetero_nn_0.compile(opt, loss=ce_loss_fn)
hetero_nn_1 = HeteroNN(name="hetero_nn_1")
evaluation_0 = Evaluation(name="evaluation_0")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_nn_1, data=Data(test_data=intersection_0.output.data),
model=Model(model=hetero_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,957 | 36.560606 | 118 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-binary-multi-host.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data_0 = {"name": "breast_hetero_host", "namespace": "experiment"}
host_train_data_1 = {"name": "breast_hetero_host", "namespace": "experiment"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=hosts)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=hosts[0]).component_param(table=host_train_data_0)
reader_0.get_party_instance(role='host', party_id=hosts[1]).component_param(table=host_train_data_1)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=hosts[0]).component_param(with_label=False)
data_transform_0.get_party_instance(role='host', party_id=hosts[1]).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=20,
interactive_layer_lr=0.01, batch_size=-1, validation_freqs=1, task_type='classification')
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
host_nn_0_host_0 = hetero_nn_0.get_party_instance(role='host', party_id=hosts[0])
host_nn_0_host_1 = hetero_nn_0.get_party_instance(role='host', party_id=hosts[1])
# define model
guest_bottom = t.nn.Sequential(
nn.Linear(10, 4),
nn.ReLU()
)
guest_top = t.nn.Sequential(
nn.Linear(4, 1),
nn.Sigmoid()
)
host_bottom = t.nn.Sequential(
nn.Linear(20, 4),
nn.ReLU()
)
# use interactive layer after fate_torch_hook
interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=2, dropout=0.2)
guest_nn_0.add_top_model(guest_top)
guest_nn_0.add_bottom_model(guest_bottom)
host_nn_0_host_0.add_bottom_model(host_bottom)
host_nn_0_host_1.add_bottom_model(host_bottom)
optimizer = t.optim.Adam(lr=0.01) # you can initialize optimizer without parameters after fate_torch_hook
loss = t.nn.BCELoss()
hetero_nn_0.set_interactive_layer(interactive_layer)
hetero_nn_0.compile(optimizer=optimizer, loss=loss)
evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
# define components IO
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,613 | 37.45 | 116 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-with-check-point.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=20,
interactive_layer_lr=0.01, batch_size=-1, task_type='classification',
callback_param={
"validation_freqs": 1,
"callbacks": ["ModelCheckpoint"],
"save_freq": 1},
)
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
# define model
guest_bottom = t.nn.Sequential(
nn.Linear(10, 4),
nn.ReLU()
)
guest_top = t.nn.Sequential(
nn.Linear(4, 1),
nn.Sigmoid()
)
host_bottom = t.nn.Sequential(
nn.Linear(20, 4),
nn.ReLU()
)
# use interactive layer after fate_torch_hook
interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=1, dropout=0.2)
guest_nn_0.add_top_model(guest_top)
guest_nn_0.add_bottom_model(guest_bottom)
host_nn_0.add_bottom_model(host_bottom)
optimizer = t.optim.Adam(lr=0.01) # you can initialize optimizer without parameters after fate_torch_hook
loss = t.nn.BCELoss()
hetero_nn_0.set_interactive_layer(interactive_layer)
hetero_nn_0.compile(optimizer=optimizer, loss=loss)
evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
# define components IO
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,374 | 35.458333 | 110 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-with-warm_start.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host", "namespace": "experiment"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=20,
interactive_layer_lr=0.01, batch_size=-1, validation_freqs=1, task_type='classification')
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
# define model
guest_bottom = t.nn.Sequential(
nn.Linear(10, 4),
nn.ReLU()
)
guest_top = t.nn.Sequential(
nn.Linear(4, 1),
nn.Sigmoid()
)
host_bottom = t.nn.Sequential(
nn.Linear(20, 4),
nn.ReLU()
)
# use interactive layer after fate_torch_hook
interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=4, host_dim=4, host_num=1, dropout=0.2)
guest_nn_0.add_top_model(guest_top)
guest_nn_0.add_bottom_model(guest_bottom)
host_nn_0.add_bottom_model(host_bottom)
optimizer = t.optim.Adam(lr=0.01) # you can initialize optimizer without parameters after fate_torch_hook
loss = t.nn.BCELoss()
hetero_nn_0.set_interactive_layer(interactive_layer)
hetero_nn_0.compile(optimizer=optimizer, loss=loss)
hetero_nn_1 = HeteroNN(name="hetero_nn_1", epochs=10,
interactive_layer_lr=0.01, batch_size=128, validation_freqs=1, task_type='classification')
evaluation_0 = Evaluation(name='eval_0', eval_type='binary')
# define components IO
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_nn_1, data=Data(train_data=intersection_0.output.data),
model=Model(model=hetero_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,500 | 36.508333 | 117 | py |
FATE | FATE-master/examples/pipeline/hetero_nn/pipeline-hetero-nn-train-multi.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.component.nn import DatasetParam
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "vehicle_scale_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "vehicle_scale_hetero_host", "namespace": "experiment"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=15,
interactive_layer_lr=0.05, batch_size=256, validation_freqs=1, task_type='classification',
selector_param={"method": "relative"})
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
# define model
guest_bottom = t.nn.Sequential(
nn.Linear(9, 9),
nn.ReLU(),
)
guest_top = t.nn.Sequential(
nn.Linear(4, 4),
nn.Softmax(dim=1)
)
host_bottom = t.nn.Sequential(
nn.Linear(9, 9),
nn.ReLU(),
)
# use interactive layer after fate_torch_hook
# add drop out in this layer
interactive_layer = t.nn.InteractiveLayer(out_dim=4, guest_dim=9, host_dim=9, host_num=1, dropout=0.2)
guest_nn_0.add_top_model(guest_top)
guest_nn_0.add_bottom_model(guest_bottom)
host_nn_0.add_bottom_model(host_bottom)
optimizer = t.optim.Adam(lr=0.05) # you can initialize optimizer without parameters after fate_torch_hook
loss = t.nn.CrossEntropyLoss()
hetero_nn_0.set_interactive_layer(interactive_layer)
# add dataset param, because CrossEntropy loss need flatten long label, so add this parameter
# will use table dataset in federatedml/nn/dataset/table.py
hetero_nn_0.add_dataset(DatasetParam(dataset_name='table', flatten_label=True, label_dtype='long'))
hetero_nn_0.compile(optimizer=optimizer, loss=loss)
evaluation_0 = Evaluation(name='eval_0', eval_type='multi')
# define components IO
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
print(pipeline.get_component("hetero_nn_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 4,593 | 37.283333 | 117 | py |
FATE | FATE-master/examples/pipeline/homo_graph/pipeline_homo_graph_sage.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
# torch
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.component.nn import TrainerParam
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HomoNN, Evaluation
from pipeline.component.reader import Reader
from pipeline.interface import Data
from pipeline.component.nn import DatasetParam
import os
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
def main(config="../../config.yaml", namespace=""):
fate_project_path = os.getenv("FATE_PROJECT_BASE")
host = 10000
guest = 9999
arbiter = 10000
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host,
arbiter=arbiter)
data_0 = {"name": "cora_guest", "namespace": "experiment"}
data_1 = {"name": "cora_host", "namespace": "experiment"}
data_path_0 = fate_project_path + '/examples/data/cora4fate/guest'
data_path_1 = fate_project_path + '/examples/data/cora4fate/host'
pipeline.bind_table(name=data_0['name'], namespace=data_0['namespace'], path=data_path_0)
pipeline.bind_table(name=data_1['name'], namespace=data_1['namespace'], path=data_path_1)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=data_0)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=data_1)
dataset_param = DatasetParam("graph",
id_col='id',
label_col='y',
feature_dtype='float',
label_dtype='long',
feats_name='feats.csv',
feats_dataset_col='dataset',
feats_dataset_train='train',
feats_dataset_vali='vali',
feats_dataset_test='test',
adj_name='adj.csv',
adj_src_col='node1',
adj_dst_col='node2')
model = t.nn.Sequential(
t.nn.CustModel(module_name='graphsage', class_name='Sage', in_channels=1433, hidden_channels=64, class_num=7)
)
loss = nn.NLLLoss()
optimizer = t.optim.Adam(model.parameters(), lr=0.001)
homo_graph_0 = HomoNN(
name="homo_graph_0",
model=model,
loss=loss,
optimizer=optimizer,
dataset=dataset_param,
trainer=TrainerParam(trainer_name='fedavg_graph_trainer', epochs=10, batch_size=10,
validation_freqs=1, num_neighbors=[11, 11], task_type='multi'),
torch_seed=100
)
pipeline.add_component(reader_0)
pipeline.add_component(homo_graph_0, data=Data(train_data=reader_0.output.data))
pipeline.add_component(Evaluation(name='eval_0', eval_type='multi'), data=Data(data=homo_graph_0.output.data))
pipeline.compile()
pipeline.fit()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str, help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| 3,919 | 38.59596 | 117 | py |
FATE | FATE-master/examples/benchmark_quality/homo_nn/local-homo_nn.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import pathlib
import numpy as np
import torch as t
from torch.utils.data import DataLoader, TensorDataset
import pandas
from pipeline.utils.tools import JobConfig
from federatedml.nn.backend.utils.common import global_seed
# Relative CSV paths (under the configured data base dir) for each benchmark
# dataset, split by party role.
_STEMS = {"vehicle": "vehicle_scale_homo", "breast": "breast_homo"}
dataset = {
    key: {role: f"examples/data/{stem}_{role}.csv" for role in ("guest", "host")}
    for key, stem in _STEMS.items()
}
def fit(epoch, model, optimizer, loss, batch_size, dataset):
    """Plain local training loop: iterate *dataset* for *epoch* passes,
    printing the summed batch loss after every pass."""
    print(
        'model is {}, loss is {}, optimizer is {}'.format(
            model,
            loss,
            optimizer))
    loader = DataLoader(dataset, batch_size=batch_size)
    for epoch_idx in range(epoch):
        running_loss = 0
        for features, target in loader:
            optimizer.zero_grad()
            batch_loss = loss(model(features), target)
            # accumulate before backward, matching detach() to avoid graph retention
            running_loss += batch_loss.detach().numpy()
            batch_loss.backward()
            optimizer.step()
        print('epoch is {}, epoch loss is {}'.format(epoch_idx, running_loss))
def compute_acc(pred, label, is_multy):
if is_multy:
pred = pred.argmax(axis=1)
else:
pred = (pred > 0.5) + 0
return float((pred == label).sum() / len(label))
def main(config="../../config.yaml", param="param_conf.yaml"):
if isinstance(param, str):
param = JobConfig.load_from_file(param)
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
epoch = param["epoch"]
lr = param["lr"]
batch_size = param.get("batch_size", -1)
is_multy = param["is_multy"]
data = dataset[param.get("dataset", "vehicle")]
global_seed(123)
if is_multy:
loss = t.nn.CrossEntropyLoss()
else:
loss = t.nn.BCELoss()
data_path = pathlib.Path(data_base_dir)
data_with_label = pandas.concat(
[
pandas.read_csv(data_path.joinpath(data["guest"]), index_col=0),
pandas.read_csv(data_path.joinpath(data["host"]), index_col=0),
]
).values
data = t.Tensor(data_with_label[:, 1:])
labels = t.Tensor(data_with_label[:, 0])
if is_multy:
labels = labels.type(t.int64)
else:
labels = labels.reshape((-1, 1))
ds = TensorDataset(data, labels)
input_shape = data.shape[1]
output_shape = 4 if is_multy else 1
out_act = t.nn.Softmax(dim=1) if is_multy else t.nn.Sigmoid()
model = t.nn.Sequential(
t.nn.Linear(input_shape, 16),
t.nn.ReLU(),
t.nn.Linear(16, output_shape),
out_act
)
if batch_size < 0:
batch_size = len(data_with_label)
optimizer = t.optim.Adam(model.parameters(), lr=lr)
fit(epoch, model, optimizer, loss, batch_size, ds)
pred_rs = model(data)
acc = compute_acc(pred_rs, labels, is_multy)
metric_summary = {"accuracy": acc}
print(metric_summary)
data_summary = {}
return data_summary, metric_summary
| 3,738 | 27.761538 | 76 | py |
FATE | FATE-master/examples/benchmark_quality/homo_nn/fate-homo_nn.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform, HomoNN, Evaluation
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
from pipeline.component.nn import TrainerParam, DatasetParam
# patch torch in place so the torch models/optimizers defined below can be
# parsed by the FATE pipeline
fate_torch_hook(t)
class dataset(object):
    """Registry of uploaded FATE table descriptors, keyed by benchmark name.

    Each entry maps the guest party to one table descriptor and the host
    parties to a list of descriptors (two identical host tables here).
    """

    breast = {
        "guest": {"name": "breast_homo_guest", "namespace": "experiment"},
        "host": [{"name": "breast_homo_host", "namespace": "experiment"}
                 for _ in range(2)]
    }
    vehicle = {
        "guest": {"name": "vehicle_scale_homo_guest", "namespace": "experiment"},
        "host": [{"name": "vehicle_scale_homo_host", "namespace": "experiment"}
                 for _ in range(2)]
    }
def main(config="../../config.yaml", param="param_conf.yaml", namespace=""):
num_host = 1
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
epoch = param["epoch"]
lr = param["lr"]
batch_size = param.get("batch_size", -1)
is_multy = param["is_multy"]
data = getattr(dataset, param.get("dataset", "vehicle"))
if is_multy:
loss = t.nn.CrossEntropyLoss()
else:
loss = t.nn.BCELoss()
input_shape = 18 if is_multy else 30
output_shape = 4 if is_multy else 1
out_act = t.nn.Softmax(dim=1) if is_multy else t.nn.Sigmoid()
model = t.nn.Sequential(
t.nn.Linear(input_shape, 16),
t.nn.ReLU(),
t.nn.Linear(16, output_shape),
out_act
)
optimizer = t.optim.Adam(model.parameters(), lr=lr)
guest_train_data = data["guest"]
host_train_data = data["host"][:num_host]
for d in [guest_train_data, *host_train_data]:
d["namespace"] = f"{d['namespace']}{namespace}"
hosts = config.parties.host[:num_host]
pipeline = PipeLine() .set_initiator(
role='guest',
party_id=config.parties.guest[0]) .set_roles(
guest=config.parties.guest[0],
host=hosts,
arbiter=config.parties.arbiter)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(
role='guest',
party_id=config.parties.guest[0]).component_param(
table=guest_train_data)
for i in range(num_host):
reader_0.get_party_instance(role='host', party_id=hosts[i]) \
.component_param(table=host_train_data[i])
data_transform_0 = DataTransform(name="data_transform_0", with_label=True)
data_transform_0.get_party_instance(
role='guest', party_id=config.parties.guest[0]) .component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(
role='host',
party_id=hosts).component_param(
with_label=True)
if is_multy:
ds_param = DatasetParam(
dataset_name='table',
flatten_label=True,
label_dtype='long')
else:
ds_param = DatasetParam(dataset_name='table')
homo_nn_0 = HomoNN(
name="homo_nn_0",
trainer=TrainerParam(
trainer_name='fedavg_trainer',
epochs=epoch,
batch_size=batch_size,
),
dataset=ds_param,
torch_seed=100,
optimizer=optimizer,
loss=loss,
model=model)
homo_nn_1 = HomoNN(name="homo_nn_1")
if is_multy:
eval_type = "multi"
else:
eval_type = "binary"
evaluation_0 = Evaluation(
name='evaluation_0',
eval_type=eval_type,
metrics=[
"accuracy",
"precision",
"recall"])
pipeline.add_component(reader_0)
pipeline.add_component(
data_transform_0, data=Data(
data=reader_0.output.data))
pipeline.add_component(homo_nn_0, data=Data(
train_data=data_transform_0.output.data))
pipeline.add_component(
homo_nn_1, data=Data(
test_data=data_transform_0.output.data), model=Model(
homo_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=homo_nn_0.output.data))
pipeline.compile()
pipeline.fit()
metric_summary = parse_summary_result(
pipeline.get_component("evaluation_0").get_summary())
nn_0_data = pipeline.get_component("homo_nn_0").get_output_data()
nn_1_data = pipeline.get_component("homo_nn_1").get_output_data()
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
if eval_type == "binary":
# metric_nn = {
# "score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label),
# "ks_2samp": classification_metric.KSTest.compute(nn_0_score, nn_1_score),
# "mAP_D_value": classification_metric.AveragePrecisionScore().compute(nn_0_score, nn_1_score, nn_0_label,
# nn_1_label)}
# metric_summary["distribution_metrics"] = {"homo_nn": metric_nn}
if metric_summary is None:
metric_summary = {}
metric_summary["accuracy"] = (
nn_0_score == nn_0_label).sum() / len(nn_0_label)
# elif eval_type == "multi":
# metric_nn = {
# "score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label)}
# metric_summary["distribution_metrics"] = {"homo_nn": metric_nn}
data_summary = dict(
train={"guest": guest_train_data["name"], **{f"host_{i}": host_train_data[i]["name"] for i in range(num_host)}},
test={"guest": guest_train_data["name"], **{f"host_{i}": host_train_data[i]["name"] for i in range(num_host)}}
)
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main(args.param)
| 7,460 | 34.870192 | 120 | py |
FATE | FATE-master/examples/benchmark_quality/hetero_nn/local-hetero_nn.py | import argparse
import numpy as np
import os
import pandas
from sklearn import metrics
from pipeline.utils.tools import JobConfig
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from torch.utils.data import DataLoader, TensorDataset
from federatedml.nn.backend.utils.common import global_seed
fate_torch_hook(t)
class HeteroLocalModel(t.nn.Module):
    """Local (non-federated) replica of a hetero NN.

    Chains two bottom towers (guest/host), an interactive layer that
    merges their outputs, and a top model producing the final score.
    """

    def __init__(self, guest_btn, host_btn, interactive, top):
        super().__init__()
        # attribute names kept identical to the original implementation
        self.guest_btn = guest_btn
        self.host_btn = host_btn
        self.inter = interactive
        self.top = top

    def forward(self, x1, x2):
        # x1 flows through the guest tower, x2 through the host tower
        guest_out = self.guest_btn(x1)
        host_out = self.host_btn(x2)
        merged = self.inter(guest_out, host_out)
        return self.top(merged)
def build(param, shape1, shape2, lr):
    """Assemble the local hetero-NN model and its optimizer.

    :param param: dict of layer-size hyper-parameters
    :param shape1: guest-side input feature dimension
    :param shape2: host-side input feature dimension
    :param lr: Adam learning rate
    :return: (model, optimizer)
    """
    # fix all RNGs so local runs are reproducible
    global_seed(101)
    hidden = param["bottom_layer_units"]
    guest_bottom = t.nn.Sequential(
        nn.Linear(shape1, hidden),
        nn.ReLU()
    )
    host_bottom = t.nn.Sequential(
        nn.Linear(shape2, hidden),
        nn.ReLU()
    )
    # t.nn.InteractiveLayer is injected into torch by fate_torch_hook
    interactive_layer = t.nn.InteractiveLayer(
        guest_dim=hidden,
        host_dim=hidden,
        host_num=1,
        out_dim=param["interactive_layer_units"])
    out_units = param["top_layer_units"]
    # sigmoid for a single output (binary), softmax otherwise
    activation = nn.Sigmoid() if out_units == 1 else nn.Softmax(dim=1)
    top_layer = t.nn.Sequential(
        t.nn.Linear(param["interactive_layer_units"], out_units),
        activation)
    model = HeteroLocalModel(
        guest_bottom,
        host_bottom,
        interactive_layer,
        top_layer)
    opt = t.optim.Adam(model.parameters(), lr=lr)
    return model, opt
def fit(epoch, model, optimizer, loss, batch_size, dataset):
    """Plain local training loop: *epoch* passes over *dataset*.

    The dataset must yield (guest_batch, host_batch, label) triples;
    per-epoch summed loss is printed for progress tracking.
    """
    print(
        'model is {}, loss is {}, optimizer is {}'.format(
            model,
            loss,
            optimizer))
    loader = DataLoader(dataset, batch_size=batch_size)
    for ep in range(epoch):
        running = 0
        for xa, xb, label in loader:
            optimizer.zero_grad()
            batch_loss = loss(model(xa, xb), label)
            # accumulate before backward; detach keeps it out of the graph
            running += batch_loss.detach().numpy()
            batch_loss.backward()
            optimizer.step()
        print('epoch is {}, epoch loss is {}'.format(ep, running))
def predict(model, Xa, Xb):
    """Score the full dataset and return the result as a numpy array.

    Note the deliberate swap: the model expects (guest, host) inputs,
    i.e. (Xb, Xa) in this file's naming.
    """
    scores = model(Xb, Xa)
    return scores.detach().numpy()
def main(config="../../config.yaml", param="./hetero_nn_breast_config.yaml"):
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
# prepare data
Xb = pandas.read_csv(
os.path.join(
data_base_dir,
data_guest),
index_col=idx)
Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
y = Xb[label_name]
out = Xa.drop(Xb.index)
Xa = Xa.drop(out.index)
Xb = Xb.drop(label_name, axis=1)
Xa = t.Tensor(Xa.values)
Xb = t.Tensor(Xb.values)
y = t.Tensor(y.values)
if param["loss"] == "categorical_crossentropy":
loss = t.nn.CrossEntropyLoss()
y = y.type(t.int64).flatten()
else:
loss = t.nn.BCELoss()
y = y.reshape((-1, 1))
model, opt = build(
param, Xb.shape[1], Xa.shape[1], lr=param['learning_rate'])
dataset = TensorDataset(Xb, Xa, y)
fit(epoch=param['epochs'], model=model, optimizer=opt,
batch_size=param['batch_size'], dataset=dataset, loss=loss)
eval_result = {}
for metric in param["metrics"]:
if metric.lower() == "auc":
predict_y = predict(model, Xa, Xb)
auc = metrics.roc_auc_score(y, predict_y)
eval_result["auc"] = auc
elif metric == "accuracy":
predict_y = np.argmax(predict(model, Xa, Xb), axis=1)
acc = metrics.accuracy_score(
y_true=y.detach().numpy(), y_pred=predict_y)
eval_result["accuracy"] = acc
print(eval_result)
data_summary = {}
return data_summary, eval_result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
main()
| 4,651 | 27.365854 | 79 | py |
FATE | FATE-master/examples/benchmark_quality/hetero_nn/fate-hetero_nn.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import torch as t
from torch import nn
from pipeline import fate_torch_hook
from pipeline.backend.pipeline import PipeLine
from pipeline.component.nn import DatasetParam
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data, Model
from pipeline.utils.tools import load_job_config, JobConfig
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
fate_torch_hook(t)
def build(param, shape1, shape2):
    """Create the four torch sub-models used by the HeteroNN pipeline.

    :param param: dict with layer-size hyper-parameters
    :param shape1: guest-side input dimension
    :param shape2: host-side input dimension
    :return: (guest_bottom, host_bottom, interactive_layer, top_layer)
    """
    hidden = param["bottom_layer_units"]
    guest_bottom = t.nn.Sequential(
        nn.Linear(shape1, hidden),
        nn.ReLU()
    )
    host_bottom = t.nn.Sequential(
        nn.Linear(shape2, hidden),
        nn.ReLU()
    )
    # InteractiveLayer is injected into torch.nn by fate_torch_hook
    interactive_layer = t.nn.InteractiveLayer(
        guest_dim=hidden,
        host_dim=hidden,
        host_num=1,
        out_dim=param["interactive_layer_units"])
    n_out = param["top_layer_units"]
    # sigmoid for a single-unit (binary) head, softmax for multiclass
    top_layer = t.nn.Sequential(
        t.nn.Linear(param["interactive_layer_units"], n_out),
        nn.Sigmoid() if n_out == 1 else nn.Softmax(dim=1))
    return guest_bottom, host_bottom, interactive_layer, top_layer
def main(
        config="../../config.yaml",
        param="./hetero_nn_breast_config.yaml",
        namespace=""):
    """Run the federated hetero-NN benchmark pipeline and collect metrics.

    Builds a guest/host pipeline (read -> transform -> intersect -> train
    hetero_nn_0 -> re-predict with hetero_nn_1 -> evaluate) and augments
    the evaluation summary with score-distribution metrics.

    Returns (data_summary, metric_summary) for fate_test.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    if isinstance(param, str):
        param = JobConfig.load_from_file(param)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    guest_train_data = {
        "name": param["guest_table_name"],
        "namespace": f"experiment{namespace}"}
    host_train_data = {
        "name": param["host_table_name"],
        "namespace": f"experiment{namespace}"}
    # guest initiates the job; both parties participate
    pipeline = PipeLine().set_initiator(
        role='guest', party_id=guest).set_roles(
        guest=guest, host=host)
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(
        role='guest',
        party_id=guest).component_param(
        table=guest_train_data)
    reader_0.get_party_instance(
        role='host',
        party_id=host).component_param(
        table=host_train_data)
    data_transform_0 = DataTransform(name="data_transform_0")
    # only the guest side holds the label
    data_transform_0.get_party_instance(
        role='guest',
        party_id=guest).component_param(
        with_label=True)
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(
        with_label=False)
    intersection_0 = Intersection(name="intersection_0")
    guest_bottom, host_bottom, interactive_layer, top_layer = build(
        param, param['shape1'], param['shape2'])
    # loss and dataset parameters depend on binary vs multiclass objective
    if param["loss"] == "categorical_crossentropy":
        loss = t.nn.CrossEntropyLoss()
        ds_param = DatasetParam(
            dataset_name='table',
            flatten_label=True,
            label_dtype='long')
    else:
        loss = t.nn.BCELoss()
        ds_param = DatasetParam(dataset_name='table')
    hetero_nn_0 = HeteroNN(
        name="hetero_nn_0",
        epochs=param["epochs"],
        interactive_layer_lr=param["learning_rate"],
        batch_size=param["batch_size"],
        seed=100,
        dataset=ds_param)
    guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
    host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
    # guest owns the top model; both sides get a bottom tower
    guest_nn_0.add_bottom_model(guest_bottom)
    guest_nn_0.add_top_model(top_layer)
    host_nn_0.add_bottom_model(host_bottom)
    hetero_nn_0.set_interactive_layer(interactive_layer)
    hetero_nn_0.compile(
        optimizer=t.optim.Adam(
            lr=param['learning_rate']),
        loss=loss)
    # hetero_nn_1 re-runs prediction with the trained model (consistency check)
    hetero_nn_1 = HeteroNN(name="hetero_nn_1")
    if param["loss"] == "categorical_crossentropy":
        eval_type = "multi"
    else:
        eval_type = "binary"
    evaluation_0 = Evaluation(name="evaluation_0", eval_type=eval_type)
    pipeline.add_component(reader_0)
    pipeline.add_component(
        data_transform_0, data=Data(
            data=reader_0.output.data))
    pipeline.add_component(
        intersection_0, data=Data(
            data=data_transform_0.output.data))
    pipeline.add_component(
        hetero_nn_0, data=Data(
            train_data=intersection_0.output.data))
    pipeline.add_component(
        hetero_nn_1, data=Data(
            test_data=intersection_0.output.data), model=Model(
            hetero_nn_0.output.model))
    pipeline.add_component(
        evaluation_0, data=Data(
            data=hetero_nn_0.output.data))
    pipeline.compile()
    pipeline.fit()
    # pull predictions from both NN components for distribution comparison
    nn_0_data = pipeline.get_component("hetero_nn_0").get_output_data()
    nn_1_data = pipeline.get_component("hetero_nn_1").get_output_data()
    nn_0_score = extract_data(nn_0_data, "predict_result")
    nn_0_label = extract_data(nn_0_data, "label")
    nn_1_score = extract_data(nn_1_data, "predict_result")
    nn_1_label = extract_data(nn_1_data, "label")
    nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
    nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
    metric_summary = parse_summary_result(
        pipeline.get_component("evaluation_0").get_summary())
    if eval_type == "binary":
        metric_nn = {
            "score_diversity_ratio": classification_metric.Distribution.compute(
                nn_0_score_label,
                nn_1_score_label),
            "ks_2samp": classification_metric.KSTest.compute(
                nn_0_score,
                nn_1_score),
            "mAP_D_value": classification_metric.AveragePrecisionScore().compute(
                nn_0_score,
                nn_1_score,
                nn_0_label,
                nn_1_label)}
        metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
    elif eval_type == "multi":
        metric_nn = {
            "score_diversity_ratio": classification_metric.Distribution.compute(
                nn_0_score_label, nn_1_score_label)}
        metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
    data_summary = {
        "train": {
            "guest": guest_train_data["name"],
            "host": host_train_data["name"]},
        "test": {
            "guest": guest_train_data["name"],
            "host": host_train_data["name"]}}
    return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 7,650 | 32.853982 | 81 | py |
FATE | FATE-master/examples/benchmark_quality/hetero_nn_pytorch/local-hetero_nn.py | import argparse
import numpy as np
import os
from tensorflow import keras
import pandas
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import optimizers
from sklearn import metrics
from pipeline.utils.tools import JobConfig
from sklearn.preprocessing import LabelEncoder
import torch as t
from torch import nn
from torch.utils.data import Dataset, DataLoader
import tqdm
from pipeline import fate_torch_hook
fate_torch_hook(t)
class TestModel(t.nn.Module):
    """Local stand-in for the hetero NN: guest/host bottom towers whose
    outputs are merged through two linear "interactive" projections and
    fed to the guest top model."""

    def __init__(self, guest_input_shape, host_input_shape):
        super(TestModel, self).__init__()

        def _tower(in_dim):
            # shared shape for both bottom towers: in_dim -> 10 -> 8
            return t.nn.Sequential(
                nn.Linear(in_dim, 10, True),
                nn.ReLU(),
                nn.Linear(10, 8, True),
                nn.ReLU()
            )

        # NB: creation order matters for seeded weight initialisation
        self.guest_bottom = _tower(guest_input_shape)
        self.host_bottom = _tower(host_input_shape)
        self.inter_a = t.nn.Linear(8, 4, True)
        self.inter_b = t.nn.Linear(8, 4, True)
        self.top_model_guest = t.nn.Sequential(
            nn.Linear(4, 1, True),
            nn.Sigmoid()
        )

    def forward(self, data):
        # data is a (guest_tensor, host_tensor) pair
        x_guest = data[0].type(t.float)
        x_host = data[1].type(t.float)
        merged = self.inter_a(self.guest_bottom(x_guest)) \
            + self.inter_b(self.host_bottom(x_host))
        return self.top_model_guest(merged)

    def predict(self, data):
        # inference helper returning a numpy array
        return self.forward(data).detach().numpy()
class TestDataset(Dataset):
    """Zips guest features, host features and labels into one Dataset."""

    def __init__(self, guest_data, host_data, label):
        super(TestDataset, self).__init__()
        # original attribute names preserved
        self.g, self.h, self.l = guest_data, host_data, label

    def __getitem__(self, idx):
        return tuple(part[idx] for part in (self.g, self.h, self.l))

    def __len__(self):
        return len(self.l)
def build(param, shape1, shape2):
    """Construct the local model; *param* is accepted for API symmetry only."""
    del param  # intentionally unused
    return TestModel(shape1, shape2)
def main(config="./config.yaml", param="./hetero_nn_breast_config.yaml"):
try:
if isinstance(config, str):
config = JobConfig.load_from_file(config)
data_base_dir = config["data_base_dir"]
else:
data_base_dir = config.data_base_dir
if isinstance(param, str):
param = JobConfig.load_from_file(param)
data_guest = param["data_guest"]
data_host = param["data_host"]
idx = param["idx"]
label_name = param["label_name"]
# prepare data
Xb = pandas.read_csv(os.path.join(data_base_dir, data_guest), index_col=idx)
Xa = pandas.read_csv(os.path.join(data_base_dir, data_host), index_col=idx)
y = Xb[label_name]
out = Xa.drop(Xb.index)
Xa = Xa.drop(out.index)
Xb = Xb.drop(label_name, axis=1)
# torch model
model = build(param, Xb.shape[1], Xa.shape[1])
Xb = t.Tensor(Xb.values)
Xa = t.Tensor(Xa.values)
y = t.Tensor(y.values)
dataset = TestDataset(Xb, Xa, y)
batch_size = len(dataset) if param['batch_size'] == -1 else param['batch_size']
dataloader = DataLoader(dataset, batch_size=batch_size)
optimizer = t.optim.Adam(lr=param['learning_rate']).to_torch_instance(model.parameters())
if param['eval_type'] == 'binary':
loss_fn = t.nn.BCELoss()
for i in tqdm.tqdm(range(param['epochs'])):
for gd, hd, label in dataloader:
optimizer.zero_grad()
pred = model([gd, hd])
loss = loss_fn(pred.flatten(), label.type(t.float32))
loss.backward()
optimizer.step()
eval_result = {}
for metric in param["metrics"]:
if metric.lower() == "auc":
predict_y = model.predict([Xb, Xa])
auc = metrics.roc_auc_score(y, predict_y)
eval_result["auc"] = auc
elif metric == "accuracy":
predict_y = np.argmax(model.predict([Xb, Xa]), axis=1)
predict_y = label_encoder.inverse_transform(predict_y)
acc = metrics.accuracy_score(y_true=labels, y_pred=predict_y)
eval_result["accuracy"] = acc
data_summary = {}
except Exception as e:
print(e)
return data_summary, eval_result
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY SKLEARN JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 4,792 | 30.741722 | 97 | py |
FATE | FATE-master/examples/benchmark_quality/hetero_nn_pytorch/fate-hetero_nn.py | import argparse
from collections import OrderedDict
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroNN
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.component import Evaluation
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config, JobConfig
from pipeline.interface import Model
from federatedml.evaluation.metrics import classification_metric
from fate_test.utils import extract_data, parse_summary_result
from pipeline import fate_torch_hook
import torch as t
from torch import nn
from torch.nn import init
from torch import optim
from pipeline import fate_torch as ft
fate_torch_hook(t)
def main(config="./config.yaml", param="./hetero_nn_breast_config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
if isinstance(param, str):
param = JobConfig.load_from_file(param)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": param["guest_table_name"], "namespace": f"experiment{namespace}"}
host_train_data = {"name": param["host_table_name"], "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(role='guest', party_id=guest).component_param(with_label=True)
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
intersection_0 = Intersection(name="intersection_0")
guest_input_shape = param['guest_input_shape']
host_input_shape = param['host_input_shape']
# define model structures
bottom_model_guest = t.nn.Sequential(
nn.Linear(guest_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
bottom_model_host = t.nn.Sequential(
nn.Linear(host_input_shape, 10, True),
nn.ReLU(),
nn.Linear(10, 8, True),
nn.ReLU()
)
interactive_layer = t.nn.Linear(8, 4, True)
top_model_guest = t.nn.Sequential(
nn.Linear(4, 1, True),
nn.Sigmoid()
)
loss_fn = nn.BCELoss()
opt: ft.optim.Adam = optim.Adam(lr=param['learning_rate'])
hetero_nn_0 = HeteroNN(name="hetero_nn_0", epochs=param["epochs"],
interactive_layer_lr=param["learning_rate"], batch_size=param["batch_size"],
early_stop="diff")
guest_nn_0 = hetero_nn_0.get_party_instance(role='guest', party_id=guest)
guest_nn_0.add_bottom_model(bottom_model_guest)
guest_nn_0.add_top_model(top_model_guest)
guest_nn_0.set_interactve_layer(interactive_layer)
host_nn_0 = hetero_nn_0.get_party_instance(role='host', party_id=host)
host_nn_0.add_bottom_model(bottom_model_host)
# do remember to compile
hetero_nn_0.compile(opt, loss=loss_fn)
hetero_nn_1 = HeteroNN(name="hetero_nn_1")
evaluation_0 = Evaluation(name="evaluation_0", eval_type=param['eval_type'])
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(intersection_0, data=Data(data=data_transform_0.output.data))
pipeline.add_component(hetero_nn_0, data=Data(train_data=intersection_0.output.data))
pipeline.add_component(hetero_nn_1, data=Data(test_data=intersection_0.output.data),
model=Model(hetero_nn_0.output.model))
pipeline.add_component(evaluation_0, data=Data(data=hetero_nn_0.output.data))
pipeline.compile()
pipeline.fit()
nn_0_data = pipeline.get_component("hetero_nn_0").get_output_data()
nn_1_data = pipeline.get_component("hetero_nn_1").get_output_data()
nn_0_score = extract_data(nn_0_data, "predict_result")
nn_0_label = extract_data(nn_0_data, "label")
nn_1_score = extract_data(nn_1_data, "predict_result")
nn_1_label = extract_data(nn_1_data, "label")
nn_0_score_label = extract_data(nn_0_data, "predict_result", keep_id=True)
nn_1_score_label = extract_data(nn_1_data, "predict_result", keep_id=True)
metric_summary = parse_summary_result(pipeline.get_component("evaluation_0").get_summary())
eval_type = param['eval_type']
if eval_type == "binary":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label),
"ks_2samp": classification_metric.KSTest.compute(nn_0_score, nn_1_score),
"mAP_D_value": classification_metric.AveragePrecisionScore().compute(nn_0_score, nn_1_score, nn_0_label,
nn_1_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
elif eval_type == "multi":
metric_nn = {
"score_diversity_ratio": classification_metric.Distribution.compute(nn_0_score_label, nn_1_score_label)}
metric_summary["distribution_metrics"] = {"hetero_nn": metric_nn}
data_summary = {"train": {"guest": guest_train_data["name"], "host": host_train_data["name"]},
"test": {"guest": guest_train_data["name"], "host": host_train_data["name"]}
}
return data_summary, metric_summary
if __name__ == "__main__":
parser = argparse.ArgumentParser("BENCHMARK-QUALITY PIPELINE JOB")
parser.add_argument("-config", type=str,
help="config file")
parser.add_argument("-param", type=str,
help="config file for params")
args = parser.parse_args()
if args.config is not None:
main(args.config, args.param)
else:
main()
| 6,139 | 40.768707 | 116 | py |
FATE | FATE-master/python/fate_test/fate_test/scripts/data_cli.py | import os
import re
import sys
import time
import uuid
import json
from datetime import timedelta
import click
from pathlib import Path
from ruamel import yaml
from fate_test import _config
from fate_test._config import Config
from fate_test._client import Clients
from fate_test._io import LOGGER, echo
from fate_test.scripts._options import SharedOptions
from fate_test.scripts._utils import _upload_data, _load_testsuites, _delete_data, _big_data_task
@click.group(name="data")
def data_group():
"""
upload or delete data in suite config files
"""
...
@data_group.command("upload")
@click.option('-i', '--include', required=False, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option("-t", "--config-type", type=click.Choice(["min_test", "all_examples"]), default="min_test",
help="config file")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-s', '--suite-type', required=False, type=click.Choice(["testsuite", "benchmark"]), default="testsuite",
help="suite type")
@click.option('-r', '--role', type=str, default='all', help="role to process, default to `all`. "
"use option likes: `guest_0`, `host_0`, `host`")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def upload(ctx, include, exclude, glob, suite_type, role, config_type, **kwargs):
"""
upload data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if len(include) != 0:
echo.echo("loading testsuites:")
suffix = "benchmark.json" if suite_type == "benchmark" else "testsuite.json"
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix=suffix, suite_type=suite_type)
for suite in suites:
if role != "all":
suite.dataset = [d for d in suite.dataset if re.match(d.role_str, role)]
echo.echo(f"\tdataset({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
client_upload(suites=suites, config_inst=config_inst, namespace=namespace)
else:
config = get_config(config_inst)
if config_type == 'min_test':
config_file = config.min_test_data_config
else:
config_file = config.all_examples_data_config
with open(config_file, 'r', encoding='utf-8') as f:
upload_data = json.loads(f.read())
echo.echo(f"\tdataset({len(upload_data['data'])}) {config_file}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
data_upload(client, config_inst, upload_data)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("delete")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *benchmark.json under these paths")
@click.option('-e', '--exclude', type=click.Path(exists=True), multiple=True,
help="exclude *benchmark.json under these paths")
@click.option('-g', '--glob', type=str,
help="glob string to filter sub-directory of path specified by <include>")
@click.option('-s', '--suite-type', required=True, type=click.Choice(["testsuite", "benchmark"]), help="suite type")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def delete(ctx, include, exclude, glob, yes, suite_type, **kwargs):
"""
delete data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
suffix = "benchmark.json" if suite_type == "benchmark" else "testsuite.json"
suites = _load_testsuites(includes=include, excludes=exclude, glob=glob,
suffix=suffix, suite_type=suite_type)
if not yes and not click.confirm("running?"):
return
for suite in suites:
echo.echo(f"\tdataset({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
for i, suite in enumerate(suites):
_delete_data(client, suite)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("generate")
@click.option('-i', '--include', required=True, type=click.Path(exists=True), multiple=True, metavar="<include>",
help="include *testsuite.json / *benchmark.json under these paths")
@click.option('-ht', '--host-data-type', default='tag_value', type=click.Choice(['dense', 'tag', 'tag_value']),
help="Select the format of the host data")
@click.option('-p', '--encryption-type', type=click.Choice(['sha256', 'md5']),
help="Entry ID encryption method for, sha256 and md5")
@click.option('-m', '--match-rate', default=1.0, type=float,
help="Intersection rate relative to guest, between [0, 1]")
@click.option('-s', '--sparsity', default=0.2, type=float,
help="The sparsity of tag data, The value is between (0-1)")
@click.option('-ng', '--guest-data-size', type=int, default=10000,
help="Set guest data set size, not less than 100")
@click.option('-nh', '--host-data-size', type=int,
help="Set host data set size, not less than 100")
@click.option('-fg', '--guest-feature-num', type=int, default=20,
help="Set guest feature dimensions")
@click.option('-fh', '--host-feature-num', type=int, default=200,
help="Set host feature dimensions; the default is equal to the number of guest's size")
@click.option('-o', '--output-path', type=click.Path(exists=True),
help="Customize the output path of generated data")
@click.option('--force', is_flag=True, default=False,
help="Overwrite existing file")
@click.option('--split-host', is_flag=True, default=False,
help="Divide the amount of host data equally among all the host tables in TestSuite")
@click.option('--upload-data', is_flag=True, default=False,
help="Generated data will be uploaded")
@click.option('--remove-data', is_flag=True, default=False,
help="The generated data will be deleted")
@click.option('--parallelize', is_flag=True, default=False,
help="It is directly used to upload data, and will not generate data")
@click.option('--use-local-data', is_flag=True, default=False,
help="The existing data of the server will be uploaded, This parameter is not recommended for "
"distributed applications")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def generate(ctx, include, host_data_type, encryption_type, match_rate, sparsity, guest_data_size,
host_data_size, guest_feature_num, host_feature_num, output_path, force, split_host, upload_data,
remove_data, use_local_data, parallelize, **kwargs):
"""
create data defined in suite config files
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
if ctx.obj["extend_sid"] is not None:
config_inst.extend_sid = ctx.obj["extend_sid"]
if ctx.obj["auto_increasing_sid"] is not None:
config_inst.auto_increasing_sid = ctx.obj["auto_increasing_sid"]
if parallelize and upload_data:
upload_data = False
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
echo.echo("loading testsuites:")
if host_data_size is None:
host_data_size = guest_data_size
suites = _load_testsuites(includes=include, excludes=tuple(), glob=None)
suites += _load_testsuites(includes=include, excludes=tuple(), glob=None,
suffix="benchmark.json", suite_type="benchmark")
for suite in suites:
if upload_data:
echo.echo(f"\tdataget({len(suite.dataset)}) dataset({len(suite.dataset)}) {suite.path}")
else:
echo.echo(f"\tdataget({len(suite.dataset)}) {suite.path}")
if not yes and not click.confirm("running?"):
return
_big_data_task(include, guest_data_size, host_data_size, guest_feature_num, host_feature_num, host_data_type,
config_inst, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize)
if upload_data:
if use_local_data:
_config.use_local_data = 0
_config.data_switch = remove_data
client_upload(suites=suites, config_inst=config_inst, namespace=namespace, output_path=output_path)
@data_group.command("download")
@click.option("-t", "--type", type=click.Choice(["mnist"]), default="mnist",
help="config file")
@click.option('-o', '--output-path', type=click.Path(exists=True),
help="output path of mnist data, the default path is examples/data")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def download_mnists(ctx, output_path, **kwargs):
"""
download mnist data for flow
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
config_inst = ctx.obj["config"]
yes = ctx.obj["yes"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if output_path is None:
config = get_config(config_inst)
output_path = str(config.data_base_dir) + "/examples/data/"
if not yes and not click.confirm("running?"):
return
try:
download_mnist(Path(output_path), "mnist_train")
download_mnist(Path(output_path), "mnist_eval", is_train=False)
except Exception:
exception_id = uuid.uuid1()
echo.echo(f"exception_id={exception_id}")
LOGGER.exception(f"exception id: {exception_id}")
finally:
echo.stdout_newline()
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
@data_group.command("query_schema")
@click.option('-cpn', '--component-name', required=False, type=str, help="component name", default='dataio_0')
@click.option('-j', '--job-id', required=True, type=str, help="job id")
@click.option('-r', '--role', required=True, type=click.Choice(["guest", "host", "arbiter"]), help="job id")
@click.option('-p', '--party-id', required=True, type=str, help="party id")
@SharedOptions.get_shared_options(hidden=True)
@click.pass_context
def query_schema(ctx, component_name, job_id, role, party_id, **kwargs):
"""
query the meta of the output data of a component
"""
ctx.obj.update(**kwargs)
ctx.obj.post_process()
namespace = ctx.obj["namespace"]
yes = ctx.obj["yes"]
config_inst = ctx.obj["config"]
echo.welcome()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
if not yes and not click.confirm("running?"):
return
with Clients(config_inst) as client:
query_component_output_data(client, config_inst, component_name, job_id, role, party_id)
echo.farewell()
echo.echo(f"testsuite namespace: {namespace}", fg='red')
def get_config(conf: Config):
    """Return the given config object unchanged (kept for API symmetry)."""
    return conf
def query_component_output_data(clients: Clients, config: Config, component_name, job_id, role, party_id):
    """Print the header (schema) of a component's output data table.

    Resolves which configured client corresponds to (role, party_id),
    fetches the output table of *component_name* for *job_id*, and echoes
    the header columns. Failures are logged with a unique exception id
    rather than propagated (CLI-friendly behaviour).
    """
    roles = config.role
    clients_role = None
    # map (role, party_id) back to the "role_index" client key
    for k, v in roles.items():
        if int(party_id) in v and k == role:
            clients_role = role + "_" + str(v.index(int(party_id)))
    try:
        if clients_role is None:
            raise ValueError(f"party id {party_id} does not exist")
        try:
            table_info = clients[clients_role].output_data_table(job_id=job_id, role=role, party_id=party_id,
                                                                component_name=component_name)
            table_info = clients[clients_role].table_info(table_name=table_info['name'],
                                                          namespace=table_info['namespace'])
        except Exception as e:
            raise RuntimeError(f"An exception occurred while getting data {clients_role}<-{component_name}") from e
        echo.echo("query_component_output_data result: {}".format(table_info))
        try:
            header = table_info['data']['schema']['header']
        except (KeyError, TypeError) as e:
            # fix: dict subscripting raises KeyError/TypeError -- the original
            # caught ValueError, which these accesses never raise
            raise ValueError(f"Obtain header from table error, error msg: {e}")
        # skip the id column; report (index, name) for each feature column
        result = list(enumerate(header[1:]))
        echo.echo("Queried header is {}".format(result))
    except Exception:
        exception_id = uuid.uuid1()
        echo.echo(f"exception_id={exception_id}")
        LOGGER.exception(f"exception id: {exception_id}")
    finally:
        echo.stdout_newline()
def download_mnist(base, name, is_train=True):
    """Download MNIST via torchvision and convert it to the "vision" folder
    layout under base/name.

    Layout: images/<idx>.jpg, a `targets` file of "<idx>,<label>" lines,
    a `filenames` file listing the ids, and a config.yaml describing types.
    """
    import torchvision
    dataset = torchvision.datasets.MNIST(
        root=base.joinpath(".cache"), train=is_train, download=True
    )
    converted_path = base.joinpath(name)
    converted_path.mkdir(exist_ok=True)
    inputs_path = converted_path.joinpath("images")
    inputs_path.mkdir(exist_ok=True)
    targets_path = converted_path.joinpath("targets")
    config_path = converted_path.joinpath("config.yaml")
    filenames_path = converted_path.joinpath("filenames")
    with filenames_path.open("w") as filenames:
        with targets_path.open("w") as targets:
            for idx, (img, target) in enumerate(dataset):
                # zero-padded per-sample id used in all three outputs
                filename = f"{idx:05d}"
                # save img (fix: use the computed id -- the original wrote the
                # literal name "(unknown)", overwriting a single file and
                # leaving `filename` unused)
                img.save(inputs_path.joinpath(f"{filename}.jpg"))
                # save target
                targets.write(f"{filename},{target}\n")
                # save filenames
                filenames.write(f"{filename}\n")
    config = {
        "type": "vision",
        "inputs": {"type": "images", "ext": "jpg", "PIL_mode": "L"},
        "targets": {"type": "integer"},
    }
    with config_path.open("w") as f:
        yaml.safe_dump(config, f, indent=2, default_flow_style=False)
def client_upload(suites, config_inst, namespace, output_path=None):
    """
    Upload the data of every test suite through one shared client session.

    Failures are isolated per suite: an exception while uploading a suite is
    logged with a generated exception id and the loop continues with the next
    suite instead of aborting the whole run.
    """
    with Clients(config_inst) as client:
        for i, suite in enumerate(suites):
            # noinspection PyBroadException
            try:
                # progress banner: "[k/total]start at <timestamp> <suite path>"
                echo.echo(f"[{i + 1}/{len(suites)}]start at {time.strftime('%Y-%m-%d %X')} {suite.path}", fg='red')
                try:
                    _upload_data(client, suite, config_inst, output_path)
                except Exception as e:
                    # wrap to record which suite failed before the outer handler logs it
                    raise RuntimeError(f"exception occur while uploading data for {suite.path}") from e
            except Exception:
                # best-effort: log with a unique id, then move on to the next suite
                exception_id = uuid.uuid1()
                echo.echo(f"exception in {suite.path}, exception_id={exception_id}")
                LOGGER.exception(f"exception id: {exception_id}")
            finally:
                echo.stdout_newline()
    echo.farewell()
    echo.echo(f"testsuite namespace: {namespace}", fg='red')
def data_upload(clients: Clients, conf: Config, upload_config):
    """
    Upload every data file described in ``upload_config["data"]`` through the
    "guest_0" flow client, wait for each upload job to finish, and verify the
    uploaded row count against the expected count.

    Per-file failures are logged with a generated exception id and the loop
    continues with the next file (the hard-fail behavior is commented out).
    """
    def _await_finish(job_id, task_name=None):
        # Poll job status once per second until it leaves running/waiting.
        # deadline is effectively "never" (now + sys.maxsize seconds).
        deadline = time.time() + sys.maxsize
        start = time.time()
        param = dict(
            job_id=job_id,
            role=None
        )
        while True:
            stdout = clients["guest_0"].flow_client("job/query", param)
            status = stdout["data"][0]["f_status"]
            elapse_seconds = int(time.time() - start)
            date = time.strftime('%Y-%m-%d %X')
            if task_name:
                log_msg = f"[{date}][{task_name}]{status}, elapse: {timedelta(seconds=elapse_seconds)}"
            else:
                log_msg = f"[{date}]{job_id} {status}, elapse: {timedelta(seconds=elapse_seconds)}"
            if (status == "running" or status == "waiting") and time.time() < deadline:
                # still in progress: overwrite the same console line and retry
                print(log_msg, end="\r")
                time.sleep(1)
                continue
            else:
                print(" " * 60, end="\r")  # clear the in-place progress line
                echo.echo(log_msg)
                return status
    task_data = upload_config["data"]
    for i, data in enumerate(task_data):
        format_msg = f"@{data['file']} >> {data['namespace']}.{data['table_name']}"
        echo.echo(f"[{time.strftime('%Y-%m-%d %X')}]uploading {format_msg}")
        try:
            # resolve the (possibly relative) file path against the configured data base dir
            data["file"] = str(os.path.join(conf.data_base_dir, data["file"]))
            param = dict(
                file=data["file"],
                head=data["head"],
                partition=data["partition"],
                table_name=data["table_name"],
                namespace=data["namespace"]
            )
            # drop=1: replace any existing table with the same name/namespace
            stdout = clients["guest_0"].flow_client("data/upload", param, drop=1)
            job_id = stdout.get('jobId', None)
            echo.echo(f"[{time.strftime('%Y-%m-%d %X')}]upload done {format_msg}, job_id={job_id}\n")
            if job_id is None:
                # no job id means the server refused the upload (table exists)
                echo.echo("table already exist. To upload again, Please add '-f 1' in start cmd")
                continue
            _await_finish(job_id)
            param = dict(
                table_name=data["table_name"],
                namespace=data["namespace"]
            )
            stdout = clients["guest_0"].flow_client("table/info", param)
            # sanity check: uploaded row count must match the expected count
            count = stdout["data"]["count"]
            if count != data["count"]:
                raise AssertionError("Count of upload file is not as expect, count is: {},"
                                     "expect is: {}".format(count, data["count"]))
            echo.echo(f"[{time.strftime('%Y-%m-%d %X')}] check_data_out {stdout} \n")
        except Exception as e:
            # best-effort: log with a unique id and continue with the next file
            exception_id = uuid.uuid1()
            echo.echo(f"exception in {data['file']}, exception_id={exception_id}")
            LOGGER.exception(f"exception id: {exception_id}")
            echo.echo(f"upload {i + 1}th data {data['table_name']} fail, exception_id: {exception_id}")
            # raise RuntimeError(f"exception occur while uploading data for {data['file']}") from e
        finally:
            echo.stdout_newline()
| 19,039 | 43.176334 | 119 | py |
FATE | FATE-master/python/fate_client/setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
# Packages shipped with the fate-client distribution: the flow CLI, the flow
# SDK, and the pipeline DSL together with its nn backend and utility modules.
packages = [
    "flow_client",
    "flow_client.flow_cli",
    "flow_client.flow_cli.commands",
    "flow_client.flow_cli.utils",
    "flow_sdk",
    "flow_sdk.client",
    "flow_sdk.client.api",
    "pipeline",
    "pipeline.backend",
    "pipeline.component",
    "pipeline.component.nn",
    "pipeline.component.nn.backend",
    "pipeline.component.nn.backend.torch",
    "pipeline.component.nn.models",
    "pipeline.demo",
    "pipeline.interface",
    "pipeline.param",
    "pipeline.parser",
    "pipeline.runtime",
    "pipeline.test",
    "pipeline.utils",
    "pipeline.utils.invoker",
]
# Include every non-code file found inside the packages above.
package_data = {"": ["*"]}
# Runtime dependencies; version ranges pin known-compatible releases.
install_requires = [
    "click>=7.1.2,<8.0.0",
    "loguru>=0.6.0",
    "poetry>=0.12",
    "pandas>=1.1.5",
    "requests>=2.24.0,<3.0.0",
    "requests_toolbelt>=0.9.1,<0.10.0",
    "ruamel.yaml>=0.16.10,<0.17.0",
    "setuptools>=65.5.1",
]
# Console entry points: installs the `flow` and `pipeline` CLI commands.
entry_points = {
    "console_scripts": [
        "flow = flow_client.flow:flow_cli",
        "pipeline = pipeline.pipeline_cli:cli",
    ]
}
# Assembled keyword arguments for setuptools.setup().
setup_kwargs = {
    "name": "fate-client",
    "version": "1.11.2",
    "description": "Clients for FATE, including flow_client and pipeline",
    "long_description": "FATE Client\n===========\n\nTools for interacting with FATE.\n\nquick start\n-----------\n\n1. (optional) create virtual env\n\n .. code-block:: bash\n\n python -m venv venv\n source venv/bin/activate\n\n\n2. install FATE Client\n\n .. code-block:: bash\n\n pip install fate-client\n\n\nPipeline\n========\n\nA high-level python API that allows user to design, start,\nand query FATE jobs in a sequential manner. For more information,\nplease refer to this `guide <./pipeline/README.rst>`__\n\nInitial Configuration\n---------------------\n\n1. Configure server information\n\n .. code-block:: bash\n\n # configure values in pipeline/config.yaml\n # use real ip address to configure pipeline\n pipeline init --ip 127.0.0.1 --port 9380 --log-directory ./logs\n\n\nFATE Flow Command Line Interface (CLI) v2\n=========================================\n\nA command line interface providing series of commands for user to design, start,\nand query FATE jobs. For more information, please refer to this `guide <./flow_client/README.rst>`__\n\nInitial Configuration\n---------------------\n\n1. Configure server information\n\n .. code-block:: bash\n\n # configure values in conf/service_conf.yaml\n flow init -c /data/projects/fate/conf/service_conf.yaml\n # use real ip address to initialize cli\n flow init --ip 127.0.0.1 --port 9380\n\n",
    "author": "FederatedAI",
    "author_email": "contact@FedAI.org",
    "maintainer": None,
    "maintainer_email": None,
    "url": "https://fate.fedai.org/",
    "packages": packages,
    "package_data": package_data,
    "install_requires": install_requires,
    "entry_points": entry_points,
    "python_requires": ">=3.6,<4.0",
}
setup(**setup_kwargs)
| 2,995 | 43.058824 | 1,418 | py |
FATE | FATE-master/python/fate_client/pipeline/__init__.py | try:
from pipeline.component.nn.backend.torch.import_hook import fate_torch_hook
from pipeline.component.nn.backend import torch as fate_torch
except ImportError:
fate_torch_hook, fate_torch = None, None
except ValueError:
fate_torch_hook, fate_torch = None, None
__all__ = ['fate_torch_hook', 'fate_torch']
| 325 | 31.6 | 79 | py |
FATE | FATE-master/python/fate_client/pipeline/param/ftl_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import copy
from pipeline.param.intersect_param import IntersectParam
from types import SimpleNamespace
from pipeline.param.base_param import BaseParam
from pipeline.param import consts
from pipeline.param.encrypt_param import EncryptParam
from pipeline.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from pipeline.param.predict_param import PredictParam
from pipeline.param.callback_param import CallbackParam
class FTLParam(BaseParam):
    """Parameter holder for the federated transfer learning (FTL) component."""

    def __init__(self, alpha=1, tol=0.000001,
                 n_iter_no_change=False, validation_freqs=None, optimizer={'optimizer': 'Adam', 'learning_rate': 0.01},
                 nn_define={}, epochs=1, intersect_param=IntersectParam(consts.RSA), config_type='keras', batch_size=-1,
                 encrypte_param=EncryptParam(),
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(mode="confusion_opt"),
                 predict_param=PredictParam(), mode='plain', communication_efficient=False,
                 local_round=5, callback_param=CallbackParam()):
        """
        Args:
            alpha: float, a loss coefficient defined in paper, it defines the importance of alignment loss
            tol: float, loss tolerance
            n_iter_no_change: bool, check loss convergence or not
            validation_freqs: None or positive integer or container object in python. Do validation in training process or Not.
                              if equals None, will not do validation in train process;
                              if equals positive integer, will validate data every validation_freqs epochs passes;
                              if container object in python, will validate data if epochs belong to this container.
                                e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
                              Default: None
                              The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to
                              speed up training by skipping validation rounds. When it is larger than 1, a number which is
                              divisible by "epochs" is recommended, otherwise, you will miss the validation scores
                              of last training epoch.
            optimizer: optimizer method, accept following types:
                1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
                2. a dict, with a required key-value pair keyed by "optimizer",
                    with optional key-value pairs such as learning rate.
                defaults to "SGD"
            nn_define: dict, a dict represents the structure of neural network, it can be output by tf-keras
            epochs: int, epochs num
            intersect_param: define the intersect method
            config_type: now only 'tf-keras' is supported
            batch_size: batch size when computing transformed feature embedding, -1 use full data.
            encrypte_param: encrypted param
            encrypted_mode_calculator_param:
            predict_param: predict param
            mode:
                plain: will not use any encrypt algorithms, data exchanged in plaintext
                encrypted: use paillier to encrypt gradients
            communication_efficient:
                bool, will use communication efficient or not. when communication efficient is enabled, FTL model will
                update gradients by several local rounds using intermediate data
            local_round: local update round when using communication efficient
        """
        super(FTLParam, self).__init__()
        self.alpha = alpha
        self.tol = tol
        self.n_iter_no_change = n_iter_no_change
        self.validation_freqs = validation_freqs
        # deep-copy the dict defaults so instances never share the mutable
        # default objects in the signature (consistent with the params below)
        self.optimizer = copy.deepcopy(optimizer)
        self.nn_define = copy.deepcopy(nn_define)
        self.epochs = epochs
        self.intersect_param = copy.deepcopy(intersect_param)
        self.config_type = config_type
        self.batch_size = batch_size
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.encrypt_param = copy.deepcopy(encrypte_param)
        self.predict_param = copy.deepcopy(predict_param)
        self.mode = mode
        self.communication_efficient = communication_efficient
        self.local_round = local_round
        self.callback_param = copy.deepcopy(callback_param)

    def check(self):
        """Validate every field; raises ValueError/AssertionError on bad input.

        Side effect: normalizes ``self.optimizer`` into a SimpleNamespace.
        """
        self.intersect_param.check()
        self.encrypt_param.check()
        self.encrypted_mode_calculator_param.check()
        self.optimizer = self._parse_optimizer(self.optimizer)
        supported_config_type = ["keras"]
        if self.config_type not in supported_config_type:
            raise ValueError(f"config_type should be one of {supported_config_type}")
        if not isinstance(self.tol, (int, float)):
            raise ValueError("tol should be numeric")
        if not isinstance(self.epochs, int) or self.epochs <= 0:
            raise ValueError("epochs should be a positive integer")
        if self.nn_define and not isinstance(self.nn_define, dict):
            raise ValueError("bottom_nn_define should be a dict defining the structure of neural network")
        if self.batch_size != -1:
            if not isinstance(self.batch_size, int) \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(
                    " {} not supported, should be larger than 10 or -1 represent for all data".format(self.batch_size))
        if self.validation_freqs is None:
            pass
        elif isinstance(self.validation_freqs, int):
            if self.validation_freqs < 1:
                raise ValueError("validation_freqs should be larger than 0 when it's integer")
        # fix: collections.Container was removed in Python 3.10 — the ABC
        # aliases live in collections.abc since Python 3.3
        elif not isinstance(self.validation_freqs, collections.abc.Container):
            raise ValueError("validation_freqs should be None or positive integer or container")

        assert isinstance(self.communication_efficient, bool), 'communication efficient must be a boolean'
        assert self.mode in [
            'encrypted', 'plain'], 'mode options: encrpyted or plain, but {} is offered'.format(
            self.mode)

        self.check_positive_integer(self.epochs, 'epochs')
        self.check_positive_number(self.alpha, 'alpha')
        self.check_positive_integer(self.local_round, 'local round')

    @staticmethod
    def _parse_optimizer(opt):
        """
        Normalize an optimizer spec into SimpleNamespace(optimizer=..., kwargs=...).

        Examples:
            1. "optimize": "SGD"
            2. "optimize": {
                "optimizer": "SGD",
                "learning_rate": 0.05
            }
        """
        kwargs = {}
        if isinstance(opt, str):
            return SimpleNamespace(optimizer=opt, kwargs=kwargs)
        elif isinstance(opt, dict):
            optimizer = opt.get("optimizer", kwargs)
            if not optimizer:
                raise ValueError(f"optimizer config: {opt} invalid")
            # everything except the "optimizer" key is passed through as kwargs
            kwargs = {k: v for k, v in opt.items() if k != "optimizer"}
            return SimpleNamespace(optimizer=optimizer, kwargs=kwargs)
        else:
            raise ValueError(f"invalid type for optimize: {type(opt)}")
| 7,810 | 47.216049 | 127 | py |
FATE | FATE-master/python/fate_client/pipeline/param/hetero_nn_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import collections
from types import SimpleNamespace
from pipeline.param.base_param import BaseParam
from pipeline.param.callback_param import CallbackParam
from pipeline.param.cross_validation_param import CrossValidationParam
from pipeline.param.encrypt_param import EncryptParam
from pipeline.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from pipeline.param.predict_param import PredictParam
from pipeline.param import consts
class DatasetParam(BaseParam):
    """Parameter holder naming a dataset plus arbitrary keyword options for it."""

    def __init__(self, dataset_name=None, **kwargs):
        super().__init__()
        self.dataset_name = dataset_name
        # keep every extra keyword argument as the dataset's option dict
        self.param = kwargs

    def check(self):
        """Validate that dataset_name, when provided, is a string."""
        if self.dataset_name is not None:
            self.check_string(self.dataset_name, 'dataset_name')

    def to_dict(self):
        """Serialize to a plain dict with 'dataset_name' and 'param' keys."""
        return {'dataset_name': self.dataset_name, 'param': self.param}
class SelectorParam(object):
    """
    Settings for selective back propagation.

    Parameters
    ----------
    method: None or str
        back propagation select method, accept "relative" only, default: None
    selective_size: int
        deque size to use, store the most recent selective_size historical loss, default: 1024
    beta: int
        sample whose selective probability >= power(np.random, beta) will be selected
    min_prob: Numeric
        selective probability is max(min_prob, rank_rate)
    """

    def __init__(self, method=None, beta=1, selective_size=consts.SELECTIVE_SIZE, min_prob=0, random_state=None):
        self.method = method
        self.selective_size = selective_size
        self.beta = beta
        self.min_prob = min_prob
        self.random_state = random_state

    def check(self):
        """Validate each field in turn, raising ValueError on the first bad one."""
        if self.method is not None and self.method != "relative":
            raise ValueError('selective method should be None be "relative"')
        if not (isinstance(self.selective_size, int) and self.selective_size > 0):
            raise ValueError("selective size should be a positive integer")
        if not isinstance(self.beta, int):
            raise ValueError("beta should be integer")
        if not isinstance(self.min_prob, (float, int)):
            raise ValueError("min_prob should be numeric")
class CoAEConfuserParam(BaseParam):
    """
    Configuration for the CoAE label-protection mechanism, which trains an
    auto-encoder to replace true labels with fake soft labels.

    Proposed in "Batch Label Inference and Replacement Attacks in Black-Boxed
    Vertical Federated Learning" (https://arxiv.org/abs/2112.05409).

    Args:
        enable: boolean
            run CoAE or not
        epoch: None or int
            auto-encoder training epochs
        lr: float
            auto-encoder learning rate
        lambda1: float
            controls how different fake soft labels are from the true labels;
            larger values push the auto-encoder harder in that direction
        lambda2: float
            controls the entropy loss, see the original paper for details
        verbose: boolean
            print loss log while training auto encoder
    """

    def __init__(self, enable=False, epoch=50, lr=0.001, lambda1=1.0, lambda2=2.0, verbose=False):
        super().__init__()
        self.enable = enable
        self.epoch = epoch
        self.lr = lr
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        self.verbose = verbose

    def check(self):
        """Validate all fields; raises ValueError on the first invalid one."""
        self.check_boolean(self.enable, 'enable')
        if not (isinstance(self.epoch, int) and self.epoch > 0):
            raise ValueError("epoch should be a positive integer")
        if not isinstance(self.lr, float):
            raise ValueError('lr should be a float number')
        if not isinstance(self.lambda1, float):
            raise ValueError('lambda1 should be a float number')
        if not isinstance(self.lambda2, float):
            raise ValueError('lambda2 should be a float number')
        self.check_boolean(self.verbose, 'verbose')
class HeteroNNParam(BaseParam):
    """
    Parameters used for Hetero Neural Network.

    Parameters
    ----------
    task_type: str, task type of hetero nn model, one of 'classification', 'regression'.
    bottom_nn_define: a dict represents the structure of bottom neural network.
    interactive_layer_define: a dict represents the structure of interactive layer.
    interactive_layer_lr: float, the learning rate of interactive layer.
    top_nn_define: a dict represents the structure of top neural network.
    optimizer: optimizer method, accept following types:
        1. a string, one of "Adadelta", "Adagrad", "Adam", "Adamax", "Nadam", "RMSprop", "SGD"
        2. a dict, with a required key-value pair keyed by "optimizer",
            with optional key-value pairs such as learning rate.
        defaults to "SGD".
    loss: str, a string to define loss function used
    epochs: int, the maximum iteration for aggregation in training.
    batch_size : int, batch size when updating model.
        -1 means use all data in a batch. i.e. Not to use mini-batch strategy.
        defaults to -1.
    early_stop : str, accept 'diff' only in this version, default: 'diff'
        Method used to judge converge or not.
            a)	diff: Use difference of loss between two iterations to judge whether converge.
    tol: float, convergence tolerance used together with early_stop.
    encrypt_param: EncryptParam object, homomorphic-encryption settings.
    predict_param: PredictParam object, prediction settings.
    cv_param: CrossValidationParam object, cross-validation settings.
    selector_param: SelectorParam object, selective back-propagation settings.
    floating_point_precision: None or integer, if not None, means use floating_point_precision-bit to speed up calculation,
                               e.g.: convert an x to round(x * 2**floating_point_precision) during Paillier operation, divide
                                      the result by 2**floating_point_precision in the end.
    callback_param: CallbackParam object
    coae_param: CoAEConfuserParam object, label-protection (CoAE) settings.
    dataset: DatasetParam object, dataset selection and options.
    """

    def __init__(self,
                 task_type='classification',
                 bottom_nn_define=None,
                 top_nn_define=None,
                 config_type='pytorch',
                 interactive_layer_define=None,
                 interactive_layer_lr=0.9,
                 optimizer='SGD',
                 loss=None,
                 epochs=100,
                 batch_size=-1,
                 early_stop="diff",
                 tol=1e-5,
                 encrypt_param=EncryptParam(),
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(),
                 cv_param=CrossValidationParam(),
                 validation_freqs=None,
                 early_stopping_rounds=None,
                 metrics=None,
                 use_first_metric_only=True,
                 selector_param=SelectorParam(),
                 floating_point_precision=23,
                 callback_param=CallbackParam(),
                 coae_param=CoAEConfuserParam(),
                 dataset=DatasetParam()
                 ):
        super(HeteroNNParam, self).__init__()

        self.task_type = task_type
        self.bottom_nn_define = bottom_nn_define
        self.interactive_layer_define = interactive_layer_define
        self.interactive_layer_lr = interactive_layer_lr
        self.top_nn_define = top_nn_define
        self.batch_size = batch_size
        self.epochs = epochs
        self.early_stop = early_stop
        self.tol = tol
        self.optimizer = optimizer
        self.loss = loss
        # deprecated in favor of callback_param (see check() below)
        self.validation_freqs = validation_freqs
        self.early_stopping_rounds = early_stopping_rounds
        self.metrics = metrics or []
        self.use_first_metric_only = use_first_metric_only

        self.encrypt_param = copy.deepcopy(encrypt_param)
        self.encrypted_model_calculator_param = encrypted_mode_calculator_param
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.selector_param = selector_param
        self.floating_point_precision = floating_point_precision
        self.callback_param = copy.deepcopy(callback_param)
        self.coae_param = coae_param
        self.dataset = dataset

        self.config_type = 'pytorch'  # pytorch only

    def check(self):
        """Validate all fields and migrate deprecated options into callback_param."""

        assert isinstance(self.dataset, DatasetParam), 'dataset must be a DatasetParam()'

        self.dataset.check()

        if self.task_type not in ["classification", "regression"]:
            raise ValueError("config_type should be classification or regression")

        if not isinstance(self.tol, (int, float)):
            raise ValueError("tol should be numeric")

        if not isinstance(self.epochs, int) or self.epochs <= 0:
            raise ValueError("epochs should be a positive integer")

        if self.bottom_nn_define and not isinstance(self.bottom_nn_define, dict):
            raise ValueError("bottom_nn_define should be a dict defining the structure of neural network")

        if self.top_nn_define and not isinstance(self.top_nn_define, dict):
            raise ValueError("top_nn_define should be a dict defining the structure of neural network")

        if self.interactive_layer_define is not None and not isinstance(self.interactive_layer_define, dict):
            raise ValueError(
                "the interactive_layer_define should be a dict defining the structure of interactive layer")

        if self.batch_size != -1:
            if not isinstance(self.batch_size, int) \
                    or self.batch_size < consts.MIN_BATCH_SIZE:
                raise ValueError(
                    " {} not supported, should be larger than 10 or -1 represent for all data".format(self.batch_size))

        if self.early_stop != "diff":
            raise ValueError("early stop should be diff in this version")

        if self.metrics is not None and not isinstance(self.metrics, list):
            raise ValueError("metrics should be a list")

        if self.floating_point_precision is not None and \
                (not isinstance(self.floating_point_precision, int) or
                 self.floating_point_precision < 0 or self.floating_point_precision > 63):
            raise ValueError("floating point precision should be null or a integer between 0 and 63")

        self.encrypt_param.check()
        self.encrypted_model_calculator_param.check()
        self.predict_param.check()
        self.selector_param.check()
        self.coae_param.check()

        descr = "hetero nn param's "

        # Migration of deprecated top-level params into callback_param:
        # if any deprecated param was explicitly fed, enable PerformanceEvaluate
        # (conflicts with an explicitly fed callback_param are rejected).
        for p in ["early_stopping_rounds", "validation_freqs",
                  "use_first_metric_only"]:
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break

        # copy each deprecated value onto callback_param, warning as we go
        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs

        if self._warn_to_deprecate_param("early_stopping_rounds", descr, "callback_param's 'early_stopping_rounds'"):
            self.callback_param.early_stopping_rounds = self.early_stopping_rounds

        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            if self.metrics:
                self.callback_param.metrics = self.metrics

        if self._warn_to_deprecate_param("use_first_metric_only", descr, "callback_param's 'use_first_metric_only'"):
            self.callback_param.use_first_metric_only = self.use_first_metric_only
| 12,330 | 41.37457 | 139 | py |
FATE | FATE-master/python/fate_client/pipeline/param/boosting_param.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipeline.param.base_param import BaseParam
from pipeline.param.encrypt_param import EncryptParam
from pipeline.param.encrypted_mode_calculation_param import EncryptedModeCalculatorParam
from pipeline.param.cross_validation_param import CrossValidationParam
from pipeline.param.predict_param import PredictParam
from pipeline.param import consts
from pipeline.param.callback_param import CallbackParam
import copy
import collections
class ObjectiveParam(BaseParam):
    """
    Define objective parameters that used in federated ml.

    Parameters
    ----------
    objective : {None, 'cross_entropy', 'lse', 'lae', 'log_cosh', 'tweedie', 'fair', 'huber'}
        None in host's config, should be str in guest'config.
        when task_type is classification, only support 'cross_entropy',
        other 6 types support in regression task

    params : None or list
        should be non empty list when objective is 'tweedie','fair','huber',
        first element of list shoulf be a float-number large than 0.0 when objective is 'fair', 'huber',
        first element of list should be a float-number in [1.0, 2.0) when objective is 'tweedie'
    """

    def __init__(self, objective='cross_entropy', params=None):
        self.objective = objective
        self.params = params

    def check(self, task_type=None):
        """Validate objective/params for the given task type; returns True on success."""
        if self.objective is None:
            return True

        descr = "objective param's"

        # without a known task type, accept any of the supported objectives
        if task_type not in [consts.CLASSIFICATION, consts.REGRESSION]:
            self.objective = self.check_and_change_lower(self.objective,
                                                         ["cross_entropy", "lse", "lae", "huber", "fair",
                                                          "log_cosh", "tweedie"],
                                                         descr)

        if task_type == consts.CLASSIFICATION:
            if self.objective != "cross_entropy":
                raise ValueError("objective param's objective {} not supported".format(self.objective))

        elif task_type == consts.REGRESSION:
            self.objective = self.check_and_change_lower(self.objective,
                                                         ["lse", "lae", "huber", "fair", "log_cosh", "tweedie"],
                                                         descr)

        params = self.params
        if self.objective in ["huber", "fair", "tweedie"]:
            # these objectives need one numeric hyper-parameter in params[0]
            if type(params).__name__ != 'list' or len(params) < 1:
                raise ValueError(
                    "objective param's params {} not supported, should be non-empty list".format(params))

            if type(params[0]).__name__ not in ["float", "int", "long"]:
                raise ValueError("objective param's params[0] {} not supported".format(self.params[0]))

            if self.objective == 'tweedie':
                if params[0] < 1 or params[0] >= 2:
                    raise ValueError("in tweedie regression, objective params[0] should betweend [1, 2)")

            # fix: original `self.objective == 'fair' or 'huber'` was always
            # truthy ('huber' is a non-empty string); use a membership test
            if self.objective in ('fair', 'huber'):
                if params[0] <= 0.0:
                    raise ValueError("in {} regression, objective params[0] should greater than 0.0".format(
                        self.objective))
        return True
class DecisionTreeParam(BaseParam):
    """
    Define decision tree parameters that used in federated ml.

    Parameters
    ----------
    criterion_method : {"xgboost"}, default: "xgboost"
        the criterion function to use

    criterion_params: list or dict
        should be non empty and elements are float-numbers,
        if a list is offered, the first one is l2 regularization value, and the second one is
        l1 regularization value.
        if a dict is offered, make sure it contains key 'l1', and 'l2'.
        l1, l2 regularization values are non-negative floats.
        default: [0.1, 0] or {'l1':0, 'l2':0,1}

    max_depth: positive integer
        the max depth of a decision tree, default: 3

    min_sample_split: int
        least quantity of nodes to split, default: 2

    min_impurity_split: float
        least gain of a single split need to reach, default: 1e-3

    min_child_weight: float
        sum of hessian needed in child nodes. default is 0

    min_leaf_node: int
        when samples no more than min_leaf_node, it becomes a leave, default: 1

    max_split_nodes: positive integer
        we will use no more than max_split_nodes to
        parallel finding their splits in a batch, for memory consideration. default is 65536

    feature_importance_type: {'split', 'gain'}
        if is 'split', feature_importances calculate by feature split times,
        if is 'gain', feature_importances calculate by feature split gain.
        default: 'split'

        Due to the safety concern, we adjust training strategy of Hetero-SBT in FATE-1.8,
        When running Hetero-SBT, this parameter is now abandoned.
        In Hetero-SBT of FATE-1.8, guest side will compute split, gain of local features,
        and receive anonymous feature importance results from hosts. Hosts will compute split
        importance of local features.

    use_missing: bool, accepted True, False only, use missing value in training process or not. default: False

    zero_as_missing: bool
        regard 0 as missing value or not,
        will be use only if use_missing=True, default: False

    deterministic: bool
        ensure stability when computing histogram. Set this to true to ensure stable result when using
        same data and same parameter. But it may slow down computation.
    """

    def __init__(self, criterion_method="xgboost", criterion_params=[0.1, 0], max_depth=3,
                 min_sample_split=2, min_impurity_split=1e-3, min_leaf_node=1,
                 max_split_nodes=consts.MAX_SPLIT_NODES, feature_importance_type='split',
                 n_iter_no_change=True, tol=0.001, min_child_weight=0,
                 use_missing=False, zero_as_missing=False, deterministic=False):

        super(DecisionTreeParam, self).__init__()

        self.criterion_method = criterion_method
        self.criterion_params = criterion_params
        self.max_depth = max_depth
        self.min_sample_split = min_sample_split
        self.min_impurity_split = min_impurity_split
        self.min_leaf_node = min_leaf_node
        self.min_child_weight = min_child_weight
        self.max_split_nodes = max_split_nodes
        self.feature_importance_type = feature_importance_type
        self.n_iter_no_change = n_iter_no_change
        self.tol = tol
        self.use_missing = use_missing
        self.zero_as_missing = zero_as_missing
        self.deterministic = deterministic

    def check(self):
        """Validate all fields; normalizes criterion_params/criterion_method in place."""
        descr = "decision tree param"

        self.criterion_method = self.check_and_change_lower(self.criterion_method,
                                                            ["xgboost"],
                                                            descr)

        if len(self.criterion_params) == 0:
            raise ValueError("decisition tree param's criterio_params should be non empty")

        if isinstance(self.criterion_params, list):
            assert len(self.criterion_params) == 2, 'length of criterion_param should be 2: l1, l2 regularization ' \
                                                    'values are needed'
            self.check_nonnegative_number(self.criterion_params[0], 'l2 reg value')
            self.check_nonnegative_number(self.criterion_params[1], 'l1 reg value')

        elif isinstance(self.criterion_params, dict):
            assert 'l1' in self.criterion_params and 'l2' in self.criterion_params, 'l1 and l2 keys are needed in ' \
                                                                                   'criterion_params dict'
            # normalize the dict form to the internal [l2, l1] list form
            self.criterion_params = [self.criterion_params['l2'], self.criterion_params['l1']]
        else:
            raise ValueError('criterion_params should be a dict or a list contains l1, l2 reg value')

        if type(self.max_depth).__name__ not in ["int", "long"]:
            raise ValueError("decision tree param's max_depth {} not supported, should be integer".format(
                self.max_depth))

        if self.max_depth < 1:
            raise ValueError("decision tree param's max_depth should be positive integer, no less than 1")

        if type(self.min_sample_split).__name__ not in ["int", "long"]:
            raise ValueError("decision tree param's min_sample_split {} not supported, should be integer".format(
                self.min_sample_split))

        if type(self.min_impurity_split).__name__ not in ["int", "long", "float"]:
            raise ValueError("decision tree param's min_impurity_split {} not supported, should be numeric".format(
                self.min_impurity_split))

        if type(self.min_leaf_node).__name__ not in ["int", "long"]:
            raise ValueError("decision tree param's min_leaf_node {} not supported, should be integer".format(
                self.min_leaf_node))

        if type(self.max_split_nodes).__name__ not in ["int", "long"] or self.max_split_nodes < 1:
            # fix: .format previously bound only to the second string fragment
            # (before the '+'), so the message printed a literal "{}" and the
            # wrong value for the upper bound; format the whole message instead
            raise ValueError("decision tree param's max_split_nodes {} not supported, "
                             "should be positive integer between 1 and {}".format(self.max_split_nodes,
                                                                                  consts.MAX_SPLIT_NODES))

        if type(self.n_iter_no_change).__name__ != "bool":
            raise ValueError("decision tree param's n_iter_no_change {} not supported, should be bool type".format(
                self.n_iter_no_change))

        if type(self.tol).__name__ not in ["float", "int", "long"]:
            raise ValueError("decision tree param's tol {} not supported, should be numeric".format(self.tol))

        self.feature_importance_type = self.check_and_change_lower(self.feature_importance_type,
                                                                   ["split", "gain"],
                                                                   descr)
        self.check_nonnegative_number(self.min_child_weight, 'min_child_weight')
        self.check_boolean(self.deterministic, 'deterministic')

        return True
class BoostingParam(BaseParam):
    """
    Basic parameter for Boosting Algorithms

    Parameters
    ----------
    task_type : {'classification', 'regression'}, default: 'classification'
        task type
    objective_param : ObjectiveParam Object, default: ObjectiveParam()
        objective param
    learning_rate : float, int or long
        the learning rate of secure boost. default: 0.3
    num_trees : int or float
        the max number of boosting round. default: 5
    subsample_feature_rate : float
        a float-number in [0, 1], default: 1.0
    n_iter_no_change : bool,
        when True and residual error less than tol, tree building process will stop. default: True
    bin_num: positive integer greater than 1
        bin number use in quantile. default: 32
    validation_freqs: None or positive integer or container object in python
        Do validation in training process or Not.
        if equals None, will not do validation in train process;
        if equals positive integer, will validate data every validation_freqs epochs passes;
        if container object in python, will validate data if epochs belong to this container.
            e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
        Default: None
    """

    def __init__(self, task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, bin_num=32,
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, metrics=None, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR):
        super(BoostingParam, self).__init__()
        self.task_type = task_type
        # deep-copy the mutable sub-param defaults so instances never share state
        self.objective_param = copy.deepcopy(objective_param)
        self.learning_rate = learning_rate
        self.num_trees = num_trees
        self.subsample_feature_rate = subsample_feature_rate
        self.n_iter_no_change = n_iter_no_change
        self.tol = tol
        self.bin_num = bin_num
        self.predict_param = copy.deepcopy(predict_param)
        self.cv_param = copy.deepcopy(cv_param)
        self.validation_freqs = validation_freqs
        self.metrics = metrics
        self.random_seed = random_seed
        self.binning_error = binning_error

    def check(self):
        """Validate all hyper-parameters; raise ValueError on the first invalid one."""
        # Local import: the bare `collections.Container` alias used previously was
        # deprecated since Python 3.3 and removed in Python 3.10; the ABC lives in
        # collections.abc.
        import collections.abc
        descr = "boosting tree param's"
        if self.task_type not in [consts.CLASSIFICATION, consts.REGRESSION]:
            raise ValueError("boosting_core tree param's task_type {} not supported, should be {} or {}".format(
                self.task_type, consts.CLASSIFICATION, consts.REGRESSION))
        # objective must agree with the task type (e.g. cross-entropy vs. lse)
        self.objective_param.check(self.task_type)
        if type(self.learning_rate).__name__ not in ["float", "int", "long"]:
            raise ValueError("boosting_core tree param's learning_rate {} not supported, should be numeric".format(
                self.learning_rate))
        if type(self.subsample_feature_rate).__name__ not in ["float", "int", "long"] or \
                self.subsample_feature_rate < 0 or self.subsample_feature_rate > 1:
            raise ValueError(
                "boosting_core tree param's subsample_feature_rate should be a numeric number between 0 and 1")
        if type(self.n_iter_no_change).__name__ != "bool":
            raise ValueError("boosting_core tree param's n_iter_no_change {} not supported, should be bool type".format(
                self.n_iter_no_change))
        if type(self.tol).__name__ not in ["float", "int", "long"]:
            raise ValueError("boosting_core tree param's tol {} not supported, should be numeric".format(self.tol))
        if type(self.bin_num).__name__ not in ["int", "long"] or self.bin_num < 2:
            raise ValueError(
                "boosting_core tree param's bin_num {} not supported, should be positive integer greater than 1".format(
                    self.bin_num))
        if self.validation_freqs is None:
            pass
        elif isinstance(self.validation_freqs, int):
            if self.validation_freqs < 1:
                raise ValueError("validation_freqs should be larger than 0 when it's integer")
        elif not isinstance(self.validation_freqs, collections.abc.Container):
            raise ValueError("validation_freqs should be None or positive integer or container")
        if self.metrics is not None and not isinstance(self.metrics, list):
            raise ValueError("metrics should be a list")
        if self.random_seed is not None:
            assert isinstance(self.random_seed, int) and self.random_seed >= 0, 'random seed must be an integer >= 0'
        self.check_decimal_float(self.binning_error, descr)
        return True
class HeteroBoostingParam(BoostingParam):
    """
    Parameters
    ----------
    encrypt_param : EncodeParam Object
        encrypt method use in secure boost, default: EncryptParam()
    encrypted_mode_calculator_param: EncryptedModeCalculatorParam object
        the calculation mode use in secureboost,
        default: EncryptedModeCalculatorParam()
    """

    def __init__(self, task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, encrypt_param=EncryptParam(),
                 bin_num=32,
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, early_stopping_rounds=None, metrics=None, use_first_metric_only=False,
                 random_seed=100, binning_error=consts.DEFAULT_RELATIVE_ERROR):
        # shared boosting options are handled by the base class
        super().__init__(task_type, objective_param, learning_rate, num_trees,
                         subsample_feature_rate, n_iter_no_change, tol, bin_num,
                         predict_param, cv_param, validation_freqs, metrics=metrics,
                         random_seed=random_seed, binning_error=binning_error)
        # encryption settings are hetero-specific, hence kept on this subclass
        self.encrypt_param = copy.deepcopy(encrypt_param)
        self.encrypted_mode_calculator_param = copy.deepcopy(encrypted_mode_calculator_param)
        self.early_stopping_rounds = early_stopping_rounds
        self.use_first_metric_only = use_first_metric_only

    def check(self):
        """Run base-class checks, then validate encryption and early-stopping settings."""
        super().check()
        self.encrypted_mode_calculator_param.check()
        self.encrypt_param.check()
        # early stopping needs a positive round count and a validation schedule
        # to evaluate against; non-int, non-None values are tolerated as before
        if isinstance(self.early_stopping_rounds, int):
            if self.early_stopping_rounds < 1:
                raise ValueError("early stopping rounds should be larger than 0 when it's integer")
            if self.validation_freqs is None:
                raise ValueError("validation freqs must be set when early stopping is enabled")
        if not isinstance(self.use_first_metric_only, bool):
            raise ValueError("use_first_metric_only should be a boolean")
        return True
class HeteroSecureBoostParam(HeteroBoostingParam):
    """
    Define boosting tree parameters that used in federated ml.

    Parameters
    ----------
    task_type : {'classification', 'regression'}, default: 'classification'
        task type
    tree_param : DecisionTreeParam Object, default: DecisionTreeParam()
        tree param
    objective_param : ObjectiveParam Object, default: ObjectiveParam()
        objective param
    learning_rate : float, int or long
        the learning rate of secure boost. default: 0.3
    num_trees : int or float
        the max number of trees to build. default: 5
    subsample_feature_rate : float
        a float-number in [0, 1], default: 1.0
    random_seed: int
        seed that controls all random functions
    n_iter_no_change : bool,
        when True and residual error less than tol, tree building process will stop. default: True
    encrypt_param : EncodeParam Object
        encrypt method use in secure boost, default: EncryptParam(), this parameter
        is only for hetero-secureboost
    bin_num: positive integer greater than 1
        bin number use in quantile. default: 32
    encrypted_mode_calculator_param: EncryptedModeCalculatorParam object
        the calculation mode use in secureboost, default: EncryptedModeCalculatorParam(), only for hetero-secureboost
    use_missing: bool
        use missing value in training process or not. default: False
    zero_as_missing: bool
        regard 0 as missing value or not, will be use only if use_missing=True, default: False
    validation_freqs: None or positive integer or container object in python
        Do validation in training process or Not.
        if equals None, will not do validation in train process;
        if equals positive integer, will validate data every validation_freqs epochs passes;
        if container object in python, will validate data if epochs belong to this container.
            e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
        Default: None
        The default value is None, 1 is suggested. You can set it to a number larger than 1 in order to
        speed up training by skipping validation rounds. When it is larger than 1, a number which is
        divisible by "num_trees" is recommended, otherwise, you will miss the validation scores
        of last training iteration.
    early_stopping_rounds: integer larger than 0
        will stop training if one metric of one validation data
        doesn't improve in last early_stopping_round rounds,
        need to set validation freqs and will check early_stopping every at every validation epoch,
    metrics: list, default: []
        Specify which metrics to be used when performing evaluation during training process.
        If set as empty, default metrics will be used. For regression tasks, default metrics are
        ['root_mean_squared_error', 'mean_absolute_error'], For binary-classification tasks, default metrics
        are ['auc', 'ks']. For multi-classification tasks, default metrics are ['accuracy', 'precision', 'recall']
    use_first_metric_only: bool
        use only the first metric for early stopping
    complete_secure: int, default: 0
        if use complete_secure, when use complete secure, build first 'complete secure' tree using only guest features
    sparse_optimization:
        this parameter is abandoned in FATE-1.7.1
    run_goss: bool
        activate Gradient-based One-Side Sampling, which selects large gradient and small
        gradient samples using top_rate and other_rate.
    top_rate: float, the retain ratio of large gradient data, used when run_goss is True
    other_rate: float, the retain ratio of small gradient data, used when run_goss is True
    cipher_compress_error: This param is now abandoned
    cipher_compress: bool, default is True, use cipher compressing to reduce computation cost and transfer cost
    boosting_strategy: str
        std: standard sbt setting
        mix: alternate using guest/host features to build trees. For example, the first 'tree_num_per_party' trees
             use guest features,
             the second k trees use host features, and so on
        layered: only support 2 party, when running layered mode, first 'host_depth' layer will use host features,
                 and then next 'guest_depth' will only use guest features
    work_mode: str
        This parameter has the same function as boosting_strategy, but is deprecated
    tree_num_per_party: int, every party will alternate build 'tree_num_per_party' trees until reach max tree num, this
        param is valid when boosting_strategy is mix
    guest_depth: int, guest will build last guest_depth of a decision tree using guest features, is valid when boosting_strategy
        is layered
    host_depth: int, host will build first host_depth of a decision tree using host features, is valid when work boosting_strategy
        layered
    multi_mode: str, decide which mode to use when running multi-classification task:
        single_output standard gbdt multi-classification strategy
        multi_output every leaf give a multi-dimension predict, using multi_mode can save time
        by learning a model with less trees.
    EINI_inference: bool
        default is False, this option changes the inference algorithm used in predict tasks.
        a secure prediction method that hides decision path to enhance security in the inference
        step. This method is inspired by EINI inference algorithm.
    EINI_random_mask: bool
        default is False
        multiply predict result by a random float number to confuse original predict result. This operation further
        enhances the security of naive EINI algorithm.
    EINI_complexity_check: bool
        default is False
        check the complexity of tree models when running EINI algorithms. Complexity models are easy to hide their
        decision path, while simple tree models are not, therefore if a tree model is too simple, it is not allowed
        to run EINI predict algorithms.
    """

    # NOTE: cipher_compress used to default to 0, which is not a bool and therefore
    # failed this class's own check_boolean validation; the documented default is True.
    # complete_secure used to default to False; the documented type/default is int 0
    # (False == 0, so this is backward compatible).
    def __init__(self, tree_param: DecisionTreeParam = DecisionTreeParam(), task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1.0, n_iter_no_change=True,
                 tol=0.0001, encrypt_param=EncryptParam(),
                 bin_num=32,
                 encrypted_mode_calculator_param=EncryptedModeCalculatorParam(),
                 predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, early_stopping_rounds=None, use_missing=False, zero_as_missing=False,
                 complete_secure=0, metrics=None, use_first_metric_only=False, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR,
                 sparse_optimization=False, run_goss=False, top_rate=0.2, other_rate=0.1,
                 cipher_compress_error=None, cipher_compress=True, new_ver=True, boosting_strategy=consts.STD_TREE,
                 work_mode=None, tree_num_per_party=1, guest_depth=2, host_depth=3, callback_param=CallbackParam(),
                 multi_mode=consts.SINGLE_OUTPUT, EINI_inference=False, EINI_random_mask=False,
                 EINI_complexity_check=False):
        super(HeteroSecureBoostParam, self).__init__(task_type, objective_param, learning_rate, num_trees,
                                                     subsample_feature_rate, n_iter_no_change, tol, encrypt_param,
                                                     bin_num, encrypted_mode_calculator_param, predict_param, cv_param,
                                                     validation_freqs, early_stopping_rounds, metrics=metrics,
                                                     use_first_metric_only=use_first_metric_only,
                                                     random_seed=random_seed,
                                                     binning_error=binning_error)
        self.tree_param = copy.deepcopy(tree_param)
        self.zero_as_missing = zero_as_missing
        self.use_missing = use_missing
        self.complete_secure = complete_secure
        self.sparse_optimization = sparse_optimization
        self.run_goss = run_goss
        self.top_rate = top_rate
        self.other_rate = other_rate
        self.cipher_compress_error = cipher_compress_error
        self.cipher_compress = cipher_compress
        self.new_ver = new_ver
        self.EINI_inference = EINI_inference
        self.EINI_random_mask = EINI_random_mask
        self.EINI_complexity_check = EINI_complexity_check
        self.boosting_strategy = boosting_strategy
        self.work_mode = work_mode
        self.tree_num_per_party = tree_num_per_party
        self.guest_depth = guest_depth
        self.host_depth = host_depth
        self.callback_param = copy.deepcopy(callback_param)
        self.multi_mode = multi_mode

    def check(self):
        """Validate SBT-specific options after running the parent checks."""
        super(HeteroSecureBoostParam, self).check()
        self.tree_param.check()
        if not isinstance(self.use_missing, bool):
            raise ValueError('use missing should be bool type')
        if not isinstance(self.zero_as_missing, bool):
            raise ValueError('zero as missing should be bool type')
        self.check_boolean(self.run_goss, 'run goss')
        self.check_decimal_float(self.top_rate, 'top rate')
        self.check_decimal_float(self.other_rate, 'other rate')
        self.check_positive_number(self.other_rate, 'other_rate')
        self.check_positive_number(self.top_rate, 'top_rate')
        self.check_boolean(self.new_ver, 'code version switcher')
        self.check_boolean(self.cipher_compress, 'cipher compress')
        self.check_boolean(self.EINI_inference, 'eini inference')
        self.check_boolean(self.EINI_random_mask, 'eini random mask')
        self.check_boolean(self.EINI_complexity_check, 'eini complexity check')
        assert isinstance(self.complete_secure,
                          int) and self.complete_secure >= 0, "complete secure should be an int >= 0"
        if self.work_mode is not None:
            # work_mode is the deprecated alias of boosting_strategy and wins when set
            self.boosting_strategy = self.work_mode
        if self.multi_mode not in [consts.SINGLE_OUTPUT, consts.MULTI_OUTPUT]:
            raise ValueError('unsupported multi-classification mode')
        if self.multi_mode == consts.MULTI_OUTPUT:
            # multi-output leaves are only implemented for the standard strategy
            # and rely on cipher compressing
            if self.boosting_strategy != consts.STD_TREE:
                raise ValueError('MO trees only works when boosting strategy is std tree')
            if not self.cipher_compress:
                raise ValueError('Mo trees only works when cipher compress is enabled')
        if self.boosting_strategy not in [consts.STD_TREE, consts.LAYERED_TREE, consts.MIX_TREE]:
            raise ValueError('unknown sbt boosting strategy{}'.format(self.boosting_strategy))
        # migrate deprecated top-level callback options into callback_param,
        # rejecting mixed usage of old and new styles
        for p in ["early_stopping_rounds", "validation_freqs", "metrics",
                  "use_first_metric_only"]:
            # if self._warn_to_deprecate_param(p, "", ""):
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break
        descr = "boosting_param's"
        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs
        if self._warn_to_deprecate_param("early_stopping_rounds", descr, "callback_param's 'early_stopping_rounds'"):
            self.callback_param.early_stopping_rounds = self.early_stopping_rounds
        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            self.callback_param.metrics = self.metrics
        if self._warn_to_deprecate_param("use_first_metric_only", descr, "callback_param's 'use_first_metric_only'"):
            self.callback_param.use_first_metric_only = self.use_first_metric_only
        if self.top_rate + self.other_rate >= 1:
            raise ValueError('sum of top rate and other rate should be smaller than 1')
        return True
class HomoSecureBoostParam(BoostingParam):
    """
    Parameters
    ----------
    backend: {'distributed', 'memory'}
        decides which backend to use when computing histograms for homo-sbt
    """

    def __init__(self, tree_param: DecisionTreeParam = DecisionTreeParam(), task_type=consts.CLASSIFICATION,
                 objective_param=ObjectiveParam(),
                 learning_rate=0.3, num_trees=5, subsample_feature_rate=1, n_iter_no_change=True,
                 tol=0.0001, bin_num=32, predict_param=PredictParam(), cv_param=CrossValidationParam(),
                 validation_freqs=None, use_missing=False, zero_as_missing=False, random_seed=100,
                 binning_error=consts.DEFAULT_RELATIVE_ERROR, backend=consts.DISTRIBUTED_BACKEND,
                 callback_param=CallbackParam(), multi_mode=consts.SINGLE_OUTPUT):
        super(HomoSecureBoostParam, self).__init__(task_type=task_type,
                                                   objective_param=objective_param,
                                                   learning_rate=learning_rate,
                                                   num_trees=num_trees,
                                                   subsample_feature_rate=subsample_feature_rate,
                                                   n_iter_no_change=n_iter_no_change,
                                                   tol=tol,
                                                   bin_num=bin_num,
                                                   predict_param=predict_param,
                                                   cv_param=cv_param,
                                                   validation_freqs=validation_freqs,
                                                   random_seed=random_seed,
                                                   binning_error=binning_error
                                                   )
        self.use_missing = use_missing
        self.zero_as_missing = zero_as_missing
        self.tree_param = copy.deepcopy(tree_param)
        self.backend = backend
        self.callback_param = copy.deepcopy(callback_param)
        self.multi_mode = multi_mode

    def check(self):
        """Validate homo-sbt options after running the base boosting checks.

        Note: the multi_mode membership test used to be performed twice
        (before and after the deprecated-parameter migration); it is now
        checked once, together with the multi-output/regression restriction.
        """
        super(HomoSecureBoostParam, self).check()
        self.tree_param.check()
        if not isinstance(self.use_missing, bool):
            raise ValueError('use missing should be bool type')
        if not isinstance(self.zero_as_missing, bool):
            raise ValueError('zero as missing should be bool type')
        if self.backend not in [consts.MEMORY_BACKEND, consts.DISTRIBUTED_BACKEND]:
            raise ValueError('unsupported backend')
        if self.multi_mode not in [consts.SINGLE_OUTPUT, consts.MULTI_OUTPUT]:
            raise ValueError('unsupported multi-classification mode')
        if self.multi_mode == consts.MULTI_OUTPUT and self.task_type == consts.REGRESSION:
            raise ValueError('regression tasks not support multi-output trees')
        # migrate deprecated top-level callback options into callback_param,
        # rejecting mixed usage of old and new styles
        for p in ["validation_freqs", "metrics"]:
            # if self._warn_to_deprecate_param(p, "", ""):
            if self._deprecated_params_set.get(p):
                if "callback_param" in self.get_user_feeded():
                    raise ValueError(f"{p} and callback param should not be set simultaneously,"
                                     f"{self._deprecated_params_set}, {self.get_user_feeded()}")
                else:
                    self.callback_param.callbacks = ["PerformanceEvaluate"]
                break
        descr = "boosting_param's"
        if self._warn_to_deprecate_param("validation_freqs", descr, "callback_param's 'validation_freqs'"):
            self.callback_param.validation_freqs = self.validation_freqs
        if self._warn_to_deprecate_param("metrics", descr, "callback_param's 'metrics'"):
            self.callback_param.metrics = self.metrics
        return True
| 35,176 | 47.253772 | 134 | py |
FATE | FATE-master/python/fate_client/pipeline/param/homo_nn_param.py | from pipeline.param.base_param import BaseParam
class TrainerParam(BaseParam):
    """Parameter holder naming a HomoNN trainer and carrying its keyword arguments."""

    def __init__(self, trainer_name=None, **kwargs):
        super().__init__()
        self.trainer_name = trainer_name
        # every extra keyword is forwarded verbatim to the trainer
        self.param = kwargs

    def check(self):
        """Validate the trainer name when one is given; a None name is allowed."""
        if self.trainer_name is not None:
            self.check_string(self.trainer_name, 'trainer_name')

    def to_dict(self):
        """Serialize to the plain-dict form expected by the job configuration."""
        return {'trainer_name': self.trainer_name, 'param': self.param}
class DatasetParam(BaseParam):
    """Parameter holder naming a HomoNN dataset class and carrying its keyword arguments."""

    def __init__(self, dataset_name=None, **kwargs):
        super().__init__()
        self.dataset_name = dataset_name
        # every extra keyword is forwarded verbatim to the dataset constructor
        self.param = kwargs

    def check(self):
        """Validate the dataset name when one is given; a None name is allowed."""
        if self.dataset_name is not None:
            self.check_string(self.dataset_name, 'dataset_name')

    def to_dict(self):
        """Serialize to the plain-dict form expected by the job configuration."""
        return {'dataset_name': self.dataset_name, 'param': self.param}
class HomoNNParam(BaseParam):
    """Parameter container for the HomoNN component.

    Bundles the trainer/dataset sub-params, the global torch random seed
    and the raw nn/loss/optimizer/deepspeed config dicts.
    """

    def __init__(self,
                 trainer: TrainerParam = TrainerParam(),
                 dataset: DatasetParam = DatasetParam(),
                 torch_seed: int = 100,
                 nn_define: dict = None,
                 loss: dict = None,
                 optimizer: dict = None,
                 ds_config: dict = None
                 ):
        super().__init__()
        self.trainer = trainer
        self.dataset = dataset
        self.torch_seed = torch_seed
        self.nn_define = nn_define
        self.loss = loss
        self.optimizer = optimizer
        self.ds_config = ds_config

    def check(self):
        """Validate sub-params, the seed, and the optional config dicts."""
        assert isinstance(self.trainer, TrainerParam), 'trainer must be a TrainerParam()'
        assert isinstance(self.dataset, DatasetParam), 'dataset must be a DatasetParam()'
        self.trainer.check()
        self.dataset.check()
        self.check_positive_integer(self.torch_seed, 'torch seed')
        # each of these is optional; when present it must be a plain dict
        optional_dicts = (
            (self.nn_define, 'nn define should be a dict defining model structures'),
            (self.loss, 'loss parameter should be a loss config dict'),
            (self.optimizer, 'optimizer parameter should be a config dict'),
        )
        for value, message in optional_dicts:
            if value is not None:
                assert isinstance(value, dict), message
| 2,340 | 31.971831 | 107 | py |
FATE | FATE-master/python/fate_client/pipeline/component/hetero_ftl.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.tools import extract_explicit_parameter
from pipeline.param import consts
# The NN building blocks are optional dependencies: report a readable warning
# instead of crashing at import time when torch/keras are unavailable.
try:
    from pipeline.component.component_base import FateComponent
    from pipeline.component.nn.models.sequantial import Sequential
    import numpy as np
except Exception as e:
    print(e)
    # fixed grammar of the original message ("this may casue by the situation that ...")
    print('Import NN components in HeteroFTL module failed, '
          'this may be caused by torch/keras not being installed; '
          'please install them to use this module')
def find_and_convert_float32_in_dict(d, path=""):
    """Recursively cast numpy float32/float64 leaf values of a nested dict to builtin float, in place.

    Numpy scalars are not JSON-serializable, so configs built from keras/torch
    layers must be sanitized before being shipped in a job configuration.

    Parameters
    ----------
    d : dict
        Dictionary to sanitize; nested dicts are traversed recursively.
    path : str
        Dotted path of *d* inside the outermost dict. Kept for interface
        compatibility / debugging; it does not affect the conversion.
    """
    for key, value in d.items():
        child_path = f"{path}.{key}" if path else key
        if isinstance(value, dict):
            find_and_convert_float32_in_dict(value, child_path)
        elif isinstance(value, (np.float32, np.float64)):
            # replacing an existing key while iterating is safe (no resize)
            d[key] = float(value)
class HeteroFTL(FateComponent):
    """Pipeline component wrapper for hetero federated transfer learning (FTL).

    Builds a keras-style bottom network via ``add_nn_layer`` and serializes the
    network/optimizer configs into job parameters on ``compile``.
    """

    # NOTE(review): intersect_param uses a mutable default dict; it appears to be
    # consumed read-only by the decorator machinery — confirm before changing.
    @extract_explicit_parameter
    def __init__(self, epochs=1, batch_size=-1,
                 encrypt_param=None, predict_param=None, cv_param=None,
                 intersect_param={'intersect_method': consts.RSA},
                 validation_freqs=None, early_stopping_rounds=None, use_first_metric_only=None,
                 mode='plain', communication_efficient=False, n_iter_no_change=False, tol=1e-5,
                 local_round=5,
                 **kwargs):
        # @extract_explicit_parameter collects the explicitly-passed arguments
        # into kwargs["explict_parameters"] (sic — key name is spelled this way
        # by the decorator).
        explicit_parameters = kwargs["explict_parameters"]
        # optimizer/nn_define are filled in later by compile(); placeholders here
        explicit_parameters["optimizer"] = None
        # explicit_parameters["loss"] = None
        # explicit_parameters["metrics"] = None
        explicit_parameters["nn_define"] = None
        explicit_parameters["config_type"] = "keras"
        FateComponent.__init__(self, **explicit_parameters)
        if "name" in explicit_parameters:
            del explicit_parameters["name"]
        # mirror every explicit parameter as an instance attribute
        for param_key, param_value in explicit_parameters.items():
            setattr(self, param_key, param_value)
        self.input = Input(self.name, data_type="multi")
        self.output = Output(self.name, data_type='single')
        self._module_name = "FTL"
        self.optimizer = None
        self.loss = None
        self.config_type = "keras"
        self.metrics = None
        self.bottom_nn_define = None
        self.top_nn_define = None
        self.interactive_layer_define = None
        # builder object for the bottom network; excluded from pickling (see __getstate__)
        self._nn_model = Sequential()
        self.nn_define = None

    def add_nn_layer(self, layer):
        """Append one layer to the bottom network under construction."""
        self._nn_model.add(layer)

    def compile(self, optimizer,):
        """Freeze the built network: extract optimizer/layer configs and sanitize numpy floats."""
        self.optimizer = self._nn_model.get_optimizer_config(optimizer)
        self.config_type = self._nn_model.get_layer_type()
        self.nn_define = self._nn_model.get_network_config()
        # configs may contain numpy scalars, which are not JSON-serializable
        find_and_convert_float32_in_dict(self.nn_define)
        find_and_convert_float32_in_dict(self.optimizer)

    def __getstate__(self):
        """Drop the non-picklable model builder when serializing the component."""
        state = dict(self.__dict__)
        del state["_nn_model"]
        return state
| 3,526 | 35.739583 | 95 | py |
FATE | FATE-master/python/fate_client/pipeline/component/homo_nn.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.tools import extract_explicit_parameter
from pipeline.utils.logger import LOGGER
from pipeline.component.component_base import FateComponent
from pipeline.component.nn.interface import TrainerParam, DatasetParam
# Defaults for HomoNN's explicit parameters; populated below only when the
# optional torch-backed modules import successfully.
DEFAULT_PARAM_DICT = {}
# OptimizerType is used only in type annotations; fall back to a string
# annotation when torch is absent so the module still imports.
try:
    import torch as t
    OptimizerType = t.optim.Optimizer
except ImportError:
    OptimizerType = 't.optim.Optimizer'
try:
    import torch as t
    from pipeline.component.nn.backend.torch.base import Sequential
    from pipeline.component.nn.backend.torch import base
    from pipeline.component.nn.backend.torch.cust import CustModel
    # default parameter dict
    DEFAULT_PARAM_DICT = {
        'trainer': TrainerParam(trainer_name='fedavg_trainer'),
        'dataset': DatasetParam(dataset_name='table'),
        'torch_seed': 100,
        'loss': None,
        'optimizer': None,
        'nn_define': None,
        'ds_config': None
    }
except Exception as e:
    # torch (or the fate_torch backend) is unavailable: warn and degrade
    print(e)
    print('Import NN components in HomoNN module failed,\
    this may casue by the situation that torch are not installed,\
    please install torch to use this module')
    Sequential = None
class HomoNN(FateComponent):
    """
    Parameters
    ----------
    name, name of this component
    trainer, trainer param
    dataset, dataset param
    torch_seed, global random seed
    loss, loss function from fate_torch
    optimizer, optimizer from fate_torch
    model, a fate torch sequential defining the model structure
    """

    @extract_explicit_parameter
    def __init__(self,
                 name=None,
                 trainer: TrainerParam = TrainerParam(trainer_name='fedavg_trainer', epochs=10, batch_size=512,  # training parameter
                                                     early_stop=None, tol=0.0001,  # early stop parameters
                                                     secure_aggregate=True, weighted_aggregation=True,
                                                     aggregate_every_n_epoch=None,  # federation
                                                     cuda=False, pin_memory=True, shuffle=True, data_loader_worker=0,  # GPU dataloader
                                                     validation_freqs=None),
                 dataset: DatasetParam = DatasetParam(dataset_name='table'),
                 torch_seed: int = 100,
                 loss=None,
                 optimizer: OptimizerType = None,
                 ds_config: dict = None,
                 model: Sequential = None, **kwargs):
        # start from the module-level defaults; the decorator puts the
        # explicitly-passed arguments under kwargs["explict_parameters"] (sic)
        explicit_parameters = copy.deepcopy(DEFAULT_PARAM_DICT)
        if 'name' not in kwargs["explict_parameters"]:
            # NOTE(review): "moduel" is a typo in this error message
            raise RuntimeError('moduel name is not set')
        explicit_parameters["name"] = kwargs["explict_parameters"]['name']
        FateComponent.__init__(self, **explicit_parameters)
        kwargs["explict_parameters"].pop('name')
        self.input = Input(self.name, data_type="multi")
        self.output = Output(self.name, data_type='single')
        self._module_name = "HomoNN"
        # per-attribute flags: True once the attribute has been converted to
        # its serializable (dict) form by _check_parameters
        self._updated = {'trainer': False, 'dataset': False,
                         'torch_seed': False, 'loss': False, 'optimizer': False, 'model': False}
        self._set_param(kwargs["explict_parameters"])
        self._check_parameters()

    def _set_updated(self, attr, status=True):
        """Mark one tracked attribute as converted (or reset it with status=False)."""
        if attr in self._updated:
            self._updated[attr] = status
        else:
            raise ValueError('attr {} not in update status {}'.format(attr, self._updated))

    def _set_param(self, params):
        """Copy every explicit parameter (except name) onto the instance."""
        if "name" in params:
            del params["name"]
        for param_key, param_value in params.items():
            setattr(self, param_key, param_value)

    def _check_parameters(self):
        """Validate the fate-torch objects and convert each one (once) to its dict form."""
        if hasattr(self, 'trainer') and self.trainer is not None and not self._updated['trainer']:
            assert isinstance(
                self.trainer, TrainerParam), 'trainer must be a TrainerPram class'
            self.trainer.check()
            self.trainer: TrainerParam = self.trainer.to_dict()
            self._set_updated('trainer', True)

        if hasattr(self, 'dataset') and self.dataset is not None and not self._updated['dataset']:
            assert isinstance(
                self.dataset, DatasetParam), 'dataset must be a DatasetParam class'
            self.dataset.check()
            self.dataset: DatasetParam = self.dataset.to_dict()
            self._set_updated('dataset', True)

        if hasattr(self, 'model') and self.model is not None and not self._updated['model']:
            # a bare CustModel is wrapped into a Sequential before serialization
            if isinstance(self.model, Sequential):
                self.nn_define = self.model.get_network_config()
            elif isinstance(self.model, CustModel):
                self.model = Sequential(self.model)
                self.nn_define = self.model.get_network_config()
            else:
                raise RuntimeError('Model must be a fate-torch Sequential, but got {} '
                                   '\n do remember to call fate_torch_hook():'
                                   '\n import torch as t'
                                   '\n fate_torch_hook(t)'.format(
                                       type(self.model)))
            self._set_updated('model', True)

        if hasattr(self, 'optimizer') and self.optimizer is not None and not self._updated['optimizer']:
            if not isinstance(self.optimizer, base.FateTorchOptimizer):
                raise ValueError('please pass FateTorchOptimizer instances to Homo-nn components, got {}.'
                                 'do remember to use fate_torch_hook():\n'
                                 '    import torch as t\n'
                                 '    fate_torch_hook(t)'.format(type(self.optimizer)))
            optimizer_config = self.optimizer.to_dict()
            self.optimizer = optimizer_config
            self._set_updated('optimizer', True)

        if hasattr(self, 'loss') and self.loss is not None and not self._updated['loss']:
            # loss may be given as an instance or as a FateTorchLoss subclass
            if isinstance(self.loss, base.FateTorchLoss):
                loss_config = self.loss.to_dict()
            elif issubclass(self.loss, base.FateTorchLoss):
                loss_config = self.loss().to_dict()
            else:
                raise ValueError('unable to parse loss function {}, loss must be an instance'
                                 'of FateTorchLoss subclass or a subclass of FateTorchLoss, '
                                 'do remember to use fate_torch_hook()'.format(self.loss))
            self.loss = loss_config
            self._set_updated('loss', True)

    def component_param(self, **kwargs):
        # reset paramerters
        used_attr = set()
        setattr(self, 'model', None)
        if 'model' in kwargs:
            self.model = kwargs['model']
            kwargs.pop('model')
            # reset the flag so _check_parameters re-converts the new model
            self._set_updated('model', False)
        for attr in self._component_parameter_keywords:
            if attr in kwargs:
                setattr(self, attr, kwargs[attr])
                self._set_updated(attr, False)
                used_attr.add(attr)
        self._check_parameters()  # check and convert homo-nn paramters
        not_use_attr = set(kwargs.keys()).difference(used_attr)
        for attr in not_use_attr:
            LOGGER.warning(f"key {attr}, value {kwargs[attr]} not use")
        self._role_parameter_keywords |= used_attr
        # refresh the serialized component-param snapshot from instance state
        for attr in self.__dict__:
            if attr not in self._component_parameter_keywords:
                continue
            else:
                self._component_param[attr] = getattr(self, attr)

    def __getstate__(self):
        """Drop the (possibly non-picklable) torch model when serializing the component."""
        state = dict(self.__dict__)
        if "model" in state:
            del state["model"]
        return state
| 8,452 | 41.691919 | 136 | py |
FATE | FATE-master/python/fate_client/pipeline/component/__init__.py | from pipeline.component.column_expand import ColumnExpand
from pipeline.component.data_statistics import DataStatistics
from pipeline.component.dataio import DataIO
from pipeline.component.data_transform import DataTransform
from pipeline.component.evaluation import Evaluation
from pipeline.component.hetero_data_split import HeteroDataSplit
from pipeline.component.hetero_fast_secureboost import HeteroFastSecureBoost
from pipeline.component.hetero_feature_binning import HeteroFeatureBinning
from pipeline.component.hetero_feature_selection import HeteroFeatureSelection
from pipeline.component.hetero_linr import HeteroLinR
from pipeline.component.hetero_lr import HeteroLR
from pipeline.component.hetero_pearson import HeteroPearson
from pipeline.component.hetero_poisson import HeteroPoisson
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.homo_data_split import HomoDataSplit
from pipeline.component.homo_lr import HomoLR
from pipeline.component.homo_secureboost import HomoSecureBoost
from pipeline.component.homo_feature_binning import HomoFeatureBinning
from pipeline.component.intersection import Intersection
from pipeline.component.local_baseline import LocalBaseline
from pipeline.component.one_hot_encoder import OneHotEncoder
from pipeline.component.psi import PSI
from pipeline.component.reader import Reader
from pipeline.component.scorecard import Scorecard
from pipeline.component.sampler import FederatedSample
from pipeline.component.scale import FeatureScale
from pipeline.component.union import Union
from pipeline.component.feldman_verifiable_sum import FeldmanVerifiableSum
from pipeline.component.sample_weight import SampleWeight
from pipeline.component.feature_imputation import FeatureImputation
from pipeline.component.label_transform import LabelTransform
from pipeline.component.hetero_sshe_lr import HeteroSSHELR
from pipeline.component.secure_information_retrieval import SecureInformationRetrieval
from pipeline.component.cache_loader import CacheLoader
from pipeline.component.model_loader import ModelLoader
from pipeline.component.hetero_kmeans import HeteroKmeans
from pipeline.component.homo_onehot import HomoOneHotEncoder
from pipeline.component.hetero_sshe_linr import HeteroSSHELinR
from pipeline.component.positive_unlabeled import PositiveUnlabeled
try:
import torch
from pipeline.component.homo_nn import HomoNN
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.hetero_nn import HeteroNN
except BaseException:
print('Import torch failed, this may casue by the situation that torch are not installed, HomoNN, HeteroNN, HeteroFTL are not available')
HomoNN, HeteroNN, HeteroFTL = None, None, None
__all__ = [
"DataStatistics",
"DataIO",
"Evaluation",
"HeteroDataSplit",
"HeteroFastSecureBoost",
"HeteroFeatureBinning",
"HeteroFeatureSelection",
"HeteroFTL",
"HeteroLinR",
"HeteroLR",
"HeteroNN",
"HeteroPearson",
"HeteroPoisson",
"HeteroSecureBoost",
"HomoDataSplit",
"HomoLR",
"HomoNN",
"HomoSecureBoost",
"HomoFeatureBinning",
"Intersection",
"LocalBaseline",
"OneHotEncoder",
"PSI",
"Reader",
"Scorecard",
"FederatedSample",
"FeatureScale",
"Union",
"ColumnExpand",
"FeldmanVerifiableSum",
"SampleWeight",
"DataTransform",
"FeatureImputation",
"LabelTransform",
"SecureInformationRetrieval",
"CacheLoader",
"ModelLoader",
"HeteroSSHELR",
"HeteroKmeans",
"HomoOneHotEncoder",
"HeteroSSHELinR",
"PositiveUnlabeled"]
| 3,628 | 37.606383 | 141 | py |
FATE | FATE-master/python/fate_client/pipeline/component/hetero_nn.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.interface import Input
from pipeline.interface import Output
from pipeline.utils.tools import extract_explicit_parameter
from pipeline.component.nn.interface import DatasetParam
try:
from pipeline.component.component_base import FateComponent
from pipeline.component.nn.models.sequantial import Sequential
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
except Exception as e:
print(e)
print('Import NN components in HeteroNN module failed, \
this may casue by the situation that torch are not installed,\
please install torch to use this module')
class HeteroNN(FateComponent):
    """Pipeline component for the Hetero (vertical federated) Neural Network.

    The guest party owns a bottom model, an interactive layer and a top
    model; host parties own bottom models only.  Models are attached via
    ``add_bottom_model`` / ``set_interactive_layer`` / ``add_top_model`` and
    serialized into component parameters by ``compile``.
    """

    @extract_explicit_parameter
    def __init__(self, task_type="classification", epochs=None, batch_size=-1, early_stop="diff",
                 tol=1e-5, encrypt_param=None, predict_param=None, cv_param=None, interactive_layer_lr=0.1,
                 validation_freqs=None, early_stopping_rounds=None, use_first_metric_only=None,
                 floating_point_precision=23, selector_param=None, seed=100,
                 dataset: DatasetParam = DatasetParam(dataset_name='table'), **kwargs
                 ):
        """
        Parameters used for Hetero Neural Network.

        Parameters
        ----------
        task_type: str, task type of hetero nn model, one of 'classification', 'regression'.
        interactive_layer_lr: float, the learning rate of interactive layer.
        epochs: int, the maximum iteration for aggregation in training.
        batch_size : int, batch size when updating model.
            -1 means use all data in a batch. i.e. Not to use mini-batch strategy.
            defaults to -1.
        early_stop : str, accept 'diff' only in this version, default: 'diff'
            Method used to judge converge or not.
                a)  diff: Use difference of loss between two iterations to judge whether converge.
        tol: float, tolerance val for early stop
        floating_point_precision: None or integer, if not None, means use floating_point_precision-bit to speed up calculation,
            e.g.: convert an x to round(x * 2**floating_point_precision) during Paillier operation, divide
            the result by 2**floating_point_precision in the end.
        callback_param: dict, CallbackParam, see federatedml/param/callback_param
        encrypt_param: dict, see federatedml/param/encrypt_param
        dataset_param: dict, interface defining the dataset param
        early_stopping_rounds: integer larger than 0
            will stop training if one metric of one validation data
            doesn't improve in last early_stopping_round rounds,
            need to set validation freqs and will check early_stopping every at every validation epoch
        validation_freqs: None or positive integer or container object in python
            Do validation in training process or Not.
            if equals None, will not do validation in train process;
            if equals positive integer, will validate data every validation_freqs epochs passes;
            if container object in python, will validate data if epochs belong to this container.
            e.g. validation_freqs = [10, 15], will validate data when epoch equals to 10 and 15.
            Default: None
        """
        # @extract_explicit_parameter gathers the explicit call arguments
        # into kwargs["explict_parameters"] (sic — decorator's spelling)
        explicit_parameters = kwargs["explict_parameters"]
        explicit_parameters["optimizer"] = None
        explicit_parameters["bottom_nn_define"] = None
        explicit_parameters["top_nn_define"] = None
        explicit_parameters["interactive_layer_define"] = None
        explicit_parameters["loss"] = None
        FateComponent.__init__(self, **explicit_parameters)

        # mirror every explicit parameter (except name) onto the instance
        if "name" in explicit_parameters:
            del explicit_parameters["name"]
        for param_key, param_value in explicit_parameters.items():
            setattr(self, param_key, param_value)

        self.input = Input(self.name, data_type="multi")
        self.output = Output(self.name, data_type='single')
        self._module_name = "HeteroNN"
        self.optimizer = None
        self.bottom_nn_define = None
        self.top_nn_define = None
        self.interactive_layer_define = None

        # model holders, filled via add_* / set_* methods
        self._bottom_nn_model = Sequential()
        self._interactive_layer = Sequential()
        self._top_nn_model = Sequential()

        # role this instance is configured for: common/guest/host
        self._role = 'common'

        # 'dataset' only exists on self when passed explicitly by the caller
        if hasattr(self, 'dataset'):
            assert isinstance(
                self.dataset, DatasetParam), 'dataset must be a DatasetParam class'
            self.dataset.check()
            self.dataset: DatasetParam = self.dataset.to_dict()

    def set_role(self, role):
        # role: 'common', 'guest' or 'host'
        self._role = role

    def get_party_instance(self, role="guest", party_id=None) -> 'Component':
        """Return the per-party instance, tagging it with its role."""
        inst = super().get_party_instance(role, party_id)
        inst.set_role(role)
        return inst

    def add_dataset(self, dataset_param: DatasetParam):
        """Attach a dataset definition after construction."""
        assert isinstance(
            dataset_param, DatasetParam), 'dataset must be a DatasetParam class'
        dataset_param.check()
        self.dataset: DatasetParam = dataset_param.to_dict()
        self._component_parameter_keywords.add("dataset")
        self._component_param["dataset"] = self.dataset

    def add_bottom_model(self, model):
        """Append a layer/model to this party's bottom model."""
        if not hasattr(self, "_bottom_nn_model"):
            setattr(self, "_bottom_nn_model", Sequential())
        self._bottom_nn_model.add(model)

    def set_interactive_layer(self, layer):
        """Set the guest-side interactive layer (guest/common roles only)."""
        if self._role == 'common' or self._role == 'guest':
            if not hasattr(self, "_interactive_layer"):
                setattr(self, "_interactive_layer", Sequential())

            assert isinstance(layer, InteractiveLayer), 'You need to add an interactive layer instance, \n' \
                                                        'you can access InteractiveLayer by:\n' \
                                                        't.nn.InteractiveLayer after fate_torch_hook(t)\n' \
                                                        'or from pipeline.component.nn.backend.torch.interactive ' \
                                                        'import InteractiveLayer'
            self._interactive_layer.add(layer)
        else:
            raise RuntimeError(
                'You can only set interactive layer in "common" or "guest" hetero nn component')

    def add_top_model(self, model):
        """Append a layer/model to the top model (not allowed on host)."""
        if self._role == 'host':
            raise RuntimeError('top model is not allow to set on host model')
        if not hasattr(self, "_top_nn_model"):
            setattr(self, "_top_nn_model", Sequential())
        self._top_nn_model.add(model)

    def _set_optimizer(self, opt):
        # opt must be a fate_torch optimizer (has to_dict after fate_torch_hook)
        assert hasattr(
            opt, 'to_dict'), 'opt does not have function to_dict(), remember to call fate_torch_hook(t)'
        self.optimizer = opt.to_dict()

    def _set_loss(self, loss):
        # loss must be a fate_torch loss (has to_dict after fate_torch_hook)
        assert hasattr(
            loss, 'to_dict'), 'loss does not have function to_dict(), remember to call fate_torch_hook(t)'
        loss_conf = loss.to_dict()
        setattr(self, "loss", loss_conf)

    def compile(self, optimizer, loss):
        """Serialize optimizer, loss and all attached networks into the
        component parameters (for this instance and every party instance)."""
        self._set_optimizer(optimizer)
        self._set_loss(loss)
        self._compile_common_network_config()
        self._compile_role_network_config()
        self._compile_interactive_layer()

    def _compile_interactive_layer(self):
        # serialize the interactive layer definition, if one was set
        if hasattr(
                self,
                "_interactive_layer") and not self._interactive_layer.is_empty():
            self.interactive_layer_define = self._interactive_layer.get_network_config()
            self._component_param["interactive_layer_define"] = self.interactive_layer_define

    def _compile_common_network_config(self):
        # serialize bottom / top network definitions, if present
        if hasattr(
                self,
                "_bottom_nn_model") and not self._bottom_nn_model.is_empty():
            self.bottom_nn_define = self._bottom_nn_model.get_network_config()
            self._component_param["bottom_nn_define"] = self.bottom_nn_define

        if hasattr(
                self,
                "_top_nn_model") and not self._top_nn_model.is_empty():
            self.top_nn_define = self._top_nn_model.get_network_config()
            self._component_param["top_nn_define"] = self.top_nn_define

    def _compile_role_network_config(self):
        # propagate compilation to each per-party instance
        all_party_instance = self._get_all_party_instance()
        for role in all_party_instance:
            for party in all_party_instance[role]["party"].keys():
                all_party_instance[role]["party"][party]._compile_common_network_config(
                )
                all_party_instance[role]["party"][party]._compile_interactive_layer(
                )

    def get_bottom_model(self):
        """Return this instance's bottom model, or a {party: model} dict
        collected from the per-party instances (None if nothing is set)."""
        if hasattr(
                self,
                "_bottom_nn_model") and not getattr(
                self,
                "_bottom_nn_model").is_empty():
            return getattr(self, "_bottom_nn_model").get_model()

        bottom_models = {}
        all_party_instance = self._get_all_party_instance()
        for role in all_party_instance.keys():
            for party in all_party_instance[role]["party"].keys():
                party_inst = all_party_instance[role]["party"][party]
                if party_inst is not None:
                    btn_model = all_party_instance[role]["party"][party].get_bottom_model(
                    )
                    if btn_model is not None:
                        bottom_models[party] = btn_model

        return bottom_models if len(bottom_models) > 0 else None

    def get_top_model(self):
        """Return this instance's top model, or a {party: model} dict
        collected from the per-party instances (None if nothing is set)."""
        if hasattr(
                self,
                "_top_nn_model") and not getattr(
                self,
                "_top_nn_model").is_empty():
            return getattr(self, "_top_nn_model").get_model()

        models = {}
        all_party_instance = self._get_all_party_instance()
        for role in all_party_instance.keys():
            for party in all_party_instance[role]["party"].keys():
                party_inst = all_party_instance[role]["party"][party]
                if party_inst is not None:
                    top_model = all_party_instance[role]["party"][party].get_top_model(
                    )
                    if top_model is not None:
                        models[party] = top_model

        return models if len(models) > 0 else None

    def __getstate__(self):
        """Pickle support: drop the live model holders, which may not be
        picklable; only their serialized defines survive in the state."""
        state = dict(self.__dict__)
        if "_bottom_nn_model" in state:
            del state["_bottom_nn_model"]

        if "_interactive_layer" in state:
            del state["_interactive_layer"]

        if "_top_nn_model" in state:
            del state["_top_nn_model"]

        return state
| 11,564 | 43.141221 | 134 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/models/sequantial.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pipeline.component.nn.backend.torch.base import Sequential as Seq
from pipeline.component.nn.backend.torch.cust import CustModel
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
class Sequential(object):
    """Pipeline-side model container that wraps either a fate_torch
    Sequential (torch backend) or a keras SequentialModel (legacy backend),
    selecting the backend from the type of the first layer added."""

    def __init__(self):
        self.__config_type = None  # 'torch' or 'keras', set by _add_layer
        self._model = None

    def is_empty(self):
        return self._model is None

    def get_model(self):
        return self._model

    def add(self, layer):
        """Add a layer/model; raises ValueError for unsupported types."""
        _IS_TF_KERAS = False
        try:
            import tensorflow as tf
            _IS_TF_KERAS = isinstance(layer, tf.Module)
        except ImportError:
            pass

        if _IS_TF_KERAS:
            # please notice that keras backend now is abandoned, hetero & homo nn support keras backend no more,
            # but pipeline keras interface is kept
            layer_type = "keras"
        else:
            layer_type = "torch"

        # a FateTorchLayer is recognized by its defining module path
        is_layer = hasattr(
            layer,
            "__module__") and "pipeline.component.nn.backend.torch.nn" == getattr(
            layer,
            "__module__")
        is_seq = isinstance(layer, Seq)
        is_cust_model = isinstance(layer, CustModel)
        is_interactive_layer = isinstance(layer, InteractiveLayer)
        if not (is_layer or is_cust_model or is_interactive_layer or is_seq):
            raise ValueError(
                "Layer type {} not support yet, added layer must be a FateTorchLayer or a fate_torch "
                "Sequential, remember to call fate_torch_hook() before using pipeline "
                "".format(
                    type(layer)))

        self._add_layer(layer, layer_type)

    def _add_layer(self, layer, layer_type, replace=True):
        # NOTE(review): with the default replace=True the torch branch
        # re-creates the inner Seq on EVERY add, so a previously added
        # layer is discarded unless the new layer is itself a Seq (whose
        # modules get copied in) — confirm this is the intended semantics.
        if layer_type == 'torch':
            if self._model is None or replace:
                self._model = Seq()
                self.__config_type = layer_type
        elif layer_type == 'keras':
            # please notice that keras backend now is abandoned, hetero & homo nn support keras backend no more,
            # but pipeline keras interface is kept
            from pipeline.component.nn.models.keras_interface import SequentialModel
            self.__config_type = layer_type
            self._model = SequentialModel()

        self._model.add(layer)

    def get_layer_type(self):
        return self.__config_type

    def get_loss_config(self, loss):
        # delegate to the backend model
        return self._model.get_loss_config(loss)

    def get_optimizer_config(self, optimizer):
        # delegate to the backend model
        return self._model.get_optimizer_config(optimizer)

    def get_network_config(self):
        """Return the serialized network structure; raises if empty."""
        if not self.__config_type:
            raise ValueError("Empty layer find, can't get config")

        return self._model.get_network_config()

    def __repr__(self):
        return self._model.__repr__()
| 3,465 | 34.731959 | 112 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/models/keras_interface.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
_TF_KERAS_VALID = False
try:
from tensorflow.keras.models import Sequential
_TF_KERAS_VALID = True
except ImportError:
pass
def build_model(model_type="sequential"):
    """Factory for pipeline keras models.

    Only the 'sequential' model type is supported; any other value raises
    ValueError.
    """
    if model_type == "sequential":
        return SequentialModel()
    raise ValueError("Only support sequential model now")
class SequentialModel(object):
    """Thin wrapper over ``tf.keras`` Sequential used by the legacy keras
    backend of the pipeline NN components."""

    def __init__(self):
        # without tensorflow installed the wrapper stays empty; add() and
        # get_network_config() will then raise ImportError
        self._model = Sequential() if _TF_KERAS_VALID else None

    def add(self, layer):
        """Append a keras layer; requires tensorflow to be installed."""
        if not _TF_KERAS_VALID:
            raise ImportError(
                "Please install tensorflow first, "
                "can not import sequential model from tensorflow.keras.model !!!")
        self._model.add(layer)

    @staticmethod
    def get_loss_config(loss):
        """Return the serializable name of a keras loss (or pass strings through)."""
        if isinstance(loss, str):
            return loss
        if loss.__module__ == "tensorflow.python.keras.losses":
            return loss.__name__
        raise ValueError(
            "keras sequential model' loss should be string of losses function of tf_keras")

    @staticmethod
    def get_optimizer_config(optimizer):
        """Return a serializable config for a keras optimizer (or its name)."""
        if isinstance(optimizer, str):
            return optimizer
        config = optimizer.get_config()
        if "name" in config:
            # FATE expects the key 'optimizer' instead of keras' 'name'
            config["optimizer"] = config.pop("name")
        return config

    def get_network_config(self):
        """Return the keras model architecture as a dict (parsed to_json)."""
        if not _TF_KERAS_VALID:
            raise ImportError(
                "Please install tensorflow first, "
                "can not import sequential model from tensorflow.keras.model !!!")
        return json.loads(self._model.to_json())
| 2,295 | 28.435897 | 91 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/import_hook.py | try:
from pipeline.component.nn.backend.torch import nn as nn_
from pipeline.component.nn.backend.torch import init as init_
from pipeline.component.nn.backend.torch import optim as optim_
from pipeline.component.nn.backend.torch.cust import CustModel, CustLoss
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def monkey_patch(torch_nn, fate_torch_module):
    """Replace attributes of *torch_nn* with the same-named attributes of
    *fate_torch_module*; names containing '__' are left untouched, and no
    new names are added to *torch_nn*."""
    target = torch_nn.__dict__
    for name, replacement in fate_torch_module.__dict__.items():
        if '__' in name:
            # skip dunder / internal machinery
            continue
        if name in target:
            target[name] = replacement
def fate_torch_hook(torch_module_var):
    """
    Monkey-patch a torch module so it exposes the fate_torch layers and
    components (serializable nn layers, optimizers, init functions, plus
    CustModel / CustLoss / InteractiveLayer).

    :param torch_module_var: one of the modules ``torch``, ``torch.nn``,
        ``torch.optim`` or ``torch.nn.init``
    :return: the patched module (same object, for chaining)
    :raises ValueError: if the module is not one of the supported ones
    """
    if torch_module_var.__name__ == 'torch':
        monkey_patch(torch_module_var.nn, nn_)
        monkey_patch(torch_module_var.optim, optim_)
        monkey_patch(torch_module_var.nn.init, init_)
        setattr(torch_module_var.nn, 'CustModel', CustModel)
        setattr(torch_module_var.nn, 'InteractiveLayer', InteractiveLayer)
        setattr(torch_module_var.nn, 'CustLoss', CustLoss)
    elif torch_module_var.__name__ == 'torch.nn':
        monkey_patch(torch_module_var, nn_)
        # Bug fix: the module passed in IS torch.nn, so the custom classes
        # must be set on it directly; the original used
        # ``torch_module_var.nn``, which does not exist on torch.nn and
        # raised AttributeError here.
        setattr(torch_module_var, 'CustModel', CustModel)
        setattr(torch_module_var, 'InteractiveLayer', InteractiveLayer)
        setattr(torch_module_var, 'CustLoss', CustLoss)
    elif torch_module_var.__name__ == 'torch.optim':
        monkey_patch(torch_module_var, optim_)
    elif torch_module_var.__name__ == 'torch.nn.init':
        monkey_patch(torch_module_var, init_)
    else:
        raise ValueError(
            'this module: {} does not support fate torch hook'.format(torch_module_var))

    return torch_module_var
| 1,920 | 36.666667 | 101 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/base.py | import json
import torch as t
from torch.nn import Sequential as tSequential
from pipeline.component.nn.backend.torch.operation import OpBase
class FateTorchLayer(object):
def __init__(self):
t.nn.Module.__init__(self)
self.param_dict = dict()
self.initializer = {'weight': None, 'bias': None}
self.optimizer = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['layer'] = type(self).__name__
ret_dict['initializer'] = {}
if self.initializer['weight']:
ret_dict['initializer']['weight'] = self.initializer['weight']
if self.initializer['bias']:
ret_dict['initializer']['bias'] = self.initializer['bias']
return ret_dict
def add_optimizer(self, opt):
self.optimizer = opt
class FateTorchLoss(object):
    """Mixin recording loss hyper-parameters for serialization."""

    def __init__(self):
        self.param_dict = {}

    def to_dict(self):
        """Return ``param_dict`` plus the loss class name under 'loss_fn'."""
        import copy
        loss_conf = copy.deepcopy(self.param_dict)
        loss_conf['loss_fn'] = type(self).__name__
        return loss_conf
class FateTorchOptimizer(object):
    """Mixin that lets fate_torch optimizer wrappers serialize their config
    and bind themselves to fate_torch layers/sequentials."""

    def __init__(self):
        self.param_dict = dict()
        self.torch_class = None  # set by subclasses to the real torch optimizer

    def to_dict(self):
        """Return ``param_dict`` plus optimizer class name and backend marker."""
        import copy
        opt_conf = copy.deepcopy(self.param_dict)
        opt_conf['optimizer'] = type(self).__name__
        opt_conf['config_type'] = 'pytorch'
        return opt_conf

    def check_params(self, params):
        """Normalize *params* to a non-empty list of torch parameters.

        A FateTorchLayer / Sequential is registered with this optimizer and
        replaced by its parameters; any other iterable is used as-is.
        """
        if isinstance(params, (FateTorchLayer, Sequential)):
            params.add_optimizer(self)
            params = params.parameters()
        param_list = list(params)
        if not param_list:
            # fake parameter, for the case that there are only cust models
            return [t.nn.Parameter(t.Tensor([0]))]
        return param_list

    def register_optimizer(self, input_):
        """Attach self to *input_* if it is a fate_torch layer/sequential."""
        if input_ is None:
            return
        if isinstance(input_, (FateTorchLayer, Sequential)):
            input_.add_optimizer(self)

    def to_torch_instance(self, parameters):
        """Build the real torch optimizer from the recorded hyper-parameters."""
        return self.torch_class(parameters, **self.param_dict)
class Sequential(tSequential):
    """fate_torch Sequential: a ``torch.nn.Sequential`` whose sub-layers can
    serialize themselves, with an (optional) attached fate_torch optimizer."""

    def to_dict(self):
        """
        get the structure of current sequential
        """
        rs = {}
        idx = 0
        for k in self._modules:
            # prefix with the position so the order survives serialization
            ordered_name = str(idx) + '-' + k
            rs[ordered_name] = self._modules[k].to_dict()
            idx += 1
        return rs

    def to_json(self):
        return json.dumps(self.to_dict(), indent=4)

    def add_optimizer(self, opt):
        setattr(self, 'optimizer', opt)

    def add(self, layer):
        """Add a FateTorchLayer, or absorb another Sequential's layers."""
        if isinstance(layer, Sequential):
            # share the other sequential's module table (aliased, not copied)
            self._modules = layer._modules
            # copy optimizer
            if hasattr(layer, 'optimizer'):
                setattr(self, 'optimizer', layer.optimizer)
        elif isinstance(layer, FateTorchLayer):
            self.add_module(str(len(self)), layer)
            # update optimizer if dont have
            if not hasattr(self, 'optimizer') and hasattr(layer, 'optimizer'):
                setattr(self, 'optimizer', layer.optimizer)
        else:
            raise ValueError(
                'unknown input layer type {}, this type is not supported'.format(
                    type(layer)))

    @staticmethod
    def get_loss_config(loss: FateTorchLoss):
        return loss.to_dict()

    def get_optimizer_config(self, optimizer=None):
        # prefer the optimizer attached via add()/add_optimizer
        if hasattr(self, 'optimizer'):
            return self.optimizer.to_dict()
        else:
            return optimizer.to_dict()

    def get_network_config(self):
        return self.to_dict()
def get_torch_instance(fate_torch_nn_class: FateTorchLayer, param):
    """Instantiate the plain-torch equivalent of a fate_torch layer class.

    Operation classes (OpBase subclasses) are instantiated directly; other
    fate_torch layers are instantiated through their ``torch.nn.Module``
    parent class.  Returns None when no torch parent is found.
    """
    if issubclass(fate_torch_nn_class, OpBase):
        return fate_torch_nn_class(**param)
    for parent in fate_torch_nn_class.__bases__:
        if issubclass(parent, t.nn.Module):
            return parent(**param)
    return None
| 4,209 | 26.880795 | 81 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/optim.py | from torch import optim
from pipeline.component.nn.backend.torch.base import FateTorchOptimizer
class ASGD(optim.ASGD, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.ASGD``; records
    hyper-parameters in ``param_dict`` and only builds the real optimizer
    when ``params`` is given."""

    def __init__(self, params=None, lr=0.01, lambd=0.0001, alpha=0.75,
                 t0=1000000.0, weight_decay=0, foreach=None, maximize=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lambd'] = lambd
        self.param_dict['alpha'] = alpha
        self.param_dict['t0'] = t0
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adadelta(optim.Adadelta, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.Adadelta``."""

    def __init__(self, params=None, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['rho'] = rho
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adagrad(optim.Adagrad, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.Adagrad``."""

    def __init__(self, params=None, lr=0.01, lr_decay=0, weight_decay=0,
                 initial_accumulator_value=0, eps=1e-10, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lr_decay'] = lr_decay
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['initial_accumulator_value'] = initial_accumulator_value
        self.param_dict['eps'] = eps
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adam(optim.Adam, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.Adam``."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class AdamW(optim.AdamW, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.AdamW``."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adamax(optim.Adamax, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.Adamax``."""

    def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class LBFGS(optim.LBFGS, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.LBFGS``."""

    def __init__(self, params=None, lr=1, max_iter=20, max_eval=None,
                 tolerance_grad=1e-07, tolerance_change=1e-09,
                 history_size=100, line_search_fn=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['max_iter'] = max_iter
        self.param_dict['max_eval'] = max_eval
        self.param_dict['tolerance_grad'] = tolerance_grad
        self.param_dict['tolerance_change'] = tolerance_change
        self.param_dict['history_size'] = history_size
        self.param_dict['line_search_fn'] = line_search_fn
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class NAdam(optim.NAdam, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.NAdam``."""

    def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08,
                 weight_decay=0, momentum_decay=0.004, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum_decay'] = momentum_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class RAdam(optim.RAdam, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.RAdam``."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class RMSprop(optim.RMSprop, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.RMSprop``."""

    def __init__(self, params=None, lr=0.01, alpha=0.99, eps=1e-08,
                 weight_decay=0, momentum=0, centered=False, foreach=None,
                 maximize=False, differentiable=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['alpha'] = alpha
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum'] = momentum
        self.param_dict['centered'] = centered
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.param_dict['differentiable'] = differentiable
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Rprop(optim.Rprop, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.Rprop``."""

    def __init__(self, params=None, lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-06, 50), foreach=None, maximize=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['etas'] = etas
        self.param_dict['step_sizes'] = step_sizes
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class SGD(optim.SGD, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.SGD``."""

    def __init__(self, params=None, lr=0.01, momentum=0, dampening=0, weight_decay=0, nesterov=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['momentum'] = momentum
        self.param_dict['dampening'] = dampening
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['nesterov'] = nesterov
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class SparseAdam(optim.SparseAdam, FateTorchOptimizer):
    """Serializable fate_torch wrapper around ``torch.optim.SparseAdam``."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, maximize=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        # params may be omitted so a bare config object can be created
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # Bug fix: the original literal had no '{}' placeholder, so the
            # .format() call was a dead no-op; interpolate the class name.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
| 12,959 | 30.228916 | 118 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/cust.py | from torch import nn
import importlib
from pipeline.component.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
import difflib
MODEL_PATH = None
LOSS_PATH = None
def str_simi(str_a, str_b):
    """Quick similarity ratio between two strings (difflib, 0.0..1.0)."""
    matcher = difflib.SequenceMatcher(None, str_a, str_b)
    return matcher.quick_ratio()
def get_class(module_name, class_name, param, base_path):
    """Import ``base_path.module_name`` and instantiate its nn.Module subclass
    named ``class_name`` with keyword arguments ``param``.

    Raises ValueError when no matching class exists; when other nn.Module
    subclasses are present, the closest name is suggested in the message.
    The redundant ``except ValueError as e: raise e`` wrapper of the original
    was removed (it only re-raised the same exception).
    """
    if module_name.endswith('.py'):
        module_name = module_name.replace('.py', '')
    nn_modules = importlib.import_module('{}.{}'.format(base_path, module_name))
    name_simi_list = []
    for k, v in nn_modules.__dict__.items():
        if isinstance(v, type) and issubclass(v, nn.Module) and v is not nn.Module:
            if v.__name__ == class_name:
                return v(**param)
            name_simi_list.append((str_simi(class_name, v.__name__), v))
    if name_simi_list:
        # Suggest the most similar class name found in the module.
        best_match = max(name_simi_list, key=lambda x: x[0])[1]
        raise ValueError(
            'Did not find any class in {}.py that is subclass of nn.Module and named {}. Do you mean {}?'. format(
                module_name, class_name, best_match.__name__))
    raise ValueError('Did not find any class in {}.py that is subclass of nn.Module and named {}'.
                     format(module_name, class_name))
class CustModel(FateTorchLayer, nn.Module):
    """Placeholder for a user-defined model from the model zoo.

    Stores module/class names and constructor kwargs; the real pytorch
    model is only built when init_model()/get_pytorch_model() is called.
    """

    def __init__(self, module_name, class_name, **kwargs):
        super(CustModel, self).__init__()
        assert isinstance(module_name, str), 'name must be a str, specify the module in the model_zoo'
        assert isinstance(class_name, str), 'class name must be a str, specify the class in the module'
        self.param_dict = {'module_name': module_name, 'class_name': class_name, 'param': kwargs}
        self._model = None

    def init_model(self):
        """Build the wrapped pytorch model if not built yet."""
        if self._model is not None:
            return
        self._model = self.get_pytorch_model()

    def forward(self, x):
        if self._model is None:
            raise ValueError('model not init, call init_model() function')
        return self._model(x)

    def get_pytorch_model(self, module_path=None):
        """Instantiate the user class from MODEL_PATH (or an explicit path)."""
        search_path = MODEL_PATH if module_path is None else module_path
        return get_class(
            self.param_dict['module_name'],
            self.param_dict['class_name'],
            self.param_dict['param'],
            search_path)

    def __repr__(self):
        return 'CustModel({})'.format(str(self.param_dict))
class CustLoss(FateTorchLoss, nn.Module):
    """Placeholder for a user-defined loss from the model zoo; lazily built."""

    def __init__(self, loss_module_name, class_name, **kwargs):
        super(CustLoss, self).__init__()
        assert isinstance(loss_module_name, str), 'loss module name must be a str, specify the module in the model_zoo'
        assert isinstance(class_name, str), 'class name must be a str, specify the class in the module'
        self.param_dict = {'loss_module_name': loss_module_name, 'class_name': class_name, 'param': kwargs}
        self._loss_fn = None

    def init_loss_fn(self):
        """Build the wrapped pytorch loss if not built yet."""
        if self._loss_fn is None:
            self._loss_fn = self.get_pytorch_model()

    def forward(self, pred, label):
        if self._loss_fn is None:
            raise ValueError('loss not init, call init_loss_fn() function')
        return self._loss_fn(pred, label)

    def get_pytorch_model(self, module_path=None):
        """Instantiate the loss class from LOSS_PATH (or an explicit path)."""
        mod_name: str = self.param_dict['loss_module_name']
        cls_name: str = self.param_dict['class_name']
        cls_param: dict = self.param_dict['param']
        target_path = LOSS_PATH if module_path is None else module_path
        return get_class(module_name=mod_name, class_name=cls_name, param=cls_param, base_path=target_path)

    def __repr__(self):
        return 'CustLoss({})'.format(str(self.param_dict))
| 4,188 | 36.738739 | 119 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/init.py | import copy
import torch as t
from torch.nn import init as torch_init
import functools
from pipeline.component.nn.backend.torch.base import FateTorchLayer
from pipeline.component.nn.backend.torch.base import Sequential
# Short-name -> torch.nn.init function lookup table.  Every function here
# mutates the tensor passed to it in place (torch trailing-underscore
# convention).
str_init_func_map = {
    "uniform": torch_init.uniform_,
    "normal": torch_init.normal_,
    "constant": torch_init.constant_,
    "xavier_uniform": torch_init.xavier_uniform_,
    "xavier_normal": torch_init.xavier_normal_,
    "kaiming_uniform": torch_init.kaiming_uniform_,
    "kaiming_normal": torch_init.kaiming_normal_,
    "eye": torch_init.eye_,
    "dirac": torch_init.dirac_,
    "orthogonal": torch_init.orthogonal_,
    "sparse": torch_init.sparse_,
    "zeros": torch_init.zeros_,
    "ones": torch_init.ones_
}
#
# def extract_param(func):
#
# args = inspect.getargspec(func)
# keys = args[0][1:]
# if len(keys) == 0:
# return {}
# defaults = args[-1]
# args_map = {}
# if defaults is not None:
# for idx, i in enumerate(keys[-len(defaults):]):
# args_map[i] = defaults[idx]
#
# for i in keys:
# if i not in args_map:
# args_map[i] = Required()
#
# return args_map
def init_weight(m, initializer):
    """Apply ``initializer`` in place to the weight tensors of module ``m``.

    Covers plain layers (``weight``) as well as LSTM/RNN layers, whose
    weights are exposed as ``weight_hh_l0`` / ``weight_ih_l0``.  Attributes
    that are absent or None are skipped (mirrors the None guards that
    init_bias already had).
    """
    if getattr(m, 'weight', None) is not None:
        initializer(m.weight)
    # LSTM / RNN hidden-to-hidden weight
    if getattr(m, 'weight_hh_l0', None) is not None:
        initializer(m.weight_hh_l0)
    # LSTM / RNN input-to-hidden weight
    if getattr(m, 'weight_ih_l0', None) is not None:
        initializer(m.weight_ih_l0)
def init_bias(m, initializer):
    """Apply ``initializer`` in place to the bias tensors of module ``m``."""
    bias = getattr(m, 'bias', None)
    # LSTM/RNN expose `bias` as a bool flag, not a tensor -- skip those.
    if bias is not None and not isinstance(bias, bool):
        initializer(bias)
    # LSTM / RNN bias tensors
    for attr_name in ('bias_hh_l0', 'bias_ih_l0'):
        value = getattr(m, attr_name, None)
        if value is not None:
            initializer(value)
def get_init_func_type(init='weight'):
    """Map 'weight'/'bias' to the matching init helper; None otherwise."""
    if init == 'weight':
        return init_weight
    if init == 'bias':
        return init_bias
    return None
def recursive_init(m, init_func, obj):
    """Apply ``init_func`` to leaf modules only.

    Container modules (anything with children) are skipped: this function is
    used as the callback of ``Module.apply()``, which already visits every
    submodule itself.  The original code recursed here with ``obj=m``, which
    returned immediately on the next call -- a no-op, replaced by a plain
    return for clarity (behavior unchanged).
    """
    if len(list(m.children())) > 0:
        # Non-leaf module: nothing to initialize directly.
        return
    try:
        init_func(m)
    except Exception as e:
        print('initialize layer {} failed, exception is :{}'.format(m, e))
def make_apply_func(torch_initializer, param_dict, init_func, layer):
    """Bind params into the torch initializer and wrap it for Module.apply()."""
    bound_initializer = functools.partial(torch_initializer, **param_dict)
    bound_init_func = functools.partial(init_func, initializer=bound_initializer)
    apply_fn = functools.partial(recursive_init, obj=layer, init_func=bound_init_func)
    return apply_fn, param_dict
def get_init_dict(init_func, param_dict, init_type):
    """Describe an initialization as a serializable dict (reverse lookup of the name)."""
    func_to_name = {func: name for name, func in str_init_func_map.items()}
    return {
        'init_type': init_type,
        'init_func': func_to_name[init_func],
        'param': param_dict,
    }
def record_initializer(layers, init_dict):
    """Remember weight/bias init settings on a FateTorchLayer instance."""
    if not isinstance(layers, FateTorchLayer):
        return
    init_type = init_dict['init_type']
    if init_type in ('weight', 'bias'):
        layers.initializer[init_type] = init_dict
def run_init(torch_initializer, input_var, init, layer):
    """Dispatch init: Sequential -> recurse per member, Module -> apply+record,
    anything else -> call the torch initializer on it directly."""
    if isinstance(layer, Sequential):
        # Initialize each member of the Sequential separately.
        for member in layer:
            run_init(torch_initializer, input_var, init, member)
        return
    if isinstance(layer, (FateTorchLayer, t.nn.Module)):
        apply_fn, param_dict = make_apply_func(
            torch_initializer, copy.deepcopy(input_var), get_init_func_type(init), layer)
        layer.apply(apply_fn)
        record_initializer(
            layer,
            get_init_dict(torch_initializer, param_dict, init))
        return
    # Plain tensor/parameter: call the torch initializer directly.
    try:
        return torch_initializer(layer, **input_var)
    except Exception as e:
        print(e)
        print('skip initialization')
"""
Init Func
"""
def local_extract(local_dict):
    """Deep-copy of a locals() mapping minus the 'layer' and 'init' entries."""
    filtered = {k: v for k, v in local_dict.items() if k not in ('layer', 'init')}
    return copy.deepcopy(filtered)
def uniform_(layer, a=0, b=1, init='weight'):
    """Uniform(a, b) initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['uniform'],
             input_var=local_extract(locals()), init=init, layer=layer)
def normal_(layer, mean=0, std=1, init='weight'):
    """Normal(mean, std) initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['normal'],
             input_var=local_extract(locals()), init=init, layer=layer)
def constant_(layer, val, init='weight'):
    """Constant-value initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['constant'],
             input_var=local_extract(locals()), init=init, layer=layer)
def ones_(layer, init='weight'):
    """All-ones initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['ones'],
             input_var=local_extract(locals()), init=init, layer=layer)
def zeros_(layer, init='weight'):
    """All-zeros initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['zeros'],
             input_var=local_extract(locals()), init=init, layer=layer)
def eye_(layer, init='weight'):
    """Identity-matrix initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['eye'],
             input_var=local_extract(locals()), init=init, layer=layer)
def dirac_(layer, group=1, init='weight'):
    """Dirac delta initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['dirac'],
             input_var=local_extract(locals()), init=init, layer=layer)
def xavier_uniform_(layer, gain=1.0, init='weight'):
    """Xavier/Glorot uniform initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['xavier_uniform'],
             input_var=local_extract(locals()), init=init, layer=layer)
def xavier_normal_(layer, gain=1.0, init='weight'):
    """Xavier/Glorot normal initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['xavier_normal'],
             input_var=local_extract(locals()), init=init, layer=layer)
def kaiming_uniform_(layer, a=0, mode='fan_in', nonlinearity='leaky_relu', init='weight'):
    """Kaiming/He uniform initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['kaiming_uniform'],
             input_var=local_extract(locals()), init=init, layer=layer)
def kaiming_normal_(layer, a=0, mode='fan_in', nonlinearity='leaky_relu', init='weight'):
    """Kaiming/He normal initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['kaiming_normal'],
             input_var=local_extract(locals()), init=init, layer=layer)
def orthogonal_(layer, gain=1, init='weight'):
    """Orthogonal-matrix initialization for `layer`'s weight or bias."""
    run_init(torch_initializer=str_init_func_map['orthogonal'],
             input_var=local_extract(locals()), init=init, layer=layer)
def sparse_(layer, sparsity, std=0.01, init='weight'):
    """Sparse initialization (given sparsity, Normal(0, std)) for `layer`."""
    run_init(torch_initializer=str_init_func_map['sparse'],
             input_var=local_extract(locals()), init=init, layer=layer)
# Short-name -> fate-torch wrapper function lookup table; mirrors
# str_init_func_map but maps to the wrappers defined above instead of the
# raw torch.nn.init functions.
str_fate_torch_init_func_map = {
    "uniform": uniform_,
    "normal": normal_,
    "constant": constant_,
    "xavier_uniform": xavier_uniform_,
    "xavier_normal": xavier_normal_,
    "kaiming_uniform": kaiming_uniform_,
    "kaiming_normal": kaiming_normal_,
    "eye": eye_,
    "dirac": dirac_,
    "orthogonal": orthogonal_,
    "sparse": sparse_,
    "zeros": zeros_,
    "ones": ones_
}
# Import-only module: no script behavior.
if __name__ == '__main__':
    pass
| 6,775 | 25.677165 | 89 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/nn.py | from pipeline.component.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from pipeline.component.nn.backend.torch.base import Sequential
from torch import nn
class Bilinear(nn.modules.linear.Bilinear, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Bilinear; records init args in param_dict."""
    def __init__(self, in1_features, in2_features, out_features, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in1_features': in1_features, 'in2_features': in2_features,
            'out_features': out_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.linear.Bilinear.__init__(self, **self.param_dict)
class Identity(nn.modules.linear.Identity, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Identity; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.linear.Identity.__init__(self, **self.param_dict)
class LazyLinear(nn.modules.linear.LazyLinear, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LazyLinear; records init args in param_dict."""
    def __init__(self, out_features, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'out_features': out_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.linear.LazyLinear.__init__(self, **self.param_dict)
class Linear(nn.modules.linear.Linear, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Linear; records init args in param_dict."""
    def __init__(self, in_features, out_features, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in_features': in_features, 'out_features': out_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.linear.Linear.__init__(self, **self.param_dict)
class NonDynamicallyQuantizableLinear(
        nn.modules.linear.NonDynamicallyQuantizableLinear,
        FateTorchLayer):
    """Fate-torch wrapper for torch's NonDynamicallyQuantizableLinear."""
    def __init__(self, in_features, out_features, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in_features': in_features, 'out_features': out_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.linear.NonDynamicallyQuantizableLinear.__init__(
            self, **self.param_dict)
class GRU(nn.modules.rnn.GRU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.GRU; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.rnn.GRU.__init__(self, **self.param_dict)
class GRUCell(nn.modules.rnn.GRUCell, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.GRUCell; records init args in param_dict."""
    def __init__(self, input_size, hidden_size, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.rnn.GRUCell.__init__(self, **self.param_dict)
class LSTM(nn.modules.rnn.LSTM, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LSTM; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.rnn.LSTM.__init__(self, **self.param_dict)
class LSTMCell(nn.modules.rnn.LSTMCell, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LSTMCell; records init args in param_dict."""
    def __init__(self, input_size, hidden_size, bias=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.rnn.LSTMCell.__init__(self, **self.param_dict)
class RNN(nn.modules.rnn.RNN, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.RNN; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.rnn.RNN.__init__(self, **self.param_dict)
class RNNBase(nn.modules.rnn.RNNBase, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.RNNBase; records init args in param_dict."""
    def __init__(self, mode, input_size, hidden_size, num_layers=1, bias=True,
                 batch_first=False, dropout=0.0, bidirectional=False,
                 proj_size=0, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'num_layers': num_layers, 'bias': bias, 'batch_first': batch_first,
            'dropout': dropout, 'bidirectional': bidirectional,
            'proj_size': proj_size, 'device': device, 'dtype': dtype,
            'mode': mode, 'input_size': input_size, 'hidden_size': hidden_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNBase.__init__(self, **self.param_dict)
class RNNCell(nn.modules.rnn.RNNCell, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.RNNCell; records init args in param_dict."""
    def __init__(self, input_size, hidden_size, bias=True, nonlinearity='tanh', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'nonlinearity': nonlinearity,
            'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNCell.__init__(self, **self.param_dict)
class RNNCellBase(nn.modules.rnn.RNNCellBase, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.RNNCellBase; records init args in param_dict."""
    def __init__(self, input_size, hidden_size, bias, num_chunks, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size,
            'bias': bias, 'num_chunks': num_chunks,
        })
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNCellBase.__init__(self, **self.param_dict)
class Embedding(nn.modules.sparse.Embedding, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Embedding; records init args in param_dict."""
    def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
                 max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
                 sparse=False, _weight=None, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'padding_idx': padding_idx, 'max_norm': max_norm,
            'norm_type': norm_type, 'scale_grad_by_freq': scale_grad_by_freq,
            'sparse': sparse, '_weight': _weight,
            'device': device, 'dtype': dtype,
            'num_embeddings': num_embeddings, 'embedding_dim': embedding_dim,
        })
        self.param_dict.update(kwargs)
        nn.modules.sparse.Embedding.__init__(self, **self.param_dict)
class EmbeddingBag(nn.modules.sparse.EmbeddingBag, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.EmbeddingBag; records init args in param_dict."""
    def __init__(self, num_embeddings, embedding_dim, max_norm=None,
                 norm_type=2.0, scale_grad_by_freq=False, mode='mean',
                 sparse=False, _weight=None, include_last_offset=False,
                 padding_idx=None, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'max_norm': max_norm, 'norm_type': norm_type,
            'scale_grad_by_freq': scale_grad_by_freq, 'mode': mode,
            'sparse': sparse, '_weight': _weight,
            'include_last_offset': include_last_offset,
            'padding_idx': padding_idx, 'device': device, 'dtype': dtype,
            'num_embeddings': num_embeddings, 'embedding_dim': embedding_dim,
        })
        self.param_dict.update(kwargs)
        nn.modules.sparse.EmbeddingBag.__init__(self, **self.param_dict)
class AlphaDropout(nn.modules.dropout.AlphaDropout, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.AlphaDropout; records init args in param_dict."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.AlphaDropout.__init__(self, **self.param_dict)
class Dropout(nn.modules.dropout.Dropout, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Dropout; records init args in param_dict."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout.__init__(self, **self.param_dict)
class Dropout1d(nn.modules.dropout.Dropout1d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Dropout1d; records init args in param_dict."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout1d.__init__(self, **self.param_dict)
class Dropout2d(nn.modules.dropout.Dropout2d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Dropout2d; records init args in param_dict."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout2d.__init__(self, **self.param_dict)
class Dropout3d(nn.modules.dropout.Dropout3d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Dropout3d; records init args in param_dict."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout3d.__init__(self, **self.param_dict)
class FeatureAlphaDropout(
        nn.modules.dropout.FeatureAlphaDropout,
        FateTorchLayer):
    """Fate-torch wrapper for torch.nn.FeatureAlphaDropout."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.FeatureAlphaDropout.__init__(
            self, **self.param_dict)
class _DropoutNd(nn.modules.dropout._DropoutNd, FateTorchLayer):
    """Fate-torch wrapper for torch's _DropoutNd base; records init args."""
    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout._DropoutNd.__init__(self, **self.param_dict)
class CELU(nn.modules.activation.CELU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.CELU; records init args in param_dict."""
    def __init__(self, alpha=1.0, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'alpha': alpha, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.CELU.__init__(self, **self.param_dict)
class ELU(nn.modules.activation.ELU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.ELU; records init args in param_dict."""
    def __init__(self, alpha=1.0, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'alpha': alpha, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.ELU.__init__(self, **self.param_dict)
class GELU(nn.modules.activation.GELU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.GELU; records init args in param_dict."""
    def __init__(self, approximate='none', **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'approximate': approximate})
        self.param_dict.update(kwargs)
        nn.modules.activation.GELU.__init__(self, **self.param_dict)
class GLU(nn.modules.activation.GLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.GLU; records init args in param_dict."""
    def __init__(self, dim=-1, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.GLU.__init__(self, **self.param_dict)
class Hardshrink(nn.modules.activation.Hardshrink, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Hardshrink; records init args in param_dict."""
    def __init__(self, lambd=0.5, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'lambd': lambd})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardshrink.__init__(self, **self.param_dict)
class Hardsigmoid(nn.modules.activation.Hardsigmoid, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Hardsigmoid; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardsigmoid.__init__(self, **self.param_dict)
class Hardswish(nn.modules.activation.Hardswish, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Hardswish; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardswish.__init__(self, **self.param_dict)
class Hardtanh(nn.modules.activation.Hardtanh, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Hardtanh; records init args in param_dict."""
    def __init__(self, min_val=-1.0, max_val=1.0, inplace=False,
                 min_value=None, max_value=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'min_val': min_val, 'max_val': max_val, 'inplace': inplace,
            'min_value': min_value, 'max_value': max_value,
        })
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardtanh.__init__(self, **self.param_dict)
class LeakyReLU(nn.modules.activation.LeakyReLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LeakyReLU; records init args in param_dict."""
    def __init__(self, negative_slope=0.01, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'negative_slope': negative_slope, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.LeakyReLU.__init__(self, **self.param_dict)
class LogSigmoid(nn.modules.activation.LogSigmoid, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LogSigmoid; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.LogSigmoid.__init__(self, **self.param_dict)
class LogSoftmax(nn.modules.activation.LogSoftmax, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.LogSoftmax; records init args in param_dict."""
    def __init__(self, dim=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.LogSoftmax.__init__(self, **self.param_dict)
class Mish(nn.modules.activation.Mish, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Mish; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Mish.__init__(self, **self.param_dict)
class MultiheadAttention(
        nn.modules.activation.MultiheadAttention,
        FateTorchLayer):
    """Fate-torch wrapper for torch.nn.MultiheadAttention."""
    def __init__(self, embed_dim, num_heads, dropout=0.0, bias=True,
                 add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None,
                 batch_first=False, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'dropout': dropout, 'bias': bias, 'add_bias_kv': add_bias_kv,
            'add_zero_attn': add_zero_attn, 'kdim': kdim, 'vdim': vdim,
            'batch_first': batch_first, 'device': device, 'dtype': dtype,
            'embed_dim': embed_dim, 'num_heads': num_heads,
        })
        self.param_dict.update(kwargs)
        nn.modules.activation.MultiheadAttention.__init__(
            self, **self.param_dict)
class PReLU(nn.modules.activation.PReLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.PReLU; records init args in param_dict."""
    def __init__(self, num_parameters=1, init=0.25, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'num_parameters': num_parameters, 'init': init,
            'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.activation.PReLU.__init__(self, **self.param_dict)
class RReLU(nn.modules.activation.RReLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.RReLU; records init args in param_dict."""
    def __init__(self, lower=0.125, upper=0.3333333333333333, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'lower': lower, 'upper': upper, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.RReLU.__init__(self, **self.param_dict)
class ReLU(nn.modules.activation.ReLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.ReLU; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.ReLU.__init__(self, **self.param_dict)
class ReLU6(nn.modules.activation.ReLU6, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.ReLU6; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.ReLU6.__init__(self, **self.param_dict)
class SELU(nn.modules.activation.SELU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.SELU; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.SELU.__init__(self, **self.param_dict)
class SiLU(nn.modules.activation.SiLU, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.SiLU; records init args in param_dict."""
    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.SiLU.__init__(self, **self.param_dict)
class Sigmoid(nn.modules.activation.Sigmoid, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Sigmoid; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.Sigmoid.__init__(self, **self.param_dict)
class Softmax(nn.modules.activation.Softmax, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softmax; records init args in param_dict."""
    def __init__(self, dim=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.Softmax.__init__(self, **self.param_dict)
class Softmax2d(nn.modules.activation.Softmax2d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softmax2d; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.Softmax2d.__init__(self, **self.param_dict)
class Softmin(nn.modules.activation.Softmin, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softmin; records init args in param_dict."""
    def __init__(self, dim=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.Softmin.__init__(self, **self.param_dict)
class Softplus(nn.modules.activation.Softplus, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softplus; records init args in param_dict."""
    def __init__(self, beta=1, threshold=20, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'beta': beta, 'threshold': threshold})
        self.param_dict.update(kwargs)
        nn.modules.activation.Softplus.__init__(self, **self.param_dict)
class Softshrink(nn.modules.activation.Softshrink, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softshrink; records init args in param_dict."""
    def __init__(self, lambd=0.5, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'lambd': lambd})
        self.param_dict.update(kwargs)
        nn.modules.activation.Softshrink.__init__(self, **self.param_dict)
class Softsign(nn.modules.activation.Softsign, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Softsign; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.Softsign.__init__(self, **self.param_dict)
class Tanh(nn.modules.activation.Tanh, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Tanh; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.Tanh.__init__(self, **self.param_dict)
class Tanhshrink(nn.modules.activation.Tanhshrink, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Tanhshrink; records init args in param_dict."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        if kwargs:
            self.param_dict.update(kwargs)
        nn.modules.activation.Tanhshrink.__init__(self, **self.param_dict)
class Threshold(nn.modules.activation.Threshold, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Threshold; records init args in param_dict."""
    def __init__(self, threshold, value, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace, 'threshold': threshold, 'value': value})
        self.param_dict.update(kwargs)
        nn.modules.activation.Threshold.__init__(self, **self.param_dict)
class Conv1d(nn.modules.conv.Conv1d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Conv1d; records init args in param_dict."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv1d.__init__(self, **self.param_dict)
class Conv2d(nn.modules.conv.Conv2d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Conv2d; records init args in param_dict."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv2d.__init__(self, **self.param_dict)
class Conv3d(nn.modules.conv.Conv3d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.Conv3d; records init args in param_dict."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv3d.__init__(self, **self.param_dict)
class ConvTranspose1d(nn.modules.conv.ConvTranspose1d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.ConvTranspose1d; records init args."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.ConvTranspose1d.__init__(self, **self.param_dict)
class ConvTranspose2d(nn.modules.conv.ConvTranspose2d, FateTorchLayer):
    """Fate-torch wrapper for torch.nn.ConvTranspose2d; records init args."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.ConvTranspose2d.__init__(self, **self.param_dict)
class ConvTranspose3d(nn.modules.conv.ConvTranspose3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ConvTranspose3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            output_padding=0,
            groups=1,
            bias=True,
            dilation=1,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, output_padding=output_padding,
            groups=groups, bias=bias, dilation=dilation,
            padding_mode=padding_mode, device=device, dtype=dtype,
            in_channels=in_channels, out_channels=out_channels,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.ConvTranspose3d.__init__(self, **self.param_dict)
class LazyConv1d(nn.modules.conv.LazyConv1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConv1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias, padding_mode=padding_mode,
            device=device, dtype=dtype, out_channels=out_channels,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConv1d.__init__(self, **self.param_dict)
class LazyConv2d(nn.modules.conv.LazyConv2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConv2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias, padding_mode=padding_mode,
            device=device, dtype=dtype, out_channels=out_channels,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConv2d.__init__(self, **self.param_dict)
class LazyConv3d(nn.modules.conv.LazyConv3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConv3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            groups=groups, bias=bias, padding_mode=padding_mode,
            device=device, dtype=dtype, out_channels=out_channels,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConv3d.__init__(self, **self.param_dict)
class LazyConvTranspose1d(nn.modules.conv.LazyConvTranspose1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConvTranspose1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            output_padding=0,
            groups=1,
            bias=True,
            dilation=1,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, output_padding=output_padding,
            groups=groups, bias=bias, dilation=dilation,
            padding_mode=padding_mode, device=device, dtype=dtype,
            out_channels=out_channels, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConvTranspose1d.__init__(self, **self.param_dict)
class LazyConvTranspose2d(nn.modules.conv.LazyConvTranspose2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConvTranspose2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            output_padding=0,
            groups=1,
            bias=True,
            dilation=1,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, output_padding=output_padding,
            groups=groups, bias=bias, dilation=dilation,
            padding_mode=padding_mode, device=device, dtype=dtype,
            out_channels=out_channels, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConvTranspose2d.__init__(self, **self.param_dict)
class LazyConvTranspose3d(nn.modules.conv.LazyConvTranspose3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyConvTranspose3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            output_padding=0,
            groups=1,
            bias=True,
            dilation=1,
            padding_mode='zeros',
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, output_padding=output_padding,
            groups=groups, bias=bias, dilation=dilation,
            padding_mode=padding_mode, device=device, dtype=dtype,
            out_channels=out_channels, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv.LazyConvTranspose3d.__init__(self, **self.param_dict)
class _ConvNd(nn.modules.conv._ConvNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ConvNd`` base class.

    All arguments are positional-required, mirroring the torch base; they
    are recorded in ``self.param_dict`` (``kwargs`` take precedence)
    before delegating to the torch constructor.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            bias,
            padding_mode,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            device=device, dtype=dtype, in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            transposed=transposed, output_padding=output_padding,
            groups=groups, bias=bias, padding_mode=padding_mode)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv._ConvNd.__init__(self, **self.param_dict)
class _ConvTransposeMixin(nn.modules.conv._ConvTransposeMixin, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ConvTransposeMixin``;
    forwards all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.conv._ConvTransposeMixin.__init__(self, **self.param_dict)
class _ConvTransposeNd(nn.modules.conv._ConvTransposeNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ConvTransposeNd`` base.

    Records all constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            transposed,
            output_padding,
            groups,
            bias,
            padding_mode,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            device=device, dtype=dtype, in_channels=in_channels,
            out_channels=out_channels, kernel_size=kernel_size,
            stride=stride, padding=padding, dilation=dilation,
            transposed=transposed, output_padding=output_padding,
            groups=groups, bias=bias, padding_mode=padding_mode)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.conv._ConvTransposeNd.__init__(self, **self.param_dict)
class _LazyConvXdMixin(nn.modules.conv._LazyConvXdMixin, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_LazyConvXdMixin``;
    forwards all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.conv._LazyConvXdMixin.__init__(self, **self.param_dict)
class Transformer(nn.modules.transformer.Transformer, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.Transformer`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            d_model=512,
            nhead=8,
            num_encoder_layers=6,
            num_decoder_layers=6,
            dim_feedforward=2048,
            dropout=0.1,
            custom_encoder=None,
            custom_decoder=None,
            layer_norm_eps=1e-05,
            batch_first=False,
            norm_first=False,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            d_model=d_model, nhead=nhead,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dim_feedforward=dim_feedforward, dropout=dropout,
            custom_encoder=custom_encoder, custom_decoder=custom_decoder,
            layer_norm_eps=layer_norm_eps, batch_first=batch_first,
            norm_first=norm_first, device=device, dtype=dtype)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.transformer.Transformer.__init__(self, **self.param_dict)
class TransformerDecoder(
        nn.modules.transformer.TransformerDecoder,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.TransformerDecoder`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(self, decoder_layer, num_layers, norm=None, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            norm=norm, decoder_layer=decoder_layer, num_layers=num_layers)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.transformer.TransformerDecoder.__init__(
            self, **self.param_dict)
class TransformerDecoderLayer(
        nn.modules.transformer.TransformerDecoderLayer,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.TransformerDecoderLayer`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            d_model,
            nhead,
            dim_feedforward=2048,
            dropout=0.1,
            layer_norm_eps=1e-05,
            batch_first=False,
            norm_first=False,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            dim_feedforward=dim_feedforward, dropout=dropout,
            layer_norm_eps=layer_norm_eps, batch_first=batch_first,
            norm_first=norm_first, device=device, dtype=dtype,
            d_model=d_model, nhead=nhead)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.transformer.TransformerDecoderLayer.__init__(
            self, **self.param_dict)
class TransformerEncoder(
        nn.modules.transformer.TransformerEncoder,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.TransformerEncoder`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            encoder_layer,
            num_layers,
            norm=None,
            enable_nested_tensor=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            norm=norm, enable_nested_tensor=enable_nested_tensor,
            encoder_layer=encoder_layer, num_layers=num_layers)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.transformer.TransformerEncoder.__init__(
            self, **self.param_dict)
class TransformerEncoderLayer(
        nn.modules.transformer.TransformerEncoderLayer,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.TransformerEncoderLayer`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            d_model,
            nhead,
            dim_feedforward=2048,
            dropout=0.1,
            layer_norm_eps=1e-05,
            batch_first=False,
            norm_first=False,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            dim_feedforward=dim_feedforward, dropout=dropout,
            layer_norm_eps=layer_norm_eps, batch_first=batch_first,
            norm_first=norm_first, device=device, dtype=dtype,
            d_model=d_model, nhead=nhead)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.transformer.TransformerEncoderLayer.__init__(
            self, **self.param_dict)
class AdaptiveAvgPool1d(nn.modules.pooling.AdaptiveAvgPool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveAvgPool1d`."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveAvgPool1d.__init__(self, **self.param_dict)
class AdaptiveAvgPool2d(nn.modules.pooling.AdaptiveAvgPool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveAvgPool2d`."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveAvgPool2d.__init__(self, **self.param_dict)
class AdaptiveAvgPool3d(nn.modules.pooling.AdaptiveAvgPool3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveAvgPool3d`."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveAvgPool3d.__init__(self, **self.param_dict)
class AdaptiveMaxPool1d(nn.modules.pooling.AdaptiveMaxPool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveMaxPool1d`."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(return_indices=return_indices, output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveMaxPool1d.__init__(self, **self.param_dict)
class AdaptiveMaxPool2d(nn.modules.pooling.AdaptiveMaxPool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveMaxPool2d`."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(return_indices=return_indices, output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveMaxPool2d.__init__(self, **self.param_dict)
class AdaptiveMaxPool3d(nn.modules.pooling.AdaptiveMaxPool3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AdaptiveMaxPool3d`."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(return_indices=return_indices, output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AdaptiveMaxPool3d.__init__(self, **self.param_dict)
class AvgPool1d(nn.modules.pooling.AvgPool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AvgPool1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            ceil_mode=False,
            count_include_pad=True,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, ceil_mode=ceil_mode,
            count_include_pad=count_include_pad, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AvgPool1d.__init__(self, **self.param_dict)
class AvgPool2d(nn.modules.pooling.AvgPool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AvgPool2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            ceil_mode=False,
            count_include_pad=True,
            divisor_override=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            divisor_override=divisor_override, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AvgPool2d.__init__(self, **self.param_dict)
class AvgPool3d(nn.modules.pooling.AvgPool3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.AvgPool3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            ceil_mode=False,
            count_include_pad=True,
            divisor_override=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
            divisor_override=divisor_override, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.AvgPool3d.__init__(self, **self.param_dict)
class FractionalMaxPool2d(
        nn.modules.pooling.FractionalMaxPool2d,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.FractionalMaxPool2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            output_size=None,
            output_ratio=None,
            return_indices=False,
            _random_samples=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            output_size=output_size, output_ratio=output_ratio,
            return_indices=return_indices, _random_samples=_random_samples,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.FractionalMaxPool2d.__init__(
            self, **self.param_dict)
class FractionalMaxPool3d(
        nn.modules.pooling.FractionalMaxPool3d,
        FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.FractionalMaxPool3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            output_size=None,
            output_ratio=None,
            return_indices=False,
            _random_samples=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            output_size=output_size, output_ratio=output_ratio,
            return_indices=return_indices, _random_samples=_random_samples,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.FractionalMaxPool3d.__init__(
            self, **self.param_dict)
class LPPool1d(nn.modules.pooling.LPPool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LPPool1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            norm_type,
            kernel_size,
            stride=None,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, ceil_mode=ceil_mode, norm_type=norm_type,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.LPPool1d.__init__(self, **self.param_dict)
class LPPool2d(nn.modules.pooling.LPPool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LPPool2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            norm_type,
            kernel_size,
            stride=None,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, ceil_mode=ceil_mode, norm_type=norm_type,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.LPPool2d.__init__(self, **self.param_dict)
class MaxPool1d(nn.modules.pooling.MaxPool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxPool1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            dilation=1,
            return_indices=False,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            return_indices=return_indices, ceil_mode=ceil_mode,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxPool1d.__init__(self, **self.param_dict)
class MaxPool2d(nn.modules.pooling.MaxPool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxPool2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            dilation=1,
            return_indices=False,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            return_indices=return_indices, ceil_mode=ceil_mode,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxPool2d.__init__(self, **self.param_dict)
class MaxPool3d(nn.modules.pooling.MaxPool3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxPool3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            dilation=1,
            return_indices=False,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            return_indices=return_indices, ceil_mode=ceil_mode,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxPool3d.__init__(self, **self.param_dict)
class MaxUnpool1d(nn.modules.pooling.MaxUnpool1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxUnpool1d`."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(stride=stride, padding=padding, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxUnpool1d.__init__(self, **self.param_dict)
class MaxUnpool2d(nn.modules.pooling.MaxUnpool2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxUnpool2d`."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(stride=stride, padding=padding, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxUnpool2d.__init__(self, **self.param_dict)
class MaxUnpool3d(nn.modules.pooling.MaxUnpool3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.MaxUnpool3d`."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(stride=stride, padding=padding, kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling.MaxUnpool3d.__init__(self, **self.param_dict)
class _AdaptiveAvgPoolNd(
        nn.modules.pooling._AdaptiveAvgPoolNd,
        FateTorchLayer):
    """fate-torch wrapper for torch's private ``_AdaptiveAvgPoolNd`` base."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling._AdaptiveAvgPoolNd.__init__(self, **self.param_dict)
class _AdaptiveMaxPoolNd(
        nn.modules.pooling._AdaptiveMaxPoolNd,
        FateTorchLayer):
    """fate-torch wrapper for torch's private ``_AdaptiveMaxPoolNd`` base."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(return_indices=return_indices, output_size=output_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling._AdaptiveMaxPoolNd.__init__(self, **self.param_dict)
class _AvgPoolNd(nn.modules.pooling._AvgPoolNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_AvgPoolNd``; forwards
    all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.pooling._AvgPoolNd.__init__(self, **self.param_dict)
class _LPPoolNd(nn.modules.pooling._LPPoolNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_LPPoolNd`` base.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            norm_type,
            kernel_size,
            stride=None,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, ceil_mode=ceil_mode, norm_type=norm_type,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling._LPPoolNd.__init__(self, **self.param_dict)
class _MaxPoolNd(nn.modules.pooling._MaxPoolNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_MaxPoolNd`` base.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            kernel_size,
            stride=None,
            padding=0,
            dilation=1,
            return_indices=False,
            ceil_mode=False,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            stride=stride, padding=padding, dilation=dilation,
            return_indices=return_indices, ceil_mode=ceil_mode,
            kernel_size=kernel_size)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.pooling._MaxPoolNd.__init__(self, **self.param_dict)
class _MaxUnpoolNd(nn.modules.pooling._MaxUnpoolNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_MaxUnpoolNd``; forwards
    all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.pooling._MaxUnpoolNd.__init__(self, **self.param_dict)
class BatchNorm1d(nn.modules.batchnorm.BatchNorm1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.BatchNorm1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype, num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.BatchNorm1d.__init__(self, **self.param_dict)
class BatchNorm2d(nn.modules.batchnorm.BatchNorm2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.BatchNorm2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype, num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.BatchNorm2d.__init__(self, **self.param_dict)
class BatchNorm3d(nn.modules.batchnorm.BatchNorm3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.BatchNorm3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype, num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.BatchNorm3d.__init__(self, **self.param_dict)
class LazyBatchNorm1d(nn.modules.batchnorm.LazyBatchNorm1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyBatchNorm1d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.LazyBatchNorm1d.__init__(self, **self.param_dict)
class LazyBatchNorm2d(nn.modules.batchnorm.LazyBatchNorm2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyBatchNorm2d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.LazyBatchNorm2d.__init__(self, **self.param_dict)
class LazyBatchNorm3d(nn.modules.batchnorm.LazyBatchNorm3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.LazyBatchNorm3d`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.LazyBatchNorm3d.__init__(self, **self.param_dict)
class SyncBatchNorm(nn.modules.batchnorm.SyncBatchNorm, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.SyncBatchNorm`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            process_group=None,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats,
            process_group=process_group, device=device, dtype=dtype,
            num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm.SyncBatchNorm.__init__(self, **self.param_dict)
class _BatchNorm(nn.modules.batchnorm._BatchNorm, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_BatchNorm`` base.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype, num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm._BatchNorm.__init__(self, **self.param_dict)
class _LazyNormBase(nn.modules.batchnorm._LazyNormBase, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_LazyNormBase``.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm._LazyNormBase.__init__(self, **self.param_dict)
class _NormBase(nn.modules.batchnorm._NormBase, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_NormBase``.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(
            eps=eps, momentum=momentum, affine=affine,
            track_running_stats=track_running_stats, device=device,
            dtype=dtype, num_features=num_features)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.batchnorm._NormBase.__init__(self, **self.param_dict)
class ConstantPad1d(nn.modules.padding.ConstantPad1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ConstantPad1d`."""

    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding, value=value)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ConstantPad1d.__init__(self, **self.param_dict)
class ConstantPad2d(nn.modules.padding.ConstantPad2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ConstantPad2d`."""

    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding, value=value)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ConstantPad2d.__init__(self, **self.param_dict)
class ConstantPad3d(nn.modules.padding.ConstantPad3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ConstantPad3d`."""

    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding, value=value)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ConstantPad3d.__init__(self, **self.param_dict)
class ReflectionPad1d(nn.modules.padding.ReflectionPad1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReflectionPad1d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReflectionPad1d.__init__(self, **self.param_dict)
class ReflectionPad2d(nn.modules.padding.ReflectionPad2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReflectionPad2d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReflectionPad2d.__init__(self, **self.param_dict)
class ReflectionPad3d(nn.modules.padding.ReflectionPad3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReflectionPad3d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReflectionPad3d.__init__(self, **self.param_dict)
class ReplicationPad1d(nn.modules.padding.ReplicationPad1d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReplicationPad1d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReplicationPad1d.__init__(self, **self.param_dict)
class ReplicationPad2d(nn.modules.padding.ReplicationPad2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReplicationPad2d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReplicationPad2d.__init__(self, **self.param_dict)
class ReplicationPad3d(nn.modules.padding.ReplicationPad3d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ReplicationPad3d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ReplicationPad3d.__init__(self, **self.param_dict)
class ZeroPad2d(nn.modules.padding.ZeroPad2d, FateTorchLayer):
    """fate-torch wrapper for :class:`torch.nn.ZeroPad2d`."""

    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(padding=padding)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding.ZeroPad2d.__init__(self, **self.param_dict)
class _ConstantPadNd(nn.modules.padding._ConstantPadNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ConstantPadNd`` base."""

    def __init__(self, value, **kwargs):
        FateTorchLayer.__init__(self)
        cfg = dict(value=value)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.padding._ConstantPadNd.__init__(self, **self.param_dict)
class _ReflectionPadNd(nn.modules.padding._ReflectionPadNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ReflectionPadNd``;
    forwards all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.padding._ReflectionPadNd.__init__(self, **self.param_dict)
class _ReplicationPadNd(nn.modules.padding._ReplicationPadNd, FateTorchLayer):
    """fate-torch wrapper for torch's private ``_ReplicationPadNd``;
    forwards all keyword arguments through ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        for name, value in kwargs.items():
            self.param_dict[name] = value
        nn.modules.padding._ReplicationPadNd.__init__(self, **self.param_dict)
class BCELoss(nn.modules.loss.BCELoss, FateTorchLoss):
    """fate-torch wrapper for :class:`torch.nn.BCELoss`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        cfg = dict(
            weight=weight, size_average=size_average, reduce=reduce,
            reduction=reduction)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.loss.BCELoss.__init__(self, **self.param_dict)
class BCEWithLogitsLoss(nn.modules.loss.BCEWithLogitsLoss, FateTorchLoss):
    """fate-torch wrapper for :class:`torch.nn.BCEWithLogitsLoss`.

    Captures the constructor arguments in ``self.param_dict`` (``kwargs``
    take precedence) before delegating to the torch constructor.
    """

    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            pos_weight=None,
            **kwargs):
        FateTorchLoss.__init__(self)
        cfg = dict(
            weight=weight, size_average=size_average, reduce=reduce,
            reduction=reduction, pos_weight=pos_weight)
        cfg.update(kwargs)
        self.param_dict.update(cfg)
        nn.modules.loss.BCEWithLogitsLoss.__init__(self, **self.param_dict)
class CTCLoss(nn.modules.loss.CTCLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.CTCLoss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, blank=0, reduction='mean', zero_infinity=False,
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(blank=blank, reduction=reduction,
                               zero_infinity=zero_infinity)
        self.param_dict.update(kwargs)
        nn.modules.loss.CTCLoss.__init__(self, **self.param_dict)
class CosineEmbeddingLoss(nn.modules.loss.CosineEmbeddingLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.CosineEmbeddingLoss; init args are
    captured in ``param_dict`` for FATE serialization."""

    def __init__(self, margin=0.0, size_average=None, reduce=None,
                 reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(margin=margin, size_average=size_average,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.CosineEmbeddingLoss.__init__(self, **self.param_dict)
class CrossEntropyLoss(nn.modules.loss.CrossEntropyLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.CrossEntropyLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean', label_smoothing=0.0, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(weight=weight, size_average=size_average,
                               ignore_index=ignore_index, reduce=reduce,
                               reduction=reduction,
                               label_smoothing=label_smoothing)
        self.param_dict.update(kwargs)
        nn.modules.loss.CrossEntropyLoss.__init__(self, **self.param_dict)
class GaussianNLLLoss(nn.modules.loss.GaussianNLLLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.GaussianNLLLoss; forwards all kwargs
    through ``param_dict`` for FATE serialization."""
    def __init__(self, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.loss.GaussianNLLLoss.__init__(self, **self.param_dict)
class HingeEmbeddingLoss(nn.modules.loss.HingeEmbeddingLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.HingeEmbeddingLoss; init args are
    captured in ``param_dict`` for FATE serialization."""

    def __init__(self, margin=1.0, size_average=None, reduce=None,
                 reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(margin=margin, size_average=size_average,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.HingeEmbeddingLoss.__init__(self, **self.param_dict)
class HuberLoss(nn.modules.loss.HuberLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.HuberLoss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, reduction='mean', delta=1.0, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(reduction=reduction, delta=delta)
        self.param_dict.update(kwargs)
        nn.modules.loss.HuberLoss.__init__(self, **self.param_dict)
class KLDivLoss(nn.modules.loss.KLDivLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.KLDivLoss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 log_target=False, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction, log_target=log_target)
        self.param_dict.update(kwargs)
        nn.modules.loss.KLDivLoss.__init__(self, **self.param_dict)
class L1Loss(nn.modules.loss.L1Loss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.L1Loss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.L1Loss.__init__(self, **self.param_dict)
class MSELoss(nn.modules.loss.MSELoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.MSELoss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.MSELoss.__init__(self, **self.param_dict)
class MarginRankingLoss(nn.modules.loss.MarginRankingLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.MarginRankingLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, margin=0.0, size_average=None, reduce=None,
                 reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(margin=margin, size_average=size_average,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.MarginRankingLoss.__init__(self, **self.param_dict)
class MultiLabelMarginLoss(
        nn.modules.loss.MultiLabelMarginLoss,
        FateTorchLoss):
    """fate_torch wrapper of torch.nn.MultiLabelMarginLoss; init args are
    captured in ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiLabelMarginLoss.__init__(self, **self.param_dict)
class MultiLabelSoftMarginLoss(
        nn.modules.loss.MultiLabelSoftMarginLoss,
        FateTorchLoss):
    """fate_torch wrapper of torch.nn.MultiLabelSoftMarginLoss; init args are
    captured in ``param_dict`` for FATE serialization."""

    def __init__(self, weight=None, size_average=None, reduce=None,
                 reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(weight=weight, size_average=size_average,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiLabelSoftMarginLoss.__init__(
            self, **self.param_dict)
class MultiMarginLoss(nn.modules.loss.MultiMarginLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.MultiMarginLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, p=1, margin=1.0, weight=None, size_average=None,
                 reduce=None, reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(p=p, margin=margin, weight=weight,
                               size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiMarginLoss.__init__(self, **self.param_dict)
class NLLLoss(nn.modules.loss.NLLLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.NLLLoss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(weight=weight, size_average=size_average,
                               ignore_index=ignore_index, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.NLLLoss.__init__(self, **self.param_dict)
class NLLLoss2d(nn.modules.loss.NLLLoss2d, FateTorchLoss):
    """fate_torch wrapper of torch.nn.NLLLoss2d (deprecated in torch in favor
    of NLLLoss); init args are captured in ``param_dict``."""

    def __init__(self, weight=None, size_average=None, ignore_index=-100,
                 reduce=None, reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(weight=weight, size_average=size_average,
                               ignore_index=ignore_index, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.NLLLoss2d.__init__(self, **self.param_dict)
class PoissonNLLLoss(nn.modules.loss.PoissonNLLLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.PoissonNLLLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, log_input=True, full=False, size_average=None,
                 eps=1e-08, reduce=None, reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(log_input=log_input, full=full,
                               size_average=size_average, eps=eps,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.PoissonNLLLoss.__init__(self, **self.param_dict)
class SmoothL1Loss(nn.modules.loss.SmoothL1Loss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.SmoothL1Loss; init args are captured in
    ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 beta=1.0, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction, beta=beta)
        self.param_dict.update(kwargs)
        nn.modules.loss.SmoothL1Loss.__init__(self, **self.param_dict)
class SoftMarginLoss(nn.modules.loss.SoftMarginLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.SoftMarginLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.SoftMarginLoss.__init__(self, **self.param_dict)
class TripletMarginLoss(nn.modules.loss.TripletMarginLoss, FateTorchLoss):
    """fate_torch wrapper of torch.nn.TripletMarginLoss; init args are captured
    in ``param_dict`` for FATE serialization."""

    def __init__(self, margin=1.0, p=2.0, eps=1e-06, swap=False,
                 size_average=None, reduce=None, reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(margin=margin, p=p, eps=eps, swap=swap,
                               size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss.TripletMarginLoss.__init__(self, **self.param_dict)
class TripletMarginWithDistanceLoss(
        nn.modules.loss.TripletMarginWithDistanceLoss,
        FateTorchLoss):
    """fate_torch wrapper of torch.nn.TripletMarginWithDistanceLoss; forwards
    all kwargs through ``param_dict`` for FATE serialization."""
    def __init__(self, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.loss.TripletMarginWithDistanceLoss.__init__(
            self, **self.param_dict)
class _Loss(nn.modules.loss._Loss, FateTorchLoss):
    """fate_torch wrapper of torch's internal _Loss base; init args are
    captured in ``param_dict`` for FATE serialization."""

    def __init__(self, size_average=None, reduce=None, reduction='mean',
                 **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(size_average=size_average, reduce=reduce,
                               reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss._Loss.__init__(self, **self.param_dict)
class _WeightedLoss(nn.modules.loss._WeightedLoss, FateTorchLoss):
    """fate_torch wrapper of torch's internal _WeightedLoss base; init args
    are captured in ``param_dict`` for FATE serialization."""

    def __init__(self, weight=None, size_average=None, reduce=None,
                 reduction='mean', **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(weight=weight, size_average=size_average,
                               reduce=reduce, reduction=reduction)
        self.param_dict.update(kwargs)
        nn.modules.loss._WeightedLoss.__init__(self, **self.param_dict)
| 81,792 | 32.412173 | 82 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/interactive.py | import torch as t
from torch.nn import ReLU, Linear, LazyLinear, Tanh, Sigmoid, Dropout, Sequential
from pipeline.component.nn.backend.torch.base import FateTorchLayer
class InteractiveLayer(t.nn.Module, FateTorchLayer):
    r"""Hetero-NN interactive layer.

    Forward computes::

        out = activation(Linear(guest_input) + Linear(host_0_input) + Linear(host_1_input) + ...)

    Args:
        out_dim: int, output dimension of the interactive layer.
        host_num: int >= 1, number of host parties; must be set when running
            multi-party modeling.
        guest_dim: int or None; if None a LazyLinear is used so the input
            dimension is inferred on first forward.
        host_dim: int or None; input dimension shared by all host linear
            layers, or None to infer lazily.
        activation: str, one of 'relu', 'tanh', 'sigmoid'.
        dropout: float in [0, 1] or None to disable dropout.
        guest_bias: bias flag for the guest linear layer.
        host_bias: bias flag for the host linear layers.
        need_guest: if False, the guest bottom-model input is ignored.

    NOTE(review): ``forward`` applies ``self.activation`` directly, so
    ``self.act_seq`` (which would also apply dropout) is built but never
    used here — confirm whether dropout is applied elsewhere in the FATE
    runtime. Also, passing ``activation=None`` leaves ``self.activation``
    as None, which would fail in both ``Sequential`` and ``forward``.
    """

    def __init__(
            self,
            out_dim,
            guest_dim=None,
            host_num=1,
            host_dim=None,
            activation='relu',
            dropout=None,
            guest_bias=True,
            host_bias=True,
            need_guest=True,
    ):
        t.nn.Module.__init__(self)
        FateTorchLayer.__init__(self)
        # resolve the activation string to a module
        self.activation = None
        if activation is not None:
            if activation.lower() == 'relu':
                self.activation = ReLU()
            elif activation.lower() == 'tanh':
                self.activation = Tanh()
            elif activation.lower() == 'sigmoid':
                self.activation = Sigmoid()
            else:
                raise ValueError(
                    'activation not support {}, avail: relu, tanh, sigmoid'.format(activation))
        self.dropout = None
        if dropout is not None:
            assert isinstance(dropout, float), 'dropout must be a float'
            self.dropout = Dropout(p=dropout)
        assert isinstance(out_dim, int), 'out_dim must be an int >= 0'
        # param_dict is what FATE serializes to rebuild this layer remotely
        self.param_dict['out_dim'] = out_dim
        self.param_dict['activation'] = activation
        self.param_dict['dropout'] = dropout
        self.param_dict['need_guest'] = need_guest
        assert isinstance(
            host_num, int) and host_num >= 1, 'host number is an int >= 1'
        self.param_dict['host_num'] = host_num
        if guest_dim is not None:
            assert isinstance(guest_dim, int)
        if host_dim is not None:
            assert isinstance(host_dim, int)
        self.guest_bias = guest_bias
        self.param_dict['guest_dim'] = guest_dim
        self.param_dict['host_dim'] = host_dim
        self.param_dict['guest_bias'] = guest_bias
        self.param_dict['host_bias'] = host_bias
        # guest side: lazy layer when the input dim is unknown
        if need_guest:
            if guest_dim is None:
                self.guest_model = LazyLinear(out_dim, guest_bias)
            else:
                self.guest_model = Linear(guest_dim, out_dim, guest_bias)
        else:
            self.guest_model = None
        self.out_dim = out_dim
        self.host_dim = host_dim
        self.host_bias = host_bias
        self.host_model = None
        self.need_guest = need_guest
        # one linear layer per host party
        self.host_model = t.nn.ModuleList()
        for i in range(host_num):
            self.host_model.append(self.make_host_model())
        if self.dropout is not None:
            self.act_seq = Sequential(
                self.activation,
                self.dropout
            )
        else:
            self.act_seq = Sequential(
                self.activation
            )

    def lazy_to_linear(self, guest_dim=None, host_dims=None):
        # Materialize LazyLinear layers into concrete Linear layers once the
        # input dimensions are known (e.g. before exporting/serializing).
        if isinstance(
                self.guest_model,
                t.nn.LazyLinear) and guest_dim is not None:
            self.guest_model = t.nn.Linear(
                guest_dim, self.out_dim, bias=self.guest_bias)
        if isinstance(
                self.host_model[0],
                t.nn.LazyLinear) and host_dims is not None:
            new_model_list = t.nn.ModuleList()
            for dim in host_dims:
                new_model_list.append(
                    t.nn.Linear(
                        dim,
                        self.out_dim,
                        bias=self.host_bias))
            self.host_model = new_model_list

    def make_host_model(self):
        # Build one host-side linear layer, lazy when host_dim is unknown.
        if self.host_dim is None:
            return LazyLinear(self.out_dim, self.host_bias)
        else:
            return Linear(self.host_dim, self.out_dim, self.host_bias)

    def forward(self, x_guest, x_host):
        if self.need_guest:
            g_out = self.guest_model(x_guest)
        else:
            g_out = 0
        # x_host is either one tensor or a list with one tensor per host;
        # host outputs are summed together
        h_out = None
        if isinstance(x_host, list):
            for m, data in zip(self.host_model, x_host):
                out_ = m(data)
                if h_out is None:
                    h_out = out_
                else:
                    h_out += out_
        else:
            h_out = self.host_model[0](x_host)
        return self.activation(g_out + h_out)
| 5,522 | 34.178344 | 113 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/__init__.py | try:
from pipeline.component.nn.backend.torch import nn, init, operation, optim, serialization
except ImportError:
nn, init, operation, optim, serialization = None, None, None, None, None
__all__ = ['nn', 'init', 'operation', 'optim', 'serialization']
| 261 | 36.428571 | 93 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/operation.py | import torch
import torch as t
import copy
from torch.nn import Module
class OpBase(object):
    """Base class for serializable tensor operations: each op records its
    constructor parameters in ``param_dict``."""

    def __init__(self):
        self.param_dict = {}

    def to_dict(self):
        """Return a deep copy of param_dict with the op class name under 'op'."""
        serialized = copy.deepcopy(self.param_dict)
        serialized['op'] = type(self).__name__
        return serialized
class Astype(Module, OpBase):
    """Serializable cast op: converts a tensor to the given dtype name."""
    def __init__(self, cast_type: str):
        OpBase.__init__(self)
        Module.__init__(self)
        # only dtype names with a torch equivalent are accepted
        assert cast_type in [
            'float',
            'int',
            'bool',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'float16']
        self.param_dict['cast_type'] = cast_type
        self.cast_type = cast_type
        # maps the string name to the actual torch dtype
        self.cast_type_map = {
            'float': t.float,
            'int': t.int,
            'bool': t.bool,
            'float32': t.float32,
            'float64': t.float64,
            'float16': t.float16,
            'int8': t.int8,
            'int16': t.int16,
            'int32': t.int32,
            'int64': t.int64,
        }
    def forward(self, tensor: t.Tensor, **kwargs):
        # extra kwargs are accepted for interface uniformity but ignored
        return tensor.type(self.cast_type_map[self.cast_type])
class Flatten(Module, OpBase):
    """Serializable flatten op; wraps ``Tensor.flatten(start_dim, end_dim)``."""
    def __init__(self, start_dim=0, end_dim=-1):
        OpBase.__init__(self)
        Module.__init__(self)
        self.param_dict['start_dim'] = start_dim
        self.param_dict['end_dim'] = end_dim
    def forward(self, tensor):
        return tensor.flatten(**self.param_dict)
class Reshape(Module, OpBase):
    """Serializable reshape op; wraps ``Tensor.reshape(shape)``."""
    def __init__(self, shape):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(shape, tuple) or isinstance(shape, list)
        self.shape = shape
        # stored as a list so the definition is JSON-serializable
        self.param_dict['shape'] = list(shape)
    def forward(self, tensor: t.Tensor):
        return tensor.reshape(shape=self.shape)
class Index(Module, OpBase):
    """Serializable indexing op: returns ``content[index]``.

    ``content`` can be any indexable object (tensor, tuple of model
    outputs, etc.), not only a tensor.
    """
    def __init__(self, index):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(index, int)
        self.param_dict['index'] = index
    def forward(self, content):
        return content[self.param_dict['index']]
class Select(Module, OpBase):
    """Serializable select op; wraps ``Tensor.select(dim, index)``."""
    def __init__(self, dim, idx):
        OpBase.__init__(self)
        Module.__init__(self)
        self.param_dict = {'dim': dim, 'index': idx}
    def forward(self, tensor):
        return tensor.select(self.param_dict['dim'], self.param_dict['index'])
class SelectRange(Module, OpBase):
    """Serializable op: select index -1 along ``dim`` and slice [start:end]
    from the result.

    NOTE(review): ``select(dim, -1)`` takes the *last* element along ``dim``
    (reducing that dimension) before slicing — despite the name, this does
    not select a range along ``dim``. Confirm against the matching
    implementation in the FATE runtime before changing.
    """
    def __init__(self, dim, start, end):
        OpBase.__init__(self)
        Module.__init__(self)
        self.param_dict = {'dim': dim, 'start': start, 'end': end}
    def forward(self, tensor):
        return tensor.select(
            self.param_dict['dim'], -1)[self.param_dict['start']: self.param_dict['end']]
class Sum(Module, OpBase):
    """Serializable sum op; wraps ``Tensor.sum(dim)``."""
    def __init__(self, dim):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(dim, int)
        self.param_dict['dim'] = dim
    def forward(self, tensor):
        return tensor.sum(dim=self.param_dict['dim'])
class Squeeze(Module, OpBase):
    """Serializable squeeze op: removes all size-1 dimensions.

    Constructor kwargs are accepted for interface uniformity but ignored.
    """
    def __init__(self, **kwargs):
        OpBase.__init__(self)
        Module.__init__(self)
    def forward(self, tensor: t.Tensor):
        return tensor.squeeze()
class Unsqueeze(Sum, OpBase):
    """Serializable unsqueeze op; wraps ``Tensor.unsqueeze(dim)``.

    Inherits Sum only to reuse its ``dim`` parameter handling; the forward
    behavior is entirely different.
    """
    def __init__(self, dim):
        super(Unsqueeze, self).__init__(dim)
    def forward(self, tensor: t.Tensor):
        return tensor.unsqueeze(self.param_dict['dim'])
| 3,488 | 23.570423 | 89 | py |
FATE | FATE-master/python/fate_client/pipeline/component/nn/backend/torch/serialization.py | import copy
import inspect
from collections import OrderedDict
try:
from torch.nn import Sequential as tSeq
from pipeline.component.nn.backend.torch import optim, init, nn
from pipeline.component.nn.backend.torch import operation
from pipeline.component.nn.backend.torch.base import Sequential, get_torch_instance
from pipeline.component.nn.backend.torch.cust import CustModel, CustLoss
from pipeline.component.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def recover_layer_from_dict(nn_define, nn_dict):
    """Rebuild one layer/op instance from its serialized definition.

    :param nn_define: dict with either a 'layer' or an 'op' key naming the
        class, plus its constructor parameters (and an optional
        'initializer' section).
    :param nn_dict: mapping from class name to class (fate_torch nn +
        operation members).
    :return: tuple (layer_instance, class_name).
    :raises ValueError: if neither 'layer' nor 'op' is present.
    """
    init_param_dict = copy.deepcopy(nn_define)
    if 'layer' in nn_define:
        class_name = nn_define['layer']
        init_param_dict.pop('layer')
    elif 'op' in nn_define:
        class_name = nn_define['op']
        init_param_dict.pop('op')
    else:
        raise ValueError(
            'no layer or operation info found in nn define, please check your layer config and make'
            'sure they are correct for pytorch backend')
    # 'initializer' is handled separately below, not a constructor arg
    if 'initializer' in init_param_dict:
        init_param_dict.pop('initializer')
    # find corresponding class
    if class_name == CustModel.__name__:
        nn_layer_class = CustModel
    elif class_name == InteractiveLayer.__name__:
        nn_layer_class = InteractiveLayer
    else:
        nn_layer_class = nn_dict[class_name]
    # create layer or Module
    if nn_layer_class == CustModel: # converto to pytorch model
        layer: CustModel = CustModel(module_name=init_param_dict['module_name'],
                                     class_name=init_param_dict['class_name'],
                                     **init_param_dict['param'])
        layer = layer.get_pytorch_model()
    elif nn_layer_class == InteractiveLayer:
        layer: InteractiveLayer = InteractiveLayer(**init_param_dict)
    else:
        layer = get_torch_instance(nn_layer_class, init_param_dict)
    # initialize if there are configs
    if 'initializer' in nn_define:
        if 'weight' in nn_define['initializer']:
            init_para = nn_define['initializer']['weight']
            init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
            init_func(layer, **init_para['param'])
        if 'bias' in nn_define['initializer']:
            init_para = nn_define['initializer']['bias']
            init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
            init_func(layer, init='bias', **init_para['param'])
    return layer, class_name
def recover_sequential_from_dict(nn_define):
    """Rebuild a torch Sequential (or a single CustModel) from a serialized
    model definition.

    Keys of ``nn_define`` are expected to carry an integer ordering prefix
    ('0-xxx', '1-yyy', ...); if sorting by that prefix fails, insertion
    order is used as a fallback.
    """
    nn_define_dict = nn_define
    nn_dict = dict(inspect.getmembers(nn))
    op_dict = dict(inspect.getmembers(operation))
    nn_dict.update(op_dict)
    class_name_list = []
    try:
        # submitted model have int prefixes, they make sure that layers are in
        # order
        add_dict = OrderedDict()
        keys = list(nn_define_dict.keys())
        keys = sorted(keys, key=lambda x: int(x.split('-')[0]))
        for k in keys:
            layer, class_name = recover_layer_from_dict(nn_define_dict[k], nn_dict)
            add_dict[k] = layer
            class_name_list.append(class_name)
    except BaseException:
        # deliberate best-effort fallback: keys without the numeric prefix
        # are recovered in their original insertion order
        add_dict = OrderedDict()
        for k, v in nn_define_dict.items():
            layer, class_name = recover_layer_from_dict(v, nn_dict)
            add_dict[k] = layer
            class_name_list.append(class_name)
    if len(class_name_list) == 1 and class_name_list[0] == CustModel.__name__:
        # If there are only a CustModel, return the model only
        return list(add_dict.values())[0]
    else:
        return tSeq(add_dict)
def recover_optimizer_from_dict(define_dict):
    """Rebuild a fate_torch optimizer instance from its serialized dict.

    :param define_dict: dict with an 'optimizer' key naming the optimizer
        class; every other key (except 'config_type') is passed to the
        optimizer constructor.
    :raises ValueError: if 'optimizer' is missing.
    """
    opt_dict = dict(inspect.getmembers(optim))
    if 'optimizer' not in define_dict:
        raise ValueError('please specify optimizer type in the json config')
    opt_class = opt_dict[define_dict['optimizer']]
    param_dict = copy.deepcopy(define_dict)
    # strip bookkeeping keys; the remainder are constructor kwargs
    # (fixed: dropped the debug-only `from federatedml.util import LOGGER`,
    # which pulled a server-side package into this client-side module and
    # failed whenever federatedml was not installed)
    param_dict.pop('optimizer', None)
    param_dict.pop('config_type', None)
    return opt_class(**param_dict)
def recover_loss_fn_from_dict(define_dict):
    """Rebuild a loss function from its serialized dict.

    :param define_dict: dict with a 'loss_fn' key naming the loss class;
        other keys are constructor parameters. CustLoss definitions are
        converted back to their underlying pytorch loss.
    :raises ValueError: if 'loss_fn' is missing.
    """
    loss_fn_dict = dict(inspect.getmembers(nn))
    if 'loss_fn' not in define_dict:
        raise ValueError('please specify loss function in the json config')
    param_dict = copy.deepcopy(define_dict)
    param_dict.pop('loss_fn')
    if define_dict['loss_fn'] == CustLoss.__name__:
        return CustLoss(loss_module_name=param_dict['loss_module_name'],
                        class_name=param_dict['class_name'],
                        **param_dict['param']).get_pytorch_model()
    else:
        return loss_fn_dict[define_dict['loss_fn']](**param_dict)
if __name__ == '__main__':
    # module is import-only; no standalone behavior
    pass
| 4,867 | 37.03125 | 100 | py |
FATE | FATE-master/python/fate_client/flow_client/flow_cli/commands/model.py | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from datetime import datetime
import click
import requests
from flow_client.flow_cli.utils import cli_args
from contextlib import closing
from flow_client.flow_cli.utils.cli_utils import preprocess, access_server, prettify, get_project_base_directory, \
check_abs_path
@click.group(short_help="Model Operations")
@click.pass_context
def model(ctx):
    """
    \b
    Provides a number of model operation commands, including load, store, import and etc.
    For more details, please check out the help text.
    """
    # click group: subcommands are registered via @model.command(...)
    pass
@model.command("load", short_help="Load Model Command")
@cli_args.JOBID
@click.option("-c", "--conf-path", type=click.Path(exists=True),
              help="Configuration file path.")
@click.pass_context
def load(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Load Model Command
    \b
    - USAGE:
        flow model load -c fate_flow/examples/publish_load_model.json
        flow model load -j $JOB_ID
    """
    # exactly one of job_id / conf_path must be supplied
    if not kwargs.get("conf_path") and not kwargs.get("job_id"):
        prettify(
            {
                "retcode": 100,
                "retmsg": "Load model failed. No arguments received, "
                          "please provide one of arguments from job id and conf path."
            }
        )
    else:
        if kwargs.get("conf_path") and kwargs.get("job_id"):
            prettify(
                {
                    "retcode": 100,
                    "retmsg": "Load model failed. Please do not provide job id and "
                              "conf path at the same time."
                }
            )
        else:
            config_data, dsl_data = preprocess(**kwargs)
            access_server('post', ctx, 'model/load', config_data)
@model.command("bind", short_help="Bind Model Command")
@cli_args.JOBID
@cli_args.CONF_PATH
@click.pass_context
def bind(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Bind Model Command
    \b
    - USAGE:
        flow model bind -c fate_flow/examples/bind_model_service.json
        flow model bind -c fate_flow/examples/bind_model_service.json -j $JOB_ID
    """
    # merge cli args into the request payload, then forward to fate flow
    config_data, dsl_data = preprocess(**kwargs)
    access_server('post', ctx, 'model/bind', config_data)
@model.command("import", short_help="Import Model Command")
@cli_args.CONF_PATH
@click.option('--from-database', is_flag=True, default=False,
              help="If specified and there is a valid database environment, fate flow will import model from database "
                   "which you specified in configuration file.")
@click.pass_context
def import_model(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Import the model from a file or storage engine.
    \b
    - USAGE:
        flow model import -c fate_flow/examples/import_model.json
        flow model import -c fate_flow/examples/restore_model.json --from-database
    """
    config_data, dsl_data = preprocess(**kwargs)
    # restore from database storage instead of uploading an archive file
    if config_data.pop('from_database'):
        access_server('post', ctx, 'model/restore', config_data)
        return

    file_path = config_data.get("file", None)
    if not file_path:
        prettify({
            'retcode': 100,
            'retmsg': "Import model failed. Please specify the valid model file path and try again."
        })
        return
    # relative paths are resolved against the project base directory
    if not os.path.isabs(file_path):
        file_path = os.path.join(get_project_base_directory(), file_path)
    if not os.path.exists(file_path):
        prettify({
            'retcode': 100,
            'retmsg': 'Import model failed. The file is obtained from the fate flow client machine, '
                      'but it does not exist, please check the path: {}'.format(file_path),
        })
        # bug fix: previously execution fell through here and crashed
        # trying to open the missing file
        return
    config_data['force_update'] = int(config_data.get('force_update', False))
    # context manager guarantees the archive handle is closed (was leaked)
    with open(file_path, 'rb') as fp:
        access_server('post', ctx, 'model/import', data=config_data,
                      files={'file': fp})
@model.command("export", short_help="Export Model Command")
@cli_args.CONF_PATH
@click.option('--to-database', is_flag=True, default=False,
              help="If specified and there is a valid database environment, fate flow will export model to database "
                   "which you specified in configuration file.")
@click.pass_context
def export_model(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Export the model to a file or storage engine.
    \b
    - USAGE:
        flow model export -c fate_flow/examples/export_model.json
        flow model export -c fate_flow/examples/store_model.json --to-database
    """
    config_data, dsl_data = preprocess(**kwargs)
    if not config_data.pop('to_database'):
        # stream the model archive to a local file under output_path
        with closing(access_server('get', ctx, 'model/export', config_data, False, stream=True)) as response:
            if response.status_code == 200:
                # server supplies the archive name via Content-Disposition
                archive_file_name = re.findall("filename=(.+)", response.headers["Content-Disposition"])[0]
                os.makedirs(config_data["output_path"], exist_ok=True)
                archive_file_path = os.path.join(config_data["output_path"], archive_file_name)
                with open(archive_file_path, 'wb') as fw:
                    for chunk in response.iter_content(1024):
                        if chunk:
                            fw.write(chunk)
                response_dict = {'retcode': 0,
                                 'file': archive_file_path,
                                 'retmsg': 'download successfully, please check {}'.format(archive_file_path)}
            else:
                # non-requests responses expose .json as a property
                response_dict = response.json() if isinstance(response, requests.models.Response) else response.json
        prettify(response_dict)
    else:
        access_server('post', ctx, 'model/store', config_data)
@model.command("migrate", short_help="Migrate Model Command")
@cli_args.CONF_PATH
@click.pass_context
def migrate(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Migrate Model Command.
    \b
    - USAGE:
        flow model migrate -c fate_flow/examples/migrate_model.json
    """
    # forward the migration config to fate flow as-is
    config_data, dsl_data = preprocess(**kwargs)
    access_server('post', ctx, 'model/migrate', config_data)
@model.command("tag-model", short_help="Tag Model Command")
@cli_args.JOBID_REQUIRED
@cli_args.TAG_NAME_REQUIRED
@click.option("--remove", is_flag=True, default=False,
              help="If specified, the name of specified model will be "
                   "removed from the model name list of specified tag")
@click.pass_context
def tag_model(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Tag Model Command.
        By default, custom can execute this command to tag model. Or custom could
        specify the 'remove' flag to remove the tag from model.
    \b
    - USAGE:
        flow model tag-model -j $JOB_ID -t $TAG_NAME
        flow model tag-model -j $JOB_ID -t $TAG_NAME --remove
    """
    config_data, dsl_data = preprocess(**kwargs)
    # the --remove flag switches between attach and detach endpoints
    if not config_data.pop('remove'):
        access_server('post', ctx, 'model/model_tag/create', config_data)
    else:
        access_server('post', ctx, 'model/model_tag/remove', config_data)
@model.command("tag-list", short_help="List Tags of Model Command")
@cli_args.JOBID_REQUIRED
@click.pass_context
def list_tag(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        List Tags of Model Command.
        Custom can query the model by a valid job id, and get the tag list of the specified model.
    \b
    - USAGE:
        flow model tag-list -j $JOB_ID
    """
    # query all tags attached to the model identified by the job id
    config_data, dsl_data = preprocess(**kwargs)
    access_server('post', ctx, 'model/model_tag/retrieve', config_data)
@model.command("get-predict-dsl", short_help="Get predict dsl of model")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def get_predict_dsl(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Get predict DSL of the model.
    \b
    - USAGE:
        flow model get-predict-dsl --model-id $MODEL_ID --model-version $MODEL_VERSION -o ./examples/
    """
    config_data, dsl_data = preprocess(**kwargs)
    # output file name is timestamped to avoid clobbering earlier exports
    dsl_filename = "predict_dsl_{}.json".format(datetime.now().strftime('%Y%m%d%H%M%S'))
    output_path = os.path.join(check_abs_path(kwargs.get("output_path")), dsl_filename)
    config_data["filename"] = dsl_filename
    # stream the dsl file from fate flow to the local output path
    with closing(access_server('post', ctx, 'model/get/predict/dsl', config_data, False, stream=True)) as response:
        if response.status_code == 200:
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            with open(output_path, "wb") as fw:
                for chunk in response.iter_content(1024):
                    if chunk:
                        fw.write(chunk)
            res = {'retcode': 0,
                   'retmsg': "Query predict dsl successfully. "
                             "File path is: {}".format(output_path)}
        else:
            try:
                res = response.json() if isinstance(response, requests.models.Response) else response
            except Exception:
                res = {'retcode': 100,
                       'retmsg': "Query predict dsl failed."
                                 "For more details, please check logs/fate_flow/fate_flow_stat.log"}
    prettify(res)
@model.command("get-predict-conf", short_help="Get predict conf template")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.OUTPUT_PATH_REQUIRED
@click.pass_context
def get_predict_conf(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Get the template of predict config.
    \b
    - USAGE:
        flow model get-predict-conf --model-id $MODEL_ID --model-version $MODEL_VERSION -o ./examples/
    """
    # mirrors get_predict_dsl but downloads the predict conf template
    config_data, dsl_data = preprocess(**kwargs)
    conf_filename = "predict_conf_{}.json".format(datetime.now().strftime('%Y%m%d%H%M%S'))
    output_path = os.path.join(check_abs_path(kwargs.get("output_path")), conf_filename)
    config_data["filename"] = conf_filename
    with closing(access_server('post', ctx, 'model/get/predict/conf', config_data, False, stream=True)) as response:
        if response.status_code == 200:
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            with open(output_path, "wb") as fw:
                for chunk in response.iter_content(1024):
                    if chunk:
                        fw.write(chunk)
            res = {'retcode': 0,
                   'retmsg': "Query predict conf successfully. "
                             "File path is: {}".format(output_path)}
        else:
            try:
                res = response.json() if isinstance(response, requests.models.Response) else response
            except Exception:
                res = {'retcode': 100,
                       'retmsg': "Query predict conf failed."
                                 "For more details, please check logs/fate_flow/fate_flow_stat.log"}
    prettify(res)
@model.command("deploy", short_help="Deploy model")
@cli_args.MODEL_ID_REQUIRED
@cli_args.MODEL_VERSION_REQUIRED
@click.option("--cpn-list", type=click.STRING,
              help="User inputs a string to specify component list")
@click.option("--cpn-path", type=click.Path(exists=True),
              help="User specifies a file path which records the component list.")
@click.option("--dsl-path", type=click.Path(exists=True),
              help="User specified predict dsl file")
@click.option("--cpn-step-index", type=click.STRING, multiple=True,
              help="Specify a checkpoint model to replace the pipeline model. "
                   "Use : to separate component name and step index (E.g. --cpn-step-index cpn_a:123)")
@click.option("--cpn-step-name", type=click.STRING, multiple=True,
              help="Specify a checkpoint model to replace the pipeline model. "
                   "Use : to separate component name and step name (E.g. --cpn-step-name cpn_b:foobar)")
@click.pass_context
def deploy(ctx, **kwargs):
    """
    \b
    - DESCRIPTION:
        Deploy model.
    \b
    - USAGE:
        flow model deploy --model-id $MODEL_ID --model-version $MODEL_VERSION
    """
    request_data = {
        'model_id': kwargs['model_id'],
        'model_version': kwargs['model_version'],
    }
    # component list can come from a cli string or from a file
    if kwargs.get("cpn_list") or kwargs.get("cpn_path"):
        if kwargs.get("cpn_list"):
            cpn_str = kwargs["cpn_list"]
        elif kwargs.get("cpn_path"):
            with open(kwargs["cpn_path"], "r") as fp:
                cpn_str = fp.read()
        else:
            cpn_str = ""

        if isinstance(cpn_str, list):
            cpn_list = cpn_str
        else:
            # bug fix: the old check `(cpn_str.find("/") and cpn_str.find("\\")) != -1`
            # only rejected a fraction of strings containing path separators
            if "/" in cpn_str or "\\" in cpn_str:
                raise Exception("Component list string should not contain '/' or '\\'.")
            # accept loose formats like "[a, b]" or "a,b\n"
            cpn_str = cpn_str.replace(" ", "").replace("\n", "").strip(",[]")
            cpn_list = cpn_str.split(",")
        request_data['cpn_list'] = cpn_list
    elif kwargs.get("dsl_path"):
        # alternatively, a full predict dsl can be supplied
        with open(kwargs["dsl_path"], "r") as ft:
            predict_dsl = ft.read()
        request_data['dsl'] = predict_dsl

    # collect checkpoint overrides: component -> {'step_index': int} or {'step_name': str}
    request_data['components_checkpoint'] = {}
    for i in ('cpn_step_index', 'cpn_step_name'):
        for j in kwargs[i]:
            component, checkpoint = j.rsplit(':', 1)
            if i == 'cpn_step_index':
                checkpoint = int(checkpoint)
            if component in request_data['components_checkpoint']:
                raise KeyError(f"Duplicated component name '{component}'.")
            request_data['components_checkpoint'][component] = {
                i[4:]: checkpoint,
            }
    config_data, dsl_data = preprocess(**request_data)
    access_server('post', ctx, 'model/deploy', config_data)
@model.command("get-model-info", short_help="Get model info")
@cli_args.MODEL_ID
@cli_args.MODEL_VERSION_REQUIRED
@cli_args.ROLE
@cli_args.PARTYID
@click.option('--detail', is_flag=True, default=False,
help="If specified, details of model will be shown.")
@click.pass_context
def get_model_info(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Get model information.
\b
- USAGE:
flow model model-info --model-id $MODEL_ID --model-version $MODEL_VERSION
flow model model-info --model-id $MODEL_ID --model-version $MODEL_VERSION --detail
"""
config_data, dsl_data = preprocess(**kwargs)
if not config_data.pop('detail'):
config_data['query_filters'] = ['create_date', 'role', 'party_id', 'roles', 'model_id',
'model_version', 'loaded_times', 'size', 'description', 'parent', 'parent_info']
access_server('post', ctx, 'model/query', config_data)
@model.command("homo-convert", short_help="Convert trained homogenous model")
@cli_args.CONF_PATH
@click.pass_context
def homo_convert_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Convert trained homogenous model to the format of another ML framework. Converted model files
will be saved alongside the original model and can be downloaded via model export command.
The supported conversions are:
HomoLR to `sklearn.linear_model.LogisticRegression`
HomoNN to `tf.keras.Sequential` or `torch.nn.Sequential`, depending on the originally-used backend type.
\b
- USAGE:
flow model homo-convert -c fate_flow/examples/homo_convert_model.json
"""
config_data, dsl_data = preprocess(**kwargs)
access_server('post', ctx, 'model/homo/convert', config_data)
@model.command("homo-deploy", short_help="Deploy trained homogenous model")
@cli_args.CONF_PATH
@click.pass_context
def homo_deploy_model(ctx, **kwargs):
"""
\b
- DESCRIPTION:
Deploy trained homogenous model to a target online serving system. The model must be
converted beforehand.
Currently the supported target serving system is KFServing. Refer to the example json
for detailed parameters.
\b
- USAGE:
flow model homo-deploy -c fate_flow/examples/homo_deploy_model.json
"""
config_data, dsl_data = preprocess(**kwargs)
if config_data.get('deployment_type') == "kfserving":
kube_config = config_data.get('deployment_parameters', {}).get('config_file')
if kube_config:
if check_abs_path(kube_config):
with open(kube_config, 'r') as fp:
config_data['deployment_parameters']['config_file_content'] = fp.read()
del config_data['deployment_parameters']['config_file']
else:
prettify(
{
"retcode": 100,
"retmsg": "The kube_config file is obtained from the fate flow client machine, "
"but it does not exist. Please check the path: {}".format(kube_config)
}
)
return
access_server('post', ctx, 'model/homo/deploy', config_data)
| 17,583 | 35.941176 | 120 | py |
FATE | FATE-master/python/federatedml/nn/model_zoo/graphsage.py | import torch as t
from torch import nn
from torch.nn import Module
import torch_geometric.nn as pyg
class Sage(nn.Module):
    """Two-layer GraphSAGE node classifier.

    Stacks two ``SAGEConv`` layers (the first with input projection) followed
    by a log-softmax over the class scores, so the output pairs with NLLLoss.

    Parameters
    ----------
    in_channels : int, size of each input node feature vector
    hidden_channels : int, hidden embedding size
    class_num : int, number of output classes
    """

    def __init__(self, in_channels, hidden_channels, class_num):
        super().__init__()
        self.model = nn.ModuleList([
            pyg.SAGEConv(in_channels=in_channels, out_channels=hidden_channels, project=True),
            pyg.SAGEConv(in_channels=hidden_channels, out_channels=class_num),
            # BUGFIX: pass dim explicitly — LogSoftmax() with an implicit dim is
            # deprecated. dim=-1 normalizes over the class dimension, matching
            # the implicit choice for the 2-D [num_nodes, num_classes] output.
            nn.LogSoftmax(dim=-1)]
        )

    def forward(self, x, edge_index):
        # Graph convolutions need the edge index; plain layers only take x.
        for layer in self.model:
            if isinstance(layer, pyg.SAGEConv):
                x = layer(x, edge_index)
            else:
                x = layer(x)
        return x
| 702 | 29.565217 | 94 | py |
FATE | FATE-master/python/federatedml/nn/model_zoo/homographsage.py |
import torch as t
from torch import nn
from torch.nn import Module
import torch_geometric.nn as pyg
class Sage(nn.Module):
    """Two-layer GraphSAGE node classifier for homo (federated) training.

    Stacks two ``SAGEConv`` layers (the first with input projection) followed
    by a log-softmax over the class scores, so the output pairs with NLLLoss.

    Parameters
    ----------
    in_channels : int, size of each input node feature vector
    hidden_channels : int, hidden embedding size
    class_num : int, number of output classes
    """

    def __init__(self, in_channels, hidden_channels, class_num):
        super().__init__()
        self.model = nn.ModuleList([
            pyg.SAGEConv(in_channels=in_channels, out_channels=hidden_channels, project=True),
            pyg.SAGEConv(in_channels=hidden_channels, out_channels=class_num),
            # BUGFIX: pass dim explicitly — LogSoftmax() with an implicit dim is
            # deprecated. dim=-1 normalizes over the class dimension, matching
            # the implicit choice for the 2-D [num_nodes, num_classes] output.
            nn.LogSoftmax(dim=-1)]
        )

    def forward(self, x, edge_index):
        # Graph convolutions need the edge index; plain layers only take x.
        for layer in self.model:
            if isinstance(layer, pyg.SAGEConv):
                x = layer(x, edge_index)
            else:
                x = layer(x)
        return x
| 703 | 28.333333 | 94 | py |
FATE | FATE-master/python/federatedml/nn/model_zoo/vision.py | import torch as t
from torchvision.models import get_model
class TorchVisionModels(t.nn.Module):
    """Thin wrapper exposing every torchvision classification model by name.

    Instantiates any torchvision model (optionally with pretrained weights)
    from its string identifier and delegates the forward pass to it.

    Parameters
    ----------
    vision_model_name: str, name of a model provided by torchvision.models; see
        https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights
    pretrain_weights: str, name of a pretrained weight set; see
        https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights
    """

    def __init__(self, vision_model_name: str, pretrain_weights: str = None):
        super().__init__()
        # torchvision resolves the name/weights strings to a model instance.
        self.model = get_model(vision_model_name, weights=pretrain_weights)

    def forward(self, x):
        return self.model(x)

    def __repr__(self):
        return self.model.__repr__()
| 1,062 | 38.37037 | 114 | py |
FATE | FATE-master/python/federatedml/nn/model_zoo/pretrained_bert.py | from transformers.models.bert import BertModel
from torch.nn import Module
from federatedml.util import LOGGER
class PretrainedBert(Module):
    """A pretrained Bert model based on transformers, with optional weight freezing.

    Parameters
    ----------
    pretrained_model_name_or_path: string, specify the version of bert pretrained model,
        for all available bert model, see:
        https://huggingface.co/bert-base-uncased?text=The+goal+of+life+is+%5BMASK%5D.#model-variations
        or it can be a path to downloaded bert model
    freeze_weight: bool, freeze weight or not when training. if True, bert
        parameters are excluded from ``parameters()`` and grad calculation is skipped
    """

    def __init__(self, pretrained_model_name_or_path: str = 'bert-base-uncased', freeze_weight=False):
        super(PretrainedBert, self).__init__()
        self.pretrained_model_str = pretrained_model_name_or_path
        self.freeze_weight = freeze_weight
        LOGGER.info(
            'if you are using non-local models, it will download the pretrained model and will take'
            'some time')
        self.model = BertModel.from_pretrained(
            pretrained_model_name_or_path=self.pretrained_model_str)
        if self.freeze_weight:
            # Disable autograd for all bert weights so they stay fixed.
            self.model.requires_grad_(False)

    def forward(self, x):
        return self.model(x)

    def parameters(self, recurse: bool = True):
        # BUGFIX: the old code returned `(),` — a 1-tuple containing an empty
        # tuple — so an optimizer iterating the result received a bogus `()`
        # entry instead of no parameters. Return an empty iterator instead.
        if self.freeze_weight:
            return iter(())
        return self.model.parameters(recurse=recurse)
| 1,566 | 38.175 | 117 | py |
FATE | FATE-master/python/federatedml/nn/dataset/base.py | from torch.utils.data import Dataset as Dataset_
from federatedml.nn.backend.utils.common import ML_PATH, LLM_PATH
import importlib
import abc
import numpy as np
class Dataset(Dataset_):
    """Base class for FATE-NN datasets, extending ``torch.utils.data.Dataset``.

    Subclasses implement ``load``/``__getitem__``/``__len__`` (and optionally
    ``get_classes``/``get_sample_ids``). The framework uses ``dataset_type``
    to distinguish usage phases and ``has_sample_ids`` to detect whether the
    subclass supplies its own sample ids.
    """

    def __init__(self, **kwargs):
        super(Dataset, self).__init__()
        # _type: phase tag of this dataset instance.
        self._type = 'local'  # train/predict
        # _check: sample-id validation is only run once, on first access.
        self._check = False
        # _generated_ids: filled by init_sid_and_getfunc when ids are synthesized.
        self._generated_ids = None
        # training: toggled by train()/eval(), mirroring torch module semantics.
        self.training = True
    @property
    def dataset_type(self):
        # Guard against subclasses that forgot to call super().__init__().
        if not hasattr(self, '_type'):
            raise AttributeError(
                'type variable not exists, call __init__ of super class')
        return self._type
    @dataset_type.setter
    def dataset_type(self, val):
        self._type = val
    def has_dataset_type(self):
        # Returns the current type string (truthy when set).
        return self.dataset_type
    def set_type(self, _type):
        self.dataset_type = _type
    def get_type(self):
        return self.dataset_type
    def has_sample_ids(self):
        """Return True when the subclass provides usable sample ids.

        The first successful call also validates the ids (list of str/int,
        length matching the dataset); validation is skipped afterwards.
        """
        # if not implement get_sample_ids, return False
        try:
            sample_ids = self.get_sample_ids()
        except NotImplementedError as e:
            return False
        except BaseException as e:
            raise e
        if sample_ids is None:
            return False
        else:
            if not self._check:
                assert isinstance(
                    sample_ids, list), 'get_sample_ids() must return a list contains str or integer'
                for id_ in sample_ids:
                    if (not isinstance(id_, str)) and (not isinstance(id_, int)):
                        raise RuntimeError(
                            'get_sample_ids() must return a list contains str or integer: got id of type {}:{}'.format(
                                id_, type(id_)))
                assert len(sample_ids) == len(
                    self), 'sample id len:{} != dataset length:{}'.format(len(sample_ids), len(self))
                self._check = True
            return True
    def init_sid_and_getfunc(self, prefix: str = None):
        """Synthesize sample ids "<prefix>_<index>" and install an instance-level
        ``get_sample_ids`` that returns them (shadowing the class method)."""
        if prefix is not None:
            assert isinstance(
                prefix, str), 'prefix must be a str, but got {}'.format(prefix)
        else:
            # Default prefix is the dataset's phase tag (e.g. 'train').
            prefix = self._type
        generated_ids = []
        for i in range(0, self.__len__()):
            generated_ids.append(prefix + '_' + str(i))
        self._generated_ids = generated_ids
        def get_func():
            return self._generated_ids
        # Bind on the instance so it overrides the NotImplementedError default.
        self.get_sample_ids = get_func
    """
    Functions for users
    """
    def train(self, ):
        self.training = True
    def eval(self, ):
        self.training = False
    # Function to implemented
    @abc.abstractmethod
    def load(self, file_path):
        raise NotImplementedError(
            'You must implement load function so that Client can pass file-path to this '
            'class')
    def __getitem__(self, item):
        raise NotImplementedError()
    def __len__(self):
        raise NotImplementedError()
    def get_classes(self):
        raise NotImplementedError()
    def get_sample_ids(self):
        raise NotImplementedError()
class ShuffleWrapDataset(Dataset_):
    """Wraps a Dataset and serves its samples in a deterministic shuffled order.

    Samples are first sorted by their sample ids; a seeded permutation of that
    sorted order is then applied, so every party using the same seed sees the
    same shuffle.
    """

    def __init__(self, dataset: Dataset, shuffle_seed=100):
        super(ShuffleWrapDataset, self).__init__()
        self.ds = dataset
        ids = self.ds.get_sample_ids()
        # Canonical order: positions of samples sorted by id.
        sort_idx = np.argsort(np.array(ids))
        assert isinstance(dataset, Dataset)
        self.idx = sort_idx
        if shuffle_seed is not None:
            # Seeded shuffle for reproducibility across runs/parties.
            np.random.seed(shuffle_seed)
            self.shuffled_idx = np.copy(self.idx)
            np.random.shuffle(self.shuffled_idx)
        else:
            # No seed: keep the sorted order as-is (no shuffling).
            self.shuffled_idx = np.copy(self.idx)
        # Map each canonical position to its shuffled counterpart.
        self.idx_map = {k: v for k, v in zip(self.idx, self.shuffled_idx)}
    def train(self, ):
        self.ds.train()
    def eval(self, ):
        self.ds.eval()
    def __getitem__(self, item):
        # item -> canonical index -> shuffled index into the wrapped dataset.
        return self.ds[self.idx_map[self.idx[item]]]
    def __len__(self):
        return len(self.ds)
    def __repr__(self):
        return self.ds.__repr__()
    def has_sample_ids(self):
        return self.ds.has_sample_ids()
    def set_shuffled_idx(self, idx_map: dict):
        # Allows an externally computed permutation to be installed (e.g. one
        # agreed upon with other parties).
        self.shuffled_idx = np.array(list(idx_map.values()))
        self.idx_map = idx_map
    def get_sample_ids(self):
        # Ids reported in shuffled order, consistent with __getitem__.
        ids = self.ds.get_sample_ids()
        return np.array(ids)[self.shuffled_idx].tolist()
    def get_classes(self):
        return self.ds.get_classes()
def get_dataset_class(dataset_module_name: str):
    """Import a dataset module by name and return its Dataset subclass.

    The module is looked up first under ``ML_PATH.dataset`` and, failing
    that, under ``LLM_PATH.dataset``. When the module defines several
    Dataset subclasses, the last one defined wins.

    Raises
    ------
    ValueError
        If the module defines no subclass of Dataset.
    """
    if dataset_module_name.endswith('.py'):
        dataset_module_name = dataset_module_name.replace('.py', '')
    try:
        ds_modules = importlib.import_module(
            '{}.dataset.{}'.format(ML_PATH, dataset_module_name))
    except BaseException:
        # Fall back to the LLM package when the ML package lookup fails.
        ds_modules = importlib.import_module(
            '{}.dataset.{}'.format(LLM_PATH, dataset_module_name))
    try:
        candidates = [
            obj for obj in ds_modules.__dict__.values()
            if isinstance(obj, type) and issubclass(obj, Dataset) and obj is not Dataset
        ]
        if not candidates:
            raise ValueError('Did not find any class in {}.py that is the subclass of Dataset class'.
                             format(dataset_module_name))
        return candidates[-1]  # return the last defined class
    except ValueError as e:
        raise e
| 5,430 | 28.677596 | 119 | py |
FATE | FATE-master/python/federatedml/nn/dataset/image.py | import torch
from federatedml.nn.dataset.base import Dataset
from torchvision.datasets import ImageFolder
from torchvision import transforms
import numpy as np
class ImageDataset(Dataset):
    """
    A basic Image Dataset built on pytorch ImageFolder, supports simple image transform
    Given a folder path, ImageDataset will load images from this folder, images in this
    folder need to be organized in a Torch-ImageFolder format, see
    https://pytorch.org/vision/main/generated/torchvision.datasets.ImageFolder.html for details.
    Image name will be automatically taken as the sample id.
    Parameters
    ----------
    center_crop : bool, use center crop transformer
    center_crop_shape: tuple or list
    generate_id_from_file_name: bool, whether to take image name as sample id
    file_suffix: str, default is '.jpg', if generate_id_from_file_name is True, will remove this suffix from file name,
    result will be the sample id
    return_label: bool, return label or not, this option is for host dataset, when running hetero-NN
    float64: bool, returned image tensors will be transformed to double precision
    label_dtype: str, long, float, or double, the dtype of return label
    """
    def __init__(self, center_crop=False, center_crop_shape=None,
                 generate_id_from_file_name=True, file_suffix='.jpg',
                 return_label=True, float64=False, label_dtype='long'):
        super(ImageDataset, self).__init__()
        # Populated by load(); None until then.
        self.image_folder: ImageFolder = None
        self.center_crop = center_crop
        self.size = center_crop_shape
        self.return_label = return_label
        self.generate_id_from_file_name = generate_id_from_file_name
        self.file_suffix = file_suffix
        self.float64 = float64
        # Image tensor precision: float32 by default, float64 when requested.
        self.dtype = torch.float32 if not self.float64 else torch.float64
        avail_label_type = ['float', 'long', 'double']
        self.sample_ids = None
        assert label_dtype in avail_label_type, 'available label dtype : {}'.format(
            avail_label_type)
        # Map the label dtype name onto the corresponding torch dtype.
        if label_dtype == 'double':
            self.label_dtype = torch.float64
        elif label_dtype == 'long':
            self.label_dtype = torch.int64
        else:
            self.label_dtype = torch.float32
    def load(self, folder_path):
        """Load images from an ImageFolder-structured directory."""
        # read image from folders
        if self.center_crop:
            transformer = transforms.Compose(
                [transforms.CenterCrop(size=self.size), transforms.ToTensor()])
        else:
            transformer = transforms.Compose([transforms.ToTensor()])
        # Normalize trailing slash so path handling below is consistent.
        if folder_path.endswith('/'):
            folder_path = folder_path[: -1]
        image_folder_path = folder_path
        folder = ImageFolder(root=image_folder_path, transform=transformer)
        self.image_folder = folder
        if self.generate_id_from_file_name:
            # use image name as its sample id
            # NOTE(review): splitting on '/' assumes POSIX-style paths from
            # ImageFolder; likely breaks on Windows ('\\' separators) — confirm.
            file_name = self.image_folder.imgs
            ids = []
            for name in file_name:
                sample_id = name[0].split(
                    '/')[-1].replace(self.file_suffix, '')
                ids.append(sample_id)
            self.sample_ids = ids
    def __getitem__(self, item):
        # Returns (image, label) cast to the configured dtypes, or just the
        # image when return_label is False (host side in hetero-NN).
        if self.return_label:
            item = self.image_folder[item]
            return item[0].type(
                self.dtype), torch.tensor(
                item[1]).type(
                self.label_dtype)
        else:
            return self.image_folder[item][0].type(self.dtype)
    def __len__(self):
        return len(self.image_folder)
    def __repr__(self):
        return self.image_folder.__repr__()
    def get_classes(self):
        # Unique class targets found by ImageFolder.
        return np.unique(self.image_folder.targets).tolist()
    def get_sample_ids(self):
        return self.sample_ids
if __name__ == '__main__':
    # Module is import-only; no standalone behavior.
    pass
| 3,837 | 35.552381 | 119 | py |
FATE | FATE-master/python/federatedml/nn/dataset/graph.py | import numpy as np
import pandas as pd
from federatedml.statistic.data_overview import with_weight
from federatedml.nn.dataset.base import Dataset
try:
from torch_geometric.data import Data
except BaseException:
pass
import torch
from federatedml.util import LOGGER
class GraphDataset(Dataset):
    """
    A Graph Dataset includes feature table, edge table and input_nodes table. The data come from a given csv path, or transform from FATE DTable
    Parameters
    ----------
    id_col, name of the id column in csv, default 'id'
    label_col str, name of label column in csv, if None, will automatically take 'y' or 'label' or 'target' as label
    feature_dtype dtype of feature, supports int, long, float, double
    label_dtype: dtype of label, supports int, long, float, double
    feats_name: name of the node feature csv, default 'feats.csv'
    feats_dataset_col: name of the dataset column indicating to which dataset the node belongs, default 'dataset'
    feats_dataset_train: flag of the train set
    feats_dataset_vali: flag of the validation set
    feats_dataset_test: flag of the test set
    adj_name: name of the adjacent matrix, default 'adj.csv'
    adj_src_col: source node in the adjacent matrix, default 'node1'
    adj_dst_col: destination node in the adjacent matrix, default 'node2'
    """
    def __init__(
            self,
            id_col='id',
            label_col='y',
            feature_dtype='float',
            label_dtype='long',
            feats_name='feats.csv',
            feats_dataset_col='dataset',
            feats_dataset_train='train',
            feats_dataset_vali='vali',
            feats_dataset_test='test',
            adj_name='adj.csv',
            adj_src_col='node1',
            adj_dst_col='node2'):
        super(GraphDataset, self).__init__()
        self.key2idx: dict = {}
        # Resolve dtype names to torch dtypes once, up front.
        self.f_dtype = self.check_dtype(feature_dtype)
        self.l_dtype = self.check_dtype(label_dtype)
        # Single torch_geometric Data object holding the whole graph.
        self.data: Data = Data()
        self.sample_ids = None
        # Node-index lists per split; filled by load().
        self.input_nodes_train = None
        self.input_nodes_vali = None
        self.input_nodes_test = None
        self.id_col = id_col
        self.label_col = label_col
        self.feats_name = feats_name
        self.feats_dataset_col = feats_dataset_col
        self.feats_dataset_train = feats_dataset_train
        self.feats_dataset_vali = feats_dataset_vali
        self.feats_dataset_test = feats_dataset_test
        self.adj_name = adj_name
        self.adj_src_col = adj_src_col
        self.adj_dst_col = adj_dst_col
    def __len__(self):
        # Dataset length is the number of graph nodes.
        return self.data.num_nodes
    @staticmethod
    def check_dtype(dtype):
        """Map a dtype name ('long'/'int'/'float'/'double') onto a torch dtype;
        None is passed through unchanged."""
        if dtype is not None:
            avail = ['long', 'int', 'float', 'double']
            assert dtype in avail, 'available dtype is {}, but got {}'.format(
                avail, dtype)
            if dtype == 'long':
                return torch.int64
            if dtype == 'int':
                return torch.int32
            if dtype == 'float':
                return torch.float32
            if dtype == 'double':
                return torch.float64
        return dtype
    def __process_feats(self, data_path):
        # Read the node-feature csv: split rows into train/vali/test index
        # lists, then build the feature matrix x and label vector y.
        LOGGER.info("processing feats")
        tmp = pd.read_csv(data_path + "/" + self.feats_name)
        self.input_nodes_train = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_train].index.to_list()
        self.input_nodes_vali = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_vali].index.to_list()
        self.input_nodes_test = tmp[tmp[self.feats_dataset_col] == self.feats_dataset_test].index.to_list()
        self.data.x = torch.tensor(tmp.drop([self.id_col, self.feats_dataset_col,
                                             self.label_col], axis=1).to_numpy(), dtype=self.f_dtype)
        self.data.y = torch.tensor(tmp[self.label_col], dtype=self.l_dtype)
    def __process_adj(self, data_path):
        # Read the edge csv into a 2 x num_edges edge_index tensor; any extra
        # columns become edge attributes.
        LOGGER.info("processing edges")
        tmp = pd.read_csv(data_path + "/" + self.adj_name)
        self.data.edge_index = torch.tensor(tmp[[self.adj_src_col, self.adj_dst_col]].T.to_numpy(), dtype=torch.long)
        if len(tmp.columns) > 2:
            self.data.edge_attr = torch.tensor(
                tmp.drop([self.adj_src_col, self.adj_dst_col], axis=1).to_numpy(), dtype=torch.float)
    def load(self, data_path):
        """Load node features and edges from csv files under *data_path*."""
        LOGGER.info("Loading graph data...")
        self.__process_feats(data_path)
        self.__process_adj(data_path)
        # Assign each node its global node index:
        self.data.n_id = torch.arange(self.data.num_nodes)
    def get_sample_ids(self):
        return self.sample_ids
| 4,680 | 39.353448 | 145 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/import_hook.py | try:
from federatedml.component.nn.backend.torch import nn as nn_
from federatedml.component.nn.backend.torch import init as init_
from federatedml.component.nn.backend.torch import optim as optim_
from federatedml.component.nn.backend.torch.cust import CustModel, CustLoss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def monkey_patch(torch_nn, fate_torch_module):
    """Overwrite attributes of *torch_nn* with same-named fate-torch versions.

    Only names that already exist in ``torch_nn`` are replaced; entries whose
    name contains '__' (module internals/dunders) are left untouched, and
    names present only in *fate_torch_module* are never added.
    """
    for attr_name, replacement in fate_torch_module.__dict__.items():
        if '__' in attr_name:  # skip no related variables
            continue
        if attr_name in torch_nn.__dict__.keys():
            torch_nn.__dict__[attr_name] = replacement
def fate_torch_hook(torch_module_var):
    """
    This is a monkey patch function that modify torch modules to use fate_torch layers and Components.
    Accepts torch, torch.nn, torch.optim or torch.nn.init and patches the
    corresponding fate-torch replacements onto it.
    :param torch_module_var: the torch module object to patch
    :return: the same module object, patched in place
    """
    if torch_module_var.__name__ == 'torch':
        monkey_patch(torch_module_var.nn, nn_)
        monkey_patch(torch_module_var.optim, optim_)
        monkey_patch(torch_module_var.nn.init, init_)
        setattr(torch_module_var.nn, 'CustModel', CustModel)
        setattr(torch_module_var.nn, 'InteractiveLayer', InteractiveLayer)
        setattr(torch_module_var.nn, 'CustLoss', CustLoss)
    elif torch_module_var.__name__ == 'torch.nn':
        monkey_patch(torch_module_var, nn_)
        setattr(torch_module_var, 'CustModel', CustModel)
        # BUGFIX: the module passed in IS torch.nn, so attach directly to it.
        # The old code wrote to `torch_module_var.nn`, an attribute that is
        # not defined on the torch.nn module.
        setattr(torch_module_var, 'InteractiveLayer', InteractiveLayer)
        setattr(torch_module_var, 'CustLoss', CustLoss)
    elif torch_module_var.__name__ == 'torch.optim':
        monkey_patch(torch_module_var, optim_)
    elif torch_module_var.__name__ == 'torch.nn.init':
        monkey_patch(torch_module_var, init_)
    else:
        raise ValueError(
            'this module: {} does not support fate torch hook'.format(torch_module_var))
    return torch_module_var
| 1,925 | 36.764706 | 101 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/base.py | import json
import torch as t
from torch.nn import Sequential as tSequential
from federatedml.nn.backend.torch.operation import OpBase
class FateTorchLayer(object):
def __init__(self):
t.nn.Module.__init__(self)
self.param_dict = dict()
self.initializer = {'weight': None, 'bias': None}
self.optimizer = None
def to_dict(self):
import copy
ret_dict = copy.deepcopy(self.param_dict)
ret_dict['layer'] = type(self).__name__
ret_dict['initializer'] = {}
if self.initializer['weight']:
ret_dict['initializer']['weight'] = self.initializer['weight']
if self.initializer['bias']:
ret_dict['initializer']['bias'] = self.initializer['bias']
return ret_dict
def add_optimizer(self, opt):
self.optimizer = opt
class FateTorchLoss(object):
    """Mixin that lets a fate-torch loss function serialize its configuration."""

    def __init__(self):
        self.param_dict = {}

    def to_dict(self):
        """Return a deep-copied config dict naming this loss and its params."""
        import copy
        loss_conf = copy.deepcopy(self.param_dict)
        loss_conf['loss_fn'] = type(self).__name__
        return loss_conf
class FateTorchOptimizer(object):
    """Mixin that lets a fate-torch optimizer describe itself as a config dict
    and bind itself to fate-torch layers/sequentials."""

    def __init__(self):
        self.param_dict = {}
        self.torch_class = None

    def to_dict(self):
        """Return a deep-copied, json-able description of this optimizer."""
        import copy
        opt_conf = copy.deepcopy(self.param_dict)
        opt_conf['optimizer'] = type(self).__name__
        opt_conf['config_type'] = 'pytorch'
        return opt_conf

    def check_params(self, params):
        """Normalize *params* into a non-empty list of torch parameters.

        A FateTorchLayer/Sequential passed here registers this optimizer on
        itself and contributes its own parameters.
        """
        if isinstance(params, FateTorchLayer) or isinstance(params, Sequential):
            params.add_optimizer(self)
            params = params.parameters()
        param_list = list(params)
        if not param_list:
            # fake parameters, for the case that there are only cust model
            return [t.nn.Parameter(t.Tensor([0]))]
        return param_list

    def register_optimizer(self, input_):
        """Attach this optimizer to *input_* when it is a fate-torch layer."""
        if input_ is None:
            return
        if isinstance(input_, FateTorchLayer) or isinstance(input_, Sequential):
            input_.add_optimizer(self)

    def to_torch_instance(self, parameters):
        """Instantiate the wrapped native torch optimizer over *parameters*."""
        return self.torch_class(parameters, **self.param_dict)
class Sequential(tSequential):
    """fate-torch Sequential: a ``torch.nn.Sequential`` whose structure (and
    attached optimizer) can be exported as a json-able config."""

    def to_dict(self):
        """
        get the structure of current sequential
        """
        rs = {}
        idx = 0
        # Keys are "<position>-<module name>" to preserve layer ordering.
        for k in self._modules:
            ordered_name = str(idx) + '-' + k
            rs[ordered_name] = self._modules[k].to_dict()
            idx += 1
        return rs
    def to_json(self):
        return json.dumps(self.to_dict(), indent=4)
    def add_optimizer(self, opt):
        setattr(self, 'optimizer', opt)
    def add(self, layer):
        """Append a fate-torch layer, or adopt another Sequential's modules."""
        if isinstance(layer, Sequential):
            # Adopt the other sequential's module dict wholesale.
            self._modules = layer._modules
            # copy optimizer
            if hasattr(layer, 'optimizer'):
                setattr(self, 'optimizer', layer.optimizer)
        elif isinstance(layer, FateTorchLayer):
            # Register under the next positional name.
            self.add_module(str(len(self)), layer)
            # update optimizer if dont have
            if not hasattr(self, 'optimizer') and hasattr(layer, 'optimizer'):
                setattr(self, 'optimizer', layer.optimizer)
        else:
            raise ValueError(
                'unknown input layer type {}, this type is not supported'.format(
                    type(layer)))
    @staticmethod
    def get_loss_config(loss: FateTorchLoss):
        return loss.to_dict()
    def get_optimizer_config(self, optimizer=None):
        # Prefer an optimizer previously attached via add/add_optimizer.
        if hasattr(self, 'optimizer'):
            return self.optimizer.to_dict()
        else:
            return optimizer.to_dict()
    def get_network_config(self):
        return self.to_dict()
def get_torch_instance(fate_torch_nn_class: FateTorchLayer, param):
    """Instantiate the native torch equivalent of a fate-torch layer class.

    Operation layers (OpBase subclasses) are instantiated directly; otherwise
    the first ``torch.nn.Module`` base class is instantiated with *param*.
    Returns None when no ``torch.nn.Module`` base exists.
    """
    if issubclass(fate_torch_nn_class, OpBase):
        return fate_torch_nn_class(**param)
    for base_cls in fate_torch_nn_class.__bases__:
        if issubclass(base_cls, t.nn.Module):
            return base_cls(**param)
    return None
| 4,203 | 26.657895 | 81 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/optim.py | from torch import optim
from federatedml.nn.backend.torch.base import FateTorchLayer, Sequential
from federatedml.nn.backend.torch.base import FateTorchOptimizer
class ASGD(optim.ASGD, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.ASGD``; records constructor args in
    ``param_dict`` so the optimizer config can be serialized. With no params
    the object is config-only and the torch optimizer is not initialized."""

    def __init__(
            self,
            params=None,
            lr=0.01,
            lambd=0.0001,
            alpha=0.75,
            t0=1000000.0,
            weight_decay=0,
            foreach=None,
            maximize=False,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lambd'] = lambd
        self.param_dict['alpha'] = alpha
        self.param_dict['t0'] = t0
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: the old format string had no '{}' placeholder, so the
            # class-name argument was silently dropped.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adadelta(optim.Adadelta, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.Adadelta``; records constructor args
    in ``param_dict``. With no params the object is config-only."""

    def __init__(self, params=None, lr=1.0, rho=0.9, eps=1e-06, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['rho'] = rho
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adagrad(optim.Adagrad, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.Adagrad``; records constructor args
    in ``param_dict``. With no params the object is config-only."""

    def __init__(
            self,
            params=None,
            lr=0.01,
            lr_decay=0,
            weight_decay=0,
            initial_accumulator_value=0,
            eps=1e-10,
            foreach=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['lr_decay'] = lr_decay
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['initial_accumulator_value'] = initial_accumulator_value
        self.param_dict['eps'] = eps
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adam(optim.Adam, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.Adam``; records constructor args in
    ``param_dict``. With no params the object is config-only."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class AdamW(optim.AdamW, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.AdamW``; records constructor args in
    ``param_dict``. With no params the object is config-only."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['amsgrad'] = amsgrad
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Adamax(optim.Adamax, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.Adamax``; records constructor args
    in ``param_dict``. With no params the object is config-only."""

    def __init__(self, params=None, lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class LBFGS(optim.LBFGS, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.LBFGS``; records constructor args in
    ``param_dict``. With no params the object is config-only."""

    def __init__(
            self,
            params=None,
            lr=1,
            max_iter=20,
            max_eval=None,
            tolerance_grad=1e-07,
            tolerance_change=1e-09,
            history_size=100,
            line_search_fn=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['max_iter'] = max_iter
        self.param_dict['max_eval'] = max_eval
        self.param_dict['tolerance_grad'] = tolerance_grad
        self.param_dict['tolerance_change'] = tolerance_change
        self.param_dict['history_size'] = history_size
        self.param_dict['line_search_fn'] = line_search_fn
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class NAdam(optim.NAdam, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.NAdam``; records constructor args in
    ``param_dict``. With no params the object is config-only."""

    def __init__(
            self,
            params=None,
            lr=0.002,
            betas=(
                0.9,
                0.999),
            eps=1e-08,
            weight_decay=0,
            momentum_decay=0.004,
            foreach=None,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum_decay'] = momentum_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class RAdam(optim.RAdam, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.RAdam``; records constructor args in
    ``param_dict``. With no params the object is config-only."""

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, foreach=None, ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['betas'] = betas
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['foreach'] = foreach
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class RMSprop(optim.RMSprop, FateTorchOptimizer):
    """fate-torch wrapper of ``torch.optim.RMSprop``; records constructor args
    in ``param_dict``. With no params the object is config-only."""

    def __init__(
            self,
            params=None,
            lr=0.01,
            alpha=0.99,
            eps=1e-08,
            weight_decay=0,
            momentum=0,
            centered=False,
            foreach=None,
            maximize=False,
            differentiable=False,
    ):
        FateTorchOptimizer.__init__(self)
        self.param_dict['lr'] = lr
        self.param_dict['alpha'] = alpha
        self.param_dict['eps'] = eps
        self.param_dict['weight_decay'] = weight_decay
        self.param_dict['momentum'] = momentum
        self.param_dict['centered'] = centered
        self.param_dict['foreach'] = foreach
        self.param_dict['maximize'] = maximize
        self.param_dict['differentiable'] = differentiable
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # BUGFIX: format string previously lacked its '{}' placeholder.
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
class Rprop(optim.Rprop, FateTorchOptimizer):
    """Recordable wrapper of ``torch.optim.Rprop``.

    Stores constructor arguments in ``param_dict``; initialization of the
    underlying torch optimizer is deferred until ``params`` is given.
    """

    def __init__(self, params=None, lr=0.01, etas=(0.5, 1.2),
                 step_sizes=(1e-06, 50), foreach=None, maximize=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict.update({
            'lr': lr, 'etas': etas, 'step_sizes': step_sizes,
            'foreach': foreach, 'maximize': maximize})
        self.torch_class = type(self).__bases__[0]
        if params is None:
            # config-only instance: the torch optimizer is not initialized
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # bug fix: the template previously had no '{}' placeholder, so
            # the .format() argument was silently discarded
            return 'Optimizer {} without initiated parameters'.format(
                type(self).__name__)
class SGD(optim.SGD, FateTorchOptimizer):
    """Recordable wrapper of ``torch.optim.SGD``.

    Stores constructor arguments in ``param_dict``; initialization of the
    underlying torch optimizer is deferred until ``params`` is given.
    """

    def __init__(self, params=None, lr=0.01, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict.update({
            'lr': lr, 'momentum': momentum, 'dampening': dampening,
            'weight_decay': weight_decay, 'nesterov': nesterov})
        self.torch_class = type(self).__bases__[0]
        if params is None:
            # config-only instance: the torch optimizer is not initialized
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # bug fix: the template previously had no '{}' placeholder, so
            # the .format() argument was silently discarded
            return 'Optimizer {} without initiated parameters'.format(
                type(self).__name__)
class SparseAdam(optim.SparseAdam, FateTorchOptimizer):
    """Recordable wrapper of ``torch.optim.SparseAdam``.

    Stores constructor arguments in ``param_dict``; initialization of the
    underlying torch optimizer is deferred until ``params`` is given.
    """

    def __init__(self, params=None, lr=0.001, betas=(0.9, 0.999), eps=1e-08,
                 maximize=False):
        FateTorchOptimizer.__init__(self)
        self.param_dict.update({
            'lr': lr, 'betas': betas, 'eps': eps, 'maximize': maximize})
        self.torch_class = type(self).__bases__[0]
        if params is None:
            # config-only instance: the torch optimizer is not initialized
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)

    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except BaseException:
            # bug fix: the template previously had no '{}' placeholder, so
            # the .format() argument was silently discarded
            return 'Optimizer {} without initiated parameters'.format(
                type(self).__name__)
| 13,025 | 30.3125 | 118 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/cust_model.py | import importlib
from torch import nn
from federatedml.nn.backend.torch.base import FateTorchLayer
from federatedml.nn.backend.utils.common import ML_PATH
PATH = '{}.model_zoo'.format(ML_PATH)
class CustModel(FateTorchLayer, nn.Module):
    """Lazy wrapper for a user-defined ``nn.Module`` in the model zoo.

    The wrapped class is located by (module_name, class_name) and is only
    instantiated when :meth:`init_model` is called.
    """

    def __init__(self, module_name, class_name, **kwargs):
        super(CustModel, self).__init__()
        assert isinstance(
            module_name, str), 'name must be a str, specify the module in the model_zoo'
        assert isinstance(
            class_name, str), 'class name must be a str, specify the class in the module'
        # kwargs are forwarded verbatim to the wrapped class' constructor
        self.param_dict = {
            'module_name': module_name,
            'class_name': class_name,
            'param': kwargs}
        self._model = None

    def init_model(self):
        """Instantiate the wrapped model (idempotent)."""
        if self._model is None:
            self._model = self.get_pytorch_model()

    def forward(self, x):
        if self._model is None:
            raise ValueError('model not init, call init_model() function')
        return self._model(x)

    def get_pytorch_model(self):
        """Import the zoo module and instantiate the named nn.Module subclass.

        Raises:
            ValueError: if no matching class is found in the module.
        """
        module_name: str = self.param_dict['module_name']
        class_name = self.param_dict['class_name']
        module_param: dict = self.param_dict['param']
        if module_name.endswith('.py'):
            # bug fix: str.replace removed every '.py' occurrence (breaking
            # names like 'my.pyfile.py'); strip the suffix only
            module_name = module_name[:-3]
        nn_modules = importlib.import_module('{}.{}'.format(PATH, module_name))
        # redundant try/except-reraise removed: let the ValueError propagate
        for k, v in nn_modules.__dict__.items():
            if isinstance(v, type):
                if issubclass(
                        v, nn.Module) and v is not nn.Module and v.__name__ == class_name:
                    return v(**module_param)
        raise ValueError(
            'Did not find any class in {}.py that is pytorch nn.Module and named {}'. format(
                module_name, class_name))

    def __repr__(self):
        return 'CustModel({})'.format(str(self.param_dict))
| 1,984 | 34.446429 | 97 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/cust.py | from torch import nn
import importlib
from federatedml.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from federatedml.nn.backend.utils.common import ML_PATH, LLM_PATH
import difflib
LLM_MODEL_PATH = '{}.model_zoo'.format(LLM_PATH)
MODEL_PATH = '{}.model_zoo'.format(ML_PATH)
LOSS_PATH = '{}.loss'.format(ML_PATH)
def str_simi(str_a, str_b):
    """Return a quick similarity ratio in [0, 1] between two strings."""
    matcher = difflib.SequenceMatcher(None, str_a, str_b)
    return matcher.quick_ratio()
def get_class(module_name, class_name, param, base_path):
    """Import ``base_path.module_name`` and instantiate class ``class_name``.

    Only classes that are strict subclasses of ``nn.Module`` are considered.
    If no class matches, a ValueError is raised, with a "did you mean"
    suggestion picked by name similarity when candidates exist.

    Raises:
        ValueError: when no nn.Module subclass named ``class_name`` exists.
    """
    if module_name.endswith('.py'):
        # bug fix: str.replace removed every '.py' occurrence (breaking
        # names like 'my.pyfile.py'); strip the suffix only
        module_name = module_name[:-3]
    nn_modules = importlib.import_module(
        '{}.{}'.format(base_path, module_name))
    # redundant try/except-reraise removed: let the ValueError propagate
    name_simi_list = []
    for k, v in nn_modules.__dict__.items():
        if isinstance(v, type):
            if issubclass(v, nn.Module) and v is not nn.Module:
                if v.__name__ == class_name:
                    return v(**param)
                name_simi_list.append((str_simi(class_name, v.__name__), v))
    sort_by_simi = sorted(name_simi_list, key=lambda x: -x[0])
    if len(sort_by_simi) > 0:
        raise ValueError(
            'Did not find any class in {}.py that is subclass of nn.Module and named {}. Do you mean {}?'. format(
                module_name, class_name, sort_by_simi[0][1].__name__))
    raise ValueError(
        'Did not find any class in {}.py that is subclass of nn.Module and named {}'. format(
            module_name, class_name))
class CustModel(FateTorchLayer, nn.Module):
    """Lazy wrapper for a user-defined ``nn.Module`` in a model zoo.

    Lookup first tries the federatedml model zoo and falls back to the LLM
    model zoo; an explicit ``module_path`` can override both.
    """

    def __init__(self, module_name, class_name, **kwargs):
        super(CustModel, self).__init__()
        assert isinstance(
            module_name, str), 'name must be a str, specify the module in the model_zoo'
        assert isinstance(
            class_name, str), 'class name must be a str, specify the class in the module'
        # kwargs are forwarded verbatim to the wrapped class' constructor
        self.param_dict = {
            'module_name': module_name,
            'class_name': class_name,
            'param': kwargs}
        self._model = None

    def init_model(self):
        """Instantiate the wrapped model (idempotent)."""
        if self._model is None:
            self._model = self.get_pytorch_model()

    def forward(self, x):
        if self._model is None:
            raise ValueError('model not init, call init_model() function')
        return self._model(x)

    def get_pytorch_model(self, module_path=None):
        """Locate and instantiate the wrapped class.

        Args:
            module_path: optional search path overriding the default
                MODEL_PATH -> LLM_MODEL_PATH fallback chain.
        """
        if module_path is None:
            try:
                return get_class(
                    self.param_dict['module_name'],
                    self.param_dict['class_name'],
                    self.param_dict['param'],
                    MODEL_PATH)
            # bug fix: was `except BaseException`, which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the intended
            # best-effort fallback to the LLM zoo
            except Exception:
                return get_class(
                    self.param_dict['module_name'],
                    self.param_dict['class_name'],
                    self.param_dict['param'],
                    LLM_MODEL_PATH)
        return get_class(
            self.param_dict['module_name'],
            self.param_dict['class_name'],
            self.param_dict['param'],
            module_path)

    def __repr__(self):
        return 'CustModel({})'.format(str(self.param_dict))
class CustLoss(FateTorchLoss, nn.Module):
    """Lazy wrapper for a user-defined loss class in the loss zoo."""

    def __init__(self, loss_module_name, class_name, **kwargs):
        super(CustLoss, self).__init__()
        assert isinstance(
            loss_module_name, str), 'loss module name must be a str, specify the module in the model_zoo'
        assert isinstance(
            class_name, str), 'class name must be a str, specify the class in the module'
        # kwargs are forwarded verbatim to the wrapped loss' constructor
        self.param_dict = {
            'loss_module_name': loss_module_name,
            'class_name': class_name,
            'param': kwargs,
        }
        self._loss_fn = None

    def init_loss_fn(self):
        """Instantiate the wrapped loss (idempotent)."""
        if self._loss_fn is None:
            self._loss_fn = self.get_pytorch_model()

    def forward(self, pred, label):
        if self._loss_fn is None:
            raise ValueError('loss not init, call init_loss_fn() function')
        return self._loss_fn(pred, label)

    def get_pytorch_model(self, module_path=None):
        """Locate and instantiate the loss class from the loss zoo."""
        search_path = LOSS_PATH if module_path is None else module_path
        return get_class(
            module_name=self.param_dict['loss_module_name'],
            class_name=self.param_dict['class_name'],
            param=self.param_dict['param'],
            base_path=search_path)

    def __repr__(self):
        return 'CustLoss({})'.format(str(self.param_dict))
| 4,907 | 34.057143 | 118 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/init.py | import copy
import torch as t
from torch.nn import init as torch_init
import functools
from federatedml.nn.backend.torch.base import FateTorchLayer
from federatedml.nn.backend.torch.base import Sequential
# Maps the short initializer names used in exported model configs to the
# corresponding in-place torch.nn.init functions.
str_init_func_map = {
    "uniform": torch_init.uniform_,
    "normal": torch_init.normal_,
    "constant": torch_init.constant_,
    "xavier_uniform": torch_init.xavier_uniform_,
    "xavier_normal": torch_init.xavier_normal_,
    "kaiming_uniform": torch_init.kaiming_uniform_,
    "kaiming_normal": torch_init.kaiming_normal_,
    "eye": torch_init.eye_,
    "dirac": torch_init.dirac_,
    "orthogonal": torch_init.orthogonal_,
    "sparse": torch_init.sparse_,
    "zeros": torch_init.zeros_,
    "ones": torch_init.ones_
}
#
# def extract_param(func):
#
# args = inspect.getargspec(func)
# keys = args[0][1:]
# if len(keys) == 0:
# return {}
# defaults = args[-1]
# args_map = {}
# if defaults is not None:
# for idx, i in enumerate(keys[-len(defaults):]):
# args_map[i] = defaults[idx]
#
# for i in keys:
# if i not in args_map:
# args_map[i] = Required()
#
# return args_map
def init_weight(m, initializer):
    """Apply *initializer* to every weight tensor found on module *m*.

    Plain layers expose ``weight``; LSTM/RNN additionally expose
    ``weight_hh_l0`` and ``weight_ih_l0``.
    """
    for attr in ('weight', 'weight_hh_l0', 'weight_ih_l0'):
        if hasattr(m, attr):
            initializer(getattr(m, attr))
def init_bias(m, initializer):
    """Apply *initializer* to every bias tensor found on module *m*.

    LSTM/RNN modules use ``bias`` as a bool flag, so boolean values are
    skipped; their real biases live in ``bias_hh_l0`` / ``bias_ih_l0``.
    """
    bias = getattr(m, 'bias', None)
    if bias is not None and not isinstance(bias, bool):
        initializer(bias)
    for attr in ('bias_hh_l0', 'bias_ih_l0'):
        value = getattr(m, attr, None)
        if value is not None:
            initializer(value)
def get_init_func_type(init='weight'):
    """Map 'weight'/'bias' to its init helper; any other value gives None."""
    return {'weight': init_weight, 'bias': init_bias}.get(init)
def recursive_init(m, init_func, obj):
    # Apply `init_func` to leaf modules only; container modules are skipped.
    # NOTE(review): the recursion is effectively a no-op guard — the nested
    # call passes `m` as its own `obj`, so `m == obj` fires immediately and
    # returns. Tree traversal is done by `Module.apply` (see make_apply_func
    # / run_init), which calls this function once per submodule.
    if len(list(m.children())) > 0:
        if m == obj:
            return
        recursive_init(m, init_func, m)
    else:
        try:
            init_func(m)
        except Exception as e:
            # best-effort: log and continue with the remaining modules
            print('initialize layer {} failed, exception is :{}'.format(m, e))
def make_apply_func(torch_initializer, param_dict, init_func, layer):
    """Build a callable for ``Module.apply`` plus the params used.

    Returns a (apply_fn, param_dict) pair, where apply_fn runs
    ``torch_initializer(**param_dict)`` on leaf modules via recursive_init.
    """
    bound_init = functools.partial(torch_initializer, **param_dict)
    per_module = functools.partial(init_func, initializer=bound_init)
    apply_fn = functools.partial(recursive_init, obj=layer, init_func=per_module)
    return apply_fn, param_dict
def get_init_dict(init_func, param_dict, init_type):
    """Serialize an initializer call into a config-friendly record."""
    reverse_map = {func: name for name, func in str_init_func_map.items()}
    return {
        'init_type': init_type,
        'init_func': reverse_map[init_func],
        'param': param_dict,
    }
def record_initializer(layers, init_dict):
    """Remember a weight/bias init config on a FateTorchLayer for export."""
    if not isinstance(layers, FateTorchLayer):
        return
    slot = init_dict['init_type']
    if slot in ('weight', 'bias'):
        layers.initializer[slot] = init_dict
def run_init(torch_initializer, input_var, init, layer):
    """Dispatch an initializer over a Sequential, a module, or a raw tensor.

    Sequentials are walked child by child; modules are initialized via
    ``Module.apply`` and the config is recorded; anything else is treated
    as a tensor and initialized directly (best-effort).
    """
    if isinstance(layer, Sequential):
        for child in layer:
            run_init(torch_initializer, input_var, init, child)
        return
    if isinstance(layer, (FateTorchLayer, t.nn.Module)):
        apply_fn, used_params = make_apply_func(
            torch_initializer, copy.deepcopy(input_var),
            get_init_func_type(init), layer)
        layer.apply(apply_fn)
        record_initializer(
            layer, get_init_dict(torch_initializer, used_params, init))
        return
    try:
        return torch_initializer(layer, **input_var)
    except Exception as e:
        print(e)
        print('skip initialization')
"""
Init Func
"""
def local_extract(local_dict):
    """Deep-copy a wrapper's ``locals()`` minus the 'layer'/'init' entries."""
    filtered = {key: val for key, val in local_dict.items()
                if key not in ('layer', 'init')}
    return copy.deepcopy(filtered)
# NOTE(review): every wrapper below passes `locals()` through local_extract
# to build the torch init kwargs — do not introduce temporary variables in
# these bodies, or they would leak into the init parameters.
def uniform_(layer, a=0, b=1, init='weight'):
    """Uniform(a, b) init for *layer* ('weight' or 'bias' tensors)."""
    run_init(
        str_init_func_map['uniform'],
        local_extract(
            locals()),
        init,
        layer)
def normal_(layer, mean=0, std=1, init='weight'):
    """Normal(mean, std) init for *layer* ('weight' or 'bias' tensors)."""
    run_init(str_init_func_map['normal'], local_extract(locals()), init, layer)
def constant_(layer, val, init='weight'):
    """Constant-value init for *layer* ('weight' or 'bias' tensors)."""
    run_init(
        str_init_func_map['constant'],
        local_extract(
            locals()),
        init,
        layer)
def ones_(layer, init='weight'):
    """All-ones init for *layer* ('weight' or 'bias' tensors)."""
    run_init(str_init_func_map['ones'], local_extract(locals()), init, layer)
def zeros_(layer, init='weight'):
    """All-zeros init for *layer* ('weight' or 'bias' tensors)."""
    run_init(str_init_func_map['zeros'], local_extract(locals()), init, layer)
def eye_(layer, init='weight'):
    """Identity-matrix init for *layer* ('weight' or 'bias' tensors)."""
    run_init(str_init_func_map['eye'], local_extract(locals()), init, layer)
def dirac_(layer, group=1, init='weight'):
    """Dirac delta init for *layer* ('weight' or 'bias' tensors)."""
    run_init(str_init_func_map['dirac'], local_extract(locals()), init, layer)
def xavier_uniform_(layer, gain=1.0, init='weight'):
    """Xavier/Glorot uniform init for *layer*."""
    run_init(str_init_func_map['xavier_uniform'],
             local_extract(locals()), init, layer)
def xavier_normal_(layer, gain=1.0, init='weight'):
    """Xavier/Glorot normal init for *layer*."""
    run_init(str_init_func_map['xavier_normal'],
             local_extract(locals()), init, layer)
def kaiming_uniform_(
        layer,
        a=0,
        mode='fan_in',
        nonlinearity='leaky_relu',
        init='weight'):
    """Kaiming/He uniform init for *layer*."""
    run_init(str_init_func_map['kaiming_uniform'],
             local_extract(locals()), init, layer)
def kaiming_normal_(
        layer,
        a=0,
        mode='fan_in',
        nonlinearity='leaky_relu',
        init='weight'):
    """Kaiming/He normal init for *layer*."""
    run_init(str_init_func_map['kaiming_normal'],
             local_extract(locals()), init, layer)
def orthogonal_(layer, gain=1, init='weight'):
    """Orthogonal-matrix init for *layer*."""
    run_init(
        str_init_func_map['orthogonal'],
        local_extract(
            locals()),
        init,
        layer)
def sparse_(layer, sparsity, std=0.01, init='weight'):
    """Sparse init for *layer* (fraction `sparsity` of entries zeroed)."""
    run_init(str_init_func_map['sparse'], local_extract(locals()), init, layer)
# Maps the same short names as str_init_func_map to the fate-torch wrapper
# functions defined above (used when replaying a recorded init config).
str_fate_torch_init_func_map = {
    "uniform": uniform_,
    "normal": normal_,
    "constant": constant_,
    "xavier_uniform": xavier_uniform_,
    "xavier_normal": xavier_normal_,
    "kaiming_uniform": kaiming_uniform_,
    "kaiming_normal": kaiming_normal_,
    "eye": eye_,
    "dirac": dirac_,
    "orthogonal": orthogonal_,
    "sparse": sparse_,
    "zeros": zeros_,
    "ones": ones_
}
# module is import-only; no script entry point
if __name__ == '__main__':
    pass
| 6,761 | 25.622047 | 89 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/nn.py | from torch import nn
from federatedml.nn.backend.torch.base import FateTorchLayer, FateTorchLoss
from federatedml.nn.backend.torch.base import Sequential
class Bilinear(nn.modules.linear.Bilinear, FateTorchLayer):
    """Recordable wrapper of ``nn.Bilinear``; ctor args kept in param_dict."""

    def __init__(self, in1_features, in2_features, out_features, bias=True,
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in1_features': in1_features, 'in2_features': in2_features,
            'out_features': out_features})
        self.param_dict.update(kwargs)
        nn.modules.linear.Bilinear.__init__(self, **self.param_dict)


class Identity(nn.modules.linear.Identity, FateTorchLayer):
    """Recordable wrapper of ``nn.Identity``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.linear.Identity.__init__(self, **self.param_dict)


class LazyLinear(nn.modules.linear.LazyLinear, FateTorchLayer):
    """Recordable wrapper of ``nn.LazyLinear``; ctor args kept in param_dict."""

    def __init__(self, out_features, bias=True, device=None, dtype=None,
                 **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'out_features': out_features})
        self.param_dict.update(kwargs)
        nn.modules.linear.LazyLinear.__init__(self, **self.param_dict)


class Linear(nn.modules.linear.Linear, FateTorchLayer):
    """Recordable wrapper of ``nn.Linear``; ctor args kept in param_dict."""

    def __init__(self, in_features, out_features, bias=True, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in_features': in_features, 'out_features': out_features})
        self.param_dict.update(kwargs)
        nn.modules.linear.Linear.__init__(self, **self.param_dict)


class NonDynamicallyQuantizableLinear(
        nn.modules.linear.NonDynamicallyQuantizableLinear,
        FateTorchLayer):
    """Recordable wrapper of torch's NonDynamicallyQuantizableLinear."""

    def __init__(self, in_features, out_features, bias=True, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'in_features': in_features, 'out_features': out_features})
        self.param_dict.update(kwargs)
        nn.modules.linear.NonDynamicallyQuantizableLinear.__init__(
            self, **self.param_dict)
class GRU(nn.modules.rnn.GRU, FateTorchLayer):
    """Recordable wrapper of ``nn.GRU``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.rnn.GRU.__init__(self, **self.param_dict)


class GRUCell(nn.modules.rnn.GRUCell, FateTorchLayer):
    """Recordable wrapper of ``nn.GRUCell``; ctor args kept in param_dict."""

    def __init__(self, input_size, hidden_size, bias=True, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size})
        self.param_dict.update(kwargs)
        nn.modules.rnn.GRUCell.__init__(self, **self.param_dict)


class LSTM(nn.modules.rnn.LSTM, FateTorchLayer):
    """Recordable wrapper of ``nn.LSTM``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.rnn.LSTM.__init__(self, **self.param_dict)


class LSTMCell(nn.modules.rnn.LSTMCell, FateTorchLayer):
    """Recordable wrapper of ``nn.LSTMCell``; ctor args kept in param_dict."""

    def __init__(self, input_size, hidden_size, bias=True, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size})
        self.param_dict.update(kwargs)
        nn.modules.rnn.LSTMCell.__init__(self, **self.param_dict)


class RNN(nn.modules.rnn.RNN, FateTorchLayer):
    """Recordable wrapper of ``nn.RNN``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNN.__init__(self, **self.param_dict)


class RNNBase(nn.modules.rnn.RNNBase, FateTorchLayer):
    """Recordable wrapper of ``nn.RNNBase``; ctor args kept in param_dict."""

    def __init__(self, mode, input_size, hidden_size, num_layers=1, bias=True,
                 batch_first=False, dropout=0.0, bidirectional=False,
                 proj_size=0, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'num_layers': num_layers, 'bias': bias,
            'batch_first': batch_first, 'dropout': dropout,
            'bidirectional': bidirectional, 'proj_size': proj_size,
            'device': device, 'dtype': dtype, 'mode': mode,
            'input_size': input_size, 'hidden_size': hidden_size})
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNBase.__init__(self, **self.param_dict)


class RNNCell(nn.modules.rnn.RNNCell, FateTorchLayer):
    """Recordable wrapper of ``nn.RNNCell``; ctor args kept in param_dict."""

    def __init__(self, input_size, hidden_size, bias=True,
                 nonlinearity='tanh', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'bias': bias, 'nonlinearity': nonlinearity,
            'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size})
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNCell.__init__(self, **self.param_dict)


class RNNCellBase(nn.modules.rnn.RNNCellBase, FateTorchLayer):
    """Recordable wrapper of ``nn.RNNCellBase``; ctor args kept in param_dict."""

    def __init__(self, input_size, hidden_size, bias, num_chunks, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'device': device, 'dtype': dtype,
            'input_size': input_size, 'hidden_size': hidden_size,
            'bias': bias, 'num_chunks': num_chunks})
        self.param_dict.update(kwargs)
        nn.modules.rnn.RNNCellBase.__init__(self, **self.param_dict)
class Embedding(nn.modules.sparse.Embedding, FateTorchLayer):
    """Recordable wrapper of ``nn.Embedding``; ctor args kept in param_dict."""

    def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
                 max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
                 sparse=False, _weight=None, device=None, dtype=None,
                 **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'padding_idx': padding_idx, 'max_norm': max_norm,
            'norm_type': norm_type, 'scale_grad_by_freq': scale_grad_by_freq,
            'sparse': sparse, '_weight': _weight,
            'device': device, 'dtype': dtype,
            'num_embeddings': num_embeddings, 'embedding_dim': embedding_dim})
        self.param_dict.update(kwargs)
        nn.modules.sparse.Embedding.__init__(self, **self.param_dict)


class EmbeddingBag(nn.modules.sparse.EmbeddingBag, FateTorchLayer):
    """Recordable wrapper of ``nn.EmbeddingBag``; ctor args kept in param_dict."""

    def __init__(self, num_embeddings, embedding_dim, max_norm=None,
                 norm_type=2.0, scale_grad_by_freq=False, mode='mean',
                 sparse=False, _weight=None, include_last_offset=False,
                 padding_idx=None, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'max_norm': max_norm, 'norm_type': norm_type,
            'scale_grad_by_freq': scale_grad_by_freq, 'mode': mode,
            'sparse': sparse, '_weight': _weight,
            'include_last_offset': include_last_offset,
            'padding_idx': padding_idx, 'device': device, 'dtype': dtype,
            'num_embeddings': num_embeddings, 'embedding_dim': embedding_dim})
        self.param_dict.update(kwargs)
        nn.modules.sparse.EmbeddingBag.__init__(self, **self.param_dict)
class AlphaDropout(nn.modules.dropout.AlphaDropout, FateTorchLayer):
    """Recordable wrapper of ``nn.AlphaDropout``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.AlphaDropout.__init__(self, **self.param_dict)


class Dropout(nn.modules.dropout.Dropout, FateTorchLayer):
    """Recordable wrapper of ``nn.Dropout``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout.__init__(self, **self.param_dict)


class Dropout1d(nn.modules.dropout.Dropout1d, FateTorchLayer):
    """Recordable wrapper of ``nn.Dropout1d``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout1d.__init__(self, **self.param_dict)


class Dropout2d(nn.modules.dropout.Dropout2d, FateTorchLayer):
    """Recordable wrapper of ``nn.Dropout2d``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout2d.__init__(self, **self.param_dict)


class Dropout3d(nn.modules.dropout.Dropout3d, FateTorchLayer):
    """Recordable wrapper of ``nn.Dropout3d``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.Dropout3d.__init__(self, **self.param_dict)


class FeatureAlphaDropout(
        nn.modules.dropout.FeatureAlphaDropout,
        FateTorchLayer):
    """Recordable wrapper of ``nn.FeatureAlphaDropout``."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout.FeatureAlphaDropout.__init__(
            self, **self.param_dict)


class _DropoutNd(nn.modules.dropout._DropoutNd, FateTorchLayer):
    """Recordable wrapper of torch's ``_DropoutNd`` base class."""

    def __init__(self, p=0.5, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'p': p, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.dropout._DropoutNd.__init__(self, **self.param_dict)
class CELU(nn.modules.activation.CELU, FateTorchLayer):
    """Recordable wrapper of ``nn.CELU``."""

    def __init__(self, alpha=1.0, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'alpha': alpha, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.CELU.__init__(self, **self.param_dict)


class ELU(nn.modules.activation.ELU, FateTorchLayer):
    """Recordable wrapper of ``nn.ELU``."""

    def __init__(self, alpha=1.0, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'alpha': alpha, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.ELU.__init__(self, **self.param_dict)


class GELU(nn.modules.activation.GELU, FateTorchLayer):
    """Recordable wrapper of ``nn.GELU``."""

    def __init__(self, approximate='none', **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'approximate': approximate})
        self.param_dict.update(kwargs)
        nn.modules.activation.GELU.__init__(self, **self.param_dict)


class GLU(nn.modules.activation.GLU, FateTorchLayer):
    """Recordable wrapper of ``nn.GLU``."""

    def __init__(self, dim=-1, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.GLU.__init__(self, **self.param_dict)


class Hardshrink(nn.modules.activation.Hardshrink, FateTorchLayer):
    """Recordable wrapper of ``nn.Hardshrink``."""

    def __init__(self, lambd=0.5, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'lambd': lambd})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardshrink.__init__(self, **self.param_dict)


class Hardsigmoid(nn.modules.activation.Hardsigmoid, FateTorchLayer):
    """Recordable wrapper of ``nn.Hardsigmoid``."""

    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardsigmoid.__init__(self, **self.param_dict)


class Hardswish(nn.modules.activation.Hardswish, FateTorchLayer):
    """Recordable wrapper of ``nn.Hardswish``."""

    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardswish.__init__(self, **self.param_dict)


class Hardtanh(nn.modules.activation.Hardtanh, FateTorchLayer):
    """Recordable wrapper of ``nn.Hardtanh``."""

    def __init__(self, min_val=-1.0, max_val=1.0, inplace=False,
                 min_value=None, max_value=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'min_val': min_val, 'max_val': max_val, 'inplace': inplace,
            'min_value': min_value, 'max_value': max_value})
        self.param_dict.update(kwargs)
        nn.modules.activation.Hardtanh.__init__(self, **self.param_dict)


class LeakyReLU(nn.modules.activation.LeakyReLU, FateTorchLayer):
    """Recordable wrapper of ``nn.LeakyReLU``."""

    def __init__(self, negative_slope=0.01, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'negative_slope': negative_slope, 'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.LeakyReLU.__init__(self, **self.param_dict)


class LogSigmoid(nn.modules.activation.LogSigmoid, FateTorchLayer):
    """Recordable wrapper of ``nn.LogSigmoid``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.activation.LogSigmoid.__init__(self, **self.param_dict)


class LogSoftmax(nn.modules.activation.LogSoftmax, FateTorchLayer):
    """Recordable wrapper of ``nn.LogSoftmax``."""

    def __init__(self, dim=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'dim': dim})
        self.param_dict.update(kwargs)
        nn.modules.activation.LogSoftmax.__init__(self, **self.param_dict)


class Mish(nn.modules.activation.Mish, FateTorchLayer):
    """Recordable wrapper of ``nn.Mish``."""

    def __init__(self, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'inplace': inplace})
        self.param_dict.update(kwargs)
        nn.modules.activation.Mish.__init__(self, **self.param_dict)
class MultiheadAttention(
nn.modules.activation.MultiheadAttention,
FateTorchLayer):
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dropout'] = dropout
self.param_dict['bias'] = bias
self.param_dict['add_bias_kv'] = add_bias_kv
self.param_dict['add_zero_attn'] = add_zero_attn
self.param_dict['kdim'] = kdim
self.param_dict['vdim'] = vdim
self.param_dict['batch_first'] = batch_first
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict['embed_dim'] = embed_dim
self.param_dict['num_heads'] = num_heads
self.param_dict.update(kwargs)
nn.modules.activation.MultiheadAttention.__init__(
self, **self.param_dict)
class PReLU(nn.modules.activation.PReLU, FateTorchLayer):
def __init__(
self,
num_parameters=1,
init=0.25,
device=None,
dtype=None,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['num_parameters'] = num_parameters
self.param_dict['init'] = init
self.param_dict['device'] = device
self.param_dict['dtype'] = dtype
self.param_dict.update(kwargs)
nn.modules.activation.PReLU.__init__(self, **self.param_dict)
class RReLU(nn.modules.activation.RReLU, FateTorchLayer):
def __init__(
self,
lower=0.125,
upper=0.3333333333333333,
inplace=False,
**kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lower'] = lower
self.param_dict['upper'] = upper
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.RReLU.__init__(self, **self.param_dict)
class ReLU(nn.modules.activation.ReLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU.__init__(self, **self.param_dict)
class ReLU6(nn.modules.activation.ReLU6, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.ReLU6.__init__(self, **self.param_dict)
class SELU(nn.modules.activation.SELU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SELU.__init__(self, **self.param_dict)
class SiLU(nn.modules.activation.SiLU, FateTorchLayer):
def __init__(self, inplace=False, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['inplace'] = inplace
self.param_dict.update(kwargs)
nn.modules.activation.SiLU.__init__(self, **self.param_dict)
class Sigmoid(nn.modules.activation.Sigmoid, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Sigmoid.__init__(self, **self.param_dict)
class Softmax(nn.modules.activation.Softmax, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmax.__init__(self, **self.param_dict)
class Softmax2d(nn.modules.activation.Softmax2d, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softmax2d.__init__(self, **self.param_dict)
class Softmin(nn.modules.activation.Softmin, FateTorchLayer):
def __init__(self, dim=None, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['dim'] = dim
self.param_dict.update(kwargs)
nn.modules.activation.Softmin.__init__(self, **self.param_dict)
class Softplus(nn.modules.activation.Softplus, FateTorchLayer):
def __init__(self, beta=1, threshold=20, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['beta'] = beta
self.param_dict['threshold'] = threshold
self.param_dict.update(kwargs)
nn.modules.activation.Softplus.__init__(self, **self.param_dict)
class Softshrink(nn.modules.activation.Softshrink, FateTorchLayer):
def __init__(self, lambd=0.5, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict['lambd'] = lambd
self.param_dict.update(kwargs)
nn.modules.activation.Softshrink.__init__(self, **self.param_dict)
class Softsign(nn.modules.activation.Softsign, FateTorchLayer):
def __init__(self, **kwargs):
FateTorchLayer.__init__(self)
self.param_dict.update(kwargs)
nn.modules.activation.Softsign.__init__(self, **self.param_dict)
class Tanh(nn.modules.activation.Tanh, FateTorchLayer):
    """fate-torch wrapper for ``nn.Tanh``; records ctor args in ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.activation.Tanh.__init__(self, **self.param_dict)
class Tanhshrink(nn.modules.activation.Tanhshrink, FateTorchLayer):
    """fate-torch wrapper for ``nn.Tanhshrink``; records ctor args in ``param_dict``."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.activation.Tanhshrink.__init__(self, **self.param_dict)
class Threshold(nn.modules.activation.Threshold, FateTorchLayer):
    """fate-torch wrapper for ``nn.Threshold``; records ctor args in ``param_dict``."""

    def __init__(self, threshold, value, inplace=False, **kwargs):
        FateTorchLayer.__init__(self)
        # Key order mirrors the generated code (defaults first, then required args).
        self.param_dict.update({
            'inplace': inplace, 'threshold': threshold, 'value': value,
        })
        self.param_dict.update(kwargs)
        nn.modules.activation.Threshold.__init__(self, **self.param_dict)
class Conv1d(nn.modules.conv.Conv1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.Conv1d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        # Same key order as the generated code; **kwargs may override any entry.
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv1d.__init__(self, **self.param_dict)
class Conv2d(nn.modules.conv.Conv2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.Conv2d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv2d.__init__(self, **self.param_dict)
class Conv3d(nn.modules.conv.Conv3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.Conv3d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.Conv3d.__init__(self, **self.param_dict)
class ConvTranspose1d(nn.modules.conv.ConvTranspose1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.ConvTranspose1d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.ConvTranspose1d.__init__(self, **self.param_dict)
class ConvTranspose2d(nn.modules.conv.ConvTranspose2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.ConvTranspose2d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.ConvTranspose2d.__init__(self, **self.param_dict)
class ConvTranspose3d(nn.modules.conv.ConvTranspose3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.ConvTranspose3d``; records ctor args in ``param_dict``."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.ConvTranspose3d.__init__(self, **self.param_dict)
class LazyConv1d(nn.modules.conv.LazyConv1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConv1d`` (in_channels inferred lazily)."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode='zeros',
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConv1d.__init__(self, **self.param_dict)
class LazyConv2d(nn.modules.conv.LazyConv2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConv2d`` (in_channels inferred lazily)."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode='zeros',
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConv2d.__init__(self, **self.param_dict)
class LazyConv3d(nn.modules.conv.LazyConv3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConv3d`` (in_channels inferred lazily)."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bias=True, padding_mode='zeros',
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConv3d.__init__(self, **self.param_dict)
class LazyConvTranspose1d(nn.modules.conv.LazyConvTranspose1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConvTranspose1d``; records ctor args in ``param_dict``."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConvTranspose1d.__init__(self, **self.param_dict)
class LazyConvTranspose2d(nn.modules.conv.LazyConvTranspose2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConvTranspose2d``; records ctor args in ``param_dict``."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConvTranspose2d.__init__(self, **self.param_dict)
class LazyConvTranspose3d(nn.modules.conv.LazyConvTranspose3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyConvTranspose3d``; records ctor args in ``param_dict``."""

    def __init__(self, out_channels, kernel_size, stride=1, padding=0,
                 output_padding=0, groups=1, bias=True, dilation=1,
                 padding_mode='zeros', device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding,
            'output_padding': output_padding, 'groups': groups, 'bias': bias,
            'dilation': dilation, 'padding_mode': padding_mode,
            'device': device, 'dtype': dtype,
            'out_channels': out_channels, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv.LazyConvTranspose3d.__init__(self, **self.param_dict)
class _ConvNd(nn.modules.conv._ConvNd, FateTorchLayer):
    """fate-torch wrapper for the private ``nn.modules.conv._ConvNd`` base class."""

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias,
                 padding_mode, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'transposed': transposed, 'output_padding': output_padding,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv._ConvNd.__init__(self, **self.param_dict)
class _ConvTransposeMixin(nn.modules.conv._ConvTransposeMixin, FateTorchLayer):
    """fate-torch wrapper for the private ``_ConvTransposeMixin`` helper class."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.conv._ConvTransposeMixin.__init__(self, **self.param_dict)
class _ConvTransposeNd(nn.modules.conv._ConvTransposeNd, FateTorchLayer):
    """fate-torch wrapper for the private ``nn.modules.conv._ConvTransposeNd`` base."""

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 padding, dilation, transposed, output_padding, groups, bias,
                 padding_mode, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'device': device, 'dtype': dtype, 'in_channels': in_channels,
            'out_channels': out_channels, 'kernel_size': kernel_size,
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'transposed': transposed, 'output_padding': output_padding,
            'groups': groups, 'bias': bias, 'padding_mode': padding_mode,
        })
        self.param_dict.update(kwargs)
        nn.modules.conv._ConvTransposeNd.__init__(self, **self.param_dict)
class _LazyConvXdMixin(nn.modules.conv._LazyConvXdMixin, FateTorchLayer):
    """fate-torch wrapper for the private ``_LazyConvXdMixin`` helper class."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.conv._LazyConvXdMixin.__init__(self, **self.param_dict)
class Transformer(nn.modules.transformer.Transformer, FateTorchLayer):
    """fate-torch wrapper for ``nn.Transformer``; records ctor args in ``param_dict``."""

    def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
                 custom_encoder=None, custom_decoder=None,
                 layer_norm_eps=1e-05, batch_first=False, norm_first=False,
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'd_model': d_model, 'nhead': nhead,
            'num_encoder_layers': num_encoder_layers,
            'num_decoder_layers': num_decoder_layers,
            'dim_feedforward': dim_feedforward, 'dropout': dropout,
            'custom_encoder': custom_encoder, 'custom_decoder': custom_decoder,
            'layer_norm_eps': layer_norm_eps, 'batch_first': batch_first,
            'norm_first': norm_first, 'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.transformer.Transformer.__init__(self, **self.param_dict)
class TransformerDecoder(
        nn.modules.transformer.TransformerDecoder,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.TransformerDecoder``; records ctor args in ``param_dict``."""

    def __init__(self, decoder_layer, num_layers, norm=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'norm': norm, 'decoder_layer': decoder_layer,
            'num_layers': num_layers,
        })
        self.param_dict.update(kwargs)
        nn.modules.transformer.TransformerDecoder.__init__(
            self, **self.param_dict)
class TransformerDecoderLayer(
        nn.modules.transformer.TransformerDecoderLayer,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.TransformerDecoderLayer``; records ctor args in ``param_dict``."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 layer_norm_eps=1e-05, batch_first=False, norm_first=False,
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'dim_feedforward': dim_feedforward, 'dropout': dropout,
            'layer_norm_eps': layer_norm_eps, 'batch_first': batch_first,
            'norm_first': norm_first, 'device': device, 'dtype': dtype,
            'd_model': d_model, 'nhead': nhead,
        })
        self.param_dict.update(kwargs)
        nn.modules.transformer.TransformerDecoderLayer.__init__(
            self, **self.param_dict)
class TransformerEncoder(
        nn.modules.transformer.TransformerEncoder,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.TransformerEncoder``; records ctor args in ``param_dict``."""

    def __init__(self, encoder_layer, num_layers, norm=None,
                 enable_nested_tensor=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'norm': norm, 'enable_nested_tensor': enable_nested_tensor,
            'encoder_layer': encoder_layer, 'num_layers': num_layers,
        })
        self.param_dict.update(kwargs)
        nn.modules.transformer.TransformerEncoder.__init__(
            self, **self.param_dict)
class TransformerEncoderLayer(
        nn.modules.transformer.TransformerEncoderLayer,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.TransformerEncoderLayer``; records ctor args in ``param_dict``."""

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
                 layer_norm_eps=1e-05, batch_first=False, norm_first=False,
                 device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'dim_feedforward': dim_feedforward, 'dropout': dropout,
            'layer_norm_eps': layer_norm_eps, 'batch_first': batch_first,
            'norm_first': norm_first, 'device': device, 'dtype': dtype,
            'd_model': d_model, 'nhead': nhead,
        })
        self.param_dict.update(kwargs)
        nn.modules.transformer.TransformerEncoderLayer.__init__(
            self, **self.param_dict)
class AdaptiveAvgPool1d(nn.modules.pooling.AdaptiveAvgPool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveAvgPool1d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'output_size': output_size})
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveAvgPool1d.__init__(self, **self.param_dict)
class AdaptiveAvgPool2d(nn.modules.pooling.AdaptiveAvgPool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveAvgPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'output_size': output_size})
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveAvgPool2d.__init__(self, **self.param_dict)
class AdaptiveAvgPool3d(nn.modules.pooling.AdaptiveAvgPool3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveAvgPool3d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'output_size': output_size})
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveAvgPool3d.__init__(self, **self.param_dict)
class AdaptiveMaxPool1d(nn.modules.pooling.AdaptiveMaxPool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveMaxPool1d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'return_indices': return_indices, 'output_size': output_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveMaxPool1d.__init__(self, **self.param_dict)
class AdaptiveMaxPool2d(nn.modules.pooling.AdaptiveMaxPool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveMaxPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'return_indices': return_indices, 'output_size': output_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveMaxPool2d.__init__(self, **self.param_dict)
class AdaptiveMaxPool3d(nn.modules.pooling.AdaptiveMaxPool3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AdaptiveMaxPool3d``; records ctor args in ``param_dict``."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'return_indices': return_indices, 'output_size': output_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AdaptiveMaxPool3d.__init__(self, **self.param_dict)
class AvgPool1d(nn.modules.pooling.AvgPool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AvgPool1d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'ceil_mode': ceil_mode,
            'count_include_pad': count_include_pad,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AvgPool1d.__init__(self, **self.param_dict)
class AvgPool2d(nn.modules.pooling.AvgPool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AvgPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True, divisor_override=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'ceil_mode': ceil_mode,
            'count_include_pad': count_include_pad,
            'divisor_override': divisor_override,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AvgPool2d.__init__(self, **self.param_dict)
class AvgPool3d(nn.modules.pooling.AvgPool3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.AvgPool3d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True, divisor_override=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'ceil_mode': ceil_mode,
            'count_include_pad': count_include_pad,
            'divisor_override': divisor_override,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.AvgPool3d.__init__(self, **self.param_dict)
class FractionalMaxPool2d(
        nn.modules.pooling.FractionalMaxPool2d,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.FractionalMaxPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, output_size=None, output_ratio=None,
                 return_indices=False, _random_samples=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'output_size': output_size, 'output_ratio': output_ratio,
            'return_indices': return_indices,
            '_random_samples': _random_samples,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.FractionalMaxPool2d.__init__(
            self, **self.param_dict)
class FractionalMaxPool3d(
        nn.modules.pooling.FractionalMaxPool3d,
        FateTorchLayer):
    """fate-torch wrapper for ``nn.FractionalMaxPool3d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, output_size=None, output_ratio=None,
                 return_indices=False, _random_samples=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'output_size': output_size, 'output_ratio': output_ratio,
            'return_indices': return_indices,
            '_random_samples': _random_samples,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.FractionalMaxPool3d.__init__(
            self, **self.param_dict)
class LPPool1d(nn.modules.pooling.LPPool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LPPool1d``; records ctor args in ``param_dict``."""

    def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False,
                 **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'ceil_mode': ceil_mode,
            'norm_type': norm_type, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.LPPool1d.__init__(self, **self.param_dict)
class LPPool2d(nn.modules.pooling.LPPool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LPPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False,
                 **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'ceil_mode': ceil_mode,
            'norm_type': norm_type, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.LPPool2d.__init__(self, **self.param_dict)
class MaxPool1d(nn.modules.pooling.MaxPool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxPool1d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'return_indices': return_indices, 'ceil_mode': ceil_mode,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxPool1d.__init__(self, **self.param_dict)
class MaxPool2d(nn.modules.pooling.MaxPool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxPool2d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'return_indices': return_indices, 'ceil_mode': ceil_mode,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxPool2d.__init__(self, **self.param_dict)
class MaxPool3d(nn.modules.pooling.MaxPool3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxPool3d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'return_indices': return_indices, 'ceil_mode': ceil_mode,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxPool3d.__init__(self, **self.param_dict)
class MaxUnpool1d(nn.modules.pooling.MaxUnpool1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxUnpool1d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxUnpool1d.__init__(self, **self.param_dict)
class MaxUnpool2d(nn.modules.pooling.MaxUnpool2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxUnpool2d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxUnpool2d.__init__(self, **self.param_dict)
class MaxUnpool3d(nn.modules.pooling.MaxUnpool3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.MaxUnpool3d``; records ctor args in ``param_dict``."""

    def __init__(self, kernel_size, stride=None, padding=0, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling.MaxUnpool3d.__init__(self, **self.param_dict)
class _AdaptiveAvgPoolNd(
        nn.modules.pooling._AdaptiveAvgPoolNd,
        FateTorchLayer):
    """fate-torch wrapper for the private ``_AdaptiveAvgPoolNd`` base class."""

    def __init__(self, output_size, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({'output_size': output_size})
        self.param_dict.update(kwargs)
        nn.modules.pooling._AdaptiveAvgPoolNd.__init__(self, **self.param_dict)
class _AdaptiveMaxPoolNd(
        nn.modules.pooling._AdaptiveMaxPoolNd,
        FateTorchLayer):
    """fate-torch wrapper for the private ``_AdaptiveMaxPoolNd`` base class."""

    def __init__(self, output_size, return_indices=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'return_indices': return_indices, 'output_size': output_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling._AdaptiveMaxPoolNd.__init__(self, **self.param_dict)
class _AvgPoolNd(nn.modules.pooling._AvgPoolNd, FateTorchLayer):
    """fate-torch wrapper for the private ``_AvgPoolNd`` base class."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.pooling._AvgPoolNd.__init__(self, **self.param_dict)
class _LPPoolNd(nn.modules.pooling._LPPoolNd, FateTorchLayer):
    """fate-torch wrapper for the private ``_LPPoolNd`` base class."""

    def __init__(self, norm_type, kernel_size, stride=None, ceil_mode=False,
                 **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'ceil_mode': ceil_mode,
            'norm_type': norm_type, 'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling._LPPoolNd.__init__(self, **self.param_dict)
class _MaxPoolNd(nn.modules.pooling._MaxPoolNd, FateTorchLayer):
    """fate-torch wrapper for the private ``_MaxPoolNd`` base class."""

    def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
                 return_indices=False, ceil_mode=False, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'stride': stride, 'padding': padding, 'dilation': dilation,
            'return_indices': return_indices, 'ceil_mode': ceil_mode,
            'kernel_size': kernel_size,
        })
        self.param_dict.update(kwargs)
        nn.modules.pooling._MaxPoolNd.__init__(self, **self.param_dict)
class _MaxUnpoolNd(nn.modules.pooling._MaxUnpoolNd, FateTorchLayer):
    """fate-torch wrapper for the private ``_MaxUnpoolNd`` base class."""

    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.pooling._MaxUnpoolNd.__init__(self, **self.param_dict)
class BatchNorm1d(nn.modules.batchnorm.BatchNorm1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.BatchNorm1d``; records ctor args in ``param_dict``."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype, 'num_features': num_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.BatchNorm1d.__init__(self, **self.param_dict)
class BatchNorm2d(nn.modules.batchnorm.BatchNorm2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.BatchNorm2d``; records ctor args in ``param_dict``."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype, 'num_features': num_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.BatchNorm2d.__init__(self, **self.param_dict)
class BatchNorm3d(nn.modules.batchnorm.BatchNorm3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.BatchNorm3d``; records ctor args in ``param_dict``."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype, 'num_features': num_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.BatchNorm3d.__init__(self, **self.param_dict)
class LazyBatchNorm1d(nn.modules.batchnorm.LazyBatchNorm1d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyBatchNorm1d`` (num_features inferred lazily)."""

    def __init__(self, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.LazyBatchNorm1d.__init__(self, **self.param_dict)
class LazyBatchNorm2d(nn.modules.batchnorm.LazyBatchNorm2d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyBatchNorm2d`` (num_features inferred lazily)."""

    def __init__(self, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.LazyBatchNorm2d.__init__(self, **self.param_dict)
class LazyBatchNorm3d(nn.modules.batchnorm.LazyBatchNorm3d, FateTorchLayer):
    """fate-torch wrapper for ``nn.LazyBatchNorm3d`` (num_features inferred lazily)."""

    def __init__(self, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.LazyBatchNorm3d.__init__(self, **self.param_dict)
class SyncBatchNorm(nn.modules.batchnorm.SyncBatchNorm, FateTorchLayer):
    """fate-torch wrapper for ``nn.SyncBatchNorm``; records ctor args in ``param_dict``."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, process_group=None, device=None,
                 dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'process_group': process_group, 'device': device, 'dtype': dtype,
            'num_features': num_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm.SyncBatchNorm.__init__(self, **self.param_dict)
class _BatchNorm(nn.modules.batchnorm._BatchNorm, FateTorchLayer):
    """fate-torch wrapper for the private ``nn.modules.batchnorm._BatchNorm`` base."""

    def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype, 'num_features': num_features,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm._BatchNorm.__init__(self, **self.param_dict)
class _LazyNormBase(nn.modules.batchnorm._LazyNormBase, FateTorchLayer):
    """fate-torch wrapper for the private ``nn.modules.batchnorm._LazyNormBase``."""

    def __init__(self, eps=1e-05, momentum=0.1, affine=True,
                 track_running_stats=True, device=None, dtype=None, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update({
            'eps': eps, 'momentum': momentum, 'affine': affine,
            'track_running_stats': track_running_stats,
            'device': device, 'dtype': dtype,
        })
        self.param_dict.update(kwargs)
        nn.modules.batchnorm._LazyNormBase.__init__(self, **self.param_dict)
class _NormBase(nn.modules.batchnorm._NormBase, FateTorchLayer):
    """Fate-torch wrapper of torch's internal ``_NormBase``; records init args in ``param_dict``."""
    def __init__(
            self,
            num_features,
            eps=1e-05,
            momentum=0.1,
            affine=True,
            track_running_stats=True,
            device=None,
            dtype=None,
            **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['eps'] = eps
        self.param_dict['momentum'] = momentum
        self.param_dict['affine'] = affine
        self.param_dict['track_running_stats'] = track_running_stats
        self.param_dict['device'] = device
        self.param_dict['dtype'] = dtype
        self.param_dict['num_features'] = num_features
        self.param_dict.update(kwargs)
        nn.modules.batchnorm._NormBase.__init__(self, **self.param_dict)
class ConstantPad1d(nn.modules.padding.ConstantPad1d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ConstantPad1d``; records init args in ``param_dict``."""
    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict['value'] = value
        self.param_dict.update(kwargs)
        nn.modules.padding.ConstantPad1d.__init__(self, **self.param_dict)
class ConstantPad2d(nn.modules.padding.ConstantPad2d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ConstantPad2d``; records init args in ``param_dict``."""
    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict['value'] = value
        self.param_dict.update(kwargs)
        nn.modules.padding.ConstantPad2d.__init__(self, **self.param_dict)
class ConstantPad3d(nn.modules.padding.ConstantPad3d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ConstantPad3d``; records init args in ``param_dict``."""
    def __init__(self, padding, value, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict['value'] = value
        self.param_dict.update(kwargs)
        nn.modules.padding.ConstantPad3d.__init__(self, **self.param_dict)
class ReflectionPad1d(nn.modules.padding.ReflectionPad1d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReflectionPad1d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReflectionPad1d.__init__(self, **self.param_dict)
class ReflectionPad2d(nn.modules.padding.ReflectionPad2d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReflectionPad2d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReflectionPad2d.__init__(self, **self.param_dict)
class ReflectionPad3d(nn.modules.padding.ReflectionPad3d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReflectionPad3d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReflectionPad3d.__init__(self, **self.param_dict)
class ReplicationPad1d(nn.modules.padding.ReplicationPad1d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReplicationPad1d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReplicationPad1d.__init__(self, **self.param_dict)
class ReplicationPad2d(nn.modules.padding.ReplicationPad2d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReplicationPad2d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReplicationPad2d.__init__(self, **self.param_dict)
class ReplicationPad3d(nn.modules.padding.ReplicationPad3d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ReplicationPad3d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ReplicationPad3d.__init__(self, **self.param_dict)
class ZeroPad2d(nn.modules.padding.ZeroPad2d, FateTorchLayer):
    """Fate-torch wrapper of ``torch.nn.ZeroPad2d``; records init args in ``param_dict``."""
    def __init__(self, padding, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['padding'] = padding
        self.param_dict.update(kwargs)
        nn.modules.padding.ZeroPad2d.__init__(self, **self.param_dict)
class _ConstantPadNd(nn.modules.padding._ConstantPadNd, FateTorchLayer):
    """Fate-torch wrapper of torch's internal ``_ConstantPadNd``; records init args in ``param_dict``."""
    def __init__(self, value, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict['value'] = value
        self.param_dict.update(kwargs)
        nn.modules.padding._ConstantPadNd.__init__(self, **self.param_dict)
class _ReflectionPadNd(nn.modules.padding._ReflectionPadNd, FateTorchLayer):
    """Fate-torch wrapper of torch's internal ``_ReflectionPadNd``; records init args in ``param_dict``."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.padding._ReflectionPadNd.__init__(self, **self.param_dict)
class _ReplicationPadNd(nn.modules.padding._ReplicationPadNd, FateTorchLayer):
    """Fate-torch wrapper of torch's internal ``_ReplicationPadNd``; records init args in ``param_dict``."""
    def __init__(self, **kwargs):
        FateTorchLayer.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.padding._ReplicationPadNd.__init__(self, **self.param_dict)
class BCELoss(nn.modules.loss.BCELoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.BCELoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.BCELoss.__init__(self, **self.param_dict)
class BCEWithLogitsLoss(nn.modules.loss.BCEWithLogitsLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.BCEWithLogitsLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            pos_weight=None,
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict['pos_weight'] = pos_weight
        self.param_dict.update(kwargs)
        nn.modules.loss.BCEWithLogitsLoss.__init__(self, **self.param_dict)
class CTCLoss(nn.modules.loss.CTCLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.CTCLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            blank=0,
            reduction='mean',
            zero_infinity=False,
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['blank'] = blank
        self.param_dict['reduction'] = reduction
        self.param_dict['zero_infinity'] = zero_infinity
        self.param_dict.update(kwargs)
        nn.modules.loss.CTCLoss.__init__(self, **self.param_dict)
class CosineEmbeddingLoss(nn.modules.loss.CosineEmbeddingLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.CosineEmbeddingLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            margin=0.0,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['margin'] = margin
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.CosineEmbeddingLoss.__init__(self, **self.param_dict)
class CrossEntropyLoss(nn.modules.loss.CrossEntropyLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.CrossEntropyLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean',
            label_smoothing=0.0,
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['ignore_index'] = ignore_index
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict['label_smoothing'] = label_smoothing
        self.param_dict.update(kwargs)
        nn.modules.loss.CrossEntropyLoss.__init__(self, **self.param_dict)
class GaussianNLLLoss(nn.modules.loss.GaussianNLLLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.GaussianNLLLoss``; all init args pass through ``param_dict`` via kwargs."""
    def __init__(self, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.loss.GaussianNLLLoss.__init__(self, **self.param_dict)
class HingeEmbeddingLoss(nn.modules.loss.HingeEmbeddingLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.HingeEmbeddingLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            margin=1.0,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['margin'] = margin
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.HingeEmbeddingLoss.__init__(self, **self.param_dict)
class HuberLoss(nn.modules.loss.HuberLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.HuberLoss``; records init args in ``param_dict``."""
    def __init__(self, reduction='mean', delta=1.0, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['reduction'] = reduction
        self.param_dict['delta'] = delta
        self.param_dict.update(kwargs)
        nn.modules.loss.HuberLoss.__init__(self, **self.param_dict)
class KLDivLoss(nn.modules.loss.KLDivLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.KLDivLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            log_target=False,
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict['log_target'] = log_target
        self.param_dict.update(kwargs)
        nn.modules.loss.KLDivLoss.__init__(self, **self.param_dict)
class L1Loss(nn.modules.loss.L1Loss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.L1Loss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.L1Loss.__init__(self, **self.param_dict)
class MSELoss(nn.modules.loss.MSELoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.MSELoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.MSELoss.__init__(self, **self.param_dict)
class MarginRankingLoss(nn.modules.loss.MarginRankingLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.MarginRankingLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            margin=0.0,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['margin'] = margin
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.MarginRankingLoss.__init__(self, **self.param_dict)
class MultiLabelMarginLoss(
        nn.modules.loss.MultiLabelMarginLoss,
        FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.MultiLabelMarginLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiLabelMarginLoss.__init__(self, **self.param_dict)
class MultiLabelSoftMarginLoss(
        nn.modules.loss.MultiLabelSoftMarginLoss,
        FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.MultiLabelSoftMarginLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiLabelSoftMarginLoss.__init__(
            self, **self.param_dict)
class MultiMarginLoss(nn.modules.loss.MultiMarginLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.MultiMarginLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            p=1,
            margin=1.0,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['p'] = p
        self.param_dict['margin'] = margin
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.MultiMarginLoss.__init__(self, **self.param_dict)
class NLLLoss(nn.modules.loss.NLLLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.NLLLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['ignore_index'] = ignore_index
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.NLLLoss.__init__(self, **self.param_dict)
class NLLLoss2d(nn.modules.loss.NLLLoss2d, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.NLLLoss2d``; records init args in ``param_dict``.

    NOTE(review): NLLLoss2d is deprecated upstream in recent torch releases —
    confirm the torch version this wrapper targets still exposes it.
    """
    def __init__(
            self,
            weight=None,
            size_average=None,
            ignore_index=-100,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['ignore_index'] = ignore_index
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.NLLLoss2d.__init__(self, **self.param_dict)
class PoissonNLLLoss(nn.modules.loss.PoissonNLLLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.PoissonNLLLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            log_input=True,
            full=False,
            size_average=None,
            eps=1e-08,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['log_input'] = log_input
        self.param_dict['full'] = full
        self.param_dict['size_average'] = size_average
        self.param_dict['eps'] = eps
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.PoissonNLLLoss.__init__(self, **self.param_dict)
class SmoothL1Loss(nn.modules.loss.SmoothL1Loss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.SmoothL1Loss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            beta=1.0,
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict['beta'] = beta
        self.param_dict.update(kwargs)
        nn.modules.loss.SmoothL1Loss.__init__(self, **self.param_dict)
class SoftMarginLoss(nn.modules.loss.SoftMarginLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.SoftMarginLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.SoftMarginLoss.__init__(self, **self.param_dict)
class TripletMarginLoss(nn.modules.loss.TripletMarginLoss, FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.TripletMarginLoss``; records init args in ``param_dict``."""
    def __init__(
            self,
            margin=1.0,
            p=2.0,
            eps=1e-06,
            swap=False,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['margin'] = margin
        self.param_dict['p'] = p
        self.param_dict['eps'] = eps
        self.param_dict['swap'] = swap
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss.TripletMarginLoss.__init__(self, **self.param_dict)
class TripletMarginWithDistanceLoss(
        nn.modules.loss.TripletMarginWithDistanceLoss,
        FateTorchLoss):
    """Fate-torch wrapper of ``torch.nn.TripletMarginWithDistanceLoss``; all init args pass through kwargs."""
    def __init__(self, **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict.update(kwargs)
        nn.modules.loss.TripletMarginWithDistanceLoss.__init__(
            self, **self.param_dict)
class _Loss(nn.modules.loss._Loss, FateTorchLoss):
    """Fate-torch wrapper of torch's internal ``_Loss`` base; records init args in ``param_dict``."""
    def __init__(
            self,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss._Loss.__init__(self, **self.param_dict)
class _WeightedLoss(nn.modules.loss._WeightedLoss, FateTorchLoss):
    """Fate-torch wrapper of torch's internal ``_WeightedLoss`` base; records init args in ``param_dict``."""
    def __init__(
            self,
            weight=None,
            size_average=None,
            reduce=None,
            reduction='mean',
            **kwargs):
        FateTorchLoss.__init__(self)
        self.param_dict['weight'] = weight
        self.param_dict['size_average'] = size_average
        self.param_dict['reduce'] = reduce
        self.param_dict['reduction'] = reduction
        self.param_dict.update(kwargs)
        nn.modules.loss._WeightedLoss.__init__(self, **self.param_dict)
| 81,778 | 32.406454 | 79 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/interactive.py | import torch as t
from torch.nn import ReLU, Linear, LazyLinear, Tanh, Sigmoid, Dropout, Sequential
from federatedml.nn.backend.torch.base import FateTorchLayer
class InteractiveLayer(t.nn.Module, FateTorchLayer):
    r"""Hetero-NN interactive layer.

    Forward computation:
        out = act_seq( Linear(guest_input) + Linear(host_0_input) + Linear(host_1_input) .. )
    where ``act_seq`` is the configured activation optionally followed by dropout.

    Args:
        out_dim: int, the output dimension of InteractiveLayer
        guest_dim: int or None, the input dimension of guest features; if None,
            a LazyLinear layer infers the input dimension from the first batch
        host_num: int, the number of host parties, default 1; modify this when
            running multi-party modeling
        host_dim: int or None; int gives the input dimension of all host
            features, None infers it lazily
        activation: str, one of relu, tanh, sigmoid
        dropout: float in 0-1, or None to disable dropout
        guest_bias: bias for the guest linear layer
        host_bias: bias for the host linear layers
        need_guest: if False, the input of the guest bottom model is ignored
    """

    def __init__(
            self,
            out_dim,
            guest_dim=None,
            host_num=1,
            host_dim=None,
            activation='relu',
            dropout=None,
            guest_bias=True,
            host_bias=True,
            need_guest=True,
    ):
        t.nn.Module.__init__(self)
        FateTorchLayer.__init__(self)
        # Resolve the activation module from its string name.
        self.activation = None
        if activation is not None:
            if activation.lower() == 'relu':
                self.activation = ReLU()
            elif activation.lower() == 'tanh':
                self.activation = Tanh()
            elif activation.lower() == 'sigmoid':
                self.activation = Sigmoid()
            else:
                raise ValueError(
                    'activation not support {}, avail: relu, tanh, sigmoid'.format(activation))
        self.dropout = None
        if dropout is not None:
            assert isinstance(dropout, float), 'dropout must be a float'
            self.dropout = Dropout(p=dropout)
        assert isinstance(out_dim, int), 'out_dim must be an int >= 0'
        # Record constructor arguments for fate-torch serialization.
        self.param_dict['out_dim'] = out_dim
        self.param_dict['activation'] = activation
        self.param_dict['dropout'] = dropout
        self.param_dict['need_guest'] = need_guest
        assert isinstance(
            host_num, int) and host_num >= 1, 'host number is an int >= 1'
        self.param_dict['host_num'] = host_num
        if guest_dim is not None:
            assert isinstance(guest_dim, int)
        if host_dim is not None:
            assert isinstance(host_dim, int)
        self.guest_bias = guest_bias
        self.param_dict['guest_dim'] = guest_dim
        self.param_dict['host_dim'] = host_dim
        self.param_dict['guest_bias'] = guest_bias
        self.param_dict['host_bias'] = host_bias
        if need_guest:
            # LazyLinear defers weight allocation until the input dim is known.
            if guest_dim is None:
                self.guest_model = LazyLinear(out_dim, guest_bias)
            else:
                self.guest_model = Linear(guest_dim, out_dim, guest_bias)
        else:
            self.guest_model = None
        self.out_dim = out_dim
        self.host_dim = host_dim
        self.host_bias = host_bias
        self.need_guest = need_guest
        self.host_model = t.nn.ModuleList()
        for i in range(host_num):
            self.host_model.append(self.make_host_model())
        # Activation (plus optional dropout) applied to the merged output.
        if self.dropout is not None:
            self.act_seq = Sequential(
                self.activation,
                self.dropout
            )
        else:
            self.act_seq = Sequential(
                self.activation
            )

    def lazy_to_linear(self, guest_dim=None, host_dims=None):
        """Replace LazyLinear sub-layers with concrete Linear layers of known dims."""
        if isinstance(
                self.guest_model,
                t.nn.LazyLinear) and guest_dim is not None:
            self.guest_model = t.nn.Linear(
                guest_dim, self.out_dim, bias=self.guest_bias)
        if isinstance(
                self.host_model[0],
                t.nn.LazyLinear) and host_dims is not None:
            new_model_list = t.nn.ModuleList()
            for dim in host_dims:
                new_model_list.append(
                    t.nn.Linear(
                        dim,
                        self.out_dim,
                        bias=self.host_bias))
            self.host_model = new_model_list

    def make_host_model(self):
        """Build one host-side linear layer (lazy when host_dim is unknown)."""
        if self.host_dim is None:
            return LazyLinear(self.out_dim, self.host_bias)
        else:
            return Linear(self.host_dim, self.out_dim, self.host_bias)

    def forward(self, x_guest, x_host):
        """Merge guest/host bottom-model outputs, then apply activation (+dropout)."""
        if self.need_guest:
            g_out = self.guest_model(x_guest)
        else:
            g_out = 0
        h_out = None
        if isinstance(x_host, list):
            for m, data in zip(self.host_model, x_host):
                out_ = m(data)
                h_out = out_ if h_out is None else h_out + out_
        else:
            h_out = self.host_model[0](x_host)
        # BUGFIX: route through act_seq so a configured dropout is actually
        # applied; previously self.activation was called directly and the
        # Dropout module built in __init__ was never used.
        return self.act_seq(g_out + h_out)
| 5,516 | 33.917722 | 113 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/__init__.py | try:
from federatedml.nn.backend.torch import nn, init, operation, optim, serialization
except ImportError:
nn, init, operation, optim, serialization = None, None, None, None, None
__all__ = ['nn', 'init', 'operation', 'optim', 'serialization']
| 254 | 35.428571 | 86 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/operation.py | import torch as t
import copy
from torch.nn import Module
class OpBase(object):
    """Mixin for serializable tensor ops: collects init arguments in ``param_dict``."""
    def __init__(self):
        self.param_dict = {}

    def to_dict(self):
        """Export a deep copy of the recorded params plus the op's class name under 'op'."""
        exported = copy.deepcopy(self.param_dict)
        exported['op'] = type(self).__name__
        return exported
class Astype(Module, OpBase):
    """Serializable dtype-cast op: ``forward`` converts the input tensor to
    the dtype named at construction time."""
    def __init__(self, cast_type: str):
        OpBase.__init__(self)
        Module.__init__(self)
        # accepted names map 1:1 onto the torch dtypes listed below
        assert cast_type in [
            'float',
            'int',
            'bool',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'float16']
        self.param_dict['cast_type'] = cast_type
        self.cast_type = cast_type
        self.cast_type_map = {
            'float': t.float,
            'int': t.int,
            'bool': t.bool,
            'float32': t.float32,
            'float64': t.float64,
            'float16': t.float16,
            'int8': t.int8,
            'int16': t.int16,
            'int32': t.int32,
            'int64': t.int64,
        }
    def forward(self, tensor: t.Tensor, **kwargs):
        # extra kwargs are accepted for interface uniformity but ignored
        return tensor.type(self.cast_type_map[self.cast_type])
class Flatten(Module, OpBase):
    """Serializable flatten op; arguments mirror ``torch.flatten``."""
    def __init__(self, start_dim=0, end_dim=-1):
        OpBase.__init__(self)
        Module.__init__(self)
        self.param_dict.update({'start_dim': start_dim, 'end_dim': end_dim})

    def forward(self, tensor):
        """Flatten *tensor* between the stored start/end dimensions."""
        return tensor.flatten(**self.param_dict)
class Reshape(Module, OpBase):
    """Serializable reshape op; the target shape is captured at construction."""
    def __init__(self, shape):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(shape, (tuple, list))
        self.shape = shape
        self.param_dict['shape'] = list(shape)

    def forward(self, tensor: t.Tensor):
        """Return *tensor* reshaped to the stored shape."""
        return tensor.reshape(shape=self.shape)
class Index(Module, OpBase):
    """Serializable subscript op: ``forward`` returns ``content[index]``."""
    def __init__(self, index):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(index, int)
        self.param_dict['index'] = index

    def forward(self, content):
        """Pick one element from an indexable input (list/tuple/tensor)."""
        idx = self.param_dict['index']
        return content[idx]
class Select(Module, OpBase):
    """Serializable ``tensor.select`` op: takes slice *idx* along dimension *dim*."""
    def __init__(self, dim, idx):
        OpBase.__init__(self)
        Module.__init__(self)
        # note: replaces (not updates) the param_dict set up by OpBase
        self.param_dict = {'dim': dim, 'index': idx}

    def forward(self, tensor):
        params = self.param_dict
        return tensor.select(params['dim'], params['index'])
class SelectRange(Module, OpBase):
    """Serializable op: selects the LAST slice along *dim*, then python-slices
    the result with ``[start:end]``.

    NOTE(review): ``select(self.param_dict['dim'], -1)`` hard-codes index -1
    instead of using a configurable index; confirm this is intended and not a
    bug where ``tensor.narrow(dim, start, end - start)`` was meant.
    """
    def __init__(self, dim, start, end):
        OpBase.__init__(self)
        Module.__init__(self)
        self.param_dict = {'dim': dim, 'start': start, 'end': end}
    def forward(self, tensor):
        return tensor.select(
            self.param_dict['dim'], -1)[self.param_dict['start']: self.param_dict['end']]
class Sum(Module, OpBase):
    """Serializable reduce op: sums the input over a single dimension."""
    def __init__(self, dim):
        OpBase.__init__(self)
        Module.__init__(self)
        assert isinstance(dim, int)
        self.param_dict['dim'] = dim
    def forward(self, tensor):
        return tensor.sum(dim=self.param_dict['dim'])
class Squeeze(Module, OpBase):
    """Serializable squeeze op: drops all size-1 dimensions.

    ``**kwargs`` is accepted for construction-interface uniformity but unused.
    """
    def __init__(self, **kwargs):
        OpBase.__init__(self)
        Module.__init__(self)
    def forward(self, tensor: t.Tensor):
        return tensor.squeeze()
class Unsqueeze(Sum, OpBase):
    """Serializable unsqueeze op.

    Inherits from ``Sum`` only to reuse its ``dim`` bookkeeping in __init__;
    forward performs ``tensor.unsqueeze(dim)``, not a sum.
    """
    def __init__(self, dim):
        super(Unsqueeze, self).__init__(dim)
    def forward(self, tensor: t.Tensor):
        return tensor.unsqueeze(self.param_dict['dim'])
| 3,475 | 23.652482 | 89 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/serialization.py | import copy
import inspect
from collections import OrderedDict
try:
from torch.nn import Sequential as tSeq
from federatedml.nn.backend.torch import optim, init, nn
from federatedml.nn.backend.torch import operation
from federatedml.nn.backend.torch.base import Sequential, get_torch_instance
from federatedml.nn.backend.torch.cust import CustModel, CustLoss
from federatedml.nn.backend.torch.interactive import InteractiveLayer
except ImportError:
pass
def recover_layer_from_dict(nn_define, nn_dict):
    """Instantiate a single layer/op from its serialized dict definition.

    Args:
        nn_define: dict carrying a 'layer' (nn class name) or 'op' (operation
            class name) key plus the constructor kwargs, and optionally an
            'initializer' section for weight/bias initialization.
        nn_dict: mapping from class name to class used for the lookup.

    Returns:
        Tuple of (layer_instance, class_name).
    """
    init_param_dict = copy.deepcopy(nn_define)
    if 'layer' in nn_define:
        class_name = nn_define['layer']
        init_param_dict.pop('layer')
    elif 'op' in nn_define:
        class_name = nn_define['op']
        init_param_dict.pop('op')
    else:
        raise ValueError(
            'no layer or operation info found in nn define, please check your layer config and make'
            'sure they are correct for pytorch backend')
    # the initializer section is metadata, not a constructor argument
    if 'initializer' in init_param_dict:
        init_param_dict.pop('initializer')
    # find corresponding class
    if class_name == CustModel.__name__:
        nn_layer_class = CustModel
    elif class_name == InteractiveLayer.__name__:
        nn_layer_class = InteractiveLayer
    else:
        nn_layer_class = nn_dict[class_name]
    # create layer or Module
    if nn_layer_class == CustModel:  # convert to a plain pytorch model
        layer: CustModel = CustModel(module_name=init_param_dict['module_name'],
                                     class_name=init_param_dict['class_name'],
                                     **init_param_dict['param'])
        layer = layer.get_pytorch_model()
    elif nn_layer_class == InteractiveLayer:
        layer: InteractiveLayer = InteractiveLayer(**init_param_dict)
    else:
        layer = get_torch_instance(nn_layer_class, init_param_dict)
    # apply configured weight/bias initialization, if any
    if 'initializer' in nn_define:
        if 'weight' in nn_define['initializer']:
            init_para = nn_define['initializer']['weight']
            init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
            init_func(layer, **init_para['param'])
        if 'bias' in nn_define['initializer']:
            init_para = nn_define['initializer']['bias']
            init_func = init.str_fate_torch_init_func_map[init_para['init_func']]
            init_func(layer, init='bias', **init_para['param'])
    return layer, class_name
def recover_sequential_from_dict(nn_define):
    """Rebuild a torch ``Sequential`` (or a bare CustModel) from a definition dict.

    Keys of *nn_define* are expected to carry an integer order prefix
    (e.g. ``"0-linear"``); when any key lacks the prefix, the dict's own
    insertion order is used instead. A definition consisting of a single
    CustModel is returned directly rather than wrapped in a Sequential.
    """
    nn_define_dict = nn_define
    # look up layer classes by name among fate-torch nn wrappers and operations
    nn_dict = dict(inspect.getmembers(nn))
    op_dict = dict(inspect.getmembers(operation))
    nn_dict.update(op_dict)
    class_name_list = []
    try:
        # submitted models prefix keys with an int so layers keep their order
        add_dict = OrderedDict()
        keys = sorted(nn_define_dict.keys(), key=lambda x: int(x.split('-')[0]))
        for k in keys:
            layer, class_name = recover_layer_from_dict(nn_define_dict[k], nn_dict)
            add_dict[k] = layer
            class_name_list.append(class_name)
    except Exception:
        # Keys without the int prefix: fall back to insertion order.
        # (Exception, not BaseException, so KeyboardInterrupt/SystemExit
        # are not swallowed.)
        add_dict = OrderedDict()
        # drop any names collected by the failed ordered pass to avoid duplicates
        class_name_list = []
        for k, v in nn_define_dict.items():
            layer, class_name = recover_layer_from_dict(v, nn_dict)
            add_dict[k] = layer
            class_name_list.append(class_name)
    if len(class_name_list) == 1 and class_name_list[0] == CustModel.__name__:
        # a lone CustModel is returned as-is
        return list(add_dict.values())[0]
    return tSeq(add_dict)
def recover_optimizer_from_dict(define_dict):
    """Instantiate a fate-torch optimizer from its serialized config dict.

    *define_dict* must name the optimizer class under 'optimizer'; every
    remaining key (except 'config_type') is forwarded to the optimizer
    constructor as a keyword argument.
    """
    opt_dict = dict(inspect.getmembers(optim))
    from federatedml.util import LOGGER
    LOGGER.debug('define dict is {}'.format(define_dict))
    if 'optimizer' not in define_dict:
        raise ValueError('please specify optimizer type in the json config')
    opt_class = opt_dict[define_dict['optimizer']]
    param_dict = copy.deepcopy(define_dict)
    # strip bookkeeping keys that are not constructor arguments
    if 'optimizer' in param_dict:
        param_dict.pop('optimizer')
    if 'config_type' in param_dict:
        param_dict.pop('config_type')
    return opt_class(**param_dict)
def recover_loss_fn_from_dict(define_dict):
    """Instantiate a loss function from its serialized config dict.

    'loss_fn' names the loss class; a CustLoss definition is resolved to the
    underlying user-defined pytorch loss, anything else is looked up in the
    fate-torch nn module and built with the remaining keys as kwargs.
    """
    loss_fn_dict = dict(inspect.getmembers(nn))
    if 'loss_fn' not in define_dict:
        raise ValueError('please specify loss function in the json config')
    param_dict = copy.deepcopy(define_dict)
    param_dict.pop('loss_fn')
    if define_dict['loss_fn'] == CustLoss.__name__:
        return CustLoss(loss_module_name=param_dict['loss_module_name'],
                        class_name=param_dict['class_name'],
                        **param_dict['param']).get_pytorch_model()
    else:
        return loss_fn_dict[define_dict['loss_fn']](**param_dict)
if __name__ == '__main__':
pass
| 4,832 | 36.757813 | 100 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/torch_modules_extract/extract_pytorch_modules.py | import inspect
from torch.nn.modules import linear, activation, rnn, dropout, sparse, pooling, conv, transformer, batchnorm
from torch.nn.modules import padding, pixelshuffle
from torch.nn.modules import loss
class Required(object):
    """Sentinel marking an ``__init__`` parameter that has no default value."""
    def __init__(self):
        pass

    def __repr__(self):
        return '(Required Parameter)'
def get_all_class_obj(module, key_word=''):
    """Collect classes of *module* whose ``__module__`` contains 'modules.<key_word>'.

    Returns a tuple ``(classes, module_name)`` where *module_name* is the last
    dotted component of the final matching class's module (None when nothing
    matches).
    """
    matched = []
    module_name = None
    for _, candidate in inspect.getmembers(module):
        if not inspect.isclass(candidate):
            continue
        if 'modules.' + key_word in candidate.__module__:
            matched.append(candidate)
            module_name = candidate.__module__.split('.')[-1]
    return matched, module_name
def extract_init_param(class_):
    """Map each ``__init__`` parameter of *class_* (excluding self) to its default.

    Parameters without a default value are mapped to a ``Required()`` sentinel.
    Leftover debug ``print`` statements were removed — the function now has no
    side effects.
    """
    spec = inspect.getfullargspec(class_.__init__)
    keys = spec.args[1:]  # drop 'self'
    if not keys:
        return {}
    defaults = spec.defaults
    args_map = {}
    if defaults is not None:
        # defaults align with the LAST len(defaults) positional parameters
        for idx, name in enumerate(keys[-len(defaults):]):
            args_map[name] = defaults[idx]
    for name in keys:
        if name not in args_map:
            args_map[name] = Required()
    return args_map
def code_assembly(param, nn_class, module_name):
    """Render the source of one fate-torch wrapper class for torch class *nn_class*.

    *param* maps parameter names to defaults (``Required`` marks no-default
    params, which are emitted first and without a default). Loss classes get
    ``FateTorchLoss`` as the fate parent, everything else ``FateTorchLayer``.
    Returns the generated class source as a string.
    """
    if module_name == 'loss':
        parent_class = 'FateTorchLoss'
    else:
        parent_class = 'FateTorchLayer'
    para_str = ""
    non_default_param = ""
    init_str = """"""
    for k, v in param.items():
        # one param_dict[...] = ... assignment per constructor argument
        new_para = "\n        self.param_dict['{}'] = {}".format(k, k)
        init_str += new_para
        if isinstance(v, Required):
            non_default_param += str(k)
            non_default_param += ', '
            continue
        para_str += str(k)
        if isinstance(v, str):
            para_str += "='{}'".format(v)
        else:
            para_str += "={}".format(str(v))
        para_str += ', '
    # required parameters must precede defaulted ones in the signature
    para_str = non_default_param + para_str
    init_ = """
    def __init__(self, {}**kwargs):
        {}.__init__(self){}
        self.param_dict.update(kwargs)
        nn.modules.{}.{}.__init__(self, **self.param_dict)
    """.format(para_str, parent_class, init_str, module_name, nn_class)
    code = """
class {}({}, {}):
{}
    """.format(nn_class, 'nn.modules.{}.{}'.format(module_name, nn_class), parent_class, init_)
    return code
if __name__ == '__main__':
    # Scan each torch.nn.modules submodule for layer/loss classes.
    rs1 = get_all_class_obj(linear, 'linear')
    rs2 = get_all_class_obj(rnn, 'rnn')
    rs3 = get_all_class_obj(sparse, 'sparse')
    rs4 = get_all_class_obj(dropout, 'dropout')
    rs5 = get_all_class_obj(activation, 'activation')
    rs6 = get_all_class_obj(conv, 'conv')
    rs7 = get_all_class_obj(transformer, 'transformer')
    rs8 = get_all_class_obj(pooling, 'pooling')
    rs9 = get_all_class_obj(batchnorm, 'batchnorm')
    rs10 = get_all_class_obj(padding, 'padding')
    # BUGFIX: was 'pixielshuffle' (typo) — it never matched
    # 'modules.pixelshuffle', so PixelShuffle wrappers were never generated.
    rs11 = get_all_class_obj(pixelshuffle, 'pixelshuffle')
    rs12 = get_all_class_obj(loss, 'loss')

    # Emit one fate-torch wrapper class per discovered torch class.
    module_str = "from torch import nn\n\n"
    for rs in [rs1, rs2, rs3, rs4, rs5, rs6, rs7, rs8, rs9, rs10, rs11, rs12]:
        module_name = rs[1]
        for i in rs[0]:
            param = extract_init_param(i)
            module_str += code_assembly(param, i.__name__, module_name)

    # Context manager ensures the generated file is flushed and closed.
    with open('../_nn.py', 'w') as f:
        f.write(module_str)
| 3,479 | 27.064516 | 108 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/torch_modules_extract/extract_pytorch_optim.py | import inspect
from torch import optim
from federatedml.nn.backend.torch.torch_modules_extract.extract_pytorch_modules import extract_init_param, Required
from torch.optim.optimizer import required
def code_assembly(param, nn_class):
    """Render the source of one fate-torch optimizer wrapper class.

    The 'params' argument is special-cased into an optional ``params=None``
    first argument so an optimizer can be declared before model parameters
    exist. Note the outer ``.format`` receives *nn_class* twice: the second
    occurrence fills the ``{}`` inside the generated repr's fallback message
    string.
    """
    para_str = ""
    non_default_param = ""
    init_str = """"""
    special_param = ''
    for k, v in param.items():
        if k == 'params':
            # model parameters become an optional leading argument
            k = 'params'
            v = None
            special_param = k + '=' + str(v) + ', '
            continue
        else:
            new_para = "\n        self.param_dict['{}'] = {}".format(k, k)
            init_str += new_para
        if isinstance(v, Required) or v == required:
            non_default_param += str(k)
            non_default_param += ', '
            continue
        para_str += str(k)
        if isinstance(v, str):
            para_str += "='{}'".format(v)
        else:
            para_str += "={}".format(str(v))
        para_str += ', '
    # order: required params, then the special 'params', then defaulted params
    para_str = non_default_param + special_param + para_str
    init_ = """
    def __init__(self, {}):
        FateTorchOptimizer.__init__(self){}
        self.torch_class = type(self).__bases__[0]
        if params is None:
            return
        params = self.check_params(params)
        self.torch_class.__init__(self, params, **self.param_dict)
        # optim.{}.__init__(self, **self.param_dict)
    def __repr__(self):
        try:
            return type(self).__bases__[0].__repr__(self)
        except:
            return 'Optimizer {} without initiated parameters'.format(type(self).__name__)
    """.format(para_str, init_str, nn_class, nn_class)
    code = """
class {}(optim.{}, FateTorchOptimizer):
{}
    """.format(nn_class, nn_class, init_)
    return code
if __name__ == '__main__':
    memb = inspect.getmembers(optim)
    # Header imports for the generated _optim.py module.
    module_str = "from torch import optim\nfrom federatedml.nn.backend.torch.base import FateTorchLayer, Sequential\n" \
                 "from federatedml.nn.backend.torch.base import FateTorchOptimizer\n\n"
    for k, v in memb:
        # skip the abstract Optimizer base; wrap every concrete optimizer class
        if inspect.isclass(v) and k != 'Optimizer':
            param = extract_init_param(v)
            module_str += code_assembly(param, k)
    # Context manager ensures the generated file is flushed and closed.
    with open('../_optim.py', 'w') as f:
        f.write(module_str)
open('../_optim.py', 'w').write(module_str)
| 2,304 | 27.45679 | 121 | py |
FATE | FATE-master/python/federatedml/nn/backend/torch/test/test_cust_model.py | from federatedml.nn.backend.torch import nn, init
import json
from federatedml.nn.backend.torch import serialization as s
import torch as t
from federatedml.nn.backend.torch.import_hook import fate_torch_hook
from federatedml.nn.backend.torch.cust import CustModel
# Patch torch so the fate-torch wrapper classes are used by the nn API below.
fate_torch_hook(t)
# A user-defined model loaded by name through CustModel.
cust_resnet = CustModel(name='resnet')
transformer = nn.Transformer()
# Mix built-in layers with several CustModel entries in one Sequential.
seq = nn.Sequential(
    nn.Linear(10, 10),
    CustModel(name='lr', param={'input_size': 2}),
    CustModel(name='mf', param={'u_num': 100, 'i_num': 100, 'embd_dim': 32}),
    CustModel(name='resnet'),
    transformer,
)
# Round-trip: serialize the model definition to a dict/JSON, then rebuild it.
nn_define_json = json.dumps(seq.to_dict(), indent=3)
nn_define = seq.to_dict()
recover_seq = s.recover_sequential_from_dict(nn_define)
| 713 | 31.454545 | 77 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.