code stringlengths 17 6.64M |
|---|
class InferMobileNetV2(nn.Module):
    """MobileNetV2 whose per-layer widths and per-stage depths come from a searched config.

    Args:
      num_classes: size of the final classification layer.
      xchannels: string spec decoded by ``parse_channel_info`` into per-layer
        [in, out] channel pairs.
      xblocks: searched number of blocks to keep in each inverted-residual stage.
      dropout: dropout rate applied before the classifier.
    """

    def __init__(self, num_classes, xchannels, xblocks, dropout):
        super(InferMobileNetV2, self).__init__()
        block = InvertedResidual
        # (expansion t, base channels c, repeats n, first-stride s) per stage
        inverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]]
        assert (len(inverted_residual_setting) == len(xblocks)), 'invalid number of layers : {:} vs {:}'.format(len(inverted_residual_setting), len(xblocks))
        # each searched depth must not exceed the stage's maximum repeats
        for (block_num, ir_setting) in zip(xblocks, inverted_residual_setting):
            assert (block_num <= ir_setting[2]), '{:} vs {:}'.format(block_num, ir_setting)
        xchannels = parse_channel_info(xchannels)
        self.xchannels = xchannels
        self.message = 'InferMobileNetV2 : xblocks={:}'.format(xblocks)
        # stem. NOTE(review): this ConvBNReLU is called with 5 positional args —
        # presumably an (in, out, kernel, stride, padding) variant; confirm which
        # ConvBNReLU definition is in scope for this class.
        features = [ConvBNReLU(xchannels[0][0], xchannels[0][1], 3, 2, 1)]
        last_channel_idx = 1
        for (stage, (t, c, n, s)) in enumerate(inverted_residual_setting):
            for i in range(n):
                stride = (s if (i == 0) else 1)  # only the first block of a stage strides
                additv = (True if (i > 0) else False)  # residual add only after the first block
                module = block(self.xchannels[last_channel_idx], stride, t, additv)
                features.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, Cs={:}, stride={:}, expand={:}, original-C={:}'.format(stage, i, n, len(features), self.xchannels[last_channel_idx], stride, t, c)
                last_channel_idx += 1
                if ((i + 1) == xblocks[stage]):
                    # stage truncated by the searched depth: skip the channel entries
                    # of the dropped blocks and rewrite the next consumer's input
                    # width to the kept block's output width
                    out_channel = module.out_dim  # NOTE(review): unused local
                    for iiL in range((i + 1), n):
                        last_channel_idx += 1
                    self.xchannels[last_channel_idx][0] = module.out_dim
                    break
        features.append(ConvBNReLU(self.xchannels[last_channel_idx][0], self.xchannels[last_channel_idx][1], 1, 1, 1))
        assert ((last_channel_idx + 2) == len(self.xchannels)), '{:} vs {:}'.format(last_channel_idx, len(self.xchannels))
        self.features = nn.Sequential(*features)
        self.classifier = nn.Sequential(nn.Dropout(dropout), nn.Linear(self.xchannels[last_channel_idx][1], num_classes))
        self.apply(initialize_resnet)

    def get_message(self):
        """Return the human-readable construction log."""
        return self.message

    def forward(self, inputs):
        """Return (feature maps, logits); logits use global average pooling over H, W."""
        features = self.features(inputs)
        vectors = features.mean([2, 3])
        predicts = self.classifier(vectors)
        return (features, predicts)
|
class DynamicShapeTinyNet(nn.Module):
    """NAS-Bench-style tiny network whose per-cell channel counts are given explicitly.

    The network is a stem followed by three stages of N cells each, separated by
    two stride-2 ResNet reduction blocks; ``channels`` must therefore have
    3*N + 2 entries.
    """

    def __init__(self, channels: List[int], genotype: Any, num_classes: int):
        super(DynamicShapeTinyNet, self).__init__()
        self._channels = channels
        if (len(channels) % 3) != 2:
            raise ValueError('invalid number of layers : {:}'.format(len(channels)))
        self._num_stage = N = len(channels) // 3
        self.stem = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(channels[0]),
        )
        # reduction pattern: N normal cells, 1 reduction, N normal, 1 reduction, N normal
        layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
        prev_dim = channels[0]
        self.cells = nn.ModuleList()
        for cur_dim, is_reduction in zip(channels, layer_reductions):
            if is_reduction:
                new_cell = ResNetBasicblock(prev_dim, cur_dim, 2, True)
            else:
                new_cell = InferCell(genotype, prev_dim, cur_dim, 1)
            self.cells.append(new_cell)
            prev_dim = new_cell.out_dim
        self._num_layer = len(self.cells)
        self.lastact = nn.Sequential(nn.BatchNorm2d(prev_dim), nn.ReLU(inplace=True))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(prev_dim, num_classes)

    def get_message(self) -> Text:
        """Return a multi-line summary of the network and each cell."""
        lines = [self.extra_repr()]
        total = len(self.cells)
        for idx, cell in enumerate(self.cells):
            lines.append(' {:02d}/{:02d} :: {:}'.format(idx, total, cell.extra_repr()))
        return '\n'.join(lines)

    def extra_repr(self):
        return '{name}(C={_channels}, N={_num_stage}, L={_num_layer})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        """Return (pooled features, logits)."""
        feature = self.stem(inputs)
        for cell in self.cells:
            feature = cell(feature)
        out = self.lastact(feature)
        out = self.global_pooling(out)
        out = out.view(out.size(0), -1)
        logits = self.classifier(out)
        return (out, logits)
|
def parse_channel_info(xstring):
    """Decode a channel spec string into per-layer integer lists.

    The spec is space-separated groups of dash-separated integers, e.g.
    '3-16 16-24' -> [[3, 16], [16, 24]].
    """
    return [[int(token) for token in group.split('-')] for group in xstring.split(' ')]
|
def get_depth_choices(nDepth, return_num):
    """Candidate depth choices for a stage with ``nDepth`` blocks.

    2 -> (1, 2); 3 -> (1, 2, 3); for deeper stages every other depth is offered,
    always including ``nDepth`` itself. With ``return_num`` True only the number
    of choices is returned.

    Raises:
      ValueError: when nDepth is below 2 (or otherwise matches no branch).
    """
    if nDepth == 2:
        candidates = (1, 2)
    elif nDepth == 3:
        candidates = (1, 2, 3)
    elif nDepth > 3:
        candidates = list(range(1, nDepth + 1, 2))
        # for even nDepth the stride-2 range stops short; add the full depth
        if candidates[-1] < nDepth:
            candidates.append(nDepth)
    else:
        raise ValueError('invalid nDepth : {:}'.format(nDepth))
    return len(candidates) if return_num else candidates
|
class ConvBNReLU(nn.Module):
    """Conv2d optionally preceded by a 2x2 average pool and followed by BN / ReLU.

    Records the first-seen input/output spatial shapes so FLOPs can be computed
    later via :meth:`get_flops`.
    """

    num_conv = 1

    def __init__(self, nIn, nOut, kernel, stride, padding, bias, has_avg, has_bn, has_relu):
        super(ConvBNReLU, self).__init__()
        # spatial shapes are captured lazily on the first forward pass
        self.InShape = None
        self.OutShape = None
        self.choices = get_width_choices(nOut)
        self.register_buffer('choices_tensor', torch.Tensor(self.choices))
        self.avg = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) if has_avg else None
        self.conv = nn.Conv2d(nIn, nOut, kernel_size=kernel, stride=stride,
                              padding=padding, dilation=1, groups=1, bias=bias)
        self.bn = nn.BatchNorm2d(nOut) if has_bn else None
        self.relu = nn.ReLU(inplace=False) if has_relu else None
        self.in_dim = nIn
        self.out_dim = nOut

    def get_flops(self, divide=1):
        """Return the convolution FLOPs (divided by ``divide``) for the recorded shapes.

        Requires at least one forward pass so that OutShape is known.
        """
        iC, oC = self.in_dim, self.out_dim
        assert (iC <= self.conv.in_channels) and (oC <= self.conv.out_channels), '{:} vs {:} | {:} vs {:}'.format(iC, self.conv.in_channels, oC, self.conv.out_channels)
        assert isinstance(self.InShape, tuple) and (len(self.InShape) == 2), 'invalid in-shape : {:}'.format(self.InShape)
        assert isinstance(self.OutShape, tuple) and (len(self.OutShape) == 2), 'invalid out-shape : {:}'.format(self.OutShape)
        kh, kw = self.conv.kernel_size
        per_position = (kh * kw * 1.0) / self.conv.groups
        positions = self.OutShape[0] * self.OutShape[1]
        flops = per_position * positions / divide * iC * oC
        if self.conv.bias is not None:
            flops += positions / divide
        return flops

    def forward(self, inputs):
        out = self.avg(inputs) if self.avg else inputs
        conv = self.conv(out)
        out = self.bn(conv) if self.bn else conv
        if self.relu:
            out = self.relu(out)
        # remember the spatial shapes of the first batch for FLOP accounting
        if self.InShape is None:
            self.InShape = (inputs.size(-2), inputs.size(-1))
            self.OutShape = (out.size(-2), out.size(-1))
        return out
|
class ResNetBasicblock(nn.Module):
    """CIFAR ResNet basic block: two 3x3 ConvBNReLU layers plus a residual shortcut."""

    expansion = 1
    num_conv = 2

    def __init__(self, inplanes, planes, stride):
        super(ResNetBasicblock, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        self.conv_a = ConvBNReLU(inplanes, planes, 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_b = ConvBNReLU(planes, planes, 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=False)
        if stride == 2:
            # spatial reduction on the shortcut: avg-pool then 1x1 conv (no BN)
            self.downsample = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
        elif inplanes != planes:
            # channel matching on the shortcut: 1x1 conv + BN
            self.downsample = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = planes
        self.search_mode = 'basic'

    def get_flops(self, divide=1):
        """Total FLOPs of both convs plus the shortcut module (when present)."""
        total = self.conv_a.get_flops(divide) + self.conv_b.get_flops(divide)
        if hasattr(self.downsample, 'get_flops'):
            total += self.downsample.get_flops(divide)
        return total

    def forward(self, inputs):
        main_path = self.conv_b(self.conv_a(inputs))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        out = additive_func(shortcut, main_path)
        return nn.functional.relu(out, inplace=True)
|
class ResNetBottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (x4) plus residual shortcut."""

    expansion = 4
    num_conv = 3

    def __init__(self, inplanes, planes, stride):
        super(ResNetBottleneck, self).__init__()
        assert stride in (1, 2), 'invalid stride {:}'.format(stride)
        expanded = planes * self.expansion
        self.conv_1x1 = ConvBNReLU(inplanes, planes, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_3x3 = ConvBNReLU(planes, planes, 3, stride, 1, False, has_avg=False, has_bn=True, has_relu=True)
        self.conv_1x4 = ConvBNReLU(planes, expanded, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        if stride == 2:
            # spatial reduction on the shortcut: avg-pool then 1x1 conv (no BN)
            self.downsample = ConvBNReLU(inplanes, expanded, 1, 1, 0, False, has_avg=True, has_bn=False, has_relu=False)
        elif inplanes != expanded:
            # channel matching on the shortcut: 1x1 conv + BN
            self.downsample = ConvBNReLU(inplanes, expanded, 1, 1, 0, False, has_avg=False, has_bn=True, has_relu=False)
        else:
            self.downsample = None
        self.out_dim = expanded
        self.search_mode = 'basic'

    def get_range(self):
        """Concatenated width ranges of the three convolutions."""
        return self.conv_1x1.get_range() + self.conv_3x3.get_range() + self.conv_1x4.get_range()

    def get_flops(self, divide):
        """Total FLOPs of the three convs plus the shortcut module (when present)."""
        total = self.conv_1x1.get_flops(divide)
        total += self.conv_3x3.get_flops(divide)
        total += self.conv_1x4.get_flops(divide)
        if hasattr(self.downsample, 'get_flops'):
            total += self.downsample.get_flops(divide)
        return total

    def forward(self, inputs):
        main_path = self.conv_1x4(self.conv_3x3(self.conv_1x1(inputs)))
        shortcut = inputs if self.downsample is None else self.downsample(inputs)
        out = additive_func(shortcut, main_path)
        return nn.functional.relu(out, inplace=True)
|
class SearchDepthCifarResNet(nn.Module):
    """CIFAR ResNet that searches over per-stage depth via learnable attention logits.

    ``depth_attentions`` holds one row of logits per stage (3 stages); a softmax
    over each row gives the probability of stopping that stage at each candidate
    depth from :func:`get_depth_choices`.
    """

    def __init__(self, block_name, depth, num_classes):
        super(SearchDepthCifarResNet, self).__init__()
        # total depth -> number of blocks per stage, depending on the block type
        if block_name == 'ResNetBasicblock':
            block = ResNetBasicblock
            assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
            layer_blocks = (depth - 2) // 6
        elif block_name == 'ResNetBottleneck':
            block = ResNetBottleneck
            assert (depth - 2) % 9 == 0, 'depth should be one of 164'
            layer_blocks = (depth - 2) // 9
        else:
            raise ValueError('invalid block : {:}'.format(block_name))
        self.message = 'SearchShapeCifarResNet : Depth : {:} , Layers for each block : {:}'.format(depth, layer_blocks)
        self.num_classes = num_classes
        self.channels = [16]
        self.layers = nn.ModuleList([ConvBNReLU(3, 16, 3, 1, 1, False, has_avg=False, has_bn=True, has_relu=True)])
        self.InShape = None
        # depth_info[last-layer-index-of-stage] -> candidate exit layer indices of that stage
        self.depth_info = OrderedDict()
        # depth_at_i[layer-index] -> (stage, index of the smallest depth choice covering it)
        self.depth_at_i = OrderedDict()
        for stage in range(3):
            cur_block_choices = get_depth_choices(layer_blocks, False)
            assert cur_block_choices[-1] == layer_blocks, 'stage={:}, {:} vs {:}'.format(stage, cur_block_choices, layer_blocks)
            self.message += '\nstage={:} ::: depth-block-choices={:} for {:} blocks.'.format(stage, cur_block_choices, layer_blocks)
            block_choices, xstart = [], len(self.layers)
            for iL in range(layer_blocks):
                iC = self.channels[-1]
                planes = 16 * (2 ** stage)
                stride = 2 if (stage > 0 and iL == 0) else 1  # first block of stages 1/2 strides
                module = block(iC, planes, stride)
                self.channels.append(module.out_dim)
                self.layers.append(module)
                self.message += '\nstage={:}, ilayer={:02d}/{:02d}, block={:03d}, iC={:3d}, oC={:3d}, stride={:}'.format(stage, iL, layer_blocks, (len(self.layers) - 1), iC, module.out_dim, stride)
                layer_index = len(self.layers) - 1
                if (iL + 1) in cur_block_choices:
                    block_choices.append(layer_index)
                if (iL + 1) == layer_blocks:
                    self.depth_info[layer_index] = {'choices': block_choices, 'stage': stage, 'xstart': xstart}
        self.depth_info_list = []
        for xend, info in self.depth_info.items():
            self.depth_info_list.append((xend, info))
            xstart, xstage = info['xstart'], info['stage']
            for ilayer in range(xstart, xend + 1):
                # smallest depth choice whose exit layer is at or beyond this layer
                idx = bisect_right(info['choices'], ilayer - 1)
                self.depth_at_i[ilayer] = (xstage, idx)
        self.avgpool = nn.AvgPool2d(8)
        self.classifier = nn.Linear(module.out_dim, num_classes)
        self.InShape = None
        self.tau = -1  # Gumbel-softmax temperature; configured via set_tau
        self.search_mode = 'basic'
        self.register_parameter('depth_attentions', nn.Parameter(torch.Tensor(3, get_depth_choices(layer_blocks, True))))
        nn.init.normal_(self.depth_attentions, 0, 0.01)
        self.apply(initialize_resnet)

    def arch_parameters(self):
        """Architecture parameters (depth logits) for the architecture optimizer."""
        return [self.depth_attentions]

    def base_parameters(self):
        """Weight parameters (everything except the depth logits)."""
        return list(self.layers.parameters()) + list(self.avgpool.parameters()) + list(self.classifier.parameters())

    def get_flop(self, mode, config_dict, extra_info):
        """Estimate the MFLOPs of a concrete depth selection.

        mode='genotype' takes the argmax depth per stage, 'max' the deepest
        choice, and 'random' samples per-stage depths from the softmax.
        Returns MFLOPs, or (MFLOPs, updated config_dict) when config_dict is given.
        """
        if config_dict is not None:
            config_dict = config_dict.copy()
        if mode == 'genotype':
            with torch.no_grad():
                depth_probs = nn.functional.softmax(self.depth_attentions, dim=1)
                choices = torch.argmax(depth_probs, dim=1).cpu().tolist()
        elif mode == 'max':
            # BUGFIX: this branch referenced `depth_probs` before assignment
            # (NameError); the sizes come from the attention tensor directly.
            choices = [self.depth_attentions.size(1) - 1 for _ in range(self.depth_attentions.size(0))]
        elif mode == 'random':
            with torch.no_grad():
                depth_probs = nn.functional.softmax(self.depth_attentions, dim=1)
                # BUGFIX: multinomial on a 2-D input returns shape (stages, 1);
                # flatten so each entry of `choices` is an int, not a 1-element list.
                choices = torch.multinomial(depth_probs, 1, False).view(-1).cpu().tolist()
        else:
            raise ValueError('invalid mode : {:}'.format(mode))
        selected_layers = []
        for choice, xvalue in zip(choices, self.depth_info_list):
            xtemp = xvalue[1]['choices'][choice] - xvalue[1]['xstart'] + 1
            selected_layers.append(xtemp)
        flop = 0
        for i, layer in enumerate(self.layers):
            if i in self.depth_at_i:
                xstagei, xatti = self.depth_at_i[i]
                if xatti <= choices[xstagei]:  # layer kept under the chosen depth
                    flop += layer.get_flops()
                else:  # layer dropped
                    flop += 0
            else:
                flop += layer.get_flops()
        flop += self.classifier.in_features * self.classifier.out_features
        if config_dict is None:
            return flop / 1000000.0
        else:
            config_dict['xblocks'] = selected_layers
            config_dict['super_type'] = 'infer-depth'
            config_dict['estimated_FLOP'] = flop / 1000000.0
            return (flop / 1000000.0, config_dict)

    def get_arch_info(self):
        """Return (summary string, per-stage discrepancy = top-1 minus top-2 probability)."""
        string = 'for depth, there are {:} attention probabilities.'.format(len(self.depth_attentions))
        string += '\n{:}'.format(self.depth_info)
        discrepancy = []
        with torch.no_grad():
            for i, att in enumerate(self.depth_attentions):
                prob = nn.functional.softmax(att, dim=0)
                prob = prob.cpu()
                selc = prob.argmax().item()
                prob = prob.tolist()
                prob = ['{:.3f}'.format(x) for x in prob]
                xstring = '{:03d}/{:03d}-th : {:}'.format(i, len(self.depth_attentions), ' '.join(prob))
                logt = ['{:.4f}'.format(x) for x in att.cpu().tolist()]
                xstring += ' || {:17s}'.format(' '.join(logt))
                prob = sorted([float(x) for x in prob])
                disc = prob[-1] - prob[-2]
                xstring += ' || discrepancy={:.2f} || select={:}/{:}'.format(disc, selc, len(prob))
                discrepancy.append(disc)
                string += '\n{:}'.format(xstring)
        return (string, discrepancy)

    def set_tau(self, tau_max, tau_min, epoch_ratio):
        """Cosine-anneal the Gumbel temperature from tau_max down to tau_min."""
        assert (epoch_ratio >= 0) and (epoch_ratio <= 1), 'invalid epoch-ratio : {:}'.format(epoch_ratio)
        tau = tau_min + (tau_max - tau_min) * (1 + math.cos(math.pi * epoch_ratio)) / 2
        self.tau = tau

    def get_message(self):
        """Return the human-readable construction log."""
        return self.message

    def forward(self, inputs):
        if self.search_mode == 'basic':
            return self.basic_forward(inputs)
        elif self.search_mode == 'search':
            return self.search_forward(inputs)
        else:
            raise ValueError('invalid search_mode = {:}'.format(self.search_mode))

    def search_forward(self, inputs):
        """Depth-weighted forward pass; returns (logits, expected MFLOPs as a tensor)."""
        flop_depth_probs = nn.functional.softmax(self.depth_attentions, dim=1)
        # reversed cumulative sum: probability that each layer is still active
        flop_depth_probs = torch.flip(torch.cumsum(torch.flip(flop_depth_probs, [1]), 1), [1])
        selected_depth_probs = select2withP(self.depth_attentions, self.tau, True)
        x, flops = inputs, []
        feature_maps = []
        for i, layer in enumerate(self.layers):
            layer_i = layer(x)
            feature_maps.append(layer_i)
            if i in self.depth_info:
                # end of a stage: mix the candidate exit feature maps by depth prob
                choices = self.depth_info[i]['choices']
                xstagei = self.depth_info[i]['stage']
                possible_tensors = []
                for tempi, A in enumerate(choices):
                    xtensor = feature_maps[A]
                    possible_tensors.append(xtensor)
                weighted_sum = sum(xtensor * W for xtensor, W in zip(possible_tensors, selected_depth_probs[xstagei]))
                x = weighted_sum
            else:
                x = layer_i
            if i in self.depth_at_i:
                xstagei, xatti = self.depth_at_i[i]
                x_expected_flop = flop_depth_probs[xstagei, xatti] * layer.get_flops(1000000.0)
            else:
                x_expected_flop = layer.get_flops(1000000.0)
            flops.append(x_expected_flop)
        flops.append((self.classifier.in_features * self.classifier.out_features) * 1.0 / 1000000.0)
        features = self.avgpool(x)
        features = features.view(features.size(0), -1)
        logits = linear_forward(features, self.classifier)
        return (logits, torch.stack([sum(flops)]))

    def basic_forward(self, inputs):
        """Plain forward through all layers; returns (pooled features, logits)."""
        if self.InShape is None:
            self.InShape = (inputs.size(-2), inputs.size(-1))
        x = inputs
        for i, layer in enumerate(self.layers):
            x = layer(x)
        features = self.avgpool(x)
        features = features.view(features.size(0), -1)
        logits = self.classifier(features)
        return (features, logits)
|
class NetworkCIFAR(nn.Module):
    """DARTS-style CIFAR network: stem + 3N cells (two reduction cells) + linear classifier.

    Args:
      C: base channel count.
      N: number of normal cells per stage.
      stem_multiplier: multiplier for the stem's output channels.
      auxiliary: whether to attach an auxiliary head at the second reduction cell.
      genotype: cell genotype passed to InferCell.
      num_classes: classifier output size.
    """

    def __init__(self, C, N, stem_multiplier, auxiliary, genotype, num_classes):
        super(NetworkCIFAR, self).__init__()
        self._C = C
        self._layerN = N
        self._stem_multiplier = stem_multiplier
        C_curr = (self._stem_multiplier * C)
        self.stem = CifarHEAD(C_curr)
        # per-cell channels / reduction flags: N @C, reduce @2C, N @2C, reduce @4C, N @4C
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        block_indexs = ((((([0] * N) + [(- 1)]) + ([1] * N)) + [(- 1)]) + ([2] * N))  # NOTE(review): unused
        block2index = {0: [], 1: [], 2: []}  # NOTE(review): unused
        (C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
        (reduction_prev, spatial, dims) = (False, 1, [])
        self.auxiliary_index = None
        self.auxiliary_head = None
        self.cells = nn.ModuleList()
        for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = InferCell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells.append(cell)
            # each cell outputs cell._multiplier * C_curr channels
            (C_prev_prev, C_prev) = (C_prev, (cell._multiplier * C_curr))
            # the auxiliary head (if enabled) hangs off the second reduction cell
            if (reduction and (C_curr == (C * 4))):
                if auxiliary:
                    self.auxiliary_head = AuxiliaryHeadCIFAR(C_prev, num_classes)
                    self.auxiliary_index = index
            if reduction:
                spatial *= 2
            dims.append((C_prev, spatial))  # NOTE(review): dims is never read afterwards
        self._Layer = len(self.cells)
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.drop_path_prob = (- 1)

    def update_drop_path(self, drop_path_prob):
        """Set the drop-path probability forwarded to every cell during training."""
        self.drop_path_prob = drop_path_prob

    def auxiliary_param(self):
        """Parameters of the auxiliary head ([] when it is disabled)."""
        if (self.auxiliary_head is None):
            return []
        else:
            return list(self.auxiliary_head.parameters())

    def get_message(self):
        return self.extra_repr()

    def extra_repr(self):
        return '{name}(C={_C}, N={_layerN}, L={_Layer}, stem={_stem_multiplier}, drop-path={drop_path_prob})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, inputs):
        """Return (pooled features, logits) or (features, [logits, logits_aux]) in training."""
        (stem_feature, logits_aux) = (self.stem(inputs), None)
        # each cell consumes the two most recent results (DARTS two-input cells)
        cell_results = [stem_feature, stem_feature]
        for (i, cell) in enumerate(self.cells):
            cell_feature = cell(cell_results[(- 2)], cell_results[(- 1)], self.drop_path_prob)
            cell_results.append(cell_feature)
            # auxiliary logits only during training, at the recorded cell index
            if ((self.auxiliary_index is not None) and (i == self.auxiliary_index) and self.training):
                logits_aux = self.auxiliary_head(cell_results[(- 1)])
        out = self.global_pooling(cell_results[(- 1)])
        out = out.view(out.size(0), (- 1))
        logits = self.classifier(out)
        if (logits_aux is None):
            return (out, logits)
        else:
            return (out, [logits, logits_aux])
|
class NetworkImageNet(nn.Module):
    """DARTS-style ImageNet network: two stride-2 stems + 3N cells + linear classifier."""

    def __init__(self, C, N, auxiliary, genotype, num_classes):
        super(NetworkImageNet, self).__init__()
        self._C = C
        self._layerN = N
        # per-cell channels / reduction flags: N @C, reduce @2C, N @2C, reduce @4C, N @4C
        layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
        layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
        # stem0 downsamples twice (3 -> C/2 -> C); stem1 downsamples once more
        self.stem0 = nn.Sequential(nn.Conv2d(3, (C // 2), kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d((C // 2)), nn.ReLU(inplace=True), nn.Conv2d((C // 2), C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        self.stem1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
        (C_prev_prev, C_prev, C_curr, reduction_prev) = (C, C, C, True)
        self.cells = nn.ModuleList()
        self.auxiliary_index = None
        for (i, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
            cell = InferCell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
            reduction_prev = reduction
            self.cells += [cell]
            (C_prev_prev, C_prev) = (C_prev, (cell._multiplier * C_curr))
            # remember where to attach the auxiliary head (second reduction cell)
            if (reduction and (C_curr == (C * 4))):
                C_to_auxiliary = C_prev
                self.auxiliary_index = i
        self._NNN = len(self.cells)
        # NOTE(review): C_to_auxiliary is only bound if a (C*4) reduction cell was
        # built above; with auxiliary=True and no such cell this raises NameError —
        # confirm callers always pass N >= 1.
        if auxiliary:
            self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
        else:
            self.auxiliary_head = None
        self.global_pooling = nn.AvgPool2d(7)
        self.classifier = nn.Linear(C_prev, num_classes)
        self.drop_path_prob = (- 1)

    def update_drop_path(self, drop_path_prob):
        """Set the drop-path probability forwarded to every cell during training."""
        self.drop_path_prob = drop_path_prob

    def extra_repr(self):
        return '{name}(C={_C}, N=[{_layerN}, {_NNN}], aux-index={auxiliary_index}, drop-path={drop_path_prob})'.format(name=self.__class__.__name__, **self.__dict__)

    def get_message(self):
        return self.extra_repr()

    def auxiliary_param(self):
        """Parameters of the auxiliary head ([] when it is disabled)."""
        if (self.auxiliary_head is None):
            return []
        else:
            return list(self.auxiliary_head.parameters())

    def forward(self, inputs):
        """Return (pooled features, logits) or (features, [logits, logits_aux]) in training."""
        s0 = self.stem0(inputs)
        s1 = self.stem1(s0)
        logits_aux = None
        for (i, cell) in enumerate(self.cells):
            (s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
            # auxiliary logits only during training, at the recorded cell index
            if ((i == self.auxiliary_index) and self.auxiliary_head and self.training):
                logits_aux = self.auxiliary_head(s1)
        out = self.global_pooling(s1)
        logits = self.classifier(out.view(out.size(0), (- 1)))
        if (logits_aux is None):
            return (out, logits)
        else:
            return (out, [logits, logits_aux])
|
class MixedOp(nn.Module):
    """One search edge holding every candidate operation from PRIMITIVES.

    forward() dispatches on its arguments: a named op, a weighted mixture, or
    the raw list of all candidate outputs.
    """

    def __init__(self, C, stride, PRIMITIVES):
        super(MixedOp, self).__init__()
        self._ops = nn.ModuleList()
        self.name2idx = {}
        for position, op_name in enumerate(PRIMITIVES):
            self._ops.append(OPS[op_name](C, C, stride, False))
            assert op_name not in self.name2idx, '{:} has already in'.format(op_name)
            self.name2idx[op_name] = position

    def forward(self, x, weights, op_name):
        if op_name is not None:
            # discrete evaluation of a single named operation
            return self._ops[self.name2idx[op_name]](x)
        if weights is None:
            # no weights: return every candidate's output
            return [candidate(x) for candidate in self._ops]
        # architecture-weighted mixture of all candidates
        return sum(w * candidate(x) for w, candidate in zip(weights, self._ops))
|
class SearchCell(nn.Module):
    """DARTS search cell: every edge is a MixedOp over the candidate PRIMITIVES.

    Node i of the cell has (2 + i) incoming edges; forward() either evaluates a
    concrete genotype (modes[0] set) or a weighted/unweighted search pass
    (modes[0] is None, behaviour chosen by modes[1]).
    """

    def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev, PRIMITIVES, use_residual):
        super(SearchCell, self).__init__()
        self.reduction = reduction
        self.PRIMITIVES = deepcopy(PRIMITIVES)
        # preprocess0 halves the spatial size when the previous cell was a reduction cell
        if reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, 2, affine=False)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
        self._steps = steps
        self._multiplier = multiplier
        self._use_residual = use_residual
        # one MixedOp per edge; edges into the two cell inputs stride in reduction cells
        self._ops = nn.ModuleList()
        for i in range(self._steps):
            for j in range((2 + i)):
                stride = (2 if (reduction and (j < 2)) else 1)
                op = MixedOp(C, stride, self.PRIMITIVES)
                self._ops.append(op)

    def extra_repr(self):
        return '{name}(residual={_use_residual}, steps={_steps}, multiplier={_multiplier})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, S0, S1, weights, connect, adjacency, drop_prob, modes):
        """Run the cell.

        modes[0] is None -> search pass: modes[1]=='normal' uses weights/connect/
        adjacency, modes[1]=='only_W' averages all candidate ops. Otherwise
        modes[0] is a genotype evaluated discretely.
        """
        if (modes[0] is None):
            if (modes[1] == 'normal'):
                output = self.__forwardBoth(S0, S1, weights, connect, adjacency, drop_prob)
            elif (modes[1] == 'only_W'):
                output = self.__forwardOnlyW(S0, S1, drop_prob)
            # NOTE(review): any other modes[1] leaves `output` unassigned and the
            # residual check below raises NameError — confirm callers only pass
            # 'normal' or 'only_W'.
        else:
            test_genotype = modes[0]
            if self.reduction:
                (operations, concats) = (test_genotype.reduce, test_genotype.reduce_concat)
            else:
                (operations, concats) = (test_genotype.normal, test_genotype.normal_concat)
            (s0, s1) = (self.preprocess0(S0), self.preprocess1(S1))
            (states, offset) = ([s0, s1], 0)
            assert (self._steps == len(operations)), '{:} vs. {:}'.format(self._steps, len(operations))
            # each step combines two named ops applied to earlier states
            for (i, (opA, opB)) in enumerate(operations):
                A = self._ops[(offset + opA[1])](states[opA[1]], None, opA[0])
                B = self._ops[(offset + opB[1])](states[opB[1]], None, opB[0])
                state = (A + B)
                offset += len(states)
                states.append(state)
            output = torch.cat([states[i] for i in concats], dim=1)
        if (self._use_residual and (S1.size() == output.size())):
            return (S1 + output)
        else:
            return output

    def __forwardBoth(self, S0, S1, weights, connect, adjacency, drop_prob):
        """Search pass weighting both operations (weights) and connections (connect/adjacency)."""
        (s0, s1) = (self.preprocess0(S0), self.preprocess1(S1))
        (states, offset) = ([s0, s1], 0)
        for i in range(self._steps):
            clist = []
            for (j, h) in enumerate(states):
                x = self._ops[(offset + j)](h, weights[(offset + j)], None)
                if (self.training and (drop_prob > 0.0)):
                    # scale drop-path by the number of incoming edges
                    x = drop_path(x, math.pow(drop_prob, (1.0 / len(states))))
                clist.append(x)
            connection = torch.mm(connect['{:}'.format(i)], adjacency[i]).squeeze(0)
            state = sum(((w * node) for (w, node) in zip(connection, clist)))
            offset += len(states)
            states.append(state)
        return torch.cat(states[(- self._multiplier):], dim=1)

    def __forwardOnlyW(self, S0, S1, drop_prob):
        """Search pass without architecture weights: average all candidate outputs per node."""
        (s0, s1) = (self.preprocess0(S0), self.preprocess1(S1))
        (states, offset) = ([s0, s1], 0)
        for i in range(self._steps):
            clist = []
            for (j, h) in enumerate(states):
                # MixedOp with weights=None returns the list of all candidate outputs
                xs = self._ops[(offset + j)](h, None, None)
                clist += xs
            if (self.training and (drop_prob > 0.0)):
                xlist = [drop_path(x, math.pow(drop_prob, (1.0 / len(states)))) for x in clist]
            else:
                xlist = clist
            state = ((sum(xlist) * 2) / len(xlist))
            offset += len(states)
            states.append(state)
        return torch.cat(states[(- self._multiplier):], dim=1)
|
class InferCell(nn.Module):
    """A fixed (inferred) DARTS-style cell built from a genotype.

    Takes the two previous cell outputs, applies the genotype's operations, and
    concatenates the states listed in the genotype's concat list.
    """

    def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
        super(InferCell, self).__init__()
        # BUGFIX: removed a stray debug `print(C_prev_prev, C_prev, C)` left in the constructor.
        if reduction_prev is None:
            self.preprocess0 = Identity()
        elif reduction_prev:
            self.preprocess0 = FactorizedReduce(C_prev_prev, C, 2)
        else:
            self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
        self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
        if reduction:
            step_ops, concat = genotype.reduce, genotype.reduce_concat
        else:
            step_ops, concat = genotype.normal, genotype.normal_concat
        self._steps = len(step_ops)
        self._concat = concat
        self._multiplier = len(concat)
        self._ops = nn.ModuleList()
        self._indices = []
        for operations in step_ops:
            for name, index in operations:
                stride = 2 if (reduction and index < 2) else 1
                if (reduction_prev is None) and (index == 0):
                    # input 0 skipped preprocessing (Identity), so the op itself
                    # must map C_prev_prev -> C
                    op = OPS[name](C_prev_prev, C, stride, True)
                else:
                    op = OPS[name](C, C, stride, True)
                self._ops.append(op)
                self._indices.append(index)

    def extra_repr(self):
        return '{name}(steps={_steps}, concat={_concat})'.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, S0, S1, drop_prob):
        """Run the cell on the two previous outputs; drop_prob enables drop-path in training."""
        s0 = self.preprocess0(S0)
        s1 = self.preprocess1(S1)
        states = [s0, s1]
        for i in range(self._steps):
            h1 = states[self._indices[2 * i]]
            h2 = states[self._indices[2 * i + 1]]
            op1 = self._ops[2 * i]
            op2 = self._ops[2 * i + 1]
            h1 = op1(h1)
            h2 = op2(h2)
            # drop-path regularization, skipped for identity ops
            if self.training and drop_prob > 0.0:
                if not isinstance(op1, Identity):
                    h1 = drop_path(h1, drop_prob)
                if not isinstance(op2, Identity):
                    h2 = drop_path(h2, drop_prob)
            states.append(h1 + h2)
        output = torch.cat([states[i] for i in self._concat], dim=1)
        return output
|
def build_genotype_from_dict(xdict):
    """Re-create a Genotype from its dict form, keeping only (op, input) per edge."""
    def strip_extras(nodes):
        # drop any trailing per-op values, keeping just (op_name, input_index)
        return [tuple((op[0], op[1]) for op in node) for node in nodes]

    return Genotype(
        normal=strip_extras(xdict['normal']),
        normal_concat=xdict['normal_concat'],
        reduce=strip_extras(xdict['reduce']),
        reduce_concat=xdict['reduce_concat'],
        connectN=None,
        connects=None,
    )
|
class ImageNetHEAD(nn.Sequential):
    """ImageNet stem: two 3x3 convolutions (3 -> C/2 -> C), each followed by BN.

    The first conv always strides by 2; the second uses the given ``stride``.
    """

    def __init__(self, C, stride=2):
        super(ImageNetHEAD, self).__init__()
        half = C // 2
        specs = [
            ('conv1', nn.Conv2d(3, half, kernel_size=3, stride=2, padding=1, bias=False)),
            ('bn1', nn.BatchNorm2d(half)),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv2', nn.Conv2d(half, C, kernel_size=3, stride=stride, padding=1, bias=False)),
            ('bn2', nn.BatchNorm2d(C)),
        ]
        for name, module in specs:
            self.add_module(name, module)
|
class CifarHEAD(nn.Sequential):
    """CIFAR stem: one 3x3 stride-1 convolution (3 -> C) followed by BN."""

    def __init__(self, C):
        super(CifarHEAD, self).__init__()
        self.add_module('conv', nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False))
        self.add_module('bn', nn.BatchNorm2d(C))
|
class AuxiliaryHeadCIFAR(nn.Module):
    """Auxiliary classifier for CIFAR (expects 8x8 input feature maps).

    AvgPool(5, stride=3) -> 1x1 conv to 128 -> 2x2 conv to 768 -> linear.
    """

    def __init__(self, C, num_classes):
        'assuming input size 8x8'
        super(AuxiliaryHeadCIFAR, self).__init__()
        stages = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feat = self.features(x)
        return self.classifier(feat.view(feat.size(0), -1))
|
class AuxiliaryHeadImageNet(nn.Module):
    """Auxiliary classifier for ImageNet.

    AvgPool(5, stride=2) -> 1x1 conv to 128 -> 2x2 conv to 768 -> linear.
    """

    def __init__(self, C, num_classes):
        'assuming input size 14x14'
        super(AuxiliaryHeadImageNet, self).__init__()
        stages = [
            nn.ReLU(inplace=True),
            nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False),
            nn.Conv2d(C, 128, 1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 768, 2, bias=False),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
        ]
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Linear(768, num_classes)

    def forward(self, x):
        feat = self.features(x)
        return self.classifier(feat.view(feat.size(0), -1))
|
def obtain_nas_infer_model(config, extra_model_path=None):
    """Build an inference model from a NAS config.

    The genotype comes either from ``config.genotype`` (looked up by name) or,
    when that is None, from the checkpoint at ``extra_model_path``.

    Raises:
      ValueError: unknown arch/dataset, or no usable genotype source.
    """
    if config.arch == 'dxys':
        from .DXYs import CifarNet, ImageNet, Networks
        from .DXYs import build_genotype_from_dict
        if config.genotype is None:
            # BUGFIX: also reject extra_model_path=None (the original fell through
            # and crashed inside torch.load); the "confiig" typo is fixed as well.
            if (extra_model_path is None) or (not os.path.isfile(extra_model_path)):
                raise ValueError('When genotype in config is None, extra_model_path must be set as a path instead of {:}'.format(extra_model_path))
            xdata = torch.load(extra_model_path)
            current_epoch = xdata['epoch']
            genotype_dict = xdata['genotypes'][current_epoch - 1]
            genotype = build_genotype_from_dict(genotype_dict)
        else:
            genotype = Networks[config.genotype]
        if config.dataset == 'cifar':
            return CifarNet(config.ichannel, config.layers, config.stem_multi, config.auxiliary, genotype, config.class_num)
        elif config.dataset == 'imagenet':
            return ImageNet(config.ichannel, config.layers, config.auxiliary, genotype, config.class_num)
        else:
            raise ValueError('invalid dataset : {:}'.format(config.dataset))
    else:
        raise ValueError('invalid nas arch type : {:}'.format(config.arch))
|
def get_procedures(procedure):
    """Look up the (train_func, valid_func) pair for the given procedure name.

    Raises:
      KeyError: when the procedure name is unknown.
    """
    from .basic_main import basic_train, basic_valid
    from .search_main import search_train, search_valid
    from .search_main_v2 import search_train_v2
    from .simple_KD_main import simple_KD_train, simple_KD_valid

    train_funcs = {
        'basic': basic_train,
        'search': search_train,
        'Simple-KD': simple_KD_train,
        'search-v2': search_train_v2,
    }
    valid_funcs = {
        'basic': basic_valid,
        'search': search_valid,
        'Simple-KD': simple_KD_valid,
        'search-v2': search_valid,
    }
    return (train_funcs[procedure], valid_funcs[procedure])
|
def get_device(tensors):
    """Recursively find the torch device of the first tensor inside a nested structure.

    Lists/tuples recurse into the first element; dicts recurse into the first
    value (an empty dict yields None, matching the original fall-through).
    """
    if isinstance(tensors, (list, tuple)):
        return get_device(tensors[0])
    elif isinstance(tensors, dict):
        for value in tensors.values():
            return get_device(value)
    else:
        return tensors.device
|
def basic_train_fn(xloader, network, criterion, optimizer, metric, logger):
    """Run one training epoch through the shared `procedure` helper."""
    return procedure(xloader, network, criterion, optimizer, metric, 'train', logger)
|
def basic_eval_fn(xloader, network, metric, logger):
    """Run one evaluation epoch (no criterion/optimizer) under torch.no_grad()."""
    with torch.no_grad():
        return procedure(xloader, network, None, None, metric, 'valid', logger)
|
def procedure(xloader, network, criterion, optimizer, metric, mode: Text, logger_fn: Callable = None):
    """Run one epoch over ``xloader`` in 'train' or 'valid' mode.

    In 'train' mode each batch does zero_grad/backward/step; in both modes the
    metric is updated per batch and ``metric.get_info()`` is returned.

    Raises:
      ValueError: when mode is neither 'train' nor 'valid'.
    """
    data_time, batch_time = AverageMeter(), AverageMeter()
    # BUGFIX: the original lower-cased the mode only for the train/eval switch
    # but compared the raw string for the optimizer steps, so e.g. mode='Train'
    # set train() yet never called zero_grad/backward. Normalize once up front.
    mode = mode.lower()
    if mode == 'train':
        network.train()
    elif mode == 'valid':
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    end = time.time()
    for i, (inputs, targets) in enumerate(xloader):
        data_time.update(time.time() - end)
        if mode == 'train':
            optimizer.zero_grad()
        outputs = network(inputs)
        # move targets to wherever the network placed its outputs
        targets = targets.to(get_device(outputs))
        if mode == 'train':
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()
        with torch.no_grad():
            results = metric(outputs, targets)
        batch_time.update(time.time() - end)
        end = time.time()
    return metric.get_info()
|
def basic_train(xloader, network, criterion, scheduler, optimizer, optim_config, extra_info, print_freq, logger):
    """One training epoch via `procedure`; returns (loss, acc1, acc5)."""
    return procedure(xloader, network, criterion, scheduler, optimizer, 'train', optim_config, extra_info, print_freq, logger)
|
def basic_valid(xloader, network, criterion, optim_config, extra_info, print_freq, logger):
    """One evaluation epoch via `procedure` under torch.no_grad(); returns (loss, acc1, acc5)."""
    with torch.no_grad():
        return procedure(xloader, network, criterion, None, None, 'valid', None, extra_info, print_freq, logger)
|
def procedure(xloader, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger):
    """Run one classification epoch over `xloader`.

    In 'train' mode `scheduler` and `optimizer` must be provided and are stepped
    per batch; in 'valid' mode callers pass None for both and only forward passes
    run.  Networks may return (features, logits) or (features, [logits, logits_aux]);
    the auxiliary-head loss is added with weight config.auxiliary when enabled.
    Returns (avg loss, avg top-1 accuracy, avg top-5 accuracy).
    """
    (data_time, batch_time, losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    if (mode == 'train'):
        network.train()
    elif (mode == 'valid'):
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    logger.log('[{:5s}] config :: auxiliary={:}'.format(mode, (config.auxiliary if hasattr(config, 'auxiliary') else (- 1))))
    end = time.time()
    for (i, (inputs, targets)) in enumerate(xloader):
        if (mode == 'train'):
            # Per-iteration LR update using the fractional progress through the epoch.
            scheduler.update(None, ((1.0 * i) / len(xloader)))
        data_time.update((time.time() - end))
        targets = targets.cuda(non_blocking=True)
        if (mode == 'train'):
            optimizer.zero_grad()
        (features, logits) = network(inputs)
        if isinstance(logits, list):
            assert (len(logits) == 2), 'logits must has {:} items instead of {:}'.format(2, len(logits))
            (logits, logits_aux) = logits
        else:
            (logits, logits_aux) = (logits, None)
        loss = criterion(logits, targets)
        # Bug fix: only add the auxiliary loss when an auxiliary output actually
        # exists; previously criterion(None, targets) crashed whenever
        # config.auxiliary > 0 but the network returned a single logits tensor.
        if ((config is not None) and hasattr(config, 'auxiliary') and (config.auxiliary > 0) and (logits_aux is not None)):
            loss_aux = criterion(logits_aux, targets)
            loss += (config.auxiliary * loss_aux)
        if (mode == 'train'):
            loss.backward()
            optimizer.step()
        (prec1, prec5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((i % print_freq) == 0) or ((i + 1) == len(xloader))):
            Sstr = ((' {:5s} '.format(mode.upper()) + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)))
            if (scheduler is not None):
                Sstr += ' {:}'.format(scheduler.get_min_info())
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
            Istr = 'Size={:}'.format(list(inputs.size()))
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Istr))
    logger.log(' **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), loss=losses.avg))
    return (losses.avg, top1.avg, top5.avg)
|
def obtain_accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    `output` is (batch, classes) scores, `target` is (batch,) class indices.
    Returns one 1-element tensor (percentage in [0, 100]) per requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    # (batch, maxk) -> (maxk, batch) so row k-1 holds each sample's k-th guess.
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [
        correct[:k].contiguous().view(-1).float().sum(0, keepdim=True).mul_(100.0 / batch_size)
        for k in topk
    ]
|
def pure_evaluate(xloader, network, criterion=torch.nn.CrossEntropyLoss()):
    """Evaluate `network` over `xloader` on the current CUDA device (no grad).

    Returns (avg loss, avg top-1, avg top-5, per-batch latency list).
    Latencies are recorded only while the batch size stays constant (i.e. they
    stop at the first differently-sized tail batch), and the first entry is
    dropped as warm-up when more than two were collected.
    NOTE(review): the default `criterion` is a mutable default argument shared
    across calls -- harmless for a stateless loss, but worth confirming.
    """
    (data_time, batch_time, batch) = (AverageMeter(), AverageMeter(), None)
    (losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    (latencies, device) = ([], torch.cuda.current_device())
    network.eval()
    with torch.no_grad():
        end = time.time()
        for (i, (inputs, targets)) in enumerate(xloader):
            targets = targets.cuda(device=device, non_blocking=True)
            inputs = inputs.cuda(device=device, non_blocking=True)
            data_time.update((time.time() - end))
            (features, logits) = network(inputs)
            loss = criterion(logits, targets)
            batch_time.update((time.time() - end))
            if ((batch is None) or (batch == inputs.size(0))):
                batch = inputs.size(0)
                # forward latency = total batch wall time minus the data-loading part
                latencies.append((batch_time.val - data_time.val))
            (prec1, prec5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            end = time.time()
    if (len(latencies) > 2):
        latencies = latencies[1:]
    return (losses.avg, top1.avg, top5.avg, latencies)
|
def procedure(xloader, network, criterion, scheduler, optimizer, mode: str, **kwargs):
    """Run one regression-style train/valid epoch.

    Requires kwargs['normalizer'] exposing .cuda() and .decode(); both targets
    and network outputs are passed through normalizer.decode() before the loss.
    Returns (avg loss, avg of the (1 - loss) score proxy, 0.0, total batch time).
    """
    (losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter())
    normalizer = kwargs['normalizer']
    if (mode == 'train'):
        network.train()
    elif (mode == 'valid'):
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    device = torch.cuda.current_device()
    (data_time, batch_time, end) = (AverageMeter(), AverageMeter(), time.time())
    for (i, (inputs, targets)) in enumerate(xloader):
        if (mode == 'train'):
            # per-iteration scheduler update with fractional epoch progress
            scheduler.update(None, ((1.0 * i) / len(xloader)))
        targets = targets.cuda(device=device, non_blocking=True)
        normalizer.cuda()
        if (mode == 'train'):
            optimizer.zero_grad()
        # presumably maps normalized values back to the original scale -- TODO confirm
        targets = normalizer.decode(targets)
        (features, logits) = network(inputs)
        logits = logits.squeeze()
        logits = normalizer.decode(logits)
        # flattened per-example loss, then normalized by batch size
        loss = criterion(logits.view(logits.size(0), (- 1)), targets.view(targets.size(0), (- 1)))
        loss = (loss / logits.size(0))
        if (mode == 'train'):
            loss.backward()
            optimizer.step()
        # (1 - loss) serves as a top-1 "score" proxy; top-5 is unused and stays 0
        (prec1, prec5) = ((1 - loss), 0)
        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(0, inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
    return (losses.avg, top1.avg, top5.avg, batch_time.sum)
|
def evaluate_for_seed(arch_config, opt_config, train_loader, valid_loaders, seed: int, logger, **kwargs):
    'A modular function to train and evaluate a single network, using the given random seed and optimization config with the provided loaders.'
    # NOTE: kwargs must contain 'normalizer'; it is forwarded to `procedure`.
    prepare_seed(seed)
    net = get_cell_based_tiny_net(arch_config)
    (flop, param) = get_model_infos(net, opt_config.xshape)
    logger.log('Network : {:}'.format(net.get_message()), False)
    logger.log('{:} Seed-------------------------- {:} --------------------------'.format(time_string(), seed))
    logger.log('FLOP = {:} MB, Param = {:} MB'.format(flop, param))
    (optimizer, scheduler, criterion) = get_optim_scheduler(net.parameters(), opt_config)
    default_device = torch.cuda.current_device()
    # Single-GPU DataParallel keeps the interface uniform with multi-GPU runs.
    network = torch.nn.DataParallel(net, device_ids=[default_device]).cuda(device=default_device)
    (start_time, epoch_time, total_epoch) = (time.time(), AverageMeter(), (opt_config.epochs + opt_config.warmup))
    # Per-epoch histories; validation entries are keyed as '<loader-key>@<epoch>'.
    (train_losses, train_acc1es, train_acc5es, valid_losses, valid_acc1es, valid_acc5es) = ({}, {}, {}, {}, {}, {})
    (train_times, valid_times, lrs) = ({}, {}, {})
    for epoch in range(total_epoch):
        scheduler.update(epoch, 0.0)
        lr = min(scheduler.get_lr())
        (train_loss, train_acc1, train_acc5, train_tm) = procedure(train_loader, network, criterion, scheduler, optimizer, 'train', normalizer=kwargs['normalizer'])
        train_losses[epoch] = train_loss
        train_acc1es[epoch] = train_acc1
        train_acc5es[epoch] = train_acc5
        train_times[epoch] = train_tm
        lrs[epoch] = lr
        with torch.no_grad():
            for (key, xloder) in valid_loaders.items():
                (valid_loss, valid_acc1, valid_acc5, valid_tm) = procedure(xloder, network, criterion, None, None, 'valid', normalizer=kwargs['normalizer'])
                valid_losses['{:}@{:}'.format(key, epoch)] = valid_loss
                valid_acc1es['{:}@{:}'.format(key, epoch)] = valid_acc1
                valid_acc5es['{:}@{:}'.format(key, epoch)] = valid_acc5
                valid_times['{:}@{:}'.format(key, epoch)] = valid_tm
        epoch_time.update((time.time() - start_time))
        start_time = time.time()
        need_time = 'Time Left: {:}'.format(convert_secs2time((epoch_time.avg * ((total_epoch - epoch) - 1)), True))
        # The logged valid_* values come from the LAST loader iterated above.
        logger.log('{:} {:} epoch={:03d}/{:03d} :: Train [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%] Valid [loss={:.5f}, acc@1={:.2f}%, acc@5={:.2f}%], lr={:}'.format(time_string(), need_time, epoch, total_epoch, train_loss, train_acc1, train_acc5, valid_loss, valid_acc1, valid_acc5, lr))
    # Bundle every history plus the final weights for serialization by the caller.
    info_seed = {'flop': flop, 'param': param, 'arch_config': arch_config._asdict(), 'opt_config': opt_config._asdict(), 'total_epoch': total_epoch, 'train_losses': train_losses, 'train_acc1es': train_acc1es, 'train_acc5es': train_acc5es, 'train_times': train_times, 'valid_losses': valid_losses, 'valid_acc1es': valid_acc1es, 'valid_acc5es': valid_acc5es, 'valid_times': valid_times, 'learning_rates': lrs, 'net_state_dict': net.state_dict(), 'net_string': '{:}'.format(net), 'finish-train': True}
    return info_seed
|
def get_nas_bench_loaders(workers):
    """Build the NAS-benchmark DataLoaders for CIFAR-10, CIFAR-100 and
    ImageNet-16-120 and return them keyed as '<dataset>@<split>'.

    Requires the TORCH_HOME environment variable and the split/config files
    under <repo-root>/configs/nas-benchmark.
    """
    torch.set_num_threads(workers)
    root_dir = ((pathlib.Path(__file__).parent / '..') / '..').resolve()
    torch_dir = pathlib.Path(os.environ['TORCH_HOME'])
    cifar_config_path = (((root_dir / 'configs') / 'nas-benchmark') / 'CIFAR.config')
    cifar_config = load_config(cifar_config_path, None, None)
    get_datasets = datasets.get_datasets
    break_line = ('-' * 150)
    print('{:} Create data-loader for all datasets'.format(time_string()))
    print(break_line)
    (TRAIN_CIFAR10, VALID_CIFAR10, xshape, class_num) = get_datasets('cifar10', str((torch_dir / 'cifar.python')), (- 1))
    print('original CIFAR-10 : {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR10), len(VALID_CIFAR10), xshape, class_num))
    cifar10_splits = load_config((((root_dir / 'configs') / 'nas-benchmark') / 'cifar-split.txt'), None, None)
    # Sanity-check that the split files on disk match the published benchmark.
    assert ((cifar10_splits.train[:10] == [0, 5, 7, 11, 13, 15, 16, 17, 20, 24]) and (cifar10_splits.valid[:10] == [1, 2, 3, 4, 6, 8, 9, 10, 12, 14]))
    # The validation half of the training set must be read with the *test*
    # transform, so clone the dataset and swap its transform.
    temp_dataset = copy.deepcopy(TRAIN_CIFAR10)
    temp_dataset.transform = VALID_CIFAR10.transform
    trainval_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    train_cifar10_loader = torch.utils.data.DataLoader(TRAIN_CIFAR10, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.train), num_workers=workers, pin_memory=True)
    valid_cifar10_loader = torch.utils.data.DataLoader(temp_dataset, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar10_splits.valid), num_workers=workers, pin_memory=True)
    test__cifar10_loader = torch.utils.data.DataLoader(VALID_CIFAR10, batch_size=cifar_config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
    print('CIFAR-10 : trval-loader has {:3d} batch with {:} per batch'.format(len(trainval_cifar10_loader), cifar_config.batch_size))
    print('CIFAR-10 : train-loader has {:3d} batch with {:} per batch'.format(len(train_cifar10_loader), cifar_config.batch_size))
    print('CIFAR-10 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_cifar10_loader), cifar_config.batch_size))
    print('CIFAR-10 : test--loader has {:3d} batch with {:} per batch'.format(len(test__cifar10_loader), cifar_config.batch_size))
    print(break_line)
    (TRAIN_CIFAR100, VALID_CIFAR100, xshape, class_num) = get_datasets('cifar100', str((torch_dir / 'cifar.python')), (- 1))
    print('original CIFAR-100: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_CIFAR100), len(VALID_CIFAR100), xshape, class_num))
    cifar100_splits = load_config((((root_dir / 'configs') / 'nas-benchmark') / 'cifar100-test-split.txt'), None, None)
    assert ((cifar100_splits.xvalid[:10] == [1, 3, 4, 5, 8, 10, 13, 14, 15, 16]) and (cifar100_splits.xtest[:10] == [0, 2, 6, 7, 9, 11, 12, 17, 20, 24]))
    train_cifar100_loader = torch.utils.data.DataLoader(TRAIN_CIFAR100, batch_size=cifar_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    valid_cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True)
    test__cifar100_loader = torch.utils.data.DataLoader(VALID_CIFAR100, batch_size=cifar_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest), num_workers=workers, pin_memory=True)
    print('CIFAR-100 : train-loader has {:3d} batch'.format(len(train_cifar100_loader)))
    print('CIFAR-100 : valid-loader has {:3d} batch'.format(len(valid_cifar100_loader)))
    print('CIFAR-100 : test--loader has {:3d} batch'.format(len(test__cifar100_loader)))
    print(break_line)
    # Bug fix: resolve this config relative to root_dir like every other config
    # above, instead of relying on the process working directory.
    imagenet16_config_path = (((root_dir / 'configs') / 'nas-benchmark') / 'ImageNet-16.config')
    imagenet16_config = load_config(imagenet16_config_path, None, None)
    (TRAIN_ImageNet16_120, VALID_ImageNet16_120, xshape, class_num) = get_datasets('ImageNet16-120', str(((torch_dir / 'cifar.python') / 'ImageNet16')), (- 1))
    print('original TRAIN_ImageNet16_120: {:} training images and {:} test images : {:} input shape : {:} number of classes'.format(len(TRAIN_ImageNet16_120), len(VALID_ImageNet16_120), xshape, class_num))
    imagenet_splits = load_config((((root_dir / 'configs') / 'nas-benchmark') / 'imagenet-16-120-test-split.txt'), None, None)
    assert ((imagenet_splits.xvalid[:10] == [1, 2, 3, 6, 7, 8, 9, 12, 16, 18]) and (imagenet_splits.xtest[:10] == [0, 4, 5, 10, 11, 13, 14, 15, 17, 20]))
    train_imagenet_loader = torch.utils.data.DataLoader(TRAIN_ImageNet16_120, batch_size=imagenet16_config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
    valid_imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xvalid), num_workers=workers, pin_memory=True)
    test__imagenet_loader = torch.utils.data.DataLoader(VALID_ImageNet16_120, batch_size=imagenet16_config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet_splits.xtest), num_workers=workers, pin_memory=True)
    print('ImageNet-16-120 : train-loader has {:3d} batch with {:} per batch'.format(len(train_imagenet_loader), imagenet16_config.batch_size))
    print('ImageNet-16-120 : valid-loader has {:3d} batch with {:} per batch'.format(len(valid_imagenet_loader), imagenet16_config.batch_size))
    print('ImageNet-16-120 : test--loader has {:3d} batch with {:} per batch'.format(len(test__imagenet_loader), imagenet16_config.batch_size))
    loaders = {'cifar10@trainval': trainval_cifar10_loader, 'cifar10@train': train_cifar10_loader, 'cifar10@valid': valid_cifar10_loader, 'cifar10@test': test__cifar10_loader, 'cifar100@train': train_cifar100_loader, 'cifar100@valid': valid_cifar100_loader, 'cifar100@test': test__cifar100_loader, 'ImageNet16-120@train': train_imagenet_loader, 'ImageNet16-120@valid': valid_imagenet_loader, 'ImageNet16-120@test': test__imagenet_loader}
    return loaders
|
class AverageMeter(object):
    """Tracks the most recent value together with a running sum, count, and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0.0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __repr__(self):
        return '{name}(val={val}, avg={avg}, count={count})'.format(name=self.__class__.__name__, **self.__dict__)
|
class Metric(abc.ABC):
    """The default meta metric class.

    Subclasses must implement reset(), __call__(predictions, targets) and
    get_info(); __init__ immediately delegates to reset().
    """

    def __init__(self):
        self.reset()

    def reset(self):
        raise NotImplementedError

    def __call__(self, predictions, targets):
        raise NotImplementedError

    def get_info(self):
        raise NotImplementedError

    def __repr__(self):
        return '{name}({inner})'.format(name=self.__class__.__name__, inner=self.inner_repr())

    def inner_repr(self):
        # Subclasses may override this to expose their configuration in __repr__.
        return ''
|
class ComposeMetric(Metric):
    """The composed metric class: broadcasts every call to all child metrics."""

    def __init__(self, *metric_list):
        self.reset()
        for metric in metric_list:
            self.append(metric)

    def reset(self):
        self._metric_list = []

    def append(self, metric):
        if not isinstance(metric, Metric):
            raise ValueError('The input metric is not correct: {:}'.format(type(metric)))
        self._metric_list.append(metric)

    def __len__(self):
        return len(self._metric_list)

    def __call__(self, predictions, targets):
        # One result per child metric, in insertion order.
        return [metric(predictions, targets) for metric in self._metric_list]

    def get_info(self):
        # Later metrics overwrite earlier ones on key collision.
        merged = dict()
        for metric in self._metric_list:
            merged.update(metric.get_info())
        return merged

    def inner_repr(self):
        return ','.join(str(metric) for metric in self._metric_list)
|
class MSEMetric(Metric):
    """The metric for mse."""

    def __init__(self, ignore_batch):
        super(MSEMetric, self).__init__()
        # When True every batch counts once; otherwise it is weighted by batch size.
        self._ignore_batch = ignore_batch

    def reset(self):
        self._mse = AverageMeter()

    def __call__(self, predictions, targets):
        if not (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
            raise NotImplementedError
        loss = torch.nn.functional.mse_loss(predictions.data, targets.data).item()
        weight = 1 if self._ignore_batch else predictions.shape[0]
        self._mse.update(loss, weight)
        return loss

    def get_info(self):
        return {'mse': self._mse.avg, 'score': self._mse.avg}
|
class Top1AccMetric(Metric):
    """The metric for the top-1 accuracy."""

    def __init__(self, ignore_batch):
        super(Top1AccMetric, self).__init__()
        # When True every batch counts once; otherwise it is weighted by batch size.
        self._ignore_batch = ignore_batch

    def reset(self):
        self._accuracy = AverageMeter()

    def __call__(self, predictions, targets):
        if not (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
            raise NotImplementedError
        max_prob_indexes = torch.argmax(predictions, dim=-1)
        corrects = torch.eq(max_prob_indexes, targets)
        accuracy = corrects.float().mean().float()
        weight = 1 if self._ignore_batch else predictions.shape[0]
        self._accuracy.update(accuracy, weight)
        return accuracy

    def get_info(self):
        return {'accuracy': self._accuracy.avg, 'score': (self._accuracy.avg * 100)}
|
class SaveMetric(Metric):
    """Metric that accumulates raw predictions; get_info() returns them concatenated.

    NOTE: the original docstring read 'The metric for mse.', a copy-paste
    error from MSEMetric -- this class never computes mse.
    """
    def reset(self):
        # per-call numpy arrays collected so far
        self._predicts = []
    def __call__(self, predictions, targets=None):
        # `targets` is accepted only for interface compatibility and ignored.
        if isinstance(predictions, torch.Tensor):
            predicts = predictions.cpu().numpy()
            self._predicts.append(predicts)
            return predicts
        else:
            raise NotImplementedError
    def get_info(self):
        all_predicts = np.concatenate(self._predicts)
        return {'predictions': all_predicts}
|
class LpLoss(object):
    """Absolute (`abs`) and relative (`rel`) Lp losses over flattened per-example tensors.

    Calling the instance computes the relative loss.  `reduction`/`size_average`
    control whether per-example norms are returned raw, averaged, or summed.
    """

    def __init__(self, d=2, p=2, size_average=True, reduction=True):
        super(LpLoss, self).__init__()
        assert (d > 0) and (p > 0)
        self.d = d
        self.p = p
        self.reduction = reduction
        self.size_average = size_average

    def _reduce(self, norms):
        # Collapse per-example norms according to the configured flags.
        if not self.reduction:
            return norms
        return torch.mean(norms) if self.size_average else torch.sum(norms)

    def abs(self, x, y):
        """Absolute Lp distance, scaled by the grid spacing h = 1/(N-1)."""
        num_examples = x.size()[0]
        h = 1.0 / (x.size()[1] - 1.0)
        all_norms = (h ** (self.d / self.p)) * torch.norm(x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1)
        return self._reduce(all_norms)

    def rel(self, x, y):
        """Lp distance normalized by the Lp norm of the reference `y`."""
        num_examples = x.size()[0]
        diff_norms = torch.norm(x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1)
        y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
        return self._reduce(diff_norms / y_norms)

    def __call__(self, x, y):
        return self.rel(x, y)
|
class _LRScheduler(object):
def __init__(self, optimizer, warmup_epochs, epochs):
if (not isinstance(optimizer, Optimizer)):
raise TypeError('{:} is not an Optimizer'.format(type(optimizer).__name__))
self.optimizer = optimizer
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
self.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
self.max_epochs = epochs
self.warmup_epochs = warmup_epochs
self.current_epoch = 0
self.current_iter = 0
def extra_repr(self):
return ''
def __repr__(self):
return ('{name}(warmup={warmup_epochs}, max-epoch={max_epochs}, current::epoch={current_epoch}, iter={current_iter:.2f}'.format(name=self.__class__.__name__, **self.__dict__) + ', {:})'.format(self.extra_repr()))
def state_dict(self):
return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def get_min_info(self):
lrs = self.get_lr()
return '#LR=[{:.6f}~{:.6f}] epoch={:03d}, iter={:4.2f}#'.format(min(lrs), max(lrs), self.current_epoch, self.current_iter)
def get_min_lr(self):
return min(self.get_lr())
def update(self, cur_epoch, cur_iter):
if (cur_epoch is not None):
assert (isinstance(cur_epoch, int) and (cur_epoch >= 0)), 'invalid cur-epoch : {:}'.format(cur_epoch)
self.current_epoch = cur_epoch
if (cur_iter is not None):
assert (isinstance(cur_iter, float) and (cur_iter >= 0)), 'invalid cur-iter : {:}'.format(cur_iter)
self.current_iter = cur_iter
for (param_group, lr) in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
class CosineAnnealingLR(_LRScheduler):
    """Cosine decay from base LR down to eta_min after a linear warmup."""

    def __init__(self, optimizer, warmup_epochs, epochs, T_max, eta_min):
        self.T_max = T_max
        self.eta_min = eta_min
        super(CosineAnnealingLR, self).__init__(optimizer, warmup_epochs, epochs)

    def extra_repr(self):
        return 'type={:}, T-max={:}, eta-min={:}'.format('cosine', self.T_max, self.eta_min)

    def get_lr(self):
        lrs = []
        for base_lr in self.base_lrs:
            if self.current_epoch >= self.max_epochs:
                # Past the schedule: clamp to the floor.
                lr = self.eta_min
            elif self.current_epoch >= self.warmup_epochs:
                # Cosine decay between warmup and max_epochs.
                last_epoch = (self.current_epoch - self.warmup_epochs)
                lr = (self.eta_min + (((base_lr - self.eta_min) * (1 + math.cos(((math.pi * last_epoch) / self.T_max)))) / 2))
            else:
                # Linear warmup toward base_lr (fractional iters included).
                lr = (((self.current_epoch / self.warmup_epochs) + (self.current_iter / self.warmup_epochs)) * base_lr)
            lrs.append(lr)
        return lrs
|
class MultiStepLR(_LRScheduler):
    """Multiplies the LR by the paired gamma each time a milestone epoch is crossed."""

    def __init__(self, optimizer, warmup_epochs, epochs, milestones, gammas):
        assert (len(milestones) == len(gammas)), 'invalid {:} vs {:}'.format(len(milestones), len(gammas))
        self.milestones = milestones
        self.gammas = gammas
        super(MultiStepLR, self).__init__(optimizer, warmup_epochs, epochs)

    def extra_repr(self):
        return 'type={:}, milestones={:}, gammas={:}, base-lrs={:}'.format('multistep', self.milestones, self.gammas, self.base_lrs)

    def get_lr(self):
        lrs = []
        for base_lr in self.base_lrs:
            if self.current_epoch < self.warmup_epochs:
                # Linear warmup toward base_lr (fractional iters included).
                lr = (((self.current_epoch / self.warmup_epochs) + (self.current_iter / self.warmup_epochs)) * base_lr)
            else:
                passed = (self.current_epoch - self.warmup_epochs)
                # Number of milestones already crossed.
                num_crossed = bisect_right(self.milestones, passed)
                lr = base_lr
                for gamma in self.gammas[:num_crossed]:
                    lr *= gamma
            lrs.append(lr)
        return lrs
|
class ExponentialLR(_LRScheduler):
    """Exponential LR decay (base_lr * gamma**epoch) after a linear warmup."""

    def __init__(self, optimizer, warmup_epochs, epochs, gamma):
        self.gamma = gamma
        super(ExponentialLR, self).__init__(optimizer, warmup_epochs, epochs)

    def extra_repr(self):
        return 'type={:}, gamma={:}, base-lrs={:}'.format('exponential', self.gamma, self.base_lrs)

    def get_lr(self):
        lrs = []
        for base_lr in self.base_lrs:
            if self.current_epoch < self.warmup_epochs:
                # Linear warmup toward base_lr (fractional iters included).
                lr = (((self.current_epoch / self.warmup_epochs) + (self.current_iter / self.warmup_epochs)) * base_lr)
            else:
                last_epoch = (self.current_epoch - self.warmup_epochs)
                assert (last_epoch >= 0), 'invalid last_epoch : {:}'.format(last_epoch)
                lr = (base_lr * (self.gamma ** last_epoch))
            lrs.append(lr)
        return lrs
|
class LinearLR(_LRScheduler):
    """Linearly decays the LR from max_LR toward min_LR after a linear warmup."""

    def __init__(self, optimizer, warmup_epochs, epochs, max_LR, min_LR):
        self.max_LR = max_LR
        self.min_LR = min_LR
        super(LinearLR, self).__init__(optimizer, warmup_epochs, epochs)

    def extra_repr(self):
        return 'type={:}, max_LR={:}, min_LR={:}, base-lrs={:}'.format('LinearLR', self.max_LR, self.min_LR, self.base_lrs)

    def get_lr(self):
        lrs = []
        for base_lr in self.base_lrs:
            if self.current_epoch < self.warmup_epochs:
                # Linear warmup toward base_lr (fractional iters included).
                lr = (((self.current_epoch / self.warmup_epochs) + (self.current_iter / self.warmup_epochs)) * base_lr)
            else:
                last_epoch = (self.current_epoch - self.warmup_epochs)
                assert (last_epoch >= 0), 'invalid last_epoch : {:}'.format(last_epoch)
                # Fraction of the max->min drop that has elapsed so far.
                ratio = ((((self.max_LR - self.min_LR) * last_epoch) / self.max_epochs) / self.max_LR)
                lr = (base_lr * (1 - ratio))
            lrs.append(lr)
        return lrs
|
class CrossEntropyLabelSmooth(nn.Module):
    """Cross entropy against uniformly label-smoothed one-hot targets.

    Each target distribution is (1 - epsilon) on the true class plus
    epsilon / num_classes spread over all classes.
    """

    def __init__(self, num_classes, epsilon):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        one_hot = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
        smoothed = ((1 - self.epsilon) * one_hot) + (self.epsilon / self.num_classes)
        return ((- smoothed) * log_probs).mean(0).sum()
|
def get_optim_scheduler(parameters, config):
    """Instantiate the (optimizer, scheduler, criterion) triple described by `config`.

    Raises ValueError for unrecognized optim/scheduler/criterion names.
    """
    assert (hasattr(config, 'optim') and hasattr(config, 'scheduler') and hasattr(config, 'criterion')), 'config must have optim / scheduler / criterion keys instead of {:}'.format(config)
    # ---- optimizer ----
    if config.optim == 'SGD':
        optim = torch.optim.SGD(parameters, config.LR, momentum=config.momentum, weight_decay=config.decay, nesterov=config.nesterov)
    elif config.optim == 'RMSprop':
        optim = torch.optim.RMSprop(parameters, config.LR, momentum=config.momentum, weight_decay=config.decay)
    else:
        raise ValueError('invalid optim : {:}'.format(config.optim))
    # ---- LR scheduler ----
    if config.scheduler == 'cos':
        # T_max defaults to the total epoch count when not configured explicitly.
        T_max = getattr(config, 'T_max', config.epochs)
        scheduler = CosineAnnealingLR(optim, config.warmup, config.epochs, T_max, config.eta_min)
    elif config.scheduler == 'multistep':
        scheduler = MultiStepLR(optim, config.warmup, config.epochs, config.milestones, config.gammas)
    elif config.scheduler == 'exponential':
        scheduler = ExponentialLR(optim, config.warmup, config.epochs, config.gamma)
    elif config.scheduler == 'linear':
        scheduler = LinearLR(optim, config.warmup, config.epochs, config.LR, config.LR_min)
    else:
        raise ValueError('invalid scheduler : {:}'.format(config.scheduler))
    # ---- loss function ----
    if config.criterion == 'Softmax':
        criterion = torch.nn.CrossEntropyLoss()
    elif config.criterion == 'SmoothSoftmax':
        criterion = CrossEntropyLabelSmooth(config.class_num, config.label_smooth)
    elif config.criterion == 'L2':
        criterion = LpLoss(size_average=False)
    else:
        raise ValueError('invalid criterion : {:}'.format(config.criterion))
    return (optim, scheduler, criterion)
|
def set_log_basic_config(filename=None, format=None, level=None):
    """Set the basic configuration for the logging system.

    See details at https://docs.python.org/3/library/logging.html#logging.basicConfig

    :param filename: str or None -- the path to save the logs.
    :param format: the logging format (note: parameter name shadows the builtin,
        kept for caller compatibility)
    :param level: int
    :return: Logger object.
    """
    from qlib.config import C
    if level is None:
        level = C.logging_level
    if format is None:
        format = C.logging_config['formatters']['logger_format']['format']
    # basicConfig is a no-op when handlers already exist, so detach them first.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=filename, format=format, level=level)
|
def update_gpu(config, gpu):
    """Return a deep copy of `config` whose first matching 'GPU' entry is set to `gpu`.

    The lookup precedence is: task.model (direct, then kwargs), then model
    (direct, then kwargs), then top-level kwargs, then the top level itself.
    The input config is never mutated.
    """
    config = deepcopy(config)
    if ('task' in config) and ('model' in config['task']):
        model_cfg = config['task']['model']
        if 'GPU' in model_cfg:
            model_cfg['GPU'] = gpu
        elif ('kwargs' in model_cfg) and ('GPU' in model_cfg['kwargs']):
            model_cfg['kwargs']['GPU'] = gpu
    elif 'model' in config:
        model_cfg = config['model']
        if 'GPU' in model_cfg:
            model_cfg['GPU'] = gpu
        elif ('kwargs' in model_cfg) and ('GPU' in model_cfg['kwargs']):
            model_cfg['kwargs']['GPU'] = gpu
    elif ('kwargs' in config) and ('GPU' in config['kwargs']):
        config['kwargs']['GPU'] = gpu
    elif 'GPU' in config:
        config['GPU'] = gpu
    return config
|
def update_market(config, market):
    """Return a deep copy of `config` retargeted at `market`.

    Both the top-level 'market' field and the data handler's instrument
    universe are updated; the input config is left untouched.
    Requires config['data_handler_config'] to exist.
    """
    # deepcopy alone fully isolates the copy; the previous extra shallow
    # config.copy() before deepcopy was redundant.
    config = deepcopy(config)
    config['market'] = market
    config['data_handler_config']['instruments'] = market
    return config
|
def run_exp(task_config, dataset, experiment_name, recorder_name, uri, model_obj_name='model.pkl'):
    """Train (or resume) the configured qlib model and generate its records.

    If `model_obj_name` already exists in the recorder, training is skipped
    and the stored model is reused; otherwise the model is fitted and saved.
    Afterwards, every entry in task_config['record'] is instantiated and its
    generate() is invoked.
    """
    model = init_instance_by_config(task_config['model'])
    model_fit_kwargs = dict(dataset=dataset)
    with R.start(experiment_name=experiment_name, recorder_name=recorder_name, uri=uri, resume=True):
        # Route this run's logs into the recorder's local directory.
        recorder_root_dir = R.get_recorder().get_local_dir()
        log_file = os.path.join(recorder_root_dir, '{:}.log'.format(experiment_name))
        set_log_basic_config(log_file)
        logger = get_module_logger('q.run_exp')
        logger.info('task_config::\n{:}'.format(pprint.pformat(task_config, indent=2)))
        logger.info('[{:}] - [{:}]: {:}'.format(experiment_name, recorder_name, uri))
        logger.info('dataset={:}'.format(dataset))
        try:
            # Fast path: reuse a previously saved model from the recorder.
            if hasattr(model, 'to'):
                # Preserve the device the fresh model was built for, since the
                # loaded object may carry a stale device.
                ori_device = model.device
                model = R.load_object(model_obj_name)
                model.to(ori_device)
            else:
                model = R.load_object(model_obj_name)
            logger.info('[Find existing object from {:}]'.format(model_obj_name))
        except OSError:
            # NOTE(review): presumably raised when no saved object exists yet --
            # in that case we train from scratch; verify against qlib's API.
            R.log_params(**flatten_dict(update_gpu(task_config, None)))
            # Forward a checkpoint path only if the model's fit() accepts one.
            if ('save_path' in inspect.getfullargspec(model.fit).args):
                model_fit_kwargs['save_path'] = os.path.join(recorder_root_dir, 'model.ckp')
            elif ('save_dir' in inspect.getfullargspec(model.fit).args):
                model_fit_kwargs['save_dir'] = os.path.join(recorder_root_dir, 'model-ckps')
            model.fit(**model_fit_kwargs)
            # Move to CPU before pickling so the artifact is device-independent.
            if hasattr(model, 'to'):
                old_device = model.device
                model.to('cpu')
                R.save_objects(**{model_obj_name: model})
                model.to(old_device)
            else:
                R.save_objects(**{model_obj_name: model})
        except Exception as e:
            # NOTE(review): broad catch re-wraps any failure as ValueError and
            # loses the original traceback; consider `raise ... from e`.
            raise ValueError('Something wrong: {:}'.format(e))
        # Generate every configured record against the (loaded or trained) model.
        recorder = R.get_recorder()
        for record in task_config['record']:
            record = deepcopy(record)
            if (record['class'] == 'MultiSegRecord'):
                record['kwargs'] = dict(model=model, dataset=dataset, recorder=recorder)
                sr = init_instance_by_config(record)
                sr.generate(**record['generate_kwargs'])
            elif (record['class'] == 'SignalRecord'):
                srconf = {'model': model, 'dataset': dataset, 'recorder': recorder}
                record['kwargs'].update(srconf)
                sr = init_instance_by_config(record)
                sr.generate()
            else:
                rconf = {'recorder': recorder}
                record['kwargs'].update(rconf)
                ar = init_instance_by_config(record)
                ar.generate()
|
def get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant):
    """Log-penalty steering expected FLOPs toward [flop_need - flop_tolerant, flop_need].

    Returns (loss tensor, loss value); (0, 0) when the current FLOPs already
    sit inside the tolerated window.
    """
    expected_flop = torch.mean(expected_flop)
    if flop_cur < (flop_need - flop_tolerant):
        # Under budget by more than the tolerance: push expected FLOPs up.
        loss = -torch.log(expected_flop)
    elif flop_cur > flop_need:
        # Over budget: push expected FLOPs down.
        loss = torch.log(expected_flop)
    else:
        return (0, 0)
    return (loss, loss.item())
|
def search_train(search_loader, network, criterion, scheduler, base_optimizer, arch_optimizer, optim_config, extra_info, print_freq, logger):
    """One bi-level NAS search epoch.

    For every batch: (1) a weight update via `base_optimizer` on the base
    split, then (2) an architecture update via `arch_optimizer` on the arch
    split, whose loss is classification loss plus a FLOP penalty weighted by
    extra_info['FLOP-weight'].
    Returns (avg base loss, avg arch loss, avg top-1, avg top-5).
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (base_losses, arch_losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    (arch_cls_losses, arch_flop_losses) = (AverageMeter(), AverageMeter())
    (epoch_str, flop_need, flop_weight, flop_tolerant) = (extra_info['epoch-str'], extra_info['FLOP-exp'], extra_info['FLOP-weight'], extra_info['FLOP-tolerant'])
    network.train()
    logger.log('[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}'.format(epoch_str, flop_need, flop_weight))
    end = time.time()
    # Switch all sub-modules into architecture-search mode.
    network.apply(change_key('search_mode', 'search'))
    for (step, (base_inputs, base_targets, arch_inputs, arch_targets)) in enumerate(search_loader):
        scheduler.update(None, ((1.0 * step) / len(search_loader)))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        data_time.update((time.time() - end))
        # --- step 1: update network weights on the base split ---
        base_optimizer.zero_grad()
        (logits, expected_flop) = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        base_optimizer.step()
        (prec1, prec5) = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        top1.update(prec1.item(), base_inputs.size(0))
        top5.update(prec5.item(), base_inputs.size(0))
        # --- step 2: update architecture parameters on the arch split ---
        arch_optimizer.zero_grad()
        (logits, expected_flop) = network(arch_inputs)
        # FLOPs of the current architecture (queried in 'genotype' mode),
        # compared against the budget by get_flop_loss.
        flop_cur = network.module.get_flop('genotype', None, None)
        (flop_loss, flop_loss_scale) = get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant)
        acls_loss = criterion(logits, arch_targets)
        arch_loss = (acls_loss + (flop_loss * flop_weight))
        arch_loss.backward()
        arch_optimizer.step()
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0))
        arch_cls_losses.update(acls_loss.item(), arch_inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((step % print_freq) == 0) or ((step + 1) == len(search_loader))):
            Sstr = (('**TRAIN** ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(search_loader)))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Lstr = 'Base-Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=base_losses, top1=top1, top5=top5)
            Vstr = 'Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})'.format(aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses)
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Vstr))
    logger.log(' **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}'.format(top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), baseloss=base_losses.avg, archloss=arch_losses.avg))
    return (base_losses.avg, arch_losses.avg, top1.avg, top5.avg)
|
def search_valid(xloader, network, criterion, extra_info, print_freq, logger):
    """Evaluate the searched network with one pass over `xloader`.

    Args:
      xloader: iterable yielding (inputs, targets) batches.
      network: wrapped model whose forward returns (logits, expected_flop).
      criterion: classification loss.
      extra_info: tag string shown in the log lines.
      print_freq: log every `print_freq` batches (and always at the last batch).
      logger: object exposing a `.log(str)` method.

    Returns:
      (average loss, average top-1 accuracy, average top-5 accuracy).
    """
    (data_time, batch_time, losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    network.eval()
    # switch sub-modules into 'search' mode so the forward also yields expected FLOPs
    network.apply(change_key('search_mode', 'search'))
    end = time.time()
    with torch.no_grad():
        for (i, (inputs, targets)) in enumerate(xloader):
            data_time.update((time.time() - end))
            targets = targets.cuda(non_blocking=True)
            (logits, expected_flop) = network(inputs)
            loss = criterion(logits, targets)
            (prec1, prec5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            # batch time covers data loading + forward for this iteration
            batch_time.update((time.time() - end))
            end = time.time()
            if (((i % print_freq) == 0) or ((i + 1) == len(xloader))):
                Sstr = (('**VALID** ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)))
                Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
                Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
                Istr = 'Size={:}'.format(list(inputs.size()))
                logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Istr))
    logger.log(' **VALID** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), loss=losses.avg))
    return (losses.avg, top1.avg, top5.avg)
|
def get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant):
    """Penalty steering the expected FLOP count toward the budget `flop_need`.

    Returns a (loss_tensor, loss_scalar) pair, or (0, 0) when the current FLOP
    count already lies within the tolerated band below the target.
    """
    mean_flop = torch.mean(expected_flop)
    if flop_cur < flop_need - flop_tolerant:
        # too small -> encourage larger expected FLOPs
        penalty = -torch.log(mean_flop)
    elif flop_cur > flop_need:
        # too large -> encourage smaller expected FLOPs
        penalty = torch.log(mean_flop)
    else:
        return (0, 0)
    return (penalty, penalty.item())
|
def search_train_v2(search_loader, network, criterion, scheduler, base_optimizer, arch_optimizer, optim_config, extra_info, print_freq, logger):
    """One epoch of differentiable architecture search under a FLOP budget.

    Each step performs two updates on disjoint data splits:
      1. weight update (base_*): plain classification loss via `base_optimizer`;
      2. architecture update (arch_*): classification loss plus a FLOP penalty
         (weighted by `FLOP-weight`) via `arch_optimizer`.

    Returns:
      (avg base loss, avg arch loss, avg top-1, avg top-5) over the epoch.
    """
    (data_time, batch_time) = (AverageMeter(), AverageMeter())
    (base_losses, arch_losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    (arch_cls_losses, arch_flop_losses) = (AverageMeter(), AverageMeter())
    (epoch_str, flop_need, flop_weight, flop_tolerant) = (extra_info['epoch-str'], extra_info['FLOP-exp'], extra_info['FLOP-weight'], extra_info['FLOP-tolerant'])
    network.train()
    logger.log('[Search] : {:}, FLOP-Require={:.2f} MB, FLOP-WEIGHT={:.2f}'.format(epoch_str, flop_need, flop_weight))
    end = time.time()
    network.apply(change_key('search_mode', 'search'))
    for (step, (base_inputs, base_targets, arch_inputs, arch_targets)) in enumerate(search_loader):
        # per-step LR update based on epoch fraction
        scheduler.update(None, ((1.0 * step) / len(search_loader)))
        base_targets = base_targets.cuda(non_blocking=True)
        arch_targets = arch_targets.cuda(non_blocking=True)
        data_time.update((time.time() - end))
        # --- weight update on the base split ---
        base_optimizer.zero_grad()
        (logits, expected_flop) = network(base_inputs)
        base_loss = criterion(logits, base_targets)
        base_loss.backward()
        base_optimizer.step()
        (prec1, prec5) = obtain_accuracy(logits.data, base_targets.data, topk=(1, 5))
        base_losses.update(base_loss.item(), base_inputs.size(0))
        top1.update(prec1.item(), base_inputs.size(0))
        top5.update(prec5.item(), base_inputs.size(0))
        # --- architecture update on the arch split (CE + weighted FLOP penalty) ---
        arch_optimizer.zero_grad()
        (logits, expected_flop) = network(arch_inputs)
        flop_cur = network.module.get_flop('genotype', None, None)
        (flop_loss, flop_loss_scale) = get_flop_loss(expected_flop, flop_cur, flop_need, flop_tolerant)
        acls_loss = criterion(logits, arch_targets)
        arch_loss = (acls_loss + (flop_loss * flop_weight))
        arch_loss.backward()
        arch_optimizer.step()
        arch_losses.update(arch_loss.item(), arch_inputs.size(0))
        arch_flop_losses.update(flop_loss_scale, arch_inputs.size(0))
        arch_cls_losses.update(acls_loss.item(), arch_inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((step % print_freq) == 0) or ((step + 1) == len(search_loader))):
            Sstr = (('**TRAIN** ' + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(epoch_str, step, len(search_loader)))
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Lstr = 'Base-Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=base_losses, top1=top1, top5=top5)
            Vstr = 'Acls-loss {aloss.val:.3f} ({aloss.avg:.3f}) FLOP-Loss {floss.val:.3f} ({floss.avg:.3f}) Arch-Loss {loss.val:.3f} ({loss.avg:.3f})'.format(aloss=arch_cls_losses, floss=arch_flop_losses, loss=arch_losses)
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Vstr))
    logger.log(' **TRAIN** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Base-Loss:{baseloss:.3f}, Arch-Loss={archloss:.3f}'.format(top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), baseloss=base_losses.avg, archloss=arch_losses.avg))
    return (base_losses.avg, arch_losses.avg, top1.avg, top5.avg)
|
def simple_KD_train(xloader, teacher, network, criterion, scheduler, optimizer, optim_config, extra_info, print_freq, logger):
    """One knowledge-distillation training epoch; thin wrapper around `procedure`."""
    return procedure(xloader, teacher, network, criterion, scheduler, optimizer, 'train', optim_config, extra_info, print_freq, logger)
|
def simple_KD_valid(xloader, teacher, network, criterion, optim_config, extra_info, print_freq, logger):
    """One knowledge-distillation evaluation pass (no gradients); wraps `procedure`."""
    with torch.no_grad():
        return procedure(xloader, teacher, network, criterion, None, None, 'valid', optim_config, extra_info, print_freq, logger)
|
def loss_KD_fn(criterion, student_logits, teacher_logits, studentFeatures, teacherFeatures, targets, alpha, temperature):
    """Distillation loss: (1 - alpha) * criterion + alpha * T^2 * KL(student || teacher).

    `studentFeatures` / `teacherFeatures` are accepted for interface compatibility
    but not used by this loss.
    """
    hard_term = criterion(student_logits, targets) * (1.0 - alpha)
    student_log_prob = F.log_softmax(student_logits / temperature, dim=1)
    teacher_prob = F.softmax(teacher_logits / temperature, dim=1)
    soft_term = F.kl_div(student_log_prob, teacher_prob, reduction='batchmean')
    # T^2 rescaling keeps soft-target gradients comparable across temperatures
    soft_term = soft_term * alpha * temperature * temperature
    return hard_term + soft_term
|
def procedure(xloader, teacher, network, criterion, scheduler, optimizer, mode, config, extra_info, print_freq, logger):
    """Shared train/valid loop for knowledge distillation.

    Args:
      xloader: iterable of (inputs, targets) batches.
      teacher: teacher model returning (features, logits); always run in eval mode.
      network: student model returning (features, logits); logits may be a
        2-item list [main, auxiliary] when an auxiliary head is present.
      criterion: base classification loss combined with KD via `loss_KD_fn`.
      scheduler: LR scheduler, used (and required) only when mode == 'train'.
      optimizer: optimizer, used only when mode == 'train'.
      mode: 'train' or 'valid'.
      config: carries KD_alpha, KD_temperature and an optional `auxiliary` weight.

    Returns:
      (avg student loss, avg student top-1, avg student top-5).

    Raises:
      ValueError: when `mode` is neither 'train' nor 'valid'.
    """
    (data_time, batch_time, losses, top1, top5) = (AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter())
    (Ttop1, Ttop5) = (AverageMeter(), AverageMeter())
    if (mode == 'train'):
        network.train()
    elif (mode == 'valid'):
        network.eval()
    else:
        raise ValueError('The mode is not right : {:}'.format(mode))
    teacher.eval()
    logger.log('[{:5s}] config :: auxiliary={:}, KD :: [alpha={:.2f}, temperature={:.2f}]'.format(mode, (config.auxiliary if hasattr(config, 'auxiliary') else (- 1)), config.KD_alpha, config.KD_temperature))
    end = time.time()
    for (i, (inputs, targets)) in enumerate(xloader):
        if (mode == 'train'):
            # per-step LR update based on epoch fraction
            scheduler.update(None, ((1.0 * i) / len(xloader)))
        data_time.update((time.time() - end))
        targets = targets.cuda(non_blocking=True)
        if (mode == 'train'):
            optimizer.zero_grad()
        (student_f, logits) = network(inputs)
        if isinstance(logits, list):
            assert (len(logits) == 2), 'logits must has {:} items instead of {:}'.format(2, len(logits))
            (logits, logits_aux) = logits
        else:
            (logits, logits_aux) = (logits, None)
        with torch.no_grad():
            (teacher_f, teacher_logits) = teacher(inputs)
        loss = loss_KD_fn(criterion, logits, teacher_logits, student_f, teacher_f, targets, config.KD_alpha, config.KD_temperature)
        if ((config is not None) and hasattr(config, 'auxiliary') and (config.auxiliary > 0)):
            # NOTE(review): if the student did not emit auxiliary logits, logits_aux
            # is None here and criterion will fail — confirm config.auxiliary > 0
            # implies the network has an auxiliary head.
            loss_aux = criterion(logits_aux, targets)
            loss += (config.auxiliary * loss_aux)
        if (mode == 'train'):
            loss.backward()
            optimizer.step()
        (sprec1, sprec5) = obtain_accuracy(logits.data, targets.data, topk=(1, 5))
        losses.update(loss.item(), inputs.size(0))
        top1.update(sprec1.item(), inputs.size(0))
        top5.update(sprec5.item(), inputs.size(0))
        # also track teacher accuracy for the accuracy-drop report below
        (tprec1, tprec5) = obtain_accuracy(teacher_logits.data, targets.data, topk=(1, 5))
        Ttop1.update(tprec1.item(), inputs.size(0))
        Ttop5.update(tprec5.item(), inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        if (((i % print_freq) == 0) or ((i + 1) == len(xloader))):
            Sstr = ((' {:5s} '.format(mode.upper()) + time_string()) + ' [{:}][{:03d}/{:03d}]'.format(extra_info, i, len(xloader)))
            if (scheduler is not None):
                Sstr += ' {:}'.format(scheduler.get_min_info())
            Tstr = 'Time {batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})'.format(batch_time=batch_time, data_time=data_time)
            Lstr = 'Loss {loss.val:.3f} ({loss.avg:.3f}) Prec@1 {top1.val:.2f} ({top1.avg:.2f}) Prec@5 {top5.val:.2f} ({top5.avg:.2f})'.format(loss=losses, top1=top1, top5=top5)
            Lstr += ' Teacher : acc@1={:.2f}, acc@5={:.2f}'.format(Ttop1.avg, Ttop5.avg)
            Istr = 'Size={:}'.format(list(inputs.size()))
            logger.log(((((((Sstr + ' ') + Tstr) + ' ') + Lstr) + ' ') + Istr))
    logger.log(' **{:5s}** accuracy drop :: @1={:.2f}, @5={:.2f}'.format(mode.upper(), (Ttop1.avg - top1.avg), (Ttop5.avg - top5.avg)))
    logger.log(' **{mode:5s}** Prec@1 {top1.avg:.2f} Prec@5 {top5.avg:.2f} Error@1 {error1:.2f} Error@5 {error5:.2f} Loss:{loss:.3f}'.format(mode=mode.upper(), top1=top1, top5=top5, error1=(100 - top1.avg), error5=(100 - top5.avg), loss=losses.avg))
    return (losses.avg, top1.avg, top5.avg)
|
def prepare_seed(rand_seed):
    """Seed the python, numpy and torch (CPU + all CUDA devices) RNGs."""
    seeders = (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed, torch.cuda.manual_seed_all)
    for seed_fn in seeders:
        seed_fn(rand_seed)
|
def prepare_logger(xargs):
    """Create a Logger under `xargs.save_dir` and record arguments + environment info.

    `xargs` is an argparse.Namespace-like object providing at least `save_dir`
    and `rand_seed`; all of its attributes are logged one per line.
    """
    # deep-copy so logging cannot mutate the caller's namespace
    args = copy.deepcopy(xargs)
    from xautodl.log_utils import Logger
    logger = Logger(args.save_dir, args.rand_seed)
    logger.log('Main Function with logger : {:}'.format(logger))
    logger.log('Arguments : -------------------------------')
    for (name, value) in args._get_kwargs():
        logger.log('{:16} : {:}'.format(name, value))
    logger.log('Python  Version  : {:}'.format(sys.version.replace('\n', ' ')))
    logger.log('Pillow  Version  : {:}'.format(PIL.__version__))
    logger.log('PyTorch Version  : {:}'.format(torch.__version__))
    logger.log('cuDNN   Version  : {:}'.format(torch.backends.cudnn.version()))
    logger.log('CUDA available   : {:}'.format(torch.cuda.is_available()))
    logger.log('CUDA GPU numbers : {:}'.format(torch.cuda.device_count()))
    logger.log('CUDA_VISIBLE_DEVICES : {:}'.format((os.environ['CUDA_VISIBLE_DEVICES'] if ('CUDA_VISIBLE_DEVICES' in os.environ) else 'None')))
    return logger
|
def get_machine_info():
    """Return a multi-line string describing the local python / PyTorch / CUDA setup."""
    parts = [
        'Python  Version  : {:}'.format(sys.version.replace('\n', ' ')),
        'Pillow  Version  : {:}'.format(PIL.__version__),
        'PyTorch Version  : {:}'.format(torch.__version__),
        'cuDNN   Version  : {:}'.format(torch.backends.cudnn.version()),
        'CUDA available   : {:}'.format(torch.cuda.is_available()),
        'CUDA GPU numbers : {:}'.format(torch.cuda.device_count()),
    ]
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        parts.append('CUDA_VISIBLE_DEVICES={:}'.format(os.environ['CUDA_VISIBLE_DEVICES']))
    else:
        parts.append('Does not set CUDA_VISIBLE_DEVICES')
    return '\n'.join(parts)
|
def save_checkpoint(state, filename, logger):
    """Serialize `state` to `filename` (removing any stale file first); return the path."""
    can_log = hasattr(logger, 'log')
    if osp.isfile(filename):
        if can_log:
            logger.log('Find {:} exist, delete is at first before saving'.format(filename))
        os.remove(filename)
    torch.save(state, filename)
    # verify the checkpoint actually landed on disk
    assert osp.isfile(filename), 'save filename : {:} failed, which is not found.'.format(filename)
    if can_log:
        logger.log('save checkpoint into {:}'.format(filename))
    return filename
|
def copy_checkpoint(src, dst, logger):
    """Copy checkpoint `src` to `dst`, first removing any pre-existing `dst`."""
    can_log = hasattr(logger, 'log')
    if osp.isfile(dst):
        if can_log:
            logger.log('Find {:} exist, delete is at first before saving'.format(dst))
        os.remove(dst)
    copyfile(src, dst)
    if can_log:
        logger.log('copy the file from {:} into {:}'.format(src, dst))
|
def has_categorical(space_or_value, x):
    """Membership test working for both plain values and `Space` objects."""
    is_space = isinstance(space_or_value, Space)
    return space_or_value.has(x) if is_space else space_or_value == x
|
def has_continuous(space_or_value, x):
    """Membership test; plain (non-Space) values compare within the `_EPS` tolerance."""
    if not isinstance(space_or_value, Space):
        return abs(space_or_value - x) <= _EPS
    return space_or_value.has(x)
|
def is_determined(space_or_value):
    """A plain value is always determined; a `Space` defers to its own flag."""
    return space_or_value.determined if isinstance(space_or_value, Space) else True
|
def get_determined_value(space_or_value):
    """Collapse a fully-determined space (or plain value) to its concrete value.

    Raises:
      ValueError: when the input still contains undecided choices.
    """
    if not is_determined(space_or_value):
        raise ValueError('This input is not determined: {:}'.format(space_or_value))
    if not isinstance(space_or_value, Space):
        return space_or_value
    if isinstance(space_or_value, Continuous):
        # a determined Continuous has lower == upper (within eps)
        return space_or_value.lower
    if isinstance(space_or_value, Categorical):
        # a determined Categorical holds exactly one candidate
        return get_determined_value(space_or_value[0])
    return space_or_value.value
|
def get_max(space_or_value):
    """Largest reachable value of a space (recursing into nested Categorical)."""
    # Integer must be tested before Categorical: Integer subclasses Categorical.
    if isinstance(space_or_value, Integer):
        return max(space_or_value.candidates)
    if isinstance(space_or_value, Continuous):
        return space_or_value.upper
    if isinstance(space_or_value, Categorical):
        return max(get_max(space_or_value[index]) for index in range(len(space_or_value)))
    return space_or_value
|
def get_min(space_or_value):
    """Smallest reachable value of a space (recursing into nested Categorical)."""
    # Integer must be tested before Categorical: Integer subclasses Categorical.
    if isinstance(space_or_value, Integer):
        return min(space_or_value.candidates)
    if isinstance(space_or_value, Continuous):
        return space_or_value.lower
    if isinstance(space_or_value, Categorical):
        return min(get_min(space_or_value[index]) for index in range(len(space_or_value)))
    return space_or_value
|
class Space(metaclass=abc.ABCMeta):
    """Basic search space describing the set of possible candidate values for hyperparameter.
    All search space must inherit from this basic class.
    """

    def __init__(self):
        # caches for the most recent sample / abstract view (see the reuse_last flags)
        self._last_sample = None
        self._last_abstract = None

    # BUG FIX: `xrepr` and `abstract` take arguments and are invoked as methods
    # (e.g. `self.xrepr()` in __repr__), so the deprecated `abc.abstractproperty`
    # (removed in Python 3.12) was the wrong decorator; use `abstractmethod`.
    @abc.abstractmethod
    def xrepr(self, depth=0) -> Text:
        """Return a human-readable representation, indented by `depth`."""
        raise NotImplementedError

    def __repr__(self) -> Text:
        return self.xrepr()

    @abc.abstractmethod
    def abstract(self, reuse_last=False) -> "Space":
        """Return the still-undecided skeleton of this space."""
        raise NotImplementedError

    @abc.abstractmethod
    def random(self, recursion=True, reuse_last=False):
        """Draw a random sample (recursing into nested spaces when requested)."""
        raise NotImplementedError

    @abc.abstractmethod
    def clean_last_sample(self):
        """Drop the cached result of the last `random` call."""
        raise NotImplementedError

    @abc.abstractmethod
    def clean_last_abstract(self):
        """Drop the cached result of the last `abstract` call."""
        raise NotImplementedError

    def clean_last(self):
        self.clean_last_sample()
        self.clean_last_abstract()

    @property
    @abc.abstractmethod
    def determined(self) -> bool:
        """Whether this space has collapsed to a single concrete value."""
        raise NotImplementedError

    @abc.abstractmethod
    def has(self, x) -> bool:
        """Check whether x is in this search space."""
        assert not isinstance(x, Space), 'The input value itself can not be a search space.'

    @abc.abstractmethod
    def __eq__(self, other):
        raise NotImplementedError

    def copy(self) -> "Space":
        return copy.deepcopy(self)
|
class VirtualNode(Space):
    """For a nested search space, we represent it as a tree structure.

    Each node holds an optional concrete `value` plus named child spaces in
    `_attributes` (an ordered mapping of str -> Space).
    """

    def __init__(self, id=None, value=None):
        super(VirtualNode, self).__init__()
        self._id = id
        self._value = value
        self._attributes = OrderedDict()

    @property
    def value(self):
        return self._value

    def append(self, key, value):
        """Attach child space `value` under the string name `key`."""
        if not isinstance(key, str):
            raise TypeError('Only accept string as a key instead of {:}'.format(type(key)))
        if not isinstance(value, Space):
            raise ValueError('Invalid type of value: {:}'.format(type(value)))
        self._attributes[key] = value

    def xrepr(self, depth=0) -> Text:
        strs = [self.__class__.__name__ + '(value={:}'.format(self._value)]
        for (key, value) in self._attributes.items():
            strs.append(key + ' = ' + value.xrepr(depth + 1))
        strs.append(')')
        if len(strs) == 2:
            # no children -> keep the representation on one line
            return ''.join(strs)
        else:
            space = ' '
            xstrs = ([strs[0]] + [(space * (depth + 1)) + x for x in strs[1:(- 1)]]) + [(space * depth) + strs[(- 1)]]
            return ',\n'.join(xstrs)

    def abstract(self, reuse_last=False) -> Space:
        """Return a node containing only the still-undecided child spaces."""
        if reuse_last and (self._last_abstract is not None):
            return self._last_abstract
        node = VirtualNode(id(self))
        for (key, value) in self._attributes.items():
            if not value.determined:
                # BUG FIX: `append` requires (key, value); the key was previously
                # dropped, which raised TypeError on any undetermined attribute.
                node.append(key, value.abstract(reuse_last))
        self._last_abstract = node
        return self._last_abstract

    def random(self, recursion=True, reuse_last=False):
        if reuse_last and (self._last_sample is not None):
            return self._last_sample
        node = VirtualNode(None, self._value)
        for (key, value) in self._attributes.items():
            node.append(key, value.random(recursion, reuse_last))
        self._last_sample = node
        return node

    def clean_last_sample(self):
        self._last_sample = None
        for (key, value) in self._attributes.items():
            value.clean_last_sample()

    def clean_last_abstract(self):
        self._last_abstract = None
        for (key, value) in self._attributes.items():
            value.clean_last_abstract()

    def has(self, x) -> bool:
        # true if any child space contains x
        for (key, value) in self._attributes.items():
            if value.has(x):
                return True
        return False

    def __contains__(self, key):
        return key in self._attributes

    def __getitem__(self, key):
        return self._attributes[key]

    @property
    def determined(self) -> bool:
        for (key, value) in self._attributes.items():
            if not value.determined:
                return False
        return True

    def __eq__(self, other):
        if not isinstance(other, VirtualNode):
            return False
        for (key, value) in self._attributes.items():
            if not (key in other):
                return False
            if value != other[key]:
                return False
        return True
|
class Categorical(Space):
    """A space contains the categorical values.
    It can be a nested space, which means that the candidate in this space can also be a search space.
    """

    def __init__(self, *data, default: Optional[int]=None):
        super(Categorical, self).__init__()
        # candidates may themselves be Space instances (nested search space)
        self._candidates = [*data]
        # `default` is an index into the candidate list, not a candidate value
        self._default = default
        assert ((self._default is None) or (0 <= self._default < len(self._candidates))), 'default >= {:}'.format(len(self._candidates))
        assert (len(self) > 0), 'Please provide at least one candidate'

    @property
    def candidates(self):
        return self._candidates

    @property
    def default(self):
        return self._default

    @property
    def determined(self):
        # determined only with a single candidate that is itself determined
        if (len(self) == 1):
            return ((not isinstance(self._candidates[0], Space)) or self._candidates[0].determined)
        else:
            return False

    def __getitem__(self, index):
        return self._candidates[index]

    def __len__(self):
        return len(self._candidates)

    def clean_last_sample(self):
        self._last_sample = None
        for candidate in self._candidates:
            if isinstance(candidate, Space):
                candidate.clean_last_sample()

    def clean_last_abstract(self):
        self._last_abstract = None
        for candidate in self._candidates:
            if isinstance(candidate, Space):
                candidate.clean_last_abstract()

    def abstract(self, reuse_last=False) -> Space:
        """Return the undecided view: a VirtualNode when determined, otherwise a
        Categorical of abstracted candidates."""
        if (reuse_last and (self._last_abstract is not None)):
            return self._last_abstract
        if self.determined:
            result = VirtualNode(id(self), self)
        else:
            data = []
            for candidate in self.candidates:
                if isinstance(candidate, Space):
                    data.append(candidate.abstract())
                else:
                    data.append(VirtualNode(id(candidate), candidate))
            result = Categorical(*data, default=self._default)
        self._last_abstract = result
        return self._last_abstract

    def random(self, recursion=True, reuse_last=False):
        """Uniformly pick one candidate; wrap plain values into a VirtualNode."""
        if (reuse_last and (self._last_sample is not None)):
            return self._last_sample
        sample = random.choice(self._candidates)
        if (recursion and isinstance(sample, Space)):
            sample = sample.random(recursion, reuse_last)
        if isinstance(sample, VirtualNode):
            # copy so the cached sample is not aliased with the child's cache
            sample = sample.copy()
        else:
            sample = VirtualNode(None, sample)
        self._last_sample = sample
        return self._last_sample

    def xrepr(self, depth=0):
        del depth
        xrepr = '{name:}(candidates={cs:}, default_index={default:})'.format(name=self.__class__.__name__, cs=self._candidates, default=self._default)
        return xrepr

    def has(self, x):
        super().has(x)
        for candidate in self._candidates:
            if (isinstance(candidate, Space) and candidate.has(x)):
                return True
            elif (candidate == x):
                return True
        return False

    def __eq__(self, other):
        if (not isinstance(other, Categorical)):
            return False
        if (len(self) != len(other)):
            return False
        if (self.default != other.default):
            return False
        for index in range(len(self)):
            if (self.__getitem__(index) != other[index]):
                return False
        return True
|
class Integer(Categorical):
    """A space contains the integer values in [lower, upper] (inclusive)."""

    def __init__(self, lower: int, upper: int, default: Optional[int]=None):
        if ((not isinstance(lower, int)) or (not isinstance(upper, int))):
            raise ValueError('The lower [{:}] and upper [{:}] must be int.'.format(lower, upper))
        data = list(range(lower, (upper + 1)))
        self._raw_lower = lower
        self._raw_upper = upper
        self._raw_default = default
        if ((default is not None) and ((default < lower) or (default > upper))):
            raise ValueError('The default value [{:}] is out of range.'.format(default))
        if default is not None:
            # BUG FIX: translate `default` (a value) into a candidate index only when
            # it is given; previously `data.index(None)` raised ValueError whenever
            # no default was supplied.
            default = data.index(default)
        super(Integer, self).__init__(*data, default=default)

    def xrepr(self, depth=0):
        del depth
        xrepr = '{name:}(lower={lower:}, upper={upper:}, default={default:})'.format(name=self.__class__.__name__, lower=self._raw_lower, upper=self._raw_upper, default=self._raw_default)
        return xrepr
|
class Continuous(Space):
    """A space contains the continuous values in [lower, upper], optionally sampled on a log scale."""

    def __init__(self, lower: float, upper: float, default: Optional[float]=None, log: bool=False, eps: float=_EPS):
        super(Continuous, self).__init__()
        self._lower = lower
        self._upper = upper
        self._default = default
        # when True, `random` samples uniformly in log-space
        self._log_scale = log
        # tolerance used by `determined`
        self._eps = eps

    @property
    def lower(self):
        return self._lower

    @property
    def upper(self):
        return self._upper

    @property
    def default(self):
        return self._default

    @property
    def use_log(self):
        return self._log_scale

    @property
    def eps(self):
        return self._eps

    def abstract(self, reuse_last=False) -> Space:
        # a continuous range cannot be reduced further; its abstract view is a copy
        if (reuse_last and (self._last_abstract is not None)):
            return self._last_abstract
        self._last_abstract = self.copy()
        return self._last_abstract

    def random(self, recursion=True, reuse_last=False):
        del recursion
        if (reuse_last and (self._last_sample is not None)):
            return self._last_sample
        if self._log_scale:
            # sample uniformly in log-space, then map back
            sample = random.uniform(math.log(self._lower), math.log(self._upper))
            sample = math.exp(sample)
        else:
            sample = random.uniform(self._lower, self._upper)
        self._last_sample = VirtualNode(None, sample)
        return self._last_sample

    def xrepr(self, depth=0):
        del depth
        xrepr = '{name:}(lower={lower:}, upper={upper:}, default_value={default:}, log_scale={log:})'.format(name=self.__class__.__name__, lower=self._lower, upper=self._upper, default=self._default, log=self._log_scale)
        return xrepr

    def convert(self, x):
        """Coerce `x` to float; returns (value, True) on success, (None, False) otherwise."""
        if (isinstance(x, np_float_types) and (x.size == 1)):
            return (float(x), True)
        elif (isinstance(x, np_int_types) and (x.size == 1)):
            return (float(x), True)
        elif isinstance(x, int):
            return (float(x), True)
        elif isinstance(x, float):
            return (float(x), True)
        else:
            return (None, False)

    def has(self, x):
        super().has(x)
        (converted_x, success) = self.convert(x)
        return (success and (self.lower <= converted_x <= self.upper))

    @property
    def determined(self):
        # determined when the interval has (numerically) collapsed to one point
        return (abs((self.lower - self.upper)) <= self._eps)

    def clean_last_sample(self):
        self._last_sample = None

    def clean_last_abstract(self):
        self._last_abstract = None

    def __eq__(self, other):
        if (not isinstance(other, Continuous)):
            return False
        if (self is other):
            return True
        else:
            return ((self.lower == other.lower) and (self.upper == other.upper) and (self.default == other.default) and (self.use_log == other.use_log) and (self.eps == other.eps))
|
def train_or_test_epoch(xloader, model, loss_fn, metric_fn, is_train, optimizer, device):
    """Run a single pass over `xloader` and return (avg loss, avg metric score).

    When `is_train` is True and an optimizer is given, a gradient step with
    value-clipping at 3.0 is taken per batch; otherwise only forward passes run.
    """
    if is_train:
        model.train()
    else:
        model.eval()
    loss_meter, score_meter = AverageMeter(), AverageMeter()
    for batch_index, (feats, labels) in enumerate(xloader):
        feats, labels = feats.to(device), labels.to(device)
        preds = model(feats)
        loss = loss_fn(preds, labels)
        with torch.no_grad():
            # metric + bookkeeping must not contribute to the graph
            score = metric_fn(preds, labels)
            loss_meter.update(loss.item(), feats.size(0))
            score_meter.update(score.item(), feats.size(0))
        if is_train and optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            # clip by value (not norm) to stabilize updates
            torch.nn.utils.clip_grad_value_(model.parameters(), 3.0)
            optimizer.step()
    return loss_meter.avg, score_meter.avg
|
class QuantTransformer(Model):
    """Transformer-based Quant Model (qlib-style): fit/predict over tabular features."""

    def __init__(self, net_config=None, opt_config=None, metric='', GPU=0, seed=None, **kwargs):
        self.logger = get_module_logger('QuantTransformer')
        self.logger.info('QuantTransformer PyTorch version...')
        self.net_config = (net_config or DEFAULT_NET_CONFIG)
        self.opt_config = (opt_config or DEFAULT_OPT_CONFIG)
        self.metric = metric
        self.device = torch.device(('cuda:{:}'.format(GPU) if (torch.cuda.is_available() and (GPU >= 0)) else 'cpu'))
        self.seed = seed
        self.logger.info('Transformer parameters setting:\nnet_config : {:}\nopt_config : {:}\nmetric : {:}\ndevice : {:}\nseed : {:}'.format(self.net_config, self.opt_config, self.metric, self.device, self.seed))
        if (self.seed is not None):
            random.seed(self.seed)
            np.random.seed(self.seed)
            torch.manual_seed(self.seed)
            if self.use_gpu:
                torch.cuda.manual_seed(self.seed)
                torch.cuda.manual_seed_all(self.seed)
        self.model = get_transformer(self.net_config)
        self.model.set_super_run_type(super_core.SuperRunMode.FullModel)
        self.logger.info('model: {:}'.format(self.model))
        self.logger.info('model size: {:.3f} MB'.format(count_parameters(self.model)))
        if (self.opt_config['optimizer'] == 'adam'):
            self.train_optimizer = optim.Adam(self.model.parameters(), lr=self.opt_config['lr'])
        elif (self.opt_config['optimizer'] == 'sgd'):
            # BUG FIX: this branch previously re-tested 'adam', so SGD was unreachable.
            self.train_optimizer = optim.SGD(self.model.parameters(), lr=self.opt_config['lr'])
        else:
            # BUG FIX: the message previously referenced the undefined name `optimizer`.
            raise NotImplementedError('optimizer {:} is not supported!'.format(self.opt_config['optimizer']))
        self.fitted = False
        self.model.to(self.device)

    @property
    def use_gpu(self):
        return (self.device != torch.device('cpu'))

    def to(self, device):
        """Move the model and the optimizer state tensors to `device` ('cpu' if None)."""
        if (device is None):
            device = 'cpu'
        self.device = device
        self.model.to(self.device)
        # optimizer state (e.g. Adam moments) must follow the parameters
        for param in self.train_optimizer.state.values():
            if isinstance(param, torch.Tensor):
                param.data = param.data.to(device)
                if (param._grad is not None):
                    param._grad.data = param._grad.data.to(device)
            elif isinstance(param, dict):
                for subparam in param.values():
                    if isinstance(subparam, torch.Tensor):
                        subparam.data = subparam.data.to(device)
                        if (subparam._grad is not None):
                            subparam._grad.data = subparam._grad.data.to(device)

    def loss_fn(self, pred, label):
        # NaN labels are masked out before computing the loss
        mask = (~ torch.isnan(label))
        if (self.opt_config['loss'] == 'mse'):
            return F.mse_loss(pred[mask], label[mask])
        else:
            # BUG FIX: previously formatted the nonexistent attribute `self.loss`.
            raise ValueError('unknown loss `{:}`'.format(self.opt_config['loss']))

    def metric_fn(self, pred, label):
        # higher-is-better convention: negate the loss when used as the metric
        if ((self.metric == '') or (self.metric == 'loss')):
            return (- self.loss_fn(pred, label))
        else:
            raise ValueError('unknown metric `{:}`'.format(self.metric))

    def fit(self, dataset: DatasetH, save_dir: Optional[Text]=None):
        """Train with early stopping; resumable from the checkpoint in `save_dir`."""
        def _prepare_dataset(df_data):
            return th_data.TensorDataset(torch.from_numpy(df_data['feature'].values).float(), torch.from_numpy(df_data['label'].values).squeeze().float())

        def _prepare_loader(dataset, shuffle):
            return th_data.DataLoader(dataset, batch_size=self.opt_config['batch_size'], drop_last=False, pin_memory=True, num_workers=self.opt_config['num_workers'], shuffle=shuffle)
        (df_train, df_valid, df_test) = dataset.prepare(['train', 'valid', 'test'], col_set=['feature', 'label'], data_key=DataHandlerLP.DK_L)
        (train_dataset, valid_dataset, test_dataset) = (_prepare_dataset(df_train), _prepare_dataset(df_valid), _prepare_dataset(df_test))
        (train_loader, valid_loader, test_loader) = (_prepare_loader(train_dataset, True), _prepare_loader(valid_dataset, False), _prepare_loader(test_dataset, False))
        save_dir = get_or_create_path(save_dir, return_dir=True)
        self.logger.info('Fit procedure for [{:}] with save path={:}'.format(self.__class__.__name__, save_dir))

        def _internal_test(ckp_epoch=None, results_dict=None):
            # evaluate all three splits without gradients; optionally record scores
            with torch.no_grad():
                shared_kwards = {'model': self.model, 'loss_fn': self.loss_fn, 'metric_fn': self.metric_fn, 'is_train': False, 'optimizer': None, 'device': self.device}
                (train_loss, train_score) = train_or_test_epoch(train_loader, **shared_kwards)
                (valid_loss, valid_score) = train_or_test_epoch(valid_loader, **shared_kwards)
                (test_loss, test_score) = train_or_test_epoch(test_loader, **shared_kwards)
                xstr = 'train-score={:.6f}, valid-score={:.6f}, test-score={:.6f}'.format(train_score, valid_score, test_score)
                if ((ckp_epoch is not None) and isinstance(results_dict, dict)):
                    results_dict['train'][ckp_epoch] = train_score
                    results_dict['valid'][ckp_epoch] = valid_score
                    results_dict['test'][ckp_epoch] = test_score
                return (dict(train=train_score, valid=valid_score, test=test_score), xstr)
        ckp_path = os.path.join(save_dir, '{:}.pth'.format(self.__class__.__name__))
        if os.path.exists(ckp_path):
            # resume a previously interrupted fit
            ckp_data = torch.load(ckp_path, map_location=self.device)
            (stop_steps, best_score, best_epoch) = (ckp_data['stop_steps'], ckp_data['best_score'], ckp_data['best_epoch'])
            (start_epoch, best_param) = (ckp_data['start_epoch'], ckp_data['best_param'])
            results_dict = ckp_data['results_dict']
            self.model.load_state_dict(ckp_data['net_state_dict'])
            self.train_optimizer.load_state_dict(ckp_data['opt_state_dict'])
            self.logger.info('Resume from existing checkpoint: {:}'.format(ckp_path))
        else:
            (stop_steps, best_score, best_epoch) = (0, (- np.inf), (- 1))
            (start_epoch, best_param) = (0, None)
            results_dict = dict(train=OrderedDict(), valid=OrderedDict(), test=OrderedDict())
            (_, eval_str) = _internal_test((- 1), results_dict)
            self.logger.info('Training from scratch, metrics@start: {:}'.format(eval_str))
        for iepoch in range(start_epoch, self.opt_config['epochs']):
            self.logger.info('Epoch={:03d}/{:03d} ::==>> Best valid @{:03d} ({:.6f})'.format(iepoch, self.opt_config['epochs'], best_epoch, best_score))
            (train_loss, train_score) = train_or_test_epoch(train_loader, self.model, self.loss_fn, self.metric_fn, True, self.train_optimizer, self.device)
            self.logger.info('Training :: loss={:.6f}, score={:.6f}'.format(train_loss, train_score))
            (current_eval_scores, eval_str) = _internal_test(iepoch, results_dict)
            self.logger.info('Evaluating :: {:}'.format(eval_str))
            if (current_eval_scores['valid'] > best_score):
                (stop_steps, best_epoch, best_score) = (0, iepoch, current_eval_scores['valid'])
                best_param = copy.deepcopy(self.model.state_dict())
            else:
                stop_steps += 1
                if (stop_steps >= self.opt_config['early_stop']):
                    self.logger.info('early stop at {:}-th epoch, where the best is @{:}'.format(iepoch, best_epoch))
                    break
            # checkpoint every epoch so fit() can resume after interruption
            save_info = dict(net_config=self.net_config, opt_config=self.opt_config, net_state_dict=self.model.state_dict(), opt_state_dict=self.train_optimizer.state_dict(), best_param=best_param, stop_steps=stop_steps, best_score=best_score, best_epoch=best_epoch, results_dict=results_dict, start_epoch=(iepoch + 1))
            torch.save(save_info, ckp_path)
        self.logger.info('The best score: {:.6f} @ {:02d}-th epoch'.format(best_score, best_epoch))
        self.model.load_state_dict(best_param)
        (_, eval_str) = _internal_test('final', results_dict)
        self.logger.info('Reload the best parameter :: {:}'.format(eval_str))
        if self.use_gpu:
            with torch.cuda.device(self.device):
                torch.cuda.empty_cache()
        self.fitted = True

    def predict(self, dataset: DatasetH, segment: Union[(Text, slice)]='test'):
        """Predict on `segment` of `dataset`; returns a pandas Series indexed like the input."""
        if (not self.fitted):
            raise ValueError('The model is not fitted yet!')
        x_test = dataset.prepare(segment, col_set='feature', data_key=DataHandlerLP.DK_I)
        index = x_test.index
        with torch.no_grad():
            self.model.eval()
            x_values = x_test.values
            (sample_num, batch_size) = (x_values.shape[0], self.opt_config['batch_size'])
            preds = []
            for begin in range(sample_num)[::batch_size]:
                if ((sample_num - begin) < batch_size):
                    end = sample_num
                else:
                    end = (begin + batch_size)
                x_batch = torch.from_numpy(x_values[begin:end]).float().to(self.device)
                with torch.no_grad():
                    pred = self.model(x_batch).detach().cpu().numpy()
                preds.append(pred)
        return pd.Series(np.concatenate(preds), index=index)
|
def obtain_accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
      output: (batch, classes) score tensor.
      target: (batch,) ground-truth class indices.
      topk: iterable of k values.

    Returns:
      list of 1-element tensors, each the percentage of samples whose target
      is within the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # (batch, maxk) top predictions, transposed to (maxk, batch)
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: `correct[:k]` is non-contiguous (it comes from `.t()`), so
        # `.view(-1)` raises on recent PyTorch; `.reshape(-1)` handles both cases.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_((100.0 / batch_size)))
    return res
|
def count_parameters_in_MB(model):
    """Legacy helper: parameter count scaled with decimal (1e6) megabytes."""
    return count_parameters(model, unit='mb', deprecated=True)
|
def count_parameters(model_or_parameters, unit='mb', deprecated=False):
    """Count the parameters of a module, parameter, or collection thereof.

    Args:
        model_or_parameters: an nn.Module, an nn.Parameter, a list/tuple of
            either, or any iterable of tensors.
        unit: None for a raw count, or one of 'kb'/'k', 'mb'/'m', 'gb'/'g'
            (case-insensitive).
        deprecated: if True, use decimal units (1e3/1e6/1e9) instead of
            binary units (2**10/2**20/2**30).

    Returns:
        The (possibly scaled) parameter count.

    Raises:
        ValueError: for an unknown unit value or type.
    """
    if isinstance(model_or_parameters, nn.Module):
        # numel() is the idiomatic per-tensor element count (was np.prod(size)).
        counts = sum(v.numel() for v in model_or_parameters.parameters())
    elif isinstance(model_or_parameters, nn.Parameter):
        counts = model_or_parameters.numel()
    elif isinstance(model_or_parameters, (list, tuple)):
        counts = sum(count_parameters(x, None, deprecated) for x in model_or_parameters)
    else:
        counts = sum(np.prod(v.size()) for v in model_or_parameters)
    if not isinstance(unit, str) and unit is not None:
        raise ValueError('Unknown type of unit: {:}'.format(unit))
    if unit is None:
        return counts
    # Map unit names to a power of one thousand (or of 2**10 when not deprecated).
    exponents = {'kb': 1, 'k': 1, 'mb': 2, 'm': 2, 'gb': 3, 'g': 3}
    key = unit.lower()
    if key not in exponents:
        raise ValueError('Unknown unit: {:}'.format(unit))
    power = exponents[key]
    divisor = (1000.0 ** power) if deprecated else float(2 ** (10 * power))
    return counts / divisor
|
def get_model_infos(model, shape):
    """Measure a model's FLOPs (in millions) and parameter count (in MB).

    Runs one dummy forward pass of the given input `shape` with counting
    hooks attached, then removes the hooks before returning.
    """
    model = add_flops_counting_methods(model)
    model.eval()
    dummy_input = torch.rand(*shape)
    if next(model.parameters()).is_cuda:
        dummy_input = dummy_input.cuda()
    with torch.no_grad():
        model(dummy_input)
    flops = compute_average_flops_cost(model) / 1e6
    params = count_parameters_in_MB(model)
    if hasattr(model, 'auxiliary_param'):
        # Auxiliary heads are excluded from the reported parameter count.
        aux_params = count_parameters_in_MB(model.auxiliary_param())
        print('The auxiliary params of this model is : {:}'.format(aux_params))
        print('We remove the auxiliary params from the total params ({:}) when counting'.format(params))
        params = params - aux_params
    torch.cuda.empty_cache()
    model.apply(remove_hook_function)
    return flops, params
|
def add_flops_counting_methods(model):
    """Attach batch and FLOP counters (plus forward hooks) to `model` in place."""
    model.__batch_counter__ = 0
    add_batch_counter_hook_function(model)
    for setup in (add_flops_counter_variable_or_reset, add_flops_counter_hook_function):
        model.apply(setup)
    return model
|
def compute_average_flops_cost(model):
    """Return the mean FLOPs consumed per image.

    Only meaningful after add_flops_counting_methods() has been applied and
    at least one forward pass has run.
    """
    tracked_types = (torch.nn.Conv2d, torch.nn.Linear, torch.nn.Conv1d)
    total_flops = 0
    for module in model.modules():
        if isinstance(module, tracked_types) or hasattr(module, 'calculate_flop_self'):
            total_flops += module.__flops__
    return total_flops / model.__batch_counter__
|
def pool_flops_counter_hook(pool_module, inputs, output):
    """Forward hook accumulating FLOPs for AvgPool2d / MaxPool2d modules."""
    batch_size = inputs[0].size(0)
    kernel_size = pool_module.kernel_size
    # kernel_size can be an int or an (h, w) pair; the original multiplied
    # kernel_size * kernel_size, which crashes (or mis-counts) for tuples.
    if isinstance(kernel_size, (tuple, list)):
        kernel_h, kernel_w = kernel_size
    else:
        kernel_h = kernel_w = kernel_size
    out_C, output_height, output_width = output.shape[1:]
    assert out_C == inputs[0].size(1), '{:} vs. {:}'.format(out_C, inputs[0].size())
    overall_flops = batch_size * out_C * output_height * output_width * kernel_h * kernel_w
    pool_module.__flops__ += overall_flops
|
def self_calculate_flops_counter_hook(self_module, inputs, output):
    """Forward hook for modules that expose their own calculate_flop_self()."""
    self_module.__flops__ += self_module.calculate_flop_self(inputs[0].shape, output.shape)
|
def fc_flops_counter_hook(fc_module, inputs, output):
    """Forward hook accumulating FLOPs for nn.Linear modules.

    Uses the last dimension for the feature sanity check, so the common
    2-D (batch, features) input works; the original hard-coded size(3),
    which raised IndexError for anything but 4-D tensors.
    """
    batch_size = inputs[0].size(0)
    xin, xout = fc_module.in_features, fc_module.out_features
    assert xin == inputs[0].size(-1) and xout == output.size(-1), 'IO=({:}, {:})'.format(xin, xout)
    overall_flops = batch_size * xin * xout
    if fc_module.bias is not None:
        overall_flops += batch_size * xout
    fc_module.__flops__ += overall_flops
|
def conv1d_flops_counter_hook(conv_module, inputs, outputs):
    """Forward hook accumulating FLOPs for nn.Conv1d modules."""
    batch = inputs[0].size(0)
    out_length = outputs.shape[-1]
    (kernel,) = conv_module.kernel_size
    # MACs per output position, shared across the batch and output length.
    per_position = kernel * conv_module.in_channels * conv_module.out_channels / conv_module.groups
    positions = batch * out_length
    flops = per_position * positions
    if conv_module.bias is not None:
        flops += conv_module.out_channels * positions
    conv_module.__flops__ += flops
|
def conv2d_flops_counter_hook(conv_module, inputs, output):
    """Forward hook accumulating FLOPs for nn.Conv2d modules."""
    batch = inputs[0].size(0)
    out_h, out_w = output.shape[2:]
    k_h, k_w = conv_module.kernel_size
    # MACs per output position, shared across the batch and spatial grid.
    per_position = k_h * k_w * conv_module.in_channels * conv_module.out_channels / conv_module.groups
    positions = batch * out_h * out_w
    flops = per_position * positions
    if conv_module.bias is not None:
        flops += conv_module.out_channels * positions
    conv_module.__flops__ += flops
|
def batch_counter_hook(module, inputs, output):
    """Forward hook: add this call's batch size to module.__batch_counter__."""
    module.__batch_counter__ += inputs[0].shape[0]
|
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook once per module (idempotent)."""
    if hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__ = module.register_forward_hook(batch_counter_hook)
|
def add_flops_counter_variable_or_reset(module):
    """Create or reset the per-module FLOP accumulator on countable modules."""
    # One isinstance call with a tuple replaces the original five-way
    # isinstance(...) or isinstance(...) chain.
    countable = (torch.nn.Conv2d, torch.nn.Linear, torch.nn.Conv1d, torch.nn.AvgPool2d, torch.nn.MaxPool2d)
    if isinstance(module, countable) or hasattr(module, 'calculate_flop_self'):
        module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
    """Attach the FLOP-counting forward hook matching this module type.

    At most one hook is registered per module; modules that already carry
    a handle, or match no known type, are left untouched.
    """
    if hasattr(module, '__flops_handle__'):
        return
    dispatch = (
        (torch.nn.Conv2d, conv2d_flops_counter_hook),
        (torch.nn.Conv1d, conv1d_flops_counter_hook),
        (torch.nn.Linear, fc_flops_counter_hook),
        ((torch.nn.AvgPool2d, torch.nn.MaxPool2d), pool_flops_counter_hook),
    )
    for module_types, hook in dispatch:
        if isinstance(module, module_types):
            module.__flops_handle__ = module.register_forward_hook(hook)
            return
    if hasattr(module, 'calculate_flop_self'):
        module.__flops_handle__ = module.register_forward_hook(self_calculate_flops_counter_hook)
|
def remove_hook_function(module):
    """Detach counting hooks and delete the bookkeeping attributes they used."""
    hookers = ['__batch_counter_handle__', '__flops_handle__']
    for hooker in hookers:
        if hasattr(module, hooker):
            getattr(module, hooker).remove()
    # The original listed '__flops__' twice; each attribute once suffices.
    for ckey in ['__flops__', '__batch_counter__'] + hookers:
        if hasattr(module, ckey):
            delattr(module, ckey)
|
class GPUManager():
    """Query NVIDIA GPU status via nvidia-smi and pick devices by free memory."""
    queries = ('index', 'gpu_name', 'memory.free', 'memory.used', 'memory.total', 'power.draw', 'power.limit')

    def __init__(self):
        # Probe once at construction time so a broken nvidia-smi fails early.
        self.query_gpu(False)

    def get_info(self, ctype):
        """Return one stripped output line per GPU for a single query field."""
        cmd = 'nvidia-smi --query-gpu={} --format=csv,noheader'.format(ctype)
        return [line.strip('\n') for line in os.popen(cmd).readlines()]

    def query_gpu(self, show=True):
        """Collect every query field for every GPU.

        Honors CUDA_VISIBLE_DEVICES by filtering and re-indexing the reported
        GPUs. Returns a printable summary string when `show` is True, else a
        list of per-GPU dicts.
        """
        all_gpus = [{} for _ in range(len(self.get_info('index')))]
        for query in self.queries:
            for idx, info in enumerate(self.get_info(query)):
                all_gpus[idx][query] = info
        if 'CUDA_VISIBLE_DEVICES' in os.environ:
            visible = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
            selected_gpus = []
            for idx, CUDA_VISIBLE_DEVICE in enumerate(visible):
                find = False
                for gpu in all_gpus:
                    if gpu['index'] == CUDA_VISIBLE_DEVICE:
                        assert not find, 'Duplicate cuda device index : {}'.format(CUDA_VISIBLE_DEVICE)
                        find = True
                        selected_gpus.append(gpu.copy())
                        # Re-index to the position within CUDA_VISIBLE_DEVICES.
                        selected_gpus[-1]['index'] = '{}'.format(idx)
                assert find, 'Does not find the device : {}'.format(CUDA_VISIBLE_DEVICE)
            all_gpus = selected_gpus
        if not show:
            return all_gpus
        allstrings = ''
        for gpu in all_gpus:
            string = '| '
            for query in self.queries:
                xinfo = '{:>9}'.format(gpu[query]) if query.find('memory') == 0 else gpu[query]
                string = string + query + ' : ' + xinfo + ' | '
            allstrings = allstrings + string + '\n'
        return allstrings

    def select_by_memory(self, numbers=1):
        """Return the indices of the `numbers` GPUs with the most free memory, ascending."""
        all_gpus = self.query_gpu(False)
        assert numbers <= len(all_gpus), 'Require {} gpus more than you have'.format(numbers)
        # memory.free looks like '1234 MiB'; sort descending on the number.
        ranked = [(int(gpu['memory.free'].split(' ')[0]), gpu['index']) for gpu in all_gpus]
        ranked.sort(reverse=True)
        return sorted(int(ranked[i][1]) for i in range(numbers))
|
def get_md5_file(file_path, post_truncated=5):
    """Return the MD5 hex digest of a file's contents.

    Args:
        file_path: path of the file to hash.
        post_truncated: if not None, return only the last this-many hex digits.

    Returns:
        The hex digest (possibly truncated).

    Raises:
        ValueError: if `file_path` does not exist.
    """
    if not os.path.exists(file_path):
        raise ValueError('[get_md5_file] {:} does not exist'.format(file_path))
    md5_hash = hashlib.md5()
    # Read in chunks inside a context manager: the original leaked the file
    # handle and loaded the whole file into memory at once.
    with open(file_path, 'rb') as xfile:
        for chunk in iter(lambda: xfile.read(1 << 20), b''):
            md5_hash.update(chunk)
    digest = md5_hash.hexdigest()
    return digest if post_truncated is None else digest[-post_truncated:]
|
def evaluate_one_shot(model, xloader, api, cal_mode, seed=111):
    """Correlate a one-shot supernet's arch scores with NAS-Bench ground truth.

    Args:
        model: supernet exposing arch_parameters, op_names, max_nodes,
            edge2index and set_cal_mode (one-shot search-model interface).
        xloader: loader yielding (inputs, targets) batches; CUDA is assumed
            (inputs/targets are moved with .cuda() below).
        api: NAS-Bench API object used to query ground-truth accuracies.
        cal_mode: passed to model.train(); True keeps e.g. BN in train mode.
        seed: random seed used to shuffle the architecture list.

    Returns:
        (archs, probs, accuracies): all enumerated architectures (shuffled),
        their summed log-probabilities under the supernet's arch parameters,
        and their one-batch shared-weight accuracies.
    """
    print('This is an old version of codes to use NAS-Bench-API, and should be modified to align with the new version. Please contact me for more details if you use this function.')
    # Snapshot the weights so the model can be restored before returning.
    weights = deepcopy(model.state_dict())
    model.train(cal_mode)
    with torch.no_grad():
        logits = nn.functional.log_softmax(model.arch_parameters, dim=(- 1))
    # Enumerate every architecture in the search space, then shuffle.
    archs = CellStructure.gen_all(model.op_names, model.max_nodes, False)
    (probs, accuracies, gt_accs_10_valid, gt_accs_10_test) = ([], [], [], [])
    loader_iter = iter(xloader)
    random.seed(seed)
    random.shuffle(archs)
    # First pass: query benchmark ground truth and score each architecture
    # by summing the log-probabilities of its selected operations.
    for (idx, arch) in enumerate(archs):
        arch_index = api.query_index_by_arch(arch)
        metrics = api.get_more_info(arch_index, 'cifar10-valid', None, False, False)
        gt_accs_10_valid.append(metrics['valid-accuracy'])
        metrics = api.get_more_info(arch_index, 'cifar10', None, False, False)
        gt_accs_10_test.append(metrics['test-accuracy'])
        select_logits = []
        for (i, node_info) in enumerate(arch.nodes):
            for (op, xin) in node_info:
                node_str = '{:}<-{:}'.format((i + 1), xin)
                op_index = model.op_names.index(op)
                select_logits.append(logits[(model.edge2index[node_str], op_index)])
        cur_prob = sum(select_logits).item()
        probs.append(cur_prob)
    cor_prob_valid = np.corrcoef(probs, gt_accs_10_valid)[(0, 1)]
    cor_prob_test = np.corrcoef(probs, gt_accs_10_test)[(0, 1)]
    print('{:} correlation for probabilities : {:.6f} on CIFAR-10 validation and {:.6f} on CIFAR-10 test'.format(time_string(), cor_prob_valid, cor_prob_test))
    # Second pass: measure one-batch accuracy of each arch via weight sharing.
    for (idx, arch) in enumerate(archs):
        model.set_cal_mode('dynamic', arch)
        try:
            (inputs, targets) = next(loader_iter)
        except:
            # NOTE(review): bare except restarts the loader on ANY error, not
            # just StopIteration -- kept as-is to preserve behavior.
            loader_iter = iter(xloader)
            (inputs, targets) = next(loader_iter)
        (_, logits) = model(inputs.cuda())
        (_, preds) = torch.max(logits, dim=(- 1))
        correct = (preds == targets.cuda()).float()
        accuracies.append(correct.mean().item())
        # Periodically report the running correlation with ground truth.
        if ((idx != 0) and (((idx % 500) == 0) or ((idx + 1) == len(archs)))):
            cor_accs_valid = np.corrcoef(accuracies, gt_accs_10_valid[:(idx + 1)])[(0, 1)]
            cor_accs_test = np.corrcoef(accuracies, gt_accs_10_test[:(idx + 1)])[(0, 1)]
            print('{:} {:05d}/{:05d} mode={:5s}, correlation : accs={:.5f} for CIFAR-10 valid, {:.5f} for CIFAR-10 test.'.format(time_string(), idx, len(archs), ('Train' if cal_mode else 'Eval'), cor_accs_valid, cor_accs_test))
    model.load_state_dict(weights)
    return (archs, probs, accuracies)
|
class QResult():
    """A class to maintain the results of a qlib experiment.

    Stores lists of metric values per key, recorder paths, and per-date IC
    mappings, and can format mean/std summaries for reporting.
    """

    def __init__(self, name):
        # key -> list of recorded values for that metric
        self._result = defaultdict(list)
        self._name = name
        self._recorder_paths = []
        self._date2ICs = []

    def append(self, key, value):
        """Record one value for metric `key`."""
        self._result[key].append(value)

    def append_path(self, xpath):
        """Remember the path of a recorder that contributed results."""
        self._recorder_paths.append(xpath)

    def append_date2ICs(self, date2IC):
        """Add a per-date IC mapping; all mappings must share the same dates."""
        if self._date2ICs:
            keys = sorted(list(date2IC.keys()))
            pre_keys = sorted(list(self._date2ICs[0].keys()))
            assert len(keys) == len(pre_keys)
            for i, (x, y) in enumerate(zip(keys, pre_keys)):
                assert x == y, '[{:}] {:} vs {:}'.format(i, x, y)
        self._date2ICs.append(date2IC)

    def find_all_dates(self):
        """Return the sorted dates of the most recently appended mapping."""
        dates = self._date2ICs[-1].keys()
        return sorted(list(dates))

    def get_IC_by_date(self, date, scale=1.0):
        """Return (mean, std) of the scaled ICs recorded for `date`."""
        values = [date2IC[date] * scale for date2IC in self._date2ICs]
        return float(np.mean(values)), float(np.std(values))

    @property
    def name(self):
        return self._name

    @property
    def paths(self):
        return self._recorder_paths

    @property
    def result(self):
        return self._result

    @property
    def keys(self):
        return list(self._result.keys())

    def __len__(self):
        return len(self._result)

    def __repr__(self):
        return '{name}({xname}, {num} metrics)'.format(name=self.__class__.__name__, xname=self.name, num=len(self.result))

    def __getitem__(self, key):
        """Return the mean of all values recorded for `key`."""
        if key not in self._result:
            raise ValueError('Invalid key {:}, please use one of {:}'.format(key, self.keys))
        values = self._result[key]
        return float(np.mean(values))

    def update(self, metrics, filter_keys=None):
        """Append `metrics`; when `filter_keys` is given, keep only the keys it
        maps and record them under their mapped names."""
        for key, value in metrics.items():
            if filter_keys is not None and key in filter_keys:
                key = filter_keys[key]
            elif filter_keys is not None:
                continue
            self.append(key, value)

    @staticmethod
    def full_str(xstr, space):
        """Left-justify str(xstr) in a field of width `space`."""
        xformat = '{:' + str(space) + 's}'
        return xformat.format(str(xstr))

    @staticmethod
    def merge_dict(dict_list):
        """Concatenate the value lists of dicts that share the same keys."""
        new_dict = dict()
        for xkey in dict_list[0].keys():
            new_dict[xkey] = [x for xdict in dict_list for x in xdict[xkey]]
        return new_dict

    def info(self, keys: List[Text], separate: Text='& ', space: int=20, verbose: bool=True, version: str='v1'):
        """Format mean/std of the requested metrics as aligned table rows.

        'IR' metrics are reported in percent; `version` selects the LaTeX style
        of the mean/std cell ('v0' uses $\\pm$, 'v1' uses \\subs{}). Returns
        (head_str, value_str) and prints them when `verbose` is True.
        """
        available_keys = []  # fixed the 'avaliable' spelling of the original local
        for key in keys:
            if key not in self.result:
                print('There are invalid key [{:}].'.format(key))
            else:
                available_keys.append(key)
        head_str = separate.join([self.full_str(x, space) for x in available_keys])
        values = []
        for key in available_keys:
            if 'IR' in key:
                current_values = [x * 100 for x in self._result[key]]
            else:
                current_values = self._result[key]
            mean = np.mean(current_values)
            std = np.std(current_values)
            if version == 'v0':
                values.append('{:.2f} $\\pm$ {:.2f}'.format(mean, std))
            elif version == 'v1':
                values.append('{:.2f}'.format(mean) + ' \\subs{' + '{:.2f}'.format(std) + '}')
            else:
                raise ValueError('Unknown version')
        value_str = separate.join([self.full_str(x, space) for x in values])
        if verbose:
            print(head_str)
            print(value_str)
        return head_str, value_str
|
def split_str2indexes(string: str, max_check: int, length_limit=5):
    """Parse a comma-separated list of 'lo-hi' ranges into a set of ints.

    Each piece must satisfy 0 <= lo <= hi < max_check; when `length_limit`
    is not None, both endpoints must have exactly that many digits.
    Whitespace anywhere in the input is ignored. Returns the set of all
    covered integers (both endpoints inclusive).
    """
    if not isinstance(string, str):
        raise ValueError('Invalid scheme for {:}'.format(string))
    compact = ''.join(string.split())
    indexes = set()
    for piece in compact.split(','):
        srange = piece.split('-')
        if len(srange) != 2:
            raise ValueError('invalid srange : {:}'.format(srange))
        if length_limit is not None:
            assert len(srange[0]) == len(srange[1]) == length_limit, 'invalid srange : {:}'.format(srange)
        lo, hi = int(srange[0]), int(srange[1])
        if not (0 <= lo <= hi < max_check):
            raise ValueError('{:} vs {:} vs {:}'.format(lo, hi, max_check))
        indexes.update(range(lo, hi + 1))
    return indexes
|
def show_mean_var(xlist):
    """Print mean and std of `xlist` (2 decimals) in a LaTeX-flavored form."""
    arr = np.array(xlist)
    mean_txt = '{:.2f}'.format(arr.mean())
    std_txt = '{:.2f}'.format(arr.std())
    print(mean_txt + '$_{{\\pm}{' + std_txt + '}}$')
|
def optimize_fn(xs, ys, device='cpu', max_iter=2000, max_lr=0.1):
    """Fit a small normalized MLP regressor mapping xs -> ys.

    Trains with Adam plus a 3-milestone MultiStepLR schedule, tracks the
    lowest-loss parameters, and restores them before returning.

    Returns:
        (model, loss_func, best_loss): the fitted model (at its best state),
        the MSE criterion, and the lowest training loss observed.
    """
    xs = torch.FloatTensor(xs).view(-1, 1).to(device)
    ys = torch.FloatTensor(ys).view(-1, 1).to(device)
    model = SuperSequential(
        SuperSimpleNorm(xs.mean().item(), xs.std().item()),
        SuperLinear(1, 200),
        torch.nn.LeakyReLU(),
        SuperLinear(200, 100),
        torch.nn.LeakyReLU(),
        SuperLinear(100, 1),
    ).to(device)
    model.train()
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr, amsgrad=True)
    loss_func = torch.nn.MSELoss()
    milestones = [int(max_iter * ratio) for ratio in (0.25, 0.5, 0.75)]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.3)
    best_loss, best_param = None, None
    for _ in range(max_iter):
        preds = model(xs)
        optimizer.zero_grad()
        loss = loss_func(preds, ys)
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        current = loss.item()
        # Keep the parameters that achieved the lowest training loss so far.
        if best_loss is None or best_loss > current:
            best_loss = current
            best_param = copy.deepcopy(model.state_dict())
    model.load_state_dict(best_param)
    return model, loss_func, best_loss
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.