code
stringlengths
17
6.64M
def evaluate_fn(model, xs, ys, loss_fn, device='cpu'):
    """Run ``model`` on ``xs`` without tracking gradients and score against ``ys``.

    Both ``xs`` and ``ys`` are reshaped to column vectors (N, 1) before the
    forward pass.  Returns ``(preds, loss)`` where ``preds`` is a flat numpy
    array of predictions (moved back to CPU) and ``loss`` is the scalar value
    of ``loss_fn(preds, ys)``.
    """
    with torch.no_grad():
        x_tensor = torch.FloatTensor(xs).view(-1, 1).to(device)
        y_tensor = torch.FloatTensor(ys).view(-1, 1).to(device)
        outputs = model(x_tensor)
        loss_value = loss_fn(outputs, y_tensor)
        flat_preds = outputs.view(-1).cpu().numpy()
    return flat_preds, loss_value.item()
class AnonymousAxis():
    """Anonymous (numeric) axis of a fixed length.

    Important thing: all instances of this class are not equal to each other,
    so two anonymous axes of the same length never collide as identifiers.
    """

    def __init__(self, value: str):
        self.value = int(value)
        if self.value > 1:
            return
        # Lengths <= 1 are rejected: 1 is pointless, <= 0 is invalid.
        if self.value == 1:
            raise EinopsError('No need to create anonymous axis of length 1. Report this as an issue')
        raise EinopsError('Anonymous axis should have positive length, not {}'.format(self.value))

    def __repr__(self):
        return '{}-axis'.format(str(self.value))
class ParsedExpression():
    """
    non-mutable structure that contains information about one side of expression (e.g. 'b c (h w)')
    and keeps some information important for downstream
    """

    def __init__(self, expression):
        self.identifiers = set()
        self.has_non_unitary_anonymous_axes = False
        self.composition = []
        if '.' in expression:
            raise ValueError('Does not support . in the expression.')
        group = None  # axes gathered inside the currently open bracket, if any

        def flush(token):
            # Register one completed identifier (or numeric anonymous axis).
            if token is None:
                return
            if token in self.identifiers:
                raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(token))
            numeric = str.isdecimal(token)
            if numeric and int(token) == 1:
                # A literal 1 is an empty group at top level, a no-op in brackets.
                if group is None:
                    self.composition.append([])
                return
            valid, why = self.check_axis_name(token, return_reason=True)
            if not (numeric or valid):
                raise ValueError('Invalid axis identifier: {}\n{}'.format(token, why))
            axis = AnonymousAxis(token) if numeric else token
            self.identifiers.add(axis)
            if numeric:
                self.has_non_unitary_anonymous_axes = True
            if group is None:
                self.composition.append([axis])
            else:
                group.append(axis)

        token = None
        for ch in expression:
            if ch in '() ':
                flush(token)
                token = None
                if ch == '(':
                    if group is not None:
                        raise ValueError('Axis composition is one-level (brackets inside brackets not allowed)')
                    group = []
                elif ch == ')':
                    if group is None:
                        raise ValueError('Brackets are not balanced')
                    self.composition.append(group)
                    group = None
            elif str.isalnum(ch) or ch == '_':
                token = ch if token is None else token + ch
            else:
                raise ValueError("Unknown character '{}'".format(ch))
        if group is not None:
            raise ValueError('Imbalanced parentheses in expression: "{}"'.format(expression))
        flush(token)

    def flat_axes_order(self) -> List:
        """Return all axes in left-to-right order with bracket groups flattened."""
        result = []
        for grouped in self.composition:
            assert isinstance(grouped, list), 'does not work with ellipsis'
            result.extend(grouped)
        return result

    def has_composed_axes(self) -> bool:
        """True when at least one bracket group contains more than one axis."""
        return any(isinstance(axes, list) and len(axes) > 1 for axes in self.composition)

    @staticmethod
    def check_axis_name(name: str, return_reason=False):
        """
        Valid axes names are python identifiers except keywords,
        and additionally should not start or end with underscore
        """
        if not str.isidentifier(name):
            result = (False, 'not a valid python identifier')
        elif name[0] == '_' or name[-1] == '_':
            result = (False, 'axis name should should not start or end with underscore')
        else:
            if keyword.iskeyword(name):
                warnings.warn('It is discouraged to use axes names that are keywords: {}'.format(name), RuntimeWarning)
            if name in ['axis']:
                warnings.warn("It is discouraged to use 'axis' as an axis name and will raise an error in future", FutureWarning)
            result = (True, None)
        return result if return_reason else result[0]

    def __repr__(self) -> str:
        return '{name}({composition})'.format(name=self.__class__.__name__, composition=self.composition)
class SuperReLU(SuperModule):
    """Element-wise ReLU activation exposed through the Super* interface."""

    def __init__(self, inplace: bool = False) -> None:
        super(SuperReLU, self).__init__()
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: expose an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.relu(input, inplace=self._inplace)

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # The candidate path is identical to the raw path for activations.
        return self.forward_raw(input)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        if self._inplace:
            return 'inplace=True'
        return ''
class SuperGELU(SuperModule):
    """Element-wise GELU activation exposed through the Super* interface."""

    def __init__(self) -> None:
        super(SuperGELU, self).__init__()

    @property
    def abstract_search_space(self):
        # Nothing searchable: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.gelu(input)

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # Candidate path delegates to the raw path.
        return self.forward_raw(input)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)
class SuperSigmoid(SuperModule):
    """Element-wise sigmoid activation exposed through the Super* interface."""

    def __init__(self) -> None:
        super(SuperSigmoid, self).__init__()

    @property
    def abstract_search_space(self):
        # Nothing searchable: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return torch.sigmoid(input)

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # Candidate path delegates to the raw path.
        return self.forward_raw(input)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)
class SuperLeakyReLU(SuperModule):
    """Element-wise LeakyReLU activation exposed through the Super* interface.

    Mirrors torch.nn.LeakyReLU:
    https://pytorch.org/docs/stable/_modules/torch/nn/modules/activation.html#LeakyReLU
    """

    def __init__(self, negative_slope: float = 0.01, inplace: bool = False) -> None:
        super(SuperLeakyReLU, self).__init__()
        self._negative_slope = negative_slope
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.leaky_relu(input, self._negative_slope, self._inplace)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        # BUGFIX: the inplace suffix previously had no separator, producing
        # e.g. "negative_slope=0.01inplace=True".  Follow nn.LeakyReLU and
        # prefix it with ", " so the repr stays readable.
        inplace_str = ', inplace=True' if self._inplace else ''
        return 'negative_slope={}{}'.format(self._negative_slope, inplace_str)
class SuperTanh(SuperModule):
    """Element-wise tanh activation exposed through the Super* interface."""

    def __init__(self) -> None:
        super(SuperTanh, self).__init__()

    @property
    def abstract_search_space(self):
        # Nothing searchable: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return torch.tanh(input)

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # Candidate path delegates to the raw path.
        return self.forward_raw(input)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)
class SuperQKVAttentionV2(SuperModule):
    """The super model for attention layer.

    Variant that takes pre-computed query-key attention features
    (``qk_att_tensor``) and a separate value tensor (``v_tensor``) instead of
    deriving q/k/v from one input.
    """

    def __init__(self, qk_att_dim: int, in_v_dim: int, hidden_dim: int, num_heads: int, proj_dim: int, qkv_bias: bool=False, attn_drop: Optional[float]=None, proj_drop: Optional[float]=None):
        super(SuperQKVAttentionV2, self).__init__()
        self._in_v_dim = in_v_dim
        self._qk_att_dim = qk_att_dim
        self._proj_dim = proj_dim
        self._hidden_dim = hidden_dim
        self._num_heads = num_heads
        self._qkv_bias = qkv_bias
        # Maps the q-k attention feature vector to one raw score per head.
        self.qk_fc = SuperLinear(qk_att_dim, num_heads, bias=qkv_bias)
        # Projects values into one hidden_dim slice per head.
        self.v_fc = SuperLinear(in_v_dim, (hidden_dim * num_heads), bias=qkv_bias)
        self.attn_drop = nn.Dropout((attn_drop or 0.0))
        self.proj = SuperLinear((hidden_dim * num_heads), proj_dim)
        self.proj_drop = nn.Dropout((proj_drop or 0.0))
        # Large constant used in place of +inf when masking attention logits.
        self._infinity = 1000000000.0

    @property
    def num_heads(self):
        return spaces.get_max(self._num_heads)

    @property
    def in_v_dim(self):
        return spaces.get_max(self._in_v_dim)

    @property
    def qk_att_dim(self):
        return spaces.get_max(self._qk_att_dim)

    @property
    def hidden_dim(self):
        return spaces.get_max(self._hidden_dim)

    @property
    def proj_dim(self):
        return spaces.get_max(self._proj_dim)

    @property
    def abstract_search_space(self):
        # Search over this layer's dimensions is not supported yet.
        root_node = spaces.VirtualNode(id(self))
        raise NotImplementedError

    def apply_candidate(self, abstract_child: spaces.VirtualNode):
        super(SuperQKVAttentionV2, self).apply_candidate(abstract_child)
        raise NotImplementedError

    def forward_qkv(self, qk_att_tensor, v_tensor, num_head: int, mask=None) -> torch.Tensor:
        # qk_att: (B, N, S, num_head) — one score per head, as asserted below.
        qk_att = self.qk_fc(qk_att_tensor)
        (B, N, S, _) = qk_att.shape
        assert (_ == num_head)
        # Move heads in front of the (N, S) score matrix: (B, H, N, S).
        attn_v1 = qk_att.permute(0, 3, 1, 2)
        if (mask is not None):
            # Broadcast the mask across heads; masked logits get -infinity.
            mask = torch.unsqueeze(mask, dim=1)
            attn_v1 = attn_v1.masked_fill(mask, (- self._infinity))
        attn_v1 = attn_v1.softmax(dim=(- 1))
        attn_v1 = self.attn_drop(attn_v1)
        v = self.v_fc(v_tensor)
        (B0, _, _) = v.shape
        # Split value projection per head: (B0, H, S, hidden).
        # NOTE(review): assumes v_tensor's middle dimension equals S — confirm with callers.
        v_v1 = v.reshape(B0, S, num_head, (- 1)).permute(0, 2, 1, 3)
        # Weighted sum over S, then merge heads back: (B, N, H * hidden).
        feats_v1 = (attn_v1 @ v_v1).permute(0, 2, 1, 3).reshape(B, N, (- 1))
        return feats_v1

    def forward_candidate(self, qk_att_tensor, v_tensor, mask=None) -> torch.Tensor:
        return self.forward_raw(qk_att_tensor, v_tensor, mask)

    def forward_raw(self, qk_att_tensor, v_tensor, mask=None) -> torch.Tensor:
        feats = self.forward_qkv(qk_att_tensor, v_tensor, self.num_heads, mask)
        outs = self.proj(feats)
        outs = self.proj_drop(outs)
        return outs

    def extra_repr(self) -> str:
        return 'input_dim={:}, hidden_dim={:}, proj_dim={:}, num_heads={:}, infinity={:}'.format((self.qk_att_dim, self.in_v_dim), self._hidden_dim, self._proj_dim, self._num_heads, self._infinity)
class SuperSequential(SuperModule):
    """A sequential container wrapped with 'Super' ability.

    Modules will be added to it in the order they are passed in the constructor.
    Alternatively, an ordered dict of modules can also be passed in.
    To make it easier to understand, here is a small example::
        # Example of using Sequential
        model = SuperSequential(
            nn.Conv2d(1,20,5),
            nn.ReLU(),
            nn.Conv2d(20,64,5),
            nn.ReLU()
        )
        # Example of using Sequential with OrderedDict
        model = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(1,20,5)),
            ('relu1', nn.ReLU()),
            ('conv2', nn.Conv2d(20,64,5)),
            ('relu2', nn.ReLU())
        ]))
    """

    def __init__(self, *args):
        super(SuperSequential, self).__init__()
        if ((len(args) == 1) and isinstance(args[0], OrderedDict)):
            # Named-module form: keys become child names.
            for (key, module) in args[0].items():
                self.add_module(key, module)
        else:
            if (not isinstance(args, (list, tuple))):
                raise ValueError('Invalid input type: {:}'.format(type(args)))
            # Positional form: children are named by their index.
            for (idx, module) in enumerate(args):
                self.add_module(str(idx), module)

    def _get_item_by_idx(self, iterator, idx) -> T:
        """Get the idx-th item of the iterator (supports negative indices)."""
        size = len(self)
        idx = operator.index(idx)
        if (not ((- size) <= idx < size)):
            raise IndexError('index {} is out of range'.format(idx))
        idx %= size
        return next(islice(iterator, idx, None))

    def __getitem__(self, idx) -> Union[('SuperSequential', T)]:
        if isinstance(idx, slice):
            # Slicing returns a new container of the selected children.
            return self.__class__(OrderedDict(list(self._modules.items())[idx]))
        else:
            return self._get_item_by_idx(self._modules.values(), idx)

    def __setitem__(self, idx: int, module: SuperModule) -> None:
        key: str = self._get_item_by_idx(self._modules.keys(), idx)
        return setattr(self, key, module)

    def __delitem__(self, idx: Union[(slice, int)]) -> None:
        if isinstance(idx, slice):
            for key in list(self._modules.keys())[idx]:
                delattr(self, key)
        else:
            key = self._get_item_by_idx(self._modules.keys(), idx)
            delattr(self, key)

    def __len__(self) -> int:
        return len(self._modules)

    def __dir__(self):
        # Hide the purely numeric child names from dir().
        keys = super(SuperSequential, self).__dir__()
        keys = [key for key in keys if (not key.isdigit())]
        return keys

    def __iter__(self) -> Iterator[SuperModule]:
        return iter(self._modules.values())

    @property
    def abstract_search_space(self):
        # Union of the children's (non-determined) search spaces, keyed by index.
        root_node = spaces.VirtualNode(id(self))
        for (index, module) in enumerate(self):
            if (not isinstance(module, SuperModule)):
                continue
            space = module.abstract_search_space
            if (not spaces.is_determined(space)):
                root_node.append(str(index), space)
        return root_node

    def apply_candidate(self, abstract_child: spaces.VirtualNode):
        super(SuperSequential, self).apply_candidate(abstract_child)
        for (index, module) in enumerate(self):
            if (str(index) in abstract_child):
                module.apply_candidate(abstract_child[str(index)])

    def forward_candidate(self, input):
        return self.forward_raw(input)

    def forward_raw(self, input):
        # Chain the children: each module's output feeds the next.
        for module in self:
            input = module(input)
        return input

    def forward_with_container(self, input, container, prefix=[]):
        # Same chaining, but each child pulls weights from the container,
        # addressed by its index appended to the prefix path.
        for (index, module) in enumerate(self):
            input = module.forward_with_container(input, container, (prefix + [str(index)]))
        return input
class SuperDropout(SuperModule):
    """Element-wise dropout exposed through the Super* interface."""

    def __init__(self, p: float = 0.5, inplace: bool = False) -> None:
        super(SuperDropout, self).__init__()
        self._p = p
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        # self.training gates dropout, matching nn.Dropout semantics.
        return F.dropout(input, self._p, self.training, self._inplace)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        # BUGFIX: previously always appended ", " even when inplace was False,
        # yielding e.g. "p=0.5, ".  Only add the suffix when it carries content.
        inplace_str = ', inplace=True' if self._inplace else ''
        return 'p={:}{}'.format(self._p, inplace_str)
class SuperDrop(SuperModule):
    """Applies a the drop-path function element-wise.

    With probability ``p`` (during training) a whole slice of the input is
    zeroed; ``dims`` selects which trailing dimensions share one drop decision
    (a -1 entry keeps that dimension's full size, any other value fixes the
    random-mask size for that dimension).
    """

    def __init__(self, p: float, dims: Tuple[int], recover: bool=True) -> None:
        super(SuperDrop, self).__init__()
        self._p = p          # drop probability
        self._dims = dims    # per-dimension mask sizes; -1 means "same as input"
        self._recover = recover  # rescale kept values by 1/keep_prob when True

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        # Identity in eval mode or when dropping is disabled.
        if ((not self.training) or (self._p <= 0)):
            return input
        keep_prob = (1 - self._p)
        # Mask shape: full batch dimension, then dims overrides per trailing dim.
        shape = ([input.shape[0]] + [(x if (y == (- 1)) else y) for (x, y) in zip(input.shape[1:], self._dims)])
        # floor(keep_prob + U[0,1)) is 1 with probability keep_prob, else 0.
        random_tensor = (keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device))
        random_tensor.floor_()
        if self._recover:
            # Rescale so the expected activation magnitude is unchanged.
            return (input.div(keep_prob) * random_tensor)
        else:
            return (input * random_tensor)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        return (('p={:}'.format(self._p) + ', dims={:}'.format(self._dims)) + ', recover={:}'.format(self._recover))
class SuperModule(abc.ABC, nn.Module):
    """This class equips the nn.Module class with the ability to apply AutoDL.

    Subclasses implement ``forward_raw`` (largest architecture) and
    ``forward_candidate`` (a sampled sub-architecture); ``forward`` dispatches
    between them based on the current run mode.
    """

    def __init__(self):
        super(SuperModule, self).__init__()
        self._super_run_type = SuperRunMode.Default
        self._abstract_child = None   # sampled candidate (a spaces.VirtualNode)
        self._verbose = False
        self._meta_info = {}          # checkpoint bookkeeping (dir/name/score)
        self._candidate_mode = DISABLE_CANDIDATE

    def set_super_run_type(self, super_run_type):
        # Propagate the run mode to every SuperModule in the tree.
        def _reset_super_run(m):
            if isinstance(m, SuperModule):
                m._super_run_type = super_run_type
        self.apply(_reset_super_run)

    def add_module(self, name: str, module: Optional[torch.nn.Module]) -> None:
        # Warn (but do not refuse) when mixing in plain nn.Module children:
        # they will not respond to Super* mode/candidate propagation.
        if (not isinstance(module, SuperModule)):
            warnings.warn((('Add {:}:{:} module, which is not SuperModule, into {:}'.format(name, module.__class__.__name__, self.__class__.__name__) + '\n') + 'It may cause some functions invalid.'))
        super(SuperModule, self).add_module(name, module)

    def apply_verbose(self, verbose):
        # Propagate the verbosity flag to every SuperModule in the tree.
        def _reset_verbose(m):
            if isinstance(m, SuperModule):
                m._verbose = verbose
        self.apply(_reset_verbose)

    def apply_candidate(self, abstract_child):
        if (not isinstance(abstract_child, spaces.VirtualNode)):
            raise ValueError('Invalid abstract child program: {:}'.format(abstract_child))
        self._abstract_child = abstract_child

    def enable_candidate(self):
        def _enable_candidate(m):
            if isinstance(m, SuperModule):
                m._candidate_mode = ENABLE_CANDIDATE
        self.apply(_enable_candidate)

    def disable_candidate(self):
        def _disable_candidate(m):
            if isinstance(m, SuperModule):
                m._candidate_mode = DISABLE_CANDIDATE
        self.apply(_disable_candidate)

    def get_w_container(self):
        # Snapshot all parameters (trainable) and buffers (not) into a container.
        container = TensorContainer()
        for (name, param) in self.named_parameters():
            container.append(name, param, True)
        for (name, buf) in self.named_buffers():
            container.append(name, buf, False)
        return container

    def analyze_weights(self):
        # Print shape and mean/std statistics for every parameter.
        with torch.no_grad():
            for (name, param) in self.named_parameters():
                shapestr = '[{:10s}] shape={:}'.format(name, list(param.shape))
                finalstr = (shapestr + '{:.2f} +- {:.2f}'.format(param.mean(), param.std()))
                print(finalstr)

    def numel(self, buffer=True):
        """Total element count over parameters (and buffers when ``buffer``)."""
        total = 0
        for (name, param) in self.named_parameters():
            total += param.numel()
        if buffer:
            for (name, buf) in self.named_buffers():
                total += buf.numel()
        return total

    def set_best_dir(self, xdir):
        self._meta_info[BEST_DIR_KEY] = str(xdir)
        Path(xdir).mkdir(parents=True, exist_ok=True)

    def set_best_name(self, xname):
        self._meta_info[BEST_NAME_KEY] = str(xname)

    def save_best(self, score):
        """Save a checkpoint if ``score`` is at least the best seen so far.

        Returns ``(saved, best_score)``.  Note: ties (``best_score <= score``)
        also trigger a save.
        """
        if (BEST_DIR_KEY not in self._meta_info):
            # No directory configured: fall back to a fresh temp directory.
            tempdir = tempfile.mkdtemp('-xlayers')
            self._meta_info[BEST_DIR_KEY] = tempdir
        if (BEST_SCORE_KEY not in self._meta_info):
            self._meta_info[BEST_SCORE_KEY] = None
        best_score = self._meta_info[BEST_SCORE_KEY]
        if ((best_score is None) or (best_score <= score)):
            best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
            best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
            self._meta_info[BEST_SCORE_KEY] = score
            torch.save(self.state_dict(), best_save_path)
            return (True, self._meta_info[BEST_SCORE_KEY])
        else:
            return (False, self._meta_info[BEST_SCORE_KEY])

    def load_best(self, best_save_name=None):
        """Load the checkpoint previously written by :meth:`save_best`."""
        if (BEST_DIR_KEY not in self._meta_info):
            raise ValueError('Please set BEST_DIR_KEY at first')
        if (best_save_name is None):
            best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
        best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
        state_dict = torch.load(best_save_path)
        self.load_state_dict(state_dict)

    def has_best(self, best_name=None):
        """Whether a best-checkpoint file already exists on disk."""
        if (BEST_DIR_KEY not in self._meta_info):
            raise ValueError('Please set BEST_DIR_KEY at first')
        if (best_name is None):
            best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
        else:
            best_save_name = best_name
        best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
        return os.path.exists(best_save_path)

    @property
    def abstract_search_space(self):
        raise NotImplementedError

    @property
    def super_run_type(self):
        return self._super_run_type

    @property
    def abstract_child(self):
        return self._abstract_child

    @property
    def verbose(self):
        return self._verbose

    @abc.abstractmethod
    def forward_raw(self, *inputs):
        """Use the largest candidate for forward. Similar to the original PyTorch model."""
        raise NotImplementedError

    @abc.abstractmethod
    def forward_candidate(self, *inputs):
        raise NotImplementedError

    @property
    def name_with_id(self):
        return 'name={:}, id={:}'.format(self.__class__.__name__, id(self))

    def get_shape_str(self, tensors):
        # Human-readable shape summary for a tensor or (nested) sequence of tensors.
        if isinstance(tensors, (list, tuple)):
            shapes = [self.get_shape_str(tensor) for tensor in tensors]
            if (len(shapes) == 1):
                return shapes[0]
            else:
                return ', '.join(shapes)
        elif isinstance(tensors, (torch.Tensor, nn.Parameter)):
            return str(tuple(tensors.shape))
        else:
            raise TypeError('Invalid input type: {:}.'.format(type(tensors)))

    def forward(self, *inputs):
        # Dispatch on the run mode: full model vs. sampled candidate.
        if self.verbose:
            print('[{:}] inputs shape: {:}'.format(self.name_with_id, self.get_shape_str(inputs)))
        if (self.super_run_type == SuperRunMode.FullModel):
            outputs = self.forward_raw(*inputs)
        elif (self.super_run_type == SuperRunMode.Candidate):
            if (self._candidate_mode == DISABLE_CANDIDATE):
                raise ValueError('candidate mode is disabled')
            outputs = self.forward_candidate(*inputs)
        else:
            raise ValueError('Unknown Super Model Run Mode: {:}'.format(self.super_run_type))
        if self.verbose:
            print('[{:}] outputs shape: {:}'.format(self.name_with_id, self.get_shape_str(outputs)))
        return outputs

    def forward_with_container(self, inputs, container, prefix=[]):
        # Optional: subclasses that support external weight containers override this.
        raise NotImplementedError
class SuperLayerNorm1D(SuperModule):
    """Super Layer Norm: layer normalization over the last dimension, where
    that dimension's size may be a search space (the maximum is allocated)."""

    def __init__(self, dim: IntSpaceType, eps: float=1e-06, elementwise_affine: bool=True) -> None:
        super(SuperLayerNorm1D, self).__init__()
        self._in_dim = dim
        self._eps = eps
        self._elementwise_affine = elementwise_affine
        if self._elementwise_affine:
            # Allocate affine parameters at the maximum candidate size.
            self.register_parameter('weight', nn.Parameter(torch.Tensor(self.in_dim)))
            self.register_parameter('bias', nn.Parameter(torch.Tensor(self.in_dim)))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        self.reset_parameters()

    @property
    def in_dim(self):
        # Largest value of the (possibly searchable) input dimension.
        return spaces.get_max(self._in_dim)

    @property
    def eps(self):
        return self._eps

    def reset_parameters(self) -> None:
        # Standard LayerNorm init: weight=1, bias=0.
        if self._elementwise_affine:
            nn.init.ones_(self.weight)
            nn.init.zeros_(self.bias)

    @property
    def abstract_search_space(self):
        root_node = spaces.VirtualNode(id(self))
        if (not spaces.is_determined(self._in_dim)):
            root_node.append('_in_dim', self._in_dim.abstract(reuse_last=True))
        return root_node

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # Resolve the candidate's input dimension (sampled or fixed).
        if (not spaces.is_determined(self._in_dim)):
            expected_input_dim = self.abstract_child['_in_dim'].value
        else:
            expected_input_dim = spaces.get_determined_value(self._in_dim)
        if (input.size((- 1)) != expected_input_dim):
            raise ValueError('Expect the input dim of {:} instead of {:}'.format(expected_input_dim, input.size((- 1))))
        if self._elementwise_affine:
            # Slice the over-allocated parameters down to the candidate size.
            weight = self.weight[:expected_input_dim]
            bias = self.bias[:expected_input_dim]
        else:
            (weight, bias) = (None, None)
        return F.layer_norm(input, (expected_input_dim,), weight, bias, self.eps)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        return F.layer_norm(input, (self.in_dim,), self.weight, self.bias, self.eps)

    def forward_with_container(self, input, container, prefix=[]):
        # Pull weight/bias from the external container (None when absent,
        # matching the elementwise_affine=False path).
        super_weight_name = '.'.join((prefix + ['weight']))
        if container.has(super_weight_name):
            weight = container.query(super_weight_name)
        else:
            weight = None
        super_bias_name = '.'.join((prefix + ['bias']))
        if container.has(super_bias_name):
            bias = container.query(super_bias_name)
        else:
            bias = None
        return F.layer_norm(input, (self.in_dim,), weight, bias, self.eps)

    def extra_repr(self) -> str:
        return 'shape={in_dim}, eps={eps}, elementwise_affine={elementwise_affine}'.format(in_dim=self._in_dim, eps=self._eps, elementwise_affine=self._elementwise_affine)
class SuperSimpleNorm(SuperModule):
    """Super simple normalization: (x - mean) / std with fixed (buffered) stats."""

    def __init__(self, mean, std, inplace=False) -> None:
        super(SuperSimpleNorm, self).__init__()
        # Stored as buffers so they travel with state_dict/device moves but
        # are not trained.
        self.register_buffer('_mean', torch.tensor(mean, dtype=torch.float))
        self.register_buffer('_std', torch.tensor(std, dtype=torch.float))
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        # Clone first unless inplace: sub_/div_ below mutate `tensor`.
        if (not self._inplace):
            tensor = input.clone()
        else:
            tensor = input
        mean = torch.as_tensor(self._mean, dtype=tensor.dtype, device=tensor.device)
        std = torch.as_tensor(self._std, dtype=tensor.dtype, device=tensor.device)
        if (std == 0).any():
            raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(tensor.dtype))
        # Prepend singleton dims so mean/std broadcast against the input.
        while (mean.ndim < tensor.ndim):
            (mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
        return tensor.sub_(mean).div_(std)

    def extra_repr(self) -> str:
        # NOTE(review): .item() requires scalar mean/std — this repr would fail
        # for vector statistics; confirm callers only pass scalars.
        return 'mean={mean}, std={std}, inplace={inplace}'.format(mean=self._mean.item(), std=self._std.item(), inplace=self._inplace)
class SuperSimpleLearnableNorm(SuperModule):
    """Super simple normalization with learnable mean/std parameters."""

    def __init__(self, mean=0, std=1, eps=1e-06, inplace=False) -> None:
        super(SuperSimpleLearnableNorm, self).__init__()
        # Mean and std are trainable parameters (contrast SuperSimpleNorm's buffers).
        self.register_parameter('_mean', nn.Parameter(torch.tensor(mean, dtype=torch.float)))
        self.register_parameter('_std', nn.Parameter(torch.tensor(std, dtype=torch.float)))
        self._eps = eps       # keeps the effective std strictly positive
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # No searchable hyper-parameters: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        if (not self._inplace):
            tensor = input.clone()
        else:
            tensor = input
        # abs(std) + eps guarantees positivity regardless of the learned sign.
        (mean, std) = (self._mean.to(tensor.device), (torch.abs(self._std.to(tensor.device)) + self._eps))
        if (std == 0).any():
            raise ValueError('std leads to division by zero.')
        # Prepend singleton dims so mean/std broadcast against the input.
        while (mean.ndim < tensor.ndim):
            (mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
        return tensor.sub_(mean).div_(std)

    def forward_with_container(self, input, container, prefix=[]):
        # Same computation, but mean/std come from the external container,
        # addressed by the prefix path.
        if (not self._inplace):
            tensor = input.clone()
        else:
            tensor = input
        mean_name = '.'.join((prefix + ['_mean']))
        std_name = '.'.join((prefix + ['_std']))
        (mean, std) = (container.query(mean_name).to(tensor.device), (torch.abs(container.query(std_name).to(tensor.device)) + self._eps))
        while (mean.ndim < tensor.ndim):
            (mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
        return tensor.sub_(mean).div_(std)

    def extra_repr(self) -> str:
        # NOTE(review): .item() requires scalar mean/std — confirm callers only
        # construct this with scalar statistics.
        return 'mean={mean}, std={std}, inplace={inplace}'.format(mean=self._mean.item(), std=self._std.item(), inplace=self._inplace)
class SuperIdentity(SuperModule):
    """Identity mapping layer with the Super* interface.

    When ``inplace`` is False the input is cloned so the caller's tensor is
    never aliased by downstream code.
    """

    def __init__(self, inplace=False, **kwargs) -> None:
        super(SuperIdentity, self).__init__()
        self._inplace = inplace

    @property
    def abstract_search_space(self):
        # Nothing searchable: an empty virtual node.
        return spaces.VirtualNode(id(self))

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        if self._inplace:
            return input
        return input.clone()

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # Candidate path delegates to the raw path.
        return self.forward_raw(input)

    def forward_with_container(self, input, container, prefix=[]):
        # Stateless layer: the weight container is not consulted.
        return self.forward_raw(input)

    def extra_repr(self) -> str:
        return 'inplace={inplace}'.format(inplace=self._inplace)
class SuperReArrange(SuperModule):
    """Applies the rearrange operation (einops-style ``'b (h w) -> b h w'``).

    The pattern is parsed once in ``__init__`` into three pieces used at
    forward time: per-input-axis decomposition instructions, an axes
    permutation, and per-output-axis grouping instructions.
    """

    def __init__(self, pattern, **axes_lengths):
        super(SuperReArrange, self).__init__()
        self._pattern = pattern
        self._axes_lengths = axes_lengths
        axes_lengths = tuple(sorted(self._axes_lengths.items()))
        (left, right) = pattern.split('->')
        left = ParsedExpression(left)
        right = ParsedExpression(right)
        # Rearrange is a pure permutation/reshape: both sides must name the same axes.
        difference = set.symmetric_difference(left.identifiers, right.identifiers)
        if difference:
            raise ValueError('Identifiers only on one side of expression (should be on both): {}'.format(difference))
        # Map every elementary axis to its known length (None => infer from input).
        axis_name2known_length = OrderedDict()
        for composite_axis in left.composition:
            for axis_name in composite_axis:
                if isinstance(axis_name, AnonymousAxis):
                    axis_name2known_length[axis_name] = axis_name.value
                else:
                    axis_name2known_length[axis_name] = None
        for axis_name in right.identifiers:
            if (axis_name not in axis_name2known_length):
                if isinstance(axis_name, AnonymousAxis):
                    axis_name2known_length[axis_name] = axis_name.value
                else:
                    axis_name2known_length[axis_name] = None
        axis_name2position = {name: position for (position, name) in enumerate(axis_name2known_length)}
        # Fold in the explicitly provided axis lengths (keyword arguments).
        for (elementary_axis, axis_length) in axes_lengths:
            if (not ParsedExpression.check_axis_name(elementary_axis)):
                raise ValueError('Invalid name for an axis', elementary_axis)
            if (elementary_axis not in axis_name2known_length):
                raise ValueError('Axis {} is not used in transform'.format(elementary_axis))
            axis_name2known_length[elementary_axis] = axis_length
        # For each input dimension: (known elementary axes, at-most-one unknown axis).
        input_composite_axes = []
        for composite_axis in left.composition:
            known = {axis for axis in composite_axis if (axis_name2known_length[axis] is not None)}
            unknown = {axis for axis in composite_axis if (axis_name2known_length[axis] is None)}
            if (len(unknown) > 1):
                raise ValueError('Could not infer sizes for {}'.format(unknown))
            assert ((len(unknown) + len(known)) == len(composite_axis))
            input_composite_axes.append(([axis_name2position[axis] for axis in known], [axis_name2position[axis] for axis in unknown]))
        # Position of each surviving axis after the input is fully decomposed.
        axis_position_after_reduction = {}
        for axis_name in itertools.chain(*left.composition):
            if (axis_name in right.identifiers):
                axis_position_after_reduction[axis_name] = len(axis_position_after_reduction)
        result_axes_grouping = []
        for composite_axis in right.composition:
            result_axes_grouping.append([axis_name2position[axis] for axis in composite_axis])
        ordered_axis_right = list(itertools.chain(*right.composition))
        axes_permutation = tuple((axis_position_after_reduction[axis] for axis in ordered_axis_right if (axis in left.identifiers)))
        self.input_composite_axes = input_composite_axes
        self.output_composite_axes = result_axes_grouping
        self.elementary_axes_lengths = list(axis_name2known_length.values())
        self.axes_permutation = axes_permutation

    # NOTE: lru_cache on an instance method keeps `self` alive in the cache
    # (ruff B019); kept as-is because it matches the original einops approach
    # and these modules live for the whole program.
    @functools.lru_cache(maxsize=1024)
    def reconstruct_from_shape(self, shape):
        """Resolve unknown axis lengths from a concrete input ``shape``.

        Returns ``(init_shape, axes_reordering, final_shape)`` — the shapes
        and permutation used by :meth:`forward_raw`.
        """
        if (len(shape) != len(self.input_composite_axes)):
            raise ValueError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
        axes_lengths = list(self.elementary_axes_lengths)
        for (input_axis, (known_axes, unknown_axes)) in enumerate(self.input_composite_axes):
            length = shape[input_axis]
            known_product = 1
            for axis in known_axes:
                known_product *= axes_lengths[axis]
            if (len(unknown_axes) == 0):
                if (isinstance(length, int) and isinstance(known_product, int) and (length != known_product)):
                    raise ValueError('Shape mismatch, {} != {}'.format(length, known_product))
            else:
                if (isinstance(length, int) and isinstance(known_product, int) and ((length % known_product) != 0)):
                    raise ValueError("Shape mismatch, can't divide axis of length {} in chunks of {}".format(length, known_product))
                # Exactly one unknown axis per input dim (enforced in __init__).
                (unknown_axis,) = unknown_axes
                axes_lengths[unknown_axis] = (length // known_product)
        final_shape = []
        for (output_axis, grouping) in enumerate(self.output_composite_axes):
            lengths = [axes_lengths[elementary_axis] for elementary_axis in grouping]
            final_shape.append(int(np.prod(lengths)))
        axes_reordering = self.axes_permutation
        return (axes_lengths, axes_reordering, final_shape)

    @property
    def abstract_search_space(self):
        # Nothing searchable: an empty virtual node.
        root_node = spaces.VirtualNode(id(self))
        return root_node

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # BUGFIX: previously the result of forward_raw was discarded (no
        # `return`), so the candidate path always yielded None.
        return self.forward_raw(input)

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        # Decompose -> permute -> regroup, exactly as einops.rearrange does.
        (init_shape, axes_reordering, final_shape) = self.reconstruct_from_shape(tuple(input.shape))
        tensor = torch.reshape(input, init_shape)
        tensor = tensor.permute(axes_reordering)
        tensor = torch.reshape(tensor, final_shape)
        return tensor

    def extra_repr(self) -> str:
        params = repr(self._pattern)
        for (axis, length) in self._axes_lengths.items():
            params += ', {}={}'.format(axis, length)
        return '{:}'.format(params)
class SuperAlphaEBDv1(SuperModule):
    """A simple layer to convert the raw trading data from 1-D to 2-D data and apply an FC layer."""

    def __init__(self, d_feat: int, embed_dim: IntSpaceType):
        super(SuperAlphaEBDv1, self).__init__()
        self._d_feat = d_feat          # number of raw features per time step
        self._embed_dim = embed_dim    # possibly-searchable embedding size
        self.proj = SuperLinear(d_feat, embed_dim)

    @property
    def embed_dim(self):
        # Largest value of the (possibly searchable) embedding dimension.
        return spaces.get_max(self._embed_dim)

    @property
    def abstract_search_space(self):
        root_node = spaces.VirtualNode(id(self))
        space = self.proj.abstract_search_space
        if (not spaces.is_determined(space)):
            root_node.append('proj', space)
        if (not spaces.is_determined(self._embed_dim)):
            root_node.append('_embed_dim', self._embed_dim.abstract(reuse_last=True))
        return root_node

    def apply_candidate(self, abstract_child: spaces.VirtualNode):
        super(SuperAlphaEBDv1, self).apply_candidate(abstract_child)
        if ('proj' in abstract_child):
            self.proj.apply_candidate(abstract_child['proj'])

    def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
        # (batch, d_feat * T) -> (batch, T, d_feat) before projection.
        x = input.reshape(len(input), self._d_feat, (- 1))
        x = x.permute(0, 2, 1)
        if (not spaces.is_determined(self._embed_dim)):
            embed_dim = self.abstract_child['_embed_dim'].value
        else:
            embed_dim = spaces.get_determined_value(self._embed_dim)
        # Scale by sqrt(embed_dim), the transformer embedding convention.
        out = (self.proj(x) * math.sqrt(embed_dim))
        return out

    def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
        x = input.reshape(len(input), self._d_feat, (- 1))
        x = x.permute(0, 2, 1)
        out = (self.proj(x) * math.sqrt(self.embed_dim))
        return out
class SuperTransformerEncoderLayer(SuperModule):
    'TransformerEncoderLayer is made up of self-attn and feedforward network.\n    This is a super model for TransformerEncoderLayer that can support search for the transformer encoder layer.\n\n    Reference:\n    - Paper: Attention Is All You Need, NeurIPS 2017\n    - PyTorch Implementation: https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer\n\n    Details:\n    the original post-norm version: MHA -> residual -> norm -> MLP -> residual -> norm\n    the pre-norm version: norm -> MHA -> residual -> norm -> MLP -> residual\n    '

    def __init__(self, d_model: IntSpaceType, num_heads: IntSpaceType, qkv_bias: BoolSpaceType=False, mlp_hidden_multiplier: IntSpaceType=4, dropout: Optional[float]=None, att_dropout: Optional[float]=None, norm_affine: bool=True, act_layer: Callable[([], nn.Module)]=nn.GELU, order: LayerOrder=LayerOrder.PreNorm, use_mask: bool=False):
        super(SuperTransformerEncoderLayer, self).__init__()
        # Multi-head self-attention block; input and output dims are both d_model.
        mha = SuperSelfAttention(d_model, d_model, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=att_dropout, proj_drop=None, use_mask=use_mask)
        # Position-wise feed-forward block (hidden size = d_model * multiplier).
        mlp = SuperMLPv2(d_model, hidden_multiplier=mlp_hidden_multiplier, out_features=d_model, act_layer=act_layer, drop=dropout)
        if (order is LayerOrder.PreNorm):
            # Pre-norm wiring: one shared dropout after attention.
            self.norm1 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
            self.mha = mha
            self.drop = nn.Dropout((dropout or 0.0))
            self.norm2 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
            self.mlp = mlp
        elif (order is LayerOrder.PostNorm):
            # Post-norm wiring: separate dropouts after attention and MLP.
            self.mha = mha
            self.drop1 = nn.Dropout((dropout or 0.0))
            self.norm1 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
            self.mlp = mlp
            self.drop2 = nn.Dropout((dropout or 0.0))
            self.norm2 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
        else:
            raise ValueError('Unknown order: {:}'.format(order))
        self._order = order

    @property
    def abstract_search_space(self):
        # Aggregate the undetermined search spaces of all four sub-modules.
        root_node = spaces.VirtualNode(id(self))
        xdict = dict(mha=self.mha.abstract_search_space, norm1=self.norm1.abstract_search_space, mlp=self.mlp.abstract_search_space, norm2=self.norm2.abstract_search_space)
        for (key, space) in xdict.items():
            if (not spaces.is_determined(space)):
                root_node.append(key, space)
        return root_node

    def apply_candidate(self, abstract_child: spaces.VirtualNode):
        # Push the sampled candidate into each sub-module that has a space.
        super(SuperTransformerEncoderLayer, self).apply_candidate(abstract_child)
        valid_keys = ['mha', 'norm1', 'mlp', 'norm2']
        for key in valid_keys:
            if (key in abstract_child):
                getattr(self, key).apply_candidate(abstract_child[key])

    def forward_candidate(self, inputs: torch.Tensor) -> torch.Tensor:
        # Candidate forward is identical to the raw forward; the sub-modules
        # themselves honor the applied candidate.
        return self.forward_raw(inputs)

    def forward_raw(self, inputs: torch.Tensor) -> torch.Tensor:
        if (self._order is LayerOrder.PreNorm):
            # norm -> MHA -> dropout -> residual, then norm -> MLP -> residual.
            x = self.norm1(inputs)
            x = self.mha(x)
            x = self.drop(x)
            x = (x + inputs)
            y = self.norm2(x)
            outs = (x + self.mlp(y))
        elif (self._order is LayerOrder.PostNorm):
            # MHA -> dropout -> residual -> norm, then MLP -> dropout -> residual -> norm.
            x = self.mha(inputs)
            x = (inputs + self.drop1(x))
            x = self.norm1(x)
            y = self.mlp(x)
            y = (x + self.drop2(y))
            outs = self.norm2(y)
        else:
            raise ValueError('Unknown order: {:}'.format(self._order))
        return outs
class LayerOrder(Enum):
    """Where layer normalization sits relative to the residual sub-layers."""

    # norm -> sub-layer -> residual add
    PreNorm = 'pre-norm'
    # sub-layer -> residual add -> norm
    PostNorm = 'post-norm'
class SuperRunMode(Enum):
    """Running modes of a super model."""

    FullModel = 'fullmodel'
    Candidate = 'candidate'
    # Same value as FullModel, so Default is an alias for it.
    Default = 'fullmodel'
class ShapeContainer():
    'A class to maintain the shape of each weight tensor for a model.'

    def __init__(self):
        # Parallel lists kept in insertion order: tensor name, its torch.Size,
        # and whether it is a parameter (True) or a buffer (False).
        # _name2index maps a name to its position in these lists.
        self._names = []
        self._shapes = []
        self._name2index = dict()
        self._param_or_buffers = []

    @property
    def shapes(self):
        return self._shapes

    def __getitem__(self, index):
        return self._shapes[index]

    def translate(self, tensors, all_none_match=True):
        # Reshape each flat tensor in `tensors` to the recorded shape and pack
        # the results into a TensorContainer. A tensor may carry more elements
        # than needed only when all_none_match is True (the excess is dropped).
        result = TensorContainer()
        for (index, name) in enumerate(self._names):
            cur_num = tensors[index].numel()
            expected_num = self._shapes[index].numel()
            if ((cur_num < expected_num) or ((cur_num > expected_num) and (not all_none_match))):
                raise ValueError('Invalid {:} vs {:}'.format(cur_num, expected_num))
            # Flatten, take exactly the needed prefix, then reshape.
            cur_tensor = tensors[index].view((-1))[:expected_num]
            new_tensor = torch.reshape(cur_tensor, self._shapes[index])
            result.append(name, new_tensor, self._param_or_buffers[index])
        return result

    def append(self, name, shape, param_or_buffer):
        # Record a new (name, shape) pair; names must be unique.
        if (not isinstance(shape, torch.Size)):
            raise TypeError('The input tensor must be torch.Size instead of {:}'.format(type(shape)))
        self._names.append(name)
        self._shapes.append(shape)
        self._param_or_buffers.append(param_or_buffer)
        assert (name not in self._name2index), 'The [{:}] has already been added.'.format(name)
        self._name2index[name] = (len(self._names) - 1)

    def query(self, name):
        # Return the shape registered under `name`; raise if unknown.
        if (not self.has(name)):
            raise ValueError('The {:} is not in {:}'.format(name, list(self._name2index.keys())))
        index = self._name2index[name]
        return self._shapes[index]

    def has(self, name):
        return (name in self._name2index)

    def has_prefix(self, prefix):
        # Return the first registered name starting with `prefix`, else False.
        for (name, idx) in self._name2index.items():
            if name.startswith(prefix):
                return name
        return False

    def numel(self, index=None):
        # Total element count over all shapes, or of the single shape at `index`.
        if (index is None):
            shapes = self._shapes
        else:
            shapes = [self._shapes[index]]
        total = 0
        for shape in shapes:
            total += shape.numel()
        return total

    def __len__(self):
        return len(self._names)

    def __repr__(self):
        return '{name}({num} tensors)'.format(name=self.__class__.__name__, num=len(self))
class TensorContainer():
    'A class to maintain both parameters and buffers for a model.'

    def __init__(self):
        # Parallel lists kept in insertion order: tensor name, the tensor, and
        # whether it is a parameter (True) or a buffer (False).
        # _name2index maps a name to its position in these lists.
        self._names = []
        self._tensors = []
        self._param_or_buffers = []
        self._name2index = dict()

    def additive(self, tensors):
        # Element-wise add `tensors` (same order/length) to the stored tensors,
        # returning a NEW container; this one is left untouched.
        result = TensorContainer()
        for (index, name) in enumerate(self._names):
            new_tensor = (self._tensors[index] + tensors[index])
            result.append(name, new_tensor, self._param_or_buffers[index])
        return result

    def create_container(self, tensors):
        # Build a new container with the same names/flags but new tensors.
        result = TensorContainer()
        for (index, name) in enumerate(self._names):
            new_tensor = tensors[index]
            result.append(name, new_tensor, self._param_or_buffers[index])
        return result

    def no_grad_clone(self):
        # Deep-copy the tensors without tracking gradients.
        result = TensorContainer()
        with torch.no_grad():
            for (index, name) in enumerate(self._names):
                result.append(name, self._tensors[index].clone(), self._param_or_buffers[index])
        return result

    def to_shape_container(self):
        # Project this container down to shapes only.
        result = ShapeContainer()
        for (index, name) in enumerate(self._names):
            result.append(name, self._tensors[index].shape, self._param_or_buffers[index])
        return result

    def requires_grad_(self, requires_grad=True):
        for tensor in self._tensors:
            tensor.requires_grad_(requires_grad)

    def parameters(self):
        return self._tensors

    @property
    def tensors(self):
        return self._tensors

    def flatten(self, tensors=None):
        # Concatenate (a copy of) all tensors into one 1-D tensor; defaults to
        # the stored tensors when `tensors` is None.
        if (tensors is None):
            tensors = self._tensors
        tensors = [tensor.view((-1)) for tensor in tensors]
        return torch.cat(tensors)

    def unflatten(self, tensor):
        # Inverse of flatten(): slice the 1-D `tensor` back into pieces with
        # the stored tensors' shapes (returned as a plain list).
        (tensors, s) = ([], 0)
        for raw_tensor in self._tensors:
            length = raw_tensor.numel()
            x = torch.reshape(tensor[s:(s + length)], shape=raw_tensor.shape)
            tensors.append(x)
            s += length
        return tensors

    def append(self, name, tensor, param_or_buffer):
        # Register a new tensor; names must be unique.
        if (not isinstance(tensor, torch.Tensor)):
            raise TypeError('The input tensor must be torch.Tensor instead of {:}'.format(type(tensor)))
        self._names.append(name)
        self._tensors.append(tensor)
        self._param_or_buffers.append(param_or_buffer)
        assert (name not in self._name2index), 'The [{:}] has already been added.'.format(name)
        self._name2index[name] = (len(self._names) - 1)

    def query(self, name):
        # Return the tensor registered under `name`; raise if unknown.
        if (not self.has(name)):
            raise ValueError('The {:} is not in {:}'.format(name, list(self._name2index.keys())))
        index = self._name2index[name]
        return self._tensors[index]

    def has(self, name):
        return (name in self._name2index)

    def has_prefix(self, prefix):
        # Return the first registered name starting with `prefix`, else False.
        for (name, idx) in self._name2index.items():
            if name.startswith(prefix):
                return name
        return False

    def numel(self):
        total = 0
        for tensor in self._tensors:
            total += tensor.numel()
        return total

    def __len__(self):
        return len(self._names)

    def __repr__(self):
        return '{name}({num} tensors)'.format(name=self.__class__.__name__, num=len(self))
def _no_grad_trunc_normal_(tensor, mean, std, a, b): def norm_cdf(x): return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0) if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))): warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2) with torch.no_grad(): l = norm_cdf(((a - mean) / std)) u = norm_cdf(((b - mean) / std)) tensor.uniform_(((2 * l) - 1), ((2 * u) - 1)) tensor.erfinv_() tensor.mul_((std * math.sqrt(2.0))) tensor.add_(mean) tensor.clamp_(min=a, max=b) return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=(-2.0), b=2.0):
    """Fill `tensor` (or each tensor in a list) with truncated-normal values.

    Values are effectively drawn from N(mean, std^2) with samples outside
    [a, b] redrawn until in range; works best when a <= mean <= b.

    Args:
        tensor: an n-dimensional torch.Tensor, or a list of such tensors.
        mean: mean of the normal distribution.
        std: standard deviation of the normal distribution.
        a: minimum cutoff value.
        b: maximum cutoff value.
    Returns:
        The filled tensor, or a list of filled tensors for list input.
    """
    if isinstance(tensor, list):
        # Initialize each tensor independently and keep the list structure.
        return [_no_grad_trunc_normal_(x, mean, std, a, b) for x in tensor]
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def init_transformer(m):
    """Initialize one (sub)module of a transformer.

    Linear / SuperLinear weights get a truncated normal (std=0.02) with zero
    bias; SuperLayerNorm1D gets unit weight and zero bias. Intended for use
    with `model.apply(init_transformer)`.
    """
    if isinstance(m, nn.Linear):
        trunc_normal_(m.weight, std=0.02)
        if (isinstance(m, nn.Linear) and (m.bias is not None)):
            nn.init.constant_(m.bias, 0)
    elif isinstance(m, super_core.SuperLinear):
        trunc_normal_(m._super_weight, std=0.02)
        if (m._super_bias is not None):
            nn.init.constant_(m._super_bias, 0)
    elif isinstance(m, super_core.SuperLayerNorm1D):
        nn.init.constant_(m.weight, 1.0)
        nn.init.constant_(m.bias, 0)
def get_scheduler(indicator, lr):
    """Build the LR multiplier schedule named by `indicator`.

    'warm-cos': 5% linear warmup (starting at lr*0.001), then cosine decay
    from `lr` down to lr*0.001. Raises ValueError for unknown indicators.
    """
    if (indicator != 'warm-cos'):
        raise ValueError('Unknown indicator: {:}'.format(indicator))
    return WarmupParamScheduler(
        CosineParamScheduler(lr, (lr * 0.001)),
        warmup_factor=0.001,
        warmup_length=0.05,
        warmup_method='linear',
    )
class Logger():
    """A logger used in xautodl.

    Messages go to ``<root_dir>/logs/<prefix><timestamp>.log`` and are
    optionally echoed to stdout.
    """

    def __init__(self, root_dir, prefix='', log_time=True):
        'Create a summary writer logging to log_dir.'
        self.root_dir = Path(root_dir)
        self.log_dir = (self.root_dir / 'logs')
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self._prefix = prefix
        # When True, every message is prefixed with a wall-clock timestamp.
        self._log_time = log_time
        self.logger_path = (self.log_dir / '{:}{:}.log'.format(self._prefix, time_for_file()))
        self._logger_file = open(self.logger_path, 'w')

    @property
    def logger(self):
        return self._logger_file

    def log(self, string, save=True, stdout=False):
        """Emit `string`: to stdout (raw write) or via print, and optionally
        append it to the log file (flushed immediately)."""
        string = ('{:} {:}'.format(time_string(), string) if self._log_time else string)
        if stdout:
            sys.stdout.write(string)
            sys.stdout.flush()
        else:
            print(string)
        if save:
            self._logger_file.write('{:}\n'.format(string))
            self._logger_file.flush()

    def close(self):
        self._logger_file.close()
        # BUGFIX: this class never creates `self.writer`, so the original
        # unconditional `self.writer` access raised AttributeError on close.
        # Only close a writer if one was attached externally (e.g. tensorboard).
        writer = getattr(self, 'writer', None)
        if (writer is not None):
            writer.close()

    def __repr__(self):
        return '{name}(dir={log_dir}, prefix={_prefix}, log_time={_log_time})'.format(name=self.__class__.__name__, **self.__dict__)
class AverageMeter():
    """Tracks a running sum/count and exposes the latest value and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        # val: most recent value; sum/count feed the running average.
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0.0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += (val * n)
        self.count += n
        self.avg = (self.sum / self.count)

    def __repr__(self):
        return '{name}(val={val}, avg={avg}, count={count})'.format(name=self.__class__.__name__, **self.__dict__)
class Metric(abc.ABC):
    """Base class for evaluation metrics.

    Subclasses must implement reset(), __call__(predictions, targets),
    get_info(), and perf_str(); __init__ calls reset() to build the state.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        raise NotImplementedError

    def __call__(self, predictions, targets):
        raise NotImplementedError

    def get_info(self):
        raise NotImplementedError

    def perf_str(self):
        raise NotImplementedError

    def inner_repr(self):
        # Hook for subclasses to contribute to __repr__.
        return ''

    def __repr__(self):
        return '{name}({inner})'.format(name=self.__class__.__name__, inner=self.inner_repr())
class ComposeMetric(Metric):
    """A metric that fans calls out to an ordered list of child metrics."""

    def __init__(self, *metric_list):
        self.reset()
        for metric in metric_list:
            self.append(metric)

    def reset(self):
        self._metric_list = []

    def append(self, metric):
        """Add one child metric; only Metric instances are accepted."""
        if (not isinstance(metric, Metric)):
            raise ValueError('The input metric is not correct: {:}'.format(type(metric)))
        self._metric_list.append(metric)

    def __len__(self):
        return len(self._metric_list)

    def __call__(self, predictions, targets):
        # Evaluate every child on the same (predictions, targets) pair.
        return [metric(predictions, targets) for metric in self._metric_list]

    def get_info(self):
        # Merge all children's info dicts; later children overwrite clashes.
        results = dict()
        for metric in self._metric_list:
            for (key, value) in metric.get_info().items():
                results[key] = value
        return results

    def inner_repr(self):
        return ','.join(str(metric) for metric in self._metric_list)
class CrossEntropyMetric(Metric):
    'The metric for the cross entropy metric.'

    def __init__(self, ignore_batch):
        super(CrossEntropyMetric, self).__init__()
        # When True every batch contributes with weight 1 (sum-reduced loss);
        # otherwise batches are weighted by their size (mean-reduced loss).
        self._ignore_batch = ignore_batch

    def reset(self):
        self._loss = AverageMeter()

    def __call__(self, predictions, targets):
        """Accumulate the cross-entropy of `predictions` (logits, shape
        (batch, classes)) against integer `targets`; returns the batch loss."""
        if (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
            # BUGFIX: `.shape` is an attribute (torch.Size), not a callable;
            # the original `predictions.shape()` raised TypeError. The unused
            # argmax computation was removed as well.
            (batch, _) = predictions.shape
            if self._ignore_batch:
                loss = F.cross_entropy(predictions, targets, reduction='sum')
                self._loss.update(loss.item(), 1)
            else:
                loss = F.cross_entropy(predictions, targets, reduction='mean')
                self._loss.update(loss.item(), batch)
            return loss
        else:
            raise NotImplementedError

    def get_info(self):
        return {'loss': self._loss.avg, 'score': (self._loss.avg * 100)}

    def perf_str(self):
        return 'ce-loss={:.5f}'.format(self._loss.avg)
class Top1AccMetric(Metric):
    'The metric for the top-1 accuracy.'

    def __init__(self, ignore_batch):
        super(Top1AccMetric, self).__init__()
        # When True every batch contributes with weight 1 to the running
        # average; otherwise batches are weighted by their size.
        self._ignore_batch = ignore_batch

    def reset(self):
        self._accuracy = AverageMeter()

    def __call__(self, predictions, targets):
        """Accumulate top-1 accuracy of `predictions` (logits, shape
        (batch, classes)) against integer `targets`; returns the batch accuracy."""
        if (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
            # BUGFIX: `.shape` is an attribute (torch.Size), not a callable;
            # the original `predictions.shape()` raised TypeError.
            (batch, _) = predictions.shape
            max_prob_indexes = torch.argmax(predictions, dim=(-1))
            corrects = torch.eq(max_prob_indexes, targets)
            accuracy = corrects.float().mean().float()
            if self._ignore_batch:
                self._accuracy.update(accuracy, 1)
            else:
                self._accuracy.update(accuracy, batch)
            return accuracy
        else:
            raise NotImplementedError

    def get_info(self):
        return {'accuracy': self._accuracy.avg, 'score': (self._accuracy.avg * 100)}

    def perf_str(self):
        return 'accuracy={:.3f}%'.format((self._accuracy.avg * 100))
def has_key_words(xdict):
    """Return True iff `xdict` is a dict that contains every key in KEYS."""
    if (not isinstance(xdict, dict)):
        return False
    # Subset test is equivalent to the intersection check.
    return set(KEYS).issubset(set(xdict.keys()))
def get_module_by_module_path(module_path):
    'Load the module from the path.'
    # A literal '.py' suffix means "load this file"; anything else is treated
    # as a dotted import path.
    if module_path.endswith('.py'):
        spec = importlib.util.spec_from_file_location('', module_path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod
    return importlib.import_module(module_path)
def call_by_dict(config: Dict[(Text, Any)], *args, **kwargs) -> object:
    """Instantiate/call the target described by `config`.

    `config` must provide 'module_path', the class/function key, plus 'args'
    (list) and 'kwargs' (dict). Extra *args are appended after config['args'];
    extra **kwargs override config['kwargs'].

    Returns:
        Whatever the resolved callable returns.
    """
    module = get_module_by_module_path(config['module_path'])
    cls_or_func = getattr(module, config[CLS_FUNC_KEY])
    # Config-provided positionals come first, then the caller's extras.
    merged_args = tuple((list(config['args']) + list(args)))
    merged_kwargs = {**config['kwargs'], **kwargs}
    return cls_or_func(*merged_args, **merged_kwargs)
def call_by_yaml(path, *args, **kwargs) -> object:
    """Load a config dict from the YAML file at `path` and call its target.

    BUGFIX: the original delegated to the undefined name ``call_by_config``,
    which raised NameError on every call; the intended target is
    ``call_by_dict``.
    """
    config = load_yaml(path)
    return call_by_dict(config, *args, **kwargs)
def nested_call_by_dict(config: Union[(Dict[(Text, Any)], Any)], *args, **kwargs) -> object:
    'Similar to `call_by_dict`, but differently, the args may contain another dict needs to be called.'
    if isinstance(config, list):
        return [nested_call_by_dict(x) for x in config]
    elif isinstance(config, tuple):
        # BUGFIX: the original returned a bare generator here; materialize it
        # so tuple inputs map to tuples, consistent with the list branch.
        return tuple(nested_call_by_dict(x) for x in config)
    elif (not isinstance(config, dict)):
        # Plain value: nothing to instantiate.
        return config
    elif (not has_key_words(config)):
        # BUGFIX: dict.items() yields (key, value); the original unpacked it as
        # (x, key), swapping keys and values in the result.
        return {key: nested_call_by_dict(x) for (key, x) in config.items()}
    else:
        # A callable spec: resolve the target, then recursively resolve every
        # argument before invoking it.
        module = get_module_by_module_path(config['module_path'])
        cls_or_func = getattr(module, config[CLS_FUNC_KEY])
        args = tuple((list(config['args']) + list(args)))
        kwargs = {**config['kwargs'], **kwargs}
        new_args = [nested_call_by_dict(x) for x in args]
        new_kwargs = {}
        for (key, x) in kwargs.items():
            new_kwargs[key] = nested_call_by_dict(x)
        return cls_or_func(*new_args, **new_kwargs)
def nested_call_by_yaml(path, *args, **kwargs) -> object:
    """Load the YAML file at `path` and resolve it with nested_call_by_dict."""
    return nested_call_by_dict(load_yaml(path), *args, **kwargs)
class BatchSampler():
    'A batch sampler used for single machine training.'

    def __init__(self, dataset, batch, steps):
        # Reshuffle once per "epoch" (every num_per_epoch // batch batches).
        self._num_per_epoch = len(dataset)
        self._iter_per_epoch = (self._num_per_epoch // batch)
        self._steps = steps
        self._batch = batch
        if (self._num_per_epoch < self._batch):
            raise ValueError('The dataset size must be larger than batch={:}'.format(batch))
        self._indexes = list(range(self._num_per_epoch))

    def __iter__(self):
        """Yield `steps` batches of indexes, shuffling at each epoch boundary."""
        for step in range(self._steps):
            pos = (step % self._iter_per_epoch)
            if (pos == 0):
                random.shuffle(self._indexes)
            start = (pos * self._batch)
            yield self._indexes[start:(start + self._batch)]

    def __len__(self):
        return self._steps
class ParamScheduler():
    """Base class for parameter schedulers.

    A scheduler maps a training-progress fraction in [0, 1) to a number
    (e.g. a learning rate), so schedules stay agnostic to the absolute run
    length as long as total training length is known.
    """

    # Tolerance added to `where` comparisons to absorb float rounding.
    WHERE_EPSILON = 1e-06

    def __call__(self, where: float) -> float:
        """Return the parameter value at progress `where` (a float in [0, 1))."""
        raise NotImplementedError('Param schedulers must override __call__')
class ConstantParamScheduler(ParamScheduler):
    """Scheduler that returns the same value at every point of training."""

    def __init__(self, value: float) -> None:
        self._value = value

    def __call__(self, where: float) -> float:
        # Progress must stay strictly below 1.0.
        if (where >= 1.0):
            raise RuntimeError(f'where in ParamScheduler must be in [0, 1]: got {where}')
        return self._value
class CosineParamScheduler(ParamScheduler):
    """Cosine decay (or warmup) between `start_value` and `end_value`.

    Implements the cosine-annealing half of SGDR
    ('SGDR: Stochastic Gradient Descent with Warm Restarts',
    https://arxiv.org/abs/1608.03983) — no restarts.

    Example:
        CosineParamScheduler(start_value=0.1, end_value=0.0001)
    """

    def __init__(self, start_value: float, end_value: float) -> None:
        self._start_value = start_value
        self._end_value = end_value

    def __call__(self, where: float) -> float:
        # Half-cosine interpolation from start (where=0) toward end (where=1).
        return (self._end_value + ((0.5 * (self._start_value - self._end_value)) * (1 + math.cos((math.pi * where)))))
class ExponentialParamScheduler(ParamScheduler):
    """Exponential schedule: param_t = start_value * (decay ** where).

    Example:
        ExponentialParamScheduler(start_value=2.0, decay=0.02)
        # a decreasing schedule with values in [2.0, 0.04)
    """

    def __init__(self, start_value: float, decay: float) -> None:
        self._start_value = start_value
        self._decay = decay

    def __call__(self, where: float) -> float:
        return (self._start_value * (self._decay ** where))
class LinearParamScheduler(ParamScheduler):
    """Linear interpolation between `start_value` (where=0) and `end_value`.

    Usable for either warmup or decay depending on the two endpoints.

    Example:
        LinearParamScheduler(start_value=0.0001, end_value=0.01)
        # linearly increasing values in [0.0001, 0.01)
    """

    def __init__(self, start_value: float, end_value: float) -> None:
        self._start_value = start_value
        self._end_value = end_value

    def __call__(self, where: float) -> float:
        # Convex combination of the two endpoints.
        return ((self._end_value * where) + (self._start_value * (1 - where)))
class MultiStepParamScheduler(ParamScheduler):
    """Piecewise-constant schedule: `values[i]` holds until `milestones[i]`.

    Example:
        MultiStepParamScheduler(
            values=[0.1, 0.01, 0.001, 0.0001],
            milestones=[30, 60, 80, 120]
        )
        # 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-79, 0.0001 for 80-120.

    len(values) must equal len(milestones) + 1 (the last milestone is the end
    of training when `num_updates` is omitted).
    """

    def __init__(self, values: List[float], num_updates: Optional[int]=None, milestones: Optional[List[int]]=None) -> None:
        """
        Args:
            values: param value in each range
            num_updates: the end of the last range. If None, will use ``milestones[-1]``
            milestones: the boundary of each range. If None, will evenly split ``num_updates``

        For example, all the following combinations define the same scheduler:

        * num_updates=90, milestones=[30, 60], values=[1, 0.1, 0.01]
        * num_updates=90, values=[1, 0.1, 0.01]
        * milestones=[30, 60, 90], values=[1, 0.1, 0.01]
        * milestones=[3, 6, 9], values=[1, 0.1, 0.01] (ParamScheduler is scale-invariant)
        """
        if ((num_updates is None) and (milestones is None)):
            raise ValueError('num_updates and milestones cannot both be None')
        if (milestones is None):
            # Default milestones: split num_updates evenly among the values.
            milestones = []
            step_width = math.ceil((num_updates / float(len(values))))
            for idx in range((len(values) - 1)):
                milestones.append((step_width * (idx + 1)))
        elif (not (isinstance(milestones, Sequence) and (len(milestones) == (len(values) - int((num_updates is not None)))))):
            raise ValueError(('MultiStep scheduler requires a list of %d miletones' % (len(values) - int((num_updates is not None)))))
        if (num_updates is None):
            # Last milestone doubles as the total number of updates.
            (num_updates, milestones) = (milestones[(-1)], milestones[:(-1)])
        if (num_updates < len(values)):
            raise ValueError('Total num_updates must be greater than length of param schedule')
        self._param_schedule = values
        self._num_updates = num_updates
        self._milestones: List[int] = milestones
        # Validate that milestones are in range and strictly increasing.
        start_epoch = 0
        for milestone in self._milestones:
            if (milestone >= self._num_updates):
                raise ValueError(('Milestone must be smaller than total number of updates: num_updates=%d, milestone=%d' % (self._num_updates, milestone)))
            if (start_epoch >= milestone):
                # BUGFIX: the original message said "smaller", inverting the
                # constraint actually checked (each milestone must exceed the
                # previous one).
                raise ValueError(('Milestone must be larger than start epoch: start_epoch=%d, milestone=%d' % (start_epoch, milestone)))
            start_epoch = milestone

    def __call__(self, where: float) -> float:
        if (where > 1.0):
            raise RuntimeError(f'where in ParamScheduler must be in [0, 1]: got {where}')
        # Map the progress fraction to an update index, then find its segment.
        epoch_num = int(((where + self.WHERE_EPSILON) * self._num_updates))
        return self._param_schedule[bisect.bisect_right(self._milestones, epoch_num)]
class PolynomialDecayParamScheduler(ParamScheduler):
    """Polynomial decay: param = base_value * (1 - where) ** power.

    Example:
        PolynomialDecayParamScheduler(base_value=0.1, power=0.9)
        # 0.1 at where=0, ~0.099 one epoch in, and so on.
    """

    def __init__(self, base_value: float, power: float) -> None:
        self._base_value = base_value
        self._power = power

    def __call__(self, where: float) -> float:
        return (self._base_value * ((1 - where) ** self._power))
class StepParamScheduler(ParamScheduler):
    """Fixed list of values spread evenly over training progress.

    Example:
        StepParamScheduler(values=[0.1, 0.01, 0.001, 0.0001], num_updates=120)
        # 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89, 0.0001 for 90-119.
    """

    def __init__(self, num_updates: Union[(int, float)], values: List[float]) -> None:
        if (num_updates <= 0):
            raise ValueError('Number of updates must be larger than 0')
        if (not (isinstance(values, Sequence) and (len(values) > 0))):
            raise ValueError('Step scheduler requires a list of at least one param value')
        self._param_schedule = values

    def __call__(self, where: float) -> float:
        # Scale progress into an index over the value list.
        idx = int(((where + self.WHERE_EPSILON) * len(self._param_schedule)))
        return self._param_schedule[idx]
class StepWithFixedGammaParamScheduler(ParamScheduler):
    """Decay `base_value` by `gamma` at `num_decays` evenly spaced points.

    Example:
        StepWithFixedGammaParamScheduler(
            base_value=0.1, gamma=0.1, num_decays=3, num_updates=120)
        # 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89, 0.0001 for 90-119.
    """

    def __init__(self, base_value: float, num_decays: int, gamma: float, num_updates: int) -> None:
        # Validate the two float-ish and the two integer arguments.
        for val in (base_value, gamma):
            if (not (isinstance(val, (int, float)) and (val > 0))):
                raise ValueError('base_value and gamma must be positive numbers')
        for val in (num_decays, num_updates):
            if (not (isinstance(val, int) and (val > 0))):
                raise ValueError('num_decays and num_updates must be positive integers')
        self.base_value = base_value
        self.num_decays = num_decays
        self.gamma = gamma
        self.num_updates = num_updates
        # Geometric ladder: base, base*gamma, ..., base*gamma**num_decays,
        # delegated to a StepParamScheduler for the even spacing.
        values = [base_value]
        for _ in range(num_decays):
            values.append((values[(-1)] * gamma))
        self._step_param_scheduler = StepParamScheduler(num_updates=num_updates, values=values)

    def __call__(self, where: float) -> float:
        return self._step_param_scheduler(where)
class CompositeParamScheduler(ParamScheduler):
    """Chain several schedulers, each owning a fraction of training.

    `lengths` gives each scheduler's share of progress and must sum to 1.0.
    Per-scheduler `interval_scaling` is either 'rescaled' (the child sees its
    own interval stretched to [0, 1]) or 'fixed' (the child sees the global
    progress value unchanged).

    Example:

    .. code-block:: python

        schedulers = [
            ConstantParamScheduler(value=0.42),
            CosineParamScheduler(start_value=0.42, end_value=1e-4)
        ]
        CompositeParamScheduler(
            schedulers=schedulers,
            interval_scaling=['rescaled', 'rescaled'],
            lengths=[0.3, 0.7])

    gives 0.42 for the first 30% of steps, then a cosine decay from 0.42 to
    0.0001 over the remaining 70%.
    """

    def __init__(self, schedulers: Sequence[ParamScheduler], lengths: List[float], interval_scaling: Sequence[str]) -> None:
        if (len(schedulers) != len(lengths)):
            raise ValueError('Schedulers and lengths must be same length')
        if (len(schedulers) == 0):
            raise ValueError('There must be at least one scheduler in the composite scheduler')
        if (abs((sum(lengths) - 1.0)) >= 0.001):
            raise ValueError('The sum of all values in lengths must be 1')
        if (sum(lengths) != 1.0):
            # Absorb tiny float drift into the final interval.
            lengths[(-1)] = (1.0 - sum(lengths[:(-1)]))
        for mode in interval_scaling:
            if (mode not in ['rescaled', 'fixed']):
                raise ValueError(f'Unsupported interval_scaling: {mode}')
        self._lengths = lengths
        self._schedulers = schedulers
        self._interval_scaling = interval_scaling

    def __call__(self, where: float) -> float:
        # Walk the cumulative lengths to find which child owns `where`.
        idx = 0
        cumulative = self._lengths[idx]
        while (((where + self.WHERE_EPSILON) > cumulative) and (idx < (len(self._schedulers) - 1))):
            idx += 1
            cumulative += self._lengths[idx]
        child = self._schedulers[idx]
        child_where = where
        if (self._interval_scaling[idx] == 'rescaled'):
            # Map the global progress onto the child's own [0, 1] interval.
            interval_start = (cumulative - self._lengths[idx])
            child_where = ((where - interval_start) / self._lengths[idx])
        return child(child_where)
class WarmupParamScheduler(CompositeParamScheduler):
    """Prepend a warmup stage to an existing scheduler."""

    def __init__(self, scheduler: ParamScheduler, warmup_factor: float, warmup_length: float, warmup_method: str='linear'):
        """
        Args:
            scheduler: warmup will be added at the beginning of this scheduler
            warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001
            warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire
                training, e.g. 0.01
            warmup_method: one of "linear" or "constant"
        """
        # Warmup ends exactly at the value the wrapped scheduler produces at
        # that point, so the transition is seamless.
        end_value = scheduler(warmup_length)
        start_value = (warmup_factor * scheduler(0.0))
        if (warmup_method == 'constant'):
            warmup = ConstantParamScheduler(start_value)
        elif (warmup_method == 'linear'):
            warmup = LinearParamScheduler(start_value, end_value)
        else:
            raise ValueError('Unknown warmup method: {}'.format(warmup_method))
        # The wrapped scheduler keeps the global timeline ('fixed'), so it
        # behaves as if it were the only scheduler after warmup.
        super().__init__([warmup, scheduler], interval_scaling=['rescaled', 'fixed'], lengths=[warmup_length, (1 - warmup_length)])
class LRMultiplier(torch.optim.lr_scheduler._LRScheduler):
    '\n    A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the\n    learning rate of each param in the optimizer.\n    Every step, the learning rate of each parameter becomes its initial value\n    multiplied by the output of the given :class:`ParamScheduler`.\n    The absolute learning rate value of each parameter can be different.\n    This scheduler can be used as long as the relative scale among them do\n    not change during training.\n    Examples:\n    ::\n        LRMultiplier(\n            opt,\n            WarmupParamScheduler(\n                MultiStepParamScheduler(\n                    [1, 0.1, 0.01],\n                    milestones=[60000, 80000],\n                    num_updates=90000,\n                ), 0.001, 100 / 90000\n            ),\n            max_iter=90000\n        )\n    '

    def __init__(self, optimizer: torch.optim.Optimizer, multiplier: ParamScheduler, max_iter: int, last_iter: int=(-1)):
        '\n        Args:\n            optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.\n            ``last_iter`` is the same as ``last_epoch``.\n            multiplier: a fvcore ParamScheduler that defines the multiplier on\n            every LR of the optimizer\n            max_iter: the total number of training iterations\n        '
        if (not isinstance(multiplier, ParamScheduler)):
            raise ValueError(f'_LRMultiplier(multiplier=) must be an instance of fvcore ParamScheduler. Got {multiplier} instead.')
        # Set our own fields before super().__init__, which triggers get_lr().
        self._multiplier = multiplier
        self._max_iter = max_iter
        super().__init__(optimizer, last_epoch=last_iter)

    def state_dict(self):
        # Only these two fields are needed to resume; the multiplier itself is
        # reconstructed from config, not serialized.
        return {'base_lrs': self.base_lrs, 'last_epoch': self.last_epoch}

    def get_lr(self) -> List[float]:
        # Progress fraction = completed iterations / total iterations.
        multiplier = self._multiplier((self.last_epoch / self._max_iter))
        return [(base_lr * multiplier) for base_lr in self.base_lrs]
def time_for_file():
    """Return a filename-safe UTC timestamp, e.g. '05-Jan-at-13-07-59'."""
    return '{:}'.format(time.strftime('%d-%h-at-%H-%M-%S', time.gmtime(time.time())))
def time_string():
    """Return the current UTC date-time wrapped in brackets, e.g. '[2021-01-05 13:07:59]'."""
    return '[{:}]'.format(time.strftime('%Y-%m-%d %X', time.gmtime(time.time())))
def convert_secs2time(epoch_time, return_str=False):
    """Convert a duration in seconds into hours/minutes/seconds.

    Args:
        epoch_time: the duration in seconds (int or float).
        return_str: when True, return a '[HH:MM:SS]' string instead of a tuple.

    Returns:
        (hours, minutes, seconds) as ints, or the formatted string.
    """
    need_hour = int((epoch_time / 3600))
    need_mins = int(((epoch_time - (3600 * need_hour)) / 60))
    need_secs = int(((epoch_time - (3600 * need_hour)) - (60 * need_mins)))
    if return_str:
        # FIX: the original bound this to a local named `str`, shadowing the
        # builtin inside the function.
        formatted = '[{:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
        return formatted
    return (need_hour, need_mins, need_secs)
def count_parameters(model_or_parameters, unit='mb'):
    """Count the parameters of a module / parameter / collection thereof.

    Args:
        model_or_parameters: an ``nn.Module``, a single ``nn.Parameter``, a
            list/tuple of accepted inputs (counted recursively), or an
            iterable of tensors.
        unit: 'kb'/'k', 'mb'/'m', 'gb'/'g' (decimal scaling), or None for the
            raw element count.

    Returns:
        The (possibly scaled) number of parameters.

    Raises:
        ValueError: if ``unit`` is not one of the recognized units.
    """
    if isinstance(model_or_parameters, nn.Module):
        counts = sum(np.prod(v.size()) for v in model_or_parameters.parameters())
    elif isinstance(model_or_parameters, nn.Parameter):
        # BUGFIX: the original referenced the undefined name
        # ``models_or_parameters`` here, raising NameError.
        counts = model_or_parameters.numel()
    elif isinstance(model_or_parameters, (list, tuple)):
        # BUGFIX: same undefined-name typo; count each element recursively.
        counts = sum(count_parameters(x, None) for x in model_or_parameters)
    else:
        counts = sum(np.prod(v.size()) for v in model_or_parameters)
    norm_unit = unit.lower() if unit is not None else None
    if norm_unit in ('kb', 'k'):
        counts /= 1000.0
    elif norm_unit in ('mb', 'm'):
        counts /= 1000000.0
    elif norm_unit in ('gb', 'g'):
        counts /= 1000000000.0
    elif unit is not None:
        # BUGFIX: message said 'Unknow'.
        raise ValueError('Unknown unit: {:}'.format(unit))
    return counts
def load_yaml(path):
    """Parse the YAML file at ``path`` and return its content.

    Raises:
        ValueError: if ``path`` does not point to an existing file.
    """
    if not os.path.isfile(path):
        raise ValueError('{:} is not a file.'.format(path))
    with open(path, 'r') as stream:
        return yaml.safe_load(stream)
def get_model(config: Dict[(Text, Any)], **kwargs):
    """Build a SuperSequential model described by ``config`` and ``kwargs``.

    Args:
        config: dict whose 'model_type' selects the family ('simple_mlp'
            (default), 'norm_mlp', 'dual_norm_mlp', or 'quant_transformer').
        **kwargs: hyper-parameters for the chosen family.  Common keys:
            'input_dim', 'output_dim', 'act_cls'/'norm_cls' (names looked up
            in the super_name2activation / super_name2norm registries).
            Family-specific keys: 'hidden_dim' or 'hidden_dim1'/'hidden_dim2'
            plus 'mean'/'std' (simple_mlp); 'hidden_dims' (norm_mlp /
            dual_norm_mlp); 'dropout' (dual_norm_mlp).

    Returns:
        A SuperSequential model.

    Raises:
        NotImplementedError: for 'quant_transformer'.
        TypeError: for any other unknown model type.
    """
    model_type = config.get('model_type', 'simple_mlp').lower()
    if model_type == 'simple_mlp':
        act_cls = super_name2activation[kwargs['act_cls']]
        norm_cls = super_name2norm[kwargs['norm_cls']]
        (mean, std) = (kwargs.get('mean', None), kwargs.get('std', None))
        if 'hidden_dim' in kwargs:
            # A single 'hidden_dim' value is reused for both hidden layers.
            hidden_dim1 = kwargs.get('hidden_dim')
            hidden_dim2 = kwargs.get('hidden_dim')
        else:
            hidden_dim1 = kwargs.get('hidden_dim1', 200)
            hidden_dim2 = kwargs.get('hidden_dim2', 100)
        model = SuperSequential(
            norm_cls(mean=mean, std=std),
            SuperLinear(kwargs['input_dim'], hidden_dim1),
            act_cls(),
            SuperLinear(hidden_dim1, hidden_dim2),
            act_cls(),
            SuperLinear(hidden_dim2, kwargs['output_dim']),
        )
    elif model_type == 'norm_mlp':
        act_cls = super_name2activation[kwargs['act_cls']]
        norm_cls = super_name2norm[kwargs['norm_cls']]
        (sub_layers, last_dim) = ([], kwargs['input_dim'])
        for (i, hidden_dim) in enumerate(kwargs['hidden_dims']):
            sub_layers.append(SuperLinear(last_dim, hidden_dim))
            if hidden_dim > 1:
                # presumably normalization is skipped for 1-dim features — confirm
                sub_layers.append(norm_cls(hidden_dim, elementwise_affine=False))
            sub_layers.append(act_cls())
            last_dim = hidden_dim
        sub_layers.append(SuperLinear(last_dim, kwargs['output_dim']))
        model = SuperSequential(*sub_layers)
    elif model_type == 'dual_norm_mlp':
        act_cls = super_name2activation[kwargs['act_cls']]
        norm_cls = super_name2norm[kwargs['norm_cls']]
        (sub_layers, last_dim) = ([], kwargs['input_dim'])
        for (i, hidden_dim) in enumerate(kwargs['hidden_dims']):
            if i > 0:
                sub_layers.append(norm_cls(last_dim, elementwise_affine=False))
            # Each stage is Linear -> Dropout -> Linear -> activation.
            sub_layers.append(SuperLinear(last_dim, hidden_dim))
            sub_layers.append(SuperDropout(kwargs['dropout']))
            sub_layers.append(SuperLinear(hidden_dim, hidden_dim))
            sub_layers.append(act_cls())
            last_dim = hidden_dim
        sub_layers.append(SuperLinear(last_dim, kwargs['output_dim']))
        model = SuperSequential(*sub_layers)
    elif model_type == 'quant_transformer':
        raise NotImplementedError
    else:
        # BUGFIX: error message said 'Unkonwn'.
        raise TypeError('Unknown model type: {:}'.format(model_type))
    return model
def evaluate_all_datasets(arch: Text, datasets: List[Text], xpaths: List[Text], splits: List[Text], config_path: Text, seed: int, raw_arch_config, workers, logger):
    """Train and evaluate one architecture on every requested dataset.

    Args:
        arch: the architecture (genotype) to evaluate.
        datasets, xpaths, splits: parallel lists — dataset name, data path,
            and whether to use the predefined train/valid split.
        config_path: path of the training-configuration file.
        seed: random seed forwarded to the trainer.
        raw_arch_config: dict with 'channel' and 'num_cells' for the network.
        workers: number of DataLoader workers.
        logger: logger object providing a ``log`` method.

    Returns:
        dict mapping each dataset key to its training results, plus 'info'
        (machine info) and 'all_dataset_keys'.
    """
    (machine_info, raw_arch_config) = (get_machine_info(), deepcopy(raw_arch_config))
    all_infos = {'info': machine_info}
    all_dataset_keys = []
    # Evaluate the architecture on every dataset in turn.
    for (dataset, xpath, split) in zip(datasets, xpaths, splits):
        (train_data, valid_data, xshape, class_num) = get_datasets(dataset, xpath, (- 1))
        # Pick the predefined split file for this dataset, if any.
        if ((dataset == 'cifar10') or (dataset == 'cifar100')):
            split_info = load_config('configs/nas-benchmark/cifar-split.txt', None, None)
        elif dataset.startswith('ImageNet16'):
            split_info = load_config('configs/nas-benchmark/{:}-split.txt'.format(dataset), None, None)
        elif (dataset == 'ninapro'):
            split_info = None  # ninapro has no predefined split file
        else:
            raise ValueError('invalid dataset : {:}'.format(dataset))
        config = load_config(config_path, dict(class_num=class_num, xshape=xshape), logger)
        if bool(split):
            # Split mode (CIFAR-10 only): train on the 'train' half and
            # validate on the 'valid' half using test-time transforms.
            assert (dataset == 'cifar10')
            ValLoaders = {'ori-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)}
            assert (len(train_data) == (len(split_info.train) + len(split_info.valid))), 'invalid length : {:} vs {:} + {:}'.format(len(train_data), len(split_info.train), len(split_info.valid))
            # Use the evaluation transform for the validation half.
            train_data_v2 = deepcopy(train_data)
            train_data_v2.transform = valid_data.transform
            valid_data = train_data_v2
            train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.train), num_workers=workers, pin_memory=True)
            valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(split_info.valid), num_workers=workers, pin_memory=True)
            ValLoaders['x-valid'] = valid_loader
        else:
            # Full-training mode: train on all training data; build the
            # dataset-specific evaluation loaders.
            train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True, num_workers=workers, pin_memory=True)
            valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True)
            if (dataset == 'cifar10'):
                ValLoaders = {'ori-test': valid_loader}
            elif (dataset == 'cifar100'):
                cifar100_splits = load_config('configs/nas-benchmark/cifar100-test-split.txt', None, None)
                ValLoaders = {'ori-test': valid_loader, 'x-valid': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xvalid), num_workers=workers, pin_memory=True), 'x-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(cifar100_splits.xtest), num_workers=workers, pin_memory=True)}
            elif (dataset == 'ImageNet16-120'):
                imagenet16_splits = load_config('configs/nas-benchmark/imagenet-16-120-test-split.txt', None, None)
                ValLoaders = {'ori-test': valid_loader, 'x-valid': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet16_splits.xvalid), num_workers=workers, pin_memory=True), 'x-test': torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(imagenet16_splits.xtest), num_workers=workers, pin_memory=True)}
            elif (dataset == 'ninapro'):
                ValLoaders = {'ori-test': valid_loader}
            else:
                raise ValueError('invalid dataset : {:}'.format(dataset))
        dataset_key = '{:}'.format(dataset)
        if bool(split):
            dataset_key = (dataset_key + '-valid')
        logger.log('Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size))
        logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(dataset_key, config))
        for (key, value) in ValLoaders.items():
            logger.log('Evaluate ---->>>> {:10s} with {:} batchs'.format(key, len(value)))
        arch_config = dict2config(dict(name='infer.tiny', C=raw_arch_config['channel'], N=raw_arch_config['num_cells'], genotype=arch, num_classes=config.class_num), None)
        results = bench_evaluate_for_seed(arch_config, config, train_loader, ValLoaders, seed, logger)
        all_infos[dataset_key] = results
        all_dataset_keys.append(dataset_key)
    all_infos['all_dataset_keys'] = all_dataset_keys
    return all_infos
def main(save_dir: Path, workers: int, datasets: List[Text], xpaths: List[Text], splits: List[int], seeds: List[int], nets: List[str], opt_config: Dict[(Text, Any)], to_evaluate_indexes: tuple, cover_mode: bool, arch_config: Dict[(Text, Any)]):
    """Evaluate a range of architectures, each under several seeds, saving results.

    Args:
        save_dir: root directory for logs and per-(arch, seed) result files.
        workers: number of DataLoader workers.
        datasets, xpaths, splits: parallel dataset specifications.
        seeds: random seeds to evaluate each architecture with.
        nets: all architecture strings; indexed by ``to_evaluate_indexes``.
        opt_config: path of the optimization-configuration file.
        to_evaluate_indexes: indexes (into ``nets``) to evaluate.
        cover_mode: when True, existing result files are removed and
            re-created; otherwise they are skipped.
        arch_config: dict with 'channel' and 'num_cells'.
    """
    log_dir = (save_dir / 'logs')
    log_dir.mkdir(parents=True, exist_ok=True)
    logger = Logger(str(log_dir), os.getpid(), False)
    logger.log('xargs : seeds = {:}'.format(seeds))
    logger.log('xargs : cover_mode = {:}'.format(cover_mode))
    logger.log(('-' * 100))
    logger.log(('Start evaluating range =: {:06d} - {:06d}'.format(min(to_evaluate_indexes), max(to_evaluate_indexes)) + '({:} in total) / {:06d} with cover-mode={:}'.format(len(to_evaluate_indexes), len(nets), cover_mode)))
    for (i, (dataset, xpath, split)) in enumerate(zip(datasets, xpaths, splits)):
        logger.log('--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}'.format(i, len(datasets), dataset, xpath, split))
    logger.log('--->>> optimization config : {:}'.format(opt_config))
    (start_time, epoch_time) = (time.time(), AverageMeter())
    for (i, index) in enumerate(to_evaluate_indexes):
        arch = nets[index]
        logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] {:}'.format(time_string(), i, len(to_evaluate_indexes), index, len(nets), seeds, ('-' * 15)))
        logger.log('{:} {:} {:}'.format(('-' * 15), arch, ('-' * 15)))
        # Evaluate this architecture under every requested seed.
        has_continue = False
        for seed in seeds:
            to_save_name = (save_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed))
            if to_save_name.exists():
                if cover_mode:
                    logger.log('Find existing file : {:}, remove it before evaluation'.format(to_save_name))
                    os.remove(str(to_save_name))
                else:
                    logger.log('Find existing file : {:}, skip this evaluation'.format(to_save_name))
                    has_continue = True
                    continue
            results = evaluate_all_datasets(CellStructure.str2structure(arch), datasets, xpaths, splits, opt_config, seed, arch_config, workers, logger)
            torch.save(results, to_save_name)
            logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] ===>>> {:}'.format(time_string(), i, len(to_evaluate_indexes), index, len(nets), seeds, to_save_name))
        # Only update ETA statistics for architectures that were fully run,
        # so skipped entries do not distort the average time per arch.
        if (not has_continue):
            epoch_time.update((time.time() - start_time))
            start_time = time.time()
            need_time = 'Time Left: {:}'.format(convert_secs2time((epoch_time.avg * ((len(to_evaluate_indexes) - i) - 1)), True))
            logger.log('This arch costs : {:}'.format(convert_secs2time(epoch_time.val, True)))
            logger.log('{:}'.format(('*' * 100)))
            logger.log('{:} {:74s} {:}'.format(('*' * 10), '{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}'.format(i, len(to_evaluate_indexes), index, len(nets), need_time), ('*' * 10)))
            logger.log('{:}'.format(('*' * 100)))
    logger.close()
def train_single_model(save_dir, workers, datasets, xpaths, splits, use_less, seeds, model_str, arch_config):
    """Train and evaluate a single architecture under several seeds.

    Results for each seed are cached as 'seed-XXXX.pth' under a directory
    derived from ``save_dir``, the model string and the arch config; existing
    files are loaded instead of re-trained.

    Args:
        save_dir: root output directory.
        workers: number of DataLoader workers.
        datasets, xpaths, splits: parallel dataset specifications.
        use_less: when True, tag the run as the reduced 'LESS' schedule.
        seeds: random seeds to train with.
        model_str: an architecture string or a key in CellArchitectures.
        arch_config: dict with 'channel' and 'num_cells'.

    Raises:
        ValueError: if ``model_str`` can neither be found nor parsed.
    """
    assert torch.cuda.is_available(), 'CUDA is not available.'
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.deterministic = True
    save_dir = ((Path(save_dir) / 'specifics') / '{:}-{:}-{:}-{:}'.format(('LESS' if use_less else 'FULL'), model_str, arch_config['channel'], arch_config['num_cells']))
    logger = Logger(str(save_dir), 0, False)
    if (model_str in CellArchitectures):
        arch = CellArchitectures[model_str]
        logger.log('The model string is found in pre-defined architecture dict : {:}'.format(model_str))
    else:
        try:
            arch = CellStructure.str2structure(model_str)
        except Exception as err:
            # BUGFIX: was a bare ``except:`` that also swallowed SystemExit /
            # KeyboardInterrupt; also chain the parse error for debugging.
            raise ValueError('Invalid model string : {:}. It can not be found or parsed.'.format(model_str)) from err
    assert arch.check_valid_op(get_search_spaces('cell', 'full')), '{:} has the invalid op.'.format(arch)
    logger.log('Start train-evaluate {:}'.format(arch.tostr()))
    logger.log('arch_config : {:}'.format(arch_config))
    (start_time, seed_time) = (time.time(), AverageMeter())
    for (_is, seed) in enumerate(seeds):
        logger.log('\nThe {:02d}/{:02d}-th seed is {:} ----------------------<.>----------------------'.format(_is, len(seeds), seed))
        to_save_name = (save_dir / 'seed-{:04d}.pth'.format(seed))
        if to_save_name.exists():
            logger.log('Find the existing file {:}, directly load!'.format(to_save_name))
            checkpoint = torch.load(to_save_name)
        else:
            logger.log('Does not find the existing file {:}, train and evaluate!'.format(to_save_name))
            # NOTE(review): ``use_less`` is passed where this file's
            # evaluate_all_datasets declares ``config_path`` — confirm which
            # overload of evaluate_all_datasets this script is paired with.
            checkpoint = evaluate_all_datasets(arch, datasets, xpaths, splits, use_less, seed, arch_config, workers, logger)
            torch.save(checkpoint, to_save_name)
        # Summarize the per-dataset results stored in the checkpoint.
        logger.log('{:}'.format(checkpoint['info']))
        all_dataset_keys = checkpoint['all_dataset_keys']
        for dataset_key in all_dataset_keys:
            logger.log('\n{:} dataset : {:} {:}'.format(('-' * 15), dataset_key, ('-' * 15)))
            dataset_info = checkpoint[dataset_key]
            logger.log('Flops = {:} MB, Params = {:} MB'.format(dataset_info['flop'], dataset_info['param']))
            logger.log('config : {:}'.format(dataset_info['config']))
            logger.log('Training State (finish) = {:}'.format(dataset_info['finish-train']))
            last_epoch = (dataset_info['total_epoch'] - 1)
            (train_acc1es, train_acc5es) = (dataset_info['train_acc1es'], dataset_info['train_acc5es'])
            (valid_acc1es, valid_acc5es) = (dataset_info['valid_acc1es'], dataset_info['valid_acc5es'])
            logger.log('Last Info : Train = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%, Test = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%'.format(train_acc1es[last_epoch], train_acc5es[last_epoch], (100 - train_acc1es[last_epoch]), valid_acc1es[last_epoch], valid_acc5es[last_epoch], (100 - valid_acc1es[last_epoch])))
        seed_time.update((time.time() - start_time))
        start_time = time.time()
        need_time = 'Time Left: {:}'.format(convert_secs2time((seed_time.avg * ((len(seeds) - _is) - 1)), True))
        logger.log('\n<<<***>>> The {:02d}/{:02d}-th seed is {:} <finish> other procedures need {:}'.format(_is, len(seeds), seed, need_time))
    logger.close()
def generate_meta_info(save_dir, max_node, divide=40):
    """Generate and save the meta file (all archs + train/valid split) for NAS-Bench-201.

    Args:
        save_dir: directory to write 'meta-node-{max_node}.pth' into.
        max_node: maximum number of nodes in a cell.
        divide: not used in this function.  # NOTE(review): confirm it is kept for interface compatibility
    """
    aa_nas_bench_ss = get_search_spaces('cell', 'nas-bench-201')
    archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False)
    print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2))))
    random.seed(88)  # fixed seed so the shuffled arch order is reproducible
    random.shuffle(archs)
    # Sanity-check a few fixed positions of the canonical shuffled ordering.
    assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0])
    assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9])
    assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123])
    total_arch = len(archs)
    # Build a deterministic 25k/25k train/valid split of the first 50000 archs.
    num = 50000
    indexes_5W = list(range(num))
    random.seed(1021)  # second fixed seed, dedicated to the split
    random.shuffle(indexes_5W)
    train_split = sorted(list(set(indexes_5W[:(num // 2)])))
    valid_split = sorted(list(set(indexes_5W[(num // 2):])))
    assert ((len(train_split) + len(valid_split)) == num)
    # Spot-check a few split entries to guard against RNG/behavior drift.
    assert ((train_split[0] == 0) and (train_split[10] == 26) and (train_split[111] == 203) and (valid_split[0] == 1) and (valid_split[10] == 18) and (valid_split[111] == 242)), '{:} {:} {:} - {:} {:} {:}'.format(train_split[0], train_split[10], train_split[111], valid_split[0], valid_split[10], valid_split[111])
    splits = {num: {'train': train_split, 'valid': valid_split}}
    info = {'archs': [x.tostr() for x in archs], 'total': total_arch, 'max_node': max_node, 'splits': splits}
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    save_name = (save_dir / 'meta-node-{:}.pth'.format(max_node))
    assert (not save_name.exists()), '{:} already exist'.format(save_name)
    torch.save(info, save_name)
    print('save the meta file into {:}'.format(save_name))
def traverse_net(max_node):
    """Enumerate all cells with ``max_node`` nodes, shuffle them with seed 88,
    and return their string representations."""
    search_space = get_search_spaces('cell', 'nats-bench')
    archs = CellStructure.gen_all(search_space, max_node, False)
    print('There are {:} archs vs {:}.'.format(len(archs), len(search_space) ** ((max_node - 1) * max_node / 2)))
    random.seed(88)  # fixed seed keeps the shuffled order reproducible
    random.shuffle(archs)
    # Spot-check a few positions to guarantee the canonical ordering.
    expected = {
        0: '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|',
        9: '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|',
        123: '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|',
    }
    for idx, arch_str in expected.items():
        assert archs[idx].tostr() == arch_str, 'please check the {:}-th architecture : {:}'.format(idx, archs[idx])
    return [x.tostr() for x in archs]
def filter_indexes(xlist, mode, save_dir, seeds):
    """Return the subset of architecture indexes that still need evaluation.

    In 'cover' mode every index is kept; otherwise an index is kept only when
    at least one of its per-seed checkpoint files is missing under ``save_dir``.
    """
    if mode == 'cover':
        remaining = list(xlist)
    else:
        remaining = []
        for index in xlist:
            # any() short-circuits at the first missing checkpoint, matching
            # the original early break.
            missing = any(
                not (save_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)).exists()
                for seed in seeds
            )
            if missing:
                remaining.append(index)
    print('{:} [FILTER-INDEXES] : there are {:}/{:} architectures in total'.format(time_string(), len(remaining), len(xlist)))
    return remaining
def create_result_count(used_seed: int, dataset: Text, arch_config: Dict[(Text, Any)], results: Dict[(Text, Any)], dataloader_dict: Dict[(Text, Any)]) -> ResultsCount:
    """Pack raw training results into a ResultsCount, filling in missing evals.

    Checkpoints that already contain 'train_times' are copied over directly.
    Older checkpoints are completed by rebuilding the trained network and
    re-evaluating it on loaders from ``dataloader_dict`` (keyed as
    '<dataset>@<split>').

    Args:
        used_seed: seed the checkpoint was trained with.
        dataset: dataset name ('cifar10-valid', 'cifar10', 'cifar100', or
            'ImageNet16-120').
        arch_config: dict with 'channel', 'num_cells', 'arch_str', 'class_num'.
        results: raw result dict loaded from a checkpoint.
        dataloader_dict: evaluation dataloaders keyed by '<dataset>@<split>'.

    Returns:
        A populated ResultsCount instance.

    Raises:
        ValueError: for an unrecognized dataset name.
    """
    xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None)
    net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes': arch_config['class_num']}, None)
    if ('train_times' in results):
        # New-style checkpoint: all statistics were recorded during training.
        xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times'])
        xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times'])
    else:
        # Old-style checkpoint: rebuild the network and re-evaluate on GPU.
        network = get_cell_based_tiny_net(net_config)
        network.load_state_dict(xresult.get_net_param())
        if (dataset == 'cifar10-valid'):
            # The stored evals are the validation half; measure the test set.
            xresult.update_OLD_eval('x-valid', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda())
            xresult.update_OLD_eval('ori-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            xresult.update_latency(latencies)
        elif (dataset == 'cifar10'):
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
            xresult.update_latency(latencies)
        elif ((dataset == 'cifar100') or (dataset == 'ImageNet16-120')):
            # These datasets need both the x-valid and x-test measurements.
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses'])
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda())
            xresult.update_OLD_eval('x-valid', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda())
            xresult.update_OLD_eval('x-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss})
            xresult.update_latency(latencies)
        else:
            raise ValueError('invalid dataset name : {:}'.format(dataset))
    return xresult
def account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict):
    """Aggregate all seed checkpoints of one architecture into an ArchResults.

    Args:
        arch_index: integer index of the architecture.
        arch_str: the architecture string.
        checkpoints: paths of 'arch-*-seed-*.pth' files for this architecture.
        datasets: not read here; this version only processes 'ninapro'.
            # NOTE(review): confirm the parameter is kept for interface compatibility
        dataloader_dict: evaluation dataloaders forwarded to create_result_count.

    Returns:
        An ArchResults populated with one ResultsCount per seed.

    Raises:
        ValueError: when a checkpoint contains no usable dataset entry.
    """
    information = ArchResults(arch_index, arch_str)
    for checkpoint_path in checkpoints:
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        # The seed is encoded in the file name: arch-XXXXXX-seed-SSSS.pth
        used_seed = checkpoint_path.name.split('-')[(- 1)].split('.')[0]
        ok_dataset = 0
        dataset = 'ninapro'
        if (dataset not in checkpoint):
            print('Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path))
            continue
        else:
            ok_dataset += 1
        results = checkpoint[dataset]
        assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path)
        arch_config = {'channel': results['arch_config']['C'], 'num_cells': results['arch_config']['N'], 'arch_str': arch_str, 'class_num': results['arch_config']['num_classes']}
        xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict)
        information.update(dataset, int(used_seed), xresult)
        if (ok_dataset == 0):
            raise ValueError('{:} does not find any data'.format(checkpoint_path))
    return information
def correct_time_related_info(arch_index: int, arch_infos: Dict[(Text, ArchResults)]):
    """Replace measured train/eval times with pseudo-times scaled for ninapro.

    The per-epoch training time recorded for ('ninapro', seed 777, hp '12') is
    averaged, and the evaluation times are averaged per split; both are then
    re-scaled by dataset sizes and written back into every ArchResults.

    Args:
        arch_index: index of the architecture (not used in the computation).
        arch_infos: mapping from hp string to ArchResults; modified in place.

    Returns:
        The same ``arch_infos`` mapping with reset pseudo train/eval times.
    """
    train_per_epoch_time = list(arch_infos['12'].query('ninapro', 777).train_times.values())
    train_per_epoch_time = (sum(train_per_epoch_time) / len(train_per_epoch_time))
    # Collect the recorded eval times, split by which loader produced them.
    (eval_ori_test_time, eval_x_valid_time) = ([], [])
    for (key, value) in arch_infos['12'].query('ninapro', 777).eval_times.items():
        if key.startswith('ori-test@'):
            eval_ori_test_time.append(value)
        elif key.startswith('x-valid@'):
            eval_x_valid_time.append(value)
        else:
            raise ValueError('-- {:} --'.format(key))
    (eval_ori_test_time, eval_x_valid_time) = (float(np.mean(eval_ori_test_time)), float(np.mean(eval_x_valid_time)))
    # Dataset sizes used to convert per-epoch / total times into per-sample ones.
    nums = {'cifar10-valid-train': 25000, 'ninapro-train': 3297, 'ninapro-test': 659}
    eval_per_sample = ((eval_ori_test_time + eval_x_valid_time) / nums['ninapro-test'])
    for (hp, arch_info) in arch_infos.items():
        # NOTE(review): the train time is normalized by the cifar10-valid
        # training-set size before scaling to ninapro — confirm this is intended.
        arch_info.reset_pseudo_train_times('ninapro', None, ((train_per_epoch_time / nums['cifar10-valid-train']) * nums['ninapro-train']))
        arch_info.reset_pseudo_eval_times('ninapro', None, 'ori-test', (eval_per_sample * nums['ninapro-test']))
    return arch_infos
def simplify(save_dir, save_name, nets, total, sup_config):
    """Merge per-(arch, seed) raw checkpoints into the benchmark pickle files.

    Scans 'raw-data-<hp>' folders under ``save_dir``, aggregates each
    architecture's checkpoints (via account_one_arch), corrects the recorded
    times, and writes FULL/SIMPLIFY per-arch pickles plus one merged file
    whose name embeds the md5 of its compressed form.

    Args:
        save_dir: root directory containing 'raw-data-<hp>' checkpoint folders.
        save_name: base name for the generated benchmark files.
        nets: list of all architecture strings.
        total: expected number of architectures.
        sup_config: not read in this function.  # NOTE(review): confirm
    """
    dataloader_dict = {}
    (hps, seeds) = (['12'], set())
    # Scan the raw checkpoints and collect which seeds are available.
    for hp in hps:
        sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp))
        ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth')))
        seed2names = defaultdict(list)
        for ckp in ckps:
            parts = re.split('-|\\.', ckp.name)
            seed2names[parts[3]].append(ckp.name)
        print('DIR : {:}'.format(sub_save_dir))
        nums = []
        for (seed, xlist) in seed2names.items():
            seeds.add(seed)
            nums.append(len(xlist))
            print(' [seed={:}] there are {:} checkpoints.'.format(seed, len(xlist)))
        assert (len(nets) == total == max(nums)), 'there are some missed files : {:} vs {:}'.format(max(nums), total)
    print('{:} start simplify the checkpoint.'.format(time_string()))
    datasets = 'ninapro'  # NOTE(review): a single dataset name (string), not a list — confirm downstream expectation
    full_save_dir = (save_dir / (save_name + '-FULL'))
    simple_save_dir = (save_dir / (save_name + '-SIMPLIFY'))
    full_save_dir.mkdir(parents=True, exist_ok=True)
    simple_save_dir.mkdir(parents=True, exist_ok=True)
    (arch2infos, evaluated_indexes) = (dict(), set())
    (end_time, arch_time) = (time.time(), AverageMeter())
    # Write a meta file first so partially-finished runs are still indexable.
    temp_final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': None, 'evaluated_indexes': set()}
    pickle_save(temp_final_infos, str((full_save_dir / 'meta.pickle')))
    pickle_save(temp_final_infos, str((simple_save_dir / 'meta.pickle')))
    for index in tqdm(range(total)):
        arch_str = nets[index]
        hp2info = OrderedDict()
        full_save_path = (full_save_dir / '{:06d}.pickle'.format(index))
        simple_save_path = (simple_save_dir / '{:06d}.pickle'.format(index))
        for hp in hps:
            sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp))
            ckps = [(sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed)) for seed in seeds]
            ckps = [x for x in ckps if x.exists()]
            if (len(ckps) == 0):
                raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp))
            arch_info = account_one_arch(index, arch_str, ckps, datasets, dataloader_dict)
            hp2info[hp] = arch_info
        hp2info = correct_time_related_info(index, hp2info)
        evaluated_indexes.add(index)
        for hp in hps:
            hp2info[hp].clear_params()  # drop network weights to keep files small
        to_save_data = OrderedDict({'12': hp2info['12'].state_dict()})
        print(to_save_data)
        pickle_save(to_save_data, str(full_save_path))
        pickle_save(to_save_data, str(simple_save_path))
        arch2infos[index] = to_save_data
        arch_time.update((time.time() - end_time))
        end_time = time.time()
        need_time = '{:}'.format(convert_secs2time((arch_time.avg * ((total - index) - 1)), True))
        # NOTE(review): ``need_time`` is computed but never printed — confirm.
        print('{:} {:} done.'.format(time_string(), save_name))
    final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': arch2infos, 'evaluated_indexes': evaluated_indexes}
    save_file_name = (save_dir / '{:}.pickle'.format(save_name))
    pickle_save(final_infos, str(save_file_name))
    # The merged file's checksum (of its compressed form) becomes part of the
    # final file / directory names.
    hd5sum = get_md5_file((str(save_file_name) + '.pbz2'))
    hd5_file_name = (save_dir / '{:}-{:}.pickle.pbz2'.format(NATS_TSS_BASE_NAME, hd5sum))
    shutil.move((str(save_file_name) + '.pbz2'), hd5_file_name)
    print('Save {:} / {:} architecture results into {:} -> {:}.'.format(len(evaluated_indexes), total, save_file_name, hd5_file_name))
    hd5_full_save_dir = (save_dir / '{:}-{:}-full'.format(NATS_TSS_BASE_NAME, hd5sum))
    hd5_simple_save_dir = (save_dir / '{:}-{:}-simple'.format(NATS_TSS_BASE_NAME, hd5sum))
    shutil.move(full_save_dir, hd5_full_save_dir)
    shutil.move(simple_save_dir, hd5_simple_save_dir)
def traverse_net(max_node):
    """Generate the seed-88-shuffled list of all architecture strings for ``max_node`` nodes."""
    ops = get_search_spaces('cell', 'nats-bench')
    all_archs = CellStructure.gen_all(ops, max_node, False)
    num_edges = (max_node - 1) * max_node / 2
    print('There are {:} archs vs {:}.'.format(len(all_archs), len(ops) ** num_edges))
    random.seed(88)  # deterministic shuffle order
    random.shuffle(all_archs)
    # Verify a few fixed positions of the canonical ordering.
    assert all_archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|', 'please check the 0-th architecture : {:}'.format(all_archs[0])
    assert all_archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|', 'please check the 9-th architecture : {:}'.format(all_archs[9])
    assert all_archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|', 'please check the 123-th architecture : {:}'.format(all_archs[123])
    return [arch.tostr() for arch in all_archs]
def get_topology_config_space(search_space, max_nodes=4):
    """Create a ConfigSpace with one categorical hyper-parameter per cell edge.

    Each edge 'i<-j' (j < i < max_nodes) chooses one operation from
    ``search_space``.
    """
    cs = ConfigSpace.ConfigurationSpace()
    edges = [(i, j) for i in range(1, max_nodes) for j in range(i)]
    for (i, j) in edges:
        name = '{:}<-{:}'.format(i, j)
        cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(name, search_space))
    return cs
def get_size_config_space(search_space):
    """Create a ConfigSpace where every layer picks from the channel candidates."""
    cs = ConfigSpace.ConfigurationSpace()
    for ilayer in range(search_space['numbers']):
        hp = ConfigSpace.CategoricalHyperparameter('layer-{:}'.format(ilayer), search_space['candidates'])
        cs.add_hyperparameter(hp)
    return cs
def config2topology_func(max_nodes=4):
    """Return a converter mapping a ConfigSpace sample to a CellStructure genotype."""
    def config2structure(config):
        genotypes = []
        for node in range(1, max_nodes):
            # Each incoming edge 'node<-prev' carries one operation choice.
            edges = tuple((config['{:}<-{:}'.format(node, prev)], prev) for prev in range(node))
            genotypes.append(edges)
        return CellStructure(genotypes)
    return config2structure
def config2size_func(search_space):
    """Return a converter from a ConfigSpace sample to a 'c0:c1:...' channel string."""
    def config2structure(config):
        n_layers = search_space['numbers']
        return ':'.join(str(config['layer-{:}'.format(i)]) for i in range(n_layers))
    return config2structure
class MyWorker(Worker):
    """hpbandster worker that scores architectures via a NATS-Bench-style API."""

    def __init__(self, *args, convert_func=None, dataset=None, api=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.convert_func = convert_func  # maps a ConfigSpace sample to an architecture
        self._dataset = dataset
        self._api = api
        self.total_times = []  # cumulative simulated cost after each evaluation
        self.trajectory = []  # (accuracy, arch) for each evaluation, in order

    def compute(self, config, budget, **kwargs):
        arch = self.convert_func(config)
        # budget is the number of epochs; iepoch is 0-based, hence budget - 1.
        (accuracy, latency, time_cost, total_time) = self._api.simulate_train_eval(arch, self._dataset, iepoch=(int(budget) - 1), hp='12')
        self.trajectory.append((accuracy, arch))
        self.total_times.append(total_time)
        # BOHB minimizes 'loss', so report 100 - accuracy.
        return {'loss': (100 - accuracy), 'info': self._api.query_index_by_arch(arch)}
def main(xargs, api, api_full):
    """Run BOHB over a NATS-Bench search space and report the best architecture.

    Args:
        xargs: parsed command-line arguments (search_space, dataset, rand_seed,
            n_iters, and BOHB hyper-parameters).
        api: benchmark API used for simulated train/eval during the search.
        api_full: benchmark API with full training info for the final report.

    Returns:
        (log_dir, current_best_index, total_times, acc): the log directory,
        the best-so-far arch index after each evaluation, the cumulative
        simulated costs, and the final 'valtest-accuracy' of the best arch.
    """
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)  # NOTE(review): uses global ``args``, not ``xargs`` — confirm intended
    logger.log('{:} use api : {:}'.format(time_string(), api))
    api.reset_time()
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    # Topology ('tss') and size search spaces need different config spaces.
    if (xargs.search_space == 'tss'):
        cs = get_topology_config_space(search_space)
        config2structure = config2topology_func()
    else:
        cs = get_size_config_space(search_space)
        config2structure = config2size_func(search_space)
    hb_run_id = '0'
    # Start a local hpbandster name server plus one background worker.
    NS = hpns.NameServer(run_id=hb_run_id, host='localhost', port=0)
    (ns_host, ns_port) = NS.start()
    num_workers = 1
    workers = []
    for i in range(num_workers):
        w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, dataset=xargs.dataset, api=api, run_id=hb_run_id, id=i)
        w.run(background=True)
        workers.append(w)
    start_time = time.time()
    bohb = BOHB(configspace=cs, run_id=hb_run_id, eta=3, min_budget=1, max_budget=12, nameserver=ns_host, nameserver_port=ns_port, num_samples=xargs.num_samples, random_fraction=xargs.random_fraction, bandwidth_factor=xargs.bandwidth_factor, ping_interval=10, min_bandwidth=xargs.min_bandwidth)
    results = bohb.run(xargs.n_iters, min_n_workers=num_workers)
    bohb.shutdown(shutdown_workers=True)
    NS.shutdown()
    # Reconstruct the best-so-far trajectory from the worker's history.
    current_best_index = []
    for idx in range(len(workers[0].trajectory)):
        trajectory = workers[0].trajectory[:(idx + 1)]
        arch = max(trajectory, key=(lambda x: x[0]))[1]
        current_best_index.append(api.query_index_by_arch(arch))
    best_arch = max(workers[0].trajectory, key=(lambda x: x[0]))[1]
    logger.log('Best found configuration: {:} within {:.3f} s'.format(best_arch, workers[0].total_times[(- 1)]))
    info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90'))
    # NOTE(review): the dataset is hard-coded to 'ninapro' here — confirm.
    info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'ninapro', iepoch=None, hp='200')
    acc = info_num['valtest-accuracy']
    logger.log('{:}'.format(info))
    logger.log(('-' * 100))
    logger.close()
    return (logger.log_dir, current_best_index, workers[0].total_times, acc)
def get_topology_config_space(search_space, max_nodes=4):
    """Build the topology search ConfigSpace: an operation choice for every edge 'i<-j'."""
    space = ConfigSpace.ConfigurationSpace()
    for dst in range(1, max_nodes):
        for src in range(dst):
            edge = '{:}<-{:}'.format(dst, src)
            space.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(edge, search_space))
    return space
def get_size_config_space(search_space):
    """Build the size search ConfigSpace: one channel-candidate choice per layer."""
    space = ConfigSpace.ConfigurationSpace()
    names = ['layer-{:}'.format(idx) for idx in range(search_space['numbers'])]
    for name in names:
        space.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(name, search_space['candidates']))
    return space
def config2topology_func(max_nodes=4):
    """Make a closure that decodes edge-wise operation choices into a CellStructure."""
    def config2structure(config):
        genotypes = []
        for dst in range(1, max_nodes):
            ops = []
            for src in range(dst):
                ops.append((config['{:}<-{:}'.format(dst, src)], src))
            genotypes.append(tuple(ops))
        return CellStructure(genotypes)
    return config2structure
def config2size_func(search_space):
    """Make a closure that decodes per-layer channel choices into a ':'-joined string."""
    def config2structure(config):
        picks = []
        for idx in range(search_space['numbers']):
            picks.append(str(config['layer-{:}'.format(idx)]))
        return ':'.join(picks)
    return config2structure
class MyWorker(Worker):
    """hpbandster worker evaluating architectures through the benchmark API."""

    def __init__(self, *args, convert_func=None, dataset=None, api=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.convert_func = convert_func  # ConfigSpace sample -> architecture
        self._dataset = dataset
        self._api = api
        self.total_times = []  # cumulative simulated cost per evaluation
        self.trajectory = []  # (accuracy, arch) history, one entry per call

    def compute(self, config, budget, **kwargs):
        arch = self.convert_func(config)
        # budget = number of epochs; iepoch is 0-based, hence budget - 1.
        (accuracy, latency, time_cost, total_time) = self._api.simulate_train_eval(arch, self._dataset, iepoch=(int(budget) - 1), hp='12')
        self.trajectory.append((accuracy, arch))
        self.total_times.append(total_time)
        # The optimizer minimizes 'loss', so report 100 - accuracy.
        return {'loss': (100 - accuracy), 'info': self._api.query_index_by_arch(arch)}
def main(xargs, api, api_full):
    """Run HyperBand over a NATS-Bench search space and report the best architecture.

    Args:
        xargs: parsed command-line arguments (search_space, dataset, rand_seed,
            n_iters, ...).
        api: benchmark API used for simulated train/eval during the search.
        api_full: benchmark API with full training info for the final report.

    Returns:
        (log_dir, current_best_index, total_times, acc): the log directory,
        the best-so-far arch index after each evaluation, the cumulative
        simulated costs, and the final 'test-accuracy' of the best arch.
    """
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(args)  # NOTE(review): uses global ``args``, not ``xargs`` — confirm intended
    logger.log('{:} use api : {:}'.format(time_string(), api))
    api.reset_time()
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    # Topology ('tss') and size search spaces need different config spaces.
    if (xargs.search_space == 'tss'):
        cs = get_topology_config_space(search_space)
        config2structure = config2topology_func()
    else:
        cs = get_size_config_space(search_space)
        config2structure = config2size_func(search_space)
    hb_run_id = '0'
    # Start a local hpbandster name server plus one background worker.
    NS = hpns.NameServer(run_id=hb_run_id, host='localhost', port=0)
    (ns_host, ns_port) = NS.start()
    num_workers = 1
    workers = []
    for i in range(num_workers):
        w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, dataset=xargs.dataset, api=api, run_id=hb_run_id, id=i)
        w.run(background=True)
        workers.append(w)
    start_time = time.time()
    hyper = HyperBand(configspace=cs, run_id=hb_run_id, eta=3, min_budget=1, max_budget=12, nameserver=ns_host, nameserver_port=ns_port, ping_interval=50)
    results = hyper.run(xargs.n_iters, min_n_workers=num_workers)
    hyper.shutdown(shutdown_workers=True)
    NS.shutdown()
    # Reconstruct the best-so-far trajectory from the worker's history.
    current_best_index = []
    for idx in range(len(workers[0].trajectory)):
        trajectory = workers[0].trajectory[:(idx + 1)]
        arch = max(trajectory, key=(lambda x: x[0]))[1]
        current_best_index.append(api.query_index_by_arch(arch))
    best_arch = max(workers[0].trajectory, key=(lambda x: x[0]))[1]
    logger.log('Best found configuration: {:} within {:.3f} s'.format(best_arch, workers[0].total_times[(- 1)]))
    info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90'))
    info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), args.dataset, iepoch=None, hp='200')
    acc = info_num['test-accuracy']
    logger.log('{:}'.format(info))
    logger.log(('-' * 100))
    logger.close()
    return (logger.log_dir, current_best_index, workers[0].total_times, acc)
def random_topology_func(op_names, max_nodes=4):
    """Return a nullary sampler of random topology architectures.

    Args:
        op_names: candidate operation names for every edge.
        max_nodes: number of nodes in the cell; node i receives one edge
            from every node j < i.

    Returns:
        A function that, when called, samples one operation per edge and
        builds a `CellStructure` from the resulting genotype.
    """
    def random_architecture():
        genotypes = []
        for i in range(1, max_nodes):
            xlist = []
            for j in range(i):
                # fix: dropped the unused `node_str` local of the original
                op_name = random.choice(op_names)
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return CellStructure(genotypes)
    return random_architecture
def random_size_func(info):
    """Return a nullary sampler of random size (channel) architectures.

    `info` carries 'numbers' (layer count) and 'candidates' (channel
    choices); a sampled architecture is a ':'-joined string of choices.
    """
    def random_architecture():
        picks = [str(random.choice(info['candidates'])) for _ in range(info['numbers'])]
        return ':'.join(picks)
    return random_architecture
def main(xargs, api, api_full):
    """Random-search baseline over a NATS-Bench search space.

    Returns (log_dir, incumbent-index-per-step, cumulative-costs, final accuracy).
    """
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    # NOTE(review): uses the module-level `args`, not the `xargs` parameter -- confirm intended.
    logger = prepare_logger(args)
    logger.log('{:} use api : {:}'.format(time_string(), api))
    api.reset_time()
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    if (xargs.search_space == 'tss'):
        random_arch = random_topology_func(search_space)
    else:
        random_arch = random_size_func(search_space)
    (best_arch, best_acc, total_time_cost, history) = (None, (- 1), [], [])
    current_best_index = []
    # Keep sampling until the simulated search cost exceeds the time budget.
    while ((len(total_time_cost) == 0) or (total_time_cost[(- 1)] < xargs.time_budget)):
        arch = random_arch()
        # Proxy evaluation: 12-epoch schedule (hp='12'), reading epoch index 11.
        (accuracy, _, _, total_cost) = api.simulate_train_eval(arch, xargs.dataset, iepoch=11, hp='12')
        total_time_cost.append(total_cost)
        history.append(arch)
        if ((best_arch is None) or (best_acc < accuracy)):
            (best_acc, best_arch) = (accuracy, arch)
        logger.log('[{:03d}] : {:} : accuracy = {:.2f}%'.format(len(history), arch, accuracy))
        current_best_index.append(api.query_index_by_arch(best_arch))
    logger.log('{:} best arch is {:}, accuracy = {:.2f}%, visit {:} archs with {:.1f} s.'.format(time_string(), best_arch, best_acc, len(history), total_time_cost[(- 1)]))
    info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90'))
    # NOTE(review): dataset is hardcoded to 'ninapro' here (not xargs.dataset) -- confirm intended.
    info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'ninapro', iepoch=None, hp='200')
    acc = info_num['valtest-accuracy']
    logger.log('{:}'.format(info))
    logger.log(('-' * 100))
    logger.close()
    return (logger.log_dir, current_best_index, total_time_cost, acc)
class Model(object):
    """Container pairing an architecture with its measured accuracy."""

    def __init__(self):
        # Both fields are filled in later by the search loop.
        self.arch, self.accuracy = None, None

    def __str__(self):
        """Render the stored architecture as a readable string."""
        return '{:}'.format(self.arch)
def random_topology_func(op_names, max_nodes=4):
    """Return a nullary sampler of random topology architectures.

    Args:
        op_names: candidate operation names for every edge.
        max_nodes: number of nodes in the cell (edges run j -> i for j < i).

    Returns:
        A function that samples one random operation per edge and wraps
        the genotype in a `CellStructure`.
    """
    def random_architecture():
        genotypes = []
        for i in range(1, max_nodes):
            xlist = []
            for j in range(i):
                # fix: removed the unused `node_str` local of the original
                op_name = random.choice(op_names)
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return CellStructure(genotypes)
    return random_architecture
def random_size_func(info):
    """Return a nullary sampler of random size (channel) architectures.

    Each call draws `info['numbers']` independent choices from
    `info['candidates']` and joins them with ':'.
    """
    def random_architecture():
        return ':'.join(str(random.choice(info['candidates'])) for _ in range(info['numbers']))
    return random_architecture
def mutate_topology_func(op_names):
    """Build a mutator for topology architectures.

    The returned callable clones the parent cell and randomly switches
    the operation on one randomly chosen edge to a different one.
    """
    def _mutate(parent_arch):
        child_arch = deepcopy(parent_arch)
        # Pick a random node, then a random incoming edge of that node.
        node_id = random.randint(0, len(child_arch.nodes) - 1)
        node_info = list(child_arch.nodes[node_id])
        snode_id = random.randint(0, len(node_info) - 1)
        old_op, src = node_info[snode_id]
        # Resample until the operation actually changes.
        xop = random.choice(op_names)
        while xop == old_op:
            xop = random.choice(op_names)
        node_info[snode_id] = (xop, src)
        child_arch.nodes[node_id] = tuple(node_info)
        return child_arch
    return _mutate
def mutate_size_func(info):
    """Build a mutator for size architectures.

    The returned callable replaces the channel count at one random
    position of the ':'-joined parent string with a random candidate.
    """
    def _mutate(parent_arch):
        parts = deepcopy(parent_arch).split(':')
        index = random.randint(0, len(parts) - 1)
        parts[index] = str(random.choice(info['candidates']))
        return ':'.join(parts)
    return _mutate
def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, api, use_proxy, dataset):
    """Algorithm for regularized evolution (i.e. aging evolution).

    Follows "Algorithm 1" in Real et al., "Regularized Evolution for Image
    Classifier Architecture Search".

    Args:
        cycles: NOTE(review): unused -- the loop terminates on time_budget alone.
        population_size: number of individuals kept in the population.
        sample_size: number of individuals per tournament.
        time_budget: upper bound on the (simulated) searching cost.
        random_arch: nullary callable sampling a random architecture.
        mutate_arch: callable producing a mutated child architecture.
        api: NATS-Bench-style API (reset_time / simulate_train_eval / query_index_by_arch).
        use_proxy: NOTE(review): unused -- iepoch=11 / hp='12' are fixed below.
        dataset: dataset name forwarded to api.simulate_train_eval.

    Returns:
        (history, current_best_index, total_time_cost) where history is a list
        of (accuracy, arch) pairs, current_best_index tracks the incumbent's
        benchmark index after every evaluation, and total_time_cost the
        cumulative simulated cost after every evaluation.
    """
    population = collections.deque()
    api.reset_time()  # restart the simulated clock; costs below are cumulative
    history, total_time_cost = [], []
    current_best_index = []
    # Phase 1: fill the population with random models.
    while len(population) < population_size:
        model = Model()
        model.arch = random_arch()
        model.accuracy, _, _, total_cost = api.simulate_train_eval(model.arch, dataset, iepoch=11, hp='12')
        population.append(model)
        history.append((model.accuracy, model.arch))
        total_time_cost.append(total_cost)
        current_best_index.append(api.query_index_by_arch(max(history, key=(lambda x: x[0]))[1]))
    # Phase 2: tournament selection + aging until the budget is exhausted.
    while total_time_cost[(- 1)] < time_budget:
        sample = []  # fix: dropped the unused `start_time = time.time()` of the original
        while len(sample) < sample_size:
            # Sample with replacement; a model may enter the tournament twice.
            candidate = random.choice(list(population))
            sample.append(candidate)
        parent = max(sample, key=(lambda i: i.accuracy))
        child = Model()
        child.arch = mutate_arch(parent.arch)
        child.accuracy, _, _, total_cost = api.simulate_train_eval(child.arch, dataset, iepoch=11, hp='12')
        population.append(child)
        history.append((child.accuracy, child.arch))
        current_best_index.append(api.query_index_by_arch(max(history, key=(lambda x: x[0]))[1]))
        total_time_cost.append(total_cost)
        # Aging: discard the oldest member, regardless of fitness.
        population.popleft()
    return (history, current_best_index, total_time_cost)
def main(xargs, api, api_full):
    """Regularized-evolution search over a NATS-Bench search space.

    Returns (log_dir, incumbent-index-per-step, cumulative-costs, final accuracy).
    """
    torch.set_num_threads(4)
    prepare_seed(xargs.rand_seed)
    # NOTE(review): uses the module-level `args`, not the `xargs` parameter -- confirm intended.
    logger = prepare_logger(args)
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    # Pick samplers/mutators matching the topology or size search space.
    if (xargs.search_space == 'tss'):
        random_arch = random_topology_func(search_space)
        mutate_arch = mutate_topology_func(search_space)
    else:
        random_arch = random_size_func(search_space)
        mutate_arch = mutate_size_func(search_space)
    x_start_time = time.time()
    logger.log('{:} use api : {:}'.format(time_string(), api))
    logger.log((('-' * 30) + ' start searching with the time budget of {:} s'.format(xargs.time_budget)))
    (history, current_best_index, total_times) = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, api, (xargs.use_proxy > 0), xargs.dataset)
    logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s).'.format(time_string(), len(history), total_times[(- 1)], (time.time() - x_start_time)))
    best_arch = max(history, key=(lambda x: x[0]))[1]
    logger.log('{:} best arch is {:}'.format(time_string(), best_arch))
    info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90'))
    # NOTE(review): dataset is hardcoded to 'ninapro' here (not xargs.dataset) -- confirm intended.
    info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'ninapro', iepoch=None, hp='200')
    acc = info_num['valtest-accuracy']
    logger.log('{:}'.format(info))
    logger.log(('-' * 100))
    logger.close()
    return (logger.log_dir, current_best_index, total_times, acc)
class PolicyTopology(nn.Module):
    """REINFORCE policy over a NATS-Bench topology cell.

    Keeps one learnable logit row per edge (edge '{i}<-{j}' for j < i,
    i < max_nodes) and one column per candidate operation.
    """

    def __init__(self, search_space, max_nodes=4):
        super(PolicyTopology, self).__init__()
        self.max_nodes = max_nodes
        self.search_space = deepcopy(search_space)
        # Stable mapping edge-name -> row index in arch_parameters.
        self.edge2index = {}
        for dst in range(1, max_nodes):
            for src in range(dst):
                self.edge2index['{:}<-{:}'.format(dst, src)] = len(self.edge2index)
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(len(self.edge2index), len(search_space))))

    def generate_arch(self, actions):
        """Decode per-edge action indices into a CellStructure genotype."""
        genotypes = []
        for dst in range(1, self.max_nodes):
            edges = []
            for src in range(dst):
                key = '{:}<-{:}'.format(dst, src)
                edges.append((self.search_space[actions[self.edge2index[key]]], src))
            genotypes.append(tuple(edges))
        return CellStructure(genotypes)

    def genotype(self):
        """Greedy decoding: argmax operation on every edge."""
        genotypes = []
        for dst in range(1, self.max_nodes):
            edges = []
            for src in range(dst):
                key = '{:}<-{:}'.format(dst, src)
                with torch.no_grad():
                    row = self.arch_parameters[self.edge2index[key]]
                    edges.append((self.search_space[row.argmax().item()], src))
            genotypes.append(tuple(edges))
        return CellStructure(genotypes)

    def forward(self):
        """Per-edge categorical distribution over the operations."""
        return nn.functional.softmax(self.arch_parameters, dim=(- 1))
class PolicySize(nn.Module):
    """REINFORCE policy over the size (channel) search space.

    Keeps one learnable logit row per layer and one column per candidate
    channel count.
    """

    def __init__(self, search_space):
        super(PolicySize, self).__init__()
        self.candidates = search_space['candidates']
        self.numbers = search_space['numbers']
        self.arch_parameters = nn.Parameter((0.001 * torch.randn(self.numbers, len(self.candidates))))

    def generate_arch(self, actions):
        """Decode per-layer action indices into a ':'-joined channel string."""
        return ':'.join(str(self.candidates[i]) for i in actions)

    def genotype(self):
        """Greedy decoding: argmax candidate for every layer."""
        chosen = []
        for row in range(self.numbers):
            best = self.arch_parameters[row].argmax().item()
            chosen.append(str(self.candidates[best]))
        return ':'.join(chosen)

    def forward(self):
        """Per-layer categorical distribution over the candidates."""
        return nn.functional.softmax(self.arch_parameters, dim=(- 1))
class ExponentialMovingAverage(object):
    """Bias-corrected exponential moving average of a scalar stream.

    The denominator accumulates the same decay as the numerator, so early
    values are not biased toward zero. value() before any update() raises
    ZeroDivisionError.
    """

    def __init__(self, momentum):
        self._numerator = 0
        self._denominator = 0
        self._momentum = momentum

    def update(self, value):
        """Fold one observation into the average."""
        m = self._momentum
        self._numerator = m * self._numerator + (1 - m) * value
        self._denominator = m * self._denominator + (1 - m)

    def value(self):
        """Return the current (bias-corrected) moving average."""
        return self._numerator / self._denominator
def select_action(policy):
    """Sample one action from the policy's categorical distribution.

    Returns (log_prob tensor for the sampled action, action as a plain
    Python value via .tolist()).
    """
    distribution = Categorical(policy())
    action = distribution.sample()
    return (distribution.log_prob(action), action.cpu().tolist())
def main(xargs, api, api_full):
    """REINFORCE architecture search over a NATS-Bench search space.

    Returns (log_dir, incumbent-index-per-step, cumulative-costs, final accuracy).
    """
    prepare_seed(xargs.rand_seed)
    # NOTE(review): uses the module-level `args`, not the `xargs` parameter -- confirm intended.
    logger = prepare_logger(args)
    search_space = get_search_spaces(xargs.search_space, 'nats-bench')
    if (xargs.search_space == 'tss'):
        policy = PolicyTopology(search_space)
    else:
        policy = PolicySize(search_space)
    optimizer = torch.optim.Adam(policy.parameters(), lr=xargs.learning_rate)
    eps = np.finfo(np.float32).eps.item()
    baseline = ExponentialMovingAverage(xargs.EMA_momentum)
    logger.log('policy : {:}'.format(policy))
    logger.log('optimizer : {:}'.format(optimizer))
    logger.log('eps : {:}'.format(eps))
    logger.log('{:} use api : {:}'.format(time_string(), api))
    api.reset_time()
    x_start_time = time.time()
    logger.log('Will start searching with time budget of {:} s.'.format(xargs.time_budget))
    (total_steps, total_costs, trace) = (0, [], [])
    current_best_index = []
    # One policy-gradient step per sampled architecture, until the budget runs out.
    while ((len(total_costs) == 0) or (total_costs[(- 1)] < xargs.time_budget)):
        start_time = time.time()
        (log_prob, action) = select_action(policy)
        arch = policy.generate_arch(action)
        # Proxy evaluation: 12-epoch schedule (hp='12'), reading epoch index 11.
        (reward, _, _, current_total_cost) = api.simulate_train_eval(arch, xargs.dataset, iepoch=11, hp='12')
        trace.append((reward, arch))
        total_costs.append(current_total_cost)
        baseline.update(reward)
        # REINFORCE with an exponential-moving-average baseline.
        policy_loss = ((- log_prob) * (reward - baseline.value())).sum()
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()
        total_steps += 1
        logger.log('step [{:3d}] : average-reward={:.3f} : policy_loss={:.4f} : {:}'.format(total_steps, baseline.value(), policy_loss.item(), policy.genotype()))
        current_best_index.append(api.query_index_by_arch(max(trace, key=(lambda x: x[0]))[1]))
    best_arch = max(trace, key=(lambda x: x[0]))[1]
    logger.log('REINFORCE finish with {:} steps and {:.1f} s (real cost={:.3f}).'.format(total_steps, total_costs[(- 1)], (time.time() - x_start_time)))
    info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90'))
    # NOTE(review): dataset is hardcoded to 'ninapro' here (not xargs.dataset) -- confirm intended.
    info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'ninapro', iepoch=None, hp='200')
    acc = info_num['valtest-accuracy']
    logger.log('{:}'.format(info))
    logger.log(('-' * 100))
    logger.close()
    return (logger.log_dir, current_best_index, total_costs, acc)
def _concat(xs): return torch.cat([x.view((- 1)) for x in xs])
def _hessian_vector_product(vector, network, criterion, base_inputs, base_targets, r=0.01):
    """Finite-difference approximation of the Hessian-vector product for DARTS.

    Approximates (d^2 L / dw dalpha) . v via central differences:
        (grad_alpha L(w + R v) - grad_alpha L(w - R v)) / (2 R)
    where R = r / ||v||.

    Args:
        vector: list of tensors with the same shapes as network.weights.
        network: model exposing .weights and .alphas parameter lists; calling
            it returns (features, logits).
        criterion: loss taking (logits, targets).
        base_inputs, base_targets: the batch used for both perturbed passes.
        r: finite-difference scale before normalization by ||vector||.

    Returns:
        List of tensors, one per alpha parameter.
    """
    # float() so R is a plain Number, valid as the alpha= argument below.
    R = float(r / _concat(vector).norm())
    # w <- w + R*v
    for (p, v) in zip(network.weights, vector):
        # fix: alpha= keyword form; Tensor.add_(Number, Tensor) is deprecated/removed.
        p.data.add_(v, alpha=R)
    (_, logits) = network(base_inputs)
    loss = criterion(logits, base_targets)
    grads_p = torch.autograd.grad(loss, network.alphas)
    # w <- w - R*v (subtract 2R*v from the perturbed weights)
    for (p, v) in zip(network.weights, vector):
        p.data.sub_(v, alpha=(2 * R))
    (_, logits) = network(base_inputs)
    loss = criterion(logits, base_targets)
    grads_n = torch.autograd.grad(loss, network.alphas)
    # Restore the original weights.
    for (p, v) in zip(network.weights, vector):
        p.data.add_(v, alpha=R)
    return [(x - y).div_((2 * R)) for (x, y) in zip(grads_p, grads_n)]
def backward_step_unrolled(network, criterion, base_inputs, base_targets, w_optimizer, arch_inputs, arch_targets):
    """Second-order (unrolled) DARTS architecture-gradient step.

    Simulates one SGD step on the weights (w' = w - LR * (momentum_buffer +
    grad + WD*w)), evaluates the architecture loss at w' on the validation
    batch, and writes the corrected architecture gradient into
    network.arch_parameters.grad.

    Returns (unrolled validation loss, unrolled logits), both detached.
    """
    (_, logits) = network(base_inputs)
    loss = criterion(logits, base_targets)
    # Read the hyper-parameters of the weight optimizer (assumed SGD-like).
    LR, WD, momentum = (w_optimizer.param_groups[0]['lr'],
                        w_optimizer.param_groups[0]['weight_decay'],
                        w_optimizer.param_groups[0]['momentum'])
    with torch.no_grad():
        theta = _concat(network.weights)
        # fix: narrowed the bare `except:` -- only a missing momentum buffer is expected.
        try:
            moment = _concat(w_optimizer.state[v]['momentum_buffer'] for v in network.weights)
            moment = moment.mul_(momentum)
        except KeyError:
            moment = torch.zeros_like(theta)
        dtheta = _concat(torch.autograd.grad(loss, network.weights, retain_graph=True)) + (WD * theta)
        # fix: alpha= keyword form; Tensor.sub(Number, Tensor) is deprecated/removed.
        params = theta.sub(moment + dtheta, alpha=LR)
    # Build the "unrolled" model holding the post-step weights.
    unrolled_model = deepcopy(network)
    model_dict = unrolled_model.state_dict()
    new_params, offset = {}, 0
    for (k, v) in network.named_parameters():
        if ('arch_parameters' in k):
            continue
        v_length = np.prod(v.size())
        new_params[k] = params[offset:(offset + v_length)].view(v.size())
        offset += v_length
    model_dict.update(new_params)
    unrolled_model.load_state_dict(model_dict)
    unrolled_model.zero_grad()
    # Architecture loss at the unrolled weights, on the validation batch.
    (_, unrolled_logits) = unrolled_model(arch_inputs)
    unrolled_loss = criterion(unrolled_logits, arch_targets)
    unrolled_loss.backward()
    dalpha = unrolled_model.arch_parameters.grad
    vector = [v.grad.data for v in unrolled_model.weights]
    # Second-order correction via the finite-difference Hessian-vector product.
    [implicit_grads] = _hessian_vector_product(vector, network, criterion, base_inputs, base_targets)
    dalpha.data.sub_(implicit_grads.data, alpha=LR)
    if (network.arch_parameters.grad is None):
        network.arch_parameters.grad = deepcopy(dalpha)
    else:
        network.arch_parameters.grad.data.copy_(dalpha.data)
    return (unrolled_loss.detach(), unrolled_logits.detach())