class SuperReLU(SuperModule):
'Applies the rectified linear unit function element-wise.'
def __init__(self, inplace: bool=False) -> None:
super(SuperReLU, self).__init__()
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return F.relu(input, inplace=self._inplace)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
def extra_repr(self) -> str:
return ('inplace=True' if self._inplace else '')
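# Usage sketch (an illustration, not part of the class; assumes torch/F are in
# scope as the module requires): in the default run mode, __call__ routes
# through forward_raw, i.e. plain F.relu.
act = SuperReLU(inplace=False)
out = act(torch.randn(2, 3))
assert (out >= 0).all()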
|
class SuperGELU(SuperModule):
'Applies the Gaussian Error Linear Units function element-wise.'
def __init__(self) -> None:
super(SuperGELU, self).__init__()
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return F.gelu(input)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
|
class SuperSigmoid(SuperModule):
'Applies the Sigmoid function element-wise.'
def __init__(self) -> None:
super(SuperSigmoid, self).__init__()
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return torch.sigmoid(input)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
|
class SuperLeakyReLU(SuperModule):
'https://pytorch.org/docs/stable/_modules/torch/nn/modules/activation.html#LeakyReLU'
def __init__(self, negative_slope: float=0.01, inplace: bool=False) -> None:
super(SuperLeakyReLU, self).__init__()
self._negative_slope = negative_slope
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return F.leaky_relu(input, self._negative_slope, self._inplace)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
def extra_repr(self) -> str:
inplace_str = (', inplace=True' if self._inplace else '')
return 'negative_slope={}{}'.format(self._negative_slope, inplace_str)
|
class SuperTanh(SuperModule):
'Applies the Tanh function element-wise.'
def __init__(self) -> None:
super(SuperTanh, self).__init__()
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return torch.tanh(input)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
|
class SuperQKVAttentionV2(SuperModule):
'The super model for the attention layer.'
def __init__(self, qk_att_dim: int, in_v_dim: int, hidden_dim: int, num_heads: int, proj_dim: int, qkv_bias: bool=False, attn_drop: Optional[float]=None, proj_drop: Optional[float]=None):
super(SuperQKVAttentionV2, self).__init__()
self._in_v_dim = in_v_dim
self._qk_att_dim = qk_att_dim
self._proj_dim = proj_dim
self._hidden_dim = hidden_dim
self._num_heads = num_heads
self._qkv_bias = qkv_bias
self.qk_fc = SuperLinear(qk_att_dim, num_heads, bias=qkv_bias)
self.v_fc = SuperLinear(in_v_dim, (hidden_dim * num_heads), bias=qkv_bias)
self.attn_drop = nn.Dropout((attn_drop or 0.0))
self.proj = SuperLinear((hidden_dim * num_heads), proj_dim)
self.proj_drop = nn.Dropout((proj_drop or 0.0))
self._infinity = 1000000000.0
@property
def num_heads(self):
return spaces.get_max(self._num_heads)
@property
def in_v_dim(self):
return spaces.get_max(self._in_v_dim)
@property
def qk_att_dim(self):
return spaces.get_max(self._qk_att_dim)
@property
def hidden_dim(self):
return spaces.get_max(self._hidden_dim)
@property
def proj_dim(self):
return spaces.get_max(self._proj_dim)
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
raise NotImplementedError
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperQKVAttentionV2, self).apply_candidate(abstract_child)
raise NotImplementedError
def forward_qkv(self, qk_att_tensor, v_tensor, num_head: int, mask=None) -> torch.Tensor:
qk_att = self.qk_fc(qk_att_tensor)
(B, N, S, _) = qk_att.shape
assert (_ == num_head)
attn_v1 = qk_att.permute(0, 3, 1, 2)
if (mask is not None):
mask = torch.unsqueeze(mask, dim=1)
attn_v1 = attn_v1.masked_fill(mask, (- self._infinity))
attn_v1 = attn_v1.softmax(dim=(- 1))
attn_v1 = self.attn_drop(attn_v1)
v = self.v_fc(v_tensor)
(B0, _, _) = v.shape
v_v1 = v.reshape(B0, S, num_head, (- 1)).permute(0, 2, 1, 3)
feats_v1 = (attn_v1 @ v_v1).permute(0, 2, 1, 3).reshape(B, N, (- 1))
return feats_v1
def forward_candidate(self, qk_att_tensor, v_tensor, mask=None) -> torch.Tensor:
return self.forward_raw(qk_att_tensor, v_tensor, mask)
def forward_raw(self, qk_att_tensor, v_tensor, mask=None) -> torch.Tensor:
feats = self.forward_qkv(qk_att_tensor, v_tensor, self.num_heads, mask)
outs = self.proj(feats)
outs = self.proj_drop(outs)
return outs
def extra_repr(self) -> str:
return 'input_dim={:}, hidden_dim={:}, proj_dim={:}, num_heads={:}, infinity={:}'.format((self.qk_att_dim, self.in_v_dim), self._hidden_dim, self._proj_dim, self._num_heads, self._infinity)
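# A hedged shape walk-through of forward_qkv using plain tensors (illustrative
# sizes; B=batch, N=queries, S=keys, H=heads, hidden=per-head dim; assumes torch
# is in scope):
B, N, S, H, hidden = 2, 4, 5, 3, 8
attn = torch.randn(B, H, N, S).softmax(dim=-1)  # qk_fc output after permute(0, 3, 1, 2)
v = torch.randn(B, H, S, hidden)  # v_fc output after reshape + permute
feats = (attn @ v).permute(0, 2, 1, 3).reshape(B, N, -1)
assert feats.shape == (B, N, H * hidden)  # afterwards self.proj maps this to proj_dim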
|
class SuperSequential(SuperModule):
"A sequential container wrapped with 'Super' ability.\n\n Modules will be added to it in the order they are passed in the constructor.\n Alternatively, an ordered dict of modules can also be passed in.\n To make it easier to understand, here is a small example::\n # Example of using Sequential\n model = SuperSequential(\n nn.Conv2d(1,20,5),\n nn.ReLU(),\n nn.Conv2d(20,64,5),\n nn.ReLU()\n )\n # Example of using Sequential with OrderedDict\n model = nn.Sequential(OrderedDict([\n ('conv1', nn.Conv2d(1,20,5)),\n ('relu1', nn.ReLU()),\n ('conv2', nn.Conv2d(20,64,5)),\n ('relu2', nn.ReLU())\n ]))\n "
def __init__(self, *args):
super(SuperSequential, self).__init__()
if ((len(args) == 1) and isinstance(args[0], OrderedDict)):
for (key, module) in args[0].items():
self.add_module(key, module)
else:
if (not isinstance(args, (list, tuple))):
raise ValueError('Invalid input type: {:}'.format(type(args)))
for (idx, module) in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx) -> T:
'Get the idx-th item of the iterator'
size = len(self)
idx = operator.index(idx)
if (not ((- size) <= idx < size)):
raise IndexError('index {} is out of range'.format(idx))
idx %= size
return next(islice(iterator, idx, None))
def __getitem__(self, idx) -> Union[('SuperSequential', T)]:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: SuperModule) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[(slice, int)]) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
def __len__(self) -> int:
return len(self._modules)
def __dir__(self):
keys = super(SuperSequential, self).__dir__()
keys = [key for key in keys if (not key.isdigit())]
return keys
def __iter__(self) -> Iterator[SuperModule]:
return iter(self._modules.values())
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
for (index, module) in enumerate(self):
if (not isinstance(module, SuperModule)):
continue
space = module.abstract_search_space
if (not spaces.is_determined(space)):
root_node.append(str(index), space)
return root_node
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperSequential, self).apply_candidate(abstract_child)
for (index, module) in enumerate(self):
if (str(index) in abstract_child):
module.apply_candidate(abstract_child[str(index)])
def forward_candidate(self, input):
return self.forward_raw(input)
def forward_raw(self, input):
for module in self:
input = module(input)
return input
def forward_with_container(self, input, container, prefix=[]):
for (index, module) in enumerate(self):
input = module.forward_with_container(input, container, (prefix + [str(index)]))
return input
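# Container usage sketch (illustrative only): integer indexing returns a child
# module, while slicing returns a new SuperSequential.
seq = SuperSequential(SuperReLU(), SuperTanh(), SuperGELU())
assert len(seq) == 3 and isinstance(seq[0], SuperReLU)
assert isinstance(seq[0:2], SuperSequential)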
|
class SuperDropout(SuperModule):
'Applies the dropout function element-wise.'
def __init__(self, p: float=0.5, inplace: bool=False) -> None:
super(SuperDropout, self).__init__()
self._p = p
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return F.dropout(input, self._p, self.training, self._inplace)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
def extra_repr(self) -> str:
xstr = ('inplace=True' if self._inplace else '')
return ('p={:}'.format(self._p) + ((', ' + xstr) if xstr else ''))
|
class SuperDrop(SuperModule):
'Applies the drop-path function element-wise.'
def __init__(self, p: float, dims: Tuple[int], recover: bool=True) -> None:
super(SuperDrop, self).__init__()
self._p = p
self._dims = dims
self._recover = recover
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
if ((not self.training) or (self._p <= 0)):
return input
keep_prob = (1 - self._p)
shape = ([input.shape[0]] + [(x if (y == (- 1)) else y) for (x, y) in zip(input.shape[1:], self._dims)])
random_tensor = (keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device))
random_tensor.floor_()
if self._recover:
return (input.div(keep_prob) * random_tensor)
else:
return (input * random_tensor)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
def extra_repr(self) -> str:
return (('p={:}'.format(self._p) + ', dims={:}'.format(self._dims)) + ', recover={:}'.format(self._recover))
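# A numerical sketch of the drop-path math above (assumes torch is in scope):
# with recover=True the surviving paths are scaled by 1/keep_prob, preserving
# the mean of the input in expectation.
keep_prob = 0.8
x = torch.ones(10000, 1)
mask = (keep_prob + torch.rand_like(x)).floor_()  # Bernoulli(keep_prob) in {0, 1}
y = x.div(keep_prob) * mask
assert abs(y.mean().item() - 1.0) < 0.05  # E[y] == E[x] up to sampling noise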
|
class SuperModule(abc.ABC, nn.Module):
'This class equips the nn.Module class with the ability to apply AutoDL.'
def __init__(self):
super(SuperModule, self).__init__()
self._super_run_type = SuperRunMode.Default
self._abstract_child = None
self._verbose = False
self._meta_info = {}
self._candidate_mode = DISABLE_CANDIDATE
def set_super_run_type(self, super_run_type):
def _reset_super_run(m):
if isinstance(m, SuperModule):
m._super_run_type = super_run_type
self.apply(_reset_super_run)
def add_module(self, name: str, module: Optional[torch.nn.Module]) -> None:
if (not isinstance(module, SuperModule)):
warnings.warn((('Add {:}:{:} module, which is not SuperModule, into {:}'.format(name, module.__class__.__name__, self.__class__.__name__) + '\n') + 'It may cause some functions to become invalid.'))
super(SuperModule, self).add_module(name, module)
def apply_verbose(self, verbose):
def _reset_verbose(m):
if isinstance(m, SuperModule):
m._verbose = verbose
self.apply(_reset_verbose)
def apply_candidate(self, abstract_child):
if (not isinstance(abstract_child, spaces.VirtualNode)):
raise ValueError('Invalid abstract child program: {:}'.format(abstract_child))
self._abstract_child = abstract_child
def enable_candidate(self):
def _enable_candidate(m):
if isinstance(m, SuperModule):
m._candidate_mode = ENABLE_CANDIDATE
self.apply(_enable_candidate)
def disable_candidate(self):
def _disable_candidate(m):
if isinstance(m, SuperModule):
m._candidate_mode = DISABLE_CANDIDATE
self.apply(_disable_candidate)
def get_w_container(self):
container = TensorContainer()
for (name, param) in self.named_parameters():
container.append(name, param, True)
for (name, buf) in self.named_buffers():
container.append(name, buf, False)
return container
def analyze_weights(self):
with torch.no_grad():
for (name, param) in self.named_parameters():
shapestr = '[{:10s}] shape={:}'.format(name, list(param.shape))
finalstr = (shapestr + ', {:.2f} +- {:.2f}'.format(param.mean(), param.std()))
print(finalstr)
def numel(self, buffer=True):
total = 0
for (name, param) in self.named_parameters():
total += param.numel()
if buffer:
for (name, buf) in self.named_buffers():
total += buf.numel()
return total
def set_best_dir(self, xdir):
self._meta_info[BEST_DIR_KEY] = str(xdir)
Path(xdir).mkdir(parents=True, exist_ok=True)
def set_best_name(self, xname):
self._meta_info[BEST_NAME_KEY] = str(xname)
def save_best(self, score):
if (BEST_DIR_KEY not in self._meta_info):
tempdir = tempfile.mkdtemp('-xlayers')
self._meta_info[BEST_DIR_KEY] = tempdir
if (BEST_SCORE_KEY not in self._meta_info):
self._meta_info[BEST_SCORE_KEY] = None
best_score = self._meta_info[BEST_SCORE_KEY]
if ((best_score is None) or (best_score <= score)):
best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
self._meta_info[BEST_SCORE_KEY] = score
torch.save(self.state_dict(), best_save_path)
return (True, self._meta_info[BEST_SCORE_KEY])
else:
return (False, self._meta_info[BEST_SCORE_KEY])
def load_best(self, best_save_name=None):
if (BEST_DIR_KEY not in self._meta_info):
raise ValueError('Please set BEST_DIR_KEY at first')
if (best_save_name is None):
best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
state_dict = torch.load(best_save_path)
self.load_state_dict(state_dict)
def has_best(self, best_name=None):
if (BEST_DIR_KEY not in self._meta_info):
raise ValueError('Please set BEST_DIR_KEY at first')
if (best_name is None):
best_save_name = self._meta_info.get(BEST_NAME_KEY, 'best-{:}.pth'.format(self.__class__.__name__))
else:
best_save_name = best_name
best_save_path = os.path.join(self._meta_info[BEST_DIR_KEY], best_save_name)
return os.path.exists(best_save_path)
@property
def abstract_search_space(self):
raise NotImplementedError
@property
def super_run_type(self):
return self._super_run_type
@property
def abstract_child(self):
return self._abstract_child
@property
def verbose(self):
return self._verbose
@abc.abstractmethod
def forward_raw(self, *inputs):
'Use the largest candidate for forward. Similar to the original PyTorch model.'
raise NotImplementedError
@abc.abstractmethod
def forward_candidate(self, *inputs):
raise NotImplementedError
@property
def name_with_id(self):
return 'name={:}, id={:}'.format(self.__class__.__name__, id(self))
def get_shape_str(self, tensors):
if isinstance(tensors, (list, tuple)):
shapes = [self.get_shape_str(tensor) for tensor in tensors]
if (len(shapes) == 1):
return shapes[0]
else:
return ', '.join(shapes)
elif isinstance(tensors, (torch.Tensor, nn.Parameter)):
return str(tuple(tensors.shape))
else:
raise TypeError('Invalid input type: {:}.'.format(type(tensors)))
def forward(self, *inputs):
if self.verbose:
print('[{:}] inputs shape: {:}'.format(self.name_with_id, self.get_shape_str(inputs)))
if (self.super_run_type == SuperRunMode.FullModel):
outputs = self.forward_raw(*inputs)
elif (self.super_run_type == SuperRunMode.Candidate):
if (self._candidate_mode == DISABLE_CANDIDATE):
raise ValueError('candidate mode is disabled')
outputs = self.forward_candidate(*inputs)
else:
raise ValueError('Unknown Super Model Run Mode: {:}'.format(self.super_run_type))
if self.verbose:
print('[{:}] outputs shape: {:}'.format(self.name_with_id, self.get_shape_str(outputs)))
return outputs
def forward_with_container(self, inputs, container, prefix=[]):
raise NotImplementedError
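# A hedged sketch of the two run modes (assumes the module-level SuperRunMode
# and ENABLE_CANDIDATE/DISABLE_CANDIDATE constants are in scope, as the class
# itself requires):
net = SuperSequential(SuperReLU(), SuperDropout(p=0.1))
x = torch.randn(4, 8)
y_full = net(x)  # Default mode resolves to FullModel -> forward_raw
net.set_super_run_type(SuperRunMode.Candidate)
net.enable_candidate()  # without this, forward() raises ValueError in Candidate mode
y_cand = net(x)  # now routed through forward_candidate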
|
class SuperLayerNorm1D(SuperModule):
'Super Layer Norm.'
def __init__(self, dim: IntSpaceType, eps: float=1e-06, elementwise_affine: bool=True) -> None:
super(SuperLayerNorm1D, self).__init__()
self._in_dim = dim
self._eps = eps
self._elementwise_affine = elementwise_affine
if self._elementwise_affine:
self.register_parameter('weight', nn.Parameter(torch.Tensor(self.in_dim)))
self.register_parameter('bias', nn.Parameter(torch.Tensor(self.in_dim)))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
self.reset_parameters()
@property
def in_dim(self):
return spaces.get_max(self._in_dim)
@property
def eps(self):
return self._eps
def reset_parameters(self) -> None:
if self._elementwise_affine:
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
if (not spaces.is_determined(self._in_dim)):
root_node.append('_in_dim', self._in_dim.abstract(reuse_last=True))
return root_node
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
if (not spaces.is_determined(self._in_dim)):
expected_input_dim = self.abstract_child['_in_dim'].value
else:
expected_input_dim = spaces.get_determined_value(self._in_dim)
if (input.size((- 1)) != expected_input_dim):
raise ValueError('Expect the input dim of {:} instead of {:}'.format(expected_input_dim, input.size((- 1))))
if self._elementwise_affine:
weight = self.weight[:expected_input_dim]
bias = self.bias[:expected_input_dim]
else:
(weight, bias) = (None, None)
return F.layer_norm(input, (expected_input_dim,), weight, bias, self.eps)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
return F.layer_norm(input, (self.in_dim,), self.weight, self.bias, self.eps)
def forward_with_container(self, input, container, prefix=[]):
super_weight_name = '.'.join((prefix + ['weight']))
if container.has(super_weight_name):
weight = container.query(super_weight_name)
else:
weight = None
super_bias_name = '.'.join((prefix + ['bias']))
if container.has(super_bias_name):
bias = container.query(super_bias_name)
else:
bias = None
return F.layer_norm(input, (self.in_dim,), weight, bias, self.eps)
def extra_repr(self) -> str:
return 'shape={in_dim}, eps={eps}, elementwise_affine={elementwise_affine}'.format(in_dim=self._in_dim, eps=self._eps, elementwise_affine=self._elementwise_affine)
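# A sketch of the candidate path above (assumes torch and F are in scope): the
# largest affine parameters are kept, and a sampled input dim simply slices them.
weight, bias = torch.ones(64), torch.zeros(64)  # max in_dim == 64
x = torch.randn(2, 7, 48)  # sampled candidate dim == 48
y = F.layer_norm(x, (48,), weight[:48], bias[:48], 1e-06)
assert y.shape == x.shape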
|
class SuperSimpleNorm(SuperModule):
'Super simple normalization.'
def __init__(self, mean, std, inplace=False) -> None:
super(SuperSimpleNorm, self).__init__()
self.register_buffer('_mean', torch.tensor(mean, dtype=torch.float))
self.register_buffer('_std', torch.tensor(std, dtype=torch.float))
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
if (not self._inplace):
tensor = input.clone()
else:
tensor = input
mean = torch.as_tensor(self._mean, dtype=tensor.dtype, device=tensor.device)
std = torch.as_tensor(self._std, dtype=tensor.dtype, device=tensor.device)
if (std == 0).any():
raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(tensor.dtype))
while (mean.ndim < tensor.ndim):
(mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
return tensor.sub_(mean).div_(std)
def extra_repr(self) -> str:
return 'mean={mean}, std={std}, inplace={inplace}'.format(mean=self._mean.item(), std=self._std.item(), inplace=self._inplace)
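# Usage sketch (illustrative): forward computes (input - mean) / std from the
# registered buffers.
norm = SuperSimpleNorm(mean=0.5, std=0.25)
x = torch.rand(2, 3)
assert torch.allclose(norm(x), (x - 0.5) / 0.25)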
|
class SuperSimpleLearnableNorm(SuperModule):
'Super simple learnable normalization.'
def __init__(self, mean=0, std=1, eps=1e-06, inplace=False) -> None:
super(SuperSimpleLearnableNorm, self).__init__()
self.register_parameter('_mean', nn.Parameter(torch.tensor(mean, dtype=torch.float)))
self.register_parameter('_std', nn.Parameter(torch.tensor(std, dtype=torch.float)))
self._eps = eps
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
if (not self._inplace):
tensor = input.clone()
else:
tensor = input
(mean, std) = (self._mean.to(tensor.device), (torch.abs(self._std.to(tensor.device)) + self._eps))
if (std == 0).any():
raise ValueError('std leads to division by zero.')
while (mean.ndim < tensor.ndim):
(mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
return tensor.sub_(mean).div_(std)
def forward_with_container(self, input, container, prefix=[]):
if (not self._inplace):
tensor = input.clone()
else:
tensor = input
mean_name = '.'.join((prefix + ['_mean']))
std_name = '.'.join((prefix + ['_std']))
(mean, std) = (container.query(mean_name).to(tensor.device), (torch.abs(container.query(std_name).to(tensor.device)) + self._eps))
while (mean.ndim < tensor.ndim):
(mean, std) = (torch.unsqueeze(mean, dim=0), torch.unsqueeze(std, dim=0))
return tensor.sub_(mean).div_(std)
def extra_repr(self) -> str:
return 'mean={mean}, std={std}, inplace={inplace}'.format(mean=self._mean.item(), std=self._std.item(), inplace=self._inplace)
|
class SuperIdentity(SuperModule):
'Super identity mapping layer.'
def __init__(self, inplace=False, **kwargs) -> None:
super(SuperIdentity, self).__init__()
self._inplace = inplace
@property
def abstract_search_space(self):
return spaces.VirtualNode(id(self))
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
if (not self._inplace):
tensor = input.clone()
else:
tensor = input
return tensor
def extra_repr(self) -> str:
return 'inplace={inplace}'.format(inplace=self._inplace)
def forward_with_container(self, input, container, prefix=[]):
return self.forward_raw(input)
|
class SuperReArrange(SuperModule):
'Applies the rearrange operation.'
def __init__(self, pattern, **axes_lengths):
super(SuperReArrange, self).__init__()
self._pattern = pattern
self._axes_lengths = axes_lengths
axes_lengths = tuple(sorted(self._axes_lengths.items()))
(left, right) = pattern.split('->')
left = ParsedExpression(left)
right = ParsedExpression(right)
difference = set.symmetric_difference(left.identifiers, right.identifiers)
if difference:
raise ValueError('Identifiers only on one side of expression (should be on both): {}'.format(difference))
axis_name2known_length = OrderedDict()
for composite_axis in left.composition:
for axis_name in composite_axis:
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = None
for axis_name in right.identifiers:
if (axis_name not in axis_name2known_length):
if isinstance(axis_name, AnonymousAxis):
axis_name2known_length[axis_name] = axis_name.value
else:
axis_name2known_length[axis_name] = None
axis_name2position = {name: position for (position, name) in enumerate(axis_name2known_length)}
for (elementary_axis, axis_length) in axes_lengths:
if (not ParsedExpression.check_axis_name(elementary_axis)):
raise ValueError('Invalid name for an axis', elementary_axis)
if (elementary_axis not in axis_name2known_length):
raise ValueError('Axis {} is not used in transform'.format(elementary_axis))
axis_name2known_length[elementary_axis] = axis_length
input_composite_axes = []
for composite_axis in left.composition:
known = {axis for axis in composite_axis if (axis_name2known_length[axis] is not None)}
unknown = {axis for axis in composite_axis if (axis_name2known_length[axis] is None)}
if (len(unknown) > 1):
raise ValueError('Could not infer sizes for {}'.format(unknown))
assert ((len(unknown) + len(known)) == len(composite_axis))
input_composite_axes.append(([axis_name2position[axis] for axis in known], [axis_name2position[axis] for axis in unknown]))
axis_position_after_reduction = {}
for axis_name in itertools.chain(*left.composition):
if (axis_name in right.identifiers):
axis_position_after_reduction[axis_name] = len(axis_position_after_reduction)
result_axes_grouping = []
for composite_axis in right.composition:
result_axes_grouping.append([axis_name2position[axis] for axis in composite_axis])
ordered_axis_right = list(itertools.chain(*right.composition))
axes_permutation = tuple((axis_position_after_reduction[axis] for axis in ordered_axis_right if (axis in left.identifiers)))
self.input_composite_axes = input_composite_axes
self.output_composite_axes = result_axes_grouping
self.elementary_axes_lengths = list(axis_name2known_length.values())
self.axes_permutation = axes_permutation
@functools.lru_cache(maxsize=1024)
def reconstruct_from_shape(self, shape):
if (len(shape) != len(self.input_composite_axes)):
raise ValueError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
axes_lengths = list(self.elementary_axes_lengths)
for (input_axis, (known_axes, unknown_axes)) in enumerate(self.input_composite_axes):
length = shape[input_axis]
known_product = 1
for axis in known_axes:
known_product *= axes_lengths[axis]
if (len(unknown_axes) == 0):
if (isinstance(length, int) and isinstance(known_product, int) and (length != known_product)):
raise ValueError('Shape mismatch, {} != {}'.format(length, known_product))
else:
if (isinstance(length, int) and isinstance(known_product, int) and ((length % known_product) != 0)):
raise ValueError("Shape mismatch, can't divide axis of length {} in chunks of {}".format(length, known_product))
(unknown_axis,) = unknown_axes
axes_lengths[unknown_axis] = (length // known_product)
final_shape = []
for (output_axis, grouping) in enumerate(self.output_composite_axes):
lengths = [axes_lengths[elementary_axis] for elementary_axis in grouping]
final_shape.append(int(np.prod(lengths)))
axes_reordering = self.axes_permutation
return (axes_lengths, axes_reordering, final_shape)
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
return root_node
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
return self.forward_raw(input)
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
(init_shape, axes_reordering, final_shape) = self.reconstruct_from_shape(tuple(input.shape))
tensor = torch.reshape(input, init_shape)
tensor = tensor.permute(axes_reordering)
tensor = torch.reshape(tensor, final_shape)
return tensor
def extra_repr(self) -> str:
params = repr(self._pattern)
for (axis, length) in self._axes_lengths.items():
params += ', {}={}'.format(axis, length)
return '{:}'.format(params)
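# Usage sketch (assumes einops' ParsedExpression/AnonymousAxis and numpy are in
# scope, as the class itself requires): a plain einops-style rearrangement.
rearr = SuperReArrange('b c h w -> b (h w) c')
x = torch.randn(2, 3, 4, 5)
assert rearr(x).shape == (2, 4 * 5, 3)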
|
class SuperAlphaEBDv1(SuperModule):
'A simple layer to convert the raw trading data from 1-D to 2-D data and apply an FC layer.'
def __init__(self, d_feat: int, embed_dim: IntSpaceType):
super(SuperAlphaEBDv1, self).__init__()
self._d_feat = d_feat
self._embed_dim = embed_dim
self.proj = SuperLinear(d_feat, embed_dim)
@property
def embed_dim(self):
return spaces.get_max(self._embed_dim)
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
space = self.proj.abstract_search_space
if (not spaces.is_determined(space)):
root_node.append('proj', space)
if (not spaces.is_determined(self._embed_dim)):
root_node.append('_embed_dim', self._embed_dim.abstract(reuse_last=True))
return root_node
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperAlphaEBDv1, self).apply_candidate(abstract_child)
if ('proj' in abstract_child):
self.proj.apply_candidate(abstract_child['proj'])
def forward_candidate(self, input: torch.Tensor) -> torch.Tensor:
x = input.reshape(len(input), self._d_feat, (- 1))
x = x.permute(0, 2, 1)
if (not spaces.is_determined(self._embed_dim)):
embed_dim = self.abstract_child['_embed_dim'].value
else:
embed_dim = spaces.get_determined_value(self._embed_dim)
out = (self.proj(x) * math.sqrt(embed_dim))
return out
def forward_raw(self, input: torch.Tensor) -> torch.Tensor:
x = input.reshape(len(input), self._d_feat, (- 1))
x = x.permute(0, 2, 1)
out = (self.proj(x) * math.sqrt(self.embed_dim))
return out
|
class SuperTransformerEncoderLayer(SuperModule):
'TransformerEncoderLayer is made up of self-attn and feedforward network.\n This is a super model for TransformerEncoderLayer that can support search for the transformer encoder layer.\n\n Reference:\n - Paper: Attention Is All You Need, NeurIPS 2017\n - PyTorch Implementation: https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer\n\n Details:\n the original post-norm version: MHA -> residual -> norm -> MLP -> residual -> norm\n the pre-norm version: norm -> MHA -> residual -> norm -> MLP -> residual\n '
def __init__(self, d_model: IntSpaceType, num_heads: IntSpaceType, qkv_bias: BoolSpaceType=False, mlp_hidden_multiplier: IntSpaceType=4, dropout: Optional[float]=None, att_dropout: Optional[float]=None, norm_affine: bool=True, act_layer: Callable[([], nn.Module)]=nn.GELU, order: LayerOrder=LayerOrder.PreNorm, use_mask: bool=False):
super(SuperTransformerEncoderLayer, self).__init__()
mha = SuperSelfAttention(d_model, d_model, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=att_dropout, proj_drop=None, use_mask=use_mask)
mlp = SuperMLPv2(d_model, hidden_multiplier=mlp_hidden_multiplier, out_features=d_model, act_layer=act_layer, drop=dropout)
if (order is LayerOrder.PreNorm):
self.norm1 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
self.mha = mha
self.drop = nn.Dropout((dropout or 0.0))
self.norm2 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
self.mlp = mlp
elif (order is LayerOrder.PostNorm):
self.mha = mha
self.drop1 = nn.Dropout((dropout or 0.0))
self.norm1 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
self.mlp = mlp
self.drop2 = nn.Dropout((dropout or 0.0))
self.norm2 = SuperLayerNorm1D(d_model, elementwise_affine=norm_affine)
else:
raise ValueError('Unknown order: {:}'.format(order))
self._order = order
@property
def abstract_search_space(self):
root_node = spaces.VirtualNode(id(self))
xdict = dict(mha=self.mha.abstract_search_space, norm1=self.norm1.abstract_search_space, mlp=self.mlp.abstract_search_space, norm2=self.norm2.abstract_search_space)
for (key, space) in xdict.items():
if (not spaces.is_determined(space)):
root_node.append(key, space)
return root_node
def apply_candidate(self, abstract_child: spaces.VirtualNode):
super(SuperTransformerEncoderLayer, self).apply_candidate(abstract_child)
valid_keys = ['mha', 'norm1', 'mlp', 'norm2']
for key in valid_keys:
if (key in abstract_child):
getattr(self, key).apply_candidate(abstract_child[key])
def forward_candidate(self, inputs: torch.Tensor) -> torch.Tensor:
return self.forward_raw(inputs)
def forward_raw(self, inputs: torch.Tensor) -> torch.Tensor:
if (self._order is LayerOrder.PreNorm):
x = self.norm1(inputs)
x = self.mha(x)
x = self.drop(x)
x = (x + inputs)
y = self.norm2(x)
outs = (x + self.mlp(y))
elif (self._order is LayerOrder.PostNorm):
x = self.mha(inputs)
x = (inputs + self.drop1(x))
x = self.norm1(x)
y = self.mlp(x)
y = (x + self.drop2(y))
outs = self.norm2(y)
else:
raise ValueError('Unknown order: {:}'.format(self._order))
return outs
|
class LayerOrder(Enum):
'This class defines the enumerations for order of operation in a residual or normalization-based layer.'
PreNorm = 'pre-norm'
PostNorm = 'post-norm'
|
class SuperRunMode(Enum):
'This class defines the enumerations for Super Model Running Mode.'
FullModel = 'fullmodel'
Candidate = 'candidate'
Default = 'fullmodel'
|
class ShapeContainer():
'A class to maintain the shape of each weight tensor for a model.'
def __init__(self):
self._names = []
self._shapes = []
self._name2index = dict()
self._param_or_buffers = []
@property
def shapes(self):
return self._shapes
def __getitem__(self, index):
return self._shapes[index]
def translate(self, tensors, all_none_match=True):
result = TensorContainer()
for (index, name) in enumerate(self._names):
cur_num = tensors[index].numel()
expected_num = self._shapes[index].numel()
if ((cur_num < expected_num) or ((cur_num > expected_num) and (not all_none_match))):
raise ValueError('Invalid {:} vs {:}'.format(cur_num, expected_num))
cur_tensor = tensors[index].view((- 1))[:expected_num]
new_tensor = torch.reshape(cur_tensor, self._shapes[index])
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def append(self, name, shape, param_or_buffer):
if (not isinstance(shape, torch.Size)):
raise TypeError('The input tensor must be torch.Size instead of {:}'.format(type(shape)))
self._names.append(name)
self._shapes.append(shape)
self._param_or_buffers.append(param_or_buffer)
assert (name not in self._name2index), 'The [{:}] has already been added.'.format(name)
self._name2index[name] = (len(self._names) - 1)
def query(self, name):
if (not self.has(name)):
raise ValueError('The {:} is not in {:}'.format(name, list(self._name2index.keys())))
index = self._name2index[name]
return self._shapes[index]
def has(self, name):
return (name in self._name2index)
def has_prefix(self, prefix):
for (name, idx) in self._name2index.items():
if name.startswith(prefix):
return name
return False
def numel(self, index=None):
if (index is None):
shapes = self._shapes
else:
shapes = [self._shapes[index]]
total = 0
for shape in shapes:
total += shape.numel()
return total
def __len__(self):
return len(self._names)
def __repr__(self):
return '{name}({num} tensors)'.format(name=self.__class__.__name__, num=len(self))
|
class TensorContainer():
'A class to maintain both parameters and buffers for a model.'
def __init__(self):
self._names = []
self._tensors = []
self._param_or_buffers = []
self._name2index = dict()
def additive(self, tensors):
result = TensorContainer()
for (index, name) in enumerate(self._names):
new_tensor = (self._tensors[index] + tensors[index])
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def create_container(self, tensors):
result = TensorContainer()
for (index, name) in enumerate(self._names):
new_tensor = tensors[index]
result.append(name, new_tensor, self._param_or_buffers[index])
return result
def no_grad_clone(self):
result = TensorContainer()
with torch.no_grad():
for (index, name) in enumerate(self._names):
result.append(name, self._tensors[index].clone(), self._param_or_buffers[index])
return result
def to_shape_container(self):
result = ShapeContainer()
for (index, name) in enumerate(self._names):
result.append(name, self._tensors[index].shape, self._param_or_buffers[index])
return result
def requires_grad_(self, requires_grad=True):
for tensor in self._tensors:
tensor.requires_grad_(requires_grad)
def parameters(self):
return self._tensors
@property
def tensors(self):
return self._tensors
def flatten(self, tensors=None):
if (tensors is None):
tensors = self._tensors
tensors = [tensor.view((- 1)) for tensor in tensors]
return torch.cat(tensors)
def unflatten(self, tensor):
(tensors, s) = ([], 0)
for raw_tensor in self._tensors:
length = raw_tensor.numel()
x = torch.reshape(tensor[s:(s + length)], shape=raw_tensor.shape)
tensors.append(x)
s += length
return tensors
def append(self, name, tensor, param_or_buffer):
if (not isinstance(tensor, torch.Tensor)):
raise TypeError('The input tensor must be torch.Tensor instead of {:}'.format(type(tensor)))
self._names.append(name)
self._tensors.append(tensor)
self._param_or_buffers.append(param_or_buffer)
assert (name not in self._name2index), 'The [{:}] has already been added.'.format(name)
self._name2index[name] = (len(self._names) - 1)
def query(self, name):
if (not self.has(name)):
raise ValueError('The {:} is not in {:}'.format(name, list(self._name2index.keys())))
index = self._name2index[name]
return self._tensors[index]
def has(self, name):
return (name in self._name2index)
def has_prefix(self, prefix):
for (name, idx) in self._name2index.items():
if name.startswith(prefix):
return name
return False
def numel(self):
total = 0
for tensor in self._tensors:
total += tensor.numel()
return total
def __len__(self):
return len(self._names)
def __repr__(self):
return '{name}({num} tensors)'.format(name=self.__class__.__name__, num=len(self))
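# A round-trip sketch (assumes torch is in scope): flatten packs every tensor
# into one vector and unflatten restores the original shapes.
container = TensorContainer()
container.append('w', torch.randn(3, 4), True)
container.append('b', torch.zeros(4), False)
flat = container.flatten()
parts = container.unflatten(flat)
assert flat.numel() == container.numel() == 16
assert [tuple(t.shape) for t in parts] == [(3, 4), (4,)]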
|
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return ((1.0 + math.erf((x / math.sqrt(2.0)))) / 2.0)
if ((mean < (a - (2 * std))) or (mean > (b + (2 * std)))):
warnings.warn('mean is more than 2 std from [a, b] in nn.init.trunc_normal_. The distribution of values may be incorrect.', stacklevel=2)
with torch.no_grad():
l = norm_cdf(((a - mean) / std))
u = norm_cdf(((b - mean) / std))
tensor.uniform_(((2 * l) - 1), ((2 * u) - 1))
tensor.erfinv_()
tensor.mul_((std * math.sqrt(2.0)))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
|
def trunc_normal_(tensor, mean=0.0, std=1.0, a=(- 2.0), b=2.0):
'Fills the input Tensor with values drawn from a truncated\n normal distribution. The values are effectively drawn from the\n normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n with values outside :math:`[a, b]` redrawn until they are within\n the bounds. The method used for generating the random values works\n best when :math:`a \\leq \\text{mean} \\leq b`.\n Args:\n tensor: an n-dimensional `torch.Tensor`\n mean: the mean of the normal distribution\n std: the standard deviation of the normal distribution\n a: the minimum cutoff value\n b: the maximum cutoff value\n Examples:\n >>> w = torch.empty(3, 5)\n >>> nn.init.trunc_normal_(w)\n '
if isinstance(tensor, list):
return [_no_grad_trunc_normal_(x, mean, std, a, b) for x in tensor]
else:
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
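# A quick sanity check (assumes torch is in scope): samples are redrawn/clamped
# to stay inside [a, b].
w = torch.empty(3, 5)
trunc_normal_(w, mean=0.0, std=1.0, a=-2.0, b=2.0)
assert w.min().item() >= -2.0 and w.max().item() <= 2.0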
|
def init_transformer(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if (isinstance(m, nn.Linear) and (m.bias is not None)):
nn.init.constant_(m.bias, 0)
elif isinstance(m, super_core.SuperLinear):
trunc_normal_(m._super_weight, std=0.02)
if (m._super_bias is not None):
nn.init.constant_(m._super_bias, 0)
elif isinstance(m, super_core.SuperLayerNorm1D):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
|
def get_scheduler(indicator, lr):
if (indicator == 'warm-cos'):
multiplier = WarmupParamScheduler(CosineParamScheduler(lr, (lr * 0.001)), warmup_factor=0.001, warmup_length=0.05, warmup_method='linear')
else:
raise ValueError('Unknown indicator: {:}'.format(indicator))
return multiplier
|
class Logger():
'A logger used in xautodl.'
def __init__(self, root_dir, prefix='', log_time=True):
'Create a summary writer logging to log_dir.'
self.root_dir = Path(root_dir)
self.log_dir = (self.root_dir / 'logs')
self.log_dir.mkdir(parents=True, exist_ok=True)
self._prefix = prefix
self._log_time = log_time
self.logger_path = (self.log_dir / '{:}{:}.log'.format(self._prefix, time_for_file()))
self._logger_file = open(self.logger_path, 'w')
@property
def logger(self):
return self._logger_file
def log(self, string, save=True, stdout=False):
string = ('{:} {:}'.format(time_string(), string) if self._log_time else string)
if stdout:
sys.stdout.write(string)
sys.stdout.flush()
else:
print(string)
if save:
self._logger_file.write('{:}\n'.format(string))
self._logger_file.flush()
def close(self):
self._logger_file.close()
writer = getattr(self, 'writer', None)
if (writer is not None):
writer.close()
def __repr__(self):
return '{name}(dir={log_dir}, prefix={_prefix}, log_time={_log_time})'.format(name=self.__class__.__name__, **self.__dict__)
|
class AverageMeter():
'Computes and stores the average and current value'
def __init__(self):
self.reset()
def reset(self):
self.val = 0.0
self.avg = 0.0
self.sum = 0.0
self.count = 0.0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.avg = (self.sum / self.count)
def __repr__(self):
return '{name}(val={val}, avg={avg}, count={count})'.format(name=self.__class__.__name__, **self.__dict__)
|
class Metric(abc.ABC):
'The default meta metric class.'
def __init__(self):
self.reset()
def reset(self):
raise NotImplementedError
def __call__(self, predictions, targets):
raise NotImplementedError
def get_info(self):
raise NotImplementedError
def perf_str(self):
raise NotImplementedError
def __repr__(self):
return '{name}({inner})'.format(name=self.__class__.__name__, inner=self.inner_repr())
def inner_repr(self):
return ''
|
class ComposeMetric(Metric):
'The composed metric class.'
def __init__(self, *metric_list):
self.reset()
for metric in metric_list:
self.append(metric)
def reset(self):
self._metric_list = []
def append(self, metric):
if (not isinstance(metric, Metric)):
raise ValueError('The input metric is not correct: {:}'.format(type(metric)))
self._metric_list.append(metric)
def __len__(self):
return len(self._metric_list)
def __call__(self, predictions, targets):
results = list()
for metric in self._metric_list:
results.append(metric(predictions, targets))
return results
def get_info(self):
results = dict()
for metric in self._metric_list:
for (key, value) in metric.get_info().items():
results[key] = value
return results
def inner_repr(self):
xlist = []
for metric in self._metric_list:
xlist.append(str(metric))
return ','.join(xlist)
|
class CrossEntropyMetric(Metric):
'The metric for the cross-entropy loss.'
def __init__(self, ignore_batch):
super(CrossEntropyMetric, self).__init__()
self._ignore_batch = ignore_batch
def reset(self):
self._loss = AverageMeter()
def __call__(self, predictions, targets):
if (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
(batch, _) = predictions.shape
if self._ignore_batch:
loss = F.cross_entropy(predictions, targets, reduction='sum')
self._loss.update(loss.item(), 1)
else:
loss = F.cross_entropy(predictions, targets, reduction='mean')
self._loss.update(loss.item(), batch)
return loss
else:
raise NotImplementedError
def get_info(self):
return {'loss': self._loss.avg, 'score': (self._loss.avg * 100)}
def perf_str(self):
return 'ce-loss={:.5f}'.format(self._loss.avg)
|
class Top1AccMetric(Metric):
'The metric for the top-1 accuracy.'
def __init__(self, ignore_batch):
super(Top1AccMetric, self).__init__()
self._ignore_batch = ignore_batch
def reset(self):
self._accuracy = AverageMeter()
def __call__(self, predictions, targets):
if (isinstance(predictions, torch.Tensor) and isinstance(targets, torch.Tensor)):
(batch, _) = predictions.shape
max_prob_indexes = torch.argmax(predictions, dim=(- 1))
corrects = torch.eq(max_prob_indexes, targets)
accuracy = corrects.float().mean().float()
if self._ignore_batch:
self._accuracy.update(accuracy, 1)
else:
self._accuracy.update(accuracy, batch)
return accuracy
else:
raise NotImplementedError
def get_info(self):
return {'accuracy': self._accuracy.avg, 'score': (self._accuracy.avg * 100)}
def perf_str(self):
return 'accuracy={:.3f}%'.format((self._accuracy.avg * 100))
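# Usage sketch: two correct top-1 predictions give accuracy 1.0.
metric = Top1AccMetric(ignore_batch=False)
preds = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
metric(preds, torch.tensor([0, 1]))
assert float(metric.get_info()['accuracy']) == 1.0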
|
def has_key_words(xdict):
if (not isinstance(xdict, dict)):
return False
key_set = set(KEYS)
cur_set = set(xdict.keys())
return (key_set.intersection(cur_set) == key_set)
|
def get_module_by_module_path(module_path):
'Load the module from the path.'
if module_path.endswith('.py'):
module_spec = importlib.util.spec_from_file_location('', module_path)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
else:
module = importlib.import_module(module_path)
return module
|
def call_by_dict(config: Dict[(Text, Any)], *args, **kwargs) -> object:
"\n get initialized instance with config\n Parameters\n ----------\n config : a dictionary, such as:\n {\n 'cls_or_func': 'ClassName',\n 'args': list,\n 'kwargs': dict,\n 'model_path': a string indicating the path,\n }\n Returns\n -------\n object:\n An initialized object based on the config info\n "
module = get_module_by_module_path(config['module_path'])
cls_or_func = getattr(module, config[CLS_FUNC_KEY])
args = tuple((list(config['args']) + list(args)))
kwargs = {**config['kwargs'], **kwargs}
return cls_or_func(*args, **kwargs)
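# A hedged example config (assumes CLS_FUNC_KEY == 'cls_or_func', as the
# docstring suggests; collections.OrderedDict is just an illustrative target):
config = {'cls_or_func': 'OrderedDict', 'module_path': 'collections', 'args': [], 'kwargs': {}}
obj = call_by_dict(config)  # equivalent to collections.OrderedDict()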
|
def call_by_yaml(path, *args, **kwargs) -> object:
config = load_yaml(path)
return call_by_dict(config, *args, **kwargs)
|
def nested_call_by_dict(config: Union[(Dict[(Text, Any)], Any)], *args, **kwargs) -> object:
'Similar to `call_by_dict`, but differently, the args may contain another dict needs to be called.'
if isinstance(config, list):
return [nested_call_by_dict(x) for x in config]
elif isinstance(config, tuple):
return tuple((nested_call_by_dict(x) for x in config))
elif (not isinstance(config, dict)):
return config
elif (not has_key_words(config)):
return {key: nested_call_by_dict(x) for (key, x) in config.items()}
else:
module = get_module_by_module_path(config['module_path'])
cls_or_func = getattr(module, config[CLS_FUNC_KEY])
args = tuple((list(config['args']) + list(args)))
kwargs = {**config['kwargs'], **kwargs}
new_args = [nested_call_by_dict(x) for x in args]
new_kwargs = {}
for (key, x) in kwargs.items():
new_kwargs[key] = nested_call_by_dict(x)
return cls_or_func(*new_args, **new_kwargs)
|
def nested_call_by_yaml(path, *args, **kwargs) -> object:
config = load_yaml(path)
return nested_call_by_dict(config, *args, **kwargs)
|
class BatchSampler():
'A batch sampler used for single machine training.'
def __init__(self, dataset, batch, steps):
self._num_per_epoch = len(dataset)
self._iter_per_epoch = (self._num_per_epoch // batch)
self._steps = steps
self._batch = batch
if (self._num_per_epoch < self._batch):
raise ValueError('The dataset size must be larger than batch={:}'.format(batch))
self._indexes = list(range(self._num_per_epoch))
def __iter__(self):
'\n yield a batch of indexes using random sampling\n '
for i in range(self._steps):
if ((i % self._iter_per_epoch) == 0):
random.shuffle(self._indexes)
j = (i % self._iter_per_epoch)
(yield self._indexes[(j * self._batch):((j + 1) * self._batch)])
def __len__(self):
return self._steps
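# Usage sketch: the sampler yields `steps` batches of shuffled indexes and can be
# passed to torch.utils.data.DataLoader via its `batch_sampler` argument.
sampler = BatchSampler(dataset=list(range(100)), batch=16, steps=50)
batches = list(sampler)
assert len(batches) == 50 and all(len(b) == 16 for b in batches)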
|
class ParamScheduler():
'\n Base class for parameter schedulers.\n A parameter scheduler defines a mapping from a progress value in [0, 1) to\n a number (e.g. learning rate).\n '
WHERE_EPSILON = 1e-06
def __call__(self, where: float) -> float:
'\n Get the value of the param for a given point at training.\n\n We update params (such as learning rate) based on the percent progress\n of training completed. This allows a scheduler to be agnostic to the\n exact length of a particular run (e.g. 120 epochs vs 90 epochs), as\n long as the relative progress where params should be updated is the same.\n However, it assumes that the total length of training is known.\n\n Args:\n where: A float in [0,1) that represents how far training has progressed\n\n '
raise NotImplementedError('Param schedulers must override __call__')
|
class ConstantParamScheduler(ParamScheduler):
'\n Returns a constant value for a param.\n '
def __init__(self, value: float) -> None:
self._value = value
def __call__(self, where: float) -> float:
if (where >= 1.0):
raise RuntimeError(f'where in ParamScheduler must be in [0, 1): got {where}')
return self._value
|
class CosineParamScheduler(ParamScheduler):
"\n Cosine decay or cosine warmup schedules based on start and end values.\n The schedule is updated based on the fraction of training progress.\n The schedule was proposed in 'SGDR: Stochastic Gradient Descent with\n Warm Restarts' (https://arxiv.org/abs/1608.03983). Note that this class\n only implements the cosine annealing part of SGDR, and not the restarts.\n\n Example:\n\n .. code-block:: python\n\n CosineParamScheduler(start_value=0.1, end_value=0.0001)\n "
def __init__(self, start_value: float, end_value: float) -> None:
self._start_value = start_value
self._end_value = end_value
def __call__(self, where: float) -> float:
return (self._end_value + ((0.5 * (self._start_value - self._end_value)) * (1 + math.cos((math.pi * where)))))
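# A worked value check: at where=0.5 the cosine term vanishes, giving the midpoint.
sched = CosineParamScheduler(start_value=0.1, end_value=0.0)
assert abs(sched(0.0) - 0.1) < 1e-09 and abs(sched(0.5) - 0.05) < 1e-09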
|
class ExponentialParamScheduler(ParamScheduler):
'\n Exponential schedule parameterized by a start value and decay.\n The schedule is updated based on the fraction of training\n progress, `where`, with the formula\n `param_t = start_value * (decay ** where)`.\n\n Example:\n\n .. code-block:: python\n ExponentialParamScheduler(start_value=2.0, decay=0.02)\n\n Corresponds to a decreasing schedule with values in [2.0, 0.04).\n '
def __init__(self, start_value: float, decay: float) -> None:
self._start_value = start_value
self._decay = decay
def __call__(self, where: float) -> float:
return (self._start_value * (self._decay ** where))
|
class LinearParamScheduler(ParamScheduler):
'\n Linearly interpolates parameter between ``start_value`` and ``end_value``.\n Can be used for either warmup or decay based on start and end values.\n The schedule is updated after every train step by default.\n\n Example:\n\n .. code-block:: python\n\n LinearParamScheduler(start_value=0.0001, end_value=0.01)\n\n Corresponds to a linear increasing schedule with values in [0.0001, 0.01)\n '
def __init__(self, start_value: float, end_value: float) -> None:
self._start_value = start_value
self._end_value = end_value
def __call__(self, where: float) -> float:
return ((self._end_value * where) + (self._start_value * (1 - where)))
|
class MultiStepParamScheduler(ParamScheduler):
'\n Takes a predefined schedule for a param value, and a list of epochs or steps\n which stand for the upper boundary (excluded) of each range.\n\n Example:\n\n .. code-block:: python\n\n MultiStepParamScheduler(\n values=[0.1, 0.01, 0.001, 0.0001],\n milestones=[30, 60, 80, 120]\n )\n\n Then the param value will be 0.1 for epochs 0-29, 0.01 for\n epochs 30-59, 0.001 for epochs 60-79, 0.0001 for epochs 80-120.\n Note that the length of values must be equal to the length of milestones\n plus one.\n '
def __init__(self, values: List[float], num_updates: Optional[int]=None, milestones: Optional[List[int]]=None) -> None:
'\n Args:\n values: param value in each range\n num_updates: the end of the last range. If None, will use ``milestones[-1]``\n milestones: the boundary of each range. If None, will evenly split ``num_updates``\n\n For example, all the following combinations define the same scheduler:\n\n * num_updates=90, milestones=[30, 60], values=[1, 0.1, 0.01]\n * num_updates=90, values=[1, 0.1, 0.01]\n * milestones=[30, 60, 90], values=[1, 0.1, 0.01]\n * milestones=[3, 6, 9], values=[1, 0.1, 0.01] (ParamScheduler is scale-invariant)\n '
if ((num_updates is None) and (milestones is None)):
raise ValueError('num_updates and milestones cannot both be None')
if (milestones is None):
milestones = []
step_width = math.ceil((num_updates / float(len(values))))
for idx in range((len(values) - 1)):
milestones.append((step_width * (idx + 1)))
elif (not (isinstance(milestones, Sequence) and (len(milestones) == (len(values) - int((num_updates is not None)))))):
raise ValueError(('MultiStep scheduler requires a list of %d milestones' % (len(values) - int((num_updates is not None)))))
if (num_updates is None):
(num_updates, milestones) = (milestones[(- 1)], milestones[:(- 1)])
if (num_updates < len(values)):
raise ValueError('Total num_updates must be greater than length of param schedule')
self._param_schedule = values
self._num_updates = num_updates
self._milestones: List[int] = milestones
start_epoch = 0
for milestone in self._milestones:
if (milestone >= self._num_updates):
raise ValueError(('Milestone must be smaller than total number of updates: num_updates=%d, milestone=%d' % (self._num_updates, milestone)))
if (start_epoch >= milestone):
raise ValueError(('Milestone must be larger than start epoch: start_epoch=%d, milestone=%d' % (start_epoch, milestone)))
start_epoch = milestone
def __call__(self, where: float) -> float:
if (where > 1.0):
raise RuntimeError(f'where in ParamScheduler must be in [0, 1]: got {where}')
epoch_num = int(((where + self.WHERE_EPSILON) * self._num_updates))
return self._param_schedule[bisect.bisect_right(self._milestones, epoch_num)]
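# A worked example mirroring the docstring: the last milestone becomes
# num_updates, leaving a single boundary at epoch 30 out of 60 updates.
sched = MultiStepParamScheduler(values=[0.1, 0.01], milestones=[30, 60])
assert sched(0.25) == 0.1 and sched(0.75) == 0.01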
|
class PolynomialDecayParamScheduler(ParamScheduler):
'\n Decays the param value after every epoch according to a\n polynomial function with a fixed power.\n The schedule is updated after every train step by default.\n\n Example:\n\n .. code-block:: python\n\n PolynomialDecayParamScheduler(base_value=0.1, power=0.9)\n\n Then the param value will be 0.1 for epoch 0, 0.099 for epoch 1, and\n so on.\n '
def __init__(self, base_value: float, power: float) -> None:
self._base_value = base_value
self._power = power
def __call__(self, where: float) -> float:
return (self._base_value * ((1 - where) ** self._power))
|
class StepParamScheduler(ParamScheduler):
'\n Takes a fixed schedule for a param value. If the length of the\n fixed schedule is less than the number of epochs, then the epochs\n are divided evenly among the param schedule.\n The schedule is updated after every train epoch by default.\n\n Example:\n\n .. code-block:: python\n\n StepParamScheduler(values=[0.1, 0.01, 0.001, 0.0001], num_updates=120)\n\n Then the param value will be 0.1 for epochs 0-29, 0.01 for\n epochs 30-59, 0.001 for epoch 60-89, 0.0001 for epochs 90-119.\n '
def __init__(self, num_updates: Union[(int, float)], values: List[float]) -> None:
if (num_updates <= 0):
raise ValueError('Number of updates must be larger than 0')
if (not (isinstance(values, Sequence) and (len(values) > 0))):
raise ValueError('Step scheduler requires a list of at least one param value')
self._param_schedule = values
def __call__(self, where: float) -> float:
ind = int(((where + self.WHERE_EPSILON) * len(self._param_schedule)))
return self._param_schedule[ind]
|
class StepWithFixedGammaParamScheduler(ParamScheduler):
'\n Decays the param value by gamma at equal number of steps so as to have the\n specified total number of decays.\n\n Example:\n\n .. code-block:: python\n\n StepWithFixedGammaParamScheduler(\n base_value=0.1, gamma=0.1, num_decays=3, num_updates=120)\n\n Then the param value will be 0.1 for epochs 0-29, 0.01 for\n epochs 30-59, 0.001 for epoch 60-89, 0.0001 for epochs 90-119.\n '
def __init__(self, base_value: float, num_decays: int, gamma: float, num_updates: int) -> None:
for k in [base_value, gamma]:
if (not (isinstance(k, (int, float)) and (k > 0))):
raise ValueError('base_value and gamma must be positive numbers')
for k in [num_decays, num_updates]:
if (not (isinstance(k, int) and (k > 0))):
raise ValueError('num_decays and num_updates must be positive integers')
self.base_value = base_value
self.num_decays = num_decays
self.gamma = gamma
self.num_updates = num_updates
values = [base_value]
for _ in range(num_decays):
values.append((values[(- 1)] * gamma))
self._step_param_scheduler = StepParamScheduler(num_updates=num_updates, values=values)
def __call__(self, where: float) -> float:
return self._step_param_scheduler(where)
|
class CompositeParamScheduler(ParamScheduler):
"\n Composite parameter scheduler composed of intermediate schedulers.\n Takes a list of schedulers and a list of lengths corresponding to\n percentage of training each scheduler should run for. Schedulers\n are run in order. All values in lengths should sum to 1.0.\n\n Each scheduler also has a corresponding interval scale. If interval\n scale is 'fixed', the intermediate scheduler will be run without any rescaling\n of the time. If interval scale is 'rescaled', intermediate scheduler is\n run such that each scheduler will start and end at the same values as it\n would if it were the only scheduler. Default is 'rescaled' for all schedulers.\n\n Example:\n\n .. code-block:: python\n\n schedulers = [\n ConstantParamScheduler(value=0.42),\n CosineParamScheduler(start_value=0.42, end_value=1e-4)\n ]\n CompositeParamScheduler(\n schedulers=schedulers,\n interval_scaling=['rescaled', 'rescaled'],\n lengths=[0.3, 0.7])\n\n The parameter value will be 0.42 for the first [0%, 30%) of steps,\n and then will cosine decay from 0.42 to 0.0001 for [30%, 100%) of\n training.\n "
def __init__(self, schedulers: Sequence[ParamScheduler], lengths: List[float], interval_scaling: Sequence[str]) -> None:
if (len(schedulers) != len(lengths)):
raise ValueError('Schedulers and lengths must be same length')
if (len(schedulers) == 0):
raise ValueError('There must be at least one scheduler in the composite scheduler')
if (abs((sum(lengths) - 1.0)) >= 0.001):
raise ValueError('The sum of all values in lengths must be 1')
if (sum(lengths) != 1.0):
lengths[(- 1)] = (1.0 - sum(lengths[:(- 1)]))
for s in interval_scaling:
if (s not in ['rescaled', 'fixed']):
raise ValueError(f'Unsupported interval_scaling: {s}')
self._lengths = lengths
self._schedulers = schedulers
self._interval_scaling = interval_scaling
def __call__(self, where: float) -> float:
i = 0
running_total = self._lengths[i]
while (((where + self.WHERE_EPSILON) > running_total) and (i < (len(self._schedulers) - 1))):
i += 1
running_total += self._lengths[i]
scheduler = self._schedulers[i]
scheduler_where = where
interval_scale = self._interval_scaling[i]
if (interval_scale == 'rescaled'):
scheduler_start = (running_total - self._lengths[i])
scheduler_where = ((where - scheduler_start) / self._lengths[i])
return scheduler(scheduler_where)
|
class WarmupParamScheduler(CompositeParamScheduler):
'\n Add an initial warmup stage to another scheduler.\n '
def __init__(self, scheduler: ParamScheduler, warmup_factor: float, warmup_length: float, warmup_method: str='linear'):
'\n Args:\n scheduler: warmup will be added at the beginning of this scheduler\n warmup_factor: the factor w.r.t the initial value of ``scheduler``, e.g. 0.001\n warmup_length: the relative length (in [0, 1]) of warmup steps w.r.t the entire\n training, e.g. 0.01\n warmup_method: one of "linear" or "constant"\n '
end_value = scheduler(warmup_length)
start_value = (warmup_factor * scheduler(0.0))
if (warmup_method == 'constant'):
warmup = ConstantParamScheduler(start_value)
elif (warmup_method == 'linear'):
warmup = LinearParamScheduler(start_value, end_value)
else:
raise ValueError('Unknown warmup method: {}'.format(warmup_method))
super().__init__([warmup, scheduler], interval_scaling=['rescaled', 'fixed'], lengths=[warmup_length, (1 - warmup_length)])
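# Illustrative usage sketch (added): wrap a cosine schedule with a 5% linear warmup.
# CosineParamScheduler is assumed to be defined elsewhere in this codebase.
_demo_warmed = WarmupParamScheduler(scheduler=CosineParamScheduler(start_value=1.0, end_value=0.0), warmup_factor=0.001, warmup_length=0.05, warmup_method='linear')
print(_demo_warmed(0.0))  # ~0.001: warmup_factor times the wrapped scheduler's initial value
print(_demo_warmed(0.05))  # the value the cosine schedule itself produces at where=0.05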
|
class LRMultiplier(torch.optim.lr_scheduler._LRScheduler):
'\n A LRScheduler which uses fvcore :class:`ParamScheduler` to multiply the\n learning rate of each param in the optimizer.\n Every step, the learning rate of each parameter becomes its initial value\n multiplied by the output of the given :class:`ParamScheduler`.\n The absolute learning rate value of each parameter can be different.\n This scheduler can be used as long as the relative scale among them do\n not change during training.\n Examples:\n ::\n LRMultiplier(\n opt,\n WarmupParamScheduler(\n MultiStepParamScheduler(\n [1, 0.1, 0.01],\n milestones=[60000, 80000],\n num_updates=90000,\n ), 0.001, 100 / 90000\n ),\n max_iter=90000\n )\n '
def __init__(self, optimizer: torch.optim.Optimizer, multiplier: ParamScheduler, max_iter: int, last_iter: int=(- 1)):
'\n Args:\n optimizer, last_iter: See ``torch.optim.lr_scheduler._LRScheduler``.\n ``last_iter`` is the same as ``last_epoch``.\n multiplier: a fvcore ParamScheduler that defines the multiplier on\n every LR of the optimizer\n max_iter: the total number of training iterations\n '
if (not isinstance(multiplier, ParamScheduler)):
raise ValueError(f'LRMultiplier(multiplier=) must be an instance of fvcore ParamScheduler. Got {multiplier} instead.')
self._multiplier = multiplier
self._max_iter = max_iter
super().__init__(optimizer, last_epoch=last_iter)
def state_dict(self):
return {'base_lrs': self.base_lrs, 'last_epoch': self.last_epoch}
def get_lr(self) -> List[float]:
multiplier = self._multiplier((self.last_epoch / self._max_iter))
return [(base_lr * multiplier) for base_lr in self.base_lrs]
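# Illustrative usage sketch (added): scale a base LR of 0.1 by the multiplier from the
# class docstring. MultiStepParamScheduler is assumed to be available in this codebase.
_demo_net = torch.nn.Linear(4, 2)
_demo_opt = torch.optim.SGD(_demo_net.parameters(), lr=0.1)
_demo_lr_sched = LRMultiplier(_demo_opt, WarmupParamScheduler(MultiStepParamScheduler([1, 0.1, 0.01], milestones=[60000, 80000], num_updates=90000), 0.001, 100 / 90000), max_iter=90000)
_demo_opt.step()
_demo_lr_sched.step()  # each param group's lr becomes base_lr * multiplier(last_iter / max_iter)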
|
def time_for_file():
ISOTIMEFORMAT = '%d-%h-at-%H-%M-%S'
return '{:}'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
|
def time_string():
ISOTIMEFORMAT = '%Y-%m-%d %X'
string = '[{:}]'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string
|
def convert_secs2time(epoch_time, return_str=False):
need_hour = int((epoch_time / 3600))
need_mins = int(((epoch_time - (3600 * need_hour)) / 60))
need_secs = int(((epoch_time - (3600 * need_hour)) - (60 * need_mins)))
if return_str:
return '[{:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
else:
return (need_hour, need_mins, need_secs)
|
def count_parameters(model_or_parameters, unit='mb'):
if isinstance(model_or_parameters, nn.Module):
counts = sum((np.prod(v.size()) for v in model_or_parameters.parameters()))
elif isinstance(model_or_parameters, nn.Parameter):
counts = model_or_parameters.numel()
elif isinstance(model_or_parameters, (list, tuple)):
counts = sum((count_parameters(x, None) for x in model_or_parameters))
else:
counts = sum((np.prod(v.size()) for v in model_or_parameters))
if ((unit.lower() == 'kb') or (unit.lower() == 'k')):
counts /= 1000.0
elif ((unit.lower() == 'mb') or (unit.lower() == 'm')):
counts /= 1000000.0
elif ((unit.lower() == 'gb') or (unit.lower() == 'g')):
counts /= 1000000000.0
elif (unit is not None):
raise ValueError('Unknown unit: {:}'.format(unit))
return counts
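# Illustrative usage sketch (added): parameter counts for a tiny MLP.
_demo_mlp = nn.Sequential(nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 1))
print(count_parameters(_demo_mlp, unit=None))  # 241 raw parameters (10*20+20 + 20*1+1)
print(count_parameters(_demo_mlp, unit='kb'))  # 0.241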
|
def load_yaml(path):
if (not os.path.isfile(path)):
raise ValueError('{:} is not a file.'.format(path))
with open(path, 'r') as stream:
data = yaml.safe_load(stream)
return data
|
def get_model(config: Dict[(Text, Any)], **kwargs):
model_type = config.get('model_type', 'simple_mlp').lower()
if (model_type == 'simple_mlp'):
act_cls = super_name2activation[kwargs['act_cls']]
norm_cls = super_name2norm[kwargs['norm_cls']]
(mean, std) = (kwargs.get('mean', None), kwargs.get('std', None))
if ('hidden_dim' in kwargs):
hidden_dim1 = kwargs.get('hidden_dim')
hidden_dim2 = kwargs.get('hidden_dim')
else:
hidden_dim1 = kwargs.get('hidden_dim1', 200)
hidden_dim2 = kwargs.get('hidden_dim2', 100)
model = SuperSequential(norm_cls(mean=mean, std=std), SuperLinear(kwargs['input_dim'], hidden_dim1), act_cls(), SuperLinear(hidden_dim1, hidden_dim2), act_cls(), SuperLinear(hidden_dim2, kwargs['output_dim']))
elif (model_type == 'norm_mlp'):
act_cls = super_name2activation[kwargs['act_cls']]
norm_cls = super_name2norm[kwargs['norm_cls']]
(sub_layers, last_dim) = ([], kwargs['input_dim'])
for (i, hidden_dim) in enumerate(kwargs['hidden_dims']):
sub_layers.append(SuperLinear(last_dim, hidden_dim))
if (hidden_dim > 1):
sub_layers.append(norm_cls(hidden_dim, elementwise_affine=False))
sub_layers.append(act_cls())
last_dim = hidden_dim
sub_layers.append(SuperLinear(last_dim, kwargs['output_dim']))
model = SuperSequential(*sub_layers)
elif (model_type == 'dual_norm_mlp'):
act_cls = super_name2activation[kwargs['act_cls']]
norm_cls = super_name2norm[kwargs['norm_cls']]
(sub_layers, last_dim) = ([], kwargs['input_dim'])
for (i, hidden_dim) in enumerate(kwargs['hidden_dims']):
if (i > 0):
sub_layers.append(norm_cls(last_dim, elementwise_affine=False))
sub_layers.append(SuperLinear(last_dim, hidden_dim))
sub_layers.append(SuperDropout(kwargs['dropout']))
sub_layers.append(SuperLinear(hidden_dim, hidden_dim))
sub_layers.append(act_cls())
last_dim = hidden_dim
sub_layers.append(SuperLinear(last_dim, kwargs['output_dim']))
model = SuperSequential(*sub_layers)
elif (model_type == 'quant_transformer'):
raise NotImplementedError
else:
raise TypeError('Unknown model type: {:}'.format(model_type))
return model
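# Illustrative usage sketch (added): the exact keys of super_name2activation and
# super_name2norm are defined elsewhere; 'relu' and 'identity' below are assumptions.
_demo_cfg = {'model_type': 'simple_mlp'}
_demo_model = get_model(_demo_cfg, input_dim=32, output_dim=10, hidden_dim1=64, hidden_dim2=32, act_cls='relu', norm_cls='identity')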
|
def eval_cosmic_seed(seed):
model = xgb.XGBClassifier()
model.load_model(f'../xgboost_model_cosmic_{seed}.json')
logits = model.predict([x.reshape((- 1)) for x in imgs]).reshape(*imgs.shape)
logits = logits.flatten()
test_predictions = (logits.reshape((- 1), 1, 128, 128) > 0.5).flatten()
test_gts = masks.reshape((- 1), 1, 128, 128).flatten()
print(test_predictions.shape)
print(test_gts.shape)
test_predictions = test_predictions.astype(np.int32)
test_gts = test_gts.astype(np.int32)
auroc = metrics.roc_auc_score(test_gts, test_predictions)
return (1 - auroc)
|
def process_log(fname, kwd='test score: '):
res = 0.0
with open(fname, 'r') as f:
for line in f.readlines():
if (kwd in line):
res = float(line.split(kwd)[1].strip())
return res
|
def dm_to_numpy(loader, n=None):
print(len(loader))
data = [xy for xy in loader]
x = np.vstack([xy[0] for xy in data])
y = np.concatenate([xy[1] for xy in data])
x = x[:n]
y = y[:n]
n = x.shape[0]
x = x.reshape(n, (- 1))
y = y.reshape(n, (- 1))
print(x.shape, y.shape)
return (x, y)
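# Illustrative usage sketch (added): flatten a small DataLoader into numpy arrays.
from torch.utils.data import DataLoader, TensorDataset
_demo_ds = TensorDataset(torch.randn(100, 3, 8, 8), torch.randint(0, 10, (100,)))
(_demo_x, _demo_y) = dm_to_numpy(DataLoader(_demo_ds, batch_size=25))
# _demo_x has shape (100, 192) and _demo_y has shape (100, 1) after flattening.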
|
def main(task='cifar100', seed=7734, load_np=False, save_np=False, no_test=False):
model_params = {'random_state': seed, 'max_depth': 3, 'eta': 1, 'n_jobs': 10, 'gpu_id': 0, 'early_stopping_rounds': 5, 'tree_method': 'gpu_hist', 'subsample': 0.9, 'sampling_method': 'gradient_based'}
if (task == 'cifar100'):
dm = CIFAR100DataModule(batch_size=10000, root='datasets')
model_params = {'objective': 'multi:softmax', 'eval_metric': 'merror', 'num_class': 100, **model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = accuracy_score
elif (task == 'spherical'):
dm = SphericalDataModule(batch_size=10000, root='datasets')
model_params = {'objective': 'multi:softmax', 'eval_metric': 'merror', 'num_class': 100, **model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = accuracy_score
elif (task == 'ninapro'):
dm = NinaProDataModule(batch_size=10000, root='datasets')
model_params = {'objective': 'multi:softmax', 'eval_metric': 'merror', 'num_class': 18, **model_params}
print(model_params)
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = accuracy_score
elif (task == 'satellite'):
dm = SatelliteDataModule(batch_size=10000, root='datasets')
model_params = {'objective': 'multi:softmax', 'eval_metric': 'merror', 'num_class': 24, **model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = accuracy_score
elif (task == 'deepsea'):
dm = DeepSEADataModule(batch_size=10000, root='datasets')
model_params = {**model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = roc_auc_score
elif (task == 'ecg'):
dm = ECGDataModule(batch_size=10000, root='datasets')
model_params = {'objective': 'binary:logistic', **model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = partial(f1_score, average='macro')
elif (task == 'fsd50k'):
dm = FSD50KDataModule(batch_size=64, root='datasets')
model_params = {**model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
elif (task == 'darcyflow'):
dm = DarcyFlowDataModule(batch_size=100, root='datasets')
model_params = {**model_params}
fit_params = {'verbose': True}
model = xgb.XGBRegressor(**model_params)
eval_metric = darcy_utils.LpLoss(size_average=False)
elif (task == 'cosmic'):
dm = CosmicDataModule(batch_size=100, root='datasets')
model_params = {**model_params}
fit_params = {'verbose': True}
model = xgb.XGBClassifier(**model_params)
eval_metric = None
elif (task == 'psicov'):
dm = PSICOVDataModule(batch_size=100, root='datasets')
model_params = {**model_params}
model_params['tree_method'] = 'hist'
model_params['sampling_method'] = 'uniform'
fit_params = {'verbose': True}
model = xgb.XGBRegressor(**model_params)
eval_metric = None
else:
raise NotImplementedError
if load_np:
with open(f'{task}_train.npy', 'rb') as f:
x_train = np.load(f)
y_train = np.load(f)
r = np.random.permutation(x_train.shape[0])
x_train = x_train[r]
y_train = y_train[r]
with open(f'{task}_valid.npy', 'rb') as f:
x_valid = np.load(f)
y_valid = np.load(f)
with open(f'{task}_test.npy', 'rb') as f:
x_test = np.load(f)
y_test = np.load(f)
else:
dm.setup(stage=None)
(x_train, y_train) = utils.dm_to_numpy(dm.train_dataloader())
(x_valid, y_valid) = utils.dm_to_numpy(dm.val_dataloader())
(x_test, y_test) = utils.dm_to_numpy(dm.test_dataloader())
if save_np:
with open(f'{task}_train.npy', 'wb') as f:
np.save(f, x_train)
np.save(f, y_train)
with open(f'{task}_valid.npy', 'wb') as f:
np.save(f, x_valid)
np.save(f, y_valid)
with open(f'{task}_test.npy', 'wb') as f:
np.save(f, x_test)
np.save(f, y_test)
model.fit(x_train, y_train, eval_set=[(x_train, y_train), (x_valid, y_valid)], **fit_params)
model.save_model(f'xgboost_model_{task}_{seed}.json')
if (not no_test):
if ((task != 'fsd50k') and (task != 'darcyflow')):
y_test_preds = model.predict(x_test)
test_score = eval_metric(y_test, y_test_preds)
print(f'test score: {test_score}')
elif (task == 'fsd50k'):
y_test_preds = model.predict(x_test)
y_test = torch.tensor(np.array(y_test))
y_test_preds = torch.tensor(np.array(y_test_preds))
eval_metric = tm.AveragePrecision(pos_label=1, average='macro')
test_score = eval_metric(y_test, y_test_preds)
print(f'test score: {test_score}')
elif (task == 'darcyflow'):
y_test_preds = model.predict(x_test)
y_test = torch.tensor(y_test)
y_test_preds = torch.tensor(y_test_preds)
test_score = (eval_metric(y_test, y_test_preds) / 100.0)
print(f'test score: {test_score}')
|
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True, input_constructor=None, ost=sys.stdout):
assert (type(input_res) is tuple)
assert (len(input_res) >= 2)
flops_model = add_flops_counting_methods(model)
flops_model.eval().start_flops_count()
if input_constructor:
input = input_constructor(input_res)
_ = flops_model(**input)
else:
batch = torch.ones(()).new_empty((1, *input_res), dtype=next(flops_model.parameters()).dtype, device=next(flops_model.parameters()).device)
flops_model(batch)
if print_per_layer_stat:
print_model_with_flops(flops_model, ost=ost)
flops_count = flops_model.compute_average_flops_cost()
params_count = get_model_parameters_number(flops_model)
flops_model.stop_flops_count()
if as_strings:
return (flops_to_string(flops_count), params_to_string(params_count))
return (flops_count, params_count)
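# Illustrative usage sketch (added): FLOPs/params for a small ConvNet; assumes the
# SUPPORTED_TYPES / *_TYPES tuples used by the counter hooks are defined elsewhere.
_demo_cnn = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 10))
(_demo_flops, _demo_params) = get_model_complexity_info(_demo_cnn, (3, 32, 32), print_per_layer_stat=False)
print(_demo_flops, _demo_params)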
|
def flops_to_string(flops, units='GMac', precision=2):
if (units is None):
if ((flops // (10 ** 9)) > 0):
return (str(round((flops / (10.0 ** 9)), precision)) + ' GMac')
elif ((flops // (10 ** 6)) > 0):
return (str(round((flops / (10.0 ** 6)), precision)) + ' MMac')
elif ((flops // (10 ** 3)) > 0):
return (str(round((flops / (10.0 ** 3)), precision)) + ' KMac')
else:
return (str(flops) + ' Mac')
elif (units == 'GMac'):
return ((str(round((flops / (10.0 ** 9)), precision)) + ' ') + units)
elif (units == 'MMac'):
return ((str(round((flops / (10.0 ** 6)), precision)) + ' ') + units)
elif (units == 'KMac'):
return ((str(round((flops / (10.0 ** 3)), precision)) + ' ') + units)
else:
return (str(flops) + ' Mac')
|
def params_to_string(params_num):
"converting number to string\n\n :param float params_num: number\n :returns str: number\n\n >>> params_to_string(1e9)\n '1000.0 M'\n >>> params_to_string(2e5)\n '200.0 k'\n >>> params_to_string(3e-9)\n '3e-09'\n "
if ((params_num // (10 ** 6)) > 0):
return (str(round((params_num / (10 ** 6)), 2)) + ' M')
elif (params_num // (10 ** 3)):
return (str(round((params_num / (10 ** 3)), 2)) + ' k')
else:
return str(params_num)
|
def print_model_with_flops(model, units='GMac', precision=3, ost=sys.stdout):
total_flops = model.compute_average_flops_cost()
def accumulate_flops(self):
if is_supported_instance(self):
return (self.__flops__ / model.__batch_counter__)
else:
sum = 0
for m in self.children():
sum += m.accumulate_flops()
return sum
def flops_repr(self):
accumulated_flops_cost = self.accumulate_flops()
return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} MACs'.format((accumulated_flops_cost / total_flops)), self.original_extra_repr()])
def add_extra_repr(m):
m.accumulate_flops = accumulate_flops.__get__(m)
flops_extra_repr = flops_repr.__get__(m)
if (m.extra_repr != flops_extra_repr):
m.original_extra_repr = m.extra_repr
m.extra_repr = flops_extra_repr
assert (m.extra_repr != m.original_extra_repr)
def del_extra_repr(m):
if hasattr(m, 'original_extra_repr'):
m.extra_repr = m.original_extra_repr
del m.original_extra_repr
if hasattr(m, 'accumulate_flops'):
del m.accumulate_flops
model.apply(add_extra_repr)
print(model, file=ost)
model.apply(del_extra_repr)
|
def get_model_parameters_number(model):
params_num = sum((p.numel() for p in model.parameters() if p.requires_grad))
return params_num
|
def add_flops_counting_methods(net_main_module):
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
|
def compute_average_flops_cost(self):
'\n A method that will be available after add_flops_counting_methods() is\n called on a desired net object.\n Returns current mean flops consumption per image.\n '
batches_count = self.__batch_counter__
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return (flops_sum / batches_count)
|
def start_flops_count(self):
'\n A method that will be available after add_flops_counting_methods() is\n called on a desired net object.\n Activates the computation of mean flops consumption per image.\n Call it before you run the network.\n '
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
|
def stop_flops_count(self):
'\n A method that will be available after add_flops_counting_methods() is\n called on a desired net object.\n Stops computing the mean flops consumption per image.\n Call whenever you want to pause the computation.\n '
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
|
def reset_flops_count(self):
'\n A method that will be available after add_flops_counting_methods() is\n called on a desired net object.\n Resets statistics computed so far.\n '
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
|
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
|
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
|
def is_supported_instance(module):
if isinstance(module, SUPPORTED_TYPES):
return True
else:
return False
|
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
|
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += int(output_elements_count)
|
def relu_flops_counter_hook(module, input, output):
active_elements_count = output.numel()
module.__flops__ += int(active_elements_count)
|
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += int(((batch_size * input.shape[1]) * output.shape[1]))
|
def pool_flops_counter_hook(module, input, output):
input = input[0]
module.__flops__ += int(np.prod(input.shape))
|
def bn_flops_counter_hook(module, input, output):
input = input[0]
batch_flops = np.prod(input.shape)
if module.affine:
batch_flops *= 2
module.__flops__ += int(batch_flops)
|
def deconv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.shape[0]
(input_height, input_width) = input.shape[2:]
(kernel_height, kernel_width) = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = (out_channels // groups)
conv_per_position_flops = (((kernel_height * kernel_width) * in_channels) * filters_per_channel)
active_elements_count = ((batch_size * input_height) * input_width)
overall_conv_flops = (conv_per_position_flops * active_elements_count)
bias_flops = 0
if (conv_module.bias is not None):
(output_height, output_width) = output.shape[2:]
bias_flops = (((out_channels * batch_size) * output_height) * output_width)
overall_flops = (overall_conv_flops + bias_flops)
conv_module.__flops__ += int(overall_flops)
|
def conv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.shape[0]
output_dims = list(output.shape[2:])
kernel_dims = list(conv_module.kernel_size)
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
groups = conv_module.groups
filters_per_channel = (out_channels // groups)
conv_per_position_flops = ((np.prod(kernel_dims) * in_channels) * filters_per_channel)
active_elements_count = (batch_size * np.prod(output_dims))
if (conv_module.__mask__ is not None):
(output_height, output_width) = output.shape[2:]
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = (conv_per_position_flops * active_elements_count)
bias_flops = 0
if (conv_module.bias is not None):
bias_flops = (out_channels * active_elements_count)
overall_flops = (overall_conv_flops + bias_flops)
conv_module.__flops__ += int(overall_flops)
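# Worked example (added, illustrative) for nn.Conv2d(3, 8, kernel_size=3, padding=1)
# on a (1, 3, 32, 32) input, as counted by the hook above:
#   conv_per_position_flops = 3*3 * 3 * (8 // 1) = 216
#   active_elements_count   = 1 * 32 * 32 = 1024
#   overall_conv_flops      = 216 * 1024 = 221184 MACs
# A bias term adds out_channels * active_elements_count = 8 * 1024 = 8192 more.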
|
def batch_counter_hook(module, input, output):
batch_size = 1
if (len(input) > 0):
input = input[0]
batch_size = len(input)
else:
print('Warning! No positional inputs found for a module, assuming batch size is 1.')
module.__batch_counter__ += batch_size
|
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
|
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
|
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
|
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
|
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, CONV_TYPES):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, RELU_TYPES):
handle = module.register_forward_hook(relu_flops_counter_hook)
elif isinstance(module, LINEAR_TYPES):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, POOLING_TYPES):
handle = module.register_forward_hook(pool_flops_counter_hook)
elif isinstance(module, BN_TYPES):
handle = module.register_forward_hook(bn_flops_counter_hook)
elif isinstance(module, UPSAMPLE_TYPES):
handle = module.register_forward_hook(upsample_flops_counter_hook)
elif isinstance(module, DECONV_TYPES):
handle = module.register_forward_hook(deconv_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
|
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
|
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
|
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
'Quantize an array of (-inf, inf) to [0, levels-1].\n\n Args:\n arr (ndarray): Input array.\n min_val (scalar): Minimum value to be clipped.\n max_val (scalar): Maximum value to be clipped.\n levels (int): Quantization levels.\n dtype (np.type): The type of the quantized array.\n\n Returns:\n tuple: Quantized array.\n '
if (not (isinstance(levels, int) and (levels > 1))):
raise ValueError('levels must be a positive integer, but got {}'.format(levels))
if (min_val >= max_val):
raise ValueError('min_val ({}) must be smaller than max_val ({})'.format(min_val, max_val))
arr = (np.clip(arr, min_val, max_val) - min_val)
quantized_arr = np.minimum(np.floor(((levels * arr) / (max_val - min_val))).astype(dtype), (levels - 1))
return quantized_arr
|
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
'Dequantize an array.\n\n Args:\n arr (ndarray): Input array.\n min_val (scalar): Minimum value to be clipped.\n max_val (scalar): Maximum value to be clipped.\n levels (int): Quantization levels.\n dtype (np.type): The type of the dequantized array.\n\n Returns:\n tuple: Dequantized array.\n '
if (not (isinstance(levels, int) and (levels > 1))):
raise ValueError('levels must be a positive integer, but got {}'.format(levels))
if (min_val >= max_val):
raise ValueError('min_val ({}) must be smaller than max_val ({})'.format(min_val, max_val))
dequantized_arr = ((((arr + 0.5).astype(dtype) * (max_val - min_val)) / levels) + min_val)
return dequantized_arr
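# Illustrative round trip (added): quantize to 4 levels on [0, 1], then map back to bin centers.
_demo_vals = np.array([-0.2, 0.1, 0.4, 0.9, 1.3])
_demo_q = quantize(_demo_vals, 0.0, 1.0, levels=4)  # -> [0, 0, 1, 3, 3]
_demo_d = dequantize(_demo_q, 0.0, 1.0, levels=4)  # -> [0.125, 0.125, 0.375, 0.875, 0.875]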
|
class AlexNet(nn.Module):
'AlexNet backbone.\n\n Args:\n num_classes (int): number of classes for classification.\n '
def __init__(self, num_classes=(- 1)):
super(AlexNet, self).__init__()
self.num_classes = num_classes
self.features = nn.Sequential(nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(64, 192, kernel_size=5, padding=2), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2), nn.Conv2d(192, 384, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=3, stride=2))
if (self.num_classes > 0):
self.classifier = nn.Sequential(nn.Dropout(), nn.Linear(((256 * 6) * 6), 4096), nn.ReLU(inplace=True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(inplace=True), nn.Linear(4096, num_classes))
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif (pretrained is None):
pass
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.features(x)
if (self.num_classes > 0):
x = x.view(x.size(0), ((256 * 6) * 6))
x = self.classifier(x)
return x
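# Illustrative usage sketch (added): the classifier expects 256*6*6 features, i.e. 224x224 inputs.
_demo_alex = AlexNet(num_classes=10)
_demo_logits = _demo_alex(torch.randn(2, 3, 224, 224))  # -> shape (2, 10)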
|
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False)
|
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride, dilation)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
assert (not with_cp)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False):
'Bottleneck block.\n\n If style is "pytorch", the stride-two layer is the 3x3 conv layer,\n if it is "caffe", the stride-two layer is the first 1x1 conv layer.\n '
super(Bottleneck, self).__init__()
assert (style in ['pytorch', 'caffe'])
if (style == 'pytorch'):
conv1_stride = 1
conv2_stride = stride
else:
conv1_stride = stride
conv2_stride = 1
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, (planes * self.expansion), kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d((planes * self.expansion))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
def forward(self, x):
def _inner_forward(x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
|
def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False):
downsample = None
if ((stride != 1) or (inplanes != (planes * block.expansion))):
downsample = nn.Sequential(nn.Conv2d(inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
layers = []
layers.append(block(inplanes, planes, stride, dilation, downsample, style=style, with_cp=with_cp))
inplanes = (planes * block.expansion)
for i in range(1, blocks):
layers.append(block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
return nn.Sequential(*layers)
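# Illustrative usage sketch (added): a stride-2 stage of three Bottleneck blocks
# (128 base planes, as in a ResNet-50 third stage).
_demo_stage = make_res_layer(Bottleneck, inplanes=256, planes=128, blocks=3, stride=2)
_demo_feat = _demo_stage(torch.randn(1, 256, 56, 56))  # -> (1, 512, 28, 28)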
|
class ResNet(nn.Module):
'ResNet backbone.\n\n Args:\n depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n num_stages (int): Resnet stages, normally 4.\n strides (Sequence[int]): Strides of the first block of each stage.\n dilations (Sequence[int]): Dilation of each stage.\n out_indices (Sequence[int]): Output from which stages.\n style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two\n layer is the 3x3 conv layer, otherwise the stride-two layer is\n the first 1x1 conv layer.\n frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n not freezing any parameters.\n bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze\n running stats (mean and var).\n bn_frozen (bool): Whether to freeze weight and bias of BN layers.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n memory while slowing down the training speed.\n '
arch_settings = {18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3))}
def __init__(self, depth, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=(- 1), bn_eval=True, bn_frozen=False, with_cp=False):
super(ResNet, self).__init__()
if (depth not in self.arch_settings):
raise KeyError('invalid depth {} for resnet'.format(depth))
assert ((num_stages >= 1) and (num_stages <= 4))
(block, stage_blocks) = self.arch_settings[depth]
stage_blocks = stage_blocks[:num_stages]
assert (len(strides) == len(dilations) == num_stages)
assert (max(out_indices) < num_stages)
self.out_indices = out_indices
self.style = style
self.frozen_stages = frozen_stages
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_cp = with_cp
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.res_layers = []
for (i, num_blocks) in enumerate(stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = (64 * (2 ** i))
res_layer = make_res_layer(block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp)
self.inplanes = (planes * block.expansion)
layer_name = 'layer{}'.format((i + 1))
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = ((block.expansion * 64) * (2 ** (len(stage_blocks) - 1)))
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif (pretrained is None):
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outs = []
for (i, layer_name) in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
if (i in self.out_indices):
outs.append(x)
if (len(outs) == 1):
return outs[0]
else:
return tuple(outs)
def train(self, mode=True):
super(ResNet, self).train(mode)
if self.bn_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
if (mode and (self.frozen_stages >= 0)):
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
self.bn1.eval()
self.bn1.weight.requires_grad = False
self.bn1.bias.requires_grad = False
for i in range(1, (self.frozen_stages + 1)):
mod = getattr(self, 'layer{}'.format(i))
mod.eval()
for param in mod.parameters():
param.requires_grad = False
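# Illustrative usage sketch (added): a ResNet-50 backbone returning the last two stage outputs.
_demo_backbone = ResNet(depth=50, out_indices=(2, 3))
(_demo_c4, _demo_c5) = _demo_backbone(torch.randn(1, 3, 224, 224))
# _demo_c4: (1, 1024, 14, 14), _demo_c5: (1, 2048, 7, 7)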
|
def conv3x3(in_planes, out_planes, dilation=1):
'3x3 convolution with padding'
return nn.Conv2d(in_planes, out_planes, kernel_size=3, padding=dilation, dilation=dilation)
|