# Standard-library and third-party imports inferred from the code below.
# Project-local symbols used but not defined in this file (e.g. MLPLayer,
# GatingLayer, FeatureSelector, PartialLogLikelihood, calc_concordance_index,
# state_dict, load_state_dict, ResNetBasicblock, SearchCell, SearchSpaceNames,
# API, ig_utils, args, n_classes) are assumed to come from sibling modules of
# this repository.
import collections
import collections.abc
import copy
import fnmatch
import hashlib
import inspect
import itertools
import json
import logging
import math
import os
import os.path as osp
import pickle
import re
import sys
import time
from collections import defaultdict
from copy import deepcopy

import h5py
import numpy as np
import rdflib
import six
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.datasets as dset
from PIL import Image
from scipy.stats import norm
from sklearn.datasets import make_moons
from torch.autograd import Variable
from torch.optim.optimizer import Optimizer, required
from torch.utils import data
from torch.utils.data import DataLoader, Dataset

logger = logging.getLogger(__name__)

# Types the as_* converters below pass through unchanged (an assumption;
# string types are the obvious candidates given how they are used).
SKIP_TYPES = six.string_types
class NameMatcher(object):
def __init__(self, rules=None):
if (rules is None):
self._rules = []
elif isinstance(rules, dict):
self._rules = list(rules.items())
else:
            assert isinstance(rules, collections.abc.Iterable)
self._rules = list(rules)
self._map = {}
self._compiled_rules = []
self._compiled = False
self._matched = []
self._unused = set()
self._last_stat = None
@property
def rules(self):
return self._rules
def map(self):
assert self._compiled
return self._map
def append_rule(self, rule):
self._rules.append(tuple(rule))
def insert_rule(self, index, rule):
self._rules.insert(index, rule)
    def pop_rule(self, index=None):
        if index is None:
            return self._rules.pop()
        return self._rules.pop(index)
def begin(self, *, force_compile=False):
if ((not self._compiled) or force_compile):
self.compile()
self._matched = []
self._unused = set(range(len(self._compiled_rules)))
def end(self):
return (self._matched, {self._compiled_rules[i][0] for i in self._unused})
def match(self, k):
for (i, (r, p, v)) in enumerate(self._compiled_rules):
if p.match(k):
if (i in self._unused):
self._unused.remove(i)
self._matched.append((k, r, v))
return v
return None
def compile(self):
self._map = dict()
self._compiled_rules = []
for (r, v) in self._rules:
self._map[r] = v
p = fnmatch.translate(r)
p = re.compile(p, flags=re.IGNORECASE)
self._compiled_rules.append((r, p, v))
self._compiled = True
def __enter__(self):
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._last_stat = self.end()
def get_last_stat(self):
return self._last_stat
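# --- Illustrative usage (added sketch, not part of the original source) ---
# NameMatcher maps glob-style rules to values; used as a context manager it
# also records which rules fired and which were never used.
def _demo_name_matcher():
    matcher = NameMatcher([('conv*', 0.1), ('fc*', 0.01)])
    with matcher as m:
        assert m.match('conv1.weight') == 0.1
        assert m.match('bn1.weight') is None
    matched, unused = m.get_last_stat()
    print(matched)  # [('conv1.weight', 'conv*', 0.1)]
    print(unused)   # {'fc*'}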
class IENameMatcher(object):
def __init__(self, include=None, exclude=None):
if (include is None):
self.include = None
else:
self.include = NameMatcher([(i, True) for i in include])
if (exclude is None):
self.exclude = None
else:
self.exclude = NameMatcher([(e, True) for e in exclude])
self._last_stat = None
def begin(self):
if (self.include is not None):
self.include.begin()
if (self.exclude is not None):
self.exclude.begin()
self._last_stat = (set(), set())
def end(self):
if (self.include is not None):
self.include.end()
if (self.exclude is not None):
self.exclude.end()
if (len(self._last_stat[0]) < len(self._last_stat[1])):
self._last_stat = ('included', self._last_stat[0])
else:
self._last_stat = ('excluded', self._last_stat[1])
def match(self, k):
if (self.include is None):
ret = True
else:
ret = bool(self.include.match(k))
if (self.exclude is not None):
ret = (ret and (not bool(self.exclude.match(k))))
if ret:
self._last_stat[0].add(k)
else:
self._last_stat[1].add(k)
return ret
def __enter__(self):
self.begin()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.end()
def get_last_stat(self):
return self._last_stat
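# Illustrative usage sketch (added for clarity): IENameMatcher combines an
# include list and an exclude list of glob patterns; a name passes if it
# matches an include rule and no exclude rule.
def _demo_ie_name_matcher():
    matcher = IENameMatcher(include=['layer*'], exclude=['*bias'])
    with matcher:
        assert matcher.match('layer1.weight') is True
        assert matcher.match('layer1.bias') is False
    print(matcher.get_last_stat())  # ('excluded', {'layer1.bias'})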
def map_exec(func, *iterables):
return list(map(func, *iterables))
class AverageMeter(object):
'Computes and stores the average and current value'
val = 0
avg = 0
sum = 0
count = 0
tot_count = 0
def __init__(self):
self.reset()
self.tot_count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += (val * n)
self.count += n
self.tot_count += n
self.avg = (self.sum / self.count)
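# Quick sanity check for AverageMeter (illustrative addition): update(val, n)
# folds in a batch of n observations whose mean is `val`.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(2.0)       # one observation
    meter.update(4.0, n=3)  # a batch of three
    print(meter.avg, meter.count, meter.tot_count)  # 3.5 4 4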
class GroupMeters(object):
def __init__(self):
self._meters = collections.defaultdict(AverageMeter)
def reset(self):
map_exec(AverageMeter.reset, self._meters.values())
def update(self, updates=None, value=None, n=1, **kwargs):
        """
        Example:
            >>> meters.update(key, value)
            >>> meters.update({key1: value1, key2: value2})
            >>> meters.update(key1=value1, key2=value2)
        """
        if updates is None:
            updates = {}
        elif value is not None:
            # Here `updates` is a single key: the update(key, value) form.
            updates = {updates: value}
updates.update(kwargs)
for (k, v) in updates.items():
self._meters[k].update(v, n=n)
def __getitem__(self, name):
return self._meters[name]
def items(self):
return self._meters.items()
@property
def sum(self):
return {k: m.sum for (k, m) in self._meters.items() if (m.count > 0)}
@property
def avg(self):
return {k: m.avg for (k, m) in self._meters.items() if (m.count > 0)}
@property
def val(self):
return {k: m.val for (k, m) in self._meters.items() if (m.count > 0)}
def format(self, caption, values, kv_format, glue):
meters_kv = self._canonize_values(values)
log_str = [caption]
log_str.extend(itertools.starmap(kv_format.format, sorted(meters_kv.items())))
return glue.join(log_str)
    def format_simple(self, caption, values='avg', compressed=True):
        if compressed:
            return self.format(caption, values, '{}={:.4f}', ' ')
        else:
            return self.format(caption, values, '\t{} = {:.4f}', '\n')
def dump(self, filename, values='avg'):
meters_kv = self._canonize_values(values)
with open(filename, 'a') as f:
f.write(json.dumps(meters_kv, cls=JsonObjectEncoder, sort_keys=True, indent=4, separators=(',', ': ')))
f.write('\n')
def _canonize_values(self, values):
if isinstance(values, six.string_types):
assert (values in ('avg', 'val', 'sum'))
meters_kv = getattr(self, values)
else:
meters_kv = values
return meters_kv
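# Illustrative usage of GroupMeters (added sketch): it keeps one AverageMeter
# per key and supports the three update signatures shown in its docstring.
def _demo_group_meters():
    meters = GroupMeters()
    meters.update(loss=0.5)
    meters.update({'loss': 0.3, 'acc': 0.9})
    print(meters.avg)                     # {'acc': 0.9, 'loss': 0.4}
    print(meters.format_simple('Step:'))  # Step: acc=0.9000 loss=0.4000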
class JsonObjectEncoder(json.JSONEncoder):
'Adapted from https://stackoverflow.com/a/35483750'
    def default(self, obj):
        if hasattr(obj, '__jsonify__'):
            json_object = obj.__jsonify__()
            if isinstance(json_object, six.string_types):
                return json_object
            return self.encode(json_object)
        if hasattr(obj, '__dict__'):
            # Serialize plain data attributes only, skipping dunders and callables.
            return dict(
                (key, value) for (key, value) in inspect.getmembers(obj)
                if not key.startswith('__')
                and not inspect.isabstract(value)
                and not inspect.isbuiltin(value)
                and not inspect.isfunction(value)
                and not inspect.isgenerator(value)
                and not inspect.isgeneratorfunction(value)
                and not inspect.ismethod(value)
                and not inspect.ismethoddescriptor(value)
                and not inspect.isroutine(value)
            )
        raise TypeError("Object of type '%s' is not JSON serializable." % obj.__class__.__name__)
class ModelIOKeysMixin(object):
def _get_input(self, feed_dict):
return feed_dict['input']
def _get_label(self, feed_dict):
return feed_dict['label']
def _get_covariate(self, feed_dict):
'For cox'
return feed_dict['X']
def _get_fail_indicator(self, feed_dict):
'For cox'
return feed_dict['E'].reshape((- 1), 1)
def _get_failure_time(self, feed_dict):
'For cox'
return feed_dict['T']
def _compose_output(self, value):
return dict(pred=value)
class MLPModel(MLPLayer):
def freeze_weights(self):
for (name, p) in self.named_parameters():
if (name != 'mu'):
p.requires_grad = False
def get_gates(self, mode):
if (mode == 'raw'):
return self.mu.detach().cpu().numpy()
elif (mode == 'prob'):
return np.minimum(1.0, np.maximum(0.0, (self.mu.detach().cpu().numpy() + 0.5)))
else:
raise NotImplementedError()
class L1RegressionModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, output_dim, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.loss = nn.MSELoss()
self.lam = lam
def forward(self, feed_dict):
pred = super().forward(self._get_input(feed_dict))
if self.training:
loss = self.loss(pred, self._get_label(feed_dict))
reg = torch.mean(torch.abs(self.mlp[0][0].weight))
total_loss = (loss + (self.lam * reg))
return (total_loss, dict(), dict())
else:
return self._compose_output(pred)
class L1GateRegressionModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, output_dim, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
        self.GatingLayer = GatingLayer(input_dim, device)
        self.reg = self.GatingLayer.regularizer
        self.mu = self.GatingLayer.mu
self.loss = nn.MSELoss()
self.lam = lam
def forward(self, feed_dict):
        x = self.GatingLayer(self._get_input(feed_dict))
pred = super().forward(x)
if self.training:
loss = self.loss(pred, self._get_label(feed_dict))
reg = torch.mean(self.reg(self.mu))
total_loss = (loss + (self.lam * reg))
return (total_loss, dict(), dict())
else:
return self._compose_output(pred)
class SoftThreshRegressionModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, output_dim, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.loss = nn.MSELoss()
self.lam = lam
def prox_plus(self, w):
        """Projection onto non-negative numbers."""
below = (w < 0)
w[below] = 0
return w
def prox_op(self, w):
return (torch.sign(w) * self.prox_plus((torch.abs(w) - self.lam)))
def forward(self, feed_dict):
pred = super().forward(self._get_input(feed_dict))
if self.training:
loss = self.loss(pred, self._get_label(feed_dict))
total_loss = loss
return (total_loss, dict(), dict())
else:
return self._compose_output(pred)
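# Numeric sanity check of the soft-thresholding proximal operator above
# (illustrative addition): prox_lam(w) = sign(w) * max(|w| - lam, 0), which
# zeroes every coordinate whose magnitude falls below lam.
def _demo_soft_threshold(lam=0.1):
    w = torch.tensor([-0.25, -0.05, 0.0, 0.05, 0.25])
    shrunk = torch.sign(w) * torch.clamp(torch.abs(w) - lam, min=0.0)
    print(shrunk)  # -> [-0.15, 0., 0., 0., 0.15] (small entries zeroed)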
class STGRegressionModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, output_dim, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.FeatureSelector = FeatureSelector(input_dim, sigma, device)
self.loss = nn.MSELoss()
self.reg = self.FeatureSelector.regularizer
self.lam = lam
self.mu = self.FeatureSelector.mu
self.sigma = self.FeatureSelector.sigma
def forward(self, feed_dict):
x = self.FeatureSelector(self._get_input(feed_dict))
pred = super().forward(x)
if self.training:
loss = self.loss(pred, self._get_label(feed_dict))
reg = torch.mean(self.reg(((self.mu + 0.5) / self.sigma)))
total_loss = (loss + (self.lam * reg))
return (total_loss, dict(), dict())
else:
return self._compose_output(pred)
class STGClassificationModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, nr_classes, hidden_dims, device, batch_norm=None, dropout=None, activation='relu', sigma=1.0, lam=0.1):
super().__init__(input_dim, nr_classes, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.FeatureSelector = FeatureSelector(input_dim, sigma, device)
        self.softmax = nn.Softmax(dim=1)
self.loss = nn.CrossEntropyLoss()
self.reg = self.FeatureSelector.regularizer
self.lam = lam
self.mu = self.FeatureSelector.mu
self.sigma = self.FeatureSelector.sigma
def forward(self, feed_dict):
x = self.FeatureSelector(self._get_input(feed_dict))
logits = super().forward(x)
if self.training:
loss = self.loss(logits, self._get_label(feed_dict))
reg = torch.mean(self.reg(((self.mu + 0.5) / self.sigma)))
total_loss = (loss + (self.lam * reg))
return (total_loss, dict(), dict())
else:
return self._compose_output(logits)
def _compose_output(self, logits):
value = self.softmax(logits)
(_, pred) = value.max(dim=1)
return dict(prob=value, pred=pred, logits=logits)
class STGCoxModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, nr_classes, hidden_dims, device, lam, batch_norm=None, dropout=None, activation='relu', sigma=1.0):
super().__init__(input_dim, nr_classes, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.FeatureSelector = FeatureSelector(input_dim, sigma, device)
self.loss = PartialLogLikelihood
self.noties = 'noties'
self.lam = lam
self.reg = self.FeatureSelector.regularizer
self.mu = self.FeatureSelector.mu
self.sigma = self.FeatureSelector.sigma
def forward(self, feed_dict):
x = self.FeatureSelector(self._get_covariate(feed_dict))
logits = super().forward(x)
if self.training:
loss = self.loss(logits, self._get_fail_indicator(feed_dict), self.noties)
reg = torch.sum(self.reg(((self.mu + 0.5) / self.sigma)))
total_loss = (loss + reg)
return (total_loss, logits, dict())
else:
return self._compose_output(logits)
def _compose_output(self, logits):
return dict(logits=logits)
class MLPCoxModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, nr_classes, hidden_dims, batch_norm=None, dropout=None, activation='relu'):
super().__init__(input_dim, nr_classes, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.loss = PartialLogLikelihood
self.noties = 'noties'
def forward(self, feed_dict):
logits = super().forward(self._get_covariate(feed_dict))
if self.training:
loss = self.loss(logits, self._get_fail_indicator(feed_dict), self.noties)
return (loss, logits, dict())
else:
return self._compose_output(logits)
def _compose_output(self, logits):
return dict(logits=logits)
class MLPRegressionModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, output_dim, hidden_dims, batch_norm=None, dropout=None, activation='relu'):
super().__init__(input_dim, output_dim, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
self.loss = nn.MSELoss()
def forward(self, feed_dict):
pred = super().forward(self._get_input(feed_dict))
if self.training:
loss = self.loss(pred, self._get_label(feed_dict))
return (loss, dict(), dict())
else:
return self._compose_output(pred)
class MLPClassificationModel(MLPModel, ModelIOKeysMixin):
def __init__(self, input_dim, nr_classes, hidden_dims, batch_norm=None, dropout=None, activation='relu'):
super().__init__(input_dim, nr_classes, hidden_dims, batch_norm=batch_norm, dropout=dropout, activation=activation)
        self.softmax = nn.Softmax(dim=1)
self.loss = nn.CrossEntropyLoss()
def forward(self, feed_dict):
logits = super().forward(self._get_input(feed_dict))
if self.training:
loss = self.loss(logits, self._get_label(feed_dict))
return (loss, dict(), dict())
else:
return self._compose_output(logits)
def _compose_output(self, logits):
value = self.softmax(logits)
(_, pred) = value.max(dim=1)
return dict(prob=value, pred=pred, logits=logits)
class LinearRegressionModel(MLPRegressionModel):
def __init__(self, input_dim, output_dim):
super().__init__(input_dim, output_dim, [])
class LinearClassificationModel(MLPClassificationModel):
def __init__(self, input_dim, nr_classes):
super().__init__(input_dim, nr_classes, [])
def _standard_truncnorm_sample(lower_bound, upper_bound, sample_shape=torch.Size()):
    """
    Implements the accept-reject algorithm for the doubly truncated standard
    normal distribution (Section 2.2, "Two-sided truncated normal
    distribution", in [1]).

    [1] Robert, Christian P. "Simulation of truncated normal variables."
        Statistics and Computing 5.2 (1995): 121-125.
        Available online: https://arxiv.org/abs/0907.4010

    Args:
        lower_bound (Tensor): lower bound for the standard normal
            distribution. Best kept greater than -4.0 for stable results.
        upper_bound (Tensor): upper bound for the standard normal
            distribution. Best kept smaller than 4.0 for stable results.
    """
x = torch.randn(sample_shape)
    done = torch.zeros(sample_shape, dtype=torch.bool)
while (not done.all()):
proposed_x = (lower_bound + (torch.rand(sample_shape) * (upper_bound - lower_bound)))
if (upper_bound * lower_bound).lt(0.0):
log_prob_accept = ((- 0.5) * (proposed_x ** 2))
elif (upper_bound < 0.0):
log_prob_accept = (0.5 * ((upper_bound ** 2) - (proposed_x ** 2)))
else:
assert lower_bound.gt(0.0)
log_prob_accept = (0.5 * ((lower_bound ** 2) - (proposed_x ** 2)))
        prob_accept = torch.exp(log_prob_accept).clamp_(0.0, 1.0)
        accept = torch.bernoulli(prob_accept).bool() & (~done)
        if accept.any():
            x[accept] = proposed_x[accept]
        done |= accept
return x
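# Illustrative check (added): draws from the truncated standard normal and
# verifies that every sample respects the requested bounds.
def _demo_truncnorm():
    lo, hi = torch.tensor(-1.0), torch.tensor(2.0)
    samples = _standard_truncnorm_sample(lo, hi, sample_shape=torch.Size([1000]))
    assert samples.min() >= lo and samples.max() <= hi
    print(samples.mean())  # slightly positive, since the window is asymmetric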
class STG(object):
def __init__(self, device, input_dim=784, output_dim=10, hidden_dims=[400, 200], activation='relu', sigma=0.5, lam=0.1, optimizer='Adam', learning_rate=1e-05, batch_size=100, freeze_onward=None, feature_selection=True, weight_decay=0.001, task_type='classification', report_maps=False, random_state=1, extra_args=None):
self.batch_size = batch_size
self.activation = activation
self.device = self.get_device(device)
self.report_maps = report_maps
self.task_type = task_type
self.extra_args = extra_args
self.freeze_onward = freeze_onward
self._model = self.build_model(input_dim, output_dim, hidden_dims, activation, sigma, lam, task_type, feature_selection)
self._model.apply(self.init_weights)
self._model = self._model.to(self.device)
self._optimizer = get_optimizer(optimizer, self._model, lr=learning_rate, weight_decay=weight_decay)
def get_device(self, device):
if (device == 'cpu'):
device = torch.device('cpu')
elif (device == 'cuda'):
args_cuda = torch.cuda.is_available()
device = torch.device(('cuda' if args_cuda else 'cpu'))
else:
raise NotImplementedError("Only 'cpu' or 'cuda' is a valid option.")
return device
def init_weights(self, m):
if isinstance(m, nn.Linear):
stddev = torch.tensor(0.1)
shape = m.weight.shape
m.weight = nn.Parameter(_standard_truncnorm_sample(lower_bound=((- 2) * stddev), upper_bound=(2 * stddev), sample_shape=shape))
torch.nn.init.zeros_(m.bias)
def build_model(self, input_dim, output_dim, hidden_dims, activation, sigma, lam, task_type, feature_selection):
if (task_type == 'classification'):
self.metric = nn.CrossEntropyLoss()
self.tensor_names = ('input', 'label')
if feature_selection:
return STGClassificationModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation, sigma=sigma, lam=lam)
else:
return MLPClassificationModel(input_dim, output_dim, hidden_dims, activation=activation)
elif (task_type == 'regression'):
self.metric = nn.MSELoss()
self.tensor_names = ('input', 'label')
if (self.extra_args is not None):
if (self.extra_args == 'l1-softthresh'):
return SoftThreshRegressionModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation)
elif (self.extra_args == 'l1-norm-reg'):
return L1RegressionModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation)
elif (self.extra_args == 'l1-gate'):
return L1GateRegressionModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation)
elif feature_selection:
return STGRegressionModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation, sigma=sigma, lam=lam)
else:
return MLPRegressionModel(input_dim, output_dim, hidden_dims, activation=activation)
elif (task_type == 'cox'):
self.metric = PartialLogLikelihood
self.tensor_names = ('X', 'E', 'T')
if feature_selection:
return STGCoxModel(input_dim, output_dim, hidden_dims, device=self.device, activation=activation, sigma=sigma, lam=lam)
else:
return MLPCoxModel(input_dim, output_dim, hidden_dims, activation=activation)
else:
raise NotImplementedError()
def train_step(self, feed_dict, meters=None):
assert self._model.training
(loss, logits, monitors) = self._model(feed_dict)
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
if (self.task_type == 'cox'):
ci = calc_concordance_index(logits.detach().cpu().numpy(), feed_dict['E'].detach().cpu().numpy(), feed_dict['T'].detach().cpu().numpy())
if (self.extra_args == 'l1-softthresh'):
self._model.mlp[0][0].weight.data = self._model.prox_op(self._model.mlp[0][0].weight)
loss = as_float(loss)
if (meters is not None):
meters.update(loss=loss)
if (self.task_type == 'cox'):
meters.update(CI=ci)
meters.update(monitors)
def get_dataloader(self, X, y, shuffle):
if (self.task_type == 'classification'):
data_loader = FastTensorDataLoader(torch.from_numpy(X).float().to(self.device), torch.from_numpy(y).long().to(self.device), tensor_names=self.tensor_names, batch_size=self.batch_size, shuffle=shuffle)
elif (self.task_type == 'regression'):
data_loader = FastTensorDataLoader(torch.from_numpy(X).float().to(self.device), torch.from_numpy(y).float().to(self.device), tensor_names=self.tensor_names, batch_size=self.batch_size, shuffle=shuffle)
elif (self.task_type == 'cox'):
assert isinstance(y, dict)
data_loader = FastTensorDataLoader(torch.from_numpy(X).float().to(self.device), torch.from_numpy(y['E']).float().to(self.device), torch.from_numpy(y['T']).float().to(self.device), tensor_names=self.tensor_names, batch_size=self.batch_size, shuffle=shuffle)
else:
raise NotImplementedError()
return data_loader
def fit(self, X, y, nr_epochs, valid_X=None, valid_y=None, verbose=True, meters=None, early_stop=None, print_interval=1, shuffle=False):
data_loader = self.get_dataloader(X, y, shuffle)
if (valid_X is not None):
val_data_loader = self.get_dataloader(valid_X, valid_y, shuffle)
else:
val_data_loader = None
self.train(data_loader, nr_epochs, val_data_loader, verbose, meters, early_stop, print_interval)
def evaluate(self, X, y):
        data_loader = self.get_dataloader(X, y, shuffle=False)
meters = GroupMeters()
self.validate(data_loader, self.metric, meters, mode='test')
print(meters.format_simple(''))
def predict(self, X, verbose=True):
dataset = SimpleDataset(torch.from_numpy(X).float().to(self.device))
data_loader = DataLoader(dataset, batch_size=X.shape[0], shuffle=False)
res = []
self._model.eval()
for feed_dict in data_loader:
            feed_dict = as_tensor(feed_dict)
with torch.no_grad():
output_dict = self._model(feed_dict)
output_dict_np = as_numpy(output_dict)
res.append(output_dict_np['pred'])
return np.concatenate(res, axis=0)
def train_epoch(self, data_loader, meters=None):
if (meters is None):
meters = GroupMeters()
self._model.train()
end = time.time()
for feed_dict in data_loader:
data_time = (time.time() - end)
end = time.time()
self.train_step(feed_dict, meters=meters)
step_time = (time.time() - end)
end = time.time()
return meters
def train(self, data_loader, nr_epochs, val_data_loader=None, verbose=True, meters=None, early_stop=None, print_interval=1):
if (meters is None):
meters = GroupMeters()
for epoch in range(1, (1 + nr_epochs)):
meters.reset()
if (epoch == self.freeze_onward):
self._model.freeze_weights()
self.train_epoch(data_loader, meters=meters)
if (verbose and ((epoch % print_interval) == 0)):
self.validate(val_data_loader, self.metric, meters)
caption = 'Epoch: {}:'.format(epoch)
print(meters.format_simple(caption))
if (early_stop is not None):
flag = early_stop(self._model)
if flag:
break
    def validate_step(self, feed_dict, metric, meters=None, mode='valid'):
        with torch.no_grad():
            pred = self._model(feed_dict)
            if self.task_type == 'classification':
                result = metric(pred['logits'], self._model._get_label(feed_dict))
            elif self.task_type == 'regression':
                result = metric(pred['pred'], self._model._get_label(feed_dict))
            elif self.task_type == 'cox':
                result = metric(pred['logits'], self._model._get_fail_indicator(feed_dict), 'noties')
                val_CI = calc_concordance_index(pred['logits'].detach().cpu().numpy(), feed_dict['E'].detach().cpu().numpy(), feed_dict['T'].detach().cpu().numpy())
            else:
                raise NotImplementedError()
            result = as_float(result)
        if meters is not None:
            meters.update({(mode + '_loss'): result})
            if self.task_type == 'cox':
                meters.update({(mode + '_CI'): val_CI})
def validate(self, data_loader, metric, meters=None, mode='valid'):
if (meters is None):
meters = GroupMeters()
self._model.eval()
end = time.time()
for fd in data_loader:
data_time = (time.time() - end)
end = time.time()
self.validate_step(fd, metric, meters=meters, mode=mode)
step_time = (time.time() - end)
end = time.time()
return meters.avg
def save_checkpoint(self, filename, extra=None):
model = self._model
state = {'model': state_dict(model, cpu=True), 'optimizer': as_cpu(self._optimizer.state_dict()), 'extra': extra}
try:
torch.save(state, filename)
logger.info('Checkpoint saved: "{}".'.format(filename))
except Exception:
            logger.exception('Error occurred when dumping checkpoint "{}".'.format(filename))
def load_checkpoint(self, filename):
if osp.isfile(filename):
model = self._model
if isinstance(model, nn.DataParallel):
model = model.module
try:
checkpoint = torch.load(filename)
load_state_dict(model, checkpoint['model'])
self._optimizer.load_state_dict(checkpoint['optimizer'])
logger.critical('Checkpoint loaded: {}.'.format(filename))
return checkpoint['extra']
            except Exception:
                logger.exception('Error occurred when loading checkpoint "{}".'.format(filename))
        else:
            logger.warning('No checkpoint found at the specified path: "{}".'.format(filename))
return None
def get_gates(self, mode):
return self._model.get_gates(mode)
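# End-to-end usage sketch for STG (illustrative addition; it assumes the
# project-local FeatureSelector/MLPLayer modules referenced at the top of
# this file are importable). Only the first two of the 20 input features
# carry signal, so their gate probabilities should dominate after training.
def _demo_stg_two_moons():
    X, y = create_twomoon_dataset(n=600, p=20)
    model = STG(device='cpu', input_dim=20, output_dim=2, hidden_dims=[60, 20],
                task_type='classification', sigma=0.5, lam=0.5,
                learning_rate=0.1, batch_size=600)
    model.fit(X, y, nr_epochs=100, verbose=False)
    print(model.get_gates(mode='prob'))  # per-feature gate-open probabilities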
class SimpleDataset(Dataset):
    """
    Assumes X and y are numpy arrays with
    X.shape = (n_samples, n_features) and y.shape = (n_samples,).
    """
def __init__(self, X, y=None):
self.X = X
self.y = y
def __len__(self):
return len(self.X)
def __getitem__(self, i):
data = self.X[i]
if (self.y is not None):
return dict(input=data, label=self.y[i])
else:
return dict(input=data)
class FastTensorDataLoader():
    """
    A DataLoader-like object for a set of tensors that can be much faster than
    TensorDataset + DataLoader, because DataLoader grabs individual indices of
    the dataset and calls cat (slow).
    Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
    """
def __init__(self, *tensors, tensor_names, batch_size=32, shuffle=False):
        """
        Initialize a FastTensorDataLoader.

        :param tensors: tensors to store. Must have the same length @ dim 0.
        :param tensor_names: names of the tensors (used as feed_dict keys).
        :param batch_size: batch size to load.
        :param shuffle: if True, shuffle the data *in-place* whenever an
            iterator is created out of this object.
        :returns: A FastTensorDataLoader.
        """
assert all(((t.shape[0] == tensors[0].shape[0]) for t in tensors))
self.tensors = tensors
self.tensor_names = tensor_names
self.dataset_len = self.tensors[0].shape[0]
self.batch_size = batch_size
self.shuffle = shuffle
(n_batches, remainder) = divmod(self.dataset_len, self.batch_size)
if (remainder > 0):
n_batches += 1
self.n_batches = n_batches
def __iter__(self):
if self.shuffle:
r = torch.randperm(self.dataset_len)
self.tensors = [t[r] for t in self.tensors]
self.i = 0
return self
def __next__(self):
if (self.i >= self.dataset_len):
raise StopIteration
batch = {}
for k in range(len(self.tensor_names)):
batch.update({self.tensor_names[k]: self.tensors[k][self.i:(self.i + self.batch_size)]})
self.i += self.batch_size
return batch
def __len__(self):
return self.n_batches
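# Illustrative usage of FastTensorDataLoader (added sketch): it slices the
# stored tensors directly, yielding feed_dict-style batches keyed by
# tensor_names.
def _demo_fast_loader():
    X = torch.arange(10).float().unsqueeze(1)
    y = torch.arange(10).long()
    loader = FastTensorDataLoader(X, y, tensor_names=('input', 'label'),
                                  batch_size=4, shuffle=False)
    print(len(loader))  # 3 batches (4 + 4 + 2 samples)
    for batch in loader:
        print(batch['input'].shape, batch['label'].shape)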
def standardize_dataset(dataset, offset, scale):
norm_ds = copy.deepcopy(dataset)
norm_ds['x'] = ((norm_ds['x'] - offset) / scale)
return norm_ds
def load_datasets(dataset_file):
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
def load_cox_gaussian_data():
dataset_file = os.path.join(os.path.dirname(__file__), 'datasets/gaussian_survival_data.h5')
datasets = defaultdict(dict)
with h5py.File(dataset_file, 'r') as fp:
for ds in fp:
for array in fp[ds]:
datasets[ds][array] = fp[ds][array][:]
return datasets
def prepare_data(x, label):
    assert isinstance(label, dict), 'label must be a dict with keys "e" and "t"'
    e, t = label['e'], label['t']
    # Sort by failure time in descending order, as the Cox partial
    # likelihood computation expects.
    sort_idx = np.argsort(t)[::-1]
    x = x[sort_idx]
    e = e[sort_idx]
    t = t[sort_idx]
    return (x, e, t)
def probe_infnan(v, name, extras={}):
    # Check for both NaN and Inf entries, matching the function name.
    nps = torch.isnan(v) | torch.isinf(v)
    s = nps.sum().item()
    if s > 0:
        print('>>> {} >>>'.format(name))
        print(name, s)
        print(v[nps])
        for (k, val) in extras.items():
            print(k, val, val.sum().item())
        sys.exit(1)
class Identity(nn.Module):
def forward(self, *args):
if (len(args) == 1):
return args[0]
return args
def get_batcnnorm(bn, nr_features=None, nr_dims=1):
if isinstance(bn, nn.Module):
return bn
assert (1 <= nr_dims <= 3)
if (bn in (True, 'async')):
clz_name = 'BatchNorm{}d'.format(nr_dims)
return getattr(nn, clz_name)(nr_features)
else:
raise ValueError('Unknown type of batch normalization: {}.'.format(bn))
def get_dropout(dropout, nr_dims=1):
if isinstance(dropout, nn.Module):
return dropout
if (dropout is True):
dropout = 0.5
if (nr_dims == 1):
return nn.Dropout(dropout, True)
else:
clz_name = 'Dropout{}d'.format(nr_dims)
return getattr(nn, clz_name)(dropout)
def get_activation(act):
if isinstance(act, nn.Module):
return act
assert (type(act) is str), 'Unknown type of activation: {}.'.format(act)
act_lower = act.lower()
if (act_lower == 'identity'):
return Identity()
elif (act_lower == 'relu'):
return nn.ReLU(True)
elif (act_lower == 'selu'):
return nn.SELU(True)
elif (act_lower == 'sigmoid'):
return nn.Sigmoid()
elif (act_lower == 'tanh'):
return nn.Tanh()
else:
        try:
            # Fall back to any activation class defined in torch.nn,
            # instantiated with its default arguments.
            return getattr(nn, act)()
        except AttributeError:
            raise ValueError('Unknown activation function: {}.'.format(act))
def get_optimizer(optimizer, model, *args, **kwargs):
if isinstance(optimizer, optim.Optimizer):
return optimizer
if (type(optimizer) is str):
try:
optimizer = getattr(optim, optimizer)
except AttributeError:
raise ValueError('Unknown optimizer type: {}.'.format(optimizer))
return optimizer(filter((lambda p: p.requires_grad), model.parameters()), *args, **kwargs)
def stmap(func, iterable):
if isinstance(iterable, six.string_types):
return func(iterable)
    elif isinstance(iterable, (collections.abc.Sequence, collections.UserList)):
        return [stmap(func, v) for v in iterable]
    elif isinstance(iterable, collections.abc.Set):
        return {stmap(func, v) for v in iterable}
    elif isinstance(iterable, (collections.abc.Mapping, collections.UserDict)):
        return {k: stmap(func, v) for (k, v) in iterable.items()}
else:
return func(iterable)
def _as_tensor(o):
from torch.autograd import Variable
if isinstance(o, SKIP_TYPES):
return o
if isinstance(o, Variable):
return o
if torch.is_tensor(o):
return o
return torch.from_numpy(np.array(o))
def as_tensor(obj):
return stmap(_as_tensor, obj)
def _as_numpy(o):
from torch.autograd import Variable
if isinstance(o, SKIP_TYPES):
return o
    if isinstance(o, Variable):
        o = o.data
if torch.is_tensor(o):
return o.cpu().numpy()
return np.array(o)
def as_numpy(obj):
return stmap(_as_numpy, obj)
def _as_float(o):
if isinstance(o, SKIP_TYPES):
return o
if torch.is_tensor(o):
return o.item()
arr = as_numpy(o)
assert (arr.size == 1)
return float(arr)
def as_float(obj):
return stmap(_as_float, obj)
def _as_cpu(o):
from torch.autograd import Variable
if (isinstance(o, Variable) or torch.is_tensor(o)):
return o.cpu()
return o
def as_cpu(obj):
return stmap(_as_cpu, obj)
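# Illustrative round-trip through the structure-mapping helpers above (added
# sketch): as_numpy walks nested dicts/lists, converting tensors to arrays
# and leaving strings untouched.
def _demo_as_numpy():
    nested = {'a': torch.ones(2), 'b': [torch.zeros(1), 'keep-me']}
    out = as_numpy(nested)
    print(type(out['a']).__name__, out['b'][1])  # ndarray keep-me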
def create_twomoon_dataset(n, p):
(relevant, y) = make_moons(n_samples=n, shuffle=True, noise=0.1, random_state=None)
print(y.shape)
noise_vector = norm.rvs(loc=0, scale=1, size=[n, (p - 2)])
data = np.concatenate([relevant, noise_vector], axis=1)
print(data.shape)
return (data, y)
def create_sin_dataset(n, p):
x1 = (5 * np.random.uniform(0, 1, n).reshape((- 1), 1))
x2 = (5 * np.random.uniform(0, 1, n).reshape((- 1), 1))
y = (np.sin(x1) * (np.cos(x2) ** 3))
relevant = np.hstack((x1, x2))
noise_vector = norm.rvs(loc=0, scale=1, size=[n, (p - 2)])
data = np.concatenate([relevant, noise_vector], axis=1)
return (data, y.astype(np.float32))
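# Illustrative call (added): two informative features plus p - 2 pure-noise
# columns, with y = sin(x1) * cos(x2)**3.
def _demo_sin_dataset():
    data, y = create_sin_dataset(n=100, p=10)
    print(data.shape, y.shape)  # (100, 10) (100, 1)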
def create_simple_sin_dataset(n, p):
    """This dataset was added to provide an example of L1-norm regularization failure, for presentation."""
assert (p == 2)
x1 = np.random.uniform((- math.pi), math.pi, n).reshape(n, 1)
x2 = np.random.uniform((- math.pi), math.pi, n).reshape(n, 1)
y = np.sin(x1)
data = np.concatenate([x1, x2], axis=1)
print('data.shape: {}'.format(data.shape))
return (data, y)
def getRelDict(graph):
rel = dict()
counter = 0
for triple in graph:
(s, p, o) = triple
if ((str(p) not in rel) and isinstance(o, rdflib.URIRef)):
rel[str(p)] = counter
counter += 1
return rel
def get_attr_set(graph):
attr_set = set()
for triple in graph:
(s, p, o) = triple
if isinstance(o, rdflib.Literal):
attr_set.add(p)
return attr_set
def get_training_attrs(graph, attr_set):
training_attrs = set()
for subject in graph.subjects():
row = list()
row.append(str(subject))
count = 0
for triple in graph.triples((subject, None, None)):
(s, p, o) = triple
if (p in attr_set):
row.append(str(p))
count += 1
if (count > 0):
training_attrs.add('\t'.join(row))
return training_attrs
def get_ent_set(graph):
ent_set = set()
for triple in graph:
(s, p, o) = triple
if (str(s) not in ent_set):
ent_set.add(str(s))
if (isinstance(o, rdflib.URIRef) and (str(o) not in ent_set)):
ent_set.add(str(o))
return ent_set
def get_ent_dict(graph, start_id):
ent_set = get_ent_set(graph)
count = start_id
res = dict()
for e in ent_set:
res[e] = count
count += 1
return res
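# Minimal rdflib example for the graph helpers above (illustrative; the
# namespace URI is made up). Predicates pointing at URIRef objects are
# indexed by getRelDict, while predicates pointing at Literals count as
# attributes.
def _demo_graph_helpers():
    ex = rdflib.Namespace('http://example.org/')
    g = rdflib.Graph()
    g.add((ex.alice, ex.knows, ex.bob))
    g.add((ex.alice, ex.name, rdflib.Literal('Alice')))
    print(getRelDict(g))    # {'http://example.org/knows': 0}
    print(get_attr_set(g))  # {rdflib.term.URIRef('http://example.org/name')}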
class Linf_SGD(Optimizer):
    r"""Implements stochastic gradient descent (optionally with momentum),
    stepping in the sign of the gradient (an L-infinity-geometry variant of SGD).

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.
        Considering the specific case of Momentum, the update can be written as

        .. math::
            v = \rho * v + g \\
            p = p - lr * v

        where p, g, v and :math:`\rho` denote the parameters, gradient,
        velocity, and momentum respectively. This is in contrast to
        Sutskever et al. and other frameworks which employ an update of the form

        .. math::
            v = \rho * v + lr * g \\
            p = p - v

        The Nesterov version is analogously modified.
    """
def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False):
defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
if (nesterov and ((momentum <= 0) or (dampening != 0))):
raise ValueError('Nesterov momentum requires a momentum and zero dampening')
super(Linf_SGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(Linf_SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
loss = None
if (closure is not None):
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if (p.grad is None):
continue
                d_p = torch.sign(p.grad.data)
                if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
                        buf.mul_(momentum).add_(d_p)
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.data.add_(d_p, alpha=-group['lr'])
return loss
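# Illustrative single step of Linf_SGD (added sketch): with momentum disabled,
# every coordinate moves by exactly lr in the negative gradient-sign direction.
def _demo_linf_sgd():
    w = nn.Parameter(torch.zeros(3))
    opt = Linf_SGD([w], lr=0.1)
    (w * torch.tensor([1.0, -2.0, 0.5])).sum().backward()
    opt.step()
    print(w.data)  # tensor([-0.1000,  0.1000, -0.1000])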
def Linf_PGD_alpha(model, X, y, epsilon, steps=7, random_start=True):
training = model.training
if training:
model.eval()
saved_params = [p.clone() for p in model.arch_parameters()]
optimizer = Linf_SGD(model.arch_parameters(), lr=((2 * epsilon) / steps))
with torch.no_grad():
loss_before = model._loss(X, y)
if random_start:
for p in model.arch_parameters():
p.data.add_(torch.zeros_like(p).uniform_((- epsilon), epsilon))
model.clip()
for _ in range(steps):
optimizer.zero_grad()
model.zero_grad()
loss = (- model._loss(X, y))
loss.backward()
optimizer.step()
diff = [(model.arch_parameters()[i] - saved_params[i]).clamp_((- epsilon), epsilon) for i in range(len(saved_params))]
for (i, p) in enumerate(model.arch_parameters()):
p.data.copy_((diff[i] + saved_params[i]))
model.clip()
optimizer.zero_grad()
model.zero_grad()
with torch.no_grad():
loss_after = model._loss(X, y)
if (loss_before > loss_after):
for (i, p) in enumerate(model.arch_parameters()):
p.data.copy_(saved_params[i])
if training:
model.train()
def Random_alpha(model, X, y, epsilon):
for p in model.arch_parameters():
p.data.add_(torch.zeros_like(p).uniform_((- epsilon), epsilon))
model.clip()
def Linf_PGD_alpha_RNN(model, X, y, hidden, epsilon, steps=7, random_start=True):
training = model.training
if training:
model.eval()
saved_params = [p.clone() for p in model.arch_parameters()]
optimizer = Linf_SGD(model.arch_parameters(), lr=((2 * epsilon) / steps))
with torch.no_grad():
(loss_before, _) = model._loss(hidden, X, y, updateType='weight')
if random_start:
for p in model.arch_parameters():
p.data.add_(torch.zeros_like(p).uniform_((- epsilon), epsilon))
model.clip()
for _ in range(steps):
optimizer.zero_grad()
model.zero_grad()
(loss, _) = model._loss(hidden, X, y, updateType='weight')
loss = (- loss)
loss.backward()
optimizer.step()
diff = [(model.arch_parameters()[i] - saved_params[i]).clamp_((- epsilon), epsilon) for i in range(len(saved_params))]
for (i, p) in enumerate(model.arch_parameters()):
p.data.copy_((diff[i] + saved_params[i]))
model.clip()
optimizer.zero_grad()
model.zero_grad()
with torch.no_grad():
(loss_after, _) = model._loss(hidden, X, y, updateType='weight')
if (loss_before > loss_after):
for (i, p) in enumerate(model.arch_parameters()):
p.data.copy_(saved_params[i])
if training:
model.train()
|
def Random_alpha_RNN(model, X, y, hidden, epsilon):
for p in model.arch_parameters():
p.data.add_(torch.zeros_like(p).uniform_((- epsilon), epsilon))
model.clip()
def calculate_md5(fpath, chunk_size=(1024 * 1024)):
md5 = hashlib.md5()
with open(fpath, 'rb') as f:
for chunk in iter((lambda : f.read(chunk_size)), b''):
md5.update(chunk)
return md5.hexdigest()
def check_md5(fpath, md5, **kwargs):
return (md5 == calculate_md5(fpath, **kwargs))
def check_integrity(fpath, md5=None):
if (not os.path.isfile(fpath)):
return False
if (md5 is None):
return True
else:
return check_md5(fpath, md5)
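# Illustrative check of the md5 helpers (added sketch; the temp-file path is
# hypothetical).
def _demo_check_integrity():
    path = '/tmp/_md5_demo.txt'
    with open(path, 'w') as f:
        f.write('hello')
    digest = calculate_md5(path)
    assert check_integrity(path, md5=digest)
    assert not check_md5(path, '0' * 32)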
class ImageNet16(data.Dataset):
train_list = [['train_data_batch_1', '27846dcaa50de8e21a7d1a35f30f0e91'], ['train_data_batch_2', 'c7254a054e0e795c69120a5727050e3f'], ['train_data_batch_3', '4333d3df2e5ffb114b05d2ffc19b1e87'], ['train_data_batch_4', '1620cdf193304f4a92677b695d70d10f'], ['train_data_batch_5', '348b3c2fdbb3940c4e9e834affd3b18d'], ['train_data_batch_6', '6e765307c242a1b3d7d5ef9139b48945'], ['train_data_batch_7', '564926d8cbf8fc4818ba23d2faac7564'], ['train_data_batch_8', 'f4755871f718ccb653440b9dd0ebac66'], ['train_data_batch_9', 'bb6dd660c38c58552125b1a92f86b5d4'], ['train_data_batch_10', '8f03f34ac4b42271a294f91bf480f29b']]
valid_list = [['val_data', '3410e3017fdaefba8d5073aaa65e4bd6']]
def __init__(self, root, train, transform, use_num_of_class_only=None):
self.root = root
self.transform = transform
self.train = train
if (not self._check_integrity()):
raise RuntimeError('Dataset not found or corrupted.')
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.valid_list
self.data = []
self.targets = []
for (i, (file_name, checksum)) in enumerate(downloaded_list):
file_path = os.path.join(self.root, file_name)
with open(file_path, 'rb') as f:
if (sys.version_info[0] == 2):
entry = pickle.load(f)
else:
entry = pickle.load(f, encoding='latin1')
self.data.append(entry['data'])
self.targets.extend(entry['labels'])
self.data = np.vstack(self.data).reshape((- 1), 3, 16, 16)
self.data = self.data.transpose((0, 2, 3, 1))
if (use_num_of_class_only is not None):
assert (isinstance(use_num_of_class_only, int) and (use_num_of_class_only > 0) and (use_num_of_class_only < 1000)), 'invalid use_num_of_class_only : {:}'.format(use_num_of_class_only)
(new_data, new_targets) = ([], [])
for (I, L) in zip(self.data, self.targets):
if (1 <= L <= use_num_of_class_only):
new_data.append(I)
new_targets.append(L)
self.data = new_data
self.targets = new_targets
def __getitem__(self, index):
(img, target) = (self.data[index], (self.targets[index] - 1))
img = Image.fromarray(img)
if (self.transform is not None):
img = self.transform(img)
return (img, target)
def __len__(self):
return len(self.data)
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.valid_list):
(filename, md5) = (fentry[0], fentry[1])
fpath = os.path.join(root, filename)
if (not check_integrity(fpath, md5)):
return False
return True
class Architect(object):
def __init__(self, model, args):
self.network_momentum = args.momentum
self.network_weight_decay = args.weight_decay
self.model = model
self.optimizer = torch.optim.Adam(self.model.arch_parameters(), lr=args.arch_learning_rate, betas=(0.5, 0.999), weight_decay=args.arch_weight_decay)
self._init_arch_parameters = []
for alpha in self.model.arch_parameters():
alpha_init = torch.zeros_like(alpha)
alpha_init.data.copy_(alpha)
self._init_arch_parameters.append(alpha_init)
if (args.method in ['darts', 'darts-proj', 'sdarts', 'sdarts-proj']):
self.method = 'fo'
        elif 'so' in args.method:
            raise NotImplementedError('Please use architect.py for second-order DARTS.')
        elif args.method in ['blank', 'blank-proj']:
            self.method = 'blank'
        else:
            raise ValueError('Unknown arch update method: {}'.format(args.method))
def reset_arch_parameters(self):
for (alpha, alpha_init) in zip(self.model.arch_parameters(), self._init_arch_parameters):
alpha.data.copy_(alpha_init.data)
def step(self, input_train, target_train, input_valid, target_valid, *args, **kwargs):
if (self.method == 'fo'):
shared = self._step_fo(input_train, target_train, input_valid, target_valid)
elif (self.method == 'so'):
raise NotImplementedError
elif (self.method == 'blank'):
shared = None
return shared
def _step_fo(self, input_train, target_train, input_valid, target_valid):
loss = self.model._loss(input_valid, target_valid)
loss.backward()
self.optimizer.step()
return None
def _step_darts_so(self, input_train, target_train, input_valid, target_valid, eta, model_optimizer):
raise NotImplementedError
def get_combination(space, num):
combs = []
for i in range(num):
if (i == 0):
for func in space:
combs.append([(func, i)])
else:
new_combs = []
for string in combs:
for func in space:
xstring = (string + [(func, i)])
new_combs.append(xstring)
combs = new_combs
return combs
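# Illustrative output of get_combination (added sketch): with k candidate
# operations and num slots it enumerates all k**num assignments.
def _demo_get_combination():
    combs = get_combination(['a', 'b'], 2)
    print(len(combs))  # 4  (= 2 ** 2)
    print(combs[0])    # [('a', 0), ('a', 1)]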
class Structure():
def __init__(self, genotype):
assert (isinstance(genotype, list) or isinstance(genotype, tuple)), 'invalid class of genotype : {:}'.format(type(genotype))
self.node_num = (len(genotype) + 1)
self.nodes = []
self.node_N = []
for (idx, node_info) in enumerate(genotype):
assert (isinstance(node_info, list) or isinstance(node_info, tuple)), 'invalid class of node_info : {:}'.format(type(node_info))
assert (len(node_info) >= 1), 'invalid length : {:}'.format(len(node_info))
for node_in in node_info:
assert (isinstance(node_in, list) or isinstance(node_in, tuple)), 'invalid class of in-node : {:}'.format(type(node_in))
assert ((len(node_in) == 2) and (node_in[1] <= idx)), 'invalid in-node : {:}'.format(node_in)
self.node_N.append(len(node_info))
self.nodes.append(tuple(deepcopy(node_info)))
def tolist(self, remove_str):
genotypes = []
for node_info in self.nodes:
node_info = list(node_info)
node_info = sorted(node_info, key=(lambda x: (x[1], x[0])))
node_info = tuple(filter((lambda x: (x[0] != remove_str)), node_info))
if (len(node_info) == 0):
return (None, False)
genotypes.append(node_info)
return (genotypes, True)
    def node(self, index):
        # Nodes are addressed 1-based here (index 0 is the input node).
        assert 0 < index < len(self), 'invalid index={:} for {:}'.format(index, len(self))
        return self.nodes[index - 1]
def tostr(self):
strings = []
for node_info in self.nodes:
string = '|'.join([(x[0] + '~{:}'.format(x[1])) for x in node_info])
string = '|{:}|'.format(string)
strings.append(string)
return '+'.join(strings)
def check_valid(self):
nodes = {0: True}
for (i, node_info) in enumerate(self.nodes):
sums = []
for (op, xin) in node_info:
if ((op == 'none') or (nodes[xin] is False)):
x = False
else:
x = True
sums.append(x)
nodes[(i + 1)] = (sum(sums) > 0)
return nodes[len(self.nodes)]
def to_unique_str(self, consider_zero=False):
nodes = {0: '0'}
for (i_node, node_info) in enumerate(self.nodes):
cur_node = []
for (op, xin) in node_info:
if (consider_zero is None):
x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
elif consider_zero:
if ((op == 'none') or (nodes[xin] == '#')):
x = '#'
elif (op == 'skip_connect'):
x = nodes[xin]
else:
x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
elif (op == 'skip_connect'):
x = nodes[xin]
else:
x = ((('(' + nodes[xin]) + ')') + '@{:}'.format(op))
cur_node.append(x)
nodes[(i_node + 1)] = '+'.join(sorted(cur_node))
return nodes[len(self.nodes)]
def check_valid_op(self, op_names):
for node_info in self.nodes:
for inode_edge in node_info:
if (inode_edge[0] not in op_names):
return False
return True
def __repr__(self):
return '{name}({node_num} nodes with {node_info})'.format(name=self.__class__.__name__, node_info=self.tostr(), **self.__dict__)
def __len__(self):
return (len(self.nodes) + 1)
def __getitem__(self, index):
return self.nodes[index]
@staticmethod
def str2structure(xstr):
assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
nodestrs = xstr.split('+')
genotypes = []
for (i, node_str) in enumerate(nodestrs):
inputs = list(filter((lambda x: (x != '')), node_str.split('|')))
for xinput in inputs:
assert (len(xinput.split('~')) == 2), 'invalid input length : {:}'.format(xinput)
inputs = (xi.split('~') for xi in inputs)
input_infos = tuple(((op, int(IDX)) for (op, IDX) in inputs))
genotypes.append(input_infos)
return Structure(genotypes)
@staticmethod
def str2fullstructure(xstr, default_name='none'):
assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
nodestrs = xstr.split('+')
genotypes = []
for (i, node_str) in enumerate(nodestrs):
inputs = list(filter((lambda x: (x != '')), node_str.split('|')))
for xinput in inputs:
assert (len(xinput.split('~')) == 2), 'invalid input length : {:}'.format(xinput)
inputs = (xi.split('~') for xi in inputs)
input_infos = list(((op, int(IDX)) for (op, IDX) in inputs))
all_in_nodes = list((x[1] for x in input_infos))
for j in range(i):
if (j not in all_in_nodes):
input_infos.append((default_name, j))
node_info = sorted(input_infos, key=(lambda x: (x[1], x[0])))
genotypes.append(tuple(node_info))
return Structure(genotypes)
@staticmethod
def gen_all(search_space, num, return_ori):
assert (isinstance(search_space, list) or isinstance(search_space, tuple)), 'invalid class of search-space : {:}'.format(type(search_space))
assert (num >= 2), 'There should be at least two nodes in a neural cell instead of {:}'.format(num)
all_archs = get_combination(search_space, 1)
for (i, arch) in enumerate(all_archs):
all_archs[i] = [tuple(arch)]
for inode in range(2, num):
cur_nodes = get_combination(search_space, inode)
new_all_archs = []
for previous_arch in all_archs:
for cur_node in cur_nodes:
new_all_archs.append((previous_arch + [tuple(cur_node)]))
all_archs = new_all_archs
if return_ori:
return all_archs
else:
return [Structure(x) for x in all_archs]
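# Illustrative round-trip through Structure (added sketch; the operation
# names follow the NAS-Bench-201 convention).
def _demo_structure():
    arch = Structure.str2structure('|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|')
    print(len(arch))           # 3 nodes (including the input node)
    print(arch.tostr())        # round-trips to the input string
    print(arch.check_valid())  # True: no 'none' op blocks the path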
def pt_project(train_queue, valid_queue, model, architect, criterion, optimizer, epoch, args, infer, query):
def project(model, args):
(num_edge, num_op) = (model.num_edge, model.num_op)
remain_eids = torch.nonzero(model.candidate_flags).cpu().numpy().T[0]
if (args.edge_decision == 'random'):
selected_eid = np.random.choice(remain_eids, size=1)[0]
if (args.proj_crit == 'loss'):
crit_idx = 1
compare = (lambda x, y: (x > y))
if (args.proj_crit == 'acc'):
crit_idx = 0
compare = (lambda x, y: (x < y))
best_opid = 0
crit_extrema = None
for opid in range(num_op):
weights = model.get_projected_weights()
proj_mask = torch.ones_like(weights[selected_eid])
proj_mask[opid] = 0
weights[selected_eid] = (weights[selected_eid] * proj_mask)
valid_stats = infer(valid_queue, model, criterion, log=False, eval=False, weights=weights)
crit = valid_stats[crit_idx]
if ((crit_extrema is None) or compare(crit, crit_extrema)):
crit_extrema = crit
best_opid = opid
logging.info('valid_acc %f', valid_stats[0])
logging.info('valid_loss %f', valid_stats[1])
logging.info('best opid %d', best_opid)
return (selected_eid, best_opid)
if (not args.fast):
api = API('../data/NAS-Bench-201-v1_0-e61699.pth')
model.train()
model.printing(logging)
(train_acc, train_obj) = infer(train_queue, model, criterion, log=False)
logging.info('train_acc %f', train_acc)
logging.info('train_loss %f', train_obj)
(valid_acc, valid_obj) = infer(valid_queue, model, criterion, log=False)
logging.info('valid_acc %f', valid_acc)
logging.info('valid_loss %f', valid_obj)
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
num_edges = model.arch_parameters()[0].shape[0]
proj_intv = args.proj_intv
tune_epochs = (proj_intv * (num_edges - 1))
model.reset_optimizer((args.learning_rate / 10), args.momentum, args.weight_decay)
for epoch in range(tune_epochs):
logging.info('epoch %d', epoch)
if (((epoch % proj_intv) == 0) or (epoch == (tune_epochs - 1))):
logging.info('project')
(selected_eid, best_opid) = project(model, args)
model.project_op(selected_eid, best_opid)
model.printing(logging)
for (step, (input, target)) in enumerate(train_queue):
model.train()
n = input.size(0)
input = input.cuda()
target = target.cuda(non_blocking=True)
(input_search, target_search) = next(iter(valid_queue))
input_search = input_search.cuda()
target_search = target_search.cuda(non_blocking=True)
optimizer.zero_grad()
architect.optimizer.zero_grad()
shared = architect.step(input, target, input_search, target_search, return_logits=True)
optimizer.zero_grad()
architect.optimizer.zero_grad()
(logits, loss) = model.step(input, target, args, shared=shared)
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
break
model.printing(logging)
(train_acc, train_obj) = infer(train_queue, model, criterion, log=False)
logging.info('train_acc %f', train_acc)
logging.info('train_loss %f', train_obj)
(valid_acc, valid_obj) = infer(valid_queue, model, criterion, log=False)
logging.info('valid_acc %f', valid_acc)
logging.info('valid_loss %f', valid_obj)
if (not args.fast):
query(api, model.genotype(), logging)
return
class TinyNetwork(nn.Module):
def __init__(self, C, N, max_nodes, num_classes, criterion, search_space, args, affine=False, track_running_stats=True):
super(TinyNetwork, self).__init__()
self._C = C
self._layerN = N
self.max_nodes = max_nodes
self._num_classes = num_classes
self._criterion = criterion
self._args = args
self._affine = affine
self._track_running_stats = track_running_stats
self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
layer_channels = ((((([C] * N) + [(C * 2)]) + ([(C * 2)] * N)) + [(C * 4)]) + ([(C * 4)] * N))
layer_reductions = ((((([False] * N) + [True]) + ([False] * N)) + [True]) + ([False] * N))
(C_prev, num_edge, edge2index) = (C, None, None)
self.cells = nn.ModuleList()
for (index, (C_curr, reduction)) in enumerate(zip(layer_channels, layer_reductions)):
if reduction:
cell = ResNetBasicblock(C_prev, C_curr, 2)
else:
cell = SearchCell(C_prev, C_curr, 1, max_nodes, search_space, affine, track_running_stats)
if (num_edge is None):
(num_edge, edge2index) = (cell.num_edges, cell.edge2index)
else:
assert ((num_edge == cell.num_edges) and (edge2index == cell.edge2index)), 'invalid {:} vs. {:}.'.format(num_edge, cell.num_edges)
self.cells.append(cell)
C_prev = cell.out_dim
self.num_edge = num_edge
self.num_op = len(search_space)
self.op_names = deepcopy(search_space)
self._Layer = len(self.cells)
self.edge2index = edge2index
self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self._arch_parameters = Variable((0.001 * torch.randn(num_edge, len(search_space)).cuda()), requires_grad=True)
arch_params = set((id(m) for m in self.arch_parameters()))
self._model_params = [m for m in self.parameters() if (id(m) not in arch_params)]
self.optimizer = torch.optim.SGD(self._model_params, args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
def entropy_y_x(self, p_logit):
p = F.softmax(p_logit, dim=1)
return ((- torch.sum((p * F.log_softmax(p_logit, dim=1)))) / p_logit.shape[0])
def _loss(self, input, target, return_logits=False):
logits = self(input)
loss = self._criterion(logits, target)
return ((loss, logits) if return_logits else loss)
def get_weights(self):
xlist = (list(self.stem.parameters()) + list(self.cells.parameters()))
xlist += (list(self.lastact.parameters()) + list(self.global_pooling.parameters()))
xlist += list(self.classifier.parameters())
return xlist
def arch_parameters(self):
return [self._arch_parameters]
def get_theta(self):
return nn.functional.softmax(self._arch_parameters, dim=(- 1)).cpu()
def get_message(self):
string = self.extra_repr()
for (i, cell) in enumerate(self.cells):
string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
return string
def extra_repr(self):
return '{name}(C={_C}, Max-Nodes={max_nodes}, N={_layerN}, L={_Layer})'.format(name=self.__class__.__name__, **self.__dict__)
def genotype(self):
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self._arch_parameters[self.edge2index[node_str]]
op_name = self.op_names[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return Structure(genotypes)
def forward(self, inputs, weights=None):
        weights = nn.functional.softmax(self._arch_parameters, dim=-1) if weights is None else weights
        # `slim` is an optional experiment flag that is not set in __init__;
        # default to the standard path when it is absent.
        if getattr(self, 'slim', False):
weights[1].data.fill_(0)
weights[3].data.fill_(0)
weights[4].data.fill_(0)
feature = self.stem(inputs)
for (i, cell) in enumerate(self.cells):
if isinstance(cell, SearchCell):
feature = cell(feature, weights)
else:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), (- 1))
logits = self.classifier(out)
return logits
    def _save_arch_parameters(self):
        self._saved_arch_parameters = [p.clone() for p in self.arch_parameters()]
def project_arch(self):
self._save_arch_parameters()
for p in self.arch_parameters():
(m, n) = p.size()
maxIndexs = p.data.cpu().numpy().argmax(axis=1)
p.data = self.proximal_step(p, maxIndexs)
def proximal_step(self, var, maxIndexs=None):
values = var.data.cpu().numpy()
(m, n) = values.shape
alphas = []
for i in range(m):
for j in range(n):
if (j == maxIndexs[i]):
alphas.append(values[i][j].copy())
values[i][j] = 1
else:
values[i][j] = 0
return torch.Tensor(values).cuda()
    def restore_arch_parameters(self):
        for (p, saved) in zip(self.arch_parameters(), self._saved_arch_parameters):
            p.data.copy_(saved.data)
        del self._saved_arch_parameters
def new(self):
model_new = TinyNetwork(self._C, self._layerN, self.max_nodes, self._num_classes, self._criterion, self.op_names, self._args, self._affine, self._track_running_stats).cuda()
for (x, y) in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def step(self, input, target, args, shared=None, return_grad=False):
(Lt, logit_t) = self._loss(input, target, return_logits=True)
Lt.backward()
nn.utils.clip_grad_norm_(self.get_weights(), args.grad_clip)
self.optimizer.step()
if return_grad:
grad = torch.nn.utils.parameters_to_vector([p.grad for p in self.get_weights()])
return (logit_t, Lt, grad)
else:
return (logit_t, Lt)
def printing(self, logging):
logging.info(self.get_theta())
def set_arch_parameters(self, new_alphas):
for (alpha, new_alpha) in zip(self.arch_parameters(), new_alphas):
alpha.data.copy_(new_alpha.data)
    def save_arch_parameters(self):
        return self._save_arch_parameters()
def reset_optimizer(self, lr, momentum, weight_decay):
del self.optimizer
self.optimizer = torch.optim.SGD(self.get_weights(), lr, momentum=momentum, weight_decay=weight_decay)
class TinyNetworkDarts(TinyNetwork):
def __init__(self, C, N, max_nodes, num_classes, criterion, search_space, args, affine=False, track_running_stats=True):
super(TinyNetworkDarts, self).__init__(C, N, max_nodes, num_classes, criterion, search_space, args, affine=affine, track_running_stats=track_running_stats)
self.theta_map = (lambda x: torch.softmax(x, dim=(- 1)))
def get_theta(self):
return self.theta_map(self._arch_parameters).cpu()
def forward(self, inputs):
weights = self.theta_map(self._arch_parameters)
feature = self.stem(inputs)
for (i, cell) in enumerate(self.cells):
if isinstance(cell, SearchCell):
feature = cell(feature, weights)
else:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), (- 1))
logits = self.classifier(out)
return logits
class TinyNetworkDartsProj(TinyNetwork):
def __init__(self, C, N, max_nodes, num_classes, criterion, search_space, args, affine=False, track_running_stats=True):
super(TinyNetworkDartsProj, self).__init__(C, N, max_nodes, num_classes, criterion, search_space, args, affine=affine, track_running_stats=track_running_stats)
self.theta_map = (lambda x: torch.softmax(x, dim=(- 1)))
self.candidate_flags = torch.tensor((len(self._arch_parameters) * [True]), requires_grad=False, dtype=torch.bool).cuda()
self.proj_weights = torch.zeros_like(self._arch_parameters)
def project_op(self, eid, opid):
self.proj_weights[eid][opid] = 1
self.candidate_flags[eid] = False
def get_projected_weights(self):
weights = self.theta_map(self._arch_parameters)
for eid in range(len(self._arch_parameters)):
if (not self.candidate_flags[eid]):
weights[eid].data.copy_(self.proj_weights[eid])
return weights
def forward(self, inputs, weights=None):
if (weights is None):
weights = self.get_projected_weights()
feature = self.stem(inputs)
for (i, cell) in enumerate(self.cells):
if isinstance(cell, SearchCell):
feature = cell(feature, weights)
else:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), (- 1))
logits = self.classifier(out)
return logits
def get_theta(self):
return self.get_projected_weights()
def arch_parameters(self):
return [self._arch_parameters]
def set_arch_parameters(self, new_alphas):
for (eid, alpha) in enumerate(self.arch_parameters()):
alpha.data.copy_(new_alphas[eid])
def genotype(self):
proj_weights = self.get_projected_weights()
genotypes = []
for i in range(1, self.max_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = proj_weights[self.edge2index[node_str]]
op_name = self.op_names[weights.argmax().item()]
xlist.append((op_name, j))
genotypes.append(tuple(xlist))
return Structure(genotypes)
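# Illustrative sketch (not part of the original source): get_projected_weights
# above overrides the softmax row of every already-projected edge with its
# stored one-hot choice. Shapes and the projected edge are assumed toy values.
import torch
alphas = torch.randn(6, 5)
proj = torch.zeros(6, 5); proj[0, 2] = 1.0
decided = torch.tensor([True, False, False, False, False, False])
weights = torch.where(decided.unsqueeze(1), proj, torch.softmax(alphas, dim=-1))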
|
def main():
torch.set_num_threads(3)
if (not torch.cuda.is_available()):
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
gpu = (ig_utils.pick_gpu_lowest_memory() if (args.gpu == 'auto') else int(args.gpu))
torch.cuda.set_device(gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info('args = %s', args)
logging.info(('gpu device = %d' % gpu))
if (not args.fast):
api = API('../data/NAS-Bench-201-v1_0-e61699.pth')
criterion = nn.CrossEntropyLoss()
search_space = SearchSpaceNames[args.search_space]
if (args.method in ['darts', 'blank']):
model = TinyNetworkDarts(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=search_space, args=args)
elif (args.method in ['darts-proj', 'blank-proj']):
model = TinyNetworkDartsProj(C=args.init_channels, N=5, max_nodes=4, num_classes=n_classes, criterion=criterion, search_space=search_space, args=args)
model = model.cuda()
logging.info('param size = %fMB', ig_utils.count_parameters_in_MB(model))
architect = Architect(model, args)
if (args.dataset == 'cifar10'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'cifar100'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar100(args)
train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'imagenet16-120'):
import torchvision.transforms as transforms
from nasbench201.DownsampledImageNet import ImageNet16
mean = [(x / 255) for x in [122.68, 116.66, 104.01]]
std = [(x / 255) for x in [63.22, 61.26, 65.09]]
lists = [transforms.RandomHorizontalFlip(), transforms.RandomCrop(16, padding=2), transforms.ToTensor(), transforms.Normalize(mean, std)]
train_transform = transforms.Compose(lists)
train_data = ImageNet16(root=os.path.join(args.data, 'imagenet16'), train=True, transform=train_transform, use_num_of_class_only=120)
valid_data = ImageNet16(root=os.path.join(args.data, 'imagenet16'), train=False, transform=train_transform, use_num_of_class_only=120)
assert (len(train_data) == 151700)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor((args.train_portion * num_train)))
train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
valid_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)
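# both queues draw from train_data: the first args.train_portion of the indices
# form the weight-training split, the remainder the architecture-validation split.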
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model.optimizer, float(args.epochs), eta_min=args.learning_rate_min)
start_epoch = 0
if (args.resume_epoch != 0):
logging.info('loading checkpoint from {}'.format(expid))
file = ('checkpoint.pth.tar' if (args.resume_epoch == (- 1)) else 'checkpoint_{}.pth.tar'.format(args.resume_epoch))
filename = os.path.join(args.save, file)
if os.path.isfile(filename):
logging.info("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
start_epoch = checkpoint['epoch']
model_state_dict = checkpoint['state_dict']
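# drop any stored alphas from the weight state dict; they are restored
# separately below via set_arch_parameters.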
if ('_arch_parameters' in model_state_dict):
del model_state_dict['_arch_parameters']
model.load_state_dict(model_state_dict)
saved_arch_parameters = checkpoint['alpha']
model.set_arch_parameters(saved_arch_parameters)
scheduler.load_state_dict(checkpoint['scheduler'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
logging.info("=> loaded checkpoint '{}' (epoch {})".format(filename, (start_epoch - 1)))
else:
print("=> no checkpoint found at '{}'".format(filename))
for epoch in range(start_epoch, args.epochs):
lr = scheduler.get_lr()[0]
if args.cutout:
train_transform.transforms[(- 1)].cutout_prob = ((args.cutout_prob * epoch) / (args.epochs - 1))
logging.info('epoch %d lr %e cutout_prob %e', epoch, lr, train_transform.transforms[(- 1)].cutout_prob)
else:
logging.info('epoch %d lr %e', epoch, lr)
genotype = model.genotype()
logging.info('genotype = %s', genotype)
model.printing(logging)
(train_acc, train_obj) = train(train_queue, valid_queue, model, architect, model.optimizer, lr, epoch)
logging.info('train_acc %f', train_acc)
logging.info('train_loss %f', train_obj)
(valid_acc, valid_obj) = infer(valid_queue, model, criterion, log=False)
logging.info('valid_acc %f', valid_acc)
logging.info('valid_loss %f', valid_obj)
if (not args.fast):
(cifar10_train, cifar10_test, cifar100_train, cifar100_valid, cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test) = query(api, model.genotype(), logging)
writer.add_scalars('accuracy', {'train': train_acc, 'valid': valid_acc}, epoch)
writer.add_scalars('loss', {'train': train_obj, 'valid': valid_obj}, epoch)
writer.add_scalars('nasbench201/cifar10', {'train': cifar10_train, 'test': cifar10_test}, epoch)
writer.add_scalars('nasbench201/cifar100', {'train': cifar100_train, 'valid': cifar100_valid, 'test': cifar100_test}, epoch)
writer.add_scalars('nasbench201/imagenet16', {'train': imagenet16_train, 'valid': imagenet16_valid, 'test': imagenet16_test}, epoch)
scheduler.step()
save_state = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'alpha': model.arch_parameters(), 'optimizer': model.optimizer.state_dict(), 'arch_optimizer': architect.optimizer.state_dict(), 'scheduler': scheduler.state_dict()}
if ((save_state['epoch'] % args.ckpt_interval) == 0):
ig_utils.save_checkpoint(save_state, False, args.save, per_epoch=True)
if (args.dev == 'proj'):
pt_project(train_queue, valid_queue, model, architect, criterion, model.optimizer, start_epoch, args, infer, query)
writer.close()
|
def train(train_queue, valid_queue, model, architect, optimizer, lr, epoch):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
for step in range(len(train_queue)):
model.train()
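# note: iter(train_queue) below builds a fresh DataLoader iterator each step,
# so every call draws one batch from a newly shuffled pass over the split.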
(input, target) = next(iter(train_queue))
input = input.cuda()
target = target.cuda(non_blocking=True)
(input_search, target_search) = next(iter(valid_queue))
input_search = input_search.cuda()
target_search = target_search.cuda(non_blocking=True)
optimizer.zero_grad()
architect.optimizer.zero_grad()
shared = architect.step(input, target, input_search, target_search, eta=lr, network_optimizer=optimizer)
optimizer.zero_grad()
architect.optimizer.zero_grad()
(logits, loss) = model.step(input, target, args, shared=shared)
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
break
return (top1.avg, objs.avg)
|
def infer(valid_queue, model, criterion, log=True, eval=True, weights=None, double=False, bn_est=False):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
(model.eval() if eval else model.train())
if bn_est:
_data_loader = deepcopy(valid_queue)
for (step, (input, target)) in enumerate(_data_loader):
input = input.cuda()
target = target.cuda(non_blocking=True)
with torch.no_grad():
logits = model(input)
model.eval()
with torch.no_grad():
for (step, (input, target)) in enumerate(valid_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
if double:
input = input.double()
target = target.double()
logits = (model(input) if (weights is None) else model(input, weights=weights))
loss = criterion(logits, target)
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if (log and ((step % args.report_freq) == 0)):
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
break
return (top1.avg, objs.avg)
|
def distill(result):
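# scrapes the pretty-printed string returned by api.query_by_arch; the fixed
# line indices and character offsets below are tied to the exact NAS-Bench-201
# result format and will break if that format changes.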
result = result.split('\n')
cifar10 = result[5].replace(' ', '').split(':')
cifar100 = result[7].replace(' ', '').split(':')
imagenet16 = result[9].replace(' ', '').split(':')
cifar10_train = float(cifar10[1].strip(',test')[(- 7):(- 2)].strip('='))
cifar10_test = float(cifar10[2][(- 7):(- 2)].strip('='))
cifar100_train = float(cifar100[1].strip(',valid')[(- 7):(- 2)].strip('='))
cifar100_valid = float(cifar100[2].strip(',test')[(- 7):(- 2)].strip('='))
cifar100_test = float(cifar100[3][(- 7):(- 2)].strip('='))
imagenet16_train = float(imagenet16[1].strip(',valid')[(- 7):(- 2)].strip('='))
imagenet16_valid = float(imagenet16[2].strip(',test')[(- 7):(- 2)].strip('='))
imagenet16_test = float(imagenet16[3][(- 7):(- 2)].strip('='))
return (cifar10_train, cifar10_test, cifar100_train, cifar100_valid, cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test)
|
def query(api, genotype, logging):
result = api.query_by_arch(genotype)
logging.info('{:}'.format(result))
(cifar10_train, cifar10_test, cifar100_train, cifar100_valid, cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test) = distill(result)
logging.info('cifar10 train %f test %f', cifar10_train, cifar10_test)
logging.info('cifar100 train %f valid %f test %f', cifar100_train, cifar100_valid, cifar100_test)
logging.info('imagenet16 train %f valid %f test %f', imagenet16_train, imagenet16_valid, imagenet16_test)
return (cifar10_train, cifar10_test, cifar100_train, cifar100_valid, cifar100_test, imagenet16_train, imagenet16_valid, imagenet16_test)
|
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if reduction:
(op_names, indices) = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
(op_names, indices) = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, concat, reduction)
def _compile(self, C, op_names, indices, concat, reduction):
assert (len(op_names) == len(indices))
self._steps = (len(op_names) // 2)
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for (name, index) in zip(op_names, indices):
stride = (2 if (reduction and (index < 2)) else 1)
op = OPS[name](C, stride, True)
self._ops += [op]
self._indices = indices
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[(2 * i)]]
h2 = states[self._indices[((2 * i) + 1)]]
op1 = self._ops[(2 * i)]
op2 = self._ops[((2 * i) + 1)]
h1 = op1(h1)
h2 = op2(h2)
if (self.training and (drop_prob > 0.0)):
if (not isinstance(op1, Identity)):
h1 = drop_path(h1, drop_prob)
if (not isinstance(op2, Identity)):
h2 = drop_path(h2, drop_prob)
s = (h1 + h2)
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
|
class AuxiliaryHead(nn.Module):
def __init__(self, C, num_classes):
'assuming input size 8x8'
super(AuxiliaryHead, self).__init__()
self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=3, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.BatchNorm2d(768), nn.ReLU(inplace=True))
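# shape check for the 8x8 input noted in the docstring: AvgPool2d(5, stride=3)
# maps 8x8 -> 2x2 and the final 2x2 conv maps to 1x1, so the flattened feature
# entering the classifier has exactly 768 elements.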
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), (- 1)))
return x
|
class Network(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(Network, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
stem_multiplier = 3
C_curr = (stem_multiplier * C)
self.stem = nn.Sequential(nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr))
(C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if (i in [(layers // 3), ((2 * layers) // 3)]):
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
(C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
if (i == ((2 * layers) // 3)):
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHead(C_to_auxiliary, num_classes)
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
def forward(self, input):
logits_aux = None
s0 = s1 = self.stem(input)
for (i, cell) in enumerate(self.cells):
(s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
if (i == ((2 * self._layers) // 3)):
if (self._auxiliary and self.training):
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), (- 1)))
return (logits, logits_aux)
|
class Cell(nn.Module):
def __init__(self, genotype, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
print(C_prev_prev, C_prev, C)
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0)
if reduction:
(op_names, indices) = zip(*genotype.reduce)
concat = genotype.reduce_concat
else:
(op_names, indices) = zip(*genotype.normal)
concat = genotype.normal_concat
self._compile(C, op_names, indices, concat, reduction)
def _compile(self, C, op_names, indices, concat, reduction):
assert (len(op_names) == len(indices))
self._steps = (len(op_names) // 2)
self._concat = concat
self.multiplier = len(concat)
self._ops = nn.ModuleList()
for (name, index) in zip(op_names, indices):
stride = (2 if (reduction and (index < 2)) else 1)
op = OPS[name](C, stride, True)
self._ops += [op]
self._indices = indices
def forward(self, s0, s1, drop_prob):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
for i in range(self._steps):
h1 = states[self._indices[(2 * i)]]
h2 = states[self._indices[((2 * i) + 1)]]
op1 = self._ops[(2 * i)]
op2 = self._ops[((2 * i) + 1)]
h1 = op1(h1)
h2 = op2(h2)
if (self.training and (drop_prob > 0.0)):
if (not isinstance(op1, Identity)):
h1 = drop_path(h1, drop_prob)
if (not isinstance(op2, Identity)):
h2 = drop_path(h2, drop_prob)
s = (h1 + h2)
states += [s]
return torch.cat([states[i] for i in self._concat], dim=1)
|
class AuxiliaryHeadImageNet(nn.Module):
def __init__(self, C, num_classes):
'assuming input size 14x14'
super(AuxiliaryHeadImageNet, self).__init__()
self.features = nn.Sequential(nn.ReLU(inplace=True), nn.AvgPool2d(5, stride=2, padding=0, count_include_pad=False), nn.Conv2d(C, 128, 1, bias=False), nn.BatchNorm2d(128), nn.ReLU(inplace=True), nn.Conv2d(128, 768, 2, bias=False), nn.ReLU(inplace=True))
self.classifier = nn.Linear(768, num_classes)
def forward(self, x):
x = self.features(x)
x = self.classifier(x.view(x.size(0), (- 1)))
return x
|
class NetworkImageNet(nn.Module):
def __init__(self, C, num_classes, layers, auxiliary, genotype):
super(NetworkImageNet, self).__init__()
self._layers = layers
self._auxiliary = auxiliary
self.drop_path_prob = 0.0
self.stem0 = nn.Sequential(nn.Conv2d(3, (C // 2), kernel_size=3, stride=2, padding=1, bias=False), nn.BatchNorm2d((C // 2)), nn.ReLU(inplace=True), nn.Conv2d((C // 2), C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
self.stem1 = nn.Sequential(nn.ReLU(inplace=True), nn.Conv2d(C, C, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(C))
(C_prev_prev, C_prev, C_curr) = (C, C, C)
self.cells = nn.ModuleList()
reduction_prev = True
for i in range(layers):
if (i in [(layers // 3), ((2 * layers) // 3)]):
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(genotype, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
(C_prev_prev, C_prev) = (C_prev, (cell.multiplier * C_curr))
if (i == ((2 * layers) // 3)):
C_to_auxiliary = C_prev
if auxiliary:
self.auxiliary_head = AuxiliaryHeadImageNet(C_to_auxiliary, num_classes)
self.global_pooling = nn.AvgPool2d(7)
self.classifier = nn.Linear(C_prev, num_classes)
def forward(self, input):
logits_aux = None
s0 = self.stem0(input)
s1 = self.stem1(s0)
for (i, cell) in enumerate(self.cells):
(s0, s1) = (s1, cell(s0, s1, self.drop_path_prob))
if (i == ((2 * self._layers) // 3)):
if (self._auxiliary and self.training):
logits_aux = self.auxiliary_head(s1)
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), (- 1)))
return (logits, logits_aux)
|
class MixedOp(nn.Module):
def __init__(self, C, stride, PRIMITIVES):
super(MixedOp, self).__init__()
self._ops = nn.ModuleList()
for primitive in PRIMITIVES:
op = OPS[primitive](C, stride, False)
if ('pool' in primitive):
op = nn.Sequential(op, nn.BatchNorm2d(C, affine=False))
self._ops.append(op)
def forward(self, x, weights):
ret = sum(((w * op(x)) for (w, op) in zip(weights, self._ops) if (w != 0)))
return ret
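# Illustrative sketch (not part of the original source): a MixedOp output is
# the weight-blended sum of every candidate op applied to the same input.
# The two toy ops and the 4-channel input below are assumptions.
import torch, torch.nn as nn
ops = nn.ModuleList([nn.Identity(), nn.Conv2d(4, 4, 3, padding=1)])
w = torch.softmax(torch.randn(2), dim=0)
x = torch.randn(1, 4, 8, 8)
out = sum(wi * op(x) for wi, op in zip(w, ops))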
|
class Cell(nn.Module):
def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction, reduction_prev):
super(Cell, self).__init__()
self.reduction = reduction
self.primitives = self.PRIMITIVES[('primitives_reduct' if reduction else 'primitives_normal')]
if reduction_prev:
self.preprocess0 = FactorizedReduce(C_prev_prev, C, affine=False)
else:
self.preprocess0 = ReLUConvBN(C_prev_prev, C, 1, 1, 0, affine=False)
self.preprocess1 = ReLUConvBN(C_prev, C, 1, 1, 0, affine=False)
self._steps = steps
self._multiplier = multiplier
self._ops = nn.ModuleList()
self._bns = nn.ModuleList()
edge_index = 0
for i in range(self._steps):
for j in range((2 + i)):
stride = (2 if (reduction and (j < 2)) else 1)
op = MixedOp(C, stride, self.primitives[edge_index])
self._ops.append(op)
edge_index += 1
def forward(self, s0, s1, weights, drop_prob=0.0):
s0 = self.preprocess0(s0)
s1 = self.preprocess1(s1)
states = [s0, s1]
offset = 0
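# the MixedOps live in one flat list; offset marks where the edges feeding
# node i begin (node i has 2 + i incoming edges, one per existing state).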
for i in range(self._steps):
if ((drop_prob > 0.0) and self.training):
s = sum((drop_path(self._ops[(offset + j)](h, weights[(offset + j)]), drop_prob) for (j, h) in enumerate(states)))
else:
s = sum((self._ops[(offset + j)](h, weights[(offset + j)]) for (j, h) in enumerate(states)))
offset += len(states)
states.append(s)
return torch.cat(states[(- self._multiplier):], dim=1)
|
class Network(nn.Module):
def __init__(self, C, num_classes, layers, criterion, primitives, args, steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0):
super(Network, self).__init__()
self._C = C
self._num_classes = num_classes
self._layers = layers
self._criterion = criterion
self._steps = steps
self._multiplier = multiplier
self.drop_path_prob = drop_path_prob
nn.Module.PRIMITIVES = primitives
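# note: setting PRIMITIVES on the nn.Module base class makes it reachable from
# Cell via self.PRIMITIVES; a class-level monkey patch kept from this codebase.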
self.op_names = primitives
C_curr = (stem_multiplier * C)
self.stem = nn.Sequential(nn.Conv2d(3, C_curr, 3, padding=1, bias=False), nn.BatchNorm2d(C_curr))
(C_prev_prev, C_prev, C_curr) = (C_curr, C_curr, C)
self.cells = nn.ModuleList()
reduction_prev = False
for i in range(layers):
if (i in [(layers // 3), ((2 * layers) // 3)]):
C_curr *= 2
reduction = True
else:
reduction = False
cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction, reduction_prev)
reduction_prev = reduction
self.cells += [cell]
(C_prev_prev, C_prev) = (C_prev, (multiplier * C_curr))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self._initialize_alphas()
self._args = args
self.optimizer = torch.optim.SGD(self.get_weights(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
def reset_optimizer(self, lr, momentum, weight_decay):
del self.optimizer
self.optimizer = torch.optim.SGD(self.get_weights(), lr, momentum=momentum, weight_decay=weight_decay)
def _loss(self, input, target, return_logits=False):
logits = self(input)
loss = self._criterion(logits, target)
return ((loss, logits) if return_logits else loss)
def _initialize_alphas(self):
k = sum((1 for i in range(self._steps) for n in range((2 + i))))
num_ops = len(self.PRIMITIVES['primitives_normal'][0])
self.num_edges = k
self.num_ops = num_ops
self.alphas_normal = self._initialize_alphas_numpy(k, num_ops)
self.alphas_reduce = self._initialize_alphas_numpy(k, num_ops)
self._arch_parameters = [self.alphas_normal, self.alphas_reduce]
def _initialize_alphas_numpy(self, k, num_ops):
' init from specified arch '
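# torch.autograd.Variable is legacy; a plain tensor created with
# requires_grad=True behaves identically in modern PyTorch.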
return Variable((0.001 * torch.randn(k, num_ops).cuda()), requires_grad=True)
def forward(self, input):
weights = self.get_softmax()
weights_normal = weights['normal']
weights_reduce = weights['reduce']
s0 = s1 = self.stem(input)
for (i, cell) in enumerate(self.cells):
if cell.reduction:
weights = weights_reduce
else:
weights = weights_normal
(s0, s1) = (s1, cell(s0, s1, weights, self.drop_path_prob))
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), (- 1)))
return logits
def step(self, input, target, args, shared=None):
assert (shared is None), 'gradient sharing disabled'
(Lt, logit_t) = self._loss(input, target, return_logits=True)
Lt.backward()
nn.utils.clip_grad_norm_(self.get_weights(), args.grad_clip)
self.optimizer.step()
return (logit_t, Lt)
def set_arch_parameters(self, new_alphas):
for (alpha, new_alpha) in zip(self.arch_parameters(), new_alphas):
alpha.data.copy_(new_alpha.data)
def get_softmax(self):
weights_normal = F.softmax(self.alphas_normal, dim=(- 1))
weights_reduce = F.softmax(self.alphas_reduce, dim=(- 1))
return {'normal': weights_normal, 'reduce': weights_reduce}
def printing(self, logging, option='all'):
weights = self.get_softmax()
if (option in ['all', 'normal']):
weights_normal = weights['normal']
logging.info(weights_normal)
if (option in ['all', 'reduce']):
weights_reduce = weights['reduce']
logging.info(weights_reduce)
def arch_parameters(self):
return self._arch_parameters
def get_weights(self):
return self.parameters()
def new(self):
model_new = Network(self._C, self._num_classes, self._layers, self._criterion, self.PRIMITIVES, self._args, drop_path_prob=self.drop_path_prob).cuda()
for (x, y) in zip(model_new.arch_parameters(), self.arch_parameters()):
x.data.copy_(y.data)
return model_new
def clip(self):
for p in self.arch_parameters():
for line in p:
max_index = line.argmax()
line.data.clamp_(0, 1)
if (line.sum() == 0.0):
line.data[max_index] = 1.0
line.data.div_(line.sum())
def genotype(self):
def _parse(weights, normal=True):
PRIMITIVES = self.PRIMITIVES[('primitives_normal' if normal else 'primitives_reduct')]
gene = []
n = 2
start = 0
for i in range(self._steps):
end = (start + n)
W = weights[start:end].copy()
try:
edges = sorted(range((i + 2)), key=(lambda x: (- max((W[x][k] for k in range(len(W[x])) if (k != PRIMITIVES[x].index('none')))))))[:2]
except ValueError:
edges = sorted(range((i + 2)), key=(lambda x: (- max((W[x][k] for k in range(len(W[x])))))))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if ('none' in PRIMITIVES[j]):
if (k != PRIMITIVES[j].index('none')):
if ((k_best is None) or (W[j][k] > W[j][k_best])):
k_best = k
elif ((k_best is None) or (W[j][k] > W[j][k_best])):
k_best = k
gene.append((PRIMITIVES[(start + j)][k_best], j))
start = end
n += 1
return gene
gene_normal = _parse(F.softmax(self.alphas_normal, dim=(- 1)).data.cpu().numpy(), True)
gene_reduce = _parse(F.softmax(self.alphas_reduce, dim=(- 1)).data.cpu().numpy(), False)
concat = range(((2 + self._steps) - self._multiplier), (self._steps + 2))
genotype = Genotype(normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat)
return genotype
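# Illustrative sketch (not part of the original source): _parse keeps, per
# node, the two incoming edges whose strongest non-'none' op scores highest.
# The 3-op primitive list and 3-edge weight matrix are assumed toy values.
import numpy as np
PRIM = ['none', 'skip_connect', 'sep_conv_3x3']
W = np.array([[0.5, 0.3, 0.2], [0.1, 0.2, 0.7], [0.3, 0.4, 0.3]])
top2 = sorted(range(3), key=lambda x: -max(W[x][k] for k in range(3) if k != PRIM.index('none')))[:2]
# top2 == [1, 2]: edge 0's 0.5 is ignored because it belongs to 'none'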
|
class DartsNetworkProj(Network):
def __init__(self, C, num_classes, layers, criterion, primitives, args, steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
super(DartsNetworkProj, self).__init__(C, num_classes, layers, criterion, primitives, args, steps=steps, multiplier=multiplier, stem_multiplier=stem_multiplier, drop_path_prob=drop_path_prob)
self._initialize_flags()
self._initialize_proj_weights()
self._initialize_topology_dicts()
def _initialize_topology_dicts(self):
self.nid2eids = {0: [2, 3, 4], 1: [5, 6, 7, 8], 2: [9, 10, 11, 12, 13]}
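# the first intermediate node has only two incoming edges (flat ids 0 and 1)
# and keeps both by construction, so only the three later nodes need a
# topology decision; they are re-indexed 0..2 here.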
self.nid2selected_eids = {'normal': {0: [], 1: [], 2: []}, 'reduce': {0: [], 1: [], 2: []}}
def _initialize_flags(self):
self.candidate_flags = {'normal': torch.tensor((self.num_edges * [True]), requires_grad=False, dtype=torch.bool).cuda(), 'reduce': torch.tensor((self.num_edges * [True]), requires_grad=False, dtype=torch.bool).cuda()}
self.candidate_flags_edge = {'normal': torch.tensor((3 * [True]), requires_grad=False, dtype=torch.bool).cuda(), 'reduce': torch.tensor((3 * [True]), requires_grad=False, dtype=torch.bool).cuda()}
def _initialize_proj_weights(self):
' data structures used for proj '
if isinstance(self.alphas_normal, list):
alphas_normal = torch.stack(self.alphas_normal, dim=0)
alphas_reduce = torch.stack(self.alphas_reduce, dim=0)
else:
alphas_normal = self.alphas_normal
alphas_reduce = self.alphas_reduce
self.proj_weights = {'normal': torch.zeros_like(alphas_normal), 'reduce': torch.zeros_like(alphas_reduce)}
def project_op(self, eid, opid, cell_type):
self.proj_weights[cell_type][eid][opid] = 1
self.candidate_flags[cell_type][eid] = False
def project_edge(self, nid, eids, cell_type):
for eid in self.nid2eids[nid]:
if (eid not in eids):
self.proj_weights[cell_type][eid].data.fill_(0)
self.nid2selected_eids[cell_type][nid] = deepcopy(eids)
self.candidate_flags_edge[cell_type][nid] = False
def get_projected_weights(self, cell_type):
' used in forward and genotype '
weights = self.get_softmax()[cell_type]
for eid in range(self.num_edges):
if (not self.candidate_flags[cell_type][eid]):
weights[eid].data.copy_(self.proj_weights[cell_type][eid])
for nid in self.nid2eids:
if (not self.candidate_flags_edge[cell_type][nid]):
for eid in self.nid2eids[nid]:
if (eid not in self.nid2selected_eids[cell_type][nid]):
weights[eid].data.copy_(self.proj_weights[cell_type][eid])
return weights
def forward(self, input, weights_dict=None):
if ((weights_dict is None) or ('normal' not in weights_dict)):
weights_normal = self.get_projected_weights('normal')
else:
weights_normal = weights_dict['normal']
if ((weights_dict is None) or ('reduce' not in weights_dict)):
weights_reduce = self.get_projected_weights('reduce')
else:
weights_reduce = weights_dict['reduce']
s0 = s1 = self.stem(input)
for (i, cell) in enumerate(self.cells):
if cell.reduction:
weights = weights_reduce
else:
weights = weights_normal
(s0, s1) = (s1, cell(s0, s1, weights, self.drop_path_prob))
out = self.global_pooling(s1)
logits = self.classifier(out.view(out.size(0), (- 1)))
return logits
def printing(self, logging, option='all'):
weights_normal = self.get_projected_weights('normal')
weights_reduce = self.get_projected_weights('reduce')
if (option in ['all', 'normal']):
logging.info('\n%s', weights_normal)
if (option in ['all', 'reduce']):
logging.info('\n%s', weights_reduce)
def genotype(self):
def _parse(weights, normal=True):
PRIMITIVES = self.PRIMITIVES[('primitives_normal' if normal else 'primitives_reduct')]
gene = []
n = 2
start = 0
for i in range(self._steps):
end = (start + n)
W = weights[start:end].copy()
try:
edges = sorted(range((i + 2)), key=(lambda x: (- max((W[x][k] for k in range(len(W[x])) if (k != PRIMITIVES[x].index('none')))))))[:2]
except ValueError:
edges = sorted(range((i + 2)), key=(lambda x: (- max((W[x][k] for k in range(len(W[x])))))))[:2]
for j in edges:
k_best = None
for k in range(len(W[j])):
if ('none' in PRIMITIVES[j]):
if (k != PRIMITIVES[j].index('none')):
if ((k_best is None) or (W[j][k] > W[j][k_best])):
k_best = k
elif ((k_best is None) or (W[j][k] > W[j][k_best])):
k_best = k
gene.append((PRIMITIVES[(start + j)][k_best], j))
start = end
n += 1
return gene
weights_normal = self.get_projected_weights('normal')
weights_reduce = self.get_projected_weights('reduce')
gene_normal = _parse(weights_normal.data.cpu().numpy(), True)
gene_reduce = _parse(weights_reduce.data.cpu().numpy(), False)
concat = range(((2 + self._steps) - self._multiplier), (self._steps + 2))
genotype = Genotype(normal=gene_normal, normal_concat=concat, reduce=gene_reduce, reduce_concat=concat)
return genotype
def get_state_dict(self, epoch, architect, scheduler):
model_state_dict = {'epoch': epoch, 'state_dict': self.state_dict(), 'alpha': self.arch_parameters(), 'optimizer': self.optimizer.state_dict(), 'arch_optimizer': architect.optimizer.state_dict(), 'scheduler': scheduler.state_dict(), 'nid2eids': self.nid2eids, 'nid2selected_eids': self.nid2selected_eids, 'candidate_flags': self.candidate_flags, 'candidate_flags_edge': self.candidate_flags_edge, 'proj_weights': self.proj_weights}
return model_state_dict
def set_state_dict(self, architect, scheduler, checkpoint):
self.load_state_dict(checkpoint['state_dict'])
self.set_arch_parameters(checkpoint['alpha'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
self.nid2eids = checkpoint['nid2eids']
self.nid2selected_eids = checkpoint['nid2selected_eids']
self.candidate_flags = checkpoint['candidate_flags']
self.candidate_flags_edge = checkpoint['candidate_flags_edge']
self.proj_weights = checkpoint['proj_weights']
|
class SDartsNetwork(Network):
def __init__(self, C, num_classes, layers, criterion, primitives, args, steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
super(SDartsNetwork, self).__init__(C, num_classes, layers, criterion, primitives, args, steps, multiplier, stem_multiplier, drop_path_prob)
self.softmaxed = False
def _save_arch_parameters(self):
self._saved_arch_parameters = [p.clone() for p in self._arch_parameters]
def softmax_arch_parameters(self):
self.softmaxed = True
self._save_arch_parameters()
for p in self._arch_parameters:
p.data.copy_(F.softmax(p, dim=(- 1)))
def restore_arch_parameters(self):
self.softmaxed = False
for (i, p) in enumerate(self._arch_parameters):
p.data.copy_(self._saved_arch_parameters[i])
del self._saved_arch_parameters
def get_softmax(self):
if self.softmaxed:
weights_normal = self.alphas_normal
weights_reduce = self.alphas_reduce
else:
weights_normal = F.softmax(self.alphas_normal, dim=(- 1))
weights_reduce = F.softmax(self.alphas_reduce, dim=(- 1))
return {'normal': weights_normal, 'reduce': weights_reduce}
|
class SDartsNetworkProj(DartsNetworkProj):
def __init__(self, C, num_classes, layers, criterion, primitives, args, steps=4, multiplier=4, stem_multiplier=3, drop_path_prob=0.0):
super(SDartsNetworkProj, self).__init__(C, num_classes, layers, criterion, primitives, args, steps=steps, multiplier=multiplier, stem_multiplier=stem_multiplier, drop_path_prob=drop_path_prob)
self.softmaxed = False
def _save_arch_parameters(self):
self._saved_arch_parameters = [p.clone() for p in self._arch_parameters]
def softmax_arch_parameters(self):
self._save_arch_parameters()
for (p, cell_type) in zip(self._arch_parameters, self.candidate_flags.keys()):
p.data.copy_(self.get_projected_weights(cell_type))
self.softmaxed = True
def restore_arch_parameters(self):
for (i, p) in enumerate(self._arch_parameters):
p.data.copy_(self._saved_arch_parameters[i])
del self._saved_arch_parameters
self.softmaxed = False
def get_softmax(self):
if self.softmaxed:
weights_normal = self.alphas_normal
weights_reduce = self.alphas_reduce
else:
weights_normal = F.softmax(self.alphas_normal, dim=(- 1))
weights_reduce = F.softmax(self.alphas_reduce, dim=(- 1))
return {'normal': weights_normal, 'reduce': weights_reduce}
def arch_parameters(self):
return self._arch_parameters
|
def project_op(model, proj_queue, args, infer, cell_type, selected_eid=None):
' operation '
(num_edges, num_ops) = (model.num_edges, model.num_ops)
candidate_flags = model.candidate_flags[cell_type]
proj_crit = args.proj_crit[cell_type]
if (selected_eid is None):
remain_eids = torch.nonzero(candidate_flags).cpu().numpy().T[0]
if (args.edge_decision == 'random'):
selected_eid = np.random.choice(remain_eids, size=1)[0]
logging.info('selected edge: %d %s', selected_eid, cell_type)
if (proj_crit == 'loss'):
crit_idx = 1
compare = (lambda x, y: (x > y))
elif (proj_crit == 'acc'):
crit_idx = 0
compare = (lambda x, y: (x < y))
best_opid = 0
crit_extrema = None
for opid in range(num_ops):
weights = model.get_projected_weights(cell_type)
proj_mask = torch.ones_like(weights[selected_eid])
proj_mask[opid] = 0
weights[selected_eid] = (weights[selected_eid] * proj_mask)
weights_dict = {cell_type: weights}
valid_stats = infer(proj_queue, model, log=False, _eval=False, weights_dict=weights_dict)
crit = valid_stats[crit_idx]
if ((crit_extrema is None) or compare(crit, crit_extrema)):
crit_extrema = crit
best_opid = opid
logging.info('valid_acc %f', valid_stats[0])
logging.info('valid_loss %f', valid_stats[1])
logging.info('best opid: %d', best_opid)
return (selected_eid, best_opid)
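# Illustrative sketch (not part of the original source): op selection masks
# one op at a time and keeps the op whose removal degrades the criterion the
# most. toy_eval is a stand-in for the real infer() call; sizes are assumed.
import torch
w = torch.softmax(torch.randn(5), dim=0)
def toy_eval(weights): return float(weights.sum())
crits = [toy_eval(torch.where(torch.arange(5) == opid, torch.zeros(5), w)) for opid in range(5)]
best_opid = min(range(5), key=lambda i: crits[i])  # 'acc' criterion: lowest post-masking score wins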
|
def project_edge(model, proj_queue, args, infer, cell_type):
' topology '
candidate_flags = model.candidate_flags_edge[cell_type]
proj_crit = args.proj_crit[cell_type]
remain_nids = torch.nonzero(candidate_flags).cpu().numpy().T[0]
if (args.edge_decision == 'random'):
selected_nid = np.random.choice(remain_nids, size=1)[0]
logging.info('selected node: %d %s', selected_nid, cell_type)
if (proj_crit == 'loss'):
crit_idx = 1
compare = (lambda x, y: (x > y))
elif (proj_crit == 'acc'):
crit_idx = 0
compare = (lambda x, y: (x < y))
eids = deepcopy(model.nid2eids[selected_nid])
while (len(eids) > 2):
eid_todel = None
crit_extrema = None
for eid in eids:
weights = model.get_projected_weights(cell_type)
weights[eid].data.fill_(0)
weights_dict = {cell_type: weights}
valid_stats = infer(proj_queue, model, log=False, _eval=False, weights_dict=weights_dict)
crit = valid_stats[crit_idx]
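# note the negated compare below: unlike project_op, the edge chosen for
# deletion is the one whose removal hurts the criterion the least.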
if ((crit_extrema is None) or (not compare(crit, crit_extrema))):
crit_extrema = crit
eid_todel = eid
logging.info('valid_acc %f', valid_stats[0])
logging.info('valid_loss %f', valid_stats[1])
eids.remove(eid_todel)
logging.info('top2 edges: (%d, %d)', eids[0], eids[1])
return (selected_nid, eids)
|
def pt_project(train_queue, valid_queue, model, architect, optimizer, epoch, args, infer, perturb_alpha, epsilon_alpha):
model.train()
model.printing(logging)
(train_acc, train_obj) = infer(train_queue, model, log=False)
logging.info('train_acc %f', train_acc)
logging.info('train_loss %f', train_obj)
(valid_acc, valid_obj) = infer(valid_queue, model, log=False)
logging.info('valid_acc %f', valid_acc)
logging.info('valid_loss %f', valid_obj)
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
num_projs = ((model.num_edges + len(model.nid2eids.keys())) - 1)
tune_epochs = ((args.proj_intv * num_projs) + 1)
proj_intv = args.proj_intv
args.proj_crit = {'normal': args.proj_crit_normal, 'reduce': args.proj_crit_reduce}
proj_queue = valid_queue
model.reset_optimizer((args.learning_rate / 10), args.momentum, args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model.optimizer, float(tune_epochs), eta_min=args.learning_rate_min)
start_epoch = 0
if (args.dev_resume_epoch >= 0):
filename = os.path.join(args.dev_resume_checkpoint_dir, 'checkpoint_{}.pth.tar'.format(args.dev_resume_epoch))
if os.path.isfile(filename):
logging.info("=> loading projection checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
start_epoch = checkpoint['epoch']
model.set_state_dict(architect, scheduler, checkpoint)
model.set_arch_parameters(checkpoint['alpha'])
scheduler.load_state_dict(checkpoint['scheduler'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
else:
logging.info("=> no checkpoint found at '{}'".format(filename))
exit(1)
for epoch in range(start_epoch, tune_epochs):
logging.info('epoch %d', epoch)
if (((epoch % proj_intv) == 0) or (epoch == (tune_epochs - 1))):
save_state_dict = model.get_state_dict(epoch, architect, scheduler)
ig_utils.save_checkpoint(save_state_dict, False, args.dev_save_checkpoint_dir, per_epoch=True)
if (epoch < (proj_intv * model.num_edges)):
logging.info('project op')
(selected_eid_normal, best_opid_normal) = project_op(model, proj_queue, args, infer, cell_type='normal')
model.project_op(selected_eid_normal, best_opid_normal, cell_type='normal')
(selected_eid_reduce, best_opid_reduce) = project_op(model, proj_queue, args, infer, cell_type='reduce')
model.project_op(selected_eid_reduce, best_opid_reduce, cell_type='reduce')
model.printing(logging)
else:
logging.info('project edge')
(selected_nid_normal, eids_normal) = project_edge(model, proj_queue, args, infer, cell_type='normal')
model.project_edge(selected_nid_normal, eids_normal, cell_type='normal')
(selected_nid_reduce, eids_reduce) = project_edge(model, proj_queue, args, infer, cell_type='reduce')
model.project_edge(selected_nid_reduce, eids_reduce, cell_type='reduce')
model.printing(logging)
for (step, (input, target)) in enumerate(train_queue):
model.train()
n = input.size(0)
input = input.cuda()
target = target.cuda(non_blocking=True)
(input_search, target_search) = next(iter(valid_queue))
input_search = input_search.cuda()
target_search = target_search.cuda(non_blocking=True)
optimizer.zero_grad()
architect.optimizer.zero_grad()
architect.step(input, target, input_search, target_search, return_logits=True)
if perturb_alpha:
model.softmax_arch_parameters()
optimizer.zero_grad()
architect.optimizer.zero_grad()
perturb_alpha(model, input, target, epsilon_alpha)
optimizer.zero_grad()
architect.optimizer.zero_grad()
(logits, loss) = model.step(input, target, args)
if perturb_alpha:
model.restore_arch_parameters()
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
break
model.printing(logging)
(train_acc, train_obj) = infer(train_queue, model, log=False)
logging.info('train_acc %f', train_acc)
logging.info('train_loss %f', train_obj)
(valid_acc, valid_obj) = infer(valid_queue, model, log=False)
logging.info('valid_acc %f', valid_acc)
logging.info('valid_loss %f', valid_obj)
logging.info('projection finished')
model.printing(logging)
num_params = ig_utils.count_parameters_in_Compact(model)
genotype = model.genotype()
logging.info('param size = %f', num_params)
logging.info('genotype = %s', genotype)
return
|
def main():
torch.set_num_threads(3)
if (not torch.cuda.is_available()):
logging.info('no gpu device available')
sys.exit(1)
if args.queue:
ig_utils.queue_gpu()
np.random.seed(args.seed)
gpu = (ig_utils.pick_gpu_lowest_memory() if (args.gpu == 'auto') else int(args.gpu))
torch.cuda.set_device(gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info(('gpu device = %d' % gpu))
logging.info('args = %s', args)
genotype = eval(('genotypes.%s' % args.arch))
model = Network(args.init_channels, n_classes, args.layers, args.auxiliary, genotype)
model = model.cuda()
logging.info('param size = %fMB', ig_utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
if (args.dataset == 'cifar10'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'cifar100'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar100(args)
train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'svhn'):
(train_transform, valid_transform) = ig_utils._data_transforms_svhn(args)
train_data = dset.SVHN(root=args.data, split='train', download=True, transform=train_transform)
valid_data = dset.SVHN(root=args.data, split='test', download=True, transform=valid_transform)
train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
start_epoch = 0
if (args.resume_epoch > 0):
logging.info('loading checkpoint from {}'.format(expid))
filename = os.path.join(args.save, 'checkpoint_{}.pth.tar'.format(args.resume_epoch))
if os.path.isfile(filename):
print("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
resume_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
scheduler.load_state_dict(checkpoint['scheduler'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_epoch = args.resume_epoch
print("=> loaded checkpoint '{}' (epoch {})".format(filename, resume_epoch))
else:
print("=> no checkpoint found at '{}'".format(filename))
best_valid_acc = 0
for epoch in range(start_epoch, args.epochs):
lr = scheduler.get_lr()[0]
if args.cutout:
train_transform.transforms[(- 1)].cutout_prob = args.cutout_prob
logging.info('epoch %d lr %e cutout_prob %e', epoch, lr, train_transform.transforms[(- 1)].cutout_prob)
else:
logging.info('epoch %d lr %e', epoch, lr)
model.drop_path_prob = ((args.drop_path_prob * epoch) / args.epochs)
(train_acc, train_obj) = train(train_queue, model, criterion, optimizer)
logging.info('train_acc %f', train_acc)
writer.add_scalar('Acc/train', train_acc, epoch)
writer.add_scalar('Obj/train', train_obj, epoch)
scheduler.step()
(valid_acc, valid_obj) = infer(valid_queue, model, criterion)
logging.info('valid_acc %f', valid_acc)
writer.add_scalar('Acc/valid', valid_acc, epoch)
writer.add_scalar('Obj/valid', valid_obj, epoch)
if (((epoch + 1) % args.ckpt_interval) == 0):
save_state_dict = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict()}
ig_utils.save_checkpoint(save_state_dict, False, args.save, per_epoch=True)
best_valid_acc = max(best_valid_acc, valid_acc)
logging.info('best valid_acc %f', best_valid_acc)
writer.close()
|
def train(train_queue, model, criterion, optimizer):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
model.train()
for (step, (input, target)) in enumerate(train_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
optimizer.zero_grad()
(logits, logits_aux) = model(input)
loss = criterion(logits, target)
if args.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += (args.auxiliary_weight * loss_aux)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
logging.info('//// WARNING: FAST MODE')
break
return (top1.avg, objs.avg)
|
def infer(valid_queue, model, criterion):
objs = ig_utils.AvgrageMeter()
top1 = ig_utils.AvgrageMeter()
top5 = ig_utils.AvgrageMeter()
model.eval()
with torch.no_grad():
for (step, (input, target)) in enumerate(valid_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
(logits, _) = model(input)
loss = criterion(logits, target)
(prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
if args.fast:
logging.info('//// WARNING: FAST MODE')
break
return (top1.avg, objs.avg)
|
class CrossEntropyLabelSmooth(nn.Module):
def __init__(self, num_classes, epsilon):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
log_probs = self.logsoftmax(inputs)
targets = torch.zeros_like(log_probs).scatter_(1, targets.unsqueeze(1), 1)
targets = (((1 - self.epsilon) * targets) + (self.epsilon / self.num_classes))
loss = ((- targets) * log_probs).mean(0).sum()
return loss
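# Illustrative sketch (not part of the original source): the smoothing above
# mixes the one-hot target with a uniform distribution. With the assumed
# epsilon=0.1 and 10 classes, the true class gets 0.91 and the rest 0.01 each.
import torch
num_classes, eps = 10, 0.1
onehot = torch.zeros(1, num_classes); onehot[0, 3] = 1.0
smoothed = (1 - eps) * onehot + eps / num_classes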
|
def main():
if (not torch.cuda.is_available()):
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info(('gpu device = %d' % args.gpu))
logging.info('args = %s', args)
genotype = eval(('genotypes.%s' % args.arch))
model = Network(args.init_channels, CLASSES, args.layers, args.auxiliary, genotype)
if args.parallel:
model = nn.DataParallel(model).cuda()
else:
model = model.cuda()
logging.info('param size = %fMB', utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
criterion_smooth = CrossEntropyLabelSmooth(CLASSES, args.label_smooth)
criterion_smooth = criterion_smooth.cuda()
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)
traindir = os.path.join(args.data, 'train')
validdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_data = dset.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2), transforms.ToTensor(), normalize]))
valid_data = dset.ImageFolder(validdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]))
train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=4)
valid_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.decay_period, gamma=args.gamma)
if args.load:
(model, optimizer, start_epoch, best_acc_top1) = utils.load_checkpoint(model, optimizer, '../../experiments/sota/imagenet/eval/EXP-20200210-143540-c10_s3_pgd-0-auxiliary-0.4-2753')
else:
start_epoch = 0
best_acc_top1 = 0
for epoch in range(start_epoch, args.epochs):
logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
model.drop_path_prob = ((args.drop_path_prob * epoch) / args.epochs)
(train_acc, train_obj) = train(train_queue, model, criterion_smooth, optimizer)
logging.info('train_acc %f', train_acc)
writer.add_scalar('Acc/train', train_acc, epoch)
writer.add_scalar('Obj/train', train_obj, epoch)
scheduler.step()
(valid_acc_top1, valid_acc_top5, valid_obj) = infer(valid_queue, model, criterion)
logging.info('valid_acc_top1 %f', valid_acc_top1)
logging.info('valid_acc_top5 %f', valid_acc_top5)
writer.add_scalar('Acc/valid_top1', valid_acc_top1, epoch)
writer.add_scalar('Acc/valid_top5', valid_acc_top5, epoch)
is_best = False
if (valid_acc_top1 > best_acc_top1):
best_acc_top1 = valid_acc_top1
is_best = True
utils.save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'best_acc_top1': best_acc_top1, 'optimizer': optimizer.state_dict()}, is_best, args.save)
|
def train(train_queue, model, criterion, optimizer):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.train()
for (step, (input, target)) in enumerate(train_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
optimizer.zero_grad()
(logits, logits_aux) = model(input)
loss = criterion(logits, target)
if args.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += (args.auxiliary_weight * loss_aux)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
(prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return (top1.avg, objs.avg)
|
def infer(valid_queue, model, criterion):
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
model.eval()
with torch.no_grad():
for (step, (input, target)) in enumerate(valid_queue):
input = input.cuda()
target = target.cuda(non_blocking=True)
(logits, _) = model(input)
loss = criterion(logits, target)
(prec1, prec5) = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.data, n)
top1.update(prec1.data, n)
top5.update(prec5.data, n)
if ((step % args.report_freq) == 0):
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return (top1.avg, top5.avg, objs.avg)
|
def main():
torch.set_num_threads(3)
if (not torch.cuda.is_available()):
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
gpu = (ig_utils.pick_gpu_lowest_memory() if (args.gpu == 'auto') else int(args.gpu))
torch.cuda.set_device(gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info(('gpu device = %d' % gpu))
logging.info('args = %s', args)
if (args.dataset == 'cifar10'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'cifar100'):
(train_transform, valid_transform) = ig_utils._data_transforms_cifar100(args)
train_data = dset.CIFAR100(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR100(root=args.data, train=False, download=True, transform=valid_transform)
elif (args.dataset == 'svhn'):
(train_transform, valid_transform) = ig_utils._data_transforms_svhn(args)
train_data = dset.SVHN(root=args.data, split='train', download=True, transform=train_transform)
valid_data = dset.SVHN(root=args.data, split='test', download=True, transform=valid_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor((args.train_portion * num_train)))
train_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]), pin_memory=True)
valid_queue = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]), pin_memory=True)
test_queue = torch.utils.data.DataLoader(valid_data, batch_size=args.batch_size, pin_memory=True)
if (args.perturb_alpha == 'none'):
perturb_alpha = None
elif (args.perturb_alpha == 'pgd_linf'):
perturb_alpha = Linf_PGD_alpha
elif (args.perturb_alpha == 'random'):
perturb_alpha = Random_alpha
else:
print('ERROR PERTURB_ALPHA TYPE:', args.perturb_alpha)
exit(1)
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
if (args.method in ['darts', 'blank']):
model = DartsNetwork(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method == 'sdarts'):
model = SDartsNetwork(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method in ['darts-proj', 'blank-proj']):
model = DartsNetworkProj(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
elif (args.method in ['sdarts-proj']):
model = SDartsNetworkProj(args.init_channels, n_classes, args.layers, criterion, spaces_dict[args.search_space], args)
else:
print('ERROR: WRONG MODEL:', args.method)
exit(1)
model = model.cuda()
architect = Architect(model, args)
logging.info('param size = %fMB', ig_utils.count_parameters_in_MB(model))
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model.optimizer, float(args.epochs), eta_min=args.learning_rate_min)
start_epoch = 0
if (args.resume_epoch > 0):
logging.info('loading checkpoint from {}'.format(expid))
filename = os.path.join(args.save, 'checkpoint_{}.pth.tar'.format(args.resume_epoch))
if os.path.isfile(filename):
logging.info("=> loading checkpoint '{}'".format(filename))
checkpoint = torch.load(filename, map_location='cpu')
resume_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
saved_arch_parameters = checkpoint['alpha']
model.set_arch_parameters(saved_arch_parameters)
scheduler.load_state_dict(checkpoint['scheduler'])
model.optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
start_epoch = args.resume_epoch
logging.info("=> loaded checkpoint '{}' (epoch {})".format(filename, resume_epoch))
else:
logging.info("=> no checkpoint found at '{}'".format(filename))
logging.info('starting training at epoch {}'.format(start_epoch))
for epoch in range(start_epoch, args.epochs):
lr = scheduler.get_lr()[0]
if args.cutout:
train_transform.transforms[(- 1)].cutout_prob = ((args.cutout_prob * epoch) / (args.epochs - 1))
logging.info('epoch %d lr %e cutout_prob %e', epoch, lr, train_transform.transforms[(- 1)].cutout_prob)
else:
logging.info('epoch %d lr %e', epoch, lr)
if args.perturb_alpha:
epsilon_alpha = (0.03 + (((args.epsilon_alpha - 0.03) * epoch) / args.epochs))
logging.info('epoch %d epsilon_alpha %e', epoch, epsilon_alpha)
num_params = ig_utils.count_parameters_in_Compact(model)
genotype = model.genotype()
logging.info('param size = %f', num_params)
logging.info('genotype = %s', genotype)
model.printing(logging)
(train_acc, train_obj) = train(train_queue, valid_queue, model, architect, model.optimizer, lr, epoch, perturb_alpha, epsilon_alpha)
logging.info('train_acc %f | train_obj %f', train_acc, train_obj)
writer.add_scalar('Acc/train', train_acc, epoch)
writer.add_scalar('Obj/train', train_obj, epoch)
scheduler.step()
(valid_acc, valid_obj) = infer(valid_queue, model, log=False)
logging.info('valid_acc %f | valid_obj %f', valid_acc, valid_obj)
writer.add_scalar('Acc/valid', valid_acc, epoch)
writer.add_scalar('Obj/valid', valid_obj, epoch)
(test_acc, test_obj) = infer(test_queue, model, log=False)
logging.info('test_acc %f | test_obj %f', test_acc, test_obj)
writer.add_scalar('Acc/test', test_acc, epoch)
writer.add_scalar('Obj/test', test_obj, epoch)
if (((epoch + 1) % args.ckpt_interval) == 0):
save_state_dict = {'epoch': (epoch + 1), 'state_dict': model.state_dict(), 'alpha': model.arch_parameters(), 'optimizer': model.optimizer.state_dict(), 'arch_optimizer': architect.optimizer.state_dict(), 'scheduler': scheduler.state_dict()}
ig_utils.save_checkpoint(save_state_dict, False, args.save, per_epoch=True)
if (args.dev == 'proj'):
pt_project(train_queue, valid_queue, model, architect, model.optimizer, start_epoch, args, infer, perturb_alpha, args.epsilon_alpha)
writer.close()
|
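# One bi-level search step per training batch: the architecture parameters are
# updated on a held-out validation batch (architect.step), the alphas are
# optionally perturbed (SDARTS-style), and the network weights are updated last.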
def train(train_queue, valid_queue, model, architect, optimizer, lr, epoch, perturb_alpha, epsilon_alpha):
    objs = ig_utils.AvgrageMeter()
    top1 = ig_utils.AvgrageMeter()
    top5 = ig_utils.AvgrageMeter()

    # build the iterators once; calling iter(queue) inside the loop would
    # restart the loader on every step and keep re-sampling its first batch
    train_iter = iter(train_queue)
    valid_iter = iter(valid_queue)
    for step in range(len(train_queue)):
        model.train()
        (input, target) = next(train_iter)
        input = input.cuda()
        target = target.cuda(non_blocking=True)

        # the architecture step uses a held-out batch; restart the validation
        # loader if it runs out before the training loader does
        try:
            (input_search, target_search) = next(valid_iter)
        except StopIteration:
            valid_iter = iter(valid_queue)
            (input_search, target_search) = next(valid_iter)
        input_search = input_search.cuda()
        target_search = target_search.cuda(non_blocking=True)

        optimizer.zero_grad()
        architect.optimizer.zero_grad()
        architect.step(input, target, input_search, target_search, lr, optimizer)

        if perturb_alpha:
            # SDARTS: perturb the softmaxed alphas before the weight update
            model.softmax_arch_parameters()
            optimizer.zero_grad()
            architect.optimizer.zero_grad()
            perturb_alpha(model, input, target, epsilon_alpha)

        optimizer.zero_grad()
        architect.optimizer.zero_grad()
        (logits, loss) = model.step(input, target, args)
        if perturb_alpha:
            model.restore_arch_parameters()

        n = input.size(0)
        (prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.data.item(), n)
        top1.update(prec1.data.item(), n)
        top5.update(prec5.data.item(), n)

        if ((step % args.report_freq) == 0):
            logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
        if args.fast:
            break
    return (top1.avg, objs.avg)
|
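# Evaluate the model on a queue. If 'weights_dict' is given, the supernet is
# run with those architecture weights instead of its own alphas (useful for
# scoring discrete candidate architectures).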
def infer(valid_queue, model, log=True, _eval=True, weights_dict=None):
    objs = ig_utils.AvgrageMeter()
    top1 = ig_utils.AvgrageMeter()
    top5 = ig_utils.AvgrageMeter()
    # _eval=False keeps BN in training mode, so running statistics keep updating
    model.eval() if _eval else model.train()

    with torch.no_grad():
        for (step, (input, target)) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)

            if (weights_dict is None):
                (loss, logits) = model._loss(input, target, return_logits=True)
            else:
                logits = model(input, weights_dict=weights_dict)
                loss = model._criterion(logits, target)

            (prec1, prec5) = ig_utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.data.item(), n)
            top1.update(prec1.data.item(), n)
            top5.update(prec5.data.item(), n)

            if (((step % args.report_freq) == 0) and log):
                logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
            if args.fast:
                break
    return (top1.avg, objs.avg)
|
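# Render a DARTS-style cell genotype (a list of (op, input_node) pairs, two per
# intermediate node) as a graphviz PDF. A hypothetical call might look like:
# plot([('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('skip_connect', 0), ('dil_conv_5x5', 2)], 'cell', mode='cue')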
def plot(genotype, filename, mode=''):
    g = Digraph(format='pdf', edge_attr=dict(fontsize='40', fontname='times'), node_attr=dict(style='filled', shape='rect', align='center', fontsize='40', height='0.5', width='0.5', penwidth='2', fontname='times'), engine='dot')
    g.body.extend(['rankdir=LR'])
    g.body.extend(['ratio=0.15'])
    g.node('c_{k-2}', fillcolor='darkseagreen2')
    g.node('c_{k-1}', fillcolor='darkseagreen2')

    # each intermediate node has exactly two incoming edges in the genotype
    assert ((len(genotype) % 2) == 0)
    steps = (len(genotype) // 2)
    for i in range(steps):
        g.node(str(i), fillcolor='lightblue')

    for i in range(steps):
        for k in [(2 * i), ((2 * i) + 1)]:
            (op, j) = genotype[k]
            if (j == 0):
                u = 'c_{k-2}'
            elif (j == 1):
                u = 'c_{k-1}'
            else:
                u = str((j - 2))
            v = str(i)
            # in 'cue' mode, parameterized ops are highlighted in red
            if ((mode == 'cue') and (op != 'skip_connect') and (op != 'noise')):
                g.edge(u, v, label=op, fillcolor='gray', color='red', fontcolor='red')
            else:
                g.edge(u, v, label=op, fillcolor='gray')

    g.node('c_{k}', fillcolor='palegoldenrod')
    for i in range(steps):
        g.edge(str(i), 'c_{k}', fillcolor='gray')
    g.render(filename, view=False)
|
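# Draw the whole search space: every candidate primitive on every edge of a
# 4-step cell, so each edge carries one arrow per operation.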
def plot_space(primitives, filename):
    g = Digraph(format='pdf', edge_attr=dict(fontsize='20', fontname='times'), node_attr=dict(style='filled', shape='rect', align='center', fontsize='20', height='0.5', width='0.5', penwidth='2', fontname='times'), engine='dot')
    g.body.extend(['rankdir=LR'])
    g.body.extend(['ratio=50.0'])
    g.node('c_{k-2}', fillcolor='darkseagreen2')
    g.node('c_{k-1}', fillcolor='darkseagreen2')
    steps = 4
    for i in range(steps):
        g.node(str(i), fillcolor='lightblue')

    # node i has (2 + i) incoming edges, one per predecessor; 'primitives'
    # lists the candidate ops for each edge in that order
    n = 2
    start = 0
    nodes_indx = ['c_{k-2}', 'c_{k-1}']
    for i in range(steps):
        end = (start + n)
        p = primitives[start:end]
        v = str(i)
        for (node, prim) in zip(nodes_indx, p):
            u = node
            for op in prim:
                g.edge(u, v, label=op, fillcolor='gray')
        start = end
        n += 1
        nodes_indx.append(v)

    g.node('c_{k}', fillcolor='palegoldenrod')
    for i in range(steps):
        g.edge(str(i), 'c_{k}', fillcolor='gray')
    g.render(filename, view=False)
|
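# Variant of plot() for an edge-indexed genotype: here 'genotype' is a flat
# list with one chosen op per supernet edge rather than (op, input_node) pairs.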
def plot(genotype, filename):
    g = Digraph(format='pdf', edge_attr=dict(fontsize='100', fontname='times'), node_attr=dict(style='filled', shape='rect', align='center', fontsize='100', height='0.5', width='0.5', penwidth='2', fontname='times'), engine='dot')
    g.body.extend(['rankdir=LR'])
    g.body.extend(['ratio=0.3'])
    g.node('c_{k-2}', fillcolor='darkseagreen2')
    g.node('c_{k-1}', fillcolor='darkseagreen2')

    # 'steps' and 'supernet_dict' (edge id -> (u, v) node names) are expected
    # to be defined at module level in the original source file
    num_edges = len(genotype)
    for i in range(steps):
        g.node(str(i), fillcolor='lightblue')
    for eid in range(num_edges):
        op = genotype[eid]
        (u, v) = supernet_dict[eid]
        if (op != 'skip_connect'):
            g.edge(u, v, label=op, fillcolor='gray', color='red', fontcolor='red')
        else:
            g.edge(u, v, label=op, fillcolor='gray')

    g.node('c_{k}', fillcolor='palegoldenrod')
    for i in range(steps):
        g.edge(str(i), 'c_{k}', fillcolor='gray')
    g.render(filename, view=False)
|
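# Plain 3x3 'same' convolution; the bias is omitted, as is usual when the
# convolution is followed by batch normalization.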
def conv3x3(in_planes, out_planes, stride=1):
    '3x3 convolution with padding'
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
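
# minimal usage sketch (hypothetical shapes): a stride-2 conv3x3 halves a
# 32x32 feature map
# layer = conv3x3(16, 32, stride=2)
# out = layer(torch.randn(1, 16, 32, 32))  # -> torch.Size([1, 32, 16, 16])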
|