code stringlengths 101 5.91M |
|---|
def pi2pi(theta, theta0=0.0):
    """Wrap *theta* into the window (theta0 - pi, theta0 + pi].

    Shifts the angle by whole turns of 2*pi until it lies inside the
    interval centred on ``theta0``.
    """
    two_pi = 2.0 * np.pi
    upper = np.pi + theta0
    lower = -np.pi + theta0
    while theta > upper:
        theta -= two_pi
    while theta < lower:
        theta += two_pi
    return theta
class ExampleConfigTest(object):
    """Mixin for tests that instantiate a model from an on-disk config.

    Subclasses must override :meth:`_config_path` and assign an open
    vocabulary file to ``self.vocab_file`` before calling
    :meth:`create_model`.
    """

    def __init__(self, *args, **kwargs):
        super(ExampleConfigTest, self).__init__(*args, **kwargs)
        # Subclasses are expected to assign a file-like object here.
        self.vocab_file = None

    def _config_path(self):
        """Return the model config path; must be overridden."""
        raise NotImplementedError()

    def create_model(self, mode, params=None):
        """Load a model for *mode*, with optional hparam overrides."""
        return _load_model_from_config(
            config_path=self._config_path(),
            hparam_overrides=params,
            vocab_file=self.vocab_file.name,
            mode=mode)
def load_matrix(fname, n_rows):
    """Read the first *n_rows* lines of *fname* as whitespace-separated
    floats and stack them into a 2-D numpy array (one row per line)."""
    rows = []
    with open(fname) as handle:
        for _ in tqdm.tqdm(range(n_rows)):
            tokens = handle.readline().split()
            rows.append(np.array([float(tok) for tok in tokens]))
    return np.vstack(rows)
def transformer(*args, **kwargs):
    # Build the fairseq interactive-generation argument parser, then load
    # a pretrained TransformerModel, forwarding all extra arguments.
    # NOTE(review): the parser is passed as the first positional argument
    # to from_pretrained -- confirm this matches the fairseq API in use
    # (the hub-style from_pretrained normally takes a model path first).
    parser = options.get_interactive_generation_parser()
    model = TransformerModel.from_pretrained(parser, *args, **kwargs)
    return model
def sql_functions_b_example(spark):
    """Walk through Spark SQL functions whose names start with 'b'
    (base64, bin, bitwiseNOT, broadcast, bround), printing a marker
    after each API call completes."""
    frame = spark.createDataFrame([('1',), ('2',), ('10',)], ['n1'])
    frame.withColumn('base64_n1', base64(frame.n1)).show()
    print('base64 API finished')

    frame = spark.createDataFrame([(1,), (2,), (3,)], ['n1'])
    frame.select(bin(frame.n1).alias('binary_number')).show()
    print('bin API finished')

    frame = spark.createDataFrame([(1,), (2,), (3,)], ['n1'])
    frame.select(bitwiseNOT(frame.n1).alias('bitwise_not_value')).show()
    print('bitwiseNOT API finished')

    left = spark.createDataFrame([(1, 'aa'), (4, 'dd')], ['n1', 's1'])
    right = spark.createDataFrame([(1, 'a'), (2, 'b'), (3, 'c'), (5, 'e'), (6, 'f')], ['n2', 's2'])
    left.join(broadcast(right), left.n1 == right.n2).show()
    print('broadcast API finished')

    spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
    print('bround API finished')
    print('Finish running function_b API')
# NOTE(review): this bare call looks like a mangled registration
# decorator (fairseq uses ``@register_criterion('binary_cross_entropy')``
# on the class below) -- confirm against the upstream source.
_criterion('binary_cross_entropy')


class BinaryCrossEntropyCriterion(FairseqCriterion):
    """Binary cross-entropy criterion with an optional InfoNCE mode.

    With ``infonce=True`` the criterion computes categorical cross
    entropy over distractors (InfoNCE); otherwise it applies elementwise
    binary cross-entropy with optional per-target weights supplied by
    the model.
    """

    def __init__(self, task, infonce=False, loss_weights=None, log_keys=None):
        super().__init__(task)
        self.infonce = infonce
        # SECURITY NOTE(review): eval() executes arbitrary code. These
        # strings come from CLI flags, so this is only safe for trusted
        # command lines; ast.literal_eval would be the safer parser.
        self.loss_weights = (None if (loss_weights is None) else eval(loss_weights))
        self.log_keys = ([] if (log_keys is None) else eval(log_keys))

    # NOTE(review): defined without ``self`` -- upstream this is a
    # @staticmethod; calling it on an instance as written would fail.
    def add_args(parser):
        """Register criterion-specific command-line arguments."""
        parser.add_argument('--infonce', action='store_true', help='if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)')
        parser.add_argument('--loss-weights', type=str, default=None, help='weights for additional loss terms (not first one)')
        parser.add_argument('--log-keys', type=str, default=None, help='output keys to log')

    def forward(self, model, sample, reduce=True, log_pred=False):
        """Compute the loss for one sample batch.

        Returns:
            (loss, sample_size, logging_output) where ``sample_size`` is
            the gradient-normalization denominator and ``logging_output``
            feeds :meth:`aggregate_logging_outputs`.
        """
        net_output = model(**sample['net_input'])
        logits = model.get_logits(net_output).float()
        target = model.get_targets(sample, net_output)
        weights = None
        # Per-target weights only apply in the BCE (non-InfoNCE) mode.
        if (hasattr(model, 'get_target_weights') and (not self.infonce)):
            weights = model.get_target_weights(target, net_output)
            if torch.is_tensor(weights):
                weights = weights.float()
        losses = []
        if self.infonce:
            loss = F.cross_entropy(logits, target, reduction=('sum' if reduce else 'none'))
        else:
            loss = F.binary_cross_entropy_with_logits(logits, target.float(), weights, reduction=('sum' if reduce else 'none'))
        # InfoNCE normalizes by element count; BCE by the count of
        # positive targets.
        sample_size = (target.numel() if self.infonce else target.sum().long().item())
        losses.append(loss)
        # Optional auxiliary losses from the model, scaled by
        # self.loss_weights and folded into the main loss.
        if ((self.loss_weights is not None) and hasattr(model, 'get_extra_losses')):
            extra_losses = model.get_extra_losses(net_output)
            if torch.is_tensor(extra_losses):
                extra_losses = [extra_losses]
            # A single configured weight is broadcast over all extras.
            if ((len(self.loss_weights) == 1) and (len(extra_losses) != 1)):
                self.loss_weights = ([self.loss_weights[0]] * len(extra_losses))
            assert (len(extra_losses) == len(self.loss_weights)), f'{len(extra_losses)}, {len(self.loss_weights)}'
            for (p, coef) in zip(extra_losses, self.loss_weights):
                if ((coef != 0) and (p is not None)):
                    p = ((coef * p.float()) * sample_size)
                    loss += p
                    losses.append(p)
        logging_output = {'loss': (loss.item() if reduce else loss), 'ntokens': sample_size, 'nsentences': logits.size(0), 'sample_size': sample_size}
        for lk in self.log_keys:
            if (lk in net_output):
                logging_output[lk] = float(net_output[lk])
        if (len(losses) > 1):
            for (i, l) in enumerate(losses):
                logging_output[f'loss_{i}'] = l.item()
        if self.infonce:
            # Accuracy bookkeeping: a prediction is correct when the true
            # class (index 0) is the argmax; ties where index 0 is also
            # the argmin (degenerate rows) are subtracted back out.
            with torch.no_grad():
                if (logits.numel() == 0):
                    corr = 0
                    count = 0
                else:
                    assert (logits.dim() > 1), logits.shape
                    # NOTE(review): ``max``/``min`` shadow the builtins
                    # within this scope.
                    max = (logits.argmax((- 1)) == 0)
                    min = (logits.argmin((- 1)) == 0)
                    both = (max & min)
                    corr = (max.long().sum().item() - both.long().sum().item())
                    count = max.numel()
                logging_output['correct'] = corr
                logging_output['count'] = count
        if log_pred:
            logging_output['logits'] = logits.cpu().numpy()
            logging_output['target'] = target.cpu().numpy()
        return (loss, sample_size, logging_output)

    # NOTE(review): defined without ``self`` -- upstream a @staticmethod.
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate per-worker logging outputs into one summary dict."""
        loss_sum = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        ntokens = utils.item(sum((log.get('ntokens', 0) for log in logging_outputs)))
        nsentences = utils.item(sum((log.get('nsentences', 0) for log in logging_outputs)))
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        # Losses are reported in bits, hence the division by log(2).
        agg_output = {'loss': ((loss_sum / sample_size) / math.log(2)), 'ntokens': ntokens, 'nsentences': nsentences, 'sample_size': sample_size}
        if (sample_size != ntokens):
            agg_output['nll_loss'] = ((loss_sum / ntokens) / math.log(2))
        correct = sum((log.get('correct', 0) for log in logging_outputs))
        total = sum((log.get('count', 0) for log in logging_outputs))
        if (total > 0):
            agg_output['accuracy'] = (correct / total)
        builtin_keys = {'loss', 'ntokens', 'nsentences', 'sample_size', 'correct', 'count'}
        # Extra keys are averaged across workers; loss-prefixed keys are
        # additionally normalized by the aggregate token count.
        for k in logging_outputs[0]:
            if (k not in builtin_keys):
                val = (sum((log.get(k, 0) for log in logging_outputs)) / len(logging_outputs))
                if k.startswith('loss'):
                    val = ((val / ntokens) if (ntokens > 0) else float('nan'))
                agg_output[k] = val
        return agg_output
def upNvis():
    """Toggle the uN button and sync upN's visibility with its state."""
    uNbu.switch()
    # status() is intentionally queried per branch, matching the
    # original call pattern.
    if uNbu.status() == 'Uhide':
        upN.off()
    elif uNbu.status() == 'Ushow':
        upN.on()
class TFOptimizer():
    """Drives training of a TensorFlow 1.x graph on BigDL.

    Wraps a ``TFModel`` plus an ``OptimMethod`` into a BigDL
    ``Estimator`` and provides factory entry points for constructing one
    from a Keras model, a loss tensor, or an existing ``train_op``.
    """

    def __init__(self, tf_model, optim_method, sess=None, dataset=None, clip_norm=None, clip_value=None, model_dir=None):
        """Create an optimizer over *tf_model* trained on *dataset*.

        clip_norm enables L2-norm gradient clipping; clip_value must be a
        (min_value, max_value) tuple for constant clipping.
        """
        self.optim_method = optim_method
        self.sess = sess
        self.dataset = dataset
        self.clip_norm = clip_norm
        if ((clip_value is not None) and (not isinstance(clip_value, tuple))):
            invalidInputError(False, 'The clip_value argument should be a tuple (min_value, max_value)')
        self.clip_constant = clip_value
        if (self.dataset.batch_size <= 0):
            invalidInputError(False, 'You should set batch_size instead of batch_per_thread for training')
        self.model_dir = model_dir
        self.tf_model = tf_model
        batch_size = self.dataset.batch_size
        self.train_data = self.dataset.get_training_data()
        self.val_data = self.dataset.get_validation_data()
        self.batch_size = batch_size
        self.estimator = Estimator(self.tf_model.training_helper_layer, self.optim_method, self.model_dir)
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            (min_value, max_value) = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)

    def load_checkpoint(self, path, version):
        """Restore weights and optimizer state saved under *path* at
        *version*, then rebuild the estimator around them."""
        model_path = os.path.join(path, 'model.{}'.format(version))
        optim_method_path = os.path.join(path, 'optimMethod-TFParkTraining.{}'.format(version))
        self.tf_model.training_helper_layer.load_checkpoint(model_path)
        self.optim_method = OptimMethod.load(optim_method_path)
        # Re-create the estimator so it uses the restored optim method.
        self.estimator = Estimator(self.tf_model.training_helper_layer, self.optim_method, self.model_dir)
        if self.clip_norm:
            self.estimator.set_l2_norm_gradient_clipping(self.clip_norm)
        if self.clip_constant:
            (min_value, max_value) = self.clip_constant
            self.estimator.set_constant_gradient_clipping(min_value, max_value)

    # NOTE(review): the helpers below take no ``self`` and the factories
    # take ``cls`` -- upstream they carry @staticmethod/@classmethod
    # decorators that appear to have been stripped from this copy.
    def _get_or_create_session(session):
        """Return *session*, or create and initialize a fresh tf.Session."""
        import tensorflow as tf
        if (session is None):
            sess = tf.Session()
            sess.run(tf.global_variables_initializer())
        else:
            sess = session
        return sess

    def _get_dataset_from_loss(loss):
        """Recover the TFDataset that feeds *loss* via the graph
        collection keyed by its first placeholder's name."""
        import tensorflow as tf
        all_required_inputs = find_placeholders([loss])
        dataset = tf.get_collection(all_required_inputs[0].name)[0]
        return dataset

    def _get_vars_grads(loss):
        """Compute (grads, variables) for *loss*, sorted by variable name.

        A throwaway GradientDescentOptimizer(0) is used purely for its
        compute_gradients machinery; variables with no gradient are
        dropped.
        """
        import tensorflow as tf
        grads_vars = tf.train.GradientDescentOptimizer(0).compute_gradients(loss)
        grads_vars.sort(key=(lambda grad_var: grad_var[1].name))
        variables = []
        grads = []
        for (grad, var) in grads_vars:
            if (grad is not None):
                variables.append(var)
                grads.append(grad)
        return (grads, variables)

    def _get_vars_grads_from_train_op(train_op):
        """Recover (grads, variables) from a *train_op* whose gradient
        tensors were tagged with zoo identity ops."""
        def predicate(t):
            return t.name.split('/')[(- 1)].startswith('zoo_identity_op_for_grad')
        grads = find_tensors([train_op], predicate)
        grad_ops = [grad.op for grad in grads]
        variables = []
        for grad in grad_ops:
            # The tagged identity op is control-dependent on its variable.
            var = list(grad.control_inputs)[0]
            if (var.name == 'VarHandleOp'):
                variables.append(var)
            else:
                variables.append(list(var.outputs)[0])
        return (grads, variables)

    def from_train_op(cls, train_op, loss, *, inputs=None, labels=None, metrics=None, updates=None, sess=None, dataset=None, tensor_with_value=None, session_config=None, model_dir=None):
        """Build a TFOptimizer around an already-wired *train_op*.

        Inputs/labels default to the dataset's original tensors; a
        FakeOptimMethod is used since the train_op already applies
        gradients itself.
        """
        sess = TFOptimizer._get_or_create_session(sess)
        (grads, variables) = TFOptimizer._get_vars_grads_from_train_op(train_op)
        if (dataset is None):
            dataset = TFOptimizer._get_dataset_from_loss(loss)
        # Force lazy tensor creation before reading _original_tensors.
        _ = dataset.tensors
        dataset_inputs = dataset._original_tensors
        if (isinstance(dataset_inputs, tuple) and (len(dataset_inputs) == 2)):
            if (inputs is None):
                inputs = dataset_inputs[0]
            if (labels is None):
                labels = dataset_inputs[1]
        else:
            if (inputs is None):
                inputs = dataset_inputs
            if (labels is None):
                labels = []
        inputs = nest.flatten(inputs)
        labels = nest.flatten(labels)
        from bigdl.orca.tfpark.zoo_optimizer import FakeOptimMethod
        return TFOptimizer._from_grads(loss=loss, sess=sess, inputs=inputs, labels=labels, grads=grads, variables=variables, dataset=dataset, metrics=metrics, tensor_with_value=tensor_with_value, optim_method=FakeOptimMethod(), session_config=session_config, updates=updates, model_dir=model_dir, train_op=train_op)

    def _from_grads(cls, loss, sess, inputs, labels, grads, variables, dataset, optim_method=None, clip_norm=None, clip_value=None, metrics=None, tensor_with_value=None, session_config=None, model_dir=None, updates=None, train_op=None):
        """Shared constructor path: wrap (loss, grads, variables) into a
        TFModel and instantiate the optimizer."""
        graph = loss.graph
        if (metrics is None):
            metrics = {}
        # NOTE(review): model_dir=None is hard-coded into TFModel.create
        # while the outer model_dir is still passed to cls() -- confirm
        # this asymmetry is intentional.
        tf_model = TFModel.create(loss, sess, inputs, labels, [], grads, variables, graph, tensor_with_value, session_config, metrics, updates, model_dir=None, train_op=train_op)
        return cls(tf_model, optim_method, sess=sess, dataset=dataset, clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)

    def from_loss(cls, loss, optim_method, session=None, inputs=None, dataset=None, val_outputs=None, val_labels=None, val_method=None, clip_norm=None, clip_value=None, metrics=None, tensor_with_value=None, session_config=None, model_dir=None, updates=None):
        """Build a TFOptimizer from a loss tensor, deriving gradients and
        (optionally) the dataset/inputs from the graph."""
        sess = TFOptimizer._get_or_create_session(session)
        (grads, variables) = TFOptimizer._get_vars_grads(loss)
        if ((dataset is None) and (inputs is None)):
            dataset = TFOptimizer._get_dataset_from_loss(loss)
            inputs = dataset._original_tensors
        else:
            if (inputs is None):
                invalidInputError(False, 'please specify inputs')
            # Force lazy tensor creation on the user-supplied dataset.
            _ = dataset.tensors
        if (isinstance(inputs, tuple) and (len(inputs) == 2)):
            (inputs, labels) = inputs
        else:
            labels = []
        inputs = nest.flatten(inputs)
        labels = nest.flatten(labels)
        if (clip_value is not None):
            if (isinstance(clip_value, float) or isinstance(clip_value, int)):
                if (clip_value <= 0):
                    # NOTE(review): this ValueError is constructed but
                    # never raised -- non-positive clip values slip
                    # through to the symmetric-tuple conversion below.
                    ValueError('The clip_value argument should be positive number')
                clip_value = ((- float(clip_value)), float(clip_value))
            if (not isinstance(clip_value, tuple)):
                invalidInputError(False, ((('The clip_value argument should be' + ' a positive float/int which clips to') + ' (-clip_value, clip_value); ') + 'or a tuple which clips to (min_value, max_value)'))
        if (val_method is not None):
            val_methods = to_list(val_method)
            if (metrics is None):
                metrics = {}
            for (i, method) in enumerate(val_methods):
                metrics[('bigdl_metric_' + str(i))] = BigDLMetric(method, val_outputs, val_labels)
        return TFOptimizer._from_grads(loss, sess, inputs, labels, grads, variables, dataset, optim_method, clip_norm, clip_value, metrics, tensor_with_value, session_config, model_dir, updates)

    def export_training_model(export_dir, loss, sess, inputs, labels=None, predictions=None, metrics=None, tensor_with_value=None, updates=None):
        """Export the training graph for *loss* to *export_dir*."""
        (grads, variables) = TFOptimizer._get_vars_grads(loss)
        TFModel.export(export_dir, loss, sess, inputs, labels, predictions, grads, variables, loss.graph, tensor_with_value, metrics, updates)
        logging.info('Exported TensorFlow model in {} for training'.format(export_dir))

    def _shape_match(model_shape, dataset_shape):
        """Check whether a model input shape accepts a dataset shape.

        NOTE(review): both branches return on the first loop iteration,
        so only dimension 0 is ever compared (and an empty dataset_shape
        returns None); confirm whether an all-dimensions check was
        intended.
        """
        for i in range(len(dataset_shape)):
            if (dataset_shape[i].value is None):
                return (model_shape[i].value is None)
            else:
                return ((dataset_shape[i].value == model_shape[i].value) or (model_shape[i].value is None))

    def from_keras(cls, keras_model, dataset, session_config=None, model_dir=None, metrics=None, optimizer=None):
        """Build a TFOptimizer from a compiled tf.keras model.

        Validates input/target arity against *dataset*, derives grads
        from the keras optimizer, wires keras metrics into BigDL
        validation methods, and delegates to from_train_op unless an
        explicit BigDL *optimizer* is given.
        """
        import tensorflow.keras.backend as K
        model_inputs = keras_model.inputs
        # `targets` was renamed `_targets` across tf.keras versions.
        if hasattr(keras_model, 'targets'):
            model_targets = keras_model.targets
        else:
            model_targets = keras_model._targets
        model_targets = list(filter((lambda x: (x is not None)), model_targets))
        check_data_compatible(dataset, keras_model, mode='train')
        if isinstance(dataset, TFNdarrayDataset):
            dataset = _standarize_feature_label_dataset(dataset, keras_model)
        flatten_inputs = nest.flatten(dataset.feature_tensors)
        invalidInputError((len(model_inputs) == len(flatten_inputs)), 'the keras model and TFDataset should have the same number of tensors keras model has {} inputs while TFDataset has {} inputs'.format(len(model_inputs), len(flatten_inputs)))
        for i in range(len(flatten_inputs)):
            if (not TFOptimizer._shape_match(model_inputs[i].shape, flatten_inputs[i].shape)):
                invalidInputError(False, 'The {}th input in keras model {} does not match the TFDatasetinput {}'.format(i, model_inputs[i], flatten_inputs[i]))
        flatten_targets = nest.flatten(dataset.label_tensors)
        invalidInputError((len(model_targets) == len(flatten_targets)), 'the keras model and TFDataset should have the same number of tensors keras model has {} targets while TFDataset has {} labels'.format(len(model_targets), len(flatten_inputs)))
        loss = keras_model.total_loss
        variables = keras_model._collected_trainable_weights
        variables.sort(key=(lambda variable: variable.name))
        keras_optimizer = keras_model.optimizer
        from bigdl.orca.tfpark.zoo_optimizer import get_gradients_for_keras
        grads = get_gradients_for_keras(keras_optimizer, loss, variables)
        grads_and_vars = list(zip(grads, variables))
        import tensorflow.python.keras.optimizers as koptimizers
        # A keras TFOptimizer wraps a native tf optimizer; unwrap it.
        if isinstance(keras_optimizer, koptimizers.TFOptimizer):
            train_op = keras_optimizer.optimizer.apply_gradients(grads_and_vars)
        else:
            train_op = keras_optimizer.apply_gradients(grads_and_vars)
        sess = K.get_session()
        if (keras_model.metrics and (dataset.get_validation_data() is not None)):
            if isinstance(keras_model.metrics, dict):
                invalidInputError(False, 'different metrics for different outputs are not supported right now')
            # Multi-output models only support loss-style metrics.
            if (len(keras_model.outputs) > 1):
                if (not all([name.endswith('loss') for name in keras_model.metrics_names])):
                    invalidInputError(False, 'metrics (except loss) for multi-head model is not supported')
                else:
                    bigdl_val_methods = [Loss()]
                    val_outputs = keras_model.outputs
                    val_labels = model_targets
            else:
                bigdl_val_methods = [to_bigdl_metric(m, keras_model.loss) for m in keras_model.metrics_names]
                val_outputs = keras_model.outputs
                val_labels = model_targets
        else:
            val_outputs = None
            val_labels = None
            bigdl_val_methods = None
        # Feed the keras learning-phase flag: True for train, False eval.
        tensor_with_value = {K.learning_phase(): [True, False]}
        updates = []
        updates += keras_model.get_updates_for(None)
        updates += keras_model.get_updates_for(keras_model.inputs)
        if (bigdl_val_methods is not None):
            val_methods = to_list(bigdl_val_methods)
            bigdl_metrics = {}
            for (i, method) in enumerate(val_methods):
                bigdl_metrics[('bigdl_metric_' + str(i))] = BigDLMetric(method, val_outputs, val_labels)
            if (metrics is None):
                metrics = bigdl_metrics
            else:
                metrics.update(bigdl_metrics)
        # Explicit BigDL optimizer: build TFModel directly, honoring any
        # keras-level clipping settings. Otherwise reuse the keras
        # train_op path.
        if (optimizer is not None):
            clip_norm = None
            clip_value = None
            if hasattr(keras_optimizer, 'clipnorm'):
                clip_norm = keras_optimizer.clipnorm
            if hasattr(keras_optimizer, 'clipvalue'):
                clip_value = ((- keras_optimizer.clipvalue), keras_optimizer.clipvalue)
            tf_model = TFModel.create(loss, sess, model_inputs, model_targets, keras_model.outputs, grads, variables, loss.graph, tensor_with_value, session_config, metrics, updates, model_dir=None)
            return cls(tf_model, optimizer, sess=sess, dataset=dataset, clip_norm=clip_norm, clip_value=clip_value, model_dir=model_dir)
        return cls.from_train_op(train_op, loss, inputs=model_inputs, labels=model_targets, metrics=metrics, updates=updates, sess=sess, dataset=dataset, tensor_with_value=tensor_with_value, session_config=session_config, model_dir=model_dir)

    def set_constant_gradient_clipping(self, min_value, max_value):
        """Clip every gradient element into [min_value, max_value]."""
        self.estimator.set_constant_gradient_clipping(min_value, max_value)

    def set_gradient_clipping_by_l2_norm(self, clip_norm):
        """Clip gradients by global L2 norm *clip_norm*."""
        self.estimator.set_l2_norm_gradient_clipping(clip_norm)

    def optimize(self, end_trigger=None, checkpoint_trigger=None):
        """Run training until *end_trigger* (default: one epoch),
        checkpointing on *checkpoint_trigger* (default: every epoch),
        then pull the trained weights back into the Python model."""
        if (end_trigger is None):
            end_trigger = MaxEpoch(1)
        if (checkpoint_trigger is None):
            checkpoint_trigger = EveryEpoch()
        if isinstance(self.train_data, FeatureSet):
            # Multi-slice FeatureSets require zoo-specific triggers.
            if (self.train_data.value.getNumOfSlice() != 1):
                if isinstance(checkpoint_trigger, EveryEpoch):
                    checkpoint_trigger = ZEveryEpoch()
                elif (not isinstance(checkpoint_trigger, ZooTrigger)):
                    invalidInputError(False, 'Please use a trigger defined in bigdl.dllib.utils.triggers')
        if (self.tf_model.val_methods and (self.val_data is not None)):
            self.estimator.train_minibatch(train_set=self.train_data, criterion=self.tf_model.criterion, end_trigger=end_trigger, checkpoint_trigger=checkpoint_trigger, validation_set=self.val_data, validation_method=self.tf_model.val_methods)
        else:
            self.estimator.train_minibatch(train_set=self.train_data, criterion=self.tf_model.criterion, end_trigger=end_trigger, checkpoint_trigger=checkpoint_trigger)
        self.tf_model.training_helper_layer.get_weights_to_python()
def summarize_error(key):
    """Render the entry stored under *key* in the module-level
    ``err_info`` mapping as a display string.

    A plain-string entry is returned on one line with a leading space;
    otherwise the entry is treated as an iterable of ``(name, err)``
    pairs and rendered one per line, framed by newlines.
    """
    info = err_info[key]
    # isinstance instead of `type(...) == str` so str subclasses are
    # formatted the same way.
    if isinstance(info, str):
        return ' ' + info
    lines = [' %s: %s' % (name, err) for (name, err) in info]
    return '\n' + '\n'.join(lines) + '\n'
def _dist_train(model, dataset, cfg, validate=False):
    """Distributed training entry point (mmdetection-style).

    Wraps *model* in MMDistributedDataParallel, builds a Runner from the
    optimizer/hook settings in *cfg*, optionally registers a
    dataset-appropriate evaluation hook, restores prior state, and runs
    the configured workflow.
    """
    # One loader per workflow stage; dist=True enables the distributed sampler.
    data_loaders = [build_dataloader(dataset, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True)]
    model = MMDistributedDataParallel(model.cuda())
    optimizer = build_optimizer(model, cfg.optimizer)
    runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level)
    # fp16 training swaps in the loss-scaling-aware optimizer hook.
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg)
    else:
        optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
    runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config)
    # Re-seeds the distributed sampler each epoch so shards reshuffle.
    runner.register_hook(DistSamplerSeedHook())
    if validate:
        val_dataset_cfg = cfg.data.val
        eval_cfg = cfg.get('evaluation', {})
        # Pick the eval hook matching the model/dataset:
        # RPN -> recall; COCO datasets -> COCO mAP; otherwise generic mAP.
        if isinstance(model.module, RPN):
            runner.register_hook(CocoDistEvalRecallHook(val_dataset_cfg, **eval_cfg))
        else:
            dataset_type = getattr(datasets, val_dataset_cfg.type)
            if issubclass(dataset_type, datasets.CocoDataset):
                runner.register_hook(CocoDistEvalmAPHook(val_dataset_cfg, **eval_cfg))
            else:
                runner.register_hook(DistEvalmAPHook(val_dataset_cfg, **eval_cfg))
    # resume (weights + optimizer + epoch) takes precedence over
    # load_checkpoint (weights only).
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
def num_del(inp_lists):
    """Return, for each record, the length of its 'del_span' minus one.

    Args:
        inp_lists: iterable of dicts, each carrying a sized 'del_span'
            entry (list/tuple of deleted spans).

    Returns:
        list[int]: ``len(d['del_span']) - 1`` for each dict ``d``, in
        input order (empty spans yield -1, matching the original).
    """
    # Comprehension replaces the manual append loop and drops the
    # builtin-shadowing local name ``l``.
    return [len(inp['del_span']) - 1 for inp in inp_lists]
class HansProcessor(DataProcessor):
    """Data processor for the HANS heuristics dataset (TSV files)."""

    def get_train_examples(self, data_dir):
        """Load training examples from heuristics_train_set.txt."""
        path = os.path.join(data_dir, 'heuristics_train_set.txt')
        return self._create_examples(self._read_tsv(path), 'train')

    def get_dev_examples(self, data_dir):
        """Load evaluation examples from heuristics_evaluation_set.txt."""
        path = os.path.join(data_dir, 'heuristics_evaluation_set.txt')
        return self._create_examples(self._read_tsv(path), 'dev')

    def get_labels(self):
        """Return the NLI label inventory used by this task."""
        return ['contradiction', 'entailment', 'neutral']

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows (header row first) into InputExamples."""
        examples = []
        for row_idx, row in enumerate(lines):
            if row_idx == 0:
                continue  # skip the TSV header row
            guid = '%s-%s' % (set_type, row[0])
            # Strip the 'ex' prefix from the pair identifier if present.
            pair_id = row[7]
            if pair_id.startswith('ex'):
                pair_id = pair_id[2:]
            examples.append(InputExample(guid=guid, text_a=row[5],
                                         text_b=row[6], label=row[0],
                                         pairID=pair_id))
        return examples
class Client():
    """Simulated federated-learning client (class body continues below)."""
    # Class-level user-behavior trace, loaded once at import time and
    # shared by every Client instance.
    d = None
    try:
        with open('../data/state_traces.json', 'r', encoding='utf-8') as f:
            d = json.load(f)
    except FileNotFoundError as e:
        # A missing trace file is tolerated: instances fall back to
        # no-trace mode (see __init__, which disables cfg.behav_hete).
        d = None
        logger.warn('no user behavior trace was found, running in no-trace mode')
    def __init__(self, client_id, group=None, train_data={'x': [], 'y': []}, eval_data={'x': [], 'y': []}, device=None, cfg=None):
        """Initialize one simulated client.

        Sets up training/eval data, the per-client timer driven by the
        class-level behavior trace (when available), the simulated
        device, and per-batch/per-epoch timing estimates used by the
        FedBalancer/Oort sample-selection strategies.

        NOTE(review): train_data/eval_data use mutable default
        arguments shared across calls -- safe only if never mutated.
        """
        self._model = None
        self.id = client_id
        self.group = group
        self.deadline = 1
        self.cfg = cfg
        self.train_data = train_data
        # Eval data is eagerly converted to tensors; train data stays raw.
        if (eval_data != None):
            self.eval_data = {'x': self.preprocess_data_x(eval_data['x']), 'y': self.preprocess_data_y(eval_data['y'])}
        else:
            self.eval_data = eval_data
        # Sample-selection state and timing bookkeeping.
        self.fedbalancer = None
        self.oort = None
        self.loss_threshold = 0
        self.train_time_per_batch_list = []
        self.sorted_loss = []
        self.inference_times = []
        self.inference_times_per_sample = []
        self.per_epoch_train_times = []
        self.per_batch_train_times = []
        self.trained_num_of_samples = []
        self.device = device
        if (self.device == None):
            logger.warn('client {} with no device init, upload time will be set as 0 and speed will be the gpu speed'.format(self.id))
            self.upload_time = 0
        # Without a behavior trace, behavioral heterogeneity is disabled.
        d = Client.d
        if (d == None):
            cfg.behav_hete = False
        if cfg.behav_hete:
            # Sample user traces until one yields a usable timer.
            uid = random.sample(list(d.keys()), 1)[0]
            self.timer = Timer(ubt=d[str(uid)], google=True)
            while (self.timer.isSuccess != True):
                uid = random.sample(list(d.keys()), 1)[0]
                self.timer = Timer(ubt=d[str(uid)], google=True)
        else:
            # No-trace mode: always-available timer, effectively no deadline.
            self.timer = Timer(None)
            self.deadline = sys.maxsize
        real_device_model = self.timer.model
        if (not self.device):
            self.device = Device(cfg, 0.0)
        if self.cfg.ss_baseline:
            self.is_big_client = False
            self.select_sample_num = 0
        # Hardware heterogeneity: use the trace's device model; otherwise
        # every client simulates a 'Redmi Note 8'.
        if self.cfg.hard_hete:
            curr_per_batch_train_times = self.device.set_device_model(real_device_model, self.id)
            for per_batch_train_time in curr_per_batch_train_times:
                self.per_batch_train_times.append(per_batch_train_time)
            if (train_data != None):
                num_train_samples = len(train_data['x'])
            else:
                num_train_samples = 1
            for per_batch_train_time in curr_per_batch_train_times:
                if (self.cfg.oortbalancer or self.cfg.oort):
                    # Oort variants train per-batch, so the epoch time
                    # equals the batch time.
                    self.per_epoch_train_times.append(per_batch_train_time)
                    self.per_batch_train_times.append(per_batch_train_time)
                    self.trained_num_of_samples.append(self.cfg.batch_size)
                    if self.cfg.oortbalancer:
                        # Inference assumed to cost half a training pass.
                        self.inference_times.append((((((num_train_samples - 1) // self.cfg.batch_size) + 1) * per_batch_train_time) * 0.5))
                        self.inference_times_per_sample.append((self.inference_times[(- 1)] / num_train_samples))
                else:
                    self.per_epoch_train_times.append(((((num_train_samples - 1) // self.cfg.batch_size) + 1) * per_batch_train_time))
                    self.per_batch_train_times.append(per_batch_train_time)
                    self.trained_num_of_samples.append(num_train_samples)
                    if self.cfg.fedbalancer:
                        self.inference_times.append((self.per_epoch_train_times[(- 1)] * 0.5))
                        self.inference_times_per_sample.append((self.inference_times[(- 1)] / num_train_samples))
        else:
            curr_per_batch_train_times = self.device.set_device_model('Redmi Note 8', self.id)
            for per_batch_train_time in curr_per_batch_train_times:
                self.per_batch_train_times.append(per_batch_train_time)
            if (train_data != None):
                num_train_samples = len(train_data['x'])
            else:
                num_train_samples = 1
            for per_batch_train_time in curr_per_batch_train_times:
                self.per_epoch_train_times.append(((((num_train_samples - 1) // self.cfg.batch_size) + 1) * per_batch_train_time))
                self.per_batch_train_times.append(per_batch_train_time)
                self.trained_num_of_samples.append(num_train_samples)
                self.inference_times.append((self.per_epoch_train_times[(- 1)] * 0.5))
                self.inference_times_per_sample.append((self.inference_times[(- 1)] / num_train_samples))
        self.sampled_per_epoch_train_time = np.mean(self.per_epoch_train_times)
        self.whole_data_loss_list = []
        self.is_first_round = True
    def preprocess_data_x(self, data):
        # Wrap the raw feature list in a tensor that participates in
        # autograd.
        # NOTE(review): requires_grad=True demands a floating-point
        # dtype -- integer feature lists would raise here; confirm
        # callers always pass floats.
        return torch.tensor(data, requires_grad=True)
def preprocess_data_y(self, data):
data_y = []
for i in data:
data_float = float(i)
data_y.append(data_float)
return torch.tensor(data_y, requires_grad=True)
    def inference_on_whole_dataset(self, whole_data):
        # Run the model's test pass over the full dataset and return the
        # per-sample loss list.
        # NOTE(review): reads ``self.model`` while __init__ only assigns
        # ``self._model`` -- presumably a property defined elsewhere;
        # confirm the accessor exists.
        return self.model.test(whole_data)['loss_list']
def train(self, start_t=None, num_epochs=1, batch_size=10):
    """Simulate one round of local training for this client.

    The nested helper selects training samples (uniformly, or via the
    FedBalancer / OortBalancer / Oort / sample-selection-baseline paths),
    schedules download -> (inference) -> train -> upload on the simulated
    timeline against ``self.deadline``, runs the real model update when the
    round fits (possibly with a reduced epoch count ``ne``), and raises
    ``timeout_decorator``'s TimeoutError when the round would miss the
    deadline.

    Returns:
        (simulate_time, num_train_samples, update, acc, loss, update_size,
         sorted_loss, download_time, upload_time, train_time,
         inference_time, trained_epochs)
    """
    def train_with_simulate_time(self, start_t, num_epochs=1, batch_size=10):
        # ne == -1 means "trained the full num_epochs"; otherwise it is the
        # reduced epoch count chosen to fit the deadline.
        ne = (- 1)
        num_data = min(len(self.train_data['x']), self.cfg.max_sample)
        user_whole_data_len = num_data
        # ---- sample selection -------------------------------------------
        if (self.cfg.fedbalancer or self.cfg.fb_client_selection or self.cfg.oortbalancer or (self.cfg.ss_baseline and self.is_big_client)):
            # Loss-based selection needs per-sample losses over the whole
            # local dataset; with inference pipelining these are refreshed
            # from training losses after the first round instead.
            if (self.is_first_round or (not self.cfg.fb_inference_pipelining)):
                self.whole_data_loss_list = self.fedbalancer.calculate_loss_on_whole_dataset_with_inference(self.train_data, self.model)
        if self.cfg.fedbalancer:
            (selected_data, num_data, data_idx, self.sorted_loss) = self.fedbalancer.fb_sample_selection(num_data, self.loss_threshold, self.whole_data_loss_list, self.train_data, self.deadline, self.train_time_per_batch_list, num_epochs, batch_size)
        elif self.cfg.oortbalancer:
            (selected_data, xss, yss, num_data, data_idx, self.sorted_loss) = self.fedbalancer.fb_oortbalancer_sample_selection(batch_size, self.loss_threshold, self.whole_data_loss_list, self.train_data, self.deadline, self.train_time_per_batch_list, num_epochs, self.model)
        elif self.cfg.oort:
            (selected_data, xss, yss, num_data, data_idx, self.sorted_loss) = self.oort.select_batch_samples(batch_size, self.train_data, num_epochs, self.model)
        elif (self.cfg.ss_baseline and self.is_big_client):
            # Sample-selection baseline: keep the select_sample_num samples
            # with the highest cached loss.
            data_len = self.select_sample_num
            tmp_data = zip(self.train_data['x'], self.train_data['y'])
            tmp_data = zip(tmp_data, range(len(self.train_data['x'])))
            tmp_data = zip(self.whole_data_loss_list, tmp_data)
            tmp_data = sorted(tmp_data, reverse=True, key=(lambda elem: elem[0]))
            tmp_data_pkg = [x for (_, x) in tmp_data[:data_len]]
            tmp_data = [x for (x, _) in tmp_data_pkg]
            tmp_data_idx = [x for (_, x) in tmp_data_pkg]
            num_data = self.select_sample_num
            (xs, ys) = zip(*tmp_data)
            data_idx = tmp_data_idx
            selected_data = {'x': xs, 'y': ys}
            selected_data = {'x': self.preprocess_data_x(selected_data['x']), 'y': self.preprocess_data_y(selected_data['y'])}
        else:
            # Default path: uniform random subsample of num_data points.
            (xs, ys) = zip(*random.sample(list(zip(self.train_data['x'], self.train_data['y'])), num_data))
            data_idx = list(range(len(ys)))
            selected_data = {'x': xs, 'y': ys}
            selected_data = {'x': self.preprocess_data_x(selected_data['x']), 'y': self.preprocess_data_y(selected_data['y'])}
        data = selected_data
        # ---- simulated device timings -----------------------------------
        (train_time, train_time_per_batch, train_time_per_epoch) = self.device.get_train_time_and_train_time_per_batch_and_train_time_per_epoch(num_data, batch_size, num_epochs)
        self.train_time_per_batch_list.append(train_time_per_batch)
        logger.debug('client {}: num data:{}'.format(self.id, num_data))
        logger.debug('client {}: train time:{}'.format(self.id, train_time))
        # Inference cost is modeled as half a one-epoch pass over the whole
        # local dataset, and skipped when losses are already available
        # (pipelining after round 1, or full-data selection).
        inference_time = 0
        if self.cfg.fedbalancer:
            if (num_data == user_whole_data_len):
                inference_time = 0
            elif (self.cfg.fb_inference_pipelining and (not self.is_first_round)):
                inference_time = 0
            else:
                (inference_time, _) = self.device.get_train_time_and_train_time_per_batch(user_whole_data_len, batch_size, 1)
                inference_time = (inference_time * 0.5)
        elif (self.cfg.ss_baseline and self.is_big_client):
            if (not self.is_first_round):
                inference_time = 0
            else:
                (inference_time, _) = self.device.get_train_time_and_train_time_per_batch(user_whole_data_len, batch_size, 1)
                inference_time = (inference_time * 0.5)
        elif self.cfg.oortbalancer:
            if (self.cfg.fb_inference_pipelining and (not self.is_first_round)):
                inference_time = 0
            else:
                (inference_time, _) = self.device.get_train_time_and_train_time_per_batch(user_whole_data_len, batch_size, 1)
                inference_time = (inference_time * 0.5)
        if self.is_first_round:
            self.is_first_round = False
        download_time = self.device.get_download_time()
        upload_time = self.device.get_upload_time()
        self.act_inference_time = 0
        self.ori_inference_time = 0
        # ---- schedule the round phases on the simulated timeline --------
        if (self.cfg.fedbalancer or self.cfg.oortbalancer or (self.cfg.ss_baseline and self.is_big_client)):
            down_end_time = self.timer.get_future_time(start_t, download_time)
            logger.debug('client {} download-time-need={}, download-time-cost={} end at {}, '.format(self.id, download_time, (down_end_time - start_t), down_end_time))
            inference_end_time = self.timer.get_future_time(down_end_time, inference_time)
            logger.debug('client {} inference-time-need={}, inference-time-cost={} end at {}, '.format(self.id, inference_time, (inference_end_time - down_end_time), inference_end_time))
            train_end_time = self.timer.get_future_time(inference_end_time, train_time)
            logger.debug('client {} train-time-need={}, train-time-cost={} end at {}, '.format(self.id, train_time, (train_end_time - inference_end_time), train_end_time))
            up_end_time = self.timer.get_future_time(train_end_time, upload_time)
            logger.debug('client {} upload-time-need={}, upload-time-cost={} end at {}, '.format(self.id, upload_time, (up_end_time - train_end_time), up_end_time))
            self.ori_download_time = download_time
            self.ori_inference_time = inference_time
            self.ori_train_time = train_time
            self.before_comp_upload_time = upload_time
            self.ori_upload_time = upload_time
            self.act_download_time = (down_end_time - start_t)
            self.act_inference_time = (inference_end_time - down_end_time)
            self.act_train_time = (train_end_time - inference_end_time)
            self.act_upload_time = (up_end_time - train_end_time)
            self.update_size = self.model.size
        else:
            # No separate inference phase on this path.
            down_end_time = self.timer.get_future_time(start_t, download_time)
            logger.debug('client {} download-time-need={}, download-time-cost={} end at {}, '.format(self.id, download_time, (down_end_time - start_t), down_end_time))
            train_end_time = self.timer.get_future_time(down_end_time, train_time)
            logger.debug('client {} train-time-need={}, train-time-cost={} end at {}, '.format(self.id, train_time, (train_end_time - down_end_time), train_end_time))
            up_end_time = self.timer.get_future_time(train_end_time, upload_time)
            logger.debug('client {} upload-time-need={}, upload-time-cost={} end at {}, '.format(self.id, upload_time, (up_end_time - train_end_time), up_end_time))
            self.ori_download_time = download_time
            self.ori_train_time = train_time
            self.before_comp_upload_time = upload_time
            self.ori_upload_time = upload_time
            self.act_download_time = (down_end_time - start_t)
            self.act_train_time = (train_end_time - down_end_time)
            self.act_upload_time = (up_end_time - train_end_time)
            self.update_size = self.model.size
        data_loss_list_and_idx = []
        # ---- deadline enforcement and the actual training call ----------
        if (not (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc)):
            if (not (self.cfg.fedbalancer or self.cfg.oortbalancer or (self.cfg.ss_baseline and self.is_big_client))):
                if ((down_end_time - start_t) > self.deadline):
                    self.update_size = 0
                    failed_reason = 'failed when downloading'
                    self.sorted_loss = []
                    raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                elif ((train_end_time - start_t) > self.deadline):
                    train_time_limit = (self.deadline - self.act_download_time)
                    if (train_time_limit <= 0):
                        train_time_limit = 0.001
                    available_time = self.timer.get_available_time((start_t + self.act_download_time), train_time_limit)
                    self.update_size = 0
                    if (self.cfg.fedprox or self.cfg.fedbalancer or self.cfg.oortbalancer):
                        # Largest epoch count that still meets the deadline.
                        ne = (- 1)
                        for i in range(1, num_epochs):
                            et = self.timer.get_future_time(down_end_time, (((train_time * i) / num_epochs) + upload_time))
                            if ((et - start_t) <= self.deadline):
                                ne = i
                        if self.cfg.no_training:
                            (update, acc, loss) = ((- 1), (- 1), (- 1))
                        elif (self.cfg.fedprox and (ne != (- 1))):
                            if (self.cfg.oort or self.cfg.oortbalancer):
                                (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                            else:
                                (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                            train_time *= (ne / num_epochs)
                            logger.debug('client {} train-epochs={}'.format(self.id, ne))
                        elif ((self.cfg.fedbalancer or self.cfg.oortbalancer) and (ne != (- 1))):
                            if (self.cfg.oort or self.cfg.oortbalancer):
                                (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                            else:
                                (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                            train_time *= (ne / num_epochs)
                            logger.debug('client {} train-epochs={}'.format(self.id, ne))
                        else:
                            failed_reason = 'failed when training'
                            self.sorted_loss = []
                            if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                                self.inference_times.append(inference_time)
                                self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                            raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                    else:
                        failed_reason = 'failed when training'
                        self.sorted_loss = []
                        if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                            self.inference_times.append(inference_time)
                            self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                        raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                elif ((up_end_time - start_t) > self.deadline):
                    if (self.cfg.fedprox or self.cfg.fedbalancer or self.cfg.oortbalancer):
                        ne = (- 1)
                        for i in range(1, num_epochs):
                            et = self.timer.get_future_time(down_end_time, (((train_time * i) / num_epochs) + upload_time))
                            if ((et - start_t) <= self.deadline):
                                ne = i
                        if self.cfg.no_training:
                            (update, acc, loss) = ((- 1), (- 1), (- 1))
                        elif (self.cfg.fedprox and (ne != (- 1))):
                            if (self.cfg.oort or self.cfg.oortbalancer):
                                (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                            else:
                                (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                            train_time *= (ne / num_epochs)
                            logger.debug('client {} train-epochs={}'.format(self.id, ne))
                        elif ((self.cfg.fedbalancer or self.cfg.oortbalancer) and (ne != (- 1))):
                            if (self.cfg.oort or self.cfg.oortbalancer):
                                (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                            else:
                                (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                            train_time *= (ne / num_epochs)
                            logger.debug('client {} train-epochs={}'.format(self.id, ne))
                        else:
                            failed_reason = 'failed when uploading'
                            if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                                self.inference_times.append(inference_time)
                                self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                            self.per_epoch_train_times.append(train_time_per_epoch)
                            self.per_batch_train_times.append(train_time_per_batch)
                            if (not self.cfg.oortbalancer):
                                self.trained_num_of_samples.append(len(data['x']))
                            self.sorted_loss = []
                            raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                    else:
                        failed_reason = 'failed when uploading'
                        if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                            self.inference_times.append(inference_time)
                            self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                        self.per_epoch_train_times.append(train_time_per_epoch)
                        self.per_batch_train_times.append(train_time_per_batch)
                        if (not self.cfg.oortbalancer):
                            self.trained_num_of_samples.append(len(data['x']))
                        self.sorted_loss = []
                        raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                elif self.cfg.no_training:
                    # FIX: was a 5-element tuple unpacked into 3 names
                    # (ValueError at runtime); 3-element form matches the
                    # other no_training branches.
                    (update, acc, loss) = ((- 1), (- 1), (- 1))
                else:
                    if (self.cfg.oort or self.cfg.oortbalancer):
                        (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, num_epochs, batch_size, self.cfg.oortbalancer)
                    else:
                        (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, num_epochs, batch_size)
                    logger.debug('client {} train-epochs={}'.format(self.id, num_epochs))
            elif ((down_end_time - start_t) > self.deadline):
                self.update_size = 0
                failed_reason = 'failed when downloading'
                self.sorted_loss = []
                raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
            elif ((inference_end_time - start_t) > self.deadline):
                self.update_size = 0
                failed_reason = 'failed when inferencing'
                self.sorted_loss = []
                raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
            elif ((train_end_time - start_t) > self.deadline):
                train_time_limit = ((self.deadline - self.act_download_time) - self.act_inference_time)
                if (train_time_limit <= 0):
                    train_time_limit = 0.001
                available_time = self.timer.get_available_time(((start_t + self.act_download_time) + self.act_inference_time), train_time_limit)
                self.update_size = 0
                if (self.cfg.fedprox or self.cfg.fedbalancer or self.cfg.oortbalancer):
                    ne = (- 1)
                    for i in range(1, num_epochs):
                        et = self.timer.get_future_time(inference_end_time, (((train_time * i) / num_epochs) + upload_time))
                        if ((et - start_t) <= self.deadline):
                            ne = i
                    if self.cfg.no_training:
                        # FIX: 5-element tuple collapsed to match 3 targets.
                        (update, acc, loss) = ((- 1), (- 1), (- 1))
                    elif (self.cfg.fedprox and (ne != (- 1))):
                        # FIX: route oortbalancer through oorttrain here as
                        # well, consistent with every parallel branch.
                        if (self.cfg.oort or self.cfg.oortbalancer):
                            (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                        else:
                            (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                        train_time *= (ne / num_epochs)
                        logger.debug('client {} train-epochs={}'.format(self.id, ne))
                    elif ((self.cfg.fedbalancer or self.cfg.oortbalancer) and (ne != (- 1))):
                        if (self.cfg.oort or self.cfg.oortbalancer):
                            (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                        else:
                            (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                        train_time *= (ne / num_epochs)
                        logger.debug('client {} train-epochs={}'.format(self.id, ne))
                    else:
                        failed_reason = 'failed when training'
                        if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                            self.inference_times.append(inference_time)
                            self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                        if ((self.act_download_time + self.act_inference_time) > self.deadline):
                            self.sorted_loss = []
                        raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                else:
                    if ((self.act_download_time + self.act_inference_time) > self.deadline):
                        self.sorted_loss = []
                    failed_reason = 'failed when training'
                    if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                        self.inference_times.append(inference_time)
                        self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                    raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
            elif ((up_end_time - start_t) > self.deadline):
                if (self.cfg.fedprox or self.cfg.fedbalancer or self.cfg.oortbalancer):
                    ne = (- 1)
                    for i in range(1, num_epochs):
                        et = self.timer.get_future_time(inference_end_time, (((train_time * i) / num_epochs) + upload_time))
                        if ((et - start_t) <= self.deadline):
                            ne = i
                    if self.cfg.no_training:
                        # FIX: 5-element tuple collapsed to match 3 targets.
                        (update, acc, loss) = ((- 1), (- 1), (- 1))
                    elif (self.cfg.fedprox and (ne != (- 1))):
                        if (self.cfg.oort or self.cfg.oortbalancer):
                            (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                        else:
                            (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                        train_time *= (ne / num_epochs)
                        logger.debug('client {} train-epochs={}'.format(self.id, ne))
                    elif ((self.cfg.fedbalancer or self.cfg.oortbalancer) and (ne != (- 1))):
                        if (self.cfg.oort or self.cfg.oortbalancer):
                            (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, ne, batch_size, self.cfg.oortbalancer)
                        else:
                            (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, ne, batch_size)
                        train_time *= (ne / num_epochs)
                        logger.debug('client {} train-epochs={}'.format(self.id, ne))
                    else:
                        failed_reason = 'failed when uploading'
                        if ((self.act_download_time + self.act_inference_time) > self.deadline):
                            self.sorted_loss = []
                        if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                            self.inference_times.append(inference_time)
                            self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                        self.per_epoch_train_times.append(train_time_per_epoch)
                        self.per_batch_train_times.append(train_time_per_batch)
                        if (not self.cfg.oortbalancer):
                            self.trained_num_of_samples.append(len(data['x']))
                        raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
                else:
                    failed_reason = 'failed when uploading'
                    if ((self.act_download_time + self.act_inference_time) > self.deadline):
                        self.sorted_loss = []
                    if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                        self.inference_times.append(inference_time)
                        self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
                    self.per_epoch_train_times.append(train_time_per_epoch)
                    self.per_batch_train_times.append(train_time_per_batch)
                    if (not self.cfg.oortbalancer):
                        self.trained_num_of_samples.append(len(data['x']))
                    raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
            elif self.cfg.no_training:
                # FIX: 5-element tuple collapsed to match 3 targets.
                (update, acc, loss) = ((- 1), (- 1), (- 1))
            else:
                if (self.cfg.oort or self.cfg.oortbalancer):
                    (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, num_epochs, batch_size, self.cfg.oortbalancer)
                else:
                    (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, num_epochs, batch_size)
                logger.debug('client {} train-epochs={}'.format(self.id, num_epochs))
        elif self.cfg.no_training:
            # FIX: 5-element tuple collapsed to match 3 targets.
            (update, acc, loss) = ((- 1), (- 1), (- 1))
        else:
            if (self.cfg.oort or self.cfg.oortbalancer):
                (update, acc, loss, data_loss_list_and_idx) = self.model.oorttrain(data_idx, xss, yss, num_epochs, batch_size, self.cfg.oortbalancer)
            else:
                (update, acc, loss, data_loss_list_and_idx) = self.model.train(data, data_idx, num_epochs, batch_size)
            logger.debug('client {} train-epochs={}'.format(self.id, num_epochs))
        # ---- post-training bookkeeping ----------------------------------
        if ((self.cfg.fb_inference_pipelining or (self.cfg.ss_baseline and self.is_big_client)) and (len(data_loss_list_and_idx) > 0)):
            # Refresh the cached per-sample losses from losses observed
            # during training (avoids a separate inference pass next round).
            data_loss_list = [x for (x, _) in data_loss_list_and_idx]
            data_loss_list_idx = [x for (_, x) in data_loss_list_and_idx]
            for (dll_idx, d_idx) in enumerate(data_loss_list_idx):
                self.whole_data_loss_list[d_idx] = data_loss_list[dll_idx]
        num_train_samples = len(data['y'])
        simulate_time_c = (((download_time + inference_time) + train_time) + upload_time)
        if ((self.cfg.fedprox or self.cfg.fedbalancer or self.cfg.oortbalancer) and (ne != (- 1))):
            # Scale the actual train time down to the epochs really run.
            self.act_train_time = ((self.act_train_time * ne) / num_epochs)
        total_cost = (((self.act_download_time + self.act_inference_time) + self.act_train_time) + self.act_upload_time)
        if ((total_cost > self.deadline) and (not (self.cfg.oort_pacer or self.cfg.ddl_baseline_smartpc))):
            failed_reason = 'failed when uploading'
            if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
                self.inference_times.append(inference_time)
                self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
            self.per_epoch_train_times.append(train_time_per_epoch)
            self.per_batch_train_times.append(train_time_per_batch)
            if (not self.cfg.oortbalancer):
                self.trained_num_of_samples.append(len(data['x']))
            if ((self.act_download_time + self.act_inference_time) > self.deadline):
                self.sorted_loss = []
            raise timeout_decorator.timeout_decorator.TimeoutError(failed_reason)
        if ((inference_time != 0) and (not self.cfg.fb_inference_pipelining)):
            self.inference_times.append(inference_time)
            self.inference_times_per_sample.append((self.inference_times[(- 1)] / user_whole_data_len))
        self.per_epoch_train_times.append(train_time_per_epoch)
        self.per_batch_train_times.append(train_time_per_batch)
        if (not self.cfg.oortbalancer):
            self.trained_num_of_samples.append(len(data['x']))
        if (ne == (- 1)):
            return (simulate_time_c, num_train_samples, update, acc, loss, self.update_size, self.sorted_loss, download_time, upload_time, train_time, inference_time, num_epochs)
        else:
            return (simulate_time_c, num_train_samples, update, acc, loss, self.update_size, self.sorted_loss, download_time, upload_time, train_time, inference_time, ne)
    return train_with_simulate_time(self, start_t, num_epochs, batch_size)
def test(self, set_to_use='test'):
assert (set_to_use in ['train', 'test', 'val'])
if (set_to_use == 'train'):
data = self.train_data
elif ((set_to_use == 'test') or (set_to_use == 'val')):
data = self.eval_data
return self.model.test(data)
def num_test_samples(self):
    """Number of samples in this client's eval split (0 when absent)."""
    return 0 if self.eval_data is None else len(self.eval_data['y'])
def num_train_samples(self):
    """Number of samples in this client's train split (0 when absent)."""
    return 0 if self.train_data is None else len(self.train_data['y'])
def num_samples(self):
    """Total number of samples this client holds across train and eval splits."""
    total = 0
    for split in (self.train_data, self.eval_data):
        if split is not None:
            total += len(split['y'])
    return total
def model(self):
    """Return the underlying model instance.

    NOTE(review): this looks like an '@property' getter whose decorator was
    stripped during extraction — confirm against the original source.
    """
    return self._model
def model(self, model):
    """Replace the shared model, warning that the change affects every client.

    NOTE(review): this looks like an '@model.setter' whose decorator was
    stripped during extraction — confirm against the original source.
    """
    # FIX: restored the missing space in the warning ('clients.Setting').
    warnings.warn('The current implementation shares the model among all clients. Setting it on one client will effectively modify all clients.')
    self._model = model
def set_deadline(self, deadline=(- 1)):
    """Set this client's round deadline; a negative value means 'no deadline'."""
    self.deadline = sys.maxsize if deadline < 0 else deadline
    logger.debug("client {}'s deadline is set to {}".format(self.id, self.deadline))
def set_loss_threshold(self, loss_threshold=(- 1)):
    """Set this client's loss-selection threshold; negative values reset it to 0."""
    self.loss_threshold = 0 if loss_threshold < 0 else loss_threshold
    logger.debug("client {}'s loss threshold is set to {}".format(self.id, self.loss_threshold))
def upload_suc(self, start_t, num_epochs=1, batch_size=10):
    """Return True when download, training, and upload would all succeed
    within this client's deadline on the simulated timeline.
    """
    num_data = min(len(self.train_data['x']), self.cfg.max_sample)
    if (self.device is None):  # FIX: identity comparison, was '== None'
        download_time = 0.0
        upload_time = 0.0
        # NOTE(review): with device=None the get_train_time... call below
        # still dereferences self.device and would raise AttributeError —
        # confirm whether a device-less client can ever reach this method.
    else:
        download_time = self.device.get_download_time()
        upload_time = self.device.get_upload_time()
    (train_time, _) = self.device.get_train_time_and_train_time_per_batch(num_data, batch_size, num_epochs)
    train_time_limit = ((self.deadline - download_time) - upload_time)
    if (train_time_limit < 0):
        train_time_limit = 0.001
    available_time = self.timer.get_available_time((start_t + download_time), train_time_limit)
    logger.debug('client {}: train time:{}'.format(self.id, train_time))
    logger.debug('client {}: available time:{}'.format(self.id, available_time))
    # The sampled data itself is unused, but the draw is kept so the
    # module-level RNG sequence matches the original implementation.
    (xs, ys) = zip(*random.sample(list(zip(self.train_data['x'], self.train_data['y'])), num_data))
    if (not self.timer.check_comm_suc(start_t, download_time)):
        return False
    if (train_time > train_time_limit):
        return False
    elif (train_time > available_time):
        return False
    # Success iff the upload also completes on the simulated channel.
    return self.timer.check_comm_suc(((start_t + download_time) + train_time), upload_time)
def get_device_model(self):
    """Return the attached device's model name, or the string 'None' when no device is set."""
    if self.device is None:  # FIX: identity comparison, was '== None'
        return 'None'
    return self.device.device_model
def create_dataset(cfg):
    """Build datasets for cfg.dataset with positional-encoding pre-transforms
    and optional METIS graph-partition transforms.

    The return shape varies by dataset:
    - most datasets: (train_dataset, val_dataset, test_dataset)
    - 'CSL' / 'exp-classify': (dataset, transform_train, transform_eval),
      leaving splitting/transforming to the caller
    - 'sr25-classify': the same materialized dataset three times
    """
    pre_transform = PositionalEncodingTransform(rw_dim=cfg.pos_enc.rw_dim, lap_dim=cfg.pos_enc.lap_dim)
    # Per-dataset base transforms (applied at access time, not preprocessing).
    if ((cfg.dataset == 'MNIST') or (cfg.dataset == 'CIFAR10')):
        transform_train = transform_eval = SuperpixelTransform()
    elif (cfg.dataset == 'CSL'):
        transform_train = transform_eval = CSLTransform()
    else:
        transform_train = transform_eval = None
    if (cfg.metis.n_patches > 0):
        # Train partitioning uses cfg drop_rate; eval uses drop_rate=0.0 so
        # evaluation patches are deterministic.
        _transform_train = GraphPartitionTransform(n_patches=cfg.metis.n_patches, metis=cfg.metis.enable, drop_rate=cfg.metis.drop_rate, num_hops=cfg.metis.num_hops, is_directed=(cfg.dataset == 'TreeDataset'), patch_rw_dim=cfg.pos_enc.patch_rw_dim, patch_num_diff=cfg.pos_enc.patch_num_diff)
        _transform_eval = GraphPartitionTransform(n_patches=cfg.metis.n_patches, metis=cfg.metis.enable, drop_rate=0.0, num_hops=cfg.metis.num_hops, is_directed=(cfg.dataset == 'TreeDataset'), patch_rw_dim=cfg.pos_enc.patch_rw_dim, patch_num_diff=cfg.pos_enc.patch_num_diff)
        if (cfg.dataset in ['MNIST', 'CIFAR10', 'CSL']):
            # These datasets already have a base transform: chain it with
            # the partition transform.
            transform_train = Compose([transform_train, _transform_train])
            transform_eval = Compose([transform_eval, _transform_eval])
        else:
            transform_train = _transform_train
            transform_eval = _transform_eval
    if (cfg.dataset == 'ZINC'):
        root = 'dataset/ZINC'
        train_dataset = ZINC(root, subset=True, split='train', pre_transform=pre_transform, transform=transform_train)
        val_dataset = ZINC(root, subset=True, split='val', pre_transform=pre_transform, transform=transform_eval)
        test_dataset = ZINC(root, subset=True, split='test', pre_transform=pre_transform, transform=transform_eval)
    elif ((cfg.dataset == 'MNIST') or (cfg.dataset == 'CIFAR10')):
        root = 'dataset'
        train_dataset = GNNBenchmarkDataset(root, cfg.dataset, split='train', pre_transform=pre_transform, transform=transform_train)
        val_dataset = GNNBenchmarkDataset(root, cfg.dataset, split='val', pre_transform=pre_transform, transform=transform_eval)
        test_dataset = GNNBenchmarkDataset(root, cfg.dataset, split='test', pre_transform=pre_transform, transform=transform_eval)
    elif ('ogbg' in cfg.dataset):
        # Imported lazily so ogb is only required for OGB datasets.
        from ogb.graphproppred import PygGraphPropPredDataset
        dataset = PygGraphPropPredDataset(cfg.dataset, 'dataset', pre_transform=pre_transform)
        split_idx = dataset.get_idx_split()
        (train_dataset, val_dataset, test_dataset) = (dataset[split_idx['train']], dataset[split_idx['valid']], dataset[split_idx['test']])
        (train_dataset.transform, val_dataset.transform, test_dataset.transform) = (transform_train, transform_eval, transform_eval)
    elif (cfg.dataset == 'peptides-func'):
        dataset = PeptidesFunctionalDataset(root='dataset', pre_transform=pre_transform)
        split_idx = dataset.get_idx_split()
        (train_dataset, val_dataset, test_dataset) = (dataset[split_idx['train']], dataset[split_idx['val']], dataset[split_idx['test']])
        (train_dataset.transform, val_dataset.transform, test_dataset.transform) = (transform_train, transform_eval, transform_eval)
    elif (cfg.dataset == 'peptides-struct'):
        dataset = PeptidesStructuralDataset(root='dataset', pre_transform=pre_transform)
        split_idx = dataset.get_idx_split()
        (train_dataset, val_dataset, test_dataset) = (dataset[split_idx['train']], dataset[split_idx['val']], dataset[split_idx['test']])
        (train_dataset.transform, val_dataset.transform, test_dataset.transform) = (transform_train, transform_eval, transform_eval)
    elif (cfg.dataset == 'CSL'):
        root = 'dataset'
        dataset = GNNBenchmarkDataset(root, cfg.dataset, pre_transform=pre_transform)
        # Early return: caller performs the split and applies the transforms.
        return (dataset, transform_train, transform_eval)
    elif (cfg.dataset == 'exp-classify'):
        root = 'dataset/EXP/'
        dataset = PlanarSATPairsDataset(root, pre_transform=pre_transform)
        return (dataset, transform_train, transform_eval)
    elif (cfg.dataset == 'sr25-classify'):
        root = 'dataset/sr25'
        dataset = SRDataset(root, pre_transform=pre_transform)
        dataset.transform = transform_eval
        dataset.data.x = dataset.data.x.long()
        # Each graph becomes its own class (graph-isomorphism probing task).
        dataset.data.y = torch.arange(len(dataset.data.y)).long()
        dataset = [x for x in dataset]
        return (dataset, dataset, dataset)
    elif (cfg.dataset == 'TreeDataset'):
        root = 'dataset/TreeDataset'
        dataset = TreeDataset(root, cfg.depth)
        (train_dataset, val_dataset, test_dataset) = (dataset.train, dataset.val, dataset.test)
        if (transform_train is not None):
            train_dataset = [transform_train(x) for x in train_dataset]
            # NOTE(review): val/test are also mapped with transform_train
            # (not transform_eval), so the training drop_rate is applied at
            # evaluation time here — confirm whether that is intentional.
            val_dataset = [transform_train(x) for x in val_dataset]
            test_dataset = [transform_train(x) for x in test_dataset]
        return (train_dataset, val_dataset, test_dataset)
    else:
        print('Dataset not supported.')
        exit(1)
    # Reached only by the branches above that did not return early
    # (ZINC / MNIST / CIFAR10 / ogbg-* / peptides-*).
    torch.set_num_threads(cfg.num_workers)
    if (not cfg.metis.online):
        # Materialize (and thereby pre-apply transforms once) when online
        # partitioning is disabled.
        train_dataset = [x for x in train_dataset]
        val_dataset = [x for x in val_dataset]
        test_dataset = [x for x in test_dataset]
    return (train_dataset, val_dataset, test_dataset)
class _OmeTiffVIPSReader(_VIPSReader):
    """VIPS-backed reader for an OME-TIFF with a fixed page layout.

    Assumes the file stores four image kinds (label / overview / main /
    macro), each occupying three consecutive TIFF pages (one per RGB
    channel — see read_level), with pyramid levels held in sub-IFDs.
    NOTE(review): page_labels and num_pyramid_levels are hard-coded for
    one scanner layout — confirm against the slides this reader targets.
    """
    def __init__(self, *args, **kwargs):
        # Fixed page-group-index -> semantic-label mapping for this layout.
        self.page_labels = {0: 'label', 1: 'overview', 2: 'main', 3: 'macro'}
        self.num_pyramid_levels = 5
        super().__init__(*args, **kwargs)
    def get_page_by_label(self, label: str) -> int:
        """Return the page-group index for a semantic label; raise if unknown."""
        for (page, page_label) in self.page_labels.items():
            if (page_label == label):
                return page
        raise ValueError(f'Unknown page label {label}')
    def _load_levels(self, vips_image: Optional['vips.Image']):
        """Populate level metadata by probing each pyramid level of the main image.

        Note: the vips_image parameter is accepted (base-class signature)
        but unused — levels are re-read from self.path instead.
        """
        log.debug('Attempting to read levels from OME-TIFF')
        # The 3-pages-per-image layout requires a page count divisible by 3.
        if (not ((self.properties['n-pages'] % 3) == 0)):
            raise errors.SlideError(f"Unexpected number of pages in OME-TIFF. Expected a multiple of 3, but found {self.properties['n-pages']}.")
        self.level_count = self.num_pyramid_levels
        main_page = self.get_page_by_label('main')
        self.levels = []
        for lev in range(self.level_count):
            # subifd=(lev - 1): presumably subifd=-1 selects the page's main
            # IFD and 0.. select the pyramid sub-IFDs — TODO confirm against
            # the pyvips tiffload documentation.
            temp_img = vips.Image.new_from_file(self.path, page=(main_page * 3), subifd=(lev - 1))
            width = int(temp_img.get('width'))
            height = int(temp_img.get('height'))
            downsample = float((int(self.properties[OPS_WIDTH]) / width))
            self.levels += [{'dimensions': (width, height), 'width': width, 'height': height, 'downsample': downsample, 'level': lev}]
        # Order levels largest-first, as expected for pyramid level 0.
        self.levels = sorted(self.levels, key=(lambda x: x['width']), reverse=True)
        log.debug(f'Read {self.level_count} levels.')
        self.level_downsamples = [lev['downsample'] for lev in self.levels]
        self.level_dimensions = [lev['dimensions'] for lev in self.levels]
        (width, height) = self.levels[0]['dimensions']
        self.properties[OPS_WIDTH] = width
        self.properties[OPS_HEIGHT] = height
        self.dimensions = (width, height)
        # Recompute downsamples relative to the (possibly updated) level-0 width.
        for lev in range(self.level_count):
            self.levels[lev]['downsample'] = float((int(self.properties[OPS_WIDTH]) / self.levels[lev]['width']))
            self.level_downsamples[lev] = self.levels[lev]['downsample']
    def thumbnail(self, width: int=512, fail: bool=True, access=vips.enums.Access.RANDOM, level: Optional[int]=2, **kwargs) -> np.ndarray:
        """Return a numpy thumbnail read from a low-resolution pyramid level.

        Note: the width parameter is accepted but unused; the thumbnail size
        is determined entirely by the chosen level.
        """
        thumbnail = self.read_level(fail=fail, access=access, level=level, **kwargs)
        try:
            thumb = vips2numpy(thumbnail)
            return thumb
        except vips.error.Error as e:
            raise sf.errors.SlideLoadError(f'Error loading slide thumbnail: {e}')
    def read_level(self, fail: bool=True, access=vips.enums.Access.RANDOM, to_numpy: bool=False, level: int=0, **kwargs) -> Union[(vips.Image, np.ndarray)]:
        """Read one pyramid level of the main image as an RGB vips image.

        The R, G, and B channels live on three consecutive pages starting at
        main_page * 3; they are loaded separately and band-joined.
        """
        main_page = self.get_page_by_label('main')
        (r, g, b) = [vips.Image.new_from_file(self.path, fail=fail, access=access, page=((main_page * 3) + n), subifd=(level - 1), **kwargs) for n in range(3)]
        image = r.bandjoin([g, b])
        image = self.bound_and_transform(image, level=level)
        if to_numpy:
            return vips2numpy(image)
        else:
            return image
def get_input_transform():
    """Standard ImageNet-style preprocessing: resize to 256, center-crop 224,
    tensorize, and normalize with the ImageNet channel statistics."""
    return transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
# FIX(review): the three decorator lines were mangled to '.mujoco',
# '.no_cover', '.timeout(20)' — restored as pytest marks (relies on the
# file-level 'import pytest' this test file must already have). Confirm
# against the original source.
@pytest.mark.mujoco
@pytest.mark.no_cover
@pytest.mark.timeout(20)
def test_maml_halfcheetah():
    """Smoke-test the MAML-TRPO half-cheetah example script for one tiny epoch."""
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'torch/maml_trpo_half_cheetah_dir.py')), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1'], check=False).returncode == 0)
class TFRobertaForTokenClassification():
    """Placeholder for the TensorFlow RoBERTa token-classification model.

    Dummy object used when TensorFlow is not installed: every entry point
    calls requires_tf, which raises an informative error.
    NOTE(review): in the upstream dummy-object pattern this class normally
    carries a DummyObject metaclass and a classmethod decorator on
    from_pretrained; both appear stripped here — confirm against the
    original source.
    """
    def __init__(self, *args, **kwargs):
        # Raises immediately: TensorFlow is required to instantiate this model.
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # Raises: loading pretrained weights also requires TensorFlow.
        requires_tf(self)
def topk_meter(ctx: Context, train_ctx: Context, k: int=1) -> float:
    """Maintain a running average of top-k accuracy across batches.

    A fresh AverageMeter is stored on ctx on first use and reset at the
    start of each epoch (batch_idx == 0). Returns the running average
    accuracy (percent) after folding in the current batch.
    """
    def accuracy(output, target, k=1):
        # Percentage of targets found among the top-k predictions.
        batch_size = target.size(0)
        (_, pred) = output.topk(k, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
        # FIX: reshape instead of view — correct[:k] is a non-contiguous
        # slice of a transposed tensor for k > 1, and Tensor.view raises
        # on non-contiguous inputs.
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
        return correct_k.mul_((100.0 / batch_size))
    if ('meter' not in ctx):
        ctx.meter = AverageMeter()
    if (train_ctx.batch_idx == 0):
        ctx.meter.reset()
    acc = accuracy(train_ctx.output, train_ctx.target, k)
    ctx.meter.update(acc.item())
    return ctx.meter.avg
(version='2.0')
class TuningItem():
    """A named tuning parameter holding a list of candidate options.

    Options may be plain ints/strs or nested TuningItem instances.
    """
    def __init__(self, name, options=None, item_type=None):
        """Create a tuning item.

        FIX: the default was the mutable 'options=[]', which is shared
        across every TuningItem constructed without an explicit list;
        replaced with the None-sentinel idiom.
        """
        self.name = name
        self._options = options if options is not None else []
        self.item_type = item_type
    @property
    def options(self):
        """The list of candidate options.

        FIX(review): restored '@property' — get_options_name,
        get_option_by_name, and get_details all iterate 'self.options'
        without calling it, so this accessor must be a property (the
        decorator appears to have been stripped).
        """
        return self._options
    def get_options_name(self):
        """Return the .name of every option (options are assumed to have one)."""
        return [o.name for o in self.options]
    def append(self, option):
        """Add an option to the list."""
        self._options.append(option)
    def remove(self, option):
        """Remove an option if present; silently ignore it otherwise."""
        if (option in self._options):
            self._options.remove(option)
    def get_option_by_name(self, option_name):
        """Return the nested TuningItem option with the given name, or None."""
        for option in self.options:
            if (isinstance(option, TuningItem) and (option.name == option_name)):
                return option
        return None
    def get_details(self, depth=0):
        """Render this item and its options as an indented multi-line string."""
        details = [(('\t' * depth) + f'{self.name}, {self.item_type}')]
        for option in self.options:
            if (isinstance(option, int) or isinstance(option, str)):
                details.append((('\t' * depth) + str(option)))
            else:
                # Nested TuningItem: recurse one level deeper.
                details.append(option.get_details((depth + 1)))
        return '\n'.join(details)
class UncondMetrics(Metric):
    """Unconditional motion-generation metrics: FID, KID and diversity,
    accumulated over motion embeddings and reduced in ``compute``."""
    full_state_update = True

    def __init__(self, top_k=3, R_size=32, diversity_times=300, dist_sync_on_step=True, **kwargs):
        super().__init__(dist_sync_on_step=dist_sync_on_step)
        self.name = 'fid, kid, and diversity scores'
        self.top_k = top_k
        self.R_size = R_size
        # BUG FIX: previously hard-coded to 300, silently ignoring the
        # `diversity_times` constructor argument.
        self.diversity_times = diversity_times
        self.add_state('count', default=torch.tensor(0), dist_reduce_fx='sum')
        self.add_state('count_seq', default=torch.tensor(0), dist_reduce_fx='sum')
        self.metrics = []
        self.add_state('KID_mean', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.add_state('KID_std', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.extend(['KID_mean', 'KID_std'])
        self.add_state('FID', default=torch.tensor(0.0), dist_reduce_fx='mean')
        self.metrics.append('FID')
        self.add_state('Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.add_state('gt_Diversity', default=torch.tensor(0.0), dist_reduce_fx='sum')
        self.metrics.extend(['Diversity', 'gt_Diversity'])
        # Lists of per-batch embedding tensors for generated and GT motions.
        self.add_state('recmotion_embeddings', default=[], dist_reduce_fx=None)
        self.add_state('gtmotion_embeddings', default=[], dist_reduce_fx=None)

    def compute(self, sanity_flag):
        """Reduce accumulated embeddings into the metric dict.

        During a sanity check the (zero-initialized) metric states are
        returned without touching the embedding buffers.
        """
        count = self.count.item()
        count_seq = self.count_seq.item()
        metrics = {metric: getattr(self, metric) for metric in self.metrics}
        if sanity_flag:
            return metrics
        all_gtmotions = torch.cat(self.gtmotion_embeddings, axis=0).cpu()
        all_genmotions = torch.cat(self.recmotion_embeddings, axis=0).cpu()
        (KID_mean, KID_std) = calculate_kid(all_gtmotions, all_genmotions)
        metrics['KID_mean'] = KID_mean
        metrics['KID_std'] = KID_std
        all_genmotions = all_genmotions.numpy()
        all_gtmotions = all_gtmotions.numpy()
        (mu, cov) = calculate_activation_statistics_np(all_genmotions)
        (gt_mu, gt_cov) = calculate_activation_statistics_np(all_gtmotions)
        metrics['FID'] = calculate_frechet_distance_np(gt_mu, gt_cov, mu, cov)
        # Diversity subsamples `diversity_times` pairs, so we need at least
        # that many sequences accumulated.
        assert (count_seq > self.diversity_times)
        print(all_genmotions.shape)
        print(all_gtmotions.shape)
        metrics['Diversity'] = calculate_diversity_np(all_genmotions, self.diversity_times)
        metrics['gt_Diversity'] = calculate_diversity_np(all_gtmotions, self.diversity_times)
        return {**metrics}

    def update(self, gtmotion_embeddings: Tensor, lengths: List[int], recmotion_embeddings=None):
        """Accumulate one batch of (flattened) motion embeddings."""
        self.count += sum(lengths)
        self.count_seq += len(lengths)
        if recmotion_embeddings is not None:
            recmotion_embeddings = torch.flatten(recmotion_embeddings, start_dim=1).detach()
            self.recmotion_embeddings.append(recmotion_embeddings)
        gtmotion_embeddings = torch.flatten(gtmotion_embeddings, start_dim=1).detach()
        self.gtmotion_embeddings.append(gtmotion_embeddings)
class LinearBottleneck(nn.Module):
    """MobileNetV2-style inverted-residual block: 1x1 expand (ReLU6) ->
    3x3 depthwise (ReLU6) -> 1x1 linear projection, with an identity
    shortcut when the block preserves both resolution and width."""

    def __init__(self, in_channels, out_channels, stride, expansion):
        super(LinearBottleneck, self).__init__()
        # A residual connection is only valid when shapes match.
        self.residual = (in_channels == out_channels) and (stride == 1)
        hidden_channels = in_channels * 6 if expansion else in_channels
        self.conv1 = conv1x1_block(in_channels=in_channels, out_channels=hidden_channels, activation='relu6')
        self.conv2 = dwconv3x3_block(in_channels=hidden_channels, out_channels=hidden_channels, stride=stride, activation='relu6')
        self.conv3 = conv1x1_block(in_channels=hidden_channels, out_channels=out_channels, activation=None)

    def forward(self, x):
        shortcut = x if self.residual else None
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        if shortcut is not None:
            out = out + shortcut
        return out
def _one_hot_encode_helper(df, class_name, class_range, features_generated):
for i in class_range:
df[((class_name + '_') + str(i))] = 0
df.loc[((df[class_name] == i), ((class_name + '_') + str(i)))] = 1
features_generated.append(((class_name + '_') + str(i)))
df.drop([class_name], axis=1, inplace=True)
features_generated.remove(class_name)
return df |
def adjust_learning_rate_poly(args, optimizer, iter, power=0.9):
    """Polynomial learning-rate decay with per-group multipliers.

    The four parameter groups get the decayed base LR scaled by
    1x, 2x, 10x and 20x respectively (a common backbone/head split).
    NOTE: the parameter name ``iter`` shadows the builtin but is kept
    for keyword-call compatibility.
    """
    decay = (1 - float(iter) / args.max_steps) ** power
    lr = args.lr * decay
    for idx, scale in enumerate((1, 2, 10, 20)):
        optimizer.param_groups[idx]['lr'] = lr * scale
def resnet18(pretrained=False, output_channels=512):
    """Build a ResNet-18 (BasicBlock, [2, 2, 2, 2]); optionally load
    ImageNet-pretrained weights from the model zoo."""
    net = ResNet(BasicBlock, [2, 2, 2, 2], output_channels=output_channels)
    if not pretrained:
        return net
    net.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return net
class ReaderInputTensors(NamedTuple):
    """Bundle of tensors produced by the dataset reader for one batch of
    path-contexts; the `*_strings` fields are optional debug/eval views."""
    # Indices of source-token vocabulary entries, one per path-context.
    path_source_token_indices: tf.Tensor
    # Indices into the path vocabulary, one per path-context.
    path_indices: tf.Tensor
    # Indices of target-token vocabulary entries, one per path-context.
    path_target_token_indices: tf.Tensor
    # Boolean/float mask marking which path-contexts are real (not padding).
    context_valid_mask: tf.Tensor
    # Index of the true target label (training/eval only).
    target_index: Optional[tf.Tensor] = None
    # Raw string form of the target label (eval/debug only).
    target_string: Optional[tf.Tensor] = None
    path_source_token_strings: Optional[tf.Tensor] = None
    path_strings: Optional[tf.Tensor] = None
    path_target_token_strings: Optional[tf.Tensor] = None
def gradient_descent(energy_or_force: Callable[(..., Array)], shift_fn: ShiftFn, step_size: float) -> Minimizer[Array]:
    """Plain gradient-descent minimizer.

    Accepts either an energy or a force function (canonicalized to a
    force) and returns an ``(init_fn, apply_fn)`` pair; the state is just
    the positions ``R``.
    """
    force_fn = quantity.canonicalize_force(energy_or_force)

    def init_fn(R: Array, **unused_kwargs) -> Array:
        # No optimizer state beyond the positions themselves.
        return R

    def apply_fn(R: Array, **kwargs) -> Array:
        # Step along the force (negative gradient) direction.
        return shift_fn(R, step_size * force_fn(R, **kwargs), **kwargs)

    return (init_fn, apply_fn)
class StackelbergEnv(PhantomEnv):
    """A :class:`PhantomEnv` variant for Stackelberg games: a group of
    leader agents acts on odd steps and a group of follower agents acts on
    even steps. Rewards computed when a group acts are returned to that
    group on its next observation (or all at once when the episode ends).
    """

    def __init__(self, num_steps: int, network: Network, leader_agents: Sequence[AgentID], follower_agents: Sequence[AgentID], env_supertype: Optional[Supertype]=None, agent_supertypes: Optional[Mapping[(AgentID, Supertype)]]=None) -> None:
        super().__init__(num_steps, network, env_supertype, agent_supertypes)
        for aid in (leader_agents + follower_agents):
            assert (aid in network.agent_ids), f"Agent '{aid}' not in network"
        for aid in leader_agents:
            # BUG FIX: the message previously read "not in network", which
            # did not describe the leader/follower-overlap condition checked.
            assert (aid not in follower_agents), f"Agent '{aid}' cannot be both a leader and a follower"
        self.leader_agents = leader_agents
        self.follower_agents = follower_agents
        # Rewards are buffered here between the step an agent acts and the
        # step it next observes.
        self._rewards: Dict[(AgentID, Optional[float])] = {}

    def reset(self, seed: Optional[int]=None, options: Optional[Dict[(str, Any)]]=None) -> Tuple[(Dict[(AgentID, Any)], Dict[(str, Any)])]:
        """Reset the environment and return the leaders' initial observations."""
        logger.log_reset()
        gym.Env.reset(self, seed=seed, options=options)
        self._current_step = 0
        for sampler in self._samplers:
            sampler.sample()
        if self.env_supertype is not None:
            self.env_type = self.env_supertype.sample()
        self.network.reset()
        self._terminations = set()
        self._truncations = set()
        self._rewards = {aid: None for aid in self.strategic_agent_ids}
        # Only the (strategic) leaders act first, so only they observe now.
        self._make_ctxs([aid for aid in self.leader_agents if (aid in self.strategic_agent_ids)])
        obs = {ctx.agent.id: ctx.agent.encode_observation(ctx) for ctx in self._ctxs.values()}
        logger.log_observations(obs)
        return ({k: v for (k, v) in obs.items() if (v is not None)}, {})

    def step(self, actions: Mapping[(AgentID, Any)]) -> PhantomEnv.Step:
        """Advance one step: the acting group applies ``actions``, the other
        group observes next; returns the usual gym-style 5-tuple."""
        self._current_step += 1
        logger.log_step(self.current_step, self.num_steps)
        logger.log_actions(actions)
        logger.log_start_decoding_actions()
        self._make_ctxs(self.agent_ids)
        # Leaders act on odd steps, followers on even steps.
        (acting_agents, next_acting_agents) = ((self.leader_agents, self.follower_agents) if ((self.current_step % 2) == 1) else (self.follower_agents, self.leader_agents))
        self._handle_acting_agents(acting_agents, actions)
        self.resolve_network()
        observations: Dict[(AgentID, Any)] = {}
        rewards: Dict[(AgentID, float)] = {}
        terminations: Dict[(AgentID, bool)] = {}
        truncations: Dict[(AgentID, bool)] = {}
        infos: Dict[(AgentID, Dict[(str, Any)])] = {}
        for aid in self.strategic_agent_ids:
            if ((aid in self._terminations) or (aid in self._truncations)):
                continue
            ctx = self._ctxs[aid]
            if aid in next_acting_agents:
                obs = ctx.agent.encode_observation(ctx)
                if obs is not None:
                    observations[aid] = obs
                    infos[aid] = ctx.agent.collect_infos(ctx)
            if aid in acting_agents:
                # Reward is buffered until this agent next observes.
                self._rewards[aid] = ctx.agent.compute_reward(ctx)
            terminations[aid] = ctx.agent.is_terminated(ctx)
            truncations[aid] = ctx.agent.is_truncated(ctx)
            if terminations[aid]:
                self._terminations.add(aid)
            if truncations[aid]:
                self._truncations.add(aid)
        logger.log_step_values(observations, rewards, terminations, truncations, infos)
        logger.log_metrics(self)
        terminations['__all__'] = self.is_terminated()
        truncations['__all__'] = self.is_truncated()
        if (terminations['__all__'] or truncations['__all__']):
            logger.log_episode_done()
            # Episode over: flush every buffered reward regardless of group.
            return self.Step(observations, self._rewards, terminations, truncations, infos)
        rewards = {aid: self._rewards[aid] for aid in observations if (self._rewards[aid] is not None)}
        return self.Step(observations, rewards, terminations, truncations, infos)
class SquadDataTrainingArguments(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError when the 'torch'
    backend is unavailable (transformers dummy-object pattern)."""
    # Backends that must be importable before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_template_counts(model_id):
    """Count, over the whole (non-repeating) training set, how often each
    template is the argmax prediction of the trained model `model_id`.

    Returns an int32 array of length `builder.n_templates` with per-template
    counts. Restores weights from the latest checkpoint in the builder's
    model directory.
    """
    import tensorflow as tf
    import numpy as np
    print(('Getting template counts for %s' % model_id))
    graph = tf.Graph()
    with graph.as_default():
        builder = get_builder(model_id)
        # repeat=False so the dataset is exhausted exactly once.
        (features, labels) = builder.get_inputs(mode='train', repeat=False)
        spec = builder.get_estimator_spec(features, labels, mode='eval')
        predictions = spec.predictions
        probs = predictions['probs']
        # Per-example index of the most likely template.
        counts = tf.argmax(probs, axis=(- 1))
        totals = np.zeros((builder.n_templates,), dtype=np.int32)
        saver = tf.train.Saver()
    with tf.train.MonitoredSession() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(builder.model_dir))
        spinner = Spinner()
        # MonitoredSession signals should_stop() when the dataset is done.
        while (not sess.should_stop()):
            c = sess.run(counts)
            for ci in c:
                totals[ci] += 1
            spinner.next()
        spinner.finish()
    return totals
def _vgg_replace_fc(model, output_dim):
    """Adapt a torchvision VGG to the project's `fc`-head convention.

    Drops the stock `classifier`, installs an Identity `fc` (annotated with
    the original input width so `_replace_fc` can size the new head), and
    rebinds `forward` to route features -> avgpool -> flatten -> fc.
    """
    model.fc = torch.nn.Identity()
    # Record the width the classifier expected; _replace_fc reads this.
    model.fc.in_features = model.classifier[0].in_features
    delattr(model, 'classifier')

    def forward(self, x):
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flat = torch.flatten(pooled, 1)
        return self.fc(flat)

    model.forward = types.MethodType(forward, model)
    return _replace_fc(model, output_dim)
# NOTE(review): a decorator line was garbled in extraction ("_cache()") --
# upstream this helper is memoized with @lru_cache(); restore once the
# functools import is confirmed.
def is_torch_npu_available(check_device=False):
    """Return True iff torch and torch_npu are importable and an NPU is
    available; with ``check_device`` also verify devices can be enumerated.
    """
    try:
        import torch
    except (ImportError, ModuleNotFoundError):
        return False
    if importlib.util.find_spec('torch_npu') is None:
        return False
    # Importing torch_npu registers the 'npu' backend on torch.
    import torch_npu
    if check_device:
        try:
            # A visible but broken driver raises RuntimeError here.
            _ = torch.npu.device_count()
            return torch.npu.is_available()
        except RuntimeError:
            return False
    return hasattr(torch, 'npu') and torch.npu.is_available()
def create_armature_mesh(scene: bpy.types.Scene, armature_object: bpy.types.Object, mesh_name: str) -> bpy.types.Object:
    """Build a visualization mesh for an armature: one tapered octahedral
    "bone" shape per bone, rigidly weighted to that bone via vertex groups,
    with an Armature modifier and subdivision surfaces, parented to the
    armature. Returns the new mesh object.
    """
    assert (armature_object.type == 'ARMATURE'), 'Error'
    assert (len(armature_object.data.bones) != 0), 'Error'

    def add_rigid_vertex_group(target_object: bpy.types.Object, name: str, vertex_indices: Iterable[int]) -> None:
        # Weight 1.0 on every vertex: the group follows its bone rigidly.
        new_vertex_group = target_object.vertex_groups.new(name=name)
        for vertex_index in vertex_indices:
            new_vertex_group.add([vertex_index], 1.0, 'REPLACE')

    def generate_bone_mesh_pydata(radius: float, length: float) -> Tuple[(List[mathutils.Vector], List[List[int]])]:
        # A square frustum (wide base, narrow top) capped by two pyramids,
        # pointing along +Y in bone-local space.
        base_radius = radius
        top_radius = (0.5 * radius)
        vertices = [mathutils.Vector(((- base_radius), 0.0, (+ base_radius))), mathutils.Vector(((+ base_radius), 0.0, (+ base_radius))), mathutils.Vector(((+ base_radius), 0.0, (- base_radius))), mathutils.Vector(((- base_radius), 0.0, (- base_radius))), mathutils.Vector(((- top_radius), length, (+ top_radius))), mathutils.Vector(((+ top_radius), length, (+ top_radius))), mathutils.Vector(((+ top_radius), length, (- top_radius))), mathutils.Vector(((- top_radius), length, (- top_radius))), mathutils.Vector((0.0, (- base_radius), 0.0)), mathutils.Vector((0.0, (length + top_radius), 0.0))]
        faces = [[8, 1, 0], [8, 2, 1], [8, 3, 2], [8, 0, 3], [9, 4, 5], [9, 5, 6], [9, 6, 7], [9, 7, 4], [0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
        return (vertices, faces)

    armature_data: bpy.types.Armature = armature_object.data
    vertices: List[mathutils.Vector] = []
    faces: List[List[int]] = []
    vertex_groups: List[Dict[(str, Any)]] = []
    for bone in armature_data.bones:
        radius = (0.1 * (0.1 + bone.length))
        (temp_vertices, temp_faces) = generate_bone_mesh_pydata(radius, bone.length)
        vertex_index_offset = len(vertices)
        temp_vertex_group = {'name': bone.name, 'vertex_indices': []}
        for (local_index, vertex) in enumerate(temp_vertices):
            # BUG FIX: the matrix-vector multiplication operator was missing
            # here; Blender >= 2.8 uses '@' for Matrix * Vector.
            vertices.append(bone.matrix_local @ vertex)
            temp_vertex_group['vertex_indices'].append((local_index + vertex_index_offset))
        vertex_groups.append(temp_vertex_group)
        for face in temp_faces:
            if (len(face) == 3):
                faces.append([(face[0] + vertex_index_offset), (face[1] + vertex_index_offset), (face[2] + vertex_index_offset)])
            else:
                faces.append([(face[0] + vertex_index_offset), (face[1] + vertex_index_offset), (face[2] + vertex_index_offset), (face[3] + vertex_index_offset)])
    new_object = create_mesh_from_pydata(scene, vertices, faces, mesh_name, mesh_name)
    new_object.matrix_world = armature_object.matrix_world
    for vertex_group in vertex_groups:
        add_rigid_vertex_group(new_object, vertex_group['name'], vertex_group['vertex_indices'])
    armature_modifier = new_object.modifiers.new('Armature', 'ARMATURE')
    armature_modifier.object = armature_object
    armature_modifier.use_vertex_groups = True
    add_subdivision_surface_modifier(new_object, 1, is_simple=True)
    add_subdivision_surface_modifier(new_object, 2, is_simple=False)
    bpy.ops.object.select_all(action='DESELECT')
    new_object.select_set(True)
    armature_object.select_set(True)
    bpy.context.view_layer.objects.active = armature_object
    bpy.ops.object.parent_set(type='OBJECT')
    return new_object
def new_lr(optimizer, lr):
    """Set every parameter group of ``optimizer`` to learning rate ``lr``."""
    for group in optimizer.param_groups:
        group['lr'] = lr
def _reg_ndarray(cls, fcreate):
    """Register ``fcreate`` as the constructor for ndarray class ``cls``,
    keyed by its TVM array type code in the global registry."""
    global _TVM_ND_CLS
    type_code = cls._array_type_code
    _TVM_ND_CLS[type_code] = fcreate
def test_snapshotKeplerPotential_zforce_naz():
    """zforce from a single-unit-mass snapshot must agree between the
    default azimuthal sampling (naz=4) and nazimuths=12."""
    snap = pynbody.new(star=1)
    snap['mass'] = 1.0
    snap['eps'] = 0.0
    pot_naz4 = potential.SnapshotRZPotential(snap, num_threads=1)
    pot_naz12 = potential.SnapshotRZPotential(snap, num_threads=1, nazimuths=12)
    for (R, z) in [(1.0, 0.0), (0.5, 0.0), (1.0, 0.5), (1.0, -0.5)]:
        assert numpy.fabs(pot_naz4.zforce(R, z) - pot_naz12.zforce(R, z)) < 1e-8, 'SnapshotRZPotential with single unit mass for naz=4 does not agree with naz=12'
    return None
def test_laplacian_random_walk():
    """Check Graph.L_rw against a dense reference: L_rw = I - D^{-1} A."""
    num_v = 20
    num_e = 50
    for _ in range(3):
        g = Graph(num_v)
        A = torch.zeros((num_v, num_v))
        for _ in range(num_e):
            s = random.randrange(num_v)
            d = random.randrange(num_v)
            if (s == d):
                continue
            g.add_edges((s, d))
            A[(s, d)] = 1
            A[(d, s)] = 1
        D = A.sum(0)
        D_inv_1 = (D ** (- 1))
        # Isolated vertices have zero degree; zero out the resulting infs.
        D_inv_1[torch.isinf(D_inv_1)] = 0
        D_inv_1 = torch.diag(D_inv_1.view((- 1)))
        # BUG FIX: the matrix-multiplication operator was missing here; the
        # random-walk Laplacian is I - D^{-1} @ A.
        L = torch.eye(num_v) - D_inv_1 @ A
        assert (pytest.approx(g.L_rw.to_dense()) == L)
# NOTE(review): the decorator line was garbled in extraction ("_model") --
# in timm this constructor is registered with @register_model; restore once
# the import is confirmed.
def repvgg_b1g4(pretrained=False, **kwargs):
    """RepVGG-B1g4: RepVGG-B1 with grouped 3x3 convolutions (group size 4)."""
    return _create_byobnet('repvgg_b1g4', pretrained=pretrained, **kwargs)
def GetSvnInfo():
    """Parse `svn info .` for the current checkout.

    Returns ``(project, root)`` where ``root`` is the real path of the
    checkout root (derived from the relative path's depth), or
    ``(None, None)`` if the URL line cannot be found.
    """
    for line in GetCommandOutput('svn info .'):
        match = _SVN_INFO_URL_RE.match(line)
        if not match:
            continue
        project = match.group(1)
        rel_path = match.group(2)
        # Climb one '../' per path component to reach the checkout root.
        root = os.path.realpath(rel_path.count('/') * '../')
        return (project, root)
    return (None, None)
def iterate_dict_combinations(a: Mapping[(K, Collection[V])]) -> Iterator[Mapping[(K, V)]]:
    """Yield every mapping that picks exactly one value per key of ``a``
    (the cartesian product of the per-key value collections)."""
    keys = list(a)
    value_groups = [a[key] for key in keys]
    # Materialize the product up front, matching the original's eager pass.
    combos = list(itertools.product(*tuple(value_groups)))
    for combo in combos:
        yield frozendict(zip(keys, combo))
class GenerationConfig(FairseqDataclass):
    """Hyper-parameters controlling sequence generation/decoding: beam
    search, sampling, length handling, iterative refinement, and LM fusion.
    Fields suffixed ``_mt`` configure the first-pass (MT) decoder of
    two-pass models."""
    # --- beam search ---
    beam: int = field(default=5, metadata={'help': 'beam size'})
    beam_mt: int = field(default=0, metadata={'help': 'beam size for the first-pass decoder'})
    nbest: int = field(default=1, metadata={'help': 'number of hypotheses to output'})
    # --- output length limits (max length = a * source_len + b) ---
    max_len_a: float = field(default=0, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length'})
    max_len_b: int = field(default=200, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length'})
    max_len_a_mt: float = field(default=0, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length for the first-pass decoder'})
    max_len_b_mt: int = field(default=200, metadata={'help': 'generate sequences of maximum length ax + b, where x is the source length for the first-pass decoder'})
    min_len: int = field(default=1, metadata={'help': 'minimum generation length'})
    match_source_len: bool = field(default=False, metadata={'help': 'generations should match the source length'})
    # --- scoring and penalties ---
    unnormalized: bool = field(default=False, metadata={'help': 'compare unnormalized hypothesis scores'})
    no_early_stop: bool = field(default=False, metadata={'help': 'deprecated'})
    no_beamable_mm: bool = field(default=False, metadata={'help': "don't use BeamableMM in attention layers"})
    lenpen: float = field(default=1, metadata={'help': 'length penalty: <1.0 favors shorter, >1.0 favors longer sentences'})
    lenpen_mt: float = field(default=1, metadata={'help': 'length penalty for the first-pass decoder: <1.0 favors shorter, >1.0 favors longer sentences'})
    unkpen: float = field(default=0, metadata={'help': 'unknown word penalty: <0 produces more unks, >0 produces fewer'})
    replace_unk: Optional[str] = field(default=None, metadata={'help': 'perform unknown replacement (optionally with alignment dictionary)', 'argparse_const': ' '})
    sacrebleu: bool = field(default=False, metadata={'help': 'score with sacrebleu'})
    score_reference: bool = field(default=False, metadata={'help': 'just score the reference translation'})
    prefix_size: int = field(default=0, metadata={'help': 'initialize generation by target prefix of given length'})
    no_repeat_ngram_size: int = field(default=0, metadata={'help': 'ngram blocking such that this size ngram cannot be repeated in the generation'})
    # --- sampling (alternative to beam search) ---
    sampling: bool = field(default=False, metadata={'help': 'sample hypotheses instead of using beam search'})
    sampling_topk: int = field(default=(- 1), metadata={'help': 'sample from top K likely next words instead of all words'})
    sampling_topp: float = field(default=(- 1.0), metadata={'help': 'sample from the smallest set whose cumulative probability mass exceeds p for next words'})
    constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field(default=None, metadata={'help': 'enables lexically constrained decoding', 'argparse_const': 'ordered'})
    temperature: float = field(default=1.0, metadata={'help': 'temperature for generation'})
    # --- diverse beam search variants ---
    diverse_beam_groups: int = field(default=(- 1), metadata={'help': 'number of groups for Diverse Beam Search'})
    diverse_beam_strength: float = field(default=0.5, metadata={'help': 'strength of diversity penalty for Diverse Beam Search'})
    diversity_rate: float = field(default=(- 1.0), metadata={'help': 'strength of diversity penalty for Diverse Siblings Search'})
    # --- debugging / introspection ---
    print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field(default=None, metadata={'help': 'if set, uses attention feedback to compute and print alignment to source tokens (valid options are: hard, soft, otherwise treated as hard alignment)', 'argparse_const': 'hard'})
    print_step: bool = field(default=False, metadata={'help': 'print steps'})
    # --- external language-model fusion ---
    lm_path: Optional[str] = field(default=None, metadata={'help': 'path to lm checkpoint for lm fusion'})
    lm_weight: float = field(default=0.0, metadata={'help': 'weight for lm probs for lm fusion'})
    # --- iterative refinement (non-autoregressive decoding) ---
    iter_decode_eos_penalty: float = field(default=0.0, metadata={'help': 'if > 0.0, it penalized early-stopping in decoding.'})
    iter_decode_max_iter: int = field(default=10, metadata={'help': 'maximum iterations for iterative refinement.'})
    iter_decode_force_max_iter: bool = field(default=False, metadata={'help': 'if set, run exact the maximum number of iterations without early stop'})
    iter_decode_with_beam: int = field(default=1, metadata={'help': 'if > 1, model will generate translations varying by the lengths.'})
    iter_decode_with_external_reranker: bool = field(default=False, metadata={'help': 'if set, the last checkpoint are assumed to be a reranker to rescore the translations'})
    retain_iter_history: bool = field(default=False, metadata={'help': 'if set, decoding returns the whole history of iterative refinement'})
    retain_dropout: bool = field(default=False, metadata={'help': 'Use dropout at inference time'})
    retain_dropout_modules: Any = field(default=None, metadata={'help': 'if set, only retain dropout for the specified modules; if not set, then dropout will be retained for all modules'})
    decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field(default=None, metadata={'help': 'special decoding format for advanced decoding.'})
    no_seed_provided: bool = field(default=False, metadata={'help': 'if set, dont use seed for initializing random generators'})
    eos_token: Optional[str] = field(default=None, metadata={'help': 'EOS token'})
def init_classifier(layer_sizes):
    """Construct and return a classifier with sigmoid activations for the
    given layer sizes."""
    return construct_classifier(layer_sizes, 'sigmoid')
class _Dataset():
name: str
sources: typing.List[typing.Callable]
split_proportions: typing.Dict[(str, float)]
reactant_to_reactant_id_json_path: str |
class WordsSubtokenMetricBase(tf.metrics.Metric):
    """Base class for subtoken-level metrics (precision/recall/F1 style).

    Accumulates true/false positives and false negatives over batches by
    splitting predicted and target words into subtokens (delimiter '|' by
    default) and set-intersecting them; subclasses combine the counts in
    ``result()``.
    """
    FilterType = Callable[([tf.Tensor, tf.Tensor], tf.Tensor)]

    def __init__(self, index_to_word_table: Optional[tf.lookup.StaticHashTable]=None, topk_predicted_words=None, predicted_words_filters: Optional[List[FilterType]]=None, subtokens_delimiter: str='|', name=None, dtype=None):
        super(WordsSubtokenMetricBase, self).__init__(name=name, dtype=dtype)
        # Running counts folded across batches.
        self.tp = self.add_weight('true_positives', shape=(), initializer=tf.zeros_initializer)
        self.fp = self.add_weight('false_positives', shape=(), initializer=tf.zeros_initializer)
        self.fn = self.add_weight('false_negatives', shape=(), initializer=tf.zeros_initializer)
        self.index_to_word_table = index_to_word_table
        self.topk_predicted_words = topk_predicted_words
        self.predicted_words_filters = predicted_words_filters
        self.subtokens_delimiter = subtokens_delimiter

    def _get_true_target_word_string(self, true_target_word):
        """Map target-word indices to strings via the lookup table, if any."""
        if (self.index_to_word_table is None):
            return true_target_word
        true_target_word_index = tf.cast(true_target_word, dtype=self.index_to_word_table.key_dtype)
        return self.index_to_word_table.lookup(true_target_word_index)

    def update_state(self, true_target_word, predictions, sample_weight=None):
        """Accumulate tp/fp/fn counts for one batch."""
        if (sample_weight is not None):
            # BUG FIX: `raise NotImplemented(...)` raised a TypeError because
            # NotImplemented is not an exception type.
            raise NotImplementedError('WordsSubtokenMetricBase with non-None `sample_weight` is not implemented.')
        topk_predicted_words = (predictions if (self.topk_predicted_words is None) else self.topk_predicted_words)
        assert (topk_predicted_words is not None)
        predicted_word = self._get_prediction_from_topk(topk_predicted_words)
        true_target_word_string = self._get_true_target_word_string(true_target_word)
        true_target_word_string = tf.reshape(true_target_word_string, [(- 1)])
        # Split into subtokens and densify with a sentinel '<PAD>' so rows
        # of unequal length can be compared.
        true_target_subwords = tf.compat.v1.string_split(true_target_word_string, sep=self.subtokens_delimiter)
        prediction_subwords = tf.compat.v1.string_split(predicted_word, sep=self.subtokens_delimiter)
        true_target_subwords = tf.sparse.to_dense(true_target_subwords, default_value='<PAD>')
        prediction_subwords = tf.sparse.to_dense(prediction_subwords, default_value='<PAD>')
        true_target_subwords_mask = tf.not_equal(true_target_subwords, '<PAD>')
        prediction_subwords_mask = tf.not_equal(prediction_subwords, '<PAD>')
        # Pairwise membership tests between the two subtoken sets per row.
        true_target_subwords = tf.expand_dims(true_target_subwords, (- 1))
        prediction_subwords = tf.expand_dims(prediction_subwords, (- 1))
        true_target_subwords__in__prediction_subwords = tf.reduce_any(tf.equal(true_target_subwords, tf.transpose(prediction_subwords, perm=[0, 2, 1])), axis=2)
        prediction_subwords__in__true_target_subwords = tf.reduce_any(tf.equal(prediction_subwords, tf.transpose(true_target_subwords, perm=[0, 2, 1])), axis=2)
        batch_true_positive = tf.reduce_sum(tf.cast(tf.logical_and(prediction_subwords__in__true_target_subwords, prediction_subwords_mask), tf.float32))
        batch_false_positive = tf.reduce_sum(tf.cast(tf.logical_and((~ prediction_subwords__in__true_target_subwords), prediction_subwords_mask), tf.float32))
        batch_false_negative = tf.reduce_sum(tf.cast(tf.logical_and((~ true_target_subwords__in__prediction_subwords), true_target_subwords_mask), tf.float32))
        self.tp.assign_add(batch_true_positive)
        self.fp.assign_add(batch_false_positive)
        self.fn.assign_add(batch_false_negative)

    def _get_prediction_from_topk(self, topk_predicted_words):
        """Pick, per example, the first top-k prediction passing all filters;
        if no filters are given, the first prediction is used."""
        masks = []
        if (self.predicted_words_filters is not None):
            masks = [fltr(topk_predicted_words) for fltr in self.predicted_words_filters]
        if masks:
            legal_predicted_target_words_mask = reduce(tf.logical_and, masks)
        else:
            legal_predicted_target_words_mask = tf.cast(tf.ones_like(topk_predicted_words), dtype=tf.bool)
        first_legal_predicted_target_word_mask = common.tf_get_first_true(legal_predicted_target_words_mask)
        first_legal_predicted_target_word_idx = tf.where(first_legal_predicted_target_word_mask)
        first_legal_predicted_word_string = tf.gather_nd(topk_predicted_words, first_legal_predicted_target_word_idx)
        prediction = tf.reshape(first_legal_predicted_word_string, [(- 1)])
        return prediction

    def result(self):
        """Subclasses combine self.tp / self.fp / self.fn into a scalar."""
        ...

    def reset_states(self):
        """Zero all accumulated counts."""
        for v in self.variables:
            K.set_value(v, 0)
class ViTMAELayer(metaclass=DummyObject):
    """Placeholder that raises an informative ImportError when the 'torch'
    backend is unavailable (transformers dummy-object pattern)."""
    # Backends that must be importable before the real class can be used.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def is_overlapping(sim, name=None):
    """Return True if the simulation has any penetrating contact
    (``dist < 0``); when ``name`` is given, only contacts touching a geom
    with that name count."""
    sim.forward()
    for idx in range(sim.data.ncon):
        contact = sim.data.contact[idx]
        if contact.dist >= 0:
            continue
        if name is None:
            return True
        geom_a = sim.model._geom_id2name[contact.geom1]
        geom_b = sim.model._geom_id2name[contact.geom2]
        if name in (geom_a, geom_b):
            return True
    return False
class UniformQuantizeGrad(InplaceFunction):
    """autograd Function that is the identity on the forward pass and
    uniformly quantizes (then dequantizes) gradients on the backward pass."""

    # NOTE(review): torch.autograd.Function methods take `ctx` explicitly
    # and must be @staticmethod; the decorators were apparently lost in
    # extraction and are restored here.
    @staticmethod
    def forward(ctx, input, num_bits=None, qparams=None, flatten_dims=_DEFAULT_FLATTEN_GRAD, reduce_dim=0, dequantize=True, signed=False, stochastic=True):
        # Stash the quantization settings for backward; forward is identity.
        ctx.num_bits = num_bits
        ctx.qparams = qparams
        ctx.flatten_dims = flatten_dims
        ctx.stochastic = stochastic
        ctx.signed = signed
        ctx.dequantize = dequantize
        ctx.reduce_dim = reduce_dim
        ctx.inplace = False
        return input

    @staticmethod
    def backward(ctx, grad_output):
        qparams = ctx.qparams
        with torch.no_grad():
            if (qparams is None):
                assert (ctx.num_bits is not None), 'either provide qparams of num_bits to quantize'
                # Derive quantization parameters from the gradient itself.
                qparams = calculate_qparams(grad_output, num_bits=ctx.num_bits, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim, reduce_type='extreme')
            grad_input = quantize(grad_output, num_bits=None, qparams=qparams, flatten_dims=ctx.flatten_dims, reduce_dim=ctx.reduce_dim, dequantize=True, signed=ctx.signed, stochastic=ctx.stochastic, inplace=False)
        # One None per non-tensor forward argument.
        return (grad_input, None, None, None, None, None, None, None)
def create_squeezenet_ssd_lite(num_classes, is_test=False):
    """Build an SSD-Lite detector with a SqueezeNet 1.1 backbone.

    Uses depthwise-separable extras/headers (6 feature maps, 6 priors per
    location); `num_classes` includes the background class. Returns the
    assembled SSD model.
    """
    base_net = squeezenet1_1(False).features
    # Feed the SSD heads from backbone layer 12 plus the extra layers below.
    source_layer_indexes = [12]
    extras = ModuleList([Sequential(Conv2d(in_channels=512, out_channels=256, kernel_size=1), ReLU(), SeperableConv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=2)), Sequential(Conv2d(in_channels=512, out_channels=256, kernel_size=1), ReLU(), SeperableConv2d(in_channels=256, out_channels=512, kernel_size=3, stride=2, padding=1)), Sequential(Conv2d(in_channels=512, out_channels=128, kernel_size=1), ReLU(), SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)), Sequential(Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(), SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1)), Sequential(Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(), SeperableConv2d(in_channels=256, out_channels=256, kernel_size=3, stride=2, padding=1)) if False else Sequential(Conv2d(in_channels=256, out_channels=128, kernel_size=1), ReLU(), SeperableConv2d(in_channels=128, out_channels=256, kernel_size=3, stride=2, padding=1))])
    # 6 * 4 regression outputs per location: 6 priors, 4 box coordinates.
    regression_headers = ModuleList([SeperableConv2d(in_channels=512, out_channels=(6 * 4), kernel_size=3, padding=1), SeperableConv2d(in_channels=512, out_channels=(6 * 4), kernel_size=3, padding=1), SeperableConv2d(in_channels=512, out_channels=(6 * 4), kernel_size=3, padding=1), SeperableConv2d(in_channels=256, out_channels=(6 * 4), kernel_size=3, padding=1), SeperableConv2d(in_channels=256, out_channels=(6 * 4), kernel_size=3, padding=1), Conv2d(in_channels=256, out_channels=(6 * 4), kernel_size=1)])
    classification_headers = ModuleList([SeperableConv2d(in_channels=512, out_channels=(6 * num_classes), kernel_size=3, padding=1), SeperableConv2d(in_channels=512, out_channels=(6 * num_classes), kernel_size=3, padding=1), SeperableConv2d(in_channels=512, out_channels=(6 * num_classes), kernel_size=3, padding=1), SeperableConv2d(in_channels=256, out_channels=(6 * num_classes), kernel_size=3, padding=1), SeperableConv2d(in_channels=256, out_channels=(6 * num_classes), kernel_size=3, padding=1), Conv2d(in_channels=256, out_channels=(6 * num_classes), kernel_size=1)])
    return SSD(num_classes, base_net, source_layer_indexes, extras, classification_headers, regression_headers, is_test=is_test, config=config)
def m2(solution):
    """Mutation operator: remove one randomly chosen city (never index 0)
    from the tour, provided it has more than one city. Returns the
    (possibly mutated) solution."""
    if solution.size <= 1:
        return solution
    victim = random.randrange(1, solution.size)
    solution.remove_city(index=victim)
    return solution
def test_octree_voxel_grid_convert():
    """Round-trip smoke test: point cloud -> octree -> voxel grid -> octree."""
    dataset = o3d.data.PLYPointCloud()
    cloud = o3d.io.read_point_cloud(dataset.path)
    tree = o3d.geometry.Octree(8)
    tree.convert_from_point_cloud(cloud)
    grid = tree.to_voxel_grid()
    # Converting back must not raise; depth matches the original octree.
    tree_roundtrip = grid.to_octree(max_depth=8)
def train_epoch(epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir='', amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None):
    """Run one training epoch (timm-style loop): forward/backward under the
    optional AMP autocast, gradient clipping/scaling, EMA updates, periodic
    logging, recovery checkpoints, and per-update LR scheduling.

    Returns an OrderedDict with the epoch's average 'loss'.
    """
    # Disable mixup once the configured cut-off epoch is reached.
    if (args.mixup_off_epoch and (epoch >= args.mixup_off_epoch)):
        if (args.prefetcher and loader.mixup_enabled):
            loader.mixup_enabled = False
        elif (mixup_fn is not None):
            mixup_fn.mixup_enabled = False
    # Second-order optimizers (e.g. AdaHessian) need create_graph=True.
    second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.train()
    end = time.time()
    last_idx = (len(loader) - 1)
    num_updates = (epoch * len(loader))
    for (batch_idx, (input, target)) in enumerate(loader):
        last_batch = (batch_idx == last_idx)
        data_time_m.update((time.time() - end))
        # The prefetcher already moves data to GPU (and applies mixup).
        if (not args.prefetcher):
            (input, target) = (input.cuda(), target.cuda())
            if (mixup_fn is not None):
                (input, target) = mixup_fn(input, target)
        if args.channels_last:
            input = input.contiguous(memory_format=torch.channels_last)
        with amp_autocast():
            output = model(input)
            loss = loss_fn(output, target)
        # In distributed mode the loss is reduced lazily at log time below.
        if (not args.distributed):
            losses_m.update(loss.item(), input.size(0))
        optimizer.zero_grad()
        if (loss_scaler is not None):
            # AMP path: scaler handles backward, clipping, and the step.
            loss_scaler(loss, optimizer, clip_grad=args.clip_grad, parameters=model.parameters(), create_graph=second_order)
        else:
            loss.backward(create_graph=second_order)
            if (args.clip_grad is not None):
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
            optimizer.step()
        torch.cuda.synchronize()
        if (model_ema is not None):
            model_ema.update(model)
        num_updates += 1
        batch_time_m.update((time.time() - end))
        if (last_batch or ((batch_idx % args.log_interval) == 0)):
            lrl = [param_group['lr'] for param_group in optimizer.param_groups]
            lr = (sum(lrl) / len(lrl))
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                losses_m.update(reduced_loss.item(), input.size(0))
            # Only rank 0 logs and dumps images.
            if (dist.get_rank() == 0):
                _logger.info('Train: {} [{:>4d}/{} ({:>3.0f}%)] Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) Time: {rate:>4.0f}/s ({rate_avg:>4.0f}/s) LR: {lr:.3e} Data: {data_time.sum:.3f}'.format(epoch, batch_idx, len(loader), ((100.0 * batch_idx) / last_idx), loss=losses_m, batch_time=batch_time_m, rate=((input.size(0) * args.world_size) / batch_time_m.val), rate_avg=((input.size(0) * args.world_size) / batch_time_m.avg), lr=lr, data_time=data_time_m))
                if (args.save_images and output_dir):
                    torchvision.utils.save_image(input, os.path.join(output_dir, ('train-batch-%d.jpg' % batch_idx)), padding=0, normalize=True)
        if ((saver is not None) and args.recovery_interval and (last_batch or (((batch_idx + 1) % args.recovery_interval) == 0))):
            saver.save_recovery(epoch, batch_idx=batch_idx)
        if (lr_scheduler is not None):
            lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
        end = time.time()
    if hasattr(optimizer, 'sync_lookahead'):
        optimizer.sync_lookahead()
    return OrderedDict([('loss', losses_m.avg)])
class ColorJitter(object):
    """Randomly jitter brightness, contrast, saturation and hue of an image
    with probability ``p`` using albumentations' ``ColorJitter`` transform.
    """

    def __init__(self, brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2, p=0.5):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue
        self.p = p
        # Bug fix: `hue` was accepted and stored but never forwarded to the
        # underlying transform, so a caller-supplied hue was silently ignored.
        self.t = A.ColorJitter(brightness=brightness, contrast=self.contrast, saturation=self.saturation, hue=self.hue, p=self.p)

    def __call__(self, image):
        # albumentations transforms take/return dicts keyed by 'image'.
        return self.t(image=image)['image']

    def __repr__(self):
        # Bug fix: the format string used '(2)' instead of '{2}' and passed
        # five arguments to four placeholders (hue was dropped from the text).
        return (self.__class__.__name__ + '(brightness={0}, contrast={1}, saturation={2}, hue={3}, p={4})'.format(self.brightness, self.contrast, self.saturation, self.hue, self.p))
class PrecisionRecallMeter():
    """Accumulates per-batch predictions to compute dataset-level AP / PR curves."""

    def __init__(self) -> None:
        # Column vectors of shape (N, 1); grown by vstack on every update().
        self.all_y_true = np.zeros((0, 1))
        self.all_y_hat = np.zeros((0, 1))
        self.all_y_hat_probs = np.zeros((0, 1))

    def update(self, y_true: np.ndarray, y_hat: np.ndarray, y_hat_probs: np.ndarray) -> None:
        """Append one batch of ground truth, hard predictions, and probabilities."""
        y_true = y_true.reshape((- 1), 1)
        y_hat = y_hat.reshape((- 1), 1)
        y_hat_probs = y_hat_probs.reshape((- 1), 1)
        self.all_y_true = np.vstack([self.all_y_true, y_true])
        self.all_y_hat = np.vstack([self.all_y_hat, y_hat])
        self.all_y_hat_probs = np.vstack([self.all_y_hat_probs, y_hat_probs])

    def get_metrics(self) -> float:
        """Return average precision; also renders the PR curve as a side effect.

        Bug fix: the return annotation claimed Tuple[float, float, float] but
        only the scalar AP is returned.
        """
        ap = calc_ap(y_hat=self.all_y_hat, y_true=self.all_y_true, y_hat_probs=self.all_y_hat_probs)
        plot_precision_recall_curve_sklearn(y_hat=self.all_y_hat, y_true=self.all_y_true, y_hat_probs=self.all_y_hat_probs, save_plot=True)
        return ap

    def save_pr_curve(self, save_fpath: str) -> None:
        """Compute AP and write the precision-recall curve plot to save_fpath."""
        ap = calc_ap(y_hat=self.all_y_hat, y_true=self.all_y_true, y_hat_probs=self.all_y_hat_probs)
        plot_precision_recall_curve_sklearn(y_hat=self.all_y_hat, y_true=self.all_y_true, y_hat_probs=self.all_y_hat_probs, save_plot=True, save_fpath=save_fpath)
def process_conceptual_caption(tsv, imgs, db, tokenizer, split):
    """Tokenize Conceptual Captions TSV rows and populate the text database.

    Rows flagged 'fail' and rows whose image feature file is missing from
    `imgs` are skipped. Returns (id2len, txt2img, img2txts) index mappings.
    """
    id2len = {}
    txt2img = {}
    img2txts = defaultdict(list)
    for row in tqdm(tsv, desc='processing conceptual captions'):
        cols = row.strip().split('\t')
        assert len(cols) == 4
        example_id, _, caption, status = cols
        if status == 'fail':
            continue
        assert status == 'success'
        input_ids, toked_caption = tokenizer(caption)
        assert input_ids
        img_fname = f'gcc_{split}_{int(example_id):012}.npz'
        if img_fname not in imgs:
            # No extracted image features available for this example.
            continue
        id2len[example_id] = len(input_ids)
        txt2img[example_id] = img_fname
        img2txts[img_fname].append(example_id)
        db[example_id] = {'id': example_id, 'toked_caption': toked_caption, 'input_ids': input_ids, 'img_fname': img_fname}
    return (id2len, txt2img, img2txts)
class MotorModel(object):
    """DC motor model converting PD position (or PWM) commands into torques.

    Uses the motor's electrical characteristics (resistance, voltage, torque
    constant, viscous damping) plus an empirical current->torque lookup table.
    """
    def __init__(self, kp=1.2, kd=0, torque_limits=None, motor_control_mode=robot_config.MotorControlMode.POSITION):
        self._kp = kp
        self._kd = kd
        self._torque_limits = torque_limits
        self._motor_control_mode = motor_control_mode
        self._resistance = MOTOR_RESISTANCE
        self._voltage = MOTOR_VOLTAGE
        self._torque_constant = MOTOR_TORQUE_CONSTANT
        self._viscous_damping = MOTOR_VISCOUS_DAMPING
        # Empirical lookup: motor current (A) -> output torque.
        self._current_table = [0, 10, 20, 30, 40, 50, 60]
        self._torque_table = [0, 1, 1.9, 2.45, 3.0, 3.25, 3.5]
        # Per-motor multipliers to emulate weakened/strengthened motors.
        self._strength_ratios = ([1.0] * NUM_MOTORS)
    def set_strength_ratios(self, ratios):
        # ratios: per-motor strength multipliers (length NUM_MOTORS).
        self._strength_ratios = np.array(ratios)
    def set_motor_gains(self, kp, kd):
        # Proportional / derivative gains of the PD control law.
        self._kp = kp
        self._kd = kd
    def set_voltage(self, voltage):
        self._voltage = voltage
    def get_voltage(self):
        return self._voltage
    def set_viscous_damping(self, viscous_damping):
        self._viscous_damping = viscous_damping
    def get_viscous_dampling(self):
        # NOTE(review): method name keeps the original "dampling" typo for API compatibility.
        return self._viscous_damping
    def convert_to_torque(self, motor_commands, motor_angle, motor_velocity, true_motor_velocity, motor_control_mode=None):
        """Convert motor commands to (actual_torque, observed_torque).

        In PWM mode commands are duty cycles adjusted by soft position-limit
        barriers; otherwise commands are desired angles fed into a PD law.
        TORQUE and HYBRID control modes are rejected with ValueError.
        """
        if (not motor_control_mode):
            motor_control_mode = self._motor_control_mode
        if ((motor_control_mode is robot_config.MotorControlMode.TORQUE) or (motor_control_mode is robot_config.MotorControlMode.HYBRID)):
            raise ValueError('{} is not a supported motor control mode'.format(motor_control_mode))
        kp = self._kp
        kd = self._kd
        if (motor_control_mode is robot_config.MotorControlMode.PWM):
            # Soft joint-limit barriers: push back only when the angle is
            # outside [MOTOR_POS_LB, MOTOR_POS_UB] (min/max clamp to 0 inside).
            pd_max = ((((- 1) * kp) * (motor_angle - MOTOR_POS_UB)) - ((kd / 2.0) * motor_velocity))
            pd_min = ((((- 1) * kp) * (motor_angle - MOTOR_POS_LB)) - ((kd / 2.0) * motor_velocity))
            pwm = ((motor_commands + np.minimum(pd_max, 0)) + np.maximum(pd_min, 0))
        else:
            # Standard PD position control producing a PWM duty cycle.
            pwm = ((((- 1) * kp) * (motor_angle - motor_commands)) - (kd * motor_velocity))
        pwm = np.clip(pwm, (- 1.0), 1.0)
        return self._convert_to_torque_from_pwm(pwm, true_motor_velocity)
    def _convert_to_torque_from_pwm(self, pwm, true_motor_velocity):
        """Map PWM duty cycle to (actual_torque, observed_torque).

        observed_torque is the ideal torque implied by the applied voltage
        (what an onboard sensor would report); actual_torque accounts for
        back-EMF and viscous losses via the empirical current->torque table.
        """
        observed_torque = np.clip((self._torque_constant * ((np.asarray(pwm) * self._voltage) / self._resistance)), (- OBSERVED_TORQUE_LIMIT), OBSERVED_TORQUE_LIMIT)
        if (self._torque_limits is not None):
            observed_torque = np.clip(observed_torque, ((- 1.0) * self._torque_limits), self._torque_limits)
        # Net voltage after subtracting back-EMF and viscous-damping terms.
        voltage_net = np.clip(((np.asarray(pwm) * self._voltage) - ((self._torque_constant + self._viscous_damping) * np.asarray(true_motor_velocity))), (- VOLTAGE_CLIPPING), VOLTAGE_CLIPPING)
        current = (voltage_net / self._resistance)
        current_sign = np.sign(current)
        current_magnitude = np.absolute(current)
        # Interpolate torque magnitude from the measured current/torque curve,
        # then restore sign and apply per-motor strength scaling.
        actual_torque = np.interp(current_magnitude, self._current_table, self._torque_table)
        actual_torque = np.multiply(current_sign, actual_torque)
        actual_torque = np.multiply(self._strength_ratios, actual_torque)
        if (self._torque_limits is not None):
            actual_torque = np.clip(actual_torque, ((- 1.0) * self._torque_limits), self._torque_limits)
        return (actual_torque, observed_torque)
def pca_features(features: dict[(int, dict[(int, np.ndarray)])], dim: int, standardize: bool=True, **kwargs):
    """Project all per-video/per-half feature arrays to `dim` dimensions.

    A single PCA is fit on the concatenation of every array; the projected
    rows are then sliced back into a nested dict with the same structure.
    Extra kwargs are forwarded to sklearn's PCA.
    """
    stacked = np.concatenate([half_feats for video in features.values() for half_feats in video.values()])
    if standardize:
        # Zero-mean, unit-variance per feature dimension before fitting.
        stacked = (stacked - np.mean(stacked, 0, keepdims=True)) / np.std(stacked, 0, keepdims=True)
    projected = PCA(n_components=dim, **kwargs).fit_transform(stacked)
    features_pca = {video_index: {} for video_index in features}
    offset: int = 0
    for video_index, halves in features.items():
        for half_index, arr in halves.items():
            # Rows were concatenated in dict-iteration order, so slice them
            # back out sequentially.
            features_pca[video_index][half_index] = projected[offset:(offset + arr.shape[0])]
            offset += arr.shape[0]
    return features_pca
_module()
class ISEKAIMetrics(LCLComputeMetrics):
    """LCL metrics for the ISEKAI dataset; prompt pairs come from a jsonlines file."""

    def __init__(self, filename, *args, **kwargs):
        super().__init__(filename, *args, **kwargs)
        self.gt_pairs = self.get_pairs_isekai()

    def get_pairs_isekai(self):
        """Load [positive, negative] prompt pairs (lower-cased) from self.filename."""
        target_pairs = []
        with jsonlines.open(self.filename) as reader:
            for metas in reader:
                # NOTE: 'positive_promt'/'negative_promt' misspellings match the
                # dataset's schema keys — do not "fix" them.
                positive_prompt = metas['positive_promt'].lower()
                negative_prompt = metas['negative_promt'].lower()
                target_pairs.append([positive_prompt, negative_prompt])
        return target_pairs

    def get_neg_pair(self, index, target):
        """Return the element of pair `index` that differs from `target`.

        Bug fix: `neg_target` was unbound (UnboundLocalError) when every name
        in the pair equalled `target`; it now falls back to None.
        """
        pair = self.gt_pairs[index]
        neg_target = None
        for name in pair:
            if (name != target):
                neg_target = name
        return neg_target
.nightly
.no_cover
.timeout(120)
def test_rl2_metaworld_ml1_push():
    """Smoke-test the RL2-PPO MetaWorld ML1 push example for a single epoch."""
    script = str(EXAMPLES_ROOT_DIR / 'tf/rl2_ppo_metaworld_ml1_push.py')
    cmd = [script, '--n_epochs', '1', '--episode_per_task', '1', '--meta_batch_size', '10']
    completed = subprocess.run(cmd, check=False)
    assert completed.returncode == 0
class TripletNet(nn.Module):
    """Applies one shared embedding network to an (anchor, positive, negative) triplet."""

    def __init__(self, embeddingnet):
        super(TripletNet, self).__init__()
        # Shared weights: the same network embeds all three inputs.
        self.embeddingnet = embeddingnet

    def forward(self, a, p, n):
        """Return the triplet of embeddings (anchor, positive, negative)."""
        return tuple(self.embeddingnet(sample) for sample in (a, p, n))
def calc_model_flops(model, input_size, mul_add=False):
    """Estimate a model's FLOPs in millions via forward hooks on Conv2d/Linear.

    Runs one dummy forward pass of shape (1, 3, input_size, input_size).
    Generalization: the dummy input is created on the model's own device
    instead of hard-coding `.cuda()`, so CPU-only models work too.

    Args:
        model: the nn.Module to measure.
        input_size: spatial height/width of the dummy input.
        mul_add: count multiply-add as 2 ops instead of 1.
    Returns:
        Total FLOPs / 1e6, rounded to 2 decimals.
    """
    hook_list = []
    module_flops = []

    def conv_hook(self, input, output):
        # output[0] is one sample: (channels, height, width).
        (output_channels, output_height, output_width) = output[0].size()
        bias_ops = (1 if (self.bias is not None) else 0)
        # NOTE(review): relies on pruning-framework attributes `cur_in_ch` /
        # `cur_group` being present on the conv modules — confirm upstream.
        kernel_ops = ((self.kernel_size[0] * self.kernel_size[1]) * (self.cur_in_ch / self.cur_group))
        flops = (((((kernel_ops * (2 if mul_add else 1)) + bias_ops) * output_channels) * output_height) * output_width)
        module_flops.append(flops)

    def linear_hook(self, input, output):
        weight_ops = (self.weight.nelement() * (2 if mul_add else 1))
        bias_ops = self.bias.nelement()
        module_flops.append(weight_ops + bias_ops)

    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            hook_list.append(m.register_forward_hook(conv_hook))
        elif isinstance(m, nn.Linear):
            hook_list.append(m.register_forward_hook(linear_hook))
    # Place the dummy input wherever the model's parameters live
    # (falls back to CPU for parameter-less models).
    try:
        device = next(model.parameters()).device
    except StopIteration:
        device = torch.device('cpu')
    dummy_input = torch.rand(1, 3, input_size, input_size, device=device)
    model(dummy_input)
    for hook in hook_list:
        hook.remove()
    return round((sum(module_flops) / 1000000.0), 2)
def append_pod_ip_to_env(env):
    """Add POD_IP and NODE_IP downward-API env vars to a k8s env list.

    Mutates and returns `env` when it is non-empty; otherwise returns a new
    list containing just the two variables.
    """
    downward_vars = [
        V1EnvVar(name='POD_IP', value_from=V1EnvVarSource(field_ref=V1ObjectFieldSelector(field_path='status.podIP'))),
        V1EnvVar(name='NODE_IP', value_from=V1EnvVarSource(field_ref=V1ObjectFieldSelector(field_path='status.hostIP'))),
    ]
    if env:
        env.extend(downward_vars)
        return env
    return downward_vars
def test_visibility_filter():
    """Visual regression test: render a lidar field of view clipped by obstacles.

    Builds a circular sensor FOV, several polygonal obstacles plus an outer
    boundary ring, computes the visible region, and writes
    visibility_filter.png under OUT_TESTS_DIR.
    """
    vis = ShapelyViz()
    sensor_pose: SE2Transform = SE2Transform(p=[(- 2), (- 1)], theta=2.3)
    # 20-unit circular field of view centered on the sensor position.
    lidar_fov = Point(sensor_pose.p).buffer(20)
    vis.add_shape(lidar_fov, color='gray', alpha=0.5)
    obs1 = Polygon([(10, 10), (10, 15), (15, 15), (15, 10)])
    obs2 = Polygon([((- 3), (- 3)), ((- 3), (- 7)), ((- 8), (- 10)), ((- 12), (- 9)), ((- 12), (- 3))])
    obs3 = Polygon([((- 3), 3), ((- 3), 7), ((- 8), 10), ((- 12), 9), ((- 12), 3)])
    # Closed outer ring enclosing the whole scene.
    obs4 = LinearRing([[(- 20), (- 20)], [(- 20), 20], [20, 20], [20, (- 20)], [(- 20), (- 20)]])
    vis.add_shape(obs1, color='black')
    vis.add_shape(obs2, color='black')
    vis.add_shape(obs3, color='black')
    vis.add_shape(obs4, color='black')
    (pov_x, pov_y) = lidar_fov.centroid.xy
    obs = list(interleave([shapely2crPolygons(o) for o in [obs1, obs2, obs3, obs4]]))
    # Full 360-degree range sensor placed at the sensor pose.
    lidar2d = VisRangeSensor(field_of_view=(2 * pi))
    lidar2d.pose = sensor_pose
    sensor_view: Polygon = lidar2d.fov_as_polygon(obs)
    vis.add_shape(sensor_view, color='cyan', alpha=0.5)
    plt.plot(pov_x, pov_y, 'r+')
    plt.gca().set_aspect('equal')
    plt.savefig((OUT_TESTS_DIR + '/visibility_filter.png'))
class DeploymentConfig(object):
    """Describes how model clones are deployed across devices and replicas.

    Stores the clone/replica/parameter-server layout and answers device
    placement queries (clone, optimizer, inputs, variables). The replica /
    ps-task configuration is validated in __init__.
    """
    def __init__(self, num_clones=1, clone_on_cpu=False, replica_id=0, num_replicas=1, num_ps_tasks=0, worker_job_name='worker', ps_job_name='ps'):
        if (num_replicas > 1):
            if (num_ps_tasks < 1):
                raise ValueError('When using replicas num_ps_tasks must be positive')
        if ((num_replicas > 1) or (num_ps_tasks > 0)):
            if (not worker_job_name):
                raise ValueError('Must specify worker_job_name when using replicas')
            if (not ps_job_name):
                raise ValueError('Must specify ps_job_name when using parameter server')
        if (replica_id >= num_replicas):
            raise ValueError('replica_id must be less than num_replicas')
        self._num_clones = num_clones
        self._clone_on_cpu = clone_on_cpu
        self._replica_id = replica_id
        self._num_replicas = num_replicas
        self._num_ps_tasks = num_ps_tasks
        # Job-name device prefixes are only meaningful with parameter servers.
        self._ps_device = (('/job:' + ps_job_name) if (num_ps_tasks > 0) else '')
        self._worker_device = (('/job:' + worker_job_name) if (num_ps_tasks > 0) else '')
    def num_clones(self):
        return self._num_clones
    def clone_on_cpu(self):
        return self._clone_on_cpu
    def replica_id(self):
        return self._replica_id
    def num_replicas(self):
        return self._num_replicas
    def num_ps_tasks(self):
        return self._num_ps_tasks
    def ps_device(self):
        return self._ps_device
    def worker_device(self):
        return self._worker_device
    def caching_device(self):
        """Device function for caching variables, or None when no ps is used."""
        if (self._num_ps_tasks > 0):
            # Cache each variable on the device of the op that consumes it.
            return (lambda op: op.device)
        else:
            return None
    def clone_device(self, clone_index):
        """Device string for placing the clone_index-th model clone."""
        if (clone_index >= self._num_clones):
            raise ValueError('clone_index must be less than num_clones')
        device = ''
        if (self._num_ps_tasks > 0):
            device += self._worker_device
        if self._clone_on_cpu:
            device += '/device:CPU:0'
        else:
            device += ('/device:GPU:%d' % clone_index)
        return device
    def clone_scope(self, clone_index):
        """Name scope for a clone; empty when there is only one clone."""
        if (clone_index >= self._num_clones):
            raise ValueError('clone_index must be less than num_clones')
        scope = ''
        if (self._num_clones > 1):
            scope = ('clone_%d' % clone_index)
        return scope
    def optimizer_device(self):
        """Device for gradient aggregation / optimizer updates (worker CPU)."""
        if ((self._num_ps_tasks > 0) or (self._num_clones > 0)):
            return (self._worker_device + '/device:CPU:0')
        else:
            return ''
    def inputs_device(self):
        """Device where the input pipeline should run (worker CPU)."""
        device = ''
        if (self._num_ps_tasks > 0):
            device += self._worker_device
        device += '/device:CPU:0'
        return device
    def variables_device(self):
        """Device string, or a round-robin chooser, for placing variables."""
        device = ''
        if (self._num_ps_tasks > 0):
            device += self._ps_device
        device += '/device:CPU:0'
        class _PSDeviceChooser(object):
            # Round-robins Variable ops across the available ps tasks.
            def __init__(self, device, tasks):
                self._device = device
                self._tasks = tasks
                self._task = 0
            def choose(self, op):
                if op.device:
                    # Respect an explicit placement already on the op.
                    return op.device
                node_def = (op if isinstance(op, tf.NodeDef) else op.node_def)
                if node_def.op.startswith('Variable'):
                    t = self._task
                    self._task = ((self._task + 1) % self._tasks)
                    d = ('%s/task:%d' % (self._device, t))
                    return d
                else:
                    return op.device
        if (not self._num_ps_tasks):
            return device
        else:
            chooser = _PSDeviceChooser(device, self._num_ps_tasks)
            return chooser.choose
def value_to_vector(value, ndim, dtype=float):
    """Broadcast a scalar to an `ndim` vector, or validate a vector's length.

    Scalars are repeated `ndim` times; array-likes must already contain
    exactly `ndim` elements, otherwise a ValueError is raised.
    """
    arr = np.asarray(value, dtype=dtype)
    if (arr.ndim == 0):
        # Scalar input: replicate it into a length-ndim vector.
        return np.full(ndim, arr, dtype=dtype)
    if (arr.size != ndim):
        raise ValueError(f'input vector ({arr}) does not have the correct dimensions (ndim = {ndim})')
    return arr
_module()
class ShallowCNN(BaseModule):
    """Two 3x3 conv layers (BN + ReLU), each followed by 2x2 max pooling."""

    def __init__(self, input_channels=1, hidden_dim=512, init_cfg=[dict(type='Kaiming', layer='Conv2d'), dict(type='Uniform', layer='BatchNorm2d')]):
        super().__init__(init_cfg=init_cfg)
        assert isinstance(input_channels, int)
        assert isinstance(hidden_dim, int)
        half_dim = hidden_dim // 2
        self.conv1 = ConvModule(input_channels, half_dim, kernel_size=3, stride=1, padding=1, bias=False, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'))
        self.conv2 = ConvModule(half_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'))
        # Shared pooling layer reused after each conv stage.
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        """Downsample 4x spatially while expanding channels to hidden_dim."""
        out = self.pool(self.conv1(x))
        out = self.pool(self.conv2(out))
        return out
class ConsistencyDecoderScheduler(SchedulerMixin, ConfigMixin):
    """Two-step consistency-model scheduler (DALL-E 3 consistency decoder style)."""
    order = 1
    _to_config  # NOTE(review): looks like a mangled `@register_to_config` decorator — confirm against upstream.
    def __init__(self, num_train_timesteps: int=1024, sigma_data: float=0.5):
        betas = betas_for_alpha_bar(num_train_timesteps)
        alphas = (1.0 - betas)
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        self.sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod)
        self.sqrt_one_minus_alphas_cumprod = torch.sqrt((1.0 - alphas_cumprod))
        sigmas = torch.sqrt(((1.0 / alphas_cumprod) - 1))
        sqrt_recip_alphas_cumprod = torch.sqrt((1.0 / alphas_cumprod))
        # Consistency-model skip/out/in scalings parameterized by sigma_data.
        self.c_skip = ((sqrt_recip_alphas_cumprod * (sigma_data ** 2)) / ((sigmas ** 2) + (sigma_data ** 2)))
        self.c_out = ((sigmas * sigma_data) / (((sigmas ** 2) + (sigma_data ** 2)) ** 0.5))
        self.c_in = (sqrt_recip_alphas_cumprod / (((sigmas ** 2) + (sigma_data ** 2)) ** 0.5))
    def set_timesteps(self, num_inference_steps: Optional[int]=None, device: Union[(str, torch.device)]=None):
        """Install the fixed two-step timestep schedule and move tables to `device`."""
        if (num_inference_steps != 2):
            raise ValueError('Currently more than 2 inference steps are not supported.')
        # Hard-coded pair of timesteps used by the consistency decoder.
        self.timesteps = torch.tensor([1008, 512], dtype=torch.long, device=device)
        self.sqrt_alphas_cumprod = self.sqrt_alphas_cumprod.to(device)
        self.sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod.to(device)
        self.c_skip = self.c_skip.to(device)
        self.c_out = self.c_out.to(device)
        self.c_in = self.c_in.to(device)
    def init_noise_sigma(self):
        # Initial noise scale taken at the first scheduled timestep.
        return self.sqrt_one_minus_alphas_cumprod[self.timesteps[0]]
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int]=None) -> torch.FloatTensor:
        """Scale the model input by the c_in coefficient for this timestep."""
        return (sample * self.c_in[timestep])
    def step(self, model_output: torch.FloatTensor, timestep: Union[(float, torch.FloatTensor)], sample: torch.FloatTensor, generator: Optional[torch.Generator]=None, return_dict: bool=True) -> Union[(ConsistencyDecoderSchedulerOutput, Tuple)]:
        """One denoising step: predict x_0, then re-noise unless this is the last step."""
        x_0 = ((self.c_out[timestep] * model_output) + (self.c_skip[timestep] * sample))
        timestep_idx = torch.where((self.timesteps == timestep))[0]
        if (timestep_idx == (len(self.timesteps) - 1)):
            # Final step: return the clean prediction directly.
            prev_sample = x_0
        else:
            noise = randn_tensor(x_0.shape, generator=generator, dtype=x_0.dtype, device=x_0.device)
            prev_sample = ((self.sqrt_alphas_cumprod[self.timesteps[(timestep_idx + 1)]].to(x_0.dtype) * x_0) + (self.sqrt_one_minus_alphas_cumprod[self.timesteps[(timestep_idx + 1)]].to(x_0.dtype) * noise))
        if (not return_dict):
            return (prev_sample,)
        return ConsistencyDecoderSchedulerOutput(prev_sample=prev_sample)
class ResNeXtBottleneck(nn.Module):
    """ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped conv -> 1x1 expand (no activation)."""

    def __init__(self, in_channels, out_channels, stride, cardinality, bottleneck_width, bottleneck_factor=4):
        super(ResNeXtBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        # Per-group width scaled by bottleneck_width relative to the 64-wide baseline.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        self.conv1 = conv1x1_block(in_channels=in_channels, out_channels=group_width)
        self.conv2 = conv3x3_block(in_channels=group_width, out_channels=group_width, stride=stride, groups=cardinality)
        self.conv3 = conv1x1_block(in_channels=group_width, out_channels=out_channels, activation=None)

    def forward(self, x):
        """Apply the three convolution stages in sequence."""
        for stage in (self.conv1, self.conv2, self.conv3):
            x = stage(x)
        return x
def expand(bbox, expansion_factor=1, expansion_abs=0):
    """Grow an (x, y, w, h) box about its center.

    The new size is the elementwise max of the multiplicative and additive
    expansions, so either parameter alone can drive the growth.
    """
    c = center(bbox)
    size = bbox[2:]
    grown = np.maximum(size * expansion_factor, size + expansion_abs)
    return np.concatenate([c - grown / 2, grown])
class GraphConvolution(Module):
    """Simple GCN layer (Kipf & Welling): out = adj @ (input @ W) (+ bias)."""

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register as None so state_dict/parameters stay consistent.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Project node features, then aggregate over the (sparse) adjacency."""
        projected = torch.mm(input, self.weight)
        aggregated = torch.spmm(adj, projected)
        if self.bias is None:
            return aggregated
        return aggregated + self.bias

    def __repr__(self):
        return '{} ({} -> {})'.format(self.__class__.__name__, self.in_features, self.out_features)
def init_args():
    """Parse the -x/-y/-z cartesian coordinates from the command line."""
    parser = argparse.ArgumentParser(description='Convert cartesian coordinate system to geodetic system.')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 0.0.1')
    # The three coordinate flags share an identical shape; build them in a loop.
    for axis in ('x', 'y', 'z'):
        parser.add_argument(f'-{axis}', metavar='<val>', dest=axis, type=float, required=True, help=f'the {axis.upper()} coordinate')
    return parser.parse_args()
def max_sublist_sum(arr):
    """Kadane's algorithm: maximum sum over contiguous sublists.

    The empty sublist is allowed, so the result is never below 0.
    """
    best = 0
    running = 0
    for value in arr:
        # Drop the running prefix whenever it would go negative.
        running = max(0, running + value)
        best = max(best, running)
    return best
def is_keras_nlp_available():
    """Return True when tensorflow_text and the keras_nlp package are both importable."""
    if not is_tensorflow_text_available():
        return False
    return importlib.util.find_spec('keras_nlp') is not None
def get_act_fn(name: Union[(Callable, str)]='relu'):
    """Resolve an activation name to a callable activation function.

    Callables are passed through unchanged. For string names the lookup
    order is: memory-efficient impl, JIT-scripted impl, then the default
    table — each gated by the export / scripting / no-jit mode flags.
    """
    if (not name):
        # Falsy name (None / '') means "no activation".
        return None
    if isinstance(name, Callable):
        return name
    if (not (is_no_jit() or is_exportable() or is_scriptable())):
        # Memory-efficient versions are not exportable or scriptable.
        if (name in _ACT_FN_ME):
            return _ACT_FN_ME[name]
    if (is_exportable() and (name in ('silu', 'swish'))):
        # Export path: fall back to the plain swish implementation.
        return swish
    if (not (is_no_jit() or is_exportable())):
        if (name in _ACT_FN_JIT):
            return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
class ResNet(nn.Module):
    """Standard ResNet backbone with a configurable base width.

    Args (to __init__):
        block: residual block class exposing a `.expansion` attribute.
        layers: four ints — number of blocks in each of the four stages.
        input_channel: channels of the input image (default 3).
        num_classes: size of the final fully-connected classifier.
        features: base channel width of the stem / first stage.
    """
    def __init__(self, block, layers, input_channel=3, num_classes=1000, features=64):
        self.inplanes = features
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(input_channel, features, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(features)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, features, layers[0])
        self.layer2 = self._make_layer(block, (features * 2), layers[1], stride=2)
        self.layer3 = self._make_layer(block, (features * 4), layers[2], stride=2)
        self.layer4 = self._make_layer(block, (features * 8), layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(((features * 8) * block.expansion), num_classes)
        # He-style initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one residual stage; add a 1x1 downsample when stride/width changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Stem -> four residual stages -> global average pool -> classifier."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
class phase(Enum):
    """Dataset split identifiers used to select which portion of data to load."""
    TRAIN = 'train'
    VAL = 'valid'
    TRAINVAL = 'trainval'
    TRAINTESTDEVOT = 'train_testdev_ot'
class MutableModule(BaseModule):
    """An MXNet Module whose input data shapes may change between batches.

    Wraps an internal `Module` and transparently re-binds it whenever an
    incoming batch's shapes differ from the currently bound shapes.
    Fixes: Python-2-only `xrange` replaced with `range` (the surrounding
    file uses Python-3-only syntax), and `bind` now tolerates its
    documented `label_shapes=None` default instead of crashing.
    """
    def __init__(self, symbol, data_names, label_names, logger=logging, context=ctx.cpu(), work_load_list=None, max_data_shapes=None, max_label_shapes=None, fixed_param_prefix=None):
        super(MutableModule, self).__init__(logger=logger)
        self._symbol = symbol
        self._data_names = data_names
        self._label_names = label_names
        self._context = context
        self._work_load_list = work_load_list
        self._curr_module = None
        self._max_data_shapes = max_data_shapes
        self._max_label_shapes = max_label_shapes
        self._fixed_param_prefix = fixed_param_prefix
        # Collect argument names frozen during training (matched by prefix).
        fixed_param_names = list()
        if (fixed_param_prefix is not None):
            for name in self._symbol.list_arguments():
                for prefix in self._fixed_param_prefix:
                    if name.startswith(prefix):
                        fixed_param_names.append(name)
        self._fixed_param_names = fixed_param_names
        self._preload_opt_states = None
    def _reset_bind(self):
        """Forget the current binding so the next bind() starts fresh."""
        self.binded = False
        self._curr_module = None
    def data_names(self):
        return self._data_names
    def output_names(self):
        return self._symbol.list_outputs()
    def data_shapes(self):
        assert self.binded
        return self._curr_module.data_shapes
    def label_shapes(self):
        assert self.binded
        return self._curr_module.label_shapes
    def output_shapes(self):
        assert self.binded
        return self._curr_module.output_shapes
    def get_params(self):
        assert (self.binded and self.params_initialized)
        return self._curr_module.get_params()
    def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None, allow_missing=False, force_init=False, allow_extra=True):
        """Initialize parameters on the currently bound internal module."""
        if (self.params_initialized and (not force_init)):
            return
        assert self.binded, 'call bind before initializing the parameters'
        self._curr_module.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init)
        self.params_initialized = True
    def bind(self, data_shapes, label_shapes=None, for_training=True, inputs_need_grad=False, force_rebind=False, shared_module=None, grad_req='write'):
        """Bind an internal Module at the maximum shapes so later batches fit."""
        if self.params_initialized:
            (arg_params, aux_params) = self.get_params()
        if force_rebind:
            self._reset_bind()
        if self.binded:
            self.logger.warning('Already binded, ignoring bind()')
            return
        assert (shared_module is None), 'shared_module for MutableModule is not supported'
        self.for_training = for_training
        self.inputs_need_grad = inputs_need_grad
        self.binded = True
        # Merge user-provided per-input shape caps into a single lookup.
        max_shapes_dict = dict()
        if (self._max_data_shapes is not None):
            max_shapes_dict.update(dict(self._max_data_shapes[0]))
        if (self._max_label_shapes is not None):
            max_shapes_dict.update(dict(self._max_label_shapes[0]))
        max_data_shapes = list()
        for (name, shape) in data_shapes[0]:
            if (name in max_shapes_dict):
                max_data_shapes.append((name, max_shapes_dict[name]))
            else:
                max_data_shapes.append((name, shape))
        max_label_shapes = list()
        # Bug fix: the default label_shapes=None previously crashed on .count().
        if ((label_shapes is not None) and (not (label_shapes.count(None) == len(label_shapes)))):
            for (name, shape) in label_shapes[0]:
                if (name in max_shapes_dict):
                    max_label_shapes.append((name, max_shapes_dict[name]))
                else:
                    max_label_shapes.append((name, shape))
        if (len(max_label_shapes) == 0):
            max_label_shapes = None
        module = Module(self._symbol, self._data_names, self._label_names, logger=self.logger, context=self._context, work_load_list=self._work_load_list, fixed_param_names=self._fixed_param_names)
        module.bind([max_data_shapes for _ in range(len(self._context))], [max_label_shapes for _ in range(len(self._context))], for_training, inputs_need_grad, force_rebind=False, shared_module=None)
        self._curr_module = module
        if self.params_initialized:
            self.set_params(arg_params, aux_params)
    def save_checkpoint(self, prefix, epoch, save_optimizer_states=False):
        self._curr_module.save_checkpoint(prefix, epoch, save_optimizer_states)
    def init_optimizer(self, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), force_init=False):
        """Create the optimizer on the internal module (restoring preloaded states)."""
        assert (self.binded and self.params_initialized)
        if (self.optimizer_initialized and (not force_init)):
            self.logger.warning('optimizer already initialized, ignoring.')
            return
        self._curr_module._preload_opt_states = self._preload_opt_states
        self._curr_module.init_optimizer(kvstore, optimizer, optimizer_params, force_init=force_init)
        self.optimizer_initialized = True
    def fit(self, train_data, eval_data=None, eval_metric='acc', epoch_end_callback=None, batch_end_callback=None, kvstore='local', optimizer='sgd', optimizer_params=(('learning_rate', 0.01),), eval_end_callback=None, eval_batch_end_callback=None, initializer=Uniform(0.01), arg_params=None, aux_params=None, allow_missing=False, force_rebind=False, force_init=False, begin_epoch=0, num_epoch=None, validation_metric=None, monitor=None, prefix=None):
        """Standard train loop: bind, init, then iterate epochs with callbacks."""
        assert (num_epoch is not None), 'please specify number of epochs'
        self.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label, for_training=True, force_rebind=force_rebind)
        if (monitor is not None):
            self.install_monitor(monitor)
        self.init_params(initializer=initializer, arg_params=arg_params, aux_params=aux_params, allow_missing=allow_missing, force_init=force_init)
        self.init_optimizer(kvstore=kvstore, optimizer=optimizer, optimizer_params=optimizer_params)
        if (validation_metric is None):
            validation_metric = eval_metric
        if (not isinstance(eval_metric, metric.EvalMetric)):
            eval_metric = metric.create(eval_metric)
        for epoch in range(begin_epoch, num_epoch):
            tic = time.time()
            eval_metric.reset()
            for (nbatch, data_batch) in enumerate(train_data):
                if (monitor is not None):
                    monitor.tic()
                self.forward_backward(data_batch)
                self.update()
                self.update_metric(eval_metric, data_batch.label)
                if (monitor is not None):
                    monitor.toc_print()
                if (batch_end_callback is not None):
                    batch_end_params = BatchEndParam(epoch=epoch, nbatch=nbatch, eval_metric=eval_metric, locals=locals())
                    for callback in _as_list(batch_end_callback):
                        # Callbacks that accept an 'iter_no' argument get the
                        # synced params signature instead of BatchEndParam.
                        if isfunction(callback):
                            if ('iter_no' in callback.__code__.co_varnames):
                                (arg_params, aux_params) = self.get_params()
                                self.set_params(arg_params, aux_params)
                                callback(epoch, nbatch, self.symbol, arg_params, aux_params)
                                continue
                        callback(batch_end_params)
            for (name, val) in eval_metric.get_name_value():
                self.logger.info('Epoch[%d] Train-%s=%f', epoch, name, val)
            toc = time.time()
            self.logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
            # Sync aggregated params back before epoch-end callbacks/eval.
            (arg_params, aux_params) = self.get_params()
            self.set_params(arg_params, aux_params)
            if (epoch_end_callback is not None):
                for callback in _as_list(epoch_end_callback):
                    callback(epoch, self.symbol, arg_params, aux_params)
            if eval_data:
                res = self.score(eval_data, validation_metric, score_end_callback=eval_end_callback, batch_end_callback=eval_batch_end_callback, epoch=epoch)
                for (name, val) in res:
                    self.logger.info('Epoch[%d] Validation-%s=%f', epoch, name, val)
            train_data.reset()
    def forward(self, data_batch, is_train=None):
        """Forward pass; re-binds the internal module if batch shapes changed."""
        assert (self.binded and self.params_initialized)
        if (self._curr_module.label_shapes is not None):
            current_shapes = [dict((self._curr_module.data_shapes[i] + self._curr_module.label_shapes[i])) for i in range(len(self._context))]
        else:
            current_shapes = [dict(self._curr_module.data_shapes[i]) for i in range(len(self._context))]
        if is_train:
            input_shapes = [dict((data_batch.provide_data[i] + data_batch.provide_label[i])) for i in range(len(self._context))]
        else:
            input_shapes = [dict(data_batch.provide_data[i]) for i in range(len(data_batch.provide_data))]
        shape_changed = (len(current_shapes) != len(input_shapes))
        for (pre, cur) in zip(current_shapes, input_shapes):
            for (k, v) in pre.items():
                if (v != cur[k]):
                    shape_changed = True
        if shape_changed:
            # Rebind with the new shapes, sharing memory with the old module.
            module = Module(self._symbol, self._data_names, self._label_names, logger=self.logger, context=[self._context[i] for i in range(len(data_batch.provide_data))], work_load_list=self._work_load_list, fixed_param_names=self._fixed_param_names)
            module.bind(data_batch.provide_data, data_batch.provide_label, self._curr_module.for_training, self._curr_module.inputs_need_grad, force_rebind=False, shared_module=self._curr_module)
            self._curr_module = module
        self._curr_module.forward(data_batch, is_train=is_train)
    def backward(self, out_grads=None):
        assert (self.binded and self.params_initialized)
        self._curr_module.backward(out_grads=out_grads)
    def update(self):
        assert (self.binded and self.params_initialized and self.optimizer_initialized)
        self._curr_module.update()
    def get_outputs(self, merge_multi_context=True):
        assert (self.binded and self.params_initialized)
        return self._curr_module.get_outputs(merge_multi_context=merge_multi_context)
    def get_input_grads(self, merge_multi_context=True):
        assert (self.binded and self.params_initialized and self.inputs_need_grad)
        return self._curr_module.get_input_grads(merge_multi_context=merge_multi_context)
    def update_metric(self, eval_metric, labels):
        assert (self.binded and self.params_initialized)
        self._curr_module.update_metric(eval_metric, labels)
    def install_monitor(self, mon):
        assert self.binded
        self._curr_module.install_monitor(mon)
def prototype_twitter_lstm():
    """Twitter dialogue configuration: plain LSTM (HRED collapsed to a standard RNN)."""
    state = prototype_state()
    state.update({
        # Data locations.
        'train_dialogues': '../TwitterData/Training.dialogues.pkl',
        'test_dialogues': '../TwitterData/Test.dialogues.pkl',
        'valid_dialogues': '../TwitterData/Validation.dialogues.pkl',
        'dictionary': '../TwitterData/Dataset.dict.pkl',
        'save_dir': 'Output',
        # Training schedule.
        'max_grad_steps': 80,
        'valid_freq': 5000,
        'prefix': 'TwitterModel_',
        'updater': 'adam',
        # Architecture flags (key spelling 'collaps_...' matches the model code).
        'deep_dialogue_input': True,
        'deep_out': True,
        'reset_utterance_decoder_at_end_of_utterance': False,
        'collaps_to_standard_rnn': True,
        'bs': 80,
        'decoder_bias_type': 'all',
        'direct_connection_between_encoders_and_decoder': False,
        'deep_direct_connection': False,
        # Layer dimensions.
        'qdim_encoder': 10,
        'qdim_decoder': 2000,
        'sdim': 10,
        'rankdim': 400,
    })
    return state
def segment_eval(batches, predictions, label_map, type_int_int_map, labels_id_str_map, vocab_id_str_map, outside_idx, pad_width, start_end, extra_text='', verbose=False):
    """Compute segment-level (phrase) precision/recall/F1 for sequence labeling.

    Walks predicted and gold label sequences in parallel, counts segment
    starts and exactly-matching segments per type, then reports micro and
    macro precision/recall/F1 (excluding types listed in ``outside_idx``).

    Returns
    -------
    tuple
        ``(f1_micro, precision_micro)``.
    """
    if (extra_text != ''):
        print(extra_text)

    def print_context(width, start, tok_list, pred_list, gold_list):
        # Debugging helper: dump token / predicted / gold triples around `start`.
        for offset in range((- width), (width + 1)):
            idx = (offset + start)
            if (0 <= idx < len(tok_list)):
                print(('%s\t%s\t%s' % (vocab_id_str_map[tok_list[idx]], labels_id_str_map[pred_list[idx]], labels_id_str_map[gold_list[idx]])))
        print()
    # Per-type counts of predicted segments, gold segments, and exact matches.
    pred_counts = {t: 0 for t in label_map.values()}
    gold_counts = {t: 0 for t in label_map.values()}
    correct_counts = {t: 0 for t in label_map.values()}
    token_count = 0
    for (predictions, (dev_label_batch, dev_token_batch, _, _, dev_seq_len_batch, _, _)) in zip(predictions, batches):
        for (preds, labels, tokens, seq_lens) in zip(predictions, dev_label_batch, dev_token_batch, dev_seq_len_batch):
            start = pad_width
            for seq_len in seq_lens:
                predicted = preds[start:(seq_len + start)]
                golds = labels[start:(seq_len + start)]
                toks = tokens[start:(seq_len + start)]
                for i in range(seq_len):
                    token_count += 1
                    pred = predicted[i]
                    gold = golds[i]
                    gold_str = labels_id_str_map[gold]
                    pred_str = labels_id_str_map[pred]
                    gold_prev = (None if (i == 0) else labels_id_str_map[golds[(i - 1)]])
                    pred_prev = (None if (i == 0) else labels_id_str_map[predicted[(i - 1)]])
                    pred_type = type_int_int_map[pred]
                    gold_type = type_int_int_map[gold]
                    pred_start = False
                    gold_start = False
                    if is_seg_start(pred_str, pred_prev):
                        pred_counts[pred_type] += 1
                        pred_start = True
                    if is_seg_start(gold_str, gold_prev):
                        gold_counts[gold_type] += 1
                        gold_start = True
                    if (pred_start and gold_start and (pred_type == gold_type)):
                        # Both segments start here with the same type; scan forward
                        # to check they also end together with matching types.
                        if (i == (seq_len - 1)):
                            correct_counts[gold_type] += 1
                        else:
                            j = (i + 1)
                            stop_search = False
                            while ((j < seq_len) and (not stop_search)):
                                pred2 = labels_id_str_map[predicted[j]]
                                gold2 = labels_id_str_map[golds[j]]
                                pred_type2 = type_int_int_map[predicted[j]]
                                pred_continue = is_continue(pred2)
                                gold_continue = is_continue(gold2)
                                if ((not pred_continue) or (not gold_continue) or (pred_type2 != gold_type) or (j == (seq_len - 1))):
                                    if (((not pred_continue) and (not gold_continue)) or (pred_continue and gold_continue and (pred_type2 == gold_type))):
                                        correct_counts[gold_type] += 1
                                    stop_search = True
                                j += 1
                # Advance past this sequence plus its padding (pad on both
                # sides when start_end markers are used).
                start += (seq_len + ((2 if start_end else 1) * pad_width))
    all_correct = np.sum([(p if (i not in outside_idx) else 0) for (i, p) in enumerate(correct_counts.values())])
    all_pred = np.sum([(p if (i not in outside_idx) else 0) for (i, p) in enumerate(pred_counts.values())])
    all_gold = np.sum([(p if (i not in outside_idx) else 0) for (i, p) in enumerate(gold_counts.values())])
    precisions = np.array([((correct_counts[i] / pred_counts[i]) if (pred_counts[i] != 0) else 0.0) for i in pred_counts.keys()])
    recalls = np.array([((correct_counts[i] / gold_counts[i]) if (gold_counts[i] != 0) else 1.0) for i in gold_counts.keys()])
    f1s = [((((2 * precision) * recall) / (recall + precision)) if ((recall + precision) != 0) else 0.0) for (precision, recall) in zip(precisions, recalls)]
    # BUG FIX: on Python 3, np.array(dict.values()) builds a 0-d object array
    # wrapping the view, so `!= 0` collapses to a scalar and np.where always
    # yielded [0]; materialize the view as a list first.
    in_indices = np.where((np.array(list(gold_counts.values())) != 0))[0]
    precision_macro = np.mean(precisions[in_indices])
    recall_macro = np.mean(recalls[in_indices])
    f1_macro = (((2 * precision_macro) * recall_macro) / (precision_macro + recall_macro))
    precision_micro = (all_correct / all_pred)
    recall_micro = (all_correct / all_gold)
    f1_micro = (((2 * precision_micro) * recall_micro) / (precision_micro + recall_micro))
    accuracy = (all_correct / all_gold)
    print(('\t%10s\tPrec\tRecall' % 'F1'))
    print(('%10s\t%2.2f\t%2.2f\t%2.2f' % ('Micro (Seg)', (f1_micro * 100), (precision_micro * 100), (recall_micro * 100))))
    print(('%10s\t%2.2f\t%2.2f\t%2.2f' % ('Macro (Seg)', (f1_macro * 100), (precision_macro * 100), (recall_macro * 100))))
    print('-------')
    for t in label_map:
        idx = label_map[t]
        if (idx not in outside_idx):
            print(('%10s\t%2.2f\t%2.2f\t%2.2f' % (t, (f1s[idx] * 100), (precisions[idx] * 100), (recalls[idx] * 100))))
    print(('Processed %d tokens with %d phrases; found: %d phrases; correct: %d.' % (token_count, all_gold, all_pred, all_correct)))
    sys.stdout.flush()
    return (f1_micro, precision_micro)
# NOTE(review): this block appears corrupted by the extraction process. Several
# tokens are truncated — `t_max =` and `scale =` have no right-hand side,
# `(scale == )` has no comparand, `machine_` / `fac_` / `component.(a)` are cut
# off, and `R[...]` looks like a stripped evaluation construct. In the original
# (CONCEPT-style Cython) source these were presumably unicode identifiers and
# infinity literals that did not survive export. Code left byte-identical;
# restore from the upstream source before use.
# The tuple below is presumably the argument list of a stripped
# @cython.header(...) decorator declaring the local variable types.
(components=list, static_timestepping_func=object, H='double', a='double', a_next='double', bottleneck=str, bottleneck_hubble=str, component='Component', extreme_force=str, force=str, gridsize='Py_ssize_t', key=tuple, measurements=dict, method=str, n='int', resolution='Py_ssize_t', scale='double', t='double', v_max='double', v_rms='double', a_max='double', t_courant='double', t_decay='double', t_dynamical='double', t_hubble='double', t_max='double', t_pm='double', t_w='double', x_max='double', t_a_early='double', t_a_late='double', _bar='double', _bar_component='double', returns=tuple)
def get_base_timestep_size(components, static_timestepping_func=None):
    """Return (t_max, bottleneck): the largest allowed base time-step size
    and a human-readable description of the limiting condition.

    Considers, in turn: a static time-stepping function (if supplied), the
    dynamical time scale, the Hubble time, growth limits on the scale
    factor, the equation-of-state parameter w, decay rates, and Courant-like
    conditions for the PM and P3M methods.
    """
    t = universals.t
    a = universals.a
    # Static time-stepping short-circuits every dynamic criterion below.
    if (static_timestepping_func is not None):
        t_max = static_timestepping_func(a)
        bottleneck = bottleneck_static_timestepping
        return (t_max, bottleneck)
    H = hubble(a)
    # NOTE(review): right-hand side truncated — presumably infinity.
    t_max =
    bottleneck = ''
    measurements = {}
    # Background density of all nonlinear components (comoving -> physical).
    _bar = 0
    for component in components:
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        _bar += ((a ** ((- 3) * (1 + component.w_eff(a=a)))) * component._bar)
    # The dynamical time scale; `machine_` is presumably machine epsilon.
    t_dynamical = (fac_dynamical / (sqrt((G_Newton * _bar)) + machine_))
    if (t_dynamical < t_max):
        t_max = t_dynamical
        bottleneck = 'the dynamical time scale'
    # Limit the late-time growth of the scale factor.
    if enable_Hubble:
        a_next = (a + a_max_late)
        if (a_next < 1):
            t_a_late = (t_base_background_factor * (cosmic_time(a_next) - t))
            if (t_a_late < t_max):
                t_max = t_a_late
                bottleneck = 'the maximum allowed a (late)'
    # Hubble time, relaxed at early times by the early a growth limit.
    if enable_Hubble:
        t_hubble = (fac_hubble / H)
        bottleneck_hubble = 'the Hubble time'
        a_next = (a + a_max_early)
        if (a_next < 1):
            t_a_early = (t_base_background_factor * (cosmic_time(a_next) - t))
            if (t_a_early > t_hubble):
                t_hubble = t_a_early
                bottleneck_hubble = 'the maximum allowed a (early)'
        if (t_hubble < t_max):
            t_max = t_hubble
            bottleneck = bottleneck_hubble
    # Limit from the equation-of-state parameter w of each component.
    for component in components:
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        t_w = (fac_w / (abs(cast(component.w(a=a), 'double')) + machine_))
        if (t_w < t_max):
            t_max = t_w
            bottleneck = f'w of {component.name}'
    # Limit from the decay rate of each component.
    for component in components:
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        _bar_component = (component._bar * (a ** ((- 3) * (1 + component.w_eff(a=a)))))
        # NOTE(review): `component.(a)` — attribute name truncated.
        t_decay = (((fac_ / (abs(component.(a)) + machine_)) * _bar) / _bar_component)
        if (t_decay < t_max):
            t_max = t_decay
            bottleneck = f'decay rate of {component.name}'
    # Courant condition for fluid components on their own grids.
    for component in components:
        if (component.representation == 'particles'):
            continue
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        key = (component, 'v_max')
        v_max = measurements[key] = (measurements[key] if (key in measurements) else measure(component, 'v_max'))
        if (v_max == 0):
            v_max = machine_
        x_max = (boxsize / component.gridsize)
        t_courant = ((fac_courant * x_max) / v_max)
        if (t_courant < t_max):
            t_max = t_courant
            bottleneck = f'the Courant condition for {component.name}'
    # Courant-like condition for the PM method, using the finest PM grid.
    for component in components:
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        resolution = 0
        for (force, method) in component.forces.items():
            if (method != 'pm'):
                continue
            for (method, gridsizes) in component.potential_gridsizes[force].items():
                if (method != 'pm'):
                    continue
                gridsize = np.max(gridsizes)
                if (gridsize > resolution):
                    resolution = gridsize
                    extreme_force = force
        if (resolution == 0):
            continue
        key = (component, 'v_rms')
        v_rms = measurements[key] = (measurements[key] if (key in measurements) else measure(component, 'v_rms'))
        if (component.representation == 'fluid'):
            v_rms -= ((light_speed * sqrt(component.w(a=a))) / a)
        if (v_rms < machine_):
            v_rms = machine_
        x_max = (boxsize / resolution)
        t_pm = ((fac_pm * x_max) / v_rms)
        if (t_pm < t_max):
            t_max = t_pm
            bottleneck = f'the PM method of the {extreme_force} force for {component.name}'
    # Courant-like condition for the P3M method, using the short-range scale.
    for component in components:
        if ((component.representation == 'fluid') and component.is_linear(0)):
            continue
        # NOTE(review): right-hand side truncated — presumably infinity.
        scale =
        for (force, method) in component.forces.items():
            if (method != 'p3m'):
                continue
            if (force != 'gravity'):
                abort(f'Force "{force}" with method "P3M" unknown to get_base_timestep_size()')
            if (R[shortrange_params['gravity']['scale']] < scale):
                scale = R[shortrange_params['gravity']['scale']]
                extreme_force = 'gravity'
        # NOTE(review): comparand truncated — presumably infinity.
        if (scale == ):
            continue
        key = (component, 'v_rms')
        v_rms = measurements[key] = (measurements[key] if (key in measurements) else measure(component, 'v_rms'))
        if (v_rms < machine_):
            v_rms = machine_
        x_max = scale
        t_p3m = ((fac_p3m * x_max) / v_rms)
        if (t_p3m < t_max):
            t_max = t_p3m
            bottleneck = f'the P3M method of the {extreme_force} force for {component.name}'
    # Shrink the step near designated initial times.
    if (t in initial_fac_times):
        t_max *= t_initial_fac
    # Record the chosen step to the static time-stepping file (master only).
    if (master and isinstance(static_timestepping, str)):
        if ((t + t_max) < cosmic_time(1)):
            a_max = (scale_factor((t + t_max)) - a)
            n = int(ceil((log10((1 / t_reltol)) + 0.5)))
            with open_file(static_timestepping, mode='a', encoding='utf-8') as f:
                if (f.tell() == 0):
                    static_timestepping_header = [f'Time-stepping recorded by CONCEPT job {jobid}', '', '{}a{}a'.format((' ' * ((n + 3) // 2)), (' ' * (n + 5)))]
                    f.write(unicode(('\n'.join([f'# {line}' for line in static_timestepping_header]) + '\n')))
                f.write(f'''{{:.{n}e}} {{:.{n}e}}
'''.format(a, a_max))
    return (t_max, bottleneck)
def starListParser(input_list: str):
    """Split a '*'-separated string into lowercase, stripped, non-empty items.

    Example: 'A * b**C ' -> ['a', 'b', 'c'].
    """
    normalized = input_list.strip().lower()
    tokens = (part.strip() for part in normalized.split('*'))
    return [token for token in tokens if token]
def rmse(targets: List[float], preds: List[float]) -> float:
    """Return the root-mean-squared error between targets and predictions."""
    mse = mean_squared_error(targets, preds)
    return math.sqrt(mse)
def convert_to_nii_gz(filename):
    """Re-save an image file as compressed NIfTI (.nii.gz) and delete the original."""
    image = sitk.ReadImage(filename)
    base, _ = os.path.splitext(filename)
    sitk.WriteImage(image, base + '.nii.gz')
    os.remove(filename)
def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'):
    """Warp an H x W x C image by a per-pixel flow field.

    `flow` must be H x W x 2 and spatially match `img`; out-of-range samples
    are filled with `filling_value`. Delegates the actual warping to the
    compiled `flow_warp_c` routine.
    """
    mode_codes = {'bilinear': 0, 'nearest': 1}
    assert len(img.shape) == 3
    assert len(flow.shape) == 3 and flow.shape[2] == 2
    assert flow.shape[:2] == img.shape[:2]
    assert interpolate_mode in mode_codes
    # The C routine expects float64 inputs and an integer mode code.
    warped = flow_warp_c(
        img.astype(np.float64),
        flow.astype(np.float64),
        filling_value=filling_value,
        interpolate_mode=mode_codes[interpolate_mode],
    )
    return warped
class ConvModule(nn.Module):
    """A bundle of convolution + optional normalization + optional activation.

    `activate_last=True` applies conv -> norm -> act; otherwise the order is
    norm -> act -> conv. `bias='auto'` disables the conv bias whenever a norm
    layer is present (the norm's affine shift makes it redundant).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, activation='relu', inplace=True, activate_last=True):
        super(ConvModule, self).__init__()
        assert conv_cfg is None or isinstance(conv_cfg, dict)
        assert norm_cfg is None or isinstance(norm_cfg, dict)
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.activation = activation
        self.inplace = inplace
        self.activate_last = activate_last
        self.with_norm = norm_cfg is not None
        self.with_activatation = activation is not None
        if bias == 'auto':
            # The norm layer's own bias/shift makes a conv bias redundant.
            bias = False if self.with_norm else True
        self.with_bias = bias
        if self.with_norm and self.with_bias:
            warnings.warn('ConvModule has norm and bias at the same time')
        self.conv = build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        # Mirror the conv layer's attributes for convenient introspection.
        self.in_channels = self.conv.in_channels
        self.out_channels = self.conv.out_channels
        self.kernel_size = self.conv.kernel_size
        self.stride = self.conv.stride
        self.padding = self.conv.padding
        self.dilation = self.conv.dilation
        self.transposed = self.conv.transposed
        self.output_padding = self.conv.output_padding
        self.groups = self.conv.groups
        if self.with_norm:
            # When the conv runs last, the norm sees the *input* channels.
            norm_channels = out_channels if self.activate_last else in_channels
            (self.norm_name, norm) = build_norm_layer(norm_cfg, norm_channels)
            self.add_module(self.norm_name, norm)
        if self.with_activatation:
            if self.activation not in ['relu']:
                raise ValueError('{} is currently not supported.'.format(self.activation))
            if self.activation == 'relu':
                self.activate = nn.ReLU(inplace=inplace)
        self.init_weights()

    @property
    def norm(self):
        """The normalization layer module, looked up by its registered name.

        BUG FIX: this must be a property. As a plain method, `self.norm(x)`
        in forward() called the accessor with an extra argument (TypeError),
        and `constant_init(self.norm, ...)` received a bound method instead
        of the norm module.
        """
        return getattr(self, self.norm_name)

    def init_weights(self):
        """Kaiming-init the conv and constant-init the norm (if present)."""
        nonlinearity = 'relu' if self.activation is None else self.activation
        kaiming_init(self.conv, nonlinearity=nonlinearity)
        if self.with_norm:
            constant_init(self.norm, 1, bias=0)

    def forward(self, x, activate=True, norm=True):
        """Apply the bundled layers; `activate`/`norm` can disable those stages."""
        if self.activate_last:
            x = self.conv(x)
            if norm and self.with_norm:
                x = self.norm(x)
            if activate and self.with_activatation:
                x = self.activate(x)
        else:
            if norm and self.with_norm:
                x = self.norm(x)
            if activate and self.with_activatation:
                x = self.activate(x)
            x = self.conv(x)
        return x
def greedy_select(logits, mask=None):
    """Return a one-hot encoding of the argmax of the masked softmax of `logits`."""
    probs = masked_softmax(logits=logits, mask=mask)
    top_indices = probs.max(1)[1]
    return convert_to_one_hot(indices=top_indices, num_classes=logits.size(1))
def master_params_to_model_params(param_groups_and_shapes, master_params):
    """Copy flattened master parameters back into the model parameters.

    Each master parameter holds the concatenation of one parameter group;
    it is unflattened and copied element-wise (via detached in-place copy).
    """
    for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
        unflattened = unflatten_master_params(param_group, master_param.view(-1))
        for (_, param), master_chunk in zip(param_group, unflattened):
            param.detach().copy_(master_chunk)
def path(elem, dr=None):
    """Return the filter file path for element `elem` (data release `dr`).

    When `dr` is None the default data release is used. The element name is
    normalized to capitalized form (e.g. 'fe' -> 'Fe').
    """
    if dr is None:
        dr = _default_dr()
    base_dir = os.path.dirname(os.path.realpath(__file__))
    rel_path = 'filter/%s/%s.filt' % (_dr_string(dr), elem.lower().capitalize())
    return os.path.join(base_dir, rel_path)
def parse_args():
    """Parse command-line arguments for testing a Fast R-CNN network.

    Prints help and exits with status 1 when invoked without arguments.
    """
    def _str2bool(value):
        # BUG FIX: argparse's `type=bool` returns True for ANY non-empty
        # string (including 'False'); parse common boolean spellings instead.
        if isinstance(value, bool):
            return value
        return value.lower() in ('yes', 'true', 't', '1')
    parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
    parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use', default=0, type=int)
    parser.add_argument('--def', dest='prototxt', help='prototxt file defining the network', default=None, type=str)
    parser.add_argument('--net', dest='caffemodel', help='model to test', default=None, type=str)
    parser.add_argument('--cfg', dest='cfg_file', help='optional config file', default=None, type=str)
    parser.add_argument('--wait', dest='wait', help='wait until net file exists', default=True, type=_str2bool)
    parser.add_argument('--imdb', dest='imdb_name', help='dataset to test', default='voc_2007_test', type=str)
    parser.add_argument('--comp', dest='comp_mode', help='competition mode', action='store_true')
    parser.add_argument('--set', dest='set_cfgs', help='set config keys', default=None, nargs=argparse.REMAINDER)
    parser.add_argument('--vis', dest='vis', help='visualize detections', action='store_true')
    parser.add_argument('--num_dets', dest='max_per_image', help='max number of detections per image', default=400, type=int)
    parser.add_argument('--rpn_file', dest='rpn_file', default=None, type=str)
    parser.add_argument('--test_label', dest='test_label', help='place to save', default=None, type=str)
    parser.add_argument('--test_image', dest='test_image', help='place to save', default=None, type=str)
    # With no arguments at all, show usage instead of silently using defaults.
    if (len(sys.argv) == 1):
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args
class BasicContextFPN(HybridBlock):
    """FPN-style segmentation network built from dilated context stages.

    Builds up to six sequential stages with increasing dilation, fuses the
    deeper stage outputs back into the shallower ones top-down, and ends
    with an FCN head whose output is bilinearly resized to the input size.
    The conv/fuse/activation behaviors are selected by name via
    `conv_mode` / `fuse_mode` / `act_type`.
    """

    def __init__(self, dilations=[1, 1, 2, 4, 8, 16], channels=16, classes=1, conv_mode='xxx', fuse_mode='xxx', act_type='relu', skernel=3, act_dilation=16, useReLU=False, use_act_head=False, check_fullly=False, act_layers=4, act_order='xxx', asBackbone=False, addstem=False, maxpool=True, **kwargs):
        # NOTE(review): the mutable default `dilations=[...]` is shared across
        # calls; it is only read here, but confirm it is never mutated.
        super(BasicContextFPN, self).__init__(**kwargs)
        assert (act_type in ['swish', 'prelu', 'relu', 'xUnit', 'SeqATAC', 'SpaATAC', 'ChaATAC', 'MSSeqATAC', 'MSSeqATACAdd', 'MSSeqATACConcat']), 'Unknown act_type'
        # NOTE(review): 'Sub_MSSpaDyReF' and 'SK_MSSeqDyReF' pass this assert
        # but are not handled in _make_layer (ValueError there) — confirm.
        assert (conv_mode in ['fixed', 'learned', 'ChaDyReF', 'SeqDyReF', 'SK_ChaDyReF', 'SK_1x1DepthDyReF', 'SK_MSSpaDyReF', 'SK_SpaDyReF', 'Direct_Add', 'SKCell', 'SK_SeqDyReF', 'Sub_MSSpaDyReF', 'SK_MSSeqDyReF']), 'Unknown conv_mode'
        stem_width = int((channels // 2))
        self.layer_num = len(dilations)
        with self.name_scope():
            # Optional stem: two stride-reducing steps (conv s=2 and maxpool).
            self.stem = nn.HybridSequential(prefix='stem')
            if addstem:
                self.stem.add(nn.Conv2D(channels=stem_width, kernel_size=3, strides=2, padding=1, use_bias=False))
                self.stem.add(nn.BatchNorm(in_channels=stem_width))
                self.stem.add(nn.Activation('relu'))
                self.stem.add(nn.Conv2D(channels=stem_width, kernel_size=3, strides=1, padding=1, use_bias=False))
                self.stem.add(nn.BatchNorm(in_channels=stem_width))
                self.stem.add(nn.Activation('relu'))
                self.stem.add(nn.Conv2D(channels=(stem_width * 2), kernel_size=3, strides=1, padding=1, use_bias=False))
                self.stem.add(nn.BatchNorm(in_channels=(stem_width * 2)))
                self.stem.add(nn.Activation('relu'))
                if maxpool:
                    self.stem.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
            # Stage 1 absorbs the first two dilations; later stages get one each.
            self.stage_1 = nn.HybridSequential(prefix='stage_1')
            self.stage_1.add(self._make_layer(dilation=dilations[0], channels=channels, stage_index=0, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone))
            if (self.layer_num >= 2):
                self.stage_1.add(self._make_layer(dilation=dilations[1], channels=channels, stage_index=1, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone))
            if (self.layer_num >= 3):
                self.stage_2 = self._make_layer(dilation=dilations[2], channels=channels, stage_index=2, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone)
                self.fuse12 = self._fuse_layer(fuse_mode=fuse_mode, channels=channels, act_dilation=act_dilation, useReLU=useReLU, fuse_index=12)
            if (self.layer_num >= 4):
                self.stage_3 = self._make_layer(dilation=dilations[3], channels=channels, stage_index=3, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone)
                self.fuse23 = self._fuse_layer(fuse_mode=fuse_mode, channels=channels, act_dilation=act_dilation, useReLU=useReLU, fuse_index=23)
            if (self.layer_num >= 5):
                self.stage_4 = self._make_layer(dilation=dilations[4], channels=channels, stage_index=4, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone)
                self.fuse34 = self._fuse_layer(fuse_mode=fuse_mode, channels=channels, act_dilation=act_dilation, useReLU=useReLU, fuse_index=34)
            if (self.layer_num >= 6):
                self.stage_5 = self._make_layer(dilation=dilations[5], channels=channels, stage_index=5, conv_mode=conv_mode, act_type=act_type, skernel=skernel, act_dilation=act_dilation, useReLU=useReLU, check_fullly=check_fullly, act_layers=act_layers, act_order=act_order, asBackbone=asBackbone)
                self.fuse45 = self._fuse_layer(fuse_mode=fuse_mode, channels=channels, act_dilation=act_dilation, useReLU=useReLU, fuse_index=45)
            self.head = _FCNHead(in_channels=channels, channels=classes)

    def _make_layer(self, dilation, channels, stage_index, conv_mode, act_type, skernel, act_dilation, useReLU, check_fullly, act_layers, act_order, asBackbone):
        """Build one conv(+BN+ReLU) stage; the conv variant is chosen by `conv_mode`."""
        layer = nn.HybridSequential(prefix=('stage%d_' % stage_index))
        with layer.name_scope():
            if (conv_mode == 'fixed'):
                layer.add(nn.Conv2D(channels=channels, kernel_size=3, dilation=dilation, padding=dilation))
            elif (conv_mode == 'learned'):
                layer.add(LearnedConv(channels=channels, dilations=dilation))
            elif (conv_mode == 'ChaDyReF'):
                layer.add(ChaDyReFConv(channels=channels, dilations=dilation))
            elif (conv_mode == 'SK_ChaDyReF'):
                layer.add(SK_ChaDyReFConv(channels=channels, dilations=dilation))
            elif (conv_mode == 'SK_1x1DepthDyReF'):
                layer.add(SK_1x1DepthDyReFConv(channels=channels, dilations=dilation))
            elif (conv_mode == 'SK_MSSpaDyReF'):
                layer.add(SK_MSSpaDyReFConv(channels=channels, dilations=dilation, asBackbone=asBackbone))
            elif (conv_mode == 'Direct_Add'):
                layer.add(Direct_AddConv(channels=channels, dilations=dilation, asBackbone=asBackbone))
            elif (conv_mode == 'SK_SpaDyReF'):
                layer.add(SK_SpaDyReFConv(channels=channels, dilations=dilation, act_dilation=act_dilation))
            elif (conv_mode == 'SKCell'):
                layer.add(SKConv(channels=channels, dilations=dilation))
            elif (conv_mode == 'SeqDyReF'):
                layer.add(SeqDyReFConv(channels=channels, dilations=dilation, act_dilation=act_dilation, useReLU=useReLU, asBackbone=asBackbone))
            elif (conv_mode == 'SK_SeqDyReF'):
                layer.add(SK_SeqDyReFConv(channels=channels, dilations=dilation, act_dilation=act_dilation, useReLU=useReLU, asBackbone=asBackbone))
            else:
                raise ValueError('Unknown conv_mode')
            layer.add(nn.BatchNorm())
            layer.add(nn.Activation('relu'))
        return layer

    def _fuse_layer(self, fuse_mode, channels, act_dilation, useReLU, fuse_index):
        """Build the top-down fusion block selected by `fuse_mode`."""
        if (fuse_mode == 'Direct_Add'):
            fuse_layer = Direct_AddFuse(channels=channels)
        elif (fuse_mode == 'SK'):
            fuse_layer = SKFuse(channels=channels)
        elif (fuse_mode == 'LocalCha'):
            fuse_layer = LocalChaFuse(channels=channels)
        elif (fuse_mode == 'GlobalCha'):
            fuse_layer = GlobalChaFuse(channels=channels)
        elif (fuse_mode == 'LocalGlobalCha'):
            fuse_layer = LocalGlobalChaFuse(channels=channels)
        elif (fuse_mode == 'LocalSpa'):
            fuse_layer = LocalSpaFuse(channels=channels, act_dilation=act_dilation)
        elif (fuse_mode == 'GlobalSpa'):
            fuse_layer = GlobalSpaFuse(channels=channels, act_dilation=act_dilation)
        elif (fuse_mode == 'SK_MSSpa'):
            fuse_layer = SK_MSSpaFuse(channels=channels, act_dilation=act_dilation)
        else:
            raise ValueError('Unknown fuse_mode')
        return fuse_layer

    def hybrid_forward(self, F, x):
        """Run stem, stages, top-down fusion, head; resize back to input size."""
        # NOTE(review): x.shape requires an imperative NDArray; this block
        # presumably cannot be hybridized/symbolic — confirm.
        (_, _, hei, wid) = x.shape
        xs = self.stem(x)
        x1 = self.stage_1(xs)
        if (self.layer_num <= 2):
            xf = x1
        elif (self.layer_num == 3):
            x2 = self.stage_2(x1)
            xf = self.fuse12(x2, x1)
        elif (self.layer_num == 4):
            x2 = self.stage_2(x1)
            x3 = self.stage_3(x2)
            xf = self.fuse23(x3, x2)
            xf = self.fuse12(xf, x1)
        elif (self.layer_num == 5):
            x2 = self.stage_2(x1)
            x3 = self.stage_3(x2)
            x4 = self.stage_4(x3)
            xf = self.fuse34(x4, x3)
            xf = self.fuse23(xf, x2)
            xf = self.fuse12(xf, x1)
        elif (self.layer_num == 6):
            x2 = self.stage_2(x1)
            x3 = self.stage_3(x2)
            x4 = self.stage_4(x3)
            x5 = self.stage_5(x4)
            xf = self.fuse45(x5, x4)
            xf = self.fuse34(xf, x3)
            xf = self.fuse23(xf, x2)
            xf = self.fuse12(xf, x1)
        xo = self.head(xf)
        # Upsample logits back to the input spatial resolution.
        out = F.contrib.BilinearResize2D(xo, height=hei, width=wid)
        return out

    def evaluate(self, x):
        """Inference entry point: plain forward pass."""
        return self.forward(x)
def test_digits_sqrt_modular_object():
    """GraphCutSelection driven by an explicit ModularGreedy optimizer must
    reproduce the reference modular ranking, gains, and subset."""
    optimizer = ModularGreedy(random_state=0)
    model = GraphCutSelection(100, 'cosine', optimizer=optimizer)
    model.fit(X_digits)
    assert_array_equal(model.ranking, digits_cosine_modular_ranking)
    assert_array_almost_equal(model.gains, digits_cosine_modular_gains, 4)
    assert_array_almost_equal(model.subset, X_digits[model.ranking])
def preprocess_strategy(dataset):
    """Return (train, val, evaluate) transform pipelines for a dataset family.

    The evaluate pipeline produces multiple crops per image (flip/ten-crop
    style) stacked along a new leading dimension.

    Raises
    ------
    KeyError
        If `dataset` does not start with a known prefix.
    """
    # Shared tail of every multi-crop evaluation pipeline: tensorize each
    # crop, then normalize each crop, stacking the results.
    def _stack_to_tensor():
        return transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops]))

    def _stack_normalize():
        return transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops]))

    evaluate_transforms = None
    if dataset.startswith('CUB'):
        train_transforms = transforms.Compose([transforms.Resize(448), transforms.CenterCrop(448), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
        val_transforms = transforms.Compose([transforms.Resize(448), transforms.CenterCrop(448), transforms.ToTensor(), normalize])
        evaluate_transforms = transforms.Compose([transforms.Resize(448), CenterCropWithFlip(448), _stack_to_tensor(), _stack_normalize()])
    elif dataset.startswith('Aircraft'):
        train_transforms = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop(448), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
        val_transforms = transforms.Compose([transforms.Resize((512, 512)), transforms.CenterCrop(448), transforms.ToTensor(), normalize])
        evaluate_transforms = transforms.Compose([transforms.Resize((512, 512)), CenterCropWithFlip(448), _stack_to_tensor(), _stack_normalize()])
    elif dataset.startswith('Cars'):
        train_transforms = transforms.Compose([transforms.Resize((448, 448)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
        val_transforms = transforms.Compose([transforms.Resize((448, 448)), transforms.ToTensor(), normalize])
        evaluate_transforms = transforms.Compose([transforms.Resize((448, 448)), CenterCropWithFlip(448), _stack_to_tensor(), _stack_normalize()])
    elif dataset.startswith('ImageNet'):
        train_transforms = transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize])
        val_transforms = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])
        evaluate_transforms = transforms.Compose([transforms.Resize(256), transforms.TenCrop(224), _stack_to_tensor(), _stack_normalize()])
    else:
        raise KeyError("=> transform method of '{}' does not exist!".format(dataset))
    return (train_transforms, val_transforms, evaluate_transforms)
def max_sublist_sum(arr):
    """Return the maximum sum of any contiguous sublist of `arr`.

    Uses Kadane's algorithm. The empty sublist (sum 0) is allowed, so the
    result is never negative; an empty input yields 0.
    """
    max_ending_here = 0
    max_so_far = 0
    for x in arr:
        # BUG FIX: the running sum must reset to 0 when it goes negative;
        # without max(0, ...) a negative prefix poisons all later sums.
        max_ending_here = max(0, max_ending_here + x)
        max_so_far = max(max_so_far, max_ending_here)
    return max_so_far
def process_image(img):
    """Resize `img` so its longer side equals the global `min_side`, then
    zero-pad it symmetrically into a square `min_side` x `min_side` canvas.

    The +1 adjustments distribute the single leftover pixel when a resized
    dimension is odd.
    """
    size = img.shape
    (h, w) = (size[0], size[1])
    scale = (max(w, h) / float(min_side))
    (new_w, new_h) = (int((w / scale)), int((h / scale)))
    resize_img = cv2.resize(img, (new_w, new_h))
    # BUG FIX: use floor division — cv2.copyMakeBorder requires integer
    # border widths; the original true division produced floats and raised
    # a TypeError on Python 3.
    if (((new_w % 2) != 0) and ((new_h % 2) == 0)):
        (top, bottom, left, right) = (((min_side - new_h) // 2), ((min_side - new_h) // 2), (((min_side - new_w) // 2) + 1), ((min_side - new_w) // 2))
    elif (((new_h % 2) != 0) and ((new_w % 2) == 0)):
        (top, bottom, left, right) = ((((min_side - new_h) // 2) + 1), ((min_side - new_h) // 2), ((min_side - new_w) // 2), ((min_side - new_w) // 2))
    elif (((new_h % 2) == 0) and ((new_w % 2) == 0)):
        (top, bottom, left, right) = (((min_side - new_h) // 2), ((min_side - new_h) // 2), ((min_side - new_w) // 2), ((min_side - new_w) // 2))
    else:
        (top, bottom, left, right) = ((((min_side - new_h) // 2) + 1), ((min_side - new_h) // 2), (((min_side - new_w) // 2) + 1), ((min_side - new_w) // 2))
    pad_img = cv2.copyMakeBorder(resize_img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    return pad_img
def get_args_parser():
    """Build the argument parser for the grounded situation recognition model."""
    parser = argparse.ArgumentParser('Set grounded situation recognition transformer', add_help=False)
    # Optimization.
    parser.add_argument('--lr', default=0.0001, type=float)
    parser.add_argument('--lr_backbone', default=1e-05, type=float)
    parser.add_argument('--lr_drop', default=100, type=int)
    parser.add_argument('--weight_decay', default=0.0001, type=float)
    parser.add_argument('--clip_max_norm', default=0.1, type=float, help='gradient clipping max norm')
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--epochs', default=40, type=int)
    # Model architecture.
    parser.add_argument('--backbone', default='resnet50', type=str, help='Name of the convolutional backbone to use')
    parser.add_argument('--position_embedding', default='learned', type=str, choices=('sine', 'learned'), help='Type of positional embedding to use on top of the image features')
    parser.add_argument('--enc_layers', default=6, type=int, help='Number of encoding layers in the transformer')
    parser.add_argument('--dec_layers', default=6, type=int, help='Number of decoding layers in the transformer')
    parser.add_argument('--dim_feedforward', default=2048, type=int, help='Intermediate size of the feedforward layers in the transformer blocks')
    parser.add_argument('--hidden_dim', default=512, type=int, help='Size of the embeddings (dimension of the transformer)')
    parser.add_argument('--dropout', default=0.15, type=float, help='Dropout applied in the transformer')
    parser.add_argument('--nheads', default=8, type=int, help="Number of attention heads inside the transformer's attentions")
    # Loss coefficients.
    parser.add_argument('--noun_loss_coef', default=1, type=float)
    parser.add_argument('--verb_loss_coef', default=1, type=float)
    parser.add_argument('--bbox_loss_coef', default=5, type=float)
    parser.add_argument('--bbox_conf_loss_coef', default=5, type=float)
    parser.add_argument('--giou_loss_coef', default=5, type=float)
    # Dataset and run modes.
    parser.add_argument('--dataset_file', default='swig')
    parser.add_argument('--swig_path', type=str, default='SWiG')
    parser.add_argument('--dev', default=False, action='store_true')
    parser.add_argument('--test', default=False, action='store_true')
    # BUG FIX: `--inference` lacked action='store_true', unlike the sibling
    # --dev/--test flags, so passing it bare raised an error and any supplied
    # value was a (always-truthy) string.
    parser.add_argument('--inference', default=False, action='store_true')
    parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda', help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch')
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--saved_model', default='gsrtr_checkpoint.pth', help='path where saved model is')
    # Distributed training.
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
class Dynamics(nn.Module):
    """Dynamics network: maps (state representation, action planes) to the
    next hidden state via a conv layer followed by residual blocks."""

    def __init__(self, rp_shape, act_shape):
        super().__init__()
        self.rp_shape = rp_shape
        in_channels = rp_shape[0] + act_shape[0]
        self.layer0 = Conv(in_channels, num_filters, 3, bn=True)
        self.blocks = nn.ModuleList([ResidualBlock(num_filters) for _ in range(num_blocks)])

    def forward(self, rp, a):
        # Stack the representation and action planes along the channel axis.
        x = self.layer0(torch.cat([rp, a], dim=1))
        for residual in self.blocks:
            x = residual(x)
        return x
def test_isotropic_eddington_dehnencore_in_nfw_dens_spherically_symmetric():
    """Samples of an Eddington DF (Dehnen-core density in an NFW potential)
    must be spherically symmetric: low-order spherical-harmonic moments of
    the sample should vanish within tolerance."""
    pot = potential.NFWPotential(amp=2.3, a=1.3)
    denspot = potential.DehnenCoreSphericalPotential(amp=2.5, a=1.15)
    dfp = eddingtondf(pot=pot, denspot=denspot)
    numpy.random.seed(10)
    samp = dfp.sample(n=100000)
    tol = 0.01
    # (l, m) spherical-harmonic orders to check, including one high-order pair.
    for l, m in [(0, 0), (1, 0), (1, -1), (1, 1),
                 (2, 0), (2, -1), (2, -2), (2, 1), (2, 2),
                 (3, 1), (9, -6)]:
        check_spherical_symmetry(samp, l, m, tol)
    return None
def create_get_pure_strat_cached(cache: dict):
    """Return a loader that installs pure-strategy weights on a policy,
    memoizing checkpoint loads in `cache` keyed by checkpoint path."""
    def load_pure_strat_cached(policy: Policy, pure_strat_spec):
        ckpt_path = pure_strat_spec.metadata['checkpoint_path']
        weights = cache.get(ckpt_path)
        if weights is None:
            checkpoint_data = deepdish.io.load(path=ckpt_path)
            raw_weights = checkpoint_data['weights']
            # Checkpoint keys encode '.' as '_dot_'; undo that here.
            weights = {key.replace('_dot_', '.'): value for key, value in raw_weights.items()}
            cache[ckpt_path] = weights
        policy.set_weights(weights=weights)
        policy.policy_spec = pure_strat_spec
    return load_pure_strat_cached
def test_geotext_case_sensitive_demo_data():
    """With case_sensitive=False on the demo data, a lowercase 'berlin'
    must still be resolved to the city 'Berlin' with the right span."""
    config = GeoTextConfiguration(use_demo_data=True, case_sensitive=False)
    geotext = GeoText(config)
    output = geotext.extract(input_text='berlin ist ne tolle stadt')
    berlin = output['cities']['Berlin']
    assert berlin['span_info'] == [(0, 6)]
    assert berlin['found_as'] == ['berlin']
class SoftmaxParameter(_message.Message):
    """Protocol-buffer message class for softmax layer parameters.

    NOTE(review): this looks like protoc-generated code (caffe.proto style);
    presumably it should not be edited by hand — confirm against the .proto.
    """
    # Python-2-style protobuf metaclass hook; has no effect on Python 3.
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOFTMAXPARAMETER
def move_element_to_front(list, element):
    """Move the first occurrence of `element` to the front of `list`, in place.

    Returns the same list object; the list is unchanged when `element` is absent.
    """
    # NOTE(review): the parameter shadows the builtin `list`; name kept to
    # preserve the call interface.
    try:
        position = list.index(element)
    except ValueError:
        return list
    list.insert(0, list.pop(position))
    return list
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.