code stringlengths 101 5.91M |
|---|
def choose_devices(target_abi, target_ids):
    """Resolve `target_ids` into the list of devices to run on.

    Args:
        target_abi: ABI used to pick the device class.
        target_ids: 'all', 'random', or a comma-separated list of device ids.

    Returns:
        List of devices selected from the connected devices.
    """
    devices = device_class(target_abi).list_devices()
    if target_ids == 'all':
        return devices
    if target_ids == 'random':
        # Prefer an unlocked device; fall back to any device when all are locked.
        unlocked = [dev for dev in devices if not util.is_device_locked(dev)]
        pool = unlocked if unlocked else devices
        return [random.choice(pool)]
    # Explicit id list: keep only the ids that are actually connected.
    requested = [part.strip() for part in target_ids.split(',')]
    return [dev for dev in requested if dev in devices]
def set_ro(ro):
    """Store the 'ro' normalization scale in the global configuration.

    Accepts a plain number or, when astropy is installed, an astropy
    Quantity which is converted to kpc first.
    """
    from ..util._optional_deps import _APY_LOADED
    if _APY_LOADED:
        from astropy import units
        if isinstance(ro, units.Quantity):
            # Normalize Quantity inputs to a bare kpc value.
            ro = ro.to(units.kpc).value
    __config__.set('normalization', 'ro', str(ro))
class ExternalProcess(object):
    """Proxy that runs an environment inside a separate process.

    The environment is built in the child process and every interaction
    (step / reset / attribute lookup) is forwarded over a multiprocessing
    Pipe using the tagged message protocol below.
    """

    # Message tags sent parent -> worker.
    _ACTION = 1
    _RESET = 2
    _CLOSE = 3
    _ATTRIBUTE = 4
    # Message tags sent worker -> parent.
    _TRANSITION = 5
    _OBSERV = 6
    _EXCEPTION = 7
    _VALUE = 8

    def __init__(self, constructor):
        """Spawn the worker process.

        Args:
            constructor: zero-argument callable that builds the environment
                inside the child process (keeps the env out of this process).
        """
        (self._conn, conn) = multiprocessing.Pipe()
        self._process = multiprocessing.Process(target=self._worker, args=(constructor, conn))
        # Ensure the child is torn down even if close() is never called.
        atexit.register(self.close)
        self._process.start()
        # Lazily fetched, cached copies of the remote spaces.
        self._observ_space = None
        self._action_space = None

    def observation_space(self):
        """Return the remote env's observation space (cached after first fetch)."""
        if (not self._observ_space):
            self._observ_space = self.__getattr__('observation_space')
        return self._observ_space

    def action_space(self):
        """Return the remote env's action space (cached after first fetch)."""
        if (not self._action_space):
            self._action_space = self.__getattr__('action_space')
        return self._action_space

    def __getattr__(self, name):
        """Fetch an attribute of the remote environment (blocking round-trip)."""
        self._conn.send((self._ATTRIBUTE, name))
        return self._receive(self._VALUE)

    def step(self, action, blocking=True):
        """Apply `action` in the remote env.

        Returns the transition when `blocking`; otherwise returns a
        zero-argument callable that waits for and returns it.
        """
        self._conn.send((self._ACTION, action))
        if blocking:
            return self._receive(self._TRANSITION)
        else:
            return functools.partial(self._receive, self._TRANSITION)

    def reset(self, blocking=True):
        """Reset the remote env; same blocking contract as step()."""
        self._conn.send((self._RESET, None))
        if blocking:
            return self._receive(self._OBSERV)
        else:
            return functools.partial(self._receive, self._OBSERV)

    def close(self):
        """Request worker shutdown and wait for the process to exit."""
        try:
            self._conn.send((self._CLOSE, None))
            self._conn.close()
        except IOError:
            # Connection already closed (e.g. the worker died first).
            pass
        self._process.join()

    def _receive(self, expected_message):
        """Read one message from the pipe and unwrap its payload.

        Raises:
            Exception: a worker-side error; payload is its formatted stacktrace.
            KeyError: the received tag did not match `expected_message`.
        """
        (message, payload) = self._conn.recv()
        if (message == self._EXCEPTION):
            stacktrace = payload
            raise Exception(stacktrace)
        if (message == expected_message):
            return payload
        raise KeyError('Received message of unexpected type {}'.format(message))

    def _worker(self, constructor, conn):
        """Child-process loop: build the env and serve messages until closed."""
        try:
            env = constructor()
            while True:
                try:
                    # Short poll so KeyboardInterrupt can be delivered
                    # between messages instead of blocking in recv().
                    if (not conn.poll(0.1)):
                        continue
                    (message, payload) = conn.recv()
                except (EOFError, KeyboardInterrupt):
                    break
                if (message == self._ACTION):
                    action = payload
                    conn.send((self._TRANSITION, env.step(action)))
                    continue
                if (message == self._RESET):
                    assert (payload is None)
                    conn.send((self._OBSERV, env.reset()))
                    continue
                if (message == self._ATTRIBUTE):
                    name = payload
                    conn.send((self._VALUE, getattr(env, name)))
                    continue
                if (message == self._CLOSE):
                    assert (payload is None)
                    break
                raise KeyError('Received message of unknown type {}'.format(message))
        except Exception:
            # Ship the full stacktrace to the parent instead of dying silently.
            stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
            conn.send((self._EXCEPTION, stacktrace))
            tf.logging.error('Error in environment process: {}'.format(stacktrace))
        conn.close()
def test(config, test_dataset, testloader, model, sv_dir='./', sv_pred=True):
    """Run single-scale inference over `testloader` and optionally save results.

    Args:
        config: experiment config; MODEL.ALIGN_CORNERS is read for resizing.
        test_dataset: dataset object providing single_scale_inference/save_pred.
        testloader: iterable of (image, size, name) batches.
        model: network to evaluate (put into eval mode here).
        sv_dir: directory under which 'test_results' is created.
        sv_pred: when True, write predictions to disk.
    """
    model.eval()
    with torch.no_grad():
        for (_, batch) in enumerate(tqdm(testloader)):
            (image, size, name) = batch
            size = size[0]
            pred = test_dataset.single_scale_inference(config, model, image.cuda())
            # Resize the prediction back to the original image size if needed.
            if ((pred.size()[(- 2)] != size[0]) or (pred.size()[(- 1)] != size[1])):
                pred = F.interpolate(pred, size[(- 2):], mode='bilinear', align_corners=config.MODEL.ALIGN_CORNERS)
            if sv_pred:
                sv_path = os.path.join(sv_dir, 'test_results')
                # makedirs(exist_ok=True) handles a missing parent directory and
                # avoids the exists()/mkdir race of the previous implementation.
                os.makedirs(sv_path, exist_ok=True)
                test_dataset.save_pred(pred, sv_path, name)
def get_xdensenet_cifar(num_classes, blocks, growth_rate, bottleneck, expand_ratio=2, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Create an X-DenseNet model for CIFAR-10/100.

    Args:
        num_classes: 10 or 100.
        blocks: total layer count used to derive units per stage.
        growth_rate: channels added by each dense unit.
        bottleneck: whether bottleneck units are used (two convs per unit,
            so each stage holds half the layers of the plain variant).
        expand_ratio: expansion ratio forwarded to the network.
        model_name: name for pretrained-weight lookup (required if pretrained).
        pretrained: load pretrained weights when True.
        root: local weight-store directory.

    Returns:
        The constructed CIFARXDenseNet.

    Raises:
        ValueError: if `pretrained` is set without a usable `model_name`.
    """
    assert (num_classes in [10, 100])
    if bottleneck:
        assert (((blocks - 4) % 6) == 0)
        layers = ([((blocks - 4) // 6)] * 3)
    else:
        assert (((blocks - 4) % 3) == 0)
        layers = ([((blocks - 4) // 3)] * 3)
    init_block_channels = (2 * growth_rate)
    # Per-stage output channels, written as explicit loops instead of the
    # original nested-reduce one-liner (same arithmetic): each transition
    # halves the previous stage's final width, then every unit in the
    # stage adds `growth_rate` channels.
    channels = []
    prev_width = init_block_channels * 2
    for units in layers:
        width = prev_width // 2
        stage = []
        for _ in range(units):
            width += growth_rate
            stage.append(width)
        channels.append(stage)
        prev_width = stage[-1]
    net = CIFARXDenseNet(channels=channels, init_block_channels=init_block_channels, num_classes=num_classes, bottleneck=bottleneck, expand_ratio=expand_ratio, **kwargs)
    if pretrained:
        # `not model_name` covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
class IntensiveReader(BaseReader):
    """Reader that converts start/end(/choice) logits into final SQuAD-style
    answer text, with optional SQuAD-v2 null-answer (no-answer) handling."""

    name: str = 'intensive'

    def postprocess(self, output: EvalLoopOutput, eval_examples: datasets.Dataset, eval_dataset: datasets.Dataset, log_level: int=logging.WARNING, mode: str='evaluate') -> Union[(List[Dict[(str, Any)]], EvalPrediction)]:
        """Convert raw model logits into the representation required by `mode`.

        mode == 'retro_inference' -> (nbest_json, scores_diff_json)
        mode == 'predict'         -> list of formatted prediction dicts
        otherwise (evaluate)      -> EvalPrediction(predictions, references)
        """
        (predictions, nbest_json, scores_diff_json) = self.compute_predictions(eval_examples, eval_dataset, output.predictions, version_2_with_negative=self.data_args.version_2_with_negative, n_best_size=self.data_args.n_best_size, max_answer_length=self.data_args.max_answer_length, null_score_diff_threshold=self.data_args.null_score_diff_threshold, output_dir=self.args.output_dir, log_level=log_level, n_tops=(self.data_args.start_n_top, self.data_args.end_n_top))
        if (mode == 'retro_inference'):
            return (nbest_json, scores_diff_json)
        # SQuAD-v2 format carries a per-example no-answer probability.
        if self.data_args.version_2_with_negative:
            formatted_predictions = [{'id': k, 'prediction_text': v, 'no_answer_probability': scores_diff_json[k]} for (k, v) in predictions.items()]
        else:
            formatted_predictions = [{'id': k, 'prediction_text': v} for (k, v) in predictions.items()]
        if (mode == 'predict'):
            return formatted_predictions
        else:
            references = [{'id': ex[C.ID_COLUMN_NAME], 'answers': ex[C.ANSWER_COLUMN_NAME]} for ex in eval_examples]
            return EvalPrediction(predictions=formatted_predictions, label_ids=references)

    def compute_predictions(self, examples: datasets.Dataset, features: datasets.Dataset, predictions: Tuple[(np.ndarray, np.ndarray)], version_2_with_negative: bool=False, n_best_size: int=20, max_answer_length: int=30, null_score_diff_threshold: float=0.0, output_dir: Optional[str]=None, log_level: Optional[int]=logging.WARNING, n_tops: Tuple[(int, int)]=((- 1), (- 1)), use_choice_logits: bool=False):
        """Turn per-feature start/end logits into per-example answer spans.

        Args:
            examples: original (un-tokenized) examples.
            features: tokenized features; several features may map to one example.
            predictions: (start_logits, end_logits) or
                (start_logits, end_logits, choice_logits).
            version_2_with_negative: enable null-answer scoring (SQuAD v2).
            n_best_size: number of candidate spans kept per example.
            max_answer_length: maximum allowed span length in tokens.
            null_score_diff_threshold: threshold on (null - best span) score
                above which the empty answer is emitted.
            output_dir: when given, predictions/nbest/null-odds are written there.
            log_level: logger verbosity for this call.
            n_tops: (start_n_top, end_n_top); forwarded but unused here.
            use_choice_logits: when True, use choice_logits[1] as the null score.

        Returns:
            (all_predictions, all_nbest_json, scores_diff_json) — the last is
            None unless version_2_with_negative.
        """
        if (len(predictions) not in [2, 3]):
            raise ValueError('`predictions` should be a tuple with two or three elements (start_logits, end_logits, choice_logits).')
        (all_start_logits, all_end_logits) = predictions[:2]
        all_choice_logits = None
        if (len(predictions) == 3):
            all_choice_logits = predictions[(- 1)]
        # Map each example to the indices of its (possibly many) features.
        example_id_to_index = {k: i for (i, k) in enumerate(examples[C.ID_COLUMN_NAME])}
        features_per_example = collections.defaultdict(list)
        for (i, feature) in enumerate(features):
            features_per_example[example_id_to_index[feature['example_id']]].append(i)
        all_predictions = collections.OrderedDict()
        all_nbest_json = collections.OrderedDict()
        scores_diff_json = (collections.OrderedDict() if version_2_with_negative else None)
        logger.setLevel(log_level)
        logger.info(f'Post-processing {len(examples)} example predictions split into {len(features)} features.')
        for (example_index, example) in enumerate(tqdm(examples)):
            feature_indices = features_per_example[example_index]
            min_null_prediction = None
            prelim_predictions = []
            for feature_index in feature_indices:
                start_logits = all_start_logits[feature_index]
                end_logits = all_end_logits[feature_index]
                # Null score: CLS start + CLS end (or the choice head if enabled).
                feature_null_score = (start_logits[0] + end_logits[0])
                if (all_choice_logits is not None):
                    choice_logits = all_choice_logits[feature_index]
                    if use_choice_logits:
                        feature_null_score = choice_logits[1]
                offset_mapping = features[feature_index]['offset_mapping']
                token_is_max_context = features[feature_index].get('token_is_max_context', None)
                # Track the minimum null score across this example's features.
                if ((min_null_prediction is None) or (min_null_prediction['score'] > feature_null_score)):
                    min_null_prediction = {'offsets': (0, 0), 'score': feature_null_score, 'start_logit': start_logits[0], 'end_logit': end_logits[0]}
                # Top-n_best_size start/end positions, highest logit first.
                start_indexes = np.argsort(start_logits)[(- 1):((- n_best_size) - 1):(- 1)].tolist()
                end_indexes = np.argsort(end_logits)[(- 1):((- n_best_size) - 1):(- 1)].tolist()
                for start_index in start_indexes:
                    for end_index in end_indexes:
                        # Skip positions outside the context or without offsets.
                        if ((start_index >= len(offset_mapping)) or (end_index >= len(offset_mapping)) or (not offset_mapping[start_index]) or (not offset_mapping[end_index])):
                            continue
                        # Skip inverted or over-long spans.
                        if ((end_index < start_index) or (((end_index - start_index) + 1) > max_answer_length)):
                            continue
                        # Skip spans whose start token is not in its max-context feature.
                        if ((token_is_max_context is not None) and (not token_is_max_context.get(str(start_index), False))):
                            continue
                        prelim_predictions.append({'offsets': (offset_mapping[start_index][0], offset_mapping[end_index][1]), 'score': (start_logits[start_index] + end_logits[end_index]), 'start_logit': start_logits[start_index], 'end_logit': end_logits[end_index]})
            if version_2_with_negative:
                prelim_predictions.append(min_null_prediction)
                null_score = min_null_prediction['score']
            predictions = sorted(prelim_predictions, key=(lambda x: x['score']), reverse=True)[:n_best_size]
            # Ensure the null prediction survives the n-best cut.
            if (version_2_with_negative and (not any(((p['offsets'] == (0, 0)) for p in predictions)))):
                predictions.append(min_null_prediction)
            context = example['context']
            # Materialize answer text from character offsets.
            for pred in predictions:
                offsets = pred.pop('offsets')
                pred['text'] = context[offsets[0]:offsets[1]]
            # Guard against an empty / all-empty n-best list.
            if ((len(predictions) == 0) or ((len(predictions) == 1) and (predictions[0]['text'] == ''))):
                predictions.insert(0, {'text': '', 'start_logit': 0.0, 'end_logit': 0.0, 'score': 0.0})
            # Softmax over candidate scores -> per-candidate probability.
            scores = np.array([pred.pop('score') for pred in predictions])
            exp_scores = np.exp((scores - np.max(scores)))
            probs = (exp_scores / exp_scores.sum())
            for (prob, pred) in zip(probs, predictions):
                pred['probability'] = prob
            if (not version_2_with_negative):
                all_predictions[example[C.ID_COLUMN_NAME]] = predictions[0]['text']
            else:
                # Find the best non-empty prediction; fall back to index 0
                # if every candidate is empty (IndexError from the scan).
                i = 0
                try:
                    while (predictions[i]['text'] == ''):
                        i += 1
                except:
                    i = 0
                best_non_null_pred = predictions[i]
                # Emit '' when the null score beats the best span by more
                # than the configured threshold.
                score_diff = ((null_score - best_non_null_pred['start_logit']) - best_non_null_pred['end_logit'])
                scores_diff_json[example[C.ID_COLUMN_NAME]] = float(score_diff)
                if (score_diff > null_score_diff_threshold):
                    all_predictions[example[C.ID_COLUMN_NAME]] = ''
                else:
                    all_predictions[example[C.ID_COLUMN_NAME]] = best_non_null_pred['text']
            # JSON-serializable n-best entries (numpy floats -> Python floats).
            all_nbest_json[example[C.ID_COLUMN_NAME]] = [{k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for (k, v) in pred.items()} for pred in predictions]
        if (output_dir is not None):
            if (not os.path.isdir(output_dir)):
                raise EnvironmentError(f'{output_dir} is not a directory.')
            prediction_file = os.path.join(output_dir, C.INTENSIVE_PRED_FILE_NAME)
            nbest_file = os.path.join(output_dir, C.NBEST_PRED_FILE_NAME)
            if version_2_with_negative:
                null_odds_file = os.path.join(output_dir, C.SCORE_DIFF_FILE_NAME)
            logger.info(f'Saving predictions to {prediction_file}.')
            with open(prediction_file, 'w') as writer:
                writer.write((json.dumps(all_predictions, indent=4) + '\n'))
            logger.info(f'Saving nbest_preds to {nbest_file}.')
            with open(nbest_file, 'w') as writer:
                writer.write((json.dumps(all_nbest_json, indent=4) + '\n'))
            if version_2_with_negative:
                logger.info(f'Saving null_odds to {null_odds_file}.')
                with open(null_odds_file, 'w') as writer:
                    writer.write((json.dumps(scores_diff_json, indent=4) + '\n'))
        return (all_predictions, all_nbest_json, scores_diff_json)
def gen_CNN(channels, conv=nn.Conv1d, bias=True, activation=nn.ReLU, batch_norm=None, instance_norm=None):
    """Build a Sequential stack of kernel-size-1 convolutions.

    For each consecutive pair in `channels`, adds: conv, then optional
    batch norm, optional activation, optional instance norm.
    """
    modules = []
    for in_size, out_size in zip(channels[:-1], channels[1:]):
        modules.append(conv(in_size, out_size, 1, bias=bias))
        if batch_norm is not None:
            modules.append(batch_norm(out_size))
        if activation is not None:
            modules.append(activation(inplace=True))
        if instance_norm is not None:
            modules.append(instance_norm(out_size, affine=False, track_running_stats=False))
    return nn.Sequential(*modules)
def main(args):
    """Entry point: seed RNGs when requested, build data/model/trainer,
    then either train or run validation depending on args.evaluate."""
    if args.seed is not None:
        print('* absolute seed: {}'.format(args.seed))
        # Seed every RNG source and force deterministic cuDNN kernels.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
    is_train = not args.evaluate
    (train_loader, val_loader, num_classes) = make_data_loader(args, is_train=is_train)
    model = get_model(num_classes, args)
    criterion = torch.nn.MultiLabelSoftMarginLoss()
    trainer = Trainer(model, criterion, train_loader, val_loader, args)
    if is_train:
        trainer.train()
    else:
        trainer.validate()
def ControlTypeChange(choice):
    """Toggle a Gradio component's visibility based on the control type.

    'pos' hides the component, 'sentiment' shows it; any other value
    returns None (no update).
    """
    visibility = {'pos': False, 'sentiment': True}
    if choice in visibility:
        return gr.update(visible=visibility[choice])
def test_scatter():
    """scatter() should copy tensors (and lists of tensors) onto the
    requested devices unchanged, and reject non-tensor input."""

    def check_identity(devices, to_device):
        # Single tensor round-trip.
        single = torch.zeros([1, 3, 3, 3])
        assert torch.allclose(to_device(single), scatter(input=single, devices=devices))
        # List-of-tensors round-trip.
        batch = [torch.zeros([1, 3, 3, 3]), torch.zeros([1, 4, 4, 4])]
        scattered = scatter(input=batch, devices=devices)
        for src, dst in zip(batch, scattered):
            assert torch.allclose(to_device(src), dst)

    check_identity([(- 1)], lambda t: t)
    if torch.cuda.is_available():
        check_identity([0], lambda t: t.cuda())
    # Non-tensor input must raise.
    with pytest.raises(Exception):
        scatter(5, [(- 1)])
def short_hash(name):
    """Return the first 8 hex characters of the stored sha256 for `name`.

    Raises:
        ValueError: if no pretrained model is registered under `name`.
    """
    if name in _model_sha256:
        return _model_sha256[name][:8]
    raise ValueError('Pretrained model for {name} is not available.'.format(name=name))
class TFWorkerWrapper(Worker):
    """Worker that wraps an inner worker and manages a TensorFlow session
    around it.

    Note: `agent` and `env` were each defined twice as plain methods, so
    the two-argument setter silently shadowed the getter and the getter
    was unreachable. They are restored here as properties with setters.
    """

    def __init__(self):
        self._inner_worker = None
        self._sess = None
        self._sess_entered = None
        self.worker_init()

    def worker_init(self):
        """Ensure a default TF session exists, entering a new one if needed."""
        self._sess = tf.compat.v1.get_default_session()
        if (not self._sess):
            self._sess = tf.compat.v1.Session()
            # Remember that we own this session so shutdown() exits it.
            self._sess_entered = True
            self._sess.__enter__()

    def shutdown(self):
        """Shut down the inner worker and exit the session we entered."""
        self._inner_worker.shutdown()
        if (tf.compat.v1.get_default_session() and self._sess_entered):
            self._sess_entered = False
            self._sess.__exit__(None, None, None)

    @property
    def agent(self):
        """Policy/agent used by the inner worker."""
        return self._inner_worker.agent

    @agent.setter
    def agent(self, agent):
        self._inner_worker.agent = agent

    @property
    def env(self):
        """Environment used by the inner worker."""
        return self._inner_worker.env

    @env.setter
    def env(self, env):
        self._inner_worker.env = env

    def update_agent(self, agent_update):
        """Forward an agent update to the inner worker."""
        self._inner_worker.update_agent(agent_update)

    def update_env(self, env_update):
        """Forward an environment update to the inner worker."""
        self._inner_worker.update_env(env_update)

    def rollout(self):
        """Run and return one rollout from the inner worker."""
        return self._inner_worker.rollout()

    def start_rollout(self):
        """Begin a (stepwise) rollout on the inner worker."""
        self._inner_worker.start_rollout()

    def step_rollout(self):
        """Advance the current rollout by one step."""
        return self._inner_worker.step_rollout()

    def collect_rollout(self):
        """Collect and return the rollout gathered so far."""
        return self._inner_worker.collect_rollout()
def get_latest_scene(s3_scene_jsons):
    """Load every scene JSON and return (scene, json_path) for the newest.

    Newest = largest creation_date; ties resolve to the first occurrence,
    matching list.index(max(...)) semantics.
    """
    scenes = [open_remote_pb_object(path, Scene) for path in s3_scene_jsons]
    stamps = [scene.creation_date.ToMicroseconds() for scene in scenes]
    newest = max(range(len(stamps)), key=stamps.__getitem__)
    return (scenes[newest], s3_scene_jsons[newest])
class SparkEvaluator(Evaluator[S]):
    """Evaluator that distributes solution evaluation over a local Spark
    context with `processes` worker slots."""

    def __init__(self, processes: int=8):
        self.spark_conf = SparkConf().setAppName('jmetalpy').setMaster(f'local[{processes}]')
        self.spark_context = SparkContext(conf=self.spark_conf)
        # Silence Spark's very chatty default log4j configuration.
        log4j = self.spark_context._jvm.org.apache.log4j
        log4j.LogManager.getLogger('org').setLevel(log4j.Level.WARN)

    def evaluate(self, solution_list: List[S], problem: Problem) -> List[S]:
        """Evaluate each solution in parallel and return the evaluated list."""
        rdd = self.spark_context.parallelize(solution_list)
        return rdd.map(problem.evaluate).collect()
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convs) with optional anti-aliasing,
    drop-block/drop-path, and an attention module that may be either the
    custom 'our_se' variant (applied to the residual) or a standard
    attention layer (applied to the main path)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, cardinality=1, base_width=64, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None):
        super(BasicBlock, self).__init__()
        assert (cardinality == 1), 'BasicBlock only supports cardinality of 1'
        first_planes = (planes // reduce_first)
        outplanes = (planes * self.expansion)
        first_dilation = (first_dilation or dilation)
        # Anti-aliased downsampling: conv1 keeps stride 1 and the aa layer
        # performs the stride instead.
        use_aa = ((aa_layer is not None) and ((stride == 2) or (first_dilation != dilation)))
        self.conv1 = nn.Conv2d(inplanes, first_planes, kernel_size=3, stride=(1 if use_aa else stride), padding=first_dilation, dilation=first_dilation, bias=False)
        self.bn1 = norm_layer(first_planes)
        self.act1 = act_layer(inplace=True)
        self.aa = (aa_layer(channels=first_planes, stride=stride) if use_aa else None)
        self.conv2 = nn.Conv2d(first_planes, outplanes, kernel_size=3, padding=dilation, dilation=dilation, bias=False)
        self.bn2 = norm_layer(outplanes)
        # Keep the raw attn_layer value: forward() branches on the string
        # 'our_se' to decide where attention is applied.
        self.attn_layer = attn_layer
        if (attn_layer == 'our_se'):
            self.se = SE_Our(outplanes, outplanes)
        else:
            self.se = create_attn(attn_layer, outplanes)
        self.act2 = act_layer(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        self.drop_block = drop_block
        self.drop_path = drop_path

    def zero_init_last_bn(self):
        """Zero the last BN weight so the block starts as (near) identity."""
        nn.init.zeros_(self.bn2.weight)

    def forward(self, x):
        residual = x
        x = self.conv1(x)
        x = self.bn1(x)
        if (self.drop_block is not None):
            x = self.drop_block(x)
        x = self.act1(x)
        if (self.aa is not None):
            x = self.aa(x)
        x = self.conv2(x)
        x = self.bn2(x)
        if (self.drop_block is not None):
            x = self.drop_block(x)
        if (self.downsample is not None):
            residual = self.downsample(residual)
        # 'our_se' gates the residual branch; any other attention module
        # modulates the main path instead.
        if (self.attn_layer == 'our_se'):
            residual = (residual * self.se(residual))
        elif (self.se is not None):
            x = self.se(x)
        if (self.drop_path is not None):
            x = self.drop_path(x)
        x += residual
        x = self.act2(x)
        return x
def _fused_batch_norm(inputs, decay=0.999, center=True, scale=False, epsilon=0.001, activation_fn=None, param_initializers=None, param_regularizers=None, updates_collections=ops.GraphKeys.UPDATE_OPS, is_training=True, reuse=None, variables_collections=None, outputs_collections=None, trainable=True, data_format=DATA_FORMAT_NHWC, zero_debias_moving_mean=False, scope=None):
    """Batch normalization using the fused `nn.fused_batch_norm` kernel.

    Supports rank-2 and rank-4 inputs (rank-2 tensors are reshaped to 4-D
    for the fused op and reshaped back afterwards), creates or reuses the
    beta/gamma and moving mean/variance variables, and wires the
    moving-average update ops either inline (when updates_collections is
    None) or into the given graph collections.

    Raises:
        ValueError: on unknown data_format, undefined rank, rank not in
            {2, 4}, or an undefined channel dimension.
    """
    if (data_format not in (DATA_FORMAT_NCHW, DATA_FORMAT_NHWC)):
        raise ValueError('data_format has to be either NCHW or NHWC.')
    with variable_scope.variable_scope(scope, 'BatchNorm', [inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        original_shape = inputs.get_shape()
        original_inputs = inputs
        original_rank = original_shape.ndims
        if (original_rank is None):
            raise ValueError(('Inputs %s has undefined rank' % inputs.name))
        elif (original_rank not in [2, 4]):
            raise ValueError(('Inputs %s has unsupported rank. Expected 2 or 4 but got %d' % (inputs.name, original_rank)))
        if (original_rank == 2):
            # The fused op requires 4-D input: insert singleton spatial
            # dimensions around the channel axis.
            channels = inputs.get_shape()[(- 1)].value
            if (channels is None):
                raise ValueError('`C` dimension must be known but is None')
            new_shape = [(- 1), 1, 1, channels]
            if (data_format == DATA_FORMAT_NCHW):
                new_shape = [(- 1), channels, 1, 1]
            inputs = array_ops.reshape(inputs, new_shape)
        inputs_shape = inputs.get_shape()
        # Parameter shape is the channel dimension of the (4-D) input.
        if (data_format == DATA_FORMAT_NHWC):
            params_shape = inputs_shape[(- 1):]
        else:
            params_shape = inputs_shape[1:2]
        if (not params_shape.is_fully_defined()):
            raise ValueError(('Inputs %s has undefined `C` dimension %s.' % (inputs.name, params_shape)))
        beta_collections = utils.get_variable_collections(variables_collections, 'beta')
        variable_dtype = dtypes.float32
        if (not param_initializers):
            param_initializers = {}
        if (not param_regularizers):
            param_regularizers = {}
        beta_regularizer = param_regularizers.get('beta')
        gamma_regularizer = param_regularizers.get('gamma')
        # beta (shift): trainable variable when centering, else constant 0.
        if center:
            beta_initializer = param_initializers.get('beta', init_ops.zeros_initializer())
            beta = variables.model_variable('beta', shape=params_shape, dtype=variable_dtype, initializer=beta_initializer, regularizer=beta_regularizer, collections=beta_collections, trainable=trainable)
        else:
            beta = array_ops.constant(0.0, dtype=variable_dtype, shape=params_shape)
        # gamma (scale): trainable variable when scaling, else constant 1.
        if scale:
            gamma_collections = utils.get_variable_collections(variables_collections, 'gamma')
            gamma_initializer = param_initializers.get('gamma', init_ops.ones_initializer())
            gamma = variables.model_variable('gamma', shape=params_shape, dtype=variable_dtype, initializer=gamma_initializer, regularizer=gamma_regularizer, collections=gamma_collections, trainable=trainable)
        else:
            gamma = array_ops.constant(1.0, dtype=variable_dtype, shape=params_shape)
        # Create moving statistics with partitioning disabled so the fused
        # op sees whole (unsharded) variables.
        with variable_scope.variable_scope(variable_scope.get_variable_scope()) as local_scope:
            local_scope.set_partitioner(None)
            moving_mean_collections = utils.get_variable_collections(variables_collections, 'moving_mean')
            moving_mean_initializer = param_initializers.get('moving_mean', init_ops.zeros_initializer())
            moving_mean = variables.model_variable('moving_mean', shape=params_shape, dtype=variable_dtype, initializer=moving_mean_initializer, trainable=False, collections=moving_mean_collections)
            moving_variance_collections = utils.get_variable_collections(variables_collections, 'moving_variance')
            moving_variance_initializer = param_initializers.get('moving_variance', init_ops.ones_initializer())
            moving_variance = variables.model_variable('moving_variance', shape=params_shape, dtype=variable_dtype, initializer=moving_variance_initializer, trainable=False, collections=moving_variance_collections)

        def _fused_batch_norm_training():
            return nn.fused_batch_norm(inputs, gamma, beta, epsilon=epsilon, data_format=data_format)

        def _fused_batch_norm_inference():
            return nn.fused_batch_norm(inputs, gamma, beta, mean=moving_mean, variance=moving_variance, epsilon=epsilon, is_training=False, data_format=data_format)
        # is_training may be a Python bool or a tensor; smart_cond handles both.
        (outputs, mean, variance) = utils.smart_cond(is_training, _fused_batch_norm_training, _fused_batch_norm_inference)
        is_training_value = utils.constant_value(is_training)
        # Updates are needed unless is_training is statically False.
        need_updates = ((is_training_value is None) or is_training_value)
        if need_updates:
            if (updates_collections is None):
                # Inline updates: make the output depend on the moving-average
                # assignments so they always run with the forward pass.
                no_updates = (lambda : outputs)

                def _force_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    with ops.control_dependencies([update_moving_mean, update_moving_variance]):
                        return array_ops.identity(outputs)
                outputs = utils.smart_cond(is_training, _force_updates, no_updates)
            else:
                # Deferred updates: hand the assignment ops to the caller via
                # the requested collections (typically UPDATE_OPS).
                moving_vars_fn = (lambda : (moving_mean, moving_variance))

                def _delay_updates():
                    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, decay, zero_debias=zero_debias_moving_mean)
                    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, decay, zero_debias=False)
                    return (update_moving_mean, update_moving_variance)
                (update_mean, update_variance) = utils.smart_cond(is_training, _delay_updates, moving_vars_fn)
                ops.add_to_collections(updates_collections, update_mean)
                ops.add_to_collections(updates_collections, update_variance)
        outputs.set_shape(inputs_shape)
        # Undo the rank-2 -> rank-4 reshape performed above.
        if (original_shape.ndims == 2):
            outputs = array_ops.reshape(outputs, array_ops.shape(original_inputs))
        if (activation_fn is not None):
            outputs = activation_fn(outputs)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs)
def _create_local(name, shape, collections=None, validate_shape=True, dtype=tf.float32):
    """Create a zero-initialized, non-trainable variable registered in the
    LOCAL_VARIABLES collection (plus any caller-supplied collections)."""
    all_collections = list(collections or [])
    all_collections.append(ops.GraphKeys.LOCAL_VARIABLES)
    return variables.Variable(initial_value=array_ops.zeros(shape, dtype=dtype), name=name, trainable=False, collections=all_collections, validate_shape=validate_shape)
def rank_by_significance(embeddings, class_embeddings):
    """Rank items by how peaked their class-similarity softmax is.

    Returns a dict mapping item index -> rank, where rank 0 is the item
    whose maximum softmaxed class similarity is highest.
    """
    sims = cosine_similarity_embeddings(embeddings, class_embeddings)
    peak_scores = np.array([softmax(row).max() for row in sims])
    # argsort on negated scores gives a descending significance ordering.
    descending = np.argsort(-peak_scores)
    return {item: rank for rank, item in enumerate(descending)}
def is_torchdynamo_available():
    """Return True when torch is installed and `torch._dynamo` imports cleanly."""
    if not is_torch_available():
        return False
    try:
        import torch._dynamo  # noqa: F401 -- the import itself is the probe
    except Exception:
        return False
    return True
def blend_images(image, image2, should_blend=0, alpha=0.5, scope=None):
    """Blend `image` with `image2` (via blend_images_np) when requested.

    When `should_blend` is 0 the first image passes through unchanged.
    """
    with tf.name_scope(scope, 'blend_images', [image, image2]):
        if should_blend == 0:
            return image
        # Delegate the actual pixel blending to the numpy implementation.
        return tf.py_func(blend_images_np, [image, image2, alpha])
def parse_args(input_args=None):
    """Parse DreamBooth-style training arguments.

    Args:
        input_args: optional explicit argv list; defaults to sys.argv.

    Returns:
        argparse.Namespace with validated arguments.

    Raises:
        ValueError: if --with_prior_preservation is set without
            --class_data_dir or --class_prompt.
    """
    parser = argparse.ArgumentParser(description='Simple example of a training script.')
    parser.add_argument('--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.')
    parser.add_argument('--revision', type=str, default=None, required=False, help='Revision of pretrained model identifier from huggingface.co/models.')
    parser.add_argument('--tokenizer_name', type=str, default=None, help='Pretrained tokenizer name or path if not the same as model_name')
    parser.add_argument('--instance_data_dir', type=str, default=None, required=True, help='A folder containing the training data of instance images.')
    parser.add_argument('--class_data_dir', type=str, default=None, required=False, help='A folder containing the training data of class images.')
    parser.add_argument('--instance_prompt', type=str, default='a photo of sks dog', required=False, help='The prompt with identifier specifying the instance')
    parser.add_argument('--class_prompt', type=str, default=None, help='The prompt to specify images in the same class as provided instance images.')
    parser.add_argument('--with_prior_preservation', default=False, action='store_true', help='Flag to add prior preservation loss.')
    parser.add_argument('--prior_loss_weight', type=float, default=1.0, help='The weight of prior preservation loss.')
    parser.add_argument('--num_class_images', type=int, default=100, help='Minimal class images for prior preservation loss. If there are not enough images already present in class_data_dir, additional images will be sampled with class_prompt.')
    parser.add_argument('--output_dir', type=str, default='text-inversion-model', help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--resolution', type=int, default=512, help='The resolution for input images, all the images in the train/validation dataset will be resized to this resolution')
    parser.add_argument('--placement', type=str, default='cpu', help='Placement Policy for Gemini. Valid when using colossalai as dist plan.')
    parser.add_argument('--center_crop', default=False, action='store_true', help='Whether to center crop the input images to the resolution. If not set, the images will be randomly cropped. The images will be resized to the resolution first before cropping.')
    parser.add_argument('--train_batch_size', type=int, default=4, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--sample_batch_size', type=int, default=4, help='Batch size (per device) for sampling images.')
    parser.add_argument('--num_train_epochs', type=int, default=1)
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
    parser.add_argument('--gradient_checkpointing', action='store_true', help='Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.')
    parser.add_argument('--learning_rate', type=float, default=5e-06, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--scale_lr', action='store_true', default=False, help='Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.')
    parser.add_argument('--lr_scheduler', type=str, default='constant', help='The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"]')
    parser.add_argument('--lr_warmup_steps', type=int, default=500, help='Number of steps for the warmup in the lr scheduler.')
    parser.add_argument('--use_8bit_adam', action='store_true', help='Whether or not to use 8-bit Adam from bitsandbytes.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_token', type=str, default=None, help='The token to use to push to the Model Hub.')
    parser.add_argument('--hub_model_id', type=str, default=None, help='The name of the repository to keep in sync with the local `output_dir`.')
    # Fixed: the help string previously contained a truncated markdown link
    # '[TensorBoard]( log directory'.
    parser.add_argument('--logging_dir', type=str, default='logs', help='[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***.')
    parser.add_argument('--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16'], help='Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    if (input_args is not None):
        args = parser.parse_args(input_args)
    else:
        args = parser.parse_args()
    # The launcher's LOCAL_RANK env var overrides the command-line flag.
    env_local_rank = int(os.environ.get('LOCAL_RANK', (- 1)))
    if ((env_local_rank != (- 1)) and (env_local_rank != args.local_rank)):
        args.local_rank = env_local_rank
    # Prior preservation requires both a class data dir and a class prompt.
    if args.with_prior_preservation:
        if (args.class_data_dir is None):
            raise ValueError('You must specify a data directory for class images.')
        if (args.class_prompt is None):
            raise ValueError('You must specify prompt for class images.')
    else:
        if (args.class_data_dir is not None):
            logger.warning('You need not use --class_data_dir without --with_prior_preservation.')
        if (args.class_prompt is not None):
            logger.warning('You need not use --class_prompt without --with_prior_preservation.')
    return args
def accuracy(logits, labels):
    """Fraction of rows where argmax over `logits` (dim 1) matches `labels`."""
    predictions = torch.argmax(logits, dim=1)
    # Flatten any extra dimensions so the comparison is element-wise.
    if predictions.dim() > 1:
        predictions = predictions.reshape(-1)
        labels = labels.reshape(-1)
    n_correct = torch.sum(predictions == labels)
    return (n_correct.item() * 1.0) / len(labels)
def mobilenet_v2(pretrained: bool=False, include_top: bool=False, freeze: bool=False):
    """Build a torchvision MobileNetV2, optionally frozen and/or headless.

    With include_top=False the classifier head is replaced by Identity and
    the model is wrapped in a BackboneModule exposing the feature width.
    """
    model = torchvision.models.mobilenet_v2(pretrained)
    if freeze:
        set_parameter_requires_grad(model, 'classifier')
    if include_top:
        return model
    # Strip the classification head; its input width is the feature size.
    feature_dim = model.classifier[1].in_features
    model.classifier = nn.Identity()
    return BackboneModule(model, feature_dim)
class NonMaximaSuppression2d(nn.Module):
    """Non-maxima suppression over a 2-D feature map.

    A pixel survives only if it is strictly greater than every other pixel
    inside its `kernel_size` neighbourhood; all other pixels are zeroed.
    """

    def __init__(self, kernel_size: Tuple[int, int]):
        super(NonMaximaSuppression2d, self).__init__()
        self.kernel_size: Tuple[int, int] = kernel_size
        self.padding: Tuple[int, int, int, int] = self._compute_zero_padding2d(kernel_size)
        self.kernel = _get_nms_kernel2d(*kernel_size)

    @staticmethod
    def _compute_zero_padding2d(kernel_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
        """Return (top, bottom, left, right) padding for the given kernel.

        Fixed: this was declared without `self` but not marked
        @staticmethod, so `self._compute_zero_padding2d(...)` in __init__
        raised a TypeError (two arguments passed to a one-arg function).
        """
        assert isinstance(kernel_size, tuple), type(kernel_size)
        assert (len(kernel_size) == 2), kernel_size

        def pad(x):
            return ((x - 1) // 2)

        (ky, kx) = kernel_size
        return (pad(ky), pad(ky), pad(kx), pad(kx))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert (len(x.shape) == 4), x.shape
        (B, CH, H, W) = x.size()
        # Max over every neighbour position (per channel), then keep only
        # pixels strictly greater than that neighbourhood maximum.
        max_non_center = F.conv2d(F.pad(x, list(self.padding)[::(- 1)], mode='replicate'), self.kernel.repeat(CH, 1, 1, 1).to(x.device, x.dtype), stride=1, groups=CH).view(B, CH, (- 1), H, W).max(dim=2)[0]
        mask = (x > max_non_center)
        return (x * mask.to(x.dtype))
def _fake_roi_head(with_shared_head=False):
    """Build a minimal StandardRoIHead config for unit tests.

    Channel counts are shrunk to 1 so the head is cheap to construct.

    Args:
        with_shared_head: when True, return the caffe-style variant with a
            ResLayer shared head and a single stride-16 feature level;
            otherwise return the FPN-style head with 4 feature levels.

    Returns:
        Config: an mmdet RoI-head config.
    """
    if (not with_shared_head):
        # FPN-style head: Shared2FCBBoxHead + FCNMaskHead over strides 4-32
        roi_head = Config(dict(type='StandardRoIHead', bbox_roi_extractor=dict(type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), out_channels=1, featmap_strides=[4, 8, 16, 32]), bbox_head=dict(type='Shared2FCBBoxHead', in_channels=1, fc_out_channels=1, num_classes=4), mask_roi_extractor=dict(type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1, featmap_strides=[4, 8, 16, 32]), mask_head=dict(type='FCNMaskHead', num_convs=1, in_channels=1, conv_out_channels=1, num_classes=4), train_cfg=dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=True, ignore_iof_thr=(- 1)), sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=(- 1), add_gt_as_proposals=True), mask_size=28, pos_weight=(- 1), debug=False), test_cfg=dict(score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
    else:
        # shared-head variant: ResNet stage-3 shared layers, single stride-16 level
        roi_head = Config(dict(type='StandardRoIHead', shared_head=dict(type='ResLayer', depth=50, stage=3, stride=2, dilation=1, style='caffe', norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True), bbox_roi_extractor=dict(type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0), out_channels=1, featmap_strides=[16]), bbox_head=dict(type='BBoxHead', with_avg_pool=True, in_channels=2048, roi_feat_size=7, num_classes=4), mask_roi_extractor=None, mask_head=dict(type='FCNMaskHead', num_convs=0, in_channels=2048, conv_out_channels=1, num_classes=4), train_cfg=dict(assigner=dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=(- 1)), sampler=dict(type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=(- 1), add_gt_as_proposals=True), mask_size=14, pos_weight=(- 1), debug=False), test_cfg=dict(score_thr=0.05, nms=dict(type='nms', iou_threshold=0.5), max_per_img=100, mask_thr_binary=0.5)))
    return roi_head
def resnet50s16(pretrained=False, finetune_layers=(), s16_feats=('layer4',), s8_feats=('layer2',), s4_feats=('layer1',), **kwargs):
    """Construct a stride-16 ResNet-50 feature backbone.

    Args:
        pretrained: load the torchvision ResNet-50 ImageNet weights.
        finetune_layers: layer names left trainable.
        s16_feats / s8_feats / s4_feats: layers exposed at each stride.

    Returns:
        A ResNetS16 instance.
    """
    net = ResNetS16(finetune_layers, s16_feats, s8_feats, s4_feats, Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # weights are cached under the configured nn_weights_path
        state = model_zoo.load_url(model_urls['resnet50'], model_dir=config['nn_weights_path'])
        net.load_state_dict(state)
    return net
def logging(s, log_path, print_=True, log_=True):
    """Print a message and/or append it (with a newline) to a log file.

    NOTE: the function name shadows the stdlib `logging` module; kept for
    backward compatibility with existing callers.
    """
    if print_:
        print(s)
    if log_:
        with open(log_path, 'a+') as handle:
            handle.write('{}\n'.format(s))
class ConfigDense(NamedTuple):
    """Immutable experiment configuration for dense kernel sampling tests."""

    # RNG seed for reproducibility
    seed: int = 0
    # floating point precision used by the backend
    floatx: Any = 'float64'
    # diagonal jitter added for numerical stability
    jitter: float = 1e-06
    # number of test locations
    num_test: int = 128
    # number of conditioning points
    num_cond: int = 32
    # number of Monte-Carlo samples drawn
    num_samples: int = 16384
    # samples processed per shard (memory control)
    shard_size: int = 1024
    # dimensionality of the input space
    input_dims: int = 3
    # kernel (signal) variance
    kernel_variance: float = 0.9
    # lengthscale bounds, relative to the input domain
    rel_lengthscales_min: float = 0.05
    rel_lengthscales_max: float = 0.5
    # observation noise variance
    noise_variance: float = 0.01

    def error_tol(self):
        """Monte-Carlo error tolerance: 4 / sqrt(num_samples)."""
        return (4 * (self.num_samples ** (- 0.5)))
class TestVisualization(TestCase):
    """Smoke tests that the HPO visualization API is importable."""

    def test_import_should_okay(self):
        """Each public plotting helper must be importable from the module.

        Refactored from five copy-pasted try/except blocks into one loop;
        also fixes the typo 'nano.aotoml' in the failure messages.
        """
        import importlib
        plot_fns = [
            'plot_optimization_history',
            'plot_parallel_coordinate',
            'plot_intermediate_values',
            'plot_contour',
            'plot_param_importances',
        ]
        for fn_name in plot_fns:
            try:
                module = importlib.import_module('bigdl.nano.automl.hpo.visualization')
                getattr(module, fn_name)
            except (ImportError, AttributeError):
                self.fail('cannot import {} from bigdl.nano.automl.hpo.visualization.'.format(fn_name))
class _IndexToTokenDefaultDict(_NamespaceDependentDefaultDict):
    """Namespace-dependent defaultdict mapping indices to tokens.

    Padded namespaces start with index 0 -> padding token and
    index 1 -> OOV token; non-padded namespaces start empty.
    """

    def __init__(self, non_padded_namespaces: Set[str], padding_token: str, oov_token: str) -> None:
        def padded_default():
            return {0: padding_token, 1: oov_token}

        def non_padded_default():
            return {}

        super().__init__(non_padded_namespaces, padded_default, non_padded_default)
def format_if_possible(format, value):
    """Apply a %-style format to `value`, falling back to plain str().

    Args:
        format: a %-style format string, e.g. '%.2f'.
        value: the value to format.

    Returns:
        The formatted string, or str(value) if formatting fails.
    """
    try:
        return (format % value)
    except Exception:
        # %-formatting raises TypeError/ValueError on mismatched specifiers;
        # keep the best-effort fallback, but no longer catch
        # KeyboardInterrupt/SystemExit as the bare `except:` did.
        return ('%s' % value)
def extract_valid_amino_acid(m, amino_acids):
    """Combine all PDB-residue fragments of molecule `m` into one molecule.

    Returns None when the molecule has no residues.

    NOTE(review): `amino_acids` is currently unused — fragments are not
    filtered by residue name; confirm whether filtering was intended.
    """
    fragments = SplitMolByPDBResidues(m)
    combined = None
    for frag in fragments.values():
        combined = frag if combined is None else CombineMols(combined, frag)
    return combined
class TestTPProfile(unittest.TestCase):
    """Tests for the temperature-pressure Profile helper."""

    def test_isothermal(self):
        """An isothermal profile has the requested length and constant T."""
        profile = Profile(num_profile_heights=130)
        profile.set_isothermal(1300)
        self.assertEqual(len(profile.pressures), 130)
        self.assertEqual(len(profile.temperatures), 130)
        self.assertTrue(np.all((profile.temperatures == 1300)))

    def test_parametric(self):
        """The parametric profile satisfies its piecewise P(T) relations."""
        profile = Profile()
        P0 = np.min(profile.pressures)
        T0 = 1300
        P1 = 0.001
        alpha1 = 0.3
        alpha2 = 0.5
        P3 = 10000.0
        T3 = 2000
        (P2, T2) = profile.set_parametric(T0, P1, alpha1, alpha2, P3, T3)
        # deep-layer relation: P3 == P2 * exp(alpha2 * sqrt(T3 - T2)), to 0.1%
        self.assertTrue((abs((P3 - (P2 * np.exp((alpha2 * ((T3 - T2) ** 0.5)))))) < (0.001 * P3)))
        # invert the upper-layer relation to recover T1, then verify both
        # branches meet at (P1, T1) to 0.1%
        T1 = (((np.log((P1 / P0)) ** 2) / (alpha1 ** 2)) + T0)
        self.assertTrue((abs((P1 - (P0 * np.exp((alpha1 * ((T1 - T0) ** 0.5)))))) < (0.001 * P1)))
        self.assertTrue((abs((P1 - (P2 * np.exp(((- alpha2) * ((T1 - T2) ** 0.5)))))) < (0.001 * P1)))

    def test_set_opacity(self):
        """Setting the profile from an opacity solution yields finite positive T."""
        p = Profile()
        p.set_isothermal(1200)
        calc = EclipseDepthCalculator()
        (wavelengths, depths, info_dict) = calc.compute_depths(p, R_sun, M_jup, R_jup, 5700, full_output=True)
        p.set_from_opacity(1700, info_dict)
        self.assertTrue(np.all((p.temperatures > 0)))
        self.assertTrue(np.all((~ np.isnan(p.temperatures))))

    def test_radiative_solution(self):
        """The radiative solution lies in expected T bands and is monotonic."""
        p = Profile()
        p.set_from_radiative_solution(5040, (0.756 * R_sun), (0.031 * AU), (0.885 * M_jup), R_jup, 1, np.log10(0.003), np.log10(0.158), np.log10(0.158), 0.5, 100)
        # upper atmosphere band (pressure units per Profile's convention)
        is_upper_atm = np.logical_and((p.pressures > 0.1), (p.pressures < 1000.0))
        self.assertTrue(np.all((p.temperatures[is_upper_atm] > 1000)))
        self.assertTrue(np.all((p.temperatures[is_upper_atm] < 1100)))
        # lower atmosphere band
        is_lower_atm = np.logical_and((p.pressures > 100000.0), (p.pressures < 3000000.0))
        self.assertTrue(np.all((p.temperatures[is_lower_atm] > 1600)))
        self.assertTrue(np.all((p.temperatures[is_lower_atm] < 1700)))
        # temperature must increase monotonically with depth
        self.assertTrue(np.all((np.diff(p.temperatures) > 0)))
# NOTE(review): `_module()` looks like a mangled registration decorator
# (e.g. `@MODELS.register_module()` in upstream mmdet) — confirm against the
# original source before relying on registration side effects.
_module()
class Mask2Former(MaskFormer):
    """Implementation of 'Masked-attention Mask Transformer for Universal
    Image Segmentation' (Mask2Former).

    Thin subclass of MaskFormer: forwards all constructor arguments to the
    parent unchanged.
    """

    def __init__(self, backbone, neck=None, panoptic_head=None, panoptic_fusion_head=None, train_cfg=None, test_cfg=None, init_cfg=None):
        super().__init__(backbone, neck=neck, panoptic_head=panoptic_head, panoptic_fusion_head=panoptic_fusion_head, train_cfg=train_cfg, test_cfg=test_cfg, init_cfg=init_cfg)
def uniform(lower: float, upper: float) -> 'tune.sample.Float':
    """Return a Ray Tune uniform sampler over [lower, upper]."""
    sampler = tune.uniform(lower, upper)
    return sampler
class NiftiEvaluator(Evaluator):
    """Evaluator that accepts NIfTI file paths and loads them via SimpleITK."""

    def __init__(self, *args, **kwargs):
        # keep handles on the SimpleITK images so spacing can be queried later
        self.test_nifti = None
        self.reference_nifti = None
        super().__init__(*args, **kwargs)

    def set_test(self, test):
        """Load the test image from a path, or clear it when None."""
        if test is None:
            self.test_nifti = None
            super().set_test(test)
        else:
            self.test_nifti = sitk.ReadImage(test)
            super().set_test(sitk.GetArrayFromImage(self.test_nifti))

    def set_reference(self, reference):
        """Load the reference image from a path, or clear it when None."""
        if reference is None:
            self.reference_nifti = None
            super().set_reference(reference)
        else:
            self.reference_nifti = sitk.ReadImage(reference)
            super().set_reference(sitk.GetArrayFromImage(self.reference_nifti))

    def evaluate(self, test=None, reference=None, voxel_spacing=None, **metric_kwargs):
        """Evaluate, deriving voxel spacing from the test image when not given."""
        if voxel_spacing is None:
            # SimpleITK reports (x, y, z) spacing; reverse to match array axes.
            # NOTE(review): raises AttributeError if no test image was set —
            # confirm callers always set one first.
            voxel_spacing = np.array(self.test_nifti.GetSpacing())[::-1]
        metric_kwargs['voxel_spacing'] = voxel_spacing
        return super().evaluate(test, reference, **metric_kwargs)
def test_chrono_system_clock():
    """The bound C++ system clock should agree with Python's clock to <0.5s."""
    cpp_now = m.test_chrono1()
    py_now = datetime.datetime.today()
    assert isinstance(cpp_now, datetime.datetime)
    delta = abs(cpp_now - py_now)
    assert delta.days == 0
    assert delta.seconds == 0
    assert delta.microseconds < 500000
def reduce_loss_dict(loss_dict):
    """Reduce a dict of loss tensors across all distributed processes.

    Sums each loss onto rank 0 and divides by the world size there, so rank 0
    ends up with averaged losses. With a single process the input dict is
    returned unchanged.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # sort keys so every rank stacks the losses in the same order
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], dim=0)
        torch.distributed.reduce(stacked, dst=0)
        if torch.distributed.get_rank() == 0:
            # only rank 0 holds the fully reduced sum; average it in place
            stacked /= world_size
        reduced_losses = dict(zip(names, stacked))
    return reduced_losses
class EZ_agent():
    """Bandit-style agent that samples one-hot noise vectors from a learned
    policy and updates it with a REINFORCE-style loss plus an entropy term."""

    def __init__(self, args, logger):
        self.args = args
        self.lr = args.lr
        self.noise_dim = self.args.noise_dim
        self.state_shape = self.args.state_shape
        self.policy = Policy(args)
        self.optimizer = optim.Adam(self.policy.parameters(), lr=self.lr)
        self.entropy_scaling = args.entropy_scaling
        # uniform one-hot distribution over noise_dim, batched over parallel runners
        self.uniform_distrib = torch.distributions.one_hot_categorical.OneHotCategorical(torch.tensor([(1 / self.args.noise_dim) for _ in range(self.args.noise_dim)]).repeat(self.args.batch_size_run, 1))
        # replay buffer of (state, action, return) triples for bandit updates
        self.buffer = deque(maxlen=self.args.bandit_buffer)
        self.epsilon_floor = args.bandit_epsilon
        self.logger = logger

    def sample(self, state, test_mode):
        """Sample a one-hot noise action: uniform at test time, else from the policy."""
        if test_mode:
            return self.uniform_distrib.sample()
        else:
            probs = self.policy(state)
            m = torch.distributions.one_hot_categorical.OneHotCategorical(probs)
            action = m.sample().cpu()
            return action

    def update_returns(self, states, actions, returns, test_mode, t):
        """Store new (state, action, return) triples and run policy updates.

        No-op in test mode. Runs `bandit_iters` minibatch updates sampled
        with replacement from the buffer, then logs the policy entropy.
        """
        if test_mode:
            return
        for (s, a, r) in zip(states, actions, returns):
            self.buffer.append((s, a, torch.tensor(r, dtype=torch.float)))
        for _ in range(self.args.bandit_iters):
            # random minibatch (with replacement) from the replay buffer
            idxs = np.random.randint(0, len(self.buffer), size=self.args.bandit_batch)
            batch_elems = [self.buffer[i] for i in idxs]
            states_ = torch.stack([x[0] for x in batch_elems]).to(states.device)
            actions_ = torch.stack([x[1] for x in batch_elems]).to(states.device)
            returns_ = torch.stack([x[2] for x in batch_elems]).to(states.device)
            probs = self.policy(states_)
            m = torch.distributions.one_hot_categorical.OneHotCategorical(probs)
            log_probs = m.log_prob(actions_.to(probs.device))
            self.optimizer.zero_grad()
            # NOTE(review): returns_ is already a tensor; torch.tensor(returns_)
            # re-wraps it (PyTorch warns about this) — confirm it is intended.
            policy_loss = ((- torch.dot(log_probs, torch.tensor(returns_, device=log_probs.device).float())) + (self.entropy_scaling * log_probs.sum()))
            policy_loss.backward()
            self.optimizer.step()
        # log the entropy of the last minibatch's policy distribution
        mean_entropy = m.entropy().mean()
        self.logger.log_stat('bandit_entropy', mean_entropy.item(), t)

    def cuda(self):
        self.policy.cuda()

    def save_model(self, path):
        torch.save(self.policy.state_dict(), '{}/ez_bandit_policy.th'.format(path))
def create_bio_labels(text, opinions):
    """Create BIO tag sequences for Source/Target/Polar_expression spans.

    Token start offsets come from the module-level tokenizer `tk`; each
    opinion's span annotations are converted to BIO tags aligned to those
    offsets.

    Args:
        text: the raw document text.
        opinions: iterable of opinion annotations.

    Returns:
        dict mapping each column name to a list of BIO tags (one per token).
    """
    offsets = [l[0] for l in tk.span_tokenize(text)]
    columns = ['Source', 'Target', 'Polar_expression']
    labels = {c: (['O'] * len(offsets)) for c in columns}
    anns = {c: [] for c in columns}
    for o in opinions:
        # Best-effort extraction: an opinion may lack any of the three span
        # types, so failures are skipped. Narrowed from bare `except:` which
        # also swallowed KeyboardInterrupt/SystemExit.
        try:
            anns['Source'].extend(get_bio_holder(o))
        except Exception:
            pass
        try:
            anns['Target'].extend(get_bio_target(o))
        except Exception:
            pass
        try:
            anns['Polar_expression'].extend(get_bio_expression(o))
        except Exception:
            pass
    for c in columns:
        for (bidx, tags) in anns[c]:
            labels[c] = replace_with_labels(labels[c], offsets, bidx, tags)
        # repair I- tags that lost their preceding B- tag
        labels[c] = restart_orphans(labels[c])
    return labels
class SingletonMeter(meter.Meter):
    """A meter that simply remembers the last value added."""

    def __init__(self, maxlen=1):
        super(SingletonMeter, self).__init__()
        self.__val = None

    def reset(self):
        """Clear the stored value and return what was held."""
        previous, self.__val = self.__val, None
        return previous

    def add(self, value):
        """Store `value`, overwriting any previous one."""
        self.__val = value

    def value(self):
        """Return the most recently added value (or None)."""
        return self.__val
def mouse_release(event):
    """Handle mouse-button release: optionally shift the fixed area, then redraw.

    Relies on module-level state (`fixed_*` bounds, `root`, `move_fixed_area`,
    `locationx`/`locationy`, `step`) and ends by calling `main()` to rebuild
    the UI.
    """
    global fixed_left, fixed_right, fixed_top, fixed_bottom, root
    # pointer position relative to the root window
    currentx = (root.winfo_pointerx() - root.winfo_rootx())
    currenty = (root.winfo_pointery() - root.winfo_rooty())
    # remove all existing child frames before redrawing
    for frame in root.winfo_children():
        frame.grid_forget()
    if (move_fixed_area == True):
        width_move = (currentx - locationx)
        height_move = (currenty - locationy)
        global step
        # NOTE(review): the first three releases override the drag delta with
        # hard-coded offsets keyed on `step` — presumably a scripted demo or
        # calibration sequence; confirm the magic numbers' origin.
        if (step == 0):
            width_move = 143
            height_move = (- 130)
        elif (step == 1):
            width_move = (341 - 334)
            height_move = (303 - 127)
        elif (step == 2):
            width_move = (80 - 341)
            height_move = ((101 - 303) - 20)
        step += 1
        # shift the whole fixed rectangle by the computed delta
        fixed_left += width_move
        fixed_right += width_move
        fixed_top += height_move
        fixed_bottom += height_move
    main()
def ze_grad(pred, target, classes, gpu):
    """Row-wise difference of one-hot argmax indicators.

    Returns one_hot(argmax(pred, 1)) - one_hot(argmax(target, 1)) as a tensor
    shaped like `pred`, placed on `gpu`.

    Args:
        pred: (N, C) prediction scores.
        target: (N, C) target scores (argmax taken per row).
        classes: unused — kept for interface compatibility (TODO: confirm).
        gpu: device the gradients are moved to.
    """
    pred_grad = torch.zeros_like(pred).to(gpu)
    target_grad = torch.zeros_like(target).to(gpu)
    pred_argm = torch.argmax(pred, 1).to(pred_grad.device)
    target_argm = torch.argmax(target, 1).to(target_grad.device)
    # vectorized one-hot scatter replaces the original per-row Python loop
    rows = torch.arange(target.shape[0], device=pred_grad.device)
    pred_grad[rows, pred_argm] += 1.0
    target_grad[rows, target_argm] += 1.0
    return (pred_grad - target_grad)
def test_audio_dataset_init_reproducible(fs, mocker):
    """Two datasets built from the same inputs must list the same files."""
    expected = audio_dataset(fs, mocker)
    rebuilt = AudioDataset(TEST_DATA_DIR, TEST_META_FILE, TEST_SAMPLE_RATE, TEST_NUM_SAMPLES)
    assert expected.file_list == rebuilt.file_list
def _variable_assign(var, new_value):
    """Return a TF assign op for `var`, named '<var_op_name>_assign'."""
    assign_name = var.op.name + '_assign'
    return state_ops.assign(var, new_value, name=assign_name)
def main():
    """Fine-tuning entry point: load a config and a pretrained detector,
    freeze neck and bbox head, then train on the configured dataset."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # CLI --cfg-options override values from the config file
    if (args.cfg_options is not None):
        cfg.merge_from_dict(args.cfg_options)
    # import any modules the config requests (custom plugins, datasets, ...)
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # work_dir priority: CLI flag > config value > ./work_dirs/<config stem>
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    elif (cfg.get('work_dir', None) is None):
        cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # in distributed mode the effective gpu ids span the whole world size
        (_, world_size) = get_dist_info()
        cfg.gpu_ids = range(world_size)
    # persist the resolved config next to the logs
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # meta dict is stored in checkpoints for reproducibility
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join([f'{k}: {v}' for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'''Config:
{cfg.pretty_text}''')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    # load pretrained weights on top of the freshly built detector
    pretrained_dict = torch.load(args.pre_train)
    model_dict = model.state_dict()
    pretrained_dict = pretrained_dict['state_dict']
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('pretrained model loaded from {}.'.format(args.pre_train))
    # freeze neck and bbox head: only the remaining parameters are trained
    for p in model.neck.parameters():
        p.requires_grad = False
    for p in model.bbox_head.parameters():
        p.requires_grad = False
    print(' trained param ')
    for (name, param) in model.named_parameters():
        if param.requires_grad:
            print(name)
    print(' trained param END')
    datasets = [build_dataset(cfg.data.train)]
    # a 2-stage workflow also evaluates on val data (with the train pipeline)
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # record version and class names inside every checkpoint
        cfg.checkpoint_config.meta = dict(mmdet_version=(__version__ + get_git_hash()[:7]), CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=(not args.no_validate), timestamp=timestamp, meta=meta)
def save_model(model, args, save_dir, model_name, should_print=True):
    """Serialize a model's state dict together with its args.

    The file is written to '<save_dir>/model_<model_name>'.
    """
    save_path = f'{save_dir}/model_{str(model_name)}'
    payload = {'model': model.state_dict(), 'args': args}
    torch.save(payload, save_path)
    if should_print:
        print(f'Model saved to: {save_path}')
class MockS3Client():
    """Test double for an S3 client that serves objects from local disk."""

    def __init__(self, enable_mc=True):
        # mirrors the real client's flag; not consulted by Get
        self.enable_mc = enable_mc

    def Get(self, filepath):
        """Return the raw bytes stored at `filepath`."""
        with open(filepath, 'rb') as handle:
            return handle.read()
class CLAM_SB(nn.Module):
    """Single-branch CLAM (Clustering-constrained Attention MIL).

    Attention-pools a bag of instance features into a slide-level
    representation; optionally trains per-class binary instance classifiers
    on the most/least attended instances ("instance-level clustering").
    """

    def __init__(self, gate=True, size_arg='small', dropout=False, k_sample=8, n_classes=2, instance_loss_fn=nn.CrossEntropyLoss(), subtyping=False):
        super(CLAM_SB, self).__init__()
        self.size_dict = {'small': [1024, 512, 256], 'big': [1024, 512, 384]}
        size = self.size_dict[size_arg]
        fc = [nn.Linear(size[0], size[1]), nn.ReLU()]
        if dropout:
            fc.append(nn.Dropout(0.25))
        # attention network (gated or plain) appended after the projection
        if gate:
            attention_net = Attn_Net_Gated(L=size[1], D=size[2], dropout=dropout, n_classes=1)
        else:
            attention_net = Attn_Net(L=size[1], D=size[2], dropout=dropout, n_classes=1)
        fc.append(attention_net)
        self.attention_net = nn.Sequential(*fc)
        self.classifiers = nn.Linear(size[1], n_classes)
        # one binary (in-class vs out-of-class) instance classifier per class
        instance_classifiers = [nn.Linear(size[1], 2) for i in range(n_classes)]
        self.instance_classifiers = nn.ModuleList(instance_classifiers)
        self.k_sample = k_sample
        self.instance_loss_fn = instance_loss_fn
        self.n_classes = n_classes
        self.subtyping = subtyping
        initialize_weights(self)

    def relocate(self):
        """Move all submodules to GPU when available, else CPU."""
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.attention_net = self.attention_net.to(device)
        self.classifiers = self.classifiers.to(device)
        self.instance_classifiers = self.instance_classifiers.to(device)

    @staticmethod
    def create_positive_targets(length, device):
        # BUGFIX: decorated with @staticmethod. These helpers had no `self`
        # parameter but were called as self.create_*_targets(k, device); the
        # bound call passed the instance as `length` and raised TypeError.
        return torch.full((length,), 1, device=device, dtype=torch.long)

    @staticmethod
    def create_negative_targets(length, device):
        # BUGFIX: see create_positive_targets.
        return torch.full((length,), 0, device=device, dtype=torch.long)

    def inst_eval(self, A, h, classifier):
        """In-class instance evaluation: top-k attended instances are labeled
        positive, bottom-k negative; returns (loss, preds, targets)."""
        device = h.device
        if len(A.shape) == 1:
            A = A.view(1, -1)
        top_p_ids = torch.topk(A, self.k_sample)[1][-1]
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        top_n_ids = torch.topk(-A, self.k_sample, dim=1)[1][-1]
        top_n = torch.index_select(h, dim=0, index=top_n_ids)
        p_targets = self.create_positive_targets(self.k_sample, device)
        n_targets = self.create_negative_targets(self.k_sample, device)
        all_targets = torch.cat([p_targets, n_targets], dim=0)
        all_instances = torch.cat([top_p, top_n], dim=0)
        logits = classifier(all_instances)
        all_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, all_targets)
        return (instance_loss, all_preds, all_targets)

    def inst_eval_out(self, A, h, classifier):
        """Out-of-class evaluation (subtyping): top-k attended instances
        should be classified negative for this class."""
        device = h.device
        if len(A.shape) == 1:
            A = A.view(1, -1)
        top_p_ids = torch.topk(A, self.k_sample)[1][-1]
        top_p = torch.index_select(h, dim=0, index=top_p_ids)
        p_targets = self.create_negative_targets(self.k_sample, device)
        logits = classifier(top_p)
        p_preds = torch.topk(logits, 1, dim=1)[1].squeeze(1)
        instance_loss = self.instance_loss_fn(logits, p_targets)
        return (instance_loss, p_preds, p_targets)

    def forward(self, h, label=None, instance_eval=False, return_features=False, attention_only=False, train_epoch=-1, testing=False, instance_mask=None):
        """Forward a bag of instance features `h`.

        Returns (logits, Y_prob, Y_hat, A_raw, results_dict); with
        `attention_only` the raw attention scores are returned instead.
        """
        (A, h) = self.attention_net(h)
        A = torch.transpose(A, 1, 0)
        if attention_only:
            return A
        A_raw = A
        A = F.softmax(A, dim=1)
        if instance_eval:
            total_inst_loss = 0.0
            all_preds = []
            all_targets = []
            inst_labels = F.one_hot(label, num_classes=self.n_classes).squeeze()
            for i in range(len(self.instance_classifiers)):
                inst_label = inst_labels[i].item()
                classifier = self.instance_classifiers[i]
                if inst_label == 1:
                    (instance_loss, preds, targets) = self.inst_eval(A, h, classifier)
                    all_preds.extend(preds.cpu().numpy())
                    all_targets.extend(targets.cpu().numpy())
                elif self.subtyping:
                    (instance_loss, preds, targets) = self.inst_eval_out(A, h, classifier)
                    all_preds.extend(preds.cpu().numpy())
                    all_targets.extend(targets.cpu().numpy())
                else:
                    continue
                total_inst_loss += instance_loss
            if self.subtyping:
                # average over all classifiers when subtyping
                total_inst_loss /= len(self.instance_classifiers)
        # attention-weighted bag representation and slide-level prediction
        M = torch.mm(A, h)
        logits = self.classifiers(M)
        Y_hat = torch.topk(logits, 1, dim=1)[1]
        Y_prob = F.softmax(logits, dim=1)
        if instance_eval:
            results_dict = {'instance_loss': total_inst_loss, 'inst_labels': np.array(all_targets), 'inst_preds': np.array(all_preds)}
        else:
            # NOTE(review): unconditional .cuda() fails on CPU-only hosts —
            # kept as-is for behavioral compatibility; confirm deployment.
            results_dict = {'instance_loss': torch.tensor([0]).cuda(), 'inst_labels': np.array([0]), 'inst_preds': np.array([0])}
        if return_features:
            results_dict.update({'features': M})
        return (logits, Y_prob, Y_hat, A_raw, results_dict)
class ShuffleUnit(nn.Module):
    """ShuffleNet unit: 1x1 group conv, channel shuffle, 3x3 depthwise conv,
    1x1 group conv, combined with the residual by add (stride 1) or concat
    (stride 2, avg-pooled residual).
    """

    def __init__(self, in_channels, out_channels, groups=3, first_block=True, combine='add', conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), with_cp=False):
        # copy cfg dicts so local mutation never leaks back to the caller
        norm_cfg = copy.deepcopy(norm_cfg)
        act_cfg = copy.deepcopy(act_cfg)
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.first_block = first_block
        self.combine = combine
        self.groups = groups
        self.bottleneck_channels = self.out_channels // 4
        self.with_cp = with_cp
        if self.combine == 'add':
            self.depthwise_stride = 1
            self._combine_func = self._add
            assert in_channels == out_channels, 'in_channels must be equal to out_channels when combine is add'
        elif self.combine == 'concat':
            self.depthwise_stride = 2
            self._combine_func = self._concat
            # after concatenating the residual, total width equals out_channels
            self.out_channels -= self.in_channels
            self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
        else:
            raise ValueError(f'Cannot combine tensors with {self.combine}. Only "add" and "concat" are supported')
        # the very first unit uses a plain 1x1 conv; later units use group conv
        self.first_1x1_groups = 1 if first_block else self.groups
        self.g_conv_1x1_compress = ConvModule(in_channels=self.in_channels, out_channels=self.bottleneck_channels, kernel_size=1, groups=self.first_1x1_groups, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.depthwise_conv3x3_bn = ConvModule(in_channels=self.bottleneck_channels, out_channels=self.bottleneck_channels, kernel_size=3, stride=self.depthwise_stride, padding=1, groups=self.bottleneck_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        self.g_conv_1x1_expand = ConvModule(in_channels=self.bottleneck_channels, out_channels=self.out_channels, kernel_size=1, groups=self.groups, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
        self.act = build_activation_layer(act_cfg)

    @staticmethod
    def _add(x, out):
        # BUGFIX: decorated with @staticmethod. These helpers had no `self`
        # parameter but were bound via `self._add`/`self._concat`, so
        # `self._combine_func(residual, out)` passed three args to a
        # two-arg function and raised TypeError.
        return x + out

    @staticmethod
    def _concat(x, out):
        # BUGFIX: see _add.
        return torch.cat((x, out), 1)

    def forward(self, x):
        def _inner_forward(x):
            residual = x
            out = self.g_conv_1x1_compress(x)
            out = self.depthwise_conv3x3_bn(out)
            if self.groups > 1:
                out = channel_shuffle(out, self.groups)
            out = self.g_conv_1x1_expand(out)
            if self.combine == 'concat':
                # downsample the residual to match, activate before concat
                residual = self.avgpool(residual)
                out = self.act(out)
                out = self._combine_func(residual, out)
            else:
                out = self._combine_func(residual, out)
                out = self.act(out)
            return out

        if self.with_cp and x.requires_grad:
            # trade compute for memory via gradient checkpointing
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)
        return out
def make_line_magic(flow_: 'NotebookFlow'):
    """Create and register the dataflow IPython line magic for `flow_`.

    Builds a command dispatcher over the magic's subcommands (enable/disable,
    dependency inspection, slicing, tagging, execution modes, tracer
    registration, ...) and registers it under `_FLOW_LINE_MAGIC`.
    """
    # module-level function names, used to detect known-but-unregistered magics
    line_magic_names = [name for (name, val) in globals().items() if inspect.isfunction(val)]

    def _handle(cmd, line):
        # normalize dashes so `foo-bar` and `foo_bar` are equivalent
        cmd = cmd.replace('-', '_')
        if (cmd in ('enable', 'disable', 'on', 'off')):
            return toggle_dataflow(cmd)
        elif (cmd in ('deps', 'show_deps', 'show_dependency', 'show_dependencies')):
            return show_deps(line)
        elif (cmd in ('code', 'get_code')):
            return get_code(line)
        elif (cmd in ('waiting', 'show_waiting')):
            return show_waiting(line)
        elif (cmd == 'trace_messages'):
            return trace_messages(line)
        elif (cmd in ('hls', 'nohls', 'highlight', 'highlights')):
            return set_highlights(cmd, line)
        elif (cmd in ('dag', 'make_dag', 'cell_dag', 'make_cell_dag')):
            return json.dumps(create_dag_metadata(), indent=2)
        elif (cmd in ('slice', 'make_slice', 'gather_slice')):
            return make_slice(line)
        elif (cmd == 'tag'):
            return tag(line)
        elif (cmd == 'show_tags'):
            return show_tags(line)
        elif (cmd in ('mode', 'exec_mode')):
            return set_exec_mode(line)
        elif (cmd in ('schedule', 'exec_schedule', 'execution_schedule')):
            return set_exec_schedule(line)
        elif (cmd in ('direction', 'flow_direction', 'order', 'flow_order', 'semantics', 'flow_semantics')):
            return set_flow_direction(line)
        elif (cmd == 'reactivity'):
            return set_reactivity(line)
        elif (cmd in ('register', 'register_tracer')):
            return register_tracer(line)
        elif (cmd in ('deregister', 'deregister_tracer')):
            return deregister_tracer(line)
        elif (cmd == 'clear'):
            # forget history before the current cell counter
            flow_.min_timestamp = flow_.cell_counter()
            return None
        elif cmd.endswith('warn_ooo'):
            # 'warn_ooo' enables, 'nowarn_ooo' disables
            flow_.mut_settings.warn_out_of_order_usages = (not cmd.startswith('no'))
            return None
        elif cmd.endswith('lint_ooo'):
            flow_.mut_settings.lint_out_of_order_usages = (not cmd.startswith('no'))
            return None
        elif (cmd == 'syntax_transforms'):
            is_on = line.endswith(('enabled', 'on'))
            is_off = line.endswith(('disabled', 'off'))
            if (is_on or is_off):
                flow_.mut_settings.syntax_transforms_enabled = is_on
            return None
        elif (cmd == 'syntax_transforms_only'):
            flow_.mut_settings.syntax_transforms_only = True
            return None
        elif cmd.startswith('register_annotation'):
            return register_annotations(line)
        elif (cmd == 'toggle_reactivity'):
            flow_.toggle_reactivity()
            return None
        elif (cmd == 'bump_min_forced_reactive_counter'):
            flow_.bump_min_forced_reactive_counter()
            return None
        elif (cmd in line_magic_names):
            warn(f'We have a magic for {cmd}, but have not yet registered it')
            return None
        else:
            warn(_USAGE)
            return None

    def _flow_magic(line: str):
        # split off the subcommand; the rest of the line is its argument
        try:
            (cmd, line) = line.split(' ', 1)
            if (cmd in ('slice', 'make_slice', 'gather_slice')):
                # rewrite `--tag <class 'X'>` (repr of a type) into `--tag $X`
                line = re.sub("--tag +<class '(\\w+)'>", '--tag $\\1', line)
        except ValueError:
            (cmd, line) = (line, '')
        # optional `> fname` redirects the output to a file
        try:
            (line, fname) = line.split('>', 1)
        except ValueError:
            (line, fname) = (line, None)
        line = line.strip()
        if (fname is not None):
            fname = fname.strip()
        outstr = _handle(cmd, line)
        if (outstr is None):
            return
        if (fname is None):
            print_(outstr)
        else:
            with open(fname, 'w') as f:
                f.write(outstr)

    # register under the canonical magic name
    _flow_magic.__name__ = _FLOW_LINE_MAGIC
    return register_line_magic(_flow_magic)
def get_model_from_config(model_config: ConfigDict, nelec: Array, ion_pos: Array, ion_charges: Array, dtype=jnp.float32) -> Module:
spin_split = get_spin_split(nelec)
compute_input_streams = get_compute_input_streams_from_config(model_config.input_streams, ion_pos)
backflow = get_backflow_from_config(model_config.backflow, spin_split, dtype=dtype)
(kernel_init_constructor, bias_init_constructor) = _get_dtype_init_constructors(dtype)
ferminet_model_types = ['ferminet', 'embedded_particle_ferminet', 'extended_orbital_matrix_ferminet']
if (model_config.type in ferminet_model_types):
determinant_fn = None
resnet_config = model_config.det_resnet
if model_config.use_det_resnet:
determinant_fn = get_resnet_determinant_fn_for_ferminet(resnet_config.ndense, resnet_config.nlayers, _get_named_activation_fn(resnet_config.activation), kernel_init_constructor(resnet_config.kernel_init), bias_init_constructor(resnet_config.bias_init), resnet_config.use_bias)
if (model_config.type == 'ferminet'):
return FermiNet(spin_split, compute_input_streams, backflow, model_config.ndeterminants, kernel_initializer_orbital_linear=kernel_init_constructor(model_config.kernel_init_orbital_linear), kernel_initializer_envelope_dim=kernel_init_constructor(model_config.kernel_init_envelope_dim), kernel_initializer_envelope_ion=kernel_init_constructor(model_config.kernel_init_envelope_ion), envelope_softening=model_config.envelope_softening, bias_initializer_orbital_linear=bias_init_constructor(model_config.bias_init_orbital_linear), orbitals_use_bias=model_config.orbitals_use_bias, isotropic_decay=model_config.isotropic_decay, determinant_fn=determinant_fn, determinant_fn_mode=DeterminantFnMode[resnet_config.mode.upper()], full_det=model_config.full_det)
elif (model_config.type == 'embedded_particle_ferminet'):
total_nelec = (jnp.array(model_config.nhidden_fermions_per_spin) + nelec)
total_spin_split = get_spin_split(total_nelec)
backflow = get_backflow_from_config(model_config.backflow, total_spin_split, dtype=dtype)
invariance_config = model_config.invariance
invariance_compute_input_streams = get_compute_input_streams_from_config(invariance_config.input_streams, ion_pos)
invariance_backflow = get_backflow_from_config(invariance_config.backflow, spin_split, dtype=dtype)
return EmbeddedParticleFermiNet(spin_split, compute_input_streams, backflow, model_config.ndeterminants, kernel_initializer_orbital_linear=kernel_init_constructor(model_config.kernel_init_orbital_linear), kernel_initializer_envelope_dim=kernel_init_constructor(model_config.kernel_init_envelope_dim), kernel_initializer_envelope_ion=kernel_init_constructor(model_config.kernel_init_envelope_ion), envelope_softening=model_config.envelope_softening, bias_initializer_orbital_linear=bias_init_constructor(model_config.bias_init_orbital_linear), orbitals_use_bias=model_config.orbitals_use_bias, isotropic_decay=model_config.isotropic_decay, determinant_fn=determinant_fn, determinant_fn_mode=DeterminantFnMode[resnet_config.mode.upper()], full_det=model_config.full_det, nhidden_fermions_per_spin=model_config.nhidden_fermions_per_spin, invariance_compute_input_streams=invariance_compute_input_streams, invariance_backflow=invariance_backflow, invariance_kernel_initializer=kernel_init_constructor(invariance_config.kernel_initializer), invariance_bias_initializer=bias_init_constructor(invariance_config.bias_initializer), invariance_use_bias=invariance_config.use_bias)
elif (model_config.type == 'extended_orbital_matrix_ferminet'):
invariance_config = model_config.invariance
if model_config.use_separate_invariance_backflow:
invariance_backflow = get_backflow_from_config(invariance_config.backflow, spin_split, dtype=dtype)
else:
invariance_backflow = None
return ExtendedOrbitalMatrixFermiNet(spin_split, compute_input_streams, backflow, model_config.ndeterminants, kernel_initializer_orbital_linear=kernel_init_constructor(model_config.kernel_init_orbital_linear), kernel_initializer_envelope_dim=kernel_init_constructor(model_config.kernel_init_envelope_dim), kernel_initializer_envelope_ion=kernel_init_constructor(model_config.kernel_init_envelope_ion), envelope_softening=model_config.envelope_softening, bias_initializer_orbital_linear=bias_init_constructor(model_config.bias_init_orbital_linear), orbitals_use_bias=model_config.orbitals_use_bias, isotropic_decay=model_config.isotropic_decay, determinant_fn=determinant_fn, determinant_fn_mode=DeterminantFnMode[resnet_config.mode.upper()], full_det=model_config.full_det, nhidden_fermions_per_spin=model_config.nhidden_fermions_per_spin, invariance_backflow=invariance_backflow, invariance_kernel_initializer=kernel_init_constructor(invariance_config.kernel_initializer), invariance_bias_initializer=bias_init_constructor(invariance_config.bias_initializer), invariance_use_bias=invariance_config.use_bias)
else:
raise ValueError('FermiNet model type {} requested, but the only supported types are: {}'.format(model_config.type, ferminet_model_types))
elif (model_config.type in ['orbital_cofactor_net', 'per_particle_dets_net']):
if (model_config.type == 'orbital_cofactor_net'):
antieq_layer: Callable[([Array, Array], ArrayList)] = antiequivariance.OrbitalCofactorAntiequivarianceLayer(spin_split, kernel_initializer_orbital_linear=kernel_init_constructor(model_config.kernel_init_orbital_linear), kernel_initializer_envelope_dim=kernel_init_constructor(model_config.kernel_init_envelope_dim), kernel_initializer_envelope_ion=kernel_init_constructor(model_config.kernel_init_envelope_ion), bias_initializer_orbital_linear=bias_init_constructor(model_config.bias_init_orbital_linear), orbitals_use_bias=model_config.orbitals_use_bias, isotropic_decay=model_config.isotropic_decay)
elif (model_config.type == 'per_particle_dets_net'):
antieq_layer = antiequivariance.PerParticleDeterminantAntiequivarianceLayer(spin_split, kernel_initializer_orbital_linear=kernel_init_constructor(model_config.kernel_init_orbital_linear), kernel_initializer_envelope_dim=kernel_init_constructor(model_config.kernel_init_envelope_dim), kernel_initializer_envelope_ion=kernel_init_constructor(model_config.kernel_init_envelope_ion), bias_initializer_orbital_linear=bias_init_constructor(model_config.bias_init_orbital_linear), orbitals_use_bias=model_config.orbitals_use_bias, isotropic_decay=model_config.isotropic_decay)
array_list_sign_covariance = get_sign_covariance_from_config(model_config, spin_split, kernel_init_constructor, dtype)
return AntiequivarianceNet(spin_split, compute_input_streams, backflow, antieq_layer, array_list_sign_covariance, multiply_by_eq_features=model_config.multiply_by_eq_features)
elif (model_config.type == 'explicit_antisym'):
jastrow_config = model_config.jastrow
def _get_two_body_decay_jastrow():
return get_two_body_decay_scaled_for_chargeless_molecules(ion_pos, ion_charges, init_ee_strength=jastrow_config.two_body_decay.init_ee_strength, trainable=jastrow_config.two_body_decay.trainable)
def _get_backflow_based_jastrow():
if jastrow_config.backflow_based.use_separate_jastrow_backflow:
jastrow_backflow = get_backflow_from_config(jastrow_config.backflow_based.backflow, spin_split, dtype=dtype)
else:
jastrow_backflow = None
return BackflowJastrow(backflow=jastrow_backflow)
if (jastrow_config.type == 'one_body_decay'):
jastrow: Jastrow = OneBodyExpDecay(kernel_initializer=kernel_init_constructor(jastrow_config.one_body_decay.kernel_init))
elif (jastrow_config.type == 'two_body_decay'):
jastrow = _get_two_body_decay_jastrow()
elif (jastrow_config.type == 'backflow_based'):
jastrow = _get_backflow_based_jastrow()
elif (jastrow_config.type == 'two_body_decay_and_backflow_based'):
two_body_decay_jastrow = _get_two_body_decay_jastrow()
backflow_jastrow = _get_backflow_based_jastrow()
jastrow = AddedModel([two_body_decay_jastrow, backflow_jastrow])
else:
raise ValueError('Unsupported jastrow type; {} was requested, but the only supported types are: {}'.format(jastrow_config.type, ', '.join(VALID_JASTROW_TYPES)))
if (model_config.antisym_type == 'factorized'):
return FactorizedAntisymmetry(spin_split, compute_input_streams, backflow, jastrow, rank=model_config.rank, ndense_resnet=model_config.ndense_resnet, nlayers_resnet=model_config.nlayers_resnet, kernel_initializer_resnet=kernel_init_constructor(model_config.kernel_init_resnet), bias_initializer_resnet=bias_init_constructor(model_config.bias_init_resnet), activation_fn_resnet=_get_named_activation_fn(model_config.activation_fn_resnet), resnet_use_bias=model_config.resnet_use_bias)
elif (model_config.antisym_type == 'generic'):
return GenericAntisymmetry(spin_split, compute_input_streams, backflow, jastrow, ndense_resnet=model_config.ndense_resnet, nlayers_resnet=model_config.nlayers_resnet, kernel_initializer_resnet=kernel_init_constructor(model_config.kernel_init_resnet), bias_initializer_resnet=bias_init_constructor(model_config.bias_init_resnet), activation_fn_resnet=_get_named_activation_fn(model_config.activation_fn_resnet), resnet_use_bias=model_config.resnet_use_bias)
else:
raise ValueError('Unsupported explicit antisymmetry type; {} was requested'.format(model_config.antisym_type))
else:
raise ValueError('Unsupported model type; {} was requested'.format(model_config.type)) |
class ReplayBuffer(Dataset):
    """Fixed-capacity ring buffer of transitions built on the base ``Dataset``.

    Storage is pre-allocated; ``insert`` overwrites the oldest slot once the
    buffer is full.
    """

    def __init__(self, observation_space: gym.spaces.Box, action_dim: int, capacity: int):
        """Pre-allocate storage for ``capacity`` transitions."""
        observations = np.empty((capacity, *observation_space.shape), dtype=observation_space.dtype)
        actions = np.empty((capacity, action_dim), dtype=np.float32)
        rewards = np.empty((capacity,), dtype=np.float32)
        masks = np.empty((capacity,), dtype=np.float32)
        dones_float = np.empty((capacity,), dtype=np.float32)
        next_observations = np.empty((capacity, *observation_space.shape), dtype=observation_space.dtype)
        super().__init__(observations=observations, actions=actions, rewards=rewards, masks=masks, dones_float=dones_float, next_observations=next_observations, size=0)
        self.size = 0          # number of valid transitions currently stored
        self.insert_index = 0  # next slot to be written by insert()
        self.capacity = capacity

    def initialize_with_dataset(self, dataset: Dataset, num_samples: Optional[int]):
        """Seed an empty buffer with up to ``num_samples`` transitions from ``dataset``.

        When ``num_samples`` is None the whole dataset is copied; when it is
        smaller than the dataset, a random subset (without replacement) is used.
        """
        assert (self.insert_index == 0), 'Can only initialize an empty replay buffer.'
        dataset_size = len(dataset.observations)
        if num_samples is None:
            num_samples = dataset_size
        else:
            num_samples = min(dataset_size, num_samples)
        assert (self.capacity >= num_samples), 'Dataset cannot be larger than the replay buffer capacity.'
        if num_samples < dataset_size:
            # Random subsample without replacement.
            indices = np.random.permutation(dataset_size)[:num_samples]
        else:
            indices = np.arange(num_samples)
        self.observations[:num_samples] = dataset.observations[indices]
        self.actions[:num_samples] = dataset.actions[indices]
        self.rewards[:num_samples] = dataset.rewards[indices]
        self.masks[:num_samples] = dataset.masks[indices]
        self.dones_float[:num_samples] = dataset.dones_float[indices]
        self.next_observations[:num_samples] = dataset.next_observations[indices]
        # BUG FIX: wrap the write index so that a buffer filled to exactly
        # `capacity` resumes writing at slot 0 instead of indexing out of
        # range on the next insert() (the original set it to num_samples).
        self.insert_index = num_samples % self.capacity
        self.size = num_samples

    def insert(self, observation: np.ndarray, action: np.ndarray, reward: float, mask: float, done_float: float, next_observation: np.ndarray):
        """Write one transition at the current slot, overwriting the oldest when full."""
        self.observations[self.insert_index] = observation
        self.actions[self.insert_index] = action
        self.rewards[self.insert_index] = reward
        self.masks[self.insert_index] = mask
        self.dones_float[self.insert_index] = done_float
        self.next_observations[self.insert_index] = next_observation
        self.insert_index = (self.insert_index + 1) % self.capacity
        self.size = min(self.size + 1, self.capacity)
def collate(samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True):
    """Merge a list of sample dicts into a padded mini-batch.

    Samples are sorted by descending source length; targets (when present)
    are padded and, with ``input_feeding``, shifted so EOS leads as the
    decoder input.
    """
    if not samples:
        return {}

    def merge(key, left_pad, move_eos_to_beginning=False):
        # Pad the per-sample token tensors into one batch tensor.
        return data_utils.collate_tokens([s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)

    sample_ids = torch.LongTensor([s['id'] for s in samples])
    src_tokens = merge('source', left_pad=left_pad_source)
    src_lengths = torch.LongTensor([s['source'].numel() for s in samples])

    # Sort the whole batch by decreasing source length.
    src_lengths, sort_order = src_lengths.sort(descending=True)
    sample_ids = sample_ids.index_select(0, sort_order)
    src_tokens = src_tokens.index_select(0, sort_order)

    prev_output_tokens = None
    target = None
    if samples[0].get('target', None) is not None:
        target = merge('target', left_pad=left_pad_target).index_select(0, sort_order)
        ntokens = sum(len(s['target']) for s in samples)
        if input_feeding:
            # Decoder input: target rotated so EOS comes first.
            prev_output_tokens = merge('target', left_pad=left_pad_target, move_eos_to_beginning=True)
            prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
    else:
        ntokens = sum(len(s['source']) for s in samples)

    batch = {
        'id': sample_ids,
        'nsentences': len(samples),
        'ntokens': ntokens,
        'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths},
        'target': target,
    }
    if prev_output_tokens is not None:
        batch['net_input']['prev_output_tokens'] = prev_output_tokens
    return batch
def evaluations_scipy(ty, pv):
    """Evaluate predictions against true values (ndarray version).

    Args:
        ty: true values, ndarray.
        pv: predicted values, ndarray of the same length.

    Returns:
        (ACC, MSE, SCC): accuracy percentage, mean squared error, and the
        squared correlation coefficient (NaN when either input is constant,
        which makes the denominator zero).

    Raises:
        TypeError: if inputs are not ndarrays (or the array module is missing).
        ValueError: if the inputs differ in length.
    """
    if not (scipy is not None and isinstance(ty, scipy.ndarray) and isinstance(pv, scipy.ndarray)):
        raise TypeError('type of ty and pv must be ndarray')
    if len(ty) != len(pv):
        raise ValueError('len(ty) must be equal to len(pv)')
    ACC = 100.0 * (ty == pv).mean()
    MSE = ((ty - pv) ** 2).mean()
    l = len(ty)
    sumv = pv.sum()
    sumy = ty.sum()
    sumvy = (pv * ty).sum()
    sumvv = (pv * pv).sum()
    sumyy = (ty * ty).sum()
    with scipy.errstate(all='raise'):
        try:
            SCC = ((l * sumvy - sumv * sumy) ** 2) / ((l * sumvv - sumv * sumv) * (l * sumyy - sumy * sumy))
        # Narrowed from a bare `except:`: under errstate(all='raise') a zero
        # denominator surfaces as FloatingPointError (numpy scalars) or
        # ZeroDivisionError (python scalars).
        except (FloatingPointError, ZeroDivisionError):
            SCC = float('nan')
    return (float(ACC), float(MSE), float(SCC))
class Annotator(object):
    """Curses-based interactive annotation tool.

    Holds the current file / datum / view state and dispatches keyboard input
    to handler methods via ``action_to_function``. ``current_mode`` is a stack
    of mode names; its top element (e.g. 'no_file', 'read', 'category',
    'link', 'write_query', 'write_label') gates most handlers.
    """
    def __init__(self, config, filenames, current_mode, args):
        """Store config, file list and mode stack; datum/view are created lazily."""
        self.current_mode = current_mode
        self.current_num = None       # numeric repeat-count prefix typed by the user
        self.search_term = ''         # current search query text
        self.partial_typing = ''      # label text being typed in write_label mode
        self.cfilename = (- 1)        # index into self.filenames (-1 = before first file)
        self.filename = None
        self.filenames = filenames
        self.datum = None             # current Datum (file contents + annotations)
        self.view = None              # current View (rendering/cursor state)
        self.window = None            # curses window, set in annotate()
        self.config = config
        self.args = args
        # Maps action names (resolved from key bindings in config) to handlers.
        self.action_to_function = {'delete-query-char': self.delete_typing_char, 'leave-query-mode': self.leave_typing_mode, 'enter-query-mode': self.enter_typing_mode, 'clear-query': self.clear_query, 'add-to-query': self.add_to_typing, 'delete-label-char': self.delete_typing_char, 'assign-text-label': self.assign_text, 'enter-label-mode': self.enter_typing_mode, 'add-to-label': self.add_to_typing, 'toggle-line-numbers': self.toggle_line_numbers, 'move-up': self.move, 'move-down': self.move, 'move-left': self.move, 'move-right': self.move, 'move-link-up': self.move, 'move-link-down': self.move, 'move-link-left': self.move, 'move-link-right': self.move, 'jump-up': self.move, 'jump-down': self.move, 'jump-left': self.move, 'jump-right': self.move, 'extend-up': self.change_span, 'extend-down': self.change_span, 'extend-left': self.change_span, 'extend-right': self.change_span, 'contract-up': self.change_span, 'contract-down': self.change_span, 'contract-left': self.change_span, 'contract-right': self.change_span, 'extend-link-up': self.change_span, 'extend-link-down': self.change_span, 'extend-link-left': self.change_span, 'extend-link-right': self.change_span, 'contract-link-up': self.change_span, 'contract-link-down': self.change_span, 'contract-link-left': self.change_span, 'contract-link-right': self.change_span, 'search-previous': self.search, 'search-next': self.search, 'search-link-previous': self.search, 'search-link-next': self.search, 'page-up': self.shift_view, 'page-down': self.shift_view, 'toggle-help': self.modify_display, 'toggle-progress': self.modify_display, 'toggle-legend': self.modify_display, 'toggle-current-mark': self.modify_display, 'next-file': self.change_file, 'previous-file': self.change_file, 'quit': self.save_or_quit, 'save-and-quit': self.save_or_quit, 'save': self.save_or_quit, 'create-link': self.create_link, 'create-link-and-move': self.create_link, 'edit-annotation': self.edit_annotation, 'remove-annotation': self.remove_annotation, 
        'update-num': self.update_number}
    def move(self, user_input, action):
        """Move the cursor (or linking position when 'link' is in the action)."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        direction = action.split('-')[(- 1)]
        jump = ('jump' in action)
        link = ('link' in action)
        num = 1
        # A pending prefix of 0 means "jump"; any other prefix is a repeat count.
        if (self.current_num == 0):
            jump = True
            self.current_num = None
        elif (self.current_num is not None):
            num = self.current_num
            self.current_num = None
        self.view.move(direction, num, jump, link)
    def toggle_line_numbers(self, user_input, action):
        """Flip line-number display on the view."""
        self.view.line_numbers = (not self.view.line_numbers)
    def change_span(self, user_input, action):
        """Extend or contract the current span (or link span) in a direction."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        change = action.split('-')[0]
        direction = action.split('-')[(- 1)]
        link = ('link' in action)
        num = 1
        jump = False
        if (self.current_num == 0):
            jump = True
            self.current_num = None
        elif (self.current_num is not None):
            num = self.current_num
            self.current_num = None
        self.view.adjust(direction, num, change, jump, link)
    def delete_typing_char(self, user_input, action):
        """Delete the last character of the query or label being typed."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if (self.current_mode[(- 1)] == 'write_query'):
            self.search_term = self.search_term[:(- 1)]
        else:
            self.partial_typing = self.partial_typing[:(- 1)]
    def leave_typing_mode(self, user_input, action):
        """Pop back out of query/label typing mode."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if (len(self.current_mode) > 1):
            self.current_mode.pop()
    def assign_text(self, user_input, action):
        """Leave typing mode and apply the typed text as the annotation at the cursor."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if (len(self.current_mode) > 1):
            self.current_mode.pop()
        self.datum.modify_annotation([self.view.cursor], self.partial_typing)
        self.partial_typing = ''
    def enter_typing_mode(self, user_input, action):
        """Push write_query or write_label onto the mode stack."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if ('query' in action):
            self.current_mode.append('write_query')
        else:
            self.current_mode.append('write_label')
            self.partial_typing = ''
    def clear_query(self, user_input, action):
        """Reset the search query."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        self.search_term = ''
    def add_to_typing(self, user_input, action):
        """Append the pressed key to the query or label being typed."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        char = user_input[0]
        if (user_input[0] == 'SPACE'):
            char = ' '
        if (self.current_mode[(- 1)] == 'write_query'):
            self.search_term += char
        else:
            self.partial_typing += char
    def change_file(self, user_input, action):
        """Save the current file (if any) and switch to the next/previous one."""
        if (self.current_mode[(- 1)] != 'no_file'):
            self.save_or_quit(None, 'save')
        direction = (1 if ('next' in action) else (- 1))
        if (self.current_mode[(- 1)] == 'no_file'):
            # Only leave the 'no_file' edge state when moving back toward the list.
            if ((self.cfilename < 0) == (direction > 0)):
                self.current_mode.pop()
                self.cfilename += direction
        elif (0 <= (self.cfilename + direction) < len(self.filenames)):
            self.cfilename += direction
            (self.filename, start_pos, output_file, annotation_files) = self.filenames[self.cfilename]
            self.datum = Datum(self.filename, self.config, output_file, annotation_files)
            self.get_view(self.config, self.cfilename, len(self.filenames), start_pos, self.view)
        # NOTE(review): this compares the mode *list* to a string, which is
        # always unequal for a list — likely meant self.current_mode[-1]; confirm.
        elif (self.current_mode != 'no_file'):
            self.cfilename += direction
            self.current_mode.append('no_file')
    def modify_display(self, user_input, action):
        """Toggle one of the auxiliary display panels."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if ('help' in action):
            self.view.toggle_help()
        elif ('progress' in action):
            self.view.toggle_progress()
        elif ('legend' in action):
            self.view.toggle_legend()
        elif ('current-mark' in action):
            self.view.toggle_current_mark()
    def shift_view(self, user_input, action):
        """Page the view up or down."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if ('up' in action):
            self.view.shift_view()
        else:
            self.view.shift_view(True)
    def update_number(self, user_input, action):
        """Accumulate a decimal repeat-count prefix from digit key presses."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        num = int(user_input[0])
        if (self.current_num is None):
            self.current_num = 0
        else:
            self.current_num *= 10
        self.current_num += num
    def remove_annotation(self, user_input, action):
        """Remove the annotation at the cursor (or linking position in link mode)."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if (self.current_mode[(- 1)] != 'read'):
            spans = [self.view.cursor]
            if (self.current_mode[(- 1)] == 'link'):
                spans = [self.view.linking_pos]
            self.datum.remove_annotation(spans)
    def edit_annotation(self, user_input, action):
        """In category mode, set the label bound to the pressed key."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        if (self.current_mode[(- 1)] == 'category'):
            label = self.config.get_label_for_input(user_input)
            self.datum.modify_annotation([self.view.cursor], label)
    def create_link(self, user_input, action):
        """Link the cursor position to the linking position, optionally advancing."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        self.datum.modify_annotation([self.view.cursor, self.view.linking_pos])
        if ('and-move' in action):
            if (self.config.annotation == 'line'):
                self.view.move('down', 1, False, True)
                self.view.put_cursor_beside_link()
            else:
                self.view.move('right', 1, False, True)
                self.view.put_cursor_beside_link()
            self.view.must_show_linking_pos = True
    def save_or_quit(self, user_input, action):
        """Write out annotations and/or signal the main loop to quit.

        Returns 'quit' when the loop should exit, otherwise None.
        """
        if ('save' in action):
            if (self.current_mode[(- 1)] != 'read'):
                self.datum.write_out()
            # Remember the current position so the file can be resumed later.
            if (0 <= self.cfilename < len(self.filenames)):
                cur = self.filenames[self.cfilename]
                pos = self.view.cursor
                if (self.config.annotation_type == 'link'):
                    pos = self.view.linking_pos
                self.filenames[self.cfilename] = (cur[0], pos, cur[2], cur[3])
        if ('quit' in action):
            # NOTE(review): the 'quit without save' branch is a no-op here —
            # presumably a placeholder for a confirmation prompt; confirm.
            if ('save' not in action):
                pass
            return 'quit'
    def search(self, user_input, action):
        """Search for the current query (or the default pattern when empty)."""
        if (self.current_mode[(- 1)] == 'no_file'):
            return
        direction = action.split('-')[(- 1)]
        jump = False
        link = ('link' in action)
        num = 1
        if (self.current_num == 0):
            jump = True
            self.current_num = None
        elif (self.current_num is not None):
            num = self.current_num
            self.current_num = None
        if (len(self.search_term) > 0):
            self.view.search(self.search_term, direction, num, jump, link)
        else:
            self.view.search(None, direction, num, jump, link)
    def input_to_symbol(self, num):
        """Translate a curses key code into a symbolic key name."""
        if (num in key_to_symbol):
            return key_to_symbol[num]
        else:
            return 'UNKNOWN'
    def get_view(self, config, file_num, total_files, position, prev_view=None):
        """(Re)create the View for the current datum at the given position."""
        cursor = position
        link = (position if (self.config.annotation_type == 'link') else None)
        self.view = View(self.window, cursor, link, self.datum, self.config, file_num, total_files, prev_view)
    def annotate(self, window_in):
        """Main curses loop: render, read keys, resolve them to actions, dispatch.

        Multi-key sequences are accumulated in `user_input` until they match
        (or can no longer prefix) a binding. On exit, remaining progress is
        written to a '<log_prefix>.todo' file.
        """
        self.window = window_in
        curses.use_default_colors()
        for (num, fore, back) in COLORS:
            curses.init_pair(num, fore, back)
        curses.curs_set(0)
        self.cfilename = 0
        (self.filename, start_pos, output_file, annotation_files) = self.filenames[self.cfilename]
        self.datum = Datum(self.filename, self.config, output_file, annotation_files)
        self.get_view(self.config, self.cfilename, len(self.filenames), start_pos)
        if self.args.show_help:
            self.view.toggle_help()
        if self.args.show_progress:
            self.view.toggle_progress()
        if self.args.show_legend:
            self.view.toggle_legend()
        if self.args.show_mark:
            self.view.toggle_current_mark()
        # NOTE(review): last_num and at_end appear unused in this loop.
        last_num = None
        at_end = None
        nsteps = 0
        user_input = []
        while True:
            if (self.current_mode[(- 1)] == 'no_file'):
                self.view.render_edgecase((self.cfilename >= 0))
            else:
                tmp_term = self.search_term
                if (self.current_mode[(- 1)] == 'write_query'):
                    tmp_term = ('\\' + tmp_term)
                self.view.render(tmp_term, self.partial_typing)
                self.view.must_show_linking_pos = False
            ch = self.window.getch()
            next_user_input = self.input_to_symbol(ch)
            logging.debug('Input {} converted to {} in mode {}'.format(ch, next_user_input, self.current_mode))
            user_input.append(next_user_input)
            tuser_input = tuple(user_input)
            # If the accumulated sequence is neither a valid prefix nor a full
            # binding (mode-specific or global), restart from the latest key.
            if ((self.current_mode[(- 1)], tuser_input) not in self.config.valid_prefixes):
                if ((None, tuser_input) not in self.config.valid_prefixes):
                    if ((self.current_mode[(- 1)], tuser_input) not in self.config.input_to_action):
                        if ((None, tuser_input) not in self.config.input_to_action):
                            user_input = [next_user_input]
                            tuser_input = (next_user_input,)
            nsteps += 1
            # Periodic autosave while annotating categories.
            if (((nsteps % 100) == 0) and (self.current_mode[(- 1)] == 'category')):
                self.datum.write_out()
            action = None
            function = None
            # Mode-specific bindings take precedence over global (None) ones.
            if ((self.current_mode[(- 1)], tuser_input) in self.config.input_to_action):
                action = self.config.input_to_action[(self.current_mode[(- 1)], tuser_input)]
                if (action in self.action_to_function):
                    function = self.action_to_function[action]
            elif ((None, tuser_input) in self.config.input_to_action):
                action = self.config.input_to_action[(None, tuser_input)]
                if (action in self.action_to_function):
                    function = self.action_to_function[action]
            logging.debug('{} {} -> {} {}'.format(self.current_mode, tuser_input, action, function))
            if (function is not None):
                outcome = function(tuser_input, action)
                user_input = []
                if (outcome == 'quit'):
                    break
        self.window.clear()
        # Persist remaining work (file list with resume positions) for later.
        out_filename = (self.args.log_prefix + '.todo')
        out = open(out_filename, 'w')
        for (fname, start_pos, output_file, annotation_files) in self.filenames:
            parts = [fname, output_file, str(start_pos), ' '.join(annotation_files)]
            print(' '.join(parts), file=out)
        out.close()
class dataset_iitnn(Dataset):
    """Paired two-modality image dataset, optionally with segmentation masks.

    Images in ``data_dir/input1`` and ``data_dir/input2`` are matched by file
    name; when ``sup`` is True a mask of the same name is loaded from
    ``data_dir/mask``. ``num_images`` truncates or (with replacement by
    repetition plus a random remainder) expands the file list.
    """

    def __init__(self, data_dir, input1, input2, augmentation1, normalize_1, normalize_2, sup=True, num_images=None, **kwargs):
        super(dataset_iitnn, self).__init__()
        dir_1 = data_dir + '/' + input1
        dir_2 = data_dir + '/' + input2
        paths_1, paths_2, paths_mask = [], [], []
        if sup:
            dir_mask = data_dir + '/mask'
        for name in os.listdir(dir_1):
            paths_1.append(os.path.join(dir_1, name))
            paths_2.append(os.path.join(dir_2, name))
            if sup:
                paths_mask.append(os.path.join(dir_mask, name))
        assert len(paths_1) == len(paths_2)
        if sup:
            assert len(paths_1) == len(paths_mask)
        if num_images is not None:
            total = len(paths_1)
            if num_images <= total:
                # Simple truncation.
                paths_1 = paths_1[:num_images]
                paths_2 = paths_2[:num_images]
            else:
                # Repeat the whole list and top up with a random remainder.
                quotient, remainder = divmod(num_images, total)
                extra = torch.randperm(total).tolist()[:remainder]
                paths_1 = paths_1 * quotient + [paths_1[i] for i in extra]
                paths_2 = paths_2 * quotient + [paths_2[i] for i in extra]
                if sup:
                    paths_mask = paths_mask * quotient + [paths_mask[i] for i in extra]
        self.img_paths_1 = paths_1
        self.img_paths_2 = paths_2
        self.mask_paths = paths_mask
        self.augmentation_1 = augmentation1
        self.normalize_1 = normalize_1
        self.normalize_2 = normalize_2
        self.sup = sup
        self.kwargs = kwargs

    def __getitem__(self, index):
        """Load, jointly augment, and normalize one image pair (plus mask if sup)."""
        path_1 = self.img_paths_1[index]
        img_1 = np.array(Image.open(path_1))
        img_2 = np.array(Image.open(self.img_paths_2[index]))
        if self.sup:
            mask_path = self.mask_paths[index]
            mask = np.array(Image.open(mask_path))
            aug = self.augmentation_1(image=img_1, image2=img_2, mask=mask)
            norm_1 = self.normalize_1(image=aug['image'], mask=aug['mask'])
            norm_2 = self.normalize_2(image=aug['image2'])
            return {
                'image': norm_1['image'],
                'image_2': norm_2['image'],
                'mask': norm_1['mask'].long(),
                'ID': os.path.split(mask_path)[1],
            }
        aug = self.augmentation_1(image=img_1, image2=img_2)
        norm_1 = self.normalize_1(image=aug['image'])
        norm_2 = self.normalize_2(image=aug['image2'])
        return {
            'image': norm_1['image'],
            'image_2': norm_2['image'],
            'ID': os.path.split(path_1)[1],
        }

    def __len__(self):
        return len(self.img_paths_1)
def describe_graph(graph: str, expert_description='an expert statistician and data scientist.', y_axis_description='', special_task_description='', dataset_description='', include_assistant_response=True):
    """Build a guidance-template prompt asking an LLM to describe a GAM graph.

    Args:
        graph: string representation of the graph (JSON-like mapping of
            x-axis keys to y-axis values plus confidence bounds).
        expert_description: persona text inserted after "You are ".
        y_axis_description: optional extra sentence explaining the y-axis.
        special_task_description: optional extra system-prompt instructions.
        dataset_description: optional dataset summary; when non-empty it is
            sent as a separate user turn before the graph.
        include_assistant_response: when True, append a ``gen`` block so the
            model produces the ``graph_description`` variable.

    Returns:
        The assembled prompt string in guidance's {{#system}}/{{#user}} syntax.
    """
    has_dataset = (dataset_description is not None) and (len(dataset_description) > 0)
    # BUG FIX: the original gated the y-axis text on len(dataset_description),
    # so y_axis_description was silently dropped unless a dataset description
    # also happened to be provided.
    has_y_axis = (y_axis_description is not None) and (len(y_axis_description) > 0)
    graphs_intro = ('The user will first provide a general description of the dataset. Then you will be given graphs from the model, and the user will ask you questions about the graphs.' if has_dataset else 'You will be given graphs from the model, and the user will ask you questions about the graphs.')
    combine_sources = (', the data set description you were given, and your knowledge about the real world.' if has_dataset else ' and your knowledge about the real world.')
    prompt = ('{{#system~}}\n' + f'''You are {expert_description}
You interpret global explanations produced by a generalized additive model (GAM). GAMs produce explanations in the form of graphs that contain the effect of a specific input feature.
{graphs_intro}
Answer all questions to the best of your ability, combining both the data contained in the graph{combine_sources}
Graphs will be presented as a JSON object with keys representing the x-axis and values representing the y-axis. For continuous features, the keys are intervals that represent ranges where the function predicts the same value. For categorical features, each key represents a possible value that the feature can take. {(y_axis_description if has_y_axis else '')}
The user will provide graphs in the following format:
- The name of the feature depicted in the graph
- The type of the feature (continuous, categorical, or boolean)
- Mean values
- Lower bounds of confidence interval
- Upper bounds of confidence interval
{special_task_description}
''' + '{{~/system}}\n')
    if has_dataset:
        prompt += ('\n{{#user~}}\n' + dataset_description + '\n{{~/user}}\n\n{{#assistant~}}\nThanks for this general description of the data set. Please continue and provide more information, for example about the graphs from the model.\n{{~/assistant}}\n')
    prompt += ('\n{{#user~}}\nConsider the following graph from the model. ' + graph + '\nPlease describe the general pattern of the graph.\n{{~/user}}\n\n')
    if include_assistant_response:
        prompt += "{{#assistant~}}{{gen 'graph_description' temperature=0.7 max_tokens=2000}}{{~/assistant}}"
    return prompt
def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'):
    """Initialize a module's weight with Kaiming init and its bias with a constant.

    Args:
        module: layer with a ``weight`` tensor (and optionally a ``bias``).
        mode: 'fan_in' or 'fan_out', forwarded to the Kaiming initializer.
        nonlinearity: activation name used for the gain computation.
        bias: constant value written into the bias tensor.
        distribution: 'uniform' or 'normal' variant of Kaiming init.
    """
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.kaiming_uniform_(module.weight, mode=mode, nonlinearity=nonlinearity)
    else:
        nn.init.kaiming_normal_(module.weight, mode=mode, nonlinearity=nonlinearity)
    # BUG FIX: layers built with bias=False still expose a `.bias` attribute
    # that is None, so hasattr() alone is not enough and constant_(None, ...)
    # would raise.
    if hasattr(module, 'bias') and module.bias is not None:
        nn.init.constant_(module.bias, bias)
def ibn_pre_conv1x1_block(in_channels, out_channels, stride=1, use_ibn=False, return_preact=False):
    """Build a 1x1 pre-activated IBN convolution block (no padding).

    Thin convenience wrapper around IBNPreConvBlock with kernel_size fixed
    to 1 and padding fixed to 0.
    """
    return IBNPreConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        use_ibn=use_ibn,
        return_preact=return_preact)
# NOTE(review): `_module()` here and `_fp32(...)` inside the class body look
# like mangled decorators (a leading `@` appears to have been lost, e.g.
# `@..._module()` / `@..._fp32(apply_to=...)`) — confirm against upstream;
# as written they are plain calls whose results are discarded.
_module()
class PISARetinaHead(RetinaHead):
    """RetinaHead variant trained with PISA-style loss re-weighting (ISR-P)
    and an optional classification-aware regression loss (CARL)."""
    _fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore=None):
        """Compute classification and box regression losses.

        Args:
            cls_scores: per-level classification feature maps.
            bbox_preds: per-level box regression feature maps.
            gt_bboxes: ground-truth boxes per image.
            gt_labels: ground-truth labels per image.
            img_metas: per-image meta information.
            gt_bboxes_ignore: boxes to ignore during target assignment.

        Returns:
            dict with 'loss_cls' and 'loss_bbox' (plus CARL terms when the
            train config enables them), or None if target computation fails.
        """
        featmap_sizes = [featmap.size()[(- 2):] for featmap in cls_scores]
        assert (len(featmap_sizes) == self.prior_generator.num_levels)
        device = cls_scores[0].device
        (anchor_list, valid_flag_list) = self.get_anchors(featmap_sizes, img_metas, device=device)
        label_channels = (self.cls_out_channels if self.use_sigmoid_cls else 1)
        cls_reg_targets = self.get_targets(anchor_list, valid_flag_list, gt_bboxes, img_metas, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, return_sampling_results=True)
        if (cls_reg_targets is None):
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
        num_total_samples = ((num_total_pos + num_total_neg) if self.sampling else num_total_pos)
        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
        # Concatenate anchors per image, then regroup them per pyramid level.
        concat_anchor_list = []
        for i in range(len(anchor_list)):
            concat_anchor_list.append(torch.cat(anchor_list[i]))
        all_anchor_list = images_to_levels(concat_anchor_list, num_level_anchors)
        num_imgs = len(img_metas)
        # Flatten all levels/images into (num_anchors_total, C) tensors so the
        # whole batch can be re-weighted and scored at once.
        flatten_cls_scores = [cls_score.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), label_channels) for cls_score in cls_scores]
        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).reshape((- 1), flatten_cls_scores[0].size((- 1)))
        flatten_bbox_preds = [bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, (- 1), 4) for bbox_pred in bbox_preds]
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1).view((- 1), flatten_bbox_preds[0].size((- 1)))
        flatten_labels = torch.cat(labels_list, dim=1).reshape((- 1))
        flatten_label_weights = torch.cat(label_weights_list, dim=1).reshape((- 1))
        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape((- 1), 4)
        flatten_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape((- 1), 4)
        flatten_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape((- 1), 4)
        # ISR-P: importance-based sample re-weighting of the targets,
        # computed without tracking gradients.
        isr_cfg = self.train_cfg.get('isr', None)
        if (isr_cfg is not None):
            all_targets = (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights)
            with torch.no_grad():
                all_targets = isr_p(flatten_cls_scores, flatten_bbox_preds, all_targets, flatten_anchors, sampling_results_list, bbox_coder=self.bbox_coder, loss_cls=self.loss_cls, num_class=self.num_classes, **self.train_cfg.isr)
            (flatten_labels, flatten_label_weights, flatten_bbox_targets, flatten_bbox_weights) = all_targets
        losses_cls = self.loss_cls(flatten_cls_scores, flatten_labels, flatten_label_weights, avg_factor=num_total_samples)
        losses_bbox = self.loss_bbox(flatten_bbox_preds, flatten_bbox_targets, flatten_bbox_weights, avg_factor=num_total_samples)
        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
        # CARL: classification-aware regression loss, added when configured.
        carl_cfg = self.train_cfg.get('carl', None)
        if (carl_cfg is not None):
            loss_carl = carl_loss(flatten_cls_scores, flatten_labels, flatten_bbox_preds, flatten_bbox_targets, self.loss_bbox, **self.train_cfg.carl, avg_factor=num_total_pos, sigmoid=True, num_class=self.num_classes)
            loss_dict.update(loss_carl)
        return loss_dict
def make_data_config(config: ml_collections.ConfigDict, mode: str, num_res: int) -> Tuple[(ml_collections.ConfigDict, List[str])]:
    """Specialize the data config for one mode and collect required feature names.

    Args:
        config: full data ConfigDict (contains `common`, `supervised`, and a
            per-mode section addressed by `mode`).
        mode: key of the mode section to specialize.
        num_res: number of residues, used as the crop size when the mode
            section does not set one.

    Returns:
        (deep-copied config with crop_size resolved, list of feature names).

    NOTE(review): `feature_names += ...` extends the copied config's own
    `common.unsupervised_features` list in place, so the returned cfg is
    mutated alongside the returned list — presumably intended (the input
    `config` itself is untouched thanks to the deepcopy), but verify.
    """
    cfg = copy.deepcopy(config)
    mode_cfg = cfg[mode]
    with cfg.unlocked():
        # Default the crop size to the full sequence length.
        if (mode_cfg.crop_size is None):
            mode_cfg.crop_size = num_res
    feature_names = cfg.common.unsupervised_features
    if cfg.common.use_templates:
        feature_names += cfg.common.template_features
    if cfg[mode].supervised:
        feature_names += cfg.supervised.supervised_features
    return (cfg, feature_names)
def default_auto_wrap_policy(module: nn.Module, recurse: bool, unwrapped_params: int, min_num_params: int=int(.0), force_leaf_modules: Optional[Set[Type[nn.Module]]]=None, exclude_wrap_modules: Optional[Set[Type[nn.Module]]]=None) -> bool:
    """Decide whether auto-wrap should recurse into / wrap ``module``.

    A module qualifies only when it holds at least ``min_num_params``
    unwrapped parameters. While recursing, instances of ``force_leaf_modules``
    are treated as leaves (not descended into); at wrap time, instances of
    ``exclude_wrap_modules`` are never wrapped. Unset sets fall back to the
    attributes stored on this function object.
    """
    if force_leaf_modules is None:
        force_leaf_modules = default_auto_wrap_policy.FORCE_LEAF_MODULES
    if exclude_wrap_modules is None:
        exclude_wrap_modules = default_auto_wrap_policy.EXCLUDE_WRAP_MODULES
    is_large = unwrapped_params >= min_num_params
    blocked_types = tuple(force_leaf_modules) if recurse else tuple(exclude_wrap_modules)
    return is_large and not isinstance(module, blocked_types)
def get_config(FLAGS):
    """Overlay parsed command-line flag values onto the module-level Config.

    Only attributes that Config already defines are overwritten; unknown
    flags are ignored. Returns the (mutated) Config object.
    """
    cfg = Config
    for name, flag in FLAGS.__flags.items():
        # Only copy flags that correspond to existing Config attributes.
        if hasattr(cfg, name):
            setattr(cfg, name, flag.value)
    return cfg
class DataLoader():
    """NYU-Depth-v2 training pipeline: reads (rgb_path, depth_path) rows from a
    CSV file and exposes a shuffled, batched ``tf.data`` dataset."""

    def __init__(self, csv_file='data/nyu2_train.csv', DEBUG=False):
        self.shape_rgb = (480, 640, 3)    # target RGB shape (H, W, C)
        self.shape_depth = (240, 320, 1)  # target depth-map shape (H, W, C)
        self.read_nyu_data(csv_file, DEBUG=DEBUG)

    def nyu_resize(self, img, resolution=480, padding=6):
        """Resize `img` to resolution x (4/3 * resolution), keeping the value range."""
        from skimage.transform import resize
        return resize(img, (resolution, int(((resolution * 4) / 3))), preserve_range=True, mode='reflect', anti_aliasing=True)

    def read_nyu_data(self, csv_file, DEBUG=False):
        """Load and shuffle the (image, depth-label) path pairs from `csv_file`.

        DEBUG restricts the dataset to the first 10 shuffled samples.
        """
        # BUG FIX: the original `open(csv_file, 'r').read()` leaked the file
        # handle; a context manager closes it deterministically.
        with open(csv_file, 'r') as f:
            csv_text = f.read()
        nyu2_train = [row.split(',') for row in csv_text.split('\n') if len(row) > 0]
        # Deterministic shuffle so repeated runs see the same ordering.
        nyu2_train = shuffle(nyu2_train, random_state=0)
        if DEBUG:
            nyu2_train = nyu2_train[:10]
        self.filenames = [i[0] for i in nyu2_train]
        self.labels = [i[1] for i in nyu2_train]
        self.length = len(self.filenames)

    def _parse_function(self, filename, label):
        """Decode one (rgb, depth) pair: resize depth and convert it to inverse range."""
        image_decoded = tf.image.decode_jpeg(tf.io.read_file(filename))
        depth_resized = tf.image.resize(tf.image.decode_jpeg(tf.io.read_file(label)), [self.shape_depth[0], self.shape_depth[1]])
        rgb = tf.image.convert_image_dtype(image_decoded, dtype=tf.float32)
        depth = tf.image.convert_image_dtype((depth_resized / 255.0), dtype=tf.float32)
        # Map depth to the inverse-depth target, clipped to [10, 1000].
        depth = (1000 / tf.clip_by_value((depth * 1000), 10, 1000))
        return (rgb, depth)

    def get_batched_dataset(self, batch_size):
        """Build an infinitely repeating, shuffled, batched tf.data pipeline."""
        self.dataset = tf.data.Dataset.from_tensor_slices((self.filenames, self.labels))
        self.dataset = self.dataset.shuffle(buffer_size=len(self.filenames), reshuffle_each_iteration=True)
        self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.map(map_func=self._parse_function, num_parallel_calls=tf.data.experimental.AUTOTUNE)
        self.dataset = self.dataset.batch(batch_size=batch_size)
        return self.dataset
def build_lr_scheduler(config, optimizer):
    """Build a warm-up learning-rate scheduler from a config dict.

    Args:
        config: dict with 'lr_type' (case-insensitive) plus optional
            'warmup_type', 'warmup_iters', 'warmup_factor' and, for step
            schedules, 'decay_step' / 'decay_factor'.
        optimizer: optimizer to attach the scheduler to.

    Returns:
        A FixedWarmUpLR, StepWarmUpLR, or EXPStepWarmUpLR instance.

    Raises:
        ValueError: if the scheduler type is not in _ALLOWED_LR_TYPES.
        NotImplementedError: if the type is allowed but not handled here.
    """
    assert isinstance(config, dict)
    lr_type = config['lr_type'].upper()
    warmup_type = config.get('warmup_type', 'NO')
    warmup_iters = config.get('warmup_iters', 0)
    warmup_factor = config.get('warmup_factor', 0.1)
    if lr_type not in _ALLOWED_LR_TYPES:
        # BUG FIX: the original message was missing the space between
        # sentences ("...`!Allowed types...").
        raise ValueError(f'Invalid learning rate scheduler type `{lr_type}`! Allowed types: {_ALLOWED_LR_TYPES}.')
    if lr_type == 'FIXED':
        return FixedWarmUpLR(optimizer=optimizer, warmup_type=warmup_type, warmup_iters=warmup_iters, warmup_factor=warmup_factor)
    if lr_type == 'STEP':
        return StepWarmUpLR(optimizer=optimizer, decay_step=config['decay_step'], decay_factor=config.get('decay_factor', 0.1), warmup_type=warmup_type, warmup_iters=warmup_iters, warmup_factor=warmup_factor)
    if lr_type == 'EXPSTEP':
        return EXPStepWarmUpLR(optimizer=optimizer, decay_step=config['decay_step'], decay_factor=config.get('decay_factor', 0.1), warmup_type=warmup_type, warmup_iters=warmup_iters, warmup_factor=warmup_factor)
    raise NotImplementedError(f'Not implemented scheduler type `{lr_type}`!')
def add_jitter(models, sd=0.1):
    """Jitter each model's kernel in place (via add_jitter_k) and return the list."""
    for model in models:
        model.kernel = add_jitter_k([model.kernel], sd=sd)[0]
    return models
class _SimpleSegmentationModel(nn.Module):
    """Segmentation model: backbone features pass through a boundary learner
    and a classifier, then logits are upsampled back to the input size."""

    def __init__(self, backbone, classifier, im_num, ex_num):
        super(_SimpleSegmentationModel, self).__init__()
        self.backbone = backbone
        self.classifier = classifier
        self.bat_low = _bound_learner(hidden_features=128, im_num=im_num, ex_num=ex_num)

    def forward(self, x):
        """Return (upsampled logits, point_pre1, point_pre2, point_pre3)."""
        original_size = x.shape[-2:]
        feats = self.backbone(x)
        # The boundary learner refines features and emits three point predictions.
        feats, point_pre1, point_pre2, point_pre3 = self.bat_low(feats)
        logits = self.classifier(feats)
        logits = F.interpolate(logits, size=original_size, mode='bilinear', align_corners=False)
        return logits, point_pre1, point_pre2, point_pre3
def build_and_train(slot_affinity_code, log_dir, run_ID, config_key):
    """Assemble sampler/algo/agent/runner from config and launch DQN training."""
    affinity = affinity_from_code(slot_affinity_code)
    variant = load_variant(log_dir)
    config = update_config(configs[config_key], variant)
    # The evaluation env always plays the same game as the training env.
    config['eval_env']['game'] = config['env']['game']
    sampler = AsyncSerialSampler(
        EnvCls=AtariEnv,
        env_kwargs=config['env'],
        CollectorCls=DbCpuResetCollector,
        TrajInfoCls=AtariTrajInfo,
        eval_env_kwargs=config['eval_env'],
        **config['sampler'],
    )
    algo = DQN(optim_kwargs=config['optim'], **config['algo'])
    agent = AtariDqnAgent(model_kwargs=config['model'], **config['agent'])
    runner = AsyncRlEval(algo=algo, agent=agent, sampler=sampler, affinity=affinity, **config['runner'])
    experiment_name = 'async_serial_' + config['env']['game']
    with logger_context(log_dir, run_ID, experiment_name, config):
        runner.train()
class BaseDataset:
    """Abstract dataset interface; subclasses must implement :meth:`get_pair`."""

    def __init__(self):
        pass

    def get_pair(self, cls, shuffle):
        """Return a sample pair for class *cls*; must be overridden by subclasses."""
        raise NotImplementedError
def learn(env, policy_func, reward_giver, expert_dataset, rank, pretrained, pretrained_weight, *, g_step, d_step, entcoeff, save_per_iter, ckpt_dir, log_dir, timesteps_per_batch, task_name, gamma, lam, max_kl, cg_iters, cg_damping=0.01, vf_stepsize=0.0003, d_stepsize=0.0003, vf_iters=3, max_timesteps=0, max_episodes=0, max_iters=0, callback=None):
    """GAIL training loop: alternates TRPO policy/value updates (``g_step``
    per iteration) with discriminator (``reward_giver``) updates
    (``d_step`` minibatch splits), synchronized over MPI workers.

    Exactly one of ``max_timesteps`` / ``max_episodes`` / ``max_iters``
    must be positive; it determines the stopping criterion.
    """
    from contextlib import contextmanager
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    ob_space = env.observation_space
    ac_space = env.action_space
    # BUG FIX: '!= None' replaced by 'is not None' (identity check for None).
    pi = policy_func('pi', ob_space, ac_space, reuse=(pretrained_weight is not None))
    oldpi = policy_func('oldpi', ob_space, ac_space)
    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # advantage estimates
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # empirical returns
    ob = U.get_placeholder_cached(name='ob')
    ac = pi.pdtype.sample_placeholder([None])
    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = (entcoeff * meanent)
    vferr = tf.reduce_mean(tf.square((pi.vpred - ret)))
    # Importance-sampling ratio pi_new / pi_old for the surrogate objective.
    ratio = tf.exp((pi.pd.logp(ac) - oldpi.pd.logp(ac)))
    surrgain = tf.reduce_mean((ratio * atarg))
    optimgain = (surrgain + entbonus)
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ['optimgain', 'meankl', 'entloss', 'surrgain', 'entropy']
    dist = meankl
    all_var_list = pi.get_trainable_variables()
    var_list = [v for v in all_var_list if (v.name.startswith('pi/pol') or v.name.startswith('pi/logstd'))]
    vf_var_list = [v for v in all_var_list if v.name.startswith('pi/vff')]
    assert (len(var_list) == (len(vf_var_list) + 1))
    d_adam = MpiAdam(reward_giver.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)
    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name='flat_tan')
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:(start + sz)], shape))
        start += sz
    # Fisher-vector product: gradient of (KL-gradient . tangent).
    gvp = tf.add_n([tf.reduce_sum((g * tangent)) for (g, tangent) in zipsame(klgrads, tangents)])
    fvp = U.flatgrad(gvp, var_list)
    assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], (losses + [U.flatgrad(optimgain, var_list)]))
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        # BUG FIX: this generator is used via `with timed(...)` below, which
        # only works when wrapped by contextlib.contextmanager; a bare
        # generator has no __enter__/__exit__ and would raise at runtime.
        if (rank == 0):
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            (yield)
            print(colorize(('done in %.3f seconds' % (time.time() - tstart)), color='magenta'))
        else:
            (yield)

    def allmean(x):
        # Average an array elementwise across all MPI workers.
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out
    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    if (rank == 0):
        print('Init param sum', th_init.sum(), flush=True)
    seg_gen = traj_segment_generator(pi, env, reward_giver, timesteps_per_batch, stochastic=True)
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling episode lengths
    rewbuffer = deque(maxlen=40)  # rolling discriminator rewards
    true_rewbuffer = deque(maxlen=40)  # rolling true environment rewards
    # Exactly one stopping criterion must be active.
    assert (sum([(max_iters > 0), (max_timesteps > 0), (max_episodes > 0)]) == 1)
    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(reward_giver.loss_name)
    ep_stats = stats(['True_rewards', 'Rewards', 'Episode_length'])
    if (pretrained_weight is not None):
        U.load_state(pretrained_weight, var_list=pi.get_variables())
    while True:
        if callback:
            callback(locals(), globals())
        if (max_timesteps and (timesteps_so_far >= max_timesteps)):
            break
        elif (max_episodes and (episodes_so_far >= max_episodes)):
            break
        elif (max_iters and (iters_so_far >= max_iters)):
            break
        if ((rank == 0) and ((iters_so_far % save_per_iter) == 0) and (ckpt_dir is not None)):
            fname = os.path.join(ckpt_dir, task_name)
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            saver = tf.train.Saver()
            saver.save(tf.get_default_session(), fname)
        logger.log((' Iteration %i ' % iters_so_far))

        def fisher_vector_product(p):
            # Damped Fisher-vector product, averaged over workers.
            return (allmean(compute_fvp(p, *fvpargs)) + (cg_damping * p))
        logger.log('Optimizing Policy...')
        for _ in range(g_step):
            with timed('sampling'):
                seg = seg_gen.__next__()
            add_vtarg_and_adv(seg, gamma, lam)
            (ob, ac, atarg, tdlamret) = (seg['ob'], seg['ac'], seg['adv'], seg['tdlamret'])
            vpredbefore = seg['vpred']
            # Standardize advantages over the batch.
            atarg = ((atarg - atarg.mean()) / atarg.std())
            if hasattr(pi, 'ob_rms'):
                pi.ob_rms.update(ob)
            args = (seg['ob'], seg['ac'], atarg)
            fvpargs = [arr[::5] for arr in args]  # subsample for cheaper FVPs
            assign_old_eq_new()
            with timed('computegrad'):
                (*lossbefore, g) = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log('Got zero gradient. not updating')
            else:
                with timed('cg'):
                    stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=(rank == 0))
                assert np.isfinite(stepdir).all()
                shs = (0.5 * stepdir.dot(fisher_vector_product(stepdir)))
                lm = np.sqrt((shs / max_kl))
                fullstep = (stepdir / lm)
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                # Backtracking line search subject to the KL trust region.
                for _ in range(10):
                    thnew = (thbefore + (fullstep * stepsize))
                    set_from_flat(thnew)
                    meanlosses = (surr, kl, *_) = allmean(np.array(compute_losses(*args)))
                    improve = (surr - surrbefore)
                    logger.log(('Expected: %.3f Actual: %.3f' % (expectedimprove, improve)))
                    if (not np.isfinite(meanlosses).all()):
                        logger.log('Got non-finite value of losses -- bad!')
                    elif (kl > (max_kl * 1.5)):
                        logger.log('violated KL constraint. shrinking step.')
                    elif (improve < 0):
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log('Stepsize OK!')
                        break
                    stepsize *= 0.5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if ((nworkers > 1) and ((iters_so_far % 20) == 0)):
                    # Periodic sanity check: all workers must hold identical params.
                    paramsums = MPI.COMM_WORLD.allgather((thnew.sum(), vfadam.getflat().sum()))
                    assert all((np.allclose(ps, paramsums[0]) for ps in paramsums[1:]))
            with timed('vf'):
                for _ in range(vf_iters):
                    for (mbob, mbret) in dataset.iterbatches((seg['ob'], seg['tdlamret']), include_final_partial_batch=False, batch_size=128):
                        if hasattr(pi, 'ob_rms'):
                            pi.ob_rms.update(mbob)
                        g = allmean(compute_vflossandgrad(mbob, mbret))
                        vfadam.update(g, vf_stepsize)
        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular('ev_tdlam_before', explained_variance(vpredbefore, tdlamret))
        logger.log('Optimizing Discriminator...')
        logger.log(fmt_row(13, reward_giver.loss_name))
        (ob_expert, ac_expert) = expert_dataset.get_next_batch(len(ob))
        batch_size = (len(ob) // d_step)
        d_losses = []
        for (ob_batch, ac_batch) in dataset.iterbatches((ob, ac), include_final_partial_batch=False, batch_size=batch_size):
            (ob_expert, ac_expert) = expert_dataset.get_next_batch(len(ob_batch))
            if hasattr(reward_giver, 'obs_rms'):
                reward_giver.obs_rms.update(np.concatenate((ob_batch, ob_expert), 0))
            (*newlosses, g) = reward_giver.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert)
            d_adam.update(allmean(g), d_stepsize)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
        lrlocal = (seg['ep_lens'], seg['ep_rets'], seg['ep_true_rets'])
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)
        (lens, rews, true_rets) = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)
        logger.record_tabular('EpLenMean', np.mean(lenbuffer))
        logger.record_tabular('EpRewMean', np.mean(rewbuffer))
        logger.record_tabular('EpTrueRewMean', np.mean(true_rewbuffer))
        logger.record_tabular('EpThisIter', len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1
        logger.record_tabular('EpisodesSoFar', episodes_so_far)
        logger.record_tabular('TimestepsSoFar', timesteps_so_far)
        logger.record_tabular('TimeElapsed', (time.time() - tstart))
        if (rank == 0):
            logger.dump_tabular()
def get_delta(pca, latent, idx, strength):
    """Return the latent-space offset that moves *latent* to coordinate
    *strength* along PCA component *idx*.

    `pca` is a dict with tensors 'mean', 'comp' (components) and 'std'
    (per-component standard deviations).  Generalized: PCA tensors are
    moved to *latent*'s device instead of the previously hard-coded
    'cuda', so CPU-only use now works; behavior is unchanged when
    *latent* already lives on CUDA.
    """
    device = latent.device
    w_centered = latent - pca['mean'].to(device)
    lat_comp = pca['comp'].to(device)
    lat_std = pca['std'].to(device)
    # Current coordinate of the centered latent along component idx,
    # in units of that component's standard deviation.
    w_coord = torch.sum(w_centered[0].reshape(-1) * lat_comp[idx].reshape(-1)) / lat_std[idx]
    # Offset that lands the coordinate exactly on `strength`.
    delta = (strength - w_coord) * lat_comp[idx] * lat_std[idx]
    return delta
def train(model, device, dataset, fold, restart, seed):
    """Fine-tune *model* on a COCO fold with 5-shot episodes and periodic validation."""
    # Backbone parameters (minus the terminal modules) get a much smaller LR
    # than the newly added parameters.
    backbone_params = {p for p in model.learner.image_encoder.parameters()}
    backbone_params -= {p for p in model.learner.image_encoder.terminal_module_dict.parameters()}
    new_params = {p for p in model.parameters()} - backbone_params
    param_groups = [
        {'params': [p for p in new_params if p.requires_grad]},
        {'params': [p for p in backbone_params if p.requires_grad], 'lr': 1e-06},
    ]
    optimizer = optim.AdamW(param_groups, lr=5e-05, weight_decay=0.001)
    # Constant LR for the first 30 epochs, then a single 10x decay.
    lr_sched = optim.lr_scheduler.LambdaLR(optimizer, lambda n: 1.0 if n <= 30 else 0.1)
    num_epochs = 40
    normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_transform = Compose([RandomHorizontalFlip(), Resize((512, 512)), ToTensor(), normalize])
    val_transform = Compose([Resize((512, 512)), ToTensor(), normalize])
    split_dir = os.path.join(config['workspace_path'], 'data_splits', 'coco')
    training_data = coco.DatasetCOCO(datapath=config['coco_path'], fold=fold, transform=train_transform, split='train', shot=5, mode='training', data_list_path=split_dir, class_balance=False)
    validation_data = coco.DatasetCOCO(datapath=config['coco_path'], fold=fold, transform=val_transform, split='val', shot=5, mode='training', data_list_path=split_dir, class_balance=False)
    print('Loaded training set with', len(training_data), 'samples')
    print('Loaded validation set with', len(validation_data), 'samples')
    # Fixed-size epochs via sampling with replacement.
    training_sampler = torch.utils.data.RandomSampler(training_data, num_samples=8000, replacement=True)
    validation_sampler = torch.utils.data.RandomSampler(validation_data, num_samples=2000, replacement=True)
    training_loader = DataLoader(training_data, sampler=training_sampler, batch_size=8, num_workers=16)
    validation_loader = DataLoader(validation_data, sampler=validation_sampler, batch_size=20, num_workers=16)
    run_name = f'{os.path.splitext(os.path.basename(__file__))[0]}_{dataset}_{fold}_{seed}'
    trainer = fss_trainer.FSSTrainer(model=model, optimizer=optimizer, lr_sched=lr_sched, train_loader=training_loader, val_loaders=[validation_loader], checkpoint_path=config['checkpoint_path'], visualization_path=os.path.join(config['visualization_path']), save_name=run_name, device=device, checkpoint_epochs=[num_epochs], print_interval=100, visualization_epochs=[2, num_epochs])
    if not restart:
        trainer.load_checkpoint()
    trainer.train(num_epochs)
class InducedNormConv2d(nn.Module):
    """2-D convolution whose weight is rescaled to keep its induced operator
    norm below ``coeff`` (Lipschitz-constrained conv).

    The norm between the ``domain``/``codomain`` spaces is estimated by
    power iteration with vectors ``u`` (output side) and ``v`` (input
    side), stored as buffers.  ``domain``/``codomain`` may be learnable
    tensors, in which case they are squashed through ``asym_squash``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, bias=True, coeff=0.97, domain=2, codomain=2, n_iterations=None, atol=None, rtol=None, **unused_kwargs):
        del unused_kwargs
        super(InducedNormConv2d, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.coeff = coeff
        self.n_iterations = n_iterations
        self.domain = domain
        self.codomain = codomain
        self.atol = atol
        self.rtol = rtol
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        # Power-iteration state.  `u`/`v` are resized lazily once the input
        # spatial dims are known (see forward / _initialize_u_v).
        self.register_buffer('initialized', torch.tensor(0))
        self.register_buffer('spatial_dims', torch.tensor([1.0, 1.0]))
        self.register_buffer('scale', torch.tensor(0.0))
        self.register_buffer('u', self.weight.new_empty(self.out_channels))
        self.register_buffer('v', self.weight.new_empty(self.in_channels))

    def compute_domain_codomain(self):
        """Return (domain, codomain), squashing learnable tensor orders into range."""
        if torch.is_tensor(self.domain):
            domain = asym_squash(self.domain)
            codomain = asym_squash(self.codomain)
        else:
            (domain, codomain) = (self.domain, self.codomain)
        return (domain, codomain)

    def reset_parameters(self):
        """Standard Kaiming init for the weight, uniform fan-in init for the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if (self.bias is not None):
            (fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
            bound = (1 / math.sqrt(fan_in))
            init.uniform_(self.bias, (- bound), bound)

    def _initialize_u_v(self):
        """Initialize power-iteration vectors; for non-(2,2) norms keep the best
        of several random restarts (the estimate is a lower bound, so larger
        scale means a tighter estimate)."""
        with torch.no_grad():
            (domain, codomain) = self.compute_domain_codomain()
            if (self.kernel_size == (1, 1)):
                self.u.resize_(self.out_channels).normal_(0, 1)
                self.u.copy_(normalize_u(self.u, codomain))
                self.v.resize_(self.in_channels).normal_(0, 1)
                self.v.copy_(normalize_v(self.v, domain))
            else:
                (c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
                with torch.no_grad():
                    num_input_dim = ((c * h) * w)
                    self.v.resize_(num_input_dim).normal_(0, 1)
                    self.v.copy_(normalize_v(self.v, domain))
                    # One forward pass determines the flattened output size.
                    u = F.conv2d(self.v.view(1, c, h, w), self.weight, stride=self.stride, padding=self.padding, bias=None)
                    num_output_dim = (((u.shape[0] * u.shape[1]) * u.shape[2]) * u.shape[3])
                    self.u.resize_(num_output_dim).normal_(0, 1)
                    self.u.copy_(normalize_u(self.u, codomain))
            self.initialized.fill_(1)
            self.compute_weight(True)
            best_scale = self.scale.clone()
            (best_u, best_v) = (self.u.clone(), self.v.clone())
            if (not ((domain == 2) and (codomain == 2))):
                for _ in range(10):
                    if (self.kernel_size == (1, 1)):
                        self.u.copy_(normalize_u(self.weight.new_empty(self.out_channels).normal_(0, 1), codomain))
                        self.v.copy_(normalize_v(self.weight.new_empty(self.in_channels).normal_(0, 1), domain))
                    else:
                        self.u.copy_(normalize_u(torch.randn(num_output_dim).to(self.weight), codomain))
                        self.v.copy_(normalize_v(torch.randn(num_input_dim).to(self.weight), domain))
                    self.compute_weight(True, n_iterations=200)
                    if (self.scale > best_scale):
                        # BUG FIX: track the running best; previously
                        # best_scale was never updated, so a later restart
                        # could overwrite a better (u, v) pair.
                        best_scale = self.scale.clone()
                        (best_u, best_v) = (self.u.clone(), self.v.clone())
                self.u.copy_(best_u)
                self.v.copy_(best_v)

    def compute_one_iter(self):
        """Run a single detached power iteration and return the current
        norm estimate u^T W v (used for monitoring/learnable-order grads)."""
        if (not self.initialized):
            raise ValueError('Layer needs to be initialized first.')
        (domain, codomain) = self.compute_domain_codomain()
        if (self.kernel_size == (1, 1)):
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach().view(self.out_channels, self.in_channels)
            u = normalize_u(torch.mv(weight, v), codomain)
            v = normalize_v(torch.mv(weight.t(), u), domain)
            return torch.dot(u, torch.mv(weight, v))
        else:
            u = self.u.detach()
            v = self.v.detach()
            weight = self.weight.detach()
            (c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
            u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            out_shape = u_s.shape
            u = normalize_u(u_s.view((- 1)), codomain)
            # conv_transpose2d applies the adjoint operator W^T.
            v_s = F.conv_transpose2d(u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0)
            v = normalize_v(v_s.view((- 1)), domain)
            weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
            return torch.dot(u.view((- 1)), weight_v.view((- 1)))

    def compute_weight(self, update=True, n_iterations=None, atol=None, rtol=None):
        """Return the norm-constrained weight, optionally refreshing u/v."""
        if (not self.initialized):
            self._initialize_u_v()
        if (self.kernel_size == (1, 1)):
            return self._compute_weight_1x1(update, n_iterations, atol, rtol)
        else:
            return self._compute_weight_kxk(update, n_iterations, atol, rtol)

    def _compute_weight_1x1(self, update=True, n_iterations=None, atol=None, rtol=None):
        n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
        atol = (self.atol if (atol is None) else atol)
        # BUG FIX: the fallback previously read `else atol` (copy-paste typo),
        # silently substituting the absolute tolerance for the relative one.
        rtol = (self.rtol if (rtol is None) else rtol)
        if ((n_iterations is None) and ((atol is None) or (rtol is None))):
            raise ValueError('Need one of n_iteration or (atol, rtol).')
        max_itrs = 200
        if (n_iterations is not None):
            max_itrs = n_iterations
        u = self.u
        v = self.v
        weight = self.weight.view(self.out_channels, self.in_channels)
        if update:
            with torch.no_grad():
                (domain, codomain) = self.compute_domain_codomain()
                itrs_used = 0
                for _ in range(max_itrs):
                    old_v = v.clone()
                    old_u = u.clone()
                    u = normalize_u(torch.mv(weight, v), codomain, out=u)
                    v = normalize_v(torch.mv(weight.t(), u), domain, out=v)
                    itrs_used = (itrs_used + 1)
                    if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
                        # Stop when both vectors have converged within tolerance.
                        err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
                        err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
                        tol_u = (atol + (rtol * torch.max(u)))
                        tol_v = (atol + (rtol * torch.max(v)))
                        if ((err_u < tol_u) and (err_v < tol_v)):
                            break
                if (itrs_used > 0):
                    # For p in {1, 2} (resp. {2, inf}) the normalized vector is
                    # determined analytically, so only persist otherwise.
                    if ((domain != 1) and (domain != 2)):
                        self.v.copy_(v)
                    if ((codomain != 2) and (codomain != float('inf'))):
                        self.u.copy_(u)
                    u = u.clone()
                    v = v.clone()
        sigma = torch.dot(u, torch.mv(weight, v))
        with torch.no_grad():
            self.scale.copy_(sigma)
        # Only shrink, never grow: divide by max(1, sigma / coeff).
        factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
        weight = (weight / factor)
        return weight.view(self.out_channels, self.in_channels, 1, 1)

    def _compute_weight_kxk(self, update=True, n_iterations=None, atol=None, rtol=None):
        n_iterations = (self.n_iterations if (n_iterations is None) else n_iterations)
        atol = (self.atol if (atol is None) else atol)
        # BUG FIX: same `else atol` copy-paste typo as in _compute_weight_1x1.
        rtol = (self.rtol if (rtol is None) else rtol)
        if ((n_iterations is None) and ((atol is None) or (rtol is None))):
            raise ValueError('Need one of n_iteration or (atol, rtol).')
        max_itrs = 200
        if (n_iterations is not None):
            max_itrs = n_iterations
        u = self.u
        v = self.v
        weight = self.weight
        (c, h, w) = (self.in_channels, int(self.spatial_dims[0].item()), int(self.spatial_dims[1].item()))
        if update:
            with torch.no_grad():
                (domain, codomain) = self.compute_domain_codomain()
                itrs_used = 0
                for _ in range(max_itrs):
                    old_u = u.clone()
                    old_v = v.clone()
                    u_s = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
                    out_shape = u_s.shape
                    u = normalize_u(u_s.view((- 1)), codomain, out=u)
                    # conv_transpose2d applies the adjoint operator W^T.
                    v_s = F.conv_transpose2d(u.view(out_shape), weight, stride=self.stride, padding=self.padding, output_padding=0)
                    v = normalize_v(v_s.view((- 1)), domain, out=v)
                    itrs_used = (itrs_used + 1)
                    if ((n_iterations is None) and (atol is not None) and (rtol is not None)):
                        err_u = (torch.norm((u - old_u)) / (u.nelement() ** 0.5))
                        err_v = (torch.norm((v - old_v)) / (v.nelement() ** 0.5))
                        tol_u = (atol + (rtol * torch.max(u)))
                        tol_v = (atol + (rtol * torch.max(v)))
                        if ((err_u < tol_u) and (err_v < tol_v)):
                            break
                if (itrs_used > 0):
                    if (domain != 2):
                        self.v.copy_(v)
                    if (codomain != 2):
                        self.u.copy_(u)
                    v = v.clone()
                    u = u.clone()
        weight_v = F.conv2d(v.view(1, c, h, w), weight, stride=self.stride, padding=self.padding, bias=None)
        weight_v = weight_v.view((- 1))
        sigma = torch.dot(u.view((- 1)), weight_v)
        with torch.no_grad():
            self.scale.copy_(sigma)
        # Only shrink, never grow: divide by max(1, sigma / coeff).
        factor = torch.max(torch.ones(1).to(weight.device), (sigma / self.coeff))
        weight = (weight / factor)
        return weight

    def forward(self, input):
        if (not self.initialized):
            # First call: record the input spatial size; u/v are sized from it.
            self.spatial_dims.copy_(torch.tensor(input.shape[2:4]).to(self.spatial_dims))
        weight = self.compute_weight(update=False)
        return F.conv2d(input, weight, self.bias, self.stride, self.padding, 1, 1)

    def extra_repr(self):
        (domain, codomain) = self.compute_domain_codomain()
        s = '{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}'
        if (self.padding != ((0,) * len(self.padding))):
            s += ', padding={padding}'
        if (self.bias is None):
            s += ', bias=False'
        s += ', coeff={}, domain={:.2f}, codomain={:.2f}, n_iters={}, atol={}, rtol={}, learnable_ord={}'.format(self.coeff, domain, codomain, self.n_iterations, self.atol, self.rtol, torch.is_tensor(self.domain))
        return s.format(**self.__dict__)
def flatten_observation_spaces(observation_spaces, observation_excluded=()):
    """Flatten a Dict observation space into a single Box.

    Spaces named in *observation_excluded* are kept intact; if any are
    excluded, the result is a Dict space with the flat Box stored under
    the key 'other'.
    """
    if not isinstance(observation_excluded, (list, tuple)):
        observation_excluded = [observation_excluded]
    lows = []
    highs = []
    for key, space in observation_spaces.spaces.items():
        if key in observation_excluded:
            continue
        lows.append(np.asarray(space.low).flatten())
        highs.append(np.asarray(space.high).flatten())
    lower_bound = np.concatenate(lows)
    upper_bound = np.concatenate(highs)
    flat_space = spaces.Box(np.array(lower_bound), np.array(upper_bound), dtype=np.float32)
    if not observation_excluded:
        return flat_space
    combined = {'other': flat_space}
    for key in observation_excluded:
        combined[key] = observation_spaces[key]
    return spaces.Dict(combined)
class TestSequeneceGenerator(TestSequenceGeneratorBase):
    """Beam-search SequenceGenerator tests on the toy two-word setup.

    NOTE(review): the class name contains a typo ("Sequenece"); it is kept
    because renaming would change the public test identifier.
    """

    def setUp(self):
        (self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model) = test_utils.sequence_generator_setup()
        self.sample = {'net_input': {'src_tokens': src_tokens, 'src_lengths': src_lengths}}

    def test_with_normalization(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Without length normalization, longer hypotheses score lower, so the
        # ranking for sentence 1 flips relative to the normalized test.
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        lenpen = 0.6
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        lenpen = 5.0
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.forward(self.sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_encoder_with_different_output_len(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict)
        reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
        generator = SequenceGenerator([reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.forward(self.sample)
        for sent in [0, 1]:
            for beam in [0, 1]:
                assert (hypos[sent][beam]['attention'] is not None)

    def test_generation_with_additional_input(self):
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict)
        add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task)
        generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2)
        sample = self.sample.copy()
        # Copy net_input too: dict.copy() is shallow, so mutating the nested
        # dict would otherwise leak the extra key back into self.sample.
        sample['net_input'] = dict(sample['net_input'])
        sample['net_input']['fancy_other_input'] = sample['net_input']['src_tokens']
        # BUG FIX: pass the augmented `sample`; previously `self.sample` was
        # passed and the extra input only reached the model via dict aliasing.
        hypos = generator.forward(sample)
        (eos, w1, w2) = (self.tgt_dict.eos(), self.w1, self.w2)
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
def val(test_loader, model, epoch, save_path, writer):
    """Run one validation pass: compute mean absolute error over the test
    set, log it to TensorBoard, and checkpoint the model when a new best
    MAE is reached (globals `best_mae` / `best_epoch` track the record).
    """
    global best_mae, best_epoch
    model.eval()
    with torch.no_grad():
        mae_sum = 0
        for i in range(test_loader.size):
            (images, shorts, gt, name, scene) = test_loader.load_data()
            inputs = torch.cat([images, shorts], 2)
            preds = model(inputs)
            gt = np.asarray(gt, np.float32)
            gt /= (gt.max() + 1e-08)
            # BUG FIX: `res` was read before assignment -- the prediction is
            # in `preds`.  A leftover `images = [x.cuda() ...]` line (dead
            # code after `images` had already been consumed) was removed.
            # F.upsample is deprecated; F.interpolate is its replacement.
            res = F.interpolate(preds, size=gt.shape, mode='bilinear', align_corners=False)
            res = res.sigmoid().data.cpu().numpy().squeeze()
            # Min-max normalize the prediction before comparing with gt.
            res = ((res - res.min()) / ((res.max() - res.min()) + 1e-08))
            mae_sum += ((np.sum(np.abs((res - gt))) * 1.0) / (gt.shape[0] * gt.shape[1]))
        mae = (mae_sum / test_loader.size)
        writer.add_scalar('MAE', torch.tensor(mae), global_step=epoch)
        print('Epoch: {}, MAE: {}, bestMAE: {}, bestEpoch: {}.'.format(epoch, mae, best_mae, best_epoch))
        if (epoch == 1):
            best_mae = mae
        elif (mae < best_mae):
            best_mae = mae
            best_epoch = epoch
            torch.save(model.state_dict(), (save_path + 'Net_epoch_best.pth'))
            print('Save state_dict successfully! Best epoch:{}.'.format(epoch))
        logging.info('[Val Info]:Epoch:{} MAE:{} bestEpoch:{} bestMAE:{}'.format(epoch, mae, best_epoch, best_mae))
def check_labels(labels, estimator):
    """Validate and, if needed, encode *labels* for *estimator*.

    Classifier targets of string dtype are label-encoded; regression
    targets are coerced to float64.  Returns ``(labels, label_encoder)``
    where the encoder is ``None`` unless string labels were encoded.
    """
    label_encoder = None
    is_classifier = (estimator._estimator_type == 'classifier')
    if is_classifier:
        # String class names are mapped to integer codes.
        if np.issubdtype(type(labels[0]), np.str_):
            label_encoder = LabelEncoder()
            label_encoder.fit(labels)
            labels = label_encoder.transform(labels)
        y_type = type_of_target(labels)
        if y_type not in ('binary', 'multiclass'):
            raise ValueError('Unknown label type: %r' % y_type)
    elif type(labels[0]) not in (np.float32, np.float64):
        # Regression targets are coerced to a common float dtype.
        logger.info('The labels have been converted in float64')
        labels = labels.astype('float64')
    _assert_all_finite(labels)
    if is_classifier and len(np.unique(labels)) == 1:
        raise ValueError("Classifier can't train when only one class is present.")
    return (labels, label_encoder)
def test_nested_typechange(conf_scope):
    """A nested option changed from int to str is recorded in `typechanged`."""
    cfg = conf_scope({'f': {'a': 10}})
    # type('a') is str, so the recorded change is (str, int).
    expected = {'f.a': (str, int)}
    assert cfg.typechanged == expected
def image_label(loader, model, threshold=0.9, out_dir=None):
    """Write pseudo-labels for the target-domain set to
    ``out_dir/pseudo_label.txt`` and return that path.

    Each line is "<image path> <label>"; predictions whose max softmax
    probability is below *threshold* get label -1 (to be ignored later).
    """
    out_path = osp.join(out_dir, 'pseudo_label.txt')
    print('Pseudo Labeling to ', out_path)
    iter_label = iter(loader['target_label'])
    softmax = nn.Softmax(dim=1)  # hoisted out of the loop (stateless module)
    with torch.no_grad():
        with open(out_path, 'w') as f:
            for i in range(len(loader['target_label'])):
                # BUG FIX: `.next()` is the Python-2 iterator protocol and is
                # not available on modern DataLoader iterators; use next().
                (inputs, labels, paths) = next(iter_label)
                inputs = inputs.cuda()
                (_, outputs) = model(inputs)
                softmax_outputs = softmax(outputs)
                (maxpred, pseudo_labels) = torch.max(softmax_outputs, dim=1)
                # Mark low-confidence predictions with -1.
                pseudo_labels[(maxpred < threshold)] = (- 1)
                for (path, label) in zip(paths, pseudo_labels):
                    f.write((((path + ' ') + str(label.item())) + '\n'))
    return out_path
class TFDebertaModel(metaclass=DummyObject):
    """Import-time placeholder used when the TensorFlow backend is missing;
    any instantiation raises an informative error via `requires_backends`."""
    # Backends this dummy stands in for.
    _backends = ['tf']
    def __init__(self, *args, **kwargs):
        # Raises, telling the user to install the 'tf' backend.
        requires_backends(self, ['tf'])
class MultiEdgeGraphPairwiseFormatter(BaseGraphFormatter):
    """Formats a pairwise-comparison item into two multi-edge DGL graphs."""

    def __init__(self, config, name='MultiEdgeGraphPairwiseFormatter'):
        self.name = name
        self.disable_tqdm = config.disable_tqdm
        self.config = config
        BaseGraphFormatter.__init__(self, config, name)

    def format(self, item_json, vocab_dicts):
        """Build a datapoint holding the target plus one graph per item."""
        # Only the token and node vocabularies are needed here.
        (token_vd, node_vd, target_vd, word_vd) = vocab_dicts
        datapoint = self.datapoint_class()
        datapoint.tgt = item_json['target']
        datapoint.function_one_graph = self.get_graph(item_json['item_1'], token_vd, node_vd)
        datapoint.function_two_graph = self.get_graph(item_json['item_2'], token_vd, node_vd)
        return datapoint

    def get_graph(self, cur_item, token_vd, node_vd):
        """Convert the item's JS graph into a multi-edge DGL graph."""
        return self._convert_to_multi_edge_dglgraph(cur_item['jsgraph'], token_vd, node_vd)
class DecentralizedDistributedMixin():
    """Mixin adding decentralized-distributed (DD-PPO-style) training hooks."""
    def _get_advantages_distributed(self, rollouts: RolloutStorage) -> torch.Tensor:
        # Advantage = return - value prediction for every step but the last.
        advantages = (rollouts.returns[:(- 1)] - rollouts.value_preds[:(- 1)])
        if (not self.use_normalized_advantage):
            return advantages
        # Normalize using mean/variance aggregated across all workers.
        (mean, var) = distributed_mean_and_var(advantages)
        return ((advantages - mean) / (var.sqrt() + EPS_PPO))
    def init_distributed(self, find_unused_params: bool=True) -> None:
        """Wrap the actor-critic in DDP and swap in distributed advantage computation."""
        # Guard holds the DDP wrapper; keeping it alive keeps the gradient
        # allreduce hooks registered on the original model's parameters.
        class Guard():
            def __init__(self, model, device):
                if torch.cuda.is_available():
                    self.ddp = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)
                else:
                    self.ddp = torch.nn.parallel.DistributedDataParallel(model)
        self._ddp_hooks = Guard(self.actor_critic, self.device)
        self.get_advantages = self._get_advantages_distributed
        self.reducer = self._ddp_hooks.ddp.reducer
        self.find_unused_params = find_unused_params
    def before_backward(self, loss):
        super().before_backward(loss)
        # Tell the DDP reducer which outputs take part in this backward pass
        # so unused parameters do not stall the allreduce.
        if self.find_unused_params:
            self.reducer.prepare_for_backward([loss])
        else:
            self.reducer.prepare_for_backward([])
def prepare_keys_div2k(folder_path):
    """Return (image paths, keys) for the PNG files directly under *folder_path*.

    Keys are the image paths with the '.png' suffix stripped, in the same
    sorted order as the paths.
    """
    print('Reading image path list ...')
    # `sorted` already returns a list, and the second sort in the original
    # comprehension was redundant -- the list is sorted exactly once.
    img_path_list = sorted(scandir(folder_path, suffix='png', recursive=False))
    keys = [img_path.split('.png')[0] for img_path in img_path_list]
    return (img_path_list, keys)
class ReversibleSequence(nn.Module):
    """Stack of reversible (f, g) block pairs executed with O(1) activation memory."""

    def __init__(self, blocks, args_route=None):
        """`blocks` is an iterable of (f, g) module pairs; `args_route` maps
        kwarg names to per-block routing (empty by default).

        BUG FIX: the default was previously a mutable ``{}`` literal, which
        is shared across all instances; ``None`` plus a fresh dict avoids
        that pitfall while remaining backward compatible.
        """
        super().__init__()
        self.args_route = {} if args_route is None else args_route
        self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for (f, g) in blocks])

    def forward(self, x, **kwargs):
        # Reversible nets operate on an (x1, x2) pair; duplicate the input
        # along the last dim to form it.
        x = torch.cat([x, x], dim=(- 1))
        blocks = self.blocks
        args = route_args(self.args_route, kwargs, len(blocks))
        args = list(map((lambda x: {'f_args': x[0], 'g_args': x[1]}), args))
        out = _ReversibleFunction.apply(x, blocks, args)
        # Average the two halves back down to the original width.
        return torch.stack(out.chunk(2, dim=(- 1))).mean(dim=0)
class ResContextBlock(nn.Module):
    """Sparse-conv residual context block: a (1x3 -> 3x1) shortcut branch is
    added to a (3x1 -> 1x3) main branch; every conv is followed by
    LeakyReLU then BatchNorm1d on the sparse feature tensor."""

    def __init__(self, in_filters, out_filters, kernel_size=(3, 3, 3), stride=1, indice_key=None):
        super(ResContextBlock, self).__init__()
        shared_key = indice_key + 'bef'  # all convs reuse one indice key
        self.conv1 = conv1x3(in_filters, out_filters, indice_key=shared_key)
        self.bn0 = nn.BatchNorm1d(out_filters)
        self.act1 = nn.LeakyReLU()
        self.conv1_2 = conv3x1(out_filters, out_filters, indice_key=shared_key)
        self.bn0_2 = nn.BatchNorm1d(out_filters)
        self.act1_2 = nn.LeakyReLU()
        self.conv2 = conv3x1(in_filters, out_filters, indice_key=shared_key)
        self.act2 = nn.LeakyReLU()
        self.bn1 = nn.BatchNorm1d(out_filters)
        self.conv3 = conv1x3(out_filters, out_filters, indice_key=shared_key)
        self.act3 = nn.LeakyReLU()
        self.bn2 = nn.BatchNorm1d(out_filters)
        self.weight_initialization()

    def weight_initialization(self):
        # BatchNorm layers start out as identity (weight=1, bias=0).
        for module in self.modules():
            if isinstance(module, nn.BatchNorm1d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        # Shortcut branch: 1x3 conv then 3x1 conv.
        sc = self.conv1(x)
        sc.features = self.bn0(self.act1(sc.features))
        sc = self.conv1_2(sc)
        sc.features = self.bn0_2(self.act1_2(sc.features))
        # Main branch: 3x1 conv then 1x3 conv.
        out = self.conv2(x)
        out.features = self.bn1(self.act2(out.features))
        out = self.conv3(out)
        out.features = self.bn2(self.act3(out.features))
        # Residual addition on the sparse feature tensors.
        out.features = out.features + sc.features
        return out
class Device():
    """Simulates a federated-learning client device's compute/network behaviour.

    Class-level state: a shared Device_Util translator and an optional
    per-user network speed distribution loaded from 'speed_distri.json'.
    When the trace file is absent, every communication time collapses to 0.0s.
    """
    du = Device_Util()
    # guid -> {'down_u', 'down_sigma', 'up_u', 'up_sigma'} speed statistics.
    speed_distri = None
    try:
        with open('speed_distri.json', 'r') as f:
            speed_distri = json.load(f)
    except FileNotFoundError:
        speed_distri = None
        # logging.Logger.warn is deprecated; warning() is the supported name.
        logger.warning("no user's network speed trace was found, set all communication time to 0.0s")

    def __init__(self, cfg, model_size=0):
        """
        Args:
            cfg: experiment config; reads behav_hete, hard_hete, seed, model, dataset.
            model_size: model size in KB; stored internally divided by 1024.
        """
        self.device_model = None
        self.cfg = cfg
        self.sampled_train_time_per_batch = 0
        self.sampled_download_time = 0
        self.sampled_upload_time = 0
        # Number of per-batch timing samples averaged in set_device_model().
        self.sample_count = 10
        self.model_size = model_size / 1024
        # With neither behaviour nor hardware heterogeneity, communication is free.
        if (not cfg.behav_hete) and (not cfg.hard_hete):
            self.model_size = 0
        if Device.speed_distri is None:
            # No trace file: zero-cost communication with dummy unit speeds.
            self.model_size = 0
            self.upload_speed_u = 1.0
            self.upload_speed_sigma = 0.0
            self.download_speed_u = 1.0
            self.download_speed_sigma = 0.0
        else:
            if not cfg.hard_hete:
                # Deterministic pick so every device shares one speed profile.
                guid = list(Device.speed_distri.keys())[cfg.seed % len(Device.speed_distri)]
            else:
                guid = random.sample(list(Device.speed_distri.keys()), 1)[0]
            self.download_speed_u = Device.speed_distri[guid]['down_u']
            self.download_speed_sigma = Device.speed_distri[guid]['down_sigma']
            self.upload_speed_u = Device.speed_distri[guid]['up_u']
            self.upload_speed_sigma = Device.speed_distri[guid]['up_sigma']
        Device.du.set_model(cfg.model)
        Device.du.set_dataset(cfg.dataset)

    def set_device_model(self, real_device_model, client_id):
        """Bind a (translated) device model and sample its per-batch train time.

        Returns the raw list of sampled per-batch times; their mean is cached
        in self.sampled_train_time_per_batch.
        """
        device_train_times_per_batch = []
        self.device_model = Device.du.transfer(real_device_model)
        for _ in range(self.sample_count):
            device_train_times_per_batch.append(Device.du.get_train_time_per_batch(self.device_model))
        self.sampled_train_time_per_batch = np.mean(device_train_times_per_batch)
        return device_train_times_per_batch

    def set_device_model_weakDeviceToCertainClass(self, real_device_model, label):
        """Map an unknown/weak device to the profile of the given class label."""
        self.device_model = Device.du.unknown_weakDeviceToCertainClass(label)

    def get_upload_time(self):
        """Sample an upload time in seconds; rejection-samples negative speeds."""
        if self.model_size == 0.0:
            return 0.0
        upload_speed = np.random.normal(self.upload_speed_u, self.upload_speed_sigma)
        while upload_speed < 0:
            upload_speed = np.random.normal(self.upload_speed_u, self.upload_speed_sigma)
        return float(self.model_size / upload_speed)

    def get_download_time(self):
        """Sample a download time in seconds; rejection-samples negative speeds."""
        if self.model_size == 0.0:
            return 0.0
        download_speed = np.random.normal(self.download_speed_u, self.download_speed_sigma)
        while download_speed < 0:
            download_speed = np.random.normal(self.download_speed_u, self.download_speed_sigma)
        return float(self.model_size / download_speed)

    def get_expected_download_time(self):
        """Expected download time in seconds, using the mean speed.

        BUG FIX: the original `while download_speed < 0` loop re-assigned the
        same constant mean, so it could never terminate for a negative mean.
        The loop was a no-op for non-negative means and is removed.
        """
        if self.model_size == 0.0:
            return 0.0
        return float(self.model_size / self.download_speed_u)

    def get_expected_upload_time(self):
        """Expected upload time in seconds, using the mean speed (see download note)."""
        if self.model_size == 0.0:
            return 0.0
        return float(self.model_size / self.upload_speed_u)

    def get_train_time_and_train_time_per_batch(self, num_sample, batch_size, num_epoch):
        """Return (total train time, per-batch time); requires set_device_model() first."""
        assert self.device_model is not None, 'set_device_model must be called first'
        return Device.du.get_train_time_and_train_time_per_batch_and_train_time_per_epoch(self.device_model, num_sample, batch_size, num_epoch)[:(- 1)]

    def get_train_time_and_train_time_per_batch_and_train_time_per_epoch(self, num_sample, batch_size, num_epoch):
        """Return (total, per-batch, per-epoch) train times; requires set_device_model() first."""
        assert self.device_model is not None, 'set_device_model must be called first'
        return Device.du.get_train_time_and_train_time_per_batch_and_train_time_per_epoch(self.device_model, num_sample, batch_size, num_epoch)
class ESPNet(nn.Module):
    """3D ESPNet encoder-decoder with a PSP-style multi-scale classifier head.

    Encoder: three dilated-parallel-residual stages at 1/2, 1/4 and 1/8
    resolution. Decoder: upsample + skip-merge back to 1/2 resolution, then a
    pyramid of PSPDec branches feeds the final classifier, whose output is
    interpolated back to the input resolution.
    """

    def __init__(self, in_channels, num_classes):
        super().__init__()
        self.input1 = InputProjectionA(1)
        self.input2 = InputProjectionA(1)
        initial = 16
        config = [32, 128, 256, 256]
        reps = [2, 2, 3]
        self.level0 = CBR(in_channels, initial, 7, 2)
        self.level1 = nn.ModuleList()
        for i in range(reps[0]):
            if i == 0:
                self.level1.append(DilatedParllelResidualBlockB1(initial, config[0]))
            else:
                self.level1.append(DilatedParllelResidualBlockB1(config[0], config[0]))
        self.level2 = DilatedParllelResidualBlockB1(config[0], config[1], stride=2)
        self.level_2 = nn.ModuleList()
        for i in range(0, reps[1]):
            self.level_2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        self.level3_0 = DilatedParllelResidualBlockB1(config[1], config[2], stride=2)
        self.level_3 = nn.ModuleList()
        for i in range(0, reps[2]):
            self.level_3.append(DilatedParllelResidualBlockB1(config[2], config[2]))
        self.up_l3_l2 = UpSampler(config[2], config[1])
        self.merge_l2 = DilatedParllelResidualBlockB1(2 * config[1], config[1])
        self.dec_l2 = nn.ModuleList()
        for i in range(0, reps[0]):
            self.dec_l2.append(DilatedParllelResidualBlockB1(config[1], config[1]))
        self.up_l2_l1 = UpSampler(config[1], config[0])
        self.merge_l1 = DilatedParllelResidualBlockB1(2 * config[0], config[0])
        self.dec_l1 = nn.ModuleList()
        for i in range(0, reps[0]):
            self.dec_l1.append(DilatedParllelResidualBlockB1(config[0], config[0]))
        self.dec_l1.append(CBR(config[0], num_classes, 3, 1))
        self.dec_l1.append(ASPBlock(num_classes, num_classes))
        self.pspModules = nn.ModuleList()
        scales = [0.2, 0.4, 0.6, 0.8]
        for sc in scales:
            self.pspModules.append(PSPDec(num_classes, num_classes, sc))
        # BUG FIX: the original read `self.classifier = self.classifier = ...`
        # (a duplicated assignment target); a single assignment suffices.
        self.classifier = nn.Sequential(
            CBR((len(scales) + 1) * num_classes, num_classes, 3, 1),
            ASPBlock(num_classes, num_classes),
            nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True),
            CBR(num_classes, num_classes, 7, 1),
            C(num_classes, num_classes, 1, 1))
        # He-style initialisation for conv layers; identity init for BatchNorm.
        # (Conv3d / ConvTranspose3d / BatchNorm3d are disjoint classes, so the
        # original if/if/elif chain is equivalent to this elif chain.)
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.ConvTranspose3d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, input1, inp_res=(128, 128, 128), inpSt2=False):
        """Segment *input1*; the output is resized back to the input resolution.

        Args:
            input1: 5D tensor (N, C, D, H, W).
            inp_res: working resolution; during training (or when None) each
                spatial dim is rounded up to a multiple of 8 so the three
                stride-2 stages divide evenly.
            inpSt2: accepted for API compatibility but unused.
        """
        dim0 = input1.size(2)
        dim1 = input1.size(3)
        dim2 = input1.size(4)
        if self.training or (inp_res is None):
            inp_res = ((math.ceil(dim0 / 8) * 8), (math.ceil(dim1 / 8) * 8), (math.ceil(dim2 / 8) * 8))
        if inp_res:
            input1 = F.adaptive_avg_pool3d(input1, output_size=inp_res)
        out_l0 = self.level0(input1)
        for (i, layer) in enumerate(self.level1):
            out_l1 = layer(out_l0) if i == 0 else layer(out_l1)
        out_l2_down = self.level2(out_l1)
        for (i, layer) in enumerate(self.level_2):
            out_l2 = layer(out_l2_down) if i == 0 else layer(out_l2)
        del out_l2_down
        out_l3_down = self.level3_0(out_l2)
        for (i, layer) in enumerate(self.level_3):
            out_l3 = layer(out_l3_down) if i == 0 else layer(out_l3)
        del out_l3_down
        # Decoder with skip connections from the matching encoder stages.
        dec_l3_l2 = self.up_l3_l2(out_l3)
        merge_l2 = self.merge_l2(torch.cat([dec_l3_l2, out_l2], 1))
        for (i, layer) in enumerate(self.dec_l2):
            dec_l2 = layer(merge_l2) if i == 0 else layer(dec_l2)
        dec_l2_l1 = self.up_l2_l1(dec_l2)
        merge_l1 = self.merge_l1(torch.cat([dec_l2_l1, out_l1], 1))
        for (i, layer) in enumerate(self.dec_l1):
            dec_l1 = layer(merge_l1) if i == 0 else layer(dec_l1)
        # PSP pyramid: concatenate the decoder output with each pooled branch.
        psp_outs = dec_l1.clone()
        for layer in self.pspModules:
            psp_outs = torch.cat([psp_outs, layer(dec_l1)], 1)
        decoded = self.classifier(psp_outs)
        # BUG FIX: F.upsample is deprecated; F.interpolate is its drop-in
        # replacement with identical semantics.
        return F.interpolate(decoded, size=(dim0, dim1, dim2), mode='trilinear', align_corners=True)
def get_checkpoint_from_config_class(config_class):
    """Return the example checkpoint name referenced in a config class docstring.

    Scans the class source for `[name](link)` checkpoint references (using the
    module-level `_re_checkpoint` regex) and returns the first name whose link
    matches the canonical `https://huggingface.co/<name>` hub URL, or None if
    no reference matches.
    """
    checkpoint = None
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    for (ckpt_name, ckpt_link) in checkpoints:
        # Tolerate a trailing slash in the documented link.
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:(- 1)]
        # BUG FIX: this f-string was truncated to a bare `f'` in the source
        # (a syntax error); reconstructed as the canonical Hub URL so the
        # name/link consistency check works as intended.
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def prepare_keys_vimeo90k(folder_path, train_list_path, mode):
    """Build (img_path_list, keys) for the Vimeo90K dataset.

    Args:
        folder_path: accepted for API compatibility but not read; all paths
            are derived from the clip list file.
        train_list_path: text file with one 'AAAA/BBBB' clip id per line.
        mode: 'gt' keeps only the centre (4th) frame of each 7-frame clip.

    Returns:
        (img_path_list, keys): relative frame paths and 'AAAA/BBBB/imN' keys.
    """
    print('Reading image path list ...')
    with open(train_list_path, 'r') as fin:
        clips = [line.strip() for line in fin]
    img_path_list = []
    keys = []
    # Every clip contributes frames im1.png .. im7.png.
    for clip in clips:
        (folder, sub_folder) = clip.split('/')
        for j in range(7):
            img_path_list.append(osp.join(folder, sub_folder, f'im{j + 1}.png'))
            keys.append(f'{folder}/{sub_folder}/im{j + 1}')
    if mode == 'gt':
        print('Only keep the 4th frame for the gt mode.')
        img_path_list = [p for p in img_path_list if p.endswith('im4.png')]
        keys = [k for k in keys if k.endswith('/im4')]
    return (img_path_list, keys)
class TextSampler(Sampler):
    """Length-bucketed batch sampler.

    Samples are clustered into `n_buckets` groups of similar length via
    kmeans; each bucket is then split into roughly batch-sized chunks, and
    __iter__ yields one list of sample indices per chunk. With shuffle=True
    both bucket order and within-bucket order are randomised.
    """

    def __init__(self, lengths, batch_size, n_buckets, shuffle=False):
        self.lengths = lengths
        self.batch_size = batch_size
        self.shuffle = shuffle
        # sizes[i] is the representative length of buckets[i].
        (self.sizes, self.buckets) = kmeans(x=lengths, k=n_buckets)
        # Number of batches each bucket contributes (always at least one).
        self.chunks = [
            max((size * len(bucket)) // self.batch_size, 1)
            for (size, bucket) in zip(self.sizes, self.buckets)
        ]

    def __iter__(self):
        order = torch.randperm if self.shuffle else torch.arange
        for bucket_idx in order(len(self.buckets)):
            bucket = self.buckets[bucket_idx]
            for positions in order(len(bucket)).chunk(self.chunks[bucket_idx]):
                yield [bucket[pos] for pos in positions.tolist()]

    def __len__(self):
        return sum(self.chunks)
def cal_acc(zeros, ones):
    """Compute a discriminator's accuracy over two labelled example sets.

    Args:
        zeros: examples whose score `example[0]` should be < 0.5.
        ones: examples whose score `example[0]` should be > 0.5.
        NaN scores count as incorrect.

    Returns:
        Fraction of correctly classified examples. ROBUSTNESS FIX: returns
        0.0 when both lists are empty (the original raised ZeroDivisionError).
    """
    total = len(zeros) + len(ones)
    if total == 0:
        print('The accuracy of the discriminator is: 0.0')
        return 0.0
    correct = sum(1.0 for ex in zeros if (not np.isnan(ex[0])) and ex[0] < 0.5)
    correct += sum(1.0 for ex in ones if (not np.isnan(ex[0])) and ex[0] > 0.5)
    accuracy = correct / float(total)
    print('The accuracy of the discriminator is: ' + str(accuracy))
    return accuracy
def dump2json(ofn, data, force=False):
    """Serialize *data* as JSON to path *ofn*.

    Skips writing when the file already exists, unless force=True.
    Handles numpy scalar types, sets and ndarrays via a custom fallback.
    """
    if os.path.exists(ofn) and not force:
        return

    def default(obj):
        """JSON fallback for numpy scalars, sets and arrays."""
        # GENERALIZATION: np.integer / np.floating are the abstract bases of
        # all width variants (int16/32/64, float16/32/...), so this covers
        # more types than the original int32/int64/float32-only checks while
        # remaining backward compatible.
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, (set, np.ndarray)):
            return list(obj)
        raise TypeError('Unserializable object {} of type {}'.format(obj, type(obj)))

    with open(ofn, 'w') as of:
        json.dump(data, of, default=default)
class EncoderLayer(nn.Module):
    """Single encoder layer: self-attention with optional residual sublayers.

    With use_residual=True, attention (and, if given, the feed-forward net)
    are wrapped in SublayerConnection residual blocks; otherwise the layer is
    just a plain self-attention call.
    """

    def __init__(self, d_model, self_attn, feed_forward=None, use_residual=False, dropout=0.1):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.use_residual = use_residual
        if use_residual:
            # One wrapper for attention, one for the feed-forward net.
            self.sublayer = nn.ModuleList(
                SublayerConnection(d_model, dropout) for _ in range(2))
        self.d_model = d_model

    def forward(self, x, mask):
        if not self.use_residual:
            # Plain path: attention only, no residual wrapping.
            return self.self_attn(x, x, x, mask)
        attend = lambda t: self.self_attn(t, t, t, mask)
        x = self.sublayer[0](x, attend)
        if self.feed_forward is None:
            return x
        return self.sublayer[1](x, self.feed_forward)
class RePU(nn.ReLU):
    """Rectified power unit: computes relu(x) ** n element-wise."""

    def __init__(self, n):
        super(RePU, self).__init__()
        # Exponent applied to the rectified input.
        self.n = n

    def forward(self, x: torch.Tensor):
        clipped = torch.relu(x)
        return clipped ** self.n
def main():
    """Convert the LoveDA dataset archives into the mmseg directory layout.

    Extracts Train/Val/Test zips into a temp dir, then moves images into
    img_dir/<split> and masks into ann_dir/<split> (the test split has no
    masks).
    """
    args = parse_args()
    dataset_path = args.dataset_path
    out_dir = osp.join('data', 'loveDA') if args.out_dir is None else args.out_dir
    print('Making directories...')
    # Create the full output tree up front.
    for parts in [(), ('img_dir',), ('img_dir', 'train'), ('img_dir', 'val'),
                  ('img_dir', 'test'), ('ann_dir',), ('ann_dir', 'train'),
                  ('ann_dir', 'val')]:
        mmcv.mkdir_or_exist(osp.join(out_dir, *parts))
    for archive in ('Train.zip', 'Val.zip', 'Test.zip'):
        assert archive in os.listdir(dataset_path), '{} is not in {}'.format(archive, dataset_path)
    with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
        for dataset in ['Train', 'Val', 'Test']:
            zip_file = zipfile.ZipFile(os.path.join(dataset_path, dataset + '.zip'))
            zip_file.extractall(tmp_dir)
            data_type = dataset.lower()
            for location in ['Rural', 'Urban']:
                for image_type in ['images_png', 'masks_png']:
                    # The Test split ships without masks.
                    if dataset == 'Test' and image_type == 'masks_png':
                        continue
                    kind = 'img_dir' if image_type == 'images_png' else 'ann_dir'
                    dst = osp.join(out_dir, kind, data_type)
                    src_dir = osp.join(tmp_dir, dataset, location, image_type)
                    for fname in os.listdir(src_dir):
                        shutil.move(osp.join(src_dir, fname), dst)
    print('Removing the temporary files...')
    print('Done!')
def main(args, logger):
    """End-to-end unsupervised segmentation driver (clustering baseline).

    Each epoch: (1) run mini-batch K-Means over features to get centroids,
    (2) assign cluster pseudo-labels, (3) train a fresh classifier head on
    them, (4) evaluate and checkpoint. Afterwards, a final evaluation pass
    re-clusters on the eval split and scores on the test split.
    NOTE(review): contracts of the project helpers (get_dataset, evaluate,
    run_mini_batch_kmeans, ...) are assumed from their call sites here.
    """
    logger.info(args)
    # Seed everything before any model/data construction for reproducibility.
    fix_seed_for_reproducability(args.seed)
    t_start = t.time()
    (model, optimizer, classifier) = get_model_and_optimizer(args, logger)
    # Invariance / equivariance augmentation specs for the training transforms.
    (inv_list, eqv_list) = get_transform_params(args)
    trainset = get_dataset(args, mode='train', inv_list=inv_list, eqv_list=eqv_list)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size_cluster, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=collate_train_baseline, worker_init_fn=worker_init_fn(args.seed))
    testset = get_dataset(args, mode='train_val')
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size_test, shuffle=False, num_workers=args.num_workers, pin_memory=True, collate_fn=collate_eval, worker_init_fn=worker_init_fn(args.seed))
    # Baseline evaluation before any training; results are discarded.
    (_, _) = evaluate(args, logger, testloader, classifier, model)
    if (not args.eval_only):
        for epoch in range(args.start_epoch, args.num_epoch):
            # Recompute features and cluster labels from scratch each epoch.
            trainloader.dataset.mode = 'compute'
            trainloader.dataset.reshuffle()
            adjust_learning_rate(optimizer, epoch, args)
            logger.info('\n [Epoch {}] \n'.format(epoch))
            logger.info('Start computing centroids.')
            t1 = t.time()
            (centroids, kmloss) = run_mini_batch_kmeans(args, logger, trainloader, model, view=1)
            logger.info('-Centroids ready. [{}]\n'.format(get_datetime((int(t.time()) - int(t1)))))
            t2 = t.time()
            # Assign pseudo-labels; `weight` rebalances the CE loss per cluster.
            weight = compute_labels(args, logger, trainloader, model, centroids, view=1)
            logger.info('-Cluster labels ready. [{}]\n'.format(get_datetime((int(t.time()) - int(t2)))))
            criterion = torch.nn.CrossEntropyLoss(weight=weight).cuda()
            # Fresh classifier head each epoch.
            classifier = initialize_classifier(args)
            if args.nonparametric:
                # Nonparametric head: frozen centroids act as the 1x1-conv weights.
                classifier.module.weight.data = centroids.unsqueeze((- 1)).unsqueeze((- 1))
                freeze_all(classifier)
            # Optimizer for the classifier head only (None when nonparametric).
            if args.nonparametric:
                optimizer_loop = None
            elif (args.optim_type == 'SGD'):
                optimizer_loop = torch.optim.SGD(filter((lambda x: x.requires_grad), classifier.module.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
            elif (args.optim_type == 'Adam'):
                optimizer_loop = torch.optim.Adam(filter((lambda x: x.requires_grad), classifier.module.parameters()), lr=args.lr)
            trainset.mode = 'baseline_train'
            trainset.labeldir = args.save_model_path
            trainloader_loop = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size_train, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=collate_train_baseline, worker_init_fn=worker_init_fn(args.seed))
            logger.info('Start training ...')
            train_loss = train(args, logger, trainloader_loop, model, classifier, criterion, optimizer, optimizer_loop)
            (acc, res) = evaluate(args, logger, testloader, classifier, model)
            logger.info(' Epoch [{}] '.format(epoch))
            logger.info(' Time total : [{}].'.format(get_datetime((int(t.time()) - int(t1)))))
            logger.info(' K-Means loss : {:.5f}.'.format(kmloss))
            logger.info(' Training loss : {:.5f}.'.format(train_loss))
            logger.info(' ACC: {:.4f} | mIoU: {:.4f}'.format(acc, res['mean_iou']))
            logger.info('\n')
            # Per-epoch checkpoint plus a rolling 'latest' checkpoint.
            torch.save({'epoch': (epoch + 1), 'args': args, 'state_dict': model.state_dict(), 'classifier1_state_dict': classifier.state_dict(), 'optimizer': optimizer.state_dict()}, os.path.join(args.save_model_path, 'checkpoint_{}.pth.tar'.format(epoch)))
            torch.save({'epoch': (epoch + 1), 'args': args, 'state_dict': model.state_dict(), 'classifier1_state_dict': classifier.state_dict(), 'optimizer': optimizer.state_dict()}, os.path.join(args.save_model_path, 'checkpoint.pth.tar'))
    # Final evaluation: re-cluster on the eval split, score on the test split.
    trainset = get_dataset(args, mode='eval_val')
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size_cluster, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=collate_train_baseline, worker_init_fn=worker_init_fn(args.seed))
    testset = get_dataset(args, mode='eval_test')
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size_test, shuffle=False, num_workers=args.num_workers, pin_memory=True, collate_fn=collate_eval, worker_init_fn=worker_init_fn(args.seed))
    acc_list_new = []
    res_list_new = []
    logger.info('Start computing centroids.')
    if (args.repeats > 0):
        # Repeat clustering+eval to report mean/std across random restarts.
        for _ in range(args.repeats):
            t1 = t.time()
            (centroids, kmloss) = run_mini_batch_kmeans(args, logger, trainloader, model, view=(- 1))
            logger.info('-Centroids ready. [Loss: {:.5f}/ Time: {}]\n'.format(kmloss, get_datetime((int(t.time()) - int(t1)))))
            classifier = initialize_classifier(args)
            classifier.module.weight.data = centroids.unsqueeze((- 1)).unsqueeze((- 1))
            freeze_all(classifier)
            (acc_new, res_new) = evaluate(args, logger, testloader, classifier, model)
            acc_list_new.append(acc_new)
            res_list_new.append(res_new)
    else:
        # repeats == 0: score with the classifier from the training phase.
        (acc_new, res_new) = evaluate(args, logger, testloader, classifier, model)
        acc_list_new.append(acc_new)
        res_list_new.append(res_new)
    logger.info('Average overall pixel accuracy [NEW] : {} +/- {}.'.format(round(np.mean(acc_list_new), 2), np.std(acc_list_new)))
    logger.info('Average mIoU [NEW] : {:.3f} +/- {:.3f}. '.format(np.mean([res['mean_iou'] for res in res_list_new]), np.std([res['mean_iou'] for res in res_list_new])))
    logger.info('Experiment done. [{}]\n'.format(get_datetime((int(t.time()) - int(t_start)))))
def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Merge optimizer parameter groups sharing identical hyperparameters.

    After expanding per-parameter groups, groups whose non-'params' options
    are equal (same keys, values and insertion order) are merged into one
    group whose 'params' lists are concatenated.
    """
    expanded = _expand_param_groups(params)
    merged = defaultdict(list)
    for group in expanded:
        # All options except the parameter list form the grouping signature.
        signature = tuple((k, v) for (k, v) in group.items() if k != 'params')
        merged[signature].extend(group['params'])
    result = []
    for (signature, param_list) in merged.items():
        new_group = dict(signature)
        new_group['params'] = param_list
        result.append(new_group)
    return result
def add_dmc_args(parser):
    """Register DeepMind Control suite command-line options on *parser*.

    Registration order matches the original so --help output is unchanged.
    """
    parser.add_argument('--domain_name', type=str, default='fish')
    parser.add_argument('--task_name', type=str, default='swim')
    parser.add_argument('--from_pixels', action='store_true', help='Use image observations')
    # Integer-valued rendering / stepping options.
    for flag, default in [('--height', 84), ('--width', 84), ('--camera_id', 0),
                          ('--frame_skip', 1), ('--frame_stack', 3)]:
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--channels_last', action='store_true')
    parser.add_argument('--rgb', action='store_true')
    parser.add_argument('--seed', type=int, default=231)
def dumb_css_parser(data):
    """Very small CSS parser mapping each selector to a property dict.

    Strips @import statements, then parses 'selector { props }' rules via
    dumb_property_dict. A malformed rule list yields {}.
    """
    # A trailing ';' guarantees every @import statement has a terminator.
    data += ';'
    # BUG FIX: the original searched for '' (str.find('') is always 0), which
    # corrupted the input and, when no ';' remained, looped forever. The
    # intent is to drop '@import ...;' statements.
    importIndex = data.find('@import')
    while (importIndex != (- 1)):
        data = (data[0:importIndex] + data[(data.find(';', importIndex) + 1):])
        importIndex = data.find('@import')
    elements = [x.split('{') for x in data.split('}') if ('{' in x.strip())]
    try:
        elements = dict([(a.strip(), dumb_property_dict(b)) for (a, b) in elements])
    except ValueError:
        # A chunk splitting into != 2 pieces makes the dict() call fail.
        elements = {}
    return elements
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.