code
stringlengths
101
5.91M
class AlternateSequentialWeaveGraph(SequentialGraph):
    """Sequential model container for alternate Weave layers (TF1 graph mode).

    Keeps two running tensors -- atom features (``output``) and pair
    features (``output_P``) -- that Weave layers update together.
    """

    def __init__(self, batch_size, max_atoms=50, n_atom_feat=75, n_pair_feat=14):
        # Each instance owns its own tf.Graph; all placeholders/ops live in it.
        self.graph = tf.Graph()
        self.batch_size = batch_size
        self.max_atoms = max_atoms
        self.n_atom_feat = n_atom_feat
        self.n_pair_feat = n_pair_feat
        with self.graph.as_default():
            self.graph_topology = AlternateWeaveGraphTopology(self.batch_size, self.max_atoms, self.n_atom_feat, self.n_pair_feat)
            # Running outputs: atom features and pair features respectively.
            self.output = self.graph_topology.get_atom_features_placeholder()
            self.output_P = self.graph_topology.get_pair_features_placeholder()
        self.layers = []

    def add(self, layer):
        """Append ``layer``, wiring its inputs by layer type.

        AlternateWeaveLayer consumes and produces an (atom, pair) feature
        pair plus topology placeholders; AlternateWeaveGather reduces atom
        features per molecule via the atom-split placeholder; any other
        layer transforms the atom-feature tensor only.
        """
        with self.graph.as_default():
            if (type(layer).__name__ in ['AlternateWeaveLayer']):
                (self.output, self.output_P) = layer(([self.output, self.output_P] + self.graph_topology.get_topology_placeholders()))
            elif (type(layer).__name__ in ['AlternateWeaveGather']):
                self.output = layer([self.output, self.graph_topology.atom_split_placeholder])
            else:
                self.output = layer(self.output)
            self.layers.append(layer)
class TestOptions():
    """Command-line option definitions for evaluating a segmentation network."""

    def initialize(self):
        """Create the parser, register every option, and return parse_args()."""
        ap = argparse.ArgumentParser(description='test segmentation network')
        # Model / device selection.
        ap.add_argument('--model', type=str, default='DeepLab', help='available options : DeepLab and VGG')
        ap.add_argument('--GPU', type=str, default='0', help='which GPU to use')
        # Target-dataset locations.
        ap.add_argument('--data-dir-target', type=str, default='../data_semseg/cityscapes', help='Path to the directory containing the target dataset.')
        ap.add_argument('--data-list-target', type=str, default='./dataset/cityscapes_list/val.txt', help='list of images in the target dataset.')
        ap.add_argument('--num-classes', type=int, default=19, help='Number of classes for cityscapes.')
        ap.add_argument('--set', type=str, default='val', help='choose test set.')
        # Checkpoints to restore.
        ap.add_argument('--restore-opt1', type=str, default=None, help='restore model parameters from beta1')
        ap.add_argument('--restore-opt2', type=str, default=None, help='restore model parameters from beta2')
        ap.add_argument('--restore-opt3', type=str, default=None, help='restore model parameters from beta3')
        ap.add_argument('--init-weights', type=str, default=None, help='initial model.')
        ap.add_argument('--restore-from', type=str, default=None, help='restore model parameters from')
        # Output / ground-truth paths.
        ap.add_argument('--save', type=str, default='../results', help='Path to save result.')
        ap.add_argument('--gt_dir', type=str, default='../data_semseg/cityscapes/gtFine/val', help='directory for CityScapes val gt images')
        ap.add_argument('--devkit_dir', type=str, default='./dataset/cityscapes_list', help='list directory of cityscapes')
        return ap.parse_args()
def requires_submit(func):
    """Decorator for Job methods that require a prior ``submit()`` call.

    The wrapper raises JobError when invoked before the job's ``_future``
    has been populated by ``submit()``; otherwise it forwards to ``func``.
    """
    import functools
    # NOTE(review): the original body contained a bare ``(func)`` expression,
    # almost certainly the residue of a stripped ``@functools.wraps(func)``
    # decorator; restored so the wrapper keeps the wrapped method's metadata.
    @functools.wraps(func)
    def _wrapper(self, *args, **kwargs):
        if self._future is None:
            raise JobError('Job not submitted yet!. You have to .submit() first!')
        return func(self, *args, **kwargs)
    return _wrapper
class SEMlp(nn.Module):
    """MLP block with a depth-wise-conv mixing branch and a Squeeze-Excite gate.

    Pipeline: fc1 -> (optional ReLU) -> residual gamma-scaled DWConv ->
    fc2 -> dropout -> SqueezeExcite over the (B, C, H, W) layout.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, linear=False, use_se=True):
        super().__init__()
        # Hidden/output widths default to the input width.
        out_features = (out_features or in_features)
        hidden_features = (hidden_features or in_features)
        self.fc1 = nn.Linear(in_features, hidden_features)
        # Sequence-shaped depth-wise convolution (spatial mixing over H x W).
        self.dwconv = DWConvSeq(hidden_features)
        # Learnable per-channel scale applied to the conv branch.
        self.gamma = nn.Parameter(torch.ones(hidden_features), requires_grad=True)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)
        self.linear = linear
        if self.linear:
            self.relu = nn.ReLU(inplace=True)
        self.se = (SqueezeExcite(out_features, se_ratio=0.25) if use_se else nn.Identity())
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal linears, unit LayerNorm, He-style conv init.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            fan_out = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            fan_out //= m.groups
            m.weight.data.normal_(0, math.sqrt((2.0 / fan_out)))
            if (m.bias is not None):
                m.bias.data.zero_()

    def forward(self, x, H, W):
        # x: (B, N, C) token sequence; assumes N == H * W -- TODO confirm.
        (B, N, C) = x.shape
        x = self.fc1(x)
        if self.linear:
            x = self.relu(x)
        # Residual connection around the gamma-scaled depth-wise conv branch.
        x = (self.drop((self.gamma * self.dwconv(x, H, W))) + x)
        x = self.fc2(x)
        x = self.drop(x)
        # NOTE(review): the reshape below reuses the *input* channel count C,
        # which only holds when out_features == in_features -- verify callers.
        x = self.se(x.permute(0, 2, 1).reshape(B, C, H, W)).reshape(B, C, N).permute(0, 2, 1)
        return x
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
    """Convert a PyTorch transformers checkpoint into a TF2 ``.h5`` weights file.

    Parameters:
        model_type: key into MODEL_CLASSES selecting config/model classes.
        pytorch_checkpoint_path: local path or AWS shortcut for the PT weights.
        config_file: JSON config path or AWS shortcut.
        tf_dump_path: destination path for the TF weights (HDF5 format).
        compare_with_pt_model: when True, run both models on dummy inputs and
            assert their max output difference is <= 2e-2.
        use_cached_models: when False, force re-download of AWS-mapped files.

    Raises:
        ValueError: for an unrecognized ``model_type``.
    """
    if (model_type not in MODEL_CLASSES):
        raise ValueError('Unrecognized model type, should be one of {}.'.format(list(MODEL_CLASSES.keys())))
    (config_class, model_class, pt_model_class, aws_model_maps, aws_config_map) = MODEL_CLASSES[model_type]
    # Resolve AWS shortcut names to locally cached files.
    if (config_file in aws_config_map):
        config_file = cached_path(aws_config_map[config_file], force_download=(not use_cached_models))
    config = config_class.from_json_file(config_file)
    # Expose all hidden states / attentions so outputs are fully comparable.
    config.output_hidden_states = True
    config.output_attentions = True
    print('Building TensorFlow model from configuration: {}'.format(str(config)))
    tf_model = model_class(config)
    if (pytorch_checkpoint_path in aws_model_maps):
        pytorch_checkpoint_path = cached_path(aws_model_maps[pytorch_checkpoint_path], force_download=(not use_cached_models))
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        # Sanity check: forward both frameworks on dummy inputs and compare.
        tfo = tf_model(tf_model.dummy_inputs, training=False)
        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs((np_pt - np_tf)))
        print('Max absolute difference between models outputs {}'.format(diff))
        assert (diff <= 0.02), 'Error, model absolute difference is >2e-2: {}'.format(diff)
    print('Save TensorFlow model to {}'.format(tf_dump_path))
    tf_model.save_weights(tf_dump_path, save_format='h5')
class VGG(nn.Module):
    """VGG backbone with configurable depth, dilation, and stage freezing.

    ``arch_settings`` maps depth -> conv blocks per stage.  Outputs the
    feature maps of the stages listed in ``out_indices`` (plus classifier
    logits when ``num_classes > 0``).
    """
    # depth -> number of conv blocks in each of the five stages.
    arch_settings = {11: (1, 1, 2, 2, 2), 13: (2, 2, 2, 2, 2), 16: (2, 2, 3, 3, 3), 19: (2, 2, 4, 4, 4)}

    def __init__(self, depth, with_bn=False, num_classes=(- 1), num_stages=5, dilations=(1, 1, 1, 1, 1), out_indices=(0, 1, 2, 3, 4), frozen_stages=(- 1), bn_eval=True, bn_frozen=False, ceil_mode=False, with_last_pool=True):
        super(VGG, self).__init__()
        if (depth not in self.arch_settings):
            raise KeyError('invalid depth {} for vgg'.format(depth))
        assert ((num_stages >= 1) and (num_stages <= 5))
        stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        assert (len(dilations) == num_stages)
        assert (max(out_indices) <= num_stages)
        self.num_classes = num_classes
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        # RGB input.
        self.inplanes = 3
        start_idx = 0
        vgg_layers = []
        # Per-stage [start, end) index ranges into the flat `features` list,
        # used by forward() and train() to address stages.
        self.range_sub_modules = []
        for (i, num_blocks) in enumerate(self.stage_blocks):
            # Each block contributes conv (+ optional BN) + ReLU; +1 for the pool.
            num_modules = ((num_blocks * (2 + with_bn)) + 1)
            end_idx = (start_idx + num_modules)
            dilation = dilations[i]
            # Channel width doubles per stage, capped at 512.
            planes = ((64 * (2 ** i)) if (i < 4) else 512)
            vgg_layer = make_vgg_layer(self.inplanes, planes, num_blocks, dilation=dilation, with_bn=with_bn, ceil_mode=ceil_mode)
            vgg_layers.extend(vgg_layer)
            self.inplanes = planes
            self.range_sub_modules.append([start_idx, end_idx])
            start_idx = end_idx
        if (not with_last_pool):
            # Drop the final pooling layer and shrink the last stage's range.
            vgg_layers.pop((- 1))
            self.range_sub_modules[(- 1)][1] -= 1
        self.module_name = 'features'
        self.add_module(self.module_name, nn.Sequential(*vgg_layers))
        if (self.num_classes > 0):
            # Standard VGG classifier head over a 7x7x512 feature map.
            self.classifier = nn.Sequential(nn.Linear(((512 * 7) * 7), 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout(), nn.Linear(4096, num_classes))

    def init_weights(self, pretrained=None):
        """Load a checkpoint (str path) or randomly initialize (None).

        Raises:
            TypeError: when ``pretrained`` is neither a str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        """Run all stages; return selected stage outputs (tuple or single tensor)."""
        outs = []
        vgg_layers = getattr(self, self.module_name)
        for (i, num_blocks) in enumerate(self.stage_blocks):
            for j in range(*self.range_sub_modules[i]):
                vgg_layer = vgg_layers[j]
                x = vgg_layer(x)
            if (i in self.out_indices):
                outs.append(x)
        if (self.num_classes > 0):
            x = x.view(x.size(0), (- 1))
            x = self.classifier(x)
            outs.append(x)
        if (len(outs) == 1):
            return outs[0]
        else:
            return tuple(outs)

    def train(self, mode=True):
        """Enter train/eval mode while honoring bn_eval/bn_frozen/frozen_stages."""
        super(VGG, self).train(mode)
        if self.bn_eval:
            # Keep BatchNorm statistics fixed even in training mode.
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    if self.bn_frozen:
                        for params in m.parameters():
                            params.requires_grad = False
        vgg_layers = getattr(self, self.module_name)
        if (mode and (self.frozen_stages >= 0)):
            # Freeze the first `frozen_stages` stages entirely.
            for i in range(self.frozen_stages):
                for j in range(*self.range_sub_modules[i]):
                    mod = vgg_layers[j]
                    mod.eval()
                    for param in mod.parameters():
                        param.requires_grad = False
def evaluations(ty, pv): if (len(ty) != len(pv)): raise ValueError('len(ty) must equal to len(pv)') total_correct = total_error = 0 sumv = sumy = sumvv = sumyy = sumvy = 0 for (v, y) in zip(pv, ty): if (y == v): total_correct += 1 total_error += ((v - y) * (v - y)) sumv += v sumy += y sumvv += (v * v) sumyy += (y * y) sumvy += (v * y) l = len(ty) ACC = ((100.0 * total_correct) / l) MSE = (total_error / l) try: SCC = ((((l * sumvy) - (sumv * sumy)) * ((l * sumvy) - (sumv * sumy))) / (((l * sumvv) - (sumv * sumv)) * ((l * sumyy) - (sumy * sumy)))) except: SCC = float('nan') return (ACC, MSE, SCC)
def order_terms(term_features, *args):
    """Sort terms (and any parallel sequences) by arity, then by feature ids.

    Each element of ``term_features`` is a collection of feature indices; the
    sort key is ``[len(indices)] + sorted(indices)`` so lower-order terms come
    first.  Extra sequences in ``args`` are reordered in lockstep.

    Returns an empty list (or tuple of empty lists) for empty input, a single
    list when no extra sequences are given, otherwise a tuple of lists.
    """
    if not term_features:
        return [] if not args else tuple([] for _ in range(len(args) + 1))

    def sort_key(feature_idxs):
        return [len(feature_idxs)] + sorted(feature_idxs)

    decorated = sorted(zip(map(sort_key, term_features), term_features, *args))
    # Drop the key column; transpose the remaining columns back into lists.
    columns = tuple(list(col) for col in islice(zip(*decorated), 1, None))
    return columns if len(columns) >= 2 else columns[0]
class NonLinearPredictor(nn.Module):
    """Two-layer MLP prediction head: dropout -> Linear -> GELU -> BatchNorm -> Linear.

    ``config`` supplies ``predictor_dropout`` and ``predictor_hidden_feats``.
    """

    def __init__(self, in_feats, out_feats, config):
        super().__init__()
        hidden_feats = config['predictor_hidden_feats']
        self.dropout = nn.Dropout(config['predictor_dropout'])
        self.linear1 = nn.Linear(in_feats, hidden_feats)
        self.activation = nn.GELU()
        self.batch_normal = nn.BatchNorm1d(hidden_feats)
        self.linear2 = nn.Linear(hidden_feats, out_feats)

    def forward(self, features):
        """Map a (batch, in_feats) tensor to (batch, out_feats) predictions."""
        hidden = self.linear1(self.dropout(features))
        hidden = self.batch_normal(self.activation(hidden))
        return self.linear2(hidden)
def convert_context(params, ctx):
    """Return a copy of ``params`` with every value moved to context ``ctx``.

    ``params`` maps names to arrays exposing ``as_in_context`` (MXNet-style);
    the input dict itself is left untouched.
    """
    return {name: value.as_in_context(ctx) for (name, value) in params.items()}
def probability_to_one_hot(tensor, stochastic=False):
    """Convert a probability/score vector into a one-hot vector.

    When ``stochastic`` is True, samples the index from the re-normalized
    distribution; otherwise takes the argmax.  Returns a variable wrapping a
    one-hot FloatTensor whose width equals ``tensor``'s last dimension.
    """
    if stochastic:
        prob = tensor.data.cpu().numpy().ravel().astype(np.float64)
        # Normalized twice: the second pass guards against float64 rounding so
        # np.random.choice sees probabilities summing to exactly 1.
        prob = (prob / np.sum(prob))
        norm = np.sum(prob)
        prob = [(prob[i] / norm) for i in range(len(prob))]
        idx = int(np.random.choice(len(prob), 1, p=prob))
    else:
        idx = int(np.argmax(tensor.data.cpu().numpy()))
    return create_var(one_hot(torch.FloatTensor([idx]), list(tensor.size())[(- 1)]))
def Lambda_with_lambda():
    """Smoke-test a Keras ``Lambda`` layer wrapping ``x + 1``.

    Builds a single-input model and prints its prediction on [1, 2, 3].
    """
    from keras.layers import Lambda, Input
    from keras.models import Model
    x = Input((1,))
    y = Lambda((lambda x: (x + 1)))(x)
    m = Model(x, y)
    # predict_on_batch accepts array-likes; each scalar is one sample.
    yp = m.predict_on_batch([1, 2, 3])
    print('np.array([1,2,3]) + 1:')
    print(yp)
class LabelSmoothingCrossEntropy(nn.Module):
    """Cross-entropy with uniform label smoothing.

    The target distribution places ``1 - smoothing`` on the gold class and
    spreads ``smoothing`` uniformly over all classes, which reduces to
    ``confidence * NLL + smoothing * mean(-log p)``.
    """

    def __init__(self, smoothing=0.1):
        super(LabelSmoothingCrossEntropy, self).__init__()
        assert smoothing < 1.0
        self.smoothing = smoothing
        self.confidence = 1.0 - smoothing

    def forward(self, x, target):
        """x: (batch, classes) logits; target: (batch,) class indices."""
        logprobs = F.log_softmax(x, dim=-1)
        # Log-probability of the gold class for every sample.
        gold_logprob = logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)
        # Mean log-probability acts as the uniform-smoothing term.
        uniform_logprob = logprobs.mean(dim=-1)
        per_sample = -(self.confidence * gold_logprob + self.smoothing * uniform_logprob)
        return per_sample.mean()
class DetDataSet(Dataset):
    """Text-detection dataset reading ``image_path<TAB>label_json`` annotation lines.

    Each sample is a dict with keys ``image`` (BGR ndarray), ``polys`` and
    ``ignore_tags``, passed through the configured transform pipeline.
    Malformed annotation lines and missing image files are skipped with a
    warning at load time.
    """

    def __init__(self, config, logger, mode):
        dataset_conf = config[mode]['dataset']
        self.base_dir = dataset_conf['data_base_dir']
        self.mode = mode
        self.logger = logger
        self.data_lines = self.get_image_info_list(dataset_conf['ano_file_path'])
        self._transforms = self._transforms_func_lst(dataset_conf['transforms'])
        if dataset_conf['do_shuffle']:
            random.shuffle(self.data_lines)

    def __len__(self):
        return len(self.data_lines)

    def get_image_info_list(self, file_path):
        """Parse the annotation file into [rel_image_path, label_json] pairs."""
        lines = []
        with codecs.open(file_path, 'r', 'utf8') as f:
            for line in f.readlines():
                tmp_data = line.strip().split('\t')
                if (len(tmp_data) != 2):
                    # Fix: logger.warn() is a deprecated alias of warning().
                    self.logger.warning(f'{line}')
                    continue
                image_path = os.path.join(self.base_dir, tmp_data[0])
                if (not os.path.exists(image_path)):
                    self.logger.warning(f'{image_path}')
                    continue
                lines.append([tmp_data[0], tmp_data[1]])
        return lines

    def det_label_encoder(self, label_str):
        """Decode a JSON label string into (boxes, ignore_tags) arrays.

        Fix: the original signature was missing ``self`` even though the
        method is invoked as ``self.det_label_encoder(...)``, and it used
        ``np.float`` / ``np.bool``, aliases removed in NumPy 1.24 -- replaced
        with the builtin ``float`` / ``bool`` dtypes.
        """
        label = json.loads(label_str)
        boxes = []
        ignore_tags = []
        for bno in range(0, len(label)):
            box = label[bno]['points']
            txt = label[bno]['transcription']
            # '*' / '###' mark unreadable text regions to be ignored in loss.
            if (txt in ['*', '###']):
                ignore_tags.append(True)
            else:
                ignore_tags.append(False)
            boxes.append(box)
        boxes = np.array(boxes, dtype=float)
        ignore_tags = np.array(ignore_tags, dtype=bool)
        return (boxes, ignore_tags)

    def _transforms_func_lst(self, config):
        """Instantiate transform operators from the config list.

        Fix: the original signature was missing ``self``.
        NOTE(review): ``eval(operator)`` resolves config-provided class names;
        acceptable only when config files are trusted.
        """
        func_lst = []
        for _transform in config:
            operator = list(_transform.keys())[0]
            params = (dict() if (_transform[operator] is None) else _transform[operator])
            func_lst.append(eval(operator)(**params))
        return func_lst

    def __getitem__(self, index):
        try:
            data_line = self.data_lines[index]
            image_path = os.path.join(self.base_dir, data_line[0])
            (polys, ignore_tags) = self.det_label_encoder(data_line[1])
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            if (image is None):
                self.logger.info(image_path)
            data = {'polys': polys, 'image': image, 'ignore_tags': ignore_tags}
            for _transform in self._transforms:
                data = _transform(data)
        except Exception as e:
            # Best-effort: log the failure and fall through to resample.
            self.logger.error(e)
            data = []
        if (not data):
            # A transform may drop the sample; retry with a random index.
            return self.__getitem__(np.random.randint(self.__len__()))
        return data
class DilatedContraction(GraphRewriterBase):
    """Graph rewrite pass: fold SpaceToBatchND -> Conv -> BatchToSpaceND into a
    single dilated convolution.

    TensorFlow expresses atrous convolutions as a SpaceToBatchND /
    BatchToSpaceND sandwich; this pass contracts each match into the conv
    node's ``dilations`` attribute, adding explicit padding when the pad and
    crop amounts do not cancel.
    """
    # NOTE(review): the bare call below looks like the residue of a stripped
    # decorator (e.g. ``@dump_elapsed_time('Pass DilatedContraction')``) on
    # do_transformation -- confirm against the original source.
    _elapsed_time('Pass DilatedContraction')

    def do_transformation(self):
        """Scan for the sandwich pattern and rewrite each match in place.

        Returns the serialized rewritten graph.
        """
        cur_graph = GraphAnalyzer()
        cur_graph.graph = self.model
        graph_info = cur_graph.parse_graph()
        target_nodes = cur_graph.query_fusion_pattern_nodes(['SpaceToBatchND', ['Conv2D', 'DepthwiseConv2dNative'], 'BatchToSpaceND'])
        for node_combination in target_nodes:
            stob_node = graph_info[node_combination[0]].node
            contraction_node = graph_info[node_combination[1]].node
            btos_node = graph_info[node_combination[2]].node
            stob_padding_node = graph_info[stob_node.input[2]].node
            block_shape_node = graph_info[btos_node.input[1]].node
            crops_node = graph_info[btos_node.input[2]].node
            # block_shape yields the dilation rate along H and W.
            block_value = [i for i in tensor_util.MakeNdarray(block_shape_node.attr['value'].tensor).flat]
            new_dilation = [1, block_value[0], block_value[1], 1]
            # Only constant paddings can be folded statically.
            if (stob_padding_node.op != 'Const'):
                continue
            padding_value = [i for i in tensor_util.MakeNdarray(stob_padding_node.attr['value'].tensor).flat]
            crops_value = [i for i in tensor_util.MakeNdarray(crops_node.attr['value'].tensor).flat]
            # Bypass SpaceToBatchND: feed its input straight into the conv.
            contraction_node.input[0] = stob_node.input[0]
            Helper.set_attr_int_list(contraction_node, 'dilations', new_dilation)
            # Net spatial padding = pad amount minus the crop removed after.
            real_padding = [(padding_value[i] - crops_value[i]) for i in range(4)]
            explict_padding = [0, 0, 0, 0, 0, 0, 0, 0]
            data_format = contraction_node.attr['data_format'].s.decode()
            if any(real_padding):
                contraction_node.attr['padding'].s = 'EXPLICIT'.encode()
                assert (data_format in ('NHWC', 'NCHW'))
                # Place the H/W pads in the correct slots for the layout.
                if (data_format == 'NHWC'):
                    explict_padding[2] = real_padding[0]
                    explict_padding[3] = real_padding[1]
                    explict_padding[4] = real_padding[2]
                    explict_padding[5] = real_padding[3]
                else:
                    explict_padding[4] = real_padding[0]
                    explict_padding[5] = real_padding[1]
                    explict_padding[6] = real_padding[2]
                    explict_padding[7] = real_padding[3]
                Helper.set_attr_int_list(contraction_node, 'explicit_paddings', explict_padding)
            # Shapes changed by the rewrite; drop the stale inference result.
            contraction_node.attr.pop('_output_shapes')
            cur_graph.remove_node(stob_node.name)
            # Rewire the BatchToSpaceND consumer to the conv output, then
            # remove the now-dead BatchToSpaceND node.
            following_node_name = graph_info[node_combination[2]].outputs[0]
            following_node = graph_info[following_node_name].node
            following_node.input[0] = btos_node.input[0]
            cur_graph.remove_node(btos_node.name)
        return cur_graph.dump_graph()
def get_phantom_from_mhd(filename, range_file, material_file=None):
    """Build a tigre Phantom from an ``.mhd`` volume plus optional range/material files.

    Parameters:
        filename: path to the .mhd image volume.
        range_file: maps voxel-value ranges to material names (may be None).
        material_file: optional material definition used to generate mu files.

    Returns:
        A ``phantoms.Phantom`` with geometry configured for a 512x512 detector.
    """
    (numpyImage, numpyOrigin, numpySpacing) = read_mhd(filename)
    phantom = phantoms.Phantom()
    phantom.mhd_file = filename
    phantom.range_file = range_file
    phantom.material_file = material_file
    phantom.phantom = numpyImage
    # Default cone-beam geometry sized to the volume; units presumably mm
    # (tigre convention) -- TODO confirm.
    phantom.geomet = tigre.geometry_default(nVoxel=phantom.phantom.shape)
    phantom.geomet.DSD = 1510
    phantom.geomet.dVoxel = numpySpacing
    phantom.geomet.sVoxel = (phantom.geomet.dVoxel * phantom.geomet.nVoxel)
    phantom.geomet.dDetector = np.array([0.784, 0.784])
    phantom.geomet.nDetector = np.array([512, 512])
    phantom.geomet.sDetector = (phantom.geomet.dDetector * phantom.geomet.nDetector)
    phantom.is_non_integer = False
    if (range_file is not None):
        (materials, low_range, high_range) = read_range_file(range_file)
        phantom.phan_map = materials
        # Ranges are collapsed: only the low bound of each range is honored.
        if np.any((low_range != high_range)):
            print('Warning: range file contains ranges. Using low range for all materials')
    if (material_file is not None):
        make_material_mu_files(material_file)
    return phantom
def revert_reorientation(image: str) -> None:
    """Undo a ``reorient_to_ras`` call, restoring the image's original affine.

    Expects ``<image minus .nii.gz>_originalAffine.pkl`` (written by
    reorient_to_ras) next to the image; the pickle is deleted after a
    successful restore and the image is overwritten in place.

    Raises:
        AssertionError: if the input is not .nii.gz, the pickle is missing,
            or the restored affine does not match the recorded original.
    """
    assert image.endswith('.nii.gz')
    expected_pkl = (image[:(- 7)] + '_originalAffine.pkl')
    assert isfile(expected_pkl), ('Must have a file with the original affine, as created by reorient_to_ras. Expected filename: %s' % expected_pkl)
    (original_affine, original_axcode) = load_pickle((image[:(- 7)] + '_originalAffine.pkl'))
    img = nib.load(image)
    before_revert = nib.aff2axcodes(img.affine)
    # Reorient back using the orientation derived from the stored affine.
    img = img.as_reoriented(io_orientation(original_affine))
    after_revert = nib.aff2axcodes(img.affine)
    print('before revert', before_revert, 'after revert', after_revert)
    restored_affine = img.affine
    assert np.all(np.isclose(original_affine, restored_affine)), 'restored affine does not match original affine, aborting!'
    nib.save(img, image)
    os.remove(expected_pkl)
class SpotClipSamplerDistributedSamplerWrapper(DistributedSampler):
    """DistributedSampler over a SpotClipSampler, preserving its shuffle/seed.

    The inner sampler's own shuffling is disabled and delegated to the
    DistributedSampler so every rank draws a disjoint shard of indices.
    """

    def __init__(self, sampler: SpotClipSampler, *args: Any, **kwargs: Any) -> None:
        # Capture the sampler's shuffle setting, then turn it off so this
        # wrapper becomes the single source of shuffling.
        shuffle = sampler.shuffle
        sampler.set_shuffle(False)
        super().__init__(_DatasetSamplerWrapper(sampler), *args, seed=sampler.seed, shuffle=shuffle, **kwargs)

    def __iter__(self) -> Iterator:
        # Re-materialize the wrapped sampler's indices, then yield only the
        # subset the parent selects for this rank.
        self.dataset.reset()
        return (self.dataset[index] for index in super().__iter__())

    def set_epoch(self, epoch: int) -> None:
        # Keep both the distributed wrapper and the inner sampler in sync.
        super().set_epoch(epoch)
        self.dataset.set_epoch(epoch)
        return

    def __repr__(self) -> str:
        return f'{__class__.__name__}(sampler={self.dataset}, shuffle={self.shuffle}, seed={self.seed})'
class FeaturesNet(nn.Module):
    """Frozen SqueezeNet-1.1 feature extractor (e.g. for perceptual losses).

    Concatenates flattened activations from the configured feature layers;
    every parameter is frozen at construction time.
    """

    def __init__(self, feature_layers=[0, 3, 5], use_normalization=False):
        # NOTE(review): mutable default ``feature_layers`` is shared across
        # calls; harmless here since it is only read, never mutated.
        super().__init__()
        model = models.squeezenet1_1(pretrained=True)
        model.float()
        model.eval()
        self.model = model
        self.feature_layers = feature_layers
        # ImageNet channel statistics, used only when use_normalization is set.
        self.mean = torch.FloatTensor([0.485, 0.456, 0.406])
        self.mean_tensor = None
        self.std = torch.FloatTensor([0.229, 0.224, 0.225])
        self.std_tensor = None
        self.use_normalization = use_normalization
        # Freeze everything: this net is a fixed feature extractor.
        for param in self.parameters():
            param.requires_grad = False

    def normalize(self, x):
        """Map inputs from [-1, 1] to ImageNet-normalized space (lazily cached)."""
        if (not self.use_normalization):
            return x
        if (self.mean_tensor is None):
            # Cache broadcast tensors on first use; assumes the input shape
            # stays fixed afterwards -- TODO confirm callers never change it.
            self.mean_tensor = Variable(self.mean.view(1, 3, 1, 1).expand(x.shape), requires_grad=False)
            self.std_tensor = Variable(self.std.view(1, 3, 1, 1).expand(x.shape), requires_grad=False)
        x = ((x + 1) / 2)
        return ((x - self.mean_tensor) / self.std_tensor)

    def run(self, x):
        """Collect flattened activations at each requested layer index."""
        features = []
        h = x
        for f in range((max(self.feature_layers) + 1)):
            h = self.model.features[f](h)
            if (f in self.feature_layers):
                not_normed_features = h.clone().view(h.size(0), (- 1))
                features.append(not_normed_features)
        return torch.cat(features, dim=1)

    def forward(self, x):
        h = self.normalize(x)
        return self.run(h)
def fixed_padding(inputs, kernel_size, data_format):
    """Pad the spatial dims so a strided conv behaves like SAME padding
    regardless of input size.

    ``data_format`` is 'channels_first' (NCHW); any other value is treated
    as channels-last (NHWC).
    """
    total_pad = kernel_size - 1
    pad_before = total_pad // 2
    pad_after = total_pad - pad_before
    if data_format == 'channels_first':
        paddings = [[0, 0], [0, 0], [pad_before, pad_after], [pad_before, pad_after]]
    else:
        paddings = [[0, 0], [pad_before, pad_after], [pad_before, pad_after], [0, 0]]
    return tf.pad(tensor=inputs, paddings=paddings)
def clip_gradient(optimizer, grad_clip):
    """Clamp every parameter gradient in ``optimizer`` to [-grad_clip, grad_clip].

    Iterates all parameter groups and clips in place; parameters without a
    gradient are ignored by ``clip_grad_value_``.
    """
    for param_group in optimizer.param_groups:
        group_params = param_group['params']
        torch.nn.utils.clip_grad_value_(group_params, grad_clip)
def gen_nnsmith_rules(inst):
    """Load cached NNSmith reuse rules for operator ``inst``.

    The target library ('torch' vs 'tf') is inferred from
    ``inst.name_index``.  Returns an empty list when the pickle is missing
    or unreadable.
    """
    lib = 'torch' if 'torch' in inst.name_index else 'tf'
    rule_path = os.path.join(RULE_DIR, f'{lib}_nnsmith_reuse', f'{inst.name_index}.pkl')
    try:
        with open(rule_path, 'rb') as f:
            return pickle.load(f)
    except (OSError, pickle.UnpicklingError, EOFError):
        # Fix: the original bare ``except:`` swallowed every error, including
        # KeyboardInterrupt and programming mistakes; only a missing or
        # corrupt cache file should mean "no rules".
        return []
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration for the OpenAI GPT model.

    Stores vocabulary size, context/position lengths, transformer
    dimensions, dropout rates and sequence-summary head settings.
    """
    pretrained_config_archive_map = OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = 'openai-gpt'

    def __init__(self, vocab_size=40478, n_positions=512, n_ctx=512, n_embd=768, n_layer=12, n_head=12, afn='gelu', resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initializer_range=0.02, predict_special_tokens=True, summary_type='cls_index', summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.predict_special_tokens = predict_special_tokens
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels

    # NOTE(review): in the upstream transformers implementation these four
    # accessors are @property aliases of the n_* attributes; without the
    # decorator, code reading ``config.max_position_embeddings`` would get a
    # bound method.  The decorators appear to have been stripped; restored.
    @property
    def max_position_embeddings(self):
        return self.n_positions

    @property
    def hidden_size(self):
        return self.n_embd

    @property
    def num_attention_heads(self):
        return self.n_head

    @property
    def num_hidden_layers(self):
        return self.n_layer
def load_adapter(pipe, ckpt_dir, adapter_name):
    """Attach a saved PEFT adapter to a diffusion pipeline.

    Loads the UNet adapter from ``<ckpt_dir>/unet`` and, when the directory
    exists, the text-encoder adapter from ``<ckpt_dir>/text_encoder``.
    """
    unet_dir = os.path.join(ckpt_dir, 'unet')
    text_encoder_dir = os.path.join(ckpt_dir, 'text_encoder')
    pipe.unet.load_adapter(unet_dir, adapter_name=adapter_name)
    if os.path.exists(text_encoder_dir):
        pipe.text_encoder.load_adapter(text_encoder_dir, adapter_name=adapter_name)
# NOTE(review): the original had bare expressions (`_mps`, and tuples
# containing a ``reason=`` keyword, which do not even parse) where
# decorators used to be.  Restored as ``@skip_mps`` and
# ``@unittest.skipIf(...)``, matching diffusers' fast-test conventions.
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (dummy-component) tests for IFImg2ImgSuperResolutionPipeline."""
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = (TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'})
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'})
    required_optional_params = (PipelineTesterMixin.required_optional_params - {'latents'})

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators; fall back to CPU seeding.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy'}
        return inputs

    @unittest.skipIf(((torch_device != 'cuda') or (not is_xformers_available())), reason='XFormers attention is only available with CUDA and `xformers` installed')
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=0.001)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf((torch_device != 'cuda'), reason='float16 requires CUDA')
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=0.1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=0.01)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=0.01)
def test_sphere_wrong_occupancy():
    """Occupancy of a regular 6x6x6 query grid around a sphere of radius 0.8.

    The grid spans 1.1x the sphere's bounding box, so every boundary plane
    lies outside the mesh.  compute_occupancy must match the hand-coded
    inside/outside mask, for both the default and the 3-sample estimator.
    """
    mesh = o3d.geometry.TriangleMesh.create_sphere(0.8)
    mesh = o3d.t.geometry.TriangleMesh.from_legacy(mesh)
    scene = o3d.t.geometry.RaycastingScene()
    scene.add_triangles(mesh)
    # Extend the grid 10% beyond the mesh bounds.
    min_bound = (mesh.vertex.positions.min(0).numpy() * 1.1)
    max_bound = (mesh.vertex.positions.max(0).numpy() * 1.1)
    xyz_range = np.linspace(min_bound, max_bound, num=6)
    query_points = np.stack(np.meshgrid(*xyz_range.T), axis=(- 1)).astype(np.float32)
    occupancy = scene.compute_occupancy(query_points)
    # Expected occupancy: 1.0 strictly inside the sphere, 0.0 elsewhere.
    expected = np.array([[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]], dtype=np.float32)
    np.testing.assert_equal(occupancy.numpy(), expected)
    # Multi-sample occupancy must agree with the single-sample result.
    occupancy_3samples = scene.compute_occupancy(query_points, nsamples=3)
    np.testing.assert_equal(occupancy_3samples.numpy(), expected)
def _create_model(variant, pretrained, model_kwargs):
    """Build a SelecSLS model for the given variant string.

    Supported variants: selecsls42/42b, selecsls60/60b, selecsls84; the 'b'
    variants use a narrower final head (1024 features).  ``model_kwargs`` is
    forwarded to the SelecSLS constructor; ``pretrained`` triggers a
    pretrained-weight load.

    Raises:
        ValueError: for an unknown variant name.
    """
    cfg = {}
    if variant.startswith('selecsls42'):
        cfg['block'] = SelecSLSBlock
        # Feature tuples appear to encode per-block channel/stride settings
        # -- confirm the exact field order against SelecSLSBlock's signature.
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 144, 144, True, 2), (144, 144, 144, 288, False, 1), (288, 0, 304, 304, True, 2), (304, 304, 304, 480, False, 1)]
        if (variant == 'selecsls42b'):
            cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)]
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [(480, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)]
            cfg['num_features'] = 1280
    elif variant.startswith('selecsls60'):
        cfg['block'] = SelecSLSBlock
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 128, False, 1), (128, 0, 128, 128, True, 2), (128, 128, 128, 128, False, 1), (128, 128, 128, 288, False, 1), (288, 0, 288, 288, True, 2), (288, 288, 288, 288, False, 1), (288, 288, 288, 288, False, 1), (288, 288, 288, 416, False, 1)]
        if (variant == 'selecsls60b'):
            cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1280, 3, 2), (1280, 1024, 1, 1)]
            cfg['num_features'] = 1024
        else:
            cfg['head'] = [(416, 756, 3, 2), (756, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 1, 1)]
            cfg['num_features'] = 1280
    elif (variant == 'selecsls84'):
        cfg['block'] = SelecSLSBlock
        cfg['features'] = [(32, 0, 64, 64, True, 2), (64, 64, 64, 144, False, 1), (144, 0, 144, 144, True, 2), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 144, False, 1), (144, 144, 144, 304, False, 1), (304, 0, 304, 304, True, 2), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 304, False, 1), (304, 304, 304, 512, False, 1)]
        cfg['head'] = [(512, 960, 3, 2), (960, 1024, 3, 1), (1024, 1024, 3, 2), (1024, 1280, 3, 1)]
        cfg['num_features'] = 1280
    else:
        raise ValueError((('Invalid net configuration ' + variant) + ' !!!'))
    model = SelecSLS(cfg, **model_kwargs)
    model.default_cfg = default_cfgs[variant]
    if pretrained:
        load_pretrained(model, num_classes=model_kwargs.get('num_classes', 0), in_chans=model_kwargs.get('in_chans', 3), strict=True)
    return model
def _extract_weight_tuples(model):
    """Return ``((module, 'weight'), ...)`` for every module reported by get_modules.

    The tuples are in the shape expected by e.g. torch pruning utilities.
    """
    return tuple((module, 'weight') for module in get_modules(model))
class CopyEnv(algorithmic_env.TapeAlgorithmicEnv):
    """Algorithmic 'Copy' task: the target sequence equals the input tape."""

    def __init__(self, base=5, chars=True):
        # base: alphabet size; chars: render symbols as letters.
        super(CopyEnv, self).__init__(base=base, chars=chars)

    def target_from_input_data(self, input_data):
        # Identity mapping: the agent must reproduce the input verbatim.
        return input_data
def to_off(path):
    """Normalize a mesh to the unit cube and export it as ``<name>_scaled.off``.

    Skips files whose output already exists.  Errors are printed (with
    traceback) rather than raised -- deliberate best-effort behavior for
    batch conversion runs.
    """
    file_path = os.path.dirname(path)
    file_name = os.path.splitext(os.path.basename(path))[0]
    output_file = os.path.join(file_path, (file_name + '_scaled.off'))
    if os.path.exists(output_file):
        print('Exists: {}'.format(output_file))
        return
    try:
        # HiddenPrints suppresses the loader's console chatter.
        with HiddenPrints():
            input = trimesh.load(path)
            mesh = as_mesh(input)
            # Center at the origin and scale the longest extent to 1.
            total_size = (mesh.bounds[1] - mesh.bounds[0]).max()
            centers = ((mesh.bounds[1] + mesh.bounds[0]) / 2)
            mesh.apply_translation((- centers))
            mesh.apply_scale((1 / total_size))
            mesh.export(output_file)
        print('Finished: {}'.format(path))
    except:
        # Intentional catch-all: report and continue with the next file.
        print('Error with {}: {}'.format(path, traceback.format_exc()))
# NOTE(review): the original line began with a bare ``.slow`` -- the residue
# of a stripped ``@pytest.mark.slow`` decorator (it is not valid Python on
# its own).  Restored as a decorator.
@pytest.mark.slow
def test_independent_samples():
    """Autocorrelation statistics on i.i.d. normal samples.

    For independent samples the autocorrelation curve should be ~1 at lag 0
    and ~0 at later lags, the estimated variance should match the empirical
    variance, and the integrated autocorrelation time tau should be ~1.
    """
    (nsamples, nchains) = _get_sample_size()
    key = random.PRNGKey(0)
    independent_samples = random.normal(key, (nsamples, nchains))
    (autocorr_curve, variance) = statistics.multi_chain_autocorr_and_variance(independent_samples)
    tau = statistics.tau(autocorr_curve)
    np.testing.assert_allclose(autocorr_curve[0], 1)
    np.testing.assert_allclose(autocorr_curve[1:100], 0, atol=0.01)
    np.testing.assert_allclose(variance, jnp.var(independent_samples), 0.01)
    np.testing.assert_allclose(tau, 1, 0.01)
class LazyModel(ABC):
    """Wrap a model loader so the model is materialized only on first use.

    ``loader`` is a zero-argument callable that constructs and returns the
    model; every public operation triggers the load if necessary.
    """

    def __init__(self, loader: Callable[[], Callable]):
        super().__init__()
        self.get_model = loader
        # Populated lazily by load().
        self.model: Optional[Callable] = None

    def is_in_memory(self) -> bool:
        """True once the underlying model has been constructed."""
        return self.model is not None

    def load(self):
        """Construct the model unless it already exists."""
        if self.model is None:
            self.model = self.get_model()

    def to(self, device: torch.device) -> 'LazyModel':
        """Move the (lazily loaded) model to ``device``; returns self."""
        self.load()
        self.model = self.model.to(device)
        return self

    def __call__(self, *args, **kwargs):
        self.load()
        return self.model(*args, **kwargs)

    def eval(self) -> 'LazyModel':
        """Put an nn.Module model into eval mode; returns self."""
        self.load()
        if isinstance(self.model, nn.Module):
            self.model.eval()
        return self
class _TestClassD(_TestClassA):
    """Configurable test class whose input_shape must be the sentinel 'shape'."""

    def __init__(self, input_shape: ShapeSpec, arg1: int, arg2, arg3=3):
        # Guard used by the config-system tests: the injected ShapeSpec is
        # expected to be the literal string 'shape'.
        assert (input_shape == 'shape')
        super().__init__(arg1, arg2, arg3)
class InnerProductParameter(_message.Message):
    """Generated protocol-buffer message for Caffe's InnerProduct layer params.

    Auto-generated via protobuf reflection (Python 2-style ``__metaclass__``
    hook); do not edit by hand.
    """
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER
def resume_exp_directory(cfg, pretrained_path=None):
    """Configure cfg's run/log/checkpoint paths when resuming from a checkpoint.

    If the checkpoint sits inside an existing run's ``checkpoint`` directory,
    that run directory is reused; otherwise a fresh timestamped run directory
    named after the checkpoint is created.  Rank 0 creates the directory.
    """
    # NOTE(review): the two cfg.get('pretrained_path', ...) fallbacks are
    # identical -- the second was probably meant to read a different key;
    # confirm against callers before changing.
    pretrained_path = (pretrained_path or cfg.get('pretrained_path', None) or cfg.get('pretrained_path', None))
    if (os.path.basename(os.path.dirname(pretrained_path)) == 'checkpoint'):
        # Resuming in place: the parent of 'checkpoint' is the run directory.
        cfg.run_dir = os.path.dirname(os.path.dirname(cfg.pretrained_path))
        cfg.log_dir = cfg.run_dir
        cfg.run_name = os.path.basename(cfg.run_dir)
        cfg.ckpt_dir = os.path.join(cfg.run_dir, 'checkpoint')
        cfg.code_dir = os.path.join(cfg.run_dir, 'code')
        # New log file per resume: run name + timestamp + short uuid.
        cfg.log_path = os.path.join(cfg.run_dir, (((cfg.run_name + time.strftime('%Y%m%d-%H%M%S-')) + str(shortuuid.uuid())) + '.log'))
    else:
        # Fresh run directory derived from the checkpoint name + unique id.
        expid = (time.strftime('%Y%m%d-%H%M%S-') + str(shortuuid.uuid()))
        cfg.run_name = '_'.join([os.path.basename(pretrained_path), expid])
        cfg.run_dir = os.path.join(cfg.root_dir, cfg.run_name)
        cfg.log_dir = cfg.run_dir
        cfg.ckpt_dir = os.path.join(cfg.run_dir, 'checkpoint')
        cfg.code_dir = os.path.join(cfg.run_dir, 'code')
        cfg.log_path = os.path.join(cfg.run_dir, (cfg.run_name + '.log'))
    if (cfg.get('rank', 0) == 0):
        os.makedirs(cfg.run_dir, exist_ok=True)
    cfg.wandb.tags = ['resume']
def test_ctypes_array_2d():
    """Check buffer_info for 2-D ctypes arrays of several element types."""
    char2d = ((ctypes.c_char * 10) * 4)()
    int2d = ((ctypes.c_int * 15) * 3)()
    long2d = ((ctypes.c_long * 7) * 2)()
    for carray in (char2d, int2d, long2d):
        info = m.get_buffer_info(carray)
        # Element size comes from the innermost ctypes element type.
        assert (info.itemsize == ctypes.sizeof(carray[0]._type_))
        assert (info.size == (len(carray) * len(carray[0])))
        assert (info.ndim == 2)
        assert (info.shape == [len(carray), len(carray[0])])
        # Row-major layout: the outer stride spans one whole row.
        assert (info.strides == [(info.itemsize * len(carray[0])), info.itemsize])
        assert (not info.readonly)
def check(variant, suffix, ckpt, gt_semantics):
    """Queue un-evaluated checkpoints of an objectnav run into a watchfile.

    Scans the checkpoint directory for saved checkpoint indices, skips those
    already evaluated on disk or already pending in the watchfile, appends
    the rest, and launches an eval for each newly queued checkpoint.
    """
    ckpt_dir = f'/srv/share/jye72/objectnav/{variant}-{suffix}/'
    if not osp.exists(ckpt_dir):
        return
    # Checkpoint files are named like <name>.<index>.<ext>.
    ckpts = [int(p.split('.')[-2]) for p in os.listdir(ckpt_dir)]
    # Checkpoints that already have an eval result on disk.
    existing_evals = []
    eval_dir = f'/srv/share/jye72/objectnav_eval/{variant}-{suffix}'
    if osp.exists(eval_dir):
        relevant_eval_file = f'eval_gt_{str(gt_semantics)}.json'
        existing_evals = [
            int(c) for c in os.listdir(eval_dir)
            if osp.exists(osp.join(eval_dir, c, relevant_eval_file))
        ]
    os.makedirs('/srv/flash1/jye72/projects/embodied-recall/watch', exist_ok=True)
    watchfile = f"/srv/flash1/jye72/projects/embodied-recall/watch/{variant}_{suffix}_{('gt' if gt_semantics else 'pred')}"
    # Checkpoints already queued in the watchfile (one index per line).
    pending_evals = []
    if osp.exists(watchfile):
        with open(watchfile, 'r') as f:
            pending_evals = [int(c) for c in f.readlines()]
    # Resume after the newest index that is done, pending, or below `ckpt`.
    start = max([ckpt - 1, *pending_evals, *existing_evals]) + 1
    with open(watchfile, 'a') as f:
        for c in sorted(ckpts):
            if c >= start:
                f.write(f'{c}\n')
                launch(variant, suffix, c, gt_semantics)
def setup_gen_trainer(config, dataloader_object):
    """Build a generation trainer from a saved checkpoint plus its evaluator.

    Returns (trainer, translation_evaluator); the trainer is placed in eval
    mode with weights restored from `<generation_model_path>/model.pt`.
    """
    model_path = os.path.join(config.generation_model_path, 'model.pt')
    print_msg('PG Model Path: %s' % model_path, 'GenerateTrainerSetup')
    translate_evaluator = TranslationEvaluator(config, config.output_dir)
    # Generation uses the generation-side tokenizer.
    dataloader_object.token_tokenizer = dataloader_object.gtoken_tokenizer
    experiment = get_experiment_enum(config.generation_model_type, None)
    gtrainer = TrainerFactory().get_trainer(config, dataloader_object, experiment)
    gtrainer.legacy = config.gen_legacy
    if gtrainer.legacy:
        # Legacy checkpoints used different transformer dimensions; override
        # the config before the model is instantiated.
        config.dim_feedforward = config.legacy_dim_feedforward
        config.nhead = config.legacy_nhead
        config.trans_dec_n_layers = config.legacy_trans_dec_n_layers
        config.trans_enc_n_layers = config.legacy_trans_enc_n_layers
    gtrainer.setup_model()
    gtrainer.load_pymodel(model_path)
    gtrainer.model.eval()
    return (gtrainer, translate_evaluator)
def get_in_dataset_patches_2(csv_path='csv/d4j-overlap.csv'):
    """Return the bug ids (spaces replaced by dashes) whose 'Result-2' column
    equals 'Fixed function' in the d4j overlap CSV.

    Args:
        csv_path: path to the overlap CSV; the default preserves the old
            hard-coded location (generalized for testability).

    Returns:
        Set of dash-joined bug identifiers.
    """
    data = pandas.read_csv(csv_path, header=0).fillna(0)
    # Fix: avoid shadowing the builtin `type` and positional cross-indexing;
    # iterate the two columns in lockstep instead.
    return {
        bug.replace(' ', '-')
        for result, bug in zip(data['Result-2'], data['Bug-2'])
        if result == 'Fixed function'
    }
def _add_inplace_unary_passthrough_function(name, preferred=None):
    """Attach an in-place unary method `name` to MPCTensor that forwards to
    the wrapped tensor and returns self (fluent style).

    When `preferred` is given, the wrapper is routed through
    mode(preferred, True) before being installed.
    """
    def iu_wrapper_function(self, *args, **kwargs):
        # Delegate to the underlying tensor's op, rebinding the result.
        self._tensor = getattr(self._tensor, name)(*args, **kwargs)
        return self

    wrapper = iu_wrapper_function
    if preferred is not None:
        wrapper = mode(preferred, True)(iu_wrapper_function)
    setattr(MPCTensor, name, wrapper)
class QCNNBase(NNBase):
    """Quantized CNN torso for pixel observations.

    Scales raw pixels from [0, 255] to [0, 1] and feeds them through three
    quantized conv layers plus a quantized linear projection to hidden_size.
    """

    def __init__(self, num_inputs, F_prior, recurrent=False, hidden_size=1024):
        super(QCNNBase, self).__init__(recurrent, hidden_size, hidden_size)
        layers = [
            Conv2d_Q(num_inputs, 32, 8, stride=4, F_prior=F_prior),
            nn.ReLU(),
            Conv2d_Q(32, 64, 4, stride=2, F_prior=F_prior),
            nn.ReLU(),
            Conv2d_Q(64, 128, 3, stride=1, F_prior=F_prior),
            nn.ReLU(),
            Flatten(),
            # assumes the conv stack yields a 7x7x128 map — TODO confirm input size
            Linear_Q(128 * 7 * 7, hidden_size, F_prior=F_prior),
            nn.ReLU(),
        ]
        self.main = nn.Sequential(*layers)
        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        # Normalize raw pixel values to [0, 1] before the conv stack.
        features = self.main(inputs / 255.0)
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)
        return features, rnn_hxs
def compute_new_deaths(df, in_col='deaths'):
    """Convert a column of cumulative-count sequences into daily increments.

    Each cell of df[in_col] holds a sequence of cumulative totals; the result
    holds, per row, a numpy array of consecutive differences (one element
    shorter than the input).

    Args:
        df: DataFrame whose `in_col` cells are sequences of numbers.
        in_col: name of the cumulative column (default 'deaths').

    Returns:
        Series of np.ndarray first differences.
    """
    # np.diff replaces the hand-rolled per-cell Python loop.
    return df[in_col].apply(lambda x: np.diff(np.asarray(x)))
def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
    """Return a predicate keeping instances that carry DensePose annotations,
    or None when DensePose training is disabled.

    An annotation qualifies if it has the full IUV key set, the full CSE key
    set, or — when coarse segm is trained from masks — a 'segmentation' field.
    """
    if not cfg.MODEL.DENSEPOSE_ON:
        return None
    use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS

    def has_densepose_annotations(instance: Instance) -> bool:
        def _qualifies(ann):
            if all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK):
                return True
            if all(key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK):
                return True
            return use_masks and ('segmentation' in ann)

        return any(_qualifies(ann) for ann in instance['annotations'])

    return has_densepose_annotations
def benchmark_add_10k(benchmark, benchmark_data_10k):
    """Benchmark a batched add() of 10k entries into a SlidingBoundariesArchive.

    A fresh archive (warmed with one entry) is constructed per round so the
    timed call always starts from identical state.
    """
    _, solutions, objectives, measures = benchmark_data_10k

    def setup():
        archive = SlidingBoundariesArchive(
            solution_dim=solutions.shape[1],
            dims=[10, 20],
            ranges=[(-1, 1), (-2, 2)],
            remap_frequency=100,
            buffer_capacity=1000,
        )
        # Warm the archive with a single entry before timing.
        archive.add_single(solutions[0], objectives[0], measures[0])
        return (archive,), {}

    def add_10k(archive):
        archive.add(solutions, objectives, measures)

    benchmark.pedantic(add_10k, setup=setup, rounds=5, iterations=1)
def init_dist(rank, world_size):
    """Populate single-node distributed env vars and initialize atorch.

    Uses the NCCL backend when CUDA is available, otherwise Gloo.
    """
    env = {
        'LOCAL_RANK': rank,
        'RANK': rank,
        'WORLD_SIZE': world_size,
        'NPROC_PER_NODE': world_size,
    }
    for key, value in env.items():
        os.environ[key] = str(value)
    backend = 'nccl' if torch.cuda.is_available() else 'gloo'
    atorch.init_distributed(backend)
def standard_size():
    """Return the number of tropisms stored in standard double precision.

    Thin wrapper over the phcpy C extension; imported lazily so the binding
    is only required when this helper is actually called.
    """
    from phcpy.phcpy2c3 import py2c_numbtrop_standard_size
    return py2c_numbtrop_standard_size()
# NOTE(review): the decorator below appears truncated by extraction — in
# fairseq this class is registered via @register_lr_scheduler('inverse_sqrt');
# confirm against the original file before merging.
_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR proportionally to the inverse square root of the update
    number, after a linear warmup from warmup_init_lr to lr over
    warmup_updates steps."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if (len(args.lr) > 1):
            raise ValueError('Cannot use a fixed learning rate schedule with inverse_sqrt. Consider --lr-scheduler=fixed instead.')
        warmup_end_lr = args.lr[0]
        # Negative warmup_init_lr means "unset": start from 0 when warming up,
        # otherwise jump straight to the target LR.
        if (args.warmup_init_lr < 0):
            args.warmup_init_lr = (0 if (args.warmup_updates > 0) else warmup_end_lr)
        # Per-update LR increment during the linear warmup phase.
        self.lr_step = ((warmup_end_lr - args.warmup_init_lr) / args.warmup_updates)
        # Chosen so the warmup and decay phases meet at warmup_updates.
        self.decay_factor = (warmup_end_lr * (args.warmup_updates ** 0.5))
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    # NOTE(review): defined without @staticmethod (and no self); presumably
    # invoked on the class, e.g. Cls.add_args(parser) — confirm.
    def add_args(parser):
        """Add warmup CLI arguments to the parser."""
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=(- 1), type=float, metavar='LR', help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """Epoch-boundary hook; this schedule only depends on update count."""
        super().step(epoch, val_loss)
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each optimizer step."""
        if (num_updates < self.args.warmup_updates):
            self.lr = (self.args.warmup_init_lr + (num_updates * self.lr_step))
        else:
            self.lr = (self.decay_factor * (num_updates ** (- 0.5)))
        self.optimizer.set_lr(self.lr)
        return self.lr
def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False, lang_flags=None, extra_valid_flags=None):
    """Train a tiny fairseq translation model for one epoch (test helper).

    Optionally validates the last checkpoint afterwards. Extra CLI flags can
    be appended via extra_flags / extra_valid_flags; lang_flags defaults to
    the synthetic 'in'/'out' language pair.
    """
    if lang_flags is None:
        lang_flags = ['--source-lang', 'in', '--target-lang', 'out']
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        ['--task', task, data_dir,
         '--save-dir', data_dir,
         '--arch', arch,
         '--lr', '0.05',
         '--max-tokens', '500',
         '--max-epoch', '1',
         '--no-progress-bar',
         '--distributed-world-size', '1',
         # Fix: argv entries must be strings; 0 was passed as an int.
         '--num-workers', '0']
        + lang_flags
        + (extra_flags or []),
    )
    train.main(train_args)
    if run_validation:
        validate_parser = options.get_validation_parser()
        validate_args = options.parse_args_and_arch(
            validate_parser,
            ['--task', task, data_dir,
             '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
             '--valid-subset', 'valid',
             '--max-tokens', '500',
             '--no-progress-bar']
            + lang_flags
            + (extra_valid_flags or []),
        )
        validate.main(validate_args)
def lpg(env_fn, actor_critic=core.MLPActorCritic, ac_kwargs=None, seed=0,
        steps_per_epoch=4000, epochs=50, gamma=0.99, pi_lr=0.0003, vf_lr=0.001,
        ccritic_lr=0.001, train_v_iters=80, train_ccritic_iters=80, lam=0.97,
        max_ep_len=1000, target_kl=0.01, logger_kwargs=None, save_freq=10,
        backtrack_coeff=0.8, backtrack_iters=100, model_save=False):
    """Lyapunov-based policy gradient with a safety (constraint) critic.

    The policy is updated with a TRPO-style natural-gradient step bounded by
    a KL trust region (target_kl) found via backtracking line search; the
    value function and the constraint critic are regressed with Adam. After a
    warmup third of training, actions are projected to safe ones via the
    constraint critic's safety_correction.

    Runs under MPI; each process handles steps_per_epoch / num_procs()
    transitions per epoch. Fixes vs. the original:
      * `s = x_hat.T Hx(x_hat)` (invalid syntax) -> inner product with `@`.
      * `info` could be unbound in the env.step exception handler.
      * mutable dict() defaults replaced with None-normalization.
    """
    # Avoid mutable default args; normalize before save_config(locals()).
    ac_kwargs = {} if ac_kwargs is None else ac_kwargs
    logger_kwargs = {} if logger_kwargs is None else logger_kwargs
    setup_pytorch_for_mpi()
    logger = EpochLogger(**logger_kwargs)
    logger.save_config(locals())
    seed += 10000 * proc_id()
    torch.manual_seed(seed)
    np.random.seed(seed)
    env = env_fn()
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape
    ac = actor_critic(env.observation_space, env.action_space, **ac_kwargs).to(device)
    sync_params(ac)  # keep all MPI workers' parameters identical
    var_counts = tuple(core.count_vars(module) for module in [ac.pi, ac.v])
    logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
    local_steps_per_epoch = int(steps_per_epoch / num_procs())
    buf = LpgBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)

    def compute_kl_pi(data, cur_pi):
        """Mean KL between the stored (old) policy params and cur_pi."""
        (obs, act, adv, logp_old, mu_old, logstd_old) = (
            data['obs'], data['act'], data['adv'], data['logp'], data['mu'], data['logstd'])
        (pi, logp) = cur_pi(obs, act)
        average_kl = cur_pi._d_kl(
            torch.as_tensor(obs, dtype=torch.float32),
            torch.as_tensor(mu_old, dtype=torch.float32),
            torch.as_tensor(logstd_old, dtype=torch.float32), device=device)
        return average_kl

    def compute_loss_pi(data, cur_pi):
        """Importance-weighted surrogate policy loss plus diagnostics."""
        (obs, act, adv, logp_old) = (data['obs'], data['act'], data['adv'], data['logp'])
        (pi, logp) = cur_pi(obs, act)
        ratio = torch.exp(logp - logp_old)
        loss_pi = -(ratio * adv).mean()
        approx_kl = (logp_old - logp).mean().item()
        ent = pi.entropy().mean().item()
        pi_info = dict(kl=approx_kl, ent=ent)
        return (loss_pi, pi_info)

    def compute_loss_v(data):
        """MSE between value predictions and empirical returns."""
        (obs, ret) = (data['obs'], data['ret'])
        return ((ac.v(obs) - ret) ** 2).mean()

    def compute_loss_ccritic(data):
        """MSE between constraint-critic predictions and cost targets."""
        (obs, act_safe, targetc) = (data['obs'], data['act_safe'], data['targetc'])
        obs_act = torch.cat((obs, act_safe), dim=1)
        return ((ac.ccritic(obs_act) - targetc) ** 2).mean()

    vf_optimizer = Adam(ac.v.parameters(), lr=vf_lr)
    ccritic_optimizer = Adam(ac.ccritic.parameters(), lr=ccritic_lr)
    if model_save:
        logger.setup_pytorch_saver(ac)

    def update():
        """One line-search policy step plus value / constraint-critic fits."""
        data = buf.get()
        (pi_l_old, pi_info_old) = compute_loss_pi(data, ac.pi)
        pi_l_old = pi_l_old.item()
        v_l_old = compute_loss_v(data).item()
        (loss_pi, pi_info) = compute_loss_pi(data, ac.pi)
        g = auto_grad(loss_pi, ac.pi)  # flattened policy gradient
        kl_div = compute_kl_pi(data, ac.pi)
        # Fisher-vector product (Hessian of the KL) for conjugate gradient.
        Hx = (lambda x: auto_hession_x(kl_div, ac.pi, torch.FloatTensor(x).to(device)))
        x_hat = cg(Hx, g)  # approximately F^{-1} g
        # Fix: originally written as `x_hat.T Hx(x_hat)` (invalid syntax);
        # the intended quadratic form is the inner product below.
        s = x_hat.T @ Hx(x_hat)
        s_ep = s if s < 0.0 else 1  # diagnostic: flags negative curvature
        x_direction = np.sqrt(2 * target_kl / (s + EPS)) * x_hat
        actor_tmp = copy.deepcopy(ac.pi)

        def set_and_eval(step):
            """Evaluate KL and surrogate loss at a candidate step size."""
            new_param = get_net_param_np_vec(ac.pi) - step * x_direction
            assign_net_param_from_flat(new_param, actor_tmp)
            kl = compute_kl_pi(data, actor_tmp)
            (pi_l, _) = compute_loss_pi(data, actor_tmp)
            return (kl, pi_l)

        # Backtracking line search: accept the first step that satisfies the
        # KL constraint without increasing the surrogate loss.
        for j in range(backtrack_iters):
            try:
                (kl, pi_l_new) = set_and_eval(backtrack_coeff ** j)
            except:
                import ipdb
                ipdb.set_trace()
            if kl.item() <= target_kl and pi_l_new.item() <= pi_l_old:
                print(colorize('Accepting new params at step %d of line search.' % j, 'green', bold=False))
                new_param = get_net_param_np_vec(ac.pi) - (backtrack_coeff ** j) * x_direction
                assign_net_param_from_flat(new_param, ac.pi)
                (loss_pi, pi_info) = compute_loss_pi(data, ac.pi)
                break
            if j == backtrack_iters - 1:
                print(colorize('Line search failed! Keeping old params.', 'yellow', bold=False))
        for i in range(train_v_iters):
            vf_optimizer.zero_grad()
            loss_v = compute_loss_v(data)
            loss_v.backward()
            mpi_avg_grads(ac.v)  # average gradients across MPI processes
            vf_optimizer.step()
        for i in range(train_ccritic_iters):
            ccritic_optimizer.zero_grad()
            loss_ccritic = compute_loss_ccritic(data)
            loss_ccritic.backward()
            mpi_avg_grads(ac.ccritic)
            ccritic_optimizer.step()
        (kl, ent) = (pi_info['kl'], pi_info_old['ent'])
        logger.store(LossPi=pi_l_old, LossV=v_l_old, KL=kl, Entropy=ent,
                     DeltaLossPi=(loss_pi.item() - pi_l_old),
                     DeltaLossV=(loss_v.item() - v_l_old), EpochS=s_ep)

    start_time = time.time()
    # Some environments fail intermittently on reset; retry until it works.
    while True:
        try:
            (o, ep_ret, ep_len) = (env.reset(), 0, 0)
            break
        except:
            print('reset environment is wrong, try next reset')
    (ep_cost, cum_cost, prev_c) = (0, 0, 0)
    for epoch in range(epochs):
        for t in range(local_steps_per_epoch):
            (a, v, logp, mu, logstd, qc) = ac.step(torch.as_tensor(o, dtype=torch.float32))
            if t == 0:
                ac.ccritic.store_init(o, a)
                print('Initial D(x0): ', ac.ccritic.get_Q_init())
            # After a warmup third of training, project actions to safe ones.
            warmup_ratio = 1.0 / 3.0
            if epoch > epochs * warmup_ratio:
                a_safe = ac.ccritic.safety_correction(o, a, prev_c)
                assert a_safe is not a
            else:
                a_safe = a
            try:
                (next_o, r, d, info) = env.step(a_safe)
                assert 'cost' in info.keys()
            except:
                # Treat a failed step as terminal with zero reward/cost.
                # Fix: `info` may be unbound when env.step itself raised,
                # so build a fresh dict instead of mutating it.
                (next_o, r, d) = (o, 0, True)
                info = {'cost': 0}
            c = info['cost']
            cum_cost += c
            ep_ret += r
            ep_len += 1
            ep_cost += c
            logger.store(VVals=v)
            timeout = ep_len == max_ep_len
            terminal = d or timeout
            epoch_ended = t == local_steps_per_epoch - 1
            buf.store(o, a, a_safe, r, v, logp, mu, logstd, c, qc)
            o = next_o
            if terminal or epoch_ended:
                if epoch_ended and not terminal:
                    print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True)
                # Bootstrap the value target when the trajectory was cut off.
                if timeout or epoch_ended:
                    (_, v, _, _, _, _) = ac.step(torch.as_tensor(o, dtype=torch.float32))
                else:
                    v = 0
                buf.finish_path(v)
                if terminal:
                    logger.store(EpRet=ep_ret, EpLen=ep_len, EpCost=ep_cost)
                while True:
                    try:
                        (o, ep_ret, ep_len) = (env.reset(), 0, 0)
                        break
                    except:
                        print('reset environment is wrong, try next reset')
                ep_cost = 0
                prev_c = 0
        if ((epoch % save_freq == 0) or (epoch == epochs - 1)) and model_save:
            logger.save_state({'env': env}, None)
        update()
        # Epoch diagnostics.
        cumulative_cost = mpi_sum(cum_cost)
        cost_rate = cumulative_cost / ((epoch + 1) * steps_per_epoch)
        logger.log_tabular('Epoch', epoch)
        logger.log_tabular('EpRet', with_min_and_max=True)
        logger.log_tabular('EpCost', with_min_and_max=True)
        logger.log_tabular('EpLen', average_only=True)
        logger.log_tabular('CumulativeCost', cumulative_cost)
        logger.log_tabular('CostRate', cost_rate)
        logger.log_tabular('VVals', with_min_and_max=True)
        logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)
        logger.log_tabular('LossPi', average_only=True)
        logger.log_tabular('LossV', average_only=True)
        logger.log_tabular('DeltaLossPi', average_only=True)
        logger.log_tabular('DeltaLossV', average_only=True)
        logger.log_tabular('Entropy', average_only=True)
        logger.log_tabular('KL', average_only=True)
        logger.log_tabular('Time', time.time() - start_time)
        logger.log_tabular('EpochS', average_only=True)
        logger.dump_tabular()
def resnext272_2x32d_svhn(num_classes=10, **kwargs):
    """ResNeXt-272 (2x32d) model configuration for SVHN.

    Args:
        num_classes: number of output classes (default 10).
        **kwargs: forwarded to get_resnext_cifar.
    """
    return get_resnext_cifar(
        num_classes=num_classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name='resnext272_2x32d_svhn',
        **kwargs,
    )
class TestFrameChange(QiskitTestCase):
    """Unit tests for the FrameChange pulse command."""

    def test_default(self):
        """A FrameChange stores its phase and has zero duration."""
        command = FrameChange(phase=1.57)
        self.assertEqual(command.phase, 1.57)
        self.assertEqual(command.duration, 0)
def _ensure_tensor(input): if isinstance(input, (int, float)): input = torch.tensor(input) return input
def _create_ngrams(tokens, n): ngrams = collections.Counter() for ngram in (tuple(tokens[i:(i + n)]) for i in range(((len(tokens) - n) + 1))): ngrams[ngram] += 1 return ngrams
class AgentParams(Params):
    """Agent hyperparameters: training defaults plus nested environment and
    circuit parameter groups.

    Dedup fix: the SL agent (ntm/dnc circuits) and the 'empty' agent used the
    exact same hyperparameter set, previously copy-pasted three times; it is
    now applied once via _set_default_train_params with identical values.
    """

    def __init__(self):
        super(AgentParams, self).__init__()
        # Apply the shared defaults for every combination the original
        # branches covered (sl+ntm, sl+dnc, empty); other combinations set
        # nothing, exactly as before.
        if (self.agent_type == 'sl' and self.circuit_type in ('ntm', 'dnc')) \
                or self.agent_type == 'empty':
            self._set_default_train_params()
        self.env_params = EnvParams()
        self.circuit_params = CircuitParams()

    def _set_default_train_params(self):
        """Shared training defaults (identical to the former three branches)."""
        self.criteria = nn.BCELoss()
        self.optim = optim.RMSprop
        self.steps = 100000        # max training iterations
        self.batch_size = 16
        self.early_stop = None     # no early stopping by default
        self.clip_grad = 50.0
        self.lr = 0.0001
        self.optim_eps = 1e-10
        self.optim_alpha = 0.9     # RMSprop smoothing constant
        self.eval_freq = 500
        self.eval_steps = 50
        self.prog_freq = self.eval_freq
        self.test_nepisodes = 5
def _calculate_valid_crop_size(crop_size, upscale_factor): return (crop_size - (crop_size % upscale_factor))
def get_trainer():
    """Build train/test closures for a denoising binary-code autoencoder.

    The encoder output is perturbed with Gaussian noise (stddev given by the
    trainable `code_noise`), squashed to (0, 1) with a sigmoid and decoded;
    at test time the code is hard-thresholded instead.

    Returns:
        (feed, test): feed(batch, cnoise) runs one training step and returns
        the loss; test(batch, quanth) runs inference with quantization
        threshold `quanth` and returns the fetched tensors.
    """
    x = ph([None, None, 3])
    sx = tf.shape(x)
    noisy_x = x
    noisy_x = tf.clip_by_value(noisy_x, clip_value_max=1.0, clip_value_min=0.0)
    code_noise = tf.Variable(1.0)
    linear_code = enc(noisy_x)
    # Subtract noise from the pre-sigmoid code to push it toward saturation.
    noisy_code = linear_code - tf.random_normal(stddev=code_noise, shape=tf.shape(linear_code))
    binary_code = Act('sigmoid')(noisy_code)
    y = dec(binary_code)
    # Reconstruction loss plus a small L2 penalty on the code magnitude.
    loss = tf.reduce_mean((y - noisy_x) ** 2) + tf.reduce_mean(binary_code ** 2) * 0.0001
    opt = tf.train.AdamOptimizer()
    train_step = opt.minimize(loss, var_list=enc.get_weights() + dec.get_weights())

    def feed(batch, cnoise):
        """One training step on `batch` with noise stddev `cnoise`; returns loss."""
        sess = ct.get_session()
        res = sess.run([train_step, loss], feed_dict={x: batch, code_noise: cnoise})
        return res[1]

    set_training_state(False)
    # Fix: the variable name was garbled ('quantization_thresholcomdefendd'),
    # leaving `quantization_threshold` below undefined at graph-build time.
    quantization_threshold = tf.Variable(0.5)
    binary_code_test = tf.cast(binary_code > quantization_threshold, tf.float32)
    y_test = dec(binary_code_test)

    def test(batch, quanth):
        """Inference on `batch` with hard threshold `quanth`; returns tensors."""
        sess = ct.get_session()
        res = sess.run([binary_code_test, y_test, binary_code, y, noisy_x, x],
                       feed_dict={x: batch, quantization_threshold: quanth})
        return res

    return (feed, test)
def get_name(node, nid):
    """Format a display name '<tag> <nid>' for a node and return it together
    with the next available id (nid + 1)."""
    # NOTE(review): `t` is computed but never used; kept because
    # int(node.state.t) may be relied on to raise for malformed states —
    # confirm before removing.
    t = 0 if node.state is None else int(node.state.t)
    return '%s %d' % (node.tag, nid), nid + 1
# NOTE(review): the decorator below lost its prefix during extraction —
# presumably `@pytest.mark.filterwarnings('ignore::DeprecationWarning')`;
# restore the `@pytest.mark` part when merging.
.filterwarnings('ignore::DeprecationWarning')
def test_log_file() -> None:
    """End-to-end checks of configure_logging's file handler: level
    filtering, appending with a warning, overwrite mode, and the
    warn_with_log / raise_error helpers."""
    with tempfile.TemporaryDirectory() as tmp:
        tmpdir = Path(tmp)
        # Default level: only WARNING and above reach the file.
        configure_logging(fname=(tmpdir / 'test1.log'))
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        _close_handlers(logger)
        with open((tmpdir / 'test1.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # level='INFO': INFO and above are written, DEBUG is filtered.
        configure_logging(fname=(tmpdir / 'test2.log'), level='INFO')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        _close_handlers(logger)
        with open((tmpdir / 'test2.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert any((('Info message' in line) for line in lines))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # level='WARNING': only WARNING and ERROR are written.
        configure_logging(fname=(tmpdir / 'test3.log'), level='WARNING')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        _close_handlers(logger)
        with open((tmpdir / 'test3.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert any((('Warn message' in line) for line in lines))
        assert any((('Error message' in line) for line in lines))
        # level='ERROR': only ERROR is written. Handlers are deliberately
        # left open so the re-configure below triggers the append warning.
        configure_logging(fname=(tmpdir / 'test4.log'), level='ERROR')
        logger.debug('Debug message')
        logger.info('Info message')
        logger.warning('Warn message')
        logger.error('Error message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert any((('Error message' in line) for line in lines))
        # Re-configuring onto the same (open) file warns and appends.
        with pytest.warns(UserWarning, match='to avoid this message'):
            configure_logging(fname=(tmpdir / 'test4.log'), level='WARNING')
        logger.debug('Debug2 message')
        logger.info('Info2 message')
        logger.warning('Warn2 message')
        logger.error('Error2 message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        # Old content survives the append; new messages follow the new level.
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert any((('Error message' in line) for line in lines))
        assert (not any((('Debug2 message' in line) for line in lines)))
        assert (not any((('Info2 message' in line) for line in lines)))
        assert any((('Warn2 message' in line) for line in lines))
        assert any((('Error2 message' in line) for line in lines))
        # overwrite=True truncates everything previously written.
        configure_logging(fname=(tmpdir / 'test4.log'), level='WARNING', overwrite=True)
        logger.debug('Debug3 message')
        logger.info('Info3 message')
        logger.warning('Warn3 message')
        logger.error('Error3 message')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert (not any((('Debug message' in line) for line in lines)))
        assert (not any((('Info message' in line) for line in lines)))
        assert (not any((('Warn message' in line) for line in lines)))
        assert (not any((('Error message' in line) for line in lines)))
        assert (not any((('Debug2 message' in line) for line in lines)))
        assert (not any((('Info2 message' in line) for line in lines)))
        assert (not any((('Warn2 message' in line) for line in lines)))
        assert (not any((('Error2 message' in line) for line in lines)))
        assert (not any((('Debug3 message' in line) for line in lines)))
        assert (not any((('Info3 message' in line) for line in lines)))
        assert any((('Warn3 message' in line) for line in lines))
        assert any((('Error3 message' in line) for line in lines))
        # The helpers both surface the condition AND write it to the log.
        with pytest.warns(RuntimeWarning, match='Warn raised'):
            warn_with_log('Warn raised')
        with pytest.raises(ValueError, match='Error raised'):
            raise_error('Error raised')
        with open((tmpdir / 'test4.log')) as f:
            lines = f.readlines()
        assert any((('Warn raised' in line) for line in lines))
        assert any((('Error raised' in line) for line in lines))
def norm_ema_inplace(moving_avg, new, decay):
    """EMA-update `moving_avg` toward `new` in place, then re-project the
    result through l2norm (keeps the running average normalized)."""
    blended = moving_avg.data.mul_(decay).add_(new, alpha=1 - decay)
    moving_avg.data.copy_(l2norm(blended))
class KukaKr3(Robot):
    """KUKA KR3 R540 arm: thin Robot subclass that pins the URDF path, base
    link and end-effector link for this specific robot model."""

    def __init__(self, name: str, id_num: int, world, sim_step: float, use_physics_sim: bool,
                 base_position: Union[(list, np.ndarray)], base_orientation: Union[(list, np.ndarray)],
                 resting_angles: Union[(list, np.ndarray)], control_mode: Union[(int, str)],
                 ik_xyz_delta: float = 0.005, ik_rpy_delta: float = 0.005,
                 jt_joint_delta: float = 0.5,
                 joint_velocities_overwrite: Union[(float, List)] = 1,
                 joint_limits_overwrite: Union[(float, List)] = 1,
                 controlled_joints: Union[(list, None)] = None,
                 self_collision: bool = True):
        # Fix: `controlled_joints` used a mutable default ([]), shared across
        # every instance; normalize a None default to a fresh list instead.
        if controlled_joints is None:
            controlled_joints = []
        super().__init__(name, id_num, world, sim_step, use_physics_sim, base_position,
                         base_orientation, resting_angles, control_mode, ik_xyz_delta,
                         ik_rpy_delta, jt_joint_delta, joint_velocities_overwrite,
                         joint_limits_overwrite, controlled_joints, self_collision)
        # Robot-model-specific constants consumed by the Robot base class.
        self.end_effector_link_id = 'tool0'
        self.base_link_id = 'base_link'
        self.urdf_path = 'robots/predefined/kuka_kr3/urdf/kr3r540.urdf'
class GeneralData(NiceRepr):
    """Container splitting per-sample data into immutable meta-info fields and
    mutable data fields, with dict-like access plus tensor conveniences
    (to/cpu/cuda/mlu/detach/numpy)."""

    def __init__(self, meta_info=None, data=None):
        # Field-name registries; guarded against overwrite in __setattr__.
        self._meta_info_fields = set()
        self._data_fields = set()
        if (meta_info is not None):
            self.set_meta_info(meta_info=meta_info)
        if (data is not None):
            self.set_data(data)

    def set_meta_info(self, meta_info):
        """Register meta info (deep-copied); re-setting an existing key to a
        different value raises, identical values are a no-op."""
        assert isinstance(meta_info, dict), f'meta should be a `dict` but get {meta_info}'
        meta = copy.deepcopy(meta_info)
        for (k, v) in meta.items():
            if (k in self._meta_info_fields):
                ori_value = getattr(self, k)
                # Tensors/arrays need elementwise comparison.
                if isinstance(ori_value, (torch.Tensor, np.ndarray)):
                    if (ori_value == v).all():
                        continue
                    else:
                        raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
                elif (ori_value == v):
                    continue
                else:
                    raise KeyError(f'img_meta_info {k} has been set as {getattr(self, k)} before, which is immutable ')
            else:
                self._meta_info_fields.add(k)
                # Bypass __setattr__ so meta keys are not registered as data.
                self.__dict__[k] = v

    def set_data(self, data):
        """Register data fields via normal attribute assignment."""
        assert isinstance(data, dict), f'meta should be a `dict` but get {data}'
        for (k, v) in data.items():
            self.__setattr__(k, v)

    def new(self, meta_info=None, data=None):
        """Create a new instance carrying this one's meta info, optionally
        extended with extra meta_info/data."""
        new_data = self.__class__()
        new_data.set_meta_info(dict(self.meta_info_items()))
        if (meta_info is not None):
            new_data.set_meta_info(meta_info)
        if (data is not None):
            new_data.set_data(data)
        return new_data

    def keys(self):
        """Data field names (meta info excluded)."""
        return [key for key in self._data_fields]

    def meta_info_keys(self):
        """Meta info field names."""
        return [key for key in self._meta_info_fields]

    def values(self):
        """Data field values."""
        return [getattr(self, k) for k in self.keys()]

    def meta_info_values(self):
        """Meta info field values."""
        return [getattr(self, k) for k in self.meta_info_keys()]

    def items(self):
        """Yield (name, value) pairs over data fields."""
        for k in self.keys():
            (yield (k, getattr(self, k)))

    def meta_info_items(self):
        """Yield (name, value) pairs over meta info fields."""
        for k in self.meta_info_keys():
            (yield (k, getattr(self, k)))

    def __setattr__(self, name, val):
        # Private registries may only be set once (during __init__);
        # meta info keys may never be assigned through attribute access.
        if (name in ('_meta_info_fields', '_data_fields')):
            if (not hasattr(self, name)):
                super().__setattr__(name, val)
            else:
                raise AttributeError(f'{name} has been used as a private attribute, which is immutable. ')
        else:
            if (name in self._meta_info_fields):
                raise AttributeError(f'`{name}` is used in meta information,which is immutable')
            self._data_fields.add(name)
            super().__setattr__(name, val)

    def __delattr__(self, item):
        # Registries and meta info fields cannot be deleted.
        if (item in ('_meta_info_fields', '_data_fields')):
            raise AttributeError(f'{item} has been used as a private attribute, which is immutable. ')
        if (item in self._meta_info_fields):
            raise KeyError(f'{item} is used in meta information, which is immutable.')
        super().__delattr__(item)
        if (item in self._data_fields):
            self._data_fields.remove(item)

    # Dict-style item access maps straight onto attribute access.
    __setitem__ = __setattr__
    __delitem__ = __delattr__

    def __getitem__(self, name):
        return getattr(self, name)

    def get(self, *args):
        """dict.get-style lookup: get(name[, default])."""
        assert (len(args) < 3), '`get` get more than 2 arguments'
        return self.__dict__.get(*args)

    def pop(self, *args):
        """dict.pop-style removal of a data field: pop(name[, default]).
        Meta info fields cannot be popped."""
        assert (len(args) < 3), '`pop` get more than 2 arguments'
        name = args[0]
        if (name in self._meta_info_fields):
            raise KeyError(f'{name} is a key in meta information, which is immutable')
        if (args[0] in self._data_fields):
            self._data_fields.remove(args[0])
            return self.__dict__.pop(*args)
        elif (len(args) == 2):
            # Missing key but a default was supplied.
            return args[1]
        else:
            raise KeyError(f'{args[0]}')

    def __contains__(self, item):
        return ((item in self._data_fields) or (item in self._meta_info_fields))

    def to(self, *args, **kwargs):
        """New instance with every data value that supports .to() converted."""
        new_data = self.new()
        for (k, v) in self.items():
            if hasattr(v, 'to'):
                v = v.to(*args, **kwargs)
            new_data[k] = v
        return new_data

    def cpu(self):
        """New instance with tensor data moved to CPU."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cpu()
            new_data[k] = v
        return new_data

    def mlu(self):
        """New instance with tensor data moved to MLU devices."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.mlu()
            new_data[k] = v
        return new_data

    def cuda(self):
        """New instance with tensor data moved to CUDA devices."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.cuda()
            new_data[k] = v
        return new_data

    def detach(self):
        """New instance with tensor data detached from the autograd graph."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach()
            new_data[k] = v
        return new_data

    def numpy(self):
        """New instance with tensor data converted to numpy arrays."""
        new_data = self.new()
        for (k, v) in self.items():
            if isinstance(v, torch.Tensor):
                v = v.detach().cpu().numpy()
            new_data[k] = v
        return new_data

    def __nice__(self):
        """Human-readable dump consumed by NiceRepr's __repr__.
        (Note: local `repr` shadows the builtin; kept as-is.)"""
        repr = '\n \n META INFORMATION \n'
        for (k, v) in self.meta_info_items():
            repr += f'''{k}: {v} 
'''
        repr += '\n DATA FIELDS \n'
        for (k, v) in self.items():
            if isinstance(v, (torch.Tensor, np.ndarray)):
                repr += f'''shape of {k}: {v.shape} 
'''
            else:
                repr += f'''{k}: {v} 
'''
        return (repr + '\n')
def main():
    """Roll out a trained DQN policy on MountainCar-v0 forever, rendering and
    printing the reward of each episode."""
    env = gym.make('MountainCar-v0')
    act = deepq.load('mountaincar_model.pkl')
    while True:
        obs, done = env.reset(), False
        episode_rew = 0
        while not done:
            env.render()
            # The policy expects a batch dimension; take the single action.
            obs, rew, done, _ = env.step(act(obs[None])[0])
            episode_rew += rew
        print('Episode reward', episode_rew)
def compute_cost_mat(X_1, X_2, rescale_cost=False, cost_distance='l2'):
    """Pairwise cost matrix between two point sets.

    Args:
        X_1: (n_1, d) tensor.
        X_2: (n_2, d) tensor.
        rescale_cost: if True, divide by the max entry so the largest cost is 1.
        cost_distance: 'l2' for squared Euclidean distance, 'dot' for the
            negative inner product.

    Returns:
        (n_1, n_2) cost tensor.

    Raises:
        ValueError: for an unknown cost_distance.
    """
    (n_1, _) = X_1.size()
    (n_2, _) = X_2.size()
    if cost_distance == 'l2':
        # Broadcast to (n_1, n_2, d) and sum squared differences over d.
        diff = X_1.view(n_1, 1, -1) - X_2.view(1, n_2, -1)
        cost_mat = torch.sum(diff ** 2, dim=2)
    elif cost_distance == 'dot':
        cost_mat = -X_1.matmul(X_2.transpose(0, 1))
    else:
        # Fix: was `assert False`, which is silently stripped under `python -O`.
        raise ValueError(f'unknown cost_distance: {cost_distance!r}')
    if rescale_cost:
        cost_mat = cost_mat / cost_mat.max()
    return cost_mat
def get_loss(task, loss_name, data_batch, out, dataset_name):
    """Compute the training loss for classification tasks.

    Supports plain and mixup-style batches (when 'label_2'/'lam' are present
    in data_batch) and adds a feature-transform regularizer for 'cls_trans'.

    Dedup fix: the mixup branch was previously copy-pasted four times
    verbatim; it is factored into _mixup_cls_loss with identical behavior
    (including the use of smooth_loss for mixup under BOTH loss names).

    Args:
        task: 'cls' or 'cls_trans'.
        loss_name: 'cross_entropy' or 'smooth'.
        data_batch: batch dict with 'label' (and optionally 'label_2', 'lam',
            'pc' for mixup batches).
        out: model output dict with 'logit' ('trans_feat' for cls_trans).
        dataset_name: forwarded to check_out_fmt for validation.
    """
    check_out_fmt(task, out, dataset_name)
    if task not in ('cls', 'cls_trans'):
        assert False  # unchanged behavior: unsupported task
    if loss_name not in ('cross_entropy', 'smooth'):
        assert False  # unchanged behavior: unsupported loss
    label = data_batch['label'].to(out['logit'].device)
    if 'label_2' in data_batch.keys():
        # Mixup/RSMix batch: blend losses of both labels by lam.
        loss = _mixup_cls_loss(data_batch, out, label)
    elif loss_name == 'cross_entropy':
        loss = F.cross_entropy(out['logit'], label)
    else:
        loss = smooth_loss(out['logit'], label)
    if task == 'cls_trans':
        # Orthogonality regularizer on the learned feature transform.
        loss += feature_transform_regularizer(out['trans_feat']) * 0.001
    return loss


def _mixup_cls_loss(data_batch, out, label):
    """Blend smooth_loss over (label, label_2) with scalar or per-sample lam.

    With a tensor `lam`, per-example losses are computed one sample at a time
    and averaged over the batch (data_batch['pc'].shape[0]), matching the
    original per-index loop exactly.
    """
    label_2 = data_batch['label_2'].to(out['logit'].device)
    lam = data_batch['lam']
    if isinstance(lam, torch.Tensor):
        loss = 0
        for i in range(data_batch['pc'].shape[0]):
            loss += (smooth_loss(out['logit'][i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - lam[i])
                     + smooth_loss(out['logit'][i].unsqueeze(0), label_2[i].unsqueeze(0).long()) * lam[i])
        return loss / data_batch['pc'].shape[0]
    return (smooth_loss(out['logit'], label) * (1 - lam)
            + smooth_loss(out['logit'], label_2) * lam)
def densenet121(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
    """Wrap torchvision's DenseNet-121 in the project's DenseNet adapter.

    Args:
        pretrained: load ImageNet-pretrained weights.
        progress: show a download progress bar.
        **kwargs: forwarded to torchvision.models.densenet121.
    """
    backbone = torchvision.models.densenet121(pretrained, progress, **kwargs)
    return DenseNet(backbone)
# NOTE(review): the decorator below lost its prefix during extraction — in
# tensorpack this is typically @layer_register(log_shape=False,
# use_scope=False); confirm against the original file.
_register(log_shape=False, use_scope=False)
def BNReLU(x, name=None):
    """Apply BatchNorm followed by ReLU (common conv-net shorthand)."""
    x = BatchNorm('bn', x)
    x = tf.nn.relu(x, name=name)
    return x
def do_tokenize(args):
    """Tokenize the dataset described by `args`, printing clock stamps before
    and after as a rough duration measurement.

    Fix: time.clock() was removed in Python 3.8; time.perf_counter() is the
    documented replacement.
    """
    print(time.perf_counter())
    data_builder.tokenize(args)
    print(time.perf_counter())
def _a3_tab1(brd): return ((((((- 0.0247) * (brd ** 4.0)) + (0.1718 * (brd ** 3.0))) - (0.4124 * (brd ** 2.0))) - (0.5944 * brd)) + 0.7333)
class AutoregressiveLSTMCell(tf.contrib.rnn.RNNCell):
    """LSTM cell whose previous output is fed back as part of the next input.

    The autoregressive feedback is carried inside the RNN state: the wrapped
    LSTM's state and the previous projected output are concatenated along the
    last axis.

    NOTE(review): tf.contrib.rnn.RNNCell normally expects `state_size` and
    `output_size` to be @property; the decorators may have been lost during
    extraction — confirm against the original file.
    """

    def __init__(self, lstm, output_size):
        super(AutoregressiveLSTMCell, self).__init__()
        # Wrapped plain LSTM cell.
        self.lstm_cell = lstm
        # Size of the projected (tanh) output appended to the state.
        self._output_size = output_size

    def state_size(self):
        # Combined state: wrapped LSTM state plus one previous-output slot.
        return (self.lstm_cell.state_size + self._output_size)

    def output_size(self):
        return self._output_size

    def call(self, inputs, state):
        # Split combined state into the LSTM's own state and previous output.
        (actual_states, prev_outputs) = tf.split(state, [self.lstm_cell.state_size, self._output_size], axis=(- 1))
        # Feed the previous output back in alongside the new inputs.
        combined_inputs = tf.concat([inputs, prev_outputs], axis=(- 1))
        with tf.variable_scope('autoregressive'):
            (output, state) = self.lstm_cell(combined_inputs, actual_states)
        # Project to the requested size; tanh keeps the feedback bounded.
        output = tf.layers.dense(output, self._output_size, activation=tf.nn.tanh)
        # Re-pack the new LSTM state together with the new output.
        state = tf.concat([state, output], axis=(- 1))
        return (output, state)
def test_double_double_track(vrblvl=0):
    """Track the 'mickey' system (ellipse meets parabola) in double double
    precision with 4 tasks and verify the tracked solutions.

    Returns 0 on success (4 solutions with a small residual), 1 otherwise.
    """
    mickey = ['x^2 + 4*y^2 - 4;', '2*y^2 - x;']
    (start, startsols) = total_degree_start_system(mickey, vrblvl=vrblvl)
    print('the start system :')
    for pol in start:
        print(pol)
    print('the start solutions :')
    for (idx, sol) in enumerate(startsols):
        print('Solution', idx + 1, ':')
        print(sol)
    (gamma, sols) = double_double_track(mickey, start, startsols, tasks=4, vrblvl=vrblvl)
    for pol in get_double_double_system(vrblvl):
        print(pol)
    write_double_double_solutions(vrblvl)
    print('the solutions :')
    for (idx, sol) in enumerate(sols):
        print('Solution', idx + 1, ':')
        print(sol)
    err = verify(mickey, sols, vrblvl)
    if vrblvl > 0:
        print('the error sum :', err)
    residual_ok = abs(err.real + err.imag) < 1e-10
    if len(sols) == 4 and residual_ok:
        if vrblvl > 0:
            print('Found 4 solutions and error is okay.')
        return 0
    if len(sols) != 4:
        if vrblvl > 0:
            print('Number of solutions is not 4 :', len(sols))
        return 1
    if not residual_ok:
        if vrblvl > 0:
            print('The error is too large.')
        return 1
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand, with a
    residual shortcut (optionally downsampled) added before the final ReLU."""

    expansion = 4  # channel expansion factor of the last 1x1 conv

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        width = planes
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = nn.BatchNorm2d(width)
        self.conv2 = conv3x3(width, width, stride)
        self.bn2 = nn.BatchNorm2d(width)
        self.conv3 = conv1x1(width, width * self.expansion)
        self.bn3 = nn.BatchNorm2d(width * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: either the input itself or its downsampled version.
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += shortcut
        return self.relu(y)
class EvalArguments(TrainingArguments):
    """Evaluation arguments extending TrainingArguments (a dataclass)."""

    # Number of top-ranked candidates to keep per query.
    topk: int = field(default=1000)
    # Number of worker threads used during evaluation.
    threads: int = field(default=32)
class ProbeRegimen():
    """Training regimen for a probe running on top of a representation model.

    Optimizes the probe with Adam, anneals the learning rate on dev-loss
    plateaus, checkpoints the best parameters to disk, and early-stops.
    """

    def __init__(self, args):
        self.args = args
        self.max_epochs = args['probe_training']['epochs']
        # Path where the best probe parameters are checkpointed.
        self.params_path = os.path.join(args['reporting']['root'], args['probe']['params_path'])

    def set_optimizer(self, probe):
        """(Re)create the Adam optimizer and plateau LR scheduler for *probe*."""
        self.optimizer = optim.Adam(probe.parameters(), lr=0.001)
        # patience=0: decay the LR as soon as the dev loss stops improving.
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.1, patience=0)

    def train_until_convergence(self, probe, model, loss, train_dataset, dev_dataset):
        """Train *probe*, checkpointing on dev-loss improvement (> 1e-4) and
        early-stopping after 5 epochs without a new best.

        *model* maps observation batches to word representations; *loss*
        returns (batch_loss, count) so per-token averages can be reported.
        """
        self.set_optimizer(probe)
        min_dev_loss = sys.maxsize
        min_dev_loss_epoch = (- 1)
        for epoch_index in tqdm(range(self.max_epochs), desc='[training]'):
            epoch_train_loss = 0
            epoch_dev_loss = 0
            epoch_train_epoch_count = 0
            epoch_dev_epoch_count = 0
            epoch_train_loss_count = 0
            epoch_dev_loss_count = 0
            for batch in tqdm(train_dataset, desc='[training batch]'):
                probe.train()
                self.optimizer.zero_grad()
                (observation_batch, label_batch, length_batch, _) = batch
                word_representations = model(observation_batch)
                predictions = probe(word_representations)
                (batch_loss, count) = loss(predictions, label_batch, length_batch)
                batch_loss.backward()
                # Accumulate the un-averaged loss, weighted by token count.
                epoch_train_loss += (batch_loss.detach().cpu().numpy() * count.detach().cpu().numpy())
                epoch_train_epoch_count += 1
                epoch_train_loss_count += count.detach().cpu().numpy()
                self.optimizer.step()
            for batch in tqdm(dev_dataset, desc='[dev batch]'):
                # NOTE(review): zero_grad in the no-backward dev loop is a
                # no-op for the loss accounting — presumably defensive.
                self.optimizer.zero_grad()
                probe.eval()
                (observation_batch, label_batch, length_batch, _) = batch
                word_representations = model(observation_batch)
                predictions = probe(word_representations)
                (batch_loss, count) = loss(predictions, label_batch, length_batch)
                epoch_dev_loss += (batch_loss.detach().cpu().numpy() * count.detach().cpu().numpy())
                epoch_dev_loss_count += count.detach().cpu().numpy()
                epoch_dev_epoch_count += 1
            # NOTE(review): the scheduler is stepped on the *summed* dev loss,
            # not the per-token average used for checkpointing — confirm intended.
            self.scheduler.step(epoch_dev_loss)
            tqdm.write('[epoch {}] Train loss: {}, Dev loss: {}'.format(epoch_index, (epoch_train_loss / epoch_train_loss_count), (epoch_dev_loss / epoch_dev_loss_count)))
            if ((epoch_dev_loss / epoch_dev_loss_count) < (min_dev_loss - 0.0001)):
                torch.save(probe.state_dict(), self.params_path)
                min_dev_loss = (epoch_dev_loss / epoch_dev_loss_count)
                min_dev_loss_epoch = epoch_index
                tqdm.write('Saving probe parameters')
            elif (min_dev_loss_epoch < (epoch_index - 4)):
                tqdm.write('Early stopping')
                break

    def predict(self, probe, model, dataset):
        """Return a list of per-batch prediction arrays from *probe* on *dataset*."""
        probe.eval()
        predictions_by_batch = []
        for batch in tqdm(dataset, desc='[predicting]'):
            (observation_batch, label_batch, length_batch, _) = batch
            word_representations = model(observation_batch)
            predictions = probe(word_representations)
            predictions_by_batch.append(predictions.detach().cpu().numpy())
        return predictions_by_batch
def generate_uid_from_pbobject(pb_object):
    """Return a deterministic SHA-1 hex digest identifying a protobuf message.

    The message is serialized to canonical JSON (sorted keys, default values
    included, original proto field names preserved) so that equal messages
    always map to the same UID.
    """
    json_string = json.dumps(
        MessageToDict(pb_object, including_default_value_fields=True, preserving_proto_field_name=True),
        indent=2,
        sort_keys=True,
    )
    # Hash the JSON text directly; the former StringIO round-trip added
    # nothing over hashing the string itself.
    return hashlib.sha1(json_string.encode('utf-8')).hexdigest()
class AVATAR_OT_SetBodyShape(bpy.types.Operator):
    """Blender operator: re-apply the avatar body shape and propagate the
    resulting vertex displacement to all attached cloth meshes."""
    bl_idname = 'avt.set_body_shape'
    bl_label = 'Set Body Shape'
    bl_description = 'Set Body Shape'

    def execute(self, context):
        # mAvt is the module-level avatar singleton.
        global mAvt
        obj = mAvt.body
        # Snapshot the current mesh so the displacement can be diffed after
        # the shape refresh.
        cp_vals = obj.data.copy()
        mAvt.np_mesh_prev = mAvt.read_verts(cp_vals)
        mAvt.refresh_shape(obj)
        mAvt.np_mesh = mAvt.read_verts(obj.data)
        mAvt.np_mesh_diff = (mAvt.np_mesh - mAvt.np_mesh_prev)
        # Deform every non-body mesh (the clothes) to follow the new shape.
        for object in bpy.data.objects:
            if ((object.type == 'MESH') and (object.name != 'Avatar:Body')):
                mAvt.deform_cloth(cloth_name=str(object.name))
        return {'FINISHED'}
class Text(list):
    """A list of Sentences parsed from a (tagged) string, one sentence per line.

    Each contained Sentence keeps a back-reference in its ``text`` attribute,
    maintained by the overridden list mutators below.
    """

    def __init__(self, string, token=[WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA], language='en', encoding='utf-8'):
        # NOTE: the mutable default `token` list is shared across calls but
        # never mutated here.
        self.encoding = encoding
        if _is_tokenstring(string):
            (token, language) = (string.tags, getattr(string, 'language', language))
        if string:
            if isinstance(string, str):
                string = string.splitlines()
            self.extend((Sentence(s, token, language) for s in string))

    def insert(self, index, sentence):
        list.insert(self, index, sentence)
        sentence.text = self

    def append(self, sentence):
        list.append(self, sentence)
        sentence.text = self

    def extend(self, sentences):
        # Fixed: materialize first — a generator argument would be exhausted
        # by list.extend, leaving the back-references below unset.
        sentences = list(sentences)
        list.extend(self, sentences)
        for s in sentences:
            s.text = self

    def remove(self, sentence):
        list.remove(self, sentence)
        sentence.text = None

    def pop(self, index):
        sentence = list.pop(self, index)
        sentence.text = None
        return sentence

    def sentences(self):
        """Return the sentences as a plain list."""
        return list(self)

    def words(self):
        """Return all words of all sentences as one flat list."""
        return list(chain(*self))

    def copy(self):
        """Return a deep-ish copy: a new Text with copies of each sentence."""
        t = Text('', encoding=self.encoding)
        for sentence in self:
            t.append(sentence.copy())
        return t

    @property
    def string(self):
        # Fixed: __str__ reads this as an attribute (self.string), so it must
        # be a property, not a plain method.
        return '\n'.join((sentence.string for sentence in self))

    def __str__(self):
        return self.string

    @property
    def xml(self):
        # Fixed: property, for symmetry with sentence.xml which is also read
        # as an attribute below.
        xml = []
        xml.append(('<?xml version="1.0" encoding="%s"?>' % XML_ENCODING.get(self.encoding, self.encoding)))
        xml.append(('<%s>' % XML_TEXT))
        xml.extend([sentence.xml for sentence in self])
        xml.append(('</%s>' % XML_TEXT))
        return '\n'.join(xml)

    @classmethod
    def from_xml(cls, xml):
        # Fixed: the first parameter is `cls`, so this must be a classmethod.
        return Text(parse_string(xml))
    fromxml = from_xml
def test_track_parallel_progress_list(capsys):
    """track_parallel_progress over a list: check the rendered progress bar
    (captured via pytest's capsys fixture) and the returned results.

    Relies on sleep_1s taking ~1 second per task so the elapsed/ETA fields in
    the expected output are deterministic.
    """
    # 4 tasks, 2 worker processes, bar width of 4 characters.
    results = mmcv.track_parallel_progress(sleep_1s, [1, 2, 3, 4], 2, bar_width=4)
    (out, _) = capsys.readouterr()
    # Exact expected bar output, one \r-terminated frame per completed task.
    assert (out == '[ ] 0/4, elapsed: 0s, ETA:\r[> ] 1/4, 1.0 task/s, elapsed: 1s, ETA: 3s\r[>> ] 2/4, 2.0 task/s, elapsed: 1s, ETA: 1s\r[>>> ] 3/4, 1.5 task/s, elapsed: 2s, ETA: 1s\r[>>>>] 4/4, 2.0 task/s, elapsed: 2s, ETA: 0s\n')
    assert (results == [1, 2, 3, 4])
class GaussianCriterion(Criterion):
    """Python wrapper for the BigDL GaussianCriterion layer.

    Construction is delegated to the JVM side; only the numeric type tag is
    forwarded.
    """

    def __init__(self, bigdl_type='float'):
        super().__init__(None, bigdl_type)
@unittest.skipIf(torch.cuda.device_count() < 2, 'test requires 2 GPUs')
class TestBMUF(unittest.TestCase):
    """End-to-end checks that BMUF training keeps model replicas in sync.

    Fix: the skip decorator had been corrupted to a bare tuple expression;
    restored as the standard ``unittest.skipIf`` GPU-count guard.
    """

    def bmuf_process(self, cfg, args, iterations):
        """Spawn one training process per configured GPU and collect results."""
        results = Manager().dict()
        torch.multiprocessing.spawn(fn=functools.partial(single_gpu_training, cfg, args), args=(iterations, results), nprocs=args.distributed_world_size, join=True)
        return results

    def test_bmuf_sync(self):
        # One iteration; the two replicas must agree after the BMUF sync.
        (cfg, args) = setup_args()
        iterations = 1
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        # Synchronization at the end of the warmup phase.
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        # Warmup sync followed by periodic BMUF syncs (every 5 iterations).
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        cfg.bmuf.global_sync_iter = args.global_sync_iter
        iterations = 25
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        # Degenerate single-process case should still run to completion.
        (cfg, args) = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        cfg.distributed_training.distributed_world_size = args.distributed_world_size
        cfg.bmuf.distributed_world_size = args.distributed_world_size
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 1)

    def assertAlmostEqual(self, t1, t2):
        """Tensor-aware override: same shape and max abs difference < 1e-4."""
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
def load_data():
    """Convert the Google sentence-compression JSON dump into train/test TSV
    files of ``token###tag`` lines.

    Records in the dump are pretty-printed JSON objects separated by blank
    lines; each is reassembled, parsed, and converted to a per-token tag
    sequence ('B' for tokens of nodes touched by removed edges, 'O' for kept
    tokens).  First 1000 examples go to the test file, the next 8000 to train.
    """
    dir = '/backup3/jcxu/data/compression-data.json'
    train_file = '/backup3/jcxu/data/compression-train.tsv'
    test_file = '/backup3/jcxu/data/compression-test.tsv'
    with open(dir, 'r') as fd:
        lines = fd.read().splitlines()
    # Indices of the blank lines that separate the concatenated JSON records.
    line_num = [idx for (idx, x) in enumerate(lines) if (x == '')]
    line_num = (([0] + line_num) + [(- 1)])
    dataset = []
    for idx in range((len(line_num) - 1)):
        start_line = line_num[idx]
        if (line_num[(idx + 1)] == (- 1)):
            # Last record: runs to the end of the file.
            end_line = (- 1)
            tmp_lines = lines[start_line:]
        else:
            end_line = line_num[(idx + 1)]
            tmp_lines = lines[start_line:end_line]
        str_lines = ' '.join(tmp_lines)
        data = json.loads(str_lines)
        compress_edges = unfold_sentence(data['compression']['edge'])
        original_edges = unfold_sentence(data['graph']['edge'])
        # (head word id, word list) per graph node.
        node_lists = [(n['word'][n['head_word_index']]['id'], n['word']) for (idx, n) in enumerate(data['graph']['node'])]
        # Edges present in the full graph but dropped by the compression.
        delta_edges = list((set(original_edges) - set(compress_edges)))
        compressed_nodes = [c[1] for c in delta_edges]
        from operator import itemgetter
        node_lists.sort(key=itemgetter(0))
        # Over-allocate the tag/sentence slots; unused slots stay '' and are
        # skipped when emitting the example.
        max_idx = (node_lists[(- 1)][1][(- 1)]['id'] + 20)
        tags = ['' for _ in range(max_idx)]
        sents = ['' for _ in range(max_idx)]
        for node in node_lists:
            idx = node[0]
            if (idx == (- 1)):
                # Skip the artificial root node.
                continue
            words = node[1]
            for w_dict in words:
                sents[w_dict['id']] = w_dict['form']
            # NOTE(review): `l` is computed but never used.
            l = len(words)
            if (idx in compressed_nodes):
                # Node is the target of a removed edge: mark all its tokens 'B'.
                for w_dict in words:
                    tags[w_dict['id']] = 'B'
                # NOTE(review): redundant re-tagging of the first token — the
                # loop above already set it; confirm no I/B scheme was intended.
                tags[words[0]['id']] = 'B'
            else:
                for w_dict in words:
                    tags[w_dict['id']] = 'O'
        this_example = []
        for (t, s) in zip(tags, sents):
            if (t == ''):
                continue
            this_example.append('{}###{}'.format(s, t))
        ex = '\t'.join(this_example)
        dataset.append(ex)
    test = dataset[:1000]
    train = dataset[1000:9000]
    print(test[100])
    print(test[101])
    with open(train_file, 'w') as fd:
        fd.write('\n'.join(train))
    with open(test_file, 'w') as fd:
        fd.write('\n'.join(test))
def setup(args):
    """Build, merge, freeze and register the densepose config from CLI args.

    Applies the config file first, then command-line overrides, then the
    detectron2 default setup, and attaches a rank-aware 'densepose' logger.
    """
    cfg = get_cfg()
    add_densepose_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name='densepose')
    return cfg
def train(args, logger, run_id):
    """Train the localization model, logging losses to *logger* (a
    SummaryWriter-like object) and checkpointing under logs/<run_id>/models.

    Saves 'model_best'/'optim_best' whenever the per-epoch eval last-step
    loss improves and always saves 'model_final'/'optim_final' at the end.
    """
    model = get_model(args)
    optimizer = get_optim(args, model)
    (train_data, eval_data) = get_data(args)
    train_dataset = LocalizationDataset(train_data)
    eval_dataset = LocalizationDataset(eval_data)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, pin_memory=True, shuffle=True)
    eval_loader = DataLoader(eval_dataset, batch_size=args.batch_size, num_workers=8, pin_memory=True, shuffle=False)
    os.mkdir(os.path.join('logs', run_id, 'models'))
    cnt = 0  # global step counter across all epochs
    best_eval = 1000
    from tqdm import tqdm
    for epoch in tqdm(range(args.epochs)):
        model.train()
        for (iteration, data) in enumerate(train_loader):
            cnt = (cnt + 1)
            (env_map, obs, pos, action) = data
            if (torch.cuda.is_available() and args.gpu):
                env_map = env_map.to('cuda')
                obs = obs.to('cuda')
                pos = pos.to('cuda')
                action = action.to('cuda')
            model.zero_grad()
            (loss, log_loss, particle_pred) = model.step(env_map, obs, action, pos, args)
            loss.backward()
            if (args.clip > 0):
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            # Fixed: log every 50 iterations.  The original condition
            # `if (iteration % 50):` was inverted — it logged on every
            # iteration EXCEPT multiples of 50.
            if ((iteration % 50) == 0):
                loss_last = log_loss.to('cpu').detach().numpy()
                loss_all = loss.to('cpu').detach().numpy()
                logger.add_scalar('train/loss_last', loss_last, cnt)
                logger.add_scalar('train/loss', loss_all, cnt)
        # Per-epoch evaluation pass (no gradients).
        model.eval()
        eval_loss_all = []
        eval_loss_last = []
        with torch.no_grad():
            for (iteration, data) in enumerate(eval_loader):
                (env_map, obs, pos, action) = data
                if (torch.cuda.is_available() and args.gpu):
                    env_map = env_map.to('cuda')
                    obs = obs.to('cuda')
                    pos = pos.to('cuda')
                    action = action.to('cuda')
                model.zero_grad()
                (loss, log_loss, particle_pred) = model.step(env_map, obs, action, pos, args)
                eval_loss_all.append(loss.to('cpu').detach().numpy())
                eval_loss_last.append(log_loss.to('cpu').detach().numpy())
        log_eval_last = np.mean(eval_loss_last)
        log_eval_all = np.mean(eval_loss_all)
        logger.add_scalar('eval/loss_last', log_eval_last, cnt)
        logger.add_scalar('eval/loss', log_eval_all, cnt)
        if (log_eval_last < best_eval):
            best_eval = log_eval_last
            torch.save(model.state_dict(), os.path.join('logs', run_id, 'models', 'model_best'))
            torch.save(optimizer.state_dict(), os.path.join('logs', run_id, 'models', 'optim_best'))
    torch.save(model.state_dict(), os.path.join('logs', run_id, 'models', 'model_final'))
    torch.save(optimizer.state_dict(), os.path.join('logs', run_id, 'models', 'optim_final'))
def s2hot(arr):
    """Convert score rows to one-hot pairs.

    Rows whose first entry equals 1.0 map to [1, 0]; all other rows map
    to [0, 1].  Returns a numpy array.
    """
    return array([[1, 0] if (row[0] == 1.0) else [0, 1] for row in arr])
def vote(predicted_file: str, size: int):
    """Majority-vote NER spans across each sentence's retrieved hits and score
    the voted tags with span F1 plus a conlleval report.

    For every JSON line in *predicted_file*, the sentence's own prediction and
    the predictions of its top-*size* retrieved hits are span-voted into one
    tag sequence.  Metrics are written next to the prediction file.
    """
    import json
    from cap.data.utils import iobes_to_spans, spans_to_iobes, span_vote
    from cap.training.metrics.span_f1_measure import SpanF1Measure
    from conlleval import evaluate_conll_file
    metric = SpanF1Measure()
    lines = list()
    with open(predicted_file) as file:
        for row in file:
            ins = json.loads(row)
            # Sentence itself plus its top-`size` hits, tags in lockstep.
            texts = ([ins['text']] + ins['hits'][:size])
            tags_array = ([ins['predicted_tags']] + ins['predicted_tags_for_hits'][:size])
            spans = span_vote(texts, tags_array, suffix=True)
            tags = spans_to_iobes(spans, len(ins['text']))
            for items in zip(ins['text'], ins['tags'], tags):
                # Replace the token text with '-' for the conlleval input;
                # conlleval only needs the gold/predicted tag columns.
                items = ('-', *items[1:])
                lines.append(('\t'.join(items) + '\n'))
            # Blank line terminates the sentence in CoNLL format.
            lines.append('\n')
            metric([iobes_to_spans(tags)], [iobes_to_spans(ins['tags'])])
    metrics = metric.get_metric()
    print(json.dumps(metrics, indent=2))
    with open(predicted_file.replace('test_prediction', 'metric_vote'), mode='w') as file:
        json.dump(metrics, file, indent=2)
        print('write metrics to', file.name)
    evaluate_conll_file(lines)
def color_begin_end(latex_contents, latex_file, str, color_name, inner_outer='outer'):
    """Wrap every matching \\begin{...}...\\end{...} environment in a color
    command and return the modified LaTeX contents.

    Note: the parameter named ``str`` shadows the builtin; the name is kept
    for backward compatibility with keyword callers.
    """
    for brace_nodes in get_all_begin_brace_nodes(latex_contents, latex_file, str=str):
        # The last node of each group anchors the environment to colorize.
        latex_contents = add_color_begin_end_command(latex_contents, brace_nodes[-1], color_name, inner_outer=inner_outer)
    return latex_contents
def load_movielens100k(as_frame: bool=False) -> Union[(Tuple[(np.ndarray, np.ndarray, np.ndarray)], Tuple[(pd.DataFrame, pd.DataFrame, pd.DataFrame)])]:
    """Load the bundled MovieLens-100k ratings, users and items tables.

    Returns (data, users, items) as DataFrames when ``as_frame`` is True,
    otherwise as numpy arrays.
    """
    def _read(fname):
        # Resolve the packaged parquet file and load it with pandas.
        with resources.path('pytorch_widedeep.datasets.data', fname) as fpath:
            return pd.read_parquet(fpath)

    df_data = _read('MovieLens100k_data.parquet.brotli')
    df_items = _read('MovieLens100k_items.parquet.brotli')
    df_users = _read('MovieLens100k_users.parquet.brotli')
    if as_frame:
        return (df_data, df_users, df_items)
    return (df_data.to_numpy(), df_users.to_numpy(), df_items.to_numpy())
def entropy_loss(Pz, Pzt, Pzzt):
    """Average of sum(p * log p) over the three batch-normalized probability
    tables (note: this is the *negative* entropy)."""
    probs = batch_probability(Pz, Pzt, Pzzt)
    total = sum(((p * torch.log(p)).sum() for p in probs))
    return total / 3
class PSAMask(Function):
    """Autograd Function for the PSANet point-wise spatial attention mask,
    dispatching to compiled C (cpu) / CUDA (gpu) kernels in `src`.

    NOTE(review): this uses the legacy autograd.Function style (instantiated,
    state kept on self); modern PyTorch requires static forward/backward with
    a ctx argument — confirm the installed torch version supports this.
    """

    def __init__(self, psa_type=0, mask_H_=None, mask_W_=None):
        super(PSAMask, self).__init__()
        # psa_type selects one of the two PSA modes (0 or 1); the exact
        # semantics live in the C/CUDA kernels — verify against `src`.
        assert (psa_type in [0, 1])
        self.psa_type = psa_type
        # Either both mask dims are given, or both default from the feature size.
        assert (((mask_H_ is None) and (mask_W_ is None)) or ((mask_H_ is not None) and (mask_W_ is not None)))
        self.mask_H_ = mask_H_
        self.mask_W_ = mask_W_

    def forward(self, input):
        (num_, channels_, feature_H_, feature_W_) = input.size()
        if ((self.mask_H_ is not None) and (self.mask_W_ is not None)):
            mask_H_ = self.mask_H_
            mask_W_ = self.mask_W_
            # Mask dimensions must be odd so they have a well-defined center.
            assert ((mask_H_ % 2) == 1)
            assert ((mask_W_ % 2) == 1)
        else:
            # Default: full-extent mask covering every relative offset.
            mask_H_ = ((2 * feature_H_) - 1)
            mask_W_ = ((2 * feature_W_) - 1)
        # Input channels must enumerate every mask position.
        assert (channels_ == (mask_H_ * mask_W_))
        half_mask_H_ = ((mask_H_ - 1) // 2)
        half_mask_W_ = ((mask_W_ - 1) // 2)
        output = torch.zeros([num_, (feature_H_ * feature_W_), feature_H_, feature_W_], dtype=input.dtype, device=input.device)
        if (not input.is_cuda):
            src.cpu.psamask_forward(self.psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_)
        else:
            output = output.cuda()
            src.gpu.psamask_forward(self.psa_type, input, output, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_)
        # Stash the geometry on self for backward (legacy-Function style).
        (self.num_, self.channels_, self.feature_H_, self.feature_W_, self.mask_H_, self.mask_W_, self.half_mask_H_, self.half_mask_W_) = (num_, channels_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_)
        return output

    def backward(self, grad_output):
        # Recover the geometry saved by forward.
        (num_, channels_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_) = (self.num_, self.channels_, self.feature_H_, self.feature_W_, self.mask_H_, self.mask_W_, self.half_mask_H_, self.half_mask_W_)
        grad_input = torch.zeros([num_, channels_, feature_H_, feature_W_], dtype=grad_output.dtype, device=grad_output.device)
        if (not grad_output.is_cuda):
            src.cpu.psamask_backward(self.psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_)
        else:
            src.gpu.psamask_backward(self.psa_type, grad_output, grad_input, num_, feature_H_, feature_W_, mask_H_, mask_W_, half_mask_H_, half_mask_W_)
        return grad_input
def create_dir_and_delete_content(directory):
    """Ensure *directory* exists and prune old ``.h5`` checkpoints, keeping
    only the four most recently modified ones."""
    os.makedirs(directory, exist_ok=True)
    candidates = [os.path.join(directory, name) for name in os.listdir(directory)]
    h5_files = [p for p in candidates if os.path.isfile(p) and p.endswith('.h5')]
    # Oldest first, so everything before the last four gets removed.
    h5_files.sort(key=os.path.getmtime)
    for stale in h5_files[:-4]:
        logging.info('removing old model: {}'.format(stale))
        os.remove(stale)
class NormedHistogram(nn.Module):
    """Per-channel normalized histogram of a batch of 2-D feature maps.

    For input of shape (B, C, H, W) returns a (B, C, nbins) float32 tensor in
    which each row is the histogram of one channel, weighted so that every
    channel's bins sum to 1.
    """

    def __init__(self, nbins: int=256, r_min: float=0.0, r_max: float=255.0):
        super(NormedHistogram, self).__init__()
        assert isinstance(nbins, int), type(nbins)
        assert (nbins > 0), nbins
        self.nbins = nbins
        assert isinstance(r_min, float), type(r_min)
        assert isinstance(r_max, float), type(r_max)
        assert (r_min < r_max), f'{r_min}, {r_max}'
        self.r_min = r_min  # left edge of the first bin
        self.r_max = r_max  # right edge of the last bin

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        assert (x.ndim == 4), x.ndim
        (b, c, h, w) = x.shape
        # Fixed: `out` no longer inherits x.requires_grad — torch.histogram is
        # not differentiable, and in-place assignment into a leaf tensor with
        # requires_grad=True raises a RuntimeError.
        out = torch.zeros((b, c, self.nbins), dtype=torch.float32)
        r = (self.r_min, self.r_max)
        # Fixed: uniform per-pixel weight 1/(H*W) so each histogram sums to 1.
        # The original built the weights with torch.zeros (scaling zeros does
        # nothing), which made every histogram identically zero.  Also renamed
        # to avoid shadowing the width `w` unpacked above.
        n_px = h * w
        weight = torch.full((n_px,), 1.0 / n_px, device=x.device)
        for i in range(b):
            for j in range(c):
                # Fixed: torch.histogram requires weight to match the input's
                # shape (hence reshape(-1)) and returns a (hist, bin_edges)
                # namedtuple — keep only the counts.
                out[(i, j)] = torch.histogram(x[(i, j)].reshape(-1), bins=self.nbins, range=r, weight=weight, density=False).hist
        return out
class HitNet():
    """ONNX-Runtime wrapper around the HITNET stereo-depth model.

    Calling the instance with a (left, right) BGR image pair runs inference,
    caches the disparity and depth maps on the instance, and returns the
    disparity map.
    """

    def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=DEFAULT_CONFIG, max_dist=10):
        # initialize_model stores everything on self and returns None.
        self.model = self.initialize_model(model_path, model_type, camera_config, max_dist)

    def __call__(self, left_img, right_img):
        return self.update(left_img, right_img)

    def initialize_model(self, model_path, model_type=ModelType.eth3d, camera_config=DEFAULT_CONFIG, max_dist=10):
        """Create the ONNX session and cache model/camera parameters."""
        self.model_type = model_type
        self.camera_config = camera_config
        self.max_dist = max_dist  # clipping distance for depth visualization
        # Prefer CUDA when available, silently falling back to CPU.
        self.session = onnxruntime.InferenceSession(model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
        self.get_input_details()
        self.get_output_details()

    def update(self, left_img, right_img):
        """Run inference and refresh the cached disparity and depth maps."""
        input_tensor = self.prepare_input(left_img, right_img)
        if (self.model_type == ModelType.flyingthings):
            # The flyingthings variant outputs both left and right disparities;
            # keep the left one as the canonical disparity map.
            (left_disparity, right_disparity) = self.inference(input_tensor)
            self.disparity_map = left_disparity
        else:
            self.disparity_map = self.inference(input_tensor)
        self.depth_map = self.get_depth_from_disparity(self.disparity_map, self.camera_config)
        return self.disparity_map

    def prepare_input(self, left_img, right_img):
        """Resize, colour-convert and stack the stereo pair into an NCHW float32 tensor."""
        (self.img_height, self.img_width) = left_img.shape[:2]
        left_img = cv2.resize(left_img, (self.input_width, self.input_height))
        right_img = cv2.resize(right_img, (self.input_width, self.input_height))
        if (self.model_type is ModelType.eth3d):
            # The eth3d variant expects grayscale input.
            left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
            right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
            left_img = np.expand_dims(left_img, 2)
            right_img = np.expand_dims(right_img, 2)
            combined_img = (np.concatenate((left_img, right_img), axis=(- 1)) / 255.0)
        else:
            left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
            right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
            combined_img = (np.concatenate((left_img, right_img), axis=(- 1)) / 255.0)
        combined_img = combined_img.transpose(2, 0, 1)
        return np.expand_dims(combined_img, 0).astype(np.float32)

    def inference(self, input_tensor):
        """Run the ONNX session; returns one disparity map, or a (left, right)
        pair for the flyingthings variant."""
        input_name = self.session.get_inputs()[0].name
        left_output_name = self.session.get_outputs()[0].name
        if (self.model_type is not ModelType.flyingthings):
            left_disparity = self.session.run([left_output_name], {input_name: input_tensor})
            return np.squeeze(left_disparity)
        right_output_name = self.session.get_outputs()[1].name
        (left_disparity, right_disparity) = self.session.run([left_output_name, right_output_name], {input_name: input_tensor})
        return (np.squeeze(left_disparity), np.squeeze(right_disparity))

    @staticmethod
    def get_depth_from_disparity(disparity_map, camera_config):
        # Fixed: declared @staticmethod — it had no `self` parameter but was
        # called on the instance, which raised TypeError (self was passed as
        # disparity_map).
        return ((camera_config.f * camera_config.baseline) / disparity_map)

    def draw_disparity(self):
        """Return a colour-mapped visualization of the latest disparity map."""
        disparity_map = cv2.resize(self.disparity_map, (self.img_width, self.img_height))
        norm_disparity_map = (255 * ((disparity_map - np.min(disparity_map)) / (np.max(disparity_map) - np.min(disparity_map))))
        return cv2.applyColorMap(cv2.convertScaleAbs(norm_disparity_map, 1), cv2.COLORMAP_MAGMA)

    def draw_depth(self):
        """Return a colour-mapped visualization of the latest depth map."""
        return self.util_draw_depth(self.depth_map, (self.img_width, self.img_height), self.max_dist)

    @staticmethod
    def util_draw_depth(depth_map, img_shape, max_dist):
        # Fixed: @staticmethod for the same reason as get_depth_from_disparity.
        norm_depth_map = (255 * (1 - (depth_map / max_dist)))
        norm_depth_map[(norm_depth_map < 0)] = 0
        # Values at/above 255 (i.e. depth <= 0, invalid) are zeroed too.
        norm_depth_map[(norm_depth_map >= 255)] = 0
        norm_depth_map = cv2.resize(norm_depth_map, img_shape)
        return cv2.applyColorMap(cv2.convertScaleAbs(norm_depth_map, 1), cv2.COLORMAP_MAGMA)

    def get_input_details(self):
        """Cache input tensor names and the expected NCHW input size."""
        model_inputs = self.session.get_inputs()
        self.input_names = [model_inputs[i].name for i in range(len(model_inputs))]
        self.input_shape = model_inputs[0].shape
        self.input_height = self.input_shape[2]
        self.input_width = self.input_shape[3]

    def get_output_details(self):
        """Cache output tensor names and the first output's shape."""
        model_outputs = self.session.get_outputs()
        self.output_names = [model_outputs[i].name for i in range(len(model_outputs))]
        self.output_shape = model_outputs[0].shape
class ModuleParallel(nn.Module):
    """Apply one shared module independently to every tensor in a list."""

    def __init__(self, module):
        super(ModuleParallel, self).__init__()
        self.module = module  # module replicated across the parallel inputs

    def forward(self, x_parallel):
        outputs = []
        for item in x_parallel:
            outputs.append(self.module(item))
        return outputs
class InitialStateBridge(Bridge):
    """Bridge that initializes the decoder state from an encoder output.

    The chosen encoder output (default: ``final_state``) is flattened to
    [batch, depth], passed through a single fully-connected layer with a
    configurable activation, then split and packed back into the decoder's
    state structure.
    """

    def __init__(self, encoder_outputs, decoder_state_size, params, mode):
        super(InitialStateBridge, self).__init__(encoder_outputs, decoder_state_size, params, mode)
        if (not hasattr(encoder_outputs, self.params['bridge_input'])):
            raise ValueError('Invalid bridge_input not in encoder outputs.')
        # Encoder tensor(s) used to seed the decoder state.
        self._bridge_input = getattr(encoder_outputs, self.params['bridge_input'])
        # Resolve the activation function from its dotted import path.
        self._activation_fn = locate(self.params['activation_fn'])

    # NOTE(review): takes no self/cls — presumably intended as a
    # @staticmethod (the usual Configurable pattern); confirm callers.
    def default_params():
        return {'bridge_input': 'final_state', 'activation_fn': 'tensorflow.identity'}

    def _create(self):
        # Flatten every tensor in the (possibly nested) bridge input to
        # [batch_size, depth], then concatenate along the feature axis.
        bridge_input = nest.map_structure((lambda x: tf.reshape(x, [self.batch_size, _total_tensor_depth(x)])), self._bridge_input)
        bridge_input_flat = nest.flatten([bridge_input])
        bridge_input_concat = tf.concat(bridge_input_flat, 1)
        state_size_splits = nest.flatten(self.decoder_state_size)
        total_decoder_state_size = sum(state_size_splits)
        # One dense layer produces the full concatenated decoder state, which
        # is then split back to the decoder's nested state structure.
        initial_state_flat = tf.contrib.layers.fully_connected(inputs=bridge_input_concat, num_outputs=total_decoder_state_size, activation_fn=self._activation_fn)
        initial_state = tf.split(initial_state_flat, state_size_splits, axis=1)
        return nest.pack_sequence_as(self.decoder_state_size, initial_state)
def createdataset_byid(ds_files_subsets, subsets, classname, out_path):
    """Copy per-identity image folders into out_path/<subset>/<classname>/ as
    sequentially numbered jpgs, capped at the module-level quota n_img[subset].

    *ds_files_subsets* maps each subset name to a list of identity folder paths.
    """
    for s in subsets:
        try:
            folderpath = os.path.join(out_path, s, classname)
            os.makedirs(folderpath)
        except OSError:
            # NOTE(review): the message reports out_path, though it is the
            # subset folder that failed (e.g. because it already exists).
            print(('Creation of the directory %s failed' % out_path))
        else:
            print(('Successfully created the directory %s ' % out_path))
        idx = 0
        for i in range(len(ds_files_subsets[s])):
            id_path = ds_files_subsets[s][i]
            img_files = [ff for ff in glob.glob((id_path + '/*.jpg'))]
            for f in img_files:
                # Sequential, zero-padded output name across all identities.
                img_name = 'img_{:06d}.jpg'.format(idx)
                shutil.copyfile(f, os.path.join(out_path, s, classname, img_name))
                idx = (idx + 1)
                if (idx >= n_img[s]):
                    break
            # n_img is a module-level per-subset image quota; stop once hit.
            if (idx >= n_img[s]):
                break
class MixerBlock(nn.Module):
    """One MLP-Mixer block: a token-mixing MLP followed by a channel-mixing
    MLP, each preceded by LayerNorm and wrapped in a residual connection."""

    def __init__(self, config):
        super(MixerBlock, self).__init__()
        # Token MLP mixes across the patch (token) dimension.
        self.token_mlp_block = MlpBlock(config.n_patches, config.tokens_mlp_dim)
        # Channel MLP mixes across the hidden (channel) dimension.
        self.channel_mlp_block = MlpBlock(config.hidden_dim, config.channels_mlp_dim)
        self.pre_norm = nn.LayerNorm(config.hidden_dim, eps=1e-06)
        self.post_norm = nn.LayerNorm(config.hidden_dim, eps=1e-06)

    def forward(self, x):
        # Token mixing: norm, transpose to (..., channels, tokens), MLP,
        # transpose back, residual add.
        h = x
        x = self.pre_norm(x)
        x = x.transpose((- 1), (- 2))
        x = self.token_mlp_block(x)
        x = x.transpose((- 1), (- 2))
        x = (x + h)
        # Channel mixing with its own norm and residual.
        h = x
        x = self.post_norm(x)
        x = self.channel_mlp_block(x)
        x = (x + h)
        return x

    def load_from(self, weights, n_block):
        """Copy pretrained JAX/Flax weights for block *n_block* into this
        module; kernels are transposed to match torch.nn.Linear layout.

        TOK_FC_0/1, CHA_FC_0/1, PRE_NORM, POST_NORM are module-level path
        constants into the checkpoint dictionary.
        """
        ROOT = f'MixerBlock_{n_block}'
        with torch.no_grad():
            self.token_mlp_block.fc0.weight.copy_(np2th(weights[pjoin(ROOT, TOK_FC_0, 'kernel')]).t())
            self.token_mlp_block.fc1.weight.copy_(np2th(weights[pjoin(ROOT, TOK_FC_1, 'kernel')]).t())
            self.token_mlp_block.fc0.bias.copy_(np2th(weights[pjoin(ROOT, TOK_FC_0, 'bias')]).t())
            self.token_mlp_block.fc1.bias.copy_(np2th(weights[pjoin(ROOT, TOK_FC_1, 'bias')]).t())
            self.channel_mlp_block.fc0.weight.copy_(np2th(weights[pjoin(ROOT, CHA_FC_0, 'kernel')]).t())
            self.channel_mlp_block.fc1.weight.copy_(np2th(weights[pjoin(ROOT, CHA_FC_1, 'kernel')]).t())
            self.channel_mlp_block.fc0.bias.copy_(np2th(weights[pjoin(ROOT, CHA_FC_0, 'bias')]).t())
            self.channel_mlp_block.fc1.bias.copy_(np2th(weights[pjoin(ROOT, CHA_FC_1, 'bias')]).t())
            # Flax LayerNorm stores the gain under 'scale'.
            self.pre_norm.weight.copy_(np2th(weights[pjoin(ROOT, PRE_NORM, 'scale')]))
            self.pre_norm.bias.copy_(np2th(weights[pjoin(ROOT, PRE_NORM, 'bias')]))
            self.post_norm.weight.copy_(np2th(weights[pjoin(ROOT, POST_NORM, 'scale')]))
            self.post_norm.bias.copy_(np2th(weights[pjoin(ROOT, POST_NORM, 'bias')]))
def squeezenet1_1(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> SqueezeNet:
    """Build the project's SqueezeNet wrapper around torchvision's 1.1 variant."""
    backbone = torchvision.models.squeezenet1_1(pretrained, progress, **kwargs)
    return SqueezeNet(backbone)
def train_collate_fn(batch):
    """Collate (img, pid, camid, path) samples for training.

    Images are stacked along a new batch dimension and person ids become an
    int64 tensor; camera ids and image paths stay as tuples.
    """
    imgs, pids, camids, img_paths = zip(*batch)
    pid_tensor = torch.tensor(pids, dtype=torch.int64)
    img_tensor = torch.stack(imgs, dim=0)
    return (img_tensor, pid_tensor, camids, img_paths)
def serve():
    """Start the gRPC task-submission service and block until termination."""
    port = str(config.grpc_api_port)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    neural_solution_pb2_grpc.add_TaskServiceServicer_to_server(TaskSubmitterServicer(), server)
    # Listen on all interfaces (IPv4 and IPv6), without TLS.
    server.add_insecure_port(('[::]:' + port))
    server.start()
    logger.info(('Server started, listening on ' + port))
    server.wait_for_termination()
def main():
    """Pretext-stage entry point: load a pretrained contrastive model, fill
    feature memory banks, and mine/save top-k nearest neighbors for the
    train and val sets (used later for clustering)."""
    p = create_config(args.config_env, args.config_exp)
    print(colored(p, 'red'))
    print(colored('Retrieve model', 'blue'))
    model = get_model(p)
    print('Model is {}'.format(model.__class__.__name__))
    # The generator variable `p` shadows the config only inside the genexp.
    print('Model parameters: {:.2f}M'.format((sum((p.numel() for p in model.parameters())) / 1000000.0)))
    model = model.cuda()
    print(colored('Set CuDNN benchmark', 'blue'))
    torch.backends.cudnn.benchmark = True
    print(colored('Retrieve dataset', 'blue'))
    train_transforms = get_train_transformations(p)
    print('Train transforms:', train_transforms)
    val_transforms = get_val_transformations(p)
    print('Validation transforms:', val_transforms)
    train_dataset = get_train_dataset(p, train_transforms, to_augmented_dataset=True, split='train+unlabeled')
    val_dataset = get_val_dataset(p, val_transforms)
    train_dataloader = get_train_dataloader(p, train_dataset)
    val_dataloader = get_val_dataloader(p, val_dataset)
    print('Dataset contains {}/{} train/val samples'.format(len(train_dataset), len(val_dataset)))
    print(colored('Build MemoryBank', 'blue'))
    # The 'base' bank uses the train split with *val* transforms (no
    # augmentation) so mined neighbors are stable.
    base_dataset = get_train_dataset(p, val_transforms, split='train')
    base_dataloader = get_val_dataloader(p, base_dataset)
    # Feature width is taken from the contrastive head's input dimension.
    memory_bank_base = MemoryBank(len(base_dataset), model.contrastive_head[0].weight.shape[0], p['num_classes'], p['criterion_kwargs']['temperature'])
    memory_bank_base.cuda()
    memory_bank_val = MemoryBank(len(val_dataset), model.contrastive_head[0].weight.shape[0], p['num_classes'], p['criterion_kwargs']['temperature'])
    memory_bank_val.cuda()
    print(colored('Retrieve criterion', 'blue'))
    criterion = get_criterion(p)
    print('Criterion is {}'.format(criterion.__class__.__name__))
    criterion = criterion.cuda()
    print(colored('Retrieve optimizer', 'blue'))
    optimizer = get_optimizer(p, model)
    print(optimizer)
    # Load the pretext checkpoint (CPU map) then move the model to GPU.
    state = torch.load(p['pretext_model'], map_location='cpu')
    model.load_state_dict(state)
    model.cuda()
    print(colored('Starting main loop', 'blue'))
    # NOTE(review): this immediately re-saves the checkpoint that was just
    # loaded — presumably to normalize its on-disk format; confirm intended.
    torch.save(model.state_dict(), p['pretext_model'])
    print(colored('Fill memory bank for mining the nearest neighbors (train) ...', 'blue'))
    fill_memory_bank(base_dataloader, model, memory_bank_base)
    topk = 20
    print(('Mine the nearest neighbors (Top-%d)' % topk))
    (indices, acc) = memory_bank_base.mine_nearest_neighbors(topk)
    print(('Accuracy of top-%d nearest neighbors on train set is %.2f' % (topk, (100 * acc))))
    np.save(p['topk_neighbors_train_path'], indices)
    print(colored('Fill memory bank for mining the nearest neighbors (val) ...', 'blue'))
    fill_memory_bank(val_dataloader, model, memory_bank_val)
    topk = 5
    print(('Mine the nearest neighbors (Top-%d)' % topk))
    (indices, acc) = memory_bank_val.mine_nearest_neighbors(topk)
    print(('Accuracy of top-%d nearest neighbors on val set is %.2f' % (topk, (100 * acc))))
    np.save(p['topk_neighbors_val_path'], indices)
class Visualizer():
    """Training/test visualizer (pix2pixHD-style).

    Logs images and scalar losses to three sinks, each independently
    toggleable via ``opt``:
      * TensorBoard, through the TF1.x ``tf.Summary`` protobuf API
        (``opt.tf_log``),
      * a static HTML page with saved image files (``opt.isTrain`` and not
        ``opt.no_html``),
      * a plain-text loss log (always, at ``loss_log.txt``).
    """

    def __init__(self, opt):
        # `opt` is the parsed options object; only the attributes read below
        # are required (tf_log, isTrain, no_html, display_winsize, name,
        # checkpoints_dir, label_nc).
        self.opt = opt
        self.tf_log = opt.tf_log
        # HTML output only during training and when not explicitly disabled.
        self.use_html = (opt.isTrain and (not opt.no_html))
        self.win_size = opt.display_winsize
        self.name = opt.name
        if self.tf_log:
            # Deferred import: TensorFlow is only needed when tf_log is on.
            # NOTE(review): uses the TF1.x API (tf.summary.FileWriter); will
            # not work unmodified on TF2 — confirm the pinned TF version.
            import tensorflow as tf
            self.tf = tf
            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
            self.writer = tf.summary.FileWriter(self.log_dir)
        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print(('create web directory %s...' % self.web_dir))
            util.mkdirs([self.web_dir, self.img_dir])
        # Text loss log is always created; opened in append mode so repeated
        # runs accumulate, separated by a timestamp header.
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, 'a') as log_file:
            now = time.strftime('%c')
            log_file.write((' Training Loss (%s) \n' % now))

    def display_current_results(self, visuals, epoch, step):
        """Log current visuals (dict label -> image ndarray, or list of
        ndarrays) to TensorBoard and/or the HTML page.

        `epoch` names the image files; `step` is the TensorBoard x-axis.
        """
        if self.tf_log:
            img_summaries = []
            for (label, image_numpy) in visuals.items():
                # NOTE(review): StringIO() itself never raises here, so the
                # BytesIO fallback is effectively dead on py2 and this whole
                # branch fails later on py3 where StringIO rejects bytes —
                # presumably a py2/py3 compat shim gone wrong; verify.
                try:
                    s = StringIO()
                except:
                    s = BytesIO()
                # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2+;
                # this requires an old SciPy (plus Pillow) — confirm pin.
                scipy.misc.toimage(image_numpy).save(s, format='jpeg')
                img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
                img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))
            summary = self.tf.Summary(value=img_summaries)
            self.writer.add_summary(summary, step)
        if self.use_html:
            # Save each visual (or each element of a list visual) to disk,
            # keyed by epoch and label.
            for (label, image_numpy) in visuals.items():
                if isinstance(image_numpy, list):
                    for i in range(len(image_numpy)):
                        img_path = os.path.join(self.img_dir, ('epoch%.3d_%s_%d.jpg' % (epoch, label, i)))
                        util.save_image(image_numpy[i], img_path)
                else:
                    img_path = os.path.join(self.img_dir, ('epoch%.3d_%s.jpg' % (epoch, label)))
                    util.save_image(image_numpy, img_path)
            # Rebuild the whole HTML index, newest epoch first.
            # ('reflesh' is the actual keyword name in the html helper.)
            webpage = html.HTML(self.web_dir, ('Experiment name = %s' % self.name), reflesh=1)
            for n in range(epoch, 0, (- 1)):
                webpage.add_header(('epoch [%d]' % n))
                ims = []
                txts = []
                links = []
                for (label, image_numpy) in visuals.items():
                    if isinstance(image_numpy, list):
                        for i in range(len(image_numpy)):
                            img_path = ('epoch%.3d_%s_%d.jpg' % (n, label, i))
                            ims.append(img_path)
                            txts.append((label + str(i)))
                            links.append(img_path)
                    else:
                        img_path = ('epoch%.3d_%s.jpg' % (n, label))
                        ims.append(img_path)
                        txts.append(label)
                        links.append(img_path)
                # Split wide rows in two so the page stays readable.
                if (len(ims) < 6):
                    webpage.add_images(ims, txts, links, width=self.win_size)
                else:
                    num = int(round((len(ims) / 2.0)))
                    webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
                    webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
            webpage.save()

    def plot_current_errors(self, errors, step):
        """Write scalar errors (dict tag -> value) to TensorBoard at `step`."""
        if self.tf_log:
            for (tag, value) in errors.items():
                summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
                self.writer.add_summary(summary, step)

    def print_current_errors(self, epoch, i, errors, t):
        """Print a one-line loss summary and append it to the text log.

        `t` is the per-iteration time in seconds; zero-valued losses are
        skipped to keep the line short.
        """
        message = ('(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t))
        for (k, v) in sorted(errors.items()):
            if (v != 0):
                message += ('%s: %.3f ' % (k, v))
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))

    def save_images(self, image_dir, visuals, image_path, webpage=None):
        """Save visuals for one input image under a per-source subdirectory,
        optionally registering them on `webpage`.

        `image_path` is a list; only its first element is used to derive the
        subdirectory and base filename. Label maps ('real_A' when label_nc
        is set) are saved lossless as PNG, everything else as JPEG.
        """
        dirname = os.path.basename(os.path.dirname(image_path[0]))
        image_dir = os.path.join(image_dir, dirname)
        util.mkdir(image_dir)
        name = os.path.basename(image_path[0])
        name = os.path.splitext(name)[0]
        if (webpage is not None):
            webpage.add_header(name)
            (ims, txts, links) = ([], [], [])
        for (label, image_numpy) in visuals.items():
            save_ext = ('png' if (('real_A' in label) and (self.opt.label_nc != 0)) else 'jpg')
            image_name = ('%s_%s.%s' % (label, name, save_ext))
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)
            if (webpage is not None):
                ims.append(image_name)
                txts.append(label)
                links.append(image_name)
        if (webpage is not None):
            webpage.add_images(ims, txts, links, width=self.win_size)

    def save_test_images(self, save_dir, visuals, index):
        """Save all visuals as PNGs into a zero-padded per-index directory
        (`save_dir/0007/label.png`)."""
        # NOTE: `dir` shadows the builtin; kept as-is (local scope only).
        dir = (save_dir + ('/%04d/' % index))
        util.mkdir(dir)
        for (label, image_numpy) in visuals.items():
            save_ext = 'png'
            image_name = ('%s.%s' % (label, save_ext))
            save_path = os.path.join(dir, image_name)
            util.save_image(image_numpy, save_path)

    def vis_print(self, message):
        """Print `message` and append it to the text loss log."""
        print(message)
        with open(self.log_name, 'a') as log_file:
            log_file.write(('%s\n' % message))