code
stringlengths
101
5.91M
def qrandint(lower: int, upper: int, q: int=1) -> 'tune.sample.Integer':
    """Sample an integer uniformly from [lower, upper], quantized to multiples of q.

    Thin pass-through to ``tune.qrandint``.
    """
    sampler = tune.qrandint(lower, upper, q)
    return sampler
class DAIN(nn.Module):
    """Dual-stream classifier: one ResNet-style backbone for the raw image and
    one for a difference image; pooled features are mixed and classified.
    """

    def __init__(self, nclass, model1, model2):
        """
        Args:
            nclass: number of output classes.
            model1: backbone for the raw image (ResNet-style attribute layout).
            model2: backbone for the difference image.
        """
        super(DAIN, self).__init__()
        self.model1 = model1
        self.model2 = model2
        # Each backbone yields a 512-d pooled vector; the streams are
        # concatenated before the final classifier.
        self.fc = nn.Linear((512 * 2), nclass)

    @staticmethod
    def _extract(backbone, x):
        # Run a ResNet-style backbone through stem, stages, and global pooling.
        x = backbone.conv1(x)
        x = backbone.bn1(x)
        x = backbone.relu(x)
        x = backbone.maxpool(x)
        for stage in (backbone.layer1, backbone.layer2, backbone.layer3, backbone.layer4):
            x = stage(x)
        return backbone.avgpool(x)

    def forward(self, img, diff_img):
        img_f = torch.flatten(self._extract(self.model1, img), 1)
        diff_img_f = torch.flatten(self._extract(self.model2, diff_img), 1)
        # Residual-style mix: fold the image features into the diff features.
        diff_img_f = diff_img_f + img_f
        out = torch.cat((img_f, diff_img_f), dim=1)
        return self.fc(out)
# NOTE(review): the source line began with a bare ``(eq=False)`` fragment --
# restored as a ``@dataclass(eq=False)`` decorator, which matches the
# annotated-fields + ``__post_init__`` layout. Confirm against the original repo.
@dataclass(eq=False)
class DeFeatNet(BaseModel):
    """Self-supervised depth + dense-feature network: bundles a depth decoder,
    a pose network, and a feature descriptor network.
    """

    num_layers: int          # backbone depth (e.g. 18/50)
    preres: bool             # backbone pretrained flag -- TODO confirm semantics
    scales: list = range(4)  # output scales for the depth decoder
    use_skips: bool = True
    n_dims: int = 3          # feature-descriptor dimensionality
    spp_branches: list = None
    activation: str = 'relu'
    im_pad: int = None
    norm: bool = True

    def __post_init__(self):
        super().__post_init__()
        self.depth_net = DepthNet(self.num_layers, self.preres, self.scales, self.use_skips)
        self.pose_net = PoseNet(self.num_layers, self.preres)
        self.feat_net = FeatNet(self.n_dims, self.spp_branches, self.activation, self.im_pad, self.norm)

    @staticmethod
    def add_parser_args(parser):
        # Bug fix: had no ``self``/``cls`` parameter, so it could only ever be
        # called on the class -- made that explicit with @staticmethod.
        DepthNet.add_parser_args(parser)
        FeatNet.add_parser_args(parser)

    def forward(self, target_frames, support_frames, support_idxs):
        target_disps = self.depth_net(target_frames)
        target_features = self.feat_net(target_frames)
        # Batch all support frames through the feature net in a single pass.
        support_features = self.feat_net(torch.cat(support_frames, dim=0)).chunk(len(support_frames), dim=0)
        poses = []
        for (idx, sf) in zip(support_idxs, support_frames):
            # Frame order fed to the pose net depends on the sign of the
            # temporal offset -- presumably (earlier, later); verify upstream.
            inp = ((sf, target_frames) if (idx > 0) else (target_frames, sf))
            poses.append(self.pose_net(*inp))
        return (target_disps, target_features, support_features, poses)
def ReadFileSL(x_axis, tthread, batchInterval, NUM_ITEMS, deposit_ratio, key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity):
    """Read throughput numbers for the 'OPGSA' and 'GSA' schedulers across the
    item counts in ``x_axis``.

    Returns:
        [[opgsa throughputs...], [gsa throughputs...]] (floats, one per x_axis entry).

    Note: the ``NUM_ITEMS`` parameter is intentionally superseded by the values
    iterated from ``x_axis`` (kept for signature compatibility).
    """
    input_events = tthread * batchInterval
    y = []
    # Bug fixes: files were opened without being closed (handle leak) and the
    # identical loop body was duplicated for each scheduler.
    for scheduler in ('OPGSA', 'GSA'):
        series = []
        for num_items in x_axis:
            path = getPathSL(scheduler, input_events, tthread, num_items, deposit_ratio,
                             key_skewness, overlap_ratio, abort_ratio, isCyclic, complexity)
            with open(path) as f:
                first_line = f.readline()
            # First line looks like 'Throughput: <value>'.
            series.append(float(first_line.split(': ')[1]))
        y.append(series)
    print(y)
    return y
# NOTE(review): the source began with a bare ``.parametrize(...)`` call --
# the ``@pytest.mark`` prefix was lost in extraction; restored below.
@pytest.mark.parametrize(
    'in_features, out_features, C, a, b, bias, batch_size, use_prototypes',
    [(in_features, out_features, C, a, b, bias, batch_size, use_prototypes)
     for in_features in [512]
     for out_features in [32, 128]
     for C in [4, 16]
     for a in [1.0]
     for b in [0.0]
     for bias in [True, False]
     for batch_size in [1, 8]
     for use_prototypes in [False]])
def test_linear_module(in_features: int, out_features: int, C: int, a: float, b: float, bias: bool, batch_size: int, use_prototypes: bool) -> None:
    """Exercise the quantized-linear helper over a small hyperparameter grid."""
    n_row_learn = 10000
    n_row_test = 2000
    linear_helper(in_features, out_features, bias, n_row_learn, n_row_test, C, a, b, batch_size=batch_size, use_prototypes=use_prototypes)
def tiny_oshi_zumo_nfsp_dqn_params(env: MultiAgentEnv) -> Dict[(str, Any)]:
    """DQN hyperparameters for NFSP best-response on tiny Oshi-Zumo.

    Overrides the shared tiny-DQN defaults with valid-action-masked
    epsilon-greedy exploration and a valid-action-masked FC model.

    NOTE(review): ``int(.0)`` evaluates to 0, i.e. no epsilon annealing at
    all -- looks like a truncated constant (e.g. ``int(2e5)``); confirm
    against the original experiment config.
    """
    return merge_dicts(GRL_DEFAULT_OSHI_ZUMO_TINY_DQN_PARAMS, {
        'exploration_config': {
            'epsilon_timesteps': int(.0),
            'final_epsilon': 0.001,
            'initial_epsilon': 0.06,
            'type': ValidActionsEpsilonGreedy},
        'model': merge_dicts(MODEL_DEFAULTS, {
            'fcnet_activation': 'relu',
            'fcnet_hiddens': [128, 128],
            'custom_model': get_valid_action_fcn_class_for_env(env=env)})})
def input_fn_builder(features, seq_length, drop_remainder):
    """Build a TPUEstimator ``input_fn`` that serves pre-extracted SQuAD-style
    features as in-memory constant tensors.
    """
    # Columnar copies of the feature fields, one list per output tensor.
    all_unique_ids = []
    all_input_ids = []
    all_input_mask = []
    all_segment_ids = []
    all_start_positions = []
    all_end_positions = []
    for feature in features:
        all_unique_ids.append(feature.unique_id)
        all_input_ids.append(feature.input_ids)
        all_input_mask.append(feature.input_mask)
        all_segment_ids.append(feature.segment_ids)
        all_start_positions.append(feature.start_position)
        all_end_positions.append(feature.end_position)

    def input_fn(params):
        """Return an infinitely-repeating, batched Dataset of the features."""
        batch_size = params['batch_size']
        num_examples = len(features)
        # NOTE: materializes the whole dataset as graph constants -- fine for
        # eval-sized data, would not scale to large training sets.
        feature_map = {'unique_ids': tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32), 'input_ids': tf.constant(all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'input_mask': tf.constant(all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), 'segment_ids': tf.constant(all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), 'start_positions': tf.constant(all_start_positions, shape=[num_examples], dtype=tf.int32), 'end_positions': tf.constant(all_end_positions, shape=[num_examples], dtype=tf.int32)}
        d = tf.data.Dataset.from_tensor_slices(feature_map)
        d = d.repeat()
        d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
        return d
    return input_fn
def pairwise_operator(codes, method):
    """Mean of ``method(a, b)`` over all unordered pairs of ``codes``.

    Args:
        codes: sequence of items to compare pairwise.
        method: symmetric-ish binary function applied to each (earlier, later) pair.

    Returns:
        np.mean of the pairwise values (NaN, with a warning, when fewer than
        two codes are given -- unchanged from the original behavior).
    """
    from itertools import combinations
    # combinations preserves the original (i < j) iteration order.
    pairs = [method(a, b) for a, b in combinations(codes, 2)]
    return np.mean(pairs)
def _header_paths(): return ['', 'include', 'include/cuda', 'include/*-linux-gnu', 'extras/CUPTI/include', 'include/cuda/CUPTI', 'local/cuda/extras/CUPTI/include']
class TorchvisionNormalize():
    """Normalize an RGB uint8 image with torchvision's ImageNet statistics.

    Returns a float32 array of the same shape: (x/255 - mean) / std per channel.
    """

    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = mean
        self.std = std

    def __call__(self, img):
        imgarr = np.asarray(img)
        proc_img = np.empty_like(imgarr, np.float32)
        # Scale each channel to [0, 1], then standardize with its statistics.
        for ch in range(3):
            proc_img[..., ch] = (imgarr[..., ch] / 255.0 - self.mean[ch]) / self.std[ch]
        return proc_img
# NOTE(review): the source line began with a dangling ``_model`` fragment --
# almost certainly a truncated ``@register_model`` decorator (the timm
# convention for model constructors). Restored; confirm against upstream timm.
@register_model
def eca_nfnet_l1(pretrained=False, **kwargs):
    """ECA-NFNet-L1: normalizer-free network variant with ECA attention."""
    return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs)
def forward_wrapper(model, input, device='cpu'):
    """Call ``model`` on ``input``, first moving tensor payloads to ``device``.

    Mapping inputs are expanded as **kwargs, sequence inputs as *args, and
    anything else is passed as a single positional argument. Non-tensor
    leaves are left untouched; dict inputs are moved in place.
    """
    if isinstance(input, (dict, UserDict)):
        if device != 'cpu':
            for key in input.keys():
                value = input[key]
                if isinstance(value, torch.Tensor):
                    input[key] = value.to(device)
        return model(**input)
    if isinstance(input, (list, tuple)):
        if device != 'cpu':
            input = [v.to(device) if isinstance(v, torch.Tensor) else v for v in input]
        return model(*input)
    if device != 'cpu' and isinstance(input, torch.Tensor):
        input = input.to(device)
    return model(input)
def test_neither_x0_nor_initial_solutions_provided(archive_fixture):
    """GaussianEmitter must raise ValueError when constructed with neither an
    initial point (x0) nor initial solutions."""
    (archive, _) = archive_fixture
    with pytest.raises(ValueError):
        GaussianEmitter(archive, sigma=1.0)
def get_doc_cell(func_name):
    """Build a notebook code cell that renders documentation for ``func_name``
    via ``show_doc`` (second argument marks it as a doc cell)."""
    return get_code_cell(f'show_doc({func_name})', True)
def QImage_from_np(img):
    """Convert a uint8 numpy image to a QImage.

    Accepts HxW (plain grayscale), HxWx1, HxWx3 (BGR) or HxWx4 (ARGB) arrays.

    Raises:
        ValueError: for non-uint8 dtypes or unsupported channel counts.

    NOTE(review): the returned QImage aliases ``img``'s buffer -- the caller
    must keep ``img`` alive while the QImage is in use.
    """
    if img.dtype != np.uint8:
        raise ValueError('img should be in np.uint8 format')
    if img.ndim == 2:
        # Generalization: the original crashed on 2-D grayscale arrays when
        # unpacking (h, w, c); treat them as single-channel.
        img = img[:, :, np.newaxis]
    (h, w, c) = img.shape
    if c == 1:
        fmt = QImage.Format_Grayscale8
    elif c == 3:
        fmt = QImage.Format_BGR888
    elif c == 4:
        fmt = QImage.Format_ARGB32
    else:
        raise ValueError('unsupported channel count')
    # bytesPerLine = channels * width (rows assumed densely packed).
    return QImage(img.data, w, h, c * w, fmt)
class TFLongformerSelfAttention():
    """Import-guard placeholder for the TensorFlow Longformer self-attention
    layer: instantiating it delegates to ``requires_tf``, which is expected to
    raise a helpful error when TensorFlow is unavailable."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)
def get_logger(root, name=None, debug=True):
    """Create a logger writing to the console and, when ``debug`` is False,
    also to ``<root>/run.log``.

    Args:
        root: directory for the log file.
        name: logger name (None -> the root logger).
        debug: if True, console logs at DEBUG and no file is written;
            otherwise console logs at INFO and full DEBUG output goes to file.

    Returns:
        The configured ``logging.Logger``.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s: %(message)s', '%Y-%m-%d %H:%M')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG if debug else logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    if not debug:
        # Bug fix: the FileHandler used to be opened (mode='w', truncating any
        # existing run.log) even in debug runs where it was never attached,
        # leaking an open file handle. Only create it when it will be used.
        logfile = os.path.join(root, 'run.log')
        print('Create Log File in: ', logfile)  # typo fix: 'Creat' -> 'Create'
        file_handler = logging.FileHandler(logfile, mode='w')
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def cook_test(test, refparam, eff=None, n=4):
    """Precompute BLEU statistics for one candidate against reference stats.

    Args:
        test: candidate sentence.
        refparam: (reference lengths, max reference n-gram counts).
        eff: 'closest' to pick the reference length nearest the candidate,
            anything else passes the raw reference length(s) through.
        n: maximum n-gram order.

    Returns:
        dict with 'reflen', 'testlen', 'guess' (possible n-gram slots) and
        'correct' (clipped n-gram matches per order).
    """
    reflen, refmaxcounts = refparam[0], refparam[1]
    testlen, counts = precook(test, n, True)
    result = {}
    if eff == 'closest':
        result['reflen'] = min((abs(l - testlen), l) for l in reflen)[1]
    else:
        result['reflen'] = reflen
    result['testlen'] = testlen
    result['guess'] = [max(0, testlen - k + 1) for k in range(1, n + 1)]
    correct = [0] * n
    for ngram, count in counts.items():
        # Clip each n-gram count by the maximum count in any reference.
        correct[len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
    result['correct'] = correct
    return result
class AntFileSystem(object):
    """Abstract file-system interface; concrete backends override each
    operation. Also usable as a (no-op) context manager.
    """

    def __init__(self, uri):
        raise NotImplementedError

    def exists(self, filename):
        raise NotImplementedError

    def remove(self, filename):
        raise NotImplementedError

    def stat(self, filename):
        raise NotImplementedError

    def list_dir(self, dirname):
        raise NotImplementedError

    def makedirs(self, dirname):
        raise NotImplementedError

    def rename(self, oldname, newname, overwrite=False):
        raise NotImplementedError

    def remove_dir(self, dirname):
        raise NotImplementedError

    def create_dir(self, dirname):
        raise NotImplementedError

    def open(self, filename, mode):
        raise NotImplementedError

    def close(self):
        # Default: nothing to release.
        pass

    def __enter__(self):
        return self

    def __exit__(self, type=None, value=None, trace=None):
        # Intentionally does not call close(); subclasses may override.
        pass
class ResUNetIN101(ResUNet101):
    """ResUNet-101 variant: overrides the parent's class attributes to use
    sparse instance normalization and the instance-norm bottleneck block."""
    NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
    BLOCK = BottleneckIN
class ResidualConvUnit_custom(nn.Module):
    """Residual conv unit (MiDaS/DPT style): (activation -> conv [-> BN]) twice,
    with a quantization-friendly residual add via FloatFunctional."""

    def __init__(self, features, activation, bn):
        """
        Args:
            features: channel count (input == output).
            activation: activation module applied before each conv.
            bn: if True, add BatchNorm after each conv.
        """
        super().__init__()
        self.bn = bn
        self.groups = 1
        self.conv1 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
        self.conv2 = nn.Conv2d(features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups)
        if self.bn:  # idiom fix: was ``== True``
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)
        self.activation = activation
        # FloatFunctional so the residual add is observable for quantization.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        out = self.activation(x)
        out = self.conv1(out)
        if self.bn:
            out = self.bn1(out)
        out = self.activation(out)
        out = self.conv2(out)
        if self.bn:
            out = self.bn2(out)
        if self.groups > 1:
            # NOTE(review): ``self.conv_merge`` is never defined in this class;
            # with the hard-coded ``groups = 1`` this branch is dead, but it
            # would raise AttributeError if a subclass ever enabled groups.
            out = self.conv_merge(out)
        return self.skip_add.add(out, x)
def modify_space_hw(space, h, w):
    """Return a copy of ``space`` with image observation height/width replaced.

    Recurses into Dict spaces; non-image / non-Box spaces are returned as-is.
    NOTE(review): assumes the last two shape entries are (h, w) -- confirm
    against ``is_image_space``'s layout convention.
    """
    if (isinstance(space, gym.spaces.Box) and is_image_space(space)):
        shape = list(space.shape)
        shape[(- 2)] = h
        shape[(- 1)] = w
        # Image spaces are rebuilt as uint8 in [0, 255].
        return gym.spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)
    elif isinstance(space, gym.spaces.Dict):
        return gym.spaces.Dict({k: modify_space_hw(v, h, w) for (k, v) in space.items()})
    else:
        return space
def test(in_dataset, out_dataset, wide, epsilon, temperature):
    """Run ODIN-style OOD evaluation over the 5 leave-class-out folds of a
    CIFAR model and aggregate the metrics.

    NOTE(review): relies on module-level ``transform``, ``criterion``, ``d``
    and ``m`` helpers defined elsewhere in the file; requires CUDA and the
    checkpoint directory layout below.
    """
    # Out-of-distribution test images come from an ImageFolder dump on disk.
    testsetout = torchvision.datasets.ImageFolder(os.path.expanduser('./data/{}'.format(out_dataset)), transform=transform)
    testloaderOut = torch.utils.data.DataLoader(testsetout, batch_size=100, shuffle=False, num_workers=2)
    if (in_dataset == 'cifar100'):
        testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
    elif (in_dataset == 'cifar10'):
        testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
        testloaderIn = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)
    for fold in range(1, 6):
        print(f'Processing fold {fold}')
        # 'cifar10'/'cifar100' -> 10/100 classes; each fold's model was
        # trained on 4/5 of the classes.
        nclasses = int(in_dataset[5:])
        if wide:
            net = models.WideResNet(int(((nclasses * 4) / 5)))
            ck = torch.load(f'./checkpoints/{in_dataset}_fold_{fold}_wide_checkpoint/model_best.pth.tar')
        else:
            net = models.DenseNet(int(((nclasses * 4) / 5)))
            ck = torch.load(f'./checkpoints/{in_dataset}_fold_{fold}_dense_checkpoint/model_best.pth.tar')
        net.load_state_dict(ck['state_dict'])
        net.cuda()
        net.eval()
        d.testData(net, criterion, testloaderIn, testloaderOut, in_dataset, out_dataset, epsilon, temperature, fold)
    m.test(in_dataset, out_dataset, plot=True)
class BBoxCrop(object):
    """Crop a PIL image to a padded bounding box, clamped to image bounds."""

    def __init__(self, padding=0):
        """
        Args:
            padding: extra pixels added on every side of the box.
        """
        if (type(padding) != int):
            raise TypeError('padding should be int')
        self.padding = padding

    def __call__(self, img, bbox):
        """Crop ``img`` to ``bbox``.

        NOTE(review): the variable names suggest [x, y, w, h] but the math
        below treats the box as [x0, y0, x1, y1] corners -- confirm callers.
        """
        # Bug fix: the original condition was mis-parenthesized as
        # ``(isinstance(...) and len(bbox)) == 4`` -- coincidentally the same
        # outcome, but the intent is the explicit form below.
        if not (isinstance(bbox, (list, tuple, np.ndarray)) and len(bbox) == 4):
            raise TypeError('bbox should be list or tuple or ndarray like [x,y,w,h]. Got {}'.format(type(bbox)))
        bbox = np.array(bbox).round().astype('int32')
        if not isinstance(img, Image.Image):
            raise TypeError('img should be PIL Image')
        (width, height) = img.size
        # Expand by padding, clamp to image bounds.
        x0 = max(bbox[0] - self.padding, 0)
        y0 = max(bbox[1] - self.padding, 0)
        x1 = min(bbox[2] + self.padding, width)
        y1 = min(bbox[3] + self.padding, height)
        return img.crop((x0, y0, x1, y1))
class TFMarianPreTrainedModel(metaclass=DummyObject):
    """Import-guard dummy: stands in for the TF Marian model when TensorFlow
    is not installed; instantiation raises via ``requires_backends``."""
    # Backend(s) that must be importable for the real class.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class BaseSampler(object):
    """Common scaffolding for rollout samplers: wires up env/policy/pool,
    tracks the most recent paths, and exposes batch access to the pool.

    Subclasses implement ``sample``.
    """

    def __init__(self, max_path_length, min_pool_size, batch_size, store_last_n_paths=10):
        self._max_path_length = max_path_length
        self._min_pool_size = min_pool_size
        self._batch_size = batch_size
        self._store_last_n_paths = store_last_n_paths
        self._last_n_paths = deque(maxlen=store_last_n_paths)
        # Live objects; attached later via initialize().
        self.env = None
        self.policy = None
        self.pool = None
        self.action_low = None
        self.action_high = None

    def initialize(self, env, policy, pool):
        """Attach the environment, policy and replay pool to this sampler."""
        self.env = env
        self.policy = policy
        self.pool = pool
        self.action_high = env.action_space.high
        self.action_low = env.action_space.low

    def set_policy(self, policy):
        self.policy = policy

    def clear_last_n_paths(self):
        self._last_n_paths.clear()

    def get_last_n_paths(self, n=None):
        """Return up to ``n`` most recent paths (default: the stored maximum)."""
        limit = self._store_last_n_paths if n is None else n
        return tuple(islice(self._last_n_paths, None, limit))

    def sample(self):
        raise NotImplementedError

    def batch_ready(self):
        """True once the pool holds at least ``min_pool_size`` samples."""
        return self.pool.size >= self._min_pool_size

    def random_batch(self, batch_size=None, **kwargs):
        return self.pool.random_batch(batch_size or self._batch_size, **kwargs)

    def terminate(self):
        self.env.close()

    def get_diagnostics(self):
        return OrderedDict({'pool-size': self.pool.size})

    def __getstate__(self):
        # Drop unpicklable live objects; they get re-attached after unpickling.
        excluded = ('env', 'policy', 'pool')
        return {k: v for k, v in self.__dict__.items() if k not in excluded}

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.env = None
        self.policy = None
        self.pool = None
def eval_argparser():
    """Build the command-line argument parser for evaluation runs
    (dataset path plus the shared common arguments)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_path', type=str, help='Path to dataset')
    _add_common_args(parser)
    return parser
def test1():
    """Shortest paths from 'A' on a small graph with negative edge weights
    (but no negative cycles)."""
    edges = {
        ('A', 'B'): 3, ('A', 'C'): 3, ('A', 'F'): 5,
        ('C', 'B'): -2, ('C', 'D'): 7, ('C', 'E'): 4,
        ('D', 'E'): -5, ('E', 'F'): -1,
    }
    expected = {'A': 0, 'B': 1, 'C': 3, 'D': 10, 'E': 5, 'F': 4}
    assert shortest_paths('A', edges) == expected
class GenericDataloader(DataLoader):
    """Thin DataLoader wrapper pulling batch size / worker count from a config.

    Args:
        dataset: source dataset.
        config: namespace providing ``batch_size`` and ``num_workers``.
        shuffle: whether to reshuffle every epoch.
        drop_last: whether to drop the final incomplete batch.
    """

    def __init__(self, dataset: Dataset, config: Namespace, shuffle: bool=True, drop_last: bool=False):
        # Bug fix: ``drop_last`` was accepted but hard-coded to False in the
        # super() call, silently ignoring the caller's value.
        super().__init__(dataset, batch_size=config.batch_size, shuffle=shuffle,
                         pin_memory=False, num_workers=config.num_workers,
                         drop_last=drop_last)
def get_gan_criterion(mode):
    """Build the (discriminator, generator) loss pair for a GAN flavor.

    Supported modes: 'dcgan' (BCE with logits), 'lsgan' (MSE), 'hinge'.

    Raises:
        NotImplementedError: for any other mode.
    """
    if mode == 'dcgan':
        return GANLoss(dis_loss=nn.BCEWithLogitsLoss(), gen_loss=nn.BCEWithLogitsLoss())
    if mode == 'lsgan':
        return GANLoss(dis_loss=nn.MSELoss(), gen_loss=nn.MSELoss())
    if mode == 'hinge':
        def hinge_dis(pre, margin):
            # sign(margin): +1 for real targets, -1 for fake targets.
            logict = (margin > 0).float() + (-1.0) * (margin < 0).float()
            return torch.mean(F.relu((margin - pre) * logict))

        def hinge_gen(pre, margin):
            return -torch.mean(pre)

        return GANLoss(real_label=1, fake_label=(-1), dis_loss=hinge_dis, gen_loss=hinge_gen)
    raise NotImplementedError('{} is not implementation'.format(mode))
def load_reactant_vocab(path_to_json: str) -> typing.List[str]:
    """Load a reactant vocabulary JSON mapping {token: index} and return the
    tokens ordered by their index."""
    with open(path_to_json, 'r') as handle:
        vocab = json.load(handle)
    return sorted(vocab.keys(), key=vocab.get)
def clean(embedding_path: str, output_path: str=None, block_size: int=665536):
    """Copy an embedding text file, silently dropping undecodable bytes.

    Reads roughly ``block_size`` bytes of lines at a time and rewrites them.

    NOTE(review): ``output_path`` defaults to None, which would crash
    ``open`` -- callers are expected to always pass it. ``block_size=665536``
    looks like a typo for 65536. ``' '.join(lines)`` inserts a space between
    lines that already end in newlines -- presumably intentional for this
    format, but verify against consumers of the cleaned file.
    """
    with open(embedding_path, 'r', encoding='utf8', errors='ignore') as input_file:
        with open(output_path, 'w+', encoding='utf8') as output_file:
            lines: List[str] = input_file.readlines(block_size)
            while lines:
                print(' '.join(lines), file=output_file)
                lines = input_file.readlines(block_size)
class PruningMode(Enum):
    """Supported weight-pruning strategies; values are the names used in
    configuration files."""
    BASICMAGNITUDE = 'basic_magnitude'
    PATTERNLOCK = 'pattern_lock'
    GROUPLASSO = 'group_lasso'
def compare(string1, string2):
    """True iff both the first halves and the second halves of the two
    strings pass ``compare_cell``."""
    mid1 = len(string1) // 2
    mid2 = len(string2) // 2
    first_halves_ok = compare_cell(string1[:mid1], string2[:mid2])
    second_halves_ok = compare_cell(string1[mid1:], string2[mid2:])
    return bool(first_halves_ok and second_halves_ok)
class TestFilterLearnableParmams(unittest.TestCase):
    """Tests for ``filter_learnable_params``. (Class-name typo 'Parmams' kept
    so test discovery / external references are unaffected.)"""

    def test_filter_learnable_params(self) -> None:
        boring_model = BoringModel()
        large_boring_model = LargeBoringModel()
        boring_model_params = list(boring_model.parameters())
        filtered_boring_model_params = filter_learnable_params(boring_model_params, boring_model)
        # Every parameter of the plain model survives filtering (identity
        # comparison: tensors are matched by object, not by value).
        assert all([any([(param is filtered_param) for filtered_param in filtered_boring_model_params]) for param in boring_model_params])
        assert (len(boring_model_params) == len(filtered_boring_model_params))
        large_boring_model_params = list(large_boring_model.parameters())
        filtered_large_boring_model_params = filter_learnable_params(large_boring_model_params, large_boring_model)
        # NOTE(review): this comprehension iterates ``boring_model_params``
        # (the small model), not ``large_boring_model_params`` -- looks like a
        # copy-paste bug; confirm the intended assertion.
        assert any([(not any([(param is filtered_param) for filtered_param in filtered_large_boring_model_params])) for param in boring_model_params])
        # Exactly two of the large model's params are expected to be filtered out.
        assert (len(large_boring_model_params) == (len(filtered_large_boring_model_params) + 2))
def test_reallocation_f(capture, msg):
    """Verify the exact new/delete call sequence when a placement-new overload
    needs preallocated storage (pybind11 allocation-tracking test).

    The expected string embeds per-line explanations that ``strip_comments``
    removes before comparison.
    """
    with capture:
        create_and_destroy(4, 0.5)
    assert (msg(capture) == strip_comments('\n noisy new # preallocation needed before invoking placement-new overload\n noisy delete # deallocation of preallocated storage\n noisy new # Factory pointer allocation\n NoisyAlloc(int 4) # factory pointer construction\n ---\n ~NoisyAlloc() # Destructor\n noisy delete # operator delete\n '))
class RobertaModel(metaclass=DummyObject):
    """Import-guard dummy: stands in for the real RoBERTa model when PyTorch
    is not installed; instantiation raises via ``requires_backends``."""
    # Backend(s) that must be importable for the real class.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class Rescal(BaseModel):
    """RESCAL knowledge-graph embedding: score(h, r, t) ~ h^T M_r t, with one
    dense (dim x dim) interaction matrix M_r per relation.

    ``forward`` returns the *negative* bilinear score so that lower = better
    fits the margin-loss convention used by ``loss``.
    """

    def __init__(self, entity_dict_len, relation_dict_len, embedding_dim, penalty_weight=0.0):
        super().__init__(model_name='Rescal', penalty_weight=penalty_weight)
        self.entity_dict_len = entity_dict_len
        self.relation_dict_len = relation_dict_len
        self.embedding_dim = embedding_dim
        self.entity_embedding = nn.Embedding(entity_dict_len, embedding_dim)
        # Each relation embeds to a flattened (dim x dim) matrix.
        self.relation_embedding = nn.Embedding(relation_dict_len, embedding_dim * embedding_dim)
        self._reset_param()

    def _reset_param(self):
        nn.init.xavier_uniform_(self.entity_embedding.weight.data)
        nn.init.xavier_uniform_(self.relation_embedding.weight.data)

    def get_relation_embedding(self, relation_ids):
        # Bug fix: previously referenced the nonexistent ``self.r_embedding``
        # (AttributeError at runtime).
        return self.relation_embedding(relation_ids)

    # Backward-compatible alias for the original (misspelled) method name.
    get_realation_embedding = get_relation_embedding

    def get_entity_embedding(self, entity_ids):
        # Bug fix: previously referenced the nonexistent ``self.e_embedding``.
        return self.entity_embedding(entity_ids)

    def get_triplet_embedding(self, data):
        # Bug fix: previously used undefined e_embedding/r_embedding attributes.
        h_embedding = self.entity_embedding(data[0])
        r_embedding = self.relation_embedding(data[1])
        t_embedding = self.entity_embedding(data[2])
        return (h_embedding, r_embedding, t_embedding)

    def forward(self, data):
        """Score (head, relation, tail) id batches; returns negated scores."""
        (batch_h, batch_r, batch_t) = (data[0], data[1], data[2])
        A = self.entity_embedding(batch_h)
        A = F.normalize(A, p=2, dim=(- 1))
        R = self.relation_embedding(batch_r).view((- 1), self.embedding_dim, self.embedding_dim)
        A_T = self.entity_embedding(batch_t).view((- 1), self.embedding_dim, 1)
        A_T = F.normalize(A_T, p=2, dim=1)
        tr = torch.matmul(R, A_T)
        tr = tr.view((- 1), self.embedding_dim)
        return (- torch.sum((A * tr), dim=(- 1)))

    def loss(self, data):
        """Margin-style loss over positive triplets and sampled negatives,
        plus the model's regularization penalty."""
        pos_data = self.data_to_device(data)
        neg_data = self.data_to_device(self.model_negative_sampler.create_negative(data))
        pos_score = self.forward(pos_data)
        neg_score = self.forward(neg_data)
        return self.model_loss(pos_score, neg_score) + self.penalty(data)
class DatasetMapper():
    """Map a Detectron2-format dataset dict into per-image training/inference
    format: reads the image, applies augmentations, and converts annotations
    into Instances.
    """

    def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], image_format: str, use_instance_mask: bool=False, use_keypoint: bool=False, instance_mask_format: str='polygon', keypoint_hflip_indices: Optional[np.ndarray]=None, precomputed_proposal_topk: Optional[int]=None, recompute_boxes: bool=False):
        if recompute_boxes:
            assert use_instance_mask, 'recompute_boxes requires instance masks'
        self.is_train = is_train
        self.augmentations = T.AugmentationList(augmentations)
        self.image_format = image_format
        self.use_instance_mask = use_instance_mask
        self.instance_mask_format = instance_mask_format
        self.use_keypoint = use_keypoint
        self.keypoint_hflip_indices = keypoint_hflip_indices
        self.proposal_topk = precomputed_proposal_topk
        self.recompute_boxes = recompute_boxes
        logger = logging.getLogger(__name__)
        mode = ('training' if is_train else 'inference')
        logger.info(f'[DatasetMapper] Augmentations used in {mode}: {augmentations}')

    @classmethod
    def from_config(cls, cfg, is_train: bool=True):
        """Derive constructor kwargs from a Detectron2 config node.

        Bug fix: the method takes ``cls`` but had no @classmethod decorator,
        so class-level calls would have bound ``cfg`` to ``cls``.
        """
        augs = utils.build_augmentation(cfg, is_train)
        if cfg.INPUT.CROP.ENABLED and is_train:
            augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
            # Cropping can clip masks, so boxes must be recomputed from them.
            recompute_boxes = cfg.MODEL.MASK_ON
        else:
            recompute_boxes = False
        ret = {'is_train': is_train, 'augmentations': augs, 'image_format': cfg.INPUT.FORMAT, 'use_instance_mask': cfg.MODEL.MASK_ON, 'instance_mask_format': cfg.INPUT.MASK_FORMAT, 'use_keypoint': cfg.MODEL.KEYPOINT_ON, 'recompute_boxes': recompute_boxes}
        if cfg.MODEL.KEYPOINT_ON:
            ret['keypoint_hflip_indices'] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        if cfg.MODEL.LOAD_PROPOSALS:
            ret['precomputed_proposal_topk'] = (cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN if is_train else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST)
        return ret

    def _transform_annotations(self, dataset_dict, transforms, image_shape):
        # Strip unused annotation fields before the (expensive) transforms.
        for anno in dataset_dict['annotations']:
            if not self.use_instance_mask:
                anno.pop('segmentation', None)
            if not self.use_keypoint:
                anno.pop('keypoints', None)
        annos = [utils.transform_instance_annotations(obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices) for obj in dataset_dict.pop('annotations') if obj.get('iscrowd', 0) == 0]
        instances = utils.annotations_to_instances(annos, image_shape, mask_format=self.instance_mask_format)
        if self.recompute_boxes:
            # Tight boxes from the (possibly cropped) masks.
            instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
        dataset_dict['instances'] = utils.filter_empty_instances(instances)

    def __call__(self, dataset_dict):
        """Process one dataset dict; returns it with 'image' (CHW tensor) and,
        in training, 'instances' / 'sem_seg' filled in."""
        dataset_dict = copy.deepcopy(dataset_dict)  # callers may reuse the dict
        image = utils.read_image(dataset_dict['file_name'], format=self.image_format)
        utils.check_image_size(dataset_dict, image)
        if 'sem_seg_file_name' in dataset_dict:
            sem_seg_gt = utils.read_image(dataset_dict.pop('sem_seg_file_name')).astype('double')
        else:
            sem_seg_gt = None
        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        transforms = self.augmentations(aug_input)
        (image, sem_seg_gt) = (aug_input.image, aug_input.sem_seg)
        image_shape = image.shape[:2]
        # HWC -> CHW; ascontiguousarray avoids negative-stride tensors after flips.
        dataset_dict['image'] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            dataset_dict['sem_seg'] = torch.as_tensor(sem_seg_gt.astype('long'))
        if self.proposal_topk is not None:
            utils.transform_proposals(dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk)
        if not self.is_train:
            # Inference: ground truth is not needed downstream.
            dataset_dict.pop('annotations', None)
            dataset_dict.pop('sem_seg_file_name', None)
            return dataset_dict
        if 'annotations' in dataset_dict:
            self._transform_annotations(dataset_dict, transforms, image_shape)
        return dataset_dict
class ResamplingDataset(BaseWrapperDataset):
    """Randomly resample ``dataset`` each epoch (optionally weighted),
    exposing a virtual dataset of ``ceil(size_ratio * len(dataset))`` items.
    """

    def __init__(self, dataset, weights=None, replace=True, size_ratio=1.0, batch_by_size=True, seed=0, epoch=1):
        """
        Args:
            dataset: source dataset to resample.
            weights: optional per-item sampling weights (normalized here).
            replace: sample with replacement.
            size_ratio: virtual size as a fraction of the source size.
            batch_by_size: sort the resampled indices by example size.
            seed: RNG seed component.
            epoch: initial epoch (determines the first resampling).
        """
        super().__init__(dataset)
        if weights is None:
            self.weights = None
        else:
            assert len(weights) == len(dataset)
            weights_arr = np.array(weights, dtype=np.float64)
            weights_arr /= weights_arr.sum()  # normalize to a distribution
            self.weights = plasma_utils.PlasmaArray(weights_arr)
        self.replace = replace
        assert size_ratio > 0.0
        if not self.replace:
            # Without replacement we cannot exceed the source size.
            assert size_ratio < 1.0
        self.size_ratio = float(size_ratio)
        self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)
        self.batch_by_size = batch_by_size
        self.seed = seed
        self._cur_epoch = None
        self._cur_indices = None
        self.set_epoch(epoch)

    def __getitem__(self, index):
        return self.dataset[self._cur_indices.array[index]]

    def __len__(self):
        return self.actual_size

    @property
    def sizes(self):
        # Bug fix: ``ordered_indices`` below reads ``self.sizes`` as an
        # attribute, so this must be a property (the decorator was lost).
        if isinstance(self.dataset.sizes, list):
            return [s[self._cur_indices.array] for s in self.dataset.sizes]
        return self.dataset.sizes[self._cur_indices.array]

    def num_tokens(self, index):
        return self.dataset.num_tokens(self._cur_indices.array[index])

    def size(self, index):
        return self.dataset.size(self._cur_indices.array[index])

    def ordered_indices(self):
        if self.batch_by_size:
            # Sort by example size, stable w.r.t. original order via lexsort.
            order = [np.arange(len(self)), self.sizes]
            return np.lexsort(order)
        else:
            return np.arange(len(self))

    def prefetch(self, indices):
        self.dataset.prefetch(self._cur_indices.array[indices])

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        # NOTE(review): restored as a property to match the fairseq dataset
        # API -- confirm upstream. Resampling changes each epoch, so the
        # epoch iterator cannot be reused.
        return False

    def set_epoch(self, epoch):
        """Resample the index array for a new epoch (idempotent per epoch)."""
        logger.debug('ResamplingDataset.set_epoch: {}'.format(epoch))
        super().set_epoch(epoch)
        if epoch == self._cur_epoch:
            return
        self._cur_epoch = epoch
        # Fresh epoch-dependent RNG: reproducible, different every epoch.
        rng = np.random.RandomState([42, self.seed % (2 ** 32), self._cur_epoch])
        self._cur_indices = plasma_utils.PlasmaArray(rng.choice(len(self.dataset), self.actual_size, replace=self.replace, p=(None if self.weights is None else self.weights.array)))
def main():
    """Generate BPDA adversarial examples for a saved clean batch and store
    them under the output directory."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-path', type=str, default='./data')
    parser.add_argument('--output-path', type=str, default='./outputs')
    parser.add_argument('--output-name', type=str, default='adv_bpda.npy')
    parser.add_argument('--defense', type=str, default='GD')
    args = parser.parse_args()
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    load = lambda name: np.load(os.path.join(args.data_path, name))
    data = load('clean100data.npy')
    labels = load('clean100label.npy')  # loaded for parity with the inputs; unused below
    targets = load('random_targets.npy')
    adv = defend_BPDA(data, args.defense, targets)
    np.save(os.path.join(args.output_path, args.output_name), adv)
def aspect_ratio_rel(im, aspect_ratio):
    """Stretch ``im`` horizontally by ``aspect_ratio`` (height unchanged)."""
    height, width = im.shape[:2]
    new_width = int(round(aspect_ratio * width))
    return cv2.resize(im, dsize=(new_width, height))
def _load_state(model, pretrained_dict):
    """Strictly load ``pretrained_dict`` into ``model`` (keys must match 1:1)."""
    model_dict = model.state_dict()
    pretrained_dict_update = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    assert len(pretrained_dict) == len(pretrained_dict_update)
    assert len(pretrained_dict_update) == len(model_dict)
    model_dict.update(pretrained_dict_update)
    model.load_state_dict(model_dict)


def _chol_cov(entries, index, batch, H, W):
    """Scatter per-pixel Cholesky entries into 3x3 L and return L @ L^T
    (a symmetric PSD covariance per pixel)."""
    tmp = torch.zeros((batch, H, W, 9), device='cuda')
    tmp = tmp.index_copy_(3, index, entries.permute(0, 2, 3, 1))
    L = tmp.view((batch, H, W, 3, 3))
    return L @ L.transpose(3, 4)


def main(args):
    """Evaluate a trained DBSN (dual blind-spot network) denoiser on a
    validation set and report per-image and average PSNR.

    NOTE(review): the source text had the matrix-multiplication operators
    stripped (e.g. ``L_matrix L_matrix.transpose(3, 4)``); they are restored
    here as ``@`` products, matching the Cholesky-style covariance
    construction (sigma = L @ L^T) and the MAP fusion algebra below.
    Confirm against the upstream repository.
    """
    # --- build the three sub-networks -------------------------------------
    dbsn_net = DBSN_Model(in_ch=args.input_channel, out_ch=args.output_channel, mid_ch=args.middle_channel, blindspot_conv_type=args.blindspot_conv_type, blindspot_conv_bias=args.blindspot_conv_bias, br1_block_num=args.br1_block_num, br1_blindspot_conv_ks=args.br1_blindspot_conv_ks, br2_block_num=args.br2_block_num, br2_blindspot_conv_ks=args.br2_blindspot_conv_ks, activate_fun=args.activate_fun)
    sigma_mu_net = Sigma_mu_Net(in_ch=args.middle_channel, out_ch=args.sigma_mu_output_channel, mid_ch=args.sigma_mu_middle_channel, layers=args.sigma_mu_layers, kernel_size=args.sigma_mu_kernel_size, bias=args.sigma_mu_bias)
    sigma_n_net = Sigma_n_Net(in_ch=args.sigma_n_input_channel, out_ch=args.sigma_n_output_channel, mid_ch=args.sigma_n_middle_channel, layers=args.sigma_n_layers, kernel_size=args.sigma_n_kernel_size, bias=args.sigma_n_bias)
    dbsn_model = nn.DataParallel(dbsn_net, args.device_ids).cuda()
    sigma_mu_model = nn.DataParallel(sigma_mu_net, args.device_ids).cuda()
    sigma_n_model = nn.DataParallel(sigma_n_net, args.device_ids).cuda()

    # --- restore weights (the original repeated this block three times) ----
    tmp_ckpt = torch.load(args.last_ckpt, map_location=torch.device('cuda', args.device_ids[0]))
    _load_state(dbsn_model, tmp_ckpt['state_dict_dbsn'])
    _load_state(sigma_mu_model, tmp_ckpt['state_dict_sigma_mu'])
    _load_state(sigma_n_model, tmp_ckpt['state_dict_sigma_n'])

    val_setname = args.valset
    dataset_val = create_dataset(val_setname, 'val', args).load_data()
    dbsn_model.eval()
    sigma_mu_model.eval()
    sigma_n_model.eval()
    with torch.no_grad():
        psnr_val = 0
        for (count, data) in enumerate(dataset_val):
            img_val = data['clean'].cuda()
            img_noise_val = data['noisy'].cuda()
            (batch, C, H, W) = img_noise_val.shape
            (mu_out_val, mid_out_val) = dbsn_model(img_noise_val)
            sigma_mu_out_val = sigma_mu_model(mid_out_val)
            # Signal-dependent noise is estimated from the denoised mean,
            # otherwise from the noisy input itself.
            if args.noise_type == 'poisson_gaussian':
                sigma_n_out_val = sigma_n_model(mu_out_val)
            else:
                sigma_n_out_val = sigma_n_model(img_noise_val)
            diag_index = torch.LongTensor([0, 4, 8]).cuda()  # diagonal of a 3x3
            if args.noise_type == 'gaussian':
                sigma_mu = _chol_cov(sigma_mu_out_val.repeat(1, 3, 1, 1), diag_index, batch, H, W)
                # Spatially constant noise level, averaged over the image.
                noise_est_val = sigma_n_out_val.mean(dim=(2, 3), keepdim=True).repeat(1, 3, H, W)
                noise_est_val = F.softplus(noise_est_val - 4) + 0.001
                sigma_n = _chol_cov(noise_est_val, diag_index, batch, H, W)
            elif args.noise_type == 'poisson_gaussian':
                sigma_mu = _chol_cov(sigma_mu_out_val, diag_index, batch, H, W)
                noise_est = F.softplus(sigma_n_out_val - 4) + 0.001
                sigma_n = _chol_cov(noise_est, diag_index, batch, H, W)
            elif args.noise_type == 'multivariate_gaussian':
                # Lower-triangular 3x3 entries (full cross-channel covariance).
                tri_index = torch.LongTensor([0, 1, 2, 4, 5, 8]).cuda()
                sigma_mu_matrix = _chol_cov(sigma_mu_out_val, tri_index, batch, H, W)
                sigma_mu = sigma_mu_matrix.mean(dim=(1, 2), keepdim=True).repeat(1, H, W, 1, 1)
                sigma_n_matrix = _chol_cov(sigma_n_out_val, tri_index, batch, H, W)
                sigma_n = sigma_n_matrix.mean(dim=(1, 2), keepdim=True).repeat(1, H, W, 1, 1)
            else:
                # Bug fix: was ``assert 'Unknown noise type!'`` which always
                # passes (non-empty string is truthy); fail loudly instead.
                raise ValueError('Unknown noise type: {}'.format(args.noise_type))
            # MAP fusion of the blind-spot mean mu_x and the noisy observation y.
            mu_x = mu_out_val.permute(0, 2, 3, 1).unsqueeze(-1)
            y = img_noise_val.permute(0, 2, 3, 1).unsqueeze(-1)
            # Small diagonal jitter keeps the inversions well conditioned.
            Ieps = 1e-06 * torch.eye(3, device='cuda').repeat(batch, H, W, 1, 1)
            sigma_mu_inv = (sigma_mu + Ieps).inverse()
            sigma_n_inv = (sigma_n + Ieps).inverse()
            term_c1 = (sigma_mu_inv + args.gamma * sigma_n_inv + Ieps).inverse()
            term_c2 = (sigma_mu_inv @ mu_x) + ((args.gamma * sigma_n_inv) @ y)
            out = term_c1 @ term_c2
            out = out.squeeze(-1).permute(0, 3, 1, 2)
            psnr = batch_psnr(out.clamp(0.0, 1.0), img_val, 1.0)
            psnr_val += psnr
            print(('Image[%02d]: psnr_dbsn = %.4f ' % (count, psnr)))
        psnr_val /= len(dataset_val)
        print(('Avg psnr_dbsn: %.4f \n' % psnr_val))
class FlipGradientBuilder(object): def __init__(self): self.num_calls = 0 def __call__(self, x, l=1.0): grad_name = ('FlipGradient%d' % self.num_calls) (grad_name) def _flip_gradients(op, grad): return [(tf.negative(grad) * l)] g = tf.get_default_graph() with g.gradient_override_map({'Identity': grad_name}): y = tf.identity(x) self.num_calls += 1 return y
class PadToMultiple(object): def __init__(self, multiple, fill=0, padding_mode='constant'): assert isinstance(multiple, numbers.Number) assert isinstance(fill, (numbers.Number, str, tuple)) assert (padding_mode in ['constant', 'edge', 'reflect', 'symmetric']) self.multiple = multiple self.fill = fill self.padding_mode = padding_mode def __call__(self, img): (w, h) = img.size m = self.multiple nw = (((w // m) + int(((w % m) != 0))) * m) nh = (((h // m) + int(((h % m) != 0))) * m) padw = (nw - w) padh = (nh - h) out = vf.pad(img, (0, 0, padw, padh), self.fill, self.padding_mode) return out def __repr__(self): return (self.__class__.__name__ + '(multiple={0}, fill={1}, padding_mode={2})'.format(self.mulitple, self.fill, self.padding_mode))
@flax_register_to_config
class FlaxAutoencoderKL(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax variational autoencoder with a KL-regularized latent space.

    Encoder -> 1x1 quant conv -> diagonal Gaussian posterior; decoder mirrors
    the path back to image space. The decorator (restored here — the corrupted
    source only kept `_register_to_config`) registers the dataclass fields
    below as the model config.
    """

    # --- config fields (exposed via self.config) ---
    in_channels: int = 3
    out_channels: int = 3
    down_block_types: Tuple[str] = ('DownEncoderBlock2D',)
    up_block_types: Tuple[str] = ('UpDecoderBlock2D',)
    block_out_channels: Tuple[int] = (64,)
    layers_per_block: int = 1
    act_fn: str = 'silu'
    latent_channels: int = 4
    norm_num_groups: int = 32
    sample_size: int = 32
    scaling_factor: float = 0.18215
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # double_z=True: the encoder emits mean and logvar channels.
        self.encoder = FlaxEncoder(in_channels=self.config.in_channels, out_channels=self.config.latent_channels, down_block_types=self.config.down_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, act_fn=self.config.act_fn, norm_num_groups=self.config.norm_num_groups, double_z=True, dtype=self.dtype)
        self.decoder = FlaxDecoder(in_channels=self.config.latent_channels, out_channels=self.config.out_channels, up_block_types=self.config.up_block_types, block_out_channels=self.config.block_out_channels, layers_per_block=self.config.layers_per_block, norm_num_groups=self.config.norm_num_groups, act_fn=self.config.act_fn, dtype=self.dtype)
        # 1x1 convs mapping to/from the latent distribution parameters.
        self.quant_conv = nn.Conv((2 * self.config.latent_channels), kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype)
        self.post_quant_conv = nn.Conv(self.config.latent_channels, kernel_size=(1, 1), strides=(1, 1), padding='VALID', dtype=self.dtype)

    def init_weights(self, rng: jax.Array) -> FrozenDict:
        """Initialize parameters from a dummy NCHW sample."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        (params_rng, dropout_rng, gaussian_rng) = jax.random.split(rng, 3)
        rngs = {'params': params_rng, 'dropout': dropout_rng, 'gaussian': gaussian_rng}
        return self.init(rngs, sample)['params']

    def encode(self, sample, deterministic: bool=True, return_dict: bool=True):
        """Encode an NCHW sample into a diagonal Gaussian posterior."""
        # Flax convs are NHWC; transpose from the NCHW public interface.
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        hidden_states = self.encoder(sample, deterministic=deterministic)
        moments = self.quant_conv(hidden_states)
        posterior = FlaxDiagonalGaussianDistribution(moments)
        if (not return_dict):
            return (posterior,)
        return FlaxAutoencoderKLOutput(latent_dist=posterior)

    def decode(self, latents, deterministic: bool=True, return_dict: bool=True):
        """Decode latents (NCHW or NHWC) back to an NCHW image sample."""
        if (latents.shape[(- 1)] != self.config.latent_channels):
            # Latents arrived channel-first; convert to NHWC for the conv stack.
            latents = jnp.transpose(latents, (0, 2, 3, 1))
        hidden_states = self.post_quant_conv(latents)
        hidden_states = self.decoder(hidden_states, deterministic=deterministic)
        hidden_states = jnp.transpose(hidden_states, (0, 3, 1, 2))
        if (not return_dict):
            return (hidden_states,)
        return FlaxDecoderOutput(sample=hidden_states)

    def __call__(self, sample, sample_posterior=False, deterministic: bool=True, return_dict: bool=True):
        """Full encode/(sample or mode)/decode round trip."""
        # Bug fix: do NOT forward return_dict to encode/decode — with
        # return_dict=False they return tuples, and the attribute accesses
        # below (.latent_dist / .sample) would raise AttributeError.
        posterior = self.encode(sample, deterministic=deterministic)
        if sample_posterior:
            rng = self.make_rng('gaussian')
            hidden_states = posterior.latent_dist.sample(rng)
        else:
            hidden_states = posterior.latent_dist.mode()
        sample = self.decode(hidden_states).sample
        if (not return_dict):
            return (sample,)
        return FlaxDecoderOutput(sample=sample)
def get_config_group(dataset): for (group, group_data) in CONFIG_GROUPS.items(): if (dataset in group_data['datasets']): return group assert False, f"Dataset `{dataset}' not found"
class S2VGraph(object): def __init__(self, g, label, node_tags=None, node_features=None): self.label = label self.g = g self.node_tags = node_tags self.neighbors = [] self.node_features = 0 self.max_neighbor = 0 self.mean_neighbor = 0
def writeMain(output): if (not (options.gui or options.runner)): return output.write(('int %s( int argc, char *argv[] ) {\n' % options.main)) output.write(' int status;\n') if options.noStaticInit: output.write(' CxxTest::initialize();\n') if options.gui: tester_t = ('CxxTest::GuiTuiRunner<CxxTest::%s, CxxTest::%s> ' % (options.gui, options.runner)) else: tester_t = ('CxxTest::%s' % options.runner) if options.xunit_printer: output.write((' std::ofstream ofstr("%s");\n' % options.xunit_file)) output.write((' %s tmp(ofstr);\n' % tester_t)) output.write((' CxxTest::RealWorldDescription::_worldName = "%s";\n' % options.world)) else: output.write((' %s tmp;\n' % tester_t)) output.write((' status = CxxTest::Main<%s>( tmp, argc, argv );\n' % tester_t)) output.write(' return status;\n') output.write('}\n')
class DdpCheckpointer(Checkpointer): def __init__(self, checkpoint_dir: str): self.checkpoint_dir = checkpoint_dir self._engine = DdpCheckpointEngine(checkpoint_dir) def save_checkpoint(self, step, state_dict, path='', storage_type=StorageType.DISK): if (path == ''): ckpt_name = f'{CheckpointConstant.CKPT_NAME_PREFIX}{step}.pt' path = os.path.join(self.checkpoint_dir, ckpt_name) if (storage_type == StorageType.MEMORY): self._engine.save_to_memory(step, state_dict, path) elif (storage_type == StorageType.DISK): if (not path): raise ValueError('path cannot be empty if storage type is disk!') self._engine.save_to_storage(step, state_dict, path) else: raise ValueError(f'No support storage type {storage_type}') def load_checkpoint(self, resume_path=''): return self._engine.load(resume_path)
def CreateTrgDataLoader(args): if ((args.set == 'train') or (args.set == 'trainval')): target_dataset = cityscapesDataSetLabel(args.data_dir_target, args.data_list_target, crop_size=image_sizes['cityscapes'], mean=IMG_MEAN, max_iters=(args.num_steps * args.batch_size), set=args.set) else: target_dataset = cityscapesDataSet(args.data_dir_target, args.data_list_target, crop_size=cs_size_test['cityscapes'], mean=IMG_MEAN, set=args.set) if ((args.set == 'train') or (args.set == 'trainval')): target_dataloader = data.DataLoader(target_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers, pin_memory=True) else: target_dataloader = data.DataLoader(target_dataset, batch_size=1, shuffle=False, pin_memory=True) return target_dataloader
def create_mapping(dico): sorted_items = sorted(dico.items(), key=(lambda x: ((- x[1]), x[0]))) id_to_item = {i: v[0] for (i, v) in enumerate(sorted_items)} item_to_id = {v: k for (k, v) in id_to_item.items()} return (item_to_id, id_to_item)
def load_view_point(pcd, filename): vis = o3d.visualization.Visualizer() vis.create_window() ctr = vis.get_view_control() param = o3d.io.read_pinhole_camera_parameters(filename) vis.add_geometry(pcd) ctr.convert_from_pinhole_camera_parameters(param) vis.run() vis.destroy_window()
class TestAttentionReshape(unittest.TestCase):
    """Checks that the AttentionOutputLayerNormLengthAdaptiveExpandIndices
    pattern pass fuses the ConstantOfShape/Mul/Equal/Where/Unsqueeze/Expand
    chain down to three nodes."""

    # Bug fix: unittest invokes setUpClass/tearDownClass on the class itself,
    # so they must be classmethods (the decorators were lost in the corrupted
    # source, which makes every run fail with a missing-argument TypeError).
    @classmethod
    def setUpClass(self):
        pass

    @classmethod
    def tearDownClass(self):
        pass

    def test_attention_reshape_0(self):
        # Build a small onnxruntime-style graph that matches the pattern.
        graph = Graph()
        graph.framework_modeling_config['framework'] = 'onnxruntime'
        input_data_node = OPERATORS['Input']()
        input_tensors = []
        output_tensors = [Tensor(name='input_data'), Tensor(), Tensor()]
        input_data_node.construct('input_data', 'Input', input_tensors=input_tensors, output_tensors=output_tensors)
        constantofshape_node = OPERATORS['ConstantOfShape']()
        input_tensors = [Tensor(name='input0', data=np.array(1), shape=[1])]
        output_tensors = [Tensor(name='output0', source_op=['constantofshape'], dest_op=['mul']), Tensor(name='constantofshape_output0', source_op=['constantofshape'], dest_op=['where'])]
        constantofshape_node.construct('constantofshape', 'ConstantOfShape', input_tensors=input_tensors, output_tensors=output_tensors)
        mul_node = OPERATORS['Mul']()
        input_tensors = [Tensor(name='output0', source_op=['constantofshape'], dest_op=['mul']), Tensor(name='mul_input1', data=np.array(1), shape=[1])]
        output_tensors = [Tensor(name='mul_output', source_op=['mul'], dest_op=['equal'])]
        mul_node.construct('mul', 'Mul', input_tensors=input_tensors, output_tensors=output_tensors)
        equal_node = OPERATORS['Equal']()
        input_tensors = [Tensor(name='equal_input0', data=np.array([-1, 1, 1, -1]), shape=[4]), Tensor(name='mul_output', source_op=['mul'], dest_op=['equal'])]
        output_tensors = [Tensor(name='equal_output', source_op=['equal'], dest_op=['where'])]
        equal_node.construct('equal', 'Equal', input_tensors=input_tensors, output_tensors=output_tensors)
        where_node = OPERATORS['Where']()
        input_tensors = [Tensor(name='equal_output', source_op=['equal'], dest_op=['where']), Tensor(name='constantofshape_output0', source_op=['constantofshape'], dest_op=['where']), Tensor(name='where_input2', data=np.array([-1, 1, 1, -1]), shape=[1])]
        output_tensors = [Tensor(name='where_output', source_op=['where'], dest_op=['expand'])]
        where_node.construct('where', 'Where', input_tensors=input_tensors, output_tensors=output_tensors)
        unsqueeze_node = OPERATORS['Unsqueeze']()
        input_tensors = [Tensor(name='unsqueeze_input0', data=np.array(1), shape=[2, 12, 384, 384]), Tensor(name='unsqueeze_input1', data=np.array(1), shape=[1])]
        output_tensors = [Tensor(name='unsqueeze_output', source_op=['unsqueeze'], dest_op=['expand'])]
        unsqueeze_node.construct('unsqueeze', 'Unsqueeze', input_tensors=input_tensors, output_tensors=output_tensors, attr=OrderedDict({'axes': '1'}))
        expand_node = OPERATORS['Expand']()
        input_tensors = [Tensor(name='unsqueeze_output', source_op=['unsqueeze'], dest_op=['expand']), Tensor(name='where_output', source_op=['where'], dest_op=['expand'])]
        output_tensors = [Tensor(name='expand_output', source_op=['expand'], dest_op=['gatherelements'])]
        expand_node.construct('expand', 'Expand', input_tensors=input_tensors, output_tensors=output_tensors)
        gatherelements_node = OPERATORS['GatherElements']()
        input_tensors = [Tensor(name='gatherelements_input0', data=np.array(1), shape=[1]), Tensor(name='expand_output', source_op=['expand'], dest_op=['gatherelements'])]
        output_tensors = [Tensor(name='gatherelements_output', source_op=['gatherelements'], dest_op=['empty'])]
        gatherelements_node.construct('gatherelements', 'GatherElements', input_tensors=input_tensors, output_tensors=output_tensors, attr=OrderedDict({'axis': '1'}))
        graph.insert_nodes(len(graph.nodes), [input_data_node, constantofshape_node, mul_node, equal_node, where_node, unsqueeze_node, expand_node, gatherelements_node])
        # Apply the fusion pass and check the collapsed topology.
        graph = AttentionOutputLayerNormLengthAdaptiveExpandIndices()(graph)
        self.assertEqual(3, len(graph.nodes))
        self.assertEqual('expand', graph.nodes[1].name)
def find_first_disambig_symbol(symbols: k2.SymbolTable) -> int: return min((v for (k, v) in symbols._sym2id.items() if k.startswith('#')))
def main(): opt = TestOptions().parse() opt.is_flip = False opt.batchSize = 1 data_loader = CreateDataLoader(opt) model = create_model(opt) web_dir = os.path.join(opt.results_dir, 'test') webpage = html.HTML(web_dir, 'task {}'.format(opt.exp_name)) for (i, data) in enumerate(islice(data_loader, opt.how_many)): print(('process input image %3.3d/%3.3d' % (i, opt.how_many))) results = model.translation(data) img_path = ('image%3.3i' % i) save_images(webpage, results, img_path, None, width=opt.fine_size) webpage.save()
def main(args):
    """Distributed training entry point for self-boosted deep clustering.

    Builds train/pseudo/val loaders, optionally resumes a checkpoint, runs an
    initial feature-extraction pass over the validation set, then trains with
    instance + cluster boosting losses, periodically saving and evaluating.
    """
    # --- distributed setup and reproducibility ---
    misc.init_distributed_mode(args)
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.device)
    # Per-rank seed so each process gets a different augmentation stream.
    seed = (args.seed + misc.get_rank())
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True
    # --- datasets and samplers ---
    dataset_train = build_dataset(type='train', args=args)
    dataset_pseudo = build_dataset(type='pseudo', args=args)
    dataset_val = build_dataset(type='val', args=args)
    num_tasks = misc.get_world_size()
    global_rank = misc.get_rank()
    sampler_train = torch.utils.data.DistributedSampler(dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    sampler_pseudo = torch.utils.data.DistributedSampler(dataset_pseudo, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    print(('Sampler_train = %s' % str(sampler_train)))
    if args.dist_eval:
        if ((len(dataset_val) % num_tasks) != 0):
            print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. This will slightly alter validation results as extra duplicate entries are added to achieve equal num of samples per-process.')
        sampler_val = torch.utils.data.DistributedSampler(dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)
    else:
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)
    if ((global_rank == 0) and (args.output_dir is not None)):
        os.makedirs(args.output_dir, exist_ok=True)
    data_loader_train = torch.utils.data.DataLoader(dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    # NOTE(review): batch_size=1000 for the pseudo-label loader is hard-coded
    # — presumably intentional for bulk label assignment; confirm.
    data_loader_ps = torch.utils.data.DataLoader(dataset_pseudo, sampler=sampler_pseudo, batch_size=1000, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True)
    data_loader_val = torch.utils.data.DataLoader(dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False)
    # --- model construction and optional resume ---
    (backbone, hidden_dim) = get_resnet(args)
    model = Network(backbone, hidden_dim, args.feat_dim, args.nb_cluster)
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        print(('Load pre-trained checkpoint from: %s' % args.resume))
        checkpoint_model = checkpoint['model']
        # strict=False: tolerate missing/extra heads in the checkpoint.
        msg = model.load_state_dict(checkpoint_model, strict=False)
        print(msg)
    model.to(device)
    # --- initial feature-extraction pass over the validation set ---
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()
    feat_vector = []
    labels_vector = []
    for (images, labels, _) in metric_logger.log_every(data_loader_val, 20, header):
        images = images.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():
            (feat, c) = model.forward_zc(images)
            c = torch.argmax(c, dim=1)
        feat_vector.extend(feat.cpu().detach().numpy())
        labels_vector.extend(labels.numpy())
    feat_vector = np.array(feat_vector)
    labels_vector = np.array(labels_vector)
    print('Feat shape {}, Label shape {}'.format(feat_vector.shape, labels_vector.shape))
    # --- optimizer, DDP wrapping, losses ---
    model_without_ddp = model
    n_parameters = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print(('Model = %s' % str(model_without_ddp)))
    print(('number of params (M): %.2f' % (n_parameters / 1000000.0)))
    eff_batch_size = (args.batch_size * misc.get_world_size())
    print(('base lr: %.3e' % args.lr))
    print(('effective batch size: %d' % eff_batch_size))
    optimizer = torch.optim.Adam([{'params': model.resnet.parameters(), 'lr': args.lr}, {'params': model.instance_projector.parameters(), 'lr': args.lr}, {'params': model.cluster_projector.parameters(), 'lr': args.lr}], lr=args.lr, weight_decay=args.weight_decay)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
    model_without_ddp = model.module
    loss_scaler = NativeScaler()
    criterion_ins = InstanceLossBoost(tau=args.ins_temp, distributed=True, alpha=0.99, gamma=0.5)
    criterion_clu = ClusterLossBoost(distributed=True, cluster_num=args.nb_cluster)
    misc.load_model(args=args, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
    print(f'Start training for {args.epochs} epochs')
    # -1 marks "no pseudo-label assigned yet" for each training sample.
    pseudo_labels = (- torch.ones(dataset_train.__len__(), dtype=torch.long))
    start_time = time.time()
    max_accuracy = 0.0
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle per epoch so ranks see different partitions.
            data_loader_train.sampler.set_epoch(epoch)
        # One boosted epoch; pseudo labels are refined and carried forward.
        (train_stats, pseudo_labels) = boost_one_epoch(model, criterion_ins, criterion_clu, data_loader_train, optimizer, device, epoch, loss_scaler, pseudo_labels, args=args)
        if (args.output_dir and (((epoch % args.save_freq) == 0) or ((epoch + 1) == args.epochs))):
            misc.save_model(args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)
        if (((epoch % args.eval_freq) == 0) or ((epoch + 1) == args.epochs)):
            test_stats = evaluate(data_loader_val, model, device)
            print(f"Clustering performance on the {len(dataset_val)} test images: NMI={test_stats['nmi']:.2f}%, ACC={test_stats['acc']:.2f}%, ARI={test_stats['ari']:.2f}%")
            max_accuracy = max(max_accuracy, test_stats['acc'])
            print(f'Max accuracy: {max_accuracy:.2f}%')
        if (epoch == args.start_epoch):
            # NOTE(review): overwrites first-epoch eval stats with a stub so
            # logging below always has a test dict — confirm intent.
            test_stats = {'pred_num': 1000}
        log_stats = {**{f'train_{k}': v for (k, v) in train_stats.items()}, **{f'test_{k}': v for (k, v) in test_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters}
        if (args.output_dir and misc.is_main_process()):
            # Append one JSON line per epoch (main process only).
            with open(os.path.join(args.output_dir, 'log.txt'), mode='a', encoding='utf-8') as f:
                f.write((json.dumps(log_stats) + '\n'))
    total_time = (time.time() - start_time)
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
class AverageMeter(object): def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += (val * n) self.count += n self.avg = (self.sum / self.count) def __str__(self): fmtstr = (((('{name} {val' + self.fmt) + '} ({avg') + self.fmt) + '})') return fmtstr.format(**self.__dict__)
class RandomResize(object): def __init__(self, min_size, max_size=None): self.min_size = min_size if (max_size is None): max_size = min_size self.max_size = max_size def __call__(self, image, target): size = random.randint(self.min_size, self.max_size) image = F.resize(image, size) target = F.resize(target, size, interpolation=Image.NEAREST) return (image, target)
class LinearSelfAttn(nn.Module): def __init__(self, input_size, dropout=None): super(LinearSelfAttn, self).__init__() self.linear = nn.Linear(input_size, 1) self.dropout = dropout def forward(self, x, x_mask): x = self.dropout(x) x_flat = x.contiguous().view((- 1), x.size((- 1))) scores = self.linear(x_flat).view(x.size(0), x.size(1)) scores.data.masked_fill_(x_mask.data, (- float('inf'))) alpha = F.softmax(scores, 1) return alpha.unsqueeze(1).bmm(x).squeeze(1)
def wrn_40_2(conv_layer, linear_layer, init_type, **kwargs): assert (init_type == 'kaiming_normal'), 'only supporting default init for WRN' return WideResNet(conv_layer, linear_layer, depth=40, widen_factor=2, **kwargs)
def generate(*args, method='auto', **kwargs): if (method == 'auto'): if (not sf.util.CPLEX_AVAILABLE): log.info('CPLEX solver not found; falling back to pyomo/bonmin.') method = 'bonmin' else: method = 'cplex' if (method == 'bonmin'): return _generate_bonmin(*args, **kwargs) elif (method == 'cplex'): return _generate_cplex(*args, **kwargs) else: raise ValueError(f'Unrecognized solver {method}')
(scope='session') def model_architectures(): return [('le_net_mnist', (1, 1, 32, 32)), ('le_net_cifar', (1, 3, 32, 32)), ('resnet18', (1, 3, 128, 128)), ('resnet20', (1, 3, 128, 128)), ('resnet56', (1, 3, 128, 128))]
def load_adult_income_dataset(only_train=True):
    """Download and preprocess the UCI Adult income dataset.

    Returns a pandas DataFrame with coarse-grained categorical columns and a
    binary 'income' target (1 = '>50K'). With only_train=True, only the 80%
    train split (random_state=17) is returned.
    """
    outdirname = 'adult'
    zipfilename = (outdirname + '.zip')
    # Bug fix: the download call was corrupted in this file
    # (`urlretrieve(' zipfilename)`). Reconstructed against the UCI ML
    # repository archive — TODO(review): confirm this is the intended source.
    urlretrieve(('https://archive.ics.uci.edu/static/public/2/' + zipfilename), zipfilename)
    with zipfile.ZipFile(zipfilename, 'r') as unzip:
        unzip.extractall(outdirname)
    raw_data = np.genfromtxt((outdirname + '/adult.data'), delimiter=', ', dtype=str, invalid_raise=False)
    column_names = ['age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']
    adult_data = pd.DataFrame(raw_data, columns=column_names)
    adult_data = adult_data.astype({'age': np.int64, 'educational-num': np.int64, 'hours-per-week': np.int64})
    # Collapse rare/ambiguous workclass categories.
    adult_data = adult_data.replace({'workclass': {'Without-pay': 'Other/Unknown', 'Never-worked': 'Other/Unknown'}})
    adult_data = adult_data.replace({'workclass': {'Federal-gov': 'Government', 'State-gov': 'Government', 'Local-gov': 'Government'}})
    adult_data = adult_data.replace({'workclass': {'Self-emp-not-inc': 'Self-Employed', 'Self-emp-inc': 'Self-Employed'}})
    # NOTE: dead mapping — 'Never-worked'/'Without-pay' were already renamed
    # to 'Other/Unknown' above, so this replace matches nothing; kept for
    # fidelity with the original.
    adult_data = adult_data.replace({'workclass': {'Never-worked': 'Self-Employed', 'Without-pay': 'Self-Employed'}})
    adult_data = adult_data.replace({'workclass': {'?': 'Other/Unknown'}})
    # Group occupations into five coarse buckets.
    adult_data = adult_data.replace({'occupation': {'Adm-clerical': 'White-Collar', 'Craft-repair': 'Blue-Collar', 'Exec-managerial': 'White-Collar', 'Farming-fishing': 'Blue-Collar', 'Handlers-cleaners': 'Blue-Collar', 'Machine-op-inspct': 'Blue-Collar', 'Other-service': 'Service', 'Priv-house-serv': 'Service', 'Prof-specialty': 'Professional', 'Protective-serv': 'Service', 'Tech-support': 'Service', 'Transport-moving': 'Blue-Collar', 'Unknown': 'Other/Unknown', 'Armed-Forces': 'Other/Unknown', '?': 'Other/Unknown'}})
    adult_data = adult_data.replace({'marital-status': {'Married-civ-spouse': 'Married', 'Married-AF-spouse': 'Married', 'Married-spouse-absent': 'Married', 'Never-married': 'Single'}})
    adult_data = adult_data.replace({'race': {'Black': 'Other', 'Asian-Pac-Islander': 'Other', 'Amer-Indian-Eskimo': 'Other'}})
    # Keep only the modelling columns and binarize the target.
    adult_data = adult_data[['age', 'workclass', 'education', 'marital-status', 'occupation', 'race', 'gender', 'hours-per-week', 'income']]
    adult_data = adult_data.replace({'income': {'<=50K': 0, '>50K': 1}})
    adult_data = adult_data.replace({'education': {'Assoc-voc': 'Assoc', 'Assoc-acdm': 'Assoc', '11th': 'School', '10th': 'School', '7th-8th': 'School', '9th': 'School', '12th': 'School', '5th-6th': 'School', '1st-4th': 'School', 'Preschool': 'School'}})
    adult_data = adult_data.rename(columns={'marital-status': 'marital_status', 'hours-per-week': 'hours_per_week'})
    if only_train:
        (train, _) = train_test_split(adult_data, test_size=0.2, random_state=17)
        adult_data = train.reset_index(drop=True)
    # Clean up the extracted files.
    if os.path.isdir(outdirname):
        entire_path = os.path.abspath(outdirname)
        shutil.rmtree(entire_path)
    return adult_data
class MT5EncoderModel(): def __init__(self, *args, **kwargs): requires_pytorch(self) def from_pretrained(self, *args, **kwargs): requires_pytorch(self)
def sharding(config, out_file): with open(out_file, 'rb') as fr: captions = pickle.load(fr) target_dir = config.target_dir prefix = (((os.path.basename(os.path.splitext(config.caption_pkl_path)[0]) + '.') + config.bert_name) + '.') for split in ['train', 'val']: target_path = os.path.join(target_dir, (split + '_meta')) with open((target_path + '.pkl'), 'rb') as fr: meta = pickle.load(fr) print('load meta', target_path, len(meta)) for shard_id in meta: numpify(shard_id, meta[shard_id], captions, target_dir, split, prefix)
class TripletLoss(nn.Module): def __init__(self, margin=0.2): super(TripletLoss, self).__init__() self.margin = margin def forward(self, audio_embeds, text_embeds, labels): n = audio_embeds.size(0) sim_a2t = util.cos_sim(audio_embeds, text_embeds) sim_ap = torch.diag(sim_a2t).view(n, 1) d1 = sim_ap.expand_as(sim_a2t) d2 = sim_ap.t().expand_as(sim_a2t) cost_s = F.relu(((self.margin + sim_a2t) - d1)) cost_a = F.relu(((self.margin + sim_a2t) - d2)) mask = labels.expand(n, n).eq(labels.expand(n, n).t()).to(cost_a.device) cost_s = cost_s.masked_fill(mask, 0) cost_a = cost_a.masked_fill(mask, 0) cost_s = cost_s.max(1)[0] cost_a = cost_a.max(0)[0] loss = ((cost_s.sum() + cost_a.sum()) / n) return loss
def normal(in_image): value_max = np.max(in_image) value_min = np.min(in_image) return ((in_image - value_min) / (value_max - value_min))
# Bug fix: the decorator was corrupted to the bare residue `_torch`;
# restored as `@require_torch` so the suite is skipped when torch is absent.
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model and pipeline tests for the MegatronBERT architecture family."""

    all_model_classes = ((MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification) if is_torch_available() else ())
    pipeline_model_mapping = ({'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification} if is_torch_available() else {})
    fx_compatible = True
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add pretraining labels when a labelled forward pass is requested."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if (model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING)):
                inputs_dict['labels'] = torch.zeros((self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['next_sentence_label'] = torch.zeros(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
class SuperResIDWE4K3(SuperResIDWEXKX): def __init__(self, in_channels=None, out_channels=None, stride=None, bottleneck_channels=None, sub_layers=None, no_create=False, **kwargs): super(SuperResIDWE4K3, self).__init__(in_channels=in_channels, out_channels=out_channels, stride=stride, bottleneck_channels=bottleneck_channels, sub_layers=sub_layers, kernel_size=3, expension=4.0, no_create=no_create, **kwargs)
def bilinear_attention(queries, units, num_heads, attns=None, memory=None, seq_len=None, causality=False, scope='Bilinear_Attention', reuse=None, mask=None, return_weights=False, bias=True, dropout=0.0): with tf.variable_scope(scope, default_name='bilinear_attention', reuse=reuse): memory_shapes = memory.shape.as_list() memory_tmp = tf.reshape(memory, [memory_shapes[0], (- 1), memory_shapes[(- 1)]]) key = conv(memory_tmp, units, name='memory_projection', reuse=reuse) key = tf.reshape(key, (memory_shapes[:(- 1)] + [units])) if (len(memory_shapes) == 4): queries = tf.expand_dims(queries, 2) logits = tf.matmul(queries, key, transpose_b=True) shapes = [(x if (x != None) else (- 1)) for x in logits.shape.as_list()] if (mask is not None): mask = (tf.cast(tf.reshape(mask, [(- 1), 1, 1, shapes[(- 1)]]), tf.int32) if (len(shapes) == 4) else tf.cast(tf.reshape(mask, [(- 1), 1, shapes[(- 1)]]), tf.int32)) logits = mask_logits(logits, mask) weights = tf.nn.softmax(logits, name='attention_weights') weights = tf.nn.dropout(weights, (1.0 - dropout)) res = tf.matmul(weights, memory) return (res, weights, logits)
class Interp2xBoundary3dFunction(Function): def forward(ctx, input, balance_value): (output, is_boundary) = interp2x_boundary3d.forward(input, balance_value) return (output, is_boundary) def backward(ctx, grad_output, grad_boundary): grad_input = interp2x_boundary3d.backward(grad_output.contiguous()) return (grad_input, None)
class Inputs(unittest.TestCase): def test_m2m100_inputs(self): with tempfile.TemporaryDirectory() as tmpdirname: input_path = os.path.join(tmpdirname, 'source.txt') output_path = os.path.join(tmpdirname, 'target.txt') with open(os.path.join(tmpdirname, 'source.txt'), 'w', encoding='utf8') as f: print('Hello, world, my name is Iker!', file=f) main(sentences_path=input_path, sentences_dir=None, files_extension='txt', output_path=output_path, source_lang='en', target_lang='es', starting_batch_size=32, model_name='facebook/m2m100_418M', lora_weights_name_or_path=None, force_auto_device_map=True, precision=None, max_length=64, num_beams=2, num_return_sequences=1, do_sample=False, temperature=1.0, top_k=50, top_p=1.0, keep_special_tokens=False, keep_tokenization_spaces=False, repetition_penalty=None, prompt=None) main(sentences_path=None, sentences_dir=tmpdirname, files_extension='txt', output_path=os.path.join(tmpdirname, 'target'), source_lang='en', target_lang='es', starting_batch_size=32, model_name='facebook/m2m100_418M', lora_weights_name_or_path=None, force_auto_device_map=True, precision=None, max_length=64, num_beams=2, num_return_sequences=1, do_sample=False, temperature=1.0, top_k=50, top_p=1.0, keep_special_tokens=False, keep_tokenization_spaces=False, repetition_penalty=None, prompt=None)
class CALayer(nn.Module): def __init__(self, channel, reduction=16): super(CALayer, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.c1 = ops.BasicBlock(channel, (channel // reduction), 3, 1, 3, 3) self.c2 = ops.BasicBlock(channel, (channel // reduction), 3, 1, 5, 5) self.c3 = ops.BasicBlock(channel, (channel // reduction), 3, 1, 7, 7) self.c4 = ops.BasicBlockSig(((channel // reduction) * 3), channel, 3, 1, 1) def forward(self, x): y = self.avg_pool(x) c1 = self.c1(y) c2 = self.c2(y) c3 = self.c3(y) c_out = torch.cat([c1, c2, c3], dim=1) y = self.c4(c_out) return (x * y)
class AMAZONPOLARITY(AbstractTask):
    """Amazon review polarity (binary sentiment) task using promptsource
    templates.

    In this CSV dump, ``example['label'] == 1`` is treated as the negative
    class throughout (every mapping below keys off that comparison).
    """

    name = 'amazon_polarity'
    metric = [metrics.accuracy]
    metric_names = ['accuracy']
    split_to_data_split = {'train': 'train', 'validation': 'validation'}

    # prompt name -> (target when label == 1, target otherwise).
    # BUGFIX: 'flattering_or_not' previously listed the misspelled
    # 'unfalttering' in its labels_list; corrected to 'unflattering'.
    _PROMPT_TARGETS = {
        'Is_this_review': ('Negative', 'Positive'),
        'User_recommend_this_product': ('No', 'Yes'),
        'Is_this_product_review_positive': ('No', 'Yes'),
        'Is_this_review_negative': ('Yes', 'No'),
        'convey_negative_or_positive_sentiment': ('Negative', 'Positive'),
        'negative_or_positive_tone': ('Negative', 'Positive'),
        'user_satisfied': ('dissatisfied', 'satisfied'),
        'would_you_buy': ('decrease', 'increase'),
        'flattering_or_not': ('unflattering', 'flattering'),
    }

    def load_dataset(self, split: str):
        """Load 50k train rows / 300 eval rows from the local CSV dump.

        NOTE(review): the eval rows are taken from indices
        len(train) - i (i in [0, 300)), i.e. from the tail of the selection,
        so train and validation may overlap -- kept from the original code.
        Returns None for any split other than 'train'/'validation'.
        """
        train_data_files = {'train': './data/manual/amazon_review_polarity_csv/train.csv'}
        amazon_polarity = load_dataset('csv', data_files=train_data_files, script_version='master', names=['label', 'title', 'content'])
        amazon_polarity_train = amazon_polarity['train'].select(list(range(0, 50000)))
        amazon_polarity_eval = amazon_polarity['train'].select([len(amazon_polarity_train) - i for i in range(0, 300)])
        amazon_polarity = DatasetDict()
        amazon_polarity['train'] = amazon_polarity_train
        amazon_polarity['validation'] = amazon_polarity_eval
        if split == 'train':
            return amazon_polarity['train']
        if split == 'validation':
            return amazon_polarity['validation']
        return None

    def preprocessor(self, example, add_prefix=True):
        """Apply the promptsource template and attach the verbalized target.

        `use_verbalizer` (module-level flag) additionally attaches a
        `labels_list` of [positive-class target, negative-class target].
        Returns None for prompts outside `_PROMPT_TARGETS` (matching the
        original fall-through behavior).
        """
        prompt = DatasetTemplates('amazon_polarity')[self.prompt]
        result = prompt.apply(example)
        targets = self._PROMPT_TARGETS.get(self.prompt)
        if targets is None:
            return None
        on_label1, on_other = targets
        target = on_label1 if example['label'] == 1 else on_other
        out = {'source': result[0], 'target': target, 'task': self.name, 'extra_fields': {}}
        if use_verbalizer:
            out['labels_list'] = [on_other, on_label1]
        return out
def smoothrange(a=None, b=None, n=10):
    """Yield roughly `n` "nice" tick values covering [a, b] (axis-tick style).

    Step sizes snap to 1/2/5/10 times a power of ten. Missing bounds default
    to 0 (or [0, 1] when both are missing); a degenerate range (a == b)
    yields the single value float(a).
    """

    def _multiple(v, round=False):
        # Snap `v` up to a "nice" multiple (1, 2, 5 or 10) of a power of ten.
        # With round=True the thresholds 1.5/3/7 pick the nearest nice value
        # instead of the next one up.
        e = floor(log(v, 10))
        m = pow(10, e)
        f = v / m
        if round:
            op, x, y, z = lt, 1.5, 3.0, 7.0
        else:
            op, x, y, z = le, 1.0, 2.0, 5.0
        if op(f, x):
            return m * 1
        if op(f, y):
            return m * 2
        if op(f, z):
            return m * 5
        return m * 10

    if a is None and b is None:
        a, b = 0, 1
    if a is None:
        a, b = 0, b
    if b is None:
        a, b = 0, a
    if a == b:
        yield float(a)
        # BUGFIX: `raise StopIteration` inside a generator has been a
        # RuntimeError since Python 3.7 (PEP 479); a plain return ends
        # iteration cleanly.
        return
    r = _multiple(b - a)
    t = _multiple(r / (n - 1), round=True)
    a = floor(a / t) * t
    b = ceil(b / t) * t
    for i in range(int((b - a) / t) + 1):
        yield a + i * t
def weights_init(m):
    """Truncated-normal (std 0.02) init for Linear-like and Embedding modules.

    Intended for use with ``network.apply(weights_init)``; modules whose class
    name neither contains 'Linear' nor equals 'Embedding' are left untouched.
    """
    cls_name = type(m).__name__
    if 'Linear' in cls_name or cls_name == 'Embedding':
        print(f'Initializing Module {cls_name}.')
        nn.init.trunc_normal_(m.weight.data, 0.0, 0.02)
def rx_rm_vlc(host, port, chunk=hl2ss.ChunkSize.RM_VLC, mode=hl2ss.StreamMode.MODE_1, divisor=1, profile=hl2ss.VideoProfile.H265_MAIN, level=hl2ss.H26xLevel.DEFAULT, bitrate=None, options=None, decoded=True):
    """Create an RM VLC (grayscale camera) receiver.

    Missing bitrate/options are filled with codec defaults derived from the
    fixed RM VLC stream parameters. `decoded` selects the decoding receiver
    over the raw one.
    """
    params = hl2ss.Parameters_RM_VLC
    if bitrate is None:
        bitrate = get_video_codec_default_bitrate(params.WIDTH, params.HEIGHT, params.FPS, divisor, profile)
    if options is None:
        options = get_video_codec_default_options(params.WIDTH, params.HEIGHT, params.FPS, divisor, profile)
    factory = hl2ss.rx_decoded_rm_vlc if decoded else hl2ss.rx_rm_vlc
    return factory(host, port, chunk, mode, divisor, profile, level, bitrate, options)
def test_eval_map():
    """mAP should be ~0.292 with and without legacy coordinates, for both
    multi-image and single-image inputs.

    NOTE(review): relies on module-level fixtures `det_bboxes`, `gt_bboxes`
    and `gt_ignore` defined elsewhere in this file.
    """
    # Two images with identical detections and annotations.
    det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {'bboxes': gt_bboxes, 'bboxes_ignore': gt_ignore, 'labels': labels, 'labels_ignore': labels_ignore}
    annotations = [gt_info, gt_info]
    (mean_ap, eval_results) = eval_map(det_results, annotations, use_legacy_coordinate=True)
    assert (0.291 < mean_ap < 0.293)
    (mean_ap, eval_results) = eval_map(det_results, annotations, use_legacy_coordinate=False)
    assert (0.291 < mean_ap < 0.293)
    # Single-image case.
    det_results = [[det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {'bboxes': gt_bboxes, 'bboxes_ignore': gt_ignore, 'labels': labels, 'labels_ignore': labels_ignore}
    annotations = [gt_info]
    (mean_ap, eval_results) = eval_map(det_results, annotations, use_legacy_coordinate=True)
    assert (0.291 < mean_ap < 0.293)
    (mean_ap, eval_results) = eval_map(det_results, annotations, use_legacy_coordinate=False)
    assert (0.291 < mean_ap < 0.293)
def build_client_model(feature_num):
    """Build the client-side model for vertical federated learning: a single
    Dense(1) layer over `feature_num` input features."""
    model_input = Input(shape=feature_num)
    prediction = Dense(1)(model_input)
    return Model(inputs=model_input, outputs=prediction, name='vfl_client_model')
def run_all(tests, K, M):
    """Run every suite through `run_suite`, accumulating pass/fail/OOV counts,
    and print the aggregate RQ2 reproduction result.

    Args:
        tests: iterable of suites; suite[2] is the list of items (the
            benchmark size counts its ordered pairs, n*(n-1)).
        K, M: parameters forwarded to `run_suite` (semantics defined there).
    """
    benchmarkSize = 0
    passed = 0
    failed = 0
    total = 0
    oov = 0
    for suite in tests:
        (passed, failed, total, oov) = run_suite(suite, passed, failed, total, oov, K, M)
        n = len(suite[2])
        benchmarkSize += n * n - n  # number of ordered pairs, n*(n-1)
    # BUGFIX: guard the ratio against division by zero when `tests` is empty
    # (or no suite contributed any result).
    denom = oov + total
    ratio = float(passed) / float(denom) if denom else 0.0
    print('[reproduce-rq2] Results {}/{}/{} ({:.2%})'.format(oov, passed, total, ratio))
class DMSelfAttentionMLP(snt.AbstractModule):
    """Multi-head self-attention over graph nodes followed by an MLP
    (graph_nets / Sonnet style), with optional concat, residual connection
    and layer normalization.
    """

    def __init__(self, kq_dim, v_dim, make_mlp_fn, num_heads=8, concat_heads_output_dim=20, concat=True, residual=False, layer_norm=False, kq_dim_division=False, name='dm_self_attention'):
        # kq_dim / v_dim: per-head key-query and value sizes.
        # make_mlp_fn: zero-arg factory for the post-attention MLP.
        # kq_dim_division: enables scaling inside DMSelfAttention
        #   (presumably 1/sqrt(kq_dim) -- confirm in DMSelfAttention).
        super(DMSelfAttentionMLP, self).__init__(name=name)
        self.kq_dim = kq_dim
        self.v_dim = v_dim
        self.mlp = make_mlp_fn()
        self.num_heads = num_heads
        self.concat_heads_output_dim = concat_heads_output_dim
        self.concat = concat
        self.residual = residual
        self.layer_norm = layer_norm
        self.kq_dim_division = kq_dim_division

    def _build(self, graph):
        # Xavier-initialized, bias-free linear projections for Q and K,
        # producing num_heads * kq_dim features per node.
        initializers = {'w': tf.contrib.layers.xavier_initializer(uniform=True)}
        project_q_mod = snt.Linear((self.num_heads * self.kq_dim), use_bias=False, initializers=initializers)
        project_q = project_q_mod(graph.nodes)
        project_k_mod = snt.Linear((self.num_heads * self.kq_dim), use_bias=False, initializers=initializers)
        project_k = project_k_mod(graph.nodes)
        # Split the projections into per-head tensors [N, heads, kq_dim].
        project_q = tf.reshape(project_q, [(- 1), self.num_heads, self.kq_dim])
        project_k = tf.reshape(project_k, [(- 1), self.num_heads, self.kq_dim])
        # Values are projected once and repeated across heads (shared V).
        project_v_mod = snt.Linear(self.v_dim, use_bias=False, initializers=initializers)
        project_v = project_v_mod(graph.nodes)
        project_v = tf.keras.backend.repeat(project_v, self.num_heads)
        attn_module = DMSelfAttention(self.kq_dim_division, self.kq_dim)
        attn_graph = attn_module(project_v, project_q, project_k, graph)
        new_nodes = attn_graph.nodes
        # Merge heads back and project to concat_heads_output_dim.
        new_nodes = tf.reshape(new_nodes, [(- 1), (self.num_heads * self.v_dim)])
        new_node_proj = snt.Linear(self.concat_heads_output_dim, use_bias=False)
        new_nodes = new_node_proj(new_nodes)
        if self.concat:
            # Concatenate attention output with the original node features
            # before the MLP.
            new_nodes = tf.concat([graph.nodes, new_nodes], axis=1)
        new_nodes = self.mlp(new_nodes)
        if self.residual:
            # Requires the MLP output width to match graph.nodes' width.
            new_nodes += graph.nodes
        if self.layer_norm:
            ln_mod = snt.LayerNorm()
            new_nodes = ln_mod(new_nodes)
        return graph.replace(nodes=new_nodes)
def _flash_attn_flops_compute(qkv, cu_seqlens, max_seqlen, dropout_p, softmax_scale=None, causal=False, return_attn_probs=False):
    """Estimate (flops, macs) for a flash-attention call.

    Counts QK^T and attention-times-V matmuls plus the softmax over the
    [batch, heads, seq, seq] score matrix; dropout/causal masking are not
    accounted for.
    """
    _, _, nheads, headdim = qkv.shape
    batch_size = cu_seqlens.shape[0] - 1
    # Elements in the attention score matrix; each matmul contributes
    # `headdim` MACs per element.
    score_elems = batch_size * nheads * (max_seqlen ** 2)
    qk_macs = score_elems * headdim
    multi_v_macs = score_elems * headdim
    # Softmax cost is taken from the shared helper on a placeholder tensor.
    fake_tensor = torch.zeros([batch_size, nheads, max_seqlen, max_seqlen])
    softmax_flops, softmax_macs = _softmax_flops_compute(fake_tensor)
    total_flops = 2 * qk_macs + (max_seqlen ** 2) + softmax_flops + 2 * multi_v_macs
    total_macs = qk_macs + softmax_macs + multi_v_macs
    return (total_flops, total_macs)
def test_extended_orbital_matrix_ferminet_can_be_constructed():
    # Smoke test: the construction helper should not raise.
    _make_extended_orbital_matrix_ferminets()
def get_hoi_output(Image_dets, corre_mat=None):
    """Convert raw per-image HOI detection JSON lines into evaluation format.

    Each input line carries an `image_id` and an `hoi_list` of human/object
    boxes with class names and scores. Every pair becomes two box predictions
    (human category fixed to 1) plus one interaction record whose score is
    corre_mat[action][object] * human * object * interaction.
    """
    output_hoi = []
    for raw_line in tqdm(Image_dets, desc='trans output into eval format'):
        image_det = json.loads(raw_line)
        record = {'predictions': [], 'hoi_prediction': [], 'file_name': image_det['image_id']}
        box_count = 0
        for det in image_det['hoi_list']:
            object_cat = coco_classes_originID[det['o_name']]
            inter_cat = hico_name2id[det['i_name']]
            # Human box first (category 1), then the object box.
            record['predictions'].append({'bbox': det['h_box'], 'category_id': 1})
            human_idx = box_count
            box_count += 1
            record['predictions'].append({'bbox': det['o_box'], 'category_id': object_cat})
            object_idx = box_count
            box_count += 1
            # Map original category ids into the correlation matrix's indices.
            ocat_inside = coco_object_inverse_ids[object_cat]
            icat_inside = hico_action_inverse_ids[inter_cat]
            final_score = corre_mat[icat_inside][ocat_inside] * det['h_cls'] * det['o_cls'] * det['i_cls']
            record['hoi_prediction'].append({'subject_id': human_idx, 'object_id': object_idx, 'category_id': inter_cat, 'score': final_score})
        output_hoi.append(record)
    return output_hoi
class DataTrainingArguments():
    """Arguments controlling which dataset/task to train on and how to
    truncate/pad it.

    NOTE(review): the fields use dataclasses.field(), so this class is
    presumably meant to carry a @dataclass decorator that was lost from this
    copy -- confirm against version control.
    """

    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})

    def __post_init__(self):
        # Normalize for case-insensitive task-name matching.
        self.task_name = self.task_name.lower()
class DocstringStyler(CodeStyler):
    """Styler specialized for docstrings: tracks special regions such as code
    examples (left unstyled) and argument lists, driven by module-level
    regexes (_re_example, _re_code_block, _re_arg_def, ...)."""

    def is_no_style_block(self, line):
        # Textual blocks take precedence: they are styled even when they also
        # match the generic code-block regex.
        if (_re_textual_blocks.search(line) is not None):
            return False
        # "Example::"-style introductions are never restyled.
        if (_re_example.search(line) is not None):
            return True
        return (_re_code_block.search(line) is not None)

    def is_comment_or_textual_block(self, line):
        # A Returns-style header ends any special block in progress.
        if (_re_return.search(line) is not None):
            self.in_block = SpecialBlock.NOT_SPECIAL
            return True
        return super().is_comment_or_textual_block(line)

    def is_special_block(self, line):
        # Enter NO_STYLE for example/code blocks, ARG_LIST for "name: ..."
        # argument definitions.
        if self.is_no_style_block(line):
            self.in_block = SpecialBlock.NO_STYLE
            return True
        if (_re_arg_def.search(line) is not None):
            self.in_block = SpecialBlock.ARG_LIST
            return True
        return False

    def init_in_block(self, text):
        # Decide the starting state for a docstring fragment: a leading line
        # that ends with ':' opens an argument list when the following line
        # is further indented (or equally indented and itself ends with ':').
        lines = text.split('\n')
        while ((len(lines) > 0) and (len(lines[0]) == 0)):
            lines = lines[1:]
        if (len(lines) == 0):
            return SpecialBlock.NOT_SPECIAL
        if re.search(':\\s*$', lines[0]):
            indent = get_indent(lines[0])
            if ((len(lines) == 1) or (len(get_indent(lines[1])) > len(indent)) or ((len(get_indent(lines[1])) == len(indent)) and re.search(':\\s*$', lines[1]))):
                self.current_indent = indent
                return SpecialBlock.ARG_LIST
        return SpecialBlock.NOT_SPECIAL
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string inside `fname` in place.

    `pattern` selects a (compiled regex, replacement template) pair from the
    module-level REPLACE_PATTERNS table; 'VERSION' in the template is
    substituted with the new version.
    """
    with open(fname, 'r', encoding='utf-8', newline='\n') as fh:
        content = fh.read()
    re_pattern, template = REPLACE_PATTERNS[pattern]
    updated = re_pattern.sub(template.replace('VERSION', version), content)
    with open(fname, 'w', encoding='utf-8', newline='\n') as fh:
        fh.write(updated)
(version='2.0') def _split_nodename_and_shape(name): inputs = [] shapes = {} name_pattern = '(?:([\\w\\d/\\-\\._:]+)(\\[[\\-\\d,]+\\])?),?' splits = re.split(name_pattern, name) for i in range(1, len(splits), 3): inputs.append((splits[i] + ':0')) if (splits[(i + 1)] is not None): shape = [int(n) for n in splits[(i + 1)][1:(- 1)].split(',')] shape = [(n if (n >= 0) else None) for n in shape] shapes[(splits[i] + ':0')] = shape if (not shapes): shapes = None return (inputs, shapes)
class Testmodel(TestCase):
    """End-to-end tests for HGF network construction, fitting and surprise."""

    def test_HGF(self):
        # NOTE(review): the expected surprise values below are regression
        # constants pinning current behavior, not analytically derived.
        # Custom network: one continuous and one binary input node with
        # hand-wired value and volatility parents.
        custom_hgf = HGF(model_type=None).add_input_node(kind='continuous', input_idxs=0).add_input_node(kind='binary', input_idxs=1).add_value_parent(children_idxs=0).add_value_parent(children_idxs=1, additional_parameters={'binary_expected_precision': jnp.nan}).add_value_parent(children_idxs=[2, 3]).add_value_parent(children_idxs=4).add_volatility_parent(children_idxs=[2, 3]).add_volatility_parent(children_idxs=2).add_volatility_parent(children_idxs=7).init()
        custom_hgf.input_data(input_data=np.array([0.2, 1]))
        timeserie = load_data('continuous')
        # Two-level continuous HGF on the example time series.
        two_level_continuous_hgf = HGF(n_levels=2, model_type='continuous', initial_mean={'1': timeserie[0], '2': 0.0}, initial_precision={'1': 10000.0, '2': 10.0}, tonic_volatility={'1': (- 3.0), '2': (- 3.0)}, tonic_drift={'1': 0.0, '2': 0.0}, volatility_coupling={'1': 1.0})
        two_level_continuous_hgf.input_data(input_data=timeserie)
        surprise = two_level_continuous_hgf.surprise()
        assert jnp.isclose(surprise, (- 1141.0911))
        assert (len(two_level_continuous_hgf.node_trajectories[1]['mean']) == 614)
        # Three-level continuous HGF.
        three_level_continuous_hgf = HGF(n_levels=3, model_type='continuous', initial_mean={'1': 1.04, '2': 1.0, '3': 1.0}, initial_precision={'1': 10000.0, '2': 10.0, '3': 10.0}, tonic_volatility={'1': (- 13.0), '2': (- 2.0), '3': (- 2.0)}, tonic_drift={'1': 0.0, '2': 0.0, '3': 0.0}, volatility_coupling={'1': 1.0, '2': 1.0})
        three_level_continuous_hgf.input_data(input_data=timeserie)
        surprise = three_level_continuous_hgf.surprise()
        assert jnp.isclose(surprise, (- 892.82227))
        sp = total_gaussian_surprise(three_level_continuous_hgf)
        assert jnp.isclose(sp, 1159.1089)
        # Two- and three-level binary HGFs.
        (u, _) = load_data('binary')
        two_level_binary_hgf = HGF(n_levels=2, model_type='binary', initial_mean={'1': 0.0, '2': 0.5}, initial_precision={'1': 0.0, '2': 10000.0}, tonic_volatility={'1': None, '2': (- 6.0)}, tonic_drift={'1': None, '2': 0.0}, volatility_coupling={'1': None}, eta0=0.0, eta1=1.0, binary_precision=jnp.inf)
        two_level_binary_hgf = two_level_binary_hgf.input_data(u)
        surprise = two_level_binary_hgf.surprise()
        assert jnp.isclose(surprise, 215.58821)
        three_level_binary_hgf = HGF(n_levels=3, model_type='binary', initial_mean={'1': 0.0, '2': 0.5, '3': 0.0}, initial_precision={'1': 0.0, '2': 10000.0, '3': 10.0}, tonic_volatility={'1': None, '2': (- 6.0), '3': (- 2.0)}, tonic_drift={'1': None, '2': 0.0, '3': 0.0}, volatility_coupling={'1': None, '2': 1.0}, eta0=0.0, eta1=1.0, binary_precision=jnp.inf)
        three_level_binary_hgf.input_data(input_data=u)
        surprise = three_level_binary_hgf.surprise()
        assert jnp.isclose(surprise, 215.59067)
        # Custom update sequence: randomly switch between the full update
        # sequence and its first two steps at every observation.
        three_level_binary_hgf = HGF(n_levels=3, model_type='binary', initial_mean={'1': 0.0, '2': 0.5, '3': 0.0}, initial_precision={'1': 0.0, '2': 10000.0, '3': 10.0}, tonic_volatility={'1': None, '2': (- 6.0), '3': (- 2.0)}, tonic_drift={'1': None, '2': 0.0, '3': 0.0}, volatility_coupling={'1': None, '2': 1.0}, eta0=0.0, eta1=1.0, binary_precision=jnp.inf)
        update_sequence1 = three_level_binary_hgf.update_sequence
        update_sequence2 = update_sequence1[:2]
        update_branches = (update_sequence1, update_sequence2)
        branches_idx = np.random.binomial(n=1, p=0.5, size=len(u))
        three_level_binary_hgf.input_custom_sequence(update_branches=update_branches, branches_idx=branches_idx, input_data=u)
class Linear(nn.Linear):
    """Linear layer with a fixed random feedback matrix for Direct Feedback
    Alignment (DFA): during the backward pass, the incoming gradient is
    replaced by ``loss_gradient @ weight_backward`` (set via a backward hook).

    `loss_gradient` must be assigned externally (the output-layer error)
    before gradients flow through the hook.
    """

    def __init__(self, in_features: int, out_features: int, output_dim: int, bias: bool = True, layer_config: dict = None) -> None:
        super(Linear, self).__init__(in_features, out_features, bias)
        # BUGFIX: the original dereferenced layer_config unconditionally and
        # crashed when it was left at its None default.
        self.layer_config = layer_config if layer_config is not None else {}
        if 'options' not in self.layer_config:
            self.layer_config['options'] = {'constrain_weights': False, 'gradient_clip': False, 'init': 'xavier'}
        self.options = self.layer_config['options']
        self.init = self.options['init']
        self.loss_gradient = None  # set externally with the network's output error
        # Fixed random feedback weights used instead of W^T in backward.
        self.weight_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.bias_backward = None
        if self.bias is not None:
            self.bias_backward = nn.Parameter(torch.Tensor(size=(output_dim, self.in_features)), requires_grad=False)
        self.init_parameters()
        if self.options.get('constrain_weights'):
            with torch.no_grad():
                # Remember the initial norm so forward() can re-normalize.
                self.norm_initial_weights = torch.linalg.norm(self.weight)
        # NOTE(review): register_backward_hook is deprecated in recent
        # PyTorch (register_full_backward_hook is the replacement); kept for
        # compatibility with the original training loop.
        self.register_backward_hook(self.dfa_backward_hook)
        self.weight_ratio = 0

    def init_parameters(self) -> None:
        """Initialize forward and feedback weights (xavier or kaiming)."""
        (fan_in, fan_out) = nn.init._calculate_fan_in_and_fan_out(self.weight)
        if self.init == 'xavier':
            nn.init.xavier_uniform_(self.weight)
            nn.init.xavier_uniform_(self.weight_backward)
            self.scaling_factor = math.sqrt(2.0 / float(fan_in + fan_out))
            if self.bias is not None:
                nn.init.constant_(self.bias, 0)
                nn.init.constant_(self.bias_backward, 0)
        else:
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            nn.init.kaiming_uniform_(self.weight_backward, a=math.sqrt(5))
            self.scaling_factor = 1 / math.sqrt(3 * fan_in)
            if self.bias is not None:
                bound = (1 / math.sqrt(fan_in)) if fan_in > 0 else 0
                nn.init.uniform_(self.bias, -bound, bound)
                nn.init.uniform_(self.bias_backward, -bound, bound)

    def forward(self, x):
        # Optionally re-normalize the weights to their initial norm; done
        # outside autograd so it does not enter the graph.
        with torch.no_grad():
            if self.options.get('constrain_weights'):
                self.weight = torch.nn.Parameter((self.weight * self.norm_initial_weights) / torch.linalg.norm(self.weight))
        return LinearGrad.apply(x, self.weight, self.bias)

    def compute_weight_ratio(self):
        """Return ||feedback weights|| / ||forward weights|| (alignment diagnostic)."""
        with torch.no_grad():
            self.weight_diff = torch.linalg.norm(self.weight_backward) / torch.linalg.norm(self.weight)
        return self.weight_diff

    @staticmethod
    def dfa_backward_hook(module, grad_input, grad_output):
        """Replace the incoming gradient with the DFA feedback projection.

        BUGFIX: declared @staticmethod -- the original registered the bound
        method, so PyTorch's (module, grad_input, grad_output) call would
        deliver an extra `self` argument and raise TypeError when the hook
        fired.
        """
        if grad_input[0] is None:
            return grad_input
        grad_dfa = module.loss_gradient.mm(module.weight_backward)
        if len(grad_input) == 2:
            return (grad_dfa, grad_input[1])
        return (grad_dfa, grad_input[1], grad_input[2])
class CHeaderNode(Node):
    """Code-generation node emitting the C header/prolog of a generated CNN:
    include directives, weight/constant definitions, optional fread-based
    weight loaders, the cnn() function signature and local declarations.

    Acts as a singleton: the last constructed instance is returned by
    instance().
    """

    # NOTE(review): annotating with the class' own name inside the class body
    # is only valid with `from __future__ import annotations` (or a string
    # annotation) -- confirm the file's top-level imports.
    __instance: CHeaderNode = None
    # Include snippets gated by the *_required flags below.
    intel_intr_includes = '\n#include <emmintrin.h>\n#include <pmmintrin.h>\n#include <tmmintrin.h>\n#include <immintrin.h>\n#include <xmmintrin.h>\n '
    math_include = '#include <math.h>\n'
    test_include = '\n#ifdef CNN_TEST\n#include <stdio.h>\n#endif\n\n'
    # fread-based weight loaders emitted when weights_method == 'stdio';
    # braces are doubled because the snippet goes through str.format.
    weights_init_stdio = '\n#include <stdio.h>\n\nvoid init_weight_float(float* w, int len, const char* name)\n{{\n    FILE *f = fopen(name, "rb");\n    fread(w, sizeof(float), len, f);\n    fclose(f);\n}}\n\nvoid init_weight_int8(int8_t* w, int len, const char* name)\n{{\n    FILE *f = fopen(name, "rb");\n    fread(w, sizeof(int8_t), len, f);\n    fclose(f);\n}}\n\nvoid init_weight_int16(int16_t* w, int len, const char* name)\n{{\n    FILE *f = fopen(name, "rb");\n    fread(w, sizeof(int16_t), len, f);\n    fclose(f);\n}}\n\nvoid init_weights()\n{{\n{}\n}}\n\n'
    func_def = 'void cnn{id}(float {out_var_name}, float *{scores_var})\n{{\n'
    # Feature flags toggled by other graph nodes during code generation.
    intel_intr_required = False
    math_required = False
    test_required = True
    out_var: Variable = None
    out_var_name = ''
    # NOTE(review): class-level mutable defaults are shared across instances;
    # reset() reassigns per-instance lists, and the class is a singleton.
    var_decls: List[Variable] = []
    pointer_decls: List[Variable] = []
    const_decls: List[Variable] = []

    def __init__(self, id, in_dim, weights_method):
        # id: suffix for the generated cnn function name; in_dim: input
        # tensor dimensions; weights_method: 'stdio' (load weights at runtime
        # via fread) or 'direct' (weights compiled into the source).
        super().__init__()
        self.id = id
        self.in_dim = in_dim
        self.out_var = Allocation.allocate_var('float', 'x', in_dim)
        self.out_var.decl_written = True
        self.out_dim = in_dim
        self.weights_method = weights_method
        if (weights_method == 'stdio'):
            self.direct = False
            self.stdio = True
        elif (weights_method == 'direct'):
            self.direct = True
            self.stdio = False
        else:
            raise Exception('Unknown weights method.')
        CHeaderNode.__instance = self
        self.reset()

    # NOTE(review): called without an instance and takes no self, so a
    # @staticmethod decorator was probably lost from this copy -- confirm.
    def instance() -> CHeaderNode:
        # Return the singleton (set by the most recent __init__).
        return CHeaderNode.__instance

    def reset(self):
        # Clear the per-build declaration lists.
        self.var_decls = []
        self.pointer_decls = []
        self.const_decls = []

    def write_c(self):
        """Emit the header snippet and open the cnn() function body."""
        self.snippet = ''
        weight_snippet = ''
        if (self.id is None):
            self.id = ''
        # The scores output variable is declared by the footer node, so it is
        # removed from every local declaration list here.
        self.scores_var = CFooterNode.instance().in_var
        try:
            self.var_decls.remove(self.scores_var)
        except Exception:
            pass
        try:
            self.pointer_decls.remove(self.scores_var)
        except Exception:
            pass
        try:
            self.const_decls.remove(self.scores_var)
        except Exception:
            pass
        # e.g. "x[3][32][32]": the input buffer name with its full dims.
        self.out_var_name = (str(self.out_var) + ''.join([(('[' + str(i)) + ']') for i in self.in_dim]))
        if self.test_required:
            self.snippet += self.test_include
        if self.math_required:
            self.snippet += self.math_include
        if self.intel_intr_required:
            self.snippet += self.intel_intr_includes
        for v in self.const_decls:
            # Braces are escaped because the whole snippet is later formatted.
            self.snippet += v.get_def(self.direct).replace('{', '{{').replace('}', '}}')
            if self.stdio:
                # Emit a loader call matching the constant's C type and dump
                # its data for the runtime fread.
                var_type = Variable.type_to_c(v.type)
                if (var_type == 'float'):
                    func_name = 'init_weight_float'
                elif (var_type == 'int8_t'):
                    func_name = 'init_weight_int8'
                elif (var_type == 'int16_t'):
                    func_name = 'init_weight_int16'
                else:
                    assert False
                weight_snippet += '\t{}(({}*){}, {}, "{}");\n'.format(func_name, var_type, str(v), np.prod(v.dim), str(v))
                Writer.write_data(v.init_data, str(v))
        if self.stdio:
            self.snippet += self.weights_init_stdio.format(weight_snippet).replace('{', '{{').replace('}', '}}')
        self.snippet += self.func_def
        for v in self.var_decls:
            self.snippet += ('\t' + v.get_def().replace('{', '{{').replace('}', '}}'))
        for v in self.pointer_decls:
            self.snippet += ('\t' + v.get_pointer_decl().replace('{', '{{').replace('}', '}}'))
        super().write_c()
        Writer.cur_depth += 1
# NOTE(review): extraction artifact -- the decorator name is missing (this is
# presumably a Flask `@app.route(...)` line) and the redirect URL string
# literals appear truncated; restore both from version control before use.
('/click/<string:articleId>', methods=['GET'])
def click(articleId):
    # Record the click for the logged-in user, then redirect to the article
    # (PDF variant when ?pdf=true).
    db.clickArticle(articleId, g.user)
    pdf = request.args.get('pdf', False, type=(lambda x: (x.lower() == 'true')))
    if pdf: return redirect((' % articleId)) return redirect((' + articleId))
class BinConv2d(nn.Module):
    """XNOR-Net style block: BatchNorm -> binary activation -> (dropout) ->
    Conv2d -> ReLU."""

    def __init__(self, input_channels, output_channels, kernel_size=-1, stride=-1, padding=-1, dropout=0):
        super(BinConv2d, self).__init__()
        self.layer_type = 'BinConv2d'
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dropout_ratio = dropout
        self.bn = nn.BatchNorm2d(input_channels, eps=0.0001, momentum=0.1, affine=True)
        # Dropout module only exists when a nonzero ratio was requested.
        if dropout != 0:
            self.dropout = nn.Dropout(dropout)
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=padding)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.bn(x)
        # BinActive binarizes activations and also returns their mean
        # (the mean is unused here).
        out, mean = BinActive()(out)
        if self.dropout_ratio != 0:
            out = self.dropout(out)
        out = self.conv(out)
        return self.relu(out)
def _resnetv2(layers=(3, 4, 9), **kwargs):
    """Build a ResNetV2 feature backbone, or only its stem when `layers` is
    empty. `padding_same` (default True) selects SAME-padding conv layers."""
    in_chans = kwargs.get('in_chans', 3)
    if kwargs.get('padding_same', True):
        stem_type, conv_layer = 'same', StdConv2dSame
    else:
        stem_type, conv_layer = '', StdConv2d
    if not len(layers):
        # Stem-only backbone.
        return create_resnetv2_stem(in_chans, stem_type=stem_type, preact=False, conv_layer=conv_layer)
    return ResNetV2(layers=layers, num_classes=0, global_pool='', in_chans=in_chans, preact=False, stem_type=stem_type, conv_layer=conv_layer)
class QHeuristic:
    """Abstract base for Q-value heuristics; subclasses override evaluate()."""

    def __init__(self):
        pass

    def evaluate(self, state: State, action):
        """Return a heuristic Q-value estimate for (state, action)."""
        raise NotImplementedError
def train(args, io):
    """Train a point-cloud classifier (RPC or Pct) on ModelNet40.

    Supports RSMix augmentation (enabled per-batch with probability
    args.rsmix_prob when args.beta > 0) using a per-sample convex combination
    of the two mixed labels' losses. Evaluates after every epoch and saves
    checkpoints under checkpoints/<exp_name>/models/.

    Args:
        args: parsed CLI namespace (model, lr, epochs, batch sizes, RSMix and
            augmentation flags, exp_name, ...).
        io: logger-like object exposing cprint().
    """
    train_loader = DataLoader(ModelNet40(partition='train', num_points=args.num_points, args=(args if args.pw else None)), num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(partition='test', num_points=args.num_points), num_workers=8, batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device('cuda' if args.cuda else 'cpu')
    if args.model == 'RPC':
        model = RPC(args).to(device)
    else:
        model = Pct(args).to(device)
    print(str(model))
    model = nn.DataParallel(model)
    if args.use_sgd:
        print('Use SGD')
        opt = optim.SGD(model.parameters(), lr=args.lr * 100, momentum=args.momentum, weight_decay=0.0005)
    else:
        print('Use Adam')
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0001)
    # NOTE(review): eta_min=args.lr makes the cosine schedule bottom out at
    # the base learning rate -- confirm this is intentional.
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    criterion = cal_loss
    best_test_acc = 0
    for epoch in range(args.epochs):
        # NOTE(review): kept at the top of the epoch as in the original,
        # although PyTorch recommends stepping after the optimizer updates.
        scheduler.step()
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        total_time = 0.0
        for data, label in train_loader:
            rsmix = False
            r = np.random.rand(1)
            if args.beta > 0 and r < args.rsmix_prob:
                rsmix = True
                data, lam, label, label_b = rsmix_provider.rsmix(data, label, beta=args.beta, n_sample=args.nsample, KNN=args.knn)
            # BUGFIX: the original used `args.beta is not 0.0`, an identity
            # comparison that is (almost) always True regardless of the
            # value; use a value comparison instead.
            if args.rot or args.rdscale or args.shift or args.jitter or args.shuffle or args.rddrop or args.beta != 0.0:
                data = torch.FloatTensor(data)
            if rsmix:
                lam = torch.FloatTensor(lam)
                lam, label_b = lam.to(device), label_b.to(device).squeeze()
            data, label = data.to(device), label.to(device).squeeze()
            if rsmix:
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                opt.zero_grad()
                start_time = time.time()
                logits = model(data)
                # Mixed loss: per-sample convex combination of both labels.
                loss = 0
                for i in range(batch_size):
                    loss_tmp = (criterion(logits[i].unsqueeze(0), label[i].unsqueeze(0).long()) * (1 - lam[i])) + (criterion(logits[i].unsqueeze(0), label_b[i].unsqueeze(0).long()) * lam[i])
                    loss += loss_tmp
                loss = loss / batch_size
            else:
                data = data.permute(0, 2, 1)
                batch_size = data.size()[0]
                opt.zero_grad()
                start_time = time.time()
                logits = model(data)
                loss = criterion(logits, label)
            loss.backward()
            opt.step()
            end_time = time.time()
            total_time += end_time - start_time
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += loss.item() * batch_size
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        print('train total time is', total_time)
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = ('Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch, ((train_loss * 1.0) / count), metrics.accuracy_score(train_true, train_pred), metrics.balanced_accuracy_score(train_true, train_pred)))
        io.cprint(outstr)
        # ---- per-epoch evaluation ----
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        total_time = 0.0
        for data, label in test_loader:
            data, label = data.to(device), label.to(device).squeeze()
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            start_time = time.time()
            logits = model(data)
            end_time = time.time()
            total_time += end_time - start_time
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += loss.item() * batch_size
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        print('test total time is', total_time)
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = ('Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch, ((test_loss * 1.0) / count), test_acc, avg_per_class_acc))
        io.cprint(outstr)
        if test_acc >= best_test_acc:
            best_test_acc = test_acc
            torch.save(model.state_dict(), 'checkpoints/%s/models/model.t7' % args.exp_name)
            # NOTE(review): the original's indentation for this second save
            # was ambiguous in this copy; it is kept with the best-model save.
            torch.save(model.state_dict(), 'checkpoints/%s/models/model_final.t7' % args.exp_name)
class DeepLab(nn.Module):
    """Super-resolution head: a Decoder fuses low- and high-level features,
    then an EDSR module upsamples the result."""

    def __init__(self, ch, c1=128, c2=512, factor=2, sync_bn=True, freeze_bn=False):
        super(DeepLab, self).__init__()
        # NOTE: the selected BatchNorm class is not used below; the
        # assignment is kept because it resolves SynchronizedBatchNorm2d.
        if sync_bn == True:
            BatchNorm = SynchronizedBatchNorm2d
        else:
            BatchNorm = nn.BatchNorm2d
        self.sr_decoder = Decoder(c1, c2)
        self.edsr = EDSR(num_channels=ch, input_channel=64, factor=8)
        self.factor = factor

    def forward(self, low_level_feat, x):
        fused = self.sr_decoder(x, low_level_feat, self.factor)
        return self.edsr(fused)
def get_subset_data(data_path, indices):
    """Read only the lines of `data_path` whose 0-based line index is in
    `indices`, returning (image paths, integer labels) ordered like `indices`.

    Each selected line is split on the module-level KEY_SEP separator; the
    first two fields are the image path and the label. Unmatched positions
    keep their 0 placeholder.
    """
    sub_img_list = [0] * len(indices)
    sub_label_list = [0] * len(indices)
    # Map wanted file-line index -> position in the output lists. This dict
    # doubles as the membership test (the original kept a redundant set with
    # the same contents).
    pos_of = {idx: i for i, idx in enumerate(indices)}
    with open(data_path, 'r') as f:
        for line_no, line in enumerate(f):
            pos = pos_of.get(line_no)
            if pos is None:
                continue
            img_path, target = line.strip().split(KEY_SEP)[:2]
            sub_img_list[pos] = img_path
            sub_label_list[pos] = int(target)
    return (sub_img_list, sub_label_list)
class DataWriter(object):
    """Streams prediction instances from a queue to <output>/instances.log in
    a background process; also writes a final scores JSON.

    When no output directory is given (or it cannot be created), the writer
    is disabled and all methods become no-ops.
    """

    def __init__(self, args, q):
        self.queue = q
        self.output_dir = args.output
        if self.output_dir is None:
            logger.warning('No output directory')
            self.started = False
            self.proc = None
            return
        try:
            os.makedirs(self.output_dir, exist_ok=True)
        except BaseException as be:
            logger.error(f'Failed to write results to {self.output_dir}.')
            logger.error(be)
            logger.error('Skip writing predictions')
            self.started = False
            # BUGFIX: `proc` was left undefined on this path, so kill()
            # raised AttributeError after a makedirs failure.
            self.proc = None
            return
        logger.info(f'Output dir: {self.output_dir}')
        path = os.path.join(self.output_dir, 'instances.log')
        self.proc = Process(target=self.write_loop, args=(path, q))
        self.proc.start()
        self.started = True

    @staticmethod
    def write_loop(path, q):
        """Consume messages from `q` and append them as JSON lines to `path`.

        BUGFIX: declared @staticmethod -- the original registered the bound
        method as the Process target, so the child called it with an extra
        `self` argument and raised TypeError immediately.
        """
        logger.info(f'Start data writer (process id {os.getpid()})')
        with open(path, 'w') as f:
            while True:
                try:
                    m = q.get()
                    f.write(json.dumps(m) + '\n')
                    f.flush()
                except EOFError:
                    # Queue closed by the parent: exit the loop cleanly.
                    break

    def write_scores(self, scores):
        """Write the final scores dict as pretty-printed JSON (if enabled)."""
        if self.started:
            with open(os.path.join(self.output_dir, 'scores'), 'w') as f:
                f.write(json.dumps(scores, indent=4))

    def kill(self):
        """Terminate the writer process (hard kill; pending items may be lost)."""
        if self.proc is not None:
            self.proc.kill()
        logger.info('Close data writer')