code
stringlengths
101
5.91M
class TmpData(BaseDataset):
    """Minimal BaseDataset subclass used to exercise the dataset pipeline."""

    def __init__(self, n, **kwargs):
        # Keep the index sequence before delegating to the base class.
        self.n = range(n)
        super().__init__(**kwargs)

    def __len__(self):
        return len(self.n)

    def load(self, item, x, y, meta):
        # Expose the raw index through the feature dict.
        x['item'] = self.n[item]
        return (x, y, meta)

    def augment(self, x, y, meta):
        # Scale the feature and tag the metadata so augmentation is observable.
        x['item'] *= 100
        meta['augs'] = 'helloworld'
        return (x, y, meta)

    def show(self, x, y, meta, axs=None):
        # Visualisation is intentionally a no-op for this test dataset.
        ...
class TestCutVideo(unittest.TestCase):
    """Integration test for cut_video(): slice an mp4 into wav chunks under ./raw."""

    def setUp(self):
        # Start each test from an empty output directory.
        shutil.rmtree('./raw', ignore_errors=True)
        os.mkdir('./raw')

    def tearDown(self) -> None:
        shutil.rmtree('./raw', ignore_errors=True)

    # BUG FIX: the source contained the bare no-op tuple
    # ((get_device_type() != 'cpu'), 'Only run this test on CPU') between the
    # methods — the remnant of a stripped @unittest.skipIf decorator.
    # Restored so the test is actually skipped on non-CPU devices.
    @unittest.skipIf(get_device_type() != 'cpu', 'Only run this test on CPU')
    def test_cut_video(self):
        parser = argparse.ArgumentParser(__doc__)
        video_path = '/intel-extension-for-transformers/intel_extension_for_transformers/neural_chat/assets/video/intel.mp4'
        if os.path.exists(video_path):
            parser.add_argument('--path', type=str, default=video_path)
        else:
            parser.add_argument('--path', type=str, default='../assets/video/intel.mp4')
        parser.add_argument('--min', type=str, default='1')
        parser.add_argument('--sr', type=str, default='16000')
        parser.add_argument('--out_path', type=str, default='./raw')
        parser.add_argument('--verbose', help='increase output verbosity', action='store_true')
        args = parser.parse_args()
        if not os.path.exists(args.path):
            raise FileNotFoundError(f"Input path '{args.path}' does not exist.")
        cut_video(args, './raw')
        self.assertTrue(os.path.exists('./raw/intel_0.wav'))
class TFConvNextPreTrainedModel(TFPreTrainedModel):
    """ConvNext base class handling config, dummy inputs and the SavedModel serving signature."""

    config_class = ConvNextConfig
    base_model_prefix = 'convnext'
    main_input_name = 'pixel_values'

    # BUG FIX: the dumped source had a bare `(input_signature=[...])` expression
    # before `serving` — the remnant of a stripped @tf.function decorator — and
    # `dummy_inputs` had lost its @property decorator (HF transformers
    # convention). Both restored.
    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """Dummy pixel inputs used to build the network."""
        VISION_DUMMY_INPUTS = tf.random.uniform(
            shape=(3, self.config.num_channels, self.config.image_size, self.config.image_size),
            dtype=tf.float32,
        )
        return {'pixel_values': tf.constant(VISION_DUMMY_INPUTS)}

    @tf.function(input_signature=[{'pixel_values': tf.TensorSpec((None, None, None, None), tf.float32, name='pixel_values')}])
    def serving(self, inputs):
        """SavedModel serving entry point: run the model and normalize its output."""
        output = self.call(inputs)
        return self.serving_output(output)
def get_loss(seg_pred, seg):
    """Segmentation loss.

    Returns (batch-mean loss, per-instance mean cross-entropy, per-point argmax
    predictions) for logits `seg_pred` against integer labels `seg`.
    """
    pointwise_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg)
    per_instance_seg_loss = tf.reduce_mean(pointwise_ce, axis=1)
    seg_loss = tf.reduce_mean(per_instance_seg_loss)
    per_instance_seg_pred_res = tf.argmax(seg_pred, 2)
    return (seg_loss, per_instance_seg_loss, per_instance_seg_pred_res)
def get_super_module_by_name(model, module_name):
    """Walk a dotted attribute path and return the *parent* of the last segment.

    Returns None when any intermediate attribute — or the final one — is missing.
    """
    *parents, leaf = module_name.split('.')
    node = model
    for part in parents:
        if not hasattr(node, part):
            return None
        node = getattr(node, part)
    return node if hasattr(node, leaf) else None
class FlaxDDIMScheduler(metaclass=DummyObject):
    """Placeholder that raises a helpful error when the `flax` backend is missing."""

    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])

    # BUG FIX: from_config / from_pretrained are alternate constructors and must
    # be classmethods (they already take `cls`); without @classmethod they would
    # need an instance that can never be built, defeating the dummy's purpose.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['flax'])
class TcmfValDataset(torch.utils.data.IterableDataset):
    """Iterable validation dataset streaming one (input, output) pair from the TCMF loader."""

    def __init__(self, config):
        # BUG FIX: the original called super(TcmfValDataset).__init__(), which
        # creates an *unbound* super object and silently skips
        # IterableDataset.__init__; use the zero-argument form.
        super().__init__()
        self.tcmf_data_loader = get_tcmf_data_loader(config)

    def __iter__(self):
        # supply_test() returns (inp, out, <unused>, <unused>).
        (inp, out, _, _) = self.tcmf_data_loader.supply_test()
        yield (inp, out)
def hn(tag):
    """Heading level for tags 'h1'..'h9'.

    Returns the level for valid headings, 0 when the second character is not a
    digit, and None (implicit) otherwise — callers rely only on truthiness.
    """
    if tag[0] == 'h' and len(tag) == 2:
        try:
            level = int(tag[1])
        except ValueError:
            return 0
        if 1 <= level <= 9:
            return level
def save_model(epoch, args, model, type_name=''):
    """Serialize the (possibly DataParallel-wrapped) banzhafteacher weights for `epoch`.

    Returns the path the state dict was written to.
    """
    # Unwrap nn.DataParallel if present.
    core = model.module.banzhafteacher if hasattr(model, 'module') else model.banzhafteacher
    suffix = '' if type_name == '' else type_name + '.'
    output_model_file = join(args.output_dir, 'pytorch_model.bin.{}{}'.format(suffix, epoch))
    torch.save(core.state_dict(), output_model_file)
    logger.info('Model saved to %s', output_model_file)
    return output_model_file
class CNNMnist(nn.Module):
    """Small two-conv CNN for MNIST-like 28x28 inputs."""

    def __init__(self, args):
        super(CNNMnist, self).__init__()
        self.conv1 = nn.Conv2d(args.num_channels, 16, 8, 2, padding=3)
        self.conv2 = nn.Conv2d(16, 32, 4, 2)
        self.fc1 = nn.Linear(32 * 4 * 4, 32)
        self.fc2 = nn.Linear(32, args.num_classes)

    def forward(self, x):
        # conv -> relu -> pool (stride 1), twice, then two fully-connected layers.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 1)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 1)
        out = out.view(-1, 32 * 4 * 4)
        out = F.relu(self.fc1(out))
        return self.fc2(out)
# Main training loop: iterates `train_loader` indefinitely until
# `config.max_steps`, logging the mean train loss every `config.log_frequency`
# steps, running validation every `config.val_frequency` steps and anomaly
# evaluation every `config.anom_val_frequency` steps, then saving the model.
# NOTE(review): relies entirely on module-level globals (config, train_loader,
# val_loader, small_testloader, train_step, val_step, model, log, validate,
# evaluate, save_model) — confirm they are bound before calling.
# NOTE(review): `input` shadows the builtin; left unchanged here.
def train() -> None: print(f'Starting training {config.name}...') train_losses = [] t_start = time() while True: for input in train_loader: config.step += 1 input = input.to(config.device) loss = train_step(input) train_losses.append(loss) if ((config.step % config.log_frequency) == 0): log_msg = f'Iteration {config.step} - ' log_msg += f'train loss: {np.mean(train_losses):.4f}' log_msg += f' - time: {(time() - t_start):.2f}s' print(log_msg) log({'train/loss': np.mean(train_losses)}, config) train_losses = [] if ((config.step % config.val_frequency) == 0): validate(val_loader, config) if ((config.step % config.anom_val_frequency) == 0): evaluate(config, small_testloader, val_step) if (config.step >= config.max_steps): save_model(model, config) print(f'Reached {config.max_steps} iterations. Finished training {config.name}.') return
def my_augment_pool():
    """Return the RandAugment-style op pool as (op, max_value, bias) triples."""
    return [
        (AutoContrast, None, None),
        (Brightness, 1.8, 0.1),
        (Color, 1.8, 0.1),
        (Contrast, 1.8, 0.1),
        (Cutout, 0.2, 0),
        (Equalize, None, None),
        (Invert, None, None),
        (Posterize, 4, 4),
        (Rotate, 30, 0),
        (Sharpness, 1.8, 0.1),
        (ShearX, 0.3, 0),
        (ShearY, 0.3, 0),
        (Solarize, 256, 0),
        (SolarizeAdd, 110, 0),
        (TranslateX, 0.45, 0),
        (TranslateY, 0.45, 0),
    ]
class ForwardBackward(Algorithm):
    """FISTA-style forward-backward splitting with Nesterov momentum."""

    def __init__(self, parameters=None, **kargs):
        Algorithm.__init__(self)
        self.add_parameters(parameters, kargs)
        self._default_keyword_parameters.update({
            'gradient': None,
            'proximal': None,
            'lipschitz_constant': None,
            'lambda': 1,
            'initialization': None,
            'relative_difference_tolerance': 1e-05,
        })

    def _initialize(self):
        # Cache the problem description locally for the iteration loop.
        self.__gradient = self._parameters['gradient']
        self.__proximal = self._parameters['proximal']
        self.__L = self._parameters['lipschitz_constant']
        self.__lambda = self._parameters['lambda']
        self._data['x'] = self._parameters['initialization']
        self.__t = 1
        self._data['previous_x'] = self._data['x'].copy()

    def _terminate(self):
        del self._data['previous_x']

    def _iterate(self):
        # Nesterov momentum schedule: t_{k+1} = (1 + sqrt(1 + 4 t_k^2)) / 2.
        t_next = (1 + np.sqrt(1 + 4 * self.__t * self.__t)) / 2
        w = (self.__t - 1) / t_next
        x = self._data['x']
        # Extrapolated point, then one proximal-gradient step from it.
        y = (1 + w) * x - w * self._data['previous_x']
        self._data['previous_x'] = x.copy()
        self.__t = t_next
        self._data['x'] = self.__proximal(y - self.__gradient(y) / self.__L,
                                          self.__lambda / self.__L)

    def _extract_current_iterate_matrix(self):
        return self._data['x']
class Pooler(nn.Module):
    """Multi-level ROIAlign pooler: routes each box to a pyramid level and pools there."""

    def __init__(self, output_size, scales, sampling_ratio):
        super(Pooler, self).__init__()
        self.poolers = nn.ModuleList([
            ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
            for scale in scales
        ])
        self.output_size = output_size
        # Level range derives from the finest/coarsest scales (-log2(scale)).
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)

    def convert_to_roi_format(self, boxes):
        """Concatenate all boxes, prefixing each row with its batch-image index."""
        concat_boxes = cat([b.bbox for b in boxes], dim=0)
        device, dtype = concat_boxes.device, concat_boxes.dtype
        ids = cat(
            [torch.full((len(b), 1), i, dtype=dtype, device=device)
             for i, b in enumerate(boxes)],
            dim=0,
        )
        return torch.cat([ids, concat_boxes], dim=1)

    def forward(self, x, boxes):
        num_levels = len(self.poolers)
        rois = self.convert_to_roi_format(boxes)
        if num_levels == 1:
            return self.poolers[0](x[0], rois)
        levels = self.map_levels(boxes)
        num_rois = len(rois)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        dtype, device = x[0].dtype, x[0].device
        result = torch.zeros((num_rois, num_channels, output_size, output_size),
                             dtype=dtype, device=device)
        # Pool every ROI on its assigned pyramid level, scattering into `result`.
        for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
            idx_in_level = torch.nonzero(levels == level).squeeze(1)
            result[idx_in_level] = pooler(per_level_feature, rois[idx_in_level]).to(dtype)
        return result
def _compute_corrected_ttest(differences: np.ndarray, n_train: int, n_test: int, df: Optional[int]=None, alternative: str='two-sided') -> Tuple[float, float]:
    """Nadeau-Bengio corrected paired t-test over cross-validation score differences.

    Returns (t statistic, p-value) for the requested `alternative`
    ('two-sided', 'less' or 'greater'); anything else raises via raise_error.
    """
    mean = differences.mean(axis=0)
    if df is None:
        # Default degrees of freedom: number of repetitions minus one.
        df = len(differences) - 1
    std = _corrected_std(differences, n_train=n_train, n_test=n_test)
    t_stat = mean / std
    if alternative == 'less':
        p_val = special.stdtr(df, t_stat)
    elif alternative == 'greater':
        p_val = special.stdtr(df, -t_stat)
    elif alternative == 'two-sided':
        # Symmetric t-distribution: double the one-sided tail.
        p_val = special.stdtr(df, -np.abs(t_stat)) * 2
    else:
        raise_error(f"Invalid alternative {alternative}. Should be 'two-sided', 'less' or 'greater'.")
    return (t_stat, p_val)
def number_double_solutions(vrblvl=0):
    """Query PHCpack (job code 32) for the number of solutions in double precision.

    `vrblvl` > 0 enables verbose tracing of the call.
    """
    if vrblvl > 0:
        print('in number_double_solutions ...')
    phc = get_phcfun()
    apar = pointer(c_int32(0))
    bpar = pointer(c_int32(0))  # receives the solution count
    cpar = pointer(c_double(0.0))
    vrb = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> number_double_solutions calls phc', end='')
    retval = phc(32, apar, bpar, cpar, vrb)
    if vrblvl > 0:
        print(', return value :', retval)
    return bpar[0]
def get_ptb_format_from_diora_tree(parse, tokens, return_string=False, batched=False):
    """Convert a DIORA span tree over `tokens` into a PTB-style nested tuple,
    or a bracketed '(S ... (DT tok) ...)' string when `return_string` is True.

    BUG FIX: the batched branch recursed into an undefined/unrelated
    `get_ptb_format`; it now recurses into this function itself.
    """
    if batched:
        return [
            get_ptb_format_from_diora_tree(p, t, return_string, batched=False)
            for p, t in zip(parse, tokens)
        ]

    def recursive_add_tokens(tree):
        # Replace each leaf with the token at its linear (left-to-right) position.
        def helper(node, pos):
            if not isinstance(node, (tuple, list)):
                return (1, tokens[pos])
            size, children = 0, []
            for child in node:
                child_size, child_node = helper(child, pos + size)
                size += child_size
                children.append(child_node)
            return (size, tuple(children))

        _, new_tree = helper(tree, 0)
        return new_tree

    def recursive_string(tree):
        if isinstance(tree, str):
            return f'(DT {tree})'
        return '(S ' + ' '.join(recursive_string(child) for child in tree) + ')'

    parse = recursive_add_tokens(parse)
    if return_string:
        parse = recursive_string(parse)
    return parse
def main():
    """Bring up the map-collector ROS node and run the exploration pipeline."""
    node = rospy.init_node('map_collector')
    controller = Controller(node)
    controller.start()
    # Wrap the navigator with dataset-recording and visualization layers.
    navigator = Navigator(controller)
    navigator = make_file_dataset(navigator)
    navigator = visualize(navigator)
    navigator.explore()
def tensor_normalize(data):
    """Row-wise min-max normalization of a 2-D tensor to [0, 1].

    BUG FIX: the original shifted `data` in place (`data += ...`), mutating the
    caller's tensor as a side effect. The shift is now out-of-place; the shift
    cancels in the min-max formula, so returned values are unchanged.
    """
    row_min = data.min(dim=1)[0]
    # Out-of-place shift so every row is non-negative (caller's tensor untouched).
    shifted = data + torch.abs(row_min).unsqueeze(1).repeat(1, data.shape[1])
    d_min = shifted.min(dim=1)[0]
    d_max = shifted.max(dim=1)[0]
    span = d_max - d_min
    return (shifted - d_min.unsqueeze(1).repeat(1, data.shape[1])).true_divide(
        span.unsqueeze(1).repeat(1, data.shape[1])
    )
class OnlineEstimator():
    """Welford-style online mean/variance estimator, seeded with one sample."""

    def __init__(self, x_):
        # Seed accumulators with zeros shaped like the sample, then fold it in.
        self.n = 1
        self.mean = x_ * 0.0
        self.m2 = x_ * 0.0
        self._update(x_)

    def _update(self, x_):
        # Standard Welford update of the running mean and M2 accumulator.
        delta = x_ - self.mean
        self.mean += delta / self.n
        delta2 = x_ - self.mean
        self.m2 += delta * delta2

    def __call__(self, x_):
        """Fold in a new sample; return (mean, unbiased variance)."""
        self.n += 1
        self._update(x_)
        return (self.mean, self.m2 / (self.n - 1))
class Gen_50(nn.Module):
    """Generator for 50-host topologies: predicts a bounded delta over the state matrix."""

    def __init__(self):
        super(Gen_50, self).__init__()
        self.name = 'Gen_50'
        self.lr = 3e-05
        self.n_hosts = 50
        self.n_hidden = 64
        # Input = per-host embeddings plus the flattened host-by-host state.
        self.n = self.n_hosts * PROTO_DIM + self.n_hosts * self.n_hosts
        self.delta = nn.Sequential(
            nn.Linear(self.n, self.n_hidden),
            nn.LeakyReLU(True),
            nn.Linear(self.n_hidden, self.n_hosts * self.n_hosts),
            nn.Tanh(),
        )

    def forward(self, e, s):
        # Tanh output scaled to [-4, 4], applied as a residual on the state.
        del_s = 4 * self.delta(torch.cat((e.view(-1), s.view(-1))))
        return s + del_s.reshape(self.n_hosts, self.n_hosts)
def conv_block_Asym_Inception_WithIncreasedFeatMaps(in_dim, mid_dim, out_dim, kernel_size, padding, dilation=1):
    """Asymmetric (k x 1 then 1 x k) convolution block, each conv followed by BN + ReLU."""
    vertical = nn.Conv2d(
        in_dim, mid_dim,
        kernel_size=[kernel_size, 1],
        padding=tuple([padding * dilation, 0]),
        dilation=(dilation, 1),
    )
    # NOTE(review): dilation=(dilation, 1) on this 1 x k conv dilates the height
    # axis, whose kernel extent is 1 (no effect) — (1, dilation) may have been
    # intended; preserved as-is.
    horizontal = nn.Conv2d(
        mid_dim, out_dim,
        kernel_size=[1, kernel_size],
        padding=tuple([0, padding * dilation]),
        dilation=(dilation, 1),
    )
    return nn.Sequential(
        vertical, nn.BatchNorm2d(mid_dim), nn.ReLU(),
        horizontal, nn.BatchNorm2d(out_dim), nn.ReLU(),
    )
def output_seq_1():
    """Fixture generator: yield a mock causal-LM OutputSeq with minimal tensors/attributions."""

    class MockTokenizer():
        # Decoding stubs: OutputSeq only needs these methods to exist.
        def decode(self, i=None):
            return ''

        def convert_ids_to_tokens(self, i=None):
            return ['']

    output_1 = output.OutputSeq(**{
        'model_type': 'causal',
        'tokenizer': MockTokenizer(),
        'token_ids': [[352, 11, 352, 11, 362]],
        'n_input_tokens': 4,
        'output_text': ' 1, 1, 2',
        'tokens': [[' 1', ',', ' 1', ',', ' 2']],
        'decoder_hidden_states': [torch.rand(6, 1, 768)],
        'attention': None,
        'attribution': {
            'gradient': [np.array([0., 0., 0., 0.], dtype=np.float32)],
            'grad_x_input': [np.array([0., 0., 0., 0.], dtype=np.float32)],
        },
        'activations': [{'decoder': {
            'layer_0': torch.rand(1, 768),
            'layer_1': torch.rand(1, 768),
            'layer_2': torch.rand(1, 768),
        }}],
        'lm_head': torch.nn.Linear(768, 50257, bias=False),
        'config': {
            'embedding': 'embeddings.word_embeddings',
            'type': 'mlm',
            'activations': ['intermediate\\.dense'],
            'token_prefix': '',
            'partial_token_prefix': '',
            'tokenizer_config': {'token_prefix': '', 'partial_token_prefix': ''},
        },
        'device': 'cpu',
    })
    yield output_1
# BUG FIX: the dumped source started with a bare `.script` fragment — the
# remnant of a stripped `@torch.jit.script` decorator, restored below.
@torch.jit.script
def swish_jit_bwd(x, grad_output):
    """Backward pass for swish: grad * sigmoid(x) * (1 + x * (1 - sigmoid(x)))."""
    x_sigmoid = torch.sigmoid(x)
    return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))
def parse_args():
    """CLI arguments for the depth/normal-map visualizer."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--depth_map', help='path to depth map', type=str, required=True)
    parser.add_argument('-n', '--normal_map', help='path to normal map', type=str, required=True)
    parser.add_argument('--min_depth_percentile', help='minimum visualization depth percentile', type=float, default=5)
    parser.add_argument('--max_depth_percentile', help='maximum visualization depth percentile', type=float, default=95)
    return parser.parse_args()
def aggregate_scores_for_experiment(score_file, labels=None, metrics=Evaluator.default_metrics, nanmean=True, json_output_file=None, json_name='', json_description='', json_author='Fabian', json_task=''):
    """Load a (runs x labels x metrics) score array and build a summary json dict.

    Optionally writes the dict to `json_output_file`. `nanmean` is accepted for
    interface compatibility but unused here.
    """
    scores = np.load(score_file)
    scores_mean = scores.mean(0)
    if labels is None:
        # Default labels are stringified class indices.
        labels = [str(i) for i in range(scores.shape[1])]
    results = []
    results_mean = OrderedDict()
    for run_idx in range(scores.shape[0]):
        per_run = OrderedDict()
        results.append(per_run)
        for label_idx, label in enumerate(labels):
            per_run[label] = OrderedDict()
            results_mean[label] = OrderedDict()
            for metric_idx, metric in enumerate(metrics):
                per_run[label][metric] = float(scores[run_idx][label_idx][metric_idx])
                results_mean[label][metric] = float(scores_mean[label_idx][metric_idx])
    json_dict = OrderedDict()
    json_dict['name'] = json_name
    json_dict['description'] = json_description
    json_dict['timestamp'] = str(datetime.today())
    json_dict['task'] = json_task
    json_dict['author'] = json_author
    json_dict['results'] = {'all': results, 'mean': results_mean}
    # Short content hash as a quasi-unique id (includes the timestamp).
    json_dict['id'] = hashlib.md5(json.dumps(json_dict).encode('utf-8')).hexdigest()[:12]
    if json_output_file is not None:
        json_output_file = open(json_output_file, 'w')
        json.dump(json_dict, json_output_file, indent=4, separators=(',', ': '))
        json_output_file.close()
    return json_dict
class RLAus_Bottleneck(nn.Module):
    """ResNet bottleneck with a recurrent layer-aggregation (RLA) hidden state.

    forward(x, h) returns (out, h_next): the bottleneck path consumes [x, h]
    concatenated on channels, while the RLA branch folds the block output back
    into the hidden state.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, rla_channel=32,
                 SE=False, ECA_size=None, groups=1, base_width=64, dilation=1,
                 norm_layer=None, reduction=16):
        super(RLAus_Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # Main bottleneck path; conv1 also sees the rla_channel hidden state.
        self.conv1 = conv1x1(inplanes + rla_channel, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=False)
        self.downsample = downsample
        self.stride = stride
        # RLA branch: project the block output into the hidden state and recur.
        self.conv_out = conv1x1(planes * self.expansion, rla_channel)
        self.recurrent_conv = conv3x3(rla_channel, rla_channel)
        self.bn_rla = norm_layer(rla_channel)
        self.tanh_rla = nn.Tanh()
        self.averagePooling = None
        if (downsample is not None) and (stride != 1):
            # Keep h spatially aligned with the downsampled residual path.
            self.averagePooling = nn.AvgPool2d((2, 2), stride=(2, 2))
        self.se = SELayer(planes * self.expansion, reduction) if SE else None
        self.eca = eca_layer(planes * self.expansion, int(ECA_size)) if ECA_size != None else None

    def forward(self, x, h):
        identity = x
        x = torch.cat((x, h), dim=1)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Optional channel-attention modules.
        if self.se != None:
            out = self.se(out)
        if self.eca != None:
            out = self.eca(out)
        y = out
        if self.downsample is not None:
            identity = self.downsample(identity)
        if self.averagePooling is not None:
            h = self.averagePooling(h)
        # Fold this block's output into the recurrent hidden state.
        h = self.tanh_rla(self.bn_rla(h + self.conv_out(y)))
        h = self.recurrent_conv(h)
        out = self.relu(out + identity)
        return (out, h)
# Sentence-level claim/premise labeling for a spaCy doc.
# Pipeline: (1) try to add features/embeddings — NOTE(review): the bare
# `except:` silently swallows *all* errors (including KeyboardInterrupt) and
# only flips `spacy_var`; consider `except Exception:`; (2) run the transformer
# classifier over all sentences in one batch; (3) map 3-way predictions onto
# doc._.Labels (claim-or-premise vs none) and doc._.CLPR_Labels, using a
# sentence-length <= 3 heuristic to demote short "claims"; (4) optionally
# force at least one claim when default_claim is set. The `our_approach=False`
# branch is dead code kept for reference.
# NOTE(review): relies on module-level globals tokenizer, model, nlp, predict,
# predict_clpr, filter_feats, add_embeddings — confirm availability at call time.
def run_production(input_doc, default_claim=False): try: input_doc = filter_feats(input_doc, load=True) input_doc = add_embeddings(input_doc) spacy_var = True except: spacy_var = False our_approach = True if our_approach: doc = input_doc doc_sents = list(doc.sents) if (len(doc_sents) == 1): assert (default_claim == True) doc._.Labels = [1] doc._.CLPR_Labels = [1] return doc batch_sentences = [x.text for x in doc_sents] encoded_inputs = tokenizer(batch_sentences, padding=True, return_tensors='pt') classification_logits = model(**encoded_inputs)[0] softmax_output = torch.nn.functional.softmax(classification_logits) numps = classification_logits.detach().numpy() softs = scipy.special.softmax(numps, axis=(- 1)) preds = np.argmax(softs, axis=(- 1)) doc._.Labels = [(1 if (x != 2) else 0) for x in preds] labs = [] for (count, x) in enumerate(preds): if (x == 2): labs.append(2) elif (x == 1): labs.append(0) elif spacy_var: if (len(doc_sents[count]) <= 3): labs.append(0) else: labs.append(1) else: cur_len = len(nlp(doc_sents[count].text)) if (cur_len <= 3): labs.append(0) else: labs.append(1) if default_claim: if (1 not in labs): doc._.Labels[0] = 1 labs[0] = 1 doc._.CLPR_Labels = labs else: doc = predict(input_doc) clpr_feats = [] for (idx, l) in enumerate(input_doc._.Labels): if (l == 1): clpr_feats.append(input_doc._.Features[idx]) if (len(clpr_feats) < 2): input_doc._.Labels = [1 for s in input_doc._.Labels] clpr_feats = input_doc._.Features input_doc._.CLPR_Features = clpr_feats doc = predict_clpr(input_doc) return doc
def parse_args():
    """CLI arguments for the config printer."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    # DictAction merges `key=value` pairs into a dict on the parsed namespace.
    parser.add_argument('--options', nargs='+', action=DictAction, help='arguments in dict')
    return parser.parse_args()
def edge_matrix(labels, connectivity=1):
    """Sparse COO matrix whose (i, j) entries carry flattened pixel indices lying
    on boundaries between label i and label j (ordered via erosion/dilation)."""
    footprint = ndi.generate_binary_structure(labels.ndim, connectivity)
    eroded = ndi.grey_erosion(labels, footprint=footprint).ravel()
    dilated = ndi.grey_dilation(labels, footprint=footprint).ravel()
    flat = labels.ravel()
    # Pixels where erosion/dilation changed the label sit on a boundary.
    lo_idx = np.flatnonzero(eroded != flat)
    hi_idx = np.flatnonzero(dilated != flat)
    labels_small = np.concatenate((eroded[lo_idx], flat[hi_idx]))
    labels_large = np.concatenate((flat[lo_idx], dilated[hi_idx]))
    size = np.max(labels_large) + 1
    data = np.concatenate((lo_idx, hi_idx))
    return sparse.coo_matrix((data, (labels_small, labels_large)),
                             dtype=np.int_, shape=(size, size))
# Source-to-source pass over Cython-ish lines: inside a detected cdef-class
# region, rewrite `    def __init__(` methods to `    def __cinit__(`.
# NOTE(review): the comparison `line[:14] == ''` can never be true when
# len(line) > 13 — the 14-character string literal that marked a cclass header
# appears to have been lost in extraction (likely something starting with
# 'cdef class '); as written, in_cclass is never set True and this pass is a
# no-op. Recover the literal from the original repository before relying on it.
# NOTE(review): `no_optimization` is accepted but unused.
def __init__2__cinit__(lines, no_optimization): new_lines = [] in_cclass = False for line in lines: if ((len(line) > 13) and (line[:14] == '')): in_cclass = True elif ((line[0] not in ' \n') and (not ((len(line) > 4) and (line[:5] == 'class')))): in_cclass = False if (in_cclass and (len(line) > 16) and (line[:17] == ' def __init__(')): line = (' def __cinit__(' + line[17:]) new_lines.append(line) return new_lines
def main(args):
    """Convert a .trs transcription plus its audio into STM segments and print them."""
    converter = Trs2Stm(args.trs, args.audio)
    converter.print_segments()
    return 0
# Unit tests for the Kraus quantum-channel representation: construction (from
# unitaries, Kraus pairs, circuits), CPTP checks, conjugate/transpose/adjoint,
# compose (incl. front=True), expand/tensor, power, add/subtract, scalar
# multiply, and negation, each verified by evolving concrete states.
# NOTE(review): in test_compose below, `chan = (chan1 chan2)` is missing its
# operator — lost in extraction; given the era's operator overloads (`*` is
# scalar multiply in test_multiply) it was almost certainly `chan1 @ chan2`
# (compose). Left byte-identical here; confirm against the original repo.
class TestKraus(ChannelTestCase): def test_init(self): chan = Kraus(self.UI) self.assertAllClose(chan.data, [self.UI]) self.assertEqual(chan.dim, (2, 2)) chan = Kraus(self.depol_kraus(0.5)) self.assertAllClose(chan.data, self.depol_kraus(0.5)) self.assertEqual(chan.dim, (2, 2)) (kraus_l, kraus_r) = ([self.UI, self.UX], [self.UY, self.UZ]) chan = Kraus((kraus_l, kraus_r)) self.assertAllClose(chan.data, (kraus_l, kraus_r)) self.assertEqual(chan.dim, (2, 2)) chan = Kraus((kraus_l, kraus_l)) self.assertAllClose(chan.data, kraus_l) self.assertEqual(chan.dim, (2, 2)) kraus = [np.zeros((4, 2))] chan = Kraus(kraus) self.assertAllClose(chan.data, kraus) self.assertEqual(chan.dim, (2, 4)) self.assertRaises(QiskitError, Kraus, kraus, input_dims=4, output_dims=4) def test_circuit_init(self): (circuit, target) = self.simple_circuit_no_measure() op = Kraus(circuit) target = Kraus(target) self.assertEqual(op, target) def test_circuit_init_except(self): circuit = self.simple_circuit_with_measure() self.assertRaises(QiskitError, Kraus, circuit) def test_equal(self): kraus = [self.rand_matrix(2, 2) for _ in range(2)] self.assertEqual(Kraus(kraus), Kraus(kraus)) def test_copy(self): mat = np.eye(4) orig = Kraus(mat) cpy = orig.copy() cpy._data[0][0][(0, 0)] = 0.0 self.assertFalse((cpy == orig)) def test_evolve(self): input_psi = [0, 1] input_rho = [[0, 0], [0, 1]] chan = Kraus(self.UI) target_psi = np.array([0, 1]) self.assertAllClose(chan._evolve(input_psi), target_psi) self.assertAllClose(chan._evolve(np.array(input_psi)), target_psi) target_rho = np.array([[0, 0], [0, 1]]) self.assertAllClose(chan._evolve(input_rho), target_rho) self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho) mat = (np.array([[1, 1], [1, (- 1)]]) / np.sqrt(2)) chan = Kraus(mat) target_psi = (np.array([1, (- 1)]) / np.sqrt(2)) self.assertAllClose(chan._evolve(input_psi), target_psi) self.assertAllClose(chan._evolve(np.array(input_psi)), target_psi) target_rho = (np.array([[1, (- 1)], [(- 1), 1]]) 
/ 2) self.assertAllClose(chan._evolve(input_rho), target_rho) self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho) chan = Kraus(self.depol_kraus(1)) target_rho = (np.eye(2) / 2) self.assertAllClose(chan._evolve(input_psi), target_rho) self.assertAllClose(chan._evolve(np.array(input_psi)), target_rho) self.assertAllClose(chan._evolve(input_rho), target_rho) self.assertAllClose(chan._evolve(np.array(input_rho)), target_rho) def test_is_cptp(self): self.assertTrue(Kraus(self.depol_kraus(0.5)).is_cptp()) self.assertTrue(Kraus(self.UX).is_cptp()) self.assertFalse(Kraus(([self.UI], [self.UX])).is_cptp()) self.assertFalse(Kraus([self.UI, self.UX]).is_cptp()) def test_conjugate(self): (kraus_l, kraus_r) = (self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)) targ = Kraus([np.conjugate(k) for k in kraus_l]) chan1 = Kraus(kraus_l) chan = chan1.conjugate() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (2, 4)) targ = Kraus(([np.conjugate(k) for k in kraus_l], [np.conjugate(k) for k in kraus_r])) chan1 = Kraus((kraus_l, kraus_r)) chan = chan1.conjugate() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (2, 4)) def test_transpose(self): (kraus_l, kraus_r) = (self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)) targ = Kraus([np.transpose(k) for k in kraus_l]) chan1 = Kraus(kraus_l) chan = chan1.transpose() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (4, 2)) targ = Kraus(([np.transpose(k) for k in kraus_l], [np.transpose(k) for k in kraus_r])) chan1 = Kraus((kraus_l, kraus_r)) chan = chan1.transpose() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (4, 2)) def test_adjoint(self): (kraus_l, kraus_r) = (self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)) targ = Kraus([np.transpose(k).conj() for k in kraus_l]) chan1 = Kraus(kraus_l) chan = chan1.adjoint() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (4, 2)) targ = Kraus(([np.transpose(k).conj() for k in kraus_l], [np.transpose(k).conj() for k in kraus_r])) chan1 = 
Kraus((kraus_l, kraus_r)) chan = chan1.adjoint() self.assertEqual(chan, targ) self.assertEqual(chan.dim, (4, 2)) def test_compose_except(self): self.assertRaises(QiskitError, Kraus(np.eye(2)).compose, Kraus(np.eye(4))) self.assertRaises(QiskitError, Kraus(np.eye(2)).compose, 2) def test_compose(self): rho = self.rand_rho(2) chan1 = Kraus(self.UX) chan2 = Kraus(self.UY) chan = chan1.compose(chan2) targ = Kraus(self.UZ)._evolve(rho) self.assertAllClose(chan._evolve(rho), targ) chan1 = Kraus(self.depol_kraus(0.5)) chan = chan1.compose(chan1) targ = Kraus(self.depol_kraus(0.75))._evolve(rho) self.assertAllClose(chan._evolve(rho), targ) (kraus1, kraus2) = (self.rand_kraus(2, 4, 4), self.rand_kraus(4, 2, 4)) chan1 = Kraus(kraus1) chan2 = Kraus(kraus2) targ = chan2._evolve(chan1._evolve(rho)) chan = chan1.compose(chan2) self.assertEqual(chan.dim, (2, 2)) self.assertAllClose(chan._evolve(rho), targ) chan = (chan1 chan2) self.assertEqual(chan.dim, (2, 2)) self.assertAllClose(chan._evolve(rho), targ) def test_compose_front(self): rho = self.rand_rho(2) chan1 = Kraus(self.UX) chan2 = Kraus(self.UY) chan = chan1.compose(chan2, front=True) targ = Kraus(self.UZ)._evolve(rho) self.assertAllClose(chan._evolve(rho), targ) chan1 = Kraus(self.depol_kraus(0.5)) chan = chan1.compose(chan1, front=True) targ = Kraus(self.depol_kraus(0.75))._evolve(rho) self.assertAllClose(chan._evolve(rho), targ) (kraus1, kraus2) = (self.rand_kraus(2, 4, 4), self.rand_kraus(4, 2, 4)) chan1 = Kraus(kraus1) chan2 = Kraus(kraus2) targ = chan2._evolve(chan1._evolve(rho)) chan = chan2.compose(chan1, front=True) self.assertEqual(chan.dim, (2, 2)) self.assertAllClose(chan._evolve(rho), targ) def test_expand(self): (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1])) rho_init = np.kron(rho0, rho0) chan1 = Kraus(self.UI) chan2 = Kraus(self.UX) chan = chan1.expand(chan2) rho_targ = np.kron(rho1, rho0) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) chan = chan2.expand(chan1) 
rho_targ = np.kron(rho0, rho1) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) chan_dep = Kraus(self.depol_kraus(1)) chan = chan_dep.expand(chan_dep) rho_targ = (np.diag([1, 1, 1, 1]) / 4) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) def test_tensor(self): (rho0, rho1) = (np.diag([1, 0]), np.diag([0, 1])) rho_init = np.kron(rho0, rho0) chan1 = Kraus(self.UI) chan2 = Kraus(self.UX) chan = chan2.tensor(chan1) rho_targ = np.kron(rho1, rho0) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) chan = chan1.tensor(chan2) rho_targ = np.kron(rho0, rho1) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) chan_dep = Kraus(self.depol_kraus(1)) chan = chan_dep.tensor(chan_dep) rho_targ = (np.diag([1, 1, 1, 1]) / 4) self.assertEqual(chan.dim, (4, 4)) self.assertAllClose(chan._evolve(rho_init), rho_targ) def test_power(self): rho = np.diag([1, 0]) p_id = 0.9 chan = Kraus(self.depol_kraus((1 - p_id))) p_id3 = (p_id ** 3) chan3 = chan.power(3) targ3a = chan._evolve(chan._evolve(chan._evolve(rho))) self.assertAllClose(chan3._evolve(rho), targ3a) targ3b = Kraus(self.depol_kraus((1 - p_id3)))._evolve(rho) self.assertAllClose(chan3._evolve(rho), targ3b) def test_power_except(self): chan = Kraus(self.depol_kraus(0.9)) self.assertRaises(QiskitError, chan.power, 0.5) def test_add(self): rho = self.rand_rho(2) (kraus1, kraus2) = (self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)) chan1 = Kraus(kraus1) chan2 = Kraus(kraus2) targ = (chan1._evolve(rho) + chan2._evolve(rho)) chan = chan1.add(chan2) self.assertAllClose(chan._evolve(rho), targ) chan = (chan1 + chan2) self.assertAllClose(chan._evolve(rho), targ) chan = Kraus((kraus1, kraus2)) targ = (2 * chan._evolve(rho)) chan = chan.add(chan) self.assertAllClose(chan._evolve(rho), targ) def test_subtract(self): rho = self.rand_rho(2) (kraus1, kraus2) = (self.rand_kraus(2, 4, 4), 
self.rand_kraus(2, 4, 4)) chan1 = Kraus(kraus1) chan2 = Kraus(kraus2) targ = (chan1._evolve(rho) - chan2._evolve(rho)) chan = chan1.subtract(chan2) self.assertAllClose(chan._evolve(rho), targ) chan = (chan1 - chan2) self.assertAllClose(chan._evolve(rho), targ) chan = Kraus((kraus1, kraus2)) targ = (0 * chan._evolve(rho)) chan = chan.subtract(chan) self.assertAllClose(chan._evolve(rho), targ) def test_multiply(self): rho = self.rand_rho(2) val = 0.5 (kraus1, kraus2) = (self.rand_kraus(2, 4, 4), self.rand_kraus(2, 4, 4)) chan1 = Kraus(kraus1) targ = (val * chan1._evolve(rho)) chan = chan1.multiply(val) self.assertAllClose(chan._evolve(rho), targ) chan = (val * chan1) self.assertAllClose(chan._evolve(rho), targ) chan = (chan1 * val) self.assertAllClose(chan._evolve(rho), targ) chan2 = Kraus((kraus1, kraus2)) targ = (val * chan2._evolve(rho)) chan = chan2.multiply(val) self.assertAllClose(chan._evolve(rho), targ) chan = (val * chan2) self.assertAllClose(chan._evolve(rho), targ) chan = (chan2 * val) self.assertAllClose(chan._evolve(rho), targ) def test_multiply_except(self): chan = Kraus(self.depol_kraus(1)) self.assertRaises(QiskitError, chan.multiply, 's') self.assertRaises(QiskitError, chan.multiply, chan) def test_negate(self): rho = np.diag([1, 0]) targ = np.diag([(- 0.5), (- 0.5)]) chan = (- Kraus(self.depol_kraus(1))) self.assertAllClose(chan._evolve(rho), targ)
def test_chunk_text_preprocessor_one_go():
    """A single-chunk ChunkTextPreprocessor must match the all-at-once TextPreprocessor."""
    df = pd.read_csv(os.path.join(data_folder, fname))
    full = TextPreprocessor(text_col=text_col, n_cpus=1, maxlen=10, max_vocab=50)
    X_text = full.fit_transform(df)
    chunked = ChunkTextPreprocessor(text_col=text_col, n_chunks=1, n_cpus=1, maxlen=10, max_vocab=50)
    chunked.partial_fit(df)
    X_text_chunk = chunked.transform(df)
    assert (X_text == X_text_chunk).all()
def make_plot(list_of_csv):
    """Overlay the relative-optimality-gap curve of every csv in `list_of_csv`."""
    palette = px.colors.qualitative.Dark24
    hovertemplate_prediction = '<b>%{meta}</b><br>x=%{x}<br>y=%{y}<extra></extra>'
    fig = go.Figure()
    for index, path in enumerate(list_of_csv):
        frame = pd.read_csv(path)
        # Timestamps are in milliseconds; each trace is keyed by its file stem.
        fig.add_trace(go.Scatter(
            x=frame['timestamp'] / 1000,
            y=frame['Relative optimality gap'],
            mode='lines',
            line_dash='dash',
            line_color=palette[index % len(palette)],
            showlegend=False,
            meta=path.split(os.path.sep)[-1].rsplit('.', 1)[0],
            hovertemplate=hovertemplate_prediction,
            visible=True,
        ))
    fig.update_layout(
        showlegend=True,
        xaxis_tickfont_size=LABEL_FONT_SIZE - 4,
        yaxis_tickfont_size=LABEL_FONT_SIZE - 4,
        yaxis_type='linear',
        yaxis_title='Relative optimality gap',
        xaxis_title='Time (s)',
        hovermode='closest',
        height=FIRST_LINE_HEIGHT,
        margin=dict(t=0, b=0.02),
        legend=dict(x=0.05, y=0.8, font_size=LABEL_FONT_SIZE),
        yaxis=dict(range=[-6, 3]),
        width=1500,
    )
    return fig
# distutils build_ext subclass that compiles .cu sources by swapping the
# compiler/linker executables for nvcc and translating MSVC-style flags to
# nvcc equivalents (plus stripping -arch pairs on macOS shared links).
# NOTE(review): in the msvc branch, `del cmd[idx]` inside
# `for (idx, c) in enumerate(cmd)` mutates the list being iterated — elements
# after a deletion shift left and may be skipped. Long-standing upstream
# behavior; flagged but left unchanged.
# NOTE(review): relies on module-level globals PATH, find_executable and spawn.
class CUDA_build_ext(build_ext): def build_extensions(self): self.compiler.src_extensions.append('.cu') self.compiler.set_executable('compiler_so', 'nvcc') self.compiler.set_executable('linker_so', 'nvcc --shared') if hasattr(self.compiler, '_c_extensions'): self.compiler._c_extensions.append('.cu') self.compiler.spawn = self.spawn build_ext.build_extensions(self) def spawn(self, cmd, search_path=1, verbose=0, dry_run=0): if ((sys.platform == 'darwin') and (len(cmd) >= 2) and (cmd[0] == 'nvcc') and (cmd[1] == '--shared') and (cmd.count('-arch') > 0)): while True: try: index = cmd.index('-arch') del cmd[index:(index + 2)] except ValueError: break elif (self.compiler.compiler_type == 'msvc'): cmd[:1] = ['nvcc', '--compiler-bindir', (os.path.dirname(find_executable('cl.exe', PATH)) or cmd[0])] for (idx, c) in enumerate(cmd): if (c == '/c'): cmd[idx] = '-c' elif (c == '/DLL'): cmd[idx] = '--shared' elif ('-fPIC' in c): del cmd[idx] elif c.startswith('/Tc'): cmd[idx] = c[3:] elif c.startswith('/Fo'): cmd[idx:(idx + 1)] = ['-o', c[3:]] elif c.startswith('/LIBPATH:'): cmd[idx] = ('-L' + c[9:]) elif c.startswith('/OUT:'): cmd[idx:(idx + 1)] = ['-o', c[5:]] elif c.startswith('/EXPORT:'): del cmd[idx] elif (c == 'cublas.lib'): cmd[idx] = '-lcublas' if ('--shared' in cmd): pass_on = '--linker-options=' cmd.append('/NODEFAULTLIB:libcmt.lib') else: pass_on = '--compiler-options=' cmd = ([c for c in cmd if (c[0] != '/')] + [(pass_on + ','.join((c for c in cmd if (c[0] == '/'))))]) spawn(cmd, search_path, verbose, dry_run)
def SGD(model_param, lr=0.0001, momentum=0.9, dampening=0, weight_decay=0, nesterov=False):
    """Build a torch.optim.SGD optimizer over `model_param`.

    BUG FIX: the `nesterov` argument was accepted but ignored — the call
    hard-coded nesterov=False. It is now forwarded. (torch requires
    momentum > 0 and dampening == 0 when nesterov is True.)
    """
    return torch.optim.SGD(
        model_param,
        lr=lr,
        momentum=momentum,
        dampening=dampening,
        weight_decay=weight_decay,
        nesterov=nesterov,
    )
def active_session(delay=DELAY, interval=INTERVAL):
    """Keep a remote session alive with periodic keep-alive requests via SIGALRM.

    Generator intended for contextmanager-style use: fetch an auth token,
    install a SIGALRM handler that pings using that token, arm an interval
    timer, yield control to the caller, and restore the previous handler and
    timer on exit.

    Args:
        delay: seconds before the first ping (floored at MIN_DELAY).
        interval: seconds between subsequent pings (floored at MIN_INTERVAL).
    """
    token = requests.request('GET', TOKEN_URL, headers=TOKEN_HEADERS).text
    headers = {'Authorization': ('STAR ' + token)}
    # Enforce lower bounds so the keep-alive endpoint is not hammered.
    delay = max(delay, MIN_DELAY)
    interval = max(interval, MIN_INTERVAL)
    original_handler = signal.getsignal(signal.SIGALRM)
    try:
        signal.signal(signal.SIGALRM, _request_handler(headers))
        # First fire after *delay* seconds, then every *interval* seconds.
        signal.setitimer(signal.ITIMER_REAL, delay, interval)
        (yield)
    finally:
        # Always disarm the timer and restore the prior SIGALRM handler.
        signal.signal(signal.SIGALRM, original_handler)
        signal.setitimer(signal.ITIMER_REAL, 0)
def listdir_nohidden(path, sort=False):
    """List the entries of *path* whose names do not start with a dot.

    Returns names in os.listdir order, or alphabetically when *sort* is True.
    """
    visible = [name for name in os.listdir(path) if not name.startswith('.')]
    if sort:
        visible.sort()
    return visible
class Flip(Base):
    """Augmentation that reverses an image along one fixed axis."""

    def __init__(self, axis=0):
        self.axis = axis

    def tf(self, img, k=0):
        # *k* is accepted for interface compatibility but unused here.
        return np.flip(img, self.axis)

    def __str__(self):
        return 'Flip(axis={})'.format(self.axis)
class VAE_GST(nn.Module):
    """Variational global-style-token module.

    A reference encoder produces an utterance embedding; two linear heads map
    it to the posterior mean and log-variance, a latent z is sampled via the
    reparameterization trick, and a final projection yields the style embedding.
    """

    def __init__(self, hparams):
        super().__init__()
        self.ref_encoder = ReferenceEncoder(hparams)
        self.fc1 = nn.Linear(hparams.ref_enc_gru_size, hparams.z_latent_dim)
        self.fc2 = nn.Linear(hparams.ref_enc_gru_size, hparams.z_latent_dim)
        self.fc3 = nn.Linear(hparams.z_latent_dim, hparams.E)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) while training; return mu deterministically at eval."""
        if not self.training:
            return mu
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add_(mu)

    def forward(self, inputs):
        encoded = self.ref_encoder(inputs)
        mu = self.fc1(encoded)
        logvar = self.fc2(encoded)
        z = self.reparameterize(mu, logvar)
        style_embed = self.fc3(z)
        return (style_embed, mu, logvar, z)
def download_czang16(download_to, username=None):
    """Download the 10 CzEng 1.6 plaintext tar shards into *download_to* via wget.

    Shards already present on disk are skipped, so the download is resumable.
    Raises ValueError if anything remains to download but no username was given.

    Fix: the wget command f-string in the original was truncated (unterminated
    literal, download URL lost) and the skip message printed a literal
    '(unknown)'. NOTE(review): the URL below is reconstructed from the CzEng
    1.6 distribution layout -- confirm against the official download page.
    """
    url_tpl = 'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data.plaintext-format.{0}.tar'
    wgets = [
        f'wget --user={username} --password=czeng -P {download_to} {url_tpl.format(i)}'
        for i in range(10)
    ]
    cmds = []
    for i, cmd in enumerate(wgets):
        filename = f'{download_to}/data-plaintext-format.{i}.tar'
        if os.path.exists(filename):
            print(f'{filename} has already been downloaded; so skip')
            continue
        cmds.append(cmd)
    if cmds and username is None:
        raise ValueError('No czeng username is given; please register at to obtain username to download')
    for cmd in cmds:
        call(cmd)
    print('done with downloading czeng1.6')
def append_test(file, suites):
    """Recursively write each test's escaped full title as a LaTeX \\item line.

    Walks the suite tree depth-first: a suite's own tests first, then its
    nested suites.
    """
    for suite in suites:
        for case in suite['tests']:
            file.write('\\item ' + tex_escape(case['fullTitle']) + '\n')
        append_test(file, suite['suites'])
def codex(prompt, top_p=1, temperature=0.0, n=1):
    """Query the Codex completion endpoint, retrying until a response arrives.

    Rotates API keys via key_generator on each attempt and sleeps 5s between
    retries. An InvalidRequestError (e.g. prompt too long) cannot succeed on
    retry, so it prints the prompt and aborts.

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``except Exception``.
    """
    response = None
    received = False
    while not received:
        try:
            openai.api_key = key_generator.get_key()
            response = openai.Completion.create(engine=engine, prompt=prompt, max_tokens=128,
                                                logprobs=1, top_p=top_p, n=n,
                                                temperature=temperature, stream=False,
                                                stop=['\n', '<|endoftext|>'])
            received = True
        except Exception:
            error = sys.exc_info()[0]
            if error == openai.error.InvalidRequestError:
                # Unrecoverable: the request itself is malformed.
                print(f'''InvalidRequestError
Prompt passed in:

{prompt}

''')
                assert False
            print('API error:', error)
            time.sleep(5)
    return response
def kaiming_uniform_in_(tensor, a=0, mode='fan_in', scale=1.0, nonlinearity='leaky_relu'):
    """Kaiming-uniform initialization with the fan multiplied by *scale*.

    Fills *tensor* in place with U(-bound, bound) where
    bound = sqrt(3) * gain / sqrt(fan * scale), and returns it.
    """
    fan = nn.init._calculate_correct_fan(tensor, mode) * scale
    gain = nn.init.calculate_gain(nonlinearity, a)
    limit = math.sqrt(3.0) * gain / math.sqrt(fan)
    with torch.no_grad():
        return tensor.uniform_(-limit, limit)
def _calculate_fan_in(tensor): dimension = tensor.ndimension() if (dimension < 2): raise ValueError('Fan in can not be computed for tensor with less than 2 dimensions') fan_in = tensor.size(1) if ((dimension > 2) and (tensor.dim() > 2)): fan_in *= tensor[0][0].numel() return fan_in
class ReliabilityMetricsPowerOutage(PowerOutage):
    """Power-outage signal generator parameterized by SAIFI/CAIDI reliability metrics.

    Fix: the @property / @<name>.setter decorators were stripped in the
    original (leaving duplicate method names and an orphan '_time_steps.setter'
    fragment), so every accessor silently shadowed the previous one; the
    decorators are restored here.
    """

    def __init__(self, saifi: float = None, caidi: float = None, start_time_steps: List[int] = None, **kwargs):
        super().__init__(**kwargs)
        self.saifi = saifi
        self.caidi = caidi
        self.start_time_steps = start_time_steps

    @property
    def saifi(self) -> float:
        # Average interruptions per customer per year.
        return self.__saifi

    @property
    def caidi(self) -> float:
        # Average interruption duration in minutes.
        return self.__caidi

    @property
    def start_time_steps(self) -> List[int]:
        # Candidate time steps (within a day) at which an outage may begin.
        return self.__start_time_steps

    @saifi.setter
    def saifi(self, value: float):
        # Default 1.436 when unset -- NOTE(review): source of this constant is
        # not visible here; confirm it matches the intended reference statistic.
        self.__saifi = 1.436 if value is None else value

    @caidi.setter
    def caidi(self, value: float):
        # Default 331.2 minutes when unset -- same caveat as saifi.
        self.__caidi = 331.2 if value is None else value

    @start_time_steps.setter
    def start_time_steps(self, value: List[float]):
        self.__start_time_steps = value

    def get_signals(self, time_steps: int, seconds_per_time_step: float, **kwargs) -> np.ndarray:
        """Return a 0/1 outage indicator array of length *time_steps*.

        Outage days are Bernoulli draws with daily probability saifi/365; each
        outage starts at a sampled time step of its day and lasts an
        Exponential(caidi)-minutes duration converted to time steps.
        """
        nprs = np.random.RandomState(self.random_seed)
        days_per_year = 365.0
        seconds_per_day = 86400.0
        seconds_per_minute = 60.0
        time_steps_per_day = seconds_per_day / seconds_per_time_step
        time_steps_per_minute = seconds_per_minute / seconds_per_time_step
        day_count = time_steps / time_steps_per_day
        daily_outage_probability = self.saifi / days_per_year
        outage_days = nprs.binomial(n=1, p=daily_outage_probability, size=int(day_count))
        # Indices of outage days (note: an outage on day 0 cannot appear here).
        outage_day_ixs = outage_days * np.arange(day_count)
        outage_day_ixs = outage_day_ixs[outage_day_ixs != 0]
        outage_day_count = outage_days[outage_days == 1].shape[0]
        start_time_steps = (list(range(int(time_steps_per_day))) if self.start_time_steps is None else self.start_time_steps)
        outage_start_time_steps = nprs.choice(start_time_steps, size=outage_day_count)
        outage_durations = nprs.exponential(scale=self.caidi, size=outage_day_count)
        outage_duration_time_steps = outage_durations * time_steps_per_minute
        signals = np.zeros(time_steps, dtype=int)
        for (i, j, k) in zip(outage_day_ixs, outage_start_time_steps, outage_duration_time_steps):
            start_ix = (i * time_steps_per_day) + j
            end_ix = start_ix + k
            start_ix = int(start_ix)
            end_ix = int(end_ix)
            signals[start_ix:end_ix] = 1
        return signals
def _dump_entity_embeddings(predictor: BertPredictor):
    """Encode all entities in SHARD_SIZE batches and save one tensor per shard.

    Shards whose output file already exists are skipped, making the dump
    resumable after interruption.
    """
    for shard_start in range(0, len(entity_dict), SHARD_SIZE):
        shard_id = shard_start // SHARD_SIZE
        shard_path = _get_shard_path(shard_id=shard_id)
        if os.path.exists(shard_path):
            logger.info('{} already exists'.format(shard_path))
            continue
        shard_end = shard_start + SHARD_SIZE
        logger.info('shard_id={}, from {} to {}'.format(shard_id, shard_start, shard_end))
        entity_batch = entity_dict.entity_exs[shard_start:shard_end]
        embeddings = predictor.predict_by_entities(entity_batch)
        torch.save(embeddings, _get_shard_path(shard_id=shard_id))
        logger.info('done for shard_id={}'.format(shard_id))
class FurthestPointSamplingWithDist(Function):
    """Autograd wrapper for the CUDA furthest-point-sampling op driven by a
    precomputed pairwise-distance matrix. The sampled indices carry no gradient.

    Fix: ``forward``/``backward`` lost their @staticmethod decorators;
    torch.autograd.Function requires both to be static methods.
    """

    @staticmethod
    def forward(ctx, points_dist: torch.Tensor, num_points: int) -> torch.Tensor:
        """Sample *num_points* indices per batch from (B, N, N) distances.

        Returns an int32 tensor of shape (B, num_points).
        """
        assert points_dist.is_contiguous()
        (B, N, _) = points_dist.size()
        output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
        # Per-point running-distance scratch buffer for the CUDA kernel.
        # NOTE(review): comparable implementations fill this with a large
        # sentinel (e.g. 1e10) rather than 0 -- confirm .0 is intended.
        temp = points_dist.new_zeros([B, N]).fill_(.0)
        furthest_point_sample_ext.furthest_point_sampling_with_dist_wrapper(B, N, num_points, points_dist, temp, output)
        ctx.mark_non_differentiable(output)
        return output

    @staticmethod
    def backward(xyz, a=None):
        # Index selection is not differentiable.
        return (None, None)
def write_html(filename, iterations, image_save_iterations, image_directory, all_size=1536):
    """Write an auto-refreshing HTML page showing the current validation image
    followed by every saved checkpoint image, newest first.
    """
    with open(filename, 'w') as page:
        page.write(('\n <!DOCTYPE html>\n <html>\n <head>\n <title>Experiment name = %s</title>\n <meta content="30">\n </head>\n <body>\n ' % os.path.basename(filename)))
        page.write('<h3>current</h3>')
        page_row = write_one_row_html
        page_row(page, iterations, ('%s/val_current.jpg' % image_directory), all_size)
        # Walk backwards over iterations, emitting a row for each saved snapshot.
        for step in range(iterations, image_save_iterations - 1, -1):
            if step % image_save_iterations == 0:
                page_row(page, step, ('%s/val_%08d.jpg' % (image_directory, step)), all_size)
        page.write('</body></html>')
class CrossEn(nn.Module):
    """Cross-entropy over a similarity matrix.

    Applies log-softmax along the last dim, gathers the columns named by
    *target*, and returns the mean negative log-probability.
    """

    def __init__(self):
        super(CrossEn, self).__init__()

    def forward(self, sim_matrix, target):
        log_probs = F.log_softmax(sim_matrix, dim=-1)
        selected = torch.index_select(log_probs, -1, target)
        return (-selected).mean()
def get_network(weights):
    """Resolve an RDN weights key into its network configuration.

    Returns (arch_params, c_dim, kernel_size, upscaling, url, name); raises
    ValueError listing the available keys when *weights* is unknown.
    """
    if weights not in WEIGHTS_URLS.keys():
        raise ValueError('Available RDN network weights: {}'.format(list(WEIGHTS_URLS.keys())))
    entry = WEIGHTS_URLS[weights]
    c_dim = 3
    kernel_size = 3
    upscaling = 'ups'
    return (entry['arch_params'], c_dim, kernel_size, upscaling, entry['url'], entry['name'])
class ImgNormalize(nn.Conv2d):
    """Frozen 1x1 convolution that (de)normalizes images per channel.

    Computes y_c = (x_c + sign * pixel_range * mean_c) / std_c, i.e. the usual
    mean/std normalization with sign=-1 and its inverse direction with sign=+1.
    """

    def __init__(self, pixel_range, img_mean, img_std, sign=-1):
        assert len(img_mean) == len(img_std)
        channels = len(img_mean)
        super().__init__(channels, channels, kernel_size=1)
        std_vec = torch.Tensor(img_std)
        # Diagonal weight = identity scaled by 1/std per output channel.
        identity = torch.eye(channels).view(channels, channels, 1, 1)
        self.weight.data = identity
        self.weight.data.div_(std_vec.view(channels, 1, 1, 1))
        # Bias shifts by sign * range * mean, also divided by std.
        self.bias.data = sign * pixel_range * torch.Tensor(img_mean)
        self.bias.data.div_(std_vec)
        # The layer is a fixed transform, never trained.
        self.weight.requires_grad = False
        self.bias.requires_grad = False
def image_clean(cleaning_set, imagefile, basedir):
    """Dispatch one image-cleaning step by name.

    Unknown *cleaning_set* values are silently ignored, matching the original
    if/elif chain.
    """
    handlers = {
        'clean_greyscale': lambda: clean_greyscale.clean_greyscale(imagefile),
        'clean_extractfaces': lambda: clean_extractfaces.clean_extractfaces(imagefile, basedir),
        'clean_jpg2png': lambda: clean_jpg2png.clean_jpg2png(imagefile),
    }
    handler = handlers.get(cleaning_set)
    if handler is not None:
        handler()
def ae_pointnet(args, num_points=2048, global_feat=True, data=None):
    """Construct an AE_pointnet autoencoder, optionally restoring saved weights.

    When *data* is given, its 'state_dict_encoder'/'state_dict_decoder' entries
    are loaded into the corresponding submodules.
    """
    net = AE_pointnet(args, num_points, global_feat)
    if data is not None:
        net.encoder.load_state_dict(data['state_dict_encoder'])
        net.decoder.load_state_dict(data['state_dict_decoder'])
    return net
def write_checkpoints_json(model_name_or_path, local_rank, checkpoints_json, token=None):
    """Write a DeepSpeed checkpoints manifest on rank 0.

    Returns True when any checkpoint files were found (on every rank); only
    rank 0 actually writes the JSON manifest.
    """
    checkpoint_files = get_checkpoint_files(model_name_or_path, local_rank, token)
    found = len(checkpoint_files) != 0
    if local_rank == 0 and found:
        manifest = {'type': 'ds_model', 'checkpoints': checkpoint_files, 'version': 1.0}
        with open(checkpoints_json, 'w') as fp:
            json.dump(manifest, fp)
    return found
def main():
    """CLI entry point for the NoBox attack.

    Parses the (large) experiment flag set, builds data loaders and the
    source/critic models, reports clean transfer error, then either loads a
    saved attacker (--command eval) or trains the generator-based attacker
    (--command train).

    NOTE(review): this function was recovered from a whitespace-collapsed
    source; the nesting of eval_fn and the final command dispatch was
    reconstructed and should be confirmed against upstream.
    """
    parser = argparse.ArgumentParser(description='NoBox')
    # --- generator / flow hyperparameters ---
    parser.add_argument('--gp_coeff', type=float, default=0.0, help='coeff for the gradient penalty')
    parser.add_argument('--latent_dim', type=int, default=20, metavar='N', help='Latent dim for VAE')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR', help='learning rate for the generator (default: 0.01)')
    parser.add_argument('--lr_model', type=float, default=None, metavar='LR', help='learning rate for the model (default: None -> default to args.lr)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M', help='optimizer momentum (default: 0.5)')
    parser.add_argument('--extragradient', default=False, action='store_true', help='Use extragadient algorithm')
    parser.add_argument('--latent_size', type=int, default=50, metavar='N', help='Size of latent distribution (default: 50)')
    parser.add_argument('--flow_model', default=None, const='soft', nargs='?', choices=[None, 'RealNVP', 'planar', 'radial'], help='Type of Normalizing Flow (default: %(default)s)')
    parser.add_argument('--flow_layer_type', type=str, default='Linear', help='Which type of layer to use ---i.e. GRevNet or Linear')
    parser.add_argument('--flow_hidden_size', type=int, default=128, help='Hidden layer size for Flows.')
    parser.add_argument('--n_blocks', type=int, default=2, help='Number of blocks to stack in flow')
    parser.add_argument('--flow_hidden', type=int, default=1, help='Number of hidden layers in each Flow.')
    # --- training / critic options ---
    parser.add_argument('--eval_set', default='test', help='Evaluate model on test or validation set.')
    parser.add_argument('--train_with_critic_path', type=str, default=None, help='Train generator with saved critic model')
    parser.add_argument('--train_on_file', default=False, action='store_true', help='Train using Madry tf grad')
    parser.add_argument('--lambda_on_clean', default=0.0, type=float, help='train the critic on clean examples of the train set')
    parser.add_argument('--not_use_labels', default=False, action='store_true', help='Use the labels for the conditional generator')
    parser.add_argument('--hinge_coeff', default=10.0, type=float, help='coeff for the hinge loss penalty')
    parser.add_argument('--anneal_eps', default=0.0, type=float, help='coeff for the epsilon annealing')
    parser.add_argument('--fixed_critic', default=False, action='store_true', help='Critic is not trained')
    parser.add_argument('--train_on_list', default=False, action='store_true', help='train on a list of classifiers')
    parser.add_argument('--train_set', default='train', choices=['train_and_test', 'test', 'train'], help='add the test set in the training set')
    parser.add_argument('--epochs', type=int, default=10, metavar='N', help='number of epochs to train (default: 10)')
    parser.add_argument('--n_iter', type=int, default=500, help='N iters for quere based attacks')
    parser.add_argument('--PGD_steps', type=int, default=40, metavar='N', help='max gradient steps (default: 30)')
    parser.add_argument('--max_iter', type=int, default=10, metavar='N', help='max gradient steps (default: 10)')
    # --- attack parameters ---
    parser.add_argument('--epsilon', type=float, default=0.1, metavar='M', help='Epsilon for Delta (default: 0.1)')
    parser.add_argument('--attack_ball', type=str, default='L2', choices=['L2', 'Linf'], help='type of box attack')
    parser.add_argument('--bb_steps', type=int, default=2000, metavar='N', help='Max black box steps per sample(default: 1000)')
    parser.add_argument('--attack_epochs', type=int, default=100, metavar='N', help='Max numbe of epochs to train G')
    parser.add_argument('--num_flows', type=int, default=2, metavar='N', help='Number of Flows')
    parser.add_argument('--seed', type=int, metavar='S', help='random seed (default: None)')
    parser.add_argument('--input_size', type=int, default=784, metavar='S', help='Input size for MNIST is default')
    parser.add_argument('--batch_size', type=int, default=256, metavar='S', help='Batch size')
    parser.add_argument('--test_batch_size', type=int, default=512, metavar='S', help='Test Batch size')
    parser.add_argument('--pgd_on_critic', default=False, action='store_true', help='Train Critic on pgd samples')
    parser.add_argument('--train_with_robust', default=False, action='store_true', help='Train with Robust model + Critic')
    parser.add_argument('--test', default=False, action='store_true', help='just test model and print accuracy')
    parser.add_argument('--clip_grad', default=True, action='store_true', help='Clip grad norm')
    parser.add_argument('--train_vae', default=False, action='store_true', help='Train VAE')
    parser.add_argument('--train_ae', default=False, action='store_true', help='Train AE')
    parser.add_argument('--attack_type', type=str, default='nobox', help='Which attack to run')
    parser.add_argument('--attack_loss', type=str, default='cross_entropy', help='Which loss func. to use to optimize G')
    parser.add_argument('--perturb_loss', type=str, default='L2', choices=['L2', 'Linf'], help='Which loss func. to use to optimize to compute constraint')
    # --- dataset / model / evaluation options ---
    parser.add_argument('--dataset', type=str, default='mnist')
    parser.add_argument('--model', type=str, default=None)
    parser.add_argument('--deterministic_G', default=False, action='store_true', help='Deterministic Latent State')
    parser.add_argument('--run_baseline', default=False, action='store_true', help='Run baseline PGD')
    parser.add_argument('--resample_test', default=False, action='store_true', help='Load model and test resampling capability')
    parser.add_argument('--resample_iterations', type=int, default=100, metavar='N', help='How many times to resample (default: 100)')
    parser.add_argument('--architecture', default='VGG16', help='The architecture we want to attack on CIFAR.')
    parser.add_argument('--eval_freq', default=5, type=int, help='Evaluate and save model every eval_freq epochs.')
    parser.add_argument('--num_test_samples', default=None, type=int, help='The number of samples used to train and test the attacker.')
    parser.add_argument('--num_eval_samples', default=None, type=int, help='The number of samples used to train and test the attacker.')
    parser.add_argument('--wandb', action='store_true', default=False, help='Use wandb for logging')
    parser.add_argument('--model_path', type=str, default='mnist_cnn.pt', help='where to save/load')
    parser.add_argument('--namestr', type=str, default='NoBox', help='additional info in output filename to describe experiments')
    parser.add_argument('--dir_test_models', type=str, default='./dir_test_models', help='The path to the directory containing the classifier models for evaluation.')
    parser.add_argument('--robust_model_path', type=str, default='./madry_challenge_models/mnist/adv_trained/mnist_lenet5_advtrained.pt', help='The path to our adv robust classifier')
    parser.add_argument('--robust_sample_prob', type=float, default=0.1, metavar='N', help='1-P(robust)')
    parser.add_argument('--max_test_model', type=int, default=1, help='The maximum number of pretrained classifiers to use for testing.')
    parser.add_argument('--perturb_magnitude', type=float, default=None, help='The amount of perturbation we want to enforce with lagrangian.')
    parser.add_argument('--log_path', type=str, default='./logs', help='Where to save logs if logger is specified.')
    parser.add_argument('--save_model', type=str, default=None, help='Where to save the models, if it is specified.')
    parser.add_argument('--fixed_testset', action='store_true', help='If used then makes sure that the same set of samples is always used for testing.')
    parser.add_argument('--normalize', default=None, choices=(None, 'default', 'meanstd'))
    parser.add_argument('--source_arch', default='res18', help='The architecture we want to attack on CIFAR.')
    parser.add_argument('--target_arch', nargs='*', help='The architecture we want to blackbox transfer to on CIFAR.')
    parser.add_argument('--ensemble_adv_trained', action='store_true')
    parser.add_argument('--adv_models', nargs='*', help='path to adv model(s)')
    parser.add_argument('--type', type=int, default=0, help='Model type (default: 0)')
    parser.add_argument('--model_name', help='path to model')
    parser.add_argument('--transfer', action='store_true')
    parser.add_argument('--command', choices=('eval', 'train'), default='train')
    parser.add_argument('--split', type=int, default=None, help='Which subsplit to use.')
    parser.add_argument('--path_to_data', default='../data', type=str)
    args = parser.parse_args()
    # Device selection and optional input normalization.
    args.dev = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    normalize = None
    if (args.normalize == 'meanstd'):
        normalize = transforms.Normalize(cf.mean['cifar10'], cf.std['cifar10'])
    elif (args.normalize == 'default'):
        normalize = CIFAR_NORMALIZATION
    (train_loader, test_loader, split_train_loader, split_test_loader) = create_loaders(args, root=args.path_to_data, split=args.split, num_test_samples=args.num_test_samples, normalize=normalize)
    if (args.split is not None):
        # Use only the requested data subsplit.
        train_loader = split_train_loader
        test_loader = split_test_loader
    if os.path.isfile('../settings.json'):
        with open('../settings.json') as f:
            data = json.load(f)
        args.wandb_apikey = data.get('wandbapikey')
    if args.wandb:
        os.environ['WANDB_API_KEY'] = args.wandb_apikey
        wandb.init(project='NoBox-table2', name='NoBox-Attack-{}-{}'.format(args.dataset, args.namestr))
    # Source model, adversarially-trained models, and held-out test classifiers.
    (model, adv_models, l_test_classif_paths, args.model_type) = data_and_model_setup(args, no_box_attack=True)
    model.to(args.dev)
    model.eval()
    print(('Testing on %d Test Classifiers with Source Model %s' % (len(l_test_classif_paths), args.source_arch)))
    (x_test, y_test) = load_data(args, test_loader)
    # Pick the critic architecture to match the dataset / source arch.
    if (args.dataset == 'mnist'):
        critic = load_unk_model(args)
    elif (args.dataset == 'cifar'):
        name = args.source_arch
        if (args.source_arch == 'adv'):
            name = 'res18'
        critic = load_unk_model(args, name=name)
    misclassify_loss_func = kwargs_attack_loss[args.attack_loss]
    attacker = NoBoxAttack(critic, misclassify_loss_func, args)
    print('Evaluating clean error rate:')
    list_model = [args.source_arch]
    if (args.source_arch == 'adv'):
        list_model = [args.model_type]
    if (args.target_arch is not None):
        list_model = args.target_arch
    # Report clean (unattacked) transfer accuracy on a random evaluation subset.
    for model_type in list_model:
        num_samples = args.num_eval_samples
        if (num_samples is None):
            num_samples = len(test_loader.dataset)
        eval_loader = torch.utils.data.Subset(test_loader.dataset, np.random.randint(len(test_loader.dataset), size=(num_samples,)))
        eval_loader = torch.utils.data.DataLoader(eval_loader, batch_size=args.test_batch_size)
        baseline_transfer(args, None, 'Clean', model_type, eval_loader, list_classifiers=l_test_classif_paths)

    def eval_fn(model):
        """Perturb a test batch with the attacker and report the fool rate on *model*;
        optionally run the transfer baseline on the adversarial images."""
        advcorrect = 0
        model.to(args.dev)
        model.eval()
        with ctx_noparamgrad_and_eval(model):
            if (args.source_arch == 'googlenet'):
                # GoogLeNet path: perturb loader batches until batch_size samples.
                adv_complete_list = []
                for (batch_idx, (x_batch, y_batch)) in enumerate(test_loader):
                    if (((batch_idx + 1) * args.test_batch_size) > args.batch_size):
                        break
                    (x_batch, y_batch) = (x_batch.to(args.dev), y_batch.to(args.dev))
                    adv_complete_list.append(attacker.perturb(x_batch, target=y_batch))
                adv_complete = torch.cat(adv_complete_list)
            else:
                adv_complete = attacker.perturb(x_test[:args.batch_size], target=y_test[:args.batch_size])
            # Keep adversarial images in the valid pixel range.
            adv_complete = torch.clamp(adv_complete, min=0.0, max=1.0)
            output = model(adv_complete)
            pred = output.max(1, keepdim=True)[1]
            advcorrect += pred.eq(y_test[:args.batch_size].view_as(pred)).sum().item()
            fool_rate = (1 - (advcorrect / float(args.batch_size)))
            print(('Test set base model fool rate: %f' % fool_rate))
        model.cpu()
        if args.transfer:
            adv_img_list = []
            y_orig = y_test[:args.batch_size]
            for i in range(0, len(adv_complete)):
                adv_img_list.append([adv_complete[i].unsqueeze(0), y_orig[i]])
            # Free the source model before evaluating transfer targets.
            del model
            torch.cuda.empty_cache()
            baseline_transfer(args, attacker, 'AEG', model_type, adv_img_list, l_test_classif_paths, adv_models)

    if (args.command == 'eval'):
        attacker.load(args)
    elif (args.command == 'train'):
        attacker.train(train_loader, test_loader, adv_models, l_test_classif_paths, l_train_classif={'source_model': model}, eval_fn=eval_fn)
class JitDataLoader():
    """Loads one character-level JIT dataset split and wraps it in a torchtext iterator.

    Source and target share a single character vocabulary; when *vocab* is not
    supplied, it is built from the training split's vocab file so every split
    uses the same index space.
    """

    def __init__(self, module_name, file_name, batch_size, is_train, device, log, vocab=None):
        self.module_name = module_name
        # Character-level tokenization: each example is split into single chars.
        split_chars = (lambda x: list(x))
        source = Field(tokenize=split_chars, init_token='<sos>', eos_token='<eos>', batch_first=True)
        target = Field(tokenize=split_chars, init_token='<sos>', eos_token='<eos>', batch_first=True)
        log('Loading JIT datasets ...')
        folder = os.path.join(DATASET_TARGET_DIR, module_name)
        dataset = JitDataset(path=os.path.join(folder, file_name), exts=(XY_FILE_ENDING, INDEX_FILE_ENDING), fields=(source, target))
        if (vocab is None):
            log('Building vocab ...')
            # Always derive the vocab from the training split's vocab file.
            fn_vocab = (os.path.join(folder, TRAIN_FILE_NAME) + VOCAB_FILE_ENDING)
            with open(fn_vocab, 'r') as vfile:
                vocab_text = vfile.read()
            source.build_vocab([vocab_text])
            target.vocab = source.vocab
        else:
            target.vocab = vocab
            source.vocab = vocab
        log('Creating iterators ...')
        if is_train:
            # NOTE(review): train=False with repeat=True in the training branch
            # looks deliberate (shuffling is disabled in both branches), but
            # confirm train=False is not a typo for train=True.
            iterator = JitIterator(dataset=dataset, batch_size=batch_size, train=False, repeat=True, shuffle=False, device=device)
        else:
            iterator = JitIterator(dataset=dataset, batch_size=batch_size, train=False, repeat=False, shuffle=False, device=device)
        self.dataset = dataset
        self.iterator = iterator
        self.source = source
        self.target = target

    def encode(self, str_list):
        """Numericalize a list of strings into a padded batch via the source field."""
        return self.source.process(str_list)

    def decode(self, batch, remove_pad=False):
        """Map a batch of index rows back to strings.

        When *remove_pad* is True, index 1 (the pad token) renders as ''.
        """
        itos = self.source.vocab.itos.copy()
        if remove_pad:
            itos[1] = ''
        str_list = [''.join([itos[idx] for idx in row]) for row in batch.tolist()]
        return str_list
class HyperGCN(nn.Module):
    """HyperGCN: graph convolutions over a hypergraph Laplacian structure.

    In fast mode the Laplacian is approximated once up front; otherwise each
    layer reapproximates it from the raw hyperedges.
    """

    def __init__(self, V, E, X, num_features, num_layers, num_classses, args):
        super(HyperGCN, self).__init__()
        d, l, c = num_features, num_layers, num_classses
        cuda = args.cuda
        # Hidden widths: powers of two shrinking with depth; citeseer gets
        # wider layers (+4 instead of +2 in the exponent).
        h = [d]
        for i in range(l - 1):
            power = l - i + 2
            if args.dname == 'citeseer':
                power = l - i + 4
            h.append(2 ** power)
        h.append(c)
        if args.HyperGCN_fast:
            # Precompute the structure once.
            reapproximate = False
            structure = utils.Laplacian(V, E, X, args.HyperGCN_mediators)
        else:
            # Layers will reapproximate from the hyperedges themselves.
            reapproximate = True
            structure = E
        self.layers = nn.ModuleList(
            [utils.HyperGraphConvolution(h[i], h[i + 1], reapproximate, cuda) for i in range(l)]
        )
        self.do, self.l = args.dropout, num_layers
        self.structure, self.m = structure, args.HyperGCN_mediators

    def reset_parameters(self):
        for layer in self.layers:
            layer.reset_parameters()

    def forward(self, data):
        do, l, m = self.do, self.l, self.m
        H = data.x
        for depth, conv in enumerate(self.layers):
            H = F.relu(conv(self.structure, H, m))
            if depth < l - 1:
                V = H
                H = F.dropout(H, do, training=self.training)
        return H
class LoopExecutor():
    """Sequential task executor: runs a callable once per zipped argument tuple."""

    def run(self, target, *args_iter, verbose=False):
        """Call target(*task) for each tuple zipped from *args_iter*.

        Fix: the progress message previously computed ``n_tasks - len(tasks)``,
        but *tasks* never shrinks, so it always printed 'task 0 of N'. It now
        reports the 1-based index of the task just completed.
        """
        tasks = list(zip(*args_iter))
        n_tasks = len(tasks)
        for i, task in enumerate(tasks):
            target(*task)
            if verbose:
                print('task %i of %i' % (i + 1, n_tasks))
def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
    """Wrap *module* for data parallelism, honoring custom per-device chunk sizes.

    Falls back to the stock torch.nn.DataParallel when no chunk sizes are given
    or when all chunk sizes are equal (uniform splitting is the default
    behavior anyway); otherwise uses the custom _DataParallel that scatters
    batches with the requested sizes.

    Improvement: the manual flag-setting loop checking uniformity is replaced
    with the equivalent ``all(...)`` expression.
    """
    if chunk_sizes is None:
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    if all(size == chunk_sizes[0] for size in chunk_sizes[1:]):
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
def worker(gpu, solver, ngpus_per_node, args):
    """Per-GPU distributed worker: join the NCCL process group, then run the
    epoch train/validate loop until done or early-stopped.

    Args:
        gpu: local GPU index on this node.
        solver: training driver exposing set_gpu/train/multi_validate/stop.
        ngpus_per_node: GPUs per node, used to derive the global rank.
        args: config namespace (sys_params, dir_params, hyperparams, wandb_params).
    """
    # Global rank = node_rank * gpus_per_node + local gpu index.
    args.sys_params.rank = ((args.sys_params.rank * ngpus_per_node) + gpu)
    dist.init_process_group(backend='nccl', world_size=args.sys_params.world_size, init_method='env://', rank=args.sys_params.rank)
    args.gpu = gpu
    args.ngpus_per_node = ngpus_per_node
    solver.set_gpu(args)
    start_epoch = solver.start_epoch
    if args.dir_params.resume:
        # Resume from the epoch after the checkpointed one.
        start_epoch = (start_epoch + 1)
    for epoch in range(start_epoch, (args.hyperparams.epochs + 1)):
        # Reshuffle the distributed sampler each epoch for a fresh data order.
        solver.train_sampler.set_epoch(epoch)
        solver.train(args, epoch)
        # NOTE(review): presumably a pause to let async logging/IO settle
        # between train and validate -- confirm it is still needed.
        time.sleep(1)
        solver.multi_validate(args, epoch)
        if (solver.stop == True):
            print('Apply Early Stopping')
            if args.wandb_params.use_wandb:
                wandb.finish()
            sys.exit()
    if args.wandb_params.use_wandb:
        wandb.finish()
class AutoModelForSeq2SeqLM(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Any attempt to instantiate it raises through requires_backends, telling the
    user which backend ('torch') must be installed for the real class.
    """
    # Backends required for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class pos_model(base):
    """Sentence-position pretraining model.

    From a sentence encoding it reconstructs the previous/next sentences
    (bag-of-words log-losses), predicts the sentence id, paragraph id and
    heading level, and decodes document/section titles; the total loss is a
    ratio-weighted sum of whichever objectives are enabled in the config.
    """
    # NOTE(review): this bare name looks like a decorator that lost its '@'
    # (e.g. '@auto_init_pytorch') during extraction -- confirm against upstream;
    # as written it is a no-op expression that raises NameError at class creation.
    _init_pytorch

    def __init__(self, vocab_size, embed_dim, embed_init, max_nsent, max_npara, max_nlv, doc_title_vocab_size, sec_title_vocab_size, experiment, *args, **kwargs):
        super(pos_model, self).__init__(vocab_size, embed_dim, embed_init, experiment)
        # Encoder output width: bidirectional RNN encoders emit 2*ensize;
        # otherwise the raw embedding dimension is used.
        if (self.expe.config.encoder_type.lower() in ['lstm', 'gru', 'gru_attn']):
            ensize = (2 * self.expe.config.ensize)
        else:
            ensize = embed_dim
        # Classification heads over the sentence encoding.
        self.sent_id_pred = model_utils.get_mlp(input_size=ensize, hidden_size=self.expe.config.mhsize, output_size=max_nsent, n_layer=self.expe.config.mlplayer, dropout=self.expe.config.dp)
        self.para_id_pred = model_utils.get_mlp(input_size=ensize, hidden_size=self.expe.config.mhsize, output_size=max_npara, n_layer=self.expe.config.mlplayer, dropout=self.expe.config.dp)
        self.lv_pred = model_utils.get_mlp(input_size=ensize, hidden_size=self.expe.config.mhsize, output_size=max_nlv, n_layer=self.expe.config.mlplayer, dropout=self.expe.config.dp)
        # Bag-of-words decoders for document and section titles.
        self.doc_title_decode = decoders.bag_of_words(input_size=ensize, mlp_hidden_size=self.expe.config.mhsize, mlp_layer=self.expe.config.mlplayer, hidden_size=self.expe.config.desize, dropout=self.expe.config.dp, embed_dim=embed_dim, tie_weight=self.expe.config.tw, word_dropout=self.expe.config.wd, embed_init=embed_init, vocab_size=doc_title_vocab_size, log=experiment.log)
        self.sec_title_decode = decoders.bag_of_words(input_size=ensize, mlp_hidden_size=self.expe.config.mhsize, mlp_layer=self.expe.config.mlplayer, hidden_size=self.expe.config.desize, dropout=self.expe.config.dp, embed_dim=embed_dim, tie_weight=self.expe.config.tw, word_dropout=self.expe.config.wd, embed_init=embed_init, vocab_size=sec_title_vocab_size, log=experiment.log)

    def forward(self, sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, doc_id, para_id, pmask, sent_id, smask, lvs, doc_title, sec_title, *args):
        """Compute the combined training loss; returns the total plus each component.

        Returns:
            (loss, logloss1, logloss2, para_id_loss, sent_id_loss, level_loss,
             doc_title_loss, sec_title_loss)
        """
        self.train()
        (sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, para_id, pmask, sent_id, smask, lvs, doc_title, sec_title) = self.to_vars(sent, mask, tgt, tgt_mask, tgt2, tgt_mask2, para_id, pmask, sent_id, smask, lvs, doc_title, sec_title)
        (bs, sl) = sent.size()
        sent_vec = self.encode(sent, mask)
        # Next-sentence reconstruction loss is always computed; the
        # previous-sentence loss is skipped under unidirectional prediction.
        logloss2 = self.next_decode(sent_vec, tgt2, tgt_mask2)
        if self.expe.config.uni_pred:
            logloss1 = torch.zeros_like(logloss2)
        else:
            logloss1 = self.prev_decode(sent_vec, tgt, tgt_mask)
        logloss = (logloss1 + logloss2)
        # Each auxiliary loss is only computed when its ratio is non-zero.
        if self.expe.config.sratio:
            sent_id_logit = self.sent_id_pred(sent_vec)
            # Mask invalid sentence positions with -inf before cross-entropy.
            neg_smask = (1 - smask)
            sent_id_logit.data.masked_fill_(neg_smask.data.byte(), (- float('inf')))
            sent_id_loss = F.cross_entropy(sent_id_logit, sent_id.long())
        else:
            sent_id_loss = torch.zeros_like(logloss)
        if self.expe.config.pratio:
            para_id_logit = self.para_id_pred(sent_vec)
            neg_pmask = (1 - pmask)
            para_id_logit.data.masked_fill_(neg_pmask.data.byte(), (- float('inf')))
            para_id_loss = F.cross_entropy(para_id_logit, para_id.long())
        else:
            para_id_loss = torch.zeros_like(logloss)
        if self.expe.config.lvratio:
            level_logit = self.lv_pred(sent_vec)
            level_loss = F.cross_entropy(level_logit, lvs.long())
        else:
            level_loss = torch.zeros_like(logloss)
        if self.expe.config.dtratio:
            doc_title_loss = self.doc_title_decode(sent_vec, doc_title, None)
        else:
            doc_title_loss = torch.zeros_like(logloss)
        if self.expe.config.stratio:
            sec_title_loss = self.sec_title_decode(sent_vec, sec_title, None)
        else:
            sec_title_loss = torch.zeros_like(logloss)
        # Ratio-weighted sum of all objectives.
        loss = ((((((self.expe.config.lratio * logloss) + (self.expe.config.sratio * sent_id_loss)) + (self.expe.config.pratio * para_id_loss)) + (self.expe.config.lvratio * level_loss)) + (self.expe.config.dtratio * doc_title_loss)) + (self.expe.config.stratio * sec_title_loss))
        return (loss, logloss1, logloss2, para_id_loss, sent_id_loss, level_loss, doc_title_loss, sec_title_loss)

    def score_sts(self, sent1, mask1, sent2, mask2):
        """Cosine similarity between the encodings of two sentence batches (eval mode).

        Returns a numpy array of per-pair similarities.
        """
        self.eval()
        (sent1, mask1, sent2, mask2) = self.to_vars(sent1, mask1, sent2, mask2)
        sent1_vec = self.encode(sent1, mask1)
        sent2_vec = self.encode(sent2, mask2)
        sent_cos_pos = F.cosine_similarity(sent1_vec, sent2_vec)
        return sent_cos_pos.data.cpu().numpy()
def train():
    """Train the encoder-decoder ConvRNN with early stopping, plateau LR
    scheduling and TensorBoard logging; write per-epoch average losses to
    text files at the end.

    Fixes:
    - The resume branch checked for 'checkpoint.pth.tar' but loaded
      'checkpoin.pth.tar' (typo), so resuming always crashed.
    - The restored optimizer state was immediately discarded by an
      unconditional fresh ``optim.Adam(...)``; the fresh optimizer is now
      created only when not resuming.
    """
    encoder = Encoder(encoder_params[0], encoder_params[1]).cuda()
    decoder = Decoder(decoder_params[0], decoder_params[1]).cuda()
    net = ED(encoder, decoder)
    run_dir = './runs/' + TIMESTAMP
    if not os.path.isdir(run_dir):
        os.makedirs(run_dir)
    tb = SummaryWriter(run_dir)
    early_stopping = EarlyStopping(patience=20, verbose=True)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net)
    net.to(device)
    checkpoint_path = os.path.join(save_dir, 'checkpoint.pth.tar')
    if os.path.exists(checkpoint_path):
        # Resume: restore model and optimizer state from the checkpoint.
        print('==> loading existing model')
        model_info = torch.load(checkpoint_path)  # bug fix: was 'checkpoin.pth.tar'
        net.load_state_dict(model_info['state_dict'])
        optimizer = torch.optim.Adam(net.parameters())
        optimizer.load_state_dict(model_info['optimizer'])
        cur_epoch = model_info['epoch'] + 1
    else:
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        cur_epoch = 0
        # Fresh optimizer only when starting from scratch (bug fix: it used to
        # unconditionally overwrite the loaded optimizer state).
        optimizer = optim.Adam(net.parameters(), lr=args.lr)
    lossfunction = nn.MSELoss().cuda()
    pla_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=4, verbose=True)
    train_losses = []
    valid_losses = []
    avg_train_losses = []
    avg_valid_losses = []
    for epoch in range(cur_epoch, args.epochs + 1):
        # ---- training pass ----
        t = tqdm(trainLoader, leave=False, total=len(trainLoader))
        for i, (idx, targetVar, inputVar, _, _) in enumerate(t):
            inputs = inputVar.to(device)
            label = targetVar.to(device)
            optimizer.zero_grad()
            net.train()
            pred = net(inputs)
            loss = lossfunction(pred, label)
            loss_aver = loss.item() / args.batch_size
            train_losses.append(loss_aver)
            loss.backward()
            torch.nn.utils.clip_grad_value_(net.parameters(), clip_value=10.0)
            optimizer.step()
            t.set_postfix({'trainloss': '{:.6f}'.format(loss_aver), 'epoch': '{:02d}'.format(epoch)})
        tb.add_scalar('TrainLoss', loss_aver, epoch)
        # ---- validation pass (capped at 3000 batches) ----
        with torch.no_grad():
            net.eval()
            t = tqdm(validLoader, leave=False, total=len(validLoader))
            for i, (idx, targetVar, inputVar, _, _) in enumerate(t):
                if i == 3000:
                    break
                inputs = inputVar.to(device)
                label = targetVar.to(device)
                pred = net(inputs)
                loss = lossfunction(pred, label)
                loss_aver = loss.item() / args.batch_size
                valid_losses.append(loss_aver)
                t.set_postfix({'validloss': '{:.6f}'.format(loss_aver), 'epoch': '{:02d}'.format(epoch)})
        tb.add_scalar('ValidLoss', loss_aver, epoch)
        torch.cuda.empty_cache()
        # Epoch summary: average, log, reset, schedule, checkpoint.
        train_loss = np.average(train_losses)
        valid_loss = np.average(valid_losses)
        avg_train_losses.append(train_loss)
        avg_valid_losses.append(valid_loss)
        epoch_len = len(str(args.epochs))
        print_msg = (f'[{epoch:>{epoch_len}}/{args.epochs:>{epoch_len}}] ' + f'train_loss: {train_loss:.6f} ') + f'valid_loss: {valid_loss:.6f}'
        print(print_msg)
        train_losses = []
        valid_losses = []
        pla_lr_scheduler.step(valid_loss)
        model_dict = {'epoch': epoch, 'state_dict': net.state_dict(), 'optimizer': optimizer.state_dict()}
        early_stopping(valid_loss.item(), model_dict, epoch, save_dir)
        if early_stopping.early_stop:
            print('Early stopping')
            break
    with open('avg_train_losses.txt', 'wt') as f:
        for i in avg_train_losses:
            print(i, file=f)
    with open('avg_valid_losses.txt', 'wt') as f:
        for i in avg_valid_losses:
            print(i, file=f)
def batch_clamp(float_or_vector, tensor):
    """Clamp *tensor* symmetrically, either with one bound per batch element
    (a torch.Tensor of the same leading length) or with a single float bound.
    """
    if isinstance(float_or_vector, torch.Tensor):
        # One clamp bound per example in the batch.
        assert len(float_or_vector) == len(tensor)
        return _batch_clamp_tensor_by_vector(float_or_vector, tensor)
    if isinstance(float_or_vector, float):
        # Scalar bound: clamp into [-b, b].
        return clamp(tensor, -float_or_vector, float_or_vector)
    raise TypeError('Value has to be float or torch.Tensor')
def components_from_array(ion, *, z, b, logN):
    """Build a Parameters set for *ion* from parallel arrays of component
    redshifts (*z*), Doppler widths (*b*) and column densities (*logN*).

    Parameter names follow the 'z<i>_<ion>' / 'b<i>_<ion>' / 'logN<i>_<ion>'
    convention, one triple per component.
    """
    pars = Parameters()
    for idx, (z_val, b_val, n_val) in enumerate(zip(z, b, logN)):
        pars.add('z%i_%s' % (idx, ion), value=z_val)
        pars.add('b%i_%s' % (idx, ion), value=b_val)
        pars.add('logN%i_%s' % (idx, ion), value=n_val)
    return pars
def sample_dirichlet(prior):
    """Draw one sample from a Dirichlet distribution with concentration
    vector *prior*, via the standard normalized-Gamma construction.
    """
    draws = np.zeros(len(prior))
    # One Gamma(shape=prior[i], scale=1) draw per component.
    for idx, shape in enumerate(prior):
        draws[idx] = np.random.gamma(shape)
    return draws / sum(draws)
class AgentOutputStatus(str, Enum):
    """Completion status attached to an agent's output.

    Subclasses ``str`` so members compare equal to their raw string values
    and serialize naturally (e.g. into JSON payloads).
    """

    NORMAL = 'normal'                             # finished without incident
    CANCELLED = 'cancelled'                       # run was cancelled externally
    AGENT_CONTEXT_LIMIT = 'agent context limit'   # hit the context-window cap
class Trainer(object):
    """Architecture-search trainer for AutoStereo (bi-level optimization).

    Weight parameters are trained with SGD (``optimizer_F``/``optimizer_M``
    for the feature and matching sub-nets) on data split A; architecture
    parameters are trained with Adam (``architect_optimizer_F``/``..._M``)
    on split B once ``args.alpha_epoch`` is reached.
    """

    def __init__(self, args):
        self.args = args
        # Experiment bookkeeping: config snapshot + TensorBoard writer.
        self.saver = Saver(args)
        self.saver.save_experiment_config()
        self.summary = TensorboardSummary(self.saver.experiment_dir)
        self.writer = self.summary.create_summary()
        kwargs = {'num_workers': args.workers, 'pin_memory': True, 'drop_last': True}
        (self.train_loaderA, self.train_loaderB, self.val_loader, self.test_loader) = make_data_loader(args, **kwargs)
        model = AutoStereo(maxdisp=self.args.max_disp,
                           Fea_Layers=self.args.fea_num_layers, Fea_Filter=self.args.fea_filter_multiplier,
                           Fea_Block=self.args.fea_block_multiplier, Fea_Step=self.args.fea_step,
                           Mat_Layers=self.args.mat_num_layers, Mat_Filter=self.args.mat_filter_multiplier,
                           Mat_Block=self.args.mat_block_multiplier, Mat_Step=self.args.mat_step)
        # Weight optimizers (SGD), one per sub-network.
        optimizer_F = torch.optim.SGD(model.feature.weight_parameters(), args.lr,
                                      momentum=args.momentum, weight_decay=args.weight_decay)
        optimizer_M = torch.optim.SGD(model.matching.weight_parameters(), args.lr,
                                      momentum=args.momentum, weight_decay=args.weight_decay)
        (self.model, self.optimizer_F, self.optimizer_M) = (model, optimizer_F, optimizer_M)
        # Architecture optimizers (Adam over the architecture parameters).
        self.architect_optimizer_F = torch.optim.Adam(self.model.feature.arch_parameters(),
                                                      lr=args.arch_lr, betas=(0.9, 0.999),
                                                      weight_decay=args.arch_weight_decay)
        self.architect_optimizer_M = torch.optim.Adam(self.model.matching.arch_parameters(),
                                                      lr=args.arch_lr, betas=(0.9, 0.999),
                                                      weight_decay=args.arch_weight_decay)
        self.scheduler = LR_Scheduler(args.lr_scheduler, args.lr, args.epochs,
                                      len(self.train_loaderA), min_lr=args.min_lr)
        if args.cuda:
            self.model = torch.nn.DataParallel(self.model).cuda()
        # Lower validation EPE is better; start from a large sentinel.
        self.best_pred = 100.0
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.clean_module:
                # Checkpoint was saved from a DataParallel model: strip the
                # leading 'module.' prefix from each key before copying.
                # Fixes over the old code: removed the leftover print(1) /
                # pdb.set_trace() debug statements, removed the redundant
                # full load_state_dict done before cleaning, and made sure
                # 'name' is defined for keys without the prefix (the old loop
                # could hit an unbound 'name').
                state_dict = checkpoint['state_dict']
                new_state_dict = OrderedDict()
                for k, v in state_dict.items():
                    name = k[7:] if 'module' in k else k
                    new_state_dict[name] = v
                copy_state_dict(self.model.state_dict(), new_state_dict)
            else:
                # Both original branches (multi-GPU and not) did exactly this;
                # merged into one.  NOTE(review): assumes args.cuda, i.e. the
                # model is DataParallel-wrapped so `.module` exists -- confirm.
                copy_state_dict(self.model.module.state_dict(), checkpoint['state_dict'])
            if not args.ft:
                # Fine-tune mode skips restoring optimizer state.
                copy_state_dict(self.optimizer_M.state_dict(), checkpoint['optimizer_M'])
                copy_state_dict(self.optimizer_F.state_dict(), checkpoint['optimizer_F'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
            if args.ft:
                args.start_epoch = 0
        print('Total number of model parameters : {}'.format(sum([p.data.nelement() for p in self.model.parameters()])))
        print('Number of Feature Net parameters: {}'.format(sum([p.data.nelement() for p in self.model.module.feature.parameters()])))
        print('Number of Matching Net parameters: {}'.format(sum([p.data.nelement() for p in self.model.module.matching.parameters()])))

    def training(self, epoch):
        """Run one epoch of weight training on split A; after
        ``args.alpha_epoch`` each batch also takes one architecture step on a
        batch from split B.  Logs to TensorBoard and saves an epoch checkpoint.
        """
        train_loss = 0.0
        valid_iteration = 0
        self.model.train()
        tbar = tqdm(self.train_loaderA)
        num_img_tr = len(self.train_loaderA)
        for i, batch in enumerate(tbar):
            (input1, input2, target) = (Variable(batch[0], requires_grad=True),
                                        Variable(batch[1], requires_grad=True), batch[2])
            if self.args.cuda:
                input1 = input1.cuda()
                input2 = input2.cuda()
                target = target.cuda()
            target = torch.squeeze(target, 1)
            # Supervise only pixels with disparity inside the search range.
            mask = target < self.args.max_disp
            mask.detach_()
            valid = target[mask].size()[0]
            if valid > 0:
                self.scheduler(self.optimizer_F, i, epoch, self.best_pred)
                self.scheduler(self.optimizer_M, i, epoch, self.best_pred)
                self.optimizer_F.zero_grad()
                self.optimizer_M.zero_grad()
                output = self.model(input1, input2)
                loss = F.smooth_l1_loss(output[mask], target[mask], reduction='mean')
                loss.backward()
                self.optimizer_F.step()
                self.optimizer_M.step()
                if epoch >= self.args.alpha_epoch:
                    # Bi-level step: update architecture parameters on a batch
                    # from the second training split.
                    print('Start searching architecture!')
                    search = next(iter(self.train_loaderB))
                    (input1_search, input2_search, target_search) = (Variable(search[0], requires_grad=True),
                                                                     Variable(search[1], requires_grad=True), search[2])
                    if self.args.cuda:
                        input1_search = input1_search.cuda()
                        input2_search = input2_search.cuda()
                        target_search = target_search.cuda()
                    target_search = torch.squeeze(target_search, 1)
                    mask_search = target_search < self.args.max_disp
                    mask_search.detach_()
                    self.architect_optimizer_F.zero_grad()
                    self.architect_optimizer_M.zero_grad()
                    output_search = self.model(input1_search, input2_search)
                    arch_loss = F.smooth_l1_loss(output_search[mask_search], target_search[mask_search], reduction='mean')
                    arch_loss.backward()
                    self.architect_optimizer_F.step()
                    self.architect_optimizer_M.step()
                train_loss += loss.item()
                valid_iteration += 1
                tbar.set_description('Train loss: %.3f' % (train_loss / (i + 1)))
                self.writer.add_scalar('train/total_loss_iter', loss.item(), i + num_img_tr * epoch)
                if i % (num_img_tr // 10) == 0:
                    global_step = i + num_img_tr * epoch
                    self.summary.visualize_image_stereo(self.writer, input1, target, output, global_step)
        self.writer.add_scalar('train/total_loss_epoch', train_loss, epoch)
        print('=== Train ===> Epoch :{} Error: {:.4f}'.format(epoch, train_loss / valid_iteration))
        print(self.model.module.feature.alphas)
        is_best = False
        if torch.cuda.device_count() > 1:
            state_dict = self.model.module.state_dict()
        else:
            state_dict = self.model.state_dict()
        self.saver.save_checkpoint({'epoch': epoch + 1,
                                    'state_dict': state_dict,
                                    'optimizer_F': self.optimizer_F.state_dict(),
                                    'optimizer_M': self.optimizer_M.state_dict(),
                                    'best_pred': self.best_pred},
                                   is_best, filename='checkpoint_{}.pth.tar'.format(epoch))

    def validation(self, epoch):
        """Validate on the val split: logs average EPE and D1-all and saves a
        'best' checkpoint when the EPE improves."""
        self.model.eval()
        epoch_error = 0
        three_px_acc_all = 0
        valid_iteration = 0
        tbar = tqdm(self.val_loader, desc='\r')
        test_loss = 0.0
        for i, batch in enumerate(tbar):
            (input1, input2, target) = (Variable(batch[0], requires_grad=False),
                                        Variable(batch[1], requires_grad=False),
                                        Variable(batch[2], requires_grad=False))
            if self.args.cuda:
                input1 = input1.cuda()
                input2 = input2.cuda()
                target = target.cuda()
            target = torch.squeeze(target, 1)
            mask = target < self.args.max_disp
            mask.detach_()
            valid = target[mask].size()[0]
            if valid > 0:
                with torch.no_grad():
                    output = self.model(input1, input2)
                    # End-point error over valid pixels.
                    error = torch.mean(torch.abs(output[mask] - target[mask]))
                    epoch_error += error.item()
                    valid_iteration += 1
                    pred_disp = output.cpu().detach()
                    true_disp = target.cpu().detach()
                    disp_true = true_disp
                    # Fix: use self.args.max_disp -- the old code referenced an
                    # undefined global 'opt' here.
                    index = np.argwhere(true_disp < self.args.max_disp)
                    # NOTE(review): disp_true aliases true_disp, so this
                    # in-place write also changes true_disp; kept as in the
                    # original three-pixel-error computation -- confirm intent.
                    disp_true[index[0][:], index[1][:], index[2][:]] = np.abs(
                        true_disp[index[0][:], index[1][:], index[2][:]]
                        - pred_disp[index[0][:], index[1][:], index[2][:]])
                    correct = ((disp_true[index[0][:], index[1][:], index[2][:]] < 1)
                               | (disp_true[index[0][:], index[1][:], index[2][:]]
                                  < (true_disp[index[0][:], index[1][:], index[2][:]] * 0.05)))
                    three_px_acc = 1 - (float(torch.sum(correct)) / float(len(index[0])))
                    three_px_acc_all += three_px_acc
                print('===> Test({}/{}): Error(EPE): ({:.4f} {:.4f})'.format(
                    i, len(self.val_loader), error.item(), three_px_acc))
        self.writer.add_scalar('val/EPE', epoch_error / valid_iteration, epoch)
        self.writer.add_scalar('val/D1_all', three_px_acc_all / valid_iteration, epoch)
        print('===> Test: Avg. Error: ({:.4f} {:.4f})'.format(
            epoch_error / valid_iteration, three_px_acc_all / valid_iteration))
        new_pred = epoch_error / valid_iteration
        if new_pred < self.best_pred:
            is_best = True
            self.best_pred = new_pred
            if torch.cuda.device_count() > 1:
                state_dict = self.model.module.state_dict()
            else:
                state_dict = self.model.state_dict()
            self.saver.save_checkpoint({'epoch': epoch + 1,
                                        'state_dict': state_dict,
                                        'optimizer_F': self.optimizer_F.state_dict(),
                                        'optimizer_M': self.optimizer_M.state_dict(),
                                        'best_pred': self.best_pred}, is_best)
def get_confirm_token(response):
    """Return the value of the first 'download_warning*' cookie on *response*
    (Google Drive's large-file confirmation token), or None if absent.
    """
    return next(
        (token for name, token in response.cookies.items()
         if name.startswith('download_warning')),
        None,
    )
def vgg11_bn(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> VGG:
    """Wrap torchvision's vgg11_bn backbone in the project's VGG adapter."""
    backbone = torchvision.models.vgg11_bn(pretrained, progress, **kwargs)
    return VGG(backbone)
def get_string_from_layer_name(all_layers, current_layer, full_layer_name):
    """Resolve *full_layer_name* to the output-name string of a layer that
    appears before *current_layer* in *all_layers*.

    *full_layer_name* may be either a plain layer name or a
    'layer.auxiliary_output' qualified name (split by split_layer_name).

    Raises RuntimeError if the auxiliary output does not exist, if the layer
    is referenced before it is defined (forward/circular reference), or if no
    such layer exists at all.
    """
    (layer_name, auxiliary_output) = split_layer_name(full_layer_name)
    for layer in all_layers:
        # Only layers strictly before current_layer may be referenced.
        if layer is current_layer:
            break
        if layer.get_name() == full_layer_name:
            return layer.output_name()
        if layer.get_name() == layer_name:
            if (not (auxiliary_output in layer.auxiliary_outputs())) and (auxiliary_output is not None):
                raise RuntimeError("Layer '{0}' has no such auxiliary output: '{1}' ({0}.{1})".format(layer_name, auxiliary_output))
            return layer.output_name(auxiliary_output)
    # Not found before current_layer: distinguish a forward reference from a
    # completely unknown name.
    if layer_name in [layer.get_name() for layer in all_layers]:
        # Fix: the old message was missing its closing parenthesis.
        raise RuntimeError("Layer '{0}' was requested before it appeared in the xconfig file (circular dependencies or out-of-order layers)".format(layer_name))
    else:
        raise RuntimeError("No such layer: '{0}'".format(layer_name))
def ycrank(pt0, y):
    """Convert a crank-tip displacement into polar form about the pivot (1, 0).

    pt0: base-point offset (x, y); y: displacement vector (two components).
    Returns (crklen, crkagl): the crank length and its angle in [0, 2*pi)
    measured from the positive x-axis.
    """
    # Fix: sqrt was missing from the local math import, so the old code raised
    # NameError unless some global sqrt happened to be in scope.  Also removed
    # the unused cx/cy locals.
    from math import acos, pi, sqrt
    yp0, yp1 = y[0] + pt0[0], y[1] + pt0[1]
    crklen = sqrt((yp0 - 1) ** 2 + yp1 ** 2)
    crkagl = acos((yp0 - 1) / crklen)
    if yp1 < 0:
        # acos only covers [0, pi]; reflect for points below the x-axis
        # (equivalent to the original pi + (pi - crkagl)).
        crkagl = 2 * pi - crkagl
    return (crklen, crkagl)
def main():
    """Build resnet_v1_101 on a random input and export the requested tensors.

    Command-line arguments (via the module-level ``argv``):
    argv[1] = destination passed to run_model, argv[2] = comma-separated
    tensor names to fetch, argv[3] = 'True'/'False' flag forwarded to
    run_model -- TODO confirm exact meanings against run_model's signature.
    """
    # Graph-level seed so the random weights/inputs are reproducible (TF1 API).
    tf.set_random_seed(1)
    (height, width) = (224, 224)
    # Random NHWC batch of 2, aliased through an identity op so the graph has
    # a stable node named 'input_node' for later lookup/freezing.
    inputs = tf.Variable(tf.random_uniform((2, height, width, 3)), name='input')
    inputs = tf.identity(inputs, 'input_node')
    (net, end_points) = resnet_v1.resnet_v1_101(inputs, 1000, is_training=True)
    print('nodes in the graph')
    for n in end_points:
        print(((n + ' => ') + str(end_points[n])))
    # NOTE: map() is lazy -- run_model is expected to iterate it.
    net_outputs = map((lambda x: tf.get_default_graph().get_tensor_by_name(x)), argv[2].split(','))
    run_model(net_outputs, argv[1], 'resnet_v1_101', (argv[3] == 'True'))
def _load_from_remote(model_name_or_path: str, ckpt_file: str='best.ckpt', cfg_file: str='config.yaml', **kwargs) -> TranslatorHubInterface:
    """Download a packaged model, load it, and wrap it in a
    TranslatorHubInterface ready for translation."""
    local_dir = _download_and_extract(model_name_or_path, **kwargs)
    config, test_data, model = _from_pretrained(
        model_name_or_path=local_dir,
        ckpt_file=ckpt_file,
        cfg_file=cfg_file,
        **kwargs,
    )
    return TranslatorHubInterface(config, test_data, model)
def get_video_codec_bitrate(width, height, framerate, divisor, factor):
    """Heuristic target bitrate: pixels/frame x effective fps x 12, scaled by
    a quality *factor*; *divisor* reduces the effective frame rate."""
    pixels_per_second = width * height * (framerate / divisor)
    return int(pixels_per_second * 12 * factor)
class SharedStorage(object):
    """In-memory map from training step to network checkpoint."""

    def __init__(self):
        # step -> network checkpoint
        self._networks = {}

    def latest_network(self) -> Network:
        """Network stored at the highest step, or a fresh uniform network."""
        if not self._networks:
            return make_uniform_network()
        return self._networks[max(self._networks)]

    def old_network(self) -> Network:
        """Network stored at the lowest step, or a fresh uniform network."""
        if not self._networks:
            return make_uniform_network()
        return self._networks[min(self._networks)]

    def save_network(self, step: int, network: Network):
        """Record *network* under *step*, overwriting any previous entry."""
        self._networks[step] = network
def pointnet_fp_module(xyz1, xyz2, points1, points2, mlp, is_training, bn_decay, scope, bn=True):
    """PointNet++ feature-propagation (upsampling) module.

    Interpolates features from the sparse set (xyz2, points2) onto the denser
    set xyz1 by inverse-distance weighting over the 3 nearest neighbors
    (three_nn/three_interpolate custom ops), optionally concatenates the skip
    features *points1*, then applies 1x1 conv layers of widths *mlp*.

    Assumed shapes (from the 3-neighbor tiling below -- TODO confirm against
    the custom ops): xyz1 (B, N, 3), xyz2 (B, M, 3), points2 (B, M, C);
    returns (B, N, mlp[-1]).
    """
    with tf.variable_scope(scope) as sc:
        (dist, idx) = three_nn(xyz1, xyz2)
        # Clamp distances to avoid division by zero for coincident points.
        dist = tf.maximum(dist, 1e-10)
        norm = tf.reduce_sum((1.0 / dist), axis=2, keepdims=True)
        norm = tf.tile(norm, [1, 1, 3])
        # Normalized inverse-distance weights over the 3 neighbors.
        weight = ((1.0 / dist) / norm)
        interpolated_points = three_interpolate(points2, idx, weight)
        if (points1 is not None):
            # Skip connection from the corresponding encoder level.
            new_points1 = tf.concat(axis=2, values=[interpolated_points, points1])
        else:
            new_points1 = interpolated_points
        # Insert a dummy spatial dim so conv2d acts as a shared per-point MLP.
        new_points1 = tf.expand_dims(new_points1, 2)
        for (i, num_out_channel) in enumerate(mlp):
            new_points1 = tf_util.conv2d(new_points1, num_out_channel, [1, 1], padding='VALID', stride=[1, 1], bn=bn, is_training=is_training, scope=('conv_%d' % i), bn_decay=bn_decay)
        new_points1 = tf.squeeze(new_points1, [2])
        return new_points1
class RNNEncoder(nn.Module):
    """RNN (LSTM/GRU) sequence encoder with optional auxiliary-feature
    embeddings and an optional gated self-attention layer over the outputs."""

    def __init__(self, n_vocab, d_word_vec, d_model, n_layer, brnn, rnn, feat_vocab, d_feat_vec, slf_attn, dropout):
        """n_vocab: word vocabulary size; d_word_vec: word embedding dim;
        d_model: total hidden size (split across directions when *brnn*);
        rnn: 'lstm' or 'gru'; feat_vocab: list of auxiliary feature vocab
        sizes (falsy disables features); slf_attn: enable gated self-attention.
        """
        self.name = 'rnn'
        self.n_layer = n_layer
        self.num_directions = 2 if brnn else 1
        assert d_model % self.num_directions == 0, 'd_model = hidden_size x direction_num'
        self.hidden_size = d_model // self.num_directions
        super(RNNEncoder, self).__init__()
        self.word_emb = nn.Embedding(n_vocab, d_word_vec, padding_idx=Constants.PAD)
        input_size = d_word_vec
        # One extra embedding table per auxiliary feature vocabulary.
        self.feature = bool(feat_vocab)
        if self.feature:
            self.feat_embs = nn.ModuleList([nn.Embedding(n_f_vocab, d_feat_vec, padding_idx=Constants.PAD)
                                            for n_f_vocab in feat_vocab])
            input_size += len(feat_vocab) * d_feat_vec
        self.slf_attn = slf_attn
        if slf_attn:
            self.gated_slf_attn = GatedSelfAttention(d_model)
        if rnn == 'lstm':
            self.rnn = nn.LSTM(input_size, self.hidden_size, num_layers=n_layer,
                               dropout=dropout, bidirectional=brnn, batch_first=True)
        elif rnn == 'gru':
            self.rnn = nn.GRU(input_size, self.hidden_size, num_layers=n_layer,
                              dropout=dropout, bidirectional=brnn, batch_first=True)
        else:
            raise ValueError("Only support 'LSTM' and 'GRU' for RNN-based Encoder ")

    @classmethod
    def from_opt(cls, opt):
        """Alternate constructor from an option dict.

        Fix: this factory takes ``cls`` but was missing its @classmethod
        decorator, so ``RNNEncoder.from_opt(opt)`` could not work.
        """
        return cls(opt['n_vocab'], opt['d_word_vec'], opt['d_model'], opt['n_layer'],
                   opt['brnn'], opt['rnn'], opt['feat_vocab'], opt['d_feat_vec'],
                   opt['slf_attn'], opt['dropout'])

    def forward(self, inputs):
        """inputs: dict with 'src_seq', 'lengths' and 'feat_seqs' (the latter
        only used when feature embeddings are enabled).
        Returns (enc_output, hidden)."""
        src_seq, lengths, feat_seqs = inputs['src_seq'], inputs['lengths'], inputs['feat_seqs']
        lengths = torch.LongTensor(lengths.data.view(-1).tolist())
        enc_input = self.word_emb(src_seq)
        if self.feature:
            feat_outputs = [feat_emb(feat_seq) for feat_seq, feat_emb in zip(feat_seqs, self.feat_embs)]
            feat_outputs = torch.cat(feat_outputs, dim=2)
            enc_input = torch.cat((enc_input, feat_outputs), dim=-1)
        # Pack so the RNN skips padding; unsorted batches are allowed.
        enc_input = pack(enc_input, lengths, batch_first=True, enforce_sorted=False)
        enc_output, hidden = self.rnn(enc_input, None)
        enc_output = unpack(enc_output, batch_first=True)[0]
        if self.slf_attn:
            mask = get_attn_key_pad_mask(seq_k=src_seq, seq_q=src_seq)
            enc_output, score = self.gated_slf_attn(enc_output, mask)
        return (enc_output, hidden)
def train_wsam(train_loader, model, criterion, optimizer, scheduler, args):
    """Run one training epoch with a closure-based (SAM-style) optimizer.

    The closure computes the loss and backpropagates; ``optimizer.step``
    drives it and returns the loss.  Returns the epoch's mean training loss.
    """
    t0 = time.time()
    running_loss = 0.0
    seen = 0
    model.train()
    for step, (data, target) in enumerate(train_loader):
        if args.use_gpu:
            data = data.cuda(non_blocking=args.pin_memory)
            target = target.cuda(non_blocking=args.pin_memory)

        def closure():
            # Forward + backward inside the closure so the optimizer can
            # re-evaluate it if its update requires multiple passes.
            loss = criterion(model(data), target)
            loss.backward()
            return loss

        batch_loss = optimizer.step(closure)
        optimizer.zero_grad()
        batch_size = target.size()[0]
        running_loss += batch_loss.item() * batch_size
        seen += batch_size
        # Per-iteration LR schedules step once per batch.
        if args.scheduler in ('cosine', 'multistep'):
            scheduler.step()
    logging.info('cost: {}'.format(time.time() - t0))
    return running_loss / seen
class Distribution():
    """Probability distribution induced by the general robust loss (Barron).

    The log partition function has no closed form, so a precomputed cubic
    spline approximation is loaded from a resource packaged with the module.
    """

    def __init__(self):
        # Spline data shipped inside the package; allow_pickle=False since the
        # archive contains plain arrays only.
        with resource_stream(__name__, 'resources/partition_spline.npz') as spline_file:
            with np.load(spline_file, allow_pickle=False) as f:
                self._spline_x_scale = torch.tensor(f['x_scale'])
                self._spline_values = torch.tensor(f['values'])
                self._spline_tangents = torch.tensor(f['tangents'])

    def log_base_partition_function(self, alpha):
        """Approximate log Z(alpha) at scale=1 via the precomputed spline.

        alpha must be nonnegative; the spline tables are moved to alpha's
        device/dtype before interpolation.
        """
        alpha = torch.as_tensor(alpha)
        assert (alpha >= 0).all()
        # Map alpha onto the spline's curve parameterization, then interpolate.
        x = partition_spline_curve(alpha)
        return cubic_spline.interpolate1d((x * self._spline_x_scale.to(x)), self._spline_values.to(x), self._spline_tangents.to(x))

    def nllfun(self, x, alpha, scale):
        """Negative log-likelihood: robust loss plus the log partition term
        (log(scale) + log Z(alpha)).  All three inputs must share a dtype and
        alpha/scale must be nonnegative.
        """
        x = torch.as_tensor(x)
        alpha = torch.as_tensor(alpha)
        scale = torch.as_tensor(scale)
        assert (alpha >= 0).all()
        assert (scale >= 0).all()
        float_dtype = x.dtype
        assert (alpha.dtype == float_dtype)
        assert (scale.dtype == float_dtype)
        loss = general.lossfun(x, alpha, scale, approximate=False)
        log_partition = (torch.log(scale) + self.log_base_partition_function(alpha))
        nll = (loss + log_partition)
        return nll

    def draw_samples(self, alpha, scale):
        """Rejection-sample from the distribution, elementwise over alpha/scale.

        Proposal: Cauchy(0, sqrt(2)).  Each candidate is accepted with
        probability exp(nll_bound - nll), where nll_bound is the proposal's
        NLL envelope at alpha=0 -- standard rejection sampling.  Loops until
        every element has been accepted; samples are drawn at scale=1 and
        rescaled at the end.
        """
        alpha = torch.as_tensor(alpha)
        scale = torch.as_tensor(scale)
        assert (alpha >= 0).all()
        assert (scale >= 0).all()
        float_dtype = alpha.dtype
        assert (scale.dtype == float_dtype)
        cauchy = torch.distributions.cauchy.Cauchy(0.0, np.sqrt(2.0))
        uniform = torch.distributions.uniform.Uniform(0, 1)
        samples = torch.zeros_like(alpha)
        accepted = torch.zeros(alpha.shape).type(torch.bool)
        while (not accepted.type(torch.uint8).all()):
            # One candidate per element: flattened count reshaped back.
            cauchy_sample = torch.reshape(cauchy.sample((np.prod(alpha.shape),)), alpha.shape)
            cauchy_sample = cauchy_sample.type(alpha.dtype)
            nll = self.nllfun(cauchy_sample, torch.as_tensor(alpha).to(cauchy_sample), torch.tensor(1).to(cauchy_sample))
            nll_bound = (general.lossfun(cauchy_sample, torch.tensor(0.0, dtype=cauchy_sample.dtype), torch.tensor(1.0, dtype=cauchy_sample.dtype), approximate=False) + self.log_base_partition_function(alpha))
            uniform_sample = torch.reshape(uniform.sample((np.prod(alpha.shape),)), alpha.shape)
            uniform_sample = uniform_sample.type(alpha.dtype)
            accept = (uniform_sample <= torch.exp((nll_bound - nll)))
            # NOTE(review): already-accepted elements can be overwritten by a
            # later accepted candidate; distributionally harmless -- confirm.
            samples = torch.where(accept, cauchy_sample, samples)
            accepted = (accepted | accept)
        # Candidates were drawn at scale=1; apply the requested scale now.
        samples *= scale
        return samples
class ResNet_Strategy(nn.Module):
    """ResNet backbone with five parallel linear heads predicting attack
    hyper-parameters (method, epsilon, iterations, step size, beta)."""

    def __init__(self, block, num_blocks, args):
        self.args = args
        super(ResNet_Strategy, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # All heads consume the same flattened feature vector.
        head_in = 512 * block.expansion * 4
        self.Attack_method = nn.Linear(head_in, len(args.attack_types))
        self.Attack_epsilon = nn.Linear(head_in, len(args.epsilon_types))
        self.Attack_iters = nn.Linear(head_in, len(args.attack_iters_types))
        self.Attack_step_size = nn.Linear(head_in, len(args.step_size_types))
        self.Attack_beta_size = nn.Linear(head_in, len(args.beta_types))

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack *num_blocks* blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        feat = F.avg_pool2d(feat, 4)
        feat = feat.view(feat.size(0), -1)
        return (self.Attack_method(feat), self.Attack_epsilon(feat),
                self.Attack_iters(feat), self.Attack_step_size(feat),
                self.Attack_beta_size(feat))
def build_mxnet_kl():
    """Write 'mxnet_kl.yaml': a quantization config using the KL activation
    algorithm, with the tuning workspace pointed at the module-level
    WORKSPACE_DIR.  The template is round-tripped through PyYAML so the file
    on disk is normalized YAML rather than the raw template string.
    """
    fake_yaml = '\n model:\n name: imagenet\n framework: mxnet\n\n quantization:\n model_wise:\n activation:\n algorithm: kl\n\n tuning:\n accuracy_criterion:\n relative: 0.01\n exit_policy:\n timeout: 0\n random_seed: 9527\n workspace:\n path: {}\n '.format(str(WORKSPACE_DIR))
    configs = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # Fix: removed the redundant f.close() that sat inside the with-block --
    # the context manager already closes the file.
    with open('mxnet_kl.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(configs, f)
class AutoModelForSequenceClassification(nn.Module):
    """Sequence classifier: pretrained encoder + dropout + linear head,
    producing log-probabilities (or NLL loss when labels are given)."""

    def __init__(self, args, Model, config, num_labels=2):
        super(AutoModelForSequenceClassification, self).__init__()
        self.num_labels = num_labels
        self.encoder = Model
        self.config = config
        self.dropout = nn.Dropout(args.drop_ratio)
        self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self._init_weights(self.classifier)

    def _init_weights(self, module):
        """BERT-style init: normal(0, initializer_range) weights, zero bias,
        unit LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def forward(self, input_ids, target_mask=None, token_type_ids=None, attention_mask=None, labels=None, head_mask=None):
        """Return NLL loss when *labels* is given, otherwise per-class
        log-probabilities from the encoder's pooled output."""
        encoded = self.encoder(input_ids, token_type_ids=token_type_ids,
                               attention_mask=attention_mask, head_mask=head_mask)
        pooled = self.dropout(encoded[1])
        log_probs = self.logsoftmax(self.classifier(pooled))
        if labels is None:
            return log_probs
        return nn.NLLLoss()(log_probs.view(-1, self.num_labels), labels.view(-1))
class CrystalPlateMail(BaseSuit):
    """Crystal plate mail: a heavy (weight 450) glass body suit with
    armour class 7."""

    def __init__(self):
        # All stats are fixed for this suit type.
        super().__init__('crystal plate mail', weight=450, armour_class=7, material=M.Glass)
def generate_scenario(num_hosts, num_services, **params):
    """Create a scenario with the given host/service counts via a fresh
    ScenarioGenerator; extra keyword arguments are forwarded unchanged."""
    return ScenarioGenerator().generate(num_hosts, num_services, **params)
def test_sim_trajectory():
    """Replay a recorded habitat-sim trajectory and check the simulator
    reproduces the recorded positions, rotations and obstacle distances."""
    with open('test/data/habitat-sim_trajectory_data.json', 'r') as f:
        test_trajectory = json.load(f)
    with init_sim() as sim:
        sim.reset()
        sim.set_agent_state(position=test_trajectory['positions'][0], rotation=test_trajectory['rotations'][0])
        for i, action in enumerate(test_trajectory['actions'][:-1]):
            action = HabitatSimActions[action]
            if i > 0:
                state = sim.get_agent_state()
                # Fix: assert the boolean directly instead of the old
                # '... is True' identity check (np.allclose already returns a
                # plain bool).
                assert np.allclose(np.array(test_trajectory['positions'][i], dtype=np.float32), state.position), 'mismatch in position at step {}'.format(i)
                # Recorded rotations are stored as [x, y, z, w] quaternions.
                assert np.allclose(np.array(test_trajectory['rotations'][i], dtype=np.float32), np.array([*state.rotation.imag, state.rotation.real])), 'mismatch in rotation at step {}'.format(i)
                max_search_radius = 2.0
                dist_to_obs = sim.distance_to_closest_obstacle(state.position, max_search_radius)
                assert np.isclose(dist_to_obs, test_trajectory['distances_to_obstacles'][i])
            assert sim.action_space.contains(action)
            sim.step(action)
class RawVideoExtractorCV2():
    """Decode frames from a video file with OpenCV and preprocess them into a
    tensor of resized, center-cropped, CLIP-normalized RGB frames."""

    def __init__(self, centercrop=False, size=224, framerate=-1):
        self.centercrop = centercrop  # kept for API compatibility; the transform always center-crops
        self.size = size
        self.framerate = framerate    # frames sampled per second; used as video_to_tensor's sample_fp
        self.transform = self._transform(self.size)

    def _transform(self, n_px):
        """Resize/crop/convert/normalize pipeline applied to each PIL frame."""
        # Fix: the normalization constants were corrupted -- the std vector was
        # all zeros (guaranteed division by zero in Normalize) and two mean
        # channels were zeroed.  Restored the standard CLIP image mean/std;
        # the surviving middle mean value (0.4578275) matches CLIP's.
        return Compose([
            Resize(n_px, interpolation=Image.BICUBIC),
            CenterCrop(n_px),
            (lambda image: image.convert('RGB')),
            ToTensor(),
            Normalize((0.48145466, 0.4578275, 0.40821073),
                      (0.26862954, 0.26130258, 0.27577711)),
        ])

    def video_to_tensor(self, video_file, preprocess, sample_fp=0, start_time=None, end_time=None):
        """Decode *video_file* and return {'video': tensor} of preprocessed frames.

        sample_fp: frames to keep per second (0 -> keep the native fps).
        start_time/end_time: optional integer second bounds; end is clamped to
        the clip duration.  Returns a (1,)-zeros tensor when no frame decodes.
        """
        if start_time is not None or end_time is not None:
            assert isinstance(start_time, int) and isinstance(end_time, int) and start_time > -1 and end_time > start_time
        assert sample_fp > -1
        cap = cv2.VideoCapture(video_file)
        frameCount = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        total_duration = (frameCount + fps - 1) // fps  # ceil(frames / fps) seconds
        start_sec, end_sec = 0, total_duration
        if start_time is not None:
            start_sec, end_sec = start_time, end_time if end_time <= total_duration else total_duration
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(start_time * fps))
        interval = 1
        if sample_fp > 0:
            interval = fps // sample_fp
        else:
            sample_fp = fps
        if interval == 0:
            interval = 1
        # Frame offsets to sample within each second of video.
        inds = [ind for ind in np.arange(0, fps, interval)]
        assert len(inds) >= sample_fp
        inds = inds[:sample_fp]
        ret = True
        images = []
        for sec in np.arange(start_sec, end_sec + 1):
            if not ret:
                break
            sec_base = int(sec * fps)
            for ind in inds:
                cap.set(cv2.CAP_PROP_POS_FRAMES, sec_base + ind)
                ret, frame = cap.read()
                if not ret:
                    break
                # OpenCV decodes BGR; convert before handing to PIL.
                frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                images.append(preprocess(Image.fromarray(frame_rgb).convert('RGB')))
        cap.release()
        if len(images) > 0:
            video_data = th.tensor(np.stack(images))
        else:
            video_data = th.zeros(1)
        return {'video': video_data}

    def get_video_data(self, video_path, start_time=None, end_time=None):
        """Convenience wrapper: decode *video_path* with this extractor's
        transform and configured frame rate."""
        image_input = self.video_to_tensor(video_path, self.transform, sample_fp=self.framerate, start_time=start_time, end_time=end_time)
        return image_input

    def process_raw_data(self, raw_video_data):
        """Reshape (..., C, H, W) frames into (N, 1, C, H, W) clip groups."""
        tensor_size = raw_video_data.size()
        tensor = raw_video_data.view(-1, 1, tensor_size[-3], tensor_size[-2], tensor_size[-1])
        return tensor

    def process_frame_order(self, raw_video_data, frame_order=0):
        """Reorder frames along dim 0: 0 = keep, 1 = reverse, 2 = random shuffle."""
        if frame_order == 0:
            pass
        elif frame_order == 1:
            reverse_order = np.arange(raw_video_data.size(0) - 1, -1, -1)
            raw_video_data = raw_video_data[reverse_order, ...]
        elif frame_order == 2:
            random_order = np.arange(raw_video_data.size(0))
            np.random.shuffle(random_order)
            raw_video_data = raw_video_data[random_order, ...]
        return raw_video_data
def setup_density_and_loaders(config, device):
    """Build the train/valid/test loaders and the density model described by
    *config*, moving the model to *device*.

    Returns (density, train_loader, valid_loader, test_loader).
    """
    loaders = get_loaders(
        dataset=config['dataset'],
        device=device,
        data_root=config['data_root'],
        make_valid_loader=config['early_stopping'],
        train_batch_size=config['train_batch_size'],
        valid_batch_size=config['valid_batch_size'],
        test_batch_size=config['test_batch_size'],
    )
    train_loader, valid_loader, test_loader = loaders
    # The density model's schema depends on the config; x_train fixes data shape.
    density = get_density(schema=get_schema(config=config), x_train=train_loader.dataset.x)
    density.to(device)
    return (density, train_loader, valid_loader, test_loader)
def imagenet_wide_resnet50_2_pretrained(output_dim):
    """ImageNet-pretrained wide_resnet50_2 with its fc head replaced to emit
    *output_dim* classes."""
    backbone = torchvision.models.wide_resnet50_2(pretrained=True)
    return _replace_fc(backbone, output_dim)
def get_num_channels(input_shape_or_channels):
    """Accept either a channels-first shape sequence or a bare channel count
    and return the number of channels."""
    if not hasattr(input_shape_or_channels, '__iter__'):
        # Already a plain channel count.
        return input_shape_or_channels
    return input_shape_or_channels[0]
class CustomLVISResults(LVISResults):
    """LVISResults variant that additionally supports capping the number of
    detections per (image, class) pair before the per-image cap is applied."""

    def __init__(self, lvis_gt, results, max_dets=300, max_dets_per_class=-1):
        """lvis_gt: a LVIS instance or path to an annotation json;
        results: a result list or path to a result json;
        max_dets: per-image detection cap (negative disables);
        max_dets_per_class: per-image-per-class cap (negative disables).
        """
        if isinstance(lvis_gt, LVIS):
            self.dataset = deepcopy(lvis_gt.dataset)
        elif isinstance(lvis_gt, str):
            self.dataset = self._load_json(lvis_gt)
        else:
            raise TypeError('Unsupported type {} of lvis_gt.'.format(lvis_gt))
        self.logger = logging.getLogger(__name__)
        self.logger.info('Loading and preparing results.')
        if isinstance(results, str):
            result_anns = self._load_json(results)
        else:
            # Fix: Logger.warn is a deprecated alias of Logger.warning.
            self.logger.warning('Assuming user provided the results in correct format.')
            result_anns = results
        assert isinstance(result_anns, list), 'results is not a list.'
        if max_dets_per_class >= 0:
            result_anns = self.limit_dets_per_class_per_image(result_anns, max_dets_per_class)
        if max_dets >= 0:
            result_anns = self.limit_dets_per_image(result_anns, max_dets)
        if 'bbox' in result_anns[0]:
            # Box results: synthesize a polygon segmentation and area per ann.
            # (Renamed the loop variable from 'id', which shadowed the builtin.)
            for idx, ann in enumerate(result_anns):
                x1, y1, w, h = ann['bbox']
                x2 = x1 + w
                y2 = y1 + h
                if 'segmentation' not in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = w * h
                ann['id'] = idx + 1
        elif 'segmentation' in result_anns[0]:
            # Mask results: derive area/bbox from the RLE segmentation.
            for idx, ann in enumerate(result_anns):
                ann['area'] = mask_utils.area(ann['segmentation'])
                if 'bbox' not in ann:
                    ann['bbox'] = mask_utils.toBbox(ann['segmentation'])
                ann['id'] = idx + 1
        self.dataset['annotations'] = result_anns
        self._create_index()
        img_ids_in_result = [ann['image_id'] for ann in result_anns]
        assert set(img_ids_in_result) == (set(img_ids_in_result) & set(self.get_img_ids())), 'Results do not correspond to current LVIS set.'

    def limit_dets_per_class_per_image(self, anns, max_dets_per_class):
        """Keep at most *max_dets_per_class* highest-scoring annotations for
        each (image_id, category_id) pair; order within groups follows score."""
        img_cls_ann = defaultdict(list)
        for ann in anns:
            img_cls_ann[(ann['image_id'], ann['category_id'])].append(ann)
        for (img_id, cls_id), _anns in img_cls_ann.items():
            if len(_anns) <= max_dets_per_class:
                continue
            _anns = sorted(_anns, key=lambda ann: ann['score'], reverse=True)
            img_cls_ann[(img_id, cls_id)] = _anns[:max_dets_per_class]
        return [ann for anns in img_cls_ann.values() for ann in anns]
def extract_davis(epochs):
    """Summarize DAVIS evaluation CSVs for each epoch and report the best
    epoch by J&F-Mean and by J-Mean.

    Reads result/<args.dataset>/<epoch>/global_results-val.csv for each entry
    of *epochs* (relies on the module-level *args*).
    """
    print('\t \tJ&F-Mean,J-Mean,J-Recall,J-Decay,F-Mean,F-Recall,F-Decay')
    # One accumulator list per metric column, indexed in step with *epochs*.
    JFm, Jm, Jr, Jd, Fm, Fr, Fd = [], [], [], [], [], [], []
    for e in epochs:
        full_path = join('result', args.dataset, e, 'global_results-val.csv')
        # Fix: the old code leaked the file handle (open(...).readlines()).
        with open(full_path, 'r') as fh:
            record = fh.readlines()
        # NOTE(security): eval() on file content -- acceptable only because the
        # CSV row is produced by our own evaluation script; never point this at
        # untrusted files.
        record = eval(record[1])
        print('{} {} {} {} {} {} {} {}'.format(e, record[0], record[1], record[2], record[3], record[4], record[5], record[6]))
        JFm.append(record[0])
        Jm.append(record[1])
        Jr.append(record[2])
        Jd.append(record[3])
        Fm.append(record[4])
        Fr.append(record[5])
        Fd.append(record[6])

    def _report(argidx):
        # Print the full metric row for the epoch at index *argidx*.
        print('{} {} {} {} {} {} {} {}'.format(epochs[argidx], JFm[argidx], Jm[argidx], Jr[argidx], Jd[argidx], Fm[argidx], Fr[argidx], Fd[argidx]))

    print('> sort with J&F: <')
    _report(np.argmax(np.array(JFm)))
    print('> sort with Jm: <')
    _report(np.argmax(np.array(Jm)))
class RecDataSetTest(tf.test.TestCase):
    """Smoke test for rec_dataset.RecDataset on the bundled testdata files
    (TF1-style dataset/iterator API)."""

    def testRecDataSet(self):
        # Resolve the fixture directory relative to this test file.
        dir_path = os.path.dirname(os.path.abspath(__file__))
        data_dir = os.path.join(dir_path, 'testdata')
        # Positional args: batch file, '' , feature dict file, 0, 2, plus
        # 3 negative items per positive -- TODO confirm positional meanings
        # against RecDataset's signature.
        rec_data_set = rec_dataset.RecDataset(os.path.join(data_dir, 'batch.txt'), '', os.path.join(data_dir, 'feature_dict.txt'), 0, 2, neg_item_count=3)
        rec_data_set = rec_data_set.batch(3)
        rec_data_set = rec_data_set.repeat(10)
        # TF1 one-shot iterator; elements are byte strings, printed decoded.
        iterator = rec_data_set.make_one_shot_iterator()
        next_element = iterator.get_next()
        with self.test_session() as sess:
            try:
                # Pull two batches; running out of data is acceptable.
                for i in range(2):
                    for e in next_element.eval():
                        print(e.decode())
            except tf.errors.OutOfRangeError:
                print('OutOfRangeError')
def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
    """Build a pydot graph of a caffe NetParameter.

    caffe_net: parsed NetParameter proto; rankdir: graph layout direction;
    label_edges: annotate layer->blob edges with layer info; phase: when
    given, only layers included in that phase (per include/exclude rules)
    are drawn.  Returns the pydot.Dot graph.
    """
    pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net', graph_type='digraph', rankdir=rankdir)
    pydot_nodes = {}
    pydot_edges = []
    for layer in caffe_net.layer:
        if phase is not None:
            included = False
            if len(layer.include) == 0:
                included = True
            if len(layer.include) > 0 and len(layer.exclude) > 0:
                raise ValueError('layer ' + layer.name + ' has both include and exclude specified.')
            for layer_phase in layer.include:
                included = included or layer_phase.phase == phase
            for layer_phase in layer.exclude:
                included = included and not layer_phase.phase == phase
            if not included:
                continue
        node_label = get_layer_label(layer, rankdir)
        node_name = '%s_%s' % (layer.name, layer.type)
        if len(layer.bottom) == 1 and len(layer.top) == 1 and layer.bottom[0] == layer.top[0]:
            # In-place layer (e.g. ReLU): same blob in and out, neuron style.
            pydot_nodes[node_name] = pydot.Node(node_label, **NEURON_LAYER_STYLE)
        else:
            # Fix: copy the shared default style dict before customizing it.
            # The old code mutated the module-level LAYER_STYLE_DEFAULT, so
            # every subsequent layer inherited the previous layer's fillcolor.
            layer_style = dict(LAYER_STYLE_DEFAULT)
            layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
            pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
        for bottom_blob in layer.bottom:
            pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob, **BLOB_STYLE)
            edge_label = '""'
            pydot_edges.append({'src': bottom_blob + '_blob', 'dst': node_name, 'label': edge_label})
        for top_blob in layer.top:
            pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % top_blob)
            if label_edges:
                edge_label = get_edge_label(layer)
            else:
                edge_label = '""'
            pydot_edges.append({'src': node_name, 'dst': top_blob + '_blob', 'label': edge_label})
    # Materialize nodes first, then edges between them.
    for node in pydot_nodes.values():
        pydot_graph.add_node(node)
    for edge in pydot_edges:
        pydot_graph.add_edge(pydot.Edge(pydot_nodes[edge['src']], pydot_nodes[edge['dst']], label=edge['label']))
    return pydot_graph
def get_action_for_move(agent_position: Tuple[(int, int)], agent_direction: Grid4TransitionsEnum, next_agent_position: Tuple[(int, int)], next_agent_direction: int, rail: GridTransitionMap) -> Optional[RailEnvActions]:
    """Map a desired (position, direction) -> (next position, next direction)
    move to the RailEnvActions that realizes it on *rail*.

    Returns None (implicitly, by falling off the end) when no legal single
    action produces the requested move.
    """
    possible_transitions = rail.get_transitions(*agent_position, agent_direction)
    num_transitions = np.count_nonzero(possible_transitions)
    if rail.is_dead_end(agent_position):
        # Dead end: MOVE_FORWARD turns the agent around (direction + 2 mod 4).
        valid_action = RailEnvActions.MOVE_FORWARD
        new_direction = ((agent_direction + 2) % 4)
        if possible_transitions[new_direction]:
            new_position = get_new_position(agent_position, new_direction)
            if ((new_position == next_agent_position) and (new_direction == next_agent_direction)):
                return valid_action
    elif (num_transitions == 1):
        # Single-exit cell (straight or curve): FORWARD follows the track
        # whichever of the three relative headings (left/straight/right) it
        # leaves by.
        valid_action = RailEnvActions.MOVE_FORWARD
        for new_direction in [((agent_direction + i) % 4) for i in range((- 1), 2)]:
            if possible_transitions[new_direction]:
                new_position = get_new_position(agent_position, new_direction)
                if ((new_position == next_agent_position) and (new_direction == next_agent_direction)):
                    return valid_action
    else:
        # Switch cell: pick LEFT/FORWARD/RIGHT by the relative heading of
        # each feasible transition.
        for new_direction in [((agent_direction + i) % 4) for i in range((- 1), 2)]:
            if possible_transitions[new_direction]:
                if (new_direction == agent_direction):
                    valid_action = RailEnvActions.MOVE_FORWARD
                    new_position = get_new_position(agent_position, new_direction)
                    if ((new_position == next_agent_position) and (new_direction == next_agent_direction)):
                        return valid_action
                elif (new_direction == ((agent_direction + 1) % 4)):
                    valid_action = RailEnvActions.MOVE_RIGHT
                    new_position = get_new_position(agent_position, new_direction)
                    if ((new_position == next_agent_position) and (new_direction == next_agent_direction)):
                        return valid_action
                elif (new_direction == ((agent_direction - 1) % 4)):
                    valid_action = RailEnvActions.MOVE_LEFT
                    new_position = get_new_position(agent_position, new_direction)
                    if ((new_position == next_agent_position) and (new_direction == next_agent_direction)):
                        return valid_action
def test_batch_all_triplet_loss():
    """Check batch_all_triplet_loss against a brute-force O(n^3) reference
    over all (anchor, positive, negative) triplets, for both squared and
    unsquared distances."""
    num_data = 10
    feat_dim = 6
    margin = 0.2
    num_classes = 5
    embeddings = np.random.rand(num_data, feat_dim).astype(np.float32)
    labels = np.random.randint(0, num_classes, size=num_data).astype(np.float32)
    for squared in [True, False]:
        pdist_matrix = pairwise_distance_np(embeddings, squared=squared)
        loss_np = 0.0
        num_positives = 0.0
        num_valid = 0.0
        # Enumerate every ordered triplet; count only distinct, label-valid ones.
        for anchor in range(num_data):
            for pos in range(num_data):
                for neg in range(num_data):
                    distinct = anchor != pos and anchor != neg and pos != neg
                    valid = labels[anchor] == labels[pos] and labels[anchor] != labels[neg]
                    if not (distinct and valid):
                        continue
                    num_valid += 1.0
                    hinge = np.maximum(0.0, pdist_matrix[anchor][pos] - pdist_matrix[anchor][neg] + margin)
                    loss_np += hinge
                    num_positives += hinge > 0
        # Batch-all averages only over triplets with a positive hinge.
        loss_np /= num_positives
        loss_tf_val, fraction_val = BatchHardTripletLoss.batch_all_triplet_loss(
            torch.from_numpy(labels), torch.from_numpy(embeddings), margin, squared=squared)
        assert np.allclose(loss_np, loss_tf_val)
        assert np.allclose(num_positives / num_valid, fraction_val)
def prepare_environment(seed):
    """Seed Python's, NumPy's and TensorFlow's RNGs for reproducibility.

    Uses the TF1-style graph-level seed (tf.set_random_seed); op-level seeds
    are not set here.
    """
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)
def transliteration_cleaners(text):
    """Cleaning pipeline for non-English text: fold to ASCII, lowercase,
    collapse runs of whitespace."""
    for clean in (convert_to_ascii, lowercase, collapse_whitespace):
        text = clean(text)
    return text