code
stringlengths
101
5.91M
class GCN(nn.Module):
    """Three-layer graph convolutional network.

    Args:
        nfeat: input feature dimension.
        nhid: hidden dimension (shared by both hidden layers).
        nclass: output dimension (number of classes).
        dropout: dropout probability applied after the first layer.
        indep_weights: forwarded to each GraphConvolution layer.
    """

    def __init__(self, nfeat, nhid, nclass, dropout=0, indep_weights=True):
        super(GCN, self).__init__()
        self.gc1 = GraphConvolution(nfeat, nhid, indep_weights=indep_weights)
        self.gc2 = GraphConvolution(nhid, nhid, indep_weights=indep_weights)
        self.gc4 = GraphConvolution(nhid, nclass, indep_weights=indep_weights)
        self.dropout = dropout

    def forward(self, x, adj, labels):
        x = F.relu(self.gc1(x, adj, labels))
        # Bug fix: F.dropout defaults to training=True, so the original applied
        # dropout at evaluation time too.  Gate it on the module's mode.
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, adj, labels))
        x = self.gc4(x, adj, labels)
        return x
def densepose_chart_predictor_output_to_result(predictor_output: DensePoseChartPredictorOutput, boxes: Boxes) -> DensePoseChartResult:
    """Convert one chart predictor output plus its box into a DensePoseChartResult.

    Exactly one predictor output and one box are accepted; segmentation and UV
    maps are resampled into the integer XYWH version of the box.
    """
    assert ((len(predictor_output) == 1) and (len(boxes) == 1)), f'Predictor output to result conversion can operate only single outputs, got {len(predictor_output)} predictor outputs and {len(boxes)} boxes'
    # Clone before the in-place-style conversion, then snap to integer XYWH.
    xywh_abs = BoxMode.convert(boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
    box_xywh = make_int_box(xywh_abs[0])
    # Per-pixel part labels from the combined coarse+fine segmentation.
    labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
    uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
    return DensePoseChartResult(labels=labels, uv=uv)
def self_gen_net(args, data=None):
    """Build a self_G_net from *args* and load *data* (a state dict) into its generator."""
    net = self_G_net(args)
    net.G.load_state_dict(data)
    return net
def main():
    """Run each Lambda demo in order, announcing it first."""
    demos = (
        ('Lambda with lambda', Lambda_with_lambda),
        ('Lambda function', Lambda_function),
        ('Backend for Lambda', Backend_for_Lambda),
        ('TF for Lambda', TF_for_Lamda),  # sic: the callee is spelled 'Lamda' at its definition
    )
    for title, demo in demos:
        print(title)
        demo()
def Split_Parenthesis_At_End_of_URL(input_word):
    """Split a trailing closing bracket off a token that contains a URL.

    Returns [input_word] unchanged unless the token contains a URL with
    unbalanced parentheses and ends in ')', ']' or '}', in which case the
    trailing bracket becomes its own token: [url, bracket].
    """
    new_token = [input_word]
    if (len(input_word) <= 0):
        return new_token
    url_rule = re.compile(url)  # `url` is a module-level URL pattern string
    url_words = url_rule.findall(input_word)
    word_wo_balanced_paren = []
    for word in url_words:
        # Keep only URLs whose parentheses are NOT balanced — a balanced pair
        # is treated as part of the URL itself (e.g. wiki-style links).
        bal_paren_word = find_word_w_balanced_paren(word)
        if (len(bal_paren_word) == 0):
            word_wo_balanced_paren.append(word)
    if ((len(url_words) > 0) and (len(word_wo_balanced_paren) > 0)):
        last = input_word[-1]
        # Bug fixes vs. the original: the '}' case compared the whole token
        # (input_word == '}') instead of its last character, and the split-off
        # token was hard-coded to ')' even when the bracket was ']' or '}'.
        if last in (')', ']', '}'):
            new_token = [input_word[:-1], last]
    return new_token
def load_args():
    """Parse CLI arguments for supervised GCKN training and prepare the output tree.

    Side effects: forces use_cuda on when CUDA is available; when --outdir is
    given, creates the nested results directories, rewrites args.outdir to the
    fold-specific leaf directory, and sets args.save_logs accordingly.
    """
    parser = argparse.ArgumentParser(description='Supervised GCKN', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument('--dataset', type=str, default='ZINC', help='name of dataset')
    parser.add_argument('--epochs', type=int, default=300, help='number of epochs')
    parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate')
    parser.add_argument('--batch-size', type=int, default=128, help='batch size')
    parser.add_argument('--path-size', type=int, nargs='+', default=[4], help='path sizes for layers')
    parser.add_argument('--hidden-size', type=int, nargs='+', default=[128], help='number of filters for layers')
    parser.add_argument('--pooling', type=str, default='sum', help='local path pooling for each node')
    parser.add_argument('--global-pooling', type=str, default='sum', help='global node pooling for each graph')
    parser.add_argument('--aggregation', action='store_true', help='aggregate all path features until path size')
    parser.add_argument('--kernel-funcs', type=str, nargs='+', default=None, help='kernel functions')
    parser.add_argument('--sigma', type=float, nargs='+', default=[0.5], help='sigma of expo (Gaussian) kernels for layers')
    parser.add_argument('--sampling-paths', type=int, default=300000, help='number of paths to sample for unsup training')
    parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay for classifier')
    parser.add_argument('--alternating', action='store_true', help='use alternating training')
    parser.add_argument('--walk', action='store_true', help='use walk instead of path')
    parser.add_argument('--use-cuda', action='store_true', help='use cuda or not')
    parser.add_argument('--outdir', type=str, default='', help='output path')
    parser.add_argument('--batch-norm', action='store_true', help='use batch norm')
    args = parser.parse_args()
    if torch.cuda.is_available():
        args.use_cuda = True
    args.save_logs = False
    if args.outdir != '':
        args.save_logs = True
        # Build <outdir>/gckn_sup/<dataset>[/aggregation]/<hyperparams>/fold-<seed>,
        # creating each level as we go (best effort; see helper).
        outdir = args.outdir
        _makedir_quiet(outdir)
        outdir = outdir + '/gckn_sup'
        _makedir_quiet(outdir)
        outdir = outdir + '/{}'.format(args.dataset)
        _makedir_quiet(outdir)
        if args.aggregation:
            outdir = outdir + '/aggregation'
            _makedir_quiet(outdir)
        outdir = outdir + '/{}_{}_{}_{}_{}_{}_{}'.format(args.path_size, args.hidden_size, args.pooling, args.global_pooling, args.sigma, args.weight_decay, args.lr)
        _makedir_quiet(outdir)
        outdir = outdir + '/fold-{}'.format(args.seed)
        _makedir_quiet(outdir)
        args.outdir = outdir
    return args


def _makedir_quiet(path):
    """Create *path* if missing; swallow errors (parallel runs may race to create it)."""
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except Exception:
            pass
def main():
    """Per-task entry point for a SLURM-scheduled distributed run.

    Derives rank/world-size from SLURM environment variables, scales the batch
    size down by the world size, and hands off to single_process_main().
    """
    args = options.parse_distributed_args()
    arg_map = vars(args)
    # These launcher-only options must not reach the training code.
    for key in ('master_addr', 'master_port', 'nnodes', 'nproc_per_node', 'node_rank'):
        arg_map.pop(key)
    env = os.environ
    nnodes = int(env['SLURM_NNODES'])
    dist_world_size = int(env['SLURM_NTASKS'])
    args.rank = int(env['SLURM_PROCID'])
    args.local_rank = int(env['SLURM_LOCALID'])
    print('start process: rank={}({}), master addr={}, port={}, nnodes={}, world size={}'.format(args.rank, args.local_rank, env['MASTER_ADDR'], env['MASTER_PORT'], nnodes, dist_world_size))
    env['WORLD_SIZE'] = str(dist_world_size)
    # The configured batch size is global; each process gets its share.
    args.batch_size = args.batch_size // dist_world_size
    single_process_main(args)
def parse_args():
    """Parse command-line arguments for MMDet testing/evaluation.

    Also mirrors --local_rank into the LOCAL_RANK environment variable and
    migrates the deprecated --options flag onto --eval-options.

    Returns:
        argparse.Namespace with the parsed options.

    Raises:
        ValueError: if both --options and --eval-options are given.
    """
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--work-dir', help='the directory to save the file containing evaluation metrics')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument('--ceph', action='store_true', help='whether not to evaluate the checkpoint during training')
    # Help-string typo fixes below: 'increasethe' -> 'increase the',
    # 'isuseful' -> 'is useful' (the stray newline was an extraction artifact).
    parser.add_argument('--fuse-conv-bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--gpu-ids', type=int, nargs='+', help='(Deprecated, please use --gpu-id) ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--gpu-id', type=int, default=0, help='id of gpu to use (only applicable to non-distributed testing)')
    parser.add_argument('--format-only', action='store_true', help='Format the output results without perform evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--show-dir', help='directory where painted images will be saved')
    parser.add_argument('--show-score-thr', type=float, default=0.3, help='score threshold (default: 0.3)')
    parser.add_argument('--gpu-collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu-collect is not specified')
    parser.add_argument('--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    parser.add_argument('--options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function (deprecate), change to --eval-options instead.')
    parser.add_argument('--eval-options', nargs='+', action=DictAction, help='custom options for evaluation, the key-value pair in xxx=yyy format will be kwargs for dataset.evaluate() function')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        # torch.distributed launchers read LOCAL_RANK from the environment.
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    if args.options and args.eval_options:
        raise ValueError('--options and --eval-options cannot be both specified, --options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def testActorLoss():
    """Unit test: SAC._actor_objective computed from dummy policy/critic doubles."""
    policy = DummyActorPolicy()
    # env_spec / replay_buffer are never touched by _actor_objective, so None is safe here.
    sac = SAC(env_spec=None, policy=policy, qf1=DummyCriticNet(), qf2=DummyCriticNet(), replay_buffer=None, discount=1, buffer_batch_size=2, target_entropy=3.0, initial_log_entropy=0, optimizer=MagicMock, max_path_length=10, gradient_steps_per_itr=1)
    observations = torch.Tensor([[1.0, 2.0], [3.0, 4.0]])
    action_dists = policy(observations)[0]
    actions = torch.Tensor(action_dists.rsample_with_pre_tanh_value())
    samples_data = dict(observation=observations)
    log_pi = action_dists.log_prob(actions)
    # NOTE(review): this closed-form value depends on the hard-coded outputs of
    # DummyActorPolicy/DummyCriticNet defined elsewhere in this file — confirm
    # against those definitions before changing either side.
    expected_loss = ((((2 * 10) - (2 + 1)) - (4 + 1)) / 2)
    loss = sac._actor_objective(samples_data, actions, log_pi)
    assert np.all(np.isclose(loss, expected_loss))
def pack_data(frame_resize: np.ndarray, batch_input_shape: Tuple[(int, int)], ori_shape: Tuple[(int, int)]) -> dict:
    """Wrap a resized frame into the {'inputs', 'data_sample'} dict the detector expects.

    The frame must already match batch_input_shape; scale_factor records the
    per-axis ratio between the resized and original shapes.
    """
    assert (frame_resize.shape[:2] == batch_input_shape)
    sample = DetDataSample()
    scale = ((batch_input_shape[0] / ori_shape[0]), (batch_input_shape[1] / ori_shape[1]))
    sample.set_metainfo({'img_shape': batch_input_shape, 'ori_shape': ori_shape, 'scale_factor': scale})
    # HWC numpy image -> CHW tensor for the model input.
    tensor = torch.from_numpy(frame_resize).permute((2, 0, 1))
    return {'inputs': tensor, 'data_sample': sample}
class Sqrt(Layer):
    """Element-wise square-root layer — thin wrapper over the BigDL backend.

    The parent ``Layer`` constructor receives ``None`` so the backend node is
    created from this class's name; ``bigdl_type`` selects the backend numeric
    precision ('float'/'double').
    """

    def __init__(self, bigdl_type='float'):
        super(Sqrt, self).__init__(None, bigdl_type)
def save_mat_props(mat: Union[(bpy.types.Material, str)]) -> None:
    """Snapshot the properties of *mat* into the module-level _SAVED_MATERIALS cache.

    NOTE(review): the annotation admits a str, but the body reads ``mat.name``
    and passes ``mat`` to ``get_mat_props`` — a plain string would fail here;
    confirm callers only pass Material objects.
    """
    log.info(f'Saving material properties for {mat.name}')
    _SAVED_MATERIALS[mat.name] = get_mat_props(mat)
def get_dataset(args):
    """Build (train, query, gallery) datasets for args.dataset.

    gallery is populated only for 'inshop'; unrecognised dataset names yield
    (None, None, None).
    """
    train = query = gallery = None
    name = args.dataset
    root = args.data_path
    if name == 'cub200':
        train = Cub200Dataset(root, split='train')
        query = Cub200Dataset(root, split='test')
    if name == 'sop':
        train = SOPDataset(root, split='train')
        query = SOPDataset(root, split='test')
    if name == 'inshop':
        train = InShopDataset(root, split='train')
        query = InShopDataset(root, split='query')
        gallery = InShopDataset(root, split='gallery')
    return (train, query, gallery)
def resnet50(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> ResNet:
    """Build the local ResNet wrapper around torchvision's resnet50.

    All arguments are forwarded to torchvision.models.resnet50 unchanged
    (pretrained-weights toggle, download progress bar, extra kwargs).
    """
    return ResNet(torchvision.models.resnet50(pretrained, progress, **kwargs))
class FeedForward(nn.Module):
    """GEGLU-gated MLP block: Linear(dim -> 2*mult*dim) -> GEGLU -> Dropout -> Linear(mult*dim -> dim)."""

    def __init__(self, dim, mult=4, dropout=0.0):
        super().__init__()
        inner = dim * mult
        self.net = nn.Sequential(
            nn.Linear(dim, inner * 2),  # doubled width: GEGLU halves it again
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(inner, dim),
        )

    def forward(self, x):
        return self.net(x)
class InputFeatures(object):
    """Feature container for one multiple-choice example.

    Each entry of *choices_features* is an (input_ids, input_mask, segment_ids)
    triple; it is stored as a list of dicts keyed by those names.
    """

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        self.choices_features = [
            dict(zip(('input_ids', 'input_mask', 'segment_ids'), triple))
            for triple in choices_features
        ]
        self.label = label
def main():
    """CLI entry point: parse dataset-preparation options and hand off to process()."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data-root', '-d', required=True, type=str)
    parser.add_argument('--vocab-type', default='unigram', required=True, type=str, choices=['bpe', 'unigram', 'char'])
    parser.add_argument('--vocab-size', default=8000, type=int)
    parser.add_argument('--cmvn-type', default='utterance', choices=['global', 'utterance'], help='The type of cepstral mean and variance normalization')
    process(parser.parse_args())
def encode(v, enable_max, **kwargs):
    """Stochastically sparsify tensor *v* for compressed communication.

    Each element is kept with probability |v_i| / norm (larger entries are kept
    more often); kept entries are represented by their flat index and sign.

    Args:
        v: input tensor (CPU or CUDA).
        enable_max: normalise by max(|v|) (with CUDA syncs around the
            reduction) instead of the L2 norm.
        **kwargs: unused; accepted for call-site compatibility.

    Returns:
        (payload, timing): payload has keys 'signs', 'size', 'selected',
        'norm'; timing holds wall-clock durations of the stages.
    """
    if enable_max:
        torch.cuda.synchronize()  # keep the timing of the reduction honest
        norm = torch.max(torch.abs(v))
        torch.cuda.synchronize()
    else:
        norm = torch.norm(v)
    w = v.view(-1)
    t = [time.time()]
    signs = torch.sign(w).int()
    probs = torch.abs(w) / norm
    # Bug fix: torch.masked_select requires a BoolTensor on modern PyTorch;
    # the original .byte() (uint8) mask is deprecated/rejected there.
    mask = torch.distributions.Bernoulli(probs).sample().bool()
    t += [time.time()]
    idx = torch.arange(0, len(w))
    t += [time.time()]
    if v.is_cuda:
        idx = idx.cuda()
        mask = mask.cuda()
    t += [time.time()]
    selected = torch.masked_select(idx, mask).long()
    signs = torch.masked_select(signs, mask)
    t += [time.time()]
    data = {'masking_time': (t[-1] - t[-2]), 'gen_mask_time': (t[1] - t[0]), 'to_gpu_time': (t[-2] - t[-3])}
    return ({'signs': signs, 'size': v.size(), 'selected': selected, 'norm': norm}, data)
class QqpProcessor(DataProcessor):
    """Processor for the QQP (Quora Question Pairs) data set (GLUE)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # This processor API is deprecated upstream; warn once at construction.
        warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning)

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a TensorFlow tensor dict (idx/question1/question2/label)."""
        return InputExample(tensor_dict['idx'].numpy(), tensor_dict['question1'].numpy().decode('utf-8'), tensor_dict['question2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))

    def get_train_examples(self, data_dir):
        """Examples from <data_dir>/train.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Examples from <data_dir>/dev.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_test_examples(self, data_dir):
        """Examples from <data_dir>/test.tsv."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test')

    def get_labels(self):
        """QQP is binary: not-duplicate ('0') or duplicate ('1')."""
        return ['0', '1']

    def _create_examples(self, lines, set_type):
        """Convert raw TSV rows into InputExamples; the header row and truncated rows are skipped."""
        test_mode = (set_type == 'test')
        # Test files use a different column layout than train/dev files.
        q1_index = (1 if test_mode else 3)
        q2_index = (2 if test_mode else 4)
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue  # skip the TSV header row
            guid = ('%s-%s' % (set_type, line[0]))
            try:
                text_a = line[q1_index]
                text_b = line[q2_index]
                label = (None if test_mode else line[5])
            except IndexError:
                continue  # malformed/truncated row
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
def create_parser():
    """Build and return the compiled parser, or None on an IO error.

    Side effects: stores the grammar in the module-level _grammar_content and
    warms the compiled-parser cache for the full grammar under _default_key.
    """
    parser = None
    try:
        grammar = LA
        simplified_grammar = SIMPLIFIED
        global _grammar_content
        _grammar_content = grammar
        # The returned parser is built from the simplified grammar; the full
        # grammar is compiled only for its cache side effect (result discarded).
        parser = get_compiled_parser(simplified_grammar)
        get_compiled_parser(_grammar_content, _default_key)
    except IOError:
        print('IO Error!')
    return parser
class VolumetricFullConvolution(Layer):
    """3D full (transposed) convolution — thin wrapper over the BigDL backend.

    Parameters mirror the backend operator: kernel (kt, kw, kh), stride
    (dt, dw, dh), padding (pad_t, pad_w, pad_h), output adjustment
    (adj_t, adj_w, adj_h), plus grouping, bias toggle and regularizers.
    """

    def __init__(self, n_input_plane, n_output_plane, kt, kw, kh, dt=1, dw=1, dh=1, pad_t=0, pad_w=0, pad_h=0, adj_t=0, adj_w=0, adj_h=0, n_group=1, no_bias=False, wRegularizer=None, bRegularizer=None, bigdl_type='float'):
        # All arguments are forwarded positionally to the backend constructor.
        super(VolumetricFullConvolution, self).__init__(None, bigdl_type, n_input_plane, n_output_plane, kt, kw, kh, dt, dw, dh, pad_t, pad_w, pad_h, adj_t, adj_w, adj_h, n_group, no_bias, wRegularizer, bRegularizer)

    def set_init_method(self, weight_init_method=None, bias_init_method=None):
        """Set backend weight/bias initialisation; returns self for chaining."""
        callBigDlFunc(self.bigdl_type, 'setInitMethod', self.value, weight_init_method, bias_init_method)
        return self
def accuracy(x, y):
    """Percentage of rows in *x* whose argmax matches the label in *y*, as a Python float."""
    predictions = torch.argmax(x, dim=-1)
    correct = torch.sum(predictions == y)
    return ((100 * correct) / len(y)).item()
class NoopResetEnv(gym.Wrapper):
    """Atari wrapper: on reset, take a random number (1..noop_max) of NOOP steps.

    This provides the randomized initial states used by the DQN family of setups.
    """

    def __init__(self, env, noop_max=30):
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, forces an exact noop count instead of sampling one.
        self.override_num_noops = None
        self.noop_action = 0
        # The wrapped env must expose NOOP as action 0.
        assert (env.unwrapped.get_action_meanings()[0] == 'NOOP')

    def reset(self, **kwargs):
        """Reset the env, then execute the noops; re-reset if an episode ends mid-noop."""
        self.env.reset(**kwargs)
        if (self.override_num_noops is not None):
            noops = self.override_num_noops
        else:
            # NOTE(review): np_random.randint is the legacy gym RNG API; newer
            # gym versions renamed it to `integers` — confirm the pinned version.
            noops = self.unwrapped.np_random.randint(1, (self.noop_max + 1))
        assert (noops > 0)
        obs = None
        for _ in range(noops):
            (obs, _, done, _) = self.env.step(self.noop_action)
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
class RandomAgent():
    """Trivial baseline agent: ignores observations and always emits action 2.

    step/save/load are no-ops kept so the class satisfies the common agent
    interface.
    """

    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size

    def act(self, state):
        # Constant policy regardless of the observed state.
        return 2

    def step(self, memories):
        return  # no learning

    def save(self, filename):
        return  # nothing to persist

    def load(self, filename):
        return  # nothing to restore
def parse_ground_truth(example: Dict[(str, Any)], data_name):
    """Extract (gt_cot, gt_ans) — chain-of-thought text and final answer — from *example*.

    The extraction rule depends on *data_name*; both values are normalised at
    the end (str/strip + strip_string).  Raises NotImplementedError for
    unknown dataset names.
    """
    # Pre-extracted ground truth short-circuits the dataset-specific logic.
    if ('gt_cot' in example):
        return (example['gt_cot'], strip_string(example['gt']))
    if (data_name in ['math', 'ocw', 'math-minival']):
        gt_cot = example['solution']
        gt_ans = extract_answer(gt_cot)
    elif (data_name == 'gsm8k'):
        # gsm8k answers look like "<cot> #### <answer>".
        (gt_cot, gt_ans) = example['answer'].split('####')
    elif (data_name == 'gsm-hard'):
        (gt_cot, gt_ans) = (example['code'], example['target'])
    elif (data_name == 'svamp'):
        (gt_cot, gt_ans) = (example['Equation'], example['Answer'])
    elif (data_name == 'asdiv'):
        gt_cot = example['formula']
        # Drop parenthesised annotations (e.g. units) from the answer.
        gt_ans = re.sub('\\(.*?\\)', '', example['answer'])
    elif (data_name == 'mawps'):
        (gt_cot, gt_ans) = (None, example['target'])
    elif (data_name == 'tabmwp'):
        gt_cot = example['solution']
        gt_ans = example['answer']
        # Numeric tabmwp answers may be fractions ("a/b"), use thousands
        # separators ("1,234") or percentages ("12%"); coerce all to float.
        if (example['ans_type'] in ['integer_number', 'decimal_number']):
            if ('/' in gt_ans):
                gt_ans = (int(gt_ans.split('/')[0]) / int(gt_ans.split('/')[1]))
            elif (',' in gt_ans):
                gt_ans = float(gt_ans.replace(',', ''))
            elif ('%' in gt_ans):
                gt_ans = (float(gt_ans.split('%')[0]) / 100)
            else:
                gt_ans = float(gt_ans)
    elif (data_name == 'bbh'):
        (gt_cot, gt_ans) = (None, example['target'])
    else:
        raise NotImplementedError(data_name)
    gt_cot = str(gt_cot).strip()
    gt_ans = strip_string(gt_ans)
    return (gt_cot, gt_ans)
def draw_box_offscreen():
    """Render a red 1x2x4 box via Open3D's offscreen renderer (headless smoke test).

    The rendered image is discarded; success means the offscreen pipeline works.
    """
    import open3d as o3d
    import open3d.visualization.rendering as rendering
    render = rendering.OffscreenRenderer(640, 480)
    cube_red = o3d.geometry.TriangleMesh.create_box(1, 2, 4)
    cube_red.compute_vertex_normals()
    cube_red.paint_uniform_color((1.0, 0.0, 0.0))
    default_mat = rendering.MaterialRecord()
    render.scene.add_geometry('box', cube_red, default_mat)
    # 60° vertical FoV, looking at the origin from (0, 10, 0) with +Z as up.
    render.setup_camera(60.0, [0, 0, 0], [0, 10, 0], [0, 0, 1])
    _ = render.render_to_image()
class GPTNeoXJapaneseTokenizer(metaclass=DummyObject):
    """Import-time placeholder: raises a helpful error unless the 'tokenizers' backend is installed."""
    # Backends this dummy stands in for.
    _backends = ['tokenizers']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tokenizers'])
class RandomSequenceCrop(object):
    """Transform: crop a random contiguous window of length *seq_len* from a sequence."""

    def __init__(self, seq_len):
        # Length of the window returned by __call__.
        self.seq_len = seq_len

    def __call__(self, input):
        """Return input[s : s+seq_len] for a uniformly random valid start s.

        Supports lists, array-likes exposing .shape (numpy/torch), and — as a
        robustness fix — any other sized sequence via len() (the original
        raised NameError for those).
        """
        if isinstance(input, list):  # was `type(input) == list`; isinstance is the idiom
            input_seq_len = len(input)
        elif hasattr(input, 'shape'):
            input_seq_len = input.shape[0]
        else:
            input_seq_len = len(input)
        max_start_ind = ((input_seq_len - self.seq_len) + 1)
        assert (max_start_ind > 0), (('Sequence length longer than input sequence length: ' + str(input_seq_len)) + '.')
        start_ind = random.choice(range(max_start_ind))
        return input[start_ind:(start_ind + self.seq_len)]
class ClusterNet5gHead(nn.Module):
    """Multi-sub-head clustering head: each sub-head is Linear -> Softmax over output_k clusters."""

    def __init__(self, output_k: int, num_sub_heads: int, batchnorm_track: bool=True) -> None:
        super(ClusterNet5gHead, self).__init__()
        self.batchnorm_track = batchnorm_track
        self.num_sub_heads = num_sub_heads
        in_dim = 512 * BasicBlock.expansion
        self.heads = nn.ModuleList(
            [nn.Sequential(nn.Linear(in_dim, output_k), nn.Softmax(dim=1)) for _ in range(self.num_sub_heads)]
        )

    def forward(self, x, kmeans_use_features=False):
        # When kmeans_use_features is set, bypass the heads and hand the raw
        # features back once per sub-head.
        if kmeans_use_features:
            return [x for _ in range(self.num_sub_heads)]
        return [head(x) for head in self.heads]
def filter_weight(cfg, module):
    """Split *module*'s parameters into weight-decay and no-weight-decay groups.

    Linear/conv weights always decay; biases and normalisation parameters are
    exempted when cfg.OPTIMIZER.WEIGHT_DECAY.NO_BIAS / NO_NORM are True.
    Returns the two param-group dicts (no-decay group has weight_decay=0.0),
    each filtered to parameters with requires_grad.
    """
    group_decay = []
    group_no_decay = []
    no_bias = (cfg.OPTIMIZER.WEIGHT_DECAY.NO_BIAS is True)
    no_norm = (cfg.OPTIMIZER.WEIGHT_DECAY.NO_NORM is True)
    for m in module.modules():
        if isinstance(m, (nn.Linear, nn.modules.conv._ConvNd)):
            group_decay.append(m.weight)
            if m.bias is not None:
                (group_no_decay if no_bias else group_decay).append(m.bias)
        elif isinstance(m, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm, nn.LayerNorm)):
            target = group_no_decay if no_norm else group_decay
            if m.weight is not None:
                target.append(m.weight)
            if m.bias is not None:
                target.append(m.bias)
    # Every parameter of the module must have been assigned to exactly one group.
    assert len(list(module.parameters())) == (len(group_decay) + len(group_no_decay))
    return [
        dict(params=filter(lambda p: p.requires_grad, group_decay)),
        dict(params=filter(lambda p: p.requires_grad, group_no_decay), weight_decay=0.0),
    ]
def get_runner_class(cfg):
    """Look up the runner class named in cfg.run_cfg ('runner_base' when unset)."""
    runner_cls = registry.get_runner_class(cfg.run_cfg.get('runner', 'runner_base'))
    return runner_cls
class BirdsTask():
    """CUB-200 birds reference-game task: captions + precomputed image features.

    Loads per-image feature vectors and crowd-sourced captions, builds a token
    vocabulary, and splits the 200 bird classes into train/val/test folds.
    NOTE(review): this code carries Python-2-isms (text-mode pickle open,
    `dict.keys()[0]` indexing) and uses numpy-style `random.choice(int)` /
    `random.randint(n)` calls — confirm `random` is bound to `numpy.random`
    at module level, otherwise those calls fail under the stdlib module.
    """

    def __init__(self):
        self.hint_vocab = util.Index()  # word <-> id mapping for caption tokens
        self.START = START
        self.STOP = STOP
        # Precomputed per-image feature vectors, keyed by 'class/instance'.
        with open(os.path.join(birds_path, 'hendricks_data', 'CUB_feature_dict.pkl')) as feat_f:
            self.features = pickle.load(feat_f)
        # Captions per instance: tokenised, START/STOP-wrapped, vocab-indexed.
        self.captions = {}
        with open(os.path.join(birds_path, 'hendricks_data', 'captions.tsv')) as capt_f:
            reader = csv.DictReader(capt_f, delimiter='\t')
            for row in reader:
                # Separate sentence punctuation into standalone tokens.
                caption = row['Description'].lower().replace('.', ' .').replace(',', ' ,')
                toks = (([START] + caption.split()) + [STOP])
                toks = [self.hint_vocab.index(w) for w in toks]
                url = row['Input.image_url']
                # Instance key = last two URL path components ('class/image').
                inst = '/'.join(url.split('/')[(- 2):])
                if (inst not in self.captions):
                    self.captions[inst] = []
                self.captions[inst].append(toks)
        classes = sorted(list(set((k.split('/')[0] for k in self.captions))))
        classes.remove('cub_missing')
        # Deterministic shuffle so fold assignment is reproducible across runs.
        shuf_random = np.random.RandomState(999)
        shuf_random.shuffle(classes)
        assert (len(classes) == 200)
        # NOTE: 'val' classes are a subset of 'test' classes (100:110 vs 100:200).
        data_classes = {'train': classes[:100], 'val': classes[100:110], 'test': classes[100:200]}
        data_insts = {}
        for fold in ('train', 'val', 'test'):
            classes = data_classes[fold]
            data_classes[fold] = classes
            instances = {cls: [] for cls in classes}
            for key in self.features.keys():
                (cls, inst) = key.split('/')
                if (cls in instances):
                    instances[cls].append(key)
            data_insts[fold] = instances
        self.train_classes = data_classes['train']
        self.val_classes = data_classes['val']
        self.test_classes = data_classes['test']
        self.train_insts = data_insts['train']
        self.val_insts = data_insts['val']
        self.test_insts = data_insts['test']
        # NOTE(review): keys()[0] is Python 2 only; Python 3 needs next(iter(...)).
        self.n_features = self.features[self.features.keys()[0]].size

    def sample_train(self, n_batch, augment):
        """Sample *n_batch* training Datums: N_EX same-class examples, one caption,
        a probe image, and a binary label saying whether the probe matches the class."""
        assert (not augment)  # augmentation is not supported here
        batch = []
        for _ in range(n_batch):
            cls = random.choice(self.train_classes)
            insts = [random.choice(self.train_insts[cls]) for _ in range(N_EX)]
            captions = self.captions[insts[0]]
            caption = captions[random.choice(len(captions))]
            feats = np.asarray([self.features[inst] for inst in insts])
            label = random.randint(2)
            if (label == 0):
                # Negative: probe image drawn from a different class.
                other_cls = choose_except(self.train_classes, [cls])
                other_inst = random.choice(self.train_insts[other_cls])
            else:
                # Positive: same class, but not one of the instances already shown.
                other_inst = choose_except(self.train_insts[cls], insts)
            other_feats = self.features[other_inst]
            datum = Datum(caption, feats, other_feats, label)
            batch.append(datum)
        return batch

    def sample_heldout(self, classes, insts):
        """Deterministically build one Datum per held-out class, alternating labels."""
        batch = []
        local_random = np.random.RandomState(0)
        for (i, cls) in enumerate(classes):
            datum_insts = insts[cls][:N_EX]
            caption = self.captions[datum_insts[0]][0]
            feats = np.asarray([self.features[inst] for inst in datum_insts])
            label = (i % 2)
            if (label == 0):
                other_cls = choose_except(classes, [cls], local_random)
                other_inst = insts[other_cls][N_EX]
            else:
                other_inst = insts[cls][N_EX]
            other_feats = self.features[other_inst]
            datum = Datum(caption, feats, other_feats, label)
            batch.append(datum)
        return batch

    def sample_val(self, same=False):
        # `same` is accepted for interface parity but ignored.
        return self.sample_heldout(self.val_classes, self.val_insts)

    def sample_test(self, same=False):
        return self.sample_heldout(self.test_classes, self.test_insts)
def skip(conv, args):
    """Decide whether conversation *conv* should be dropped.

    Drops on language mismatch (keep_lang / skip_lang, detected over the
    concatenated turn values; detection failures count as 'unknown') and,
    when args.reduce_rep is set, on any turn containing a digit repeated
    nine times in a row.
    """
    if (args.keep_lang != 'all') or (args.skip_lang is not None):
        text = '\n'.join([turn['value'] for turn in conv['conversations']])
        try:
            lang_code = Detector(text).language.code
        except (pycld2.error, polyglot.detect.base.UnknownLanguage):
            lang_code = 'unknown'
        if (args.keep_lang != 'all') and (lang_code != args.keep_lang):
            return True
        if lang_code == args.skip_lang:
            return True
    if args.reduce_rep:
        for turn in conv['conversations']:
            if re.search('(\\d)\\1{8}', turn['value']) is not None:
                return True
    return False
class lazyset(set):
    """A set whose contents are materialised on first use.

    Subclasses override load() to populate the set.  Every operation is routed
    through _lazy(), which calls load() the first time the set is touched
    while empty, then delegates to the plain set implementation.

    Fixes vs. the original: a stray debug print('!') was removed, and the
    bogus __gte__/__lte__ names (which Python never calls for >=/<=, and which
    delegated to nonexistent set methods) are now backed by real __ge__/__le__.
    """

    def load(self):
        """Populate the set; overridden by subclasses.  Default: do nothing."""
        pass

    def _lazy(self, method, *args):
        # An empty set is taken to mean "not loaded yet".
        if (set.__len__(self) == 0):
            self.load()
        # Cache the plain set method on the instance so later *direct* calls
        # (e.g. s.add) skip this shim.  NOTE: operator dunders are looked up
        # on the type, not the instance, so those still come through _lazy.
        setattr(self, method, types.MethodType(getattr(set, method), self))
        return getattr(set, method)(self, *args)

    def __repr__(self): return self._lazy('__repr__')
    def __len__(self): return self._lazy('__len__')
    def __iter__(self): return self._lazy('__iter__')
    def __contains__(self, *args): return self._lazy('__contains__', *args)
    def __sub__(self, *args): return self._lazy('__sub__', *args)
    def __and__(self, *args): return self._lazy('__and__', *args)
    def __or__(self, *args): return self._lazy('__or__', *args)
    def __xor__(self, *args): return self._lazy('__xor__', *args)
    def __isub__(self, *args): return self._lazy('__isub__', *args)
    def __iand__(self, *args): return self._lazy('__iand__', *args)
    def __ior__(self, *args): return self._lazy('__ior__', *args)
    def __ixor__(self, *args): return self._lazy('__ixor__', *args)
    def __gt__(self, *args): return self._lazy('__gt__', *args)
    def __lt__(self, *args): return self._lazy('__lt__', *args)
    def __ge__(self, *args): return self._lazy('__ge__', *args)
    def __le__(self, *args): return self._lazy('__le__', *args)
    # Kept for backward compatibility with any direct callers of the old names.
    def __gte__(self, *args): return self._lazy('__ge__', *args)
    def __lte__(self, *args): return self._lazy('__le__', *args)
    def add(self, *args): return self._lazy('add', *args)
    def pop(self, *args): return self._lazy('pop', *args)
    def remove(self, *args): return self._lazy('remove', *args)
    def discard(self, *args): return self._lazy('discard', *args)
    def isdisjoint(self, *args): return self._lazy('isdisjoint', *args)
    def issubset(self, *args): return self._lazy('issubset', *args)
    def issuperset(self, *args): return self._lazy('issuperset', *args)
    def union(self, *args): return self._lazy('union', *args)
    def intersection(self, *args): return self._lazy('intersection', *args)
    def difference(self, *args): return self._lazy('difference', *args)
def validate_keras_model(platform, model_file, input_file, mace_out_file, input_names, input_shapes, input_data_formats, output_names, output_shapes, output_data_formats, validation_threshold, input_data_types, log_file):
    """Compare MACE runtime outputs against a (possibly quantized) Keras model.

    Loads the Keras model, feeds it the same inputs that were given to MACE
    (transposed to NHWC/HWIO layouts where needed), and delegates the numeric
    comparison to compare_output() with the given threshold.
    """
    from tensorflow import keras
    import tensorflow_model_optimization as tfmot
    if (not os.path.isfile(model_file)):
        util.MaceLogger.error(VALIDATION_MODULE, (("Input model file '" + model_file) + "' does not exist!"))
    # quantize_scope lets Keras deserialize models saved with TF-MOT quantization wrappers.
    with tfmot.quantization.keras.quantize_scope():
        keras_model = keras.models.load_model(model_file, compile=False)
    input = []
    for i in range(len(input_names)):
        input_value = load_data(util.formatted_file_name(input_file, input_names[i]), input_data_types[i])
        input_value = input_value.reshape(input_shapes[i])
        # Keras expects channels-last; convert 4-D NCHW / OIHW inputs accordingly.
        if ((input_data_formats[i] == DataFormat.NCHW) and (len(input_shapes[i]) == 4)):
            input_value = input_value.transpose((0, 2, 3, 1))
        elif ((input_data_formats[i] == DataFormat.OIHW) and (len(input_shapes[i]) == 4)):
            input_value = input_value.transpose((2, 3, 1, 0))
        input.append(input_value)
    output_values = keras_model.predict(input)
    # Only single-output models are supported by this comparison path.
    mace_check((len(output_names) == 1), 'Unexpected')
    for i in range(len(output_names)):
        output_file_name = util.formatted_file_name(mace_out_file, output_names[i])
        mace_out_value = load_data(output_file_name, get_data_type_by_value(output_values))
        (mace_out_value, real_output_shape, real_output_data_format) = get_real_out_value_shape_df(platform, mace_out_value, output_shapes[i], output_data_formats[i])
        compare_output(output_names[i], mace_out_value, output_values, validation_threshold, log_file, real_output_shape, real_output_data_format)
def grow_axis(ax, d):
    """Expand the axes *ax* outward by *d* (figure-fraction units) on all four sides."""
    left, bottom, right, top = ax.get_position().extents
    grown = matplotlib.transforms.Bbox.from_extents(left - d, bottom - d, right + d, top + d)
    ax.set_position(grown)
class CircleCIJob():
    """Declarative description of one CircleCI test job for this repo.

    NOTE(review): the field annotations + __post_init__ imply this class is
    used with the @dataclass decorator (not visible in this excerpt) — confirm
    at the definition site.
    """
    name: str
    additional_env: Dict[(str, Any)] = None
    cache_name: str = None           # defaults to `name` in __post_init__
    cache_version: str = '0.6'
    docker_image: List[Dict[(str, str)]] = None
    install_steps: List[str] = None
    marker: Optional[str] = None     # pytest -m marker expression
    parallelism: Optional[int] = 1
    pytest_num_workers: int = 8
    pytest_options: Dict[(str, Any)] = None
    resource_class: Optional[str] = 'xlarge'
    tests_to_run: Optional[List[str]] = None
    working_directory: str = '~/transformers'

    def __post_init__(self):
        # Replace None placeholders with their real (mutable) defaults.
        if (self.additional_env is None):
            self.additional_env = {}
        if (self.cache_name is None):
            self.cache_name = self.name
        if (self.docker_image is None):
            self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
        if (self.install_steps is None):
            self.install_steps = []
        if (self.pytest_options is None):
            self.pytest_options = {}
        if isinstance(self.tests_to_run, str):
            self.tests_to_run = [self.tests_to_run]
        if (self.parallelism is None):
            self.parallelism = 1

    def to_dict(self):
        """Render this job as the dict structure CircleCI's YAML config expects."""
        env = COMMON_ENV_VARIABLES.copy()
        env.update(self.additional_env)
        job = {'working_directory': self.working_directory, 'docker': self.docker_image, 'environment': env}
        if (self.resource_class is not None):
            job['resource_class'] = self.resource_class
        if (self.parallelism is not None):
            job['parallelism'] = self.parallelism
        # Checkout, restore the pip cache, run install steps, save the cache.
        steps = ['checkout', {'attach_workspace': {'at': '~/transformers/test_preparation'}}, {'restore_cache': {'keys': [(f'v{self.cache_version}-{self.cache_name}-' + '{{ checksum "setup.py" }}'), f'v{self.cache_version}-{self.cache_name}-']}}]
        steps.extend([{'run': l} for l in self.install_steps])
        steps.append({'save_cache': {'key': (f'v{self.cache_version}-{self.cache_name}-' + '{{ checksum "setup.py" }}'), 'paths': ['~/.cache/pip']}})
        steps.append({'run': {'name': 'Show installed libraries and their versions', 'command': 'pip freeze | tee installed.txt'}})
        steps.append({'store_artifacts': {'path': '~/transformers/installed.txt'}})
        # Build the pytest flag list from common + per-job options.
        all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
        pytest_flags = [(f'--{key}={value}' if (value is not None) else f'-{key}') for (key, value) in all_options.items()]
        pytest_flags.append((f'--make-reports={self.name}' if ('examples' in self.name) else f'--make-reports=tests_{self.name}'))
        test_command = (f'python -m pytest -n {self.pytest_num_workers} ' + ' '.join(pytest_flags))
        if (self.parallelism == 1):
            # Single executor: tests come from the pipeline parameter or the explicit list.
            if (self.tests_to_run is None):
                test_command += ' << pipeline.parameters.tests_to_run >>'
            else:
                test_command += (' ' + ' '.join(self.tests_to_run))
        else:
            # Multiple executors: expand the test list, shard it with
            # `circleci tests split`, and run each shard's share.
            tests = self.tests_to_run
            if (tests is None):
                folder = os.environ['test_preparation_dir']
                test_file = os.path.join(folder, 'filtered_test_list.txt')
                if os.path.exists(test_file):
                    with open(test_file) as f:
                        tests = f.read().split(' ')
            if (tests == ['tests']):
                tests = [os.path.join('tests', x) for x in os.listdir('tests')]
            expanded_tests = []
            for test in tests:
                if test.endswith('.py'):
                    expanded_tests.append(test)
                elif (test == 'tests/models'):
                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
                elif (test == 'tests/pipelines'):
                    expanded_tests.extend([os.path.join(test, x) for x in os.listdir(test)])
                else:
                    expanded_tests.append(test)
            # Shuffle so slow tests spread across executors.
            random.shuffle(expanded_tests)
            tests = ' '.join(expanded_tests)
            # NOTE(review): len(tests) is the length of the joined *string*,
            # not the number of tests — confirm this sizing is intentional.
            n_executors = max((len(tests) // 10), 1)
            if (n_executors > self.parallelism):
                n_executors = self.parallelism
            job['parallelism'] = n_executors
            # NOTE(review): `tr " " " "` is a no-op; upstream this was likely
            # `tr " " "\n"` (one test per line for `circleci tests split`) —
            # kept byte-identical here, confirm before relying on it.
            command = f'''echo {tests} | tr " " " " >> tests.txt'''
            steps.append({'run': {'name': 'Get tests', 'command': command}})
            command = 'TESTS=$(circleci tests split tests.txt) && echo $TESTS > splitted_tests.txt'
            steps.append({'run': {'name': 'Split tests', 'command': command}})
            steps.append({'store_artifacts': {'path': '~/transformers/tests.txt'}})
            steps.append({'store_artifacts': {'path': '~/transformers/splitted_tests.txt'}})
            test_command = (f'python -m pytest -n {self.pytest_num_workers} ' + ' '.join(pytest_flags))
            test_command += ' $(cat splitted_tests.txt)'
        if (self.marker is not None):
            test_command += f' -m {self.marker}'
        test_command += ' | tee tests_output.txt'
        steps.append({'run': {'name': 'Run tests', 'command': test_command}})
        steps.append({'store_artifacts': {'path': '~/transformers/tests_output.txt'}})
        steps.append({'store_artifacts': {'path': '~/transformers/reports'}})
        job['steps'] = steps
        return job

    def job_name(self):
        """CircleCI job name: examples jobs keep their name, test jobs get a tests_ prefix."""
        return (self.name if ('examples' in self.name) else f'tests_{self.name}')
class DefaultArticleParser(ArticleParser):
    """Default parser extracting translation pairs from a wiktionary article."""

    def extract_translations(self, title, text):
        """Collect (wikicode, word) translation pairs found in *text*.

        ``title`` is accepted for interface compatibility but unused here.
        Returns a set of unique pairs; entries with an unknown wikicode,
        an empty word, or a word rejected by skip_word() are dropped.
        """
        found = set()
        for match in self.cfg.trad_re.finditer(text):
            wc = match.group(self.cfg.wc_field)
            if not (wc and wc.strip() and wc in self.wikt_cfg.wikicodes):
                continue
            raw_word = match.group(self.cfg.word_field)
            if not (raw_word and raw_word.strip()):
                continue
            word = raw_word.strip()
            if self.skip_word(word):
                continue
            found.add((wc, word))
        return found

    def skip_word(self, word):
        """Return True for words that must be discarded.

        A word is skipped when it matches the configured skip regex or
        contains a newline; otherwise falls through (returns None, falsy).
        """
        skip_re = self.cfg.skip_translation_re
        if skip_re and skip_re.search(word):
            return True
        if '\n' in word:
            return True
class SGD(optim.SGD):
    """SGD variant aware of masked sparse parameters.

    Identical to the classic ``torch.optim.SGD`` update except that:
      * parameters flagged with ``is_placeholder`` are skipped entirely;
      * parameters flagged with ``is_sparse_param`` read their gradient from
        the dense twin (``p.dense.grad``) through ``p.mask`` and update only
        the selected values (``p._values()``).
    """

    # NOTE(review): the original source had a bare `_grad()` statement here,
    # almost certainly a transcoding-mangled `@torch.no_grad()` decorator —
    # the in-place `p.add_` below requires gradient tracking to be disabled.
    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable re-evaluating the model and returning
                the loss; executed with gradients enabled.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                # Skip placeholders and parameters with no usable gradient
                # (sparse params get their gradient from the dense twin).
                if hasattr(p, 'is_placeholder') or ((p.grad is None) and ((not hasattr(p, 'is_sparse_param')) or (p.dense.grad is None))):
                    continue
                if hasattr(p, 'is_sparse_param'):
                    # Gather the gradient of the masked entries, then update
                    # the value tensor instead of the parameter itself.
                    d_p = p.dense.grad.masked_select(p.mask)
                    p = p._values()
                else:
                    d_p = p.grad
                if weight_decay != 0:
                    d_p = d_p.add(p, alpha=weight_decay)
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.add_(d_p, alpha=(-group['lr']))
        return loss
class FlaxXLMRobertaModel(metaclass=DummyObject):
    # Placeholder class emitted when the real Flax implementation cannot be
    # imported: instantiating it raises an error telling the user that the
    # 'flax' backend must be installed.
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['flax'])
def minimum_batch_size(cfg: ConfigDict) -> int:
    """Return the smallest training batch size the config may use.

    Falls back to 1 when ``cfg`` has no ``train_batch_size`` attribute;
    otherwise returns the value itself when it is a plain int, or the
    minimum over the collection of candidate sizes.
    """
    if not hasattr(cfg, 'train_batch_size'):
        return 1
    size = cfg.train_batch_size
    return size if isinstance(size, int) else min(size)
class fast_rcnn_outputs(nn.Module):
    """Fast R-CNN box head outputs: per-class scores and box regression deltas."""

    def __init__(self, dim_in):
        super().__init__()
        num_classes = cfg.MODEL.NUM_CLASSES
        self.cls_score = nn.Linear(dim_in, num_classes)
        # Class-agnostic regression predicts a single fg/bg box pair;
        # otherwise one 4-vector per class.
        bbox_out_dim = 4 * 2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else 4 * num_classes
        self.bbox_pred = nn.Linear(dim_in, bbox_out_dim)
        self._init_weights()

    def _init_weights(self):
        """Gaussian-initialize weights, zero the biases (Detectron defaults)."""
        init.normal_(self.cls_score.weight, std=0.01)
        init.constant_(self.cls_score.bias, 0)
        init.normal_(self.bbox_pred.weight, std=0.001)
        init.constant_(self.bbox_pred.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's parameter names onto Detectron blob names."""
        mapping = {
            'cls_score.weight': 'cls_score_w',
            'cls_score.bias': 'cls_score_b',
            'bbox_pred.weight': 'bbox_pred_w',
            'bbox_pred.bias': 'bbox_pred_b',
        }
        return mapping, []

    def forward(self, x):
        # Collapse a (N, C, 1, 1) pooled feature map to (N, C).
        if x.dim() == 4:
            x = x.squeeze(3).squeeze(2)
        scores = self.cls_score(x)
        # Probabilities at inference time; raw logits during training
        # (the loss applies its own softmax).
        if not self.training:
            scores = F.softmax(scores, dim=1)
        return scores, self.bbox_pred(x)
class Sampler(object):
    """Abstract base class for trajectory samplers.

    Holds the environment/policy pair plus batching limits; subclasses
    implement the actual collection in obtain_samples().
    """

    def __init__(self, env, policy, batch_size, max_path_length):
        # The environment must expose the minimal gym-style interface.
        assert hasattr(env, 'reset') and hasattr(env, 'step')
        self.env = env
        self.policy = policy
        self.batch_size = batch_size
        self.max_path_length = max_path_length

    def obtain_samples(self):
        """Collect a batch of samples; must be implemented by subclasses."""
        raise NotImplementedError
def process_hits(es, response, patent_application_id_column, patent_citation_column, application_claim_number_column, application_claim_text_column, related_passages_against_claim_column, category_column):
    """Extract category-'A' citation/claim rows from an Elasticsearch response.

    Walks every patent-application hit in ``response``; for each claim of each
    application it looks up every cited document in the ``ep_patent_citations``
    index, and when the claim number appears in the citation's ``category_A``
    list it appends one aligned row to each of the six ``*_column`` output
    lists (mutated in place, nothing returned).

    Args:
        es: Elasticsearch client used for the per-citation lookups.
        response: search response dict for the applications index.
        *_column: parallel output lists, appended to in place.
    """
    print(response)
    all_response_patent_applications = response.get('hits').get('hits')
    for element in all_response_patent_applications:
        patent_application_id = element.get('_id')
        claims_text_raw = element.get('_source').get('claims')
        # NOTE(review): assumes claim markers look like '<claim id="c-en-00NN'
        # and that applications have at most 99 claims — verify against schema.
        max_claim = int(claims_text_raw.split('<claim id="c-en-00')[(- 1)][:2])
        for claim in range(1, (max_claim + 1)):
            for citation_id in element.get('_source').get('citation_ids'):
                print(citation_id)
                response_citation = es.search(index='ep_patent_citations', body=query_citation_id(citation_id), size=10000)
                print(response_citation)
                try:
                    response_citation.get('hits').get('hits')[0].get('_source')
                except Exception:
                    # Was a bare `except:` (also swallowed KeyboardInterrupt /
                    # SystemExit); citation document missing -> skip citation.
                    continue
                response_rel_claims = response_citation.get('hits').get('hits')[0].get('_source').get('category_A')
                response_rel_passage = response_citation.get('hits').get('hits')[0].get('_source').get('rel-passage_A')
                if (claim in response_rel_claims):
                    try:
                        application_claim_text_column.append(claims_text_raw.split((((('<claim id="c-en-00' + '{:02d}'.format(claim)) + '" num="00') + '{:02d}'.format(claim)) + '">'))[1].split('</claim>')[0])
                    except Exception:
                        # Claim marker not found in the raw text: discard the
                        # whole row so the output columns stay aligned.
                        print(((('Discarded Claim. ID: ' + str(claim)) + ', Patent Application ID: ') + str(patent_application_id)))
                        continue
                    patent_application_id_column.append(patent_application_id)
                    patent_citation_column.append(citation_id)
                    application_claim_number_column.append(claim)
                    related_passages_against_claim_column.append(response_rel_passage)
                    category_column.append('A')
class SawyerGoalWrapper(Wrapper):
    """Gym wrapper adapting Sawyer-style goal environments.

    Prunes the observation dict down to the three standard goal-env keys,
    rewires the wrapped env's reward type, and normalizes the various
    env-specific ``*_success`` flags into a single ``is_success`` info entry.
    """

    # Maps the wrapper-level reward style to the wrapped env's reward name.
    reward_type_dict = {'dense': 'hand_distance', 'sparse': 'hand_success'}
    # Only these observation keys are kept in the space and in step() output.
    observation_keys = ['observation', 'desired_goal', 'achieved_goal']

    def __init__(self, env, reward_type='sparse'):
        Wrapper.__init__(self, env=env)
        self.env = env
        self.action_space = env.action_space
        # Drop every observation key this wrapper does not expose.
        for key in list(env.observation_space.spaces.keys()):
            if (key not in self.observation_keys):
                del env.observation_space.spaces[key]
        self.observation_space = env.observation_space
        self.reward_type = reward_type
        if hasattr(self.env, 'puck_space'):
            # Push-style envs (those exposing a puck) measure success on the
            # puck rather than the hand; propagate the override inward.
            self.reward_type = 'puck_success'
            self.env.reward_type = 'puck_success'
            if (hasattr(self.env, 'env') and hasattr(self.env.env, 'reward_type')):
                self.env.env.reward_type = 'puck_success'
        else:
            if hasattr(self.env, 'reward_type'):
                self.env.reward_type = self.reward_type_dict[self.reward_type]
            if (hasattr(self.env, 'env') and hasattr(self.env.env, 'reward_type')):
                self.env.env.reward_type = self.reward_type_dict[self.reward_type]
        # Door environments report success through the door angle.
        if ('Door' in self.env.__str__()):
            self.reward_type = 'angle_success'

    def reset(self):
        return self.env.reset()

    def step(self, action):
        (obs_dict, reward, done, info) = self.env.step(action)
        # Keep only the three canonical goal-env observation entries.
        obs = {'observation': obs_dict['observation'], 'desired_goal': obs_dict['desired_goal'], 'achieved_goal': obs_dict['achieved_goal']}
        # Normalize whichever success flag the env emitted into
        # info['is_success']; reward-type-specific flags take precedence.
        if ('hand_success' in info.keys()):
            info['is_success'] = info['hand_success']
        if ('success' in info.keys()):
            info['is_success'] = info['success']
        if (self.reward_type == 'puck_success'):
            info['is_success'] = info['puck_success']
        elif (self.reward_type == 'angle_success'):
            info['is_success'] = info['angle_success']
        return (obs, reward, done, info)

    def render(self, mode='human'):
        # NOTE(review): `mode` is accepted but not forwarded to the env.
        return self.env.render()

    def compute_reward(self, achieved_goal, desired_goal, info):
        return self.compute_rewards(achieved_goal, desired_goal, info)

    def compute_rewards(self, achieved_goal, desired_goal, info):
        # The wrapped env's compute_rewards expects (action, obs-dict);
        # the action is unused, so an empty array is passed.
        obs = {'state_achieved_goal': achieved_goal, 'state_desired_goal': desired_goal}
        action = np.array([])
        return self.env.compute_rewards(action, obs)

    def sample_goal(self):
        goal_dict = self.env.sample_goal()
        return goal_dict['desired_goal']
def returnPeakList():
    """Return the reference NMR peak table, one entry per compound.

    Each value is an (N, 2) ndarray of [chemical shift (ppm), relative
    intensity (max 1000)] rows.

    NOTE(review): the original source assigned ``peak_list['riboflavin']``
    twice; the first dataset (starting at delta 11.354) was immediately
    shadowed by the second and has been removed as a dead store — confirm
    whether it belonged under a different compound name.
    """
    peak_list = {}
    peak_list['quinine'] = np.array([[8.43, 174], [8.418, 182], [7.881, 168], [7.858, 182], [7.449, 149], [7.437, 146], [7.269, 97], [7.262, 124], [7.246, 71], [7.239, 141], [7.229, 185], [7.222, 124], [5.755, 35], [5.735, 39], [5.729, 43], [5.711, 64], [5.693, 46], [5.686, 47], [5.667, 43], [5.489, 86], [5.482, 89], [5.379, 71], [4.959, 103], [4.916, 99], [4.913, 86], [4.91, 97], [4.907, 109], [4.881, 95], [3.878, 1000], [3.461, 35], [3.46, 35], [3.455, 38], [3.449, 37], [3.449, 37], [3.067, 85], [3.042, 68], [3.033, 69], [3.007, 60], [2.641, 53], [2.63, 66], [2.624, 56], [2.607, 74], [2.601, 72], [2.6, 72], [2.597, 71], [2.235, 42], [2.235, 42], [2.234, 42], [2.233, 42], [1.786, 71], [1.781, 79], [1.773, 66], [1.767, 58], [1.745, 59], [1.732, 71], [1.714, 80], [1.689, 31], [1.688, 31], [1.504, 38], [1.503, 38], [1.497, 43], [1.473, 76], [1.459, 50], [1.455, 53], [1.454, 53], [1.449, 47], [1.448, 47]])
    peak_list['mannitol'] = np.array([[4.432, 965], [4.418, 1000], [4.362, 382], [4.347, 917], [4.333, 409], [4.163, 772], [4.145, 819], [3.637, 164], [3.629, 191], [3.623, 172], [3.614, 225], [3.61, 236], [3.602, 224], [3.596, 215], [3.587, 217], [3.562, 236], [3.543, 470], [3.524, 357], [3.485, 104], [3.476, 112], [3.47, 212], [3.462, 212], [3.456, 198], [3.449, 217], [3.441, 128], [3.435, 110], [3.427, 101], [3.408, 263], [3.394, 343], [3.381, 275], [3.366, 299], [3.352, 148]])
    peak_list['menthone'] = np.array([[2.375, 84], [2.37, 91], [2.366, 102], [2.36, 99], [2.343, 108], [2.338, 102], [2.333, 125], [2.328, 127], [2.17, 41], [2.166, 57], [2.153, 85], [2.149, 79], [2.139, 70], [2.136, 111], [2.132, 66], [2.119, 90], [2.115, 70], [2.107, 36], [2.106, 37], [2.102, 50], [2.097, 34], [2.095, 37], [2.089, 58], [2.085, 56], [2.081, 99], [2.075, 89], [2.067, 183], [2.057, 172], [2.054, 176], [2.053, 166], [2.048, 130], [2.045, 114], [2.04, 59], [2.035, 94], [2.031, 109], [2.024, 127], [2.021, 132], [2.015, 43], [1.993, 190], [1.99, 190], [1.961, 157], [1.958, 147], [1.942, 30], [1.936, 36], [1.935, 37], [1.931, 53], [1.926, 65], [1.922, 87], [1.92, 82], [1.917, 84], [1.914, 81], [1.908, 72], [1.902, 53], [1.899, 68], [1.893, 98], [1.885, 80], [1.879, 64], [1.876, 47], [1.87, 67], [1.864, 44], [1.859, 56], [1.854, 56], [1.849, 42], [1.844, 52], [1.839, 45], [1.833, 33], [1.828, 38], [1.409, 59], [1.406, 47], [1.402, 73], [1.39, 33], [1.381, 101], [1.378, 151], [1.376, 169], [1.37, 116], [1.365, 83], [1.363, 79], [1.358, 124], [1.354, 92], [1.351, 93], [1.342, 40], [1.34, 37], [1.338, 44], [1.333, 60], [1.331, 59], [1.326, 33], [1.022, 882], [1.006, 968], [0.992, 41], [0.933, 57], [0.923, 985], [0.906, 986], [0.893, 37], [0.884, 32], [0.882, 32], [0.863, 981], [0.846, 1000]])
    peak_list['caffein'] = np.array([[7.512, 96], [7.51, 95], [3.997, 523], [3.996, 507], [3.583, 1000], [3.406, 998]])
    peak_list['saccharose'] = np.array([[5.423, 308], [5.414, 314], [4.232, 391], [4.21, 506], [4.076, 207], [4.055, 344], [4.034, 198], [3.918, 80], [3.909, 96], [3.903, 166], [3.897, 84], [3.893, 140], [3.888, 97], [3.882, 135], [3.873, 138], [3.861, 149], [3.854, 140], [3.848, 99], [3.847, 99], [3.845, 98], [3.83, 862], [3.823, 854], [3.815, 550], [3.799, 55], [3.789, 181], [3.764, 267], [3.741, 231], [3.68, 1000], [3.58, 235], [3.57, 228], [3.555, 183], [3.545, 187], [3.498, 200], [3.474, 279], [3.45, 136]])
    peak_list['phenylalanine'] = np.array([[7.467, 187], [7.463, 334], [7.459, 165], [7.451, 239], [7.446, 880], [7.442, 612], [7.431, 406], [7.428, 950], [7.426, 672], [7.422, 207], [7.414, 50], [7.409, 214], [7.405, 475], [7.401, 365], [7.396, 67], [7.393, 143], [7.387, 514], [7.379, 148], [7.372, 141], [7.368, 231], [7.363, 823], [7.359, 1000], [7.353, 251], [7.347, 348], [7.343, 640], [4.019, 471], [4.006, 525], [3.999, 536], [3.992, 52], [3.986, 505], [3.339, 221], [3.326, 216], [3.302, 328], [3.289, 309], [3.154, 365], [3.133, 351], [3.117, 252], [3.097, 236]])
    peak_list['lactose'] = np.array([[6.342, 670], [6.331, 662], [5.107, 510], [5.097, 510], [4.908, 407], [4.898, 618], [4.888, 420], [4.793, 354], [4.785, 361], [4.707, 45], [4.685, 345], [4.67, 565], [4.659, 606], [4.646, 292], [4.627, 43], [4.621, 55], [4.564, 41], [4.564, 41], [4.537, 460], [4.525, 468], [4.507, 79], [4.494, 262], [4.479, 562], [4.467, 1000], [4.205, 41], [4.204, 42], [4.203, 42], [4.202, 43], [4.185, 481], [4.166, 480], [4.154, 41], [4.148, 41], [3.725, 175], [3.716, 368], [3.708, 244], [3.7, 206], [3.691, 390], [3.684, 282], [3.674, 66], [3.67, 69], [3.647, 593], [3.634, 790], [3.625, 840], [3.594, 71], [3.589, 64], [3.578, 284], [3.567, 142], [3.555, 668], [3.541, 361], [3.531, 560], [3.516, 362], [3.498, 387], [3.485, 326], [3.471, 216], [3.459, 721], [3.445, 457], [3.429, 244], [3.413, 115], [3.406, 101], [3.402, 95], [3.4, 93], [3.397, 90], [3.393, 85], [3.392, 84], [3.391, 82], [3.389, 80], [3.387, 79], [3.383, 76], [3.38, 73], [3.377, 72], [3.376, 71], [3.374, 69], [3.374, 69], [3.373, 69], [3.372, 69], [3.37, 68], [3.369, 68], [3.368, 67], [3.367, 67], [3.366, 67], [3.364, 66], [3.363, 66], [3.362, 65], [3.361, 64], [3.354, 109], [3.343, 90], [3.33, 351], [3.313, 816], [3.285, 495], [3.263, 564], [3.239, 331], [3.194, 148], [3.18, 215], [3.171, 257], [3.162, 198], [3.148, 135]])
    peak_list['ascorbic acid'] = np.array([[8.309, 81], [4.857, 52], [4.786, 32], [4.785, 32], [4.783, 31], [4.781, 31], [4.723, 988], [4.72, 1000], [3.754, 133], [3.751, 134], [3.735, 321], [3.731, 222], [3.719, 167], [3.715, 169], [3.477, 66], [3.461, 103], [3.451, 561], [3.446, 533], [3.435, 432], [3.426, 429], [3.42, 118], [3.4, 77]])
    peak_list['citric acid'] = np.array([[2.783, 570], [2.745, 1000], [2.677, 994], [2.638, 559]])
    peak_list['sodium p-hydroxybenzoate'] = np.array([[7.808, 919], [7.787, 1000], [6.743, 991], [6.722, 1000]])
    peak_list['fruit alcohol'] = np.array([[12.42, 116], [4.286, 727], [4.275, 822], [4.267, 892], [4.255, 770], [2.648, 658], [2.636, 684], [2.609, 995], [2.597, 970], [2.481, 1000], [2.462, 970], [2.442, 660], [2.423, 661]])
    peak_list['folic acid'] = np.array([[8.676, 1000], [8.186, 313], [8.167, 315], [7.69, 717], [7.668, 761], [6.975, 333], [6.68, 726], [6.658, 722], [4.522, 519], [4.508, 514], [4.377, 138], [4.369, 164], [4.366, 184], [4.358, 174], [4.354, 168], [4.353, 168], [4.346, 147], [2.365, 301], [2.347, 655], [2.328, 374], [2.084, 139], [2.065, 165], [1.937, 143]])
    peak_list['taurine'] = np.array([[3.818, 1000], [3.619, 80], [3.602, 181], [3.585, 102], [3.352, 106], [3.336, 195], [3.319, 81]])
    peak_list['cholesterol'] = np.array([[5.356, 88], [5.351, 67], [5.348, 65], [5.343, 81], [3.538, 31], [3.534, 45], [3.524, 54], [3.521, 55], [3.511, 45], [3.507, 35], [3.494, 30], [2.27, 79], [1.996, 99], [1.854, 154], [1.83, 157], [1.494, 183], [1.329, 124], [1.1, 168], [1.007, 1000], [0.923, 498], [0.907, 465], [0.876, 764], [0.871, 757], [0.859, 717], [0.855, 699], [0.692, 42], [0.678, 927]])
    peak_list['adenosine'] = np.array([[8.382, 942], [8.182, 38], [8.181, 39], [8.168, 1000], [7.409, 448], [5.915, 431], [5.9, 439], [5.51, 388], [5.495, 475], [5.483, 194], [5.472, 134], [5.25, 333], [5.239, 330], [4.663, 103], [4.648, 221], [4.635, 222], [4.62, 101], [4.187, 111], [4.176, 215], [4.169, 218], [4.157, 109], [4.006, 119], [3.998, 304], [3.99, 296], [3.982, 118], [3.724, 67], [3.714, 113], [3.705, 80], [3.693, 120], [3.69, 110], [3.684, 178], [3.675, 101], [3.607, 96], [3.598, 114], [3.589, 115], [3.589, 115], [3.579, 128], [3.568, 79], [3.559, 77], [3.55, 63]])
    peak_list['myo-inositol'] = np.array([[4.073, 326], [4.066, 661], [4.058, 401], [3.654, 465], [3.629, 1000], [3.615, 43], [3.605, 919], [3.574, 22], [3.571, 23], [3.568, 25], [3.567, 25], [3.555, 926], [3.548, 938], [3.53, 507], [3.523, 554], [3.305, 414], [3.282, 641], [3.259, 306]])
    peak_list['oleic acid'] = np.array([[5.361, 102], [5.359, 81], [5.352, 131], [5.347, 237], [5.345, 243], [5.339, 138], [5.338, 138], [5.333, 81], [5.33, 109], [2.365, 171], [2.346, 306], [2.327, 202], [2.019, 261], [2.005, 246], [1.99, 111], [1.65, 101], [1.633, 150], [1.614, 127], [1.599, 61], [1.596, 62], [1.311, 812], [1.289, 540], [1.268, 1000], [0.898, 250], [0.881, 774], [0.863, 303]])
    peak_list['glycerol'] = np.array([[4.485, 411], [3.48, 72], [3.466, 238], [3.453, 453], [3.439, 346], [3.426, 203], [3.405, 624], [3.392, 316], [3.378, 1000], [3.365, 712], [3.34, 34], [3.327, 993], [3.312, 744], [3.3, 574], [3.285, 435]])
    peak_list['theophylline'] = np.array([[7.979, 276], [3.446, 948], [3.391, 29], [3.384, 26], [3.246, 1000], [3.19, 32], [3.176, 25]])
    peak_list['limonene'] = np.array([[5.403, 120], [5.4, 128], [5.397, 125], [5.391, 119], [4.704, 534], [4.702, 614], [2.13, 36], [2.123, 36], [2.1, 132], [2.083, 106], [2.076, 97], [2.07, 69], [2.063, 98], [2.058, 83], [2.053, 118], [2.05, 122], [2.046, 108], [2.043, 97], [2.038, 89], [2.03, 57], [2.027, 55], [2.025, 53], [2.016, 50], [2.013, 51], [2.01, 49], [2.008, 45], [2.006, 44], [2.004, 42], [2.001, 39], [1.999, 33], [1.975, 81], [1.97, 83], [1.963, 81], [1.96, 82], [1.949, 32], [1.938, 64], [1.933, 85], [1.93, 79], [1.924, 68], [1.918, 73], [1.913, 68], [1.91, 61], [1.904, 63], [1.898, 70], [1.892, 57], [1.887, 37], [1.883, 37], [1.823, 32], [1.817, 64], [1.811, 67], [1.804, 67], [1.798, 64], [1.792, 64], [1.786, 83], [1.781, 78], [1.778, 76], [1.772, 82], [1.766, 71], [1.76, 35], [1.731, 779], [1.728, 1000], [1.725, 737], [1.648, 600], [1.522, 50], [1.508, 47], [1.494, 82], [1.49, 67], [1.479, 77], [1.476, 64], [1.464, 61], [1.462, 74], [1.45, 62], [1.447, 69], [1.433, 32], [1.418, 37]])
    peak_list['linalool'] = np.array([[5.942, 125], [5.915, 136], [5.898, 144], [5.871, 148], [5.233, 161], [5.23, 162], [5.19, 144], [5.187, 143], [5.139, 42], [5.136, 52], [5.132, 46], [5.129, 37], [5.121, 78], [5.118, 94], [5.115, 78], [5.107, 39], [5.103, 48], [5.1, 54], [5.097, 44], [5.065, 165], [5.061, 156], [5.038, 155], [5.035, 149], [2.062, 39], [2.039, 72], [2.019, 107], [2.0, 88], [1.979, 59], [1.961, 66], [1.96, 66], [1.959, 65], [1.676, 520], [1.678, 544], [1.62, 74], [1.6, 604], [1.586, 160], [1.579, 69], [1.57, 193], [1.561, 126], [1.555, 123], [1.545, 189], [1.536, 48], [1.529, 100], [1.52, 33], [1.512, 32], [1.272, 1000]])
    peak_list['ethyl decanoate'] = np.array([[4.243, 71], [4.163, 245], [4.083, 257], [4.005, 84], [2.374, 43], [2.361, 42], [2.289, 114], [2.209, 86], [2.202, 101], [1.633, 31], [1.627, 35], [1.619, 36], [1.614, 37], [1.61, 37], [1.606, 37], [1.603, 37], [1.598, 36], [1.539, 30], [1.372, 56], [1.329, 407], [1.268, 657], [1.25, 1000], [1.17, 299], [0.937, 60], [0.936, 60], [0.92, 52], [0.91, 51], [0.906, 49], [0.878, 202], [0.811, 41]])
    peak_list['hexyl hexanoate'] = np.array([[4.08, 383], [4.063, 739], [4.046, 396], [2.311, 380], [2.292, 580], [2.282, 30], [2.273, 398], [1.328, 429], [1.325, 457], [1.321, 732], [1.316, 527], [1.311, 715], [1.303, 484], [0.936, 33], [0.935, 33], [0.916, 426], [0.911, 396], [0.899, 1000], [0.894, 893], [0.88, 404], [0.877, 313]])
    peak_list['estragole'] = np.array([[7.139, 86], [7.133, 58], [7.114, 33], [7.067, 39], [7.046, 125], [7.04, 167], [7.012, 30], [6.862, 206], [6.835, 54], [6.789, 35], [6.763, 99], [6.101, 30], [5.906, 38], [5.875, 30], [5.801, 53], [5.729, 33], [5.144, 35], [5.137, 46], [5.12, 93], [5.103, 76], [5.087, 93], [5.073, 49], [5.063, 32], [4.982, 37], [4.972, 35], [4.967, 46], [4.958, 62], [4.949, 67], [4.944, 45], [4.932, 35], [4.925, 34], [3.752, 1000], [3.348, 79], [3.275, 74]])
    peak_list['geranyl acetate'] = np.array([[5.366, 41], [5.363, 43], [5.36, 31], [5.348, 81], [5.345, 84], [5.343, 56], [5.33, 44], [5.327, 45], [5.102, 38], [5.099, 37], [5.089, 62], [5.085, 75], [5.082, 63], [5.071, 34], [5.068, 40], [5.065, 33], [4.598, 226], [4.58, 214], [2.116, 70], [2.114, 69], [2.099, 114], [2.082, 89], [2.063, 182], [2.048, 1000], [2.03, 53], [1.707, 520], [1.705, 530], [1.683, 456], [1.681, 469], [1.602, 511]])
    peak_list['benzyl acetate'] = np.array([[7.327, 1000], [7.237, 27], [7.221, 24], [5.086, 488], [2.065, 960], [2.028, 27]])
    peak_list['geraniol'] = np.array([[5.499, 31], [5.484, 34], [5.42, 69], [5.41, 68], [5.405, 67], [5.345, 41], [5.33, 43], [5.157, 30], [5.14, 33], [5.13, 38], [5.129, 38], [5.114, 57], [5.098, 71], [5.082, 64], [5.066, 43], [5.044, 30], [5.024, 34], [4.184, 226], [4.178, 230], [4.107, 201], [4.102, 203], [2.152, 34], [2.137, 40], [2.048, 441], [1.683, 1000], [1.677, 961], [1.668, 820], [1.61, 537], [1.6, 539], [1.568, 194]])
    peak_list['propyl acetate'] = np.array([[4.1, 120], [4.025, 250], [3.95, 134], [2.05, 1000], [1.771, 45], [1.698, 69], [1.693, 68], [1.624, 51], [1.614, 92], [1.537, 59], [1.023, 157], [0.945, 316], [0.937, 174], [0.867, 56], [0.859, 92], [0.848, 53]])
    peak_list['isoamyl alcohol'] = np.array([[3.749, 164], [3.674, 299], [3.655, 35], [3.599, 170], [1.728, 37], [1.66, 42], [1.599, 36], [1.571, 253], [1.534, 43], [1.53, 41], [1.501, 184], [1.426, 128], [1.357, 37], [1.343, 31], [0.952, 1000], [0.885, 805]])
    peak_list['ethyl lactate'] = np.array([[4.386, 24], [4.359, 95], [4.308, 70], [4.279, 307], [4.232, 75], [4.199, 322], [4.156, 30], [4.12, 112], [3.115, 34], [1.453, 776], [1.401, 22], [1.377, 1000], [1.299, 775], [1.219, 327]])
    peak_list['butyric acid'] = np.array([[11.507, 1000], [2.428, 203], [2.417, 222], [2.344, 367], [2.338, 455], [2.264, 331], [2.256, 349], [1.876, 49], [1.804, 86], [1.793, 127], [1.78, 40], [1.726, 106], [1.716, 254], [1.705, 146], [1.638, 183], [1.629, 175], [1.624, 218], [1.564, 60], [1.55, 151], [1.472, 47], [1.061, 263], [1.053, 489], [0.976, 966], [0.969, 544], [0.899, 171], [0.891, 300], [0.88, 157]])
    peak_list['isovaleric acid'] = np.array([[11.916, 207], [2.304, 32], [2.292, 47], [2.266, 102], [2.246, 144], [2.222, 287], [2.206, 346], [2.171, 76], [2.118, 41], [2.106, 47], [2.045, 39], [1.025, 1000], [0.998, 261], [0.979, 226], [0.952, 832]])
    peak_list['diethyl succinate'] = np.array([[4.273, 92], [4.193, 314], [4.113, 330], [4.034, 111], [2.618, 1000], [1.339, 408], [1.26, 847], [1.18, 359]])
    peak_list['quinic acid'] = np.array([[5.412, 44], [4.589, 150], [3.907, 282], [3.899, 511], [3.892, 558], [3.885, 527], [3.877, 283], [3.786, 252], [3.775, 288], [3.764, 448], [3.755, 438], [3.744, 332], [3.733, 278], [3.268, 581], [3.261, 579], [3.249, 560], [3.242, 531], [1.9, 403], [1.891, 616], [1.885, 327], [1.88, 301], [1.866, 794], [1.858, 1000], [1.848, 420], [1.79, 371], [1.787, 369], [1.776, 399], [1.772, 374], [1.757, 258], [1.752, 261], [1.742, 710], [1.72, 515], [1.709, 393], [1.697, 72], [1.687, 348]])
    peak_list['malonic acid'] = np.array([[11.189, 476], [3.45, 1000], [3.408, 56]])
    peak_list['lactic acid'] = np.array([[12.366, 0], [4.07, 59], [4.053, 192], [4.036, 195], [4.019, 60], [3.43, 14], [1.244, 1000], [1.227, 983]])
    peak_list['fumaric acid'] = np.array([[12.98, 30], [6.647, 1000]])
    peak_list['epicatechin'] = np.array([[9.048, 971], [8.838, 1000], [8.743, 920], [8.656, 898], [6.823, 463], [6.82, 477], [6.611, 135], [6.606, 66], [6.591, 761], [6.586, 525], [6.582, 450], [6.565, 84], [6.561, 94], [5.825, 530], [5.819, 573], [5.649, 562], [5.644, 542], [4.66, 456], [4.602, 333], [4.591, 344], [3.934, 187], [3.925, 185], [2.635, 106], [2.623, 120], [2.593, 190], [2.583, 145], [2.391, 142], [2.383, 122]])
    peak_list['eugenol'] = np.array([[6.845, 196], [6.838, 42], [6.83, 47], [6.823, 239], [6.671, 64], [6.666, 261], [6.664, 302], [6.659, 181], [6.651, 78], [6.65, 137], [6.648, 101], [6.646, 71], [6.645, 87], [6.643, 60], [5.986, 46], [5.969, 91], [5.961, 49], [5.952, 50], [5.944, 126], [5.927, 130], [5.918, 55], [5.91, 56], [5.901, 112], [5.885, 53], [5.727, 43], [5.08, 112], [5.079, 98], [5.075, 152], [5.051, 134], [5.046, 117], [5.033, 155], [5.029, 135], [5.026, 157], [3.801, 1000], [3.3, 184], [3.297, 136], [3.287, 111], [3.283, 185], [3.28, 138]])
    peak_list['cinnamic acid'] = np.array([[7.827, 403], [7.787, 425], [7.583, 37], [7.58, 53], [7.577, 51], [7.571, 270], [7.565, 306], [7.558, 250], [7.555, 368], [7.552, 203], [7.546, 332], [7.537, 47], [7.534, 52], [7.445, 46], [7.439, 44], [7.434, 47], [7.427, 141], [7.422, 168], [7.417, 1000], [7.411, 767], [7.406, 324], [7.403, 405], [7.401, 502], [7.395, 118], [7.393, 110], [7.389, 86], [7.382, 41], [7.379, 34], [7.376, 35], [6.485, 692], [6.444, 670]])
    peak_list['caffeic acid'] = np.array([[7.519, 491], [7.341, 660], [7.031, 1000], [6.94, 474], [6.92, 384], [6.811, 758], [6.723, 365], [6.272, 835], [6.095, 649]])
    peak_list['gallic acid'] = np.array([[7.093, 1000]])
    peak_list['acetic acid'] = np.array([[11.417, 55], [2.099, 1000]])
    peak_list['benzoic acid'] = np.array([[12.094, 203], [8.213, 76], [8.185, 645], [8.167, 749], [8.149, 267], [8.143, 300], [8.121, 246], [8.097, 706], [8.083, 356], [8.075, 1000], [7.701, 103], [7.683, 86], [7.624, 253], [7.614, 381], [7.585, 194], [7.567, 440], [7.558, 231], [7.547, 781], [7.537, 746], [7.53, 943], [7.525, 821], [7.512, 473], [7.483, 297], [7.465, 329], [7.454, 508], [7.448, 684], [7.442, 549], [7.432, 196], [7.423, 128], [7.42, 134], [7.398, 140], [7.381, 241], [7.37, 142], [7.351, 211]])
    # Second of the two original 'riboflavin' datasets (the surviving one).
    peak_list['riboflavin'] = np.array([[9.086, 911], [9.084, 951], [9.08, 1000], [9.078, 929], [8.744, 791], [8.74, 849], [8.732, 863], [8.727, 863], [8.266, 613], [8.262, 742], [8.26, 704], [8.256, 649], [8.251, 107], [8.246, 722], [8.242, 820], [8.241, 863], [8.236, 785], [8.226, 291], [7.668, 275], [7.544, 625], [7.542, 628], [7.532, 569], [7.53, 619], [7.524, 616], [7.522, 614], [7.512, 552], [7.51, 606]])
    peak_list['ethyl alcohol'] = np.array([[3.811, 109], [3.73, 427], [3.652, 469], [3.576, 132], [2.607, 199], [2.599, 168], [1.303, 430], [1.286, 23], [1.226, 1000], [1.207, 36], [1.199, 25], [1.146, 376]])
    peak_list['acetaldehyde'] = np.array([[9.803, 87], [9.794, 251], [9.784, 252], [9.775, 88], [2.211, 1000], [2.201, 998]])
    peak_list['hexyl acetate'] = np.array([[4.129, 67], [4.056, 178], [3.984, 101], [2.04, 1000], [1.622, 39], [1.606, 33], [1.557, 32], [1.554, 32], [1.545, 30], [1.541, 30], [1.533, 30], [1.426, 38], [1.42, 39], [1.394, 50], [1.387, 52], [1.355, 99], [1.333, 141], [1.317, 152], [1.306, 154], [1.289, 141], [0.956, 52], [0.927, 42], [0.926, 42], [0.894, 174], [0.828, 32]])
    peak_list['benzaldehyde'] = np.array([[10.003, 1000], [7.884, 127], [7.879, 531], [7.877, 459], [7.876, 611], [7.873, 292], [7.864, 240], [7.858, 700], [7.855, 675], [7.634, 103], [7.63, 186], [7.627, 106], [7.617, 148], [7.612, 381], [7.607, 167], [7.597, 235], [7.593, 374], [7.59, 184], [7.53, 491], [7.526, 252], [7.51, 654], [7.507, 262], [7.496, 133], [7.492, 305], [7.491, 276], [7.49, 221]])
    return peak_list
class SlideJoint(Constraint):
    """ctypes wrapper around a Chipmunk cpSlideJoint constraint.

    Keeps bodies a and b between ``min`` and ``max`` distance apart at the
    given anchor points. The Python properties read/write the underlying C
    struct fields directly.

    NOTE(review): the ``min``/``max`` parameter and property names shadow the
    Python builtins; kept for API compatibility with the C field names.
    """

    def __init__(self, a, b, anchr1, anchr2, min, max):
        # Allocate the C constraint and keep struct views for field access.
        self._constraint = cp.cpSlideJointNew(a._body, b._body, anchr1, anchr2, min, max)
        self._ccontents = self._constraint.contents
        # Cast the generic constraint pointer to the concrete slide-joint struct.
        self._sjc = cp.cast(self._constraint, ct.POINTER(cp.cpSlideJoint)).contents
        self._set_bodies(a, b)

    def _get_anchr1(self):
        return self._sjc.anchr1
    def _set_anchr1(self, anchr):
        self._sjc.anchr1 = anchr
    # Anchor point on body a, in body-local coordinates.
    anchr1 = property(_get_anchr1, _set_anchr1)

    def _get_anchr2(self):
        return self._sjc.anchr2
    def _set_anchr2(self, anchr):
        self._sjc.anchr2 = anchr
    # Anchor point on body b, in body-local coordinates.
    anchr2 = property(_get_anchr2, _set_anchr2)

    def _get_min(self):
        return self._sjc.min
    def _set_min(self, min):
        self._sjc.min = min
    # Minimum allowed distance between the anchors.
    min = property(_get_min, _set_min)

    def _get_max(self):
        return self._sjc.max
    def _set_max(self, max):
        self._sjc.max = max
    # Maximum allowed distance between the anchors.
    max = property(_get_max, _set_max)
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    """K-fold estimate of the validation rate (VAL) at a target false-accept rate.

    For each fold, the distance threshold achieving ``far_target`` on the
    training split is located by linear interpolation over the candidate
    ``thresholds``; VAL and FAR are then measured on the held-out split via
    calculate_val_far().

    Returns:
        (mean VAL, std VAL, mean FAR) across folds.
    """
    assert embeddings1.shape[0] == embeddings2.shape[0]
    assert embeddings1.shape[1] == embeddings2.shape[1]

    n_pairs = min(len(actual_issame), embeddings1.shape[0])
    n_thresholds = len(thresholds)
    folds = KFold(n_splits=nrof_folds, shuffle=False)

    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)
    # Squared L2 distance between the two embeddings of every pair.
    dist = np.sum(np.square(np.subtract(embeddings1, embeddings2)), 1)
    indices = np.arange(n_pairs)

    for fold_idx, (train_set, test_set) in enumerate(folds.split(indices)):
        # FAR on the training split for every candidate threshold.
        far_train = np.zeros(n_thresholds)
        for t_idx, threshold in enumerate(thresholds):
            _, far_train[t_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train) >= far_target:
            # Invert the threshold -> FAR curve to find the target threshold.
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            # Target FAR is unreachable on this fold; fall back to 0.
            threshold = 0.0
        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])

    return np.mean(val), np.std(val), np.mean(far)
class UAJob():
    """One unsupervised-adaptation run layered on top of a pretraining run.

    The output directory is derived from an MD5 hash of the (sorted) training
    arguments so that the adaptation job finds the matching pretrained model.
    ``state`` reflects which marker files already exist in that directory.
    """

    NOT_LAUNCHED = 'Not launched'
    INCOMPLETE = 'Incomplete'
    PRETRAINED = 'Pretrained'
    DONE = 'Done'

    def __init__(self, train_args, sweep_output_dir, adapt_algorithm):
        # Hash the args to locate the pretraining run's output directory.
        args_str = json.dumps(train_args, sort_keys=True)
        args_hash = hashlib.md5(args_str.encode('utf-8')).hexdigest()
        self.output_dir = os.path.join(sweep_output_dir, args_hash)
        self.adapt_algorithm = adapt_algorithm
        # Deep-copy so the caller's dict is never mutated.
        self.train_args = copy.deepcopy(train_args)
        self.train_args['output_dir'] = self.output_dir
        command = ['python', '-m', 'domainbed.scripts.unsupervised_adaptation', '--input_dir', self.train_args['output_dir'], '--adapt_algorithm', adapt_algorithm]
        self.command_str = ' '.join(command)
        # Marker files: 'done' = pretraining finished, 'done_<algo>' =
        # adaptation finished, 'results_<algo>.jsonl' = adaptation started.
        if os.path.exists(os.path.join(self.output_dir, 'done')):
            if os.path.exists(os.path.join(self.output_dir, 'done_{}'.format(adapt_algorithm))):
                self.state = UAJob.DONE
            else:
                self.state = UAJob.PRETRAINED
        elif os.path.exists(os.path.join(self.output_dir, 'results_{}.jsonl'.format(adapt_algorithm))):
            self.state = UAJob.INCOMPLETE
        else:
            self.state = UAJob.NOT_LAUNCHED

    def __str__(self):
        job_info = (self.train_args['dataset'], self.train_args['algorithm'], self.train_args['test_envs'], self.train_args['hparams_seed'], self.adapt_algorithm)
        return '{}: {} {}'.format(self.state, self.output_dir, job_info)


def launch(jobs, launcher_fn):
    """Create every job's output directory and hand the commands to launcher_fn.

    NOTE(review): the original body appeared twice back-to-back, which would
    have launched every job twice; the duplicate block was removed.
    """
    print('Launching...')
    jobs = jobs.copy()
    np.random.shuffle(jobs)
    print('Making job directories:')
    for job in tqdm.tqdm(jobs, leave=False):
        os.makedirs(job.output_dir, exist_ok=True)
    commands = [job.command_str for job in jobs]
    launcher_fn(commands)
    print(f'Launched {len(jobs)} jobs!')
class AverageMeter(object):
    """Running-average accumulator with window history.

    update() folds in new values, mean() reports the current window's
    average, and reset() archives that average into ``last``/``history``
    before starting a fresh window.
    """

    def __init__(self):
        self.history = []  # means of all completed windows
        self.last = None   # mean of the most recently completed window
        self.val = 0       # latest raw value passed to update()
        self.sum = 0
        self.count = 0

    def reset(self):
        """Archive the current window's mean, then zero the accumulators."""
        self.last = self.mean()
        self.history.append(self.last)
        self.val = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in ``val`` observed ``n`` times."""
        self.val = val
        self.sum += val * n
        self.count += n

    def mean(self):
        """Average of the current window; 0.0 when nothing was recorded."""
        return self.sum / self.count if self.count else 0.0
def _union_lcs(evaluated_sentences, reference_sentence):
    """Compute the ROUGE-L union-LCS value of a reference sentence.

    The union LCS is the set-union of the LCS tokens between the reference
    and each evaluated sentence; the value is |union| divided by the sum of
    the per-sentence LCS sizes.

    Args:
        evaluated_sentences: non-empty list of candidate sentences.
        reference_sentence: single reference sentence.

    Returns:
        The union-LCS value; 0.0 when no sentence shares any LCS tokens
        with the reference (the original code raised ZeroDivisionError here).

    Raises:
        ValueError: if ``evaluated_sentences`` is empty.
    """
    if len(evaluated_sentences) <= 0:
        raise ValueError('Collections must contain at least 1 sentence.')
    lcs_union = set()
    reference_words = _split_into_words([reference_sentence])
    combined_lcs_length = 0
    for eval_s in evaluated_sentences:
        evaluated_words = _split_into_words([eval_s])
        lcs = set(_recon_lcs(reference_words, evaluated_words))
        combined_lcs_length += len(lcs)
        lcs_union = lcs_union.union(lcs)
    # Guard the degenerate no-overlap case instead of dividing by zero.
    if combined_lcs_length == 0:
        return 0.0
    union_lcs_count = len(lcs_union)
    union_lcs_value = union_lcs_count / combined_lcs_length
    return union_lcs_value
class P7(GenericPenaltyLagrangian):
    # NOTE(review): this block is corrupted — the second and third parameter
    # names and several attribute names (very likely Greek identifiers such
    # as rho/mu/sigma) were stripped by a lossy encoding pass, leaving the
    # code below syntactically invalid. Do not attempt to infer the penalty
    # formula from what remains; recover the original from version control.
    def __call__(self, y: Tensor, : Tensor, : Tensor) -> Tensor:
        return (((self..mul * self.((( * y) / ))) * ( ** 2)) / )
def parseTii(filename):
    """Parse a Tii-format XML annotation file into a list of Annotation objects.

    NOTE(review): the leading ``assert False`` deliberately disables this
    parser (and would be stripped under ``python -O``), so everything below
    is currently unreachable — confirm whether it should be removed or
    re-enabled before relying on it.
    """
    assert False
    annotations = []
    doc = xml.dom.minidom.parse(filename)
    for file in doc.getElementsByTagName('file'):
        anno = Annotation()
        for filename in file.getElementsByTagName('filename'):
            # Image name: the 'Src' attribute with its 3-char extension swapped for .png.
            aNode = filename.getAttributeNode('Src')
            anno.imageName = (aNode.firstChild.data[:(- 4)] + '.png')
        for objects in file.getElementsByTagName('objects'):
            for vehicle in objects.getElementsByTagName('vehicle'):
                aNode = vehicle.getAttributeNode('Type')
                # NOTE(review): despite the <vehicle> tag, only entries typed
                # 'pedestrian' are kept — looks intentional but worth verifying.
                type = aNode.firstChild.data
                if (type == 'pedestrian'):
                    rect = AnnoRect()
                    # FR/SD flags select which coordinate prefix to read.
                    aNode = vehicle.getAttributeNode('FR')
                    frontrear = aNode.firstChild.data
                    aNode = vehicle.getAttributeNode('SD')
                    side = aNode.firstChild.data
                    if (frontrear == '1'):
                        orientation = 'FR'
                    elif (side == '1'):
                        orientation = 'SD'
                    # NOTE(review): if neither flag is '1', `orientation` is
                    # unbound and the attribute lookups below raise NameError.
                    aNode = vehicle.getAttributeNode((orientation + '_TopLeft_X'))
                    rect.x1 = float(aNode.firstChild.data)
                    aNode = vehicle.getAttributeNode((orientation + '_TopLeft_Y'))
                    rect.y1 = float(aNode.firstChild.data)
                    aNode = vehicle.getAttributeNode((orientation + '_BottomRight_X'))
                    rect.x2 = float(aNode.firstChild.data)
                    aNode = vehicle.getAttributeNode((orientation + '_BottomRight_Y'))
                    rect.y2 = float(aNode.firstChild.data)
                    print('pedestrian:', anno.imageName, rect.x1, rect.y1, rect.x2, rect.y2)
                    anno.rects.append(rect)
        annotations.append(anno)
    return annotations
def _depthwise_conv_block(inputs, pointwise_conv_filters, alpha, depth_multiplier=1, strides=(1, 1), block_id=1):
    """MobileNet-style depthwise-separable convolution block.

    Applies a 3x3 depthwise convolution followed by a 1x1 pointwise
    convolution, each with batch normalization and ReLU6, and returns the
    resulting tensor.  `alpha` scales the pointwise filter count and
    `block_id` feeds the layer names.
    """
    bn_axis = 1 if backend.image_data_format() == 'channels_first' else -1
    filters = int(pointwise_conv_filters * alpha)
    unit_stride = strides == (1, 1)
    if unit_stride:
        x = inputs
    else:
        # Strided path: explicit (bottom, right) zero padding feeds a
        # 'valid' depthwise convolution below.
        x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(inputs)
    x = layers.DepthwiseConv2D(
        (3, 3),
        padding='same' if unit_stride else 'valid',
        depth_multiplier=depth_multiplier,
        strides=strides,
        use_bias=False,
        name='conv_dw_%d' % block_id,
    )(x)
    x = layers.BatchNormalization(axis=bn_axis, name='conv_dw_%d_bn' % block_id)(x)
    x = layers.ReLU(6.0, name='conv_dw_%d_relu' % block_id)(x)
    # Pointwise (1x1) projection to the scaled filter count.
    x = layers.Conv2D(filters, (1, 1), padding='same', use_bias=False, strides=(1, 1), name='conv_pw_%d' % block_id)(x)
    x = layers.BatchNormalization(axis=bn_axis, name='conv_pw_%d_bn' % block_id)(x)
    return layers.ReLU(6.0, name='conv_pw_%d_relu' % block_id)(x)
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    """Seq2Seq trainer specialised for question answering.

    Evaluation and prediction run the generation loop first, then
    post-process the raw generations against the original (un-tokenized)
    examples before computing metrics.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        # eval_examples: raw examples aligned with eval_dataset.
        # post_process_function: maps (examples, dataset, raw output) to the
        # structure expected by compute_metrics.
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset: Optional[Dataset]=None, eval_examples=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval', max_length: Optional[int]=None, num_beams: Optional[int]=None) -> Dict[(str, float)]:
        """Run generation-based evaluation; returns metrics whose keys all
        carry `metric_key_prefix`."""
        # Generation knobs fall back to the values configured in args.
        self._max_length = (max_length if (max_length is not None) else self.args.generation_max_length)
        self._num_beams = (num_beams if (num_beams is not None) else self.args.generation_num_beams)
        eval_dataset = (self.eval_dataset if (eval_dataset is None) else eval_dataset)
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = (self.eval_examples if (eval_examples is None) else eval_examples)
        # Temporarily disable compute_metrics so the inner loop does not try
        # to score raw, un-post-processed predictions.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = (self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop)
        try:
            output = eval_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            # Always restore the metric function, even if the loop raises.
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is not None) and (self.compute_metrics is not None)):
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Normalise metric keys to the expected prefix.
            for key in list(metrics.keys()):
                if (not key.startswith(f'{metric_key_prefix}_')):
                    metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
            self.log(metrics)
        else:
            metrics = {}
        if (self.args.tpu_metrics_debug or self.args.debug):
            # torch_xla debug report on TPU.
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str='test'):
        """Generate predictions for `predict_dataset`, post-process them
        against `predict_examples`, and return a PredictionOutput."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = (self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop)
        try:
            output = eval_loop(predict_dataloader, description='Prediction', prediction_loss_only=(True if (compute_metrics is None) else None), ignore_keys=ignore_keys)
        finally:
            self.compute_metrics = compute_metrics
        if ((self.post_process_function is None) or (self.compute_metrics is None)):
            # Without post-processing / metrics there is nothing more to do.
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        for key in list(metrics.keys()):
            if (not key.startswith(f'{metric_key_prefix}_')):
                metrics[f'{metric_key_prefix}_{key}'] = metrics.pop(key)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
def get_tables(soup: BeautifulSoup, season: int) -> List[pd.DataFrame]:
    """Scrape the season's stats table(s) from a parsed page into DataFrames.

    Seasons >= 1969 may have several tables (1981's strike season keeps only
    tables whose id mentions 'overall'); earlier seasons have one table whose
    trailing columns are discarded -- how many depends on the era.
    """
    datasets = []
    if (season >= 1969):
        tables: List[PageElement] = soup.find_all('table')
        if (season == 1981):
            # 1981 split season: keep only the 'overall' standings tables.
            tables = [x for x in tables if ('overall' in x.get('id', ''))]
        for table in tables:
            data = []
            # Header row: the <th> cells of the table's first <tr>.
            headings: List[PageElement] = [th.get_text() for th in table.find('tr').find_all('th')]
            data.append(headings)
            table_body: PageElement = table.find('tbody')
            rows: List[PageElement] = table_body.find_all('tr')
            for row in rows:
                cols: List[PageElement] = row.find_all('td')
                cols_text: List[str] = [ele.text.strip() for ele in cols]
                # The name lives in the row's first link, not in a <td>.
                cols_text.insert(0, row.find_all('a')[0].text.strip())
                data.append([ele for ele in cols_text if ele])  # drop empties
            datasets.append(data)
    else:
        data = []
        table = soup.find('table')
        headings = [th.get_text() for th in table.find('tr').find_all('th')]
        headings[0] = 'Name'
        # Era-dependent count of trailing columns to discard.
        if (season >= 1930):
            for _ in range(15):
                headings.pop()
        elif (season >= 1876):
            for _ in range(14):
                headings.pop()
        else:
            for _ in range(16):
                headings.pop()
        data.append(headings)
        table_body = table.find('tbody')
        rows = table_body.find_all('tr')
        for row in rows:
            if (row.find_all('a') == []):
                continue  # skip rows without a player link (separators etc.)
            cols = row.find_all('td')
            # Drop the same trailing columns as removed from the header.
            if (season >= 1930):
                for _ in range(15):
                    cols.pop()
            elif (season >= 1876):
                for _ in range(14):
                    cols.pop()
            else:
                for _ in range(16):
                    cols.pop()
            cols = [ele.text.strip() for ele in cols]
            cols.insert(0, row.find_all('a')[0].text.strip())
            data.append([ele for ele in cols if ele])
        datasets.append(data)
    # Materialise each collected table as a DataFrame.
    for idx in range(len(datasets)):
        datasets[idx] = pd.DataFrame(datasets[idx])
    return datasets
def _draw_segment(surface, segment):
    """Draw a physics segment shape onto a pygame surface as a line."""
    attached_body = segment.body
    # Endpoints in world coordinates: rotate the local anchors by the body
    # angle, then offset by the body position.
    world_a = attached_body.position + segment.a.rotated(attached_body.angle)
    world_b = attached_body.position + segment.b.rotated(attached_body.angle)
    start = to_pygame(world_a, surface)
    end = to_pygame(world_b, surface)
    # Color priority: explicit shape color, then grey for static bodies,
    # blue otherwise.
    if hasattr(segment, 'color'):
        draw_color = segment.color
    elif segment.body.is_static:
        draw_color = pygame.color.THECOLORS['lightgrey']
    else:
        draw_color = pygame.color.THECOLORS['blue']
    # Line width approximates the segment thickness (radius * 2), min 1 px.
    width = max(int(segment.radius * 2), 1)
    pygame.draw.lines(surface, draw_color, False, [start, end], width)
def ReadNonScoredWords(non_scored_words_file):
    """Populate the module-level `non_scored_words` set from a file listing
    exactly one non-scored word per line.

    Exits the program with a diagnostic (original behavior preserved) if the
    file cannot be opened or a line does not contain exactly one word.
    """
    global non_scored_words
    try:
        f = open(non_scored_words_file, encoding='utf-8')
    except OSError:  # was a bare except; only I/O errors are expected here
        sys.exit(('modify_ctm_edits.py: error opening file: --non-scored-words=' + non_scored_words_file))
    # Context manager guarantees the handle is closed even when a bad line
    # aborts via sys.exit (SystemExit propagates through the with-block).
    with f:
        for line in f:
            a = line.split()
            # Reuse the already-computed split instead of splitting twice.
            if len(a) != 1:
                sys.exit('modify_ctm_edits.py: bad line in non-scored-words file {0}: {1}'.format(non_scored_words_file, line))
            non_scored_words.add(a[0])
def compute_auroc(predictions, targets) -> float:
    """Area under the ROC curve over flattened predictions/targets.

    Raises RuntimeError unless every target is an exact integer (binary
    labels expected).
    """
    # Any fractional part in the targets means non-{0,1} labels.
    if (targets - targets.int()).sum() > 0.0:
        raise RuntimeError('targets for AUROC must be binary')
    flat_targets = targets.reshape(-1)
    flat_predictions = predictions.reshape(-1)
    return roc_auc_score(flat_targets, flat_predictions)
def test_osipkovmerritt_hernquist_sigmar():
    """Regression test: the radial velocity dispersion of samples drawn from
    an Osipkov-Merritt Hernquist distribution function must satisfy the
    Jeans equation with anisotropy beta(r) = 1 / (1 + ra^2 / r^2).
    """
    pot = potential.HernquistPotential(amp=2.3, a=1.3)
    ras = [0.3, 2.3, 5.7]  # anisotropy radii to exercise
    for ra in ras:
        dfh = osipkovmerrittHernquistdf(pot=pot, ra=ra)
        numpy.random.seed(10)  # deterministic sampling per ra
        samp = dfh.sample(n=100000)
        tol = 0.05  # 5% tolerance on sigma_r
        # Compare sigma_r(r) of the samples against the Jeans prediction over
        # a decade around the potential's scale radius.
        check_sigmar_against_jeans(samp, pot, tol, beta=(lambda r: (1.0 / (1.0 + ((ra ** 2.0) / (r ** 2.0))))), rmin=(pot._scale / 10.0), rmax=(pot._scale * 10.0), bins=31)
    return None
# NOTE(review): "_searcher('bo')" looks like a truncated decorator (e.g.
# "@register_searcher('bo')") damaged in extraction; as written it is a bare
# call statement and the class below is never registered.  Confirm against
# the original source.
_searcher('bo')
class BayesianOptimizationSearcher(Searcher):
    """Searcher driven by Bayesian optimization over an index-encoded space:
    continuous subspaces keep their bounds, discrete subspaces are mapped to
    integer index ranges."""

    def __init__(self, search_space, seed=42):
        super().__init__(search_space)
        idx_search_space = {}
        for (key, space) in zip(self.search_space_keys, self.search_space_pool):
            if isinstance(space, ContinuousSearchSpace):
                idx_search_space[key] = tuple(space.bound)
            else:
                # Discrete space: optimize over its index range [0, n-1].
                idx_search_space[key] = (0, (space.total_num - 1))
        self.bo_agent = BayesianOptimization(idx_search_space, random_seed=seed)

    def suggest(self):
        """Ask the BO agent for the next point and return it decoded as a
        parameter dict; remembers the raw indices for get_feedback()."""
        param_indices = self.bo_agent.gen_next_params()
        self.last_param_indices = param_indices
        return self.params_vec2params_dict(self.indices2params_vec(param_indices))

    def get_feedback(self, metric):
        """Register `metric` for the point produced by the last suggest()
        call and track the best (param, metric) pair seen so far."""
        assert (self.last_param_indices is not None), ('Need run suggest first ' + 'to get parameters and the input metric is corresponding to this parameters.')
        try:
            self.bo_agent._space.register(self.last_param_indices, metric)
        except KeyError:
            # Point already registered with the BO space: safe to skip.
            logger.debug('Find registered params, skip it.')
            pass
        if ((self.best is None) or (self.best[1] < metric)):
            param = self.params_vec2params_dict(self.indices2params_vec(self.last_param_indices))
            self.best = (param, metric)
        self.last_param_indices = None

    def feedback(self, param, metric):
        """Register an externally evaluated (param, metric) pair."""
        if ((self.best is None) or (self.best[1] < metric)):
            self.best = (param, metric)
        self.bo_agent._space.register(param, metric)

    def indices2params_vec(self, indices):
        """Decode an index mapping back into a parameter-value vector."""
        res = []
        for (key, ind) in indices.items():
            space = self.search_space_pool[self.search_space_keys.index(key)]
            if isinstance(space, ContinuousSearchSpace):
                res.append(ind)  # continuous values pass through unchanged
            else:
                # Round and clamp to a valid discrete index, then look up.
                ind = int(min(max(round(ind), 0), (space.total_num - 1)))
                res.append(space.get_value(ind))
        return res
class cnn_cifar100(nn.Module):
    """Small LeNet-style CNN for CIFAR-100: 3x32x32 inputs, 100 classes."""

    def __init__(self):
        super(cnn_cifar100, self).__init__()
        self.n_cls = 100
        # Two 5x5 conv stages, each followed by 2x2 max pooling:
        # 32x32 -> 28x28 -> 14x14 -> 10x10 -> 5x5 spatial resolution.
        self.conv1 = torch.nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5)
        self.conv2 = torch.nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5)
        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)
        # Classifier head over the flattened 64x5x5 feature map.
        self.fc1 = torch.nn.Linear(64 * 5 * 5, 384)
        self.fc2 = torch.nn.Linear(384, 192)
        self.fc3 = torch.nn.Linear(192, self.n_cls)

    def forward(self, x):
        """Return raw class logits of shape (batch, 100)."""
        pooled = self.pool(F.relu(self.conv1(x)))
        pooled = self.pool(F.relu(self.conv2(pooled)))
        flat = pooled.view(-1, 64 * 5 * 5)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# NOTE(review): "_grad()" looks like a truncated "@torch.no_grad()" decorator
# damaged in extraction; as written it is a bare call statement.
_grad()
def evaluate(data_loader, model, device):
    """Evaluate `model` on `data_loader`, logging top-1/top-5 accuracy and
    cross-entropy loss.

    Returns:
        dict mapping metric name -> global average across the dataset.
    """
    criterion = torch.nn.CrossEntropyLoss()
    metric_logger = misc.MetricLogger(delimiter=' ')
    header = 'Test:'
    model.eval()  # inference mode for dropout/batchnorm
    for batch in metric_logger.log_every(data_loader, 100, header):
        # Batch layout: images first, labels last.
        images = batch[0]
        target = batch[(- 1)]
        images = images.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        with torch.cuda.amp.autocast():  # mixed-precision forward pass
            output = model(images)
            loss = criterion(output, target)
        (acc1, acc5) = accuracy(output, target, topk=(1, 5))
        batch_size = images.shape[0]
        metric_logger.update(loss=loss.item())
        # Weight the per-batch accuracies by batch size for correct averages.
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
    # Aggregate meters across distributed workers before reporting.
    metric_logger.synchronize_between_processes()
    print('* {top1.global_avg:.3f} {top5.global_avg:.3f} loss {losses.global_avg:.3f}'.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
    return {k: meter.global_avg for (k, meter) in metric_logger.meters.items()}
# NOTE(review): the line below looks like a truncated decorator (e.g.
# "@_Intersection.register('intersection')") damaged in extraction; kept
# byte-identical as a bare statement.
_Intersection.register('intersection')
class Intersection(_Intersection):
    """Box intersection operation: a hard intersection when the temperature
    is 0, otherwise a Gumbel (soft) intersection at that temperature."""

    def __init__(self, intersection_temperature: float=0.0, approximation_mode: Optional[str]=None) -> None:
        # intersection_temperature: 0 selects the hard intersection.
        # approximation_mode: forwarded to the Gumbel intersection.
        super().__init__()
        self.intersection_temperature = intersection_temperature
        self.approximation_mode = approximation_mode

    def _forward(self, left: BoxTensor, right: BoxTensor) -> BoxTensor:
        """Intersect two boxes, dispatching on the configured temperature."""
        use_hard = self.intersection_temperature == 0
        if use_hard:
            return hard_intersection(left, right)
        return gumbel_intersection(left, right, self.intersection_temperature, self.approximation_mode)
# NOTE(review): the fields below use dataclasses.field(), so this class is
# presumably decorated with @dataclass in the original source (the decorator
# appears lost in extraction) -- without it the field() objects remain plain
# class attributes.  Confirm against the original.
class DataTrainingArguments():
    """Arguments controlling the data used for language-model training,
    distillation, and data-to-text (prefix-tuning) fine-tuning."""
    # --- input files ------------------------------------------------------
    train_data_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    eval_data_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    line_by_line: bool = field(default=False, metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'})
    # --- masked / permutation LM options ----------------------------------
    mlm: bool = field(default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'})
    mlm_probability: float = field(default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'})
    plm_probability: float = field(default=(1 / 6), metadata={'help': 'Ratio of length of a span of masked tokens to surrounding context length for permutation language modeling.'})
    max_span_length: int = field(default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'})
    # --- task / distillation options --------------------------------------
    task_mode: Optional[str] = field(default=None, metadata={'help': 'The task mode'})
    matching_objective: Optional[str] = field(default='kl', metadata={'help': 'The distillation objective'})
    distill: Optional[str] = field(default='no', metadata={'help': 'yes/no'})
    finetuned_model_path: Optional[str] = field(default='/u/scr/xlisali/contrast_LM/transformers/examples/full/full/webnlgfinetune_n_20_act_cat_b=6-e=10_d=0.0_u=no_lr=1e-05_w=0.0_s=101_r=n_m=512_earlystop', metadata={'help': 'finetuned model path (teacher model)'})
    format_mode: Optional[str] = field(default='cat', metadata={'help': 'The mode of data2text format (cat, peek, nopeek)'})
    # --- low-data initialization trick ------------------------------------
    lowdata_token: Optional[str] = field(default='summarize', metadata={'help': 'The token to be prepended at initialization time. '})
    use_lowdata_token: Optional[str] = field(default='yes', metadata={'help': 'Whether we should use the lowdata token and pass it to the prefixTuning Model for the initialization trick. '})
    train_embs: Optional[str] = field(default='no', metadata={'help': 'whether the train word embeddings'})
    # --- sequence-length limits -------------------------------------------
    max_source_length: Optional[int] = field(default=512, metadata={'help': 'the max source length of summarization data. '})
    train_max_target_length: Optional[int] = field(default=100, metadata={'help': 'the max target length for training data. '})
    val_max_target_length: Optional[int] = field(default=100, metadata={'help': 'the max target length for dev data. '})
    block_size: int = field(default=(- 1), metadata={'help': 'Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to the model max input length for single sentence inputs (take into account special tokens).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
# NOTE(review): "_task('wsc')" looks like a truncated "@register_task('wsc')"
# decorator damaged in extraction.  Similarly, inside the class, `add_args`
# is conventionally a @staticmethod, `load_dictionary`/`setup_task` are
# @classmethods, and `source_dictionary`/`target_dictionary` are @properties
# in fairseq tasks -- those decorators appear stripped too.  Confirm against
# the original source.
_task('wsc')
class WSCTask(LegacyFairseqTask):
    """Fairseq task for the Winograd Schema Challenge (WSC): scores candidate
    noun chunks as fillers for a masked pronoun span."""

    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', metavar='DIR', help='path to data directory; we load <split>.jsonl')
        parser.add_argument('--init-token', type=int, default=None, help='add token at the beginning of each batch item')

    def __init__(self, args, vocab):
        super().__init__(args)
        self.vocab = vocab
        self.mask = vocab.add_symbol('<mask>')
        self.bpe = encoders.build_bpe(args)
        self.tokenizer = encoders.build_tokenizer(args)
        # GPT-2 BPE folds the leading space into the token; other BPEs get a
        # trailing space instead.
        if (args.bpe == 'gpt2'):
            self.leading_space = True
            self.trailing_space = False
        else:
            self.leading_space = False
            self.trailing_space = True

    def load_dictionary(cls, filename):
        """Load a Dictionary and make sure it contains a <mask> symbol."""
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    def setup_task(cls, args, **kwargs):
        """Construct the task from CLI args (requires --criterion=wsc)."""
        assert (args.criterion == 'wsc'), 'Must set --criterion=wsc'
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))
        return cls(args, vocab)

    def binarize(self, s: str, append_eos: bool=False):
        """Tokenize, BPE-encode and map `s` to vocab indices; optionally
        prepends the configured init token."""
        if (self.tokenizer is not None):
            s = self.tokenizer.encode(s)
        if (self.bpe is not None):
            s = self.bpe.encode(s)
        tokens = self.vocab.encode_line(s, append_eos=append_eos, add_if_not_exist=False).long()
        if (self.args.init_token is not None):
            tokens = torch.cat([tokens.new([self.args.init_token]), tokens]) 
        return tokens

    def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
        """Binarize prefix+txt+suffix; return (tokens, mask) where the mask
        marks the token positions that belong to `txt`."""
        toks = self.binarize(((((prefix + leading_space) + txt) + trailing_space) + suffix), append_eos=True)
        mask = torch.zeros_like(toks, dtype=torch.bool)
        # The span starts where the binarized prefix ends.
        mask_start = len(self.binarize(prefix))
        mask_size = len(self.binarize((leading_space + txt)))
        mask[mask_start:(mask_start + mask_size)] = 1
        return (toks, mask)

    def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
        """Build the WSC dataset for `split` from <split>.jsonl.

        For each example, binarizes the query span (if any) and every
        candidate noun chunk, with masks over the filler positions.
        """
        if (data_path is None):
            data_path = os.path.join(self.args.data, (split + '.jsonl'))
        if (not os.path.exists(data_path)):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))
        query_tokens = []
        query_masks = []
        query_lengths = []
        candidate_tokens = []
        candidate_masks = []
        candidate_lengths = []
        labels = []
        for (sentence, pronoun_span, query, label) in wsc_utils.jsonl_iterator(data_path):
            prefix = sentence[:pronoun_span.start].text
            suffix = sentence[pronoun_span.end:].text_with_ws
            # Preserve the whitespace conventions around the pronoun span.
            leading_space = (' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else '')
            trailing_space = (' ' if pronoun_span.text_with_ws.endswith(' ') else '')
            cand_spans = wsc_utils.filter_noun_chunks(wsc_utils.extended_noun_chunks(sentence), exclude_pronouns=True, exclude_query=query, exact_match=False)
            if (query is not None):
                (query_toks, query_mask) = self.binarize_with_mask(query, prefix, suffix, leading_space, trailing_space)
                query_len = len(query_toks)
            else:
                (query_toks, query_mask, query_len) = (None, None, 0)
            query_tokens.append(query_toks)
            query_masks.append(query_mask)
            query_lengths.append(query_len)
            (cand_toks, cand_masks) = ([], [])
            for cand_span in cand_spans:
                (toks, mask) = self.binarize_with_mask(cand_span.text, prefix, suffix, leading_space, trailing_space)
                cand_toks.append(toks)
                cand_masks.append(mask)
            # Pad all candidates of this example to a common length.
            cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
            cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
            assert (cand_toks.size() == cand_masks.size())
            candidate_tokens.append(cand_toks)
            candidate_masks.append(cand_masks)
            candidate_lengths.append(cand_toks.size(1))
            labels.append(label)
        query_lengths = np.array(query_lengths)
        query_tokens = ListDataset(query_tokens, query_lengths)
        query_masks = ListDataset(query_masks, query_lengths)
        candidate_lengths = np.array(candidate_lengths)
        candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
        candidate_masks = ListDataset(candidate_masks, candidate_lengths)
        labels = ListDataset(labels, ([1] * len(labels)))
        dataset = {'id': IdDataset(), 'query_tokens': query_tokens, 'query_masks': query_masks, 'candidate_tokens': candidate_tokens, 'candidate_masks': candidate_masks, 'labels': labels, 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(query_tokens, reduce=True)}
        nested_dataset = NestedDictionaryDataset(dataset, sizes=[query_lengths])
        # Deterministic shuffle derived from the configured seed.
        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(query_tokens))
        dataset = SortDataset(nested_dataset, sort_order=[shuffle])
        if return_only:
            return dataset
        self.datasets[split] = dataset
        return self.datasets[split]

    def build_dataset_for_inference(self, sample_json):
        """Round-trip one JSON example through a temp file so the regular
        load_dataset path can construct it."""
        with tempfile.NamedTemporaryFile(buffering=0) as h:
            h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
            dataset = self.load_dataset('disambiguate_pronoun', data_path=h.name, return_only=True)
        return dataset

    def disambiguate_pronoun(self, model, sentence, use_cuda=False):
        """Score candidates for the masked pronoun in `sentence`.

        Returns True/False (does the query fill the pronoun?) when a query
        is present; otherwise returns the best-scoring candidate string.
        """
        sample_json = wsc_utils.convert_sentence_to_json(sentence)
        dataset = self.build_dataset_for_inference(sample_json)
        sample = dataset.collater([dataset[0]])
        if use_cuda:
            sample = utils.move_to_cuda(sample)

        def get_masked_input(tokens, mask):
            # Replace the span positions with the <mask> symbol.
            masked_tokens = tokens.clone()
            masked_tokens[mask.bool()] = self.mask
            return masked_tokens

        def get_lprobs(tokens, mask):
            # Average log-probability of the original tokens over the span.
            (logits, _) = model(src_tokens=get_masked_input(tokens, mask))
            lprobs = F.log_softmax(logits, dim=(- 1), dtype=torch.float)
            scores = lprobs.gather(2, tokens.unsqueeze((- 1))).squeeze((- 1))
            mask = mask.type_as(scores)
            scores = ((scores * mask).sum(dim=(- 1)) / mask.sum(dim=(- 1)))
            return scores
        cand_lprobs = get_lprobs(sample['candidate_tokens'][0], sample['candidate_masks'][0])
        if (sample['query_tokens'][0] is not None):
            query_lprobs = get_lprobs(sample['query_tokens'][0].unsqueeze(0), sample['query_masks'][0].unsqueeze(0))
            # The query wins only if it beats every candidate.
            return ((query_lprobs >= cand_lprobs).all().item() == 1)
        else:
            best_idx = cand_lprobs.argmax().item()
            full_cand = sample['candidate_tokens'][0][best_idx]
            mask = sample['candidate_masks'][0][best_idx]
            toks = full_cand[mask.bool()]
            # Decode just the masked span back to text.
            return self.bpe.decode(self.source_dictionary.string(toks)).strip()

    def source_dictionary(self):
        """Dictionary for model input (see decorator note at class top)."""
        return self.vocab

    def target_dictionary(self):
        """Dictionary for model output; identical to the source vocab."""
        return self.vocab
class InputExample(object):
    """A single train/test example for (pairwise) sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None, pairID=None):
        # guid: unique example id; text_a/text_b: first / optional second
        # sequence; label: gold label; pairID: optional pair identifier.
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        self.pairID = pairID

    def __repr__(self):
        """The JSON serialization doubles as the repr."""
        return str(self.to_json_string())

    def to_dict(self):
        """Return a deep-copied dict of this example's attributes."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize to an indented, key-sorted JSON string plus newline."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + '\n'
class ConstraintListState():
    """Tracks one beam's progress through a list of generation Constraints:
    which are completed, which single one is in progress, which are pending.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # Longest constraint; used as the per-constraint weight in get_bank.
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset: nothing complete, nothing in progress, all pending."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Scalar progress score: each completed constraint counts fully,
        plus partial credit for the in-progress one."""
        add = 0
        if self.inprogress_constraint:
            add += (self.max_seqlen - self.inprogress_constraint.remaining())
        return ((len(self.complete_constraints) * self.max_seqlen) + add)

    def advance(self):
        """Token id(s) that would advance this state, or None if none.

        While a constraint is in progress only its continuations count;
        otherwise any pending constraint may be started.
        """
        token_list = []
        if (self.inprogress_constraint is None):
            for constraint in self.pending_constraints:
                advance = constraint.advance()
                # advance() may yield a single token or a list of tokens.
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if (len(token_list) == 0):
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Re-derive the whole state from a prefix of generated token ids."""
        self.init_state()
        if (token_ids is not None):
            for token in token_ids:
                (complete, stepped) = self.add(token)
                if self.completed:
                    break

    def add(self, token_id: int):
        """Feed one generated token.

        Returns:
            (complete, stepped): whether a constraint was completed by this
            token, and whether any constraint consumed it.
        """
        if (not isinstance(token_id, int)):
            raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.')
        (complete, stepped) = (False, False)
        if self.completed:
            complete = True
            stepped = False
            return (complete, stepped)
        if (self.inprogress_constraint is not None):
            (stepped, complete, reset) = self.inprogress_constraint.update(token_id)
            if reset:
                # In-progress constraint failed mid-way: return a fresh
                # (stateless) copy of it to the pending pool.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # NOTE(review): if update() ever reported reset and complete
                # together, None would be appended here; assumed mutually
                # exclusive -- confirm the Constraint.update contract.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if (len(self.pending_constraints) == 0):
                    self.completed = True
        else:
            # No constraint in progress: try to start one of the pending.
            for (cidx, pending_constraint) in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    (stepped, complete, reset) = pending_constraint.update(token_id)
                    if (not stepped):
                        raise Exception('`constraint.update(token_id)` is not yielding incremental progress, even though `constraint.does_advance(token_id)` is true.')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if ((not complete) and stepped):
                        self.inprogress_constraint = pending_constraint
                    if (complete or stepped):
                        # Remove the constraint that consumed the token.
                        self.pending_constraints = (self.pending_constraints[:cidx] + self.pending_constraints[(cidx + 1):])
                        if ((len(self.pending_constraints) == 0) and (self.inprogress_constraint is None)):
                            self.completed = True
                        break
        return (complete, stepped)

    def copy(self, stateful=True):
        """Clone this state; with stateful=True the per-constraint progress
        is preserved, otherwise only the constraint definitions carry over.
        """
        new_state = ConstraintListState(self.constraints)
        if stateful:
            new_state.complete_constraints = [constraint.copy(stateful=True) for constraint in self.complete_constraints]
            if (self.inprogress_constraint is not None):
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
def process(words, labels, tokenizer, vocabulary, max_seq_length):
    """Convert a word/label sequence into padded BERT-style model inputs.

    Wraps the sequence in [CLS]/[SEP] (labels get 'S' at both ends),
    tokenizes, truncates and right-pads everything to `max_seq_length`.

    Returns:
        (input_id, attention_mask, segment_id, label_id, label_mask)
    """
    wrapped_words = ['[CLS]'] + words + ['[SEP]']
    wrapped_labels = ['S'] + labels + ['S']
    input_id = []
    for word in wrapped_words:
        input_id.extend(tokenizer.tokenize(word))
    input_id = tokenizer.convert_tokens_to_ids(input_id)
    attention_mask = [1] * len(input_id)
    segment_id = [0] * len(input_id)
    label_id = [vocabulary.to_index(label) for label in wrapped_labels]
    label_mask = [1] * len(label_id)
    # Truncate everything to the maximum sequence length...
    input_id = input_id[:max_seq_length]
    attention_mask = attention_mask[:max_seq_length]
    segment_id = segment_id[:max_seq_length]
    label_id = label_id[:max_seq_length]
    label_mask = label_mask[:max_seq_length]
    # ...then right-pad to exactly max_seq_length (labels pad with -1).
    token_pad = max_seq_length - len(input_id)
    input_id += [0] * token_pad
    attention_mask += [0] * token_pad
    segment_id += [0] * token_pad
    label_id += [-1] * (max_seq_length - len(label_id))
    label_mask += [0] * (max_seq_length - len(label_mask))
    return (input_id, attention_mask, segment_id, label_id, label_mask)
def minimizer_initial(identifier):
    """Minimize the model posterior for a single star via Nelder-Mead,
    starting from the model's default parameter vector, and return the
    optimized parameter vector."""
    from scipy.optimize import minimize
    from .cem_function import posterior_function_for_minimization
    from .parameter import ModelParameters
    params = ModelParameters()
    params.stellar_identifier = identifier
    # Tolerance and iteration budget come from the model configuration.
    result = minimize(
        fun=posterior_function_for_minimization,
        x0=params.p0,
        args=params,
        method='Nelder-Mead',
        tol=params.tol_minimization,
        options={'maxiter': params.maxiter_minimization},
    )
    if params.verbose:
        print(result.message)
    return result.x
def read_mot_results(filename, is_gt, is_ignore):
    """Parse a MOT-challenge-format results or ground-truth file.

    Each line reads ``frame,id,x,y,w,h,score/flag[,label,vis,...]``.

    Args:
        filename: path to the file; a missing file yields an empty dict.
        is_gt: treat as ground truth -- for MOT16/17 files keep only marked
            pedestrian boxes (label 1); score is fixed to 1.
        is_ignore: keep only the ignorable classes {2, 7, 8, 12} with a
            non-negative visibility ratio (MOT16/17 only); score fixed to 1.

    Returns:
        dict mapping frame id -> list of ``((x, y, w, h), target_id, score)``.
    """
    valid_labels = {1}
    ignore_labels = {2, 7, 8, 12}
    results_dict = dict()
    if os.path.isfile(filename):
        # Hoist the filename-format check out of the per-line loop.
        is_mot16_17 = ('MOT16-' in filename) or ('MOT17-' in filename)
        with open(filename, 'r') as f:
            for line in f:
                linelist = line.split(',')
                if len(linelist) < 7:
                    continue
                fid = int(linelist[0])
                if fid < 1:
                    continue  # frame ids are 1-based
                results_dict.setdefault(fid, list())
                # NOTE: the original also computed an unused box_size here
                # (w * h); that dead code has been removed.
                if is_gt:
                    if is_mot16_17:
                        label = int(float(linelist[7]))
                        mark = int(float(linelist[6]))
                        # Skip unmarked boxes and non-pedestrian classes.
                        if (mark == 0) or (label not in valid_labels):
                            continue
                    score = 1
                elif is_ignore:
                    if is_mot16_17:
                        label = int(float(linelist[7]))
                        vis_ratio = float(linelist[8])
                        # Keep only ignorable classes that are visible.
                        if (label not in ignore_labels) and (vis_ratio >= 0):
                            continue
                    else:
                        continue
                    score = 1
                else:
                    score = float(linelist[6])
                tlwh = tuple(map(float, linelist[2:6]))
                target_id = int(linelist[1])
                results_dict[fid].append((tlwh, target_id, score))
    return results_dict
class SpikePlot(spaic.BaseModule):
    """Live spike plotter fed by spike monitors; drawing is delegated to a
    separate worker communicating through a bounded queue."""

    def __init__(self, spike_monitors=None, plot_interval=1):
        """
        Args:
            spike_monitors: monitors to plot; defaults to an empty list.
            plot_interval: redraw every this many updates.
        """
        super(SpikePlot, self).__init__()
        # Fix the mutable-default-argument pitfall: the original default
        # `spike_monitors=[]` was a single list shared by every instance.
        # Passing an explicit list still behaves exactly as before.
        self.spike_monitors = [] if spike_monitors is None else spike_monitors
        self.interval = plot_interval
        self.draw_proc = None  # drawing worker, created lazily elsewhere
        self.queue = Queue(maxsize=10)  # bounded hand-off buffer
        self.mutex = Lock()
class ParallelExtractor():
    """Wraps an Extractor, placing its feature extractor on a device via
    prepare_module and running inference through the wrapped module."""

    def __init__(self, module: Extractor, gpu: Union[(str, List[str], List[List[str]])]) -> None:
        self.module = module
        # NOTE(review): despite the Union annotation, this branch only works
        # when `gpu` is a single string -- a list whose str() contains
        # 'cuda:' would crash on `.split` (lists have no split).  Confirm
        # the intended input type.
        if ('cuda:' in str(gpu)):
            gpu = int(gpu.split('cuda:')[(- 1)])  # 'cuda:N' -> device index N
        (self.feature_extractor, device) = prepare_module(self.module.feature_extractor, gpu)
        device = [device]
        # Mirror the resolved one-element device list onto the wrapped module.
        self.module.device = self.device = device
        self.transform = self.module.transform

    def __call__(self, images: torch.Tensor) -> torch.Tensor:
        """Run the wrapped module on `images` moved to the first device."""
        return self.module(images.to(self.device[0]))

    def extract_features_as_numpy(self, images: torch.Tensor) -> np.ndarray:
        """Extract features and return them as a detached CPU numpy array."""
        features = self.__call__(images)
        return features.cpu().detach().numpy()
def get_host_info():
    """Return the current user name concatenated with the host name,
    falling back to 'Nobody' when the user cannot be determined."""
    try:
        user = getuser()
    except KeyError:
        # getuser() raises KeyError when no login entry exists for the uid.
        user = 'Nobody'
    host = gethostname()
    return '{}{}'.format(user, host)
def convert_to_wav(root: Path, filenames: List[str], target_sr=16000):
    """Convert every clip under root/clips to mono `target_sr` Hz 16-bit
    PCM WAV files written into root/wav."""
    wav_dir = root / 'wav'
    wav_dir.mkdir(exist_ok=True, parents=True)
    print('Converting to WAV...')
    for name in tqdm(filenames):
        src = (root / 'clips' / name).as_posix()
        waveform, sr = torchaudio.load(src)
        # Resample to the target rate and downmix to a single channel.
        converted, converted_sr = torchaudio.sox_effects.apply_effects_tensor(waveform, sr, [['rate', str(target_sr)], ['channels', '1']])
        dst = (wav_dir / Path(name).with_suffix('.wav').name).as_posix()
        torchaudio.save(dst, converted, converted_sr, encoding='PCM_S', bits_per_sample=16)
class TensorStats(Message):
    """Aggregate statistics about a collection of variables/tensors.

    NOTE(review): `Message` is presumably a serializable message base class
    that derives a schema from these annotated fields -- annotations left
    untouched for that reason.  `kv_embedding_dims` defaults to None even
    though it is annotated List[int]; confirm whether the framework
    tolerates/expects that.
    """
    # Number of variables observed.
    variable_count: int = 0
    # Sum of all variable sizes (units set by the producer -- likely element
    # counts or bytes; confirm at the call site).
    total_variable_size: int = 0
    # Largest single variable size seen.
    max_variable_size: int = 0
    # Embedding dimensions for key-value embeddings; None until populated.
    kv_embedding_dims: List[int] = None
def update_dataset(file: str, bilou_file: str) -> None:
    """Merge BILOU NER tags into the original dataset and write the result
    under a sibling directory suffixed with '_bilou' (same file name)."""
    original = get_orig_dataset(file)
    bilou = get_bilou_dataset(bilou_file)
    merged = update_ner(original, bilou)
    parent, basename = os.path.split(file)
    target_dir = parent + '_bilou'
    os.makedirs(target_dir, exist_ok=True)
    with codecs.open(os.path.join(target_dir, basename), 'w') as w:
        json.dump(merged, w)
def trendDataTest(timeSteps):
    """Build a synthetic trend series of length `timeSteps` by summing a
    linear trend and a log trend.

    NOTE(review): the negative-exponential component is generated into `t3`
    but never added to `data` (unlike f1 and f2, which use `data +=`).
    This looks like it was meant to be `data += ...` -- confirm intent
    before changing.
    """
    # Dampening scales inversely with the series length.
    dampening = (2.0 * float((1.0 / timeSteps)))
    power = 0.35
    displacement = (- 2.5)
    f1 = gT.linearTrendFn
    data = gT.generate(f1, power=power, displacement=displacement, timeSteps=timeSteps)
    f2 = gT.logTrendFn
    data += gT.generate(f2, dampening=dampening, displacement=displacement, timeSteps=timeSteps)
    f3 = gT.negExpTrendFn
    t3 = gT.generate(f3, dampening=dampening, displacement=displacement, timeSteps=timeSteps)
    return data
class EVENTKG240KLoader(BaseLoader):
    """Loader for the EVENTKG240K temporal knowledge-graph dataset: triples
    with start/end times plus entity/event/relation lookup tables (LUTs)."""

    def __init__(self, dataset_path, download=False):
        super().__init__(dataset_path, download, raw_data_path='EVENTKG240K/raw_data', processed_data_path='EVENTKG240K/processed_data', train_name='eventkg240k_train.txt', valid_name='eventkg240k_valid.txt', test_name='eventkg240k_test.txt', data_name='EVENTKG240K')
        # Extra vocabulary over the temporal (start/end) columns.
        self.time_vocab = Vocabulary()
        self.entity_lut_name = 'eventkg240k_entities_lut.json'
        self.event_lut_name = 'eventkg240k_events_lut.json'
        self.relation_lut_name = 'eventkg240k_relations_lut.json'

    def _load_data(self, path, data_type):
        # Quintuple format: (head, relation, tail, start, end).
        return BaseLoader._load_data(self, path=path, data_type=data_type, column_names=['head', 'relation', 'tail', 'start', 'end'])

    def download_action(self):
        """Trigger the dataset download via the base downloader."""
        self.downloader.EVENTKG240K()

    def _build_vocabs(self, train_data, valid_data, test_data):
        """Build node/relation vocabs (base class) plus the time vocab over
        the start/end columns of every split."""
        BaseLoader._build_vocabs(self, train_data, valid_data, test_data)
        self.time_vocab.buildVocab(train_data['start'].tolist(), train_data['end'].tolist(), valid_data['start'].tolist(), valid_data['end'].tolist(), test_data['start'].tolist(), test_data['end'].tolist())

    def load_all_vocabs(self):
        """Return (node_vocab, relation_vocab, time_vocab)."""
        return (self.node_vocab, self.relation_vocab, self.time_vocab)

    def save_vocabs_to_pickle(self, file_name):
        """Pickle the three vocabularies as a list to `file_name`."""
        with open(file_name, 'wb') as file:
            pickle.dump([self.node_vocab, self.relation_vocab, self.time_vocab], file, pickle.HIGHEST_PROTOCOL)

    def load_vocabs_from_pickle(self, file_name):
        """Restore the three vocabularies pickled by save_vocabs_to_pickle."""
        with open(file_name, 'rb') as file:
            (self.node_vocab, self.relation_vocab, self.time_vocab) = pickle.load(file)

    def _load_lut(self, path):
        """Read a JSON lookup table from raw_data_path and transpose it."""
        total_path = os.path.join(self.raw_data_path, path)
        lut = LookUpTable()
        lut.read_json(total_path)
        lut.transpose()
        return lut

    def load_node_lut(self):
        """Build (or load the cached) node LUT: entity + event tables tagged
        with node_type and ordered by the node vocabulary's name_id."""
        preprocessed_file = os.path.join(self.processed_data_path, 'node_lut.pkl')
        if os.path.exists(preprocessed_file):
            node_lut = LookUpTable()
            node_lut.read_from_pickle(preprocessed_file)
        else:
            entity_lut = self._load_lut(self.entity_lut_name)
            entity_lut.add_column((['entity'] * len(entity_lut.data)), 'node_type')
            event_lut = self._load_lut(self.event_lut_name)
            event_lut.add_column((['event'] * len(event_lut.data)), 'node_type')
            node_lut = entity_lut.append(event_lut)
            node_lut.add_vocab(self.node_vocab)
            # Attach name_id from the vocab mapping and sort rows by it.
            df = pd.DataFrame([self.node_vocab.word2idx]).T
            df = df.rename({0: 'name_id'}, axis=1)
            node_lut.data = pd.merge(df, node_lut.data, left_index=True, right_index=True, how='outer')
            node_lut.data = node_lut.data.sort_values(by='name_id')
            node_lut.save_to_pickle(preprocessed_file)
        return node_lut

    def load_relation_lut(self):
        """Build (or load the cached) relation LUT aligned to the relation
        vocabulary order, mirroring load_node_lut."""
        preprocessed_file = os.path.join(self.processed_data_path, 'relation_lut.pkl')
        if os.path.exists(preprocessed_file):
            relation_lut = LookUpTable()
            relation_lut.read_from_pickle(preprocessed_file)
        else:
            relation_lut = self._load_lut(self.relation_lut_name)
            relation_lut.add_vocab(self.relation_vocab)
            df = pd.DataFrame([self.relation_vocab.word2idx]).T
            df = df.rename({0: 'name_id'}, axis=1)
            relation_lut.data = pd.merge(df, relation_lut.data, left_index=True, right_index=True, how='outer')
            relation_lut.data = relation_lut.data.sort_values(by='name_id')
            relation_lut.save_to_pickle(preprocessed_file)
        return relation_lut

    def load_time_lut(self):
        """Time LUT is just the time vocabulary (no JSON table on disk)."""
        time_lut = LookUpTable()
        time_lut.add_vocab(self.time_vocab)
        return time_lut

    def load_all_lut(self):
        """Load node, relation and time LUTs, wiring the processed path into
        the first two; returns them as a tuple."""
        node_lut = self.load_node_lut()
        node_lut.add_processed_path(self.processed_data_path)
        relation_lut = self.load_relation_lut()
        relation_lut.add_processed_path(self.processed_data_path)
        time_lut = self.load_time_lut()
        return (node_lut, relation_lut, time_lut)

    def describe(self):
        """Print a summary table of split sizes and vocabulary sizes."""
        tb = pt.PrettyTable()
        tb.field_names = [self.data_name, 'train', 'valid', 'test', 'node', 'relation', 'time']
        tb.add_row(['num', self.train_len, self.valid_len, self.test_len, len(self.node_vocab), len(self.relation_vocab), len(self.time_vocab)])
        print(tb)
class RefEvaluation(): def __init__(self, refer, Res): self.evalRefs = [] self.eval = {} self.refToEval = {} self.refer = refer self.Res = Res def evaluate(self): evalRefIds = [ann['ref_id'] for ann in self.Res] refToGts = {} for ref_id in evalRefIds: ref = self.refer.Refs[ref_id] gt_sents = [sent['sent'].encode('ascii', 'ignore').decode('ascii') for sent in ref['sentences']] refToGts[ref_id] = gt_sents refToRes = {ann['ref_id']: [ann['sent']] for ann in self.Res} print('tokenization...') tokenizer = PTBTokenizer() self.refToRes = tokenizer.tokenize(refToRes) self.refToGts = tokenizer.tokenize(refToGts) print('setting up scorers...') scorers = [(Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), (Meteor(), 'METEOR'), (Rouge(), 'ROUGE_L'), (Cider(), 'CIDEr')] for (scorer, method) in scorers: print(('computing %s score...' % scorer.method())) (score, scores) = scorer.compute_score(self.refToGts, self.refToRes) if (type(method) == list): for (sc, scs, m) in zip(score, scores, method): self.setEval(sc, m) self.setRefToEvalRefs(scs, self.refToGts.keys(), m) print(('%s: %0.3f' % (m, sc))) else: self.setEval(score, method) self.setRefToEvalRefs(scores, self.refToGts.keys(), method) print(('%s: %0.3f' % (method, score))) self.setEvalRefs() def setEval(self, score, method): self.eval[method] = score def setRefToEvalRefs(self, scores, refIds, method): for (refId, score) in zip(refIds, scores): if (not (refId in self.refToEval)): self.refToEval[refId] = {} self.refToEval[refId]['ref_id'] = refId self.refToEval[refId][method] = score def setEvalRefs(self): self.evalRefs = [eval for (refId, eval) in self.refToEval.items()]
def main():
    """Generate pseudo-labels for the target domain with a 3-model ensemble.

    Averages the softmax outputs of three checkpoints, picks per-pixel
    argmax labels, computes a per-class confidence threshold (66th
    percentile, capped at 0.9), masks low-confidence pixels to 255
    (ignore), and saves the label maps as PNGs into ``args.save``.
    """
    opt = TestOptions()
    args = opt.initialize()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.GPU
    if (not os.path.exists(args.save)):
        os.makedirs(args.save)
    # Load the three checkpoints to ensemble; each reuses the same args
    # object with a different restore path.
    args.restore_from = args.restore_opt1
    model1 = CreateSSLModel(args)
    model1.eval()
    model1.cuda()
    args.restore_from = args.restore_opt2
    model2 = CreateSSLModel(args)
    model2.eval()
    model2.cuda()
    args.restore_from = args.restore_opt3
    model3 = CreateSSLModel(args)
    model3.eval()
    model3.cuda()
    targetloader = CreateTrgDataSSLLoader(args)
    # Per-channel means subtracted from inputs (broadcast over the batch).
    IMG_MEAN = np.array((104.0, 116.0, 122.0), dtype=np.float32)
    IMG_MEAN = torch.reshape(torch.from_numpy(IMG_MEAN), (1, 3, 1, 1))
    mean_img = torch.zeros(1, 1)  # placeholder; resized on first batch
    predicted_label = np.zeros((len(targetloader), 512, 1024))
    predicted_prob = np.zeros((len(targetloader), 512, 1024))
    image_name = []
    with torch.no_grad():
        for (index, batch) in enumerate(targetloader):
            if ((index % 100) == 0):
                print(('%d processd' % index))
            (image, _, name) = batch
            # Lazily build the full-size mean image once.
            if (mean_img.shape[-1] < 2):
                (B, C, H, W) = image.shape
                mean_img = IMG_MEAN.repeat(B, 1, H, W)
            image = (image.clone() - mean_img)
            image = Variable(image).cuda()
            output1 = model1(image)
            output1 = nn.functional.softmax(output1, dim=1)
            output2 = model2(image)
            output2 = nn.functional.softmax(output2, dim=1)
            output3 = model3(image)
            output3 = nn.functional.softmax(output3, dim=1)
            # Equal-weight ensemble: a + b + (1 - a - b) = 1.
            (a, b) = (0.3333, 0.3333)
            output = (((a * output1) + (b * output2)) + (((1.0 - a) - b) * output3))
            output = nn.functional.interpolate(output, (512, 1024), mode='bilinear', align_corners=True).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)
            (label, prob) = (np.argmax(output, axis=2), np.max(output, axis=2))
            predicted_label[index] = label.copy()
            predicted_prob[index] = prob.copy()
            image_name.append(name[0])
    # Per-class confidence threshold at the 66th percentile, capped at 0.9.
    thres = []
    for i in range(19):
        x = predicted_prob[(predicted_label == i)]
        if (len(x) == 0):
            thres.append(0)
            continue
        x = np.sort(x)
        # Fix: np.int was removed in NumPy 1.24 — use the builtin int.
        thres.append(x[int(np.round((len(x) * 0.66)))])
    print(thres)
    thres = np.array(thres)
    thres[(thres > 0.9)] = 0.9
    print(thres)
    # Mask out low-confidence pixels (255 = ignore label) and save.
    for index in range(len(targetloader)):
        name = image_name[index]
        label = predicted_label[index]
        prob = predicted_prob[index]
        for i in range(19):
            label[((prob < thres[i]) * (label == i))] = 255
        output = np.asarray(label, dtype=np.uint8)
        output = Image.fromarray(output)
        name = name.split('/')[-1]
        output.save(('%s/%s' % (args.save, name)))
def main(): args = parse_args() json_logs = args.json_logs for json_log in json_logs: assert json_log.endswith('.json') log_dicts = load_json_logs(json_logs) plot_curve(log_dicts, args)
def get_configuration_from_output_folder(folder): folder = folder[len(network_training_output_dir):] if folder.startswith('/'): folder = folder[1:] (configuration, task, trainer_and_plans_identifier) = folder.split('/') (trainer, plans_identifier) = trainer_and_plans_identifier.split('__') return (configuration, task, trainer, plans_identifier)
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird.

    Wraps a ``sentencepiece`` model and adds the [CLS]/[SEP]-style special
    tokens used by BigBird's sequence (pair) inputs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token='<unk>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', sep_token='[SEP]', mask_token='[MASK]', cls_token='[CLS]', sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Normalize str special tokens to AddedToken; the mask token keeps
        # preceding whitespace (lstrip=True) so '[MASK]' matches mid-sentence.
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary.

        Fix: restored the ``@property`` decorator — ``get_vocab`` below
        iterates ``range(self.vocab_size)``, which raises TypeError when
        vocab_size is a plain method.
        """
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and
        # reload from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility with pickles created before sp_model_kwargs existed.
        if (not hasattr(self, 'sp_model_kwargs')):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize *text* into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a piece string to its SentencePiece id."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Map a SentencePiece id back to its piece string."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a plain string."""
        out_string = self.sp_model.decode_pieces(tokens)
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model file into *save_directory*.

        Copies the original model file when available, otherwise dumps the
        serialized in-memory model. Returns the output path as a 1-tuple,
        or None (with an error logged) if *save_directory* is not a directory.
        """
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if ((os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)) and os.path.isfile(self.vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        elif (not os.path.isfile(self.vocab_file)):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """[CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Segment ids: 0 over [CLS] A [SEP], 1 over B [SEP] when a pair is given."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))
def strQ2B(ustring): rstring = '' for uchar in ustring: inside_code = ord(uchar) if (inside_code == 12288): inside_code = 32 elif ((inside_code >= 65281) and (inside_code <= 65374)): inside_code -= 65248 rstring += unichr(inside_code) return rstring
def update_filename_to_nifti(filename: str) -> str: extension = get_file_extension(filename) if (not ('nii' in extension)): filename = filename.replace(extension, '.nii.gz') return filename
def build_detection_model(cfg): meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE] return meta_arch(cfg)
class CelebASnipServer(SnipServer): def init_test_loader(self): self.test_loader = get_data_loader(EXP_NAME, data_type='test', num_workers=8, batch_size=100, shuffle=False, pin_memory=True) def init_clients(self): if args.client_selection: list_usr = [[i] for i in range(num_users)] else: nusr = (num_users // NUM_CLIENTS) list_usr = [list(range((nusr * j), ((nusr * (j + 1)) if (j != (NUM_CLIENTS - 1)) else num_users))) for j in range(NUM_CLIENTS)] models = [self.model for _ in range(NUM_CLIENTS)] return (models, list_usr)
def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma): rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), nenvs, nsteps, True) rs = batch_to_seq(R, nenvs, nsteps, True) ds = batch_to_seq(D, nenvs, nsteps, True) q_is = batch_to_seq(q_i, nenvs, nsteps, True) vs = batch_to_seq(v, nenvs, (nsteps + 1), True) v_final = vs[(- 1)] qret = v_final qrets = [] for i in range((nsteps - 1), (- 1), (- 1)): check_shape([qret, ds[i], rs[i], rho_bar[i], q_is[i], vs[i]], ([[nenvs]] * 6)) qret = (rs[i] + ((gamma * qret) * (1.0 - ds[i]))) qrets.append(qret) qret = ((rho_bar[i] * (qret - q_is[i])) + vs[i]) qrets = qrets[::(- 1)] qret = seq_to_batch(qrets, flat=True) return qret
def get_best_results(log_json_path): dataset = get_model_dataset(log_json_path) max_dict = dict() max_memory = 0 with open(log_json_path, 'r') as f: for line in f.readlines(): log_line = json.loads(line) if ('mode' not in log_line.keys()): continue if ((log_line['mode'] == 'train') and (max_memory <= log_line['memory'])): max_memory = log_line['memory'] elif (log_line['mode'] == 'val'): result_dict = {key: log_line[key] for key in RESULTS_LUT[dataset] if (key in log_line)} if (len(max_dict) == 0): max_dict = result_dict max_dict['epoch'] = log_line['epoch'] elif all([(max_dict[key] <= result_dict[key]) for key in result_dict]): max_dict.update(result_dict) max_dict['epoch'] = log_line['epoch'] max_dict['memory'] = max_memory return max_dict
class DeResNetBatchNorm(_DeResNet): def __init__(self, inplanes, planes, strides, output_paddings, activation): super(DeResNetBatchNorm, self).__init__(DeResNetBlockBatchNorm, inplanes, planes, strides, output_paddings, activation)
class RailAgentStatus(IntEnum): READY_TO_DEPART = 0 ACTIVE = 1 DONE = 2 DONE_REMOVED = 3
def decode_gnss_message(bin_msg, print_decoded=True, print_debug_information=False): if print_decoded: print(' START DECODE GNSS MESSAGE ') assert (message_kind(bin_msg) == 'G') expected_message_length = int((1 + (((len(bin_msg) - 2) - _BD_GNSS_PACKET_LENGTH) / (_BD_GNSS_PACKET_LENGTH - 1)))) if print_decoded: print('expected number of packets based on message length: {}'.format(expected_message_length)) nbr_gnss_fixes = one_byte_to_int(bin_msg[1:2]) message_metadata = GNSS_Metadata(nbr_gnss_fixes=nbr_gnss_fixes) if print_decoded: print('number of fixes since boot at message creation: {}'.format(nbr_gnss_fixes)) crrt_packet_start = 2 list_decoded_packets = [] while True: crrt_byte_start = byte_to_char(bin_msg[crrt_packet_start]) assert (crrt_byte_start == 'F') crrt_decoded_packet = decode_gnss_packet(bin_msg[crrt_packet_start:(crrt_packet_start + _BD_GNSS_PACKET_LENGTH)], print_decoded=print_decoded, print_debug_information=print_debug_information) list_decoded_packets.append(crrt_decoded_packet) trailing_char = byte_to_char(bin_msg[((crrt_packet_start + _BD_GNSS_PACKET_LENGTH) - 1)]) assert (trailing_char in ['E', 'F']) if (trailing_char == 'E'): break else: crrt_packet_start += (_BD_GNSS_PACKET_LENGTH - 1) assert (expected_message_length == len(list_decoded_packets)) if print_decoded: print(' DONE DECODE GNSS MESSAGE ') return (message_metadata, list_decoded_packets)
class ChebnetII_prop(MessagePassing): def __init__(self, K, name, Init=False, bias=True, **kwargs): super(ChebnetII_prop, self).__init__(aggr='add', **kwargs) self.K = K self.name = name self.Init = Init self.temp = Parameter(torch.Tensor((self.K + 1))) self.reset_parameters() def reset_parameters(self): self.temp.data.fill_(1.0) def forward(self, x, edge_index, edge_weight=None): coe_tmp = self.temp coe = coe_tmp.clone() for i in range((self.K + 1)): coe[i] = (coe_tmp[0] * cheby(i, math.cos((((self.K + 0.5) * math.pi) / (self.K + 1))))) for j in range(1, (self.K + 1)): x_j = math.cos(((((self.K - j) + 0.5) * math.pi) / (self.K + 1))) coe[i] = (coe[i] + (coe_tmp[j] * cheby(i, x_j))) coe[i] = ((2 * coe[i]) / (self.K + 1)) if (self.name == 'fb100'): (edge_index_tilde, norm_tilde) = gcn_norm(edge_index, edge_weight, num_nodes=x.size(0), dtype=x.dtype) else: (edge_index1, norm1) = get_laplacian(edge_index, edge_weight, normalization='sym', dtype=x.dtype, num_nodes=x.size(self.node_dim)) (edge_index_tilde, norm_tilde) = add_self_loops(edge_index1, norm1, fill_value=(- 1.0), num_nodes=x.size(self.node_dim)) Tx_0 = x Tx_1 = self.propagate(edge_index_tilde, x=x, norm=norm_tilde, size=None) out = (((coe[0] / 2) * Tx_0) + (coe[1] * Tx_1)) for i in range(2, (self.K + 1)): Tx_2 = self.propagate(edge_index_tilde, x=Tx_1, norm=norm_tilde, size=None) Tx_2 = ((2 * Tx_2) - Tx_0) out = (out + (coe[i] * Tx_2)) (Tx_0, Tx_1) = (Tx_1, Tx_2) return out def message(self, x_j, norm): return (norm.view((- 1), 1) * x_j) def __repr__(self): return '{}(K={}, temp={})'.format(self.__class__.__name__, self.K, self.temp)
class BioGptModel(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
def test_attr_dep_from_somewhere_else(): run_cell('import fakelib as fake') run_cell('fake.y = 7') run_cell('x = fake.y + 1') run_cell('fake.y = 42') run_cell('logging.info(x)') assert_detected('`x` depends on old value of `fake.y`')
class Normalize(Layer): def __init__(self, p, eps=1e-10, bigdl_type='float'): super(Normalize, self).__init__(None, bigdl_type, p, eps)
class CrossCityTrainer(Trainer):
    """Domain-adaptation trainer: supervised source training plus a
    pseudo-label / entropy-style target loss on NTHU CrossCity data.

    Builds source (City_Dataset) and target (CrossCity_Dataset) train/val
    loaders, selects the target-domain loss from args.target_mode, and
    alternates source- and target-batch updates within each epoch.
    """

    def __init__(self, args, cuda=None, train_id='None', logger=None):
        super().__init__(args, cuda, train_id, logger)
        # Source-domain train loader.
        source_data_set = City_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split=args.split, base_size=args.base_size, crop_size=args.crop_size, class_13=args.class_13)
        self.source_dataloader = data.DataLoader(source_data_set, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        # Source-domain validation loader (same variable reused on purpose).
        source_data_set = City_Dataset(args, data_root_path=args.source_data_path, list_path=args.source_list_path, split='val', base_size=args.base_size, crop_size=args.crop_size, class_13=args.class_13)
        self.source_val_dataloader = data.DataLoader(source_data_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        print(self.args.source_dataset, self.args.city_name)
        # Target-domain train loader.
        target_data_set = CrossCity_Dataset(args, data_root_path=args.data_root_path, list_path=args.list_path, split=args.split, base_size=args.target_base_size, crop_size=args.target_crop_size, class_13=args.class_13)
        self.target_dataloader = data.DataLoader(target_data_set, batch_size=self.args.batch_size, shuffle=True, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        # Target-domain validation loader; becomes the trainer's val loader.
        target_data_set = CrossCity_Dataset(args, data_root_path=args.data_root_path, list_path=args.list_path, split='val', base_size=args.target_base_size, crop_size=args.target_crop_size, class_13=args.class_13)
        self.target_val_dataloader = data.DataLoader(target_data_set, batch_size=self.args.batch_size, shuffle=False, num_workers=self.args.data_loader_workers, pin_memory=self.args.pin_memory, drop_last=True)
        self.dataloader.val_loader = self.target_val_dataloader
        self.dataloader.valid_iterations = ((len(target_data_set) + self.args.batch_size) // self.args.batch_size)
        self.ignore_index = (- 1)
        # Target-domain loss, chosen by args.target_mode.
        if (self.args.target_mode == 'hard'):
            self.target_loss = nn.CrossEntropyLoss(ignore_index=(- 1))
        elif (self.args.target_mode == 'entropy'):
            self.target_loss = softCrossEntropy(ignore_index=(- 1))
        elif (self.args.target_mode == 'IW_entropy'):
            self.target_loss = IWsoftCrossEntropy(ignore_index=(- 1), num_class=self.args.num_classes, ratio=self.args.IW_ratio)
        elif (self.args.target_mode == 'maxsquare'):
            self.target_loss = MaxSquareloss(ignore_index=(- 1), num_class=self.args.num_classes)
        elif (self.args.target_mode == 'IW_maxsquare'):
            self.target_loss = IW_MaxSquareloss(ignore_index=(- 1), num_class=self.args.num_classes, ratio=self.args.IW_ratio)
        self.target_loss.to(self.device)
        # Hard-label CE used for the auxiliary (second) head on target data.
        self.target_hard_loss = nn.CrossEntropyLoss(ignore_index=(- 1))
        # Confidence threshold for accepting pseudo-labels.
        self.threshold = self.args.threshold

    def main(self):
        """Log the configuration, restore checkpoints if requested, then
        validate and train."""
        self.logger.info('Global configuration as follows:')
        for (key, val) in vars(self.args).items():
            self.logger.info('{:16} {}'.format(key, val))
        current_device = torch.cuda.current_device()
        self.logger.info('This model will run on {}'.format(torch.cuda.get_device_name(current_device)))
        if (self.args.pretrained_ckpt_file is not None):
            # A directory means "use the best checkpoint of this run id".
            if os.path.isdir(self.args.pretrained_ckpt_file):
                self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, (self.train_id + 'best.pth'))
            self.load_checkpoint(self.args.pretrained_ckpt_file)
        if (not self.args.continue_training):
            self.best_MIou = 0
            self.best_iter = 0
            self.current_iter = 0
            self.current_epoch = 0
        if self.args.continue_training:
            self.load_checkpoint(os.path.join(self.args.checkpoint_dir, (self.train_id + 'best.pth')))
        self.args.iter_max = (self.dataloader.num_iterations * self.args.epoch_num)
        self.epoch_num = self.args.epoch_num
        self.validate()
        self.validate_source()
        self.train()
        self.writer.close()

    def train_one_epoch(self):
        """One epoch over zipped (source, target) batches.

        Per iteration: supervised CE on the source batch, then the
        target-mode loss on pseudo-/soft-labels of the target batch; both
        backward passes accumulate before a single optimizer step.
        """
        tqdm_epoch = tqdm(zip(self.source_dataloader, self.target_dataloader), total=self.dataloader.num_iterations, desc='Train Epoch-{}-total-{}'.format((self.current_epoch + 1), self.epoch_num))
        self.logger.info('Training one epoch...')
        self.Eval.reset()
        # Running per-epoch averages (divided by iter_num as they accumulate).
        loss_seg_value = 0
        loss_target_value = 0
        loss_seg_value_2 = 0
        loss_target_value_2 = 0
        iter_num = self.dataloader.num_iterations
        if self.args.freeze_bn:
            self.model.eval()
            self.logger.info('freeze bacth normalization successfully!')
        else:
            self.model.train()
        batch_idx = 0
        for (batch_s, batch_t) in tqdm_epoch:
            self.poly_lr_scheduler(optimizer=self.optimizer, init_lr=self.args.lr)
            self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]['lr'], self.current_iter)
            # ---- supervised source step ----
            (x, y, _) = batch_s
            if self.cuda:
                (x, y) = (Variable(x).to(self.device), Variable(y).to(device=self.device, dtype=torch.long))
            pred = self.model(x)
            # Multi-head models return (main, auxiliary) predictions.
            if isinstance(pred, tuple):
                pred_2 = pred[1]
                pred = pred[0]
            y = torch.squeeze(y, 1)
            loss = self.loss(pred, y)
            loss_ = loss
            if self.args.multi:
                loss_2 = (self.args.lambda_seg * self.loss(pred_2, y))
                loss_ += loss_2
                loss_seg_value_2 += (loss_2.cpu().item() / iter_num)
            loss_.backward()
            loss_seg_value += (loss.cpu().item() / iter_num)
            # Track source-domain training metrics.
            pred = pred.data.cpu().numpy()
            label = y.cpu().numpy()
            argpred = np.argmax(pred, axis=1)
            self.Eval.add_batch(label, argpred)
            # ---- unsupervised target step ----
            (x, _, _) = batch_t
            if self.cuda:
                x = Variable(x).to(self.device)
            pred = self.model(x)
            if isinstance(pred, tuple):
                pred_2 = pred[1]
                pred = pred[0]
                pred_P_2 = F.softmax(pred_2, dim=1)
            pred_P = F.softmax(pred, dim=1)
            # 'hard' mode uses argmax pseudo-labels; soft modes feed the
            # probability maps directly to the chosen target loss.
            if (self.args.target_mode == 'hard'):
                label = torch.argmax(pred_P.detach(), dim=1)
                if self.args.multi:
                    label_2 = torch.argmax(pred_P_2.detach(), dim=1)
            else:
                label = pred_P
                if self.args.multi:
                    label_2 = pred_P_2
            (maxpred, argpred) = torch.max(pred_P.detach(), dim=1)
            if self.args.multi:
                (maxpred_2, argpred_2) = torch.max(pred_P_2.detach(), dim=1)
            if (self.args.target_mode == 'hard'):
                # Keep only confident pseudo-labels; others -> ignore_index.
                mask = (maxpred > self.threshold)
                label = torch.where(mask, label, (torch.ones(1).to(self.device, dtype=torch.long) * self.ignore_index))
            loss_target = (self.args.lambda_target * self.target_loss(pred, label))
            loss_target_ = loss_target
            if self.args.multi:
                # Auxiliary head: hard CE on the averaged-head argmax, masked
                # to pixels where either head is confident.
                pred_c = ((pred_P + pred_P_2) / 2)
                (maxpred_c, argpred_c) = torch.max(pred_c, dim=1)
                mask = ((maxpred > self.threshold) | (maxpred_2 > self.threshold))
                label_2 = torch.where(mask, argpred_c, (torch.ones(1).to(self.device, dtype=torch.long) * self.ignore_index))
                loss_target_2 = ((self.args.lambda_seg * self.args.lambda_target) * self.target_hard_loss(pred_2, label_2))
                loss_target_ += loss_target_2
                # NOTE(review): accumulates a tensor (no .item()) unlike the
                # seg losses above — presumably intentional; confirm.
                loss_target_value_2 += (loss_target_2 / iter_num)
            loss_target_.backward()
            loss_target_value += (loss_target / iter_num)
            # Single step for both accumulated (source + target) gradients.
            self.optimizer.step()
            self.optimizer.zero_grad()
            if ((batch_idx % 400) == 0):
                if self.args.multi:
                    self.logger.info('epoch{}-batch-{}:loss_seg={:.3f}-loss_target={:.3f}; loss_seg_2={:.3f}-loss_target_2={:.3f}'.format(self.current_epoch, batch_idx, loss.item(), loss_target.item(), loss_2.item(), loss_target_2.item()))
                else:
                    self.logger.info('epoch{}-batch-{}:loss_seg={:.3f}-loss_target={:.3f}'.format(self.current_epoch, batch_idx, loss.item(), loss_target.item()))
            batch_idx += 1
            self.current_iter += 1
        # Per-epoch summaries.
        self.writer.add_scalar('train_loss', loss_seg_value, self.current_epoch)
        tqdm.write('The average loss of train epoch-{}-:{}'.format(self.current_epoch, loss_seg_value))
        self.writer.add_scalar('target_loss', loss_target_value, self.current_epoch)
        tqdm.write('The average target_loss of train epoch-{}-:{:.3f}'.format(self.current_epoch, loss_target_value))
        if self.args.multi:
            self.writer.add_scalar('train_loss_2', loss_seg_value_2, self.current_epoch)
            tqdm.write('The average loss_2 of train epoch-{}-:{}'.format(self.current_epoch, loss_seg_value_2))
            self.writer.add_scalar('target_loss_2', loss_target_value_2, self.current_epoch)
            tqdm.write('The average target_loss_2 of train epoch-{}-:{:.3f}'.format(self.current_epoch, loss_target_value_2))
        tqdm_epoch.close()
        self.validate_source()
class DefaultConfig(): dataset: DataConfig = DataConfig() stage1: Stage1Config = Stage1Config() stage2: Stage2Config = Stage2Config()
class CIFAR10(): def __init__(self, args, normalize=False): self.args = args self.norm_layer = transforms.Normalize(mean=[0.491, 0.482, 0.447], std=[0.247, 0.243, 0.262]) self.tr_train = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor()] self.tr_test = [transforms.ToTensor()] if normalize: self.tr_train.append(self.norm_layer) self.tr_test.append(self.norm_layer) self.tr_train = transforms.Compose(self.tr_train) self.tr_test = transforms.Compose(self.tr_test) def data_loaders(self, **kwargs): trainset = datasets.CIFAR10(root=os.path.join(self.args.data_dir, 'CIFAR10'), train=True, download=True, transform=self.tr_train) subset_indices = np.random.permutation(np.arange(len(trainset)))[:int((self.args.data_fraction * len(trainset)))] train_loader = DataLoader(trainset, batch_size=self.args.batch_size, sampler=SubsetRandomSampler(subset_indices), **kwargs) testset = datasets.CIFAR10(root=os.path.join(self.args.data_dir, 'CIFAR10'), train=False, download=True, transform=self.tr_test) test_loader = DataLoader(testset, batch_size=self.args.test_batch_size, shuffle=False, **kwargs) print(f'Traing loader: {len(train_loader.dataset)} images, Test loader: {len(test_loader.dataset)} images') return (train_loader, test_loader)