code
stringlengths
101
5.91M
# NOTE(review): the route decorator's name was truncated in the source to a
# bare tuple; it was almost certainly Flask's `@app.route` — confirm the
# application object's name before merging.
@app.route('/slot-details/<slotNumber>', methods=('GET',))
def getSlotDetails(slotNumber):
    """Render the detail page for a single beacon-chain slot."""
    return render_template('beacon-frontend/one-slot.html', slotNumber=slotNumber)
def and_pred(*preds):
    """Combine predicates into a single predicate that holds only when
    every given predicate holds for the same arguments."""
    def combined(*args):
        # all() short-circuits on the first predicate that rejects args,
        # matching the original explicit loop.
        return all(p(*args) for p in preds)
    return combined
def tokenize(sql, value_tokenize, parsed=False, **kwargs):
    """Tokenize *sql* (raw text, or a pre-parsed AST when parsed=True).

    Returns (tokens, token_types); when the tokenizer collected atomic
    values, additionally returns its constants as a third element.
    """
    tree = sql if parsed else parse(sql)
    tok = Tokenizer(value_tokenize, **kwargs)
    tokens, token_types = tok.tokenize(tree)
    if not tok.atomic_value:
        return (tokens, token_types)
    return (tokens, token_types, tok.constants)
def test_kernel_and_bias_defaults():
    """ClusterGCN's GraphConvolution layers should use default initializers
    and carry no regularizers or constraints."""
    graph, _ = create_graph_features()
    model = ClusterGCN(
        layer_sizes=[2, 2],
        activations=['relu', 'relu'],
        generator=ClusterNodeGenerator(graph),
    )
    conv_layers = (l for l in model._layers if isinstance(l, GraphConvolution))
    for conv in conv_layers:
        assert isinstance(conv.kernel_initializer, tf.initializers.GlorotUniform)
        assert isinstance(conv.bias_initializer, tf.initializers.Zeros)
        for attr in ('kernel_regularizer', 'bias_regularizer',
                     'kernel_constraint', 'bias_constraint'):
            assert getattr(conv, attr) is None
def _persist_stop_words(stop_words, path): stop_words = sorted(stop_words) with path.open(encoding='utf8', mode='w') as f: for stop_word in stop_words: f.write(('%s\n' % stop_word))
def run_benchmark(benchmark, ranks, opts):
    """Run *benchmark* on the subgroup formed by *ranks* and return the
    flattened measurements gathered from every rank."""
    group = dist.new_group(ranks=ranks, backend=benchmark.distributed_backend)
    measurements = []
    # Only members of the subgroup actually execute the benchmark.
    if dist.get_rank() in set(ranks):
        measurements = benchmark_process_group(group, benchmark, **(opts or dict()))
    dist.destroy_process_group(group)
    dist.barrier()
    return list(itertools.chain(*allgather_object(measurements)))
class ReLU6(Hardtanh):
    """Applies the ReLU6 function element-wise: min(max(0, x), 6).

    Implemented as Hardtanh with the fixed range [0, 6].
    """

    def __init__(self, inplace=False):
        super(ReLU6, self).__init__(0, 6, inplace)

    def extra_repr(self):
        # Only mention the flag when it is set.
        return 'inplace' if self.inplace else ''
def get_random_pairs(size):
    """Build *size* random (row, row) pairs from `images_above1`, choosing
    at random between same-class and different-class pairs."""
    pairs = []
    while len(pairs) < size:
        if choice([True, False]):
            # Same-class pair: pick a class, then two samples from it.
            cls = images_above1.sample().iloc[0]['class']
            same = images_above1[images_above1['class'] == cls].sample(2)
            pairs.append((same.iloc[0], same.iloc[1]))
        else:
            # Different-class pair: second sample drawn from any other class.
            first = images_above1.sample()
            second = images_above1[images_above1['class'] != first.iloc[0]['class']].sample()
            pairs.append((first.iloc[0], second.iloc[0]))
    return pairs
class TStrHashF_Md5(object):
    # SWIG-generated wrapper for the SNAP MD5 string-hash functor.
    # Do not edit by hand: regenerate from the SWIG interface instead.
    thisown = _swig_property((lambda x: x.this.own()), (lambda x, v: x.this.own(v)), doc='The membership flag')
    __repr__ = _swig_repr

    def GetPrimHashCd(*args):
        # Primary hash code for a string; delegates to the C++ layer.
        return _snap.TStrHashF_Md5_GetPrimHashCd(*args)
    GetPrimHashCd = staticmethod(GetPrimHashCd)

    def GetSecHashCd(*args):
        # Secondary hash code for a string; delegates to the C++ layer.
        return _snap.TStrHashF_Md5_GetSecHashCd(*args)
    GetSecHashCd = staticmethod(GetSecHashCd)

    def __init__(self):
        # Allocates the underlying C++ object via the SWIG init shim.
        _snap.TStrHashF_Md5_swiginit(self, _snap.new_TStrHashF_Md5())
    __swig_destroy__ = _snap.delete_TStrHashF_Md5
class InstallPlatlib(install):
    """Fix the auditwheel error by installing into the platlib directory.

    NOTE(review): the original source fused the docstring and method body
    into one unterminated string literal (a syntax error); reconstructed
    here into the conventional form.
    """

    def finalize_options(self) -> None:
        install.finalize_options(self)
        # Packages with compiled extension modules must go to the
        # platform-specific install location, not purelib.
        if self.distribution.has_ext_modules():
            self.install_lib = self.install_platlib
# NOTE(review): the decorator prefix was truncated in the source; restored as
# pytest's parametrize, which matches the surviving `.parametrize(...)` text.
@pytest.mark.parametrize('shuffle', [False, True])
def test_kg_triple_sequence_shuffle(shuffle):
    """Epoch-end shuffling must reorder batches iff shuffle=True."""
    seq = KGTripleSequence(
        max_node_iloc=10,
        source_ilocs=[0, 1, 2, 3, 4],
        rel_ilocs=[0, 1, 0, 1, 0],
        target_ilocs=[4, 3, 2, 1, 0],
        batch_size=5,
        shuffle=shuffle,
        negative_samples=None,
        sample_strategy='uniform',
        seed=None,
    )
    assert len(seq) == 1

    def sample():
        # Grab the single batch, then advance to the next epoch.
        ret = seq[0]
        seq.on_epoch_end()
        return ret

    first, *rest = [sample() for _ in range(20)]
    # Without shuffling every epoch yields the identical batch.
    should_be_equal = not shuffle
    assert all(epoch_sample_equal(first, r) for r in rest) == should_be_equal
class SawyerButtonPressWallEnv(SawyerXYZEnv):
    """Sawyer task: press a button mounted next to a wall.

    NOTE(review): the `@property` and `@_assert_task_is_set` decorator
    markers were truncated in the source; restored here to match the
    SawyerXYZEnv convention (bare `_assert_task_is_set` and un-decorated
    `model_name` would otherwise break `super().__init__(self.model_name, ...)`).
    """

    def __init__(self):
        hand_low = (-0.5, 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = (-0.05, 0.85, 0.05)
        obj_high = (0.05, 0.9, 0.05)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {
            'obj_init_pos': np.array([0.0, 0.9, 0.05], dtype=np.float32),
            'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
        }
        self.goal = np.array([0, 0.84, 0.12])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.hand_init_pos = self.init_config['hand_init_pos']
        goal_low = self.hand_low
        goal_high = self.hand_high
        self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    @property
    def model_name(self):
        return full_v1_path_for('sawyer_xyz/sawyer_button_press_wall.xml')

    @_assert_task_is_set
    def step(self, action):
        ob = super().step(action)
        reward, reachDist, pressDist = self.compute_reward(action, ob)
        self.curr_path_length += 1
        info = {
            'reachDist': reachDist,
            'goalDist': pressDist,
            'epRew': reward,
            'pickRew': None,
            'success': float(pressDist <= 0.02),
        }
        return ob, reward, False, info

    @property
    def _target_site_config(self):
        return []

    def _get_pos_objects(self):
        return self.data.site_xpos[self.model.site_name2id('buttonStart')]

    def _set_obj_xyz(self, pos):
        # Writes the button joint position (qpos index 9) directly.
        qpos = self.data.qpos.flat.copy()
        qvel = self.data.qvel.flat.copy()
        qpos[9] = pos
        qvel[9] = 0
        self.set_state(qpos, qvel)

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.obj_init_pos = self.init_config['obj_init_pos']
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            self.obj_init_pos = goal_pos
            # Button sits slightly in front of and above the sampled box pos.
            button_pos = goal_pos.copy()
            button_pos[1] -= 0.06
            button_pos[2] += 0.07
            self._target_pos = button_pos
        self.sim.model.body_pos[self.model.body_name2id('box')] = self.obj_init_pos
        self.sim.model.body_pos[self.model.body_name2id('button')] = self._target_pos
        self._set_obj_xyz(0)
        self._target_pos = self._get_site_pos('hole')
        # Maximum press distance used to scale the press reward.
        self.maxDist = np.abs(
            self.data.site_xpos[self.model.site_name2id('buttonStart')][1]
            - self._target_pos[1]
        )
        self.target_reward = 1000 * self.maxDist + 1000 * 2
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        rightFinger, leftFinger = (
            self._get_site_pos('rightEndEffector'),
            self._get_site_pos('leftEndEffector'),
        )
        self.init_fingerCOM = (rightFinger + leftFinger) / 2
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pressDist] for the current obs."""
        del actions
        objPos = obs[3:6]
        leftFinger = self._get_site_pos('leftEndEffector')
        fingerCOM = leftFinger
        pressGoal = self._target_pos[1]
        pressDist = np.abs(objPos[1] - pressGoal)
        reachDist = np.linalg.norm(objPos - fingerCOM)
        c1 = 1000
        c2 = 0.01
        c3 = 0.001
        # Press reward only kicks in once the finger is near the button.
        if reachDist < 0.05:
            pressRew = 1000 * (self.maxDist - pressDist) + c1 * (
                np.exp(-(pressDist ** 2) / c2) + np.exp(-(pressDist ** 2) / c3)
            )
        else:
            pressRew = 0
        pressRew = max(pressRew, 0)
        reward = -reachDist + pressRew
        return [reward, reachDist, pressDist]
def calc_boomerang_cod(location, orientation):
    """Estimate the center of drag of a 15-blob boomerang body.

    Offsets *location* toward the two arm-end blobs (indices 0 and 14)
    with equal weights.
    """
    blobs = bm.get_boomerang_r_vectors_15(location, orientation)
    dist = 0.96087
    weight = np.cos(np.pi / 4.0) * (dist / 2.1)
    return location + weight * (blobs[0] - location) + weight * (blobs[14] - location)
class CSBuFLO():
    # Congestion-sensitive BuFLO website-fingerprinting defense simulator.
    # Drives two CSBuFLOEndpoint state machines (client/server) over a
    # discrete-event loop. NOTE(review): `Queue` here appears to be a
    # project-local deque-like type exposing append/popleft/seeleft —
    # confirm; it is not `queue.Queue`.

    def __init__(self, initial_rho=INIT_RHO):
        # initial_rho: starting inter-packet interval for both endpoints.
        self.initial_rho = initial_rho

    def reset(self):
        # Fresh trace and endpoint state for a new simulation run.
        self.defended_trace = Queue()
        self.client = CSBuFLOEndpoint(self.defended_trace, OUT, self.initial_rho)
        self.server = CSBuFLOEndpoint(self.defended_trace, IN, self.initial_rho)
        self.client_packets = Queue()
        self.server_packets = Queue()

    def defend(self, packets):
        # Simulate the defense over (time, signed_size) packets; positive
        # sizes are client->server, negative are server->client.
        # Returns the defended trace as a list.
        self.reset()
        for (t, s) in packets:
            if (s > 0):
                self.client_packets.append((t, s))
            else:
                self.server_packets.append((t, s))
        nevents = {}  # histogram of event kinds processed (diagnostic only)
        self.cur_time = 0.0
        running = True
        while running:
            # Both sides idle with empty buffers => notify ON_LOAD.
            cond1 = ((not self.client_packets) and (not self.client.output_buff))
            cond2 = ((not self.server_packets) and (not self.server.output_buff))
            if (cond1 and cond2):
                self.server.process((self.cur_time, None), ON_LOAD)
                self.client.process((self.cur_time, None), ON_LOAD)
            if (not self.client_packets):
                self.client.more_data = False
            if (not self.server_packets):
                self.server.more_data = False
            if (self.client.done_xmitting(self.cur_time) and self.server.done_xmitting(self.cur_time)):
                break
            next_event = self.process_next()
            if (next_event not in nevents):
                nevents[next_event] = 1
            else:
                nevents[next_event] += 1
        return list(self.defended_trace)

    def process_next(self):
        # Pick the earliest of: client read, server read, client timeout,
        # server timeout; advance the clock and dispatch it.
        # Returns the event index (0-3).
        time_c_read_packet = self.client_packets.seeleft()
        time_s_read_packet = self.server_packets.seeleft()
        # Timeouts are wrapped in 1-tuples so every candidate is indexable
        # by [0] for its timestamp.
        c_timeout = (self.client.next_timeout(),)
        s_timeout = (self.server.next_timeout(),)
        t_events = [time_c_read_packet, time_s_read_packet, c_timeout, s_timeout]
        next_packet = min([x for x in t_events if (x is not None)], key=(lambda x: x[0]))
        next_event = t_events.index(next_packet)
        next_time = next_packet[0]
        # Sanity check: simulated time must be monotonically non-decreasing.
        if (next_time < self.cur_time):
            print('I travelled back in time:')
            print('{} => {}'.format(self.cur_time, next_time))
            print(t_events)
            raise TimeTravel
        self.cur_time = next_time
        process_event = {0: self.client_read, 1: self.server_read, 2: self.client_timeout, 3: self.server_timeout}
        process_event[next_event]()
        # Forward anything an endpoint emitted to its peer.
        if (self.client.sent.seeleft() is not None):
            self.client_sent()
        if (self.server.sent.seeleft() is not None):
            self.server_sent()
        return next_event

    def client_read(self):
        # Feed the next queued application packet into the client endpoint.
        packet = self.client_packets.popleft()
        self.client.process(packet, UPSTREAM_APPLICATION_DATA)

    def server_read(self):
        # Feed the next queued application packet into the server endpoint.
        packet = self.server_packets.popleft()
        self.server.process(packet, UPSTREAM_APPLICATION_DATA)

    def client_sent(self):
        # Deliver a client-emitted packet to the server as downstream data.
        packet = self.client.sent.popleft()
        self.server.process(packet, DOWNSTREAM_APPLICATION_DATA)

    def server_sent(self):
        # Deliver a server-emitted packet to the client as downstream data.
        packet = self.server.sent.popleft()
        self.client.process(packet, DOWNSTREAM_APPLICATION_DATA)

    def client_timeout(self):
        # Fire the client's pending timer event.
        packet = (self.client.next_timeout(), None)
        self.client.process(packet, TIMEOUT)

    def server_timeout(self):
        # Fire the server's pending timer event.
        packet = (self.server.next_timeout(), None)
        self.server.process(packet, TIMEOUT)
# NOTE(review): a registry-style decorator originally preceded this function;
# its name was truncated in the source to `('shorter_resize_for_crop')` —
# restore it from the project's transform registry before merging.
def shorter_resize_for_crop(cfg, **kwargs):
    """Resize so a subsequent center crop of `size` uses the standard
    0.875 crop ratio (e.g. 224 -> resize shorter side to 256).

    Fix: compare against None with `is not None` instead of `!=`.
    """
    size = kwargs['input_size'] if kwargs['input_size'] is not None else cfg.INPUT_SIZE
    assert size[0] == size[1], 'this img-process only process square-image'
    return transforms.Resize(int(size[0] / 0.875))
# NOTE(review): the decorator name was truncated in the source to
# `_model('lightconv_lm')`; restored as fairseq's `@register_model`, and the
# @staticmethod/@classmethod markers on add_args/build_model were restored to
# match the fairseq model API (add_args takes no self; build_model takes cls).
@register_model('lightconv_lm')
class LightConvLanguageModel(FairseqLanguageModel):
    """Decoder-only LightConv/DynamicConv language model."""

    def __init__(self, decoder):
        super().__init__(decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', default=0.0, type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--relu-dropout', default=0.0, type=float, metavar='D',
                            help='dropout probability after ReLU in FFN')
        parser.add_argument('--input-dropout', type=float, metavar='D',
                            help='dropout probability of the inputs')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-output-dim', type=int, metavar='N',
                            help='decoder output dimension')
        parser.add_argument('--decoder-input-dim', type=int, metavar='N',
                            help='decoder input dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads or LightConv/DynamicConv heads')
        parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion')
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N',
                            help='adaptive input factor')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--share-decoder-input-output-embed', default=False, action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--character-embeddings', default=False, action='store_true',
                            help='if set, uses character embedding convolutions to produce token embeddings')
        parser.add_argument('--character-filters', type=str, metavar='LIST',
                            default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
                            help='size of character embeddings')
        parser.add_argument('--character-embedding-dim', type=int, metavar='N', default=4,
                            help='size of character embeddings')
        parser.add_argument('--char-embedder-highway-layers', type=int, metavar='N', default=2,
                            help='number of highway layers for character token embeddder')
        parser.add_argument('--adaptive-input', default=False, action='store_true',
                            help='if set, uses adaptive input')
        parser.add_argument('--adaptive-input-factor', type=float, metavar='N',
                            help='adaptive input factor')
        parser.add_argument('--adaptive-input-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive input cutoff points.')
        parser.add_argument('--tie-adaptive-weights', action='store_true',
                            help='if set, ties the weights of adaptive softmax and adaptive input')
        parser.add_argument('--tie-adaptive-proj', action='store_true',
                            help='if set, ties the projection weights of adaptive softmax and adaptive input')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-kernel-size-list',
                            type=(lambda x: options.eval_str_list(x, int)),
                            help='list of kernel size (default: "[3,7,15,31,31,31]")')
        parser.add_argument('--decoder-glu', type=options.eval_bool,
                            help='glu after in proj')
        parser.add_argument('--decoder-conv-type', default='dynamic', type=str,
                            choices=['dynamic', 'lightweight'],
                            help='type of convolution')
        parser.add_argument('--weight-softmax', default=True, type=options.eval_bool)
        parser.add_argument('--weight-dropout', type=float, metavar='D',
                            help='dropout probability for conv weights')

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance for *task* from parsed *args*."""
        base_lm_architecture(args)
        # Language models only see one stream; cap positions at the sample size.
        if not hasattr(args, 'max_source_positions'):
            args.max_source_positions = args.tokens_per_sample
        if not hasattr(args, 'max_target_positions'):
            args.max_target_positions = args.tokens_per_sample
        if args.character_embeddings:
            embed_tokens = CharacterTokenEmbedder(
                task.dictionary, eval(args.character_filters),
                args.character_embedding_dim, args.decoder_embed_dim,
                args.char_embedder_highway_layers)
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(
                len(task.dictionary), task.dictionary.pad(),
                args.decoder_input_dim, args.adaptive_input_factor,
                args.decoder_embed_dim,
                options.eval_str_list(args.adaptive_input_cutoff, type=int))
        else:
            embed_tokens = Embedding(len(task.dictionary), args.decoder_input_dim,
                                     task.dictionary.pad())
        if args.tie_adaptive_weights:
            # Weight tying requires matching adaptive input/softmax setups.
            assert args.adaptive_input
            assert args.adaptive_input_factor == args.adaptive_softmax_factor
            assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, \
                '{} != {}'.format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
            assert args.decoder_input_dim == args.decoder_output_dim
        decoder = LightConvDecoder(args, task.output_dictionary, embed_tokens,
                                   no_encoder_attn=True, final_norm=False)
        return LightConvLanguageModel(decoder)
def evaluate_box_proposals(dataset, roidb):
    """Compute average-recall (AR) box-proposal statistics for every
    (area, limit) combination and return them keyed by dataset name."""
    res = _empty_box_proposal_results()
    areas = {'all': '', 'small': 's', 'medium': 'm', 'large': 'l'}
    limits = (100, 1000, 5000, 10000, 50000, 100000)
    for limit in limits:
        for area, suffix in areas.items():
            stats = json_dataset_evaluator.evaluate_box_proposals(
                dataset, roidb, area=area, limit=limit, thresholds=[0.7]
            )
            res['box_proposal']['AR{}{:d}'.format(suffix, limit)] = stats['ar']
    return OrderedDict([(dataset.name, res)])
class BengaliDoc2vec():
    """Doc2Vec wrapper for Bengali documents: vector inference and
    cosine-based document similarity."""

    def __init__(self, model_path: str = '', tokenizer: Callable = None):
        # Resolve well-known model identifiers (or empty = default news
        # model) to a downloaded model path.
        if model_path == '' or model_path == ModelTypeEnum.NEWS_DOC2VEC:
            model_path = download_model(ModelTypeEnum.NEWS_DOC2VEC)
        if model_path == ModelTypeEnum.WIKI_DOC2VEC:
            model_path = download_model(ModelTypeEnum.WIKI_DOC2VEC)
        self.tokenizer = tokenizer
        self.model = Doc2Vec.load(model_path)

    def _tokens(self, document: str):
        """Tokenize with the user-supplied tokenizer, else the default."""
        if self.tokenizer:
            return self.tokenizer(document)
        return default_tokenizer.tokenize(document)

    def get_document_vector(self, document: str) -> np.ndarray:
        """Infer the Doc2Vec embedding of *document*."""
        return self.model.infer_vector(self._tokens(document))

    def get_document_similarity(self, document_1: str, document_2: str) -> float:
        """Cosine similarity of the two documents' vectors, rounded to 2 dp."""
        vec_1 = self.model.infer_vector(self._tokens(document_1))
        vec_2 = self.model.infer_vector(self._tokens(document_2))
        return round(1 - spatial.distance.cosine(vec_1, vec_2), 2)
class ElectraForTokenClassification(metaclass=DummyObject):
    # Auto-generated placeholder used when torch is not installed;
    # instantiating it raises a helpful "requires torch" error.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def main():
    """Generate and persist negative edge samples for the tgbl-comment
    dataset's validation and test splits."""
    print('*** Negative Sample Generation ***')
    num_neg_e_per_pos = 20
    neg_sample_strategy = 'hist_rnd'
    rnd_seed = 42
    name = 'tgbl-comment'
    dataset = PyGLinkPropPredDataset(name=name, root='datasets')
    data = dataset.get_TemporalData()
    data_splits = {
        'train': data[dataset.train_mask],
        'val': data[dataset.val_mask],
        'test': data[dataset.test_mask],
    }
    min_dst_idx, max_dst_idx = int(data.dst.min()), int(data.dst.max())
    # Historical sampling draws negatives from edges seen during training.
    historical_data = data_splits['train'] if neg_sample_strategy == 'hist_rnd' else None
    neg_sampler = NegativeEdgeGenerator(
        dataset_name=name,
        first_dst_id=min_dst_idx,
        last_dst_id=max_dst_idx,
        num_neg_e=num_neg_e_per_pos,
        strategy=neg_sample_strategy,
        rnd_seed=rnd_seed,
        historical_data=historical_data,
    )
    partial_path = '.'
    for split_mode in ('val', 'test'):
        start_time = time.time()
        print(f'INFO: Start generating negative samples: {split_mode} --- {neg_sample_strategy}')
        neg_sampler.generate_negative_samples(
            data=data_splits[split_mode], split_mode=split_mode, partial_path=partial_path
        )
        print(f'INFO: End of negative samples generation. Elapsed Time (s): {(time.time() - start_time): .4f}')
class Partition1(nn.Module):
    # Auto-generated pipeline-parallel partition of a WideResNet: the tail of
    # block1 plus the first BasicBlock of block2. Do not edit by hand —
    # regenerate from the partitioning tool instead.
    LAYER_SCOPES = ['WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[2]/Conv2d[conv2]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn1]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/ReLU[relu1]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Conv2d[conv1]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/GroupNorm[bn2]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/ReLU[relu2]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Dropout[dropout]', 'WideResNet/NetworkBlock[block1]/Sequential[layer]/BasicBlock[3]/Conv2d[conv2]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn1]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu1]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv1]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/GroupNorm[bn2]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/ReLU[relu2]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Dropout[dropout]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[conv2]', 'WideResNet/NetworkBlock[block2]/Sequential[layer]/BasicBlock[0]/Conv2d[convShortcut]']
    TENSORS = []

    def __init__(self, layers, tensors, device='cuda:1'):
        super().__init__()
        # Register each traced layer as l_0, l_1, ... in LAYER_SCOPES order.
        for (idx, layer_scope) in enumerate(self.LAYER_SCOPES):
            self.add_module(f'l_{idx}', layer_scope and layers[layer_scope])
        b = p = 0
        # Register free-floating parameters/buffers (none for this partition).
        for tensor_scope in self.TENSORS:
            tensor = tensors[tensor_scope]
            if isinstance(tensor, nn.Parameter):
                self.register_parameter(f'p_{p}', tensor)
                p += 1
            else:
                self.register_buffer(f'b_{b}', tensor)
                b += 1
        self.device = torch.device(device)
        # Expects two flat input tensors (residual + main path).
        self.input_structure = [1, 1]
        # Maps generated module names back to original model attribute paths.
        self.lookup = {'l_0': 'block1.layer.2.conv2', 'l_1': 'block1.layer.3.bn1', 'l_2': 'block1.layer.3.relu1', 'l_3': 'block1.layer.3.conv1', 'l_4': 'block1.layer.3.bn2', 'l_5': 'block1.layer.3.relu2', 'l_6': 'block1.layer.3.dropout', 'l_7': 'block1.layer.3.conv2', 'l_8': 'block2.layer.0.bn1', 'l_9': 'block2.layer.0.relu1', 'l_10': 'block2.layer.0.conv1', 'l_11': 'block2.layer.0.bn2', 'l_12': 'block2.layer.0.relu2', 'l_13': 'block2.layer.0.dropout', 'l_14': 'block2.layer.0.conv2', 'l_15': 'block2.layer.0.convShortcut'}
        self.to(self.device)

    def forward(self, *args):
        # x0: residual input; x1: main-path input from the previous partition.
        (x0, x1) = unflatten(args, self.input_structure)
        t_0 = self.l_0(x1)
        t_0 = torch.add(x0, t_0)  # residual add ending block1.layer[2]
        t_1 = self.l_1(t_0)
        t_1 = self.l_2(t_1)
        t_1 = self.l_3(t_1)
        t_1 = self.l_4(t_1)
        t_1 = self.l_5(t_1)
        t_1 = self.l_6(t_1)
        t_1 = self.l_7(t_1)
        t_1 = torch.add(t_0, t_1)  # residual add ending block1.layer[3]
        t_1 = self.l_8(t_1)
        t_1 = self.l_9(t_1)
        # block2.layer[0]: main path (t_0) and shortcut conv (t_1) diverge here.
        t_0 = self.l_10(t_1)
        t_1 = self.l_15(t_1)
        t_0 = self.l_11(t_0)
        t_0 = self.l_12(t_0)
        t_0 = self.l_13(t_0)
        t_0 = self.l_14(t_0)
        return list(flatten((t_1, t_0)))

    # The remaining methods delegate to module-level helpers so that the
    # partitioned model serializes with the original (unpartitioned) names.
    def state_dict(self, *args, **kwargs):
        return state_dict(self, *args, **kwargs)

    def load_state_dict(self, *args, **kwargs):
        return load_state_dict(self, *args, **kwargs)

    def named_parameters(self, *args, **kwargs):
        return named_parameters(self, *args, **kwargs)

    def named_buffers(self, *args, **kwargs):
        return named_buffers(self, *args, **kwargs)

    def cpu(self):
        return cpu(self)

    def cuda(self, device=None):
        return cuda(self, device=device)

    def to(self, *args, **kwargs):
        return to(self, *args, **kwargs)
def dump_mixed_4a_7a(name='Mixed_4a'):
    """Dump every Conv2d sub-layer of an Inception Mixed_4a/Mixed_7a block."""
    branch_layers = (
        'Branch_0/Conv2d_0a_1x1',
        'Branch_0/Conv2d_1a_3x3',
        'Branch_1/Conv2d_0a_1x1',
        'Branch_1/Conv2d_0b_1x7',
        'Branch_1/Conv2d_0c_7x1',
        'Branch_1/Conv2d_1a_3x3',
    )
    for layer in branch_layers:
        dump_conv2d(name=name + '/' + layer)
def get_rank(group):
    """Rank of the current process within *group*.

    *group* is either a torch.distributed process group or, under XLA,
    a ('tpu', replica_groups) tuple.
    """
    if not use_xla():
        return dist.get_rank(group=group)
    assert group[0] == 'tpu'
    my_group = _find_my_group(group[1])
    return my_group.index(get_global_rank())
def append_atom():
    """Return a reaction SMARTS that appends one randomly chosen atom to a
    molecule via a randomly chosen bond order (single/double/triple)."""
    choices = [
        ['single', ['C', 'N', 'O', 'F', 'S', 'Cl', 'Br'], 7 * [1.0 / 7.0]],
        ['double', ['C', 'N', 'O'], 3 * [1.0 / 3.0]],
        ['triple', ['C', 'N'], 2 * [1.0 / 2.0]],
    ]
    p_BO = [0.6, 0.35, 0.05]
    index = np.random.choice(list(range(3)), p=p_BO)
    BO, atom_list, p = choices[index]
    new_atom = np.random.choice(atom_list, p=p)
    # Template requires enough free hydrogens for the chosen bond order.
    if BO == 'single':
        template, bond = '[*;!H0:1]>>[*:1]X', '-'
    elif BO == 'double':
        template, bond = '[*;!H0;!H1:1]>>[*:1]X', '='
    else:
        template, bond = '[*;H3:1]>>[*:1]X', '#'
    return template.replace('X', bond + new_atom)
def to_diff_file(old_str, new_str):
    """Return an ndiff-format delta between two multi-line strings.

    Bug fix: `splitlines(1)` keeps the trailing newline on every line, so the
    lines ndiff yields already end in '\n'; joining them with '\n' doubled
    every line break. Join with '' instead.
    """
    old_lines = old_str.splitlines(keepends=True)
    new_lines = new_str.splitlines(keepends=True)
    return ''.join(difflib.ndiff(old_lines, new_lines))
def test_release():
    """`run --tag FSE17` should lower-case the tag into requested_release."""
    parser = _get_command_line_parser(['valid-detector'], [], [])
    args = ['run', 'ex1', 'valid-detector', '--tag', 'FSE17']
    result = parser.parse_args(args)
    assert_equals('fse17', result.requested_release)
def reset_handler(handler):
    """Replace every registered logger's handlers with *handler*, plus the
    shared file handler when one is configured."""
    for logger in all_loggers:
        logger.handlers.clear()
        logger.addHandler(handler)
        if file_handler is not None:
            logger.addHandler(file_handler)
def model_fn_builder(bert_config, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings):
    # Returns a `model_fn` closure for TPUEstimator running BERT pretraining
    # with the masked-LM and next-sentence-prediction heads.

    def model_fn(features, labels, mode, params):
        # `labels` and `params` are required by the Estimator API but unused;
        # all inputs arrive through `features`.
        tf.logging.info('*** Features ***')
        for name in sorted(features.keys()):
            tf.logging.info((' name = %s, shape = %s' % (name, features[name].shape)))
        input_ids = features['input_ids']
        input_mask = features['input_mask']
        segment_ids = features['segment_ids']
        masked_lm_positions = features['masked_lm_positions']
        masked_lm_ids = features['masked_lm_ids']
        masked_lm_weights = features['masked_lm_weights']
        next_sentence_labels = features['next_sentence_labels']
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        model = tfm.BertModel(config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings)
        # Two pretraining heads: masked LM over the sequence output, and
        # next-sentence prediction over the pooled [CLS] output.
        (masked_lm_loss, masked_lm_example_loss, masked_lm_log_probs) = rp.get_masked_lm_output(bert_config, model.get_sequence_output(), model.get_embedding_table(), masked_lm_positions, masked_lm_ids, masked_lm_weights)
        (next_sentence_loss, next_sentence_example_loss, next_sentence_log_probs) = rp.get_next_sentence_output(bert_config, model.get_pooled_output(), next_sentence_labels)
        total_loss = (masked_lm_loss + next_sentence_loss)
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        scaffold_fn = None
        if init_checkpoint:
            # Warm-start from a checkpoint; on TPU the restore must happen
            # inside a Scaffold so it runs on the TPU host.
            (assignment_map, initialized_variable_names) = tfm.get_assigment_map_from_checkpoint(tvars, init_checkpoint)
            if use_tpu:

                def tpu_scaffold():
                    tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                    return tf.train.Scaffold()
                scaffold_fn = tpu_scaffold
            else:
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info('**** Trainable Variables ****')
        for var in tvars:
            init_string = ''
            if (var.name in initialized_variable_names):
                init_string = ', *INIT_FROM_CKPT*'
            tf.logging.info(' name = %s, shape = %s%s', var.name, var.shape, init_string)
        output_spec = None
        if (mode == tf.estimator.ModeKeys.TRAIN):
            # NOTE(review): these feature reads repeat the ones above; they
            # are redundant but harmless.
            masked_lm_positions = features['masked_lm_positions']
            masked_lm_ids = features['masked_lm_ids']
            masked_lm_weights = features['masked_lm_weights']
            next_sentence_labels = features['next_sentence_labels']
            train_op = optimization.create_optimizer(total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.EVAL):
            masked_lm_positions = features['masked_lm_positions']
            masked_lm_ids = features['masked_lm_ids']
            masked_lm_weights = features['masked_lm_weights']
            next_sentence_labels = features['next_sentence_labels']

            def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels):
                # Accuracy and mean loss for both heads; masked-LM metrics are
                # weighted so padding positions do not count.
                masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [(- 1), masked_lm_log_probs.shape[(- 1)]])
                masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=(- 1), output_type=tf.int32)
                masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [(- 1)])
                masked_lm_ids = tf.reshape(masked_lm_ids, [(- 1)])
                masked_lm_weights = tf.reshape(masked_lm_weights, [(- 1)])
                masked_lm_accuracy = tf.metrics.accuracy(labels=masked_lm_ids, predictions=masked_lm_predictions, weights=masked_lm_weights)
                masked_lm_mean_loss = tf.metrics.mean(values=masked_lm_example_loss, weights=masked_lm_weights)
                next_sentence_log_probs = tf.reshape(next_sentence_log_probs, [(- 1), next_sentence_log_probs.shape[(- 1)]])
                next_sentence_predictions = tf.argmax(next_sentence_log_probs, axis=(- 1), output_type=tf.int32)
                next_sentence_labels = tf.reshape(next_sentence_labels, [(- 1)])
                next_sentence_accuracy = tf.metrics.accuracy(labels=next_sentence_labels, predictions=next_sentence_predictions)
                next_sentence_mean_loss = tf.metrics.mean(values=next_sentence_example_loss)
                return {'masked_lm_accuracy': masked_lm_accuracy, 'masked_lm_loss': masked_lm_mean_loss, 'next_sentence_accuracy': next_sentence_accuracy, 'next_sentence_loss': next_sentence_mean_loss}
            eval_metrics = (metric_fn, [masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids, masked_lm_weights, next_sentence_example_loss, next_sentence_log_probs, next_sentence_labels])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn)
        elif (mode == tf.estimator.ModeKeys.PREDICT):
            # Emit per-position argmax predictions for both heads.
            masked_lm_log_probs = tf.reshape(masked_lm_log_probs, [(- 1), masked_lm_log_probs.shape[(- 1)]])
            masked_lm_predictions = tf.argmax(masked_lm_log_probs, axis=(- 1), output_type=tf.int32)
            next_sentence_log_probs = tf.reshape(next_sentence_log_probs, [(- 1), next_sentence_log_probs.shape[(- 1)]])
            next_sentence_predictions = tf.argmax(next_sentence_log_probs, axis=(- 1), output_type=tf.int32)
            # NOTE(review): the hard-coded leading 1 assumes batch size 1 at
            # predict time — confirm against the serving input fn.
            masked_lm_predictions = tf.reshape(masked_lm_predictions, [1, masked_lm_positions.shape[(- 1)]])
            next_sentence_predictions = tf.reshape(next_sentence_predictions, [1, 1])
            predictions = {'masked_lm_predictions': masked_lm_predictions, 'next_sentence_predictions': next_sentence_predictions}
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
            return output_spec
        else:
            raise ValueError(('Only TRAIN, EVAL and PREDICT modes are supported: %s' % mode))
        return output_spec
    return model_fn
def setup_snapshot_image_grid(training_set, random_seed=0):
    # Pick a (gw x gh) grid of dataset samples for snapshot images and return
    # ((gw, gh), images, labels).
    rnd = np.random.RandomState(random_seed)
    # Grid size targets roughly a 7680x4320 canvas, clamped to sane bounds.
    gw = np.clip((7680 // training_set.image_shape[2]), 15, 32)
    gh = np.clip((4320 // training_set.image_shape[1]), 8, 32)
    if (not training_set.has_labels):
        # Unlabeled data: shuffle all indices once and tile them to fill
        # the grid.
        all_indices = list(range(len(training_set)))
        rnd.shuffle(all_indices)
        grid_indices = [all_indices[(i % len(all_indices))] for i in range((gw * gh))]
    else:
        # Labeled data: group indices by label so that each grid row shows
        # one label class.
        label_groups = dict()
        for idx in range(len(training_set)):
            label = tuple(training_set.get_details(idx).raw_label.flat[::(- 1)])
            if (label not in label_groups):
                label_groups[label] = []
            label_groups[label].append(idx)
        # Fixed label order (after per-group shuffling) keeps rows stable
        # across snapshots for the same seed.
        label_order = sorted(label_groups.keys())
        for label in label_order:
            rnd.shuffle(label_groups[label])
        grid_indices = []
        for y in range(gh):
            label = label_order[(y % len(label_order))]
            indices = label_groups[label]
            grid_indices += [indices[(x % len(indices))] for x in range(gw)]
            # Rotate the group by gw so a label reused on a later row shows
            # different samples.
            label_groups[label] = [indices[((i + gw) % len(indices))] for i in range(len(indices))]
    (images, labels, _) = zip(*[training_set[i] for i in grid_indices])
    return ((gw, gh), np.stack(images), np.stack(labels))
class SubsampleDataset(BaseWrapperDataset):
    """Wraps a dataset and exposes a random fixed-size subset of its items.

    Fixes: `ordered_indices` read `self.shuffle`, which was never assigned,
    raising AttributeError — a `shuffle` flag (default False, backward
    compatible) is now accepted and stored. `sizes` and `name` are restored
    as properties to match the fairseq dataset interface their call sites
    expect.
    """

    def __init__(self, dataset, size_ratio, shuffle=False):
        super().__init__(dataset)
        assert size_ratio < 1
        self.actual_size = np.ceil(len(dataset) * size_ratio).astype(int)
        # Sample the retained indices once, up front, without replacement.
        self.indices = np.random.choice(
            list(range(len(self.dataset))), self.actual_size, replace=False
        )
        self.shuffle = shuffle
        print(
            'subsampled dataset from {} to {} (ratio={})'.format(
                len(self.dataset), self.actual_size, size_ratio
            )
        )

    def __getitem__(self, index):
        return self.dataset[self.indices[index]]

    def __len__(self):
        return self.actual_size

    def collater(self, samples):
        return self.dataset.collater(samples)

    @property
    def sizes(self):
        return self.dataset.sizes[self.indices]

    @property
    def name(self):
        return self.dataset.name

    def num_tokens(self, index):
        return self.dataset.num_tokens(self.indices[index])

    def size(self, index):
        return self.dataset.size(self.indices[index])

    def ordered_indices(self):
        """Indices ordered for batching: random when shuffling, otherwise
        sequential, with size as the secondary sort key."""
        if self.shuffle:
            order = [np.random.permutation(len(self))]
        else:
            order = [np.arange(len(self))]
        order.append(self.sizes)
        return np.lexsort(order)

    def prefetch(self, indices):
        self.dataset.prefetch(self.indices[indices])
def simulate(N=100, k=10000):
    """Draw k alias-method samples from an N-outcome distribution and return
    (empirical distribution, true distribution)."""
    truth = gen_prob_dist(N)
    accept, alias = create_alias_table(truth)
    counts = np.zeros(N)
    for _ in range(k):
        counts[alias_sample(accept, alias)] += 1
    return counts / np.sum(counts), truth
class CommandCollection(MultiCommand):
    """A MultiCommand that merges the commands of several other
    MultiCommands, searched in registration order."""

    def __init__(self, name=None, sources=None, **attrs):
        MultiCommand.__init__(self, name, **attrs)
        # The sources to look up commands in.
        self.sources = sources if sources else []

    def add_source(self, multi_cmd):
        """Register an additional source of commands."""
        self.sources.append(multi_cmd)

    def get_command(self, ctx, cmd_name):
        # First source that resolves the name wins; chained collections
        # additionally validate the command for chaining.
        for source in self.sources:
            cmd = source.get_command(ctx, cmd_name)
            if cmd is None:
                continue
            if self.chain:
                _check_multicommand(self, cmd_name, cmd)
            return cmd

    def list_commands(self, ctx):
        names = set()
        for source in self.sources:
            names.update(source.list_commands(ctx))
        return sorted(names)
def polevl(x, coefs, n):
    """Evaluate a polynomial at ``x``.

    ``coefs`` lists coefficients from the highest degree term down to the
    constant term (Cephes convention).  ``n`` is accepted for signature
    compatibility with the Cephes-style API but is not used — the degree is
    taken from ``len(coefs)`` (this matches the original behavior, which
    also ignored ``n``).

    Uses Horner's scheme: one multiply-add per coefficient instead of
    recomputing ``x ** power`` for every term.
    """
    ans = 0
    for coef in coefs:
        ans = (ans * x) + coef
    return ans
class TestCOCOeval(unittest.TestCase):
    """Compare the reference pycocotools COCOeval against the optimized
    COCOeval_opt on a matrix of edge-case inputs (empty detections/GT,
    missing annotation fields, degenerate parameter settings)."""

    def test_fast_eval(self):
        # Three fake detections for image 139 / category 1 ("person").
        # The 'counts' strings are compressed COCO RLE masks.
        detections = [{'image_id': 139, 'category_id': 1, 'bbox': [417., 159., 47., 143.], 'score': 0., 'segmentation': {'size': [426, 640], 'counts': 'Tc`52W=3N0N4aNN^E7]:4XE1g:8kDMT;UO1gE[Nk8h1dFiNY9Z1aFkN]9g2J3NdN`FlN`9S1cFRN07]9g1bFoM6;X9c1cFoM=8R9g1bFQN>3U9Y30O01OO1O001N2O1N1O4L4L5UNoE3V:CVF6Q:<k9[O`F=];HYnX2'}}, {'image_id': 139, 'category_id': 1, 'bbox': [383., 172., 17., 36.], 'score': 0., 'segmentation': {'size': [426, 640], 'counts': 'lZP5m0Z<300O100OO00]OlC0T<OnCOT<OnCNX<JnC2bQT3'}}, {'image_id': 139, 'category_id': 1, 'bbox': [457., 158., 9., 8.], 'score': 0., 'segmentation': {'size': [426, 640], 'counts': 'bSo54T=2N2O1001O006ImiW2'}}]
        # Minimal COCO-format ground truth with two images and two polygon
        # annotations.
        # NOTE(review): the 'coco_url'/'file_name'/'flickr_url' string values
        # below appear truncated by extraction (stray quote fragments); as
        # written this literal is not valid Python — restore the URL strings
        # from the original test file.
        gt_annotations = {'categories': [{'supercategory': 'person', 'id': 1, 'name': 'person'}, {'supercategory': 'furniture', 'id': 65, 'name': 'bed'}], 'images': [{'license': 4, 'file_name': '.jpg', 'coco_url': ' 'height': 640, 'width': 586, 'date_captured': '2013-11-18 13:09:47', 'flickr_url': ' 'id': 285}, {'license': 2, 'file_name': '.jpg', 'coco_url': ' 'height': 426, 'width': 640, 'date_captured': '2013-11-21 01:34:01', 'flickr_url': ' 'id': 139}], 'annotations': [{'segmentation': [[428.19, 219.47, 430.94, 209.57, 430.39, 210.12, 421.32, 216.17, 412.8, 217.27, 413.9, 214.24, 422.42, 211.22, 429.29, 201.6, 430.67, 181.8, 430.12, 175.2, 427.09, 168.06, 426.27, 164.21, 430.94, 159.26, 440.29, 157.61, 446.06, 163.93, 448.53, 168.06, 448.53, 173.01, 449.08, 174.93, 454.03, 185.1, 455.41, 188.4, 458.43, 195.0, 460.08, 210.94, 462.28, 226.61, 460.91, 233.76, 454.31, 234.04, 460.08, 256.85, 462.56, 268.13, 465.58, 290.67, 465.85, 293.14, 463.38, 295.62, 452.66, 295.34, 448.26, 294.52, 443.59, 282.7, 446.06, 235.14, 446.34, 230.19, 438.09, 232.39, 438.09, 221.67, 434.24, 221.12, 427.09, 219.74]], 'area': 2913., 'iscrowd': 0, 'image_id': 139, 'bbox': [412.8, 157.61, 53.05, 138.01], 'category_id': 1, 'id': 230831}, {'segmentation': [[384.98, 206.58, 384.43, 199.98, 385.25, 193.66, 385.25, 190.08, 387.18, 185.13, 387.18, 182.93, 386.08, 181.01, 385.25, 178.81, 385.25, 175.79, 388.0, 172.76, 394.88, 172.21, 398.72, 173.31, 399.27, 176.06, 399.55, 183.48, 397.9, 185.68, 395.15, 188.98, 396.8, 193.38, 398.45, 194.48, 399.0, 205.75, 395.43, 207.95, 388.83, 206.03]], 'area': 435., 'iscrowd': 0, 'image_id': 139, 'bbox': [384.43, 172.21, 15.12, 35.74], 'category_id': 1, 'id': 233201}]}
        # Each experiment: (detections, ground truth, COCOeval param overrides).
        experiments = {'full': (detections, gt_annotations, {})}
        experiments['empty_dt'] = ([], gt_annotations, {})
        gt = copy.deepcopy(gt_annotations)
        gt['annotations'] = []
        experiments['empty_gt'] = (detections, gt, {})
        experiments['no_categories'] = (detections, gt_annotations, {'useCats': 0})
        experiments['no_ious'] = (detections, gt_annotations, {'iouThrs': []})
        experiments['no_rec_thrs'] = (detections, gt_annotations, {'recThrs': []})
        experiments['no_max_dets'] = (detections, gt_annotations, {'maxDets': []})
        experiments['one_max_det'] = (detections, gt_annotations, {'maxDets': [1]})
        experiments['no_area'] = (detections, gt_annotations, {'areaRng': [], 'areaRngLbl': []})
        # Drop each annotation field in turn, from GT and DT separately.
        annotation_fields = ['id', 'image_id', 'category_id', 'score', 'area', 'iscrowd', 'ignore', 'bbox', 'segmentation']
        for a in annotation_fields:
            gt = copy.deepcopy(gt_annotations)
            for g in gt['annotations']:
                if (a in g):
                    del g[a]
            dt = copy.deepcopy(detections)
            for d in dt:
                if (a in d):
                    del d[a]
            experiments[('omit_gt_' + a)] = (detections, gt, {})
            experiments[('omit_dt_' + a)] = (dt, gt_annotations, {})
        for (name, (dt, gt, params)) in experiments.items():
            # Write the GT to a temporary json and load it through the COCO API.
            # NOTE(review): on failure coco_api silently keeps the value from a
            # previous iteration — presumably intentional best-effort; confirm.
            try:
                with tempfile.TemporaryDirectory() as tmpdir:
                    json_file_name = os.path.join(tmpdir, (('gt_' + name) + '.json'))
                    with open(json_file_name, 'w') as f:
                        json.dump(gt, f)
                    with contextlib.redirect_stdout(io.StringIO()):
                        coco_api = COCO(json_file_name)
            except Exception:
                pass
            for iou_type in ['bbox', 'segm', 'keypoints']:
                # Run the reference implementation, capturing any exception.
                api_exception = None
                try:
                    with contextlib.redirect_stdout(io.StringIO()):
                        coco_dt = coco_api.loadRes(dt)
                        coco_eval = COCOeval(coco_api, coco_dt, iou_type)
                        for (p, v) in params.items():
                            setattr(coco_eval.params, p, v)
                        coco_eval.evaluate()
                        coco_eval.accumulate()
                        coco_eval.summarize()
                except Exception as ex:
                    api_exception = ex
                # Run the optimized implementation the same way.
                opt_exception = None
                try:
                    with contextlib.redirect_stdout(io.StringIO()):
                        coco_dt = coco_api.loadRes(dt)
                        coco_eval_opt = COCOeval_opt(coco_api, coco_dt, iou_type)
                        for (p, v) in params.items():
                            setattr(coco_eval_opt.params, p, v)
                        coco_eval_opt.evaluate()
                        coco_eval_opt.accumulate()
                        coco_eval_opt.summarize()
                except Exception as ex:
                    opt_exception = ex
                if ((api_exception is not None) and (opt_exception is not None)):
                    # Both implementations raised: they must agree on the type.
                    # NOTE(review): inside this branch the `is None` guards are
                    # dead; also, if exactly ONE side raised we fall into the
                    # else and compare .eval of an object that may not have it —
                    # upstream likely used `or` here; confirm against original.
                    api_error = ('' if (api_exception is None) else type(api_exception).__name__)
                    opt_error = ('' if (opt_exception is None) else type(opt_exception).__name__)
                    msg = ("%s: comparing COCO APIs, '%s' != '%s'" % (name, api_error, opt_error))
                    self.assertTrue((api_error == opt_error), msg=msg)
                else:
                    # Both succeeded: precision/recall arrays must match closely.
                    for k in ['precision', 'recall']:
                        diff = np.abs((coco_eval.eval[k] - coco_eval_opt.eval[k]))
                        abs_diff = (np.max(diff) if (diff.size > 0) else 0.0)
                        msg = ('%s: comparing COCO APIs, %s differs by %f' % (name, k, abs_diff))
                        self.assertTrue((abs_diff < 0.0001), msg=msg)

    # NOTE(review): the bare tuple below looks like the argument list of a
    # truncated decorator (presumably @unittest.skipUnless/skipIf on the CI
    # env var); restore it from the original source.
    (os.environ.get('CI'), 'Require COCO data.')

    def test_unknown_category(self):
        # A prediction with class id 80 (outside coco_2017_val_100's 80
        # categories, ids 0..79) must make evaluation fail.
        dataset = 'coco_2017_val_100'
        evaluator = COCOEvaluator(dataset)
        evaluator.reset()
        inputs = DatasetCatalog.get(dataset)[:2]
        pred = Instances((100, 100))
        pred.pred_boxes = Boxes(torch.rand(2, 4))
        pred.scores = torch.rand(2)
        pred.pred_classes = torch.tensor([10, 80])
        output = {'instances': pred}
        evaluator.process(inputs, [output, output])
        with self.assertRaises(AssertionError):
            evaluator.evaluate()
class Node2Vec():
    """Node2Vec embedding model: two Keras ``Embedding`` tables ("target" and
    "context") over the graph's nodes, used for skip-gram style training.

    Args:
        emb_size: dimensionality of the node embeddings.
        generator: optional Node2VecNodeGenerator / Node2VecLinkGenerator; if
            given, node count and multiplicity are read from it.
        node_num: number of nodes (required when no generator is given).
        multiplicity: 1 for node models, 2 for link models (required when no
            generator is given).
    """

    def __init__(self, emb_size, generator=None, node_num=None, multiplicity=None):
        self.generator = generator
        if (generator is not None):
            self._get_sizes_from_generator(generator)
        else:
            self.input_node_num = _require_without_generator(node_num, 'node_num')
            self.multiplicity = _require_without_generator(multiplicity, 'multiplicity')
        self.emb_size = emb_size
        # Target table: uniform init in [-1, 1].
        target_embedding_initializer = keras.initializers.RandomUniform(minval=(- 1.0), maxval=1.0)
        self.target_embedding = Embedding(self.input_node_num, self.emb_size, input_length=1, name='target_embedding', embeddings_initializer=target_embedding_initializer)
        # Context table: truncated normal with stddev 1/sqrt(emb_size).
        context_embedding_initializer = keras.initializers.TruncatedNormal(stddev=(1.0 / math.sqrt((self.emb_size * 1.0))))
        self.context_embedding = Embedding(self.input_node_num, self.emb_size, input_length=1, name='context_embedding', embeddings_initializer=context_embedding_initializer)

    def _get_sizes_from_generator(self, generator):
        """Read node count and multiplicity from the generator; the graph must
        have a single node type."""
        if (not isinstance(generator, (Node2VecNodeGenerator, Node2VecLinkGenerator))):
            raise TypeError('Generator should be an instance of Node2VecNodeGenerator or Node2VecLinkGenerator')
        self.multiplicity = generator.multiplicity
        self.input_node_num = generator.graph.number_of_nodes()
        if (len(list(generator.graph.node_types)) > 1):
            raise ValueError('Node2Vec called on graph with more than one node type.')

    def __call__(self, xin, embedding):
        """Look up node ids ``xin`` in the selected table ('target' or
        'context') and flatten to shape (batch, emb_size)."""
        if (embedding == 'target'):
            h_layer = self.target_embedding(xin)
        elif (embedding == 'context'):
            h_layer = self.context_embedding(xin)
        else:
            raise ValueError('wrong embedding argument is supplied: {}, should be "target" or "context"'.format(embedding))
        h_layer = Reshape((self.emb_size,))(h_layer)
        return h_layer

    def _node_model(self, embedding='target'):
        # Single node-id input -> its embedding vector.
        x_inp = Input(shape=(1,))
        x_out = self(x_inp, embedding)
        return (x_inp, x_out)

    def _link_model(self):
        # Pair of inputs: source uses the target table, destination the
        # context table (standard skip-gram pairing).
        (x_inp_src, x_out_src) = self._node_model('target')
        (x_inp_dst, x_out_dst) = self._node_model('context')
        x_inp = [x_inp_src, x_inp_dst]
        x_out = [x_out_src, x_out_dst]
        return (x_inp, x_out)

    def in_out_tensors(self, multiplicity=None):
        """Return (inputs, outputs) for a node (multiplicity 1) or link
        (multiplicity 2) model.

        NOTE(review): the ``multiplicity`` argument is normalized into a local
        but the branches below test ``self.multiplicity``, so the parameter is
        effectively ignored — confirm against upstream.
        """
        if (multiplicity is None):
            multiplicity = self.multiplicity
        if (self.multiplicity == 1):
            return self._node_model()
        elif (self.multiplicity == 2):
            return self._link_model()
        else:
            raise ValueError('Currently only multiplicities of 1 and 2 are supported.')

    def default_model(self, flatten_output=True):
        # Deprecated alias; note ``flatten_output`` is not forwarded.
        warnings.warn('The .default_model() method is deprecated. Please use .in_out_tensors() method instead.', DeprecationWarning, stacklevel=2)
        return self.build()

    # Deprecated public aliases for the model-building methods.
    node_model = deprecated_model_function(_node_model, 'node_model')
    link_model = deprecated_model_function(_link_model, 'link_model')
    build = deprecated_model_function(in_out_tensors, 'build')
class Wide_ResNet(nn.Module):
    """Wide ResNet feature extractor (WRN-depth-widen_factor).

    Builds conv1 followed by three wide-basic stages; ``forward`` returns the
    activated feature map after the final BatchNorm — there is no pooling or
    classification head in this module (``num_classes`` is stored but unused
    here; presumably consumed by a subclass or caller — confirm).
    Relies on module-level helpers ``conv3x3``, ``wide_basic`` and ``act``.
    """

    def __init__(self, depth, widen_factor, num_classes, stride=1, parallel=False):
        super(Wide_ResNet, self).__init__()
        self.num_classes = num_classes
        self.in_planes = 16
        # Depth must be 6n+4: three stages of n blocks plus the stem.
        assert (((depth - 4) % 6) == 0), 'Wide-resnet_v2 depth should be 6n+4'
        n = int(((depth - 4) / 6))
        k = widen_factor
        print(('| Wide-Resnet %dx%d' % (depth, k)))
        # Channel widths of the stem and the three stages.
        nStages = [16, (16 * k), (32 * k), (64 * k)]
        self.conv1 = conv3x3(3, nStages[0], stride=stride)
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)

    def _wide_layer(self, block, planes, num_blocks, stride):
        """Stack ``num_blocks`` blocks; only the first uses ``stride``."""
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = x
        out = self.conv1(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # ``act`` is a module-level activation function — presumably ReLU or
        # similar; confirm in the enclosing file.
        out = act(self.bn1(out))
        return out
class DummyCriticNet():
    """Test stand-in for a critic network.

    Calling it returns, per batch element, the max over the last observation
    axis plus the max over the last action axis.
    """

    def __init__(self):
        pass

    def parameters(self):
        """Return a dummy parameter tensor so optimizer wiring works."""
        return torch.zeros(5)

    def __call__(self, observation, actions):
        # Reduce each input over its last axis, then combine.
        obs_max = observation.max(dim=-1).values
        act_max = actions.max(dim=-1).values
        return obs_max + act_max
def get_chunks(seq, tags, message=None):
    """Turn a sequence of tag ids into (chunk_type, start, end) spans.

    A chunk closes on the default ("O") tag, on a type change, or on an
    explicit 'B' (begin) tag class; ``end`` is exclusive.  ``message`` is
    unused and kept only for signature compatibility.
    """
    default = tags[NONE]
    idx_to_tag = {idx: tag for tag, idx in tags.items()}
    chunks = []
    chunk_type = None
    chunk_start = None
    for i, tok in enumerate(seq):
        if tok == default:
            # Outside any entity: close a chunk if one is open.
            if chunk_type is not None:
                chunks.append((chunk_type, chunk_start, i))
                chunk_type = None
                chunk_start = None
            continue
        tok_class, tok_type = get_chunk_type(tok, idx_to_tag)
        if chunk_type is None:
            # Start a fresh chunk.
            chunk_type, chunk_start = tok_type, i
        elif tok_type != chunk_type or tok_class == 'B':
            # Close the current chunk and immediately open a new one.
            chunks.append((chunk_type, chunk_start, i))
            chunk_type, chunk_start = tok_type, i
    # Flush a chunk that runs to the end of the sequence.
    if chunk_type is not None:
        chunks.append((chunk_type, chunk_start, len(seq)))
    return chunks
def masked_select(tensor: Tensor, *, mask: Tensor, dims: Sequence[Dim], out_dim: Optional[Dim]=None) -> Tuple[(Tensor, Dim)]:
    """Select the entries of ``tensor`` where ``mask`` is true.

    Thin dispatch wrapper: delegates to the tensor's raw backend, which packs
    the masked ``dims`` into a single output dim (``out_dim`` if given).
    Returns the backend's (selected tensor, packed dim) pair.
    """
    return tensor._raw_backend.masked_select(tensor, mask=mask, dims=dims, out_dim=out_dim)
# NOTE(review): the decorator name below appears truncated by extraction —
# presumably '@cuda.jit(...)' with this signature string; as written this is
# not valid Python. Restore from the original source.
('(float32[:], float32[:], int32, int32, float32[:])', device=True, inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
    """CUDA device function: intersect edge i of quad ``pts1`` with edge j of
    quad ``pts2``.

    Each quad is stored as 8 flat floats (x0,y0,...,x3,y3); edges wrap modulo
    4. If the segments properly intersect, the intersection point is written
    into ``temp_pts[0:2]`` and True is returned; otherwise False.
    """
    # Segment endpoints: A->B is edge i of quad 1, C->D is edge j of quad 2.
    A = cuda.local.array((2,), dtype=numba.float32)
    B = cuda.local.array((2,), dtype=numba.float32)
    C = cuda.local.array((2,), dtype=numba.float32)
    D = cuda.local.array((2,), dtype=numba.float32)
    A[0] = pts1[(2 * i)]
    A[1] = pts1[((2 * i) + 1)]
    B[0] = pts1[(2 * ((i + 1) % 4))]
    B[1] = pts1[((2 * ((i + 1) % 4)) + 1)]
    C[0] = pts2[(2 * j)]
    C[1] = pts2[((2 * j) + 1)]
    D[0] = pts2[(2 * ((j + 1) % 4))]
    D[1] = pts2[((2 * ((j + 1) % 4)) + 1)]
    BA0 = (B[0] - A[0])
    BA1 = (B[1] - A[1])
    DA0 = (D[0] - A[0])
    CA0 = (C[0] - A[0])
    DA1 = (D[1] - A[1])
    CA1 = (C[1] - A[1])
    # Orientation tests: segments cross iff C and D lie on opposite sides of
    # AB, and A and B lie on opposite sides of CD.
    acd = ((DA1 * CA0) > (CA1 * DA0))
    bcd = (((D[1] - B[1]) * (C[0] - B[0])) > ((C[1] - B[1]) * (D[0] - B[0])))
    if (acd != bcd):
        abc = ((CA1 * BA0) > (BA1 * CA0))
        abd = ((DA1 * BA0) > (BA1 * DA0))
        if (abc != abd):
            # Cramer's rule on the two line equations to get the crossing
            # point; DH is the determinant (non-zero here since the segments
            # properly intersect).
            DC0 = (D[0] - C[0])
            DC1 = (D[1] - C[1])
            ABBA = ((A[0] * B[1]) - (B[0] * A[1]))
            CDDC = ((C[0] * D[1]) - (D[0] * C[1]))
            DH = ((BA1 * DC0) - (BA0 * DC1))
            Dx = ((ABBA * DC0) - (BA0 * CDDC))
            Dy = ((ABBA * DC1) - (BA1 * CDDC))
            temp_pts[0] = (Dx / DH)
            temp_pts[1] = (Dy / DH)
            return True
    return False
class TestMultipleInputsMultipleOutputsKerasFQExporter(KerasFakeQuantExporterBaseTest):
    """Fake-quant exporter test for a Keras model with two inputs and two
    outputs, quantized with 2-bit weights."""

    def get_input_shape(self):
        # Two inputs of different spatial sizes; after the valid-padding conv
        # the first branch shrinks to match the second (28x28x3).
        return [(30, 30, 3), (28, 28, 3)]

    def get_tpc(self):
        # Target platform capabilities with 2-bit weight quantization.
        tp = generate_test_tp_model({'weights_n_bits': 2})
        return generate_keras_tpc(name='test_conv2d_2bit_fq_weight', tp_model=tp)

    def get_model(self):
        inputs1 = Input(shape=self.get_input_shape()[0])
        inputs2 = Input(shape=self.get_input_shape()[1])
        x = Conv2D(3, 3)(inputs1)
        y = Conv2D(3, 3, padding='same')(inputs2)
        x = Add()([x, y])
        # Two outputs: the summed branch and the second conv branch.
        model = keras.Model(inputs=[inputs1, inputs2], outputs=[x, y])
        return model

    def run_checks(self):
        # Exported (loaded) model must preserve both input and output
        # signatures of the exportable model.
        assert (len(self.loaded_model.input_shape) == 2)
        assert (self.exportable_model.input_shape == self.loaded_model.input_shape)
        assert (len(self.loaded_model.output_shape) == 2)
        assert (self.loaded_model.output_shape == self.exportable_model.output_shape)
class DebertaV2ForMaskedLM():
    """Import-time placeholder used when PyTorch is not installed.

    Any attempt to construct it or call ``from_pretrained`` raises via
    ``requires_pytorch`` with an informative error.
    NOTE(review): upstream transformers defines ``from_pretrained`` as a
    @classmethod on these dummies — confirm which is intended here.
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
def check_and_reduce_pair(x1, x2=None):
    """Validate and canonicalize a continued fraction given as a pre-period
    ``x1`` and an optional period ``x2``.

    Returns ``(tuple(y1), tuple(y2))`` where y2 is ``(Infinity,)`` for a
    finite (non-periodic) expansion. Raises ValueError on invalid quotients.
    """
    y1 = [Integer(x) for x in x1]
    if ((x2 is None) or (not x2) or (x2[0] is Infinity)):
        # Finite continued fraction: period is canonically [oo].
        y2 = [Infinity]
        if (not y1):
            raise ValueError('continued fraction can not represent infinity')
        elif ((len(y1) > 1) and (y1[(- 1)] == 1)):
            # Canonical form: [..., a, 1] == [..., a+1].
            y1.pop()
            y1[(- 1)] += 1
    else:
        y2 = [Integer(x) for x in x2]
        if any(((b <= ZZ_0) for b in y2)):
            raise ValueError('the elements of the period cannot be negative')
        # Absorb a common tail of the pre-period into the period by rotating
        # the period rightwards.
        while (y1 and (y1[(- 1)] == y2[(- 1)])):
            y1.pop()
            y2.insert(0, y2.pop())
        if ((len(y2) == 1) and (y2[0] == 0)):
            # Degenerate zero period: normalize the pre-period instead.
            if (not y1):
                y1 = [ZZ_0]
            elif ((len(y1) > 1) and (y1[(- 1)] == 1)):
                y1.pop()
                y1[(- 1)] += 1
        # Reduce the period to its smallest repeating unit (naive check of
        # every divisor-length prefix).
        n2 = len(y2)
        for i in range(1, ((n2 + 2) // 2)):
            if (((n2 % i) == 0) and (y2[:(- i)] == y2[i:])):
                y2 = y2[:i]
                break
    # All quotients after the first must be strictly positive.
    for i in range(1, len(y1)):
        if (y1[i] <= 0):
            raise ValueError('all quotient except the first must be positive')
    return (tuple(y1), tuple(y2))
# NOTE(review): the decorator names below appear truncated by extraction —
# presumably '@pytest.mark.gpu' and '@pytest.mark.parametrize(...)'; as
# written this is not valid Python. Restore from the original source.
.gpu .parametrize('img', (real_image3d()[1], random_image((33, 44, 55)))) .parametrize('n_rays', (4, 16, 32)) .parametrize('grid', ((1, 1, 1), (1, 2, 4)))
def test_types_gpu(img, n_rays, grid):
    """star_dist3D on GPU (OpenCL) must be invariant to the input's integer
    dtype: every dtype's result is compared against the original image's."""
    mode = 'opencl'
    rays = Rays_GoldenSpiral(n_rays)
    gt = star_dist3D(img, rays=rays, grid=grid, mode=mode)
    for dtype in (np.int8, np.int16, np.int32, np.uint8, np.uint16, np.uint32):
        x = star_dist3D(img.astype(dtype), rays=rays, grid=grid, mode=mode)
        print('test_stardist3D (mode {mode}) for shape {img.shape} and type {dtype}'.format(mode=mode, img=img, dtype=dtype))
        check_similar(gt, x)
def get_training_parser(default_task='translation'):
    """Build the argparse parser for training.

    The registration order below is deliberate — it defines the order of the
    option groups in --help output.
    """
    parser = get_parser('Trainer', default_task)
    add_dataset_args(parser, train=True)
    add_distributed_training_args(parser)
    add_model_args(parser)
    add_optimization_args(parser)
    add_checkpoint_args(parser)
    return parser
def _run_operation(n: BaseNode, input_tensors: List, op_func: Any, quantize_node_activation_fn, use_activation_quantization: bool) -> Tuple[(Union[(List, torch.Tensor)], Union[(List, torch.Tensor)])]:
    """Execute a node's op on its inputs and optionally quantize the output.

    Functional nodes contribute extra positional/keyword call arguments; plain
    nodes contribute none.  Returns (possibly-quantized output, float output);
    when quantization is enabled and the float output is a list, it is first
    concatenated along dim 0.
    """
    is_functional = isinstance(n, FunctionalNode)
    extra_args = n.op_call_args if is_functional else []
    extra_kwargs = n.op_call_kwargs if is_functional else {}
    if is_functional and n.inputs_as_list:
        # The op expects its inputs packed into a single list argument.
        float_out = op_func(input_tensors, *extra_args, **extra_kwargs)
    else:
        float_out = op_func(*(input_tensors + extra_args), **extra_kwargs)
    quantized_out = float_out
    if use_activation_quantization:
        if isinstance(float_out, list):
            float_out = torch.cat(float_out, dim=0)
        quantized_out = quantize_node_activation_fn(float_out)
    return (quantized_out, float_out)
class DistOptimizerHook(OptimizerHook):
    """Optimizer hook for distributed training: all-reduces gradients across
    workers before the (optionally clipped) optimizer step."""

    def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
        self.grad_clip = grad_clip
        # Gradient all-reduce bucketing options.
        self.coalesce = coalesce
        self.bucket_size_mb = bucket_size_mb

    def after_train_iter(self, runner):
        """Backward pass, cross-worker gradient sync, optional clip, step."""
        optimizer = runner.optimizer
        optimizer.zero_grad()
        runner.outputs['loss'].backward()
        allreduce_grads(runner.model, self.coalesce, self.bucket_size_mb)
        if self.grad_clip is not None:
            self.clip_grads(runner.model.parameters())
        optimizer.step()
def print_header():
    """Print the ASCII-art banner in dim (bright_black) color via the
    module-level rich ``console``.

    NOTE(review): the art's internal spacing may have been collapsed by
    extraction — verify the banner string against the original file before
    editing it; it is runtime output, not a comment.
    """
    header = ' _____ _ ____ _______ _ ___ _ _ _____ \n/ ___| | / /\\ \\ / / ___ \\ | / _ \\ | \\ | || ___|\n\\ `--.| |/ / \\ V /| |_/ / | / /_\\ \\| \\| || |__ \n `--. \\ \\ \\ / | __/| | | _ || . ` || __| \n/\\__/ / |\\ \\ | | | | | |____| | | || |\\ || |___ \n\\____/\\_| \\_/ \\_/ \\_| \\_____/\\_| |_/\\_| \\_/\\____/'
    console.print(f'''[bright_black]{header}[/bright_black] ''')
def build_joint_config(args: argparse.Namespace):
    """Assemble an ExtractorConfig for joint IE (chunk + attribute + relation
    decoding) from parsed command-line arguments.

    NOTE(review): none of the three decoder if/elif chains has an else, so an
    unrecognized ``args.ck_decoder`` (or ``attr_decoder``/``rel_decoder``)
    leaves the corresponding *_config name unbound and raises NameError at
    the end — presumably argparse choices guard this upstream; confirm.
    """
    # (emb, hidden, locked) dropout triple; "locked" drop moves the rate to
    # the third slot.
    drop_rates = ((0.0, 0.05, args.drop_rate) if args.use_locked_drop else (args.drop_rate, 0.0, 0.0))
    # --- chunk (entity) decoder ---
    if (args.ck_decoder == 'sequence_tagging'):
        ck_decoder_config = SequenceTaggingDecoderConfig(scheme=args.scheme, use_crf=args.use_crf, fl_gamma=args.fl_gamma, sl_epsilon=args.sl_epsilon, in_drop_rates=drop_rates)
    elif (args.ck_decoder == 'span_classification'):
        ck_decoder_config = SpanClassificationDecoderConfig(agg_mode=args.agg_mode, fl_gamma=args.fl_gamma, sl_epsilon=args.sl_epsilon, neg_sampling_rate=args.neg_sampling_rate, neg_sampling_power_decay=args.neg_sampling_power_decay, neg_sampling_surr_rate=args.neg_sampling_surr_rate, neg_sampling_surr_size=args.neg_sampling_surr_size, max_span_size=args.max_span_size, size_emb_dim=args.size_emb_dim, in_drop_rates=drop_rates)
    elif (args.ck_decoder == 'boundary_selection'):
        ck_decoder_config = BoundarySelectionDecoderConfig(use_biaffine=args.use_biaffine, affine=EncoderConfig(arch=args.affine_arch, hid_dim=args.affine_dim, num_layers=args.affine_num_layers, in_drop_rates=(0.4, 0.0, 0.0), hid_drop_rate=0.2), fl_gamma=args.fl_gamma, sl_epsilon=args.sl_epsilon, neg_sampling_rate=args.neg_sampling_rate, neg_sampling_power_decay=args.neg_sampling_power_decay, neg_sampling_surr_rate=args.neg_sampling_surr_rate, neg_sampling_surr_size=args.neg_sampling_surr_size, sb_epsilon=args.sb_epsilon, size_emb_dim=args.size_emb_dim)
    # --- attribute decoder ('None' string disables it) ---
    if (args.attr_decoder == 'None'):
        attr_decoder_config = None
    elif (args.attr_decoder == 'span_attr_classification'):
        attr_decoder_config = SpanAttrClassificationDecoderConfig(agg_mode=args.agg_mode, neg_sampling_rate=args.neg_sampling_rate, max_size_id=args.max_size_id, size_emb_dim=args.size_emb_dim, label_emb_dim=args.label_emb_dim, in_drop_rates=drop_rates)
    # --- relation decoder ('None' string disables it) ---
    if (args.rel_decoder == 'None'):
        rel_decoder_config = None
    elif (args.rel_decoder == 'span_rel_classification'):
        rel_decoder_config = SpanRelClassificationDecoderConfig(agg_mode=args.agg_mode, fl_gamma=args.fl_gamma, sl_epsilon=args.sl_epsilon, neg_sampling_rate=args.neg_sampling_rate, max_size_id=args.max_size_id, size_emb_dim=args.size_emb_dim, label_emb_dim=args.label_emb_dim, in_drop_rates=drop_rates)
    decoder_config = JointExtractionDecoderConfig(ck_decoder=ck_decoder_config, attr_decoder=attr_decoder_config, rel_decoder=rel_decoder_config)
    return ExtractorConfig(**collect_IE_assembly_config(args), decoder=decoder_config)
class RestrictedImageNet(DataSet):
    """Restricted-ImageNet dataset: ImageNet collapsed to the class ranges in
    ``constants.RESTRICTED_IMAGNET_RANGES``, with per-channel normalization
    statistics and the standard ImageNet train/test transforms."""

    def __init__(self, data_path, **kwargs):
        ds_name = 'restricted_imagenet'
        ranges = constants.RESTRICTED_IMAGNET_RANGES
        dataset_config = {
            'num_classes': len(ranges),
            'mean': ch.tensor([0.4717, 0.4499, 0.3837]),
            'std': ch.tensor([0.26, 0.2516, 0.2575]),
            'custom_class': None,
            'label_mapping': get_label_mapping(ds_name, ranges),
            'transform_train': da.TRAIN_TRANSFORMS_IMAGENET,
            'transform_test': da.TEST_TRANSFORMS_IMAGENET,
        }
        super(RestrictedImageNet, self).__init__(ds_name, data_path, **dataset_config)

    def get_model(self, arch, pretrained):
        """Instantiate architecture ``arch`` for this dataset's class count;
        pretrained weights are not available for the restricted label set."""
        if pretrained:
            raise ValueError("Dataset doesn't support pytorch_pretrained")
        return imagenet_models.__dict__[arch](num_classes=self.num_classes)
def register_Ns3DefaultDeleter__Ns3SpectrumSignalParameters_methods(root_module, cls):
    """Auto-generated (PyBindGen-style) registration of the ns-3
    DefaultDeleter<SpectrumSignalParameters> bindings: default and copy
    constructors plus the static Delete method. Do not hand-edit; regenerate
    with the bindings scanner."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::SpectrumSignalParameters > const &', 'arg0')])
    cls.add_method('Delete', 'void', [param('ns3::SpectrumSignalParameters *', 'object')], is_static=True)
    return
def _create_fake_setuptools_pkg_info(placeholder):
    """Write a fake ``setuptools-X-pyY.egg-info`` file plus a
    ``setuptools.pth`` into *placeholder* so pkg_resources believes
    setuptools is installed.

    No-ops (with a warning) when the placeholder is missing or the egg-info
    already exists; a permission error while writing the egg-info is logged
    and skipped (slightly broader than before: write failures are now caught
    too, not only the open).  All file handles are closed via ``with``.
    """
    if ((not placeholder) or (not os.path.exists(placeholder))):
        log.warn('Could not find the install location')
        return
    pyver = ('%s.%s' % (sys.version_info[0], sys.version_info[1]))
    setuptools_file = ('setuptools-%s-py%s.egg-info' % (SETUPTOOLS_FAKED_VERSION, pyver))
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    try:
        with open(pkg_info, 'w') as f:
            f.write(SETUPTOOLS_PKG_INFO)
    except EnvironmentError:
        log.warn("Don't have permissions to write %s, skipping", pkg_info)
        return
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    with open(pth_file, 'w') as f:
        f.write(os.path.join(os.curdir, setuptools_file))
class EuclideanLoss(BaseLossWithValidity):
    """Per-sample Euclidean (L2) distance between predictions and targets."""

    def calculate_loss(self, a, b):
        """Return sqrt of the squared differences summed over all non-batch
        axes; inputs must be batched tensors of equal rank (> 1)."""
        assert a.ndim == b.ndim
        assert a.ndim > 1
        reduce_axes = tuple(range(1, a.ndim))
        sq_diff = (a - b) ** 2
        return torch.sqrt(sq_diff.sum(dim=reduce_axes))
# NOTE(review): the line below looks like a truncated decorator — presumably
# '@array_function_dispatch(_fftn_dispatcher)' (NumPy __array_function__
# dispatch); as written it is a bare call. Restore from the original source.
_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
    """N-dimensional inverse FFT: applies the 1-D ``ifft`` along each of the
    requested ``axes`` (with optional per-axis sizes ``s`` and normalization
    ``norm``) via the shared ``_raw_fftnd`` helper."""
    return _raw_fftnd(a, s, axes, ifft, norm)
def test_compare_gt():
    """Elementwise '>' on Tensors must agree across the operator, the '>'
    kind string and the 'greater' kind string of rf.compare."""
    lhs_raw = torch.tensor([2.0, 2.0, 2.0])
    rhs_raw = torch.tensor([1.0, 2.0, 3.0])
    feat = Dim(3)
    a = Tensor(name='a', raw_tensor=lhs_raw, dims=[feat], dtype='float32')
    b = Tensor(name='b', raw_tensor=rhs_raw, dims=[feat], dtype='float32')
    expected = [True, False, False]
    for res in (a > b, rf.compare(a, '>', b), rf.compare(a, 'greater', b)):
        assert res.raw_tensor.tolist() == expected
def imnormalize_(img, mean, std, to_rgb=True):
    """Normalize an image in place: (img - mean) / std, with optional
    BGR->RGB conversion beforehand.

    Args:
        img: float ndarray, modified in place (uint8 is rejected because the
            in-place cv2 arithmetic would truncate).
        mean / std: per-channel statistics, reshaped to broadcast over pixels.
        to_rgb: convert channel order in place before normalizing.

    Returns the same (mutated) array.
    """
    assert (img.dtype != np.uint8)
    mean = np.float64(mean.reshape(1, (- 1)))
    # Multiply by 1/std instead of dividing per pixel.
    stdinv = (1 / np.float64(std.reshape(1, (- 1))))
    if to_rgb:
        # Third argument = destination: converts in place.
        cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
    cv2.subtract(img, mean, img)
    cv2.multiply(img, stdinv, img)
    return img
class LongformerTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer (GPT-2/RoBERTa style) for Longformer models.

    Text is first split by a GPT-2 regex into pre-tokens, each pre-token is
    mapped to printable unicode bytes, then merged via the ranked BPE merges
    loaded from ``merges_file``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, merges_file, errors='replace', bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', add_prefix_space=False, **kwargs):
        # Wrap plain-string specials as AddedToken; mask_token strips the
        # space to its left so "<mask>" behaves like a word start.
        bos_token = (AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token)
        eos_token = (AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token)
        sep_token = (AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token)
        cls_token = (AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token)
        unk_token = (AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token)
        pad_token = (AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token)
        mask_token = (AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token)
        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)
        # token -> id mapping from the json vocab, and its inverse.
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        self.errors = errors
        # byte value <-> printable unicode char tables for byte-level BPE.
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for (k, v) in self.byte_encoder.items()}
        # Merges file: skip the '#version' header line and the trailing blank.
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:(- 1)]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # GPT-2 pre-tokenization pattern (contractions, letters, digits,
        # punctuation, whitespace). Requires the 'regex' module for \p classes.
        self.pat = re.compile("'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+")

    # NOTE(review): in upstream transformers this is a @property; as a plain
    # method callers must write .vocab_size() — confirm which is intended.
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        # Base vocab plus any tokens added after loading.
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply ranked byte-pair merges to one pre-token; memoized in
        ``self.cache``. Returns the space-joined merged symbols."""
        if (token in self.cache):
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if (not pairs):
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if (bigram not in self.bpe_ranks):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Regex pre-tokenize, byte-encode, then BPE each pre-token."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join((self.byte_encoder[b] for b in token.encode('utf-8')))
            bpe_tokens.extend((bpe_token for bpe_token in self.bpe(token).split(' ')))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        # Unknown tokens map to the unk id.
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Invert the byte-level encoding back to a readable string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Write vocab json and merges file into ``save_directory``; returns
        their paths (or None on a bad directory)."""
        if (not os.path.isdir(save_directory)):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        merge_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']))
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write((json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n'))
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            # Merges are written in rank order; a gap means a corrupt table.
            for (bpe_tokens, token_index) in sorted(self.bpe_ranks.items(), key=(lambda kv: kv[1])):
                if (index != token_index):
                    logger.warning(f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive. Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write((' '.join(bpe_tokens) + '\n'))
                index += 1
        return (vocab_file, merge_file)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Single sequence: <s> X </s>; pair: <s> A </s></s> B </s>."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return (((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """1 for special tokens, 0 for sequence tokens, matching the layout of
        build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is None):
            return (([1] + ([0] * len(token_ids_0))) + [1])
        return (((([1] + ([0] * len(token_ids_0))) + [1, 1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """RoBERTa-style models don't use token types: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return (len((((((cls + token_ids_0) + sep) + sep) + token_ids_1) + sep)) * [0])

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Prepend a space so the first word gets the leading-space byte-level
        # encoding, when requested and not already present.
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if ((is_split_into_words or add_prefix_space) and ((len(text) > 0) and (not text[0].isspace()))):
            text = (' ' + text)
        return (text, kwargs)
class CCInterpreter(StackInterpreter):
    """Fast-callable interpreter spec for complex (MPC) arithmetic.

    Declares the memory chunks, generated C/Cython headers and the
    instruction set (loads, return, Python fallback call, binary/unary MPC
    functions) used to generate the compiled complex-number evaluator.
    """

    name = 'cc'

    def __init__(self):
        # Return value lives in an MPC-typed chunk.
        mc_retval = MemoryChunkCCRetval('retval', ty_mpc)
        super(CCInterpreter, self).__init__(ty_mpc, mc_retval=mc_retval)
        self.err_return = '0'
        self.mc_py_constants = MemoryChunkConstants('py_constants', ty_python)
        self.mc_domain = MemoryChunkPyConstant('domain')
        self.chunks = [self.mc_args, self.mc_retval, self.mc_constants, self.mc_py_constants, self.mc_stack, self.mc_code, self.mc_domain]
        # Short parameter-spec generator: A=args, C=constants, D=code stream,
        # S=stack, P=python constants.
        pg = params_gen(A=self.mc_args, C=self.mc_constants, D=self.mc_code, S=self.mc_stack, P=self.mc_py_constants)
        self.pg = pg
        # Generated C / Cython header templates. These strings are emitted
        # verbatim into the generated sources — do not reformat them.
        self.c_header = ri(0, '\n #include <mpc.h>\n #include "wrapper_cc.h"\n ')
        self.pxd_header = ri(0, '\n from sage.rings.real_mpfr cimport RealNumber\n from sage.libs.mpfr cimport *\n from sage.rings.complex_mpfr cimport ComplexNumber\n from sage.libs.mpc cimport *\n ')
        self.pyx_header = ri(0, ' # distutils: libraries = mpfr mpc gmp\n\n cdef public bint cc_py_call_helper(object domain, object fn,\n int n_args,\n mpc_t* args, mpc_t retval) except 0:\n py_args = []\n cdef int i\n cdef ComplexNumber ZERO=domain.zero()\n cdef ComplexNumber cn\n for i from 0 <= i < n_args:\n cn = ZERO._new()\n mpfr_set(cn.__re, mpc_realref(args[i]), MPFR_RNDN)\n mpfr_set(cn.__im, mpc_imagref(args[i]), MPFR_RNDN)\n py_args.append(cn)\n cdef ComplexNumber result = domain(fn(*py_args))\n mpc_set_fr_fr(retval, result.__re,result.__im, MPC_RNDNN)\n return 1\n ')
        # Core instructions: argument/constant loads, return, and the Python
        # fallback call (routes through cc_py_call_helper above).
        instrs = [InstrSpec('load_arg', pg('A[D]', 'S'), code='mpc_set(o0, i0, MPC_RNDNN);'), InstrSpec('load_const', pg('C[D]', 'S'), code='mpc_set(o0, i0, MPC_RNDNN);'), InstrSpec('return', pg('S', ''), code='mpc_set(retval, i0, MPC_RNDNN);\nreturn 1;\n'), InstrSpec('py_call', pg('P[D]', 'S'), uses_error_handler=True, code='\n if (!cc_py_call_helper(domain, i0, n_i1, i1, o0)) {\n goto error;\n}\n')]
        # Binary MPC operations (stack, stack -> stack).
        for (name, op) in [('add', 'mpc_add'), ('sub', 'mpc_sub'), ('mul', 'mpc_mul'), ('div', 'mpc_div'), ('pow', 'mpc_pow')]:
            instrs.append(instr_funcall_2args_mpc(name, pg('SS', 'S'), op))
        # Integer power takes its exponent from the code stream.
        instrs.append(instr_funcall_2args_mpc('ipow', pg('SD', 'S'), 'mpc_pow_si'))
        # Unary MPC functions, all named mpc_<name>.
        for name in ['neg', 'log', 'log10', 'exp', 'cos', 'sin', 'tan', 'acos', 'asin', 'atan', 'cosh', 'sinh', 'tanh', 'acosh', 'asinh', 'atanh']:
            instrs.append(instr_funcall_1arg_mpc(name, pg('S', 'S'), ('mpc_' + name)))
        instrs.append(InstrSpec('invert', pg('S', 'S'), code='mpc_ui_div(o0, 1, i0, MPC_RNDNN);'))
        self.instr_descs = instrs
        self._set_opcodes()
        # Exponent range for ipow: signed 32-bit, matching mpc_pow_si.
        self.ipow_range = (int((- (2 ** 31))), int(((2 ** 31) - 1)))
def conv_block(input_mat, num_filters, kernel_size, batch_norm):
    """Residual 3D convolution block: two Conv3D(+optional BN)+ReLU stages
    with a skip connection adding the input back to the result.

    Note: the skip add requires ``num_filters`` to equal the input's channel
    count.
    """
    kernel = (kernel_size, kernel_size, kernel_size)

    def conv_stage(tensor):
        # One Conv3D -> (BatchNorm) -> ReLU stage.
        out = Conv3D(num_filters, kernel_size=kernel, strides=(1, 1, 1), padding='same')(tensor)
        if batch_norm:
            out = BatchNormalization()(out)
        return Activation('relu')(out)

    X = conv_stage(input_mat)
    X = conv_stage(X)
    return add([input_mat, X])
def is_file(path, filename, restore=False):
    """Ensure directory ``path`` exists and contains ``filename``.

    The file path is formed by plain concatenation (``path + filename``), so
    ``path`` is expected to end with a separator — kept for compatibility
    with existing callers.

    Behavior (unchanged from the original, minus the triplicated open logic):
      * missing directory: create it (parent must already exist) and create
        the file empty;
      * existing file with ``restore`` truthy: leave contents untouched;
      * otherwise: create the file, truncating any existing contents.
    """
    full_path = path + filename
    if not os.path.isdir(path):
        os.mkdir(path)
    if restore and os.path.isfile(full_path):
        # Keep existing contents when restoring.
        return
    # Create, or truncate an existing file.
    with open(full_path, 'w'):
        pass
class Environment(object):
    """Core templating environment (Jinja2-style).

    Holds the configuration shared by all templates: delimiters, extensions,
    loader, template cache and bytecode cache, and provides the parsing /
    lexing / compiling / loading services templates are built from.
    """

    # True in sandboxed subclasses; not enforced here.
    sandboxed = False
    # True when this environment was created through overlay().
    overlayed = False
    # The environment an overlay is linked to, else None.
    linked_to = None
    # Shared environments must not be modified in place.
    shared = False
    # Subclass hooks: code generation and runtime context classes.
    code_generator_class = CodeGenerator
    context_class = Context

    def __init__(self, block_start_string=BLOCK_START_STRING, block_end_string=BLOCK_END_STRING, variable_start_string=VARIABLE_START_STRING, variable_end_string=VARIABLE_END_STRING, comment_start_string=COMMENT_START_STRING, comment_end_string=COMMENT_END_STRING, line_statement_prefix=LINE_STATEMENT_PREFIX, line_comment_prefix=LINE_COMMENT_PREFIX, trim_blocks=TRIM_BLOCKS, lstrip_blocks=LSTRIP_BLOCKS, newline_sequence=NEWLINE_SEQUENCE, keep_trailing_newline=KEEP_TRAILING_NEWLINE, extensions=(), optimized=True, undefined=Undefined, finalize=None, autoescape=False, loader=None, cache_size=400, auto_reload=True, bytecode_cache=None, enable_async=False):
        """Store lexer/parser settings, runtime defaults, loader and caches."""
        # Lexer / parser configuration.
        self.block_start_string = block_start_string
        self.block_end_string = block_end_string
        self.variable_start_string = variable_start_string
        self.variable_end_string = variable_end_string
        self.comment_start_string = comment_start_string
        self.comment_end_string = comment_end_string
        self.line_statement_prefix = line_statement_prefix
        self.line_comment_prefix = line_comment_prefix
        self.trim_blocks = trim_blocks
        self.lstrip_blocks = lstrip_blocks
        self.newline_sequence = newline_sequence
        self.keep_trailing_newline = keep_trailing_newline
        # Runtime behavior.
        self.undefined = undefined
        self.optimized = optimized
        self.finalize = finalize
        self.autoescape = autoescape
        # Per-environment copies of the default namespaces so mutation is safe.
        self.filters = DEFAULT_FILTERS.copy()
        self.tests = DEFAULT_TESTS.copy()
        self.globals = DEFAULT_NAMESPACE.copy()
        # Template loading and caching.
        self.loader = loader
        self.cache = create_cache(cache_size)
        self.bytecode_cache = bytecode_cache
        self.auto_reload = auto_reload
        # Configurable policies (copied so overlays may diverge).
        self.policies = DEFAULT_POLICIES.copy()
        # Load extensions last: they may inspect the environment.
        self.extensions = load_extensions(self, extensions)
        self.enable_async = enable_async
        self.is_async = (self.enable_async and have_async_gen)
        if self.is_async:
            # Importing this module patches the runtime for async support.
            from . import asyncsupport
        _environment_sanity_check(self)

    def add_extension(self, extension):
        """Add an extension after environment creation."""
        self.extensions.update(load_extensions(self, [extension]))

    def extend(self, **attributes):
        """Set attributes that do not exist yet (used by extensions)."""
        for (key, value) in iteritems(attributes):
            if (not hasattr(self, key)):
                setattr(self, key, value)

    def overlay(self, block_start_string=missing, block_end_string=missing, variable_start_string=missing, variable_end_string=missing, comment_start_string=missing, comment_end_string=missing, line_statement_prefix=missing, line_comment_prefix=missing, trim_blocks=missing, lstrip_blocks=missing, extensions=missing, optimized=missing, undefined=missing, finalize=missing, autoescape=missing, loader=missing, cache_size=missing, auto_reload=missing, bytecode_cache=missing):
        """Create a linked copy of this environment, overriding the given
        settings; ``missing`` arguments keep the current values."""
        args = dict(locals())
        del args['self'], args['cache_size'], args['extensions']
        # Clone without calling __init__ so existing state is shared.
        rv = object.__new__(self.__class__)
        rv.__dict__.update(self.__dict__)
        rv.overlayed = True
        rv.linked_to = self
        for (key, value) in iteritems(args):
            if (value is not missing):
                setattr(rv, key, value)
        if (cache_size is not missing):
            rv.cache = create_cache(cache_size)
        else:
            rv.cache = copy_cache(self.cache)
        # Rebind every extension to the new environment.
        rv.extensions = {}
        for (key, value) in iteritems(self.extensions):
            rv.extensions[key] = value.bind(rv)
        if (extensions is not missing):
            rv.extensions.update(load_extensions(rv, extensions))
        return _environment_sanity_check(rv)

    # get_lexer is a module-level helper (defined elsewhere in this file).
    lexer = property(get_lexer, doc='The lexer for this environment.')

    def iter_extensions(self):
        """Iterate over extensions ordered by priority."""
        return iter(sorted(self.extensions.values(), key=(lambda x: x.priority)))

    def getitem(self, obj, argument):
        """Subscript lookup with attribute fallback (template ``obj[arg]``)."""
        try:
            return obj[argument]
        except (AttributeError, TypeError, LookupError):
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        return getattr(obj, attr)
                    except AttributeError:
                        pass
            return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Attribute lookup with subscript fallback (template ``obj.attr``)."""
        try:
            return getattr(obj, attribute)
        except AttributeError:
            pass
        try:
            return obj[attribute]
        except (TypeError, LookupError, AttributeError):
            return self.undefined(obj=obj, name=attribute)

    def call_filter(self, name, value, args=None, kwargs=None, context=None, eval_ctx=None):
        """Invoke a registered filter, injecting context/eval-context/environment
        as the first argument when the filter is marked accordingly."""
        func = self.filters.get(name)
        if (func is None):
            fail_for_missing_callable('no filter named %r', name)
        args = ([value] + list((args or ())))
        if (getattr(func, 'contextfilter', False) is True):
            if (context is None):
                raise TemplateRuntimeError('Attempted to invoke context filter without context')
            args.insert(0, context)
        elif (getattr(func, 'evalcontextfilter', False) is True):
            if (eval_ctx is None):
                if (context is not None):
                    eval_ctx = context.eval_ctx
                else:
                    eval_ctx = EvalContext(self)
            args.insert(0, eval_ctx)
        elif (getattr(func, 'environmentfilter', False) is True):
            args.insert(0, self)
        return func(*args, **(kwargs or {}))

    def call_test(self, name, value, args=None, kwargs=None):
        """Invoke a registered test on ``value``."""
        func = self.tests.get(name)
        if (func is None):
            fail_for_missing_callable('no test named %r', name)
        return func(value, *(args or ()), **(kwargs or {}))

    def parse(self, source, name=None, filename=None):
        """Parse ``source`` into an AST; syntax errors go through handle_exception."""
        try:
            return self._parse(source, name, filename)
        except TemplateSyntaxError:
            self.handle_exception(source=source)

    def _parse(self, source, name, filename):
        """Internal parse without error handling."""
        return Parser(self, source, name, encode_filename(filename)).parse()

    def lex(self, source, name=None, filename=None):
        """Yield raw (lineno, token_type, value) tuples for ``source``."""
        source = text_type(source)
        try:
            return self.lexer.tokeniter(source, name, filename)
        except TemplateSyntaxError:
            self.handle_exception(source=source)

    def preprocess(self, source, name=None, filename=None):
        """Run every extension's preprocess hook over the raw source."""
        return reduce((lambda s, e: e.preprocess(s, name, filename)), self.iter_extensions(), text_type(source))

    def _tokenize(self, source, name, filename=None, state=None):
        """Tokenize preprocessed source and pipe the stream through extensions."""
        source = self.preprocess(source, name, filename)
        stream = self.lexer.tokenize(source, name, filename, state)
        for ext in self.iter_extensions():
            stream = ext.filter_stream(stream)
            if (not isinstance(stream, TokenStream)):
                stream = TokenStream(stream, name, filename)
        return stream

    def _generate(self, source, name, filename, defer_init=False):
        """Generate Python code from an AST node."""
        return generate(source, self, name, filename, defer_init=defer_init, optimized=self.optimized)

    def _compile(self, source, filename):
        """Compile generated Python source to a code object."""
        return compile(source, filename, 'exec')

    def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
        """Compile a template source (string or AST) to a code object, or to
        Python source when ``raw`` is True."""
        source_hint = None
        try:
            if isinstance(source, string_types):
                source_hint = source
                source = self._parse(source, name, filename)
            source = self._generate(source, name, filename, defer_init=defer_init)
            if raw:
                return source
            if (filename is None):
                filename = '<template>'
            else:
                filename = encode_filename(filename)
            return self._compile(source, filename)
        except TemplateSyntaxError:
            self.handle_exception(source=source_hint)

    def compile_expression(self, source, undefined_to_none=True):
        """Compile a single expression to a callable TemplateExpression."""
        parser = Parser(self, source, state='variable')
        try:
            expr = parser.parse_expression()
            if (not parser.stream.eos):
                raise TemplateSyntaxError('chunk after expression', parser.stream.current.lineno, None, None)
            expr.set_environment(self)
        except TemplateSyntaxError:
            # NOTE(review): sys.exc_info() is never None inside an except
            # block, so this guard is always taken.
            if (sys.exc_info() is not None):
                self.handle_exception(source=source)
        # Wrap the expression in ``result = <expr>`` and reuse template machinery.
        body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
        template = self.from_string(nodes.Template(body, lineno=1))
        return TemplateExpression(template, undefined_to_none)

    def compile_templates(self, target, extensions=None, filter_func=None, zip='deflated', log_function=None, ignore_errors=True, py_compile=False):
        """Compile all loader templates into ``target`` (zip file or directory),
        for use with a ModuleLoader."""
        from .loaders import ModuleLoader
        if (log_function is None):
            def log_function(x):
                pass
        if py_compile:
            if ((not PY2) or PYPY):
                import warnings
                warnings.warn("'py_compile=True' has no effect on PyPy or Python 3 and will be removed in version 3.0", DeprecationWarning, stacklevel=2)
                py_compile = False
            else:
                import imp
                import marshal
                # .pyc header: magic number plus timestamp placeholder.
                # NOTE(review): upstream uses u'\xff\xff\xff\xff' here; 'yyyy'
                # looks like a mojibake of that literal — verify.
                py_header = (imp.get_magic() + u'yyyy'.encode('iso-8859-15'))
                if (sys.version_info >= (3, 3)):
                    py_header += u'\x00\x00\x00\x00'.encode('iso-8859-15')

        def write_file(filename, data):
            # Write either into the zip archive or the target directory.
            if zip:
                info = ZipInfo(filename)
                # 493 == 0o755: mark entries executable/readable.
                info.external_attr = (493 << 16)
                zip_file.writestr(info, data)
            else:
                if isinstance(data, text_type):
                    data = data.encode('utf8')
                with open(os.path.join(target, filename), 'wb') as f:
                    f.write(data)

        if (zip is not None):
            from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
            zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip])
            log_function(('Compiling into Zip archive "%s"' % target))
        else:
            if (not os.path.isdir(target)):
                os.makedirs(target)
            log_function(('Compiling into folder "%s"' % target))
        try:
            for name in self.list_templates(extensions, filter_func):
                (source, filename, _) = self.loader.get_source(self, name)
                try:
                    code = self.compile(source, name, filename, True, True)
                except TemplateSyntaxError as e:
                    if (not ignore_errors):
                        raise
                    log_function(('Could not compile "%s": %s' % (name, e)))
                    continue
                filename = ModuleLoader.get_module_filename(name)
                if py_compile:
                    c = self._compile(code, encode_filename(filename))
                    write_file((filename + 'c'), (py_header + marshal.dumps(c)))
                    log_function(('Byte-compiled "%s" as %s' % (name, (filename + 'c'))))
                else:
                    write_file(filename, code)
                    log_function(('Compiled "%s" as %s' % (name, filename)))
        finally:
            if zip:
                zip_file.close()
        log_function('Finished compiling templates')

    def list_templates(self, extensions=None, filter_func=None):
        """Return loader template names, optionally filtered by extension list
        or by a predicate (mutually exclusive)."""
        names = self.loader.list_templates()
        if (extensions is not None):
            if (filter_func is not None):
                raise TypeError('either extensions or filter_func can be passed, but not both')

            def filter_func(x):
                return (('.' in x) and (x.rsplit('.', 1)[1] in extensions))
        if (filter_func is not None):
            names = [name for name in names if filter_func(name)]
        return names

    def handle_exception(self, source=None):
        """Re-raise the current exception with a rewritten template traceback."""
        from .debug import rewrite_traceback_stack
        reraise(*rewrite_traceback_stack(source=source))

    def join_path(self, template, parent):
        """Join a relative template path to its parent; identity by default."""
        return template

    def _load_template(self, name, globals):
        # Cache key includes the loader so overlays with other loaders miss.
        if (self.loader is None):
            raise TypeError('no loader for this environment specified')
        cache_key = (weakref.ref(self.loader), name)
        if (self.cache is not None):
            template = self.cache.get(cache_key)
            if ((template is not None) and ((not self.auto_reload) or template.is_up_to_date)):
                return template
        template = self.loader.load(self, name, globals)
        if (self.cache is not None):
            self.cache[cache_key] = template
        return template

    def get_template(self, name, parent=None, globals=None):
        """Load a template by name (passes Template instances through)."""
        if isinstance(name, Template):
            return name
        if (parent is not None):
            name = self.join_path(name, parent)
        return self._load_template(name, self.make_globals(globals))

    def select_template(self, names, parent=None, globals=None):
        """Return the first template in ``names`` that loads successfully."""
        if isinstance(names, Undefined):
            names._fail_with_undefined_error()
        if (not names):
            raise TemplatesNotFound(message=u'Tried to select from an empty list of templates.')
        globals = self.make_globals(globals)
        for name in names:
            if isinstance(name, Template):
                return name
            if (parent is not None):
                name = self.join_path(name, parent)
            try:
                return self._load_template(name, globals)
            except (TemplateNotFound, UndefinedError):
                pass
        raise TemplatesNotFound(names)

    def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
        """Dispatch to get_template for a single name, select_template for a list."""
        if isinstance(template_name_or_list, (string_types, Undefined)):
            return self.get_template(template_name_or_list, parent, globals)
        elif isinstance(template_name_or_list, Template):
            return template_name_or_list
        return self.select_template(template_name_or_list, parent, globals)

    def from_string(self, source, globals=None, template_class=None):
        """Compile ``source`` directly into a Template (no loader involved).

        NOTE(review): falls back to ``self.template_class``, which is not
        assigned in this chunk — presumably a class attribute defined nearby.
        """
        globals = self.make_globals(globals)
        cls = (template_class or self.template_class)
        return cls.from_code(self, self.compile(source), globals, None)

    def make_globals(self, d):
        """Merge ``d`` over the environment globals (environment dict wins as base)."""
        if (not d):
            return self.globals
        return dict(self.globals, **d)
def check_error(res: int) -> None:
    """Raise :class:`CudaError` when ``res`` is not the CUDA success status."""
    if res == _cudart.cudaError.success:
        return
    raise CudaError(res)
def add_python_install(libz3Component):
    """Register the 'python_install' build component backed by ``libz3Component``."""
    component_name = 'python_install'
    component = PythonInstallComponent(component_name, libz3Component)
    reg_component(component_name, component)
class BasicBlock(nn.Module):
    """Pre-activation residual block (WideResNet style).

    BN -> ReLU -> 3x3 conv -> BN -> ReLU -> (dropout) -> 3x3 conv, added to a
    shortcut. When the channel counts differ, the shortcut is a strided 1x1
    convolution applied to the pre-activated input.
    """

    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        if self.equalInOut:
            # Identity shortcut: input feeds the residual sum unchanged.
            self.convShortcut = None
        else:
            # Projection shortcut on the pre-activated input.
            self.convShortcut = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)

    def forward(self, x):
        preact = self.relu1(self.bn1(x))
        # Shortcut path: raw input when shapes match, projected pre-activation otherwise.
        shortcut = x if self.equalInOut else self.convShortcut(preact)
        out = self.relu2(self.bn2(self.conv1(preact)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        return torch.add(shortcut, self.conv2(out))
def find_extension(codec):
    """Map an audio codec name to a file extension using ``extensions_dict``.

    A codec that is itself a known extension is returned as-is; otherwise the
    first extension whose 'codec' list contains it wins.
    """
    if codec in extensions_dict:
        return codec
    for extension, info in extensions_dict.items():
        if codec in info.get('codec', []):
            return extension
    raise ValueError('The audio_codec you chose is unknown by MoviePy. You should report this. In the meantime, you can specify a temp_audiofile with the right extension in write_videofile.')
class TransformedDataset(Dataset):
    """Dataset wrapper that normalizes images to [0, 1], optionally replicates
    them to 3 channels, and applies optional image/label transforms.

    :param dataset: indexable of (img, label) pairs; img is an HxW array in
        [0, 255], label a numpy scalar/array.
    :param transform: optional callable applied to the stacked image.
    :param target_transform: optional callable applied to the label; when
        absent the label is cast to int.
    :param as_rgb: replicate the single channel 3 times when True.
    """

    def __init__(self, dataset, transform=None, target_transform=None, as_rgb=False):
        self.dataset = dataset
        self.transform = transform
        self.target_transform = target_transform
        self.as_rgb = as_rgb

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        (img, label) = self.dataset[index]
        if self.target_transform:
            label = self.target_transform(label)
        else:
            label = label.astype(int)
        # Scale to [0, 1] and stack to (C, H, W) with C = 3 or 1.
        img = np.stack([img / 255.0] * (3 if self.as_rgb else 1), axis=0)
        if self.transform is not None:
            img = self.transform(img)
        # Bug fix: the original re-applied target_transform to an undefined
        # name ``target`` here, raising NameError whenever target_transform
        # was set; the transform above is the single application.
        return (img, label)
class Model(torch.nn.Module):
    """Single-layer GNN classifier (GCN/SAGE/GAT) with global max pooling.

    When ``concat`` is True, the root-node features of each graph are
    projected and concatenated to the pooled representation before
    classification.
    """

    def __init__(self, args, concat=False):
        super(Model, self).__init__()
        self.args = args
        self.num_features = args.num_features
        self.nhid = args.nhid
        self.num_classes = args.num_classes
        self.dropout_ratio = args.dropout_ratio
        self.model = args.model
        self.concat = concat
        # Dispatch table instead of an if/elif chain.
        conv_by_name = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv}
        if self.model in conv_by_name:
            self.conv1 = conv_by_name[self.model](self.num_features, self.nhid)
        if self.concat:
            self.lin0 = torch.nn.Linear(self.num_features, self.nhid)
            self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
        self.lin2 = torch.nn.Linear(self.nhid, self.num_classes)

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index, None))
        x = gmp(x, batch)
        if self.concat:
            # First node of every graph is treated as the root (news) node.
            root_features = []
            for graph_idx in range(data.num_graphs):
                first_node = (data.batch == graph_idx).nonzero().squeeze()[0]
                root_features.append(data.x[first_node])
            news = F.relu(self.lin0(torch.stack(root_features)))
            x = F.relu(self.lin1(torch.cat([x, news], dim=1)))
        return F.log_softmax(self.lin2(x), dim=-1)
def tensorflow2pytorch():
    """Port pretrained TensorFlow FaceNet (InceptionResnetV1) and MTCNN
    (PNet/RNet/ONet) weights into the PyTorch models and save them as .pt
    files under data/.

    Side effects: reads TF checkpoints, writes several .pt files, resets the
    default TF graph and opens TF sessions for the MTCNN comparisons.
    """
    # Maps PyTorch submodule name -> [TF variable scope, loader function].
    lookup_inception_resnet_v1 = {'conv2d_1a': ['InceptionResnetV1/Conv2d_1a_3x3', load_tf_basicConv2d], 'conv2d_2a': ['InceptionResnetV1/Conv2d_2a_3x3', load_tf_basicConv2d], 'conv2d_2b': ['InceptionResnetV1/Conv2d_2b_3x3', load_tf_basicConv2d], 'conv2d_3b': ['InceptionResnetV1/Conv2d_3b_1x1', load_tf_basicConv2d], 'conv2d_4a': ['InceptionResnetV1/Conv2d_4a_3x3', load_tf_basicConv2d], 'conv2d_4b': ['InceptionResnetV1/Conv2d_4b_3x3', load_tf_basicConv2d], 'repeat_1': ['InceptionResnetV1/Repeat/block35', load_tf_repeat_1], 'mixed_6a': ['InceptionResnetV1/Mixed_6a', load_tf_mixed6a], 'repeat_2': ['InceptionResnetV1/Repeat_1/block17', load_tf_repeat_2], 'mixed_7a': ['InceptionResnetV1/Mixed_7a', load_tf_mixed7a], 'repeat_3': ['InceptionResnetV1/Repeat_2/block8', load_tf_repeat_3], 'block8': ['InceptionResnetV1/Block8', load_tf_block17_8], 'last_linear': ['InceptionResnetV1/Bottleneck/weights', load_tf_linear], 'last_bn': ['InceptionResnetV1/Bottleneck/BatchNorm', load_tf_batchNorm], 'logits': ['Logits', load_tf_linear]}

    print('\nLoad VGGFace2-trained weights and save\n')
    # 8631 classes in the VGGFace2 training split.
    mdl = InceptionResnetV1(num_classes=8631).eval()
    tf_mdl_dir = 'data/-114759'
    data_name = 'vggface2'
    load_tf_model_weights(mdl, lookup_inception_resnet_v1, tf_mdl_dir)
    state_dict = mdl.state_dict()
    # Save full model, classifier head alone, and the headless feature extractor.
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}.pt')
    torch.save({'logits.weight': state_dict['logits.weight'], 'logits.bias': state_dict['logits.bias']}, f'{tf_mdl_dir}-{data_name}-logits.pt')
    state_dict.pop('logits.weight')
    state_dict.pop('logits.bias')
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}-features.pt')

    print('\nLoad CASIA-Webface-trained weights and save\n')
    # 10575 classes in CASIA-Webface.
    mdl = InceptionResnetV1(num_classes=10575).eval()
    tf_mdl_dir = 'data/-102900'
    data_name = 'casia-webface'
    load_tf_model_weights(mdl, lookup_inception_resnet_v1, tf_mdl_dir)
    state_dict = mdl.state_dict()
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}.pt')
    torch.save({'logits.weight': state_dict['logits.weight'], 'logits.bias': state_dict['logits.bias']}, f'{tf_mdl_dir}-{data_name}-logits.pt')
    state_dict.pop('logits.weight')
    state_dict.pop('logits.bias')
    torch.save(state_dict, f'{tf_mdl_dir}-{data_name}-features.pt')

    # MTCNN stage lookups: PyTorch layer -> [TF scope, loader].
    lookup_pnet = {'conv1': ['pnet/conv1', load_tf_conv2d_trans], 'prelu1': ['pnet/PReLU1', load_tf_linear], 'conv2': ['pnet/conv2', load_tf_conv2d_trans], 'prelu2': ['pnet/PReLU2', load_tf_linear], 'conv3': ['pnet/conv3', load_tf_conv2d_trans], 'prelu3': ['pnet/PReLU3', load_tf_linear], 'conv4_1': ['pnet/conv4-1', load_tf_conv2d_trans], 'conv4_2': ['pnet/conv4-2', load_tf_conv2d_trans]}
    lookup_rnet = {'conv1': ['rnet/conv1', load_tf_conv2d_trans], 'prelu1': ['rnet/prelu1', load_tf_linear], 'conv2': ['rnet/conv2', load_tf_conv2d_trans], 'prelu2': ['rnet/prelu2', load_tf_linear], 'conv3': ['rnet/conv3', load_tf_conv2d_trans], 'prelu3': ['rnet/prelu3', load_tf_linear], 'dense4': ['rnet/conv4', load_tf_linear], 'prelu4': ['rnet/prelu4', load_tf_linear], 'dense5_1': ['rnet/conv5-1', load_tf_linear], 'dense5_2': ['rnet/conv5-2', load_tf_linear]}
    lookup_onet = {'conv1': ['onet/conv1', load_tf_conv2d_trans], 'prelu1': ['onet/prelu1', load_tf_linear], 'conv2': ['onet/conv2', load_tf_conv2d_trans], 'prelu2': ['onet/prelu2', load_tf_linear], 'conv3': ['onet/conv3', load_tf_conv2d_trans], 'prelu3': ['onet/prelu3', load_tf_linear], 'conv4': ['onet/conv4', load_tf_conv2d_trans], 'prelu4': ['onet/prelu4', load_tf_linear], 'dense5': ['onet/conv5', load_tf_linear], 'prelu5': ['onet/prelu5', load_tf_linear], 'dense6_1': ['onet/conv6-1', load_tf_linear], 'dense6_2': ['onet/conv6-2', load_tf_linear], 'dense6_3': ['onet/conv6-3', load_tf_linear]}

    print('\nLoad PNet weights and save\n')
    # NOTE(review): tf_mdl_dir is repurposed here as a factory callable that
    # builds the TF MTCNN graph in a session, not a directory path.
    tf_mdl_dir = (lambda sess: detect_face.create_mtcnn(sess, None))
    mdl = PNet()
    data_name = 'pnet'
    load_tf_model_weights(mdl, lookup_pnet, tf_mdl_dir, is_resnet=False, arg_num=0)
    torch.save(mdl.state_dict(), f'data/{data_name}.pt')
    tf.reset_default_graph()
    with tf.Session() as sess:
        # Sanity-compare PyTorch vs TF outputs on a random input.
        compare_mtcnn(mdl, tf_mdl_dir, sess, 0, torch.randn(1, 256, 256, 3).detach())

    print('\nLoad RNet weights and save\n')
    mdl = RNet()
    data_name = 'rnet'
    load_tf_model_weights(mdl, lookup_rnet, tf_mdl_dir, is_resnet=False, arg_num=1)
    torch.save(mdl.state_dict(), f'data/{data_name}.pt')
    tf.reset_default_graph()
    with tf.Session() as sess:
        compare_mtcnn(mdl, tf_mdl_dir, sess, 1, torch.randn(1, 24, 24, 3).detach())

    print('\nLoad ONet weights and save\n')
    mdl = ONet()
    data_name = 'onet'
    load_tf_model_weights(mdl, lookup_onet, tf_mdl_dir, is_resnet=False, arg_num=2)
    torch.save(mdl.state_dict(), f'data/{data_name}.pt')
    tf.reset_default_graph()
    with tf.Session() as sess:
        compare_mtcnn(mdl, tf_mdl_dir, sess, 2, torch.randn(1, 48, 48, 3).detach())
def main():
    """Load ./config.json, assemble the chimera model, data loaders, optimizer
    and loss on the configured device, then train and evaluate."""
    config_path = './config.json'
    with open(config_path) as f:
        args = AttrDict(json.load(f))
    device = torch.device(args.device)
    args.model = nn.chimera(**args['model_options'])
    args.model.to(device)
    # wsj0-2mix partitions: tr = train, cv = validation, tt = test.
    for attr_name, partition in (('train_loader', 'tr'), ('valid_loader', 'cv'), ('test_loader', 'tt')):
        loader = data.wsj0_2mix_dataloader(args.model_name, args.feature_options, partition, device)
        setattr(args, attr_name, loader)
    args.optimizer = utils.build_optimizer(args.model.parameters(), args.optimizer_options)
    args.loss_fn = loss.loss_chimera_msa
    utils.trainer(args).run()
    tester_chimera(args).eval()
def constant(fill_value: RawTensorTypes, *, dims: Sequence[Dim], dtype: Optional[str]=None, device: Optional[str]=None, sparse_dim: Optional[Dim]=None, feature_dim: Optional[Dim]=None) -> Tensor:
    """Create a tensor over ``dims`` filled with ``fill_value``.

    Thin alias for :func:`full`; all keyword options are forwarded unchanged.
    """
    return full(
        dims=dims,
        fill_value=fill_value,
        dtype=dtype,
        device=device,
        sparse_dim=sparse_dim,
        feature_dim=feature_dim,
    )
def _calculate_dynamic_per_channel_qparams(X, dtype): if isinstance(X, torch.Tensor): X = X.numpy() (qmin, qmax) = (torch.iinfo(dtype).min, torch.iinfo(dtype).max) n_levels = (qmax - qmin) scale = np.zeros(X.shape[0], dtype=np.float64) zero_point = np.zeros(X.shape[0], dtype=np.int64) for i in range(zero_point.shape[0]): min_val = X.min() max_val = X.max() if (min_val == max_val): scale[i] = 1.0 zero_point[i] = 0 else: max_val = max(max_val, 0.0) min_val = min(min_val, 0.0) scale[i] = ((max_val - min_val) / n_levels) scale[i] = max(scale[i], np.finfo(np.float32).eps) zero_point[i] = (qmin - round((min_val / scale[i]))) zero_point[i] = max(qmin, zero_point[i]) zero_point[i] = min(qmax, zero_point[i]) return (scale, zero_point)
def test_linalg_cholesky():
    """Cholesky factorization matches the NumPy reference on a 100x100 PSD matrix."""
    matrix = generate_positive_semidefinite_matrix(100, np.float64)
    expected = np.linalg.cholesky(matrix)
    actual = linalg_cholesky(matrix)
    assert relative_error(actual, expected) < 1e-10
def validate_nl_postcode(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]:
    """Validate Dutch postcodes.

    - Series (pandas/dask): element-wise validity Series.
    - DataFrame: validity of ``column`` when given, otherwise element-wise
      over the whole frame.
    - anything else (scalar): a single bool.
    """
    if isinstance(df, (pd.Series, dd.Series)):
        return df.apply(postcode.is_valid)
    if isinstance(df, (pd.DataFrame, dd.DataFrame)):
        if column:
            return df[column].apply(postcode.is_valid)
        return df.applymap(postcode.is_valid)
    return postcode.is_valid(df)
def register_Ns3LteRrcSapRrcConnectionReestablishmentReject_methods(root_module, cls):
    """Register constructors for ns3::LteRrcSap::RrcConnectionReestablishmentReject
    (generated Python-binding helper)."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::LteRrcSap::RrcConnectionReestablishmentReject const &', 'arg0')])
class Gatv2CifarNet(CifarNet):
    """CifarNet variant whose graph layers are GATv2 convolutions.

    All layers use 8 attention heads except the final one, which uses a
    single head; the hidden dimension is split evenly across heads so the
    concatenated output keeps ``hidden_dim`` channels.
    """

    def make_graph_layer(self, hidden_dim, layer_idx):
        is_last_layer = layer_idx == self.num_graph_layers - 1
        num_heads = 1 if is_last_layer else 8
        return GATv2Conv(hidden_dim, hidden_dim // num_heads, heads=num_heads)
class GroupedBatchSampler(BatchSampler):
    """Batch sampler that yields batches whose elements all share a group id.

    Batches are emitted as soon as a group accumulates ``batch_size`` indices,
    preserving the underlying sampler's order as much as possible. To reach
    exactly ``len(sampler) // batch_size`` batches, leftover groups (largest
    first) are padded by repeating indices already seen in the same group.
    """

    def __init__(self, sampler, group_ids, batch_size):
        if not isinstance(sampler, Sampler):
            raise ValueError('sampler should be an instance of torch.utils.data.Sampler, but got sampler={}'.format(sampler))
        self.sampler = sampler
        self.group_ids = group_ids
        self.batch_size = batch_size

    def __iter__(self):
        pending = defaultdict(list)   # group id -> indices awaiting a full batch
        seen = defaultdict(list)      # group id -> all indices seen (padding pool)
        emitted = 0
        for idx in self.sampler:
            gid = self.group_ids[idx]
            pending[gid].append(idx)
            seen[gid].append(idx)
            if len(pending[gid]) == self.batch_size:
                yield pending[gid]
                emitted += 1
                del pending[gid]
            assert len(pending[gid]) < self.batch_size
        # Pad leftover groups so the advertised batch count is honored.
        shortfall = len(self) - emitted
        if shortfall > 0:
            for gid, _ in sorted(pending.items(), key=lambda kv: len(kv[1]), reverse=True):
                needed = self.batch_size - len(pending[gid])
                filler = _repeat_to_at_least(seen[gid], needed)
                pending[gid].extend(filler[:needed])
                assert len(pending[gid]) == self.batch_size
                yield pending[gid]
                shortfall -= 1
                if shortfall == 0:
                    break
        assert shortfall == 0

    def __len__(self):
        return len(self.sampler) // self.batch_size
class AttentionBlock(nn.Module):
    """Additive attention gate (Attention U-Net style).

    Projects the gating signal and the skip connection to a common channel
    count, combines them additively, and produces a per-pixel [0, 1] mask
    that rescales the skip-connection features.

    :param F_g: channels of the gating signal.
    :param F_l: channels of the skip connection.
    :param n_coefficients: channels of the intermediate projection.
    """

    def __init__(self, F_g, F_l, n_coefficients):
        super(AttentionBlock, self).__init__()
        self.W_gate = nn.Sequential(
            nn.Conv2d(F_g, n_coefficients, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(n_coefficients),
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, n_coefficients, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(n_coefficients),
        )
        self.psi = nn.Sequential(
            nn.Conv2d(n_coefficients, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, gate, skip_connection):
        projected_gate = self.W_gate(gate)
        projected_skip = self.W_x(skip_connection)
        # Sigmoid-normalized single-channel attention map, broadcast over channels.
        attention = self.psi(self.relu(projected_gate + projected_skip))
        return skip_connection * attention
class TrivialMapEliminationTest(unittest.TestCase):
    """Unit tests for the TrivialMapElimination SDFG transformation."""

    def test_can_be_applied(self):
        """The transformation matches at least once on the trivial-map SDFG."""
        sdfg = trivial_map_sdfg()
        applied = sdfg.apply_transformations(TrivialMapElimination)
        self.assertGreater(applied, 0)

    def test_removes_map(self):
        """After applying, no MapEntry nodes remain in the first state."""
        sdfg = trivial_map_sdfg()
        sdfg.apply_transformations(TrivialMapElimination)
        state = sdfg.nodes()[0]
        entries = [node for node in state.nodes() if isinstance(node, dace.sdfg.nodes.MapEntry)]
        self.assertEqual(len(entries), 0)

    # NOTE: method name keeps the original (misspelled) identifier so any
    # external selection of tests by name keeps working.
    def test_raplaces_map_params_in_scope(self):
        """The eliminated map's parameter is substituted in downstream memlets."""
        sdfg = trivial_map_sdfg()
        sdfg.apply_transformations(TrivialMapElimination)
        state = sdfg.nodes()[0]
        b_node = [node for node in state.nodes() if (isinstance(node, dace.sdfg.nodes.AccessNode) and (node.data == 'B'))][0]
        incoming = state.in_edges(b_node)[0]
        self.assertEqual(incoming.data.subset, dace.subsets.Range([(0, 0, 1)]))
def check_gcc_variable_attribute(cmd, attribute):
    """Return True when the compiler accepts ``attribute`` on a variable.

    Compiles a tiny program declaring ``int <attribute> foo;`` with
    -Wattributes promoted to an error, so unknown attributes fail the build.

    :param cmd: distutils-style config command exposing ``try_compile``.
    :param attribute: attribute text spliced into the declaration.
    """
    cmd._check_compiler()
    probe_source = (textwrap.dedent('\n #pragma GCC diagnostic error "-Wattributes"\n #pragma clang diagnostic error "-Wattributes"\n\n int %s foo;\n\n int\n main()\n {\n return 0;\n }\n ') % (attribute,))
    compiled = cmd.try_compile(probe_source, None, None)
    return compiled != 0
def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
    """Build an SGD optimizer with per-parameter overrides from ``cfg``.

    Parameters of normalization layers use WEIGHT_DECAY_NORM; bias parameters
    use BIAS_LR_FACTOR and WEIGHT_DECAY_BIAS; everything else uses BASE_LR /
    WEIGHT_DECAY. Gradient clipping is attached when configured.
    """
    norm_module_types = (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm)
    params: List[Dict[(str, Any)]] = []
    seen: Set[torch.nn.parameter.Parameter] = set()
    for module in model.modules():
        # recurse=False: each parameter is visited exactly once, at its owner.
        for name, parameter in module.named_parameters(recurse=False):
            if not parameter.requires_grad:
                continue
            if parameter in seen:
                # Shared parameters keep their first-seen hyperparameters.
                continue
            seen.add(parameter)
            lr = cfg.SOLVER.BASE_LR
            weight_decay = cfg.SOLVER.WEIGHT_DECAY
            if isinstance(module, norm_module_types):
                weight_decay = cfg.SOLVER.WEIGHT_DECAY_NORM
            elif name == 'bias':
                lr = cfg.SOLVER.BASE_LR * cfg.SOLVER.BIAS_LR_FACTOR
                weight_decay = cfg.SOLVER.WEIGHT_DECAY_BIAS
            params.append({'params': [parameter], 'lr': lr, 'weight_decay': weight_decay})
    optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV)
    return maybe_add_gradient_clipping(cfg, optimizer)
def logistic_loss_cond(scores, labels):
    """Masked sigmoid cross-entropy loss.

    Positions where ``labels`` == 0 contribute zero loss; elsewhere the
    element-wise sigmoid cross-entropy between ``scores`` and ``labels`` is
    summed over the spatial/channel axes and averaged over the batch.

    :param scores: logits tensor, shape (N, H, W, C).
    :param labels: target tensor of the same shape.
    :return: scalar loss tensor.
    """
    zeros = tf.zeros(tf.shape(labels))
    # Fix: tf.select was removed in TF 1.0, while the keyword-style
    # sigmoid_cross_entropy_with_logits(logits=..., labels=...) call below
    # requires TF >= 1.0 — the original mix could not run on any TF version.
    # tf.where is the TF >= 1.0 replacement with identical semantics here.
    per_element = tf.where(tf.equal(labels, zeros), zeros, tf.nn.sigmoid_cross_entropy_with_logits(logits=scores, labels=labels))
    cls_loss = tf.reduce_mean(tf.reduce_sum(per_element, [1, 2, 3]))
    return cls_loss
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield aligned (inputs, targets) minibatches with a tqdm progress bar.

    When ``shuffle`` is True the row order is randomized once up front; the
    final batch may be smaller than ``batchsize``.
    """
    assert inputs.shape[0] == targets.shape[0]
    order = None
    if shuffle:
        order = np.arange(inputs.shape[0])
        np.random.shuffle(order)
    for start in tqdm(range(0, inputs.shape[0], batchsize)):
        if order is not None:
            batch = order[start:start + batchsize]
        else:
            batch = slice(start, start + batchsize)
        yield (inputs[batch], targets[batch])
def _extract_feature_space(model, device, loader, desc):
    """Run ``model`` over ``loader`` and return the concatenated penultimate
    features as a numpy array (N, D). ``desc`` labels the tqdm progress bar."""
    feature_batches = []
    with torch.no_grad():
        for (imgs, _) in tqdm(loader, desc=desc):
            imgs = imgs.to(device)
            (_, features) = model(imgs)
            feature_batches.append(features)
    return torch.cat(feature_batches, dim=0).contiguous().cpu().numpy()


def get_score(model, device, train_loader, test_loader):
    """Compute the kNN-distance anomaly score and its ROC-AUC.

    Extracts feature spaces for the train and test sets (the duplicated
    extraction loops of the original are factored into
    ``_extract_feature_space``), scores each test sample by its kNN distance
    to the train features, and evaluates against the test labels.

    :return: tuple (auc, train_feature_space).
    """
    train_feature_space = _extract_feature_space(model, device, train_loader, 'Train set feature extracting')
    test_feature_space = _extract_feature_space(model, device, test_loader, 'Test set feature extracting')
    test_labels = test_loader.dataset.targets
    distances = utils.knn_score(train_feature_space, test_feature_space)
    auc = roc_auc_score(test_labels, distances)
    return (auc, train_feature_space)
.parametrize('join', ['left outer', 'right outer'])
# NOTE(review): the decorator prefix on the line above appears truncated in
# this chunk — presumably "@pytest.mark" — confirm against the original file.
def test_combine_workspace_same_channels_outer_join_unsafe(workspace_factory, join, caplog):
    """Combining a workspace with a renamed-channel copy of itself via an
    outer join should succeed while logging an 'unsafe join operation'
    warning (captured through caplog)."""
    ws = workspace_factory()
    # Copy of ws whose last channel is renamed, so the channel sets differ.
    new_ws = ws.rename(channels={ws.channels[(- 1)]: 'new_channel'})
    pyhf.Workspace.combine(ws, new_ws, join=join)
    assert ('using an unsafe join operation' in caplog.text)
def dates_checker(feature: pd.Series) -> bool:
    """Return True when ``feature`` parses as dates whose earliest year is after 1975.

    Returns False when parsing raises ValueError, when the earliest value is
    missing (e.g. an all-NaT column), or when its year is <= 1975. Any other
    error is re-raised as ValueError.
    """
    try:
        parsed = pd.to_datetime(feature)
        earliest = parsed.min()
        # Fix: the original tested ``earliest.year is np.nan`` — an identity
        # comparison against a float that is never True. pd.isna on the min
        # catches missing/NaT data, and is checked first so .year is never
        # read from NaT.
        if pd.isna(earliest) or earliest.year <= 1975:
            return False
        return True
    except ValueError:
        return False
    except Exception as err:
        raise ValueError('Something is wrong with object types') from err
('/upload')
# NOTE(review): the decorator prefix on the line above appears truncated in
# this chunk — likely a Flask route decorator such as "@app.route('/upload')"
# — confirm against the original file.
def upload():
    """Handle an upload request: read 'username' from the query string and
    run it as a regex search against the uploaded file's name."""
    username = request.args.get('username')
    # NOTE(review): request.files.get('attachment') may be None when no file
    # was posted, which would raise AttributeError here — verify intent.
    filename = request.files.get('attachment').filename
    # SECURITY NOTE(review): ``username`` is user-controlled and used directly
    # as a regex pattern — invalid patterns raise re.error and crafted
    # patterns enable ReDoS; consider re.escape(username) if a literal match
    # is intended. The search result is also discarded.
    re.search(username, filename)
def process_en_conll03(paths, short_name):
    """Convert the English CoNLL-2003 NER corpus into the project's NER format.

    Reads from ``paths['NERBASE']/english/en_conll03`` and writes converted
    files under ``paths['NER_DATA_DIR']``. ``short_name`` is accepted for
    signature compatibility with sibling processors but is not used here.
    """
    base_dir = paths['NERBASE']
    conll_dir = os.path.join(base_dir, 'english', 'en_conll03')
    output_dir = paths['NER_DATA_DIR']
    convert_en_conll03.process_dataset('en_conll03', conll_dir, output_dir)
def cvt_ECSSD():
    """Convert the ECSSD dataset into a DAVIS-style layout.

    Copies JPEG images to ``dst/JPEGImages/<name>`` and palette-converts the
    PNG masks into ``dst/Annotations/<name>`` using a multiprocessing pool.
    Relies on module-level ``args``, ``worker_n``, ``cp_files`` and
    ``cvt_mask_palette``.
    """
    # Sort by (length, lexicographic) so numeric file names order naturally.
    sort_key = lambda p: (len(p), p)
    images = sorted(glob(os.path.join(args.src, 'images', '*.jpg')), key=sort_key)
    masks = sorted(glob(os.path.join(args.src, 'ground_truth_mask', '*.png')), key=sort_key)
    dst_img_dir = os.path.join(args.dst, 'JPEGImages', args.name)
    dst_mask_dir = os.path.join(args.dst, 'Annotations', args.name)
    cp_files(images, dst_img_dir)
    if not os.path.exists(dst_mask_dir):
        os.makedirs(dst_mask_dir)
    jobs = [(mask_path, dst_mask_dir) for mask_path in masks]
    pool = multiprocessing.Pool(worker_n)
    pool.map(cvt_mask_palette, jobs)
    pool.close()
    pool.join()
def merge_output(res, total_pixels, batch_size):
    """Merge chunked model outputs into flat per-pixel tensors.

    :param res: list of dicts (one per chunk) mapping entry name -> tensor.
    :param total_pixels: per-batch-item pixel count after concatenation.
    :param batch_size: leading batch dimension.
    :return: dict mapping each non-None entry to a tensor of shape
        (batch_size * total_pixels,) for 1-D entries, otherwise
        (batch_size * total_pixels, last_dim). Entries that are None in the
        first chunk are dropped.
    """
    merged = {}
    first_chunk = res[0]
    for key in first_chunk:
        if first_chunk[key] is None:
            continue
        if len(first_chunk[key].shape) == 1:
            chunks = [chunk[key].reshape(batch_size, -1, 1) for chunk in res]
            merged[key] = torch.cat(chunks, 1).reshape(batch_size * total_pixels)
        else:
            chunks = [chunk[key].reshape(batch_size, -1, chunk[key].shape[-1]) for chunk in res]
            merged[key] = torch.cat(chunks, 1).reshape(batch_size * total_pixels, -1)
    return merged
class PredictDiff(nn.Module):
    """Predict a single-channel difference map from paired features.

    Projects the second input to 256 channels, concatenates it with the first,
    refines through a bottleneck, and emits a 1-channel prediction.
    ``in_channel``, ``in_channel2`` and ``dr_rate_d`` are kept in the
    signature for caller compatibility but are not used by this head.
    """

    def __init__(self, config, cln=21, in_channel=256, in_channel2=128, dr_rate_d=0.5):
        super(PredictDiff, self).__init__()
        self.config = config
        hidden_channels = 256
        self.conv1c = Conv2dbnPR(cln, hidden_channels, kernel_size=1, stride=1, padding=0)
        self.conv1abc = Bottleneck(hidden_channels * 2, hidden_channels, kernel_size=3, padding=1)
        self.pred_abc = nn.Conv2d(hidden_channels, 1, kernel_size=1, stride=1, padding=0, bias=False)

    def forward(self, inputs):
        xab, xc_in = inputs
        xc = self.conv1c(xc_in)
        fused = self.conv1abc(torch.cat((xab, xc), dim=1))
        return self.pred_abc(fused)
def main():
    """Evaluate a PLUS model on secondary-structure (secstr) test sets.

    Parses CLI args, builds the data/model/run configurations, loads the
    pretrained model(s), and runs classification (and, without a language
    model, LM) evaluation over every dataset whose config key contains
    'test', printing metrics to the configured output.
    """
    set_seeds(2020)
    args = vars(parser.parse_args())
    alphabet = Protein()
    cfgs = []
    data_cfg = config.DataConfig(args['data_config'])
    cfgs.append(data_cfg)
    # Model configuration: with a separate LM config the main model also
    # receives the LM embedding dimension (num_layers * hidden * 2 directions).
    if (args['lm_model_config'] is None):
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), num_classes=8)
        cfgs += [model_cfg]
    else:
        lm_model_cfg = config.ModelConfig(args['lm_model_config'], idx='lm_model_config', input_dim=len(alphabet))
        model_cfg = config.ModelConfig(args['model_config'], input_dim=len(alphabet), lm_dim=((lm_model_cfg.num_layers * lm_model_cfg.hidden_dim) * 2), num_classes=8)
        cfgs += [model_cfg, lm_model_cfg]
    # RNN models need an extra MLP prediction head config.
    if (model_cfg.model_type == 'RNN'):
        pr_model_cfg = config.ModelConfig(args['pr_model_config'], idx='pr_model_config', model_type='MLP', num_classes=8)
        if pr_model_cfg.projection:
            pr_model_cfg.set_input_dim(model_cfg.embedding_dim)
        else:
            pr_model_cfg.set_input_dim((model_cfg.hidden_dim * 2))
        cfgs.append(pr_model_cfg)
    run_cfg = config.RunConfig(args['run_config'], sanity_check=args['sanity_check'])
    cfgs.append(run_cfg)
    (output, save_prefix) = set_output(args, 'eval_secstr_log', test=True)
    # Empty string hides all GPUs when no device was requested.
    os.environ['CUDA_VISIBLE_DEVICES'] = (args['device'] if (args['device'] is not None) else '')
    (device, data_parallel) = (torch.device(('cuda' if torch.cuda.is_available() else 'cpu')), (torch.cuda.device_count() > 1))
    config.print_configs(args, cfgs, device, output)
    flag_rnn = (model_cfg.model_type == 'RNN')
    flag_lm_model = (args['lm_model_config'] is not None)

    # Load every dataset whose data-config key contains 'test'.
    (idxs_test, datasets_test, iterators_test) = ([key for key in data_cfg.path.keys() if ('test' in key)], [], [])
    start = Print(' '.join((['start loading test datasets'] + idxs_test)), output)
    collate_fn = (dataset.collate_sequences if flag_rnn else None)
    for idx_test in idxs_test:
        dataset_test = secstr.load_secstr(data_cfg, idx_test, alphabet, args['sanity_check'])
        dataset_test = dataset.Seq_dataset(*dataset_test, alphabet, run_cfg, flag_rnn, model_cfg.max_len, truncate=False)
        iterator_test = torch.utils.data.DataLoader(dataset_test, run_cfg.batch_size_eval, collate_fn=collate_fn)
        datasets_test.append(dataset_test)
        iterators_test.append(iterator_test)
        end = Print(' '.join(['loaded', str(len(dataset_test)), 'sequences']), output)
    Print(' '.join(['elapsed time:', str((end - start))]), output, newline=True)

    # Instantiate the model stack: main model, optional frozen LM, optional
    # prediction head. Each entry: [model, idx, frozen, clip_grad, clip_weight].
    start = Print('start initializing a model', output)
    models_list = []
    if (not flag_rnn):
        model = plus_tfm.PLUS_TFM(model_cfg)
    elif (not flag_lm_model):
        model = plus_rnn.PLUS_RNN(model_cfg)
    else:
        model = p_elmo.P_ELMo(model_cfg)
    models_list.append([model, '', flag_lm_model, flag_rnn, False])
    if flag_lm_model:
        lm_model = p_elmo.P_ELMo_lm(lm_model_cfg)
        models_list.append([lm_model, 'lm', True, False, False])
    if flag_rnn:
        pr_model = mlp.MLP(pr_model_cfg)
        models_list.append([pr_model, 'pr', False, False, False])
    # Collected for parity with the training script; unused during evaluation.
    (params, pr_params) = ([], [])
    for (model, idx, frz, _, _) in models_list:
        if frz:
            continue
        elif (idx != 'pr'):
            params += [p for p in model.parameters() if p.requires_grad]
        else:
            pr_params += [p for p in model.parameters() if p.requires_grad]
    load_models(args, models_list, device, data_parallel, output, tfm_cls=flag_rnn)
    get_loss = (plus_rnn.get_loss if flag_rnn else plus_tfm.get_loss)
    end = Print('end initializing a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)

    # Trainer setup: 8-class classification plus (without an LM) an LM task.
    start = Print('start setting trainer configurations', output)
    tasks_list = []
    tasks_list.append(['cls', [], ['acc8', 'acc3']])
    if (not flag_lm_model):
        tasks_list.append(['lm', [], ['acc']])
    trainer = Trainer(models_list, get_loss, run_cfg, tasks_list)
    trainer_args = {}
    trainer_args['data_parallel'] = data_parallel
    trainer_args['paired'] = False
    if flag_rnn:
        trainer_args['projection'] = pr_model_cfg.projection
    if flag_rnn:
        trainer_args['evaluate_cls'] = plus_rnn.evaluate_cls_amino
    else:
        trainer_args['evaluate_cls'] = plus_tfm.evaluate_cls_amino
    trainer_args['evaluate'] = ['cls', secstr.evaluate_secstr]
    end = Print('end setting trainer configurations', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)

    # Evaluation: a classification pass per dataset, then (without an LM)
    # an augmented LM pass; progress is echoed to stderr every 10 batches.
    start = Print('start evaluating a model', output)
    Print(trainer.get_headline(test=True), output)
    for (idx_test, dataset_test, iterator_test) in zip(idxs_test, datasets_test, iterators_test):
        dataset_test.set_augment(False)
        trainer.set_exec_flags(['cls', 'lm'], [True, False])
        for (b, batch) in enumerate(iterator_test):
            batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
            trainer.evaluate(batch, trainer_args)
            if ((b % 10) == 0):
                print('# cls {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
        print((' ' * 150), end='\r', file=sys.stderr)
        if (not flag_lm_model):
            dataset_test.set_augment(True)
            trainer.set_exec_flags(['cls', 'lm'], [False, True])
            for (b, batch) in enumerate(iterator_test):
                batch = [(t.to(device) if (type(t) is torch.Tensor) else t) for t in batch]
                trainer.evaluate(batch, trainer_args)
                if ((b % 10) == 0):
                    print('# lm {:.1%} loss={:.4f}'.format((b / len(iterator_test)), trainer.loss_eval), end='\r', file=sys.stderr)
            print((' ' * 150), end='\r', file=sys.stderr)
        Print(trainer.get_log(test_idx=idx_test, args=trainer_args), output)
        trainer.reset()
    end = Print('end evaluating a model', output)
    Print(''.join(['elapsed time:', str((end - start))]), output, newline=True)
    output.close()
def vgg_a(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.5, spatial_squeeze=True, scope='vgg_a'):
    """Build the VGG-11 ("version A") network with TF-Slim.

    Args:
        inputs: input image batch tensor.
        num_classes: number of output classes for the final fc8 layer.
        is_training: whether dropout is active.
        dropout_keep_prob: keep probability for the two dropout layers.
        spatial_squeeze: if True, squeeze the spatial dims of the logits.
        scope: variable scope name.

    Returns:
        Tuple of (logits tensor, dict of end-point tensors).
    """
    with tf.variable_scope(scope, 'vgg_a', [inputs]) as sc:
        end_points_collection = sc.name + '_end_points'
        # Every conv/pool output is collected so end points can be exported.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d], outputs_collections=end_points_collection):
            x = inputs
            # Five conv stages: (repeat count, channel depth, conv scope, pool scope).
            stages = (
                (1, 64, 'conv1', 'pool1'),
                (1, 128, 'conv2', 'pool2'),
                (2, 256, 'conv3', 'pool3'),
                (2, 512, 'conv4', 'pool4'),
                (2, 512, 'conv5', 'pool5'),
            )
            for repeats, depth, conv_scope, pool_scope in stages:
                x = slim.repeat(x, repeats, slim.conv2d, depth, [3, 3], scope=conv_scope)
                x = slim.max_pool2d(x, [2, 2], scope=pool_scope)
            # Fully-connected layers expressed as convolutions.
            x = slim.conv2d(x, 4096, [7, 7], padding='VALID', scope='fc6')
            x = slim.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout6')
            x = slim.conv2d(x, 4096, [1, 1], scope='fc7')
            x = slim.dropout(x, dropout_keep_prob, is_training=is_training, scope='dropout7')
            x = slim.conv2d(x, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='fc8')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                # Drop the 1x1 spatial dims so logits are (batch, num_classes).
                x = tf.squeeze(x, [1, 2], name='fc8/squeezed')
                end_points[sc.name + '/fc8'] = x
            return x, end_points
_utils.test()  # NOTE(review): looks like a truncated `@test_utils.test()` decorator split at a chunk boundary — confirm against the original file
def test_matrix_field_non_constant_index():
    """Write matrix/vector field components through non-constant (runtime) inner indices."""
    # Five 2x2 i32 matrices and five 10-component i32 vectors.
    m = ti.Matrix.field(2, 2, ti.i32, 5)
    v = ti.Vector.field(10, ti.i32, 5)

    # NOTE(review): func1/func2 carry no @ti.kernel decorators here; decorators may
    # have been lost in formatting — verify against the original file.
    def func1():
        # Fill every entry with j^2 + k^2, indexing the matrix with loop variables.
        for i in range(5):
            for (j, k) in ti.ndrange(2, 2):
                m[i][(j, k)] = ((j * j) + (k * k))
    func1()
    # Spot-check: (0,1) -> 0+1, (1,0) -> 1+0, (1,1) -> 1+1.
    assert (m[1][(0, 1)] == 1)
    assert (m[2][(1, 0)] == 1)
    assert (m[3][(1, 1)] == 2)
    assert (m[4][(0, 1)] == 1)

    def func2():
        # Write v[i][j*j] = j*j, i.e. only indices 0, 1, 4, 9 are touched.
        for i in range(5):
            for j in range(4):
                v[i][(j * j)] = (j * j)
    func2()
    assert (v[1][0] == 0)
    assert (v[1][1] == 1)
    assert (v[1][4] == 4)
    assert (v[1][9] == 9)
def register_functions(root_module):
    """Register ns-3 SpectrumValue free functions on the root module, then
    delegate to the per-submodule registration helpers."""
    module = root_module
    SV_REF = 'ns3::SpectrumValue const &'
    # (function name, return type, ((param type, param name), ...)) — kept in
    # the exact order the registrations were originally issued.
    free_functions = (
        ('Integral', 'double', ((SV_REF, 'arg'),)),
        ('Log', 'ns3::SpectrumValue', ((SV_REF, 'arg'),)),
        ('Log10', 'ns3::SpectrumValue', ((SV_REF, 'arg'),)),
        ('Log2', 'ns3::SpectrumValue', ((SV_REF, 'arg'),)),
        ('Norm', 'double', ((SV_REF, 'x'),)),
        ('Pow', 'ns3::SpectrumValue', ((SV_REF, 'lhs'), ('double', 'rhs'))),
        ('Pow', 'ns3::SpectrumValue', (('double', 'lhs'), (SV_REF, 'rhs'))),
        ('Prod', 'double', ((SV_REF, 'x'),)),
        ('Sum', 'double', ((SV_REF, 'x'),)),
    )
    for fn_name, retval, fn_params in free_functions:
        module.add_function(fn_name, retval, [param(t, n) for t, n in fn_params])
    # Submodule registration, same order as before.
    submodule_registrars = (
        ('FatalImpl', register_functions_ns3_FatalImpl),
        ('Hash', register_functions_ns3_Hash),
        ('TracedValueCallback', register_functions_ns3_TracedValueCallback),
        ('addressUtils', register_functions_ns3_addressUtils),
        ('internal', register_functions_ns3_internal),
        ('tests', register_functions_ns3_tests),
    )
    for sub_name, registrar in submodule_registrars:
        registrar(module.get_submodule(sub_name), root_module)
    return
_numpy_output(non_zero=True, check_dtype=True)  # NOTE(review): looks like a truncated decorator (e.g. `@compare_numpy_output(...)`) split at a chunk boundary — confirm against the original file
def test_square_as_multiply(A: dace.complex64[10], I: dace.bool_[10]):
    """Square A element-wise via np.multiply; the `where` mask I restricts which
    elements are written (unmasked outputs are left untouched)."""
    np.multiply(A, A, where=I)
def fusion_re(**kwargs):
    """Build the fusion re-identification model on a pretrained SqueezeNet 1.1 backbone.

    NOTE(review): **kwargs is accepted but not forwarded anywhere — presumably kept
    for factory-signature uniformity; confirm.
    """
    backbone = squeezenet1_1(pretrained=True)
    return CreateNetFusion_re(backbone, stack=True)
class JBluesDetailed(ProcessingPlasmaProperty):
    """Compute J_blue values from Monte Carlo estimators, falling back to a
    dilute-blackbody value (scaled by w_epsilon) wherever the estimator is zero.
    """

    outputs = ('j_blues',)
    latex_name = 'J_{\\textrm{blue}}'

    def __init__(self, plasma_parent, w_epsilon):
        super(JBluesDetailed, self).__init__(plasma_parent)
        # Dilution factor applied to the blackbody intensity used for
        # lines with no estimator contribution.
        self.w_epsilon = w_epsilon

    def calculate(self, lines, nu, t_rad, w, j_blues_norm_factor, j_blue_estimator):
        """Return a DataFrame of J_blue values indexed by line, one column per shell.

        Falls back entirely to the dilute-blackbody calculation when no
        estimator data is available.
        """
        if len(j_blue_estimator) == 0:
            return JBluesDiluteBlackBody.calculate(lines, nu, t_rad, w)
        j_blues = pd.DataFrame(
            j_blue_estimator * j_blues_norm_factor.value,
            index=lines.index,
            columns=np.arange(len(t_rad)),
        )
        for i in range(len(t_rad)):
            zero_j_blues = j_blues[i] == 0.0
            # BUGFIX: the original used chained indexing
            # (`j_blues[i][zero_j_blues] = ...`), which pandas may apply to a
            # temporary copy (SettingWithCopyWarning) and silently drop.
            # A single .loc write is unambiguous and guaranteed in-place.
            j_blues.loc[zero_j_blues, i] = self.w_epsilon * intensity_black_body(
                nu[zero_j_blues].values, t_rad[i]
            )
        return j_blues
class PaviClient(object):
    """Client that registers a training run with a PAVI logging service and
    ships per-iteration logs to it from a background thread."""

    def __init__(self, url, username=None, password=None, instance_id=None):
        # url: endpoint of the PAVI service; credentials fall back to env vars.
        self.url = url
        self.username = self._get_env_var(username, 'PAVI_USERNAME')
        self.password = self._get_env_var(password, 'PAVI_PASSWORD')
        self.instance_id = instance_id
        # Queue/thread are created lazily in connect(); log() is a no-op until then.
        self.log_queue = None
        self.logger = None

    def _get_env_var(self, var, env_var):
        # Prefer the explicit argument; otherwise read the environment variable.
        if (var is not None):
            return str(var)
        var = os.getenv(env_var)
        if (not var):
            raise ValueError('"{}" is neither specified nor defined as env variables'.format(env_var))
        return var

    def _print_log(self, msg, level=logging.INFO, *args, **kwargs):
        # Route messages through the attached logger when one exists,
        # otherwise fall back to plain print.
        if (self.logger is not None):
            self.logger.log(level, msg, *args, **kwargs)
        else:
            print(msg, *args, **kwargs)

    def connect(self, model_name, work_dir=None, info=dict(), timeout=5, logger=None):
        """Register this run with the service.

        On HTTP 200 the response body is taken as the instance id, and a
        daemon worker thread is started to drain the log queue. Returns True
        on success, False (or None on a request exception) otherwise.

        NOTE(review): the `info=dict()` mutable default is shared across calls;
        harmless here since it is only read, but worth confirming upstream.
        """
        if (logger is not None):
            self.logger = logger
        self._print_log('connecting pavi service {}...'.format(self.url))
        post_data = dict(time=str(datetime.now()), username=self.username, password=self.password, instance_id=self.instance_id, model=model_name, work_dir=(osp.abspath(work_dir) if work_dir else ''), session_file=info.get('session_file', ''), session_text=info.get('session_text', ''), model_text=info.get('model_text', ''), device=get_host_info())
        try:
            response = requests.post(self.url, json=post_data, timeout=timeout)
        except Exception as ex:
            self._print_log('fail to connect to pavi service: {}'.format(ex), level=logging.ERROR)
        else:
            if (response.status_code == 200):
                self.instance_id = response.text
                self._print_log('pavi service connected, instance_id: {}'.format(self.instance_id))
                self.log_queue = Queue()
                # Daemon thread so a hung service never blocks interpreter exit.
                self.log_thread = Thread(target=self.post_worker_fn)
                self.log_thread.daemon = True
                self.log_thread.start()
                return True
            else:
                self._print_log('fail to connect to pavi service, status code: {}, err message: {}'.format(response.status_code, response.reason), level=logging.ERROR)
        return False

    def post_worker_fn(self, max_retry=3, queue_timeout=1, req_timeout=3):
        """Worker loop: pull log dicts off the queue and POST each one,
        retrying up to max_retry times before giving up on that entry."""
        while True:
            try:
                log = self.log_queue.get(timeout=queue_timeout)
            except Empty:
                # Nothing queued yet; back off briefly before polling again.
                time.sleep(1)
            except Exception as ex:
                self._print_log('fail to get logs from queue: {}'.format(ex), level=logging.ERROR)
            else:
                retry = 0
                while (retry < max_retry):
                    try:
                        response = requests.post(self.url, json=log, timeout=req_timeout)
                    except Exception as ex:
                        retry += 1
                        self._print_log('error when posting logs to pavi: {}'.format(ex), level=logging.ERROR)
                    else:
                        status_code = response.status_code
                        if (status_code == 200):
                            # Delivered; stop retrying this entry.
                            break
                        else:
                            self._print_log('unexpected status code: {}, err msg: {}'.format(status_code, response.reason), level=logging.ERROR)
                            retry += 1
                if (retry == max_retry):
                    # Retries exhausted without a 200; drop this log entry.
                    self._print_log('fail to send logs of iteration {}'.format(log['iter_num']), level=logging.ERROR)

    def log(self, phase, iter, outputs):
        """Enqueue one log record (non-blocking); silently dropped if connect()
        has not succeeded yet."""
        if (self.log_queue is not None):
            logs = {'time': str(datetime.now()), 'instance_id': self.instance_id, 'flow_id': phase, 'iter_num': iter, 'outputs': outputs, 'msg': ''}
            self.log_queue.put(logs)
class AutoModelForAudioFrameClassification(metaclass=DummyObject):
    """Import-time placeholder: keeps the name importable when torch is absent
    and raises a helpful error on any instantiation."""

    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message about the missing backend.
        requires_backends(self, ["torch"])
class DecoderEmbedding(nn.Module):
    """Decoder-side embedding: learned response embeddings plus learned
    positional embeddings over a fixed sequence length.

    Args:
        n_responses: size of the response vocabulary.
        n_dims: embedding dimensionality.
        seq_len: fixed sequence length; positions 0..seq_len-1 are embedded.
    """

    def __init__(self, n_responses, n_dims, seq_len):
        super(DecoderEmbedding, self).__init__()
        self.n_dims = n_dims
        self.seq_len = seq_len
        self.response_embed = nn.Embedding(n_responses, n_dims)
        # NOTE(review): time_embed is defined but never used in forward();
        # presumably consumed elsewhere or vestigial — confirm.
        self.time_embed = nn.Linear(1, n_dims, bias=False)
        self.position_embed = nn.Embedding(seq_len, n_dims)

    def forward(self, responses):
        """Return position + response embeddings, shape (batch, seq_len, n_dims).

        `responses` is expected to be an integer tensor of shape
        (batch, seq_len).
        """
        e = self.response_embed(responses)
        # FIX: derive the device from the input instead of the global
        # Config.device — the original crashed (device-mismatched add) whenever
        # the input did not live on Config.device. Behavior is identical when
        # inputs are on Config.device, so this is strictly more permissive.
        seq = torch.arange(self.seq_len, device=responses.device).unsqueeze(0)
        p = self.position_embed(seq)
        return p + e