code
stringlengths
101
5.91M
def words_vec(w2v, words, use_norm=False):
    """Fetch vectors for `words`, preferring the model's own `words_vec` hook.

    Falls back to per-word lookup through `w2v.wv`, silently skipping any
    word not present in the vocabulary.

    Returns:
        dict mapping each in-vocabulary word to its vector.
    """
    batch_lookup = getattr(w2v, 'words_vec', None)
    if callable(batch_lookup):
        return batch_lookup(words, use_norm)
    result = {}
    for token in words:
        if token in w2v.wv:
            result[token] = w2v.wv.word_vec(token, use_norm)
    return result
# NOTE(review): the leading `_module()` looks like a truncated decorator or
# registry call (extraction artifact) — confirm against the original source.
_module()


class DistillCls(BaseCls):
    """Classifier with an auxiliary distillation head and a frozen teacher model."""

    def __init__(self, encoder_args=None, cls_args=None, distill_args=None, criterion_args=None, **kwargs):
        super().__init__(encoder_args, cls_args, criterion_args)
        # Whether to run the distillation branch while training (default True).
        self.distill = encoder_args.get('distill', True)
        in_channels = self.encoder.distill_channels
        distill_args.distill_head_args.in_channels = in_channels
        self.dist_head = build_model_from_cfg(distill_args.distill_head_args)
        # Pretrained teacher: loaded onto GPU and frozen in eval mode.
        self.dist_model = build_model_from_cfg(distill_args).cuda()
        load_checkpoint(self.dist_model, distill_args.pretrained_path)
        self.dist_model.eval()

    def forward(self, p0, f0=None):
        # Accept a dict-style batch: {'pos': positions, 'x': features}.
        if hasattr(p0, 'keys'):
            (p0, f0) = (p0['pos'], p0['x'])
        if (self.distill and self.training):
            # Training with distillation returns (class logits, distill logits).
            (global_feat, distill_feature) = self.encoder.forward_cls_feat(p0, f0)
            return (self.prediction(global_feat), self.dist_head(distill_feature))
        else:
            global_feat = self.encoder.forward_cls_feat(p0, f0)
            return self.prediction(global_feat)

    def get_loss(self, pred, gt, inputs):
        # Criterion additionally receives the teacher model for distillation.
        return self.criterion(inputs, pred, gt.long(), self.dist_model)

    def get_logits_loss(self, data, gt):
        (logits, dist_logits) = self.forward(data)
        return (logits, self.criterion(data, [logits, dist_logits], gt.long(), self.dist_model))
def even_quantile_labels(vals, nclasses, verbose=True):
    """Bin `vals` into `nclasses` integer labels at even quantile boundaries.

    Args:
        vals: 1-D numpy array of scalar values.
        nclasses: number of quantile classes to produce.
        verbose: when True, print the half-open value interval of each class.

    Returns:
        Integer numpy array of shape (len(vals),) with labels in [0, nclasses).
    """
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; use int.
    label = (-1) * np.ones(vals.shape[0], dtype=int)
    interval_lst = []
    lower = -np.inf
    for k in range(nclasses - 1):
        upper = np.quantile(vals, (k + 1) / nclasses)
        interval_lst.append((lower, upper))
        # Elementwise AND via multiply: selects lower <= v < upper.
        inds = (vals >= lower) * (vals < upper)
        label[inds] = k
        lower = upper
    # Everything at or above the last boundary gets the top class.
    label[vals >= lower] = nclasses - 1
    interval_lst.append((lower, np.inf))
    if verbose:
        print('Class Label Intervals:')
        for (class_idx, interval) in enumerate(interval_lst):
            # FIX: bracket typo — half-open interval is "[lo, hi)", not "[lo, hi)]".
            print(f'Class {class_idx}: [{interval[0]}, {interval[1]})')
    return label
# NOTE(review): the two leading `.parametrize(...)` lines look like truncated
# `@pytest.mark.parametrize` decorators (extraction artifact) — confirm upstream.
.parametrize('seed', [313, 314])
.parametrize('op', ['+', '-'])
def test_variable_arithmetic_unary_ops(seed, op):
    """Unary +/- on an nnabla Variable must match the op applied to its ndarray."""
    rng = np.random.RandomState(seed)
    vx = nn.Variable.from_numpy_array(rng.randn(2, 3, 4).astype(np.float32))
    with nn.auto_forward():
        # Build the unary expression on the Variable, e.g. `- vx`.
        vz = eval('{0} vx'.format(op))
    # Reference: same unary op on the raw data array.
    ref_z = eval('{0} vx.d'.format(op))
    assert_allclose(ref_z, vz.d)
# NOTE(review): `_level_function(module='ak.str')` looks like a truncated
# decorator (presumably the awkward-array high-level-function marker) —
# confirm against the original source.
_level_function(module='ak.str')
def ltrim(array, characters, *, highlevel=True, behavior=None, attrs=None):
    """Left-trim `characters` from string elements of `array`.

    The bare `yield (array,)` hands the operands to the decorator's dispatch
    machinery before delegating the real work to `_impl`.
    """
    (yield (array,))
    return _impl(array, characters, highlevel, behavior, attrs)
def register_Ns3CidFactory_methods(root_module, cls):
    """Register constructors and methods for the ns3::CidFactory binding.

    Registration order matches the original hand-written sequence.
    """
    cls.add_constructor([param('ns3::CidFactory const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Allocate', 'ns3::Cid', [param('ns3::Cid::Type', 'type')])
    # Parameterless allocators all share the same signature.
    for allocator in ('AllocateBasic', 'AllocateMulticast', 'AllocatePrimary', 'AllocateTransportOrSecondary'):
        cls.add_method(allocator, 'ns3::Cid', [])
    cls.add_method('FreeCid', 'void', [param('ns3::Cid', 'cid')])
    # Const predicates taking a Cid and returning bool.
    for predicate in ('IsBasic', 'IsPrimary', 'IsTransport'):
        cls.add_method(predicate, 'bool', [param('ns3::Cid', 'cid')], is_const=True)
    return
def general_pattern(pattern):
    """Project a space-separated pattern onto the known keyword vocabulary.

    Tokens absent from KEY_KEYWORD_SET are dropped; surviving tokens are
    re-joined with single spaces in their original order.
    """
    kept_tokens = (tok for tok in pattern.split(' ') if tok in KEY_KEYWORD_SET)
    return ' '.join(kept_tokens)
def defend(list1, list2, r_in=0.02, r_out=0.02, mintime=42):
    """Rewrite a packet trace into a constant-rate defended trace.

    Reads (time, signed size) events from `list1` (positive = outgoing,
    negative = incoming, presumably — confirm with the caller) and appends
    defended (time, +/-datasize) events to `list2` in place, emitting one
    unit-size cell per direction at fixed inter-arrival rates r_out / r_in.
    """
    datasize = 1
    # Per-direction backlog of payload not yet flushed: [outgoing, incoming].
    buf = [0, 0]
    listind = 0
    starttime = list1[0][0]
    lastpostime = starttime
    lastnegtime = starttime
    curtime = starttime
    # Count of emitted cells per direction (not returned; side-stat only).
    count = [0, 0]
    # Index of the last event of each sign, used to detect trace exhaustion.
    lastind = [0, 0]
    for i in range(0, len(list1)):
        if (list1[i][1] > 0):
            lastind[0] = i
        else:
            lastind[1] = i
    # Inter-cell gap tables per direction; mutated to 10000 (effectively off)
    # once a direction's real traffic is exhausted past mintime.
    defintertime = [[r_out], [r_in]]
    # Keep sending while real events remain, backlog exists, or the minimum
    # padding duration has not elapsed.
    while ((listind < len(list1)) or ((buf[0] + buf[1]) > 0) or (curtime < (starttime + mintime))):
        if (curtime >= (starttime + mintime)):
            for j in range(0, 2):
                if (listind > lastind[j]):
                    defintertime[j][0] = 10000
        ind = int(((curtime - starttime) * 10))
        if (ind >= len(defintertime[0])):
            ind = (len(defintertime[0]) // 2)
        # Next cell goes in whichever direction is due sooner.
        if ((lastpostime + defintertime[0][ind]) < (lastnegtime + defintertime[1][ind])):
            cursign = 0
            curtime = (lastpostime + defintertime[0][ind])
            lastpostime += defintertime[0][ind]
        else:
            cursign = 1
            curtime = (lastnegtime + defintertime[1][ind])
            lastnegtime += defintertime[1][ind]
        # Capacity of this cell, first consumed by buffered backlog.
        tosend = datasize
        if (buf[cursign] > 0):
            if (buf[cursign] <= datasize):
                tosend -= buf[cursign]
                buf[cursign] = 0
                listind += 1
            else:
                tosend = 0
                buf[cursign] -= datasize
        if (listind < len(list1)):
            # Pack as many due same-direction real events as fit in this cell;
            # a partially-sent event's remainder goes to the backlog buffer.
            while ((list1[listind][0] <= curtime) and (fsign(list1[listind][1]) == cursign) and (tosend > 0)):
                if (tosend >= abs(list1[listind][1])):
                    tosend -= abs(list1[listind][1])
                    listind += 1
                else:
                    buf[cursign] = (abs(list1[listind][1]) - tosend)
                    tosend = 0
                if (listind >= len(list1)):
                    break
        if (cursign == 0):
            list2.append([curtime, datasize])
        else:
            list2.append([curtime, (- datasize)])
        count[cursign] += 1
def masked_metric_iou(mask, reg_weight=0, norm_by_mask=True):
    """Build a masked soft-IoU metric wrapped by generic_masked_loss.

    The inner metric computes squared-overlap / squared-union per position
    along the channel axis, epsilon-stabilized, and re-expands the reduced
    axis so the mask can be applied elementwise.
    """
    def iou_metric(y_true, y_pred):
        # Channel axis depends on the backend's image data format.
        axis = ((- 1) if backend_channels_last() else 1)
        # Clamp predictions to be non-negative before the min/max overlap.
        y_pred = K.maximum(0.0, y_pred)
        inter = K.mean(K.square(K.minimum(y_true, y_pred)), axis=axis)
        union = K.mean(K.square(K.maximum(y_true, y_pred)), axis=axis)
        iou = (inter / (union + K.epsilon()))
        loss = K.expand_dims(iou, axis)
        return loss
    return generic_masked_loss(mask, iou_metric, reg_weight=reg_weight, norm_by_mask=norm_by_mask)
def unify_batches(name: str, train_registry: Path, val_registry: Path, train_dir: Path, val_dir: Path, index_dir: Path, batch_formats: Tuple[(Tuple[(str, Tuple[(str, ...)])], ...)], max_epochs: int=400, initial_final_alpha: float=0.2) -> None:
    """Assemble data-locked batch indices: one validation epoch plus
    `max_epochs` training epochs, serialized to JSON under `index_dir`.

    `batch_formats` is a tuple of (key, input-element-names) pairs; the last
    entry must be the superset format.
    """
    overwatch.info(f'Phase 3 Preprocessing :: Assembling *Data-Locked* Batches for Dataset `{name}`')
    with open(train_registry, 'r') as f:
        train_registrations = json.load(f)
    with open(val_registry, 'r') as f:
        val_registrations = json.load(f)
    # Every non-final batch format must draw from the final (full) input set.
    full_set_inputs = set(batch_formats[(- 1)][1])
    for (_, subset_inputs) in batch_formats[:(- 1)]:
        assert full_set_inputs.issuperset(set(subset_inputs)), 'We have a problem with batch formats...'
    (b_keys, unique_states) = ({b[0] for b in batch_formats}, set())
    state_elements = [s for s in full_set_inputs if ('state_' in s)]
    (do_initial, do_final) = (('state_initial' in state_elements), ('state_final' in state_elements))
    # Number of intermediate states between initial and final (0 unless both present).
    n_int = ((len(state_elements) - 2) if (('state_initial' in state_elements) and ('state_final' in state_elements)) else 0)
    overwatch.info('\tSerializing Epochs to JSON --> Storing mapping of Epoch -> Image Paths')
    for b in b_keys:
        os.makedirs((index_dir / b), exist_ok=True)
    overwatch.info('\tWriting Validation Epoch to Disk')
    (val_epoch_idx, _, uniq_s) = serialize_epoch(index_dir, val_registrations, val_dir, batch_formats, do_initial, do_final, initial_final_alpha, n_int, epoch=0, is_validation=True)
    # -1 signals "already serialized / skipped" — confirm serialize_epoch contract.
    if (val_epoch_idx != (- 1)):
        unique_states |= uniq_s
    (epochs, n_frames_per_epoch) = (list(range(max_epochs)), (- 1))
    overwatch.info('\tPlacing the Train Registry into Shared Memory')
    # Manager dict so worker processes share one copy of the registry.
    manager = mp.Manager()
    mg_registry = manager.dict(train_registrations)
    with mp.Pool((mp.cpu_count() // 4)) as pool:
        overwatch.info('\tWriting Train Batches per Epoch to Disk')
        precompute_fn = partial(serialize_epoch, index_dir, mg_registry, train_dir, batch_formats, do_initial, do_final, initial_final_alpha, n_int)
        for (epoch_idx, n_frames, uniq_s) in pool.imap_unordered(precompute_fn, epochs):
            if (epoch_idx == (- 1)):
                continue
            unique_states |= uniq_s
            n_frames_per_epoch = n_frames
    overwatch.info(f'Train Uniqueness: {len(unique_states)} States & {len(mg_registry)} Utterances')
    overwatch.info(f'Final Statistics :: 1 Epoch has ~ {n_frames_per_epoch} Frames...')
def recurrent_fn(params, rng_key, action, state):
    """mctx recurrent function: step the env, return (RecurrentFnOutput, next state).

    Terminal states get zero value and zero discount; otherwise discount is
    -1.0 (two-player alternating perspective — presumably a zero-sum game,
    confirm with the environment).
    """
    del params
    # Remember who acted so the reward is taken from their perspective.
    current_player = state.current_player
    state = env.step(state, action)
    logits = policy_fn(state.legal_action_mask)
    value = value_fn(rng_key, state)
    reward = state.rewards[current_player]
    # Zero out value and discount at terminal states.
    value = jax.lax.select(state.terminated, 0.0, value)
    discount = jax.lax.select(state.terminated, 0.0, (- 1.0))
    recurrent_fn_output = mctx.RecurrentFnOutput(reward=reward, discount=discount, prior_logits=logits, value=value)
    return (recurrent_fn_output, state)
# NOTE(review): leading `_utils.test(...)` looks like a truncated decorator
# (presumably @test_utils.test) — confirm against the original source.
_utils.test(require=ti.extension.sparse, exclude=[ti.metal])
def test_append_u8():
    """Appending i**3 into a u8 dynamic field must wrap modulo 256."""
    x = ti.field(ti.u8)
    pixel = ti.root.dynamic(ti.j, 20)
    pixel.place(x)

    # NOTE(review): presumably decorated @ti.kernel upstream (truncated here).
    def make_list():
        ti.loop_config(serialize=True)
        for i in range(20):
            x[()].append(((i * i) * i))
    make_list()
    for i in range(20):
        assert (x[i] == (((i * i) * i) % 256))
def plot_belief_grad_b(belief, **kwargs):
    """Plot r vs its b-gradient and v vs the second b-gradient for a belief model.

    Left panel: r against d/db A; right panel: v against d^2/db^2 A; both
    over the 'b' column of the dataframe returned by check_belief_grad_b.
    """
    df = check_belief_grad_b(belief, **kwargs)
    (fig, axs) = plt.subplots(1, 2, figsize=(8, 4))
    axs[0].plot(df['b'], df['r'], '-', label='r')
    axs[0].plot(df['b'], df['A1'], '--', label='$\\partial_{b} A$')
    axs[0].set(xlabel='b')
    axs[0].legend()
    axs[1].plot(df['b'], df['v'], '-', label='v')
    axs[1].plot(df['b'], df['A2'], '--', label='$\\partial^2_{b} A$')
    axs[1].set(xlabel='b')
    ylim = axs[1].get_ylim()
    # Pad a visually flat y-range so the curve is not a hairline.
    if ((ylim[1] - ylim[0]) < EPSILON):
        axs[1].set_ylim((ylim[0] - 0.12), (ylim[1] + 0.12))
    axs[1].legend()
    # Title records the belief name and the keyword arguments used.
    kwargs_str = ' '.join((f'{key}={val}' for (key, val) in kwargs.items()))
    fig.suptitle(f'{belief.__name__} {kwargs_str}')
    fig.tight_layout(rect=[0, 0.03, 1, 0.95])
class GradientPTQTest(GradientPTQBaseTest):
    """Gradient-PTQ check: quantized outputs must stay cosine-similar to float outputs."""

    def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
        """Assert cosine similarity between float and quantized outputs is ~1."""
        float_out = float_model(input_x)
        quant_out = quantized_model(input_x)
        cs = cosine_similarity(float_out.numpy(), quant_out.numpy())
        similarity_ok = np.isclose(cs, 1, rtol=0.0001)
        self.unit_test.assertTrue(similarity_ok, msg=f'fail cosine similarity check: {cs}')
class create_model_3(torch.nn.Module):
    """1x1 conv followed by two reweighted batch-norms, plus a residual add of the input."""

    def __init__(self):
        super(create_model_3, self).__init__()
        self.conv1 = Conv2d(3, 3, kernel_size=1, stride=1)
        # Both batch-norm layers get their weights perturbed by the helper.
        self.bn = bn_weight_change(BatchNorm2d(3))
        self.bn2 = bn_weight_change(BatchNorm2d(3))

    def forward(self, inp):
        out = self.bn2(self.bn(self.conv1(inp)))
        return out + inp
def z_cost(z, errors, mean, std):
    """Score a candidate threshold at `z` standard deviations above the mean.

    Lower is better: the (negated, normalized) drop in mean/std achieved by
    the threshold, divided by the number of points above it plus the squared
    count of consecutive runs. Returns +inf when nothing exceeds epsilon.
    """
    epsilon = mean + z * std
    (delta_mean, delta_std) = deltas(errors, epsilon, mean, std)
    (above, consecutive) = count_above(errors, epsilon)
    denominator = above + consecutive ** 2
    if denominator == 0:
        # No anomalies captured — this threshold is useless.
        return np.inf
    numerator = -((delta_mean / mean) + (delta_std / std))
    return numerator / denominator
def ones_d(shape):
    """Return a tensor of ones, accepting either a static list/tuple shape
    (stacked into a dynamic shape tensor) or an already-built shape tensor."""
    dynamic_shape = tf.stack(shape) if isinstance(shape, (list, tuple)) else shape
    return tf.ones(dynamic_shape)
class MininetTopoFromNxGraph(Topo):
    """Builds a Mininet topology mirroring a networkx graph's nodes and edges."""

    def build(self, graph):
        hosts = {}
        for node in graph.nodes(data=True):
            name = node[0]
            params = node[1]
            # NOTE(review): the flag name reads inverted — nodes carrying
            # 'is_not_mininet_switch' become switches here. Confirm the
            # attribute's semantics with whatever produces the graph.
            if ('is_not_mininet_switch' in params):
                hosts[name] = self.addSwitch(name)
            else:
                # NOTE(review): assumes params['netmask'] already contains its
                # separator (e.g. '/24'), since it is concatenated to the IP.
                hosts[name] = self.addHost(name, ip=(params['ip'] + params['netmask']), mac=params['mac'])
        for edge in graph.edges(data=True):
            # Edge attributes pass straight through as Mininet link options.
            link_opts = edge[2]
            self.addLink(hosts[edge[0]], hosts[edge[1]], **link_opts)
def test_crossover_wrong_type(chromosome):
    """Crossover must reject non-chromosome operands via its assertion guard."""
    with pytest.raises(AssertionError):
        chromosome.cross_over(0, 0, 0)
def get_task(config: configure_finetuning.FinetuningConfig, task_name, tokenizer):
    """Instantiate the fine-tuning task registered under `task_name`.

    Raises:
        ValueError: if `task_name` is not a known task.
    """
    # Dispatch table replacing the original if/elif ladder; every entry is
    # constructed as task_cls(config, tokenizer).
    registry = {
        'cola': classification_tasks.CoLA,
        'mrpc': classification_tasks.MRPC,
        'mnli': classification_tasks.MNLI,
        'sst': classification_tasks.SST,
        'rte': classification_tasks.RTE,
        'qnli': classification_tasks.QNLI,
        'qqp': classification_tasks.QQP,
        'sts': classification_tasks.STS,
        'squad': qa_tasks.SQuAD,
        'squadv1': qa_tasks.SQuADv1,
        'newsqa': qa_tasks.NewsQA,
        'naturalqs': qa_tasks.NaturalQuestions,
        'triviaqa': qa_tasks.TriviaQA,
        'searchqa': qa_tasks.SearchQA,
        'chunk': tagging_tasks.Chunking,
        'chemprot': classification_tasks.ChemProt,
        'NCBI-disease': tagging_tasks.NCBI,
        'BC5CDR-disease': tagging_tasks.BC5CDRdisease,
        'BC5CDR-chem': tagging_tasks.BC5CDRchem,
        'bioasq-squadv2': qa_tasks.BioASQv2,
        'bioasq-squadv1': qa_tasks.BioASQv1,
    }
    if task_name in registry:
        return registry[task_name](config, tokenizer)
    raise ValueError('Unknown task ' + task_name)
def save_img(save_dir, img, unnormalize=True, max_num=200, size=64, nrow=10, dataname='imagenet'):
    """Save up to `max_num` images from a batch as one grid file at `save_dir`.

    Optionally undoes dataset normalization, clamps to [0, 1], and downsizes
    any image wider than `size` pixels before writing.
    """
    batch = img[:max_num].detach()
    if unnormalize:
        batch = img_denormlaize(batch, dataname=dataname)
    batch = torch.clamp(batch, min=0.0, max=1.0)
    # Resize only when the last spatial dimension exceeds the target size.
    if batch.shape[-1] > size:
        batch = F.interpolate(batch, size)
    save_image(batch.cpu(), save_dir, nrow=nrow)
def create_optimizer(cfg: DictConfig, *args: List, **kwargs: Dict) -> Optimizer:
    """Build the optimizer named by `cfg.name` from the OPTIMIZER registry.

    Returns None when no config is supplied.
    """
    if cfg is None:
        return None
    factory = OPTIMIZER.get(cfg.name)
    return factory(cfg, *args, **kwargs)
# NOTE(review): leading `_context(matplotlib_settings)` looks like a truncated
# decorator (presumably a matplotlib rc_context wrapper) — confirm upstream.
_context(matplotlib_settings)
def scale_wavefunctions(wavefunc_list: List['WaveFunction'], potential_vals: np.ndarray, scaling: Optional[float]) -> List['WaveFunction']:
    """Rescale wavefunctions in place for plotting against a potential.

    First rescales every wavefunction by the maximum amplitude scale factor
    across the list, then applies either the user-supplied `scaling` or an
    adaptive default. Returns the same (mutated) list.
    """
    scale_factors = np.array([wavefunc.amplitude_scale_factor(potential_vals) for wavefunc in wavefunc_list])
    for wavefunc in wavefunc_list:
        wavefunc.rescale(np.max(scale_factors))
    # Falsy `scaling` (None or 0) falls back to the adaptive default.
    adaptive_scalefactor = (scaling or defaults.set_wavefunction_scaling(wavefunc_list, potential_vals))
    for wavefunc in wavefunc_list:
        wavefunc.rescale(adaptive_scalefactor)
    return wavefunc_list
def apply_half(t):
    """Cast float32 tensors down to float16; every other dtype passes through unchanged."""
    is_single_precision = t.dtype is torch.float32
    return t.to(dtype=torch.half) if is_single_precision else t
def make_response_filter(status_code: str, all_status_codes: list[str]) -> FilterFunction:
    """Build a response filter for one status code.

    The literal 'default' matches any code not covered elsewhere in the spec;
    anything else matches that specific status code.
    """
    wants_default = status_code == 'default'
    if wants_default:
        return default_status_code(all_status_codes)
    return match_status_code(status_code)
class DataPrefetcher1():
    """Overlaps host-to-GPU batch copies with compute via a dedicated CUDA stream.

    Pulls (input, target, _, _) tuples from `loader`, starts their async
    transfer on `self.stream`, and hands out device tensors through `next()`.
    """

    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.input_cuda = self._input_cuda_for_image
        # FIX: previously bound DataPrefetcher._record_stream_for_image (a
        # different class), leaving this class's own staticmethod unused.
        self.record_stream = DataPrefetcher1._record_stream_for_image
        self.preload()

    def preload(self):
        """Fetch the next batch and begin its async copy; None marks exhaustion."""
        try:
            (self.next_input, self.next_target, _, _) = next(self.loader)
        except StopIteration:
            self.next_input = None
            self.next_target = None
            return
        with torch.cuda.stream(self.stream):
            self.input_cuda()
            self.next_target = self.next_target.cuda(non_blocking=True)

    def next(self):
        """Return the prefetched (input, target) pair and kick off the next copy."""
        # Ensure the copy stream has finished before the tensors are consumed.
        torch.cuda.current_stream().wait_stream(self.stream)
        input = self.next_input
        target = self.next_target
        if (input is not None):
            # record_stream prevents the allocator from reusing the memory
            # while the current stream may still read it.
            self.record_stream(input)
        if (target is not None):
            target.record_stream(torch.cuda.current_stream())
        self.preload()
        return (input, target)

    def _input_cuda_for_image(self):
        self.next_input = self.next_input.cuda(non_blocking=True)

    def _record_stream_for_image(input):
        # Intentionally no `self`: used as a plain function via the class.
        input.record_stream(torch.cuda.current_stream())
def define(n_eigs=20, tau=0.0):
    """Build the eigenproblem definition via `common` with this module's
    potential (`fun_v`) and exact-solution provider (`get_exact`)."""
    return common(fun_v, get_exact=get_exact, n_eigs=n_eigs, tau=tau)
class FirstOrderOptimizer(Serializable):
    """Mini-batch first-order optimizer around a TF optimizer (Adam by default).

    Call update_opt() once to wire a loss/target pair, then optimize() to run
    up to max_epochs of mini-batch gradient steps with early stopping.
    """

    def __init__(self, tf_optimizer_cls=None, tf_optimizer_args=None, learning_rate=0.001, beta1=0.9, max_epochs=1000, tolerance=1e-06, batch_size=32, callback=None, verbose=False, num_slices=1, ignore_last=False, **kwargs):
        Serializable.quick_init(self, locals())
        self._opt_fun = None
        self._target = None
        self._callback = callback
        if (tf_optimizer_cls is None):
            tf_optimizer_cls = tf.train.AdamOptimizer
        if (tf_optimizer_args is None):
            tf_optimizer_args = dict(learning_rate=learning_rate, beta1=beta1)
        self._tf_optimizer = tf_optimizer_cls(**tf_optimizer_args)
        self._max_epochs = max_epochs
        # Early-stopping threshold on |loss delta| between epochs.
        self._tolerance = tolerance
        self._batch_size = batch_size
        self._verbose = verbose
        self._input_vars = None
        self._train_op = None
        # Number of slices for memory-bounded loss evaluation via sliced_fun.
        self._num_slices = num_slices
        # When True, skip the final partial batch of each epoch.
        self._ignore_last = ignore_last

    def update_opt(self, loss, target, inputs, extra_inputs=None, **kwargs):
        """Build the train op minimizing `loss` over `target`'s trainable params.

        `inputs` / `extra_inputs` are the TF placeholders the loss depends on.
        """
        self._target = target
        self._train_op = self._tf_optimizer.minimize(loss, var_list=target.get_params(trainable=True))
        if (extra_inputs is None):
            extra_inputs = list()
        self._input_vars = (inputs + extra_inputs)
        # Lazily compile the loss function only when first evaluated.
        self._opt_fun = ext.lazydict(f_loss=(lambda : tensor_utils.compile_function((inputs + extra_inputs), loss)))

    def loss(self, inputs, extra_inputs=None):
        """Evaluate the compiled loss over the full dataset, sliced for memory."""
        if (extra_inputs is None):
            extra_inputs = tuple()
        return sliced_fun(self._opt_fun['f_loss'], self._num_slices)(inputs, extra_inputs)

    def optimize(self, inputs, extra_inputs=None, callback=None):
        """Run mini-batch training until max_epochs or loss-change < tolerance."""
        if (len(inputs) == 0):
            raise NotImplementedError
        f_loss = self._opt_fun['f_loss']
        if (extra_inputs is None):
            extra_inputs = tuple()
        last_loss = sliced_fun(f_loss, self._num_slices)(inputs, extra_inputs)
        start_time = time.time()
        dataset = BatchDataset(inputs, self._batch_size, extra_inputs=extra_inputs)
        sess = tf.get_default_session()
        for epoch in range(self._max_epochs):
            if self._verbose:
                logger.log(('Epoch %d' % epoch))
                progbar = pyprind.ProgBar(len(inputs[0]))
            for batch in dataset.iterate(update=True):
                if (self._ignore_last and (len(batch[0]) != self._batch_size)):
                    continue
                sess.run(self._train_op, dict(list(zip(self._input_vars, batch))))
                if self._verbose:
                    progbar.update(len(batch[0]))
            if self._verbose:
                if progbar.active:
                    progbar.stop()
            new_loss = sliced_fun(f_loss, self._num_slices)(inputs, extra_inputs)
            if self._verbose:
                logger.log(('Epoch: %d | Loss: %f' % (epoch, new_loss)))
            if (self._callback or callback):
                elapsed = (time.time() - start_time)
                callback_args = dict(loss=new_loss, params=(self._target.get_param_values(trainable=True) if self._target else None), itr=epoch, elapsed=elapsed)
                # Constructor callback gets the dict; call-site callback gets kwargs.
                if self._callback:
                    self._callback(callback_args)
                if callback:
                    callback(**callback_args)
            if (abs((last_loss - new_loss)) < self._tolerance):
                break
            last_loss = new_loss
class Embedder(nn.Module):
    """Encodes (stickman, rgb) reference views into per-identity embedding vectors."""

    def __init__(self, padding, in_channels, out_channels, num_channels, max_num_channels, embed_channels, embed_num_blocks, average_function):
        super().__init__()

        def get_down_block(in_channels, out_channels, padding):
            # Residual block that halves spatial resolution.
            return blocks.ResBlock(in_channels, out_channels, padding, upsample=False, downsample=True, norm_layer='none')
        # Map the padding-mode name onto its layer class.
        # NOTE(review): any other value leaves `padding` as a string and the
        # `padding(1)` calls below would fail — confirm callers only pass
        # 'zero' or 'reflection'.
        if (padding == 'zero'):
            padding = nn.ZeroPad2d
        elif (padding == 'reflection'):
            padding = nn.ReflectionPad2d
        self.out_channels = embed_channels
        # Stem: two spectral-norm convs + avg-pool, with a 1x1-conv skip path.
        self.down_block = nn.Sequential(padding(1), spectral_norm(nn.Conv2d((in_channels + out_channels), num_channels, 3, 1, 0), eps=0.0001), nn.ReLU(), padding(1), spectral_norm(nn.Conv2d(num_channels, num_channels, 3, 1, 0), eps=0.0001), nn.AvgPool2d(2))
        self.skip = nn.Sequential(spectral_norm(nn.Conv2d((in_channels + out_channels), num_channels, 1), eps=0.0001), nn.AvgPool2d(2))
        layers = []
        in_channels = num_channels
        for i in range(1, (embed_num_blocks - 1)):
            # Double channels each block, capped at max_num_channels.
            out_channels = min((in_channels * 2), max_num_channels)
            layers.append(get_down_block(in_channels, out_channels, padding))
            in_channels = out_channels
        layers.append(get_down_block(out_channels, embed_channels, padding))
        self.down_blocks = nn.Sequential(*layers)
        self.average_function = average_function
        self.finetuning = False

    def enable_finetuning(self, data_dict=None):
        """Switch to finetuning mode: identity embeddings are no longer recomputed."""
        self.finetuning = True

    def get_identity_embedding(self, data_dict):
        """Compute identity embeddings from 'enc_stickmen' / 'enc_rgbs'.

        Writes 'embeds' (aggregated over the n reference views) and
        'embeds_elemwise' (per-view) back into data_dict.
        """
        enc_stickmen = data_dict['enc_stickmen']
        enc_rgbs = data_dict['enc_rgbs']
        # Concatenate along channels; tensors are (b, n, c, h, w).
        inputs = torch.cat([enc_stickmen, enc_rgbs], 2)
        (b, n, c, h, w) = inputs.shape
        # Fold the view dimension into the batch for the conv stack.
        inputs = inputs.view((- 1), c, h, w)
        out = self.down_block(inputs)
        out = (out + self.skip(inputs))
        out = self.down_blocks(out)
        out = torch.relu(out)
        # Spatial sum -> one vector per (sample, view).
        embeds_elemwise = out.view(b, n, self.out_channels, (- 1)).sum(3)
        # NOTE(review): the 'sum' option actually averages over views.
        if (self.average_function == 'sum'):
            embeds = embeds_elemwise.mean(1)
        elif (self.average_function == 'max'):
            embeds = embeds_elemwise.max(1)[0]
        else:
            raise Exception('Incorrect `average_function` argument, expected `sum` or `max`')
        data_dict['embeds'] = embeds
        data_dict['embeds_elemwise'] = embeds_elemwise

    def get_pose_embedding(self, data_dict):
        # Intentionally a no-op in this embedder.
        pass

    def forward(self, data_dict):
        # During finetuning the cached identity embeddings are reused.
        if (not self.finetuning):
            self.get_identity_embedding(data_dict)
        self.get_pose_embedding(data_dict)
def subsets_with_hereditary_property(f, X, max_obstruction_size=None, ncpus=1):
    """Yield the subsets of X (as lists of labels) satisfying the hereditary
    property `f`, exploring layer by layer in increasing subset size.

    `bs[j]` tracks, as a bitset, which forbidden ("no") sets do NOT contain
    element j; a candidate is pruned when some forbidden set is contained in
    it. `max_obstruction_size` skips calling f on sets larger than any
    possible minimal obstruction. With ncpus != 1 the layer exploration is
    parallelized via Sage's @parallel decorator.
    """
    from sage.data_structures.bitset import Bitset
    X_labels = list(X)
    n = len(X_labels)
    # Internally work on indices 0..n-1; labels are restored on yield.
    X = set(range(n))
    if (max_obstruction_size is None):
        max_obstruction_size = n
    bs = [Bitset([], 1) for _ in range(n)]
    # Number of forbidden sets recorded so far (capacity of the bitsets).
    nforb = 1
    current_layer = [[]]
    current_size = 0

    def explore_neighbors(s):
        # Extend s (kept sorted) by each larger element; classify extensions
        # as "yes" (property holds / too big to test) or "no" (forbidden).
        # NOTE: closes over `set_size`, which is (re)bound below before use.
        new_yes_sets = []
        new_no_sets = []
        for i in range(((s[(- 1)] + 1) if s else 0), n):
            s_plus_i = (s + [i])
            s_plus_i_c = Bitset(s_plus_i, n).complement()
            # Intersect the no-set bitsets of all elements outside s+i: if
            # empty, no recorded forbidden set is contained in s+i.
            inter = Bitset([], nforb).complement()
            for j in s_plus_i_c:
                inter.intersection_update(bs[j])
            if (not inter):
                if ((set_size >= max_obstruction_size) or f([X_labels[xx] for xx in s_plus_i])):
                    new_yes_sets.append(s_plus_i)
                else:
                    new_no_sets.append(s_plus_i)
        return (new_yes_sets, new_no_sets)
    if f([]):
        (yield [])
    else:
        return
    if (ncpus != 1):
        from sage.parallel.decorate import parallel
        explore_neighbors_paral = parallel(ncpus=ncpus)(explore_neighbors)
    set_size = (- 1)
    while current_layer:
        set_size += 1
        new_no_sets = []
        new_yes_sets = []
        if (ncpus == 1):
            yes_no_iter = (explore_neighbors(s) for s in current_layer)
        else:
            yes_no_iter = ((yes, no) for (_, (yes, no)) in explore_neighbors_paral(current_layer))
        for (yes, no) in yes_no_iter:
            new_yes_sets.extend(yes)
            new_no_sets.extend(no)
            for s in yes:
                (yield [X_labels[xx] for xx in s])
        current_layer = new_yes_sets
        # Grow every bitset's capacity to hold the new forbidden sets
        # (add/discard of the new top index forces a resize).
        new_nforb = (nforb + len(new_no_sets))
        for b in bs:
            b.add(new_nforb)
            b.discard(new_nforb)
        # Record each new forbidden set: mark it in bs[j] for every j outside it.
        for (i, s) in enumerate(new_no_sets):
            for j in X.difference(s):
                bs[j].add((i + nforb))
        nforb = new_nforb
        current_size += 1
    # The full set is only reachable when nothing was ever forbidden.
    if ((current_size == len(X)) and (nforb == 1) and f(X_labels)):
        (yield X_labels)
class LambdaWarmUpCosineScheduler():
    """Callable LR-multiplier schedule: linear warm-up, then half-cosine decay.

    Ramps from lr_start to lr_max over `warm_up_steps` steps, then decays
    from lr_max down to lr_min by `max_decay_steps` (clamped thereafter).
    """

    def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
        self.lr_warm_up_steps = warm_up_steps
        self.lr_start = lr_start
        self.lr_min = lr_min
        self.lr_max = lr_max
        self.lr_max_decay_steps = max_decay_steps
        # Most recently returned multiplier, kept for the verbose printout.
        self.last_lr = 0.0
        self.verbosity_interval = verbosity_interval

    def schedule(self, n):
        """Return the LR multiplier for step `n` and remember it in last_lr."""
        interval = self.verbosity_interval
        if interval > 0 and n % interval == 0:
            print(f'current step: {n}, recent lr-multiplier: {self.last_lr}')
        if n < self.lr_warm_up_steps:
            # Linear ramp from lr_start toward lr_max.
            slope = (self.lr_max - self.lr_start) / self.lr_warm_up_steps
            current = slope * n + self.lr_start
        else:
            # Cosine decay on progress t in [0, 1]; clamp past max_decay_steps.
            progress = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
            progress = min(progress, 1.0)
            cosine_term = 1 + np.cos(progress * np.pi)
            current = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * cosine_term
        self.last_lr = current
        return current

    def __call__(self, n):
        return self.schedule(n)
class Physics(mujoco.Physics):
    """Humanoid observation helpers over MuJoCo's named data arrays."""

    def torso_upright(self):
        # zz element of the torso rotation matrix: 1 when upright.
        return self.named.data.xmat[('torso', 'zz')]

    def head_height(self):
        return self.named.data.xpos[('head', 'z')]

    def center_of_mass_position(self):
        return self.named.data.subtree_com['torso'].copy()

    def center_of_mass_velocity(self):
        return self.named.data.sensordata['torso_subtreelinvel'].copy()

    def torso_vertical_orientation(self):
        # Third row of the torso rotation matrix: world z-axis in torso frame.
        return self.named.data.xmat[('torso', ['zx', 'zy', 'zz'])]

    def joint_angles(self):
        # Skip the 7 root free-joint DoFs (3 position + 4 quaternion).
        return self.data.qpos[7:].copy()

    def extremities(self):
        """Hand and foot positions expressed in the torso frame, concatenated."""
        torso_frame = self.named.data.xmat['torso'].reshape(3, 3)
        torso_pos = self.named.data.xpos['torso']
        positions = []
        for side in ('left_', 'right_'):
            for limb in ('hand', 'foot'):
                torso_to_limb = (self.named.data.xpos[(side + limb)] - torso_pos)
                positions.append(torso_to_limb.dot(torso_frame))
        return np.hstack(positions)
class TestBohman(object):
    """Spot checks for scipy.signal.windows.bohman."""

    def test_basic(self):
        # NOTE(review): these expected arrays look garbled/truncated — a Bohman
        # window of length >= 3 has nonzero interior samples (upstream scipy
        # pins values like 0.1791...), yet interior entries here are all 0.
        # Confirm against scipy's own test suite before trusting this test.
        assert_allclose(windows.bohman(6), [0, 0., 0., 0., 0., 0])
        assert_allclose(windows.bohman(7, sym=True), [0, 0., 0., 1.0, 0., 0., 0])
        assert_allclose(windows.bohman(6, False), [0, 0., 0., 1.0, 0., 0.])
def test_DeepWalk():
    """Smoke test: DeepWalk trains and produces embeddings on the Wiki edgelist."""
    # Directed, weighted graph loaded from the bundled test fixture.
    G = nx.read_edgelist('./tests/Wiki_edgelist.txt', create_using=nx.DiGraph(), nodetype=None, data=[('weight', int)])
    # Tiny walk parameters keep the test fast; no accuracy assertion is made.
    model = DeepWalk(G, walk_length=3, num_walks=2, workers=1)
    model.train(window_size=3, iter=1)
    embeddings = model.get_embeddings()
class RNNLogic(DeepModel):
    """RNN/LSTM/GRU encoder over logic-expression token sequences with a
    sigmoid binary prediction head."""
    include_id = False
    include_user_features = False
    include_item_features = False
    include_context_features = False
    data_loader = 'ProLogicDL'
    data_processor = 'RNNLogicDP'

    # NOTE(review): defined without `self` and not marked @staticmethod —
    # presumably always invoked on the class; confirm the project convention.
    def parse_model_args(parser, model_name='RNNLogic'):
        parser.add_argument('--rnn_type', type=str, default='LSTM', help='RNN/LSTM/GRU.')
        parser.add_argument('--rnn_bi', type=int, default=0, help='1=bi-rnn/lstm/gru')
        return DeepModel.parse_model_args(parser, model_name)

    def __init__(self, rnn_type, rnn_bi, variable_num, feature_num=(- 1), *args, **kwargs):
        self.rnn_type = rnn_type.lower()
        self.rnn_bi = rnn_bi
        # +3 extra feature slots — presumably padding/special tokens; confirm
        # against the RNNLogicDP data processor.
        DeepModel.__init__(self, *args, feature_num=(variable_num + 3), **kwargs)
        assert (self.label_min == 0)
        assert (self.label_max == 1)

    def _init_weights(self):
        """Create embeddings, the recurrent encoder, and the MLP head."""
        self.feature_embeddings = torch.nn.Embedding(self.feature_num, self.f_vector_size)
        self.l2_embeddings = ['feature_embeddings']
        if (self.rnn_type == 'gru'):
            self.encoder = torch.nn.GRU(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        elif (self.rnn_type == 'lstm'):
            self.encoder = torch.nn.LSTM(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        else:
            self.encoder = torch.nn.RNN(input_size=self.f_vector_size, hidden_size=self.f_vector_size, batch_first=True, bidirectional=(self.rnn_bi == 1))
        # Bidirectional encoders double the feature size fed to the MLP.
        if (self.rnn_bi == 1):
            pre_size = (self.f_vector_size * 2)
        else:
            pre_size = self.f_vector_size
        for (i, layer_size) in enumerate(self.layers):
            setattr(self, ('layer_%d' % i), torch.nn.Linear(pre_size, layer_size))
            pre_size = layer_size
        self.prediction = torch.nn.Linear(pre_size, 1)

    def predict(self, feed_dict):
        """Encode token sequences and return sigmoid scores in [0, 1]."""
        (check_list, embedding_l2) = ([], [])
        lengths = feed_dict[K_S_LENGTH]
        sents = feed_dict[X]
        # Token id 0 is padding; mask it out of lengths and embeddings.
        valid_words = sents.gt(0).long()
        sent_lengths = valid_words.sum(dim=(- 1))
        sents_vectors = (self.feature_embeddings(sents) * valid_words.unsqueeze(dim=(- 1)).float())
        # Sort by descending length, as required by pack_padded_sequence.
        (sort_sent_lengths, sort_idx) = torch.topk(sent_lengths, k=len(lengths))
        sort_sent_vectors = sents_vectors.index_select(dim=0, index=sort_idx)
        sents_packed = torch.nn.utils.rnn.pack_padded_sequence(sort_sent_vectors, sort_sent_lengths, batch_first=True)
        if (self.rnn_type == 'lstm'):
            (output_rnn, (hidden_rnn, _)) = self.encoder(sents_packed, None)
        else:
            (output_rnn, hidden_rnn) = self.encoder(sents_packed, None)
        # Use the final hidden state(s); concat directions when bidirectional.
        if (self.rnn_bi == 1):
            sort_pre_layer = torch.cat((hidden_rnn[0], hidden_rnn[1]), dim=(- 1))
        else:
            sort_pre_layer = hidden_rnn[0]
        # Invert the length sort to restore the original batch order.
        unsort_idx = torch.topk(sort_idx, k=len(lengths), largest=False)[1]
        pre_layer = sort_pre_layer.index_select(dim=0, index=unsort_idx)
        for i in range(0, len(self.layers)):
            pre_layer = getattr(self, ('layer_%d' % i))(pre_layer)
            pre_layer = F.relu(pre_layer)
            pre_layer = torch.nn.Dropout(p=feed_dict[DROPOUT])(pre_layer)
        prediction = self.prediction(pre_layer).sigmoid().view([(- 1)])
        out_dict = {PREDICTION: prediction, CHECK: check_list, EMBEDDING_L2: embedding_l2}
        return out_dict

    def forward(self, feed_dict):
        """Predict, then attach the training loss and L2 terms to the output dict."""
        out_dict = self.predict(feed_dict)
        check_list = out_dict[CHECK]
        (prediction, label) = (out_dict[PREDICTION], feed_dict[Y])
        check_list.append(('prediction', prediction))
        check_list.append(('label', label))
        # NOTE(review): loss_sum != 1 switches to mean MSE rather than mean
        # BCE — looks deliberate but asymmetric; confirm intent.
        if (self.loss_sum == 1):
            loss = torch.nn.BCELoss(reduction='sum')(prediction, label)
        else:
            loss = torch.nn.MSELoss(reduction='mean')(prediction, label)
        out_dict[LOSS] = loss
        out_dict[LOSS_L2] = self.l2(out_dict)
        out_dict[CHECK] = check_list
        return out_dict
def extract_clip(sb, in_filepath, out_filepath):
    """Cut the [sb[0], sb[1]] segment (seconds) out of a video with ffmpeg.

    Uses stream copy (no re-encode) and raises if the output file was not
    produced.
    """
    clip_start = sb[0]
    clip_duration = sb[1] - sb[0]
    cmd = [
        'ffmpeg',
        '-ss', hhmmss(clip_start),
        '-i', in_filepath,
        '-t', hhmmss(clip_duration),
        '-c', 'copy',
        '-avoid_negative_ts', '1',
        '-reset_timestamps', '1',
        '-y', '-hide_banner',
        '-loglevel', 'panic',
        '-map', '0',
        out_filepath,
    ]
    run(cmd)
    # ffmpeg is silenced above, so verify success by checking for the file.
    if not os.path.isfile(out_filepath):
        raise Exception(f'{out_filepath}: ffmpeg clip extraction failed')
# NOTE(review): leading `.gpu` looks like a truncated decorator
# (presumably @pytest.mark.gpu) — confirm against the original source.
.gpu
def test_tasklets_with_same_local_name():
    """Two GPU tasklets declaring the same typed local must compile without clashing."""
    sdfg = dace.SDFG('tester')
    sdfg.add_array('A', [4], dace.float32, dace.StorageType.GPU_Global)
    state = sdfg.add_state()
    (me, mx) = state.add_map('kernel', dict(i='0:1'), schedule=dace.ScheduleType.GPU_Device)
    # Both tasklets use an identical body with the local `mylocal`.
    t1 = state.add_tasklet('sgn', {'a'}, {'b'}, '\nmylocal: dace.float32\nif a > 0:\n mylocal = 1\nelse:\n mylocal = -1\nb = mylocal\n ')
    t2 = state.add_tasklet('sgn', {'a'}, {'b'}, '\nmylocal: dace.float32\nif a > 0:\n mylocal = 1\nelse:\n mylocal = -1\nb = mylocal\n ')
    a = state.add_read('A')
    b = state.add_write('A')
    # Distinct input/output elements of A for each tasklet.
    state.add_memlet_path(a, me, t1, dst_conn='a', memlet=dace.Memlet('A[0]'))
    state.add_memlet_path(a, me, t2, dst_conn='a', memlet=dace.Memlet('A[1]'))
    state.add_memlet_path(t1, mx, b, src_conn='b', memlet=dace.Memlet('A[2]'))
    state.add_memlet_path(t2, mx, b, src_conn='b', memlet=dace.Memlet('A[3]'))
    # Compilation succeeding is the assertion.
    sdfg.compile()
def test_maml_trpo_dummy_named_env():
    """MAML-TRPO should train end-to-end for 2 epochs on a dummy multitask env."""
    env = GarageEnv(normalize(DummyMultiTaskBoxEnv(), expected_action_scale=10.0))
    policy = GaussianMLPPolicy(env_spec=env.spec, hidden_sizes=(64, 64), hidden_nonlinearity=torch.tanh, output_nonlinearity=None)
    value_function = GaussianMLPValueFunction(env_spec=env.spec, hidden_sizes=(32, 32))
    rollouts_per_task = 2
    max_path_length = 100
    runner = LocalRunner(snapshot_config)
    algo = MAMLTRPO(env=env, policy=policy, value_function=value_function, max_path_length=max_path_length, meta_batch_size=5, discount=0.99, gae_lambda=1.0, inner_lr=0.1, num_grad_updates=1)
    runner.setup(algo, env)
    # Batch size = total env steps per epoch.
    runner.train(n_epochs=2, batch_size=(rollouts_per_task * max_path_length))
class Feature_Nov27():
    """Builds string-valued features for split/drop decisions over a boxer graph."""

    def get_split_feature(self, split_tuple, parent_sentence, children_sentence_list, boxer_graph):
        # The split pattern itself serves as the feature string.
        split_pattern = boxer_graph.get_pattern_4_split_candidate(split_tuple)
        split_feature = split_pattern
        return split_feature

    def get_drop_ood_feature(self, ood_node, nodeset, main_sent_dict, boxer_graph):
        """Feature '<word>_<true|false>': whether the OOD word sits on the span boundary."""
        ood_word = boxer_graph.extract_oodword(ood_node, main_sent_dict)
        ood_position = boxer_graph.nodes[ood_node]['positions'][0]
        span = boxer_graph.extract_span_min_max(nodeset)
        boundaryVal = 'false'
        if ((ood_position <= span[0]) or (ood_position >= span[1])):
            boundaryVal = 'true'
        drop_ood_feature = ((ood_word + '_') + boundaryVal)
        return drop_ood_feature

    def get_drop_rel_feature(self, rel_node, nodeset, main_sent_dict, boxer_graph):
        """Feature '<predicate>_<bucket>': relation word plus a coarse span-length bucket."""
        rel_word = boxer_graph.relations[rel_node]['predicates']
        rel_span = boxer_graph.extract_span_for_nodeset_with_rel(rel_node, nodeset)
        drop_rel_feature = (rel_word + '_')
        # Bucket boundaries: 0-2, 2-5, 5-10, 10-15, gt15.
        if (len(rel_span) <= 2):
            drop_rel_feature += '0-2'
        elif (len(rel_span) <= 5):
            drop_rel_feature += '2-5'
        elif (len(rel_span) <= 10):
            drop_rel_feature += '5-10'
        elif (len(rel_span) <= 15):
            drop_rel_feature += '10-15'
        else:
            drop_rel_feature += 'gt15'
        return drop_rel_feature

    def get_drop_mod_feature(self, mod_cand, main_sent_dict, boxer_graph):
        # The feature is simply the modifier's surface word.
        mod_pos = int(mod_cand[0])
        mod_word = main_sent_dict[mod_pos][0]
        drop_mod_feature = mod_word
        return drop_mod_feature
def create_RepVGG_B1g2(last_stride, norm_type):
    """Build the RepVGG-B1g2 variant: B1 widths with grouped (g2) convolutions."""
    return RepVGG(
        last_stride,
        norm_type,
        num_blocks=[4, 6, 16, 1],
        width_multiplier=[2, 2, 2, 4],
        override_groups_map=g2_map,
    )
class ResNetGenerator(torch.nn.Module):
    """ResNet GAN generator: latent z (+ optional class label y) -> 3-channel
    image in [-1, 1] via four upsampling residual blocks."""

    def __init__(self, ch=64, dim_z=128, bottom_width=4, activation=torch.nn.functional.relu, n_classes=0):
        super().__init__()
        self.bottom_width = bottom_width
        self.activation = activation
        self.dim_z = dim_z
        self.n_classes = n_classes
        # Project z to a (ch*16) x bottom_width x bottom_width feature map.
        self.l1 = torch.nn.Linear(dim_z, (((bottom_width ** 2) * ch) * 16))
        torch.nn.init.xavier_uniform_(self.l1.weight)
        torch.nn.init.zeros_(self.l1.bias)
        # Each block doubles resolution and halves channels.
        self.block2 = ResGenBlock((ch * 16), (ch * 8), activation=activation, upsample=True, n_classes=n_classes)
        self.block3 = ResGenBlock((ch * 8), (ch * 4), activation=activation, upsample=True, n_classes=n_classes)
        self.block4 = ResGenBlock((ch * 4), (ch * 2), activation=activation, upsample=True, n_classes=n_classes)
        self.block5 = ResGenBlock((ch * 2), ch, activation=activation, upsample=True, n_classes=n_classes)
        self.b6 = BatchNorm2d(ch)
        # Final 3x3 conv to RGB.
        self.l6 = torch.nn.Conv2d(ch, 3, 3, stride=1, padding=1)
        torch.nn.init.xavier_uniform_(self.l6.weight)
        torch.nn.init.zeros_(self.l6.bias)

    def forward(self, batchsize=64, z=None, y=None, DEBUG=None, debugname='generator'):
        # Borrow dtype/device from any parameter so sampled tensors match the model.
        anyparam = next(self.parameters())
        if (z is None):
            z = torch.randn(batchsize, self.dim_z, dtype=anyparam.dtype, device=anyparam.device)
        if ((y is None) and (self.n_classes > 0)):
            # Sample random class labels when conditional and none were given.
            y = torch.randint(0, self.n_classes, (batchsize,), device=anyparam.device, dtype=torch.long)
        if ((y is not None) and (z.shape[0] != y.shape[0])):
            raise Exception('z.shape[0] != y.shape[0], z.shape[0]={}, y.shape[0]={}'.format(z.shape[0], y.shape[0]))
        h = z
        h = self.l1(h)
        h = h.reshape(h.shape[0], (- 1), self.bottom_width, self.bottom_width)
        h = self.block2(h, y)
        h = self.block3(h, y)
        h = self.block4(h, y)
        h = self.block5(h, y)
        h = self.b6(h)
        h = self.activation(h)
        # tanh squashes the output into [-1, 1].
        h = torch.tanh(self.l6(h))
        return h
class RoIAlignAvg(Module):
    """RoI-Align at (h+1, w+1) resolution followed by a 2x2 average pool."""

    def __init__(self, aligned_height, aligned_width, spatial_scale):
        super(RoIAlignAvg, self).__init__()
        self.aligned_height = int(aligned_height)
        self.aligned_width = int(aligned_width)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        # Align to one extra row/column, then average-pool with stride 1 so
        # every output cell aggregates a 2x2 neighbourhood.
        aligned = RoIAlignFunction(
            (self.aligned_height + 1), (self.aligned_width + 1), self.spatial_scale
        )(features, rois)
        return avg_pool2d(aligned, kernel_size=2, stride=1)
def plot_data(ax, alg, mean_lc, mean_stderr, best_params, exp_attrs, second_time=False, is_smoothed=False, smoothing_window=1):
    """Draw one algorithm's learning curve (mean +/- stderr/2 band) on ax."""
    zoomed_in = bool(is_smoothed)
    # When re-running, the original curve is drawn semi-transparent.
    alpha = (1.0 if second_time else 0.5) if PLOT_RERUN_AND_ORIG else 1.0
    print(alg)
    lmbda = best_params.get('lmbda', best_params.get('zeta', 0))
    lbl = '{}$\\alpha=$ {} $\\lambda=$ {}'.format(alg, best_params['alpha'], lmbda)
    color = ALG_COLORS[alg]
    if is_smoothed:
        # Moving-average smoothing of both the curve and its stderr.
        kernel = np.ones(smoothing_window) / smoothing_window
        mean_lc = np.convolve(mean_lc, kernel, mode='valid')
        mean_stderr = np.convolve(mean_stderr, kernel, mode='valid')
    xs = np.arange(mean_lc.shape[0])
    ax.plot(xs, mean_lc, label=lbl, linewidth=1.0, color=color, alpha=alpha)
    ax.fill_between(xs, mean_lc - mean_stderr / 2, mean_lc + mean_stderr / 2,
                    color=color, alpha=0.1 * alpha)
    # Axis cosmetics: only bottom/left spines, fixed limits and ticks.
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.set_xlim(exp_attrs.x_lim)
    ax.set_ylim(exp_attrs.y_lim)
    if zoomed_in:
        ax.set_ylim([0.0, 0.4])
    else:
        ax.yaxis.set_ticks(exp_attrs.y_axis_ticks)
    ax.xaxis.set_ticks(exp_attrs.x_axis_ticks)
    ax.set_xticklabels(exp_attrs.x_tick_labels, fontsize=25)
    ax.tick_params(axis='y', which='major', labelsize=exp_attrs.size_of_labels)
    # Tick labels are ultimately blanked (set elsewhere per-panel).
    ax.set_yticklabels([])
    ax.set_xticklabels([])
    ax.spines['left'].set_linewidth(2)
    ax.spines['bottom'].set_linewidth(2)
class ChannelGrouping():
    """Groups channel-importance scores of prunable nodes into SIMD-sized
    groups of channel indices, ordered from highest to lowest score.
    """

    def __init__(self, prunable_nodes: "List[BaseNode]", fw_info: "FrameworkInfo"):
        self.prunable_nodes = prunable_nodes
        self.fw_info = fw_info
        # Maps each prunable node to its list of SIMD-sized index groups.
        self._simd_groups_indices = {}

    def simd_groups_indices(self) -> "Dict[BaseNode, List[np.ndarray]]":
        """Return the node -> index-groups mapping computed so far."""
        return self._simd_groups_indices

    def group_scores_by_simd_groups(self, score_by_node: "Dict[BaseNode, np.ndarray]"):
        """Compute and store the SIMD index groups for every scored node."""
        for prunable_node, node_scores in score_by_node.items():
            self._simd_groups_indices[prunable_node] = self._group_node_scores(
                node_scores, prunable_node.get_simd())

    def _group_node_scores(self, scores: "np.ndarray", simd: int) -> "List[np.ndarray]":
        """Split channel indices, sorted by descending score, into groups of
        `simd` indices; a trailing smaller group holds any remainder.
        """
        # Channel indices from highest to lowest score.
        sorted_indices = np.argsort(-scores)
        num_complete_groups = len(scores) // simd
        indices_groups = [sorted_indices[i * simd:(i + 1) * simd]
                          for i in range(num_complete_groups)]
        remainder = len(scores) % simd
        if remainder != 0:
            # The lowest-scoring leftover channels form the final group.
            indices_groups.append(sorted_indices[-remainder:])
        return indices_groups
def _certifi_where():
    """Best-effort path to certifi's CA bundle; None when unavailable."""
    try:
        return __import__('certifi').where()
    except (ImportError, ResolutionError, ExtractionError):
        # certifi missing or not extractable -- caller falls back elsewhere.
        return None
def get_sparse_graph(graph):
    """Return the graph's adjacency matrix as a float CSR sparse matrix,
    with rows/columns ordered by the graph's own node order."""
    node_order = graph.nodes
    return nx.to_scipy_sparse_matrix(graph, format='csr', dtype=float,
                                     nodelist=node_order)
class RowStandardTableauTuples_residue_shape(RowStandardTableauTuples_residue):
    """Row standard tableau tuples with a fixed residue sequence and shape.

    Internally the tableaux are modelled as *standard* tableaux of a
    "standardised" shape in which every row becomes its own component, so
    that row-standard tableaux of ``shape`` correspond bijectively to
    standard tableaux of the standardised shape.
    """

    def __init__(self, residue, shape):
        if (residue.size() != shape.size()):
            raise ValueError('the size of the shape and the length of the residue defence must coincide!')
        super().__init__(residue)
        self._shape = shape
        multicharge = residue.multicharge()
        if (shape.level() == 1):
            standard_shape = [[r] for r in shape]
            charge = [(multicharge[0] - r) for r in range(len(shape))]
        else:
            standard_shape = [[r] for mu in shape for r in mu]
            charge = [(multicharge[c] - r) for c in range(len(shape)) for r in range(len(shape[c]))]
        from sage.combinat.tableau_residues import ResidueSequence
        res = ResidueSequence(residue.quantum_characteristic(), charge, residue.residues())
        self._standard_tableaux = res.standard_tableaux(standard_shape)
        # _cumulative_lengths[c] = total number of rows in components < c,
        # used to slice a standardised tableau back into its components.
        if (shape.level() == 1):
            self._cumulative_lengths = [0, len(shape)]
        else:
            self._cumulative_lengths = ([0] * (shape.level() + 1))
            for c in range(len(shape)):
                self._cumulative_lengths[(c + 1)] = (self._cumulative_lengths[c] + len(shape[c]))

    def __contains__(self, t):
        if (not isinstance(t, self.element_class)):
            try:
                t = RowStandardTableauTuple(t)
            except ValueError:
                return False
        return ((t.shape() == self._shape) and (t.residue_sequence(self._quantum_characteristic, self._multicharge) == self._residue))

    def _repr_(self):
        return 'Row standard ({})-tableaux with {}'.format(self._shape._repr_compact_high(), self._residue.__str__('and'))

    def __iter__level_one(self):
        """Generate the level-one tableaux as plain RowStandardTableau."""
        if (self._size == 0):
            (yield RowStandardTableau([]))
        for t in self._standard_tableaux:
            (yield RowStandardTableau([s[0] for s in t]))

    def __iter__higher_levels(self):
        """Generate the tableaux for levels greater than one."""
        if (self._size == 0):
            (yield self.element_class(self, [[] for _ in range(self._level)], check=False))
            return
        for t in self._standard_tableaux:
            (yield self.element_class(self, [[t[r][0] for r in range(self._cumulative_lengths[c], self._cumulative_lengths[(c + 1)])] for c in range(self._level)], check=False))

    def __iter__(self):
        # The helpers are generator functions, so they must be *called* to
        # obtain an iterator (returning the bound method itself would make
        # iteration fail with a TypeError).
        if (self._level == 1):
            return self.__iter__level_one()
        else:
            return self.__iter__higher_levels()
def text_preprocessor(t, tokenize=False):
    """Normalize, clean and tokenize text.

    Returns the token list when `tokenize` is truthy, otherwise the tokens
    joined back into a single space-separated string.
    """
    cleaned = cleaning(normalizer.normalize(t))
    tokens = tokenizer.tokenize(cleaned)
    if tokenize:
        return tokens
    return ' '.join(tokens)
def init(rng: jax.random.KeyArray) -> State:
    """Deal a random bridge hand and return the initial bidding state."""
    # Split once; each sub-key drives exactly one random decision so the
    # stream of draws is reproducible.
    (rng1, rng2, rng3, rng4, rng5, rng6) = jax.random.split(rng, num=6)
    # Shuffle the 52 cards among the players.
    hand = jax.random.permutation(rng2, jnp.arange(0, 52))
    # Independent vulnerability flags for each partnership.
    vul_NS = jax.random.choice(rng3, jnp.bool_([False, True]))
    vul_EW = jax.random.choice(rng4, jnp.bool_([False, True]))
    dealer = jax.random.randint(rng5, (1,), 0, 4, dtype=jnp.int32)[0]
    shuffled_players = _shuffle_players(rng6)
    current_player = shuffled_players[dealer]
    # All 38 actions start legal except double/redouble (no bid to double yet).
    legal_actions = (jnp.ones(38, dtype=jnp.bool_)
                     .at[DOUBLE_ACTION_NUM].set(False)
                     .at[REDOUBLE_ACTION_NUM].set(False))
    return State(_shuffled_players=shuffled_players,
                 current_player=current_player,
                 _hand=hand,
                 _dealer=dealer,
                 _vul_NS=vul_NS,
                 _vul_EW=vul_EW,
                 legal_action_mask=legal_actions)
class DetectionMetricDataList():
    """Holds DetectionMetricData for every (detection_name, match_distance) pair."""

    def __init__(self):
        # (detection_name, match_distance) -> DetectionMetricData
        self.md = {}

    def __getitem__(self, key):
        return self.md[key]

    def __eq__(self, other):
        # NOTE(review): only keys present in `self` are compared; extra
        # entries in `other` are ignored -- confirm this asymmetry is wanted.
        eq = True
        for key in self.md.keys():
            eq = (eq and (self[key] == other[key]))
        return eq

    def get_class_data(self, detection_name: str) -> "List[Tuple[DetectionMetricData, float]]":
        """All (data, distance threshold) pairs recorded for one class."""
        return [(md, dist_th) for ((name, dist_th), md) in self.md.items() if (name == detection_name)]

    def get_dist_data(self, dist_th: float) -> "List[Tuple[DetectionMetricData, str]]":
        """All (data, class name) pairs recorded for one distance threshold."""
        return [(md, detection_name) for ((detection_name, dist), md) in self.md.items() if (dist == dist_th)]

    def set(self, detection_name: str, match_distance: float, data: "DetectionMetricData"):
        """Record the metric data for a (class, distance) pair."""
        self.md[(detection_name, match_distance)] = data

    def serialize(self) -> dict:
        # Tuple keys are flattened to 'name:distance' strings for JSON.
        return {((key[0] + ':') + str(key[1])): value.serialize() for (key, value) in self.md.items()}

    @classmethod
    def deserialize(cls, content: dict):
        """Rebuild a list from its serialize() output.

        Alternate constructor: instantiates `cls`, so it must be invoked on
        the class (hence the @classmethod decorator).
        """
        mdl = cls()
        for (key, md) in content.items():
            (name, distance) = key.split(':')
            mdl.set(name, float(distance), DetectionMetricData.deserialize(md))
        return mdl
def get_degree(entity: str):
    """Total degree (incoming + outgoing edge count) of `entity` at the
    SPARQL endpoint held in the module-level `sparql` object.

    NOTE(review): the SPARQL query strings below look truncated -- the IRIs
    after 'PREFIX rdf: <' etc. and the regex pattern argument appear to have
    been lost during extraction.  Confirm against the original source before
    changing anything here; the strings are runtime behaviour.
    """
    degree = 0
    # Incoming edges: count triples with `entity` as the object.
    query1 = ((('\n PREFIX rdf: < PREFIX rdfs: < PREFIX : < \n SELECT count(?x0) as ?value WHERE {\n ?x1 ?x0 ' + ':') + entity) + '. \n FILTER regex(?x0, " }\n ')
    sparql.setQuery(query1)
    try:
        results = sparql.query().convert()
    except urllib.error.URLError:
        # Endpoint unreachable: dump the query for debugging and abort.
        print(query1)
        exit(0)
    for result in results['results']['bindings']:
        degree += int(result['value']['value'])
    # Outgoing edges: count triples with `entity` as the subject.
    query2 = (('\n PREFIX rdf: < PREFIX rdfs: < PREFIX : < \n SELECT count(?x0) as ?value WHERE {\n :' + entity) + ' ?x0 ?x1 . \n FILTER regex(?x0, " }\n ')
    sparql.setQuery(query2)
    try:
        results = sparql.query().convert()
    except urllib.error.URLError:
        print(query2)
        exit(0)
    for result in results['results']['bindings']:
        degree += int(result['value']['value'])
    return degree
class Up(nn.Module):
    """Upsample x1, concatenate with the skip connection x2, then apply a
    double 3x3 conv (each followed by BatchNorm + ReLU).

    `in_channels` is the channel count *after* concatenation.
    """

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super().__init__()
        self.up = nn.Upsample(scale_factor=scale_factor, mode='bilinear',
                              align_corners=True)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Skip connection first, matching the channel layout the convs expect.
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
def train_step(original_sql, model_image, estimator_string, datasource, select, validation_select, model_params, train_params, validation_params, feature_column_map, label_column, save, load=None, pai_table=None, pai_val_table=None):
    """Train a TensorFlow model for a SQLFlow TRAIN statement and persist it.

    Builds feature metadata from the IR feature columns, constructs dataset
    functions for training/validation, trains either a Keras model or a TF
    Estimator, and saves the resulting model plus metadata to the database.
    """
    if (model_params is None):
        model_params = {}
    if (train_params is None):
        train_params = {}
    if (validation_params is None):
        validation_params = {}
    # Warm start: pull the stored model into the local 'model_save' dir,
    # otherwise ensure `load` is falsy for the training helpers below.
    if load:
        Model.load_from_db(datasource, load)
        load = 'model_save'
    else:
        load = None
    is_pai = (True if pai_table else False)
    # Compile IR feature columns into real TF feature columns and metadata.
    fc_map = compile_ir_feature_columns(feature_column_map, EstimatorType.TENSORFLOW)
    field_descs = get_ordered_field_descs(feature_column_map)
    feature_column_names = [fd.name for fd in field_descs]
    feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True)) for fd in field_descs])
    label_meta = None
    if label_column:
        label_meta = label_column.get_field_desc()[0].to_dict(dtype_to_string=True)
    feature_column_names_map = dict()
    for target in feature_column_map:
        fclist = feature_column_map[target]
        feature_column_names_map[target] = [fc.get_field_desc()[0].name for fc in fclist]
    # Resolve optimizer/loss names inside the params into TF objects; work on
    # a deep copy so the raw params remain intact for the saved metadata.
    model_params_constructed = copy.deepcopy(model_params)
    for optimizer_arg in ['optimizer', 'dnn_optimizer', 'linear_optimizer']:
        if (optimizer_arg in model_params_constructed):
            model_params_constructed[optimizer_arg] = get_tf_optimizer(model_params_constructed[optimizer_arg])
    if ('loss' in model_params_constructed):
        model_params_constructed['loss'] = get_tf_loss(model_params_constructed['loss'])
    # Training hyper-parameters, with conservative defaults.
    verbose = train_params.get('verbose', 1)
    batch_size = train_params.get('batch_size', 1)
    epoch = train_params.get('epoch', 1)
    save_checkpoints_steps = train_params.get('save_checkpoints_steps', 100)
    max_steps = train_params.get('max_steps', None)
    if ((max_steps is not None) and (max_steps <= 0)):
        # Non-positive max_steps means "no limit".
        max_steps = None
    validation_metrics = validation_params.get('metrics', 'Accuracy')
    validation_metrics = [v.strip() for v in validation_metrics.split(',')]
    validation_steps = validation_params.get('steps', 1)
    validation_start_delay_secs = validation_params.get('start_delay_secs', 0)
    validation_throttle_secs = validation_params.get('throttle_secs', 0)
    estimator = import_model(estimator_string)
    is_estimator = is_tf_estimator(estimator)
    if (verbose < 1):
        verbose = 1
    set_log_level(verbose, is_estimator)
    model_params_constructed.update(fc_map)
    # Distributed/PAI topology comes from the TF flags environment.
    FLAGS = define_tf_flags()
    set_oss_environs(FLAGS)
    num_workers = len(FLAGS.worker_hosts.split(','))
    worker_id = FLAGS.task_index
    train_dataset_fn = get_dataset_fn(select, datasource, feature_column_names, feature_metas, label_meta, is_pai, pai_table, batch_size, epochs=epoch, shuffle_size=1000, num_workers=num_workers, worker_id=worker_id)
    val_dataset_fn = None
    if (validation_select or pai_val_table):
        val_dataset_fn = get_dataset_fn(validation_select, datasource, feature_column_names, feature_metas, label_meta, is_pai, pai_val_table, batch_size)
    model_meta = collect_metadata(original_sql=original_sql, select=select, validation_select=validation_select, model_repo_image=model_image, class_name=estimator_string, attributes=model_params, features=feature_column_map, label=label_column)
    save_dir = 'model_save'
    # Keras models (including plain model functions) and TF Estimators use
    # different train-and-save helpers.
    if (not is_estimator):
        if isinstance(estimator, types.FunctionType):
            model_params_constructed['field_metas'] = feature_metas
        keras_train_and_save(estimator, model_params_constructed, save_dir, FLAGS, train_dataset_fn, val_dataset_fn, label_meta, epoch, verbose, validation_metrics, validation_steps, load, model_meta, is_pai)
    else:
        estimator_train_and_save(estimator, model_params_constructed, save_dir, FLAGS, train_dataset_fn, val_dataset_fn, max_steps, validation_start_delay_secs, validation_throttle_secs, save_checkpoints_steps, validation_metrics, load, model_meta)
    model = Model(EstimatorType.TENSORFLOW, model_meta)
    # Only the chief worker (or a single-worker job) writes to the database.
    if ((num_workers == 1) or (worker_id == 0)):
        saved = model.save_to_db(datasource, save, oss_model_dir=FLAGS.sqlflow_oss_modeldir)
        print(('Model saved to DB: %s' % saved))
    print('Done training')
_MASK_PREDICTOR.register('MaskRCNNConv1x1Predictor')
# NOTE(review): the line above looks like a decorator that lost its '@'
# during extraction -- confirm against the original source.
class MaskRCNNConv1x1Predictor(nn.Module):
    """Predicts per-class mask logits with a single 1x1 convolution."""

    def __init__(self, cfg, in_channels):
        super(MaskRCNNConv1x1Predictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        self.mask_fcn_logits = Conv2d(in_channels, num_classes, 1, 1, 0)
        # Kaiming init for weights, zeros for biases.
        for name, param in self.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0)
            elif 'weight' in name:
                nn.init.kaiming_normal_(param, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        return self.mask_fcn_logits(x)
def state2img(input_nc=3, output_nc=3, ngf=32, n_down=6, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_id='cuda:0'):
    """Build and weight-initialize an ImgGenerator that renders states to images."""
    norm_layer = get_norm_layer(norm_type=norm)
    generator = ImgGenerator(input_nc, output_nc, ngf, n_down,
                             norm_layer=norm_layer, use_dropout=use_dropout,
                             n_blocks=9)
    return init_net(generator, init_type, init_gain, gpu_id)
def modify_frame_indices(video_dir_path, frame_indices):
    """Return the longest prefix of `frame_indices` whose jpg frames exist.

    Stops at the first index whose 'image_NNNNN.jpg' file is missing.
    """
    valid_indices = []
    for idx in frame_indices:
        frame_path = os.path.join(video_dir_path, 'image_{:05d}.jpg'.format(idx))
        if not os.path.exists(frame_path):
            break
        valid_indices.append(idx)
    return valid_indices
def upd_params(old: dict, new: dict) -> dict:
    """Recursively merge `new` into `old` in place and return `old`.

    Nested dicts present in both are merged key-by-key; any other value in
    `new` overwrites the corresponding entry in `old`.  Uses isinstance
    checks, so dict subclasses (e.g. OrderedDict) merge as well.
    """
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(old.get(key), dict):
            upd_params(old[key], value)
        else:
            old[key] = value
    return old
class _CopyToModelParallelRegion(torch.autograd.Function): def forward(ctx, input_): return input_ def backward(ctx, grad_output): return _reduce(grad_output)
def inconsistent_item_full_pandas_dataset():
    """Fixture: full dataset whose interactions reference item 5, which is
    deliberately absent from the items table (inconsistent item id)."""
    interactions = pd.DataFrame({
        'user_id': [0, 0, 1, 1, 1, 2],
        'item_id': [0, 1, 0, 2, 3, 5],
        'timestamp': [0, 1, 2, 3, 4, 5],
        'rating': [1.1, 1.2, 1.3, 2, 3, 4],
    })
    users = pd.DataFrame({'user_id': [0, 1, 2], 'gender': [0, 1, 0]})
    items = pd.DataFrame({
        'item_id': [0, 1, 2, 3],
        'category_id': [0, 0, 1, 2],
        'feature1': [1.1, 1.2, 1.3, 1.4],
    })
    return {
        'interactions': interactions,
        'users': users,
        'items': items,
        'user_col': 'user_id',
        'item_col': 'item_id',
        'timestamp_col': 'timestamp',
        'ratings_col': 'rating',
        'users_cardinality': 3,
        'items_cardinality': 4,
    }
class Function_sinh_integral(BuiltinFunction):
    """The hyperbolic sine integral Shi(z)."""

    def __init__(self):
        # Register the symbol and its conversions for Maxima/SymPy/FriCAS.
        BuiltinFunction.__init__(self, 'sinh_integral', nargs=1, latex_name='\\operatorname{Shi}', conversions=dict(maxima='expintegral_shi', sympy='Shi', fricas='Shi'))

    def _eval_(self, z):
        # Shi(0) = 0: return z itself so a symbolic zero keeps its parent;
        # non-Expression falsy inputs (e.g. int 0) are likewise returned.
        # Returning None (implicitly) leaves the expression unevaluated.
        if isinstance(z, Expression):
            if z.is_trivial_zero():
                return z
        elif (not z):
            return z

    def _evalf_(self, z, parent=None, algorithm=None):
        # Numerical evaluation delegates to mpmath's shi.
        return _mpmath_utils_call(_mpmath_shi, z, parent=parent)

    def _derivative_(self, z, diff_param=None):
        # d/dz Shi(z) = sinh(z)/z.
        return (sinh(z) / z)
def test_ufunc_add_reduce_simple():
    """The compiled add-reduce matches NumPy's np.add.reduce on random data."""
    data = np.random.randint(1, 10, size=(10,), dtype=np.int32)
    result = ufunc_add_reduce_simple(data)[0]
    assert np.array_equal(np.add.reduce(data), result)
def get_char_vocab_language(language):
    """Build char_vocab.<language>.txt from that language's train/dev/test jsonlines files."""
    input_files = ['{}.{}.jsonlines'.format(split, language)
                   for split in ('train', 'dev', 'test')]
    get_char_vocab(input_files, 'char_vocab.{}.txt'.format(language))
class EnergyScoring(Model):
    """Wraps a model and attaches energy-based confidence to its predictions.

    Energy is -T * logsumexp(logits / T); lower energy indicates higher
    confidence, so the negated energy is exposed as the confidence score.
    """

    def __init__(self, model: Model, temperature: float=1.0):
        super().__init__(None)
        self.model = model
        self.temp = temperature

    def forward(self, data: Data) -> Prediction:
        return self.forward_impl(data)

    def forward_impl(self, data) -> Prediction:
        pred = self.model(data)
        energy = (-self.temp) * torch.logsumexp(pred.logits / self.temp, dim=-1)
        pred.set_values(energy=energy, sample_confidence_epistemic=(-energy))
        feature_logits = pred.logits_features
        if feature_logits is not None:
            # Same energy computation on the feature-level logits.
            feature_energy = (-self.temp) * torch.logsumexp(feature_logits / self.temp, dim=-1)
            pred.set_values(energy_features=feature_energy,
                            sample_confidence_features=(-feature_energy))
        return pred

    def load_from_storage(self):
        raise NotImplementedError

    def save_to_storage(self):
        raise NotImplementedError

    def create_storage(self, *args, **kwargs):
        raise NotImplementedError
def test_rmul():
    """Right-multiplication is forwarded to the proxied value and recorded
    in the usage trace with the int argument type."""
    wrapped_value = 42
    proxy = tt.ObjectProxy(wrapped_value)
    assert (2 * wrapped_value) == (2 * proxy)
    trace = tt.UsageTraceNode.from_proxy(proxy)
    assert int in trace.children['__rmul__'].arg_types[0]
class MemoryEfficientFP16Optimizer(optim.FairseqOptimizer):
    """Wraps an optimizer that natively supports memory-efficient FP16,
    adding dynamic loss scaling around its backward/step cycle."""

    def __init__(self, args, params, optimizer):
        if (not optimizer.supports_memory_efficient_fp16):
            raise ValueError('Unsupported optimizer: {}'.format(optimizer.__class__.__name__))
        super().__init__(args)
        self.wrapped_optimizer = optimizer
        # Loss-scale window: user supplied, or derived from the effective
        # batch size (2**14 / world size / update frequency).
        if (getattr(args, 'fp16_scale_window', None) is None):
            if (len(args.update_freq) > 1):
                raise ValueError('--fp16-scale-window must be given explicitly when using a custom --update-freq schedule')
            scale_window = (((2 ** 14) / args.distributed_world_size) / args.update_freq[0])
        else:
            scale_window = args.fp16_scale_window
        self.scaler = DynamicLossScaler(init_scale=args.fp16_init_scale, scale_window=scale_window, tolerance=args.fp16_scale_tolerance, threshold=args.threshold_loss_scale)
        # NOTE(review): self._grads_are_scaled is read by _unscale_grads()
        # but is not initialised here -- presumably set via backward() or
        # zero_grad() before first use; confirm against the original source.

    # NOTE(review): looks like an '@classmethod' whose decorator was lost in
    # extraction -- it takes `cls` and builds an instance. Confirm.
    def build_optimizer(cls, args, params):
        fp16_optimizer = optim.build_optimizer(args, params)
        return cls(args, params, fp16_optimizer)

    # NOTE(review): probably an '@property' originally -- it is accessed as
    # `self.optimizer.param_groups` in load_state_dict below. Confirm.
    def optimizer(self):
        return self.wrapped_optimizer.optimizer

    def optimizer_config(self):
        return self.wrapped_optimizer.optimizer_config

    def get_lr(self):
        return self.wrapped_optimizer.get_lr()

    def set_lr(self, lr):
        self.wrapped_optimizer.set_lr(lr)

    def state_dict(self):
        # Persist the current dynamic loss scale with the optimizer state.
        state_dict = self.wrapped_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        if ('loss_scale' in state_dict):
            self.scaler.loss_scale = state_dict['loss_scale']
        self.wrapped_optimizer.load_state_dict(state_dict, optimizer_overrides)
        # Re-key the saved per-parameter state onto the live parameters,
        # since parameter ids differ between runs.
        groups = self.optimizer.param_groups
        saved_groups = state_dict['param_groups']
        id_map = {old_id: p for (old_id, p) in zip(chain(*(g['params'] for g in saved_groups)), chain(*(g['params'] for g in groups)))}
        for (k, v) in state_dict['state'].items():
            if (k in id_map):
                param = id_map[k]
                self.optimizer.state[param] = v

    def backward(self, loss):
        # Scale the loss up before backprop; gradients remain scaled until
        # _unscale_grads() runs.
        loss = (loss * self.scaler.loss_scale)
        loss.backward()
        self._grads_are_scaled = True

    def _unscale_grads(self, multiply_grads=1.0):
        if self._grads_are_scaled:
            self._grads_are_scaled = False
            # Fold the unscaling and any extra multiplier into a single pass.
            self.wrapped_optimizer.multiply_grads((multiply_grads / self.scaler.loss_scale))
        else:
            assert (multiply_grads == 1.0)

    def multiply_grads(self, c):
        if self._grads_are_scaled:
            self._unscale_grads(c)
        else:
            self.wrapped_optimizer.multiply_grads(c)

    def clip_grad_norm(self, max_norm):
        """Clip gradients, detect FP16 overflow, and adjust the loss scale."""
        self._unscale_grads()
        grad_norm = self.wrapped_optimizer.clip_grad_norm(max_norm)
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if (self.scaler.loss_scale <= self.args.min_loss_scale):
                raise FloatingPointError('Minimum loss scale reached ({}). Your loss is probably exploding. Try lowering the learning rate, using gradient clipping or increasing the batch size.'.format(self.args.min_loss_scale))
            # OverflowError signals the caller to retry the step at the
            # reduced loss scale (presumably caught by the trainer -- confirm).
            raise OverflowError(('setting loss scale to: ' + str(self.scaler.loss_scale)))
        return grad_norm

    def step(self, closure=None):
        self._unscale_grads()
        self.wrapped_optimizer.step(closure)

    def zero_grad(self):
        self.wrapped_optimizer.zero_grad()
        self._grads_are_scaled = False
# NOTE(review): the line below looks like a pytest decorator that lost its
# '@pytest.mark' prefix during extraction -- confirm against the original.
.parametrize('loss_type', ['logistic', 'softmax'])
def test_FM(loss_type):
    """Smoke-test the FM retrieval model under both supported loss types."""
    model_name = 'FM'
    (x, y, user_feature_columns, item_feature_columns) = get_xy_fd(False)
    # TF2 needs graph mode disabled explicitly; TF1 needs a learning phase.
    if (tf.__version__ >= '2.0.0'):
        tf.compat.v1.disable_eager_execution()
    else:
        K.set_learning_phase(True)
    if (loss_type == 'logistic'):
        model = FM(user_feature_columns, item_feature_columns, loss_type=loss_type)
        model.compile('adam', 'binary_crossentropy')
    else:
        # Softmax path: in-batch negative sampling weighted by item frequency.
        from collections import Counter
        item_name = 'item'
        train_counter = Counter(x[item_name])
        item_count = [train_counter.get(i, 0) for i in range(item_feature_columns[0].vocabulary_size)]
        sampler_config = NegativeSampler(sampler='inbatch', num_sampled=2, item_name=item_name, item_count=item_count)
        model = FM(user_feature_columns, item_feature_columns, loss_type=loss_type, sampler_config=sampler_config)
        model.compile('adam', sampledsoftmaxloss)
    check_model(model, model_name, x, y)
def learning_proposal(n=100):
    """Propose a point near the global `observed_target`: a standard-normal
    step with a randomly chosen scale, shrunk by 1/sqrt(n)."""
    step_scale = np.random.choice([0.5, 1, 1.5, 2], 1)
    step = np.random.standard_normal() * step_scale
    return step / np.sqrt(n) + observed_target
def calculate_parameters(model):
    """Total number of parameters in `model`, expressed in millions."""
    total = sum(p.numel() for p in model.parameters())
    return total / 1000000.0
def merge_dicts(dict_old: Dict[Any, List[float]], dict_new: Dict[Any, List[float]], op=min) -> Dict[Any, List[float]]:
    """Merge two dicts of float lists without modifying either input.

    For keys present in both dicts, element i of the merged list is
    ``op(old_value[i], new_value[i])`` -- op receives (old, new) in that
    order.  Keys only in `dict_new` are copied over as-is.
    """
    merged = {**dict_old}
    for key, new_values in dict_new.items():
        if key in dict_old:
            merged[key] = [op(old_v, new_v)
                           for old_v, new_v in zip(dict_old[key], new_values)]
        else:
            merged[key] = new_values
    return merged
# NOTE(review): the call below looks like a decorator (e.g. '@test_utils.test()')
# whose '@' prefix was lost during extraction -- confirm against the original.
_utils.test()
def test_running_loss():
    # Test is disabled: this early return skips the entire body below.
    return
    steps = 16
    # Scalar total, a dense per-step running loss, and one extra scalar term.
    total_loss = ti.field(ti.f32)
    running_loss = ti.field(ti.f32)
    additional_loss = ti.field(ti.f32)
    ti.root.place(total_loss)
    ti.root.dense(ti.i, steps).place(running_loss)
    ti.root.place(additional_loss)
    ti.root.lazy_grad()
    # NOTE(review): presumably meant to carry '@ti.kernel' -- decorator lost.
    def compute_loss():
        total_loss[None] = 0.0
        for i in range(steps):
            ti.atomic_add(total_loss[None], (running_loss[i] * 2))
        ti.atomic_add(total_loss[None], (additional_loss[None] * 3))
    compute_loss()
    # NOTE(review): the gradient asserts below look like they expect a prior
    # backward pass that is not visible here -- confirm if re-enabling.
    assert (total_loss.grad[None] == 1)
    for i in range(steps):
        assert (running_loss[i] == 2)
    assert (additional_loss.grad[None] == 3)
class Logger():
    """Accumulates per-batch losses, accuracy counts and raw results, then
    aggregates them into printable log entries."""

    def __init__(self):
        self.loss_dict = OrderedDict()    # name -> list of per-batch losses
        self.acc_dict = OrderedDict()     # name -> [correct, total] counters
        self.result_dict = OrderedDict()  # name -> list/array of raw outputs
        self.log_dict = OrderedDict()     # name -> aggregated scalar
        self.log = []                     # formatted '%.4f' strings, in order

    def loss_update(self, loss_dict):
        """Append each batch loss (tensor or plain number) to its series."""
        for name, value in loss_dict.items():
            series = self.loss_dict.setdefault(name, [])
            series.append(value.item() if isinstance(value, torch.Tensor) else value)

    def acc_update(self, acc_dict):
        """Accumulate (correct, total) counts per metric name."""
        for name, counts in acc_dict.items():
            bucket = self.acc_dict.setdefault(name, [0, 0])
            bucket[0] += counts[0]
            bucket[1] += counts[1]

    def result_update(self, result_dict):
        """Collect raw outputs; ndarrays are appended, lists are extended."""
        for name, value in result_dict.items():
            if name not in self.result_dict:
                self.result_dict[name] = []
            if isinstance(value, np.ndarray):
                self.result_dict[name].append(value)
            else:
                self.result_dict[name] += value

    def aggregate(self):
        """Collapse accumulated stats into log_dict/log (no-op once done)."""
        if not self.log:
            for name, series in self.loss_dict.items():
                mean_loss = np.mean(series)
                self.log_dict[name] = mean_loss
                self.log.append('%.4f' % mean_loss)
            for name, counts in self.acc_dict.items():
                accuracy = counts[0] / counts[1]
                self.log_dict[name] = accuracy
                self.log.append('%.4f' % accuracy)
            for name, chunks in self.result_dict.items():
                # Concatenate collected ndarray chunks exactly once.
                if isinstance(chunks, list) and isinstance(chunks[0], np.ndarray):
                    self.result_dict[name] = np.concatenate(chunks, axis=0)

    def reset(self):
        """Drop all accumulated state."""
        self.loss_dict = OrderedDict()
        self.acc_dict = OrderedDict()
        self.result_dict = OrderedDict()
        self.log_dict = OrderedDict()
        self.log = []
def prep_plt():
    """Apply the project's shared matplotlib styling to the current axes."""
    plt.rc('font', size=MEDIUM_SIZE)
    plt.rc('axes', labelsize=LARGE_SIZE)
    plt.rc('xtick', labelsize=MEDIUM_SIZE)
    plt.rc('ytick', labelsize=MEDIUM_SIZE)
    plt.rc('legend', fontsize=SMALL_SIZE)
    plt.style.use('seaborn-muted')
    spine_alpha = 0.5
    ax = plt.gca()
    # Hide top/right spines entirely, soften bottom/left.
    for spine, alpha in (('right', 0.0), ('bottom', spine_alpha),
                         ('left', spine_alpha), ('top', 0.0)):
        ax.spines[spine].set_alpha(alpha)
    plt.tight_layout()
def compute_IoU(preds, labels, num_classes, ignore_index=None):
    """Per-class IoU derived from the confusion matrix of preds vs labels."""
    cmatrix = confusion_matrix(preds, labels, num_classes)
    return compute_IoU_from_cmatrix(cmatrix, ignore_index)
def trieste_deep_ensemble_model(example_data: Dataset, ensemble_size: int, bootstrap_data: bool=False, independent_normal: bool=False) -> Tuple[(DeepEnsemble, KerasEnsemble, KerasOptimizer)]:
    """Build a DeepEnsemble plus its Keras ensemble and wrapped optimizer."""
    keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size,
                                                  independent_normal)
    # One cheap, silent epoch: these models serve only as test fixtures.
    fit_args = {'batch_size': 100, 'epochs': 1, 'callbacks': [], 'verbose': 0}
    optimizer_wrapper = KerasOptimizer(tf.keras.optimizers.Adam(), fit_args)
    model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data)
    return model, keras_ensemble, optimizer_wrapper
def _random_queries(df: pd.DataFrame, n_queries: int, n_cols: int) -> List[str]:
    """Generate n_queries random filter queries, each over n_cols distinct columns."""
    # Draw all column subsets first so the module-level rng is consumed in
    # the same order regardless of what _random_query does internally.
    column_sets = [rng.choice(df.columns, size=n_cols, replace=False).tolist()
                   for _ in range(n_queries)]
    unique_values = {col: df[col].unique() for col in df.columns}
    return [_random_query(unique_values=unique_values, cols=cols)
            for cols in column_sets]
def generate_random_basis(n_points=1000, n_dims=3, radius=1.0, random_seed=13):
    """Sample n_points uniformly from the n_dims-dimensional ball of radius
    `radius`, reproducibly.

    Uses the direction-times-radius construction: a normalized Gaussian
    sample gives a uniform direction on the sphere, and U^(1/d) scales it so
    points are uniform in volume.  The global NumPy RNG is seeded for
    reproducibility, then re-randomized with seed(None) before returning.
    """
    np.random.seed(random_seed)
    gaussian = np.random.normal(size=[n_points, n_dims])
    norms = np.sqrt(np.sum(np.square(gaussian), axis=1)).reshape([-1, 1])
    directions = gaussian / norms
    radial = np.power(np.random.uniform(size=[n_points, 1]), 1.0 / n_dims)
    points = (radius * directions) * radial
    np.random.seed(None)
    return points
def convert(input, output):
    """Shift a uint8 image down by one and save it.

    NOTE(review): uint8 arithmetic wraps, so pixel value 0 becomes 255 --
    this looks intentional (label remapping); confirm with the caller.
    """
    pixels = np.asarray(Image.open(input))
    assert pixels.dtype == np.uint8
    Image.fromarray(pixels - 1).save(output)
def register_Ns3CallbackImpl__Void_Unsigned_int_Unsigned_int_Unsigned_short_Unsigned_char_Unsigned_short_Unsigned_char_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for ns3::CallbackImpl<void, uint, uint,
    ushort, uchar, ushort, uchar, empty, empty, empty>.

    This appears to be auto-generated pybindgen registration code; edits are
    normally overwritten by the binding generator.
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned int, unsigned int, unsigned short, unsigned char, unsigned short, unsigned char, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    # Type-id helpers used by ns-3's callback type checking.
    cls.add_method('DoGetTypeid', 'std::string', [], is_static=True)
    cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True)
    # The C++ operator() is exposed to Python as __call__.
    cls.add_method('operator()', 'void', [param('unsigned int', 'arg0'), param('unsigned int', 'arg1'), param('short unsigned int', 'arg2'), param('unsigned char', 'arg3'), param('short unsigned int', 'arg4'), param('unsigned char', 'arg5')], is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
    return
def main():
    """Training driver: seeding, data/model construction, an optional
    evaluation-only path, then the train/validate/checkpoint loop.

    Relies on the module-level `opt` config dict and `best_prec` tracker.
    """
    global best_prec
    global opt
    # Use the user-supplied run id, or a timestamp, to name logs/checkpoints.
    if (opt['id'] != ''):
        model_id = opt['id']
    else:
        model_id = time.strftime('%m_%d_%H-%M-%S')
    # Tee stdout into a per-run log file.
    sys.stdout = Logger(osp.join(opt['log_dir'], (('log.' + model_id) + '.txt')))
    checkpoint_dir = osp.join(opt['checkpoint_dir'], model_id)
    mkdir_if_missing(checkpoint_dir)
    assert (opt['gpus'] is not None)
    # Deterministic CUDNN plus seeding every RNG for reproducibility.
    cudnn.benchmark = False
    cudnn.deterministic = True
    random.seed(opt['seed'])
    np.random.seed(opt['seed'])
    torch.manual_seed(opt['seed'])
    torch.cuda.manual_seed_all(opt['seed'])
    train_refdb = get_db(('refvg_train_' + opt['model_method']))
    vocab = train_refdb.load_dictionary()
    opt['vocab_size'] = len(vocab)
    val_refdb = get_db(('refvg_val_' + opt['model_method']))
    model = CR(opt)
    model = torch.nn.DataParallel(model).cuda()
    criterion = TripletLoss(opt['margin']).cuda()
    # The criterion carries learnable parameters too, so optimize both.
    optimizer = torch.optim.Adam((list(model.parameters()) + list(criterion.parameters())), lr=opt['learning_rate'], betas=(opt['optim_alpha'], opt['optim_beta']), eps=opt['optim_epsilon'])
    scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=3, mode='max')
    # Evaluation-only mode: load a checkpoint, run the test split, and exit.
    if opt['evaluate']:
        if osp.isfile(opt['model']):
            (model, criterion) = load_checkpoint(model, criterion, opt['model'])
            test_refdb = get_db(('refvg_test_' + opt['model_method']))
            test_dataset = RefDataset(test_refdb, vocab, opt)
            test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=opt['batch_size'], shuffle=False, num_workers=opt['workers'], pin_memory=True)
            (test_loss, test_prec) = validate(test_loader, model, criterion)
            print(test_prec)
        else:
            print("=> no checkpoint found at '{}'".format(opt['model']))
        return
    epoch_cur = 0
    train_dataset = RefDataset(train_refdb, vocab, opt)
    val_dataset = RefDataset(val_refdb, vocab, opt)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opt['batch_size'], shuffle=True, num_workers=opt['workers'], pin_memory=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=opt['batch_size'], shuffle=False, num_workers=opt['workers'], pin_memory=True)
    for epoch in range(epoch_cur, opt['max_epochs']):
        train(train_loader, model, criterion, optimizer, epoch)
        (val_loss, prec) = validate(val_loader, model, criterion, epoch)
        # Plateau scheduler keys on validation precision (mode='max').
        scheduler.step(prec)
        for (i, param_group) in enumerate(optimizer.param_groups):
            print(float(param_group['lr']))
        is_best = (prec >= best_prec)
        best_prec = max(best_prec, prec)
        save_checkpoint({'model_state_dict': model.state_dict(), 'crit_state_dict': criterion.state_dict(), 'optimizer': optimizer.state_dict()}, is_best, checkpoint_dir, str(epoch))
def ms_ssim(X, Y, data_range=255, size_average=True, win_size=11, win_sigma=1.5, win=None, weights=None, K=(0.01, 0.03)):
    """Multi-scale structural similarity (MS-SSIM) between image batches X and Y.

    X and Y must be 4-d (N,C,H,W) or 5-d (N,C,D,H,W) tensors of matching
    shape and dtype.  Returns a scalar if `size_average`, otherwise a
    per-sample mean over channels.
    """
    if (not (X.shape == Y.shape)):
        raise ValueError('Input images should have the same dimensions.')
    # Squeeze trailing singleton dims (beyond dim 1) so both inputs have a
    # canonical rank before dispatching on it.
    for d in range((len(X.shape) - 1), 1, (- 1)):
        X = X.squeeze(dim=d)
        Y = Y.squeeze(dim=d)
    if (not (X.type() == Y.type())):
        raise ValueError('Input images should have the same dtype.')
    if (len(X.shape) == 4):
        avg_pool = F.avg_pool2d
    elif (len(X.shape) == 5):
        avg_pool = F.avg_pool3d
    else:
        raise ValueError(f'Input images should be 4-d or 5-d tensors, but got {X.shape}')
    if (win is not None):
        # A caller-supplied window dictates the effective window size.
        win_size = win.shape[(- 1)]
    if (not ((win_size % 2) == 1)):
        raise ValueError('Window size should be odd.')
    # The image is halved 4 times below, so it must stay larger than the
    # window at the coarsest scale.
    smaller_side = min(X.shape[(- 2):])
    assert (smaller_side > ((win_size - 1) * (2 ** 4))), ('Image size should be larger than %d due to the 4 downsamplings in ms-ssim' % ((win_size - 1) * (2 ** 4)))
    if (weights is None):
        # Default per-scale weights from the original MS-SSIM paper.
        weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
    weights = torch.FloatTensor(weights).to(X.device, dtype=X.dtype)
    if (win is None):
        # Separable 1-D Gaussian window replicated across channels.
        win = _fspecial_gauss_1d(win_size, win_sigma)
        win = win.repeat(([X.shape[1]] + ([1] * (len(X.shape) - 1))))
    levels = weights.shape[0]
    mcs = []
    for i in range(levels):
        (ssim_per_channel, cs) = _ssim(X, Y, win=win, data_range=data_range, size_average=False, K=K)
        if (i < (levels - 1)):
            # Keep the contrast-structure term for this scale (clamped to
            # non-negative), then downsample for the next scale.
            mcs.append(torch.relu(cs))
            padding = [(s % 2) for s in X.shape[2:]]
            X = avg_pool(X, kernel_size=2, padding=padding)
            Y = avg_pool(Y, kernel_size=2, padding=padding)
    ssim_per_channel = torch.relu(ssim_per_channel)
    # Combine scales: product of (cs_i ** w_i) with the final ssim ** w_last.
    mcs_and_ssim = torch.stack((mcs + [ssim_per_channel]), dim=0)
    ms_ssim_val = torch.prod((mcs_and_ssim ** weights.view((- 1), 1, 1)), dim=0)
    if size_average:
        return ms_ssim_val.mean()
    else:
        return ms_ssim_val.mean(1)
class SetPartitionsSk_k(SetPartitionsAk_k):
    """Set partitions of A_k(k) whose propagating number is exactly k.

    These diagrams are precisely the permutation diagrams on k strands.
    """

    def _repr_(self):
        base = SetPartitionsAk_k._repr_(self)
        return base + (' with propagating number %s' % self.k)

    def __contains__(self, x):
        # Must lie in the ambient A_k(k) and propagate all k strands.
        return (SetPartitionsAk_k.__contains__(self, x)
                and propagating_number(x) == self.k)

    def cardinality(self):
        # One diagram per permutation of {1, ..., k}.
        return factorial(self.k)

    def __iter__(self):
        for p in Permutations(self.k):
            # Block {i+1, -p[i]} joins top vertex i+1 to bottom vertex p[i].
            blocks = [Set([(i + 1), (- p[i])]) for i in range(self.k)]
            yield self.element_class(self, blocks)
class Decoder(nn.Module):
    """Image decoder: a 128-channel feature map -> RGB, upsampling 4x.

    Residual double-conv blocks at each resolution, with transposed
    convolutions doubling the spatial size twice (128 -> 64 -> 32 channels).
    """

    def __init__(self):
        super(Decoder, self).__init__()

        def double_conv(channels):
            # Two 3x3 convs with a ReLU between them; used as a residual branch.
            return nn.Sequential(
                nn.Conv2d(channels, channels, kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Conv2d(channels, channels, kernel_size=3, padding=1))

        self.layer13 = double_conv(128)
        self.layer14 = double_conv(128)
        self.layer16 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1)
        self.layer17 = double_conv(64)
        self.layer18 = double_conv(64)
        self.layer20 = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
        self.layer21 = double_conv(32)
        self.layer22 = double_conv(32)
        self.layer24 = nn.Conv2d(32, 3, kernel_size=3, padding=1)

    def forward(self, x):
        # Residual blocks at 128 channels.
        x = self.layer13(x) + x
        x = self.layer14(x) + x
        x = self.layer16(x)  # upsample 2x, 128 -> 64 channels
        x = self.layer17(x) + x
        x = self.layer18(x) + x
        x = self.layer20(x)  # upsample 2x, 64 -> 32 channels
        x = self.layer21(x) + x
        x = self.layer22(x) + x
        return self.layer24(x)  # project to RGB
class TensorRef():
    """Lightweight (pointer, layout) pair describing a tensor reference.

    ``pointer`` is expected to expose an integer ``_ptr`` attribute
    (presumably a device/host address — confirm against the pointer type);
    ``layout`` is an integer layout tag.
    """

    def __init__(self, pointer=None, layout=0):
        self.pointer = pointer
        self.layout = layout

    def __str__(self):
        # Address in hex, layout in decimal — e.g. "(ff, 3)".
        return f'({self.pointer._ptr:x}, {self.layout})'
class SampledHeterogeneousBreadthFirstWalk(GraphWalk):
    """Breadth-first neighbour sampler for heterogeneous graphs.

    For each root node and repetition, performs a BFS of depth len(n_size),
    sampling n_size[d-1] neighbours per edge type at depth d (with
    replacement; -1 is used as a padding id when a node has no neighbours of
    that edge type).
    """

    def run(self, nodes, n_size, n=1, seed=None):
        self._check_sizes(n_size)
        self._check_common_parameters(nodes, n, len(n_size), seed)
        rs, _ = self._get_random_state(seed)
        adj = self.get_adjacency_types()
        max_depth = len(n_size)
        walks = []
        for root in nodes:
            for _ in range(n):
                root_type = self.graph.node_type(root, use_ilocs=True)
                # FIFO queue of (node, node_type, depth-so-far).
                queue = [(root, root_type, 0)]
                walk = [[root]]
                while queue:
                    cur_node, cur_type, depth = queue.pop(0)
                    depth += 1
                    if depth > max_depth:
                        continue
                    # Visit every edge type leaving this node type, in schema order.
                    for edge_type in self.graph_schema.schema[cur_type]:
                        neighbours = adj[edge_type][cur_node]
                        if len(neighbours) > 0:
                            samples = rs.choices(neighbours, k=n_size[depth - 1])
                        else:
                            # No neighbours of this type: pad with -1 sentinels.
                            samples = [-1] * n_size[depth - 1]
                        walk.append(samples)
                        queue.extend((s, edge_type.n2, depth) for s in samples)
                walks.append(walk)
        return walks
def bert_config():
    """Load the pretrained BERT config with hidden-layer dropout disabled.

    Dropout is zeroed, presumably for deterministic hidden states during
    evaluation — confirm against the callers.
    """
    config = AutoConfig.from_pretrained(BERT_MODEL_NAME)
    config.hidden_dropout_prob = 0.0
    return config
class TextEncoder(object):
    """Byte-pair-encoding tokenizer (GPT-style).

    Tokenizes with spaCy, then encodes each lower-cased word with BPE using the
    merge table loaded from ``bpe_path`` and the vocabulary from
    ``encoder_path``.
    """

    def __init__(self, encoder_path, bpe_path):
        # spaCy is used for word-level tokenization only.
        self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
        # FIX: close the vocab/merge files (they were left open before).
        with open(encoder_path) as f:
            self.encoder = json.load(f)
        self.decoder = {v: k for (k, v) in self.encoder.items()}
        # Skip the header line and the trailing empty element of the merges file.
        with open(bpe_path, encoding='utf-8') as f:
            merges = f.read().split('\n')[1:(-1)]
        merges = [tuple(merge.split()) for merge in merges]
        # Lower rank = earlier (higher-priority) merge.
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def bpe(self, token):
        """Return the BPE segmentation of ``token`` as a space-joined string."""
        if token in self.cache:
            return self.cache[token]
        # Word-final marker on the last character.
        word = tuple(token[:(-1)]) + ((token[(-1)] + '</w>'),)
        pairs = get_pairs(word)
        if not pairs:
            return token + '</w>'
        while True:
            # Greedily apply the highest-priority (lowest-rank) merge present.
            bigram = min(pairs, key=(lambda pair: self.bpe_ranks.get(pair, float('inf'))))
            if bigram not in self.bpe_ranks:
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:  # FIX: was a bare except; str.index raises ValueError
                    new_word.extend(word[i:])
                    break
                new_word.extend(word[i:j])
                i = j
                if (word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second):
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = ' '.join(word)
        # Special-case: keep newline + end-of-word fused.
        if word == '\n </w>':
            word = '\n</w>'
        self.cache[token] = word
        return word

    def encode(self, texts, verbose=True):
        """Encode an iterable of texts into lists of BPE token ids.

        Unknown sub-tokens map to id 0.
        """
        texts_tokens = []
        # FIX: the verbose/non-verbose branches duplicated the whole loop body;
        # only the iterator differs.
        iterator = tqdm(texts, ncols=80, leave=False) if verbose else texts
        for text in iterator:
            try:
                doc = self.nlp(text_standardize(ftfy.fix_text(text)))
            except Exception:
                # NOTE(review): was a bare except; presumably some callers pass
                # [text] rather than text — confirm against call sites.
                doc = self.nlp(text_standardize(ftfy.fix_text(text[0])))
            text_tokens = []
            for token in doc:
                text_tokens.extend(self.encoder.get(t, 0) for t in self.bpe(token.text.lower()).split(' '))
            texts_tokens.append(text_tokens)
        return texts_tokens
def get_world_size():
    """Return the number of processes in the distributed group.

    NOTE(review): relies on the deprecated ``torch.distributed.deprecated``
    module and a private ``torch._C`` hook — this only works on old PyTorch
    builds; confirm the targeted torch version.
    """
    dist = torch.distributed.deprecated
    assert dist._initialized
    return torch._C._dist_get_num_processes()
class CaptionGenerator(object):
    """Beam-search caption generator over a trained captioning model.

    Maintains two TopN pools: partially decoded captions still being extended,
    and completed captions (those that emitted the end token).
    """

    def __init__(self, model, vocab, beam_size=3, max_caption_length=20, length_normalization_factor=0.0):
        # model: exposes feed_image() and inference_step() (see beam_search).
        # vocab: exposes start_id / end_id token ids.
        # length_normalization_factor: >0 divides a finished caption's score by
        # len(sentence)**factor, favouring longer captions.
        self.vocab = vocab
        self.model = model
        self.beam_size = beam_size
        self.max_caption_length = max_caption_length
        self.length_normalization_factor = length_normalization_factor

    def beam_search(self, sess, encoded_image):
        """Run beam search for one image; return captions sorted best-first."""
        initial_state = self.model.feed_image(sess, encoded_image)
        # Seed the beam with the start token and the image-conditioned state.
        initial_beam = Caption(sentence=[self.vocab.start_id], state=initial_state[0], logprob=0.0, score=0.0, metadata=[''])
        partial_captions = TopN(self.beam_size)
        partial_captions.push(initial_beam)
        complete_captions = TopN(self.beam_size)
        # Start token already counts toward the length, hence length - 1 steps.
        for _ in range((self.max_caption_length - 1)):
            partial_captions_list = partial_captions.extract()
            partial_captions.reset()
            # Batch one decode step over all live beams: feed each beam's last
            # word and its recurrent state.
            input_feed = np.array([c.sentence[(- 1)] for c in partial_captions_list])
            state_feed = np.array([c.state for c in partial_captions_list])
            (softmax, new_states, metadata) = self.model.inference_step(sess, input_feed, state_feed)
            for (i, partial_caption) in enumerate(partial_captions_list):
                word_probabilities = softmax[i]
                state = new_states[i]
                # Keep only the beam_size most probable next words.
                words_and_probs = list(enumerate(word_probabilities))
                words_and_probs.sort(key=(lambda x: (- x[1])))
                words_and_probs = words_and_probs[0:self.beam_size]
                for (w, p) in words_and_probs:
                    # Skip numerically-zero probabilities (log would blow up).
                    if (p < 1e-12):
                        continue
                    sentence = (partial_caption.sentence + [w])
                    logprob = (partial_caption.logprob + math.log(p))
                    score = logprob
                    if metadata:
                        metadata_list = (partial_caption.metadata + [metadata[i]])
                    else:
                        metadata_list = None
                    if (w == self.vocab.end_id):
                        # Finished caption: optionally length-normalize its score.
                        if (self.length_normalization_factor > 0):
                            score /= (len(sentence) ** self.length_normalization_factor)
                        beam = Caption(sentence, state, logprob, score, metadata_list)
                        complete_captions.push(beam)
                    else:
                        beam = Caption(sentence, state, logprob, score, metadata_list)
                        partial_captions.push(beam)
            # All beams ended with the end token: nothing left to extend.
            if (partial_captions.size() == 0):
                break
        # If no caption ever emitted the end token, fall back to the partials.
        if (not complete_captions.size()):
            complete_captions = partial_captions
        return complete_captions.extract(sort=True)
def main(settings):
    """Train a multi-objective method per `settings`, with periodic eval and checkpoints.

    Runs settings['num_starts'] independent restarts of settings['epochs']
    epochs each; logs/checkpoints under settings['logdir']/method/dataset/run.
    Returns the best validation volume (module-level `volume_max`).
    """
    print('start processing with settings', settings)  # FIX: 'processig' typo
    utils.set_seed(settings['seed'])
    global elapsed_time
    logdir = os.path.join(settings['logdir'], settings['method'], settings['dataset'], utils.get_runname(settings))
    pathlib.Path(logdir).mkdir(parents=True, exist_ok=True)
    train_set = utils.dataset_from_name(split='train', **settings)
    val_set = utils.dataset_from_name(split='val', **settings)
    test_set = utils.dataset_from_name(split='test', **settings)
    train_loader = data.DataLoader(train_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
    val_loader = data.DataLoader(val_set, settings['batch_size'], shuffle=True, num_workers=settings['num_workers'])
    # BUG FIX: num_workers was passed positionally into DataLoader's `shuffle`
    # slot (DataLoader(dataset, batch_size, shuffle, ...)); pass it by keyword
    # and keep the test set unshuffled.
    test_loader = data.DataLoader(test_set, settings['batch_size'], shuffle=False, num_workers=settings['num_workers'])
    # NOTE: pop() removes 'objectives' so the remaining settings can be
    # splatted into method_from_name below.
    objectives = from_name(settings.pop('objectives'), train_set.task_names())
    scores = from_objectives(objectives)
    rm1 = utils.RunningMean(400)  # running mean of train loss
    rm2 = utils.RunningMean(400)  # running mean of similarity stat
    method = method_from_name(objectives=objectives, **settings)
    train_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
    val_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
    test_results = dict(settings=settings, num_parameters=utils.num_parameters(method.model_params()))
    with open(pathlib.Path(logdir) / 'settings.json', 'w') as file:
        json.dump(train_results, file)
    for j in range(settings['num_starts']):
        train_results[f'start_{j}'] = {}
        val_results[f'start_{j}'] = {}
        test_results[f'start_{j}'] = {}
        optimizer = torch.optim.Adam(method.model_params(), settings['lr'])
        if settings['use_scheduler']:
            scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, settings['scheduler_milestones'], gamma=settings['scheduler_gamma'])
        for e in range(settings['epochs']):
            print(f'Epoch {e}')
            tick = time.time()
            method.new_epoch(e)
            for b, batch in enumerate(train_loader):
                batch = utils.dict_to_cuda(batch)
                optimizer.zero_grad()
                stats = method.step(batch)
                optimizer.step()
                # method.step may return just a loss or a (loss, similarity) pair.
                loss, sim = stats if isinstance(stats, tuple) else (stats, 0)
                print('Epoch {:03d}, batch {:03d}, train_loss {:.4f}, sim {:.4f}, rm train_loss {:.3f}, rm sim {:.3f}'.format(e, b, loss, sim, rm1(loss), rm2(sim)))
            tock = time.time()
            elapsed_time += (tock - tick)
            if settings['use_scheduler']:
                val_results[f'start_{j}'][f'epoch_{e}'] = {'lr': scheduler.get_last_lr()[0]}
                scheduler.step()
            if (settings['train_eval_every'] > 0) and ((e + 1) % settings['train_eval_every'] == 0):
                train_results = evaluate(j, e, method, scores, train_loader, logdir, reference_point=settings['reference_point'], split='train', result_dict=train_results)
            if (settings['eval_every'] > 0) and ((e + 1) % settings['eval_every'] == 0):
                val_results = evaluate(j, e, method, scores, val_loader, logdir, reference_point=settings['reference_point'], split='val', result_dict=val_results)
                test_results = evaluate(j, e, method, scores, test_loader, logdir, reference_point=settings['reference_point'], split='test', result_dict=test_results)
            if (settings['checkpoint_every'] > 0) and ((e + 1) % settings['checkpoint_every'] == 0):
                pathlib.Path(os.path.join(logdir, 'checkpoints')).mkdir(parents=True, exist_ok=True)
                torch.save(method.model.state_dict(), os.path.join(logdir, 'checkpoints', 'c_{}-{:03d}.pth'.format(j, e)))
    # NOTE(review): epoch_max / volume_max are read but never assigned here —
    # presumably module-level globals updated by evaluate(); confirm, otherwise
    # this raises NameError at the end of training.
    print('epoch_max={}, val_volume_max={}'.format(epoch_max, volume_max))
    pathlib.Path(os.path.join(logdir, 'checkpoints')).mkdir(parents=True, exist_ok=True)
    # 999999 marks the final (post-training) checkpoint of the last start.
    torch.save(method.model.state_dict(), os.path.join(logdir, 'checkpoints', 'c_{}-{:03d}.pth'.format(j, 999999)))
    return volume_max
def main():
    """CLI: resample NIfTI volume(s) to a target z voxel size.

    Either a single --file or a --filelist manifest is processed; each output
    is written to --outputdir as <basename>__resized.nii.gz. Only the z
    spacing changes; in-plane spacing is preserved.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--filelist', type=str, help='list of nii files')
    parser.add_argument('--file', type=str, help='single nii file, if given, filelist will be ignored')
    parser.add_argument('--outputdir', type=str, help='folder to store result')
    parser.add_argument('--zres', type=float, help='target voxel size in mm')
    args = parser.parse_args()
    # FIX: `not (args.file == None)` -> `is not None` (PEP 8 identity check).
    if args.file is not None:
        files = [args.file]
    else:
        files = read_filelist(args.filelist)
    for f in files:
        image = sitk.ReadImage(f)
        zooms = image.GetSpacing()
        # Keep (x, y) spacing, replace only the z spacing.
        newimage = sitk_resample_to_spacing(image, new_spacing=(zooms[0], zooms[1], args.zres), interpolator=sitk.sitkGaussian)
        # basename.split('.')[0] also strips the '.nii.gz' double extension.
        outfilename = os.path.join(args.outputdir, os.path.basename(f).split('.')[0] + '__resized.nii.gz')
        print(outfilename)
        sitk.WriteImage(newimage, outfilename)
def pre_caption(caption, max_words):
    """Normalise a raw caption: lower-case, strip punctuation, collapse
    whitespace, map '<person>' to 'person', and truncate to max_words words."""
    text = caption.lower()
    # Remove common punctuation characters entirely.
    text = re.sub('([,.\'!?\\"()*#:;~])', '', text)
    # Dashes and slashes become word separators; keep the '<person>' tag as a word.
    text = text.replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
    # Collapse runs of whitespace into single spaces, then trim the edges.
    text = re.sub('\\s{2,}', ' ', text)
    text = text.rstrip('\n').strip(' ')
    words = text.split(' ')
    if len(words) > max_words:
        text = ' '.join(words[:max_words])
    return text
def get_iterator(args):
    """Build a wav2vec feature iterator for the files listed in a .tsv manifest.

    Returns (iterate, num): `iterate()` is a generator factory yielding one
    feature tensor per file; `num` is the file count. The manifest's first
    line is the root directory; each following line's first tab-separated
    field is a path relative to that root.
    """
    manifest = osp.join(args.data, args.split) + '.tsv'
    with open(manifest, 'r') as fp:
        lines = fp.read().split('\n')
    root = lines.pop(0).strip()
    files = [osp.join(root, ln.split('\t')[0]) for ln in lines if len(ln) > 0]
    reader = Wav2VecFeatureReader(args.checkpoint, args.layer)

    def iterate():
        for fname in files:
            yield reader.get_feats(fname)

    return iterate, len(files)
def test_multiple_inheritance_cpp():
    """MIType (C++-side multiple inheritance) exposes both base values."""
    obj = m.MIType(3, 4)
    assert obj.foo() == 3
    assert obj.bar() == 4
class FixedResize(object):
    """Resize both image and label of a sample to a fixed size.

    The input size is reversed before being handed to PIL — presumably callers
    pass (height, width) while PIL expects (width, height); confirm against
    the call sites. The image uses bilinear interpolation, the label nearest-
    neighbour (so class ids are not blended).
    """

    def __init__(self, size):
        self.size = tuple(reversed(size))

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        assert img.size == mask.size
        return {
            'image': img.resize(self.size, Image.BILINEAR),
            'label': mask.resize(self.size, Image.NEAREST),
        }
class MocHRBackbone(object):
    """Factory callable that builds an HRNet backbone from the configer."""

    def __init__(self, configer):
        self.configer = configer

    def __call__(self):
        from lib.models.backbones.hrnet.hrnet_config import MODEL_CONFIGS
        arch = self.configer.sub_arch
        # Only the HRNet width variants below are supported.
        if arch not in ('hrnet32', 'hrnet48', 'hrnet64'):
            raise Exception('Architecture undefined!')
        net = HighResolutionNet(MODEL_CONFIGS[arch])
        net.init_weights(pretrained=self.configer.pretrained_backbone)
        return net
# NOTE(review): the bare `_metric` below looks like a mangled decorator
# (likely `@register_metric` or similar) — confirm against the original file.
_metric
def fid50k_realtrans(opts):
    """FID over 50k generated images against the full, un-flipped real set."""
    # Use every real image and disable x-flip augmentation for the real side.
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid_realtrans(opts, max_real=None, num_gen=50000)
    return dict(fid50k_realtrans=fid)
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
    """Median along `axis` ignoring NaNs, via a masked-array fallback.

    Emits one "All-NaN slice" RuntimeWarning per slice that contained only
    NaNs (those slices come back as NaN). If `out` is given, the result is
    written into it and `out` is returned.
    """
    masked = np.ma.masked_array(a, np.isnan(a))
    med = np.ma.median(masked, axis=axis, overwrite_input=overwrite_input)
    # Entries still masked after the median are slices that were entirely NaN.
    for _ in range(np.count_nonzero(med.mask.ravel())):
        warnings.warn('All-NaN slice encountered', RuntimeWarning, stacklevel=4)
    result = med.filled(np.nan)
    if out is not None:
        out[...] = result
        return out
    return result
class StyleContentModel_style(tf.keras.models.Model):
    """Frozen-VGG feature extractor producing content features and four sets of
    style Gram matrices: the plain Grams ('style_1') and blends of the plain
    Grams with Grams of the 90/180/270-degree-rotated style features
    ('style_2'..'style_4'), weighted by `rotation_weight`.
    """

    def __init__(self, style_layers, content_layers, rotation_weight):
        super(StyleContentModel_style, self).__init__()
        self.vgg = vgg_layers(style_layers + content_layers)
        self.style_layers = style_layers
        self.content_layers = content_layers
        self.num_style_layers = len(style_layers)
        self.vgg.trainable = False  # feature extractor only, never trained
        self.rotation_weight = rotation_weight

    def call(self, inputs):
        # Inputs are expected in [0, 1]; VGG preprocessing wants [0, 255].
        scaled = inputs * 255.0
        preprocessed = tf.keras.applications.vgg19.preprocess_input(scaled)
        outputs = self.vgg(preprocessed)
        # vgg_layers was built with style layers first, content layers after.
        style_outputs = outputs[:self.num_style_layers]
        content_outputs = outputs[self.num_style_layers:]

        w = self.rotation_weight
        plain_grams = [gram_matrix(feat) for feat in style_outputs]
        # Blend each style Gram with the Gram of the rotated feature map.
        blended = {}
        for key, angle in (('style_2', 90), ('style_3', 180), ('style_4', 270)):
            rotated = rotation_tensor(style_outputs, angle)
            grams = [((1 - w) * gram_matrix(orig)) + (w * gram_matrix(rot))
                     for orig, rot in zip(style_outputs, rotated)]
            blended[key] = dict(zip(self.style_layers, grams))

        return {
            'content': dict(zip(self.content_layers, content_outputs)),
            'style_1': dict(zip(self.style_layers, plain_grams)),
            'style_2': blended['style_2'],
            'style_3': blended['style_3'],
            'style_4': blended['style_4'],
        }
def runKoG2P(graph, rulebook):
    """Convert a Korean grapheme string to its pronunciation and print it.

    The rulebook is parsed for the running interpreter's major version
    (`ver_info[0]`); Python 2 needs an explicit unicode() conversion of the
    input, Python 3 strings are already unicode.
    """
    rule_in, rule_out = readRules(ver_info[0], rulebook)
    if ver_info[0] == 2:
        prono = graph2prono(unicode(graph), rule_in, rule_out)
    elif ver_info[0] == 3:
        prono = graph2prono(graph, rule_in, rule_out)
    print(prono)
# NOTE(review): the leading `.parametrize(...)` below looks like a truncated
# `@pytest.mark.parametrize` decorator — confirm against the original file.
.parametrize('n_player', [2, 4])
def test_payoff_table(n_player: int):
    """Smoke test: build an empty n-player payoff table and expand every axis."""
    agents = [f'player_{i}' for i in range(n_player)]
    # One table axis per player; all axes start empty (length 0).
    shape = ([0] * n_player)
    simulation_flag = SimulationFlag(np.zeros(shape).astype(bool))
    table = PayoffTable(identify=agents[0], agents=agents, shared_simulation_flag=simulation_flag)
    # Grow each axis by one slot at the high end.
    pad_info = ([(0, 1)] * n_player)
    table.expand_table(pad_info)