code
stringlengths
101
5.91M
class SphereBivariateSpline(_BivariateSplineBase):
    """Bivariate spline on a sphere: validates the polar angle and then
    delegates evaluation to the planar base class."""

    def __call__(self, theta, phi, dtheta=0, dphi=0, grid=True):
        """Evaluate the spline (or its dtheta/dphi derivatives) at (theta, phi)."""
        theta = np.asarray(theta)
        phi = np.asarray(phi)
        # theta must lie in [0, pi]; an empty array is trivially valid.
        theta_invalid = theta.size > 0 and not (theta.min() >= 0.0 and theta.max() <= np.pi)
        if theta_invalid:
            raise ValueError('requested theta out of bounds.')
        return _BivariateSplineBase.__call__(self, theta, phi,
                                             dx=dtheta, dy=dphi, grid=grid)

    def ev(self, theta, phi, dtheta=0, dphi=0):
        """Pointwise evaluation — identical to __call__ with grid=False."""
        return self.__call__(theta, phi, dtheta=dtheta, dphi=dphi, grid=False)
def get_boomerang_r_vectors_15(location, orientation):
    """Return the 15 blob positions of an L-shaped (boomerang) body.

    The reference configuration is two perpendicular arms of 7 blobs each
    (spacing 0.3) meeting at the origin; it is rotated by `orientation`'s
    rotation matrix and translated to `location`.  Returns a (15, 3) array.
    """
    xs = [2.1, 1.8, 1.5, 1.2, 0.9, 0.6, 0.3]
    ys = [0.3, 0.6, 0.9, 1.2, 1.5, 1.8, 2.1]
    reference = ([np.array([x, 0.0, 0.0]) for x in xs]
                 + [np.zeros(3)]
                 + [np.array([0.0, y, 0.0]) for y in ys])
    R = orientation.rotation_matrix()
    shift = np.array(location)
    placed = np.empty((len(reference), 3))
    for idx, blob in enumerate(reference):
        placed[idx] = np.dot(R, blob) + shift
    return placed
def is_test_file(filenum):
    """Return True when `filenum` belongs to the test split.

    A file is a test file if it is listed explicitly in the module-level
    TEST_FILES set, or falls in one of the reserved inclusive ranges.
    """
    if filenum in TEST_FILES:
        return True
    reserved_ranges = ((1, 43), (144, 169), (900, 931))
    return any(lo <= filenum <= hi for lo, hi in reserved_ranges)
def test_vfi_dataset():
    """Run both VFI-dataset checks on a single tester instance."""
    tester = TestVFIDataset()
    for check in (tester.test_base_vfi_dataset, tester.test_vfi_vimeo90k_dataset):
        check()
def _list_categories(tag):
    """Fetch the category listing for *tag* and decode the response as JSON.

    NOTE(review): the URL literal below appears to have been truncated by the
    dump (`(' + tag)` is not valid source) — restore the original base-URL
    string from the upstream file before use.
    """
    url = (' + tag)
    f = urllib.request.urlopen(url)
    return json.loads(f.read())
def tally_parameters(model):
    """Count and print the model's parameters.

    Prints the total parameter count plus the encoder and decoder
    (decoder + generator) sub-counts, and returns them as a tuple
    ``(n_params, enc, dec)`` (the return value is new and backward
    compatible — the original returned None, which callers ignored).
    """
    n_params = sum(p.nelement() for p in model.parameters())
    print('* number of parameters: %d' % n_params)
    enc = 0
    dec = 0
    for name, param in model.named_parameters():
        if 'encoder' in name:
            enc += param.nelement()
        # BUG FIX: the original condition was
        #   elif ('decoder' or ('generator' in name)):
        # which is always truthy ('decoder' is a non-empty string), so every
        # non-encoder parameter was counted as decoder regardless of its name.
        elif 'decoder' in name or 'generator' in name:
            dec += param.nelement()
    print('encoder: ', enc)
    print('decoder: ', dec)
    return n_params, enc, dec
# NOTE(review): the bare `()` below looks like a decorator mangled by the dump
# (most likely `@pytest.fixture()`); confirm against the upstream file.
()
def lamldataset_30_2():
    """Fixture: a 30x2 NumpyDataset of two numeric columns for a binary task.

    The data is mostly zeros with a handful of nonzero expected values —
    presumably the reference output some transformation is checked against
    (TODO confirm against the test that consumes this fixture).
    """
    return NumpyDataset(
        data=np.array([[(- 0.), 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
                       [0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
                       [0.4031683, 0.], [0., 0.], [0., 0.], [0., 0.4621607],
                       [0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.],
                       [0., 0.], [0., 0.6000393], [0., 0.], [0.7375221, 0.],
                       [0.7446845, 0.], [0., 0.], [0., 0.], [0.8831948, 0.],
                       [0., 0.], [0.9668895, 0.], [1., 1.0602233]]),
        features=['column0', 'column1'],
        roles={'column0': NumericRole(np.float32),
               'column1': NumericRole(np.float32)},
        task=Task('binary'))
class FGSM(DenseAttack):
    """Greedy FGSM-style structure attack on a dense adjacency matrix.

    One edge is flipped per step: the entry of the (sign-corrected) loss
    gradient with the largest value, applied symmetrically since the graph
    is undirected.  The attack is resumable: calling _attack again with a
    larger budget only applies the additional flips.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert self.make_undirected, 'Attack only implemented for undirected graphs'
        # Differentiable working copy of the adjacency matrix; self.adj stays
        # as the clean reference so already-flipped entries can be detected.
        self.adj_perturbed = self.adj.clone().requires_grad_(True).to(self.device)
        # Number of perturbations applied so far across _attack calls.
        self.n_perturbations = 0
        self.adj = self.adj.to(self.device)
        self.attr = self.attr.to(self.device)
        self.attacked_model = self.attacked_model.to(self.device)

    def _attack(self, n_perturbations: int):
        """Greedily flip edges until `n_perturbations` total flips are done.

        Sets self.attr_adversary / self.adj_adversary as the attack output.
        """
        assert (n_perturbations > self.n_perturbations), f'Number of perturbations must be bigger as this attack is greedy (current {n_perturbations}, previous {self.n_perturbations})'
        # Only the *additional* flips on top of earlier calls are applied.
        n_perturbations -= self.n_perturbations
        self.n_perturbations += n_perturbations
        for i in tqdm(range(n_perturbations)):
            logits = self.attacked_model(self.attr, self.adj_perturbed)
            loss = self.calculate_loss(logits[self.idx_attack], self.labels[self.idx_attack])
            gradient = torch.autograd.grad(loss, self.adj_perturbed)[0]
            # Never revisit an entry that was already flipped.
            gradient[(self.adj != self.adj_perturbed)] = 0
            # Sign correction: makes a positive gradient always mean
            # "flipping this entry increases the loss" for both 0- and
            # 1-valued entries.
            gradient *= (2 * (0.5 - self.adj_perturbed))
            maximum = torch.max(gradient)
            edge_pert = (maximum == gradient).nonzero()
            with torch.no_grad():
                # Flip 0 -> 1 or 1 -> 0, symmetrically in both triangles.
                new_edge_value = ((- self.adj_perturbed[(edge_pert[0][0], edge_pert[0][1])]) + 1)
                self.adj_perturbed[(edge_pert[0][0], edge_pert[0][1])] = new_edge_value
                self.adj_perturbed[(edge_pert[0][1], edge_pert[0][0])] = new_edge_value
        self.attr_adversary = self.attr
        self.adj_adversary = SparseTensor.from_dense(self.adj_perturbed.detach())
def loading_data(datasetname, val_interval):
    """Build the train/val data loaders for `datasetname`.

    Returns (train_loader, val_loader, restore_transform), where
    restore_transform undoes the MEAN/STD normalization for visualization.
    """
    name = datasetname.upper()
    cfg_data = getattr(setting, name).cfg_data
    train_loader = createTrainData(name, dataset.Dataset, cfg_data)
    restore_transform = createRestore(cfg_data.MEAN_STD)
    val_loader = createValTestData(name, dataset.TestDataset, cfg_data,
                                   val_interval, mode='val')
    return train_loader, val_loader, restore_transform
# NOTE(review): `_torch` below looks like a decorator mangled by the dump
# (most likely `@require_torch`); confirm against the upstream file.
_torch
class XLMModelTest(ModelTesterMixin, unittest.TestCase):
    """Unit tests for the XLM model family (base, LM head, QA, classification)."""

    all_model_classes = ((XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering,
                          XLMForSequenceClassification, XLMForQuestionAnsweringSimple)
                         if is_torch_available() else ())

    class XLMModelTester(object):
        """Builds small random configs/inputs and checks each model variant."""

        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True,
                     use_input_lengths=True, use_token_type_ids=True, use_labels=True,
                     gelu_activation=True, sinusoidal_embeddings=False, causal=False,
                     asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
                     num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
                     attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                     type_vocab_size=16, type_sequence_label_size=2,
                     initializer_range=0.02, num_labels=3, num_choices=4,
                     summary_type='last', use_proj=True, scope=None):
            # NOTE(review): type_vocab_size is accepted but never stored, and
            # n_langs / summary_type are assigned twice (kept as-is below).
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_lengths = use_input_lengths
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.gelu_activation = gelu_activation
            self.sinusoidal_embeddings = sinusoidal_embeddings
            self.asm = asm
            self.n_langs = n_langs
            self.vocab_size = vocab_size
            self.n_special = n_special
            self.summary_type = summary_type
            self.causal = causal
            self.use_proj = use_proj
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.n_langs = n_langs
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.summary_type = summary_type
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope

        def prepare_config_and_inputs(self):
            """Create a random config plus a consistent batch of inputs/labels."""
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = ids_tensor([self.batch_size, self.seq_length], 2).float()
            input_lengths = None
            if self.use_input_lengths:
                # lengths in [seq_length - 2, seq_length - 1]
                input_lengths = ((ids_tensor([self.batch_size], vocab_size=2) + self.seq_length) - 2)
            token_type_ids = None
            if self.use_token_type_ids:
                # XLM reuses the token-type slot for language ids.
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
            sequence_labels = None
            token_labels = None
            is_impossible_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            config = XLMConfig(vocab_size=self.vocab_size, n_special=self.n_special,
                               emb_dim=self.hidden_size, n_layers=self.num_hidden_layers,
                               n_heads=self.num_attention_heads,
                               dropout=self.hidden_dropout_prob,
                               attention_dropout=self.attention_probs_dropout_prob,
                               gelu_activation=self.gelu_activation,
                               sinusoidal_embeddings=self.sinusoidal_embeddings,
                               asm=self.asm, causal=self.causal, n_langs=self.n_langs,
                               max_position_embeddings=self.max_position_embeddings,
                               initializer_range=self.initializer_range,
                               summary_type=self.summary_type, use_proj=self.use_proj)
            return (config, input_ids, token_type_ids, input_lengths, sequence_labels,
                    token_labels, is_impossible_labels, input_mask)

        def check_loss_output(self, result):
            # A loss must be a scalar tensor (empty size).
            self.parent.assertListEqual(list(result['loss'].size()), [])

        def create_and_check_xlm_model(self, config, input_ids, token_type_ids,
                                       input_lengths, sequence_labels, token_labels,
                                       is_impossible_labels, input_mask):
            """Base model: output must be (batch, seq, hidden)."""
            model = XLMModel(config=config)
            model.to(torch_device)
            model.eval()
            outputs = model(input_ids, lengths=input_lengths, langs=token_type_ids)
            outputs = model(input_ids, langs=token_type_ids)
            outputs = model(input_ids)
            sequence_output = outputs[0]
            result = {'sequence_output': sequence_output}
            self.parent.assertListEqual(list(result['sequence_output'].size()),
                                        [self.batch_size, self.seq_length, self.hidden_size])

        def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids,
                                         input_lengths, sequence_labels, token_labels,
                                         is_impossible_labels, input_mask):
            """LM head: scalar loss, (batch, seq, vocab) logits."""
            model = XLMWithLMHeadModel(config)
            model.to(torch_device)
            model.eval()
            (loss, logits) = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
            result = {'loss': loss, 'logits': logits}
            self.parent.assertListEqual(list(result['loss'].size()), [])
            self.parent.assertListEqual(list(result['logits'].size()),
                                        [self.batch_size, self.seq_length, self.vocab_size])

        def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids,
                                           input_lengths, sequence_labels, token_labels,
                                           is_impossible_labels, input_mask):
            """Simple QA head: start/end logits are (batch, seq)."""
            model = XLMForQuestionAnsweringSimple(config)
            model.to(torch_device)
            model.eval()
            outputs = model(input_ids)
            outputs = model(input_ids, start_positions=sequence_labels,
                            end_positions=sequence_labels)
            (loss, start_logits, end_logits) = outputs
            result = {'loss': loss, 'start_logits': start_logits, 'end_logits': end_logits}
            self.parent.assertListEqual(list(result['start_logits'].size()),
                                        [self.batch_size, self.seq_length])
            self.parent.assertListEqual(list(result['end_logits'].size()),
                                        [self.batch_size, self.seq_length])
            self.check_loss_output(result)

        def create_and_check_xlm_qa(self, config, input_ids, token_type_ids,
                                    input_lengths, sequence_labels, token_labels,
                                    is_impossible_labels, input_mask):
            """Beam-search QA head: check top-k output shapes and scalar loss."""
            model = XLMForQuestionAnswering(config)
            model.to(torch_device)
            model.eval()
            outputs = model(input_ids)
            (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index,
             cls_logits) = outputs
            outputs = model(input_ids, start_positions=sequence_labels,
                            end_positions=sequence_labels, cls_index=sequence_labels,
                            is_impossible=is_impossible_labels, p_mask=input_mask)
            outputs = model(input_ids, start_positions=sequence_labels,
                            end_positions=sequence_labels, cls_index=sequence_labels,
                            is_impossible=is_impossible_labels)
            (total_loss,) = outputs
            outputs = model(input_ids, start_positions=sequence_labels,
                            end_positions=sequence_labels)
            (total_loss,) = outputs
            result = {'loss': total_loss, 'start_top_log_probs': start_top_log_probs,
                      'start_top_index': start_top_index,
                      'end_top_log_probs': end_top_log_probs,
                      'end_top_index': end_top_index, 'cls_logits': cls_logits}
            self.parent.assertListEqual(list(result['loss'].size()), [])
            self.parent.assertListEqual(list(result['start_top_log_probs'].size()),
                                        [self.batch_size, model.config.start_n_top])
            self.parent.assertListEqual(list(result['start_top_index'].size()),
                                        [self.batch_size, model.config.start_n_top])
            self.parent.assertListEqual(list(result['end_top_log_probs'].size()),
                                        [self.batch_size, (model.config.start_n_top * model.config.end_n_top)])
            self.parent.assertListEqual(list(result['end_top_index'].size()),
                                        [self.batch_size, (model.config.start_n_top * model.config.end_n_top)])
            self.parent.assertListEqual(list(result['cls_logits'].size()),
                                        [self.batch_size])

        def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids,
                                                  input_lengths, sequence_labels,
                                                  token_labels, is_impossible_labels,
                                                  input_mask):
            """Sequence classification head: (batch, num_classes) logits."""
            model = XLMForSequenceClassification(config)
            model.to(torch_device)
            model.eval()
            (logits,) = model(input_ids)
            (loss, logits) = model(input_ids, labels=sequence_labels)
            result = {'loss': loss, 'logits': logits}
            self.parent.assertListEqual(list(result['loss'].size()), [])
            self.parent.assertListEqual(list(result['logits'].size()),
                                        [self.batch_size, self.type_sequence_label_size])

        def prepare_config_and_inputs_for_common(self):
            """Adapt prepare_config_and_inputs to the common-tester dict format."""
            config_and_inputs = self.prepare_config_and_inputs()
            (config, input_ids, token_type_ids, input_lengths, sequence_labels,
             token_labels, is_impossible_labels, input_mask) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids,
                           'lengths': input_lengths}
            return (config, inputs_dict)

    def setUp(self):
        self.model_tester = XLMModelTest.XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_model_from_pretrained(self):
        # Only the first archive entry is exercised to keep the test cheap.
        for model_name in list(XLM_PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = XLMModel.from_pretrained(model_name, cache_dir=CACHE_DIR)
            self.assertIsNotNone(model)
def patch(model, target, updater, *args, **kwargs):
    """Recursively replace every `target` submodule of `model`.

    Children are rewritten bottom-up; if `model` itself is a `target`
    instance, `updater.create_from(model, *args, **kwargs)` provides the
    replacement, otherwise the (possibly modified) model is returned.
    """
    for child_name, child in model.named_children():
        replacement = patch(child, target, updater, *args, **kwargs)
        model._modules[child_name] = replacement
    if not isinstance(model, target):
        return model
    return updater.create_from(model, *args, **kwargs)
def check_named_results(res, attributes, ma=False):
    """Assert positional fields of a namedtuple-like result match its attrs.

    For each position i, res[i] must equal getattr(res, attributes[i]);
    `ma=True` switches to the masked-array comparison helper.
    """
    for pos, attr_name in enumerate(attributes):
        expected = getattr(res, attr_name)
        # Conditional expression keeps ma_npt untouched when ma is False.
        (ma_npt if ma else npt).assert_equal(res[pos], expected)
def train_loader(args):
    """Build the training DataLoader over `<args.data>/train` image folders."""
    train_dir = os.path.join(args.data, 'train')
    folder = datasets.ImageFolder(
        train_dir, train_transforms(args.inpSize, scale=args.scale))
    return torch.utils.data.DataLoader(
        folder,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
    )
def load_generated_images(path):
    """Load the N_imgs generated '%04d_rgb.png' frames under `path`.

    Each frame is passed through the module-level transform `trn` and the
    results are stacked into a single tensor.
    """
    frames = [
        trn(Image.open(os.path.join(path, '{:04d}_rgb.png'.format(idx))))
        for idx in range(N_imgs)
    ]
    return torch.stack(frames)
class MLP(Layer):
    """Additive pairwise scorer: projects two sequences and scores every
    (i, j) pair of positions with a one-unit output layer."""

    def __init__(self, hidden_units, activation=tf.nn.tanh, name='mlp'):
        super(MLP, self).__init__(name)
        self.activation = activation
        # One independent projection per input sequence.
        self.projecting_layers = [
            tf.keras.layers.Dense(hidden_units, activation=None),
            tf.keras.layers.Dense(hidden_units, activation=None),
        ]
        self.score_layer = tf.keras.layers.Dense(1, activation=None, use_bias=False)

    def __call__(self, t0, t1):
        """Return pairwise scores of shape (batch, len(t0), len(t1))."""
        left = self.projecting_layers[0](t0)
        right = self.projecting_layers[1](t1)
        # Broadcasted sum builds the (i, j) pair grid.
        pair_grid = tf.expand_dims(left, axis=2) + tf.expand_dims(right, axis=1)
        scores = self.score_layer(self.activation(pair_grid))
        return tf.squeeze(scores, axis=(- 1))
def add_img(img, all_imgs):
    """Append `img` to the running list, creating the list on first use.

    Returns (None, list, list); the list is the same object that was passed
    in (mutated in place) unless `all_imgs` was None.
    """
    collected = [] if all_imgs is None else all_imgs
    collected.append(img)
    return None, collected, collected
class LazyConv3d(_LazyConvXdMixin, Conv3d):
    """A Conv3d whose in_channels is inferred from the first forward input.

    weight (and optionally bias) start as UninitializedParameter placeholders;
    once materialized, the instance is converted into cls_to_become.
    """

    # After parameter materialization the instance's class is swapped to this.
    cls_to_become = Conv3d

    def __init__(self, out_channels: int, kernel_size: _size_3_t,
                 stride: _size_3_t=1, padding: _size_3_t=0,
                 dilation: _size_3_t=1, groups: int=1, bias: bool=True,
                 padding_mode: str='zeros', device=None, dtype=None) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype}
        # in/out channels of 0 and bias=False keep the base constructor from
        # allocating real parameters; they are replaced with uninitialized
        # placeholders immediately below.
        super().__init__(0, 0, kernel_size, stride, padding, dilation, groups,
                         False, padding_mode, **factory_kwargs)
        self.weight = UninitializedParameter(**factory_kwargs)
        self.out_channels = out_channels
        if bias:
            self.bias = UninitializedParameter(**factory_kwargs)
def compute_effective_axis_dimension(dimension: int, fixed_dimension: int,
                                     num_token_to_add: int = 0) -> int:
    """Resolve a possibly-dynamic axis size.

    A non-positive `dimension` marks a dynamic axis and falls back to
    `fixed_dimension`; `num_token_to_add` (e.g. special tokens a tokenizer
    will insert) is then reserved out of the result.
    """
    effective = fixed_dimension if dimension <= 0 else dimension
    return effective - num_token_to_add
def parse_domain_pddl(domain_pddl):
    """Lazily parse a tokenized PDDL domain s-expression.

    Generator protocol — yields, in this order: domain name, requirements,
    types list, type dict, constants, predicates list, predicate dict,
    functions, actions list, axioms list.
    """
    iterator = iter(domain_pddl)
    define_tag = next(iterator)
    assert (define_tag == 'define')
    domain_line = next(iterator)
    assert ((domain_line[0] == 'domain') and (len(domain_line) == 2))
    (yield domain_line[1])
    # Defaults used when the corresponding optional section is absent.
    requirements = pddl.Requirements([':strips'])
    the_types = [pddl.Type('object')]
    (constants, the_predicates, the_functions) = ([], [], [])
    correct_order = [':requirements', ':types', ':constants', ':predicates', ':functions']
    seen_fields = []
    # The first entry that is not one of the optional header sections starts
    # the action/axiom part; it is remembered and re-processed below.
    first_action = None
    for opt in iterator:
        field = opt[0]
        if (field not in correct_order):
            first_action = opt
            break
        if (field in seen_fields):
            raise SystemExit(('Error in domain specification\n' + ("Reason: two '%s' specifications." % field)))
        if (seen_fields and (correct_order.index(seen_fields[(- 1)]) > correct_order.index(field))):
            # Out-of-order sections are tolerated with a warning only.
            msg = ('\nWarning: %s specification not allowed here (cf. PDDL BNF)' % field)
            print(msg, file=sys.stderr)
        seen_fields.append(field)
        if (field == ':requirements'):
            requirements = pddl.Requirements(opt[1:])
        elif (field == ':types'):
            the_types.extend(parse_typed_list(opt[1:], constructor=pddl.Type))
        elif (field == ':constants'):
            constants = parse_typed_list(opt[1:])
        elif (field == ':predicates'):
            the_predicates = [parse_predicate(entry) for entry in opt[1:]]
            # The built-in equality predicate is always available.
            the_predicates += [pddl.Predicate('=', [pddl.TypedObject('?x', 'object'), pddl.TypedObject('?y', 'object')])]
        elif (field == ':functions'):
            the_functions = parse_typed_list(opt[1:], constructor=parse_function, default_type='number')
    set_supertypes(the_types)
    (yield requirements)
    (yield the_types)
    type_dict = {type.name: type for type in the_types}
    (yield type_dict)
    (yield constants)
    (yield the_predicates)
    predicate_dict = {pred.name: pred for pred in the_predicates}
    (yield predicate_dict)
    (yield the_functions)
    # Re-queue the entry that terminated the header loop, then consume the
    # rest of the iterator as actions/axioms.
    entries = []
    if (first_action is not None):
        entries.append(first_action)
    entries.extend(iterator)
    the_axioms = []
    the_actions = []
    for entry in entries:
        if (entry[0] == ':derived'):
            axiom = parse_axiom(entry, type_dict, predicate_dict)
            the_axioms.append(axiom)
        else:
            # parse_action may return None (e.g. trivially inapplicable
            # actions), which are silently dropped.
            action = parse_action(entry, type_dict, predicate_dict)
            if (action is not None):
                the_actions.append(action)
    (yield the_actions)
    (yield the_axioms)
class Dispatch():
    """A kernel paired with the named arguments it should be launched with."""

    def __init__(self, kernel: Kernel, args: List[NamedArgument]):
        """Record the kernel and its argument bindings."""
        self.args = args
        self.kernel = kernel
def test_random_df(random_df: pd.DataFrame) -> None:
    """Smoke-test plot() on a random frame, default and bar-chart-only."""
    for kwargs in ({}, {'display': ['Bar Chart']}):
        plot(random_df, **kwargs)
_params def test_quad_vec_simple(quadrature): n = np.arange(10) def f(x): return (x ** n) for epsabs in [0.1, 0.001, 1e-06]: if ((quadrature == 'trapezoid') and (epsabs < 0.0001)): continue kwargs = dict(epsabs=epsabs, quadrature=quadrature) exact = ((2 ** (n + 1)) / (n + 1)) (res, err) = quad_vec(f, 0, 2, norm='max', **kwargs) assert_allclose(res, exact, rtol=0, atol=epsabs) (res, err) = quad_vec(f, 0, 2, norm='2', **kwargs) assert (np.linalg.norm((res - exact)) < epsabs) (res, err) = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs) assert_allclose(res, exact, rtol=0, atol=epsabs) (res, err, *rest) = quad_vec(f, 0, 2, norm='max', epsrel=1e-08, full_output=True, limit=10000, **kwargs) assert_allclose(res, exact, rtol=0, atol=epsabs)
def log_bernoulli(x, mean, average=False, dim=None):
    """Bernoulli log-likelihood of `x` under probabilities `mean`.

    Probabilities are clamped to the module-level [min_epsilon, max_epsilon]
    range for numerical stability; the per-element terms are then reduced by
    sum (default) or mean along `dim`.
    """
    probs = torch.clamp(mean, min=min_epsilon, max=max_epsilon)
    per_element = x * torch.log(probs) + (1.0 - x) * torch.log(1.0 - probs)
    reduce_fn = torch.mean if average else torch.sum
    return reduce_fn(per_element, dim)
def run(task: Task, num_samples: int, num_simulations: int,
        num_observation: Optional[int]=None,
        observation: Optional[torch.Tensor]=None,
        population_size: Optional[int]=None, distance: str='l2',
        epsilon_decay: float=0.2, distance_based_decay: bool=True,
        ess_min: Optional[float]=None, initial_round_factor: int=5,
        batch_size: int=1000, kernel: str='gaussian',
        kernel_variance_scale: float=0.5, use_last_pop_samples: bool=True,
        algorithm_variant: str='C', save_summary: bool=False, sass: bool=False,
        sass_fraction: float=0.5, sass_feature_expansion_degree: int=3,
        lra: bool=False, lra_sample_weights: bool=True,
        kde_bandwidth: Optional[str]='cv',
        kde_sample_weights: bool=False) -> Tuple[(torch.Tensor, int, Optional[torch.Tensor])]:
    """Run SMC-ABC on an sbibm task and return posterior samples.

    Returns (samples, num_simulations_used, log_prob_true_parameters);
    the log-prob is only computed when a KDE posterior is fit and a
    numbered observation (with known true parameters) was requested.
    """
    # Exactly one of num_observation / observation must be supplied.
    assert (not ((num_observation is None) and (observation is None)))
    assert (not ((num_observation is not None) and (observation is not None)))
    log = sbibm.get_logger(__name__)
    smc_papers = dict(A='Toni 2010', B='Sisson et al. 2007', C='Beaumont et al. 2009')
    log.info(f'Running SMC-ABC as in {smc_papers[algorithm_variant]}.')
    prior = task.get_prior_dist()
    # max_calls caps the simulation budget; asserted exhausted below.
    simulator = task.get_simulator(max_calls=num_simulations)
    # A KDE posterior is fit whenever a bandwidth strategy is given.
    kde = (kde_bandwidth is not None)
    if (observation is None):
        observation = task.get_observation(num_observation)
    if (population_size is None):
        population_size = 100
        if (num_simulations > 10000):
            population_size = 1000
    population_size = min(population_size, num_simulations)
    # First round is larger than later populations, but bounded by half the
    # total budget.
    initial_round_size = clip_int(value=(initial_round_factor * population_size),
                                  minimum=population_size,
                                  maximum=max((0.5 * num_simulations), population_size))
    inference_method = SMCABC(simulator=simulator, prior=prior,
                              simulation_batch_size=batch_size, distance=distance,
                              show_progress_bars=True, kernel=kernel,
                              algorithm_variant=algorithm_variant)
    (output, summary) = inference_method(x_o=observation,
                                         num_particles=population_size,
                                         num_initial_pop=initial_round_size,
                                         num_simulations=num_simulations,
                                         epsilon_decay=epsilon_decay,
                                         distance_based_decay=distance_based_decay,
                                         ess_min=ess_min,
                                         kernel_variance_scale=kernel_variance_scale,
                                         use_last_pop_samples=use_last_pop_samples,
                                         return_summary=True, lra=lra,
                                         lra_with_weights=lra_sample_weights,
                                         sass=sass, sass_fraction=sass_fraction,
                                         sass_expansion_degree=sass_feature_expansion_degree,
                                         kde=kde, kde_sample_weights=kde_sample_weights,
                                         kde_kwargs=(dict(bandwidth=kde_bandwidth) if kde_bandwidth else {}))
    if save_summary:
        log.info('Saving smcabc summary to csv.')
        pd.DataFrame.from_dict(summary).to_csv('summary.csv', index=False)
    # The whole budget must have been spent.
    assert (simulator.num_simulations == num_simulations)
    if kde:
        kde_posterior = output
        samples = kde_posterior.sample(num_samples)
        if (num_observation is not None):
            true_parameters = task.get_true_parameters(num_observation=num_observation)
            log_prob_true_parameters = kde_posterior.log_prob(true_parameters)
        # NOTE(review): if kde is True but num_observation is None,
        # log_prob_true_parameters is unbound here — confirm intended.
        return (samples, simulator.num_simulations, log_prob_true_parameters)
    else:
        samples = output
        return (samples, simulator.num_simulations, None)
class _BaseNetwork(_IPAddressBase):
    """Version-independent base class for IP network objects.

    Concrete subclasses provide `_version`, `network_address`, `netmask`,
    `_prefixlen`, `_max_prefixlen`, `_ALL_ONES` and an `_address_class`.

    Fixes relative to the previous revision:
    - `_is_subnet_of` called its message string instead of %-formatting it
      (`'...version'(a, b)`), raising the wrong TypeError.
    - `@property` / `@staticmethod` decorators are restored; the code itself
      requires them (e.g. `int(self.broadcast_address)` in hosts() and
      `self._is_subnet_of(self, other)` in subnet_of()).
    """

    def __init__(self, address):
        # Cache for the derived broadcast_address / hostmask values.
        self._cache = {}

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, _compat_str(self))

    def __str__(self):
        return '%s/%d' % (self.network_address, self.prefixlen)

    def hosts(self):
        """Yield usable host addresses (network and broadcast excluded)."""
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network + 1, broadcast):
            yield self._address_class(x)

    def __iter__(self):
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        for x in _compat_range(network, broadcast + 1):
            yield self._address_class(x)

    def __getitem__(self, n):
        network = int(self.network_address)
        broadcast = int(self.broadcast_address)
        if n >= 0:
            if network + n > broadcast:
                raise IndexError('address out of range')
            return self._address_class(network + n)
        else:
            # Negative indices count back from the broadcast address.
            n += 1
            if broadcast + n < network:
                raise IndexError('address out of range')
            return self._address_class(broadcast + n)

    def __lt__(self, other):
        if not isinstance(other, _IPAddressBase):
            return NotImplemented
        if not isinstance(other, _BaseNetwork):
            raise TypeError('%s and %s are not of the same type' % (self, other))
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same version' % (self, other))
        if self.network_address != other.network_address:
            return self.network_address < other.network_address
        if self.netmask != other.netmask:
            return self.netmask < other.netmask
        return False

    def __eq__(self, other):
        try:
            return (self._version == other._version and
                    self.network_address == other.network_address and
                    int(self.netmask) == int(other.netmask))
        except AttributeError:
            return NotImplemented

    def __hash__(self):
        return hash(int(self.network_address) ^ int(self.netmask))

    def __contains__(self, other):
        # Always false if one is v4 and the other is v6.
        if self._version != other._version:
            return False
        # Membership is only defined for addresses, not networks.
        if isinstance(other, _BaseNetwork):
            return False
        else:
            return (int(self.network_address) <= int(other._ip) <=
                    int(self.broadcast_address))

    def overlaps(self, other):
        """Tell if self is partly contained in other."""
        return (self.network_address in other or
                self.broadcast_address in other or
                other.network_address in self or
                other.broadcast_address in self)

    @property
    def broadcast_address(self):
        x = self._cache.get('broadcast_address')
        if x is None:
            x = self._address_class(int(self.network_address) |
                                    int(self.hostmask))
            self._cache['broadcast_address'] = x
        return x

    @property
    def hostmask(self):
        x = self._cache.get('hostmask')
        if x is None:
            x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
            self._cache['hostmask'] = x
        return x

    @property
    def with_prefixlen(self):
        return '%s/%d' % (self.network_address, self._prefixlen)

    @property
    def with_netmask(self):
        return '%s/%s' % (self.network_address, self.netmask)

    @property
    def with_hostmask(self):
        return '%s/%s' % (self.network_address, self.hostmask)

    @property
    def num_addresses(self):
        """Number of addresses in the network, broadcast included."""
        return int(self.broadcast_address) - int(self.network_address) + 1

    @property
    def _address_class(self):
        # Overridden by concrete IPv4/IPv6 network classes.
        msg = '%200s has no associated address class' % (type(self),)
        raise NotImplementedError(msg)

    @property
    def prefixlen(self):
        return self._prefixlen

    def address_exclude(self, other):
        """Yield the subnets of self that remain after removing `other`."""
        if not self._version == other._version:
            raise TypeError('%s and %s are not of the same version' % (self, other))
        if not isinstance(other, _BaseNetwork):
            raise TypeError('%s is not a network object' % other)
        if not other.subnet_of(self):
            raise ValueError('%s not contained in %s' % (other, self))
        if other == self:
            return
        # Make sure we're comparing the network of other.
        other = other.__class__('%s/%s' % (other.network_address,
                                           other.prefixlen))
        (s1, s2) = self.subnets()
        # Repeatedly split: the half not containing `other` is part of the
        # result; recurse into the half that contains it.
        while s1 != other and s2 != other:
            if other.subnet_of(s1):
                yield s2
                (s1, s2) = s1.subnets()
            elif other.subnet_of(s2):
                yield s1
                (s1, s2) = s2.subnets()
            else:
                raise AssertionError('Error performing exclusion: s1: %s s2: %s other: %s' % (s1, s2, other))
        if s1 == other:
            yield s2
        elif s2 == other:
            yield s1
        else:
            raise AssertionError('Error performing exclusion: s1: %s s2: %s other: %s' % (s1, s2, other))

    def compare_networks(self, other):
        """Return -1, 0 or 1 ordering self against other (same version only)."""
        if self._version != other._version:
            raise TypeError('%s and %s are not of the same type' % (self, other))
        if self.network_address < other.network_address:
            return -1
        if self.network_address > other.network_address:
            return 1
        if self.netmask < other.netmask:
            return -1
        if self.netmask > other.netmask:
            return 1
        return 0

    def _get_networks_key(self):
        # Sort key used by helper functions ordering mixed network lists.
        return (self._version, self.network_address, self.netmask)

    def subnets(self, prefixlen_diff=1, new_prefix=None):
        """Yield the subnets that join to make the current network."""
        if self._prefixlen == self._max_prefixlen:
            yield self
            return
        if new_prefix is not None:
            if new_prefix < self._prefixlen:
                raise ValueError('new prefix must be longer')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = new_prefix - self._prefixlen
        if prefixlen_diff < 0:
            raise ValueError('prefix length diff must be > 0')
        new_prefixlen = self._prefixlen + prefixlen_diff
        if new_prefixlen > self._max_prefixlen:
            raise ValueError('prefix length diff %d is invalid for netblock %s' % (new_prefixlen, self))
        start = int(self.network_address)
        end = int(self.broadcast_address) + 1
        step = (int(self.hostmask) + 1) >> prefixlen_diff
        for new_addr in _compat_range(start, end, step):
            current = self.__class__((new_addr, new_prefixlen))
            yield current

    def supernet(self, prefixlen_diff=1, new_prefix=None):
        """Return the supernet containing the current network."""
        if self._prefixlen == 0:
            return self
        if new_prefix is not None:
            if new_prefix > self._prefixlen:
                raise ValueError('new prefix must be shorter')
            if prefixlen_diff != 1:
                raise ValueError('cannot set prefixlen_diff and new_prefix')
            prefixlen_diff = self._prefixlen - new_prefix
        new_prefixlen = self.prefixlen - prefixlen_diff
        if new_prefixlen < 0:
            raise ValueError('current prefixlen is %d, cannot have a prefixlen_diff of %d' % (self.prefixlen, prefixlen_diff))
        return self.__class__((
            int(self.network_address) & (int(self.netmask) << prefixlen_diff),
            new_prefixlen))

    @property
    def is_multicast(self):
        return (self.network_address.is_multicast and
                self.broadcast_address.is_multicast)

    @staticmethod
    def _is_subnet_of(a, b):
        try:
            # Always false if one is v4 and the other is v6.
            if a._version != b._version:
                # BUG FIX: the message string was previously *called*
                # ('...version'(a, b)) instead of %-formatted.
                raise TypeError('%s and %s are not of the same version' % (a, b))
            return (b.network_address <= a.network_address and
                    b.broadcast_address >= a.broadcast_address)
        except AttributeError:
            raise TypeError('Unable to test subnet containment between %s and %s' % (a, b))

    def subnet_of(self, other):
        """Return True if this network is a subnet of other."""
        return self._is_subnet_of(self, other)

    def supernet_of(self, other):
        """Return True if this network is a supernet of other."""
        return self._is_subnet_of(other, self)

    @property
    def is_reserved(self):
        return (self.network_address.is_reserved and
                self.broadcast_address.is_reserved)

    @property
    def is_link_local(self):
        return (self.network_address.is_link_local and
                self.broadcast_address.is_link_local)

    @property
    def is_private(self):
        return (self.network_address.is_private and
                self.broadcast_address.is_private)

    @property
    def is_global(self):
        return not self.is_private

    @property
    def is_unspecified(self):
        return (self.network_address.is_unspecified and
                self.broadcast_address.is_unspecified)

    @property
    def is_loopback(self):
        return (self.network_address.is_loopback and
                self.broadcast_address.is_loopback)
class FeatureExtraction(torch.nn.Module):
    """Multi-point feature extractor over a torchvision backbone.

    `last_layer` is a comma-separated list of layer names; for the VGG
    backbones the network is cut into consecutive chunks ending at each
    named layer, and forward() returns one (optionally L2-normalized and
    weighted) feature map per chunk.
    """

    def __init__(self, train_fe=False, feature_extraction_cnn='vgg19',
                 normalization=True, last_layer='', weights=None, use_cuda=True,
                 gpu=0, ref_backbone=None):
        super(FeatureExtraction, self).__init__()
        self.normalization = normalization
        print(f'layer: {last_layer}')
        print(f'weighs: {weights}')
        # Multiple extraction points may be given, comma-separated.
        last_layer = last_layer.split(',')
        if (feature_extraction_cnn == 'vgg16'):
            self.model = models.vgg16(pretrained=True)
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
                                  'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
                                  'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
                                  'relu3_3', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2',
                                  'relu4_2', 'conv4_3', 'relu4_3', 'pool4', 'conv5_1',
                                  'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3',
                                  'pool5']
            start_index = 0
            self.model_list = []
            # Cut the backbone into consecutive chunks, one per requested layer;
            # names must therefore appear in network order.
            for l in last_layer:
                if (l == ''):
                    l = 'pool4'
                layer_idx = vgg_feature_layers.index(l)
                assert (layer_idx >= start_index), 'layer order wrong!'
                model = nn.Sequential(*list(self.model.features.children())[start_index:(layer_idx + 1)])
                self.model_list.append(model)
                start_index = (layer_idx + 1)
        if (feature_extraction_cnn == 'vgg16_bn'):
            if (ref_backbone is None):
                self.model = models.vgg16_bn(pretrained=True)
            else:
                # Custom (e.g. self-supervised) backbone: prepend its sobel
                # filter stage to the feature stack.
                self.model = load_model(ref_backbone)
                self.model.features = torch.nn.Sequential(*list(self.model.sobel), *list(self.model.features))
            vgg_feature_layers = ['conv1_1', 'bn1_1', 'relu1_1', 'conv1_2', 'bn1_2',
                                  'relu1_2', 'pool1', 'conv2_1', 'bn2_1', 'relu2_1',
                                  'conv2_2', 'bn2_2', 'relu2_2', 'pool2', 'conv3_1',
                                  'bn3_1', 'relu3_1', 'conv3_2', 'bn3_2', 'relu3_2',
                                  'conv3_3', 'bn3_3', 'relu3_3', 'pool3', 'conv4_1',
                                  'bn4_1', 'relu4_1', 'conv4_2', 'bn4_2', 'relu4_2',
                                  'conv4_3', 'bn4_3', 'relu4_3', 'pool4', 'conv5_1',
                                  'bn5_1', 'relu5_1', 'conv5_2', 'bn5_2', 'relu5_2',
                                  'conv5_3', 'bn5_3', 'relu5_3', 'pool5']
            start_index = 0
            self.model_list = []
            for l in last_layer:
                if (l == ''):
                    l = 'pool4'
                layer_idx = vgg_feature_layers.index(l)
                assert (layer_idx >= start_index), 'layer order wrong!'
                model = nn.Sequential(*list(self.model.features.children())[start_index:(layer_idx + 1)])
                self.model_list.append(model)
                start_index = (layer_idx + 1)
        if (feature_extraction_cnn == 'vgg19'):
            self.model = models.vgg19(pretrained=True)
            vgg_feature_layers = ['conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
                                  'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
                                  'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
                                  'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1',
                                  'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3',
                                  'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1',
                                  'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4',
                                  'relu5_4', 'pool5']
            # Channel count produced at each layer above (same order).
            vgg_output_dim = [64, 64, 64, 64, 64,
                              128, 128, 128, 128, 128,
                              256, 256, 256, 256, 256, 256, 256, 256, 256,
                              512, 512, 512, 512, 512, 512, 512, 512, 512,
                              512, 512, 512, 512, 512, 512, 512, 512, 512]
            start_index = 0
            self.model_list = []
            # Total channel dimension across all extraction points.
            self.out_dim = 0
            for l in last_layer:
                if (l == ''):
                    l = 'relu5_4'
                layer_idx = vgg_feature_layers.index(l)
                assert (layer_idx >= start_index), 'layer order wrong!'
                self.out_dim += vgg_output_dim[layer_idx]
                model = nn.Sequential(*list(self.model.features.children())[start_index:(layer_idx + 1)])
                self.model_list.append(model)
                start_index = (layer_idx + 1)
        if (feature_extraction_cnn == 'resnet101'):
            self.model = models.resnet101(pretrained=True)
            resnet_feature_layers = ['conv1', 'bn1', 'relu', 'maxpool',
                                     'layer1', 'layer2', 'layer3', 'layer4']
            # NOTE(review): at this point `last_layer` is already a list (split
            # above), so `last_layer == ''` can never be true and `.index(...)`
            # would receive a list — this branch looks broken for the resnet
            # backbones; confirm against upstream.
            if (last_layer == ''):
                last_layer = 'layer3'
            last_layer_idx = resnet_feature_layers.index(last_layer)
            resnet_module_list = [self.model.conv1, self.model.bn1, self.model.relu,
                                  self.model.maxpool, self.model.layer1,
                                  self.model.layer2, self.model.layer3,
                                  self.model.layer4]
            self.model = nn.Sequential(*resnet_module_list[:(last_layer_idx + 1)])
        if (feature_extraction_cnn == 'resnet101_v2'):
            self.model = models.resnet101(pretrained=True)
            # Keep everything up to (and excluding) layer4/avgpool/fc.
            self.model = nn.Sequential(*list(self.model.children())[:(- 3)])
        if (feature_extraction_cnn == 'densenet201'):
            self.model = models.densenet201(pretrained=True)
            self.model = nn.Sequential(*list(self.model.features.children())[:(- 4)])
        if (not train_fe):
            # Freeze the backbone when not fine-tuning.
            for param in self.model.parameters():
                param.requires_grad = False
        if use_cuda:
            self.model_list = [model.cuda(gpu) for model in self.model_list]
        # Per-extraction-point scaling; defaults to 1 for every chunk.
        self.weights = (([1] * len(self.model_list)) if (weights is None) else weights)

    def forward(self, image_batch):
        """Run the chunks sequentially, collecting one feature map per chunk."""
        features_list = []
        features = image_batch
        for (i, model) in enumerate(self.model_list):
            features = model(features)
            if self.normalization:
                features = featureL2Norm(features)
            features_list.append((features * self.weights[i]))
        return features_list
class HighwayExitSample():
    """Randomly sampled curvature coefficients for a highway-exit road layout.

    Draws c1..c3 (main-road curvature terms) and c_exit (exit-lane curvature)
    from the world's road-network RNG.
    """

    def __init__(self):
        # NOTE: draw order matters for seeded reproducibility — the four
        # uniform() calls below must stay in this exact sequence.
        lo, hi = -0.03, 0.03
        rng = world.world.rng_road_network
        self.c1 = rng.uniform(low=lo, high=hi)
        self.c2 = rng.uniform(low=-0.005, high=0.005)
        self.c3 = rng.uniform(low=-0.01, high=0.01)
        # Exit curvature is bounded above relative to c3.
        self.c_exit = rng.uniform(low=lo, high=(self.c3 - 0.01))
def test_petsc_error(ocp_ksp, u, rng):
    """A diverging KSP solve must raise PETScKSPError, and the same failure must
    also be catchable via the generic CashocsException type."""

    def _randomize_and_solve():
        # Perturb the state with random local values, then re-solve.
        u.vector().set_local(rng.rand(u.vector().local_size()))
        u.vector().apply('')
        ocp_ksp.compute_state_variables()

    with pytest.raises(PETScKSPError) as e_info:
        _randomize_and_solve()
    MPI.barrier(MPI.comm_world)
    assert 'PETSc linear solver did not converge.' in str(e_info.value)

    # Presumably PETScKSPError derives from CashocsException, so catching the
    # broader type must work as well.
    with pytest.raises(CashocsException):
        _randomize_and_solve()
    MPI.barrier(MPI.comm_world)
def patch_os_environ_helper(custom_environ: dict, excludes: dict):
    """Generator that swaps ``os.environ`` for a patched copy and restores it.

    The patched mapping contains every current environment entry whose key is
    not in ``excludes``, overlaid with all entries from ``custom_environ``.
    Yields the patched mapping; the original ``os.environ`` object is restored
    when the generator is closed or exhausted.
    """
    patched = {key: val for key, val in os.environ.items() if key not in excludes}
    patched.update(custom_environ)
    try:
        original = os.environ
        os.environ = patched
        yield os.environ
    finally:
        # Always restore the real environment object, even on error.
        os.environ = original
class ModulusLikelihood(Likelihood):
    """Likelihood for modulus observations y = |z| of a complex signal z.

    Complex values are carried in a 2-channel real representation and converted
    via ``array2complex`` / ``complex2array``. With ``isotropic=True`` the
    backward variance is averaged over components.
    """

    def __init__(self, y, y_name='y', isotropic=True):
        # y: observed moduli (non-negative).
        self.y_name = y_name
        self.size = self.get_size(y)
        self.isotropic = isotropic
        self.repr_init()
        self.y = y

    def sample(self, Z):
        """Forward channel: the observation is the modulus of Z."""
        Z = array2complex(Z)
        return np.absolute(Z)

    def math(self):
        # LaTeX label for pretty-printing the factor graph node.
        return '$|\\cdot|$'

    def scalar_backward_mean(self, az, bz, y):
        """Posterior mean of z given message (az, bz) and observation y."""
        bz = array2complex(bz)
        b = np.absolute(bz)
        # NOTE(review): ive_ratio is presumably a ratio of exponentially scaled
        # modified Bessel functions (I1/I0) — confirm against its definition.
        I = ive_ratio((b * y))
        # Mean points along the direction of bz, with magnitude y * I.
        rz = ((normalize(bz) * y) * I)
        return rz

    def scalar_backward_variance(self, az, bz, y):
        """Posterior variance of z given message (az, bz) and observation y."""
        bz = array2complex(bz)
        b = np.absolute(bz)
        I = ive_ratio((b * y))
        vz = ((0.5 * (y ** 2)) * (1 - (I ** 2)))
        return vz

    def scalar_log_partition(self, az, bz, y):
        """Scalar log-partition A(az, bz, y); uses ive(0, .) for stability
        (the exponential scaling is compensated by the trailing ``+ b*y``)."""
        b = np.absolute(bz)
        A = (((((- 0.5) * az) * (y ** 2)) + np.log((((2 * np.pi) * y) * ive(0, (b * y))))) + (b * y))
        return A

    def compute_backward_posterior(self, az, bz, y):
        """Posterior mean and variance; variance averaged when isotropic."""
        bz = array2complex(bz)
        b = np.absolute(bz)
        I = ive_ratio((b * y))
        rz = ((normalize(bz) * y) * I)
        vz = ((0.5 * (y ** 2)) * (1 - (I ** 2)))
        if self.isotropic:
            vz = np.mean(vz)
        rz = complex2array(rz)
        return (rz, vz)

    def b_measure(self, mz_hat, qz_hat, tz0_hat, f):
        raise NotImplementedError

    def bz_measure(self, mz_hat, qz_hat, tz0_hat, f):
        raise NotImplementedError

    def beliefs_measure(self, az, tau_z, f):
        """Average f(bz, y) over the belief distribution of (b, y).

        u_eff = max(0, az*tau_z - 1) is the effective variance of b; when it is
        zero the b-integral collapses and only y is integrated.
        """
        u_eff = np.maximum(0, ((az * tau_z) - 1))
        if (u_eff == 0):
            def f_scaled_y(xi_y):
                # Change of variables y = xi_y / sqrt(az) under a standard
                # Gaussian measure; relu keeps only y >= 0 (y is a modulus).
                y = (xi_y / np.sqrt(az))
                coef_y = np.sqrt(((2 * np.pi) * az))
                bz = complex2array(np.array(0))
                return ((coef_y * relu(y)) * f(bz, y))
            return gaussian_measure(0, 1, f_scaled_y)
        sz_eff = np.sqrt((az * u_eff))
        def f_scaled(xi_b, xi_y):
            # b ~ N(0, az*u_eff), y = b/az + noise; both restricted to >= 0.
            b = (sz_eff * xi_b)
            y = ((b / az) + (xi_y / np.sqrt(az)))
            coef = ((2 * np.pi) / np.sqrt(u_eff))
            bz = complex2array(np.array(b))
            return ((((coef * relu(b)) * relu(y)) * ive(0, (b * y))) * f(bz, y))
        return gaussian_measure_2d(0, 1, 0, 1, f_scaled)

    def measure(self, y, f):
        """Integrate f over the circle of radius y (uniform phase), weighted by y.

        NOTE(review): ``quad`` is presumably scipy.integrate.quad — confirm
        against the module imports.
        """
        def polar_f(theta):
            return (y * f((y * np.exp((theta * 1j)))))
        return quad(polar_f, 0, (2 * np.pi))[0]

    def compute_log_partition(self, az, bz, y):
        """Mean log-partition over components.

        NOTE(review): the trailing ``/ 2`` presumably compensates for the
        2-channel real representation double-counting each complex component —
        confirm against the Likelihood base class conventions.
        """
        b = np.absolute(bz)
        A = (((((- 0.5) * az) * (y ** 2)) + np.log((((2 * np.pi) * y) * ive(0, (b * y))))) + (b * y))
        return (A.mean() / 2)
class InstancesSchema(DictSchema):
    """Schema that flattens an ``Instances`` object into its fields plus the
    image size (appended last), and rebuilds it from that flat tuple."""

    def __call__(self, values):
        # The last element is the image size; everything before it is the
        # flattened field payload handled by the parent DictSchema.
        (image_size, fields) = (values[(- 1)], values[:(- 1)])
        fields = super().__call__(fields)
        return Instances(image_size, **fields)

    def flatten(cls, obj):
        # NOTE(review): this takes `cls` but no @classmethod decorator is
        # visible in this (whitespace-mangled) source — confirm against the
        # original file. Zero-arg super() still resolves via the class cell.
        (ret, schema) = super().flatten(obj.get_fields())
        # Store the image size as a tensor so it serializes uniformly with
        # the other flattened fields.
        size = obj.image_size
        if (not isinstance(size, torch.Tensor)):
            size = torch.tensor(size)
        return ((ret + (size,)), schema)
class ConvTemporalGraphical(nn.Module):
    """Temporal conv followed by graph aggregation over an adjacency stack.

    A single Conv2d produces ``out_channels * kernel_size`` channels; the
    result is regrouped into ``kernel_size`` chunks and contracted against the
    matching adjacency matrices via einsum.
    """

    def __init__(self, in_channels, out_channels, kernel_size, t_kernel_size=1,
                 t_stride=1, t_padding=0, t_dilation=1, bias=True):
        super().__init__()
        self.kernel_size = kernel_size
        # Convolution acts only along the temporal axis (spatial kernel = 1).
        self.conv = nn.Conv2d(
            in_channels,
            out_channels * kernel_size,
            kernel_size=(t_kernel_size, 1),
            padding=(t_padding, 0),
            stride=(t_stride, 1),
            dilation=(t_dilation, 1),
            bias=bias,
        )

    def forward(self, x, A):
        # One adjacency matrix is required per kernel chunk.
        assert A.size(0) == self.kernel_size
        out = self.conv(x)
        batch, merged, frames, nodes = out.size()
        # Split the merged channel axis into (kernel, channel) and contract
        # the node axis against each adjacency matrix.
        out = out.view(batch, self.kernel_size, merged // self.kernel_size, frames, nodes)
        out = torch.einsum('nkctv,kvw->nctw', (out, A))
        return out.contiguous(), A
# NOTE(review): `_quantizer(...)` reads like a truncated decorator (upstream
# code typically writes `@mark_quantizer(...)`) whose leading tokens were lost
# in formatting — confirm against the original file. As written it is a bare
# expression statement.
_quantizer(quantization_target=QuantizationTarget.Activation, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC], identifier=TrainingMethod.STE)
class STEActivationQATQuantizer(BaseKerasQATTrainableQuantizer):
    """Straight-through-estimator activation quantizer for Keras QAT.

    Supports power-of-two and symmetric thresholds; quantization is performed
    with TF's fake-quant op over a fixed [min, max] range derived from the
    calibrated threshold.
    """

    def __init__(self, quantization_config: TrainableQuantizerActivationConfig):
        super().__init__(quantization_config)
        self.power_of_two = (quantization_config.activation_quantization_method == QuantizationMethod.POWER_OF_TWO)
        self.threshold_values = quantization_config.activation_quantization_params[C.THRESHOLD]
        self.threshold_shape = np.asarray(self.threshold_values).shape
        # Activation threshold is a scalar (per-tensor quantization).
        self.np_threshold_values = float(self.threshold_values)
        self.signed = quantization_config.activation_quantization_params[SIGNED]
        if self.power_of_two:
            # Round the threshold up to the nearest power of two (clamped below
            # by MIN_THRESHOLD to keep log2 finite).
            self.np_threshold_values = np.power(2.0, np.ceil(np.log2(np.maximum(self.np_threshold_values, C.MIN_THRESHOLD))))
        self.num_bits = quantization_config.activation_n_bits
        # Quantization step: one bit is spent on the sign when signed.
        delta = (self.np_threshold_values / np.power(2.0, (self.num_bits - int(self.signed))))
        min_int = ((- int(self.signed)) * (2 ** (self.num_bits - int(self.signed))))
        max_int = ((2 ** (self.num_bits - int(self.signed))) - 1)
        # Real-valued fake-quant range corresponding to the integer grid.
        self.min = (delta * min_int)
        self.max = (delta * max_int)

    def initialize_quantization(self, tensor_shape: TensorShape, name: str, layer: KerasTrainableQuantizationWrapper):
        """Register the (non-trainable) threshold and fake-quant range
        variables on the wrapping layer."""
        ptq_threshold_tensor = layer.add_weight((name + THRESHOLD_TENSOR), shape=(), initializer=tf.keras.initializers.Constant(1.0), trainable=False)
        ptq_threshold_tensor.assign(self.np_threshold_values)
        fq_min = layer.add_weight((name + FQ_MIN), shape=(), initializer=tf.keras.initializers.Constant((- 1.0)), trainable=False)
        fq_min.assign(self.min)
        fq_max = layer.add_weight((name + FQ_MAX), shape=(), initializer=tf.keras.initializers.Constant(1.0), trainable=False)
        fq_max.assign(self.max)
        # Expose the variables through the quantizer's variable registry.
        self.add_quantizer_variable(THRESHOLD_TENSOR, ptq_threshold_tensor, VariableGroup.QPARAMS)
        self.add_quantizer_variable(FQ_MIN, fq_min, VariableGroup.QPARAMS)
        self.add_quantizer_variable(FQ_MAX, fq_max, VariableGroup.QPARAMS)

    def __call__(self, inputs: tf.Tensor, training: bool):
        """Fake-quantize the activations over the stored [min, max] range."""
        _min = self.get_quantizer_variable(FQ_MIN)
        _max = self.get_quantizer_variable(FQ_MAX)
        q_tensor = tf.quantization.fake_quant_with_min_max_vars(inputs, _min, _max, num_bits=self.num_bits)
        return q_tensor

    def convert2inferable(self) -> Union[(ActivationPOTInferableQuantizer, ActivationSymmetricInferableQuantizer)]:
        """Export an inference-time quantizer matching the trained config."""
        if self.power_of_two:
            # Re-round to a power of two in case the variable drifted.
            pot_threshold = (2 ** np.ceil(np.log2(self.get_quantizer_variable(THRESHOLD_TENSOR))))
            return ActivationPOTInferableQuantizer(num_bits=self.num_bits, threshold=[pot_threshold], signed=self.signed)
        else:
            return ActivationSymmetricInferableQuantizer(num_bits=self.num_bits, threshold=[self.get_quantizer_variable(THRESHOLD_TENSOR).numpy()], signed=self.signed)
def get_nonzero_len_instance_inds_by_class(data_filename):
    """Group instance indices by category from a tab-separated data file.

    Keeps only instances with num_sentences > 0 and max_num_tokens > 0; each
    kept entry is [instance_index, num_sentences, max_num_tokens]. The header
    row supplies the column positions. Asserts exactly 10 categories appear.
    """

    def _header_column(header_line, field_name):
        # Walk tab-separated fields until one matches field_name exactly
        # (terminated by a tab or the trailing newline); return its position.
        rest = header_line
        position = 0
        while not (rest.startswith(field_name + '\t') or rest.startswith(field_name + '\n')):
            rest = rest[rest.index('\t') + 1:]
            position += 1
        return position

    class_inds_dict = {}
    instance_ind = 0
    with open(data_filename, 'r') as f:
        header_pending = True
        for line in f:
            if header_pending:
                category_ind = _header_column(line, 'category')
                num_sents_field_ind = _header_column(line, 'num_sentences')
                max_num_tokens_field_ind = _header_column(line, 'max_num_tokens_in_sentence')
                header_pending = False
                continue
            if line.strip() == '':
                continue
            category = get_nth_field_in_line(line, category_ind)
            num_sents = int(get_nth_field_in_line(line, num_sents_field_ind))
            max_num_tokens = int(get_nth_field_in_line(line, max_num_tokens_field_ind))
            # Ensure the category bucket exists even when this row is filtered out.
            bucket = class_inds_dict.setdefault(category, [])
            if num_sents > 0 and max_num_tokens > 0:
                bucket.append([instance_ind, num_sents, max_num_tokens])
            instance_ind += 1
    assert len(class_inds_dict) == 10
    print('Iterated over ' + str(instance_ind) + ' instances.')
    return class_inds_dict
def test_process_text():
    """process_text must yield the module-level expected lemmas for the fixtures."""
    analyzed = process_text(words, tags)
    lemmas = [word.lemma for word in analyzed.words]
    print(lemmas)
    assert lemmas == expected
# NOTE(review): the leading bare names `_sentencepiece` / `_tokenizers` read
# like truncated decorators (likely `@require_sentencepiece` /
# `@require_tokenizers`) lost in formatting — confirm against the original
# file. As written they are bare expression statements.
_sentencepiece
_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast XLNet tokenizers against a small sample
    SentencePiece vocab, plus an integration check against xlnet-base-cased."""
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Build a tokenizer from the sample vocab and persist it so the mixin
        # helpers can reload it from tmpdirname.
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        # Token <-> id round trip for a known special token.
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[(- 1)], '<eod>')
        # 1000 sentencepiece tokens + 6 added special tokens.
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        # Tokens mapped to id 0 decode back to '<unk>'.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + ''), 'i', (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 'se', '.'])
        self.assertListEqual(tokenizer.tokenize('Hello'), ['he', 'll', 'o'])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 'se', '.'])

    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # XLNet appends <sep> (4) after each segment and <cls> (3) at the end.
        assert (encoded_sentence == (text + [4, 3]))
        assert (encoded_pair == (((text + [4]) + text_2) + [4, 3]))

    def test_tokenizer_integration(self):
        # Golden encodings for three sequences; shorter sequences are
        # left-padded with <pad> (5), token_type 3 and attention 0.
        expected_encoding = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='xlnet-base-cased', revision='cc31ec7ca9a106dee7bb312b73ae511')
def SubstituteTemplate(template, values):
    """Expand ``${key}`` placeholders in *template* using *values*.

    Substitution iterates to a fixed point, so a substituted value may itself
    contain further ``${...}`` placeholders (a value that contains its own
    placeholder never terminates — same as the original implementation).

    Fix vs. original: the original built ``re.sub`` patterns from unescaped
    keys and used raw values as regex replacement strings, so keys with regex
    metacharacters (e.g. ``a+b``) and values with backslashes (e.g. ``\\b``)
    misbehaved or raised. Literal ``str.replace`` has the intended semantics.

    Args:
        template: str containing zero or more ``${key}`` placeholders.
        values: mapping of key -> replacement string.

    Returns:
        The template with all reachable placeholders expanded.
    """
    text = template
    changed = True
    while changed:
        changed = False
        for key, value in values.items():
            placeholder = '${%s}' % key
            # Literal replacement: no regex escaping issues on either side.
            newtext = text.replace(placeholder, value)
            if newtext != text:
                changed = True
                text = newtext
    return text
def get_edge_label(dataset, current, horizon, mode):
    """Collect edge labels for a temporal prediction task.

    Args:
        dataset: indexable sequence of snapshots exposing ``edge_label`` and
            ``edge_label_index`` tensors.
        current: index of the current snapshot.
        horizon: number of future steps.
        mode: 'before' concatenates labels from steps current+1..current+horizon;
            'at' takes labels exactly at current+horizon.

    Returns:
        (edge_label, edge_label_index) tensors.

    Raises:
        ValueError: for an unknown ``mode`` (the original fell through to an
            ``UnboundLocalError`` on the return statement).
    """
    if mode == 'before':
        steps = range(1, horizon + 1)
        # Labels stack along dim 0; index pairs stack along dim 1 (2 x E layout).
        edge_label = torch.cat([dataset[current + i].edge_label for i in steps], dim=0)
        edge_label_index = torch.cat([dataset[current + i].edge_label_index for i in steps], dim=1)
    elif mode == 'at':
        snapshot = dataset[current + horizon]
        edge_label = snapshot.edge_label
        edge_label_index = snapshot.edge_label_index
    else:
        raise ValueError(f"unknown mode: {mode!r} (expected 'before' or 'at')")
    return (edge_label, edge_label_index)
def yawVsPowerContour(yws, ws, ti, xs, ys, res=30, saveas=None):
    """Plot res x res 3D contour surfaces of plant power over a sweep of two
    yaw angles, once from the neural surrogate and once from FLORIS.

    Args:
        yws: initial yaw list; only its length (2 or 3 turbines) is used to
            choose the sweep layout before it is overwritten (see NOTE below).
        ws, ti, xs, ys: wind speed, turbulence intensity and turbine layout,
            forwarded to ``compare``.
        res: grid resolution per axis.
        saveas: basename for saved figures; None shows the plots instead.
    """
    # Importing mplot3d registers the '3d' projection used below.
    from mpl_toolkits import mplot3d
    x = np.linspace(0, res, res)
    y = np.linspace(0, res, res)
    (X, Y) = np.meshgrid(x, y)
    powerNeural = np.zeros((res, res))
    powerFloris = np.zeros((res, res))
    cnt = 0  # NOTE(review): unused counter.
    for i in range(res):
        for j in range(res):
            # NOTE(review): the `yws` parameter is overwritten here — after the
            # first iteration its length is fixed, so the 2-turbine branch
            # sticks once taken; confirm this is intended.
            if (len(yws) == 2):
                yws = [i, j]
            elif (len(yws) == 3):
                yws = [0, i, j]
            r = compare(yws, ws, ti, xs, ys, print_times=False, timings=False, power_opt=True, single=False)
            # r[0] is negated power in W (optimizer convention, presumably);
            # division by 1e6 converts to MW — confirm against `compare`.
            (powerNeural[(i, j)], powerFloris[(i, j)]) = (((- r[0]) / 1000000.0), r[1])
    fig = plt.figure(1)
    ax = plt.axes(projection='3d')
    ax.contour3D(X, Y, powerNeural, 50, cmap='viridis')
    ax.set_xlabel('Yaw b')
    ax.set_ylabel('Yaw a')
    ax.set_zlabel('Power (MW)')
    ax.set_title('Neural')
    if (saveas != None):
        fig.savefig((('figures/' + str(saveas)) + 'yvpd'), dpi=1200)
    else:
        plt.show()
    fig = plt.figure(2)
    ax = plt.axes(projection='3d')
    ax.contour3D(X, Y, powerFloris, 50, cmap='viridis')
    ax.set_xlabel('Yaw b')
    ax.set_ylabel('Yaw a')
    ax.set_zlabel('Power (MW)')
    ax.set_title('FLORIS')
    if (saveas != None):
        fig.savefig((('figures/' + str(saveas)) + 'yvpf'), dpi=1200)
    else:
        plt.show()
def interpolate_data_grad_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, output_size, mode, align_corners=True, half_pixel=False, half_pixel_for_nn=False, channel_last=False):
    """Backward-of-backward helper for interpolation.

    The gradient of the interpolation data-grad is itself an interpolation of
    the incoming gradient with the same resampling settings.
    """
    upstream_grad = grad_inputs[0]
    return F.interpolate(upstream_grad, None, output_size, mode, align_corners,
                         half_pixel, half_pixel_for_nn, channel_last)
class VoxelGenerator():
    """Converts point clouds into voxel grids over a fixed 3D range.

    The grid resolution is derived from the point-cloud range and voxel size;
    voxelization itself is delegated to ``points_to_voxel``.
    """

    def __init__(self, voxel_size, point_cloud_range, max_num_points, max_voxels=20000):
        pc_range = np.array(point_cloud_range, dtype=np.float32)
        vox = np.array(voxel_size, dtype=np.float32)
        # Voxels per axis = spatial extent / voxel edge, rounded to nearest int.
        extent = pc_range[3:] - pc_range[:3]
        grid = np.round(extent / vox).astype(np.int64)
        self._voxel_size = vox
        self._point_cloud_range = pc_range
        self._max_num_points = max_num_points
        self._max_voxels = max_voxels
        self._grid_size = grid

    def generate(self, points):
        # True enables the reverse index layout expected downstream —
        # see the points_to_voxel signature for the exact meaning.
        return points_to_voxel(points, self._voxel_size, self._point_cloud_range,
                               self._max_num_points, True, self._max_voxels)

    # Plain accessor methods. NOTE(review): no @property decorators are visible
    # in this (whitespace-mangled) source — confirm against the original file.
    def voxel_size(self):
        return self._voxel_size

    def max_num_points_per_voxel(self):
        return self._max_num_points

    def point_cloud_range(self):
        return self._point_cloud_range

    def grid_size(self):
        return self._grid_size
def idx_for_value(value: Union[(int, float, complex)], param_vals: ndarray) -> int:
    """Return the index of the entry in ``param_vals`` closest to ``value``.

    An exact (numerically close) match is returned immediately. Otherwise the
    behavior depends on the global settings: with FUZZY_SLICING off a
    ValueError is raised; with it on, the nearest index is returned, with an
    optional warning when FUZZY_WARNING is set.
    """
    location = int(np.abs(param_vals - value).argmin())
    selected_value = param_vals[location]
    # Exact match: done.
    if cmath.isclose(selected_value, value):
        return location
    # No exact match and fuzzy slicing disabled: refuse.
    if not settings.FUZZY_SLICING:
        raise ValueError('No matching entry for parameter value {} in the array.'.format(value))
    # Fuzzy match accepted; optionally warn (the value is known not to be
    # close at this point, so no re-check is needed).
    if settings.FUZZY_WARNING:
        warnings.warn_explicit('Using fuzzy value based indexing: selected value is {}'.format(selected_value), UserWarning, '', location)
    return location
def test_clean_up(digraph_multiple_roots):
    """After _clean_up, the fitted attributes must be deleted entirely."""
    digraph_multiple_roots._clean_up()
    # Attribute access itself must raise — deletion, not merely set-to-None.
    for attr_name in ('X_', 'y_'):
        with pytest.raises(AttributeError):
            assert getattr(digraph_multiple_roots, attr_name) is None
class Ui_Form(object):
    """MaskGAN editing window: a toolbar of mask-editing buttons plus three
    graphics views. Rewritten data-driven: each button is one spec row of
    (attribute name, geometry, Form handler name, display text)."""

    _BUTTON_SPECS = [
        ('pushButton', (1160, 360, 81, 27), 'edit', 'Edit'),
        ('pushButton_2', (10, 10, 97, 27), 'open', 'Open Image'),
        ('pushButton_3', (10, 40, 97, 27), 'open_mask', 'Open Mask'),
        ('pushButton_4', (130, 10, 97, 27), 'clear', 'Clear'),
        ('pushButton_5', (130, 40, 97, 27), 'undo', 'Undo'),
        ('pushButton_6', (250, 10, 97, 27), 'save_img', 'Save Image'),
        ('pushButton_7', (250, 40, 97, 27), 'bg_mode', 'BackGround'),
        ('pushButton_8', (450, 10, 97, 27), 'skin_mode', 'Skin'),
        ('pushButton_9', (450, 40, 97, 27), 'nose_mode', 'Nose'),
        ('pushButton_10', (570, 10, 97, 27), 'eye_g_mode', 'Eyeglass'),
        ('pushButton_11', (570, 40, 97, 27), 'l_eye_mode', 'Left Eye'),
        ('pushButton_12', (690, 10, 97, 27), 'r_eye_mode', 'Right Eye'),
        ('pushButton_13', (690, 40, 97, 27), 'l_brow_mode', 'Left Eyebrow'),
        ('pushButton_14', (810, 10, 97, 27), 'r_brow_mode', 'Right Eyebrow'),
        ('pushButton_15', (810, 40, 97, 27), 'l_ear_mode', 'Left ear'),
        ('pushButton_16', (930, 10, 97, 27), 'r_ear_mode', 'Right ear'),
        ('pushButton_17', (930, 40, 97, 27), 'mouth_mode', 'Mouth'),
        ('pushButton_18', (1050, 10, 97, 27), 'u_lip_mode', 'Upper Lip'),
        ('pushButton_19', (1050, 40, 97, 27), 'l_lip_mode', 'Lower Lip'),
        ('pushButton_20', (1170, 10, 97, 27), 'hair_mode', 'Hair'),
        ('pushButton_21', (1170, 40, 97, 27), 'hat_mode', 'Hat'),
        ('pushButton_22', (1290, 10, 97, 27), 'ear_r_mode', 'Earring'),
        ('pushButton_23', (1290, 40, 97, 27), 'neck_l_mode', 'Necklace'),
        ('pushButton_24', (1410, 10, 97, 27), 'neck_mode', 'Neck'),
        ('pushButton_25', (1410, 40, 97, 27), 'cloth_mode', 'Cloth'),
        ('pushButton_26', (1530, 10, 97, 27), 'increase', '+'),
        ('pushButton_27', (1530, 40, 97, 27), 'decrease', '-'),
    ]

    _VIEW_SPECS = [
        ('graphicsView', (20, 120, 512, 512)),
        ('graphicsView_2', (620, 120, 512, 512)),
        ('graphicsView_3', (1260, 120, 512, 512)),
    ]

    def setupUi(self, Form):
        """Create all widgets, translate labels, then wire the signals
        (same order as the generated original)."""
        Form.setObjectName('Form')
        Form.resize(1800, 660)
        for obj_name, geom, _handler, _text in self._BUTTON_SPECS:
            button = QtWidgets.QPushButton(Form)
            button.setGeometry(QtCore.QRect(*geom))
            button.setObjectName(obj_name)
            setattr(self, obj_name, button)
        for obj_name, geom in self._VIEW_SPECS:
            view = QtWidgets.QGraphicsView(Form)
            view.setGeometry(QtCore.QRect(*geom))
            view.setObjectName(obj_name)
            setattr(self, obj_name, view)
        self.retranslateUi(Form)
        # Each button triggers the slot of the same spec row on the Form.
        for obj_name, _geom, handler, _text in self._BUTTON_SPECS:
            getattr(self, obj_name).clicked.connect(getattr(Form, handler))
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Apply translated window title and button texts."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate('Form', 'MaskGAN'))
        for obj_name, _geom, _handler, text in self._BUTTON_SPECS:
            getattr(self, obj_name).setText(_translate('Form', text))
def test_should_explain_output(convolutional_model, random_data, mocker):
    """SmoothGrad output shape should match the input spatial shape
    (channel axis dropped)."""
    # Make grid_display a pass-through so the raw map shape can be asserted.
    mocker.patch('tf_explain.core.smoothgrad.grid_display', side_effect=(lambda arr: arr))
    images, labels = random_data
    grid = SmoothGrad().explain((images, labels), convolutional_model, 0)
    assert grid.shape == images.shape[:-1]
def read_tfrecord(example):
    """Parse one serialized TFRecord example into ``(image, one_hot_class)``.

    The image is decoded from JPEG bytes and scaled to [0, 1]; the one-hot
    label is densified and reshaped to length 5.
    """
    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'class': tf.io.FixedLenFeature([], tf.int64),
        'one_hot_class': tf.io.VarLenFeature(tf.float32),
    }
    parsed = tf.io.parse_single_example(example, feature_spec)
    image = tf.cast(tf.image.decode_jpeg(parsed['image'], channels=3), tf.float32) / 255.0
    # NOTE(review): computed but not returned in the original — kept for parity.
    class_label = tf.cast(parsed['class'], tf.int32)
    one_hot_class = tf.reshape(tf.sparse.to_dense(parsed['one_hot_class']), [5])
    return (image, one_hot_class)
def create_model(hparams, model, length=22):
    """Build train/eval/infer model instances, each in its own fresh graph.

    ``length`` is accepted for interface compatibility but not used here.
    Returns a (TrainModel, EvalModel, InferModel) triple of (graph, model)
    named tuples.
    """

    def _build(mode_key):
        # Each mode gets an isolated graph so variables do not collide.
        graph = tf.Graph()
        with graph.as_default():
            instance = model(hparams, mode_key)
        return graph, instance

    train_graph, train_model = _build(tf.contrib.learn.ModeKeys.TRAIN)
    eval_graph, eval_model = _build(tf.contrib.learn.ModeKeys.EVAL)
    infer_graph, infer_model = _build(tf.contrib.learn.ModeKeys.INFER)
    return (TrainModel(graph=train_graph, model=train_model),
            EvalModel(graph=eval_graph, model=eval_model),
            InferModel(graph=infer_graph, model=infer_model))
def sample_elite_steps(dataset: Dict[str, np.ndarray], elite_property: str = 'length', elite_traj_fraction: float = 0.2, elite_step_fraction: float = 0.2, samples: int = 200, reverse: bool = False) -> Tuple[np.ndarray, np.ndarray]:
    """Sample (observation, action) pairs from late steps of elite trajectories.

    Fix vs. original: the return annotation read ``np.ndarary`` which raised
    AttributeError at import time; the unknown-property branch now raises a
    descriptive ValueError instead of a bare one.

    Args:
        dataset: D4RL-style dict with at least 'observations' and 'actions'
            (and 'rewards' when ranking by reward).
        elite_property: 'length' or 'reward' — how trajectories are ranked.
        elite_traj_fraction: top fraction of trajectories considered elite.
        elite_step_fraction: fraction of each trajectory's tail to sample from.
        samples: number of (obs, action) pairs to draw.
        reverse: invert the ranking (sample from the worst trajectories).

    Returns:
        (observations, actions) arrays of length ``samples``.
    """
    (starts, ends, lengths) = util.extract_traj_markers(dataset)
    if elite_property == 'length':
        sorted_indices = np.argsort(lengths)
    elif elite_property == 'reward':
        rewards = visualize.get_episode_rewards(dataset['rewards'], starts, ends)
        sorted_indices = np.argsort(rewards)
    else:
        raise ValueError(f"unknown elite_property: {elite_property!r} (expected 'length' or 'reward')")
    if reverse:
        sorted_indices = sorted_indices[::(- 1)]
    num_elites = np.ceil((elite_traj_fraction * len(lengths))).astype(int)
    # NOTE(review): this slice yields num_elites - 1 trajectories (the stop
    # index is exclusive); if exactly num_elites are intended use
    # sorted_indices[-num_elites:][::-1]. Kept as-is to preserve behavior.
    elite_indices = sorted_indices[:(- num_elites):(- 1)]
    elite_index = np.random.choice(elite_indices, size=samples)
    elite_start = starts[elite_index]
    # Draw a step uniformly from the last elite_step_fraction of each trajectory.
    elite_proportional_time = ((1 - elite_step_fraction) + (np.random.rand(samples) * elite_step_fraction))
    elite_relative_time = np.floor((elite_proportional_time * lengths[elite_index])).astype(int)
    elite_id = (elite_start + elite_relative_time)
    return (dataset['observations'][elite_id], dataset['actions'][elite_id])
def is_explicitly_view_dependent(df):
    """Boolean Series: True for rows whose ``tokens`` mention a view-dependent
    word (e.g. 'left', 'front', 'behind')."""
    view_words = frozenset({'front', 'behind', 'back', 'right', 'left', 'facing',
                            'leftmost', 'rightmost', 'looking', 'across'})

    def _mentions_view_word(tokens):
        return any(token in view_words for token in tokens)

    return df.tokens.apply(_mentions_view_word)
class open_index_h5(object):
    """Context manager opening an index HDF5 file for reading or writing.

    Fixes vs. original: the Python-2-only ``unicode`` check raised NameError
    on Python 3 for non-str arguments (now a plain ``str`` check), and the
    mode TypeError message reported ``type(self.f_name)`` instead of
    ``type(self.mode)``.
    """

    def __init__(self, f_name, mode, num_points_per_sample=None):
        self.f_name = f_name
        self.mode = mode
        # Required only for write mode.
        self.num_points_per_sample = num_points_per_sample
        self.saver = None  # set in __enter__

    def __enter__(self):
        """Validate arguments and open the appropriate reader/writer.

        Raises:
            TypeError: if f_name or mode is not a str.
            ValueError: if mode is not one of 'write'/'w'/'read'/'r'.
        """
        if not isinstance(self.f_name, str):
            raise TypeError('File name should be str, not {}'.format(type(self.f_name)))
        if not isinstance(self.mode, str):
            raise TypeError('Mode name should be str, not {}'.format(type(self.mode)))
        if self.mode in ('write', 'w'):
            assert isinstance(self.num_points_per_sample, int), 'Number of sample is not indicated.'
            self.saver = IndexHdf5Writer(self.f_name, self.num_points_per_sample)
        elif self.mode in ('read', 'r'):
            self.saver = IndexHdf5Reader(self.f_name)
        else:
            raise ValueError('{} is not a valid open mode'.format(self.mode))
        return self.saver

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always close the underlying file handle.
        self.saver.close()
def check_fft_version():
    """On torch >= 1.7, verify that the ``torch.fft`` module was imported.

    Raises RuntimeError when the module exists for this torch version but is
    absent from ``sys.modules``. No-op on older torch versions.
    """
    if version.parse(torch.__version__) < version.parse('1.7'):
        return
    if 'torch.fft' not in sys.modules:
        raise RuntimeError('torch.fft module available but not imported')
def main():
    """Fine-tune / evaluate / predict a TF sequence-classification model on a GLUE task.

    Standard Hugging Face TensorFlow example flow: parse arguments, resume
    from checkpoints, load and tokenize data, build the model under the
    distribution strategy, then run the requested train/eval/predict phases.
    """
    # Parse CLI flags — or, when the sole argument is a .json file, load all
    # three dataclasses from it.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    if (not (training_args.do_train or training_args.do_eval or training_args.do_predict)):
        exit('Must specify at least one of --do_train, --do_eval or --do_predict!')

    # Detect an interrupted run in output_dir so training can resume from the
    # last checkpoint instead of failing on a non-empty directory.
    checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {checkpoint}. \nTo avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Logging: only the main process logs at INFO level.
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)

    # STS-B is GLUE's only regression task; everything else is classification.
    datasets = load_dataset('glue', data_args.task_name, cache_dir=model_args.cache_dir)
    is_regression = (data_args.task_name == 'stsb')
    if (not is_regression):
        label_list = datasets['train'].features['label'].names
        num_labels = len(label_list)
    else:
        num_labels = 1

    # Optionally attach a user-supplied CSV/JSON file as a 'user_data' split
    # so predictions can be produced for arbitrary inputs.
    if (data_args.predict_file is not None):
        logger.info('Preparing user-supplied file for predictions...')
        data_files = {'data': data_args.predict_file}
        for key in data_files.keys():
            logger.info(f'Loading a local file for {key}: {data_files[key]}')
        if data_args.predict_file.endswith('.csv'):
            user_dataset = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            user_dataset = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
        needed_keys = task_to_keys[data_args.task_name]
        for key in needed_keys:
            # The user file must provide the same text columns as the task.
            assert (key in user_dataset['data'].features), f'Your supplied predict_file is missing the {key} key!'
        datasets['user_data'] = user_dataset['data']

    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))

    (sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
    non_label_column_names = [name for name in datasets['train'].column_names if (name != 'label')]
    # Either pad everything to max_length now, or pad dynamically per batch.
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        padding = False

    # Reconcile a pretrained model's label ordering with the dataset's labels.
    label_to_id = None
    if ((config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (not is_regression)):
        label_name_to_id = {k.lower(): v for (k, v) in config.label2id.items()}
        if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}. \nIgnoring the model labels as a result.')
            label_to_id = {label: i for (i, label) in enumerate(label_list)}
    if (label_to_id is not None):
        config.label2id = label_to_id
        config.id2label = {id: label for (label, id) in config.label2id.items()}
    elif ((data_args.task_name is not None) and (not is_regression)):
        config.label2id = {l: i for (i, l) in enumerate(label_list)}
        config.id2label = {id: label for (label, id) in config.label2id.items()}

    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize one or two sentence columns depending on the task.
        args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
        return result

    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache))
    metric = load_metric('glue', data_args.task_name)

    def compute_metrics(preds, label_ids):
        # `preds` is the dict returned by model.predict(); reduce logits to
        # a scalar (regression) or an argmax label (classification).
        preds = preds['logits']
        preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
        result = metric.compute(predictions=preds, references=label_ids)
        if (len(result) > 1):
            result['combined_score'] = np.mean(list(result.values())).item()
        return result

    with training_args.strategy.scope():
        # Build (or resume) the model inside the distribution strategy scope.
        if (checkpoint is None):
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForSequenceClassification.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        optimizer = tf.keras.optimizers.Adam(learning_rate=training_args.learning_rate, beta_1=training_args.adam_beta1, beta_2=training_args.adam_beta2, epsilon=training_args.adam_epsilon, clipnorm=training_args.max_grad_norm)
        if is_regression:
            loss_fn = tf.keras.losses.MeanSquaredError()
            metrics = []
        else:
            # Models emit raw logits, hence from_logits=True.
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
            metrics = ['accuracy']
        model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics)

        # Convert each split to tf.data; TPUs need fixed (constant) shapes.
        tf_data = dict()
        if (isinstance(training_args.strategy, tf.distribute.TPUStrategy) or data_args.pad_to_max_length):
            logger.info("Padding all batches to max length because argument was set or we're on TPU.")
            dataset_mode = 'constant_batch'
        else:
            dataset_mode = 'variable_batch'
        # Optional per-split sample caps (None means no cap).
        max_samples = {'train': data_args.max_train_samples, 'validation': data_args.max_eval_samples, 'validation_matched': data_args.max_eval_samples, 'validation_mismatched': data_args.max_eval_samples, 'test': data_args.max_predict_samples, 'test_matched': data_args.max_predict_samples, 'test_mismatched': data_args.max_predict_samples, 'user_data': None}
        for key in datasets.keys():
            if ((key == 'train') or key.startswith('validation')):
                assert ('label' in datasets[key].features), f'Missing labels from {key} data!'
            if (key == 'train'):
                shuffle = True
                batch_size = training_args.per_device_train_batch_size
                # Dropping the remainder avoids a smaller final batch in training.
                drop_remainder = True
            else:
                shuffle = False
                batch_size = training_args.per_device_eval_batch_size
                drop_remainder = False
            samples_limit = max_samples[key]
            dataset = datasets[key]
            if (samples_limit is not None):
                dataset = dataset.select(range(samples_limit))
            data = convert_dataset_for_tensorflow(dataset, non_label_column_names, batch_size=batch_size, dataset_mode=dataset_mode, drop_remainder=drop_remainder, shuffle=shuffle)
            tf_data[key] = data

        if training_args.do_train:
            callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]
            # MNLI has two validation splits, so skip inline validation there.
            if (training_args.do_eval and (not (data_args.task_name == 'mnli'))):
                validation_data = tf_data['validation']
            else:
                validation_data = None
            model.fit(tf_data['train'], validation_data=validation_data, epochs=int(training_args.num_train_epochs), callbacks=callbacks)

        if training_args.do_eval:
            logger.info('*** Evaluate ***')
            # MNLI is evaluated on both matched and mismatched splits.
            if (data_args.task_name == 'mnli'):
                tasks = ['mnli', 'mnli-mm']
                tf_datasets = [tf_data['validation_matched'], tf_data['validation_mismatched']]
                raw_datasets = [datasets['validation_matched'], datasets['validation_mismatched']]
            else:
                tasks = [data_args.task_name]
                tf_datasets = [tf_data['validation']]
                raw_datasets = [datasets['validation']]
            for (raw_dataset, tf_dataset, task) in zip(raw_datasets, tf_datasets, tasks):
                eval_predictions = model.predict(tf_dataset)
                eval_metrics = compute_metrics(eval_predictions, raw_dataset['label'])
                print(f'Evaluation metrics ({task}):')
                print(eval_metrics)

        if (training_args.do_predict or data_args.predict_file):
            logger.info('*** Predict ***')
            # Collect every split predictions are requested for.
            tasks = []
            tf_datasets = []
            raw_datasets = []
            if training_args.do_predict:
                if (data_args.task_name == 'mnli'):
                    tasks.extend(['mnli', 'mnli-mm'])
                    tf_datasets.extend([tf_data['test_matched'], tf_data['test_mismatched']])
                    raw_datasets.extend([datasets['test_matched'], datasets['test_mismatched']])
                else:
                    tasks.append(data_args.task_name)
                    tf_datasets.append(tf_data['test'])
                    raw_datasets.append(datasets['test'])
            if data_args.predict_file:
                tasks.append('user_data')
                tf_datasets.append(tf_data['user_data'])
                raw_datasets.append(datasets['user_data'])
            for (raw_dataset, tf_dataset, task) in zip(raw_datasets, tf_datasets, tasks):
                test_predictions = model.predict(tf_dataset)
                # Labeled splits additionally get metrics printed.
                if ('label' in raw_dataset):
                    test_metrics = compute_metrics(test_predictions, raw_dataset['label'])
                    print(f'Test metrics ({task}):')
                    print(test_metrics)
                if is_regression:
                    predictions_to_write = np.squeeze(test_predictions['logits'])
                else:
                    predictions_to_write = np.argmax(test_predictions['logits'], axis=1)
                output_predict_file = os.path.join(training_args.output_dir, f'predict_results_{task}.txt')
                with open(output_predict_file, 'w') as writer:
                    logger.info(f'***** Writing prediction results for {task} *****')
                    writer.write('index\tprediction\n')
                    for (index, item) in enumerate(predictions_to_write):
                        if is_regression:
                            writer.write(f'{index}\t{item:3.3f}\n')
                        else:
                            # Map the class index back to its string label.
                            item = model.config.id2label[item]
                            writer.write(f'{index}\t{item}\n')
def test_control_cg_pr_multiple(state_forms, bcs_list, J, states, controls, adjoints, config_ocp):
    """Solve the optimal control problem with Polak-Ribiere NCG and check that
    the solver reports convergence within its relative tolerance."""
    config_ocp.set('AlgoCG', 'cg_method', 'PR')
    problem = cashocs.OptimalControlProblem(state_forms, bcs_list, J, states, controls, adjoints, config=config_ocp)
    problem.solve(algorithm='ncg', rtol=0.01, atol=0.0, max_iter=36)
    solver = problem.solver
    assert solver.relative_norm <= solver.rtol
class BaseProfilerTrainer():
    """Base trainer that runs a short torch.profiler session over the first
    few training batches and writes traces for TensorBoard.

    Subclasses implement ``train_on_batch`` / ``validate_on_batch``.
    """

    def __init__(self, config, model, train_loader, test_loader=None, device=None):
        # config: experiment settings (lr, wd, epochs, multigpu, name, ...).
        self.config = config
        if (device is None):
            # Default to GPU when available.
            device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
        self.device = device
        self.model = model
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.optimizer = self.init_optimizer()
        self.scheduler = self.init_scheduler()

    def init_optimizer(self):
        """Build AdamW, optionally with a 10x-smaller LR on backbone params."""
        if self.config.same_lr:
            print('Using same LR')
            params = self.model.parameters()
        else:
            print('Using diff LR')
            # Unwrap DataParallel/DDP to reach the param-group helpers.
            m = (self.model.module if self.config.multigpu else self.model)
            lr = self.config.lr
            # Backbone ("1x") params train at lr/10; heads ("10x") at full lr.
            params = [{'params': m.get_1x_lr_params(), 'lr': (lr / 10)}, {'params': m.get_10x_lr_params(), 'lr': lr}]
        return optim.AdamW(params, lr=self.config.lr, weight_decay=self.config.wd)

    def init_scheduler(self):
        """One-cycle LR schedule stepped once per batch."""
        # Preserve the per-group LRs chosen in init_optimizer as the cycle peaks.
        lrs = [l['lr'] for l in self.optimizer.param_groups]
        return optim.lr_scheduler.OneCycleLR(self.optimizer, lrs, epochs=self.config.epochs, steps_per_epoch=len(self.train_loader), cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=self.config.div_factor, final_div_factor=self.config.final_div_factor, pct_start=self.config.pct_start)

    def train_on_batch(self, batch, train_step):
        raise NotImplementedError

    def validate_on_batch(self, batch, val_step):
        raise NotImplementedError

    def train(self):
        """Profile a short training run: (wait=1 + warmup=1 + active=3) * repeat=2
        = 10 profiler steps, then stop."""
        print(f'Training {self.config.name}')
        self.should_log = self.should_write = False
        self.model.train()
        self.iters_per_epoch = len(self.train_loader)
        self.step = 0
        # Traces are written for TensorBoard under ./log/profile_<name>.
        prof = torch.profiler.profile(schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(f'./log/profile_{self.config.name}'), record_shapes=True, with_stack=True)
        prof.start()
        # tqdm progress bar only on the rank-0 process.
        for (i, batch) in (tqdm(enumerate(self.train_loader), desc=f'Profiling', total=(((1 + 1) + 3) * 2)) if is_rank_zero(self.config) else enumerate(self.train_loader)):
            if (self.step >= (((1 + 1) + 3) * 2)):
                # Profiling schedule exhausted — stop early.
                break
            losses = self.train_on_batch(batch, i)
            self.scheduler.step()
            self.step += 1
            prof.step()
        prof.stop()
# NOTE(review): this bare call looks like it was meant to be a decorator,
# i.e. `@_utils.test(require=ti.extension.assertion, ...)` applied to
# test_assert_ok — the leading `@` appears to be missing. Confirm against
# the original file before relying on this test being registered.
_utils.test(require=ti.extension.assertion, debug=True, gdb_trigger=False)
def test_assert_ok():
    """A chained-comparison assert that holds (10 <= 20 <= 20) must not raise."""
    def func():
        x = 20
        assert (10 <= x <= 20)
    func()
class MLP(nn.Module):
    """Pointwise MLP built from 1x1 convolutions.

    Each stage is [optional Dropout] -> Conv2d(1x1) -> BatchNorm2d -> activation,
    one stage per entry in ``out_channels``. Dropout layers are inserted only
    when ``dropout`` is meaningfully positive.
    """

    def __init__(self, in_channels, out_channels, activation='relu', dropout=0):
        super().__init__()
        dims = [in_channels] + out_channels
        self.layers = nn.ModuleList()
        for prev_dim, next_dim in zip(dims, dims[1:]):
            if dropout > 0.001:
                self.layers.append(nn.Dropout(p=dropout))
            self.layers.append(nn.Conv2d(prev_dim, next_dim, kernel_size=1))
            self.layers.append(nn.BatchNorm2d(next_dim))
            self.layers.append(activation_factory(activation))

    def forward(self, x):
        # Apply every registered layer in order.
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
def multiple_inputs_outputs_resblock(x, maps, kernel=(3, 3), pad=(1, 1), stride=(1, 1), w_bias=False, test=False, name='mo-convblock'):
    """Residual conv block: conv -> batchnorm, then relu(h + x).

    Parameters are created under the scope ``name``; ``test`` switches
    batch-norm to inference statistics.
    """
    with nn.parameter_scope(name):
        h = PF.convolution(x, maps, kernel=kernel, pad=pad, stride=stride, with_bias=w_bias)
        h = PF.batch_normalization(h, axes=[1], batch_stat=not test)
        return F.relu(h + x)
def print_results(query, results, top_k):
    """Pretty-print the ``top_k`` most similar corpus sentences for a query.

    Args:
        query: The query sentence that was searched.
        results: Mapping with parallel 'text' and 'score' sequences, assumed
            sorted so index 0 holds the best (smallest) score.
        top_k: Number of results to print; must not exceed len(results['text']).
    """
    print(f'Query: "{query}" \n')
    print(f'Top {top_k} most similar sentences in the corpus to the query (smallest score is most similar):')
    for i in range(top_k):
        # Fix: the score lookup previously referenced the undefined name
        # `top_k_results`, raising NameError on the first iteration; it must
        # read from the `results` parameter.
        print(f'''\n - {(i + 1)}: "{results['text'][i]}" with a similarity score of {results['score'][i]:.2f}''')
class RandomLightTorsoHalfCheetah(RoboschoolXMLModifierMixin, ModifiableRoboschoolHalfCheetah):
    """HalfCheetah variant whose body geom density is re-randomized on reset
    by rewriting the MJCF XML."""

    def randomize_mass(self):
        # Sample a density in the extreme range but excluding the inner
        # "random" band (uniform_exclude_inner handles the exclusion).
        self.density = uniform_exclude_inner(self.np_random.uniform, self.EXTREME_LOWER_DENSITY, self.EXTREME_UPPER_DENSITY, self.RANDOM_LOWER_DENSITY, self.RANDOM_UPPER_DENSITY)
        # Write the sampled density into every body geom of the model XML.
        with self.modify_xml('half_cheetah.xml') as tree:
            for elem in tree.iterfind('worldbody/body/geom'):
                elem.set('density', str(self.density))

    def _reset(self, new=True):
        # Only draw a fresh density for brand-new episodes.
        if new:
            self.randomize_mass()
        return super(RandomLightTorsoHalfCheetah, self)._reset(new)

    def parameters(self):
        # NOTE(review): `super().parameters` is accessed without calling it.
        # This only works if the base class exposes `parameters` as a
        # property (or dict attribute); if it is a plain method, this binds
        # the method object and `.update` will raise — confirm upstream.
        parameters = super(RandomLightTorsoHalfCheetah, self).parameters
        parameters.update({'density': self.density})
        return parameters
# NOTE(review): likely intended as the decorator `@_utils.test(...)` applied
# to test_print_i64 — as written it is a bare expression statement and does
# not register the test; confirm against the original file.
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True)
def test_print_i64():
    def func(i: ti.i64):
        print('i =', i)
    # i64 minimum plus 2**31: a value far outside the i32 range, so printing
    # it exercises true 64-bit handling.
    func(((- (2 ** 63)) + (2 ** 31)))
    # Flush device-side prints before the test ends.
    ti.sync()
def raw_parse_dir(exps_path, prefix='predicts'):
    """Recursively collect per-run result files under an experiments directory.

    Matches ``predicts_*.tsv`` or ``metrics_*.csv`` files (per ``prefix``)
    whose names carry both a ``dataset_`` and a ``model_`` tag.

    Returns:
        (dataset_models_paths, dataset_models_dict): the first maps
        dataset name -> {model name -> file path}; the second maps
        dataset name -> set of model names.

    Raises:
        ValueError: For an unsupported ``prefix``.
    """
    root = Path(exps_path)
    if prefix == 'predicts':
        pattern = '**/' + 'predicts_*.tsv'
    elif prefix == 'metrics':
        pattern = '**/' + 'metrics_*.csv'
    else:
        raise ValueError(f"Get prefix = {prefix}, supports only ['predicts', 'metrics']")

    def _is_result_file(path):
        # Keep only files that carry both the dataset and model tags.
        fname = str(path.name)
        return fname.startswith(prefix) and ('dataset_' in fname) and ('model_' in fname)

    dataset_models_paths = {}
    dataset_models_dict = {}
    for path in filter(_is_result_file, root.glob(pattern)):
        dataset_name, model_name = parse_name(str(path.name))
        if dataset_name in dataset_models_dict:
            dataset_models_dict[dataset_name].add(model_name)
            dataset_models_paths[dataset_name][model_name] = path
        else:
            dataset_models_dict[dataset_name] = {model_name}
            dataset_models_paths[dataset_name] = {model_name: path}
    return (dataset_models_paths, dataset_models_dict)
# NOTE(review): the line below is a truncated decorator — presumably
# `@pytest.mark.parametrize(...)`; the `@pytest.mark` prefix is missing at
# this chunk boundary. Confirm against the original file.
.parametrize('att_layer_num,dnn_hidden_units,sparse_feature_num', [(1, (), 1), (1, (4,), 1)])
def test_AutoInt(att_layer_num, dnn_hidden_units, sparse_feature_num):
    """Smoke-test the AutoInt model on synthetic data for each parameter combo."""
    # An empty DNN tower is skipped on TF >= 1.14 (unsupported combination).
    if ((version.parse(tf.__version__) >= version.parse('1.14.0')) and (len(dnn_hidden_units) == 0)):
        return
    model_name = 'AutoInt'
    sample_size = SAMPLE_SIZE
    # Uses the same count for sparse and dense synthetic features.
    (x, y, feature_columns) = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num)
    model = AutoInt(feature_columns, feature_columns, att_layer_num=att_layer_num, dnn_hidden_units=dnn_hidden_units, dnn_dropout=0.5)
    check_model(model, model_name, x, y)
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
    """A WSGI server that handles each request in a separate thread."""

    # Advertises threaded request handling (presumably surfaced to apps as
    # `wsgi.multithread` by the base server — confirm in BaseWSGIServer).
    multithread = True
    # Worker threads are daemons so they never block interpreter shutdown.
    daemon_threads = True
def test_construct_mean_function_Linear():
    """construct_mean_function should produce a gpflow Linear mean function
    for generic random inputs."""
    num_data, input_dim, output_dim = 11, 5, 7
    inputs = np.random.randn(num_data, input_dim)
    mean_function = construct_mean_function(inputs, input_dim, output_dim)
    assert isinstance(mean_function, gpflow.mean_functions.Linear)
# NOTE(review): the tuple below is a truncated decorator — presumably
# `@unittest.skipIf(...)`; the decorator name is missing at this chunk
# boundary. Confirm against the original file.
((device_cc() < 80), 'Device compute capability is insufficient for SM80 tests.')
class Conv2dDgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
    """SM80 data-gradient (dgrad) implicit-GEMM conv2d tests: TF32 NHWC inputs,
    F32 NHWC output, tensor-op F32 accumulation."""

    def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        # TF32 tensor-op MMA with 16x8x8 instruction shape, F32 accumulators.
        math_inst = MathInstruction(instruction_shape=[16, 8, 8], element_a=cutlass.float32, element_b=cutlass.float32, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        A = TensorDescription(element=math_inst.element_a, layout=cutlass.TensorNHWC, alignment=4)
        B = TensorDescription(element=math_inst.element_b, layout=cutlass.TensorNHWC, alignment=4)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.TensorNHWC, alignment=8)
        # 128x128x16 threadblock tile, 3 pipeline stages, 2x2x1 warps.
        tile_description = TileDescription(threadblock_shape=[128, 128, 16], stages=3, warp_count=[2, 2, 1], math_instruction=math_inst)
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, cutlass.float32)
        # Analytic iterator algorithm; unit-stride-only dgrad support.
        operation = Conv2dOperation(conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.analytic, arch=80, tile_description=tile_description, A=A, B=B, C=C, stride_support=StrideSupport.Unity, epilogue_functor=epilogue_functor, swizzling_functor=cutlass.IdentitySwizzle1)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
        # Identical configuration to the analytic case above, but exercising
        # the optimized iterator algorithm.
        math_inst = MathInstruction(instruction_shape=[16, 8, 8], element_a=cutlass.float32, element_b=cutlass.float32, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        A = TensorDescription(element=math_inst.element_a, layout=cutlass.TensorNHWC, alignment=4)
        B = TensorDescription(element=math_inst.element_b, layout=cutlass.TensorNHWC, alignment=4)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.TensorNHWC, alignment=8)
        tile_description = TileDescription(threadblock_shape=[128, 128, 16], stages=3, warp_count=[2, 2, 1], math_instruction=math_inst)
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, cutlass.float32)
        operation = Conv2dOperation(conv_kind=cutlass.conv.Operator.dgrad, iterator_algorithm=cutlass.conv.IteratorAlgorithm.optimized, arch=80, tile_description=tile_description, A=A, B=B, C=C, stride_support=StrideSupport.Unity, epilogue_functor=epilogue_functor, swizzling_functor=cutlass.IdentitySwizzle1)
        self.assertTrue(test_all_conv2d(operation))
def build_trainer(wordvec_pretrain_file, *args, treebank=TREEBANK):
    """Build a constituency-parser trainer from an in-memory treebank and
    return its model, asserting it is an LSTMModel.

    Extra ``*args`` are forwarded as additional CLI-style flags.
    """
    train_trees = tree_reader.read_trees(treebank)
    # The last training tree doubles as a one-tree dev set.
    dev_trees = train_trees[(- 1):]
    silver_trees = []
    # Prepend the pretrained word-vector flag, then parse into a config.
    args = (['--wordvec_pretrain_file', wordvec_pretrain_file] + list(args))
    args = constituency_parser.parse_args(args)
    foundation_cache = FoundationCache()
    # Dict-style access — parse_args here presumably returns a mapping,
    # not an argparse.Namespace (confirm in constituency_parser).
    model_load_name = args['load_name']
    (model, _, _, _) = trainer.build_trainer(args, train_trees, dev_trees, silver_trees, foundation_cache, model_load_name)
    assert isinstance(model.model, lstm_model.LSTMModel)
    return model
class VAE(PyTorchModule):
    """Variational autoencoder with a two-headed MLP encoder (mu, logvar) and
    a mirrored MLP decoder, plus optional input normalization by training-set
    statistics. Latent prior statistics (dist_mu/dist_std) and normalization
    state are preserved across pickling via __getstate__/__setstate__.
    """

    def __init__(self, representation_size, input_size, hidden_sizes=list([64, 128, 64]), init_w=0.001, hidden_init=ptu.fanin_init, output_activation=identity, output_scale=1, layer_norm=False, normalize=True, train_data_mean=None, train_data_std=None, **kwargs):
        # Record constructor args so the module can be re-instantiated on load.
        self.save_init_params(locals())
        super().__init__()
        self.representation_size = representation_size
        self.hidden_init = hidden_init
        self.output_activation = output_activation
        # Latent prior statistics, standard normal by default.
        self.dist_mu = np.zeros(self.representation_size)
        self.dist_std = np.ones(self.representation_size)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.init_w = init_w
        hidden_sizes = list(hidden_sizes)
        self.input_size = input_size
        # Encoder emits (mu, logvar) via two output heads.
        self.encoder = TwoHeadMlp(hidden_sizes, representation_size, representation_size, input_size, layer_norm=layer_norm, hidden_init=hidden_init, output_activation=output_activation, init_w=init_w)
        # Decoder mirrors the encoder's hidden sizes in reverse.
        hidden_sizes.reverse()
        self.decoder = Mlp(hidden_sizes, input_size, representation_size, layer_norm=layer_norm, hidden_init=hidden_init, output_activation=output_activation, init_w=init_w)
        self.output_scale = output_scale
        self.normalize = normalize
        # Default normalization statistics: identity transform.
        if (train_data_mean is None):
            self.train_data_mean = ptu.np_to_var(np.zeros(input_size))
        else:
            self.train_data_mean = train_data_mean
        if (train_data_std is None):
            self.train_data_std = ptu.np_to_var(np.ones(input_size))
        else:
            self.train_data_std = train_data_std

    def encode(self, input):
        """Flatten, optionally normalize, and encode input to (mu, logvar)."""
        input = input.view((- 1), self.input_size)
        if self.normalize:
            input = ((input - self.train_data_mean) / self.train_data_std)
        (mu, logvar) = self.encoder(input)
        return (mu, logvar)

    def reparameterize(self, mu, logvar):
        """Reparameterization trick: sample z = mu + std * eps in training
        mode; return the mean deterministically in eval mode."""
        if self.training:
            std = logvar.mul(0.5).exp_()
            eps = Variable(std.data.new(std.size()).normal_())
            return eps.mul(std).add_(mu)
        else:
            return mu

    def decode(self, z):
        """Decode latent z back to input space, undoing normalization and
        applying the configured output scale."""
        z = z.view((- 1), self.representation_size)
        output = self.decoder(z)
        if self.normalize:
            output = ((output * self.train_data_std) + self.train_data_mean)
        return (output * self.output_scale)

    def forward(self, x):
        """Full VAE pass: returns (reconstruction, mu, logvar)."""
        (mu, logvar) = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return (self.decode(z), mu, logvar)

    def __getstate__(self):
        # Persist latent-prior and normalization state alongside parameters.
        d = super().__getstate__()
        d['_dist_mu'] = self.dist_mu
        d['_dist_std'] = self.dist_std
        d['_normalize'] = self.normalize
        d['_train_data_mean'] = self.train_data_mean
        d['_train_data_std'] = self.train_data_std
        return d

    def __setstate__(self, d):
        super().__setstate__(d)
        self.dist_mu = d['_dist_mu']
        self.dist_std = d['_dist_std']
        self.normalize = d['_normalize']
        self.train_data_mean = d['_train_data_mean']
        self.train_data_std = d['_train_data_std']
def _test_pow_float_base_int_exp(dt_base, dt_exp):
    """Check `x ** y` for float bases (positive and negative, |x| != 0) and
    integer exponents in [-10, 10) against Python's own pow."""
    z = ti.field(dt_base, shape=())
    # NOTE(review): `func` looks like it should carry a `@ti.kernel` decorator
    # (it takes taichi-typed annotations and writes a field) — confirm against
    # the original file.
    def func(x: dt_base, y: dt_exp):
        z[None] = (x ** y)
    for x in [(- 6.66), (- 2), (- 1.5), (- 1), (- 0.5), 0.5, 1, 1.5, 2, 6.66]:
        for y in range((- 10), 10):
            func(x, y)
            # approx comparison absorbs float rounding differences.
            assert (z[None] == pytest.approx((x ** y)))
def eval_list_fname(real_graph_filename, pred_graphs_filename, baselines, eval_every, epoch_range=None, out_file_prefix=None):
    """Compare generated graph lists against real graphs across epochs.

    For each prediction file, computes degree/clustering/orbit distances for
    real-vs-real, real-vs-predicted, real-vs-perturbed, and (once) each
    baseline, accumulating results and writing CSV-style summaries.
    """
    if (out_file_prefix is not None):
        out_files = {'train': open((out_file_prefix + '_train.txt'), 'w+'), 'compare': open((out_file_prefix + '_compare.txt'), 'w+')}
    # NOTE(review): `out_files` is written to unconditionally below, so calling
    # with out_file_prefix=None raises UnboundLocalError — confirm whether the
    # file writes were meant to be guarded.
    out_files['train'].write('degree,clustering,orbits4\n')
    line = 'metric,real,ours,perturbed'
    for bl in baselines:
        line += (',' + bl)
    line += '\n'
    out_files['compare'].write(line)
    # 'ours' starts at 100 because it tracks a running minimum below.
    results = {'deg': {'real': 0, 'ours': 100, 'perturbed': 0, 'kron': 0}, 'clustering': {'real': 0, 'ours': 100, 'perturbed': 0, 'kron': 0}, 'orbits4': {'real': 0, 'ours': 100, 'perturbed': 0, 'kron': 0}}
    num_evals = len(pred_graphs_filename)
    if (epoch_range is None):
        epoch_range = [(i * eval_every) for i in range(num_evals)]
    for i in range(num_evals):
        real_g_list = utils.load_graph_list(real_graph_filename)
        pred_g_list_raw = utils.load_graph_list(pred_graphs_filename[i])
        # Cap the real set at 200 graphs to bound evaluation cost.
        if (len(real_g_list) > 200):
            real_g_list = real_g_list[0:200]
        shuffle(real_g_list)
        shuffle(pred_g_list_raw)
        real_g_len_list = np.array([len(real_g_list[i]) for i in range(len(real_g_list))])
        pred_g_len_list_raw = np.array([len(pred_g_list_raw[i]) for i in range(len(pred_g_list_raw))])
        perturbed_g_list_005 = perturb(real_g_list, 0.05)
        # Greedily match each real graph with the predicted graph of nearest
        # size, removing matches so no predicted graph is used twice.
        pred_g_list = []
        pred_g_len_list = []
        for value in real_g_len_list:
            pred_idx = find_nearest_idx(pred_g_len_list_raw, value)
            pred_g_list.append(pred_g_list_raw[pred_idx])
            pred_g_len_list.append(pred_g_len_list_raw[pred_idx])
            pred_g_len_list_raw = np.delete(pred_g_len_list_raw, pred_idx)
            del pred_g_list_raw[pred_idx]
            if (len(pred_g_list) == len(real_g_list)):
                break
        print(' epoch {} '.format(epoch_range[i]))
        print('real average nodes', (sum([real_g_list[i].number_of_nodes() for i in range(len(real_g_list))]) / len(real_g_list)))
        print('pred average nodes', (sum([pred_g_list[i].number_of_nodes() for i in range(len(pred_g_list))]) / len(pred_g_list)))
        print('num of real graphs', len(real_g_list))
        print('num of pred graphs', len(pred_g_list))
        # Baseline: distances between two halves of the real set.
        mid = (len(real_g_list) // 2)
        (dist_degree, dist_clustering) = compute_basic_stats(real_g_list[:mid], real_g_list[mid:])
        dist_4orbits = eval.stats.orbit_stats_all(real_g_list[:mid], real_g_list[mid:])
        print('degree dist among real: ', dist_degree)
        print('clustering dist among real: ', dist_clustering)
        print('orbits dist among real: ', dist_4orbits)
        results['deg']['real'] += dist_degree
        results['clustering']['real'] += dist_clustering
        results['orbits4']['real'] += dist_4orbits
        # Real vs predicted: track the best (minimum) distance over epochs.
        (dist_degree, dist_clustering) = compute_basic_stats(real_g_list, pred_g_list)
        dist_4orbits = eval.stats.orbit_stats_all(real_g_list, pred_g_list)
        print('degree dist between real and pred at epoch ', epoch_range[i], ': ', dist_degree)
        print('clustering dist between real and pred at epoch ', epoch_range[i], ': ', dist_clustering)
        print('orbits dist between real and pred at epoch ', epoch_range[i], ': ', dist_4orbits)
        results['deg']['ours'] = min(dist_degree, results['deg']['ours'])
        results['clustering']['ours'] = min(dist_clustering, results['clustering']['ours'])
        results['orbits4']['ours'] = min(dist_4orbits, results['orbits4']['ours'])
        out_files['train'].write((str(dist_degree) + ','))
        out_files['train'].write((str(dist_clustering) + ','))
        out_files['train'].write((str(dist_4orbits) + ','))
        # Real vs 5%-perturbed real graphs (noise floor reference).
        (dist_degree, dist_clustering) = compute_basic_stats(real_g_list, perturbed_g_list_005)
        dist_4orbits = eval.stats.orbit_stats_all(real_g_list, perturbed_g_list_005)
        print('degree dist between real and perturbed at epoch ', epoch_range[i], ': ', dist_degree)
        print('clustering dist between real and perturbed at epoch ', epoch_range[i], ': ', dist_clustering)
        print('orbits dist between real and perturbed at epoch ', epoch_range[i], ': ', dist_4orbits)
        results['deg']['perturbed'] += dist_degree
        results['clustering']['perturbed'] += dist_clustering
        results['orbits4']['perturbed'] += dist_4orbits
        # Baselines are epoch-independent, so compute them only once.
        if (i == 0):
            for baseline in baselines:
                (dist_degree, dist_clustering) = compute_basic_stats(real_g_list, baselines[baseline])
                dist_4orbits = eval.stats.orbit_stats_all(real_g_list, baselines[baseline])
                results['deg'][baseline] = dist_degree
                results['clustering'][baseline] = dist_clustering
                results['orbits4'][baseline] = dist_4orbits
                print('Kron: deg=', dist_degree, ', clustering=', dist_clustering, ', orbits4=', dist_4orbits)
        out_files['train'].write('\n')
    # Accumulated sums become averages over the evaluated epochs.
    for (metric, methods) in results.items():
        methods['real'] /= num_evals
        methods['perturbed'] /= num_evals
    # One CSV summary row per metric.
    for (metric, methods) in results.items():
        line = ((((((metric + ',') + str(methods['real'])) + ',') + str(methods['ours'])) + ',') + str(methods['perturbed']))
        for baseline in baselines:
            line += (',' + str(methods[baseline]))
        line += '\n'
        out_files['compare'].write(line)
    for (_, out_f) in out_files.items():
        out_f.close()
def process_(original, input_, past_=False, kg_type='atomic'):
    """Score knowledge-graph relations against each sentence of a story and
    keep the top-5 per dimension.

    Args:
        original: The story as a single string; split into sentences below.
        input_: Per-sentence extraction output: sequences of (sent_id, events).
        past_: If True, score against only the story prefix up to the sentence.
        kg_type: 'atomic' or another KG; together with the module-level
            ``args.comet`` flag this selects how nodes are decoded.

    Returns:
        dict: sent_id -> dimension -> {'relations': [...], 'scores': [...]}.
    """
    original = nltk.tokenize.sent_tokenize(original)
    # Fallback split on '.' when sentence tokenization yields too few sentences.
    if (len(original) < 5):
        original = [(l + '.') for l in ' '.join(original).split('.')]
    # Phase 1: collect candidate relations (and their types) per sentence.
    saved = {}
    for sent in input_:
        if (not check_empty(sent[1])):
            sent_id = sent[0]
            sent = sent[1]
            for evt in sent:
                if (len(evt) > 0):
                    for node in evt:
                        if (len(node) > 0):
                            if (kg_type == 'atomic'):
                                # ATOMIC node: [event, rel-list..., split];
                                # each middle entry is a stringified list of
                                # relations for one dimension.
                                event_label = node[0]
                                split = node[(- 1)]
                                relations = node[1:(- 1)]
                                relations = [ast.literal_eval(l) for l in relations]
                                relation_types = [([dimensions_of_interest[l]] * len(relations[l])) for l in range(len(relations))]
                                relations = list(itertools.chain.from_iterable(relations))
                                relation_types = list(itertools.chain.from_iterable(relation_types))
                                if (sent_id in saved.keys()):
                                    saved[sent_id]['relations'].extend(relations)
                                    saved[sent_id]['relation_types'].extend(relation_types)
                                else:
                                    saved[sent_id] = {'relations': relations, 'relation_types': relation_types}
                            elif (not args.comet):
                                # Triple-style node: (relation, head, tail)
                                # rendered as "head relation tail".
                                if (sent_id in saved.keys()):
                                    saved[sent_id]['relations'].append(' '.join([node[1], node[0], node[2]]))
                                    saved[sent_id]['relation_types'].append(node[0])
                                else:
                                    saved[sent_id] = {'relations': [' '.join([node[1], node[0], node[2]])], 'relation_types': [node[0]]}
                            else:
                                # COMET node: relation type is the first word
                                # of each relation string.
                                event_label = node[0]
                                model = node[(- 1)]
                                relations = [ast.literal_eval(r) for r in node[1:(- 1)]]
                                relation_types = [([r[0].split(' ')[0]] * len(r)) for r in relations if (len(r) > 0)]
                                relations = list(itertools.chain.from_iterable(relations))
                                relation_types = list(itertools.chain.from_iterable(relation_types))
                                if (sent_id in saved.keys()):
                                    saved[sent_id]['relations'].extend(relations)
                                    saved[sent_id]['relation_types'].extend(relation_types)
                                else:
                                    saved[sent_id] = {'relations': relations, 'relation_types': relation_types}
        else:
            # No extractions for this sentence — keep an empty slot.
            saved[sent[0]] = {'relations': [], 'relation_types': []}
    # Phase 2: deduplicate relations and build one "substory" context per
    # sentence, with the target sentence replaced by a <|id|> placeholder.
    saved_rels = {}
    s_ids = []
    (all_substories, all_relations, all_relation_types, sub_lens, sent_ids) = ([], [], [], [], [])
    for (sent_id, input_) in saved.items():
        raw_relations = input_['relations']
        relation_types = input_['relation_types']
        if (len(raw_relations) > 0):
            relations = [(clean_r(raw_relations[r]), relation_types[r]) for r in range(len(raw_relations))]
            # set() dedup: NOTE this makes downstream ordering nondeterministic.
            relations = list(set(relations))
            relation_types = [r[1] for r in relations]
            relations = [r[0] for r in relations]
            if past_:
                substory = ' '.join((original[:sent_id] + [(('<|' + str(sent_id)) + '|>')]))
            else:
                substory = ' '.join(((original[:sent_id] + [(('<|' + str(sent_id)) + '|>')]) + original[(sent_id + 1):]))
            all_relations.extend(relations)
            all_substories.extend(([substory] * len(relations)))
            sub_lens.append(len(relations))
            all_relation_types.extend(relation_types)
            sent_ids.append(sent_id)
            s_ids.extend(([sent_id] * len(relations)))
        else:
            all_substories.extend([])
            sub_lens.append(0)
            sent_ids.append(sent_id)
    start = time.perf_counter()
    # NOTE(review): leftover debugging — a bare except dropping into pdb on
    # any error (e.g. a sent_id beyond len(original)). Should be removed or
    # replaced with explicit error handling before production use.
    try:
        print(len([original[sent_id] for sent_id in s_ids]))
    except:
        import pdb
        pdb.set_trace()
    scores = score_prob(all_substories, all_relations, all_relation_types, eval_sents=[original[sent_id] for sent_id in s_ids], kg_type=args.kg_type)
    # Phase 3: walk the flat score array in per-sentence slices and keep the
    # top-5 finite-scored relations for each dimension of interest.
    start_idx = 0
    for (idx, sent_id) in enumerate(sent_ids):
        quantity = sub_lens[idx]
        end_idx = (start_idx + quantity)
        sent_scores = scores[start_idx:end_idx]
        sent_relations = all_relations[start_idx:end_idx]
        sent_types = all_relation_types[start_idx:end_idx]
        for dim in dimensions_of_interest:
            sent_scores_dim = [sent_scores[s] for s in range(len(sent_scores)) if (sent_types[s] == dim)]
            sent_relations_dim = [sent_relations[s] for s in range(len(sent_scores)) if (sent_types[s] == dim)]
            # Indices of the five highest scores (argsort is ascending).
            top_i = np.argsort(sent_scores_dim)[(- 5):]
            chosen_relations = [sent_relations_dim[i] for i in top_i]
            chosen_scores = [sent_scores_dim[i] for i in top_i]
            if (sent_id not in saved_rels.keys()):
                saved_rels[sent_id] = {}
            if (dim not in saved_rels[sent_id].keys()):
                saved_rels[sent_id][dim] = {'relations': [], 'scores': []}
            # Drop -inf scores (relations the scorer rejected outright).
            chosen_relations = [chosen_relations[i] for i in range(len(chosen_relations)) if (chosen_scores[i] != (- math.inf))]
            chosen_scores = [s for s in chosen_scores if (s != (- math.inf))]
            saved_rels[sent_id][dim]['relations'].extend(chosen_relations)
            saved_rels[sent_id][dim]['scores'].extend(chosen_scores)
        start_idx = end_idx
    return saved_rels
def _is_equivalent(first: data.Data, second: data.Data):
    """Equivalence check that additionally accepts two scalar-like descriptors.

    Falls back to True when strict equivalence fails but both descriptors are
    a Scalar or a one-element Array.
    """
    if first.is_equivalent(second):
        return True

    def _scalar_like(desc):
        # A Scalar, or an Array of shape (1,), counts as scalar-like.
        return isinstance(desc, data.Scalar) or (isinstance(desc, data.Array) and desc.shape == (1,))

    # Not strictly equivalent: still equivalent iff both sides are scalar-like.
    return all(_scalar_like(d) for d in (first, second))
def benchmark_to_markdown(benchmark: List[List[str]], columns: List[str], rows: List[str]):
    """Render benchmark cells as a Markdown table; print and return the text.

    The cell width is taken from the widest entry of the first benchmark row,
    and each row label is rendered as a second count ("<row>s").
    """
    width = max(len(cell) for cell in benchmark[0])
    fmt = '{: >%d} ' % width
    header_cells = [fmt.format(name) for name in columns]
    lines = [('| ' + fmt.format('|')) + '| '.join(header_cells) + '|']
    # One centered-alignment divider cell per column, plus one for the labels.
    divider = ('|:' + '-' * width + ':') * (len(columns) + 1)
    lines.append(divider + '|')
    for i, row_label in enumerate(rows):
        left = '| ' + fmt.format('%ds ' % row_label)
        lines.append(left + '| ' + ' | '.join(benchmark[i]) + ' |')
    out = '\n'.join(lines) + '\n'
    print(out)
    return out
class SST(ClassificationTask):
    """Binary sentiment classification on the SST GLUE task."""

    def __init__(self, config: configure_finetuning.FinetuningConfig, tokenizer):
        # SST is a two-way ('0'/'1') classification task.
        super(SST, self).__init__(config, 'sst', tokenizer, ['0', '1'])

    def _create_examples(self, lines, split):
        # Test-split TSVs carry the text in column 1 and no label column;
        # train/dev keep the text in column 0 and the label in column 1.
        # (presumed column semantics -- confirm against _load_glue)
        is_test = 'test' in split
        text_column = 1 if is_test else 0
        label_column = None if is_test else 1
        return self._load_glue(lines, split, text_column, None, label_column, True)
def simGetOrientationOnPath(pathHandle, relativeDistance):
    """Query the Euler orientation at a relative position along a path.

    Returns the three angles written by the C call as a Python list.
    """
    angles = ffi.new('float[3]')  # output buffer filled by the C library
    result = lib.simGetOrientationOnPath(pathHandle, relativeDistance, angles)
    _check_return(result)
    return list(angles)
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.

    Wraps a byte stream and performs HTML encoding sniffing (BOM, declared
    transport/meta encodings, optional chardet autodetection) before decoding.
    """

    def __init__(self, source, override_encoding=None, transport_encoding=None, same_origin_parent_encoding=None, likely_encoding=None, default_encoding='windows-1252', useChardet=True):
        """Initialise from a file-like object or a ``bytes`` buffer.

        The various ``*_encoding`` hints are tried in decreasing priority by
        :meth:`determineEncoding`; ``useChardet`` enables the chardet fallback.
        """
        # Raw byte stream; must be seekable (openStream wraps it if not).
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # Number of bytes inspected when looking for a <meta charset>.
        self.numBytesMeta = 1024
        # Chunk size fed to chardet per iteration.
        self.numBytesChardet = 100
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding
        # (encoding, confidence) where confidence is 'certain' or 'tentative'.
        self.charEncoding = self.determineEncoding(useChardet)
        assert (self.charEncoding[0] is not None)
        self.reset()

    def reset(self):
        """Rebuild the decoding stream reader for the current encoding."""
        # 'replace' avoids hard failures on undecodable bytes.
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)

    def openStream(self, source):
        """Produce a seekable file-like object from *source*.

        ``bytes`` input is wrapped in a BytesIO; non-seekable streams are
        wrapped in a BufferedStream.
        """
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            # Probe seekability without moving the position.
            stream.seek(stream.tell())
        except:  # noqa: E722 -- any failure means "not seekable"
            stream = BufferedStream(stream)
        return stream

    def determineEncoding(self, chardet=True):
        """Run the encoding sniffing cascade.

        Returns an ``(encoding, confidence)`` pair. Priority order: BOM,
        caller override, transport (e.g. HTTP header), <meta> prescan,
        same-origin parent, likely encoding, chardet detection, configured
        default, and finally windows-1252.
        """
        # First look for a unicode BOM -- always authoritative.
        charEncoding = (self.detectBOM(), 'certain')
        if (charEncoding[0] is not None):
            return charEncoding
        # Explicit caller override.
        charEncoding = (lookupEncoding(self.override_encoding), 'certain')
        if (charEncoding[0] is not None):
            return charEncoding
        # Transport-level declaration (e.g. Content-Type charset).
        charEncoding = (lookupEncoding(self.transport_encoding), 'certain')
        if (charEncoding[0] is not None):
            return charEncoding
        # <meta charset> prescan of the document head.
        charEncoding = (self.detectEncodingMeta(), 'tentative')
        if (charEncoding[0] is not None):
            return charEncoding
        # Encoding inherited from a same-origin parent document; utf-16 is
        # rejected here per the sniffing algorithm.
        charEncoding = (lookupEncoding(self.same_origin_parent_encoding), 'tentative')
        if ((charEncoding[0] is not None) and (not charEncoding[0].name.startswith('utf-16'))):
            return charEncoding
        # Caller-supplied guess.
        charEncoding = (lookupEncoding(self.likely_encoding), 'tentative')
        if (charEncoding[0] is not None):
            return charEncoding
        # Statistical detection via chardet, when available.
        if chardet:
            try:
                from pip._vendor.chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                buffers = []
                detector = UniversalDetector()
                while (not detector.done):
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if (not buffer):
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                self.rawStream.seek(0)
                if (encoding is not None):
                    return (encoding, 'tentative')
        # Configured fallback, then the spec-mandated last resort.
        charEncoding = (lookupEncoding(self.default_encoding), 'tentative')
        if (charEncoding[0] is not None):
            return charEncoding
        return (lookupEncoding('windows-1252'), 'tentative')

    def changeEncoding(self, newEncoding):
        """Switch to *newEncoding* mid-parse (triggered by a late <meta>).

        Raises ``_ReparseException`` when the document must be reparsed from
        the start with the new encoding; utf-16 declarations are coerced to
        utf-8 per the HTML standard.
        """
        assert (self.charEncoding[1] != 'certain')
        newEncoding = lookupEncoding(newEncoding)
        if (newEncoding is None):
            return
        if (newEncoding.name in ('utf-16be', 'utf-16le')):
            newEncoding = lookupEncoding('utf-8')
            assert (newEncoding is not None)
        elif (newEncoding == self.charEncoding[0]):
            # Same encoding -- just upgrade the confidence.
            self.charEncoding = (self.charEncoding[0], 'certain')
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, 'certain')
            self.reset()
            raise _ReparseException(('Encoding changed from %s to %s' % (self.charEncoding[0], newEncoding)))

    def detectBOM(self):
        """Inspect the first bytes for a byte-order mark.

        Returns the matching encoding (with the stream positioned just past
        the BOM) or None (with the stream rewound to the start).
        """
        bomDict = {codecs.BOM_UTF8: 'utf-8', codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be', codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'}
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try 3-byte (utf-8), then 4-byte (utf-32), then 2-byte (utf-16) BOMs;
        # utf-32 must be checked before utf-16 because its BOM is a prefix match.
        encoding = bomDict.get(string[:3])
        seek = 3
        if (not encoding):
            encoding = bomDict.get(string)
            seek = 4
            if (not encoding):
                encoding = bomDict.get(string[:2])
                seek = 2
        if encoding:
            # Leave the stream positioned immediately after the BOM.
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None

    def detectEncodingMeta(self):
        """Prescan the head of the document for a <meta> encoding declaration."""
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        # A meta-declared utf-16 is treated as utf-8 per the HTML standard.
        if ((encoding is not None) and (encoding.name in ('utf-16be', 'utf-16le'))):
            encoding = lookupEncoding('utf-8')
        return encoding
def _should_count_towards_stop(event: events.ExecutionEvent) -> bool:
    """Only failed or errored ``AfterExecution`` events advance the stop counter."""
    if not isinstance(event, events.AfterExecution):
        return False
    return event.status in (Status.error, Status.failure)
def multiply(X: dace.float64[N], Y: dace.float64[N], Z: dace.float64[N]):
    """Elementwise product: Z[i] = X[i] * Y[i] over the range [0, N)."""
    # NOTE(review): the map decorator appears garbled in this copy -- the bare
    # expression below was presumably ``@dace.map(_[0:N])`` applied to the
    # tasklet; confirm against the original DaCe sample.
    (_[0:N])
    def mult(i):
        # DaCe tasklet: ``<<`` declares memlet reads, ``>>`` a memlet write.
        (x << X[i])
        (y << Y[i])
        (z >> Z[i])
        z = (y * x)
def print_net(model, namescope='gpu_0'):
    """Log each forward op of ``model.net`` with its blob shapes.

    Ops whose output falls outside ``namescope`` are skipped; iteration stops
    at the first gradient blob (start of the backward pass). Parameter blobs
    are not listed among inputs. The op label is appended only to the first
    logged input line of each op.
    """
    logger.info('Printing model: {}'.format(model.net.Name()))
    for op in model.net.Proto().op:
        output_name = str(op.output[0])
        if namescope is not None and not output_name.startswith(namescope):
            continue
        if output_name.find('grad') >= 0:
            # Gradient blobs mark the backward pass -- stop printing here.
            break
        output_shape = workspace.FetchBlob(output_name).shape
        op_label = op.type + (op.name if op.name == '' else ':' + op.name)
        suffix = ' ------- (op: {})'.format(op_label)
        for blob_name in op.input:
            if blob_name in model.params:
                continue
            input_blob = workspace.FetchBlob(blob_name)
            if not isinstance(input_blob, np.ndarray):
                continue
            logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(
                c2_utils.UnscopeName(str(blob_name)),
                '{}'.format(input_blob.shape),
                c2_utils.UnscopeName(str(output_name)),
                '{}'.format(output_shape),
                suffix))
            # Only the first logged input carries the op label.
            suffix = ' ------|'
    logger.info('End of model: {}'.format(model.net.Name()))
class BitMaskTestSuite(unittest.TestCase):
    """Exercises ``util.setWithMask`` / ``util.getWithMask`` bit manipulation."""

    def test_set_with_mask(self):
        # Each case: (baseval, newval, mask, expected, failure message).
        cases = [
            (207, 10, 15, 202, 'Problems keeping other bits untouched?'),
            (252, 10, 240, 172, 'Problems with bit-shift?'),
            (0, 7, 24, 24, 'Data not trimmed to mask?'),
            (255, 0, 24, 231, 'Data not trimmed to mask?'),
        ]
        for baseval, newval, mask, expected, message in cases:
            self.assertEqual(util.setWithMask(baseval, newval, mask), expected, message)

    def test_get_with_mask(self):
        # Each case: (baseval, mask, expected, failure message).
        cases = [
            (219, 3, 3, 'Bit-shift not working?'),
            (219, 24, 3, 'Data not trimmed to mask?'),
        ]
        for baseval, mask, expected, message in cases:
            self.assertEqual(util.getWithMask(baseval, mask), expected, message)
def octave_console():
    """Spawn an interactive ``octave-cli`` session.

    Only permitted from a real terminal; notebook users should use the
    ``%%octave`` magics instead.
    """
    from sage.repl.rich_output.display_manager import get_display_manager
    display_manager = get_display_manager()
    if not display_manager.is_in_terminal():
        raise RuntimeError('Can use the console only in the terminal. Try %%octave magics instead.')
    os.system('octave-cli')
def CalculateComposition(ProteinSequence, AAProperty, AAPName):
    """Fraction of residues falling into each of the three property groups.

    The sequence is first mapped to the digits '1'/'2'/'3' via ``StringtoNum``;
    the result maps '<AAPName>C1'..'<AAPName>C3' to fractions rounded to 3
    decimal places.
    """
    encoded = StringtoNum(ProteinSequence, AAProperty)
    total = len(encoded)
    return {
        '%sC%s' % (AAPName, group): round(float(encoded.count(group)) / total, 3)
        for group in ('1', '2', '3')
    }
def test_bitmasked():
    """BitMaskedArray over tuple content is a tuple; over record content it is not."""
    tuple_array = ak.Array(
        ak.contents.BitMaskedArray(
            ak.index.IndexU8(np.array([0, 1, 0, 1], dtype=np.int64)),
            tuple,
            valid_when=True,
            length=4,
            lsb_order=True,
        )
    )
    assert ak.is_tuple(tuple_array)

    record_array = ak.Array(
        ak.contents.BitMaskedArray(
            ak.index.IndexU8(np.array([0, 1, 0, 1], dtype=np.int64)),
            record,
            valid_when=True,
            length=4,
            lsb_order=True,
        )
    )
    assert not ak.is_tuple(record_array)
def complete_episode_error_info(history, episode, dialog_error, ner_errors, customer_entities, target_intent, intent_success, classified_intent, error='Other_error'):
    """Annotate *episode* with its failure category and bookkeeping details.

    NER failures update ``ner_errors[target_intent]`` in place and may return
    None when the NER-error record cannot be completed; otherwise the episode
    is tagged as an "Other Error" (intent classified correctly) or an
    "Intent Error" and returned.
    """
    if 'ner_errors' in dialog_error:
        episode['error'] = 'NER Error'
        episode['filled_slots'] = '{}>>> {} >>> ({})'.format(
            episode['episode'], episode['error_turn'], dialog_error['error_slot'])
        # Episode ids look like 'E<number>'; strip the prefix for indexing.
        episode_index = episode['episode'].replace('E', '')
        updated = complete_ner_errors(
            dialog_error, ner_errors[target_intent], customer_entities, episode_index)
        if not updated:
            return None
        ner_errors[target_intent] = updated
        return episode

    if error == 'Other_error' and intent_success:
        # Intent was right, so the failure lies elsewhere in the dialog.
        episode['intent_prediction'] = classified_intent
        episode['error'] = 'Other Error'
        episode['filled_slots'] = history[-1]
        episode['intent_success'] = 'yes'
        return episode

    # Either a non-'Other_error' call or a misclassified intent.
    episode['error'] = 'Intent Error'
    dialog_error.setdefault('intent_error', {})
    dialog_error['error_turn'] = episode['error_turn']
    return episode
def app(database):
    """Streamlit page that collects the configuration for a new bot test.

    Gathers the bot platform, naming, dialog-simulation parameters and API
    credentials, then creates (or reuses) a test instance row in *database*.

    NOTE(review): indentation in this copy was reconstructed from a
    whitespace-mangled source -- confirm the exact nesting (in particular
    which widgets live inside the sidebar) against the original app.
    """
    settings = {}
    with st.sidebar:
        # Row 0: platform selector + bot naming.
        (row0_1, row0_spacer1, row0_2) = st.columns((6.0, 0.05, 4.3))
        with row0_1:
            bot_platform = st.selectbox('Bot Platform', ['Einstein Bot', 'DialogFlow CX'])
            # Internal identifiers use underscores (e.g. 'Einstein_Bot').
            bot_platform = bot_platform.replace(' ', '_')
        with row0_2:
            settings['test_name'] = st.text_input('Bot Name')
        settings['test_description'] = st.text_input('Test Description')
        settings['bot_type'] = bot_platform
        settings['bot_Id'] = ''
        settings['status'] = 'new'
        st.markdown('**Dialog Generation & Simulation Configuration**')
        # Bail out while a previous simulation is still running.
        if check_unfinished_simulations(database):
            return
        # Four numeric simulation parameters (-1 presumably means "use all" --
        # confirm against the simulator).
        (row0_1, row0_spacer1, row0_2, row0_spacer2, row0_3, row0_spacer3, row0_4, row0_space4) = st.columns((2.0, 0.1, 2.0, 0.1, 2.0, 0.1, 2.0, 0.1))
        with row0_1:
            settings['num_seed_utterances'] = st.number_input('No. of seed utterances', (- 1))
        with row0_2:
            settings['num_t5_paraphrases'] = st.number_input('No. of paraphrases', 16)
            # Both paraphrasers share the same budget.
            settings['num_pegasus_paraphrases'] = settings['num_t5_paraphrases']
        with row0_3:
            settings['num_simulations'] = st.number_input('No. of dialog simulations (per intent)', (- 1))
        with row0_4:
            settings['max_dialog_turns'] = st.number_input('Maximum No. of dialog turns', 10)
        (row1_1, row1_spacer1, row1_2, row1_spacer2) = st.columns((3.5, 0.1, 3.5, 0.1))
        with row1_1:
            st.markdown('BotSIM uses APIs to perform dialog simulation by acting as a user. Users are required to provide the API credentials (for Salesforce Einstein BotBuilder) or API tokens (Google DialogFlow)in JSON format. Contact your admins regarding the tokens/credentials.')
        with row1_2:
            with st.expander('Upload bot API credentials (example below )'):
                # Show a platform-specific template of the expected JSON.
                if (bot_platform == 'Einstein_Bot'):
                    st.code('\n {\n "org_Id":"00D8cxxxxxxxxxx",\n "button_Id": "5738cxxxxxxxxxx",\n "deployment_Id": "5728cxxxxxxxxxx",\n "end_point": " }')
                elif (bot_platform == 'DialogFlow_CX'):
                    st.code('\n {\n "location_id": "us-central1",\n "agent_id": "xxxxx-xxxx-xxxxx-xxxx",\n "project_id": "xxx",\n "cx_credential": "platforms/dialogflow_cx/cx.json"\n }')
                api_creds = st.file_uploader('')
        (latest_bot_id, latest_stage) = database.get_last_db_row()
        if (api_creds is not None):
            api_tokens = json.load(api_creds)
            # Validate that the uploaded JSON carries the platform's keys.
            if (bot_platform == 'Einstein_Bot'):
                assert ('org_Id' in api_tokens)
                assert ('end_point' in api_tokens)
                assert ('button_Id' in api_tokens)
                assert ('deployment_Id' in api_tokens)
            if (bot_platform == 'DialogFlow_CX'):
                assert ('location_id' in api_tokens)
                assert ('agent_id' in api_tokens)
                assert ('cx_credential' in api_tokens)
            settings.update(api_tokens)
            settings['bot_version'] = '1'
            settings['dev_intents'] = []
            settings['eval_intents'] = []
            if (latest_bot_id != (- 1)):
                config = dict(database.get_one_bot_test_instance(latest_bot_id))
                # Start a fresh instance once the previous one has intents;
                # otherwise reuse the empty latest instance.
                if ((len(config['dev']) != 0) or (len(config['eval']) != 0)):
                    settings['status'] = 'new'
                    bot_id = str(database.create_test_instance(settings))
                else:
                    bot_id = latest_bot_id
            else:
                settings['status'] = 'new'
                bot_id = str(database.create_test_instance(settings))
            settings['bot_Id'] = str(bot_id)
            st.success('Setup finished. Navigate to next page for BotSIM inputs.')
def retrieve_tigge_data():
    """Request yearly ECMWF 2m-temperature GRIB files (2007-2016) via TIGGE.

    Builds one "<year>-01-01/to/<year>-12-31" MARS date range per year and
    hands it to ``tigge_request`` together with the output file path.
    """
    # FIX: the original used Python 2 ``xrange`` (NameError on Python 3) and
    # mutated the start-date list in place through an alias.
    data_dir = '/media/sebastian/Elements/Postproc_NN/data/forecasts/'
    for year in range(2007, 2017):
        date = '{0}-01-01/to/{0}-12-31'.format(year)
        target = '{}ecmwf_t2m_{}.grib'.format(data_dir, year)
        tigge_request(date, target)
# NOTE(review): this line appears to be a mangled decorator -- presumably
# ``@register_bpe('bert')`` in the original source; confirm upstream.
_bpe('bert')
class BertBPE(object):
    """WordPiece BPE backed by HuggingFace's ``BertTokenizer``."""

    def add_args(parser):
        # NOTE(review): no ``self``/``cls`` parameter -- presumably a stripped
        # ``@staticmethod`` decorator; confirm upstream.
        parser.add_argument('--bpe-cased', action='store_true', help='set for cased BPE', default=False)
        parser.add_argument('--bpe-vocab-file', type=str, help='bpe vocab file.')

    def __init__(self, args):
        # transformers is an optional dependency; fail with an actionable hint.
        try:
            from transformers import BertTokenizer
        except ImportError:
            raise ImportError('Please install transformers with: pip install transformers')
        if ('bpe_vocab_file' in args):
            # An explicit vocab file takes precedence over the hub models;
            # casing follows the --bpe-cased flag.
            self.bert_tokenizer = BertTokenizer(args.bpe_vocab_file, do_lower_case=(not args.bpe_cased))
        else:
            vocab_file_name = ('bert-base-cased' if args.bpe_cased else 'bert-base-uncased')
            self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name)

    def encode(self, x: str) -> str:
        """Tokenize *x* into space-joined WordPiece tokens."""
        return ' '.join(self.bert_tokenizer.tokenize(x))

    def decode(self, x: str) -> str:
        """Invert :meth:`encode`: rejoin WordPieces and tidy tokenization artifacts."""
        return self.bert_tokenizer.clean_up_tokenization(self.bert_tokenizer.convert_tokens_to_string(x.split(' ')))

    def is_beginning_of_word(self, x: str) -> bool:
        # Continuation WordPieces carry the '##' prefix.
        return (not x.startswith('##'))
def make_model(config):
    """Assemble the detection + semantic-segmentation network from *config*.

    Pipeline: backbone body (optionally pretrained and partially frozen) ->
    FPN neck -> RPN head/algorithm -> detection ROI head/algorithm ->
    semantic-segmentation head/algorithm, wired into a ``DetSegNet``.
    """
    body_config = config['body']
    fpn_config = config['fpn']
    rpn_config = config['rpn']
    roi_config = config['roi']
    sem_config = config['sem']
    general_config = config['general']
    # Class bookkeeping; 'total' covers both thing and stuff categories.
    classes = {'total': (int(general_config['num_things']) + int(general_config['num_stuff'])), 'stuff': int(general_config['num_stuff']), 'thing': int(general_config['num_things']), 'semantic': int(general_config['num_semantic'])}
    (norm_act_static, norm_act_dynamic) = norm_act_from_config(body_config)
    # --- Backbone ----------------------------------------------------------
    log_debug('Creating backbone model %s', body_config['body'])
    body_fn = models.__dict__[('net_' + body_config['body'])]
    body_params = (body_config.getstruct('body_params') if body_config.get('body_params') else {})
    body = body_fn(norm_act=norm_act_static, **body_params)
    if body_config.get('weights'):
        body.load_state_dict(torch.load(body_config['weights'], map_location='cpu'))
    # Freeze the first ``num_frozen`` backbone stages (modules named mod1..modK).
    for (n, m) in body.named_modules():
        for mod_id in range(1, (body_config.getint('num_frozen') + 1)):
            if (('mod%d' % mod_id) in n):
                freeze_params(m)
    body_channels = body_config.getstruct('out_channels')
    # --- FPN neck ----------------------------------------------------------
    fpn_inputs = fpn_config.getstruct('inputs')
    fpn = FPN([body_channels[inp] for inp in fpn_inputs], fpn_config.getint('out_channels'), fpn_config.getint('extra_scales'), norm_act_static, fpn_config['interpolation'])
    body = FPNBody(body, fpn, fpn_inputs)
    # --- Region proposal network -------------------------------------------
    proposal_generator = ProposalGenerator(rpn_config.getfloat('nms_threshold'), rpn_config.getint('num_pre_nms_train'), rpn_config.getint('num_post_nms_train'), rpn_config.getint('num_pre_nms_val'), rpn_config.getint('num_post_nms_val'), rpn_config.getint('min_size'))
    anchor_matcher = AnchorMatcher(rpn_config.getint('num_samples'), rpn_config.getfloat('pos_ratio'), rpn_config.getfloat('pos_threshold'), rpn_config.getfloat('neg_threshold'), rpn_config.getfloat('void_threshold'))
    rpn_loss = RPNLoss(rpn_config.getfloat('sigma'))
    rpn_algo = RPNAlgoFPN(proposal_generator, anchor_matcher, rpn_loss, rpn_config.getint('anchor_scale'), rpn_config.getstruct('anchor_ratios'), fpn_config.getstruct('out_strides'), rpn_config.getint('fpn_min_level'), rpn_config.getint('fpn_levels'))
    rpn_head = RPNHead(fpn_config.getint('out_channels'), len(rpn_config.getstruct('anchor_ratios')), 1, rpn_config.getint('hidden_channels'), norm_act_dynamic)
    # --- Detection (ROI) head ----------------------------------------------
    prediction_generator = PredictionGenerator(roi_config.getfloat('nms_threshold'), roi_config.getfloat('score_threshold'), roi_config.getint('max_predictions'))
    proposal_matcher = ProposalMatcher(classes, roi_config.getint('num_samples'), roi_config.getfloat('pos_ratio'), roi_config.getfloat('pos_threshold'), roi_config.getfloat('neg_threshold_hi'), roi_config.getfloat('neg_threshold_lo'), roi_config.getfloat('void_threshold'))
    roi_loss = DetectionLoss(roi_config.getfloat('sigma'))
    roi_size = roi_config.getstruct('roi_size')
    roi_algo = DetectionAlgoFPN(prediction_generator, proposal_matcher, roi_loss, classes, roi_config.getstruct('bbx_reg_weights'), roi_config.getint('fpn_canonical_scale'), roi_config.getint('fpn_canonical_level'), roi_size, roi_config.getint('fpn_min_level'), roi_config.getint('fpn_levels'))
    roi_head = FPNROIHead(fpn_config.getint('out_channels'), classes, roi_size, norm_act=norm_act_dynamic)
    # --- Semantic segmentation head ----------------------------------------
    sem_loss = SemanticSegLoss(ohem=sem_config.getfloat('ohem'))
    sem_algo = SemanticSegAlgo(sem_loss, classes['semantic'])
    sem_head = FPNSemanticHeadDeeplab(fpn_config.getint('out_channels'), sem_config.getint('fpn_min_level'), sem_config.getint('fpn_levels'), classes['semantic'], pooling_size=sem_config.getstruct('pooling_size'), norm_act=norm_act_static)
    return DetSegNet(body, rpn_head, roi_head, sem_head, rpn_algo, roi_algo, sem_algo, classes)
def clean_pl_regon(df: Union[(pd.DataFrame, dd.DataFrame)], column: str, output_format: str='standard', inplace: bool=False, errors: str='coerce', progress: bool=True) -> pd.DataFrame:
    """Clean and standardize Polish REGON numbers in a DataFrame column.

    Parameters:
        df: pandas or dask DataFrame containing the column to clean.
        column: name of the column holding the REGON values.
        output_format: 'compact' or 'standard' (validated below).
        inplace: replace the original column instead of keeping both.
        errors: error-handling mode forwarded to the per-value formatter.
        progress: show a dask progress bar during computation.

    Returns:
        A pandas DataFrame with a ``<column>_clean`` column (substituting the
        original column when *inplace* is set).
    """
    if (output_format not in {'compact', 'standard'}):
        raise ValueError(f'output_format {output_format} is invalid. It needs to be "compact" or "standard".')
    # Work lazily on a dask frame regardless of the input type.
    df = to_dask(df)
    # ``_format`` returns a tuple per value; stage it in a temporary column.
    df['clean_code_tup'] = df[column].map_partitions((lambda srs: [_format(x, output_format, errors) for x in srs]), meta=object)
    # The first tuple element is the cleaned value.
    df = df.assign(_temp_=df['clean_code_tup'].map(itemgetter(0)))
    df = df.rename(columns={'_temp_': f'{column}_clean'})
    df = df.drop(columns=['clean_code_tup'])
    if inplace:
        # Overwrite the source column with the cleaned values, then expose it
        # under the ``<column>_clean`` name.
        df[column] = df[f'{column}_clean']
        df = df.drop(columns=f'{column}_clean')
        df = df.rename(columns={column: f'{column}_clean'})
    # Materialize the lazy dask graph into a concrete pandas DataFrame.
    with ProgressBar(minimum=1, disable=(not progress)):
        df = df.compute()
    return df
class QuoteStack():
    """LIFO stack of string frames that tracks which quote characters are
    still safe to use inside nested f-strings.

    Once an f-string frame using a given delimiter is pushed, that delimiter
    is permanently marked unsafe for the lifetime of this stack.
    """

    def __init__(self):
        self._stack = []
        self._single_quote_safe = True
        self._double_quote_safe = True

    def __len__(self):
        return len(self._stack)

    def __repr__(self):
        return repr(self._stack)

    def peek(self):
        """Return the top frame without removing it, or None when empty."""
        if not self._stack:
            return None
        return self._stack[-1]

    def pop(self):
        """Remove and return the top frame."""
        return self._stack.pop()

    def push(self, frame):
        """Push *frame*; an f-string frame retires its quote delimiter."""
        self._stack.append(frame)
        if not frame.f_string:
            return
        if frame.delim == "'":
            self._single_quote_safe = False
        elif frame.delim == '"':
            self._double_quote_safe = False

    def safe_delimiter(self):
        """A quote character not yet used by any f-string frame, else None."""
        if self._single_quote_safe:
            return "'"
        if self._double_quote_safe:
            return '"'
        return None
def main():
    """CLI entry point: export one pretrained model, or all of them for 'all'."""
    args = parser.parse_args()
    all_models = list_models(pretrained=True)
    if args.model == 'all':
        for model_name in all_models:
            export_model(model_name, args.output)
    else:
        export_model(args.model, args.output)
# NOTE(review): the following lines appear to be mangled
# ``@pytest.mark.parametrize`` decorators (the ``@pytest.mark`` prefix was
# stripped during extraction); confirm against the upstream test file.
.parametrize('seed', [412])
.parametrize('batch_size', [2, 16])
.parametrize('grid_size', [2, 8])
.parametrize('feature_size', [4])
.parametrize('m, M', [((- 1.0), 1.0)])
def test_query_on_triplane_double_backward(seed, batch_size, grid_size, feature_size, m, M):
    """Check first- and second-order gradients of the fused triplane query
    against a composite reference implementation."""
    nn.clear_parameters()
    ctx = get_extension_context('cudnn', device_id='0')
    nn.set_default_context(ctx)
    B = batch_size
    G = grid_size
    D = feature_size
    rng = np.random.RandomState(seed)
    # Queries drawn uniformly from [m, M)^3; small random triplane features.
    query_data = (m + (rng.rand(batch_size, 3) * (M - m)))
    initializer_data = (rng.randn(3, G, G, D) * 0.01)
    # Reference graph: composite implementation.
    query_data0 = query_data.astype(np.float32)
    initializer_data0 = initializer_data.astype(np.float32)
    query0 = nn.Variable.from_numpy_array(query_data0).apply(need_grad=True)
    feature0 = nn.parameter.get_parameter_or_create('F0', (3, G, G, D), initializer_data0)
    output0 = query_on_triplane_composite(query0, feature0, m, M)
    # Graph under test: fused function.
    query_data1 = query_data.astype(np.float32)
    initializer_data1 = initializer_data.astype(np.float32)
    query1 = nn.Variable.from_numpy_array(query_data1).apply(need_grad=True)
    feature1 = nn.parameter.get_parameter_or_create('F1', (3, G, G, D), initializer_data1)
    output1 = F.cosine_query_on_triplane(query1, feature1, ([m] * 3), ([M] * 3))
    # Identical upstream cotangent for both graphs.
    ograd = rng.randn(*output0.shape).astype(np.float32)
    ograd0 = nn.Variable.from_numpy_array(ograd).apply(need_grad=True, persistent=True)
    ograd1 = nn.Variable.from_numpy_array(ograd).apply(need_grad=True, persistent=True)
    # First-order gradients w.r.t. the query must agree.
    grad_query0 = nn.grad([output0], [query0], grad_outputs=[ograd0])[0]
    grad_query1 = nn.grad([output1], [query1], grad_outputs=[ograd1])[0]
    F.sink(*[grad_query0]).forward(clear_no_need_grad=True)
    F.sink(*[grad_query1]).forward(clear_no_need_grad=True)
    np.testing.assert_allclose(grad_query0.d, grad_query1.d, atol=1e-06)
    # Reset all accumulated gradients before the double-backward pass.
    ograd0.grad.fill(0)
    ograd1.grad.fill(0)
    query0.grad.fill(0)
    query1.grad.fill(0)
    feature0.grad.fill(0)
    feature1.grad.fill(0)
    # Second-order: backprop through the squared norm of the first gradients.
    o0 = F.sum((grad_query0 ** 2))
    o1 = F.sum((grad_query1 ** 2))
    o0.forward(clear_no_need_grad=True)
    o1.forward(clear_no_need_grad=True)
    ograd = rng.randn()
    o0.backward(ograd, clear_buffer=True)
    o1.backward(ograd, clear_buffer=True)
    # Double-backward gradients (w.r.t. cotangent and features) must agree.
    np.testing.assert_allclose(ograd0.g, ograd1.g, atol=0.0001)
    np.testing.assert_allclose(feature0.g, feature1.g, atol=0.008)
# NOTE(review): mangled decorator -- presumably
# ``@register_task('language_modeling')`` upstream; confirm.
_task('language_modeling')
class LanguageModelingTask(FairseqTask):
    """Train a language model over a monolingual corpus (fairseq task)."""

    def add_args(parser):
        # NOTE(review): no ``self``/``cls`` -- presumably a stripped
        # ``@staticmethod`` decorator; confirm upstream.
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-break-mode', choices=['none', 'complete', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=1024, type=int, help='max number of tokens per sample for LM dataset')
        parser.add_argument('--raw-text', default=False, action='store_true', help='load raw text dataset')
        parser.add_argument('--output-dictionary-size', default=(- 1), type=int, help='limit the size of output dictionary')
        parser.add_argument('--self-target', action='store_true', help='include self target')
        parser.add_argument('--future-target', action='store_true', help='include future target')
        parser.add_argument('--past-target', action='store_true', help='include past target')

    def __init__(self, args, dictionary, output_dictionary, targets=None):
        """Store the input/output dictionaries and the prediction targets."""
        super().__init__(args)
        self.dictionary = dictionary
        self.output_dictionary = output_dictionary
        if (targets is None):
            # Default to standard next-token prediction.
            targets = ['future']
        self.targets = targets

    def setup_task(cls, args, **kwargs):
        # NOTE(review): ``cls`` first parameter -- presumably a stripped
        # ``@classmethod`` decorator; confirm upstream.
        """Load the dictionary, resolve the target list and build the task."""
        dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))
        output_dictionary = dictionary
        if (args.output_dictionary_size >= 0):
            # Restrict the output (softmax) vocabulary when requested.
            output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size)
        if hasattr(args, 'exclude_self_target'):
            # Back-compat: older configs used the inverted flag name.
            args.self_target = (not args.exclude_self_target)
        targets = []
        if args.self_target:
            targets.append('self')
        if args.future_target:
            targets.append('future')
        if args.past_target:
            targets.append('past')
        if (len(targets) == 0):
            # Standard language modeling.
            targets = ['future']
        return cls(args, dictionary, output_dictionary, targets=targets)

    def build_model(self, args):
        """Build the model and verify it supports every requested target."""
        model = super().build_model(args)
        for target in self.targets:
            if (target not in model.supported_targets):
                raise ValueError('Unsupported language modeling target: {}'.format(target))
        return model

    def load_dataset(self, split, combine=False):
        """Load a dataset split into ``self.datasets[split]``.

        When *combine* is set, shards named ``split``, ``split1``, ... are
        loaded and concatenated; otherwise only the first shard is used.
        """
        loaded_datasets = []
        for k in itertools.count():
            # Shard naming: 'train', 'train1', 'train2', ...
            split_k = (split + (str(k) if (k > 0) else ''))
            path = os.path.join(self.args.data, split_k)
            if (self.args.raw_text and IndexedRawTextDataset.exists(path)):
                ds = IndexedRawTextDataset(path, self.dictionary)
                # Flatten the per-line token lists into one token stream.
                tokens = [t for l in ds.tokens_list for t in l]
            elif ((not self.args.raw_text) and IndexedInMemoryDataset.exists(path)):
                ds = IndexedInMemoryDataset(path, fix_lua_indexing=True)
                tokens = ds.buffer
            elif (k > 0):
                # No more shards to load.
                break
            else:
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, self.args.data))
            loaded_datasets.append(TokenBlockDataset(tokens, ds.sizes, self.args.tokens_per_sample, pad=self.dictionary.pad(), eos=self.dictionary.eos(), break_mode=self.args.sample_break_mode, include_targets=True))
            print('| {} {} {} examples'.format(self.args.data, split_k, len(loaded_datasets[(- 1)])))
            if (not combine):
                break
        if (len(loaded_datasets) == 1):
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
        # EOS handling only matters when samples respect sentence boundaries.
        add_eos_for_other_targets = ((self.args.sample_break_mode is not None) and (self.args.sample_break_mode != 'none'))
        self.datasets[split] = MonolingualDataset(dataset, sizes, self.dictionary, self.output_dictionary, add_eos_for_other_targets=add_eos_for_other_targets, shuffle=False, targets=self.targets)

    def target_dictionary(self):
        # NOTE(review): likely decorated with ``@property`` upstream; confirm.
        """Dictionary used for the model output (possibly truncated)."""
        return self.output_dictionary
class ValidationError(ValueError):
    """ValueError subclass raised for validation failures.

    The message defaults to the empty string so the exception can be raised
    without arguments.
    """

    def __init__(self, message='', *args, **kwargs):
        super(ValidationError, self).__init__(message, *args, **kwargs)
def discrete_to_box_wrapper(env, bound=4.0):
    """Expose a Discrete-action env through a continuous Box interface.

    The continuous action vector is clipped to [-bound, bound], converted to
    a categorical distribution with softmax, and a discrete action is sampled
    from it before being forwarded to the wrapped ``step``.
    """
    assert isinstance(env.action_space, Discrete), 'must pass a discrete environment!'
    original_step = env.step
    num_actions = env.action_space.n
    env.action_space = Box(low=(-bound), high=bound, shape=(num_actions,))

    def step(action):
        logits = np.clip(action, -bound, bound)
        probs = softmax(logits)
        # Stochastic: sample the discrete action from the softmax distribution.
        choice = np.random.choice(range(num_actions), p=probs)
        return original_step(choice)

    env.step = step
    return env
class sCW_sBC_reg(atomic_reg):
    """Bit layout of the short "sCW&sBC" atomic command register.

    ``_fields_`` lists (name, ctype, bit width) triples for the command
    encoding; ``res0_*`` fields describe the result operand and ``opd0_*``
    the input operand. ``rsvd*`` fields are reserved/unused bits.
    """
    OP_NAME = 'sCW&sBC'
    _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('op_code', ctypes.c_uint64, 16), ('cmd_id_dep', ctypes.c_uint64, 24), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('opt_res0_prec', ctypes.c_uint64, 3), ('rsvd0', ctypes.c_uint64, 6), ('pwr_step', ctypes.c_uint64, 4), ('intr_en', ctypes.c_uint64, 1), ('res0_n', ctypes.c_uint64, 16), ('res0_c', ctypes.c_uint64, 16), ('res0_h', ctypes.c_uint64, 16), ('res0_w', ctypes.c_uint64, 16), ('opd0_c', ctypes.c_uint64, 16), ('opd0_w', ctypes.c_uint64, 16), ('rsvd1', ctypes.c_uint64, 32), ('res0_addr', ctypes.c_uint64, 32), ('opd0_addr', ctypes.c_uint64, 32)]
    # Type annotations mirroring the bitfields above -- presumably consumed
    # by dataclass-style tooling in ``atomic_reg``; confirm against the base.
    cmd_short: int
    op_code: int
    cmd_id_dep: int
    tsk_typ: int
    tsk_eu_typ: int
    opt_res0_prec: int
    rsvd0: int
    pwr_step: int
    intr_en: int
    res0_n: int
    res0_c: int
    res0_h: int
    res0_w: int
    opd0_c: int
    opd0_w: int
    rsvd1: int
    res0_addr: int
    opd0_addr: int
    # Total encoded command length in bits.
    length: int = 256
def draw_arc(image, arc, offset=(0, 0), color=(0, 0, 255), thickness=1):
    """Draw *arc* onto *image* with ``cv2.ellipse``.

    The start/end angles (degrees) are measured from the arc circle's center
    to its endpoints; the start angle is unwound by 360 degrees when it
    exceeds the end angle so the sweep is always forward.
    """
    start_deg = cartesian_angle(arc.circle.center, arc.a) * 180 / np.pi
    end_deg = cartesian_angle(arc.circle.center, arc.b) * 180 / np.pi
    if start_deg > end_deg:
        start_deg -= 360
    center = tuple(round_vector(np.array(arc.circle.center) + offset))
    radius = int(round(arc.circle.radius))
    cv2.ellipse(image, center, (radius, radius), 0, start_deg, end_deg, color, thickness)
class SahaFactor(ProcessingPlasmaProperty):
    """Plasma property computing the Saha factor ``phi_ik``.

    The value is the thermal-LTE level Boltzmann factor divided by the
    product of the LTE phi and partition function looked up for the related
    ion stage.
    """
    outputs = ('phi_ik',)
    latex_name = ('\\Phi_{i,\\kappa}',)

    def calculate(self, thermal_phi_lte, thermal_lte_level_boltzmann_factor, thermal_lte_partition_function):
        """Return boltzmann_factor / (phi_lte * partition_function)."""
        boltzmann_factor = self._prepare_boltzmann_factor(thermal_lte_level_boltzmann_factor)
        phi_saha_index = get_ion_multi_index(boltzmann_factor.index)
        partition_function_index = get_ion_multi_index(boltzmann_factor.index, next_higher=False)
        phi_saha = thermal_phi_lte.loc[phi_saha_index].values
        # Replace exact zeros with the smallest positive float so the
        # division below never produces infinities.
        phi_saha[(phi_saha == 0.0)] = sys.float_info.min
        partition_function = thermal_lte_partition_function.loc[partition_function_index].values
        return (boltzmann_factor / (phi_saha * partition_function))

    def _prepare_boltzmann_factor(boltzmann_factor):
        # NOTE(review): no ``self`` parameter while the call site uses
        # ``self._prepare_boltzmann_factor(...)`` -- presumably a stripped
        # ``@staticmethod`` decorator; confirm upstream.
        """Drop rows of bare nuclei (ion_number == atomic_number)."""
        atomic_number = boltzmann_factor.index.get_level_values(0)
        ion_number = boltzmann_factor.index.get_level_values(1)
        selected_ions_mask = (atomic_number != ion_number)
        return boltzmann_factor[selected_ions_mask]
def get_actions_learned(pred_mentions, gt_clusters, max_ents):
    """Derive oracle memory actions for a bounded-memory coreference model.

    For each predicted mention (in order) one action tuple is emitted:
      (-1, 'i')   -- invalid: the mention is not in any gold cluster;
      (cell, 'c') -- coref: its cluster already occupies memory cell ``cell``;
      (cell, 'o') -- overwrite: take/evict the cell whose cluster has the
                     fewest remaining mentions (recency breaks ties);
      (-1, 'n')   -- ignore: every cell's cluster still has strictly more
                     remaining mentions than this one.
    """
    pred_mentions = [tuple(mention) for mention in pred_mentions]
    mention_to_cluster = get_mention_to_cluster_idx(gt_clusters)
    actions = []
    cell_to_cluster = {}
    # Steps since each cell was last touched (used for tie-breaking).
    cell_to_last_used = [0 for cell in range(max_ents)]
    cluster_to_cell = {}
    # Mentions of each gold cluster not yet consumed from the prediction list.
    cluster_to_rem_mentions = [len(cluster) for cluster in gt_clusters]
    for mention in pred_mentions:
        used_cell_idx = None
        if (mention not in mention_to_cluster):
            actions.append(((- 1), 'i'))
        else:
            mention_cluster = mention_to_cluster[tuple(mention)]
            if (mention_cluster in cluster_to_cell):
                # Cluster already tracked: coreference with its current cell.
                actions.append((cluster_to_cell[mention_cluster], 'c'))
                used_cell_idx = cluster_to_cell[mention_cluster]
            else:
                cur_rem_mentions = cluster_to_rem_mentions[mention_cluster]
                cell_info = []
                for cell_idx in range(max_ents):
                    if (cell_idx in cell_to_cluster):
                        cell_cluster = cell_to_cluster[cell_idx]
                        cell_rem_mentions = cluster_to_rem_mentions[cell_cluster]
                    else:
                        # Empty cells sort first (remaining count of -1).
                        cell_rem_mentions = (- 1)
                    cell_info.append((cell_rem_mentions, cell_to_last_used[cell_idx], cell_idx))
                # Prefer the fewest remaining mentions; the tiny recency term
                # makes a larger last-used age sort earlier on ties.
                cell_info = sorted(cell_info, key=(lambda x: (x[0] - (1e-10 * x[1]))))
                min_remaining_mentions = cell_info[0][0]
                if (cur_rem_mentions >= min_remaining_mentions):
                    used_cell_idx = cell_info[0][2]
                if (used_cell_idx is None):
                    # Not worth a cell: every tracked cluster has more left.
                    actions.append(((- 1), 'n'))
                else:
                    # Overwrite: re-point the chosen cell at this cluster.
                    actions.append((used_cell_idx, 'o'))
                    if (used_cell_idx in cell_to_cluster):
                        del cluster_to_cell[cell_to_cluster[used_cell_idx]]
                    cluster_to_cell[mention_cluster] = used_cell_idx
                    cell_to_cluster[used_cell_idx] = mention_cluster
            # Age all cells; reset the one touched this step (if any).
            for cell_idx in range(max_ents):
                cell_to_last_used[cell_idx] += 1
                if (used_cell_idx is not None):
                    cell_to_last_used[used_cell_idx] = 0
            cluster_to_rem_mentions[mention_cluster] -= 1
    return actions
def cache_url(url, model_dir=None, progress=True):
    """Download *url* into the local model cache (once, on the main process)
    and return the cached file path.

    The cache directory defaults to ``$TORCH_MODEL_ZOO`` or
    ``~/.torch/models``. The file name comes from the URL fragment when
    present, else the path basename; Detectron-style ``model_final.pkl``
    names are disambiguated by flattening the full URL path. A hash prefix
    embedded in the file name (if at least 6 chars) is verified on download.
    """
    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = parts.fragment if parts.fragment != '' else os.path.basename(parts.path)
    if filename == 'model_final.pkl':
        # All Detectron final checkpoints share this basename; use the full
        # URL path (with '/' flattened) to keep cached files distinct.
        filename = parts.path.replace('/', '_')
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file) and is_main_process():
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        match = HASH_REGEX.search(filename)
        hash_prefix = match.group(1) if match is not None else None
        if hash_prefix is not None and len(hash_prefix) < 6:
            # Too short to be a meaningful checksum prefix -- skip validation.
            hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    # All processes wait until the main process has finished downloading.
    synchronize()
    return cached_file
def get_graph(text, language='english'):
    """Build the weighted sentence graph used for TextRank-style ranking."""
    sentences = _clean_text_by_sentences(text, language)
    sentence_tokens = [sentence.token for sentence in sentences]
    graph = _build_graph(sentence_tokens)
    _set_graph_edge_weights(graph)
    return graph