code
stringlengths
281
23.7M
def test_none_attr_custom_init():
    """Shape introspection of an attrs class whose attribute is typed NoneType
    but whose custom __init__ exposes it as a required ``Any`` parameter.

    Both the input shape (constructor parameter) and the output shape
    (attribute accessor) must mark ``a`` as an overridden type.
    """
    assert (get_attrs_shape(NoneAttrCustomInit) == Shape(
        input=InputShape(
            constructor=NoneAttrCustomInit,
            kwargs=None,
            # Custom __init__ widens the field type to Any.
            fields=(InputField(type=Any, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY),),
            params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW),),
            overriden_types=frozenset({'a'})),
        output=OutputShape(
            # Attribute itself is annotated as None (NoneType).
            fields=(OutputField(type=type(None), id='a', default=NoDefault(), accessor=create_attr_accessor('a', is_required=True), metadata=MappingProxyType({}), original=ANY),),
            overriden_types=frozenset({'a'}))))
def get_locked_package(dependency: Dependency, packages_by_name: dict[(str, list[Package])], decided: (dict[(Package, Dependency)] | None)=None) -> (Package | None):
    """Return the locked package satisfying *dependency*, honoring earlier decisions.

    Returns ``None`` when nothing fits or when more than one previously decided
    package overlaps the dependency's environment marker.  Raises
    ``DependencyWalkerError`` when a previous decision conflicts with every
    otherwise-compatible candidate.
    """
    decisions = decided or {}
    candidates = packages_by_name.get(dependency.name, [])

    # Candidates that were already elected for an overlapping marker.
    overlapping = set()
    for candidate in candidates:
        prior = decisions.get(candidate)
        if prior is None:
            continue
        if not prior.marker.intersect(dependency.marker).is_empty():
            overlapping.add(candidate)
    if len(overlapping) > 1:
        return None

    def _compatible(candidate: Package) -> bool:
        # Python constraint, version constraint and source must all match.
        if not candidate.python_constraint.allows_all(dependency.python_constraint):
            return False
        if not dependency.constraint.allows(candidate.version):
            return False
        return dependency.source_type is None or dependency.is_same_source_as(candidate)

    compatible = [candidate for candidate in candidates if _compatible(candidate)]

    if overlapping:
        narrowed = [candidate for candidate in compatible if candidate in overlapping]
        if not narrowed:
            raise DependencyWalkerError(
                f'The `{dependency.name}` package has the following compatible candidates '
                f'`{compatible}`; but, the exporter dependency walker previously elected '
                f'`{overlapping.pop()}` which is not compatible with the dependency '
                f'`{dependency}`. Please contribute to `poetry-plugin-export` to solve this problem.'
            )
        compatible = narrowed

    return next(iter(compatible), None)
class ChannelUsability(Enum):
    """Reasons a payment channel can (or cannot) be used for a new transfer.

    ``USABLE`` carries a truthy ``True`` value; every failure member carries a
    human-readable explanation string as its value.
    """

    USABLE = True
    NOT_OPENED = 'channel is not open'
    INVALID_SETTLE_TIMEOUT = 'channel settle timeout is too low'
    CHANNEL_REACHED_PENDING_LIMIT = 'channel reached limit of pending transfers'
    CHANNEL_DOESNT_HAVE_ENOUGH_DISTRIBUTABLE = "channel doesn't have enough distributable tokens"
    CHANNEL_BALANCE_PROOF_WOULD_OVERFLOW = 'channel balance proof would overflow'
    LOCKTIMEOUT_MISMATCH = 'the lock timeout can not be used with the channel'
class d_lka_former_trainer_synapse(Trainer_synapse):
    """nnU-Net-style trainer for the D-LKA-Former network on the Synapse dataset.

    Differences from the generic ``Trainer_synapse``:
      * fixed pooling schedule and a fixed 64x128x128 crop,
      * deep supervision with normalized 1/2**i loss weights,
      * a hard-coded Synapse train/val split (see ``do_split``),
      * polynomial learning-rate decay (``maybe_update_lr``).
    """

    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None,
                 batch_dice=True, stage=None, unpack_data=True, deterministic=True,
                 fp16=False, trans_block=None, depths=None, skip_connections=None,
                 seed=12345):
        # FIX: `depths`/`skip_connections` previously used mutable list
        # defaults; a None sentinel yields the same effective values without
        # sharing state across instances.
        if depths is None:
            depths = [3, 3, 3, 3]
        if skip_connections is None:
            skip_connections = [True, True, True, True]
        super().__init__(plans_file, fold, output_folder, dataset_directory,
                         batch_dice, stage, unpack_data, deterministic, fp16,
                         seed=seed)
        self.max_num_epochs = 1000
        self.initial_lr = 0.01
        self.deep_supervision_scales = None
        self.ds_loss_weights = None
        self.pin_memory = True
        self.load_pretrain_weight = False
        self.load_plans_file()
        self.crop_size = [64, 128, 128]
        self.input_channels = self.plans['num_modalities']
        # +1 for the background class.
        self.num_classes = self.plans['num_classes'] + 1
        self.conv_op = nn.Conv3d
        # Transformer hyper-parameters handed to D_LKA_Former.
        self.embedding_dim = 192
        self.depths = depths
        self.num_heads = [6, 12, 24, 48]
        self.embedding_patch_size = [2, 4, 4]
        self.window_size = [4, 4, 8, 4]
        self.deep_supervision = True
        self.trans_block = trans_block
        self.skip_connections = skip_connections

    def initialize(self, training=True, force_load_plans=False):
        """Build plans, loss, data generators, network and optimizer (idempotent)."""
        if not self.was_initialized:
            maybe_mkdir_p(self.output_folder)
            if force_load_plans or (self.plans is None):
                self.load_plans_file()
            # Force the pooling schedule expected by D_LKA_Former.
            self.plans['plans_per_stage'][self.stage]['pool_op_kernel_sizes'] = [[2, 4, 4], [2, 2, 2], [2, 2, 2]]
            self.process_plans(self.plans)
            self.setup_DA_params()
            if self.deep_supervision:
                # Weight each deep-supervision head by 1/2**i, normalized to sum 1.
                net_numpool = len(self.net_num_pool_op_kernel_sizes)
                weights = np.array([1 / (2 ** i) for i in range(net_numpool)])
                weights = weights / weights.sum()
                print(weights)
                self.ds_loss_weights = weights
                self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
            print('Adjusting data augmentation threads')
            print('Current num threads: {}'.format(self.data_aug_params.get('num_threads')))
            self.data_aug_params['num_threads'] = 4
            print('Updated num threads: {}'.format(self.data_aug_params.get('num_threads')))
            self.folder_with_preprocessed_data = join(
                self.dataset_directory,
                self.plans['data_identifier'] + ('_stage%d' % self.stage))
            # FIX: np.random.random_integers was removed in NumPy 1.25.
            # randint with an exclusive high of 100000 draws from the same
            # inclusive [0, 99999] range.
            seeds_train = np.random.randint(0, 100000, self.data_aug_params.get('num_threads'))
            seeds_val = np.random.randint(0, 100000, max(self.data_aug_params.get('num_threads') // 2, 1))
            if training:
                self.dl_tr, self.dl_val = self.get_basic_generators()
                if self.unpack_data:
                    print('unpacking dataset')
                    unpack_dataset(self.folder_with_preprocessed_data)
                    print('done')
                else:
                    print('INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you will wait all winter for your model to finish!')
                self.tr_gen, self.val_gen = get_moreDA_augmentation(
                    self.dl_tr, self.dl_val,
                    self.data_aug_params['patch_size_for_spatialtransform'],
                    self.data_aug_params,
                    deep_supervision_scales=(self.deep_supervision_scales if self.deep_supervision else None),
                    pin_memory=self.pin_memory,
                    use_nondetMultiThreadedAugmenter=False,
                    seeds_train=seeds_train,
                    seeds_val=seeds_val)
                self.print_to_log_file('TRAINING KEYS:\n %s' % str(self.dataset_tr.keys()),
                                       also_print_to_console=False)
                self.print_to_log_file('VALIDATION KEYS:\n %s' % str(self.dataset_val.keys()),
                                       also_print_to_console=False)
            self.initialize_network()
            self.initialize_optimizer_and_scheduler()
            assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
        else:
            self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
        self.was_initialized = True

    def initialize_network(self):
        """Instantiate D_LKA_Former and report parameter and FLOP counts."""
        self.network = D_LKA_Former(in_channels=self.input_channels,
                                    out_channels=self.num_classes,
                                    img_size=self.crop_size,
                                    feature_size=16,
                                    num_heads=4,
                                    depths=self.depths,
                                    dims=[32, 64, 128, 256],
                                    do_ds=True,
                                    trans_block=self.trans_block,
                                    skip_connections=self.skip_connections)
        if torch.cuda.is_available():
            self.network.cuda()
        self.network.inference_apply_nonlin = softmax_helper
        n_parameters = sum(p.numel() for p in self.network.parameters() if p.requires_grad)
        # Dummy tensor used only for FLOP counting (renamed from `input`,
        # which shadowed the builtin).
        input_res = (1, 64, 128, 128)
        dummy_input = torch.ones(()).new_empty(
            (1, *input_res),
            dtype=next(self.network.parameters()).dtype,
            device=next(self.network.parameters()).device)
        flops = FlopCountAnalysis(self.network, dummy_input)
        model_flops = flops.total()
        print(f'Total trainable parameters: {round(n_parameters * 1e-06, 4)} M')
        print(f'MAdds: {round(model_flops * 1e-09, 4)} G')

    def initialize_optimizer_and_scheduler(self):
        """SGD with Nesterov momentum; the LR is driven manually by poly decay."""
        assert self.network is not None, 'self.initialize_network must be called first'
        self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr,
                                         weight_decay=self.weight_decay,
                                         momentum=0.99, nesterov=True)
        self.lr_scheduler = None

    def run_online_evaluation(self, output, target):
        """Evaluate on the highest-resolution head when deep supervision is on."""
        if self.deep_supervision:
            target = target[0]
            output = output[0]
        return super().run_online_evaluation(output, target)

    def validate(self, do_mirroring: bool=True, use_sliding_window: bool=True,
                 step_size: float=0.5, save_softmax: bool=True, use_gaussian: bool=True,
                 overwrite: bool=True, validation_folder_name: str='validation_raw',
                 debug: bool=False, all_in_gpu: bool=False,
                 segmentation_export_kwargs: dict=None,
                 run_postprocessing_on_folds: bool=True):
        """Run validation with deep supervision disabled, restoring it afterwards."""
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().validate(do_mirroring=do_mirroring,
                               use_sliding_window=use_sliding_window,
                               step_size=step_size, save_softmax=save_softmax,
                               use_gaussian=use_gaussian, overwrite=overwrite,
                               validation_folder_name=validation_folder_name,
                               debug=debug, all_in_gpu=all_in_gpu,
                               segmentation_export_kwargs=segmentation_export_kwargs,
                               run_postprocessing_on_folds=run_postprocessing_on_folds)
        self.network.do_ds = ds
        return ret

    def predict_preprocessed_data_return_seg_and_softmax(
            self, data: np.ndarray, do_mirroring: bool=True,
            mirror_axes: Tuple[int]=None, use_sliding_window: bool=True,
            step_size: float=0.5, use_gaussian: bool=True,
            pad_border_mode: str='constant', pad_kwargs: dict=None,
            all_in_gpu: bool=False, verbose: bool=True,
            mixed_precision=True) -> Tuple[(np.ndarray, np.ndarray)]:
        """Predict with deep supervision disabled, restoring it afterwards."""
        ds = self.network.do_ds
        self.network.do_ds = False
        ret = super().predict_preprocessed_data_return_seg_and_softmax(
            data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,
            use_sliding_window=use_sliding_window, step_size=step_size,
            use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,
            pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,
            mixed_precision=mixed_precision)
        self.network.do_ds = ds
        return ret

    def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
        """One optimization step; returns the detached loss as a numpy scalar."""
        data_dict = next(data_generator)
        data = maybe_to_torch(data_dict['data'])
        target = maybe_to_torch(data_dict['target'])
        if torch.cuda.is_available():
            data = to_cuda(data)
            target = to_cuda(target)
        self.optimizer.zero_grad()
        if self.fp16:
            with autocast():
                output = self.network(data)
                del data
                l = self.loss(output, target)
            if do_backprop:
                # AMP: scale, unscale for clipping, then step.
                self.amp_grad_scaler.scale(l).backward()
                self.amp_grad_scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.amp_grad_scaler.step(self.optimizer)
                self.amp_grad_scaler.update()
        else:
            output = self.network(data)
            del data
            l = self.loss(output, target)
            if do_backprop:
                l.backward()
                torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
                self.optimizer.step()
        if run_online_evaluation:
            self.run_online_evaluation(output, target)
        del target
        return l.detach().cpu().numpy()

    def do_split(self):
        """Populate self.dataset_tr / self.dataset_val.

        For a numbered fold, a hard-coded Synapse train/val split overrides
        whatever ``splits_final.pkl`` contains (intentional in this trainer).
        """
        if self.fold == 'all':
            tr_keys = val_keys = list(self.dataset.keys())
        else:
            splits_file = join(self.dataset_directory, 'splits_final.pkl')
            if not isfile(splits_file):
                self.print_to_log_file('Creating new 5-fold cross-validation split...')
                splits = []
                all_keys_sorted = np.sort(list(self.dataset.keys()))
                kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
                for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
                    train_keys = np.array(all_keys_sorted)[train_idx]
                    test_keys = np.array(all_keys_sorted)[test_idx]
                    splits.append(OrderedDict())
                    splits[-1]['train'] = train_keys
                    splits[-1]['val'] = test_keys
                save_pickle(splits, splits_file)
            else:
                self.print_to_log_file('Using splits from existing split file:', splits_file)
                splits = load_pickle(splits_file)
                self.print_to_log_file('The split file contains %d splits.' % len(splits))
            self.print_to_log_file('Desired fold for training: %d' % self.fold)
            # Hard-coded Synapse split: overrides the loaded/created split.
            splits[self.fold]['train'] = np.array(['img0006', 'img0007', 'img0009', 'img0010', 'img0021', 'img0023', 'img0024', 'img0026', 'img0027', 'img0031', 'img0033', 'img0034', 'img0039', 'img0040', 'img0005', 'img0028', 'img0030', 'img0037'])
            splits[self.fold]['val'] = np.array(['img0001', 'img0002', 'img0003', 'img0004', 'img0008', 'img0022', 'img0025', 'img0029', 'img0032', 'img0035', 'img0036', 'img0038'])
            if self.fold < len(splits):
                tr_keys = splits[self.fold]['train']
                val_keys = splits[self.fold]['val']
                self.print_to_log_file('This split has %d training and %d validation cases.' % (len(tr_keys), len(val_keys)))
            else:
                # Fold index beyond the split file: fall back to a seeded 80:20 split.
                self.print_to_log_file('INFO: You requested fold %d for training but splits contain only %d folds. I am now creating a random (but seeded) 80:20 split!' % (self.fold, len(splits)))
                rnd = np.random.RandomState(seed=12345 + self.fold)
                keys = np.sort(list(self.dataset.keys()))
                idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)
                idx_val = [i for i in range(len(keys)) if i not in idx_tr]
                tr_keys = [keys[i] for i in idx_tr]
                val_keys = [keys[i] for i in idx_val]
                self.print_to_log_file('This random 80:20 split has %d training and %d validation cases.' % (len(tr_keys), len(val_keys)))
        tr_keys.sort()
        val_keys.sort()
        self.dataset_tr = OrderedDict()
        for i in tr_keys:
            self.dataset_tr[i] = self.dataset[i]
        self.dataset_val = OrderedDict()
        for i in val_keys:
            self.dataset_val[i] = self.dataset[i]

    def setup_DA_params(self):
        """Configure data augmentation (rotations, scaling, dummy-2D handling)."""
        # Per-head downsampling factors for deep supervision; the coarsest
        # head is dropped ([:-1]).
        self.deep_supervision_scales = [[1, 1, 1]] + list(
            list(i) for i in 1 / np.cumprod(np.vstack(self.net_num_pool_op_kernel_sizes), axis=0))[:-1]
        if self.threeD:
            self.data_aug_params = default_3D_augmentation_params
            # +/- 30 degrees, expressed in radians.
            self.data_aug_params['rotation_x'] = ((-30.0 / 360) * 2.0 * np.pi, (30.0 / 360) * 2.0 * np.pi)
            self.data_aug_params['rotation_y'] = ((-30.0 / 360) * 2.0 * np.pi, (30.0 / 360) * 2.0 * np.pi)
            self.data_aug_params['rotation_z'] = ((-30.0 / 360) * 2.0 * np.pi, (30.0 / 360) * 2.0 * np.pi)
            if self.do_dummy_2D_aug:
                self.data_aug_params['dummy_2D'] = True
                self.print_to_log_file('Using dummy2d data augmentation')
                self.data_aug_params['elastic_deform_alpha'] = default_2D_augmentation_params['elastic_deform_alpha']
                self.data_aug_params['elastic_deform_sigma'] = default_2D_augmentation_params['elastic_deform_sigma']
                self.data_aug_params['rotation_x'] = default_2D_augmentation_params['rotation_x']
        else:
            self.do_dummy_2D_aug = False
            if max(self.patch_size) / min(self.patch_size) > 1.5:
                default_2D_augmentation_params['rotation_x'] = ((-15.0 / 360) * 2.0 * np.pi, (15.0 / 360) * 2.0 * np.pi)
            self.data_aug_params = default_2D_augmentation_params
        self.data_aug_params['mask_was_used_for_normalization'] = self.use_mask_for_norm
        if self.do_dummy_2D_aug:
            self.basic_generator_patch_size = get_patch_size(
                self.patch_size[1:], self.data_aug_params['rotation_x'],
                self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'],
                self.data_aug_params['scale_range'])
            self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
            patch_size_for_spatialtransform = self.patch_size[1:]
        else:
            self.basic_generator_patch_size = get_patch_size(
                self.patch_size, self.data_aug_params['rotation_x'],
                self.data_aug_params['rotation_y'], self.data_aug_params['rotation_z'],
                self.data_aug_params['scale_range'])
            patch_size_for_spatialtransform = self.patch_size
        self.data_aug_params['scale_range'] = (0.7, 1.4)
        self.data_aug_params['do_elastic'] = False
        self.data_aug_params['selected_seg_channels'] = [0]
        self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
        self.data_aug_params['num_cached_per_thread'] = 2

    def maybe_update_lr(self, epoch=None):
        """Polynomial LR decay; uses epoch+1 when invoked at epoch end."""
        ep = self.epoch + 1 if epoch is None else epoch
        self.optimizer.param_groups[0]['lr'] = poly_lr(ep, self.max_num_epochs, self.initial_lr, 0.9)
        self.print_to_log_file('lr:', np.round(self.optimizer.param_groups[0]['lr'], decimals=6))

    def on_epoch_end(self):
        """Epoch-end hook: lowers momentum and re-inits weights if Dice collapsed."""
        super().on_epoch_end()
        continue_training = self.epoch < self.max_num_epochs
        if self.epoch == 100:
            if self.all_val_eval_metrics[-1] == 0:
                self.optimizer.param_groups[0]['momentum'] = 0.95
                self.network.apply(InitWeights_He(0.01))
                self.print_to_log_file('At epoch 100, the mean foreground Dice was 0. This can be caused by a too high momentum. High momentum (0.99) is good for datasets where it works, but sometimes causes issues such as this one. Momentum has now been reduced to 0.95 and network weights have been reinitialized')
        return continue_training

    def run_training(self):
        """Train with deep supervision toggled per config, restored afterwards."""
        self.maybe_update_lr(self.epoch)
        ds = self.network.do_ds
        self.network.do_ds = self.deep_supervision
        ret = super().run_training()
        self.network.do_ds = ds
        return ret
def collect_citations(dag: ProvDAG, deduplicate: bool=True) -> bp.bibdatabase.BibDatabase:
    """Gather the citation entries of every node in *dag* into a BibDatabase.

    Nodes without data are skipped; duplicates are removed when *deduplicate*
    is true.
    """
    gathered = []
    for node_uuid in dag:
        node = dag.get_node_data(node_uuid)
        if node is None:
            continue
        gathered.extend(list(node.citations.values()))
    if deduplicate:
        gathered = dedupe_citations(gathered)
    database = bp.bibdatabase.BibDatabase()
    database.entries = gathered
    return database
def handle_refundtransfer(received_transfer: LockedTransferUnsignedState, channel_state: NettingChannelState, refund: ReceiveTransferRefund) -> EventsOrError:
    """Process a received transfer refund against *channel_state*.

    On a valid refund, applies the refund's balance proof and pending locks to
    the partner state and acknowledges with a SendProcessed event; otherwise
    emits an EventInvalidReceivedTransferRefund.  Returns
    (is_valid, events, error_message).
    """
    events: List[Event]
    (is_valid, msg, pending_locks) = is_valid_refund(refund=refund, channel_state=channel_state, sender_state=channel_state.partner_state, receiver_state=channel_state.our_state, received_transfer=received_transfer)
    if is_valid:
        assert pending_locks, 'is_valid_refund should return pending locks if valid'
        # Apply the refund's balance proof to the partner's channel-end state.
        channel_state.partner_state.balance_proof = refund.transfer.balance_proof
        channel_state.partner_state.nonce = refund.transfer.balance_proof.nonce
        channel_state.partner_state.pending_locks = pending_locks
        # Register the refund's lock under its secret hash.
        lock = refund.transfer.lock
        channel_state.partner_state.secrethashes_to_lockedlocks[lock.secrethash] = lock
        recipient_address = channel_state.partner_state.address
        recipient_metadata = get_address_metadata(recipient_address, received_transfer.route_states)
        # Acknowledge the refund message to the sender.
        send_processed = SendProcessed(recipient=refund.transfer.balance_proof.sender, recipient_metadata=recipient_metadata, message_identifier=refund.transfer.message_identifier, canonical_identifier=CANONICAL_IDENTIFIER_UNORDERED_QUEUE)
        events = [send_processed]
    else:
        assert msg, 'is_valid_refund should return error msg if not valid'
        invalid_refund = EventInvalidReceivedTransferRefund(payment_identifier=received_transfer.payment_identifier, reason=msg)
        events = [invalid_refund]
    return (is_valid, events, msg)
def test_makereport_getsource(pytester: Pytester) -> None:
    """A failing assert inside an ``else:`` suite must be shown in the report."""
    pytester.makepyfile('\n def test_foo():\n if False: pass\n else: assert False\n ')
    result = pytester.runpytest()
    # Source lookup for the else-branch must not crash pytest internals...
    result.stdout.no_fnmatch_line('*INTERNALERROR*')
    # ...and the offending source line is echoed in the failure output.
    result.stdout.fnmatch_lines(['*else: assert False*'])
def test_specific_location(hatch, helpers, temp_dir_data, path_append, dist_name, mocker):
    """`hatch python install --private -d <dir>` installs into the given
    directory and must not append anything to the user PATH."""
    install_dir = (((temp_dir_data / 'foo') / 'bar') / 'baz')
    dist_dir = (install_dir / dist_name)
    python_path = (dist_dir / get_distribution(dist_name).python_path)
    # Stub the real download/installation work.
    install = mocker.patch('hatch.python.core.PythonManager.install', return_value=mocker.MagicMock(path=dist_dir, python_path=python_path))
    result = hatch('python', 'install', '--private', '-d', str(install_dir), dist_name)
    assert (result.exit_code == 0), result.output
    # NOTE(review): the expected-output literal's internal newlines appear to
    # have been collapsed to spaces by extraction; kept verbatim.
    assert (result.output == helpers.dedent(f''' Installing {dist_name} Installed {dist_name} {dist_dir} '''))
    install.assert_called_once_with(dist_name)
    # --private implies no PATH modification.
    path_append.assert_not_called()
def test_qubit_vs_toffoli_original_strategy():
    """Regression test: qubit_vs_toffoli(algorithm='full') reproduces the
    reference per-step Toffoli-count and qubit-count traces for this
    parameter set."""
    # THC resource-estimation parameters (lambda, target precision, orbital
    # count, THC rank, bits of precision, etc.).
    lam = 307.68
    dE = 0.001
    eps = (dE / (10 * lam))
    n = 108
    chi = 10
    beta = 16
    M = 350
    # Golden reference traces, one entry per algorithm step.
    ref_tof = np.asarray([95, 89, 2852, 10, 18, 10, 54, 402, 1512, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 42, 54, 1, 54, 348, 1512, 1, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 38, 54, 10, 18, 10, 497, 89, 95, 31, 1])
    ref_qub = np.asarray([228, 285, 1182, 251, 241, 241, 241, 1114, 1119, 1119, 1103, 1087, 1071, 1055, 1039, 1023, 1007, 991, 975, 959, 943, 927, 911, 895, 879, 863, 847, 831, 815, 799, 783, 767, 751, 735, 719, 703, 687, 671, 655, 639, 623, 607, 591, 575, 559, 543, 527, 511, 495, 479, 463, 447, 431, 415, 399, 383, 367, 351, 335, 319, 303, 287, 271, 262, 241, 241, 241, 1113, 1119, 1105, 1119, 1103, 1087, 1071, 1055, 1039, 1023, 1007, 991, 975, 959, 943, 927, 911, 895, 879, 863, 847, 831, 815, 799, 783, 767, 751, 735, 719, 703, 687, 671, 655, 639, 623, 607, 591, 575, 559, 543, 527, 511, 495, 479, 463, 447, 431, 415, 399, 383, 367, 351, 335, 319, 303, 287, 271, 262, 241, 242, 241, 251, 475, 285, 230, 224, 193])
    (tgates, qubits, _, _) = qubit_vs_toffoli(lam, dE, eps, n, chi, beta, M, algorithm='full', verbose=False)
    assert np.allclose(tgates.astype(int), ref_tof)
    assert np.allclose(qubits.astype(int), ref_qub)
def pytask_execute_task_setup(session: Session, task: PTask) -> None:
    """Raise the appropriate Skipped* exception when a skip marker applies.

    Checked in order: skip_unchanged (unless forced), skip, skipif
    (condition-based), skip_ancestor_failed.
    """
    unchanged = has_mark(task, 'skip_unchanged') and not has_mark(task, 'would_be_executed')
    if unchanged and not session.config['force']:
        raise SkippedUnchanged

    if has_mark(task, 'skip'):
        raise Skipped

    skipif_marks = get_marks(task, 'skipif')
    if skipif_marks:
        # Each evaluation yields (condition, reason).
        evaluated = [skipif(*mark.args, **mark.kwargs) for mark in skipif_marks]
        message = '\n'.join(reason for condition, reason in evaluated if condition)
        if any(condition for condition, _ in evaluated):
            raise Skipped(message)

    ancestor_failed_marks = get_marks(task, 'skip_ancestor_failed')
    if ancestor_failed_marks:
        raise SkippedAncestorFailed(
            '\n'.join(skip_ancestor_failed(*mark.args, **mark.kwargs)
                      for mark in ancestor_failed_marks)
        )
def test_async_cmd_maximal_not_save():
    """AsyncCmdStep honors every dict option when 'save' is not requested."""
    context = Context({'cmds': {'run': ['A', 'B'], 'cwd': '/cwd', 'stdout': '/stdout', 'stderr': '/stderr', 'encoding': 'enc', 'bytes': True, 'append': True}})
    step = AsyncCmdStep('blah', context)
    # One dict input expands to exactly one command.
    assert (len(step.commands) == 1)
    cmd1 = step.commands[0]
    assert (cmd1.cmd == ['A', 'B'])
    assert (cmd1.is_shell is False)
    # bytes=True implies text mode off.
    assert (cmd1.is_text is False)
    assert (cmd1.cwd == '/cwd')
    # 'save' was not set, so output capture is disabled.
    assert (cmd1.is_save is False)
    assert (cmd1.stdout == '/stdout')
    assert (cmd1.stderr == '/stderr')
    assert (cmd1.encoding == 'enc')
    assert (cmd1.append is True)
class FlowNetSD(nn.Module):
    """FlowNetSD: the small-displacement optical-flow CNN from FlowNet 2.0.

    Encoder/decoder with flow predicted at five scales.  In training mode
    ``forward`` returns all five flows (finest first); in eval mode only the
    finest (flow2).
    """

    def __init__(self, args, batchNorm=True):
        super(FlowNetSD, self).__init__()
        self.batchNorm = batchNorm
        # Encoder: two stacked RGB frames (6 channels) down to 1024 ch at 1/64.
        self.conv0 = conv(self.batchNorm, 6, 64)
        self.conv1 = conv(self.batchNorm, 64, 64, stride=2)
        self.conv1_1 = conv(self.batchNorm, 64, 128)
        self.conv2 = conv(self.batchNorm, 128, 128, stride=2)
        self.conv2_1 = conv(self.batchNorm, 128, 128)
        self.conv3 = conv(self.batchNorm, 128, 256, stride=2)
        self.conv3_1 = conv(self.batchNorm, 256, 256)
        self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
        self.conv4_1 = conv(self.batchNorm, 512, 512)
        self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
        self.conv5_1 = conv(self.batchNorm, 512, 512)
        self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
        self.conv6_1 = conv(self.batchNorm, 1024, 1024)
        # Decoder: input widths include skip features plus the 2-ch flow.
        self.deconv5 = deconv(1024, 512)
        self.deconv4 = deconv(1026, 256)
        self.deconv3 = deconv(770, 128)
        self.deconv2 = deconv(386, 64)
        self.inter_conv5 = i_conv(self.batchNorm, 1026, 512)
        self.inter_conv4 = i_conv(self.batchNorm, 770, 256)
        self.inter_conv3 = i_conv(self.batchNorm, 386, 128)
        self.inter_conv2 = i_conv(self.batchNorm, 194, 64)
        # One 2-channel flow head per scale.
        self.predict_flow6 = predict_flow(1024)
        self.predict_flow5 = predict_flow(512)
        self.predict_flow4 = predict_flow(256)
        self.predict_flow3 = predict_flow(128)
        self.predict_flow2 = predict_flow(64)
        # Learned 2x upsampling of the coarse flow between scales.
        self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
        self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
        self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
        self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
        # Xavier weights / uniform biases for all (transposed) convolutions.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if (m.bias is not None):
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
            if isinstance(m, nn.ConvTranspose2d):
                if (m.bias is not None):
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
        self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')

    def forward(self, x):
        # Encoder.
        out_conv0 = self.conv0(x)
        out_conv1 = self.conv1_1(self.conv1(out_conv0))
        out_conv2 = self.conv2_1(self.conv2(out_conv1))
        out_conv3 = self.conv3_1(self.conv3(out_conv2))
        out_conv4 = self.conv4_1(self.conv4(out_conv3))
        out_conv5 = self.conv5_1(self.conv5(out_conv4))
        out_conv6 = self.conv6_1(self.conv6(out_conv5))
        # Coarse-to-fine decoding: predict flow, upsample it, concatenate
        # with the skip features, refine with an intermediate conv.
        flow6 = self.predict_flow6(out_conv6)
        flow6_up = self.upsampled_flow6_to_5(flow6)
        out_deconv5 = self.deconv5(out_conv6)
        concat5 = torch.cat((out_conv5, out_deconv5, flow6_up), 1)
        out_interconv5 = self.inter_conv5(concat5)
        flow5 = self.predict_flow5(out_interconv5)
        flow5_up = self.upsampled_flow5_to_4(flow5)
        out_deconv4 = self.deconv4(concat5)
        concat4 = torch.cat((out_conv4, out_deconv4, flow5_up), 1)
        out_interconv4 = self.inter_conv4(concat4)
        flow4 = self.predict_flow4(out_interconv4)
        flow4_up = self.upsampled_flow4_to_3(flow4)
        out_deconv3 = self.deconv3(concat4)
        concat3 = torch.cat((out_conv3, out_deconv3, flow4_up), 1)
        out_interconv3 = self.inter_conv3(concat3)
        flow3 = self.predict_flow3(out_interconv3)
        flow3_up = self.upsampled_flow3_to_2(flow3)
        out_deconv2 = self.deconv2(concat3)
        concat2 = torch.cat((out_conv2, out_deconv2, flow3_up), 1)
        out_interconv2 = self.inter_conv2(concat2)
        flow2 = self.predict_flow2(out_interconv2)
        if self.training:
            return (flow2, flow3, flow4, flow5, flow6)
        else:
            return (flow2,)
class HuffmanLength(object):
    """One Huffman table entry: a source code value and its bit length.

    ``symbol``/``reverse_symbol`` start as None and are assigned later when
    the canonical codes are computed.
    """

    def __init__(self, code, bits=0):
        self.code = code
        self.bits = bits
        # Filled in once canonical symbols are allocated.
        self.symbol = None
        self.reverse_symbol = None

    def __repr__(self):
        state = (self.code, self.bits, self.symbol, self.reverse_symbol)
        return repr(state)


def _sort_func(obj):
    """Sort key for canonical ordering: by bit length, then by code value."""
    return (obj.bits, obj.code)
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
    """Advance over the SELECT clause of a tokenized SQL query.

    Returns the index of the first token after the SELECT clause.

    NOTE(review): `val_units` is initialized and `agg_id` is computed per
    column but neither is stored or returned — upstream versions of this
    parser append (agg_id, val_unit) pairs and return them alongside idx.
    This extraction appears truncated; confirm against the caller.
    """
    idx = start_idx
    len_ = len(toks)
    assert (toks[idx] == 'select'), "'select' not found"
    idx += 1
    isDistinct = False
    if ((idx < len_) and (toks[idx] == 'distinct')):
        idx += 1
        isDistinct = True
    val_units = []
    # Consume one value unit per iteration until a clause keyword is hit.
    while ((idx < len_) and (toks[idx] not in CLAUSE_KEYWORDS)):
        agg_id = AGG_OPS.index('none')
        if (toks[idx] in AGG_OPS):
            # Leading aggregation operator (e.g. count, max) consumed here.
            agg_id = AGG_OPS.index(toks[idx])
            idx += 1
        idx = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
        if ((idx < len_) and (toks[idx] == ',')):
            idx += 1
    return idx
class RHEL4_Network(FC3_Network):
    """RHEL4 variant of the FC3 ``network`` kickstart command.

    Adds the RHEL4-only ``--notksdevice`` flag to the inherited parser.
    """

    removedKeywords = FC3_Network.removedKeywords
    removedAttrs = FC3_Network.removedAttrs

    def _getParser(self):
        parser = FC3_Network._getParser(self)
        parser.add_argument(
            '--notksdevice',
            action='store_true',
            default=False,
            version=RHEL4,
            help='This network device is not used for kickstart.',
        )
        return parser
def test_unstructure_deeply_nested_generics(genconverter):
    """Unstructuring a generic wrapper must recurse into the nested class,
    whether the generic type is given explicitly or inferred."""
    # NOTE(review): Inner/Outer are instantiated with positional arguments,
    # which implies attrs-style class decorators were present in the original
    # source but are missing from this extraction — confirm upstream.
    class Inner():
        a: int

    class Outer(Generic[T]):
        inner: T

    initial = Outer[Inner](Inner(1))
    # Explicit generic type supplied...
    raw = genconverter.unstructure(initial, Outer[Inner])
    assert (raw == {'inner': {'a': 1}})
    # ...and inferred from the instance must agree.
    raw = genconverter.unstructure(initial)
    assert (raw == {'inner': {'a': 1}})
def test_scalar_overlay_visualisation(nifti_data):
    """Adding a scalar overlay yields a second image layer with the expected
    pixel checksum for this fixed test dataset."""
    patient_path = nifti_data.joinpath('LCTSC-Test-S1-201')
    ct_path = next(patient_path.glob('IMAGES/*.nii.gz'))
    # Map structure name (RTSTRUCT_ prefix and .nii.gz suffix stripped) -> image.
    structures = {struct.name.split('.nii.gz')[0].split('RTSTRUCT_')[(- 1)]: sitk.ReadImage(str(struct)) for struct in patient_path.glob('STRUCTURES/*.nii.gz')}
    img = sitk.ReadImage(str(ct_path))
    # Cut through the heart's center of mass.
    vis = ImageVisualiser(img, cut=get_com(structures['HEART']))
    vis.add_scalar_overlay((structures['HEART'] * 5))
    fig = vis.show()
    # Base CT plus the scalar overlay layer.
    assert (len(fig.axes[0].images) == 2)
    img = fig.axes[0].images[(- 1)]
    # Fixed checksum of the overlay's pixel data for this dataset.
    assert (img.get_array().data.sum() == 61295)
# FIX: the decorator on this view was truncated to the bare token `_required`
# in this extraction; restored as the standard Django `@login_required`
# (confirm the exact decorator name against the module's imports).
@login_required
def version_delete(request, package_name, version):
    """Delete a plugin version after an explicit POST confirmation.

    Access is limited to users passing check_plugin_access; without the
    'delete_confirm' POST flag, only the confirmation page is rendered.
    """
    plugin = get_object_or_404(Plugin, package_name=package_name)
    version = get_object_or_404(PluginVersion, plugin=plugin, version=version)
    if not check_plugin_access(request.user, plugin):
        return render(request, 'plugins/version_permission_deny.html', {})
    if 'delete_confirm' in request.POST:
        version.delete()
        msg = _('The Plugin Version has been successfully deleted.')
        messages.success(request, msg, fail_silently=True)
        return HttpResponseRedirect(reverse('plugin_detail', args=(plugin.package_name,)))
    return render(request, 'plugins/version_delete_confirm.html', {'plugin': plugin, 'version': version})
class StsbProcessor(DataProcessor):
    """Processor for the STS-B (semantic textual similarity) GLUE task."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensorflow_datasets example dict."""
        return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy()))

    def get_train_examples(self, data_dir):
        """Read train.tsv under *data_dir* into InputExamples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read dev.tsv under *data_dir* into InputExamples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev')

    def get_labels(self):
        # STS-B is a regression task, so there is no label vocabulary.
        return [None]

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header row."""
        examples = []
        for (i, line) in enumerate(lines):
            if (i == 0):
                continue
            guid = ('%s-%s' % (set_type, line[0]))
            # Sentence pair is in columns 7/8; the similarity score is last.
            text_a = line[7]
            text_b = line[8]
            label = line[(- 1)]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
class Bottleneck(nn.Module):
    """ResNeSt-style bottleneck block with optional split-attention (radix),
    average-pool downsampling (AvD), rectified convolution and DropBlock."""

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, bottleneck_width=64, avd=False, avd_first=False, dilation=1, is_first=False, rectified_conv=False, rectify_avg=False, norm_layer=None, dropblock_prob=0.0, last_gamma=False, number=1, custom=0):
        super(Bottleneck, self).__init__()
        group_width = (int((planes * (bottleneck_width / 64.0))) * cardinality)
        # `custom` (when non-zero) overrides the input channel count.
        if (custom != 0):
            inplanes = custom
        self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False)
        self.bn1 = norm_layer(group_width)
        self.dropblock_prob = dropblock_prob
        self.radix = radix
        # AvD replaces the strided conv with a stride-1 conv + avg-pool.
        self.avd = (avd and ((stride > 1) or is_first))
        self.avd_first = avd_first
        if self.avd:
            self.avd_layer = nn.AvgPool2d(3, stride, padding=1)
            stride = 1
        if (dropblock_prob > 0.0):
            self.dropblock1 = DropBlock2D(dropblock_prob, 3)
            if (radix == 1):
                self.dropblock2 = DropBlock2D(dropblock_prob, 3)
            self.dropblock3 = DropBlock2D(dropblock_prob, 3)
        if (radix >= 1):
            # Split-attention 3x3 convolution (normalizes internally).
            self.conv2 = splat.SplAtConv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False, radix=radix, rectify=rectified_conv, rectify_avg=rectify_avg, norm_layer=norm_layer, dropblock_prob=dropblock_prob)
        elif rectified_conv:
            from rfconv import RFConv2d
            self.conv2 = RFConv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False, average_mode=rectify_avg)
            self.bn2 = norm_layer(group_width)
        else:
            self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, groups=cardinality, bias=False)
            self.bn2 = norm_layer(group_width)
        self.conv3 = nn.Conv2d(group_width, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = norm_layer((planes * 4))
        if last_gamma:
            # Zero-init the last BN gamma so the block starts near identity.
            from torch.nn.init import zeros_
            zeros_(self.bn3.weight)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if (self.dropblock_prob > 0.0):
            out = self.dropblock1(out)
        out = self.relu(out)
        if (self.avd and self.avd_first):
            out = self.avd_layer(out)
        out = self.conv2(out)
        # bn2/relu only on the radix == 0 path; SplAtConv2d handles its own
        # normalization and activation.
        if (self.radix == 0):
            out = self.bn2(out)
            if (self.dropblock_prob > 0.0):
                out = self.dropblock2(out)
            out = self.relu(out)
        if (self.avd and (not self.avd_first)):
            out = self.avd_layer(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if (self.dropblock_prob > 0.0):
            out = self.dropblock3(out)
        if (self.downsample is not None):
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
def torch_expm(A):
    """Batched matrix exponential via scaling-and-squaring with a Padé(13)
    approximant (uses the sibling helpers torch_log2 / torch_pade13).

    A: (batch, n, n) tensor.
    """
    n_A = A.shape[0]
    # Frobenius norm per batch element, kept broadcastable over (n, n).
    A_fro = torch.sqrt(A.abs().pow(2).sum(dim=(1, 2), keepdim=True))
    # Scaling step: halve A until its norm drops below 5.
    maxnorm = torch.tensor([5.], dtype=A.dtype, device=A.device)
    zero = torch.tensor([0.0], dtype=A.dtype, device=A.device)
    n_squarings = torch.max(zero, torch.ceil(torch_log2(A_fro / maxnorm)))
    A_scaled = A / (2.0 ** n_squarings)
    n_squarings = n_squarings.flatten().type(torch.int64)
    # Padé(13): expm(A_scaled) ~= solve(-U + V, U + V).
    (U, V) = torch_pade13(A_scaled)
    P = U + V
    Q = (-U) + V
    # FIX: torch.solve(P, Q) was removed in PyTorch 1.13.  torch.linalg.solve
    # takes operands in (A, B) order and solves Q @ R = P, matching the old
    # torch.solve(B=P, A=Q) result (without the LU factor we discarded).
    R = torch.linalg.solve(Q, P)
    # Squaring step: square repeatedly, then pick the correct power per element.
    res = [R]
    for _ in range(int(n_squarings.max())):
        res.append(res[-1].matmul(res[-1]))
    R = torch.stack(res)
    expmA = R[(n_squarings, torch.arange(n_A))]
    # NOTE(review): only the first batch element is returned — confirm this is
    # the intended contract for callers.
    return expmA[0]
def test_stl_caster_vs_stl_bind(msg):
    """A bound VectorInt passes through both the STL caster and the binding,
    while a plain Python list only passes through the caster."""
    import pybind11_cross_module_tests as cm
    v1 = cm.VectorInt([1, 2, 3])
    assert (m.load_vector_via_caster(v1) == 6)
    assert (cm.load_vector_via_binding(v1) == 6)
    v2 = [1, 2, 3]
    assert (m.load_vector_via_caster(v2) == 6)
    # The strict binding rejects an untyped list with a descriptive TypeError.
    with pytest.raises(TypeError) as excinfo:
        (cm.load_vector_via_binding(v2) == 6)
    assert (msg(excinfo.value) == '\n load_vector_via_binding(): incompatible function arguments. The following argument types are supported:\n 1. (arg0: pybind11_cross_module_tests.VectorInt) -> int\n\n Invoked with: [1, 2, 3]\n ')
def test_format_response():
    """format_response should mention expiry and headers for cached responses."""
    cached = CachedResponse(status_code=200, expires=datetime(2021, 1, 1), headers={'Age': '0'})
    rendered = format_response(cached)
    for fragment in ('cached; expires in ', 'Age: 0'):
        assert fragment in rendered
    # Without an expiry the summary should say so explicitly.
    cached.expires = None
    assert 'never expires' in format_response(cached)
# NOTE(review): the leading ".parametrize" fragment looks like a
# @pytest.mark.parametrize decorator whose "@pytest.mark" prefix was lost
# in extraction -- confirm against the original source.
.parametrize('untied', [True, False])
def test_RanksComparatorPlotter_reg_unexpected_keyword_argument_color(untied):
    """plot.reg must reject an unexpected `color` keyword with TypeError."""
    # Two identical, fully tied rankings.
    rank0 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
    rank1 = agg.RankResult('test', ['a', 'b'], [1, 1], {})
    rcmp = ranks_cmp.mkrank_cmp(rank0, rank1)
    with pytest.raises(TypeError):
        rcmp.plot.reg(color='k', untied=untied)
def yaml_load(f: Union[(str, IO[str])]) -> Any:
    """Load YAML from a string or file object, working around PyYAML quirks.

    Wraps yaml.load with the project's YamlLoader, silences a known
    collections.abc DeprecationWarning raised inside PyYAML, converts a
    low-level float-parsing ValueError into a yaml.YAMLError, and logs a
    warning when loading takes unusually long.
    """
    start = datetime.datetime.now()
    # PyYAML (on some versions) triggers this DeprecationWarning internally.
    with log.py_warning_filter(category=DeprecationWarning, message="Using or importing the ABCs from 'collections' instead of from 'collections\\.abc' is deprecated.*"):
        try:
            data = yaml.load(f, Loader=YamlLoader)
        except ValueError as e:
            # PyYAML raises a bare ValueError for certain malformed floats;
            # surface it as a YAMLError so callers handle it uniformly.
            pyyaml_error = 'could not convert string to float'
            if str(e).startswith(pyyaml_error):
                raise yaml.YAMLError(e)
            raise
    end = datetime.datetime.now()
    delta = (end - start).total_seconds()
    # CI machines are slower, so allow a more generous deadline there.
    deadline = (10 if ('CI' in os.environ) else 2)
    if (delta > deadline):
        log.misc.warning('YAML load took unusually long, please report this at {}s\nPyYAML version: {}\nC extension: {}\nStack:\n\n{}'.format(delta, yaml.__version__, YAML_C_EXT, ''.join(traceback.format_stack())))
    return data
def test_magic():
    """libmagic should identify streamed HTML and plain-text content from peeked bytes."""
    mgc = magic.Magic(mime=True)
    with GeneratorFile(mimed_html_generator()) as f:
        buffered = BufferedReader(f)
        # Peek (rather than read) so the stream position stays untouched.
        file_header_bytes = buffered.peek(1024)
        assert (mgc.from_buffer(file_header_bytes) == 'text/html')
    with GeneratorFile(sample_generator()) as f:
        buffered = BufferedReader(f)
        file_header_bytes = buffered.peek(1024)
        assert (mgc.from_buffer(file_header_bytes) == 'text/plain')
class SlotSelector(discord.ui.Select):
    """Dropdown letting a user pick one reserved scrim slot.

    On selection, the chosen value ("<scrim_id>:<assigned_slot_id>") is stored
    on the parent view as ``custom_id`` and the view is stopped.
    """

    view: BaseSelector

    def __init__(self, bot, records):
        _options = []
        # Discord select menus allow at most 25 options.
        for record in records[:25]:
            reg_channel = bot.get_channel(record['registration_channel_id'])
            # Fall back to 'deleted-channel' when the channel no longer exists.
            _options.append(discord.SelectOption(label=f"Slot {record['num']} #{getattr(reg_channel, 'name', 'deleted-channel')}", description=f"{record['team_name']} (ID: {record['scrim_id']})", value=f"{record['scrim_id']}:{record['assigned_slot_id']}", emoji=''))
        super().__init__(placeholder='Select slot from this dropdown...', options=_options, max_values=1)

    async def callback(self, interaction: discord.Interaction) -> T.Any:
        (await interaction.response.defer())
        self.view.stop()
        # Hand the chosen "<scrim_id>:<slot_id>" back through the view.
        self.view.custom_id = interaction.data['values'][0]
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build a rotated detector from config, honouring legacy train/test cfg overrides."""
    legacy_cfgs_given = (train_cfg is not None) or (test_cfg is not None)
    if legacy_cfgs_given:
        warnings.warn('train_cfg and test_cfg is deprecated, please specify them in model', UserWarning)
    # Each cfg may be supplied either inline (cfg) or as an argument -- not both.
    assert ((cfg.get('train_cfg') is None) or (train_cfg is None)), 'train_cfg specified in both outer field and model field '
    assert ((cfg.get('test_cfg') is None) or (test_cfg is None)), 'test_cfg specified in both outer field and model field '
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return ROTATED_DETECTORS.build(cfg, default_args=default_args)
class ForecastDisplay(Observer, DisplayElement):
    """Observer that forecasts the weather from the barometric pressure trend."""

    __currentPressure: float = 29.92
    __lastPressure: float
    __weatherData: WeatherData

    def __init__(self, weatherData: WeatherData):
        self.__weatherData = weatherData
        weatherData.registerObserver(self)

    def update(self, temp: float, humidity: float, pressure: float) -> None:
        # Shift the current reading into history before recording the new one.
        self.__lastPressure, self.__currentPressure = self.__currentPressure, pressure
        self.display()

    def display(self) -> None:
        print('Forecast: ', end='')
        trend = self.__currentPressure - self.__lastPressure
        if trend > 0:
            print('Improving weather on the way!')
        elif trend == 0:
            print('More of the same')
        elif trend < 0:
            print('Watch out for cooler, rainy weather')
def runningInNotebook():
    """Return True when executing inside a Jupyter notebook kernel."""
    try:
        # get_ipython is injected into builtins by IPython; absent otherwise.
        shell_name = get_ipython().__class__.__name__
    except NameError:
        # Plain Python interpreter.
        return False
    # ZMQInteractiveShell backs Jupyter notebooks; anything else is not one.
    return shell_name == 'ZMQInteractiveShell'
# NOTE(review): the two leading ('beeref...') lines look like @patch
# decorators whose "@patch" prefix was lost in extraction -- confirm.
('beeref.actions.mixin.menu_structure')
('beeref.actions.mixin.actions')
def test_build_menu_and_actions_with_submenu(actions_mock, menu_mock, qapp):
    """A menu structure containing a submenu gets its action added under that submenu."""
    widget = FooWidget()
    # One registered action ('foo') referenced by a single '&Bar' submenu.
    actions_mock.__iter__.return_value = [{'id': 'foo', 'text': '&Foo', 'callback': 'on_foo', 'group': 'bar'}]
    menu_mock.__iter__.return_value = [{'menu': '&Bar', 'items': ['foo']}]
    with patch('PyQt6.QtWidgets.QMenu.addAction') as add_mock:
        with patch('PyQt6.QtWidgets.QMenu.addMenu') as addmenu_mock:
            addmenu_mock.return_value = QtWidgets.QMenu()
            widget.build_menu_and_actions()
    assert isinstance(widget.context_menu, QtWidgets.QMenu)
    addmenu_mock.assert_called_once_with('&Bar')
    add_mock.assert_called_once_with(widget.bee_actions['foo'])
class Effect11423(BaseEffect):
    """Passive ship bonus boosting heavy-missile damage of all four damage types."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        bonus = ship.getModifiedItemAttr('shipBonusAB')
        uses_heavy_missiles = lambda mod: mod.charge.requiresSkill('Heavy Missiles')
        # Apply the same bonus to each damage-type attribute.
        for dmgType in ('em', 'kinetic', 'explosive', 'thermal'):
            fit.modules.filteredChargeBoost(uses_heavy_missiles, f'{dmgType}Damage', bonus, skill='Amarr Battleship', **kwargs)
# NOTE(review): the leading ".parametrize" lines look like
# @pytest.mark.parametrize decorators whose "@pytest.mark" prefix was lost
# in extraction -- confirm against the original source.
.parametrize('tensor', [torch.rand(2, 3, 4, 5), torch.rand(2, 3, 4, 5, 6)])
.parametrize('idx', range(3))
.parametrize('ndim', range(1, 4))
.parametrize('slice_leading_dims', [True, False])
def test_getitem_batch_size_mask(tensor, idx, ndim, slice_leading_dims):
    """_getitem_batch_size must predict the shape produced by boolean-mask indexing."""
    if ((idx + ndim) > 4):
        pytest.skip('Not enough dimensions in test tensor for this combination of parameters')
    # Mask covers `ndim` consecutive dims starting at `idx`.
    mask_shape = (2, 3, 4, 5)[idx:(idx + ndim)]
    mask = torch.randint(2, mask_shape, dtype=torch.bool)
    # Leading dims are either kept via full slices or collapsed via scalar 0s.
    if slice_leading_dims:
        index = (((slice(None),) * idx) + (mask,))
    else:
        index = (((0,) * idx) + (mask,))
    index = convert_ellipsis_to_idx(index, tensor.shape)
    assert (tensor[index].shape == _getitem_batch_size(tensor.shape, index)), index
class TestReadBytes():
    """Exercise Adapter.read_bytes: fixed counts, -1 (read all) and termchar handling.

    NOTE(review): the bare "()" and ".parametrize" fragments below look like
    decorators (e.g. @pytest.fixture / @pytest.mark.parametrize) whose "@..."
    prefix was lost in extraction -- confirm against the original source.
    """

    ()
    def adapterR(self, adapter):
        # Fixture: adapter with a pending '*IDN?' reply queued.
        adapter.write('*IDN?')
        (yield adapter)

    def test_read_bytes(self, adapterR):
        assert (adapterR.read_bytes(22) == b'SCPI,MOCK,VERSION_1.0\n')

    def test_read_all_bytes(self, adapterR):
        # -1 means "read everything available".
        assert (adapterR.read_bytes((- 1)) == b'SCPI,MOCK,VERSION_1.0\n')

    .parametrize('count', ((- 1), 7))
    def test_read_break_on_termchar(self, adapterR, count):
        adapterR.connection.read_termination = ','
        assert (adapterR.read_bytes(count, break_on_termchar=True) == b'SCPI,')

    def test_read_no_break_on_termchar(self, adapterR):
        # Without break_on_termchar the termination character is ignored.
        adapterR.connection.read_termination = ','
        assert (adapterR.read_bytes((- 1)) == b'SCPI,MOCK,VERSION_1.0\n')

    def test_read_no_break_on_newline(self, adapter):
        adapter.write('*IDN?')
        adapter.write('*IDN?')
        assert (adapter.read_bytes((- 1)) == b'SCPI,MOCK,VERSION_1.0\nSCPI,MOCK,VERSION_1.0\n')
# NOTE(review): the leading ".parametrize" fragment looks like a
# @pytest.mark.parametrize decorator whose "@pytest.mark" prefix was lost
# in extraction -- confirm.
.parametrize('ndarray_type', ['numpy', 'cupy'])
def test_regenie__glow_comparison(ndarray_type: str, datadir: Path) -> None:
    """Run every configured regenie simulation and compare against Glow results."""
    # Skip the whole test when the requested array backend is unavailable.
    xp = pytest.importorskip(ndarray_type)
    with open((datadir / 'config.yml')) as fd:
        config = yaml.load(fd, Loader=yaml.FullLoader)
    for run in config['runs']:
        check_simulation_result(datadir, config, run, xp)
def test_column_lateral_ref_within_subquery():
    """Column lineage must resolve lateral column aliases defined inside a subquery."""
    # `alias1` is defined and then referenced laterally by the later `name`
    # expression within the same subquery SELECT list.
    sql = '\n insert into public.tgt_tbl1\n select\n sq.name\n from\n (\n select\n id || name as alias1,\n alias1 || email as name\n from\n public.src_tbl1\n ) as sq\n '
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('id', 'public.src_tbl1'), ColumnQualifierTuple('name', 'public.tgt_tbl1')), (ColumnQualifierTuple('name', 'public.src_tbl1'), ColumnQualifierTuple('name', 'public.tgt_tbl1')), (ColumnQualifierTuple('email', 'public.src_tbl1'), ColumnQualifierTuple('name', 'public.tgt_tbl1'))])
    # Same lateral reference, but the source columns come from two joined tables.
    sql = '\n insert into public.tgt_tbl1\n select\n sq.name\n from\n (\n select\n st1.id || st1.name as alias1,\n alias1 || st2.email as name\n from\n public.src_tbl1 as st1\n join\n public.src_tbl2 as st2\n on\n st1.id = st2.id\n ) as sq\n '
    assert_column_lineage_equal(sql, [(ColumnQualifierTuple('id', 'public.src_tbl1'), ColumnQualifierTuple('name', 'public.tgt_tbl1')), (ColumnQualifierTuple('name', 'public.src_tbl1'), ColumnQualifierTuple('name', 'public.tgt_tbl1')), (ColumnQualifierTuple('email', 'public.src_tbl2'), ColumnQualifierTuple('name', 'public.tgt_tbl1'))])
class UnetConv3(nn.Module):
    """Two stacked 3D convolution blocks (optionally instance-normalised) for UNet."""

    def __init__(self, in_size, out_size, is_batchnorm, kernel_size=(3, 3, 1), padding_size=(1, 1, 0), init_stride=(1, 1, 1)):
        super(UnetConv3, self).__init__()
        if is_batchnorm:
            # Conv -> InstanceNorm -> ReLU, twice (first block may stride).
            self.conv1 = nn.Sequential(
                nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
                nn.InstanceNorm3d(out_size),
                nn.ReLU(inplace=True),
            )
            self.conv2 = nn.Sequential(
                nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                nn.InstanceNorm3d(out_size),
                nn.ReLU(inplace=True),
            )
        else:
            # Plain Conv -> ReLU, twice.
            self.conv1 = nn.Sequential(
                nn.Conv3d(in_size, out_size, kernel_size, init_stride, padding_size),
                nn.ReLU(inplace=True),
            )
            self.conv2 = nn.Sequential(
                nn.Conv3d(out_size, out_size, kernel_size, 1, padding_size),
                nn.ReLU(inplace=True),
            )
        # Kaiming-initialise every child block.
        for child in self.children():
            init_weights(child, init_type='kaiming')

    def forward(self, inputs):
        hidden = self.conv1(inputs)
        return self.conv2(hidden)
def ebic(log_lik, n_samples, n_features, n_support, gamma='default', fit_intercept=True):
    """Extended Bayesian Information Criterion.

    Adds a model-size penalty ``2 * gamma * log C(n_features, n_support)`` on
    top of the ordinary BIC.

    Parameters
    ----------
    log_lik : float
        Log-likelihood of the fitted model.
    n_samples : int
        Number of observations.
    n_features : int
        Number of candidate features.
    n_support : int
        Number of selected (non-zero) features.
    gamma : float or 'default'
        EBIC gamma in [0, 1]; 'default' uses 1 - log(n)/(2 log(p)), clipped.
    fit_intercept : bool
        When True, the intercept counts as one extra always-selected feature.

    Raises
    ------
    ValueError
        If an explicitly supplied gamma lies outside [0, 1].
    """
    if fit_intercept:
        n_features = (n_features + 1)
        n_support = (n_support + 1)
    if (gamma == 'default'):
        gamma = (1 - (0.5 * (np.log(n_samples) / np.log(n_features))))
        gamma = np.clip(gamma, a_min=0, a_max=1)
    # FIX: `assert` is stripped under `python -O`; validate explicitly so a
    # bad user-supplied gamma always raises.
    if not ((gamma >= 0) and (gamma <= 1)):
        raise ValueError('Gamma should be in [0, 1]')
    log_model_size = log_binom(n=n_features, k=n_support)
    return (bic(log_lik=log_lik, n_samples=n_samples, dof=n_support) + ((2 * gamma) * log_model_size))
_doc(np.cross)
def cross(a, b, axisa=(- 1), axisb=(- 1), axisc=(- 1), axis=None):
    """Quantity-aware np.cross: the result's dimensionality is the product of
    the operands' dimensionalities.

    Falls back to plain np.cross only when NEITHER operand is a Quantity;
    a lone Quantity operand promotes the other to dimensionless.
    """
    # BUG FIX: the original guard used `and` inside the negation, so any call
    # with at least one plain array returned early and the dimensionless
    # promotion below was dead code. Only bypass units when neither operand
    # carries them.
    if (not (isinstance(a, Quantity) or isinstance(b, Quantity))):
        return np.cross(a, b, axisa, axisb, axisc, axis)
    # Promote the non-Quantity operand to a dimensionless Quantity.
    if (not isinstance(a, Quantity)):
        a = Quantity(a, dimensionless, copy=False)
    if (not isinstance(b, Quantity)):
        b = Quantity(b, dimensionless, copy=False)
    return Quantity(np.cross(a, b, axisa, axisb, axisc, axis), (a._dimensionality * b._dimensionality), copy=False)
class QuadraticProgramToQubo(QuadraticProgramConverter):
    """Convert a QuadraticProgram into QUBO form.

    Pipeline: inequality -> equality constraints, integer -> binary variables,
    then linear equality constraints -> quadratic penalty terms.

    NOTE(review): `get_compatibility_msg` takes no `self` and the two `penalty`
    defs below look like a @property / @penalty.setter pair -- their decorators
    (and a probable @staticmethod) appear lost in extraction; confirm against
    the original source.
    """

    def __init__(self, penalty: Optional[float]=None) -> None:
        # Imported locally to avoid circular imports between converter modules.
        from ..converters.integer_to_binary import IntegerToBinary
        from ..converters.inequality_to_equality import InequalityToEquality
        from ..converters.linear_equality_to_penalty import LinearEqualityToPenalty
        self._int_to_bin = IntegerToBinary()
        self._ineq_to_eq = InequalityToEquality(mode='integer')
        self._penalize_lin_eq_constraints = LinearEqualityToPenalty(penalty=penalty)

    def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
        """Run the full conversion pipeline; raise if the problem is incompatible."""
        msg = self.get_compatibility_msg(problem)
        if (len(msg) > 0):
            raise QiskitOptimizationError('Incompatible problem: {}'.format(msg))
        problem_ = self._ineq_to_eq.convert(problem)
        problem_ = self._int_to_bin.convert(problem_)
        problem_ = self._penalize_lin_eq_constraints.convert(problem_)
        return problem_

    def interpret(self, x: Union[(np.ndarray, List[float])]) -> np.ndarray:
        """Map a QUBO solution back to the original variables (reverse order)."""
        x = self._penalize_lin_eq_constraints.interpret(x)
        x = self._int_to_bin.interpret(x)
        x = self._ineq_to_eq.interpret(x)
        return x

    def get_compatibility_msg(problem: QuadraticProgram) -> str:
        """Return '' when compatible, otherwise a message listing every blocker."""
        msg = ''
        if (problem.get_num_continuous_vars() > 0):
            msg += 'Continuous variables are not supported! '
        if (len(problem.quadratic_constraints) > 0):
            msg += 'Quadratic constraints are not supported. '
        # Integer slack variables require all constraint coefficients to be
        # integral; check linear and quadratic constraints alike.
        compatible_with_integer_slack = True
        for l_constraint in problem.linear_constraints:
            linear = l_constraint.linear.to_dict()
            if any(((isinstance(coef, float) and (not coef.is_integer())) for coef in linear.values())):
                compatible_with_integer_slack = False
        for q_constraint in problem.quadratic_constraints:
            linear = q_constraint.linear.to_dict()
            quadratic = q_constraint.quadratic.to_dict()
            if (any(((isinstance(coef, float) and (not coef.is_integer())) for coef in quadratic.values())) or any(((isinstance(coef, float) and (not coef.is_integer())) for coef in linear.values()))):
                compatible_with_integer_slack = False
        if (not compatible_with_integer_slack):
            msg += 'Can not convert inequality constraints to equality constraint because float coefficients are in constraints. '
        return msg

    def is_compatible(self, problem: QuadraticProgram) -> bool:
        return (len(self.get_compatibility_msg(problem)) == 0)

    def penalty(self) -> Optional[float]:
        # Presumably the @property getter -- see class NOTE above.
        return self._penalize_lin_eq_constraints.penalty

    def penalty(self, penalty: Optional[float]) -> None:
        # Presumably the @penalty.setter -- see class NOTE above.
        self._penalize_lin_eq_constraints.penalty = penalty
def create_experiment(config, resume=None):
    """Create (or resume) an experiment directory and persist the config.

    Parameters
    ----------
    config : object
        Must expose a ``logdir`` attribute (may contain env vars) and a
        ``dump()`` method returning the serialised config text.
    resume : str or None
        Existing experiment directory to reuse; when None a fresh one is made.

    Returns
    -------
    str
        Path to the experiment directory.
    """
    if (resume is not None):
        print(('\n==> Restoring experiment from directory:\n' + resume))
        logdir = resume
    else:
        name = 'TR_MC_nusc'
        logdir = os.path.join(os.path.expandvars(config.logdir), name)
        print(('\n==> Creating new experiment in directory:\n' + logdir))
        os.makedirs(logdir, exist_ok=True)
        # BUG FIX: the image folders were created under the raw config.logdir
        # (ignoring the experiment name and env-var expansion); create them
        # inside the experiment directory instead.
        os.makedirs(os.path.join(logdir, 'val_images'), exist_ok=True)
        os.makedirs(os.path.join(logdir, 'train_images'), exist_ok=True)
    print(config.dump())
    # Persist the exact config this run was launched with.
    with open(os.path.join(logdir, 'config.yml'), 'w') as f:
        f.write(config.dump())
    return logdir
class Effect6153(BaseEffect):
    """Passive mode effect reducing MWD capacitor need for HSM-skilled modules."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # The attribute is a post-divider, so apply its reciprocal as a multiplier.
        cap_multiplier = 1 / module.getModifiedItemAttr('modeMWDCapPostDiv')
        fit.modules.filteredItemMultiply(
            (lambda mod: mod.item.requiresSkill('High Speed Maneuvering')),
            'capacitorNeed',
            cap_multiplier,
            **kwargs,
        )
def run_random_search(max_time_budget=5000000.0):
    """Random-search baseline over NAS-Bench: sample specs until the time budget is spent.

    Returns (times, best_valids, best_tests); the running-incumbent lists are
    prefixed with 0.0 so they align with the cumulative-time list.
    """
    nasbench.reset_budget_counters()
    (times, best_valids, best_tests) = ([0.0], [0.0], [0.0])
    while True:
        spec = random_spec()
        data = nasbench.query(spec)
        # Track the incumbent by validation accuracy; test accuracy rides along
        # (it is never used for selection, only reported).
        if (data['validation_accuracy'] > best_valids[(- 1)]):
            best_valids.append(data['validation_accuracy'])
            best_tests.append(data['test_accuracy'])
        else:
            best_valids.append(best_valids[(- 1)])
            best_tests.append(best_tests[(- 1)])
        (time_spent, _) = nasbench.get_budget_counters()
        times.append(time_spent)
        if (time_spent > max_time_budget):
            break
    return (times, best_valids, best_tests)
class SizeEstimator(object):
    """Estimate a model's memory footprint (parameters + activations + input).

    Sizes are computed in bits and reported as (megabytes, total_bits).
    """

    def __init__(self, model, input_size=(1, 1, 32, 32), bits=32):
        self.model = model
        self.input_size = input_size
        # BUG FIX: the original hard-coded 32 here, silently ignoring the
        # caller-supplied `bits` argument.
        self.bits = bits
        return

    def get_parameter_sizes(self):
        """Record the shape of every parameter of every (non-root) module."""
        mods = list(self.model.modules())
        sizes = []
        for i in range(1, len(mods)):
            m = mods[i]
            p = list(m.parameters())
            for j in range(len(p)):
                sizes.append(np.array(p[j].size()))
        self.param_sizes = sizes
        return

    def get_output_sizes(self):
        """Record output shapes by feeding a dummy input through each module in turn.

        NOTE: assumes the modules form a simple sequential chain -- confirm
        before using with branching architectures.
        """
        # `Variable(..., volatile=True)` is long deprecated; torch.no_grad()
        # is the modern equivalent for inference-only shape probing.
        with torch.no_grad():
            input_ = torch.zeros(*self.input_size)
            mods = list(self.model.modules())
            out_sizes = []
            for i in range(1, len(mods)):
                m = mods[i]
                out = m(input_)
                out_sizes.append(np.array(out.size()))
                input_ = out
        self.out_sizes = out_sizes
        return

    def calc_param_bits(self):
        """Total bits needed to store all parameters."""
        total_bits = 0
        for i in range(len(self.param_sizes)):
            s = self.param_sizes[i]
            bits = (np.prod(np.array(s)) * self.bits)
            total_bits += bits
        self.param_bits = total_bits
        return

    def calc_forward_backward_bits(self):
        """Bits for intermediate activations; doubled to account for gradients."""
        total_bits = 0
        for i in range(len(self.out_sizes)):
            s = self.out_sizes[i]
            bits = (np.prod(np.array(s)) * self.bits)
            total_bits += bits
        self.forward_backward_bits = (total_bits * 2)
        return

    def calc_input_bits(self):
        """Bits for the input tensor itself."""
        self.input_bits = (np.prod(np.array(self.input_size)) * self.bits)
        return

    def estimate_size(self):
        """Run the full pipeline and return (total_megabytes, total_bits)."""
        self.get_parameter_sizes()
        self.get_output_sizes()
        self.calc_param_bits()
        self.calc_forward_backward_bits()
        self.calc_input_bits()
        total = ((self.param_bits + self.forward_backward_bits) + self.input_bits)
        total_megabytes = ((total / 8) / (1024 ** 2))
        return (total_megabytes, total)
def test_trustme_cli_identities(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
    """`trustme -i <names>` writes key/cert files into the working directory."""
    monkeypatch.chdir(tmp_path)
    main(argv=['-i', 'example.org', 'www.example.org'])
    for expected in ('server.key', 'server.pem', 'client.pem'):
        assert (tmp_path / expected).exists()
def _parse_HITRAN_class6(df, verbose=True, dataframe_type='pandas'):
    """Parse HITRAN class-6 global quanta (v1, v2, v3) for upper/lower states.

    Extracts the vibrational quantum numbers from the fixed-width `globu`
    (upper) and `globl` (lower) columns, replacing them with typed
    v1u/v2u/v3u and v1l/v2l/v3l fields. Supports pandas and vaex frames.
    """
    if (dataframe_type == 'pandas'):
        # Fixed-width layout: 9 spaces of padding, then three 2-char fields
        # that may contain digits, minus signs, or blanks.
        dgu = df['globu'].astype(str).str.extract('[ ]{9}(?P<v1u>[\\-\\d ]{2})(?P<v2u>[\\-\\d ]{2})(?P<v3u>[\\-\\d ]{2})', expand=True)
        dgl = df['globl'].astype(str).str.extract('[ ]{9}(?P<v1l>[\\-\\d ]{2})(?P<v2l>[\\-\\d ]{2})(?P<v3l>[\\-\\d ]{2})', expand=True)
        # Blank fields become missing values in the integer columns.
        cast_to_int64_with_missing_values(dgu, ['v1u', 'v2u', 'v3u'])
        cast_to_int64_with_missing_values(dgl, ['v1l', 'v2l', 'v3l'])
        del df['globu']
        del df['globl']
        return pd.concat([df, dgu, dgl], axis=1)
    elif (dataframe_type == 'vaex'):
        extracted_values = df['globu'].str.extract_regex(pattern='[ ]{9}(?P<v1u>[\\-\\d ]{2})(?P<v2u>[\\-\\d ]{2})(?P<v3u>[\\-\\d ]{2})')
        extract_columns(df, extracted_values, ['v1u', 'v2u', 'v3u'])
        extracted_values = df['globl'].str.extract_regex(pattern='[ ]{9}(?P<v1l>[\\-\\d ]{2})(?P<v2l>[\\-\\d ]{2})(?P<v3l>[\\-\\d ]{2})')
        extract_columns(df, extracted_values, ['v1l', 'v2l', 'v3l'])
        # vaex drops columns in place rather than via `del`.
        df.drop('globu', inplace=True)
        df.drop('globl', inplace=True)
        return df
    else:
        raise NotImplementedError(dataframe_type)
class BehavioralRTLIRTypeEnforcerL1(bir.BehavioralRTLIRNodeVisitor):
    """Level-1 type enforcer: forces implicit-bitwidth RTLIR nodes to the
    bitwidth demanded by their enclosing context."""

    def __init__(s, component):
        s.component = component

    def enter(s, blk, context, node):
        """Walk `node` (the AST of `blk`) with `context` as the root type context."""
        s.blk = blk
        # Stack of context types; the top is the type currently being imposed.
        s.stack = deque([])
        with s.register_context(context):
            s.visit(node)

    # NOTE(review): used as a context manager via `with`, so this generator is
    # presumably decorated with @contextmanager in the original (decorator
    # lost in extraction) -- confirm.
    def register_context(s, context_type):
        s.stack.append(context_type)
        (yield)
        s.stack.pop()

    def get_context(s, node, obj):
        """Return the innermost context type, or raise if none was provided."""
        if (not s.stack):
            raise PyMTLTypeError(s.blk, node.ast, f'no context was provided to validate the inferred bitwidth of {obj}!')
        return s.stack[(- 1)]

    def mutate_datatype(s, node, descp):
        """Overwrite an implicitly-typed node's dtype with the context's dtype."""
        if (not node._is_explicit):
            target_Type = s.get_context(node, descp).get_dtype()
            # Deep-copy first so sibling nodes sharing the Type object are unaffected.
            node.Type = copy.deepcopy(node.Type)
            node.Type.dtype = target_Type

    def visit_FreeVar(s, node):
        s.mutate_datatype(node, node.obj)

    def visit_Number(s, node):
        s.mutate_datatype(node, node.value)

    def visit_Attribute(s, node):
        s.mutate_datatype(node, node.attr)

    def visit_Index(s, node):
        s.mutate_datatype(node, 'indexing')
class CircularBuffer():
    """Fixed-capacity ring buffer over a numpy array with last/mean queries."""

    def __init__(self, size):
        self.max_size = size
        self.data = np.zeros(self.max_size)
        # Number of valid entries so far (saturates at max_size).
        self.size = 0
        # Index of the most recently written slot; -1 means empty.
        self.pointer = (- 1)

    def add(self, element):
        """Append `element`, overwriting the oldest entry once full; return it."""
        if self.size < self.max_size:
            self.size += 1
        self.pointer = (self.pointer + 1) % self.max_size
        self.data[self.pointer] = element
        return element

    def last(self):
        """Return the most recently added element."""
        assert (self.pointer != (- 1)), "Can't get an element from an empty buffer!"
        return self.data[self.pointer]

    def mean(self):
        """Mean of the stored elements, or 0.0 for an empty buffer."""
        return self.data[:self.size].mean() if self.size > 0 else 0.0
def main():
    """Launch between-graph replicated ResNet training across the cluster.

    Reads the cluster spec from FLAGS.cluster_spec_file, builds one model
    replica per worker with variables placed on the parameter servers, then
    runs 100 steps against the first worker's gRPC master.
    """
    with open(FLAGS.cluster_spec_file) as fp:
        cluster_spec = json.load(fp)
    workers = cluster_spec['worker']
    # Worker 0 acts as the session master.
    master = workers[0]
    number_of_ps = len(cluster_spec['ps'])
    ps_job = '/job:ps/'
    train_ops = []
    for (dev_id, _) in enumerate(workers):
        device = '/job:worker/task:{}/'.format(dev_id)
        # Reuse variables after the first replica so all workers share weights.
        with tf.variable_scope('resnet', reuse=(dev_id != 0)):
            with tf.device(tf.train.replica_device_setter(ps_tasks=number_of_ps, ps_device=ps_job, worker_device=device)):
                train = base_model()
                train_ops.append(train)
    with tf.train.MonitoredTrainingSession(master='grpc://{}'.format(master)) as sess:
        for _ in range(100):
            sess.run(train_ops)
def pack(v_short, v_post):
    """Bundle a freshly built PDFium library into the sourcebuild data dir.

    Copies the platform library, writes version metadata, and regenerates
    the ctypes bindings against the PDFium public headers.
    """
    dest_dir = (DataDir / ExtPlats.sourcebuild)
    dest_dir.mkdir(parents=True, exist_ok=True)
    # Platform-specific library filename (e.g. .so / .dylib / .dll).
    libname = LibnameForSystem[Host.system]
    shutil.copy((PDFiumBuildDir / libname), (dest_dir / libname))
    write_pdfium_info(dest_dir, v_short, origin='sourcebuild', **v_post)
    run_ctypesgen(dest_dir, headers_dir=(PDFiumDir / 'public'), compile_lds=[dest_dir])
class BusinessLogicTests(object):
    """Shared test suite for BusinessLogic against a concrete store backend.

    Mixin: subclasses are expected to provide get_store()/free_store() (and a
    unittest.TestCase base). Every test exercises the public BusinessLogic API
    (create/update/trigger/pause/delete/list/find_identifier/...).

    NOTE(review): several fragments below -- e.g. ('smtplib.SMTP',
    autospec=True) -- look like @patch / @patch.object decorators whose
    "@..." prefix was lost in extraction; confirm against the original source.
    """

    def setUp(self):
        self.store = self.get_store()
        self.logic = BusinessLogic(self.store, '')

    def tearDown(self):
        # Clear any alarm a test scheduled so it cannot fire across tests.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, signal.SIG_DFL)
        self.free_store()

    def test_noop(self):
        pass

    def test_create(self):
        created = self.logic.create(name='test_create', periodicity=12345)
        fetched = self.logic.get(created['id'])
        self.assertEqual(created, fetched)

    def test_create_invalid(self):
        # Bad name types / empty names.
        with self.assertRaises(TypeError):
            self.logic.create(name=2, periodicity=12346)
        with self.assertRaises(TypeError):
            self.logic.create(name='', periodicity=12346)
        # Duplicate names are rejected.
        self.logic.create(name='test_create_invalid', periodicity=12346)
        with self.assertRaises(AlreadyExistsError):
            self.logic.create(name='test_create_invalid', periodicity=12346)
        # Bad periodicity / description / emails / paused types.
        with self.assertRaises(TypeError):
            self.logic.create(name='test_create_invalid2', periodicity='abc')
        with self.assertRaises(TypeError):
            self.logic.create(name='test_create_invalid2', periodicity=(- 1))
        with self.assertRaises(TypeError):
            self.logic.create(name='test_create_invalid2', periodicity=12346, description=2)
        with self.assertRaises(TypeError):
            self.logic.create(name='test_create_invalid2', periodicity=12346, emails='test_create_')
        with self.assertRaises(TypeError):
            self.logic.create(name='test_create_invalid2', periodicity=12346, paused='abc')

    def test_create_emails_none(self):
        self.logic.create(name='test_create_emails_none', periodicity=12347, emails=None)

    def test_update(self):
        created = self.logic.create(name='test_update', periodicity=12347)
        self.logic.update(created['id'], name='test_update2', periodicity=12348, description='test_update2 description', emails=['test_'])
        fetched = self.logic.get(created['id'])
        self.assertEqual(fetched['name'], 'test_update2')
        self.assertEqual(fetched['periodicity'], 12348)
        self.assertEqual(fetched['description'], 'test_update2 description')
        self.assertEqual(fetched['emails'], ['test_'])

    def test_update_same_slug(self):
        # Renaming with only case changes must keep the same slug.
        created = self.logic.create(name='test_update_same_slug', periodicity=5)
        self.logic.update(created['id'], name='Test_Update_Same_Slug')
        fetched = self.logic.get(created['id'])
        self.assertEqual(fetched['name'], 'Test_Update_Same_Slug')
        self.assertEqual(created['slug'], fetched['slug'])

    def test_update_paused(self):
        created = self.logic.create('test_update_paused', periodicity=5)
        self.logic.pause(created['id'])
        self.logic.update(created['id'], periodicity=10)
        fetched = self.logic.get(created['id'])
        # A paused canary must not gain a deadline from the update.
        self.assertNotIn('deadline', fetched)

    def test_update_invalid(self):
        created = self.logic.create(name='test_update_invalid', periodicity=12349)
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], name=2)
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], name='')
        self.logic.create(name='test_update_invalid2', periodicity=12350)
        with self.assertRaises(AlreadyExistsError):
            self.logic.update(created['id'], name='test_update_invalid2')
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], periodicity='abc')
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], periodicity=(- 1))
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], description=2)
        with self.assertRaises(TypeError):
            self.logic.update(created['id'], emails='test_update_')
        # An update with nothing to change is an error.
        with self.assertRaises(ValueError):
            self.logic.update(created['id'])

    def test_update_late_change(self):
        created = self.logic.create(name='test_update_late_change', periodicity=12351)
        time.sleep(1)
        self.logic.update(created['id'], periodicity=1)
        fetched = self.logic.get(created['id'])
        self.assertNotEqual(created['periodicity'], fetched['periodicity'])

    def test_update_not_found(self):
        with self.assertRaises(CanaryNotFoundError):
            self.logic.update('testunfo', name='test_update_not_found')

    def test_store_unset(self):
        created = self.logic.create('foo', 20)
        self.logic.pause(created['id'])

    def test_trigger(self):
        created = self.logic.create(name='test_trigger', periodicity=12352)
        self.logic.trigger(created['id'])
        self.logic.trigger(created['id'], comment='test_trigger comment')

    def test_trigger_late(self):
        # Triggering after the deadline has passed must still succeed.
        created = self.logic.create(name='test_trigger_late', periodicity=1)
        time.sleep(1.1)
        self.logic.trigger(created['id'])

    def test_trigger_paused(self):
        created = self.logic.create(name='test_trigger_paused', periodicity=12353, paused=True)
        self.logic.trigger(created['id'])

    def test_trigger_not_found(self):
        with self.assertRaises(CanaryNotFoundError):
            self.logic.trigger('testtnfo')

    def test_pause(self):
        created = self.logic.create(name='test_pause', periodicity=1)
        time.sleep(1.1)
        self.logic.pause(created['id'])
        # Double pause / double unpause are rejected.
        with self.assertRaises(AlreadyPausedError):
            self.logic.pause(created['id'])
        self.logic.unpause(created['id'])
        with self.assertRaises(AlreadyUnpausedError):
            self.logic.unpause(created['id'])
        self.logic.pause(created['id'], comment='test_pause pause comment')
        self.logic.unpause(created['id'], comment='test_pause unpause comment')

    def test_pause_not_found(self):
        with self.assertRaises(CanaryNotFoundError):
            self.logic.pause('testpnfo')

    def test_unpause_not_found(self):
        with self.assertRaises(CanaryNotFoundError):
            self.logic.unpause('testunfo')

    def test_delete(self):
        created = self.logic.create(name='test_delete', periodicity=12354)
        self.logic.get(created['id'])
        self.logic.delete(created['id'])
        with self.assertRaises(CanaryNotFoundError):
            self.logic.get(created['id'])

    def test_delete_not_found(self):
        with self.assertRaises(CanaryNotFoundError):
            self.logic.delete('testdnfo')

    def test_list(self):
        self.logic.list()

    def test_list_no_paused_canaries(self):
        self.logic.create('not-paused', 20)
        self.assertEqual(next(self.logic.list())['name'], 'not-paused')
        self.assertEqual(next(self.logic.list(paused=False))['name'], 'not-paused')
        with self.assertRaises(StopIteration):
            next(self.logic.list(paused=True))

    def test_list_only_paused_canary(self):
        self.logic.create('paused', 20, paused=True)
        self.assertEqual(next(self.logic.list())['name'], 'paused')
        self.assertEqual(next(self.logic.list(paused=True))['name'], 'paused')
        with self.assertRaises(StopIteration):
            next(self.logic.list(paused=False))

    def test_list_paused_and_unpaused_canary(self):
        self.logic.create('not-paused', 10)
        self.logic.create('paused', 20, paused=True)
        # Unfiltered listing returns both, in unspecified order.
        iterator = self.logic.list()
        self.assertEqual(set((next(iterator)['name'], next(iterator)['name'])), set(('not-paused', 'paused')))
        iterator = self.logic.list(paused=True)
        self.assertEqual(next(iterator)['name'], 'paused')
        with self.assertRaises(StopIteration):
            next(iterator)
        iterator = self.logic.list(paused=False)
        self.assertEqual(next(iterator)['name'], 'not-paused')
        with self.assertRaises(StopIteration):
            next(iterator)

    def test_list_no_late_canaries(self):
        self.logic.create('not-late', 20)
        self.assertEqual(next(self.logic.list())['name'], 'not-late')
        self.assertEqual(next(self.logic.list(late=False))['name'], 'not-late')
        with self.assertRaises(StopIteration):
            next(self.logic.list(late=True))

    def test_list_only_late_canary(self):
        self.logic.create('late', 1)
        time.sleep(1.1)
        self.assertEqual(next(self.logic.list())['name'], 'late')
        self.assertEqual(next(self.logic.list(late=True))['name'], 'late')
        with self.assertRaises(StopIteration):
            next(self.logic.list(late=False))

    def test_list_late_and_not_late_canary(self):
        self.logic.create('late', 1)
        self.logic.create('not-late', 20)
        time.sleep(1.1)
        iterator = self.logic.list()
        self.assertEqual(set((next(iterator)['name'], next(iterator)['name'])), set(('not-late', 'late')))
        iterator = self.logic.list(late=True)
        self.assertEqual(next(iterator)['name'], 'late')
        with self.assertRaises(StopIteration):
            next(iterator)
        iterator = self.logic.list(late=False)
        self.assertEqual(next(iterator)['name'], 'not-late')
        with self.assertRaises(StopIteration):
            next(iterator)

    def test_list_search(self):
        self.logic.create('foo', 20)
        next(self.logic.list(search='foo'))
        with self.assertRaises(StopIteration):
            next(self.logic.list(search='froodlefreedle'))
        # NOTE(review): this statement may belong to a separate
        # test_list_verbose method whose `def` line was lost in extraction --
        # confirm against the original source.
        next(self.logic.list(verbose=True))

    ('smtplib.SMTP', autospec=True)
    def test_notify(self, mock):
        created = self.logic.create(name='test_notify', periodicity=1, emails=['test_'])
        time.sleep(1.1)
        self.logic.trigger(created['id'])
        # The mocked SMTP instance should connect and then send mail.
        self.assertEqual(mock.method_calls[0][0], '().connect')
        self.assertEqual(mock.method_calls[0][1], ('localhost', 0))
        self.assertEqual(mock.method_calls[1][0], '().sendmail')
        self.logic.delete(created['id'])

    (smtplib.SMTP, 'connect', side_effect=Exception, autospec=True)
    def test_notify_exception(self, mock):
        created = self.logic.create(name='test_notify', periodicity=1, emails=['test_'])
        time.sleep(1.1)
        # Notification failure must not propagate out of trigger().
        self.logic.trigger(created['id'])
        self.logic.delete(created['id'])

    ('smtplib.SMTP', autospec=True)
    def test_notify_username(self, mock):
        with patch.object(self.logic, 'smtp_username', 'smtpu'), patch.object(self.logic, 'smtp_password', 'smtpp'):
            created = self.logic.create(name='test_notify', periodicity=1, emails=['test_'])
            time.sleep(1.1)
            self.logic.trigger(created['id'])
            # Credentials configured -> login happens right after connect.
            self.assertEqual(mock.method_calls[0][0], '().connect')
            self.assertEqual(mock.method_calls[0][1], ('localhost', 0))
            self.assertEqual(mock.method_calls[1][0], '().login')
            self.assertEqual(mock.method_calls[1][1], ('smtpu', 'smtpp'))
            self.logic.delete(created['id'])

    def test_find_identifier(self):
        created = self.logic.create(name='test_find_identifier', periodicity=12355)
        self.assertEqual(created['id'], self.logic.find_identifier(identifier=created['id']))
        self.assertEqual(created['id'], self.logic.find_identifier(name=created['name']))

    def test_find_identifier_invalid(self):
        with self.assertRaisesRegex(Exception, 'Must specify'):
            self.logic.find_identifier()
        with self.assertRaisesRegex(Exception, 'Specify only one'):
            self.logic.find_identifier(name='foo', slug='bar')

    def test_find_identifier_slug_not_found(self):
        with self.assertRaisesRegex(CanaryNotFoundError, "'slug': 'test-find-identifier-slug-not-found'"):
            self.logic.find_identifier(slug='test-find-identifier-slug-not-found')

    def test_add_history(self):
        history = []
        self.logic.add_history(history, None)
        for i in range(1000):
            self.logic.add_history(history, str(i))

    def test_add_history_invalid(self):
        history = []
        with self.assertRaises(TypeError):
            self.logic.add_history(history, 2)

    def test_schedule_next_deadline(self):
        self.logic.schedule_next_deadline()

    def test_periodicity_numeric(self):
        created = self.logic.create(name='test_periodicity_numeric', periodicity=1200)
        delta = (created['deadline'] - datetime.utcnow()).total_seconds()
        # places=0 on delta/10 tolerates several seconds of scheduling jitter.
        self.assertAlmostEqual((delta / 10), 120, places=0)

    def test_periodicity_schedule_inactive(self):
        now = datetime.utcnow()
        midnight_tomorrow = (now + timedelta(days=1)).replace(hour=0, minute=0, second=0, microsecond=0)
        # Schedule that only becomes active tomorrow (by weekday).
        tomorrow_schedule = '* * * * {} 1200'.format(midnight_tomorrow.isoweekday())
        created = self.logic.create(name='test_periodicity_schedule_inactive', periodicity=tomorrow_schedule)
        delta = (created['deadline'] - midnight_tomorrow).total_seconds()
        self.assertAlmostEqual((delta / 10), 120, places=0)

    def test_periodicity_schedule_active(self):
        now = datetime.utcnow()
        created = self.logic.create(name='test_periodicity_schedule_active', periodicity='* * * * * 1200')
        delta = (created['deadline'] - now).total_seconds()
        self.assertAlmostEqual((delta / 10), 120, places=0)

    def test_periodicity_invalid(self):
        # Only 5 cron fields + period: invalid.
        with self.assertRaises(TypeError):
            self.logic.create(name='test_periodicity_invalid', periodicity='* * * * 1200')

    def test_periodicity_invalid_newline(self):
        with self.assertRaises(TypeError):
            self.logic.create(name='test_periodicity_invalid_newline', periodicity='* * * * sat 1200\n* * * * sun 400')

    def test_periodicity_invalid_command(self):
        with self.assertRaises(TypeError):
            self.logic.create(name='test_periodicity_invalid_command', periodicity='* * * * * froodle')

    def test_periodicity_invalid_negative(self):
        with self.assertRaises(TypeError):
            self.logic.create(name='test_periodicity_invalid_negative', periodicity='* * * * * -1')

    def test_periodicity_invalid_overlapping(self):
        with self.assertRaises(TypeError):
            self.logic.create(name='test_periodicity_invalid_overlapping', periodicity='* * * * * 30; * * * * * 60')

    def test_periodicity_delta_case_2(self):
        periodicity = '* 0 * * * 120'
        whence = datetime(2016, 6, 30, 1, 0)
        delta = self.logic.calculate_periodicity_delta(periodicity, whence)
        next_deadline = (whence + delta)
        self.assertEqual(next_deadline, datetime(2016, 7, 1, 0, 2))

    def test_periodicity_delta_case_3(self):
        periodicity = '* 0 * * * 120'
        whence = datetime(2016, 6, 30, 0, 59)
        delta = self.logic.calculate_periodicity_delta(periodicity, whence)
        next_deadline = (whence + delta)
        self.assertEqual(next_deadline, datetime(2016, 7, 1, 0, 2))

    def test_periodicity_delta_case_4(self):
        # Two schedule windows; the 1-o'clock window fires first.
        periodicity = '* 0 * * * 120; * 1 * * * 600'
        whence = datetime(2016, 6, 30, 0, 59)
        delta = self.logic.calculate_periodicity_delta(periodicity, whence)
        next_deadline = (whence + delta)
        self.assertEqual(next_deadline, datetime(2016, 6, 30, 1, 9))

    def test_deadline_handler_next_deadline(self):
        self.logic.create(name='sooner', periodicity=1)
        later = self.logic.create(name='later', periodicity=2)
        time.sleep(1.1)
        # After 'sooner' expires, 'later' holds the next upcoming deadline.
        next_deadline = next(self.store.upcoming_deadlines())
        self.assertEqual(later['name'], next_deadline['name'])
def objective(objective, objective_config: str, **kwargs) -> Type[Objective]: if (objective == 'docking'): from molpal.objectives.docking import DockingObjective return DockingObjective(objective_config, **kwargs) if (objective == 'lookup'): from molpal.objectives.lookup import LookupObjective return LookupObjective(objective_config, **kwargs) raise NotImplementedError(f'Unrecognized objective: "{objective}"')
class TestHandler(BufferingHandler): def __init__(self, only_warnings=False): self.only_warnings = only_warnings BufferingHandler.__init__(self, 0) def shouldFlush(self, record): return False def emit(self, record): if (self.only_warnings and (record.level != logging.WARNING)): return self.buffer.append(record.__dict__)
class WriteFailedError(ErrorMessage): def __init__(self, parent, song): title = _('Unable to save song') fn_format = util.bold(fsn2text(song('~basename'))) description = (_('Saving %(file-name)s failed.The file may be read-only, corrupted, or you do not have permission to edit it.') % {'file-name': fn_format}) super().__init__(parent, title, description, escape_desc=False)
class SawyerBasketballEnv(SawyerXYZEnv):
    """Sawyer task: pick up a ball and place it in a basket goal."""

    def __init__(self):
        liftThresh = 0.3
        # Sampling bounds for goal, hand, and object positions (x, y, z).
        goal_low = ((- 0.1), 0.85, 0.15)
        goal_high = (0.1, (0.9 + 1e-07), 0.15)
        hand_low = ((- 0.5), 0.4, 0.05)
        hand_high = (0.5, 1, 0.5)
        obj_low = ((- 0.1), 0.6, 0.03)
        obj_high = (0.1, 0.7, 0.03)
        super().__init__(self.model_name, hand_low=hand_low, hand_high=hand_high)
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0, 0.6, 0.03], dtype=np.float32), 'hand_init_pos': np.array((0, 0.6, 0.2), dtype=np.float32)}
        self.goal = np.array([0, 0.9, 0.15])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # Minimum lift height (added to the object's resting height).
        self.liftThresh = liftThresh
        # Random reset samples object xyz and goal xyz jointly (6 dims).
        self._random_reset_space = Box(np.hstack((obj_low, goal_low)), np.hstack((obj_high, goal_high)))
        self.goal_space = Box((np.array(goal_low) + np.array([0, (- 0.05001), 0.1])), (np.array(goal_high) + np.array([0, (- 0.05), 0.1001])))

    def model_name(self):
        # NOTE(review): upstream this accessor is typically a @property; the
        # decorator may have been lost in formatting — confirm against the
        # original file (it is passed uncalled to super().__init__ above).
        return full_v1_path_for('sawyer_xyz/sawyer_basketball.xml')

    # NOTE(review): the bare name below is likely a truncated
    # @_assert_task_is_set decorator — confirm against the original file.
    _assert_task_is_set
    def step(self, action):
        # One environment step; success when the ball is within 8cm of goal.
        ob = super().step(action)
        (reward, reachDist, pickRew, placingDist) = self.compute_reward(action, ob)
        info = {'reachDist': reachDist, 'goalDist': placingDist, 'epRew': reward, 'pickRew': pickRew, 'success': float((placingDist <= 0.08))}
        return (ob, reward, False, info)

    def _get_pos_objects(self):
        # The ball's position, read from its MuJoCo geom.
        return self.data.get_geom_xpos('objGeom')

    def reset_model(self):
        self._reset_hand()
        basket_pos = self.goal.copy()
        self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
        self._target_pos = self.data.site_xpos[self.model.site_name2id('goal')]
        self.objHeight = self.data.get_geom_xpos('objGeom')[2]
        self.heightTarget = (self.objHeight + self.liftThresh)
        if self.random_init:
            goal_pos = self._get_state_rand_vec()
            basket_pos = goal_pos[3:]
            # Re-sample until object and basket are at least 15cm apart (xy).
            while (np.linalg.norm((goal_pos[:2] - basket_pos[:2])) < 0.15):
                goal_pos = self._get_state_rand_vec()
                basket_pos = goal_pos[3:]
            self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[(- 1)]]))
            self.sim.model.body_pos[self.model.body_name2id('basket_goal')] = basket_pos
            # Target sits slightly in front of and above the basket body.
            self._target_pos = (basket_pos + np.array([0, (- 0.05), 0.1]))
        self._set_obj_xyz(self.obj_init_pos)
        # Upper bound on placing distance, used to scale the place reward.
        self.maxPlacingDist = (np.linalg.norm((np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._target_pos))) + self.heightTarget)
        return self._get_obs()

    def _reset_hand(self):
        super()._reset_hand(10)
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        # Gripper centre-of-mass reference used by the reach reward.
        self.init_fingerCOM = ((rightFinger + leftFinger) / 2)
        self.pickCompleted = False

    def compute_reward(self, actions, obs):
        # obs[3:6] is the ball position in the flat observation vector.
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        heightTarget = self.heightTarget
        goal = self._target_pos
        reachDist = np.linalg.norm((objPos - fingerCOM))
        placingDist = np.linalg.norm((objPos - goal))
        assert np.all((goal == self._get_site_pos('goal')))

        def reachReward():
            # Approach reward: align in xy first (penalising hand height
            # changes), then reward closing the gripper (actions[-1]) once
            # the hand is within 5cm of the ball.
            reachRew = (- reachDist)
            reachDistxy = np.linalg.norm((objPos[:(- 1)] - fingerCOM[:(- 1)]))
            zRew = np.linalg.norm((fingerCOM[(- 1)] - self.init_fingerCOM[(- 1)]))
            if (reachDistxy < 0.05):
                reachRew = (- reachDist)
            else:
                reachRew = ((- reachDistxy) - (2 * zRew))
            if (reachDist < 0.05):
                reachRew = ((- reachDist) + (max(actions[(- 1)], 0) / 50))
            return (reachRew, reachDist)

        def pickCompletionCriteria():
            # Pick phase is done once the ball nears the target lift height.
            tolerance = 0.01
            if (objPos[2] >= (heightTarget - tolerance)):
                return True
            else:
                return False
        if pickCompletionCriteria():
            self.pickCompleted = True

        def objDropped():
            # Ball back near the table while away from both hand and goal.
            return ((objPos[2] < (self.objHeight + 0.005)) and (placingDist > 0.02) and (reachDist > 0.02))

        def orig_pickReward():
            # Reward holding the ball at (or lifting it toward) heightTarget.
            hScale = 100
            if (self.pickCompleted and (not objDropped())):
                return (hScale * heightTarget)
            elif ((reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005))):
                return (hScale * min(heightTarget, objPos[2]))
            else:
                return 0

        def placeReward():
            # Dense placing reward: linear progress term plus two sharp
            # exponential bonuses near the goal; only active while carrying.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            cond = (self.pickCompleted and (reachDist < 0.1) and (not objDropped()))
            if cond:
                placeRew = ((1000 * (self.maxPlacingDist - placingDist)) + (c1 * (np.exp(((- (placingDist ** 2)) / c2)) + np.exp(((- (placingDist ** 2)) / c3)))))
                placeRew = max(placeRew, 0)
                return [placeRew, placingDist]
            else:
                return [0, placingDist]
        (reachRew, reachDist) = reachReward()
        pickRew = orig_pickReward()
        (placeRew, placingDist) = placeReward()
        assert ((placeRew >= 0) and (pickRew >= 0))
        reward = ((reachRew + pickRew) + placeRew)
        return [reward, reachDist, pickRew, placingDist]
_grad() def _get_stats_multilabel(output: torch.LongTensor, target: torch.LongTensor) -> Tuple[(torch.LongTensor, torch.LongTensor, torch.LongTensor, torch.LongTensor)]: (batch_size, num_classes, *dims) = target.shape output = output.view(batch_size, num_classes, (- 1)) target = target.view(batch_size, num_classes, (- 1)) tp = (output * target).sum(2) fp = (output.sum(2) - tp) fn = (target.sum(2) - tp) tn = (torch.prod(torch.tensor(dims)) - ((tp + fp) + fn)) return (tp, fp, fn, tn)
def parse_args():
    """Parse command-line options for GLUE fine-tuning with quantization.

    Combines the standard HuggingFace text-classification options with
    quantization-specific flags (bit widths, Hadamard transforms, LSQ/PTQ
    strategies). After parsing it validates that either a task name or data
    files were supplied, that data files are csv/json, and that
    --push_to_hub is accompanied by --output_dir.
    """
    parser = argparse.ArgumentParser(description='Finetune a transformers model on a text classification task')
    # --- data / task selection -------------------------------------------
    parser.add_argument('--task_name', type=str, default=None, help='The name of the glue task to train on.', choices=list(task_to_keys.keys()))
    parser.add_argument('--train_file', type=str, default=None, help='A csv or a json file containing the training data.')
    parser.add_argument('--validation_file', type=str, default=None, help='A csv or a json file containing the validation data.')
    parser.add_argument('--max_length', type=int, default=128, help='The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded if `--pad_to_max_lengh` is passed.')
    parser.add_argument('--pad_to_max_length', action='store_true', help='If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.')
    parser.add_argument('--model_name_or_path', type=str, help='Path to pretrained model or model identifier from huggingface.co/models.', required=True)
    parser.add_argument('--use_slow_tokenizer', action='store_true', help='If passed, will use a slow tokenizer (not backed by the Tokenizers library).')
    # --- optimisation hyper-parameters -----------------------------------
    parser.add_argument('--per_device_train_batch_size', type=int, default=8, help='Batch size (per device) for the training dataloader.')
    parser.add_argument('--per_device_eval_batch_size', type=int, default=8, help='Batch size (per device) for the evaluation dataloader.')
    parser.add_argument('--learning_rate', type=float, default=5e-05, help='Initial learning rate (after the potential warmup period) to use.')
    parser.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay to use.')
    parser.add_argument('--num_train_epochs', type=int, default=3, help='Total number of training epochs to perform.')
    parser.add_argument('--max_train_steps', type=int, default=None, help='Total number of training steps to perform. If provided, overrides num_train_epochs.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--lr_scheduler_type', type=SchedulerType, default='linear', help='The scheduler type to use.', choices=['linear', 'cosine', 'cosine_with_restarts', 'polynomial', 'constant', 'constant_with_warmup'])
    parser.add_argument('--num_warmup_steps', type=int, default=0, help='Number of steps for the warmup in the lr scheduler.')
    # --- output / hub / tracking -----------------------------------------
    parser.add_argument('--output_dir', type=str, default=None, help='Where to store the final model.')
    parser.add_argument('--seed', type=int, default=None, help='A seed for reproducible training.')
    parser.add_argument('--push_to_hub', action='store_true', help='Whether or not to push the model to the Hub.')
    parser.add_argument('--hub_model_id', type=str, help='The name of the repository to keep in sync with the local `output_dir`.')
    parser.add_argument('--hub_token', type=str, help='The token to use to push to the Model Hub.')
    parser.add_argument('--checkpointing_steps', type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.")
    parser.add_argument('--resume_from_checkpoint', type=str, default=None, help='If the training should continue from a checkpoint folder.')
    parser.add_argument('--with_tracking', action='store_true', help='Whether to enable experiment trackers for logging.')
    parser.add_argument('--report_to', type=str, default='all', help='The integration to report the results and logs to. Supported platforms are `"tensorboard"`, `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.Only applicable when `--with_tracking` is passed.')
    parser.add_argument('--ignore_mismatched_sizes', action='store_true', help='Whether or not to enable to load a pretrained model whose head dimensions are different.')
    # --- architecture / quantization configuration -----------------------
    parser.add_argument('--arch', '-a', metavar='ARCH', default='bertForSequence', choices=['BertForSequenceClassification'])
    parser.add_argument('--model-config', '-c', metavar='CONF', default='classic', choices=['classic', 'quantize'])
    parser.add_argument('--choice', nargs='+', type=str, help='Choose a linear layer to quantize')
    parser.add_argument('--clip_lr', default=0.0002, type=float, help='Use a seperate lr for clip_vals / stepsize')
    parser.add_argument('--clip_wd', default=0.0, type=float, help='weight decay for clip_vals / stepsize')

    def str2bool(v):
        # argparse-friendly boolean parser accepting common spellings.
        if isinstance(v, bool):
            return v
        if (v.lower() in ('yes', 'true', 't', 'y', '1')):
            return True
        elif (v.lower() in ('no', 'false', 'f', 'n', '0')):
            return False
        else:
            raise argparse.ArgumentTypeError('Boolean value expected.')
    parser.add_argument('--qa', type=str2bool, default=True, help='quantize activation')
    parser.add_argument('--qw', type=str2bool, default=True, help='quantize weights')
    parser.add_argument('--qg', type=str2bool, default=True, help='quantize gradients')
    parser.add_argument('--biased', type=str2bool, default=False, help='biased quantization')
    parser.add_argument('--abits', type=int, default=8, help='activation number of bits')
    parser.add_argument('--wbits', type=int, default=8, help='weight number of bits')
    parser.add_argument('--biasbits', type=int, default=16, help='bias number of bits')
    parser.add_argument('--bbits', type=int, default=8, help='backward number of bits')
    parser.add_argument('--bwbits', type=int, default=8, help='backward weight number of bits')
    parser.add_argument('--hadamard', type=str2bool, default=False, help='apply Hadamard transformation on gradients')
    parser.add_argument('--dynamic', type=str2bool, default=True, help='whether apply dynamic Hadamard transformation on gradients')
    parser.add_argument('--bmm', type=str2bool, default=True, help='whether apply bmm Hadamard transformation on gradients')
    parser.add_argument('--biprecision', type=str2bool, default=True, help='Gradient bifurcation')
    parser.add_argument('--twolayers_gradweight', '--2gw', type=str2bool, default=False, help='use two 4 bit to simulate a 8 bit')
    parser.add_argument('--twolayers_gradinputt', '--2gi', type=str2bool, default=False, help='use two 4 bit to simulate a 8 bit')
    parser.add_argument('--luq', type=str2bool, default=False, help='use luq for backward')
    parser.add_argument('--weight_quant_method', '--wfq', default='ptq', type=str, metavar='strategy', choices=['uniform', 'lsq', 'ptq'])
    parser.add_argument('--input_quant_method', '--ifq', default='ptq', type=str, metavar='strategy', choices=['uniform', 'lsq', 'ptq'])
    parser.add_argument('--learnable_step_size', type=str2bool, default=True, help='Debug to draw the variance and leverage score')
    parser.add_argument('--learnable_hadamard', type=str2bool, default=True, help='Debug to draw the variance and leverage score')
    parser.add_argument('--lsq_layerwise_input', type=str, default='layer', help='Debug to draw the variance and leverage score', choices=['layer', 'row', 'column'])
    parser.add_argument('--lsq_layerwise_weight', type=str, default='layer', help='Debug to draw the variance and leverage score', choices=['layer', 'row', 'column'])
    parser.add_argument('--retain_large_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
    parser.add_argument('--quantize_large_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
    parser.add_argument('--draw_value', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
    parser.add_argument('--clip-value', type=float, default=100, help='Choose a linear layer to quantize')
    parser.add_argument('--track_step_size', type=str2bool, default=False, help='Debug to draw the variance and leverage score')
    parser.add_argument('--fp16', type=str2bool, default=False, help='whether use torch amp')
    args = parser.parse_args()
    # --- validation of mutually-dependent options ------------------------
    if ((args.task_name is None) and (args.train_file is None) and (args.validation_file is None)):
        raise ValueError('Need either a task name or a training/validation file.')
    else:
        if (args.train_file is not None):
            extension = args.train_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
        if (args.validation_file is not None):
            extension = args.validation_file.split('.')[(- 1)]
            assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
    if args.push_to_hub:
        assert (args.output_dir is not None), 'Need an `output_dir` to create a repo when `--push_to_hub` is passed.'
    return args
class OSTreeContainer_TestCase(unittest.TestCase): def runTest(self): cmd = F38_OSTreeContainer() self.assertEqual(cmd.noSignatureVerification, False) op = cmd._getParser() for action in op._actions: if ('--url' in action.option_strings): self.assertEqual(action.required, True)
def test_dns_record_hashablity_does_not_consider_ttl(): record1 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b'same') record2 = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same') record_set = {record1, record2} assert (len(record_set) == 1) record_set.add(record1) assert (len(record_set) == 1) record3_dupe = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same') assert (record2 == record3_dupe) assert (record2.__hash__() == record3_dupe.__hash__()) record_set.add(record3_dupe) assert (len(record_set) == 1)
class W_StructPropertyAccessor(values.W_Procedure): errorname = 'struct-property-accessor' _attrs_ = _immutable_fields_ = ['property'] import_from_mixin(SingleResultMixin) def __init__(self, prop): self.property = prop def get_arity(self, promote=False): return Arity.ONE _call_method([values.W_Object, default(values.W_Object, None)], simple=False) def call_with_extra_info(self, arg, fail, env, cont, app): from pycket.interpreter import return_value if isinstance(arg, W_StructType): w_val = arg.read_property_precise(self.property) if (w_val is not None): return return_value(w_val, env, cont) elif (arg.struct_type() is not None): return arg.get_prop(self.property, env, cont) elif (fail is not None): if fail.iscallable(): return fail.call_with_extra_info([], env, cont, app) return return_value(fail, env, cont) raise SchemeException(('%s-accessor: expected %s? but got %s' % (self.property.name, self.property.name, arg.tostring())))
.requires_internet def test_install_project_no_dev_mode(hatch, helpers, temp_dir, platform, config_file, extract_installed_requirements): config_file.model.template.plugins['default']['tests'] = False config_file.save() project_name = 'My.App' with temp_dir.as_cwd(): result = hatch('new', project_name) assert (result.exit_code == 0), result.output project_path = (temp_dir / 'my-app') data_path = (temp_dir / 'data') data_path.mkdir() project = Project(project_path) helpers.update_project_environment(project, 'default', {'dev-mode': False, **project.config.envs['default']}) helpers.update_project_environment(project, 'test', {}) with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}): result = hatch('env', 'create', 'test') assert (result.exit_code == 0), result.output assert (result.output == helpers.dedent('\n Creating environment: test\n Installing project\n Checking dependencies\n ')) env_data_path = ((data_path / 'env') / 'virtual') assert env_data_path.is_dir() project_data_path = (env_data_path / project_path.name) assert project_data_path.is_dir() storage_dirs = list(project_data_path.iterdir()) assert (len(storage_dirs) == 1) storage_path = storage_dirs[0] assert (len(storage_path.name) == 8) env_dirs = list(storage_path.iterdir()) assert (len(env_dirs) == 1) env_path = env_dirs[0] assert (env_path.name == 'test') with VirtualEnv(env_path, platform): output = platform.run_command(['pip', 'freeze'], check=True, capture_output=True).stdout.decode('utf-8') requirements = extract_installed_requirements(output.splitlines()) assert (len(requirements) == 1) assert requirements[0].startswith('my-app ')
def _get_pointwise_all_likefism_data(dataset, num_negatives, train_dict): (user_input, num_idx, item_input, labels) = ([], [], [], []) num_users = dataset.num_users num_items = dataset.num_items for u in range(num_users): items_by_user = train_dict[u].copy() items_set = set(items_by_user) size = len(items_by_user) for i in items_by_user: for _ in range(num_negatives): j = np.random.randint(num_items) while (j in items_set): j = np.random.randint(num_items) user_input.append(items_by_user) item_input.append(j) num_idx.append(size) labels.append(0) items_by_user.remove(i) user_input.append(items_by_user) item_input.append(i) num_idx.append((size - 1)) labels.append(1) return (user_input, num_idx, item_input, labels)
def main(): parser = argparse.ArgumentParser(description='Identify bugfixes. Use this script together with a\n gitlog.json and a path with issues. The gitlog.json\n is created using the git_log_to_array.py script and\n the issue directory is created and populated using\n the fetch.py script.') parser.add_argument('--gitlog', type=str, help='Path to json file containing gitlog') parser.add_argument('--issue-list', type=str, help='Path to directory containing issue json files') parser.add_argument('--gitlog-pattern', type=str, help='Pattern to match a bugfix') args = parser.parse_args() issue_list = find_bug_fixes(args.issue_list, args.gitlog, args.gitlog_pattern) with open('issue_list.json', 'w') as f: f.write(json.dumps(issue_list))
def build_data_instance_training(row, correct_lang_feedback: str, include_input=False, cur_df_path=None, add_reference=False): success = row['execution_result']['success'] problem = row['prompt'] generated_solution = row['generation'] reference = row['reference'] if (not success): if ('traceback' not in row['execution_result']): language_feedback = (INCORRECT_LANG_FEEDBACK + row['execution_result']['reason']) else: language_feedback = (INCORRECT_LANG_FEEDBACK + row['execution_result']['traceback']['str']) else: language_feedback = correct_lang_feedback text_a = language_feedback text_b = (problem + generated_solution) if add_reference: text_b += ('\n' + reference) reward_offset_mapping: Mapping[(Tuple[(int, int)], Union[(float, None)])] = {(0, len(problem)): 0.0, (len(problem), len(text_b)): (1.0 if success else (- 1.0))} ret = {'success': success, 'problem': problem, 'generated_solution': generated_solution, 'language_feedback': language_feedback, 'reward_offset_mapping': dump_offset_mapping_to_json(reward_offset_mapping), 'text_a': text_a, 'text_b': text_b} if include_input: ret['input'] = (row.to_dict() if (not isinstance(row, dict)) else row) ret['data_path'] = cur_df_path return ret
def train_mlm(args, gpu_id, rank, loader, model, optimizer, scheduler): model.train() start_time = time.time() (total_loss, total_loss_mlm, total_loss_nsp) = (0.0, 0.0, 0.0) (total_correct, total_denominator) = (0.0, 0.0) total_instances = (0.0, 0.0) steps = 1 total_steps = args.total_steps loader_iter = iter(loader) while True: if (steps == (total_steps + 1)): break (src, tgt, seg) = next(loader_iter) if (gpu_id is not None): src = src.cuda(gpu_id) tgt = tgt.cuda(gpu_id) seg = seg.cuda(gpu_id) loss_info = model(src, tgt, seg) (loss, correct, denominator) = loss_info total_loss += loss.item() total_correct += correct.item() total_denominator += denominator.item() loss = (loss / args.accumulation_steps) if args.fp16: with args.amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() if ((steps % args.accumulation_steps) == 0): optimizer.step() scheduler.step() model.zero_grad() if (((steps % args.report_steps) == 0) and ((not args.dist_train) or (args.dist_train and (rank == 0)))): loss = (total_loss / args.report_steps) elapsed = (time.time() - start_time) done_tokens = ((((args.batch_size * src.size(1)) * args.report_steps) * args.world_size) if args.dist_train else ((args.batch_size * src.size(1)) * args.report_steps)) print('| {:8d}/{:8d} steps| {:8.2f} tokens/s| loss {:7.2f}| acc: {:3.3f}'.format(steps, total_steps, (done_tokens / elapsed), loss, (total_correct / total_denominator))) total_loss = 0.0 (total_correct, total_denominator) = (0.0, 0.0) start_time = time.time() if (((steps % args.save_checkpoint_steps) == 0) and ((not args.dist_train) or (args.dist_train and (rank == 0)))): save_model(model, ((args.output_model_path + '-') + str(steps))) steps += 1
def test_known_answer_supression_service_type_enumeration_query():
    """Service-type enumeration PTR answers already present in the query's
    known-answer section must be suppressed from the response."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # Register two services so the enumeration query yields two PTR answers.
    type_ = '_otherknown._tcp.local.'
    name = 'knownname'
    registration_name = f'{name}.{type_}'
    desc = {'path': '/~paulsm/'}
    server_name = 'ash-2.local.'
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton('10.0.1.2')])
    zc.registry.async_add(info)
    type_2 = '_otherknown2._tcp.local.'
    name = 'knownname'
    registration_name2 = f'{name}.{type_2}'
    desc = {'path': '/~paulsm/'}
    server_name2 = 'ash-3.local.'
    info2 = ServiceInfo(type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton('10.0.1.2')])
    zc.registry.async_add(info2)
    now = current_time_millis()
    _clear_cache(zc)
    # Plain enumeration query: answers go to the aggregated multicast bucket.
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert (not question_answers.ucast)
    assert (not question_answers.mcast_now)
    assert question_answers.mcast_aggregate
    assert (not question_answers.mcast_aggregate_last_second)
    # Same query but with both PTR records supplied as known answers:
    # nothing remains to answer, so every response bucket must be empty.
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(r.DNSPointer(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, type_), now)
    generated.add_answer_at_time(r.DNSPointer(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, type_2), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert question_answers
    assert (not question_answers.ucast)
    assert (not question_answers.mcast_now)
    assert (not question_answers.mcast_aggregate)
    assert (not question_answers.mcast_aggregate_last_second)
    # Cleanup.
    zc.registry.async_remove(info)
    zc.registry.async_remove(info2)
    zc.close()
def read_tmy2(filename): string = '%2d%2d%2d%2d%4d%4d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%4d%1s%1d%2d%1s%1d%2d%1s%1d%4d%1s%1d%4d%1s%1d%3d%1s%1d%4d%1s%1d%3d%1s%1d%3d%1s%1d%4d%1s%1d%5d%1s%1d%10d%3d%1s%1d%3d%1s%1d%3d%1s%1d%2d%1s%1d' columns = 'year,month,day,hour,ETR,ETRN,GHI,GHISource,GHIUncertainty,DNI,DNISource,DNIUncertainty,DHI,DHISource,DHIUncertainty,GHillum,GHillumSource,GHillumUncertainty,DNillum,DNillumSource,DNillumUncertainty,DHillum,DHillumSource,DHillumUncertainty,Zenithlum,ZenithlumSource,ZenithlumUncertainty,TotCld,TotCldSource,TotCldUncertainty,OpqCld,OpqCldSource,OpqCldUncertainty,DryBulb,DryBulbSource,DryBulbUncertainty,DewPoint,DewPointSource,DewPointUncertainty,RHum,RHumSource,RHumUncertainty,Pressure,PressureSource,PressureUncertainty,Wdir,WdirSource,WdirUncertainty,Wspd,WspdSource,WspdUncertainty,Hvis,HvisSource,HvisUncertainty,CeilHgt,CeilHgtSource,CeilHgtUncertainty,PresentWeather,Pwat,PwatSource,PwatUncertainty,AOD,AODSource,AODUncertainty,SnowDepth,SnowDepthSource,SnowDepthUncertainty,LastSnowfall,LastSnowfallSource,LastSnowfallUncertaint' hdr_columns = 'WBAN,City,State,TZ,latitude,longitude,altitude' (tmy2, tmy2_meta) = _read_tmy2(string, columns, hdr_columns, str(filename)) return (tmy2, tmy2_meta)
def sample_mesh_brute(tri_points: wp.array(dtype=wp.vec3), tri_indices: wp.array(dtype=int), tri_count: int, query_points: wp.array(dtype=wp.vec3), query_faces: wp.array(dtype=int), query_signs: wp.array(dtype=float), query_dist: wp.array(dtype=float)): tid = wp.tid() min_face = int(0) min_dist = float(1000000.0) winding_angle = float(0.0) p = query_points[tid] for i in range(0, tri_count): a = tri_points[tri_indices[((i * 3) + 0)]] b = tri_points[tri_indices[((i * 3) + 1)]] c = tri_points[tri_indices[((i * 3) + 2)]] winding_angle += solid_angle(a, b, c, p) bary = triangle_closest_point(a, b, c, p) u = bary[0] v = bary[1] cp = (((u * a) + (v * b)) + (((1.0 - u) - v) * c)) cp_dist = wp.length((cp - p)) if (cp_dist < min_dist): min_dist = cp_dist min_face = i query_faces[tid] = min_face query_signs[tid] = winding_angle query_dist[tid] = min_dist
def simpleDialog(item, action, question, options, defaultOption): if isinstance(item, FileItem): filename = item.id else: filename = item.id() mb = QtWidgets.QMessageBox M = {'ok': mb.Ok, 'open': mb.Open, 'save': mb.Save, 'cancel': mb.Cancel, 'close': mb.Close, 'discard': mb.Discard, 'apply': mb.Apply, 'reset': mb.Reset, 'restoredefaults': mb.RestoreDefaults, 'help': mb.Help, 'saveall': mb.SaveAll, 'yes': mb.Yes, 'yestoall': mb.YesToAll, 'no': mb.No, 'notoall': mb.NoToAll, 'abort': mb.Abort, 'retry': mb.Retry, 'ignore': mb.Ignore} dlg = QtWidgets.QMessageBox(pyzo.main) dlg.setWindowTitle('Pyzo') dlg.setText((action + ' file:\n{}'.format(filename))) dlg.setInformativeText(question) buttons = {} for option in options: option_lower = option.lower() if (option_lower in M): button = dlg.addButton(M[option_lower]) else: button = dlg.addButton(option, dlg.AcceptRole) buttons[button] = option if (option_lower == defaultOption.lower()): dlg.setDefaultButton(button) dlg.exec_() button = dlg.clickedButton() if (button in buttons): return buttons[button] else: return None
class Effect5333(BaseEffect): type = 'passive' def handler(fit, ship, context, projectionRange, **kwargs): fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Medium Energy Turret')), 'damageMultiplier', ship.getModifiedItemAttr('shipBonusABC2'), skill='Amarr Battlecruiser', **kwargs)
class IterationBatchSampler(object): def __init__(self, data_source, batch_size, num_samples, shuffle=False, indices=None): self.batch_size = batch_size self.num_samples = num_samples self.shuffle = shuffle self.data_source = data_source self.index_queue = list(range(len(self.data_source))) if self.shuffle: np.random.shuffle(self.index_queue) self.pointer = 0 self.indices = indices def __iter__(self): batch = [] num_samples = 0 while (num_samples < self.num_samples): indexes = [] left = min(self.batch_size, (self.num_samples - num_samples)) for i in range(left): if (self.pointer >= len(self.index_queue)): self.index_queue = list(range(len(self.data_source))) self.pointer = 0 if self.shuffle: np.random.shuffle(self.index_queue) indexes.append(self.index_queue[self.pointer]) self.pointer += 1 batch.append(indexes) num_samples += len(indexes) if (self.indices is not None): indexes = torch.as_tensor(sum(batch, [])) subsampled_indexes = indexes[self.indices] batch = subsampled_indexes.split(self.batch_size) return iter(batch) def __len__(self): if (self.indices is None): return (((self.num_samples + self.batch_size) - 1) // self.batch_size) else: return (((len(self.indices) + self.batch_size) - 1) // self.batch_size)
class TradingDayOfWeekRule(six.with_metaclass(ABCMeta, StatelessRule)): (n=lossless_float_to_int('TradingDayOfWeekRule')) def __init__(self, n, invert): if (not (0 <= n < MAX_WEEK_RANGE)): raise _out_of_range_error(MAX_WEEK_RANGE) self.td_delta = (((- n) - 1) if invert else n) def should_trigger(self, dt): val = self.cal.minute_to_session_label(dt, direction='none').value return (val in self.execution_period_values) def execution_period_values(self): sessions = self.cal.all_sessions return set(pd.Series(data=sessions).groupby(sessions.map((lambda x: x.isocalendar()[0:2]))).nth(self.td_delta).astype(np.int64))
(('Python' not in caffe.layer_type_list()), 'Caffe built without Python layer support') class TestPythonLayer(unittest.TestCase): def setUp(self): net_file = python_net_file() self.net = caffe.Net(net_file, caffe.TRAIN) os.remove(net_file) def test_forward(self): x = 8 self.net.blobs['data'].data[...] = x self.net.forward() for y in self.net.blobs['three'].data.flat: self.assertEqual(y, ((10 ** 3) * x)) def test_backward(self): x = 7 self.net.blobs['three'].diff[...] = x self.net.backward() for y in self.net.blobs['data'].diff.flat: self.assertEqual(y, ((10 ** 3) * x)) def test_reshape(self): s = 4 self.net.blobs['data'].reshape(s, s, s, s) self.net.forward() for blob in six.itervalues(self.net.blobs): for d in blob.data.shape: self.assertEqual(s, d) def test_exception(self): net_file = exception_net_file() self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) os.remove(net_file) def test_parameter(self): net_file = parameter_net_file() net = caffe.Net(net_file, caffe.TRAIN) net.forward() net.backward() layer = net.layers[list(net._layer_names).index('layer')] self.assertEqual(layer.blobs[0].data[0], 0) self.assertEqual(layer.blobs[0].diff[0], 1) layer.blobs[0].data[0] += layer.blobs[0].diff[0] self.assertEqual(layer.blobs[0].data[0], 1) (h, caffemodel_file) = tempfile.mkstemp() net.save(caffemodel_file) layer.blobs[0].data[0] = (- 1) self.assertEqual(layer.blobs[0].data[0], (- 1)) net.copy_from(caffemodel_file) self.assertEqual(layer.blobs[0].data[0], 1) os.remove(caffemodel_file) net2 = caffe.Net(net_file, caffe.TRAIN) net2.share_with(net) layer = net.layers[list(net2._layer_names).index('layer')] self.assertEqual(layer.blobs[0].data[0], 1) os.remove(net_file)
def memory_subplot(output, data_list): import matplotlib.pyplot as plt from matplotlib import dates number_plots = len(data_list) (fig, all_memory_axes) = plt.subplots(1, number_plots, sharey='row') if (number_plots == 1): all_memory_axes = [all_memory_axes] memory_max = 0.0 for line in chain(*data_list): memory_max = max(memory_max, line[MEMORY]) memory_max *= 1.1 hour_fmt = dates.DateFormatter('%H:%M') for (count, (data, memory_axes)) in enumerate(zip(data_list, all_memory_axes)): timestamp = [line[TIMESTAMP] for line in data] memory = [line[MEMORY] for line in data] dt_start_time = timestamp[0] hours = (timestamp[(- 1)] - dt_start_time) label = '{start_date:%Y-%m-%d}\n{runtime}'.format(start_date=dt_start_time, runtime=hours) memory_axes.plot(timestamp, memory, color='b') memory_axes.set_ylim(0, memory_max) memory_axes.xaxis.set_major_formatter(hour_fmt) memory_axes.set_xlabel(label) if ((len(data_list) == 1) or (count == 0)): memory_axes.set_ylabel('Memory (MB)') else: memory_axes.get_yaxis().set_visible(False) fig.autofmt_xdate() plot_configure(fig) plt.savefig(output)
class TestSharedData():
    """Tests for the ``tool.hatch.build.targets.wheel.shared-data`` option."""

    def test_default(self, isolation):
        builder = WheelBuilder(str(isolation))
        # Chained comparison: also checks that repeated property access yields
        # the same value (i.e. the option is stable), not just that it is {}.
        assert (builder.config.shared_data == builder.config.shared_data == {})

    def test_invalid_type(self, isolation):
        # A non-mapping value must be rejected with a TypeError.
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': 42}}}}}}
        builder = WheelBuilder(str(isolation), config=config)
        with pytest.raises(TypeError, match='Field `tool.hatch.build.targets.wheel.shared-data` must be a mapping'):
            _ = builder.config.shared_data

    def test_absolute(self, isolation):
        # Surrounding slashes on the target are normalized away.
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {str((isolation / 'source')): '/target/'}}}}}}}
        builder = WheelBuilder(str(isolation), config=config)
        assert (builder.config.shared_data == {str((isolation / 'source')): 'target'})

    def test_relative(self, isolation):
        # Relative sources resolve against the project root.
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {'../source': '/target/'}}}}}}}
        builder = WheelBuilder(str((isolation / 'foo')), config=config)
        assert (builder.config.shared_data == {str((isolation / 'source')): 'target'})

    def test_source_empty_string(self, isolation):
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {'': '/target/'}}}}}}}
        builder = WheelBuilder(str(isolation), config=config)
        with pytest.raises(ValueError, match='Source #1 in field `tool.hatch.build.targets.wheel.shared-data` cannot be an empty string'):
            _ = builder.config.shared_data

    def test_relative_path_not_string(self, isolation):
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {'source': 0}}}}}}}
        builder = WheelBuilder(str(isolation), config=config)
        with pytest.raises(TypeError, match='Path for source `source` in field `tool.hatch.build.targets.wheel.shared-data` must be a string'):
            _ = builder.config.shared_data

    def test_relative_path_empty_string(self, isolation):
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {'source': ''}}}}}}}
        builder = WheelBuilder(str(isolation), config=config)
        with pytest.raises(ValueError, match='Path for source `source` in field `tool.hatch.build.targets.wheel.shared-data` cannot be an empty string'):
            _ = builder.config.shared_data

    def test_order(self, isolation):
        # NOTE(review): the expected dict documents the builder's re-ordering of
        # entries (sources grouped by target); confirm the exact rule upstream.
        config = {'tool': {'hatch': {'build': {'targets': {'wheel': {'shared-data': {'../very-nested': 'target1/embedded', '../source1': '/target2/', '../source2': '/target1/'}}}}}}}
        builder = WheelBuilder(str((isolation / 'foo')), config=config)
        assert (builder.config.shared_data == {str((isolation / 'source2')): 'target1', str((isolation / 'very-nested')): f'target1{os.sep}embedded', str((isolation / 'source1')): 'target2'})
# NOTE(review): the bare "_flax" below is a mangled decorator — upstream this
# is ``@require_flax``; the decorator prefix was lost during extraction.
_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    """Dual-encoder tests pairing a tiny random ViT with a tiny BERT (Flax)."""

    def get_pretrained_model_and_inputs(self):
        """Load the tiny pretrained pair and build random matching inputs."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained('hf-internal-testing/tiny-random-vit', 'hf-internal-testing/tiny-bert', vision_from_pt=True, text_from_pt=True)
        batch_size = 13
        pixel_values = floats_tensor([batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size])
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return (model, inputs)

    def get_vision_text_model(self, vision_config, text_config):
        """Instantiate the two encoders from their configs."""
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return (vision_model, text_model)

    def prepare_config_and_inputs(self):
        """Combine the ViT and BERT testers' configs/inputs into one dict."""
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        (vision_config, pixel_values) = vision_config_and_inputs
        (text_config, input_ids, token_type_ids, attention_mask) = text_config_and_inputs
        # Fixed: the original dict literal listed 'text_config' twice;
        # duplicate keys in a dict literal silently keep only the last value.
        return {'text_config': text_config, 'vision_config': vision_config, 'pixel_values': pixel_values, 'attention_mask': attention_mask, 'input_ids': input_ids, 'token_type_ids': token_type_ids}
def main(root):
    """Generate every default optim-loop asset underneath *root*."""
    asset_generators = (
        generate_default_image_optim_loop_asset,
        generate_default_image_optim_loop_processing_asset,
        generate_default_image_pyramid_optim_loop_asset,
        generate_default_image_pyramid_optim_loop__processing_asset,
        generate_default_transformer_optim_loop_asset,
        generate_default_transformer_epoch_optim_loop_asset,
    )
    # Run the generators in their original order.
    for generate in asset_generators:
        generate(root)
def test_hierarchical_logp():
    """The logp graph must use value variables, never RandomVariable ops."""
    with pm.Model() as m:
        x = pm.Uniform('x', lower=0, upper=1)
        # y's upper bound depends on x, making the model hierarchical.
        y = pm.Uniform('y', lower=0, upper=x)
        logp_ancestors = list(ancestors([m.logp()]))
        ops = {a.owner.op for a in logp_ancestors if a.owner}
        assert (len(ops) > 0)
        # Sampling nodes must have been replaced by their value variables.
        assert (not any((isinstance(o, RandomVariable) for o in ops)))
        assert (m.rvs_to_values[x] in logp_ancestors)
        assert (m.rvs_to_values[y] in logp_ancestors)
# NOTE(review): the call below is a mangled decorator — upstream this is
# ``@add_start_docstrings(...)``; the decorator prefix was lost in extraction.
_start_docstrings('Bert Based model to embed queries or document for document retrieval.', RETRIBERT_START_DOCSTRING)
class RetriBertModel(RetriBertPreTrainedModel):
    """Bi-encoder for retrieval: a query encoder plus an optionally shared
    document encoder, both projected into a common ``projection_dim`` space."""

    def __init__(self, config):
        super().__init__(config)
        self.projection_dim = config.projection_dim
        self.bert_query = BertModel(config)
        # When encoders are shared, documents also go through bert_query.
        self.bert_doc = (None if config.share_encoders else BertModel(config))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
        self.ce_loss = nn.CrossEntropyLoss(reduction='mean')
        self.post_init()

    def embed_sentences_checkpointed(self, input_ids, attention_mask, sent_encoder, checkpoint_batch_size=(- 1)):
        """Encode sentences, gradient-checkpointing in mini-batches of
        ``checkpoint_batch_size`` (<0 disables checkpointing)."""
        if ((checkpoint_batch_size < 0) or (input_ids.shape[0] < checkpoint_batch_size)):
            # Single pass; index 1 of the encoder outputs is the pooled output.
            return sent_encoder(input_ids, attention_mask=attention_mask)[1]
        else:
            device = input_ids.device
            input_shape = input_ids.size()
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
            head_mask = ([None] * sent_encoder.config.num_hidden_layers)
            extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(attention_mask, input_shape, device)

            def partial_encode(*inputs):
                # Runs only encoder + pooler; embeddings are computed outside
                # the checkpoint so they are not recomputed on backward.
                encoder_outputs = sent_encoder.encoder(inputs[0], attention_mask=inputs[1], head_mask=head_mask)
                sequence_output = encoder_outputs[0]
                pooled_output = sent_encoder.pooler(sequence_output)
                return pooled_output
            embedding_output = sent_encoder.embeddings(input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None)
            pooled_output_list = []
            # Checkpoint each mini-batch slice independently, then concatenate.
            for b in range(math.ceil((input_ids.shape[0] / checkpoint_batch_size))):
                b_embedding_output = embedding_output[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                b_attention_mask = extended_attention_mask[(b * checkpoint_batch_size):((b + 1) * checkpoint_batch_size)]
                pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
                pooled_output_list.append(pooled_output)
            return torch.cat(pooled_output_list, dim=0)

    def embed_questions(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        """Embed queries with the query encoder and project them."""
        q_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, self.bert_query, checkpoint_batch_size)
        return self.project_query(q_reps)

    def embed_answers(self, input_ids, attention_mask=None, checkpoint_batch_size=(- 1)):
        """Embed documents (falls back to the query encoder when shared)."""
        a_reps = self.embed_sentences_checkpointed(input_ids, attention_mask, (self.bert_query if (self.bert_doc is None) else self.bert_doc), checkpoint_batch_size)
        return self.project_doc(a_reps)

    def forward(self, input_ids_query, attention_mask_query, input_ids_doc, attention_mask_doc, checkpoint_batch_size=(- 1)):
        """Symmetric in-batch contrastive loss: query i matches document i."""
        device = input_ids_query.device
        q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
        a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
        compare_scores = torch.mm(q_reps, a_reps.t())
        # Average the query->doc and doc->query cross-entropy losses.
        loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
        loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
        loss = ((loss_qa + loss_aq) / 2)
        return loss
def animate(message: str, do_animation: bool, *, delay: float=0) -> Generator[(None, None, None)]:
    """Show a spinner on stderr while the caller's block runs.

    NOTE(review): the single-``yield`` shape implies this is wrapped with
    ``@contextlib.contextmanager`` at the (not visible) decoration site —
    confirm upstream.
    """
    if ((not do_animation) or (not _env_supports_animation())):
        # Fallback: print the message once; no thread, no animation.
        sys.stderr.write(f'''{message}... 
''')
        (yield)
        return
    event = Event()
    # Pick frames/timing based on terminal emoji support.
    if EMOJI_SUPPORT:
        animate_at_beginning_of_line = True
        symbols = EMOJI_ANIMATION_FRAMES
        period = EMOJI_FRAME_PERIOD
    else:
        animate_at_beginning_of_line = False
        symbols = NONEMOJI_ANIMATION_FRAMES
        period = NONEMOJI_FRAME_PERIOD
    thread_kwargs = {'message': message, 'event': event, 'symbols': symbols, 'delay': delay, 'period': period, 'animate_at_beginning_of_line': animate_at_beginning_of_line}
    t = Thread(target=print_animation, kwargs=thread_kwargs)
    t.start()
    try:
        (yield)
    finally:
        # Always stop the animation thread and clear the line, even on error.
        event.set()
        clear_line()
class TransformerDecoder(nn.Module):
    """Stack of ``num_layers`` cloned decoder layers with an optional final norm.

    When ``return_intermediate`` is True, ``forward`` returns the normalized
    output of every layer stacked along a new leading dimension; otherwise it
    returns only the final output.
    """

    def __init__(self, decoder_layer: nn.Module, num_layers: int, norm: Optional[nn.Module]=None, return_intermediate: Optional[bool]=False) -> None:
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def reset_parameters(self) -> None:
        """Re-initialize every multi-dimensional parameter with Xavier uniform.

        Fixed: the original docstring was truncated/garbled ("'See ...") when
        the file was flattened; restored a self-contained description.
        """
        for p in self.parameters():
            if (p.dim() > 1):
                nn.init.xavier_uniform_(p)

    def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor]=None, memory_mask: Optional[Tensor]=None, tgt_key_padding_mask: Optional[Tensor]=None, memory_key_padding_mask: Optional[Tensor]=None, pos: Optional[Tensor]=None, query_pos: Optional[Tensor]=None) -> Tensor:
        """Run the decoder stack; see the class docstring for return shape."""
        output = tgt
        intermediate = []
        for layer in self.layers:
            output = layer(output, memory, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, pos=pos, query_pos=query_pos)
            if self.return_intermediate:
                # Intermediate outputs are normalized so all stacked entries
                # live on the same scale; norm is mandatory in this mode.
                assert (self.norm is not None)
                intermediate.append(self.norm(output))
        if (self.norm is not None):
            output = self.norm(output)
            if self.return_intermediate:
                # Replace the last entry with the final normalized output.
                intermediate.pop()
                intermediate.append(output)
        if self.return_intermediate:
            return torch.stack(intermediate)
        return output
def abs_relative(depth_pred, depth_gt):
    """Mean absolute relative error between predicted and ground-truth depth.

    Both arrays must be finite and non-negative; returns NaN for empty input.
    """
    valid = (np.isfinite(depth_pred) & np.isfinite(depth_gt)) & ((depth_pred >= 0) & (depth_gt >= 0))
    assert np.all(valid)
    residual = (depth_pred - depth_gt)
    pixel_count = float(residual.size)
    if (pixel_count == 0):
        # No pixels to average over.
        return np.nan
    return (np.sum((np.absolute(residual) / depth_gt)) / pixel_count)
def import_all_modules(root: str, base_module: str) -> None:
    """Import every non-private .py/.pyc module found directly under *root*.

    Modules already present in ``sys.modules`` are skipped.
    """
    for entry in os.listdir(root):
        # Skip private files and anything that is not a Python module file.
        if entry.startswith('_') or not entry.endswith(('.py', '.pyc')):
            continue
        module = entry[:entry.find('.py')]
        if module in sys.modules:
            continue
        importlib.import_module('.'.join([base_module, module]))
def validate_uint64(value: int, title: str='Value') -> None:
    """Validate that *value* fits in an unsigned 64-bit integer.

    Raises:
        ValidationError: if *value* is not an int (bool is rejected too),
            is negative, or exceeds ``UINT_64_MAX``.
    """
    # bool is a subclass of int, so it must be rejected explicitly.
    if ((not isinstance(value, int)) or isinstance(value, bool)):
        raise ValidationError(f'{title} must be an integer: Got: {type(value)}')
    if (value < 0):
        raise ValidationError(f'{title} cannot be negative: Got: {value}')
    if (value > UINT_64_MAX):
        # Fixed: the original message misspelled "exceeds" and said "UINT256"
        # even though the bound being checked is the 64-bit maximum.
        raise ValidationError(f'{title} exceeds maximum UINT64 size. Got: {value}')
def test_specify_elements_with_labels(standard):
    """Subsystems can be selected by node label instead of numeric index."""
    network = Network(standard.tpm.tpm, node_labels=('A', 'B', 'C'))
    subsystem = Subsystem(network, (0, 0, 0), ('B', 'C'))
    # Labels B and C map to node indices 1 and 2.
    assert (subsystem.node_indices == (1, 2))
    assert (tuple((node.label for node in subsystem.nodes)) == ('B', 'C'))
    assert (str(subsystem) == 'Subsystem(B, C)')
def test_caplog_captures_for_all_stages(caplog: pytest.LogCaptureFixture, logging_during_setup_and_teardown: None) -> None:
    """caplog must keep per-stage (setup vs call) records separately."""
    assert (not caplog.records)
    assert (not caplog.get_records('call'))
    logger.info('a_call_log')
    assert ([x.message for x in caplog.get_records('call')] == ['a_call_log'])
    # The setup-phase record comes from the logging_during_setup_and_teardown fixture.
    assert ([x.message for x in caplog.get_records('setup')] == ['a_setup_log'])
    # Inspect internals: only setup and call have been recorded at this point
    # (teardown has not run yet).
    caplog_records = caplog._item.stash[caplog_records_key]
    assert (set(caplog_records) == {'setup', 'call'})
def decode_opcreate_script(script: bytes) -> Optional[list]:
    """Decode *script* and return its ops when it is a valid OP_CREATE script.

    Returns None for malformed scripts or scripts that do not match the
    expected 5-op OP_CREATE layout.
    """
    try:
        ops = list(script_GetOp(script))
    except MalformedBitcoinScript:
        return None
    # Expected layout: 5 ops, version push (1, b'\x04', 2) first, OP_CREATE last.
    looks_like_create = (
        (len(ops) == 5)
        and (ops[0] == (1, b'\x04', 2))
        and (ops[(- 1)][0] == opcodes.OP_CREATE)
    )
    return ops if looks_like_create else None
def pytest_cmdline_main(config: Config) -> Optional[Union[(int, ExitCode)]]:
    """Handle ``--version`` and ``--help`` eagerly; otherwise defer to pytest."""
    if config.option.version > 0:
        showversion(config)
        return 0
    if config.option.help:
        # Help output needs a configured session; tear it down again afterwards.
        config._do_configure()
        showhelp(config)
        config._ensure_unconfigure()
        return 0
    # Returning None lets the normal pytest run proceed.
    return None
def _bpx_to_param_dict(bpx: BPX) -> dict:
    """Flatten a validated BPX object into a PyBaMM parameter dictionary.

    Copies each BPX section into PyBaMM naming via ``_bpx_to_domain_param_dict``
    and then derives the PyBaMM-only quantities: geometry, Bruggeman
    coefficients, exchange-current densities, and Arrhenius-corrected
    transport properties.
    """
    pybamm_dict = {}
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.cell, pybamm_dict, cell)
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.negative_electrode, pybamm_dict, negative_electrode)
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.positive_electrode, pybamm_dict, positive_electrode)
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.electrolyte, pybamm_dict, electrolyte)
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.separator, pybamm_dict, separator)
    # TODO(review): the experiment pass also reads ``bpx.parameterisation.separator``
    # — this looks like a copy/paste slip; confirm which BPX section the
    # experiment parameters actually live in.
    pybamm_dict = _bpx_to_domain_param_dict(bpx.parameterisation.separator, pybamm_dict, experiment)

    # Defaults that BPX does not carry.
    pybamm_dict['Current function [A]'] = pybamm_dict['Nominal cell capacity [A.h]']
    pybamm_dict['Thermodynamic factor'] = 1.0
    for domain in [negative_electrode, separator, positive_electrode]:
        pybamm_dict[domain.pre_name + 'Bruggeman coefficient (electrolyte)'] = 1.5
    for domain in [negative_electrode, positive_electrode]:
        pybamm_dict[domain.pre_name + 'Bruggeman coefficient (electrode)'] = 0

    # Cell / electrode geometry.
    pybamm_dict['Number of cells connected in series to make a battery'] = 1
    pybamm_dict['Number of electrodes connected in parallel to make a cell'] = pybamm_dict[
        'Number of electrode pairs connected in parallel to make a cell'
    ]
    # BPX gives only the electrode area, so assume a square electrode.
    equal_len_width = math.sqrt(pybamm_dict['Electrode area [m2]'])
    pybamm_dict['Electrode width [m]'] = equal_len_width
    pybamm_dict['Electrode height [m]'] = equal_len_width
    pybamm_dict['Cell cooling surface area [m2]'] = pybamm_dict['External surface area [m2]']
    pybamm_dict['Cell volume [m3]'] = pybamm_dict['Volume [m3]']

    T_ref = pybamm_dict['Reference temperature [K]']

    def arrhenius(Ea, T):
        # Arrhenius correction factor relative to the reference temperature.
        return exp((Ea / constants.R) * ((1 / T_ref) - (1 / T)))

    # Broadcast cell-level thermal properties to every domain.
    for name in ['Specific heat capacity [J.K-1.kg-1]', 'Density [kg.m-3]', 'Thermal conductivity [W.m-1.K-1]']:
        for domain in [negative_electrode, positive_electrode, separator, negative_current_collector, positive_current_collector]:
            pybamm_name = (domain.pre_name + name[:1].lower()) + name[1:]
            if name in pybamm_dict:
                pybamm_dict[pybamm_name] = pybamm_dict[name]

    # Rename specific heat capacity to PyBaMM's unit ordering.
    for domain in [negative_electrode, positive_electrode, separator, negative_current_collector, positive_current_collector]:
        incorrect_name = domain.pre_name + 'specific heat capacity [J.K-1.kg-1]'
        new_name = domain.pre_name + 'specific heat capacity [J.kg-1.K-1]'
        if incorrect_name in pybamm_dict:
            pybamm_dict[new_name] = pybamm_dict[incorrect_name]
            del pybamm_dict[incorrect_name]

    # BPX has no current-collector section: give collectors zero extent.
    for domain in [negative_current_collector, positive_current_collector]:
        pybamm_dict[domain.pre_name + 'thickness [m]'] = 0
        pybamm_dict[domain.pre_name + 'conductivity [S.m-1]'] = 0.0

    # Fixed: the original called ``dict.update({...}, check_already_exists=False)``
    # on a plain dict, which stores a bogus 'check_already_exists' key (that
    # keyword belongs to ParameterValues.update, not dict.update).
    pybamm_dict['Total heat transfer coefficient [W.m-2.K-1]'] = 0

    # Surface area per unit volume a = 3*eps_act/R  =>  eps_act = a*R/3.
    for domain in [negative_electrode, positive_electrode]:
        pybamm_dict[domain.pre_name + 'active material volume fraction'] = (
            pybamm_dict[domain.pre_name + 'surface area per unit volume [m-1]']
            * pybamm_dict[domain.short_pre_name + 'particle radius [m]']
        ) / 3.0

    # Invert the b=1.5 Bruggeman relation: porosity = efficiency**(1/1.5).
    for domain in [negative_electrode, separator, positive_electrode]:
        pybamm_dict[domain.pre_name + 'porosity'] = pybamm_dict[domain.pre_name + 'transport efficiency'] ** (1.0 / 1.5)

    # Tabulated OCPs become linear interpolants.
    U_n = pybamm_dict[negative_electrode.pre_name + 'OCP [V]']
    if isinstance(U_n, tuple):

        def _negative_electrode_ocp(sto):
            (name, (x, y)) = U_n
            return pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

        pybamm_dict[negative_electrode.pre_name + 'OCP [V]'] = _negative_electrode_ocp

    U_p = pybamm_dict[positive_electrode.pre_name + 'OCP [V]']
    if isinstance(U_p, tuple):

        def _positive_electrode_ocp(sto):
            (name, (x, y)) = U_p
            return pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

        pybamm_dict[positive_electrode.pre_name + 'OCP [V]'] = _positive_electrode_ocp

    # Entropic change coefficients: accept a callable, a table, or a constant.
    dUdT_n = pybamm_dict[negative_electrode.pre_name + 'entropic change coefficient [V.K-1]']
    if callable(dUdT_n):

        def _negative_electrode_entropic_change(sto, c_s_max):
            return dUdT_n(sto)

    elif isinstance(dUdT_n, tuple):

        def _negative_electrode_entropic_change(sto, c_s_max):
            (name, (x, y)) = dUdT_n
            return pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

    else:

        def _negative_electrode_entropic_change(sto, c_s_max):
            return dUdT_n

    pybamm_dict[negative_electrode.pre_name + 'OCP entropic change [V.K-1]'] = _negative_electrode_entropic_change

    dUdT_p = pybamm_dict[positive_electrode.pre_name + 'entropic change coefficient [V.K-1]']
    if callable(dUdT_p):

        def _positive_electrode_entropic_change(sto, c_s_max):
            return dUdT_p(sto)

    elif isinstance(dUdT_p, tuple):

        def _positive_electrode_entropic_change(sto, c_s_max):
            (name, (x, y)) = dUdT_p
            return pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

    else:

        def _positive_electrode_entropic_change(sto, c_s_max):
            return dUdT_p

    pybamm_dict[positive_electrode.pre_name + 'OCP entropic change [V.K-1]'] = _positive_electrode_entropic_change

    # Exchange-current densities derived from BPX reaction rate constants.
    c_e = pybamm_dict['Initial concentration in electrolyte [mol.m-3]']
    F = 96485  # Faraday constant [C.mol-1]
    c_n_max = pybamm_dict['Maximum concentration in ' + negative_electrode.pre_name.lower() + '[mol.m-3]']
    k_n_norm = pybamm_dict[negative_electrode.pre_name + 'reaction rate constant [mol.m-2.s-1]']
    Ea_k_n = pybamm_dict.get(negative_electrode.pre_name + 'reaction rate constant activation energy [J.mol-1]', 0.0)
    k_n = (k_n_norm * F) / (c_n_max * (c_e ** 0.5))

    def _negative_electrode_exchange_current_density(c_e, c_s_surf, c_s_max, T):
        k_ref = k_n
        return (k_ref * arrhenius(Ea_k_n, T)) * (c_e ** 0.5) * (c_s_surf ** 0.5) * ((c_s_max - c_s_surf) ** 0.5)

    pybamm_dict[negative_electrode.pre_name + 'exchange-current density [A.m-2]'] = _copy_func(_negative_electrode_exchange_current_density)

    c_p_max = pybamm_dict['Maximum concentration in ' + positive_electrode.pre_name.lower() + '[mol.m-3]']
    k_p_norm = pybamm_dict[positive_electrode.pre_name + 'reaction rate constant [mol.m-2.s-1]']
    Ea_k_p = pybamm_dict.get(positive_electrode.pre_name + 'reaction rate constant activation energy [J.mol-1]', 0.0)
    k_p = (k_p_norm * F) / (c_p_max * (c_e ** 0.5))

    def _positive_electrode_exchange_current_density(c_e, c_s_surf, c_s_max, T):
        k_ref = k_p
        return (k_ref * arrhenius(Ea_k_p, T)) * (c_e ** 0.5) * (c_s_surf ** 0.5) * ((c_s_max - c_s_surf) ** 0.5)

    # Fixed: the original stored this under ``domain.pre_name`` — ``domain``
    # was a leftover loop variable that only happened to still equal
    # positive_electrode; name the domain explicitly.
    pybamm_dict[positive_electrode.pre_name + 'exchange-current density [A.m-2]'] = _copy_func(_positive_electrode_exchange_current_density)

    # Solid-phase diffusivities (callable / table / constant), Arrhenius-corrected.
    Ea_D_n = pybamm_dict.get(negative_electrode.pre_name + 'diffusivity activation energy [J.mol-1]', 0.0)
    D_n_ref = pybamm_dict[negative_electrode.pre_name + 'diffusivity [m2.s-1]']
    if callable(D_n_ref):

        def _negative_electrode_diffusivity(sto, T):
            return arrhenius(Ea_D_n, T) * D_n_ref(sto)

    elif isinstance(D_n_ref, tuple):

        def _negative_electrode_diffusivity(sto, T):
            (name, (x, y)) = D_n_ref
            return arrhenius(Ea_D_n, T) * pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

    else:

        def _negative_electrode_diffusivity(sto, T):
            return arrhenius(Ea_D_n, T) * D_n_ref

    pybamm_dict[negative_electrode.pre_name + 'diffusivity [m2.s-1]'] = _copy_func(_negative_electrode_diffusivity)

    Ea_D_p = pybamm_dict.get(positive_electrode.pre_name + 'diffusivity activation energy [J.mol-1]', 0.0)
    D_p_ref = pybamm_dict[positive_electrode.pre_name + 'diffusivity [m2.s-1]']
    if callable(D_p_ref):

        def _positive_electrode_diffusivity(sto, T):
            return arrhenius(Ea_D_p, T) * D_p_ref(sto)

    elif isinstance(D_p_ref, tuple):

        def _positive_electrode_diffusivity(sto, T):
            (name, (x, y)) = D_p_ref
            return arrhenius(Ea_D_p, T) * pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

    else:

        def _positive_electrode_diffusivity(sto, T):
            return arrhenius(Ea_D_p, T) * D_p_ref

    pybamm_dict[positive_electrode.pre_name + 'diffusivity [m2.s-1]'] = _copy_func(_positive_electrode_diffusivity)

    # Electrolyte diffusivity.
    Ea_D_e = pybamm_dict.get(electrolyte.pre_name + 'diffusivity activation energy [J.mol-1]', 0.0)
    D_e_ref = pybamm_dict[electrolyte.pre_name + 'diffusivity [m2.s-1]']
    if callable(D_e_ref):

        def _electrolyte_diffusivity(sto, T):
            return arrhenius(Ea_D_e, T) * D_e_ref(sto)

    elif isinstance(D_e_ref, tuple):

        def _electrolyte_diffusivity(sto, T):
            (name, (x, y)) = D_e_ref
            return arrhenius(Ea_D_e, T) * pybamm.Interpolant(x, y, sto, name=name, interpolator='linear')

    else:

        def _electrolyte_diffusivity(sto, T):
            return arrhenius(Ea_D_e, T) * D_e_ref

    pybamm_dict[electrolyte.pre_name + 'diffusivity [m2.s-1]'] = _copy_func(_electrolyte_diffusivity)

    # Electrolyte conductivity.
    Ea_sigma_e = pybamm_dict.get(electrolyte.pre_name + 'conductivity activation energy [J.mol-1]', 0.0)
    sigma_e_ref = pybamm_dict[electrolyte.pre_name + 'conductivity [S.m-1]']
    if callable(sigma_e_ref):

        def _conductivity(c_e, T):
            return arrhenius(Ea_sigma_e, T) * sigma_e_ref(c_e)

    elif isinstance(sigma_e_ref, tuple):

        def _conductivity(c_e, T):
            (name, (x, y)) = sigma_e_ref
            return arrhenius(Ea_sigma_e, T) * pybamm.Interpolant(x, y, c_e, name=name, interpolator='linear')

    else:

        def _conductivity(c_e, T):
            return arrhenius(Ea_sigma_e, T) * sigma_e_ref

    pybamm_dict[electrolyte.pre_name + 'conductivity [S.m-1]'] = _copy_func(_conductivity)

    return pybamm_dict
class AverageMeter():
    """Track a running average (or an exponentially weighted one) of scalars."""

    def __init__(self, ema=False):
        # ema=True switches from the true mean to 0.9/0.1 exponential smoothing.
        self.ema = ema
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, observed *n* times; tensors are unwrapped first."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        self.val = val
        self.sum += (val * n)
        self.count += n
        if self.ema:
            self.avg = (self.avg * 0.9) + (self.val * 0.1)
        else:
            self.avg = self.sum / self.count
class TestModisL2():
    """Integration tests for Satpy's ``modis_l2`` reader.

    NOTE(review): the bare ``.parametrize(...)`` calls below are mangled
    ``@pytest.mark.parametrize(...)`` decorators — the ``@pytest.mark`` prefix
    was lost during extraction; preserved verbatim here.
    """

    def test_available_reader(self):
        # The reader must be registered with Satpy.
        assert ('modis_l2' in available_readers())

    def test_scene_available_datasets(self, modis_l2_nasa_mod35_file):
        scene = Scene(reader='modis_l2', filenames=modis_l2_nasa_mod35_file)
        available_datasets = scene.all_dataset_names()
        assert (len(available_datasets) > 0)
        assert ('cloud_mask' in available_datasets)
        assert ('latitude' in available_datasets)
        assert ('longitude' in available_datasets)

    .parametrize(('input_files', 'has_5km', 'has_500', 'has_250', 'default_res'), [(lazy_fixture('modis_l2_nasa_mod35_file'), True, False, False, 1000)])
    def test_load_longitude_latitude(self, input_files, has_5km, has_500, has_250, default_res):
        from .test_modis_l1b import _load_and_check_geolocation
        scene = Scene(reader='modis_l2', filenames=input_files)
        shape_5km = _shape_for_resolution(5000)
        shape_500m = _shape_for_resolution(500)
        shape_250m = _shape_for_resolution(250)
        default_shape = _shape_for_resolution(default_res)
        # Allow exactly one dask compute per available geolocation resolution.
        with dask.config.set(scheduler=CustomScheduler(max_computes=(((1 + has_5km) + has_500) + has_250))):
            _load_and_check_geolocation(scene, '*', default_res, default_shape, True, check_callback=_check_shared_metadata)
            _load_and_check_geolocation(scene, 5000, 5000, shape_5km, has_5km, check_callback=_check_shared_metadata)
            _load_and_check_geolocation(scene, 500, 500, shape_500m, has_500, check_callback=_check_shared_metadata)
            _load_and_check_geolocation(scene, 250, 250, shape_250m, has_250, check_callback=_check_shared_metadata)

    def test_load_quality_assurance(self, modis_l2_nasa_mod35_file):
        scene = Scene(reader='modis_l2', filenames=modis_l2_nasa_mod35_file)
        dataset_name = 'quality_assurance'
        scene.load([dataset_name])
        quality_assurance_id = make_dataid(name=dataset_name, resolution=1000)
        assert (quality_assurance_id in scene)
        quality_assurance = scene[quality_assurance_id]
        assert (quality_assurance.shape == _shape_for_resolution(1000))
        _check_shared_metadata(quality_assurance, expect_area=True)

    .parametrize(('input_files', 'loadables', 'request_resolution', 'exp_resolution', 'exp_area'), [(lazy_fixture('modis_l2_nasa_mod35_mod03_files'), ['cloud_mask'], 1000, 1000, True), (lazy_fixture('modis_l2_imapp_mask_byte1_geo_files'), ['cloud_mask', 'land_sea_mask', 'snow_ice_mask'], None, 1000, True)])
    def test_load_category_dataset(self, input_files, loadables, request_resolution, exp_resolution, exp_area):
        scene = Scene(reader='modis_l2', filenames=input_files)
        kwargs = ({'resolution': request_resolution} if (request_resolution is not None) else {})
        scene.load(loadables, **kwargs)
        for ds_name in loadables:
            cat_id = make_dataid(name=ds_name, resolution=exp_resolution)
            assert (cat_id in scene)
            cat_data_arr = scene[cat_id]
            assert isinstance(cat_data_arr.data, da.Array)
            cat_data_arr = cat_data_arr.compute()
            assert (cat_data_arr.shape == _shape_for_resolution(exp_resolution))
            assert (cat_data_arr.values[(0, 0)] == 0.0)
            assert (cat_data_arr.attrs.get('resolution') == exp_resolution)
            # Category products must stay integer-typed with a fill value.
            assert np.issubdtype(cat_data_arr.dtype, np.integer)
            assert (cat_data_arr.attrs.get('_FillValue') is not None)
            _check_shared_metadata(cat_data_arr, expect_area=exp_area)

    .parametrize(('input_files', 'exp_area'), [(lazy_fixture('modis_l2_nasa_mod35_file'), False), (lazy_fixture('modis_l2_nasa_mod35_mod03_files'), True)])
    def test_load_250m_cloud_mask_dataset(self, input_files, exp_area):
        scene = Scene(reader='modis_l2', filenames=input_files)
        dataset_name = 'cloud_mask'
        scene.load([dataset_name], resolution=250)
        cloud_mask_id = make_dataid(name=dataset_name, resolution=250)
        assert (cloud_mask_id in scene)
        cloud_mask = scene[cloud_mask_id]
        assert isinstance(cloud_mask.data, da.Array)
        cloud_mask = cloud_mask.compute()
        assert (cloud_mask.shape == _shape_for_resolution(250))
        assert (cloud_mask.values[(0, 0)] == 0.0)
        assert np.issubdtype(cloud_mask.dtype, np.integer)
        assert (cloud_mask.attrs.get('_FillValue') is not None)
        _check_shared_metadata(cloud_mask, expect_area=exp_area)

    .parametrize(('input_files', 'loadables', 'exp_resolution', 'exp_area', 'exp_value'), [(lazy_fixture('modis_l2_nasa_mod06_file'), ['surface_pressure'], 5000, True, 4.0), (lazy_fixture('modis_l2_imapp_snowmask_file'), ['snow_mask'], 1000, False, 1.0), (lazy_fixture('modis_l2_imapp_snowmask_geo_files'), ['snow_mask'], 1000, True, 1.0)])
    def test_load_l2_dataset(self, input_files, loadables, exp_resolution, exp_area, exp_value):
        scene = Scene(reader='modis_l2', filenames=input_files)
        scene.load(loadables)
        for ds_name in loadables:
            assert (ds_name in scene)
            data_arr = scene[ds_name]
            assert isinstance(data_arr.data, da.Array)
            data_arr = data_arr.compute()
            assert (data_arr.values[(0, 0)] == exp_value)
            assert (data_arr.shape == _shape_for_resolution(exp_resolution))
            assert (data_arr.attrs.get('resolution') == exp_resolution)
            _check_shared_metadata(data_arr, expect_area=exp_area)
class OnnxExportTestCaseV2(TestCase):
    """Integration tests of ONNX export for PyTorch and TensorFlow models.

    NOTE(review): the bare ``(_get_models_to_test(...))``, ``_torch``, ``_tf``
    and ``_vision`` lines below are mangled decorators — upstream they are
    ``@parameterized.expand(...)``, ``@require_torch``, ``@require_tf`` and
    ``@require_vision``; the decorator prefixes were lost during extraction
    and are preserved verbatim here.
    """

    def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        """Export *model_name* for *feature* to ONNX and validate its outputs."""
        from transformers.onnx import export
        model_class = FeaturesManager.get_model_class_for_feature(feature)
        config = AutoConfig.from_pretrained(model_name)
        model = model_class.from_config(config)
        onnx_config = onnx_config_class_constructor(model.config)
        if is_torch_available():
            from transformers.utils import torch_version
            if (torch_version < onnx_config.torch_onnx_minimum_version):
                pytest.skip(f'Skipping due to incompatible PyTorch version. Minimum required is {onnx_config.torch_onnx_minimum_version}, got: {torch_version}')
        # Pick a preprocessor matching the model's main input modality.
        if (model.main_input_name == 'input_ids'):
            preprocessor = AutoTokenizer.from_pretrained(model_name)
            # Some tokenizers lack a pad token; reuse EOS so export can pad.
            if (not getattr(config, 'pad_token_id', None)):
                config.pad_token_id = preprocessor.eos_token_id
        elif (model.main_input_name == 'pixel_values'):
            preprocessor = AutoFeatureExtractor.from_pretrained(model_name)
        else:
            raise ValueError(f'Unsupported model input name: {model.main_input_name}')
        with NamedTemporaryFile('w') as output:
            try:
                (onnx_inputs, onnx_outputs) = export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path(output.name))
                validate_model_outputs(onnx_config, preprocessor, model, Path(output.name), onnx_outputs, onnx_config.atol_for_validation)
            except (RuntimeError, ValueError) as e:
                self.fail(f'{name}, {feature} -> {e}')

    (_get_models_to_test(PYTORCH_EXPORT_MODELS))
    _torch
    _vision
    def test_pytorch_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    (_get_models_to_test(PYTORCH_EXPORT_WITH_PAST_MODELS))
    _torch
    def test_pytorch_export_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    (_get_models_to_test(PYTORCH_EXPORT_SEQ2SEQ_WITH_PAST_MODELS))
    _torch
    def test_pytorch_export_seq2seq_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    (_get_models_to_test(TENSORFLOW_EXPORT_DEFAULT_MODELS))
    _tf
    _vision
    def test_tensorflow_export(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    (_get_models_to_test(TENSORFLOW_EXPORT_WITH_PAST_MODELS), skip_on_empty=True)
    _tf
    def test_tensorflow_export_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)

    (_get_models_to_test(TENSORFLOW_EXPORT_SEQ2SEQ_WITH_PAST_MODELS), skip_on_empty=True)
    _tf
    def test_tensorflow_export_seq2seq_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor):
        self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor)
def test_guard_against_duplicate_packets():
    """Duplicate QM-question packets within the suppression window are dropped.

    NOTE(review): several service-name string literals in this test were
    corrupted during extraction (truncated at ``'_``); they are preserved
    verbatim and must be restored from the upstream file before running.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # NOTE(review): corrupted literal — originally a full ServiceInfo(...).
    zc.registry.async_add(ServiceInfo('_ 'Test._ server='Test._ port=4))
    zc.question_history = QuestionHistoryWithoutSuppression()

    class SubListener(_listener.AsyncListener):

        def handle_query_or_defer(self, msg: DNSIncoming, addr: str, port: int, transport: _engine._WrappedTransport, v6_flow_scope: Union[(Tuple[()], Tuple[(int, int)])]=()) -> None:
            super().handle_query_or_defer(msg, addr, port, transport, v6_flow_scope)
    listener = SubListener(zc)
    listener.transport = MagicMock()
    # Build a multicast (QM) question packet.
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question = r.DNSQuestion('x._ const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    packet_with_qm_question = query.packets()[0]
    # A second, different QM question packet.
    query3 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question3 = r.DNSQuestion('x._ay._tcp.local.', const._TYPE_PTR, const._CLASS_IN)
    query3.add_question(question3)
    packet_with_qm_question2 = query3.packets()[0]
    # A unicast (QU) question packet — QU packets are never suppressed.
    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion('x._ const._TYPE_PTR, const._CLASS_IN)
    question2.unicast = True
    query2.add_question(question2)
    packet_with_qu_question = query2.packets()[0]
    addrs = ('1.2.3.4', 43)
    with patch.object(listener, 'handle_query_or_defer') as _handle_query_or_defer:
        start_time = current_time_millis()
        # First sighting of the QM packet is processed.
        listener._process_datagram_at_time(False, len(packet_with_qm_question), start_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        # Identical packet at the same time is suppressed as a duplicate.
        listener._process_datagram_at_time(False, len(packet_with_qm_question), start_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()
        # After the 1000ms window (1100ms later) it is processed again.
        new_time = (start_time + 1100)
        listener._process_datagram_at_time(False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        # A different QM packet is never confused with the first.
        listener._process_datagram_at_time(False, len(packet_with_qm_question2), new_time, packet_with_qm_question2, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        listener._process_datagram_at_time(False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        # QU questions bypass duplicate suppression entirely.
        listener._process_datagram_at_time(False, len(packet_with_qu_question), new_time, packet_with_qu_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        listener._process_datagram_at_time(False, len(packet_with_qu_question), new_time, packet_with_qu_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        # Suppression behaves identically with debug logging disabled.
        log.setLevel(logging.WARNING)
        listener._process_datagram_at_time(False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_called_once()
        _handle_query_or_defer.reset_mock()
        listener._process_datagram_at_time(False, len(packet_with_qm_question), new_time, packet_with_qm_question, addrs)
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()
        # Undecodable garbage is dropped without reaching the handler.
        listener._process_datagram_at_time(False, len(b'garbage'), new_time, b'garbage', addrs)
        _handle_query_or_defer.assert_not_called()
        _handle_query_or_defer.reset_mock()
    zc.close()
def Transformer(input_vocab_size: int, target_vocab_size: int, encoder_input_size: int=None, decoder_input_size: int=None, num_layers: int=6, d_model: int=512, num_heads: int=8, dff: int=2048, dropout_rate: float=0.1) -> tf.keras.Model: inputs = [tf.keras.layers.Input(shape=(encoder_input_size,), dtype=tf.int64), tf.keras.layers.Input(shape=(decoder_input_size,), dtype=tf.int64)] (encoder_input, decoder_input) = inputs encoder = Encoder(num_layers=num_layers, d_model=d_model, num_heads=num_heads, dff=dff, vocab_size=input_vocab_size, dropout_rate=dropout_rate)(encoder_input) decoder = Decoder(num_layers=num_layers, d_model=d_model, num_heads=num_heads, dff=dff, vocab_size=target_vocab_size, dropout_rate=dropout_rate)(decoder_input, encoder) output = tf.keras.layers.Dense(target_vocab_size)(decoder) return tf.keras.Model(inputs=inputs, outputs=output)
class Effect6316(BaseEffect): type = 'passive' def handler(fit, src, context, projectionRange, **kwargs): for attrName in ('buffDuration', 'warfareBuff1Value', 'warfareBuff2Value', 'warfareBuff3Value', 'warfareBuff4Value'): fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Command')), attrName, src.getModifiedItemAttr('eliteBonusCommandDestroyer1'), skill='Command Destroyers', **kwargs)
class DST_Optimizer(BaseOptimizer):
    """Molecule optimizer driven by a GNN surrogate (GCN) that is trained
    online on oracle scores; candidates are expanded from the current
    population and diversified with DPP selection each round."""

    def __init__(self, args=None):
        super().__init__(args)
        # Identifier used when persisting results.
        self.model_name = 'dst'

    def _optimize(self, oracle, config):
        # Bind the objective; ``self.oracle`` wraps the raw evaluator and
        # maintains the scored-molecule buffer (``self.mol_buffer``).
        self.oracle.assign_evaluator(oracle)
        gnn = GCN(nfeat=50, nhid=100, n_out=1, num_layer=2)
        gnn = gnn.to(device)
        gnn.device = device
        # Running (smiles, score) pairs used to (re)train the surrogate.
        all_smiles_score_list = []
        population_size = config['population_size']
        lamb = config['lamb']
        topk = config['topk']
        epsilon = config['epsilon']
        # Seed scaffolds; re-injected into the candidate pool every round.
        start_smiles_lst = ['C1(N)=NC=CC=N1', 'C1(C)=NC=CC=N1', 'C1(C)=CC=CC=C1', 'C1(N)=CC=CC=C1', 'CC', 'C1(C)CCCCC1']
        np.random.seed(self.seed)
        torch.manual_seed(self.seed)
        random.seed(self.seed)
        # NOTE(review): duplicated re-seed -- harmless, likely a paste error.
        random.seed(self.seed)
        shuffle(self.all_smiles)
        # Warm start: score 2000 random library molecules and pre-train the
        # surrogate on them.
        warmstart_smiles_lst = self.all_smiles[:2000]
        warmstart_smiles_score = self.oracle(warmstart_smiles_lst)
        warmstart_smiles_score_lst = list(zip(warmstart_smiles_lst, warmstart_smiles_score))
        warmstart_smiles_score_lst.sort(key=(lambda x: x[1]), reverse=True)
        all_smiles_score_list.extend(warmstart_smiles_score_lst)
        train_gnn(all_smiles_score_list, gnn)
        print('##### train GNN ######')
        # Initial population: fixed scaffolds plus the 50 best warm-start hits.
        init_smiles_lst = (start_smiles_lst + [i[0] for i in warmstart_smiles_score_lst[:50]])
        current_set = set(init_smiles_lst)
        patience = 0
        old_scores = 0
        while True:
            if (len(self.oracle) >= 5000):
                # Persist results once the oracle call count reaches 5000.
                self.save_result(((((self.model_name + '_') + oracle.name) + '_') + str(self.seed)))
            if (len(self.oracle) > 100):
                self.sort_buffer()
                # Snapshot of the current top-100 scores for the
                # convergence/patience check at the end of the round.
                old_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
            else:
                old_scores = 0
            next_set = set()
            print('Sampling from current state')
            t1 = time()
            current_list = list(current_set)
            current_list = [(i, gnn, topk, epsilon) for i in current_list]
            # Expand every population member in parallel; ``pool`` is a
            # module-level worker pool (defined outside this view).
            results = pool.map(inference_utils.optimize_dst, current_list)
            for i in results:
                next_set = next_set.union(i)
            t2 = time()
            print('Sampling from current state takes', str(((t2 - t1) / 60))[:5], 'minutes')
            smiles_lst = list(next_set)
            shuffle(smiles_lst)
            # Cap the pool size and always keep the seed scaffolds; drop
            # invalid SMILES before scoring.
            smiles_lst = (smiles_lst[:config['pool_size']] + start_smiles_lst)
            smiles_lst = list(filter(is_valid, smiles_lst))
            score_lst = self.oracle(smiles_lst)
            if self.finish:
                # Oracle call budget exhausted.
                print('max oracle hit, abort ...... ')
                break
            print('##### train GNN online ######')
            # Online surrogate update with the newly scored molecules.
            all_smiles_score_list.extend(list(zip(smiles_lst, score_lst)))
            train_gnn(all_smiles_score_list, gnn)
            smiles_score_lst = [(smiles, score) for (smiles, score) in zip(smiles_lst, score_lst)]
            smiles_score_lst.sort(key=(lambda x: x[1]), reverse=True)
            print(smiles_score_lst[:5], 'Oracle num: ', len(self.oracle))
            print('diversify molecules ...')
            # DPP selection trades score against diversity (weight ``lamb``).
            (current_set, _, _) = dpp(smiles_score_lst=smiles_score_lst, num_return=population_size, lamb=lamb)
            if (len(self.oracle) > 500):
                self.sort_buffer()
                new_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
                if (new_scores == old_scores):
                    # Top-100 unchanged this round -> count toward patience.
                    patience += 1
                    if (patience >= self.args.patience):
                        self.log_intermediate(finish=True)
                        print('convergence criteria met, abort ...... ')
                        break
                else:
                    patience = 0
class TRPaned(): Kind = None def test_ctr(self): self.Kind().destroy() def test_pre_alloc(self): p = self.Kind() p.set_relative(0.25) self.assertEqual(p.get_relative(), 0.25) self.assertRaises(ValueError, p.set_relative, 2.0) self.assertRaises(ValueError, p.set_relative, (- 2.0)) def test_visible_no_setup(self): p = self.Kind() with visible(p): pass def test_visible_pre_setup_children(self): p = self.Kind() p.pack1(Gtk.Button()) p.pack2(Gtk.Button()) p.set_relative(0.75) self.assertAlmostEqual(p.get_relative(), 0.75) with visible(p, width=200, height=200) as p: self.assertAlmostEqual(p.get_relative(), 0.75, 2) def test_visible_pre_setup_empty(self): p = self.Kind() p.set_relative(0.75) self.assertEqual(p.get_relative(), 0.75) with visible(p) as p: self.assertAlmostEqual(p.get_relative(), 0.75, 2) def test_min_size_child(self): p = self.Kind() p.set_size_request(200, 200) p.pack1(Gtk.Label(), True, False) b2 = Gtk.Button() b2.set_size_request(50, 50) p.pack2(b2, True, False) p.set_relative(0.5) with visible(p) as p: self.assertEqual(p.get_position(), 100)
_test def test_avgpooling3d_legacy_interface(): old_layer = keras.layers.AveragePooling3D(pool_size=(2, 2, 2), border_mode='valid', name='avgpooling3d') new_layer = keras.layers.AvgPool3D(pool_size=(2, 2, 2), padding='valid', name='avgpooling3d') assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())) old_layer = keras.layers.AveragePooling3D((2, 2, 2), (2, 2, 2), 'valid', name='avgpooling3d') new_layer = keras.layers.AvgPool3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid', name='avgpooling3d') assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())) old_layer = keras.layers.AveragePooling3D((2, 2, 2), padding='valid', dim_ordering='tf', name='avgpooling3d') new_layer = keras.layers.AvgPool3D(pool_size=(2, 2, 2), padding='valid', data_format='channels_last', name='avgpooling3d') assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())) old_layer = keras.layers.AveragePooling3D((2, 2, 2), padding='valid', dim_ordering='th', name='avgpooling3d') new_layer = keras.layers.AvgPool3D(pool_size=(2, 2, 2), padding='valid', data_format='channels_first', name='avgpooling3d') assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config())) old_layer = keras.layers.AveragePooling3D((2, 2, 2), padding='valid', dim_ordering='default', name='avgpooling3d') new_layer = keras.layers.AvgPool3D(pool_size=(2, 2, 2), padding='valid', name='avgpooling3d') assert (json.dumps(old_layer.get_config()) == json.dumps(new_layer.get_config()))
def test_venv_creator_from_mapping_maximal_no_pip(): d = {'path': '/arb', 'system_site_packages': True, 'clear': False, 'symlinks': True, 'upgrade': True, 'with_pip': False, 'prompt': 'arbprompt', 'upgrade_pip': False, 'quiet': True} context = get_simple_context() with patch('pypyr.venv.EnvBuilderWithExtraDeps') as mock_builder: mock_builder.return_value.context = context vcs = list(VenvCreator.from_mapping(d)) assert (len(vcs) == 1) vc = vcs[0] vc.create() vc.install_dependencies() expected_path = str(Path('/arb').resolve()) assert (vc.path == expected_path) assert (vc.pip_extras is None) assert (not vc.upgrade_pip) assert vc._is_done assert (vc._future is None) mock_builder.assert_called_once_with(system_site_packages=True, clear=False, symlinks=True, upgrade=True, with_pip=False, prompt='arbprompt', upgrade_deps=False, is_quiet=True) mocked_builder = mock_builder.return_value mocked_builder.create.assert_called_once_with(expected_path) mocked_builder.upgrade_dependencies.assert_not_called() mocked_builder.pip_install_extras.assert_not_called()
class TestEgg(TestZip): def setUp(self): super().setUp() self._fixture_on_path('example-21.12-py3.6.egg') def test_files(self): for file in files('example'): path = str(file.dist.locate_file(file)) assert ('.egg/' in path), path def test_normalized_name(self): dist = distribution('example') assert (dist._normalized_name == 'example')