code
stringlengths
281
23.7M
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run a hyperparameter search with Ray Tune and return the best trial as a BestRun.

    Args:
        trainer: a transformers Trainer whose `hp_space` / `compute_objective` drive the search.
        n_trials: number of Ray Tune samples to draw.
        direction: 'minimize' or 'maximize' (only the first 3 chars are passed to Ray as mode).
        **kwargs: forwarded to `ray.tune.run` (resources_per_trial, scheduler, ...).
    """
    import ray

    def _objective(trial, local_trainer, checkpoint_dir=None):
        # Swap the notebook progress callback for the console one inside Ray workers.
        try:
            from transformers.utils.notebook import NotebookProgressCallback
            if local_trainer.pop_callback(NotebookProgressCallback):
                local_trainer.add_callback(ProgressCallback)
        except ModuleNotFoundError:
            pass
        checkpoint = None
        if checkpoint_dir:
            # Resume from the last checkpoint directory Ray restored for this trial.
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    checkpoint = os.path.join(checkpoint_dir, subdir)
        local_trainer.objective = None
        local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
        # If no evaluation happened during training, evaluate once at the end
        # so the trial still reports an objective to Tune.
        if (getattr(local_trainer, 'objective', None) is None):
            metrics = local_trainer.evaluate()
            local_trainer.objective = local_trainer.compute_objective(metrics)
            local_trainer._tune_save_checkpoint()
            ray.tune.report(objective=local_trainer.objective, **metrics, done=True)

    # The memory tracker holds non-serializable state; disable it before pickling the trainer.
    if (not trainer._memory_tracker.skip_memory_metrics):
        from .trainer_utils import TrainerMemoryTracker
        logger.warning('Memory tracking for your Trainer is currently enabled. Automatically disabling the memory tracker since the memory tracker is not serializable.')
        trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)
    # TensorBoard writers are not picklable either; pop it now and restore after the search.
    _tb_writer = trainer.pop_callback(TensorBoardCallback)
    # The model is re-created per trial; dropping it keeps the serialized trainer small.
    trainer.model = None
    if ('resources_per_trial' not in kwargs):
        kwargs['resources_per_trial'] = {'cpu': 1}
        if (trainer.args.n_gpu > 0):
            kwargs['resources_per_trial']['gpu'] = 1
        resource_msg = ('1 CPU' + (' and 1 GPU' if (trainer.args.n_gpu > 0) else ''))
        logger.info(f'No `resources_per_trial` arg was passed into `hyperparameter_search`. \nSetting it to a default value of {resource_msg} for each trial.')
    gpus_per_trial = kwargs['resources_per_trial'].get('gpu', 0)
    trainer.args._n_gpu = gpus_per_trial
    if ('progress_reporter' not in kwargs):
        from ray.tune import CLIReporter
        kwargs['progress_reporter'] = CLIReporter(metric_columns=['objective'])
    if (('keep_checkpoints_num' in kwargs) and (kwargs['keep_checkpoints_num'] > 0)):
        trainer.use_tune_checkpoints = True
        if (kwargs['keep_checkpoints_num'] > 1):
            logger.warning(f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. Checkpoints are usually huge, consider setting `keep_checkpoints_num=1`.")
    if ('scheduler' in kwargs):
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
        # PBT exploits other trials' checkpoints, so checkpointing must be on.
        if isinstance(kwargs['scheduler'], PopulationBasedTraining):
            if (not trainer.use_tune_checkpoints):
                logger.warning("You are using PopulationBasedTraining but you haven't enabled checkpointing. This means your trials will train from scratch everytime they are exploiting new configurations. Consider enabling checkpointing by passing `keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`.")
        # Early-stopping schedulers need intermediate reports, which only happen with in-training eval.
        if (isinstance(kwargs['scheduler'], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)) and ((not trainer.args.do_eval) or (trainer.args.evaluation_strategy == IntervalStrategy.NO))):
            raise RuntimeError("You are using {cls} as a scheduler but you haven't enabled evaluation during training. This means your trials will not report intermediate results to Ray Tune, and can thus not be stopped early or used to exploit other trials parameters. If this is what you want, do not use {cls}. \nIf you would like to use {cls}, make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the Trainer `args`.".format(cls=type(kwargs['scheduler']).__name__))
    trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)
    # NOTE(review): the bare `(trainable)` below is a no-op expression; it looks like the
    # residue of a stripped `@functools.wraps(trainable)` decorator — confirm upstream.
    (trainable)
    def dynamic_modules_import_trainable(*args, **kwargs):
        # Re-register the `datasets` dynamic modules inside each Ray worker process,
        # otherwise user-defined dataset scripts cannot be imported there.
        if is_datasets_available():
            import datasets.load
            dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), '__init__.py')
            spec = importlib.util.spec_from_file_location('datasets_modules', dynamic_modules_path)
            datasets_modules = importlib.util.module_from_spec(spec)
            sys.modules[spec.name] = datasets_modules
            spec.loader.exec_module(datasets_modules)
        return trainable(*args, **kwargs)
    # Preserve Ray's mixin metadata on the wrapper.
    if hasattr(trainable, '__mixins__'):
        dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__
    analysis = ray.tune.run(dynamic_modules_import_trainable, config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric='objective', mode=direction[:3])
    best_run = BestRun(best_trial.trial_id, best_trial.last_result['objective'], best_trial.config)
    if (_tb_writer is not None):
        trainer.add_callback(_tb_writer)
    return best_run
class PicklableWrapper(object):
    """Wrap an object so it pickles via cloudpickle even when standard pickle fails
    (e.g. for lambdas/closures).

    Wrapping is idempotent: wrapping a PicklableWrapper unwraps it first.
    Calls and attribute access are forwarded to the wrapped object.
    """

    def __init__(self, obj):
        # Unwrap nested wrappers so we always hold the raw object.
        while isinstance(obj, PicklableWrapper):
            obj = obj._obj
        self._obj = obj

    def __reduce__(self):
        # Serialize via cloudpickle, which handles lambdas and closures.
        s = cloudpickle.dumps(self._obj)
        return (cloudpickle.loads, (s,))

    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)

    def __getattr__(self, attr):
        # Forward everything except '_obj' to the wrapped object.
        if attr not in ['_obj']:
            return getattr(self._obj, attr)
        # BUG fix: the original `return getattr(self, attr)` recursed forever when
        # '_obj' itself was missing (e.g. mid-unpickling). __getattr__'s contract
        # is to raise AttributeError for unresolvable names.
        raise AttributeError(attr)
class AzureMLCallback(TrainerCallback):
    """Trainer callback that forwards scalar training metrics to an AzureML run."""

    def __init__(self, azureml_run=None):
        if not is_azureml_available():
            raise RuntimeError('AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.')
        self.azureml_run = azureml_run

    def on_init_end(self, args, state, control, **kwargs):
        from azureml.core.run import Run
        # Lazily grab the ambient AzureML run context, on the main process only.
        if self.azureml_run is None and state.is_world_process_zero:
            self.azureml_run = Run.get_context()

    def on_log(self, args, state, control, logs=None, **kwargs):
        if not (self.azureml_run and state.is_world_process_zero):
            return
        for metric_name, metric_value in logs.items():
            # AzureML only accepts scalar numeric metrics.
            if isinstance(metric_value, (int, float)):
                self.azureml_run.log(metric_name, metric_value, description=metric_name)
class ReconstructionLoss(nn.Module):
    """Per-sample L1/L2 reconstruction loss summed over (C, H, W), averaged over the batch.

    Args:
        losstype: 'l2' (squared error) or 'l1' (Charbonnier-smoothed absolute error).
        eps: smoothing constant inside the sqrt for the 'l1' variant.
    """

    def __init__(self, losstype='l2', eps=1e-06):
        super(ReconstructionLoss, self).__init__()
        self.losstype = losstype
        self.eps = eps

    def forward(self, x, target):
        if self.losstype == 'l2':
            return torch.mean(torch.sum((x - target) ** 2, (1, 2, 3)))
        elif self.losstype == 'l1':
            diff = x - target
            # Charbonnier penalty: sqrt(diff^2 + eps) is smooth near zero.
            return torch.mean(torch.sum(torch.sqrt(diff * diff + self.eps), (1, 2, 3)))
        # BUG fix: the original printed a message and returned 0, which silently
        # zeroes gradients on a typo'd losstype; fail loudly instead.
        raise ValueError('reconstruction loss type error: %r' % (self.losstype,))
def test_SimpleImputer_params_vs_sklearn():
    """skcriteria's SimpleImputer must expose sklearn's params, minus a few, with aliases applied."""
    ignore = {'verbose', 'add_indicator', 'copy'}
    alias = {'keep_empty_features': 'keep_empty_criteria'}
    sklearn_params = sklimpute.SimpleImputer().get_params(deep=False)
    expected = sorted(alias.get(name, name) for name in sklearn_params if name not in ignore)
    result = sorted(impute.SimpleImputer._skcriteria_parameters)
    assert result == expected
def preprocess_data(X, Y, num_init):
    """Scale X per-dimension into [-1, 1], standardize Y, and split off the first
    `num_init` points as the initialization set.

    Returns (init_x, init_y, remaining_x, remaining_y).
    """
    lo, _ = X.min(0)
    hi, _ = X.max(0)
    # Map each feature into [-1, 1].
    X = 2 * ((X - lo) / (hi - lo) - 0.5)
    # Standardize targets to zero mean, unit variance.
    Y = (Y - Y.mean()) / Y.std()
    init_x, init_y = X[:num_init], Y[:num_init]
    return (init_x, init_y, X[num_init:], Y[num_init:])
(frozen=True) class _ExponentialSchedule(): learning_rate: float decay_steps: int decay_rate: float staircase: bool = False def value(self, t): m = (t / self.decay_steps) if self.staircase: m = np.floor(m) return (self.learning_rate * (self.decay_rate ** m))
@pytest.mark.parametrize('search, documents, k', [pytest.param((((retriever_a * retriever_b) * retriever_c) + documents()), documents(), k, id=f'Union retrievers: {retriever_a.__class__.__name__} | {retriever_b.__class__.__name__} | {retriever_c.__class__.__name__} k: {k}') for k in [None, 3, 4] for retriever_c in cherche_retrievers(key='id', on='title') for retriever_b in cherche_retrievers(key='id', on='article') for retriever_a in cherche_retrievers(key='id', on='author')])
def test_retriever_union(search, documents: list, k: int):
    """Union pipeline of three retrievers must return documents enriched with all mapped fields.

    BUG fix: the `@pytest.mark` prefix of the parametrize decorator was lost in
    extraction, leaving a bare `.parametrize(...)` syntax error; restored here.
    """
    search = search.add(documents)
    answers = search(q='France', k=k)
    # NOTE(review): the `else 1` arm makes this assert vacuously true when k is None — confirm intent.
    assert ((len(answers) == min(k, 1)) if (k is not None) else 1)
    for sample in answers:
        for key in ['title', 'article', 'author']:
            assert (key in sample)
    answers = search(q='Wikipedia', k=k)
    assert ((len(answers) == min(k, len(documents))) if (k is not None) else len(documents))
    answers = search(q='Unknown', k=k)
    assert (len(answers) == 0)
def test_text(args, device_id, pt, step):
    """Run test-set inference for a saved summarizer checkpoint."""
    device = 'cpu' if args.visible_gpus == '-1' else 'cuda'
    # An explicit checkpoint path takes precedence over args.test_from.
    test_from = pt if pt != '' else args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    # Override CLI args with the flags the model was trained with.
    opt = vars(checkpoint['opt'])
    for key in opt.keys():
        if key in model_flags:
            setattr(args, key, opt[key])
    print(args)
    tokenizer = BertTokenizer.from_pretrained(args.bert_dir)
    model = Summarizer(args, device, tokenizer.vocab, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.test_batch_size, args.test_batch_ex_size, device, shuffle=False, is_test=True)
    predictor = build_predictor(args, tokenizer, model, logger)
    predictor.translate(test_iter, step)
def get_args():
    """Parse command-line arguments for resolving overlaps in ctm files.

    Returns the parsed namespace; the three positional file arguments are opened
    by argparse (segments/ctm_in for reading, ctm_out for writing).
    """
    usage = ' Python script to resolve overlaps in ctms. May be used with\n    utils/data/subsegment_data_dir.sh. '
    # BUG fix: the original passed `usage` as the first positional argument of
    # ArgumentParser, which is `prog` (the program name shown in help), not a
    # description of the tool.
    parser = argparse.ArgumentParser(description=usage)
    parser.add_argument('segments', type=argparse.FileType('r'), help='use segments to resolve overlaps')
    parser.add_argument('ctm_in', type=argparse.FileType('r'), help='input_ctm_file')
    parser.add_argument('ctm_out', type=argparse.FileType('w'), help='output_ctm_file')
    parser.add_argument('--verbose', type=int, default=0, help='Higher value for more verbose logging.')
    args = parser.parse_args()
    if args.verbose > 2:
        # `logger`/`handler` are module-level objects configured elsewhere in the file.
        logger.setLevel(logging.DEBUG)
        handler.setLevel(logging.DEBUG)
    return args
def coords(obj):
    """Yield coordinate tuples from a GeoJSON-like mapping or a nested sequence."""
    if isinstance(obj, (tuple, list)):
        seq = obj
    elif 'geometry' in obj:
        seq = obj['geometry']['coordinates']
    else:
        seq = obj.get('coordinates', obj)
    for element in seq:
        if isinstance(element, (float, int)):
            # Flat position (sequence of numbers): emit it once and stop.
            yield tuple(seq)
            break
        yield from coords(element)
class Effect6503(BaseEffect):
    """Passive hull bonus: boosts capacitor need of capital energy turrets
    per level of the Amarr Dreadnought skill."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        bonus = src.getModifiedItemAttr('shipBonusDreadnoughtA3')
        turret_filter = lambda mod: mod.item.requiresSkill('Capital Energy Turret')
        fit.modules.filteredItemBoost(turret_filter, 'capacitorNeed', bonus, skill='Amarr Dreadnought', **kwargs)
def matching(ratio):
    """Greedy FSC-based clustering over a mixed image set and excel-logging of the results.

    `ratio` controls how many 'true' images are mixed into the rotated set.
    Fixes applied: time.clock() (removed in Python 3.8) -> time.perf_counter();
    joining an int list with str.join (TypeError) -> map(str, ...).
    """
    imgs = {}
    result = []
    img_set = []
    true_image_set = []
    img_snr10_true = img('true__out__images.pickle')
    img_snr10_rotate = img('rotation_var__out__images.pickle')
    # Mix `ratio` fraction of true images into the rotated set, remembering their indices.
    for i in range(100):
        img_set.append(img_snr10_rotate[i])
        if i < int(100 * ratio):
            img_set.append(img_snr10_true[i])
            true_image_set.append(len(img_set) - 1)
    for i in range(len(img_set)):
        imgs[i] = img_set[i]
    s = SSNR2D(imgs)
    # BUG fix: time.clock() was removed in Python 3.8.
    start = time.perf_counter()
    # Pick two distinct random seeds whose combined FSC exceeds the threshold.
    while True:
        result = []
        center_num1 = random.randrange(0, len(img_set), 1)
        center_num2 = random.randrange(0, len(img_set), 1)
        if center_num1 != center_num2:
            result.append(center_num1)
            result.append(center_num2)
            s.set_img_set(result)
            e = s.get_fsc_sum()
            if e > 7.2:
                break
    center = center_num1
    result = [center]
    temp = [center]
    # Greedy growth from the first seed.
    # NOTE(review): `temp` keeps accumulating every tried candidate inside the inner
    # for-loop (nothing is popped), so each FSC evaluation includes all previous
    # candidates — confirm this is intended and not a missing `temp.pop()`.
    while True:
        distance = []
        for i in range(len(img_set)):
            if i != center:
                temp.append(i)
                s.set_img_set(temp)
                distance.append(s.get_fsc_sum())
            else:
                distance.append(-1)
        center = distance.index(max(distance))
        temp = [center]
        if center in result:
            break
        else:
            result.append(center)
    center = center_num2
    if center not in result:
        # Same greedy growth from the second seed.
        # NOTE(review): `temp` is NOT reset before this loop — it starts with the
        # leftover state from the first growth pass; confirm intended.
        while True:
            distance = []
            for i in range(len(img_set)):
                if i != center:
                    temp.append(i)
                    s.set_img_set(temp)
                    distance.append(s.get_fsc_sum())
                else:
                    distance.append(-1)
            center = distance.index(max(distance))
            temp = [center]
            if center in result:
                break
            else:
                result.append(center)
    end = time.perf_counter()
    resolution = s.get_fsc_sum()
    t = end - start
    measure = cal_accuracy(result, true_image_set)
    print('Resolution = ' + str(resolution))
    # BUG fix: `result` holds ints; str.join requires strings.
    print('Result: ' + ', '.join(map(str, result)))
    sheet = 0
    start_y = 1
    write_to_excel(sheet, 0, start_y, ratio)
    write_to_excel(sheet, 1, start_y, resolution)
    write_to_excel(sheet, 2, start_y, measure[0])
    write_to_excel(sheet, 3, start_y, measure[1])
    write_to_excel(sheet, 4, start_y, measure[2])
    write_to_excel(sheet, 5, start_y, t)
class SubprocessOutputPoller():
    # Collects a subprocess's stdout on a background thread and signals `data_ready`
    # once output has been quiet for 0.5s, so consumers can batch-read lines.
    def __init__(self, process):
        super().__init__()
        self.process = process
        self._lines = []             # buffered output lines (lock-protected)
        self._lines_lock = Lock()
        self._last_seen = time.monotonic()  # time of the most recent stdout line
        self.data_ready = Event()    # set when buffered lines have "settled"
        self._polling_thread = Thread(target=self.poll_stdout)
        self._ready_thread = Thread(target=self.check_ready)
        self._polling_thread.start()
        self._ready_thread.start()

    def poll_stdout(self):
        # Blocks on readline until the process closes stdout (b'' sentinel).
        for line in iter(self.process.stdout.readline, b''):
            with self._lines_lock:
                self._lines.append(line.rstrip())
            self._last_seen = time.monotonic()

    def check_ready(self):
        # Poll every 50ms; flag readiness after a 0.5s quiet period, and exit
        # once the stdout poller has finished (process ended).
        while True:
            time.sleep(0.05)
            if (self._lines and ((time.monotonic() - self._last_seen) > 0.5)):
                self.data_ready.set()
            if (not self._polling_thread.is_alive()):
                break

    def get_lines(self):
        # Atomically drain the buffer and clear the ready flag.
        with self._lines_lock:
            lines = self._lines
            self._lines = []
        self.data_ready.clear()
        return lines

    def shutdown(self):
        # Joins both threads; callers must ensure the process has exited first,
        # otherwise poll_stdout blocks forever on readline.
        self._polling_thread.join()
        self._ready_thread.join()
def get_freeman_coordination(img: np.ndarray, contour: list) -> list:
    """Map each contour point to its Freeman-box coordination; the first element is
    re-appended at the end to close the chain.

    Points that do not map to any Freeman box (helper returns None) are skipped.
    Fixes: removed a stray `''` no-op expression (residue of a stripped comment)
    and replaced the invalid `[(int, int)]` literal annotations.
    """
    freeman_coordination_list = []
    freeman_x_coordination_list, freeman_y_coordination_list = __get_freeman_box_list(img=img)
    for point in contour:
        x, y = point
        point_freeman_coordination = __get_freeman_coordination(point=(x, y), freeman_x_list=freeman_x_coordination_list, freeman_y_list=freeman_y_coordination_list)
        if point_freeman_coordination is not None:
            freeman_coordination_list.append(point_freeman_coordination)
    # Close the chain by repeating the first coordination.
    # NOTE(review): raises IndexError if no point mapped — confirm contours are never empty.
    freeman_coordination_list.append(freeman_coordination_list[0])
    return freeman_coordination_list
def populate_params():
    """Collect the node's private ROS parameters into a dict keyed by bare name."""
    keys = ('fps', 'frame_id', 'retry_on_fail', 'buffer_queue_size', 'python_node')
    return {key: get_param('~' + key) for key in keys}
def get_optimizer_param_groups(model, model_config, optimizer_config, optimizer_schedulers):
    """Build optimizer parameter groups, splitting trunk/head and regularized
    (weight-decayed) vs unregularized parameters, each with its own LR scheduler.
    """
    # Shortcut: one group, one LR, one weight decay for everything.
    if optimizer_config.construct_single_param_group_only:
        return [{'params': list(model.parameters()), 'lr': optimizer_schedulers['lr'], 'weight_decay': optimizer_config.weight_decay}]
    # NOTE(review): requiring 'lr_head' when a different head LR is NOT used looks
    # inverted — confirm this condition against the original project.
    if (not optimizer_config.head_optimizer_params.use_different_lr):
        assert ('lr_head' in optimizer_schedulers)
    (trunk_regularized_params, trunk_unregularized_params) = ([], [])
    (head_regularized_params, head_unregularized_params) = ([], [])
    regularized_params = []
    unregularized_params = []
    for (name, module) in model.named_modules():
        # Head linear/conv weights; bias placement follows regularize_bias.
        if (('head' in name) and (isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES))):
            head_regularized_params.append(module.weight)
            if (module.bias is not None):
                if optimizer_config['regularize_bias']:
                    head_regularized_params.append(module.bias)
                else:
                    head_unregularized_params.append(module.bias)
        elif (('head' in name) and isinstance(module, _BN_TYPES)):
            (head_regularized_params, head_unregularized_params) = _get_bn_optimizer_params(module, head_regularized_params, head_unregularized_params, optimizer_config)
        elif (isinstance(module, nn.Linear) or isinstance(module, _CONV_TYPES)):
            trunk_regularized_params.append(module.weight)
            if (module.bias is not None):
                if optimizer_config['regularize_bias']:
                    trunk_regularized_params.append(module.bias)
                else:
                    # NOTE(review): both branches append to trunk_regularized_params;
                    # by symmetry with the head branch above, this else should
                    # probably target trunk_unregularized_params — confirm upstream.
                    trunk_regularized_params.append(module.bias)
        elif isinstance(module, _BN_TYPES):
            (trunk_regularized_params, trunk_unregularized_params) = _get_bn_optimizer_params(module, trunk_regularized_params, trunk_unregularized_params, optimizer_config)
        elif (len(list(module.children())) >= 0):
            # NOTE(review): `>= 0` is always true, so every remaining module's direct
            # parameters land here; `== 0` (leaf modules only) may have been intended.
            for params in module.parameters(recurse=False):
                regularized_params.append(params)
    # Parameters whose names match configured patterns are explicitly unregularized
    # and removed from whichever regularized list they ended up in.
    if optimizer_config.non_regularized_parameters:
        non_reg_param_names = optimizer_config.non_regularized_parameters
        for (name, param) in model.named_parameters():
            hits = [p for p in non_reg_param_names if (p in name)]
            if any(hits):
                unregularized_params.append(param)
        (non_reg_params, trunk_regularized_params, trunk_unregularized_params) = _assign_regularized_params(parameters_to_unregularize=unregularized_params, regularized_param_list=trunk_regularized_params, unregularized_param_list=trunk_unregularized_params)
        (non_reg_params, head_regularized_params, head_unregularized_params) = _assign_regularized_params(parameters_to_unregularize=unregularized_params, regularized_param_list=head_regularized_params, unregularized_param_list=head_unregularized_params)
        (non_reg_params, regularized_params, _) = _assign_regularized_params(parameters_to_unregularize=unregularized_params, regularized_param_list=regularized_params)
    # Freeze parameters listed as non-trainable in the model config.
    non_trainable_params = []
    for (name, param) in model.named_parameters():
        if (name in model_config.NON_TRAINABLE_PARAMS):
            param.requires_grad = False
            non_trainable_params.append(param)
    # Drop frozen parameters from every group.
    trainable_params = _filter_trainable(model.parameters())
    trunk_regularized_params = _filter_trainable(trunk_regularized_params)
    trunk_unregularized_params = _filter_trainable(trunk_unregularized_params)
    head_regularized_params = _filter_trainable(head_regularized_params)
    head_unregularized_params = _filter_trainable(head_unregularized_params)
    regularized_params = _filter_trainable(regularized_params)
    logging.info(f''' Trainable params: {len(trainable_params)}, Non-Trainable params: {len(non_trainable_params)}, Trunk Regularized Parameters: {len(trunk_regularized_params)}, Trunk Unregularized Parameters {len(trunk_unregularized_params)}, Head Regularized Parameters: {len(head_regularized_params)}, Head Unregularized Parameters: {len(head_unregularized_params)} Remaining Regularized Parameters: {len(regularized_params)} Remaining Unregularized Parameters: {len(unregularized_params)}''')
    param_groups = [{'params': trunk_regularized_params, 'lr': optimizer_schedulers['lr'], 'weight_decay': optimizer_config.weight_decay}, {'params': trunk_unregularized_params, 'lr': optimizer_schedulers['lr'], 'weight_decay': 0.0}, {'params': head_regularized_params, 'lr': optimizer_schedulers['lr_head'], 'weight_decay': optimizer_config.head_optimizer_params.weight_decay}, {'params': head_unregularized_params, 'lr': optimizer_schedulers['lr_head'], 'weight_decay': 0.0}]
    if (len(regularized_params) > 0):
        param_groups.append({'params': regularized_params, 'lr': optimizer_schedulers['lr']})
    if (len(unregularized_params) > 0):
        param_groups.append({'params': unregularized_params, 'lr': optimizer_schedulers['lr'], 'weight_decay': 0.0})
    return param_groups
@mock.patch('beeref.widgets.welcome_overlay.BeeSettings.get_recent_files', return_value=[])
def test_welcome_overlay_when_no_recent_files(recent_files_mock, qapp):
    """With no recent files, the files widget must not be present in the overlay layout.

    NOTE(review): the `@mock.patch` decorator name was lost in extraction (leaving a
    bare parenthesized call, a syntax error); restored here. The patch passes the
    mock as the first positional argument, so that parameter is restored too —
    confirm both against the original project.
    """
    parent = QtWidgets.QMainWindow()
    view = BeeGraphicsView(qapp, parent)
    overlay = WelcomeOverlay(view)
    overlay.show()
    assert overlay.layout.indexOf(overlay.files_widget) < 0
def resample_and_save(predicted, target_shape, output_file, force_separate_z=False, interpolation_order=1, interpolation_order_z=0):
    """Resample a softmax prediction (array or .npy path) to target_shape and save
    the argmax segmentation as compressed uint8."""
    if isinstance(predicted, str):
        # Prediction was staged to disk: load it, then remove the temp file.
        assert isfile(predicted), 'If isinstance(segmentation_softmax, str) then isfile(segmentation_softmax) must be True'
        staged_path = deepcopy(predicted)
        predicted = np.load(predicted)
        os.remove(staged_path)
    resampled = resample_data_or_seg(predicted, target_shape, False, order=interpolation_order, do_separate_z=force_separate_z, cval=0, order_z=interpolation_order_z)
    segmentation = resampled.argmax(0)
    np.savez_compressed(output_file, data=segmentation.astype(np.uint8))
class _DenseBlock(nn.Module):
    """DenseNet block: each layer receives the concatenated features of all previous layers."""

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):
        super(_DenseBlock, self).__init__()
        for layer_idx in range(num_layers):
            # Input channels grow by growth_rate with every preceding layer.
            in_features = num_input_features + layer_idx * growth_rate
            dense_layer = _DenseLayer(in_features, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient)
            self.add_module('denselayer%d' % (layer_idx + 1), dense_layer)

    def forward(self, init_features):
        features = [init_features]
        for _, dense_layer in self.named_children():
            features.append(dense_layer(*features))
        return torch.cat(features, 1)
class AppsfuelOAuth2(BaseOAuth2):
    """Appsfuel OAuth2 authentication backend."""
    name = 'appsfuel'
    ID_KEY = 'user_id'
    # NOTE(review): the endpoint URL strings were stripped in extraction (leaving
    # unterminated literals). The values below are taken from the upstream
    # python-social-auth Appsfuel backend — confirm against the project.
    AUTHORIZATION_URL = 'http://app.appsfuel.com/content/permission'
    ACCESS_TOKEN_URL = 'https://api.appsfuel.com/v1/live/oauth/token'
    ACCESS_TOKEN_METHOD = 'POST'
    USER_DETAILS_URL = 'https://api.appsfuel.com/v1/live/user'

    def get_user_details(self, response):
        """Build the user-details dict from the provider response."""
        email = response.get('email', '')
        # BUG fix: the original `email.split('')` raises ValueError (empty separator);
        # the username is the local part before '@'.
        username = email.split('@')[0] if email else ''
        fullname, first_name, last_name = self.get_user_names(response.get('display_name', ''))
        return {'username': username, 'fullname': fullname, 'first_name': first_name, 'last_name': last_name, 'email': email}

    def user_data(self, access_token, *args, **kwargs):
        """Fetch user data from the provider API using the access token."""
        return self.get_json(self.USER_DETAILS_URL, params={'access_token': access_token})
@app.route('/xml_add', methods=['POST'])
def xml_add():
    """Create an XML file named by form field para1 inside directory ../para2/.

    NOTE(review): the Flask route decorator name was lost in extraction (leaving a
    bare parenthesized tuple with a keyword, a syntax error); `@app.route` restored
    here — confirm the registering object is named `app`.
    """
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    # SECURITY(review): request.form values are interpolated into filesystem paths
    # without sanitization — path traversal via para1/para2 is possible; validate them.
    obj = [elem.replace('.xml', '') for elem in os.listdir('../%s/' % request.form['para2'])]
    if request.form['para1'] in obj:
        code = 201
        msg = ''
    elif request.form['para1'] == '':
        code = 201
        msg = ''
    else:
        code = 200
        msg = ''
    # NOTE(review): the file is (re)written even on the duplicate/empty-name branches
    # above — confirm this is intended and not meant to sit in the final else.
    with open((('../%s/' % request.form['para2']) + request.form['para1']) + '.xml', 'w') as f:
        f.write('<?xml version="1.0"?><root></root>')
    return jsonify(code=code, msg=msg)
class Visitor(VisitorBase, ABC, Generic[_Leaf_T]):
    """Tree visitor that invokes the user callback on every subtree and returns the tree unchanged."""

    def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:
        """Visit subtrees bottom-up (children before parents)."""
        for node in tree.iter_subtrees():
            self._call_userfunc(node)
        return tree

    def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:
        """Visit subtrees top-down (parents before children)."""
        for node in tree.iter_subtrees_topdown():
            self._call_userfunc(node)
        return tree
@pytest.mark.parametrize('image_name', png_images)
def test_pil_saving(image_test, image_name):
    """Round-trip each PNG test image through the PIL encoder; skip when PIL is absent.

    BUG fix: the `@pytest.mark` prefix of the parametrize decorator was lost in
    extraction, leaving a bare `.parametrize(...)` syntax error; restored here.
    """
    try:
        from PIL import Image
    except ImportError:
        pytest.skip('PIL not available')
    from pyglet.image.codecs.pil import PILImageEncoder
    image_test.test_image_saving(PILImageEncoder(), image_name)
def define_config():
    """Return the default hyper-parameter configuration as one flat dict."""
    # Planning / training-loop settings.
    planning = {'horizon': 15, 'sequence_length': 50, 'update_steps': 100, 'pretrain_steps': 100, 'discount': 0.99, 'lambda_': 0.95, 'steps_per_update': 1000, 'steps_per_critic_clone': 1000, 'batch_size': 32, 'warmup_training_steps': 5000}
    # World-model architecture / KL settings.
    world_model = {'kl_scale': 1.0, 'kl_mix': 0.8, 'free_nats': 3.0, 'deterministic_size': 200, 'stochastic_size': 30, 'sampling_scale': 1.0, 'units': 400, 'posterior_samples': 5}
    # Learning rates and gradient clipping.
    optimization = {'model_learning_rate': 0.0001, 'model_learning_rate_factor': 5.0, 'actor_learning_rate': 8e-05, 'critic_learning_rate': 8e-05, 'model_grad_clip_norm': 100.0, 'actor_grad_clip_norm': 5.0, 'critic_grad_clip_norm': 1.0}
    # SWAG ensembling.
    swag = {'swag_burnin': 500, 'swag_period': 200, 'swag_models': 20, 'swag_decay': 0.8}
    # Safety / constrained-RL settings.
    safety_cfg = {'cost_threshold': 25.0, 'penalty_mu': 5e-09, 'lagrangian_mu': 1e-06, 'penalty_power_factor': 1e-05, 'safety_critic_learning_rate': 0.0002, 'safety_critic_grad_clip_norm': 50.0, 'safety_lambda': 0.95, 'safety_discount': 0.995, 'cost_imbalance_weight': 100.0}
    # Experiment / environment / logging settings.
    experiment = {'total_training_steps': 500000, 'action_repeat': 2, 'environment': 'dmc_cartpole_balance', 'safety': False, 'observation_type': 'rgb_image', 'seed': 314, 'episode_length': 1000, 'training_steps_per_epoch': 25000, 'evaluation_steps_per_epoch': 10000, 'log_dir': 'runs', 'render_episodes': 1, 'evaluate_model': False, 'cuda_device': '-1', 'precision': 16}
    # Merge in the original declaration order so key order is unchanged.
    return {**planning, **world_model, **optimization, **swag, **safety_cfg, **experiment}
def get_memory_list(unit='G', number_only=False, init_pid=None):
    """Return formatted USS memory usage for a process and each of its direct children.

    Defaults to the current process when init_pid is None.
    """
    from pyrl.utils.data import num_to_str
    if init_pid is None:
        init_pid = os.getpid()
    root = psutil.Process(init_pid)
    usages = [root.memory_full_info().uss]
    usages.extend(child.memory_full_info().uss for child in root.children())
    return [num_to_str(uss, unit, number_only=number_only) for uss in usages]
def _check_mopidy_extensions_service() -> Dict[(str, Tuple[(bool, str)])]:
    """Scan the mopidy service log (newest line first) and report, per extension,
    whether it is usable along with a human-readable reason.
    """
    # Requires a sudo-whitelisted helper to read the mopidy log.
    log = subprocess.check_output(['sudo', '/usr/local/sbin/raveberry/read_mopidy_log'], universal_newlines=True)
    # Per-extension list of (line-predicate, error message) pairs, checked in order.
    error_handling = {
        'spotify': [
            ((lambda line: (line.startswith('ERROR') and ('spotify.session' in line) and ('USER_NEEDS_PREMIUM' in line))), 'Spotify Premium is required'),
            ((lambda line: (line.startswith('ERROR') and ('spotify.session' in line))), 'User or Password are wrong'),
            ((lambda line: (line.startswith('ERROR') and ('mopidy_spotify.web' in line))), 'Client ID or Client Secret are wrong or expired'),
            ((lambda line: (line.startswith('WARNING') and ('spotify' in line) and ('The extension has been automatically disabled' in line))), 'Configuration Error'),
        ],
        'soundcloud': [
            ((lambda line: (line.startswith('ERROR') and ('Invalid "auth_token"' in line))), 'auth_token is invalid'),
            ((lambda line: (line.startswith('WARNING') and ('soundcloud' in line) and ('The extension has been automatically disabled' in line))), 'Configuration Error'),
        ],
        'jamendo': [
            ((lambda line: (line.startswith('ERROR') and ('Invalid "client_id"' in line))), 'client_id is invalid'),
        ],
    }
    success_messages = {'spotify': 'Login successful', 'soundcloud': 'auth_token valid', 'jamendo': 'client_id could not be checked'}
    extensions = {}
    # Walk the log from the most recent line backwards so the newest verdict wins.
    for line in log.split('\n')[::(- 1)]:
        for extension in ['spotify', 'soundcloud', 'jamendo']:
            if (extension in extensions):
                continue  # already decided by a newer line
            for (error_condition, error_message) in error_handling[extension]:
                if error_condition(line):
                    extensions[extension] = (False, error_message)
            # Generic auto-disable warning doubles as a configuration error.
            if (line.startswith('WARNING') and (extension in line) and ('The extension has been automatically disabled' in line)):
                extensions[extension] = (False, 'Configuration Error')
        if (('spotify' in extensions) and ('soundcloud' in extensions) and ('jamendo' in extensions)):
            break
        # Startup marker: anything not flagged by newer lines is assumed healthy.
        if line.startswith('Started Mopidy music server.'):
            for extension in ['spotify', 'soundcloud', 'jamendo']:
                if (extension not in extensions):
                    extensions[extension] = (True, success_messages[extension])
            break
    # No verdict anywhere in the log: enable optimistically.
    for extension in ['spotify', 'soundcloud', 'jamendo']:
        if (extension not in extensions):
            extensions[extension] = (True, 'No info found, enabling to be safe')
    return extensions
@macos_kernel_api(params={'grp': POINTER, 'attr': POINTER})
def hook__lck_mtx_alloc_init(ql, address, params):
    """Hook for XNU lck_mtx_alloc_init: allocate a mutex on the emulated heap,
    initialize it with the optional group/attr structures, and return its address.

    NOTE(review): the decorator name was mangled to `_kernel_api(...)` in the
    extracted source; restored as qiling's `macos_kernel_api` registration
    decorator — confirm against the project.
    """
    lck_addr = ql.os.heap.alloc(ctypes.sizeof(lck_mtx_t))
    lck = lck_mtx_t(ql, lck_addr)
    # NULL (0) pointers mean "no group / no attributes".
    if params['grp'] > 0:
        grp = lck_grp_t(ql, params['grp'])
        grp.loadFromMem()
    else:
        grp = None
    if params['attr'] > 0:
        attr = lck_attr_t(ql, params['attr'])
        attr.loadFromMem()
    else:
        attr = None
    lck_mtx_init(ql, lck, grp, attr)
    return lck_addr
def convert(filename, stream=None):
    """Convert a .vim color-scheme file to a style written by StyleWriter.

    Writes to `stream` when given; otherwise returns the output as a string.
    BUG fix: the original used the Python 2 `file()` builtin (gone in Python 3)
    and never closed the handle; use open() in a context manager.
    """
    name = path.basename(filename)
    if name.endswith('.vim'):
        name = name[:-4]
    with open(filename) as f:
        code = f.read()
    writer = StyleWriter(code, name)
    out = stream if stream is not None else StringIO()
    writer.write(out)
    if stream is None:
        return out.getvalue()
def getIdxMap_torch(img, offset=False):
    """Return an (H*W, 2) tensor of normalized (row, col) coordinates for every
    non-NaN pixel of the first channel of `img` (shape (C, H, W)).

    With offset=True, coordinates are shifted to pixel centers and divided by H;
    otherwise they are divided by (H - 1).
    """
    import torch
    channels, height, width = img.shape
    coords = torch.stack(torch.where(~torch.isnan(img[0])))
    if offset:
        coords = coords.float() + 0.5
    coords = coords.view(2, height * width).float().contiguous().transpose(0, 1)
    if offset:
        return coords / height
    return coords / (height - 1)
@with_fixtures(SqlAlchemyFixture, AccessDomainFixture)
def test_collaborator_rights(sql_alchemy_fixture, access_domain_fixture):
    """Collaborator edit/add rights must be granted explicitly per capability.

    NOTE(review): the decorator name was mangled to `_fixtures(...)` in extraction;
    restored as reahl-tofu's `@with_fixtures` — confirm against the project.
    """
    account = access_domain_fixture.account
    address_book = access_domain_fixture.address_book
    other_address_book = access_domain_fixture.other_address_book
    # Bare allow() grants access but no edit/add capability.
    other_address_book.allow(account)
    assert (not other_address_book.can_be_edited_by(account))
    assert (not other_address_book.can_be_added_to_by(account))
    # Explicit capability flags grant the corresponding rights.
    other_address_book.allow(account, can_edit_addresses=True, can_add_addresses=True)
    assert other_address_book.can_be_edited_by(account)
    assert other_address_book.can_be_added_to_by(account)
class Serializer(xml_serializer.Serializer):
    """XML serializer that flattens tag fields to their string representation."""

    def handle_tagfield(self, obj, field):
        # Serialize the tag value as if it were a plain string field.
        tag_string = str(getattr(obj, field.name))
        self.handle_field(FakeObject(field.name, tag_string), FakeField(field.name))

    def handle_fk_field(self, obj, field):
        if isinstance(field, SingleTagField):
            self.handle_tagfield(obj, field)
            return
        super(Serializer, self).handle_fk_field(obj, field)

    def handle_m2m_field(self, obj, field):
        if isinstance(field, TagField):
            self.handle_tagfield(obj, field)
            return
        super(Serializer, self).handle_m2m_field(obj, field)
('jsonpath_ready')
# NOTE(review): the bare string expression above is almost certainly the argument of
# a stripped registration decorator (e.g. `@predicate('jsonpath_ready')`) — confirm
# against the original project.
def jsonpath_ready(stage, depspec, stagespec):
    """Return True when every JSONPath dependency expression in `depspec` is satisfied."""
    log.debug('checking jsonpath ready predicate\n%s', depspec)
    dependencies = depspec['expressions']
    for x in dependencies:
        depmatches = stage.view.query(x, stage.view.steps)
        if (not depmatches):
            log.debug('no query matches, not ready')
            return False
        # A match whose first value lacks '_nodeid' refers to a subworkflow scope
        # rather than a concrete step.
        issubwork = ('_nodeid' not in depmatches[0].value[0])
        if issubwork:
            log.debug('dependency is a subworkflow. determine if scope is done')
            # Every matched scope must be fully done.
            if (not all([scope_done(scope['_offset'], stage.view) for match in depmatches for scope in match.value])):
                return False
        elif (not all([y.has_result() for y in stage.view.getSteps(x)])):
            # Concrete steps: all of them must already have a result.
            return False
    log.debug('all checks ok, predicate is True')
    return True
def readFragmentScores(name='fpscores'):
    """Load the fragment-score table into the module-level `_fscores` dict.

    The pickle holds rows of [score, fragment_id, ...]; each fragment id is mapped
    to its float score. BUG fixes: `cPickle` is Python 2 only (use `pickle`), and
    the gzip handle was never closed (use a context manager).
    """
    import gzip
    import pickle
    global _fscores
    if name == 'fpscores':
        # Default table lives next to this module.
        name = op.join(op.dirname(__file__), name)
    with gzip.open('%s.pkl.gz' % name) as f:
        _fscores = pickle.load(f)
    outDict = {}
    for i in _fscores:
        for j in range(1, len(i)):
            outDict[i[j]] = float(i[0])
    _fscores = outDict
class TestSwitchEncoder():
    def test_switch_encoder_and_head(self):
        """Smoke-test fitting a model that switches encoders: one epoch, CPU only."""
        model = FakeTrainableModelWithSwitchEncoder()
        loader = PairsSimilarityDataLoader(FakePairDataset(), batch_size=3)
        trainer_args = Quaterion.trainer_defaults(model, loader)
        # Drop the second default callback and force a minimal CPU run.
        trainer_args['callbacks'].pop(1)
        trainer_args.update(accelerator='cpu', max_epochs=1)
        Quaterion.fit(trainable_model=model, trainer=pl.Trainer(**trainer_args), train_dataloader=loader)
def get_center(mask):
    """Return the integer (cx, cy) centroid of a binary mask via image moments,
    or None when the mask is empty.

    Accepts a torch tensor or numpy array; leading singleton batch/channel dims
    are collapsed (assumes their product is 1 — TODO confirm with callers).
    """
    if isinstance(mask, torch.Tensor):
        # BUG fix: the original chained .detach() twice; once is sufficient.
        mask = mask.detach().cpu().numpy()
    if len(mask.shape) > 2:
        mask = mask.reshape(mask.shape[-2:])
    mask = mask > 0.5
    moment = cv2.moments(mask.astype('float'))
    if moment['m00'] != 0:
        cx = int(moment['m10'] / moment['m00'])
        cy = int(moment['m01'] / moment['m00'])
        return (cx, cy)
    return None
class PythonImplementationRequirement(Requirement):
    """Requirement that is satisfied only on a specific Python implementation
    (e.g. 'cpython' or 'pypy')."""

    def __init__(self, implementation_name: str):
        self.implementation_name = implementation_name
        super().__init__()

    def _evaluate(self) -> bool:
        # sys.implementation.name is the lowercase implementation identifier.
        return sys.implementation.name == self.implementation_name

    def fail_reason(self) -> str:
        return f'{self.implementation_name} is required'
def list_action():
    """List stored memory profiles with the timestamp parsed from each filename.

    BUG fix: the print template contained the literal placeholder '(unknown)'
    (extraction damage) while the `filename` keyword was passed to .format() but
    never referenced; the `{filename}` field is restored.
    """
    parser = ArgumentParser(usage='mprof list\nThis command takes no argument.')
    parser.add_argument('--version', action='version', version=mp.__version__)
    args = parser.parse_args()
    filenames = get_profile_filenames('all')
    for n, filename in enumerate(filenames):
        # Timestamp is the trailing YYYYMMDDhhmmss chunk of the basename.
        ts = osp.splitext(filename)[0].split('_')[-1]
        print('{index} {filename} {hour}:{min}:{sec} {day}/{month}/{year}'.format(index=n, filename=filename, year=ts[:4], month=ts[4:6], day=ts[6:8], hour=ts[8:10], min=ts[10:12], sec=ts[12:14]))
class ResNet(nn.Module):
    # ResNet backbone with optional "deconv" replacements for conv layers (in which
    # case batch-norm is omitted), an optional channel-deconv stage before pooling,
    # and an optional "delinear" replacement for the final classifier.
    def __init__(self, block, num_blocks, num_classes=10, deconv=None, delinear=None, channel_deconv=None):
        super(ResNet, self).__init__()
        self.in_planes = 64
        if deconv:
            # Deconv factory replaces the stem conv; normalization is handled inside it.
            self.deconv = True
            self.conv1 = deconv(3, 64, kernel_size=3, stride=1, padding=1)
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        if (not deconv):
            # Plain conv path gets an explicit BatchNorm after the stem.
            self.bn1 = nn.BatchNorm2d(64)
        if channel_deconv:
            self.deconv1 = channel_deconv()
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, deconv=deconv)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, deconv=deconv)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, deconv=deconv)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, deconv=deconv)
        if delinear:
            self.linear = delinear((512 * block.expansion), num_classes)
        else:
            self.linear = nn.Linear((512 * block.expansion), num_classes)

    def _make_layer(self, block, planes, num_blocks, stride, deconv):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = ([stride] + ([1] * (num_blocks - 1)))
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride, deconv))
            self.in_planes = (planes * block.expansion)
        return nn.Sequential(*layers)

    def forward(self, x):
        # bn1 only exists on the non-deconv path (see __init__).
        if hasattr(self, 'bn1'):
            out = F.relu(self.bn1(self.conv1(x)))
        else:
            out = F.relu(self.conv1(x))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        if hasattr(self, 'deconv1'):
            out = self.deconv1(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def load_expert():
    """Load expert annotations for the COVID test split.

    Reads every JSON file (skipping vim ``.swp`` leftovers) under
    ``<covid_data_dir>/test/expert/<expert>/`` and returns a mapping
    ``{expert_name: {docId: labels}}`` for both expert groups.
    """
    base = os.path.join(covid_data_dir, 'test', 'expert')
    final_result = {}
    for expert in ('biomedical_expert', 'computer_science_expert'):
        folder = os.path.join(base, expert)
        labels_by_doc = {}
        for name in os.listdir(folder):
            if '.swp' in name:
                continue  # skip editor swap files
            with open(os.path.join(folder, name), 'r', encoding='utf-8') as infile:
                record = json.load(infile)
            labels_by_doc[record['docId']] = record['labels']
        final_result[expert] = labels_by_doc
    return final_result
class EnsembleAgent(CustomAgent):
    """Agent that maintains an observation buffer (`naozi`) and a knowledge
    graph (`kg`) and greedily ranks actions / answers from them."""

    def get_ranks_greedy(self, obs, infos, input_quest, input_quest_mask, quest_id_list, previous_commands, previous_dynamics, previous_belief):
        # Greedy (no-exploration) ranking for one interaction step. Returns
        # (action_rank, ctrlf_rank, ctrlf_word_mask, current_dynamics,
        #  current_belief, replay_info).
        with torch.no_grad():
            batch_size = len(obs)
            if (self.not_finished_yet is None):
                # First step of the episode: initialise the per-game
                # "still running" flags and seed both memories.
                self.not_finished_yet = np.ones((len(obs),), dtype='float32')
                self.naozi.push_batch(copy.deepcopy(obs))
                self.kg.push_batch(copy.deepcopy(obs), previous_commands, [item['srl'] for item in infos])
            else:
                # Later steps: only update memories of games still running.
                for i in range(batch_size):
                    if (self.not_finished_yet[i] == 1.0):
                        self.naozi.push_one(i, copy.deepcopy(obs[i]))
                        self.kg.push_one(i, copy.deepcopy(obs[i]), previous_commands[i], infos[i]['srl'])
            description_list = self.naozi.get()
            (input_description, input_description_mask, description_id_list) = self.get_agent_inputs(description_list)
            # Mask of words the ctrl-F command is allowed to search for.
            (ctrlf_word_mask, _) = self.get_word_mask(quest_id_list, description_id_list)
            current_belief = ([None] * batch_size)
            if (self.enable_graph_input == 'gata'):
                # Belief-graph mode: update beliefs, but freeze them for
                # games that have already finished.
                (current_adjacency_matrix, current_belief, previous_adjacency_matrix) = self.graph_update_during_rl(input_description, input_description_mask, previous_belief)
                for i in range(batch_size):
                    if (self.not_finished_yet[i] == 0.0):
                        current_adjacency_matrix[i] = previous_adjacency_matrix[i]
                        current_belief[i] = previous_belief[i]
                (node_representations, node_mask) = self.encode_belief_graph(current_adjacency_matrix, use_model='online')
                (node_vocabulary, relation_vocabulary, graph_triplets) = (([None] * batch_size), ([None] * batch_size), ([None] * batch_size))
            elif (self.enable_graph_input != 'false'):
                # Explicit KG mode: encode the accumulated graph with a GCN.
                (graph_triplets, node_vocabulary, relation_vocabulary, graph_adj_np) = self.kg.get()
                graph_adj = to_pt(graph_adj_np, enable_cuda=self.use_cuda, type='float')
                (node_features, node_mask, relation_features, relation_mask) = self.get_gcn_input_features(node_vocabulary, relation_vocabulary, use_model='online')
                node_representations = self.get_graph_representations(node_features, node_mask, relation_features, relation_mask, graph_adj, use_model='online')
            else:
                # No graph input at all.
                (graph_triplets, node_vocabulary, relation_vocabulary) = (([None] * batch_size), ([None] * batch_size), ([None] * batch_size))
                (node_representations, node_mask, relation_mask) = (None, None, None)
            (action_rank, ctrlf_rank, current_dynamics) = self.get_ranks(input_description, input_description_mask, input_quest, input_quest_mask, ctrlf_word_mask, node_representations, node_mask, node_vocabulary, previous_dynamics, use_model='online')
            # A game stops interacting once its previous action was 'stop'.
            for i in range(batch_size):
                if (self.prev_actions[(- 1)][i] == 'stop'):
                    self.prev_step_is_still_interacting[i] = 0.0
            replay_info = [to_pt(self.prev_step_is_still_interacting, False, 'float')]
            return (action_rank, ctrlf_rank, ctrlf_word_mask, current_dynamics, current_belief, replay_info)

    def get_qa_ranks_greedy(self, observation_list, quest_list, belief):
        # Greedy ranking for question answering; returns (point_rank, mask)
        # from answer_question.
        with torch.no_grad():
            batch_size = len(observation_list)
            current_belief = None
            if (self.enable_graph_input == 'gata'):
                # Belief-graph mode: the caller supplies the belief directly.
                current_belief = belief
                (graph_adj, node_vocabulary, relation_vocabulary) = (None, ([None] * batch_size), ([None] * batch_size))
            elif (self.enable_graph_input != 'false'):
                (_, node_vocabulary, relation_vocabulary, graph_adj_np) = self.kg.get()
                graph_adj = to_pt(graph_adj_np, enable_cuda=self.use_cuda, type='float')
            else:
                (graph_adj, node_vocabulary, relation_vocabulary) = (None, ([None] * batch_size), ([None] * batch_size))
            (point_rank, mask) = self.answer_question(observation_list, quest_list, node_vocabulary, relation_vocabulary, graph_adj, current_belief)
            return (point_rank, mask)
_head('infer_links')
class InferLinksHead(torch.nn.Module):
    """Link-inference head: scores node pairs by the dot product of their
    features, passed through a small linear predictor.

    NOTE(review): the bare `_head('infer_links')` call above looks like a
    registration decorator whose '@' was lost — confirm against the registry.
    """

    def __init__(self, dim_in, dim_out):
        super().__init__()
        if cfg.dataset.infer_link_label != 'edge':
            raise ValueError(f'Infer-link task {cfg.dataset.infer_link_label} not available.')
        # Binary edge / non-edge prediction from the scalar pair score.
        self.predictor = torch.nn.Linear(1, 2)

    def forward(self, batch):
        # Gather the two endpoint feature rows for every candidate edge.
        endpoints = batch.x[batch.complete_edge_index]
        pair_scores = (endpoints[0] * endpoints[1]).sum(1)
        logits = self.predictor(pair_scores.unsqueeze(1))
        return (logits, batch.y)
def prepare_locked_transfer(properties, defaults):
    """Turn ``LockedTransferProperties`` into locked-transfer message kwargs.

    Returns ``(params, signer, sender)``: the constructor kwargs with the
    lock assembled, a ``LocalSigner`` built from the popped private key, and
    the popped sender address.
    """
    properties: LockedTransferProperties = create_properties(properties, defaults)
    params = unwrap_canonical_identifier(properties.__dict__)
    # Only the hash of the secret travels in the message, never the secret.
    secrethash = sha256(params.pop('secret')).digest()
    params['lock'] = Lock(amount=params.pop('amount'), expiration=params.pop('expiration'), secrethash=secrethash)
    if (params['locksroot'] == GENERATE):
        # Default locksroot: derived from this single lock.
        params['locksroot'] = keccak(params['lock'].as_bytes)
    params['signature'] = EMPTY_SIGNATURE
    params.pop('route_states')  # not part of the message, drop it
    if (params['metadata'] == GENERATE):
        params['metadata'] = create(MetadataProperties())
    return (params, LocalSigner(params.pop('pkey')), params.pop('sender'))
def test_period_object_column():
    """Sorting and opening the filter dropdown on a PeriodIndex-backed index
    and column must not raise."""
    period_index = pd.period_range(start='2000', periods=10, freq='B')
    frame = pd.DataFrame({'a': 5, 'b': period_index}, index=period_index)
    widget = QgridWidget(df=frame)
    for field in ('index', 'b'):
        widget._handle_qgrid_msg_helper({'type': 'change_sort', 'sort_field': field, 'sort_ascending': True})
        widget._handle_qgrid_msg_helper({'type': 'show_filter_dropdown', 'field': field, 'search_val': None})
# NOTE(review): the tuple below looks like the argument list of a stripped
# skip decorator (skip the class when pygraphviz is missing) — confirm
# against the original source.
((pgv is None), 'pygraphviz is not available')
class TestParallelWithPyGraphviz(TestParallel):
    """Re-runs the TestParallel suite with a pygraphviz-backed machine."""

    def setUp(self):
        # Machine subclass that forces the pygraphviz graph engine on.
        class PGVMachine(HierarchicalGraphMachine):

            def __init__(self, *args, **kwargs):
                kwargs['use_pygraphviz'] = True
                super(PGVMachine, self).__init__(*args, **kwargs)
        super(TestParallelWithPyGraphviz, self).setUp()
        self.machine_cls = PGVMachine
class GraphGather(torch.nn.Module):
    """Graph gather layer: attention-weighted sum of per-node embeddings.

    Attention logits come from an MLP over the concatenated [hidden ‖ input]
    node features; padded nodes (``node_mask == 0``) are pushed down by
    ``big_positive`` before the softmax so they receive ~zero weight.
    """

    def __init__(self, node_features: int, hidden_node_features: int,
                 out_features: int, att_depth: int, att_hidden_dim: int,
                 att_dropout_p: float, emb_depth: int, emb_hidden_dim: int,
                 emb_dropout_p: float, big_positive: float) -> None:
        super().__init__()
        self.big_positive = big_positive
        # Attention MLP sees original + hidden node features side by side.
        self.att_nn = MLP(
            in_features=node_features + hidden_node_features,
            hidden_layer_sizes=[att_hidden_dim] * att_depth,
            out_features=out_features,
            dropout_p=att_dropout_p,
        )
        # Embedding MLP sees only the hidden node features.
        self.emb_nn = MLP(
            in_features=hidden_node_features,
            hidden_layer_sizes=[emb_hidden_dim] * emb_depth,
            out_features=out_features,
            dropout_p=emb_dropout_p,
        )

    def forward(self, hidden_nodes: torch.Tensor, input_nodes: torch.Tensor,
                node_mask: torch.Tensor) -> torch.Tensor:
        """Return one out_features-sized vector per graph in the batch."""
        combined = torch.cat((hidden_nodes, input_nodes), dim=2)
        # Large negative bias on padded nodes -> ~0 attention after softmax.
        padding_penalty = (node_mask == 0).float() * self.big_positive
        logits = self.att_nn(combined) - padding_penalty.unsqueeze(-1)
        weights = torch.softmax(logits, dim=1)
        node_embeddings = self.emb_nn(hidden_nodes)
        return (weights * node_embeddings).sum(dim=1)
class HGFilter(nn.Module):
    """Stacked-hourglass image filter; `forward` returns the per-stack
    feature maps plus two intermediate tensors."""

    def __init__(self, opt):
        super(HGFilter, self).__init__()
        self.num_modules = opt.num_stack
        self.opt = opt
        # Input channel count depends on which modalities are stacked into
        # the input image.
        if (opt.input_type == 'RGB'):
            self.input_channel = 3
            print('input type: RGB')
        elif (opt.input_type == 'RGBD'):
            self.input_channel = 4
            print('input type: RGBD')
        elif (opt.input_type == 'RGBN'):
            self.input_channel = 5
            print('input type: RGB+normal')
        elif (opt.input_type == 'RGBM2'):
            self.input_channel = 5
            print('input type: RGB+separated object masks')
        elif (opt.input_type == 'RGBM3'):
            self.input_channel = 5
        elif (opt.input_type == 'RGBM4'):
            self.input_channel = 5
            print('input type: RGB+separated object masks, background removed by bbox')
        elif (opt.input_type == 'RGBMD'):
            self.input_channel = 6
            print('input type: RGB+separated object masks, depth added')
        elif (opt.input_type == 'RGBMD2'):
            self.input_channel = 6
            print('input type: RGB+separated object masks, depth added, background removed')
        elif (opt.input_type == 'RGBM'):
            self.input_channel = 4
            print('input type: RGB+mask')
        elif (opt.input_type == 'RGBMN'):
            self.input_channel = 8
            print('input type: RGB+mask+normal')
        else:
            raise ValueError('invalid input specification: {}'.format(opt.input_type))
        # Optional class-code flag; 'cls_code' may be absent from opt.
        if ('cls_code' in opt):
            self.add_cls_code = opt.cls_code
        else:
            self.add_cls_code = False
        self.conv1 = nn.Conv2d(self.input_channel, 64, kernel_size=7, stride=2, padding=3)
        if (self.opt.norm == 'batch'):
            self.bn1 = nn.BatchNorm2d(64)
        elif (self.opt.norm == 'group'):
            self.bn1 = nn.GroupNorm(32, 64)
        # Downsampling strategy applied after the stem.
        if (self.opt.hg_down == 'conv64'):
            self.conv2 = ConvBlock(64, 64, self.opt.norm)
            self.down_conv2 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'conv128'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
            self.down_conv2 = nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1)
        elif (self.opt.hg_down == 'ave_pool'):
            self.conv2 = ConvBlock(64, 128, self.opt.norm)
        else:
            raise NameError('Unknown Fan Filter setting!')
        self.conv3 = ConvBlock(128, 128, self.opt.norm)
        self.conv4 = ConvBlock(128, 256, self.opt.norm)
        # One hourglass + output heads per stack; all but the last stack also
        # get 'bl'/'al' projections that feed the next stack.
        for hg_module in range(self.num_modules):
            self.add_module(('m' + str(hg_module)), HourGlass(1, opt.num_hourglass, 256, self.opt.norm))
            self.add_module(('top_m_' + str(hg_module)), ConvBlock(256, 256, self.opt.norm))
            self.add_module(('conv_last' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            if (self.opt.norm == 'batch'):
                self.add_module(('bn_end' + str(hg_module)), nn.BatchNorm2d(256))
            elif (self.opt.norm == 'group'):
                self.add_module(('bn_end' + str(hg_module)), nn.GroupNorm(32, 256))
            self.add_module(('l' + str(hg_module)), nn.Conv2d(256, opt.hourglass_dim, kernel_size=1, stride=1, padding=0))
            if (hg_module < (self.num_modules - 1)):
                self.add_module(('bl' + str(hg_module)), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module(('al' + str(hg_module)), nn.Conv2d(opt.hourglass_dim, 256, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)), True)
        tmpx = x  # stem features; returned detached alongside the outputs
        if (self.opt.hg_down == 'ave_pool'):
            x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        elif (self.opt.hg_down in ['conv64', 'conv128']):
            x = self.conv2(x)
            x = self.down_conv2(x)
        else:
            raise NameError('Unknown Fan Filter setting!')
        normx = x  # features right after downsampling, also returned
        x = self.conv3(x)
        x = self.conv4(x)
        previous = x
        outputs = []
        for i in range(self.num_modules):
            hg = self._modules[('m' + str(i))](previous)
            ll = hg
            ll = self._modules[('top_m_' + str(i))](ll)
            ll = F.relu(self._modules[('bn_end' + str(i))](self._modules[('conv_last' + str(i))](ll)), True)
            tmp_out = self._modules[('l' + str(i))](ll)
            outputs.append(tmp_out)
            if (i < (self.num_modules - 1)):
                # Residual connection feeding the next hourglass stack.
                ll = self._modules[('bl' + str(i))](ll)
                tmp_out_ = self._modules[('al' + str(i))](tmp_out)
                previous = ((previous + ll) + tmp_out_)
        return (outputs, tmpx.detach(), normx)
class TestSink(ComponentLevel2):
    # Test sink: consumes one expected value per invocation and asserts the
    # input port matches it ('*' entries act as wildcards).

    def construct(s, Type, answer):
        assert (type(answer) == list), 'TestSink only accepts a list of outputs!'
        # Golden outputs; '*' kept verbatim, everything else coerced to Type.
        s.answer = deque([(x if (x == '*') else Type(x)) for x in answer])
        s.in_ = InPort(Type)

        # NOTE(review): in PyMTL this nested function would normally be
        # registered as an update block — a decorator appears to have been
        # stripped here; confirm against the original source.
        def up_sink():
            if (not s.answer):
                assert False, 'Simulation has ended'
            else:
                ref = s.answer.popleft()
                ans = s.in_
                assert ((ref == ans) or (ref == '*')), 'Expect {}, get {} instead'.format(ref, ans)

    def done(s):
        # Finished once every expected value has been consumed.
        return (not s.answer)

    def line_trace(s):
        return ('%s' % s.in_)
class TestNcNWCSAFPPS():
    """Tests for the NWC SAF PPS netCDF file handlers."""

    def test_start_time(self, nwcsaf_pps_cmic_filehandler):
        # start_time must be parsed from the PPS start-time attribute.
        assert (nwcsaf_pps_cmic_filehandler.start_time == read_nwcsaf_time(START_TIME_PPS))

    def test_end_time(self, nwcsaf_pps_cmic_filehandler):
        assert (nwcsaf_pps_cmic_filehandler.end_time == read_nwcsaf_time(END_TIME_PPS))

    def test_drop_xycoords(self, nwcsaf_pps_cmic_filehandler):
        # drop_xycoords should strip the y (and companion) coordinate vars.
        y_line = xr.DataArray(list(range(5)), dims='y', attrs={'long_name': 'scan line number'})
        x_pixel = xr.DataArray(list(range(10)), dims='x', attrs={'long_name': 'pixel number'})
        lat = xr.DataArray(np.ones((5, 10)), dims=('y', 'x'), coords={'y': y_line, 'x': x_pixel}, attrs={'name': 'lat', 'standard_name': 'latitude'})
        lon = xr.DataArray(np.ones((5, 10)), dims=('y', 'x'), coords={'y': y_line, 'x': x_pixel}, attrs={'name': 'lon', 'standard_name': 'longitude'})
        data_array_in = xr.DataArray(np.ones((5, 10)), attrs={'scale_factor': np.array(0, dtype=float), 'add_offset': np.array(1, dtype=float)}, dims=('y', 'x'), coords={'lon': lon, 'lat': lat, 'y': y_line, 'x': x_pixel})
        data_array_out = nwcsaf_pps_cmic_filehandler.drop_xycoords(data_array_in)
        assert ('y' not in data_array_out.coords)

    def test_get_dataset_scales_and_offsets(self, nwcsaf_pps_cpp_filehandler):
        # Raw counts must be rescaled with scale_factor/add_offset.
        dsid = {'name': 'cpp_cot'}
        info = dict(name='cpp_cot', file_type='nc_nwcsaf_cpp')
        res = nwcsaf_pps_cpp_filehandler.get_dataset(dsid, info)
        np.testing.assert_allclose(res, ((COT_ARRAY * COT_SCALE) + COT_OFFSET))

    def test_get_dataset_scales_and_offsets_palette_meanings_using_other_dataset(self, nwcsaf_pps_cpp_filehandler):
        # Palette meanings can borrow scale/offset from another dataset.
        dsid = {'name': 'cpp_cot_pal'}
        info = dict(name='cpp_cot_pal', file_type='nc_nwcsaf_cpp', scale_offset_dataset='cot')
        res = nwcsaf_pps_cpp_filehandler.get_dataset(dsid, info)
        palette_meanings = np.array(COT_PALETTE_MEANINGS.split()).astype(int)
        np.testing.assert_allclose(res.attrs['palette_meanings'], ((palette_meanings * COT_SCALE) + COT_OFFSET))

    def test_get_palette_fill_value_color_added(self, nwcsaf_pps_ctth_filehandler):
        dsid = {'name': 'ctth_alti_pal'}
        info = dict(name='ctth_alti_pal', file_type='nc_nwcsaf_ctth', scale_offset_dataset='ctth_alti')
        res = nwcsaf_pps_ctth_filehandler.get_dataset(dsid, info)
        res.attrs['palette_meanings']  # NOTE(review): bare access, no effect — leftover?
        # Expected meanings include the fill value (65535) appended at the end.
        palette_meanings = np.array([0, 500, 1000, 1500, 65535])
        np.testing.assert_allclose(res.attrs['palette_meanings'], ((palette_meanings * COT_SCALE) + COT_OFFSET))

    def test_get_dataset_raises_when_dataset_missing(self, nwcsaf_pps_cpp_filehandler):
        # Requesting a variable absent from the file raises KeyError.
        dsid = {'name': 'cpp_phase'}
        info = dict(name='cpp_phase', file_type='nc_nwcsaf_cpp')
        with pytest.raises(KeyError):
            nwcsaf_pps_cpp_filehandler.get_dataset(dsid, info)

    def test_get_dataset_uses_file_key_if_present(self, nwcsaf_pps_cmic_filehandler, nwcsaf_pps_cpp_filehandler):
        # An explicit file_key in `info` overrides the dataset name.
        dsid_cpp = {'name': 'cpp_cot'}
        dsid_cmic = {'name': 'cmic_cot'}
        file_key = 'cmic_cot'
        nwcsaf_pps_cmic_filehandler.file_key_prefix = ''
        info_cpp = dict(name='cpp_cot', file_key=file_key, file_type='nc_nwcsaf_cpp')
        res_cpp = nwcsaf_pps_cmic_filehandler.get_dataset(dsid_cpp, info_cpp)
        info_cmic = dict(name='cmic_cot', file_type='nc_nwcsaf_cpp')
        res_cmic = nwcsaf_pps_cmic_filehandler.get_dataset(dsid_cmic, info_cmic)
        np.testing.assert_allclose(res_cpp, res_cmic)

    def test_get_dataset_can_handle_file_key_list(self, nwcsaf_pps_cmic_filehandler, nwcsaf_pps_cpp_filehandler):
        # file_key may be a list of candidate variable names.
        dsid_cpp = {'name': 'cpp_reff'}
        dsid_cmic = {'name': 'cmic_cre'}
        info_cpp = dict(name='cmic_reff', file_key=['reff', 'cre'], file_type='nc_nwcsaf_cpp')
        res_cpp = nwcsaf_pps_cpp_filehandler.get_dataset(dsid_cpp, info_cpp)
        info_cmic = dict(name='cmic_reff', file_key=['reff', 'cre'], file_type='nc_nwcsaf_cpp')
        res_cmic = nwcsaf_pps_cmic_filehandler.get_dataset(dsid_cmic, info_cmic)
        np.testing.assert_allclose(res_cpp, res_cmic)
class WideResNet(nn.Module):
    """Wide residual network (WRN-depth-widen_factor) for CIFAR-sized input.

    `depth` must satisfy depth = 6*n + 4; each of the three block groups then
    contains `n` BasicBlocks.
    """

    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6) == 0
        # BUG FIX: use integer division — plain '/' yields a float block
        # count under Python 3 (value is identical thanks to the assert).
        n = (depth - 4) // 6
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # Mirrors block1's construction; kept for compatibility with existing
        # checkpoints/state dicts.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He-style initialization (renamed fan_out to stop shadowing the
        # block count `n` above).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x, return_prelogit=False):
        """Run the network; optionally also return the pre-logit features."""
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if return_prelogit:
            return (self.fc(out), out)
        return self.fc(out)
# NOTE(review): the bare `_arg_scope` below looks like a stripped decorator
# (arg-scope registration) — confirm against the original source.
_arg_scope
def one_hot_encoding(labels, num_classes, on_value=1.0, off_value=0.0, outputs_collections=None, scope=None):
    """One-hot encode integer `labels` into a dense tensor.

    Args:
      labels: tensor of integer label ids (int32 is widened to int64).
      num_classes: depth of the one-hot axis.
      on_value / off_value: values written at hot / cold positions.
      outputs_collections: graph collections the output is registered in.
      scope: optional name scope.

    Returns:
      The one-hot tensor, also added to `outputs_collections`.
    """
    with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
        labels = ops.convert_to_tensor(labels)
        # one_hot expects int64 indices; widen int32 inputs.
        if (labels.dtype == dtypes.int32):
            labels = standard_ops.to_int64(labels)
        outputs = standard_ops.one_hot(labels, num_classes, on_value=on_value, off_value=off_value)
        return utils.collect_named_outputs(outputs_collections, sc, outputs)
class Pile(object):
    """Compatibility shim exposing (a subset of) the classic pile interface
    on top of a Squirrel instance.

    NOTE(review): `tmin`, `tmax` (and friends) are defined like methods but
    read like attributes inside this class (e.g. `self.tmin` in `chopper`),
    so `@property` decorators were presumably lost — confirm against the
    original source.
    """

    def __init__(self, squirrel=None):
        if (squirrel is None):
            squirrel = psq.Squirrel()
        self._squirrel = squirrel
        self._listeners = []
        # Relay database change events to this pile's own listeners.
        self._squirrel.get_database().add_listener(self._notify_squirrel_to_pile)

    def _notify_squirrel_to_pile(self, event, *args):
        self.notify_listeners(event)

    def add_listener(self, obj):
        # Weak ref: registering as listener must not extend obj's lifetime.
        self._listeners.append(util.smart_weakref(obj))

    def notify_listeners(self, what):
        for ref in self._listeners:
            obj = ref()
            if obj:
                obj(what, [])

    def get_tmin(self):
        return self.tmin

    def get_tmax(self):
        return self.tmax

    def get_deltatmin(self):
        # Smallest sampling interval among all waveforms.
        return self._squirrel.get_deltat_span('waveform')[0]

    def get_deltatmax(self):
        # Largest sampling interval among all waveforms.
        return self._squirrel.get_deltat_span('waveform')[1]

    def deltatmin(self):
        return self.get_deltatmin()

    def deltatmax(self):
        return self.get_deltatmax()

    def tmin(self):
        return self._squirrel.get_time_span('waveform', dummy_limits=False)[0]

    def tmax(self):
        return self._squirrel.get_time_span('waveform', dummy_limits=False)[1]

    def networks(self):
        return set((codes.network for codes in self._squirrel.get_codes('waveform')))

    def stations(self):
        return set((codes.station for codes in self._squirrel.get_codes('waveform')))

    def locations(self):
        return set((codes.location for codes in self._squirrel.get_codes('waveform')))

    def channels(self):
        return set((codes.channel for codes in self._squirrel.get_codes('waveform')))

    def is_relevant(self, tmin, tmax):
        # True when [tmin, tmax] overlaps the span covered by waveforms or
        # waveform promises.
        (ptmin, ptmax) = self._squirrel.get_time_span(['waveform', 'waveform_promise'], dummy_limits=False)
        if (None in (ptmin, ptmax)):
            return False
        return ((tmax >= ptmin) and (ptmax >= tmin))

    def load_files(self, filenames, filename_attributes=None, fileformat='mseed', cache=None, show_progress=True, update_progress=None):
        # Classic-pile arguments other than filenames/fileformat are ignored;
        # squirrel handles caching and progress itself.
        self._squirrel.add(filenames, kinds='waveform', format=fileformat)

    def chop(self, tmin, tmax, nut_selector=None, snap=(round, round), include_last=False, load_data=True, accessor_id='default'):
        # Cut all selected traces to [tmin, tmax]. Returns (traces,
        # used_files); used_files is always empty in this squirrel variant.
        nuts = self._squirrel.get_waveform_nuts(tmin=tmin, tmax=tmax)
        if load_data:
            traces = [self._squirrel.get_content(nut, 'waveform', accessor_id) for nut in nuts if ((nut_selector is None) or nut_selector(nut))]
        else:
            # Meta-only traces, no samples attached.
            traces = [trace.Trace(**nut.trace_kwargs) for nut in nuts if ((nut_selector is None) or nut_selector(nut))]
        self._squirrel.advance_accessor(accessor_id)
        chopped = []
        used_files = set()
        for tr in traces:
            if ((not load_data) and (tr.ydata is not None)):
                # Ensure no sample data leaks out in meta-only mode.
                tr = tr.copy(data=False)
                tr.ydata = None
            try:
                chopped.append(tr.chop(tmin, tmax, inplace=False, snap=snap, include_last=include_last))
            except trace.NoData:
                pass
        return (chopped, used_files)

    def _process_chopped(self, chopped, degap, maxgap, maxlap, want_incomplete, wmax, wmin, tpad):
        # Sort, optionally degap, and — when complete windows are required —
        # weed out traces not fully covering the padded window.
        chopped.sort(key=(lambda a: a.full_id))
        if degap:
            chopped = trace.degapper(chopped, maxgap=maxgap, maxlap=maxlap)
        if (not want_incomplete):
            chopped_weeded = []
            for tr in chopped:
                emin = (tr.tmin - (wmin - tpad))
                emax = ((tr.tmax + tr.deltat) - (wmax + tpad))
                if ((abs(emin) <= (0.5 * tr.deltat)) and (abs(emax) <= (0.5 * tr.deltat))):
                    chopped_weeded.append(tr)
                elif degap:
                    # Nearly complete: tolerate up to 5 samples of shortfall
                    # and repair by repeating edge samples.
                    if ((0.0 < emin <= (5.0 * tr.deltat)) and (((- 5.0) * tr.deltat) <= emax < 0.0)):
                        tr.extend((wmin - tpad), ((wmax + tpad) - tr.deltat), fillmethod='repeat')
                        chopped_weeded.append(tr)
            chopped = chopped_weeded
        for tr in chopped:
            tr.wmin = wmin
            tr.wmax = wmax
        return chopped

    def chopper(self, tmin=None, tmax=None, tinc=None, tpad=0.0, trace_selector=None, want_incomplete=True, degap=True, maxgap=5, maxlap=None, keep_current_files_open=False, accessor_id='default', snap=(round, round), include_last=False, load_data=True, style=None):
        # Generator yielding successive time windows of processed traces
        # (or Batch objects when style == 'batch').
        if (tmin is None):
            if (self.tmin is None):
                logger.warning("Pile's tmin is not set - pile may be empty.")
                return
            tmin = (self.tmin + tpad)
        if (tmax is None):
            if (self.tmax is None):
                logger.warning("Pile's tmax is not set - pile may be empty.")
                return
            tmax = (self.tmax - tpad)
        if (not self.is_relevant((tmin - tpad), (tmax + tpad))):
            return
        nut_selector = trace_callback_to_nut_callback(trace_selector)
        if (tinc is None):
            # Single window covering everything.
            tinc = (tmax - tmin)
            nwin = 1
        elif (tinc == 0.0):
            nwin = 1
        else:
            eps = 1e-06
            nwin = max(1, (int((((tmax - tmin) / tinc) - eps)) + 1))
        for iwin in range(nwin):
            (wmin, wmax) = ((tmin + (iwin * tinc)), min((tmin + ((iwin + 1) * tinc)), tmax))
            (chopped, used_files) = self.chop((wmin - tpad), (wmax + tpad), nut_selector, snap, include_last, load_data, accessor_id)
            processed = self._process_chopped(chopped, degap, maxgap, maxlap, want_incomplete, wmax, wmin, tpad)
            if (style == 'batch'):
                (yield classic_pile.Batch(tmin=wmin, tmax=wmax, i=iwin, n=nwin, traces=processed))
            else:
                (yield processed)
        if (not keep_current_files_open):
            self._squirrel.clear_accessor(accessor_id, 'waveform')

    def chopper_grouped(self, gather, progress=None, *args, **kwargs):
        # Chop per group, where `gather` maps a trace to its group key.
        keys = self.gather_keys(gather)
        if (len(keys) == 0):
            return
        outer_trace_selector = None
        if ('trace_selector' in kwargs):
            outer_trace_selector = kwargs['trace_selector']
        pbar = None
        try:
            if (progress is not None):
                pbar = util.progressbar(progress, len(keys))
            for (ikey, key) in enumerate(keys):
                def tsel(tr):
                    # Current group's traces, further filtered by any
                    # user-supplied selector.
                    return ((gather(tr) == key) and ((outer_trace_selector is None) or outer_trace_selector(tr)))
                kwargs['trace_selector'] = tsel
                for traces in self.chopper(*args, **kwargs):
                    (yield traces)
                if pbar:
                    pbar.update((ikey + 1))
        finally:
            if pbar:
                pbar.finish()

    def reload_modified(self):
        self._squirrel.reload()

    def iter_traces(self, load_data=False, return_abspath=False, trace_selector=None):
        # Iterate meta-only traces; loading data or absolute paths is not
        # supported by this shim.
        assert (not load_data)
        assert (not return_abspath)
        nut_selector = trace_callback_to_nut_callback(trace_selector)
        for nut in self._squirrel.get_waveform_nuts():
            if ((nut_selector is None) or nut_selector(nut)):
                (yield trace.Trace(**nut.trace_kwargs))

    def gather_keys(self, gather, selector=None):
        codes_gather = trace_callback_to_codes_callback(gather)
        codes_selector = trace_callback_to_codes_callback(selector)
        return self._squirrel._gather_codes_keys('waveform', codes_gather, codes_selector)

    def snuffle(self, **kwargs):
        # Open this pile in the snuffler GUI.
        from pyrocko.gui.snuffler import snuffle
        snuffle(self, **kwargs)

    def add_file(self, mtf):
        # Only in-memory trace files are supported here.
        if isinstance(mtf, classic_pile.MemTracesFile):
            name = self._squirrel.add_volatile_waveforms(mtf.get_traces())
            mtf._squirrel_name = name
        else:
            assert False

    def remove_file(self, mtf):
        if (isinstance(mtf, classic_pile.MemTracesFile) and getattr(mtf, '_squirrel_name', False)):
            self._squirrel.remove(mtf._squirrel_name)
            mtf._squirrel_name = None

    def is_empty(self):
        return ('waveform' not in self._squirrel.get_kinds())

    def get_update_count(self):
        return 0
def test_kernel_regularization():
    """Each kernel-regularizer flavor must register exactly one loss and
    still allow the model to train."""
    (x_train, y_train), _ = get_data()
    for reg in (regularizers.l1(), regularizers.l2(), regularizers.l1_l2()):
        model = create_model(kernel_regularizer=reg)
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        assert len(model.losses) == 1
        model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
def render_page_template(name, route_data=None, **kwargs):
    """Render a front-end page template with the common context injected.

    Collects bundled/external scripts and styles, feature flags, visible
    config, OAuth/external-login settings and analytics keys, then returns
    the response with an `X-FRAME-OPTIONS: DENY` header.
    """
    main_scripts = _list_files('build', 'js', JS_BUNDLE_NAME)
    use_cdn = app.config.get('USE_CDN', True)
    # Allow overriding CDN usage per request via ?use_cdn=true/false.
    if (request.args.get('use_cdn') is not None):
        use_cdn = (request.args.get('use_cdn') == 'true')
    external_styles = get_external_css(local=(not use_cdn), exclude=FONT_AWESOME_5)
    external_scripts = get_external_javascript(local=(not use_cdn))
    if features.BILLING:
        # Stripe is only loaded when billing is enabled.
        external_scripts.append('//js.stripe.com/v3/')
    has_contact = (len(app.config.get('CONTACT_INFO', [])) > 0)
    contact_href = None
    if (len(app.config.get('CONTACT_INFO', [])) == 1):
        contact_href = app.config['CONTACT_INFO'][0]
    version_number = ''
    if (not features.BILLING):
        version_number = ('Quay %s' % __version__)
    scopes_set = {scope.scope: scope._asdict() for scope in list(scopes.app_scopes(app.config).values())}
    contents = render_template(
        name,
        registry_state=app.config.get('REGISTRY_STATE', 'normal'),
        route_data=route_data,
        external_styles=external_styles,
        external_scripts=external_scripts,
        main_scripts=main_scripts,
        feature_set=features.get_features(),
        config_set=frontend_visible_config(app.config),
        oauth_set=get_oauth_config(),
        external_login_set=get_external_login_config(),
        scope_set=scopes_set,
        vuln_priority_set=PRIORITY_LEVELS,
        mixpanel_key=app.config.get('MIXPANEL_KEY', ''),
        munchkin_key=app.config.get('MARKETO_MUNCHKIN_ID', ''),
        recaptcha_key=app.config.get('RECAPTCHA_SITE_KEY', ''),
        google_tagmanager_key=app.config.get('GOOGLE_TAGMANAGER_KEY', ''),
        # NOTE(review): 'google_anaytics_key' is misspelled, but the templates
        # presumably reference this exact name — confirm before renaming.
        google_anaytics_key=app.config.get('GOOGLE_ANALYTICS_KEY', ''),
        sentry_public_dsn=app.config.get('SENTRY_PUBLIC_DSN', ''),
        is_debug=str(app.config.get('DEBUGGING', False)).lower(),
        has_billing=features.BILLING,
        onprem=(not app.config.get('FEATURE_BILLING', False)),
        contact_href=contact_href,
        has_contact=has_contact,
        hostname=app.config['SERVER_HOSTNAME'],
        preferred_scheme=app.config['PREFERRED_URL_SCHEME'],
        version_number=version_number,
        current_year=datetime.datetime.now().year,
        kubernetes_namespace=(IS_KUBERNETES and QE_NAMESPACE),
        account_recovery_mode=app.config.get('ACCOUNT_RECOVERY_MODE', False),
        **kwargs)
    resp = make_response(contents)
    # Clickjacking protection: the UI must never be framed.
    resp.headers['X-FRAME-OPTIONS'] = 'DENY'
    return resp
class PresetEchoesGoal(PresetTab, Ui_PresetEchoesGoal):
    # Preset-editor tab for the Echoes goal (Sky Temple Key requirement).
    #
    # NOTE(review): `tab_title` and `uses_patches_tab` take `cls` but carry no
    # decorator here — `@classmethod` was presumably stripped; confirm.

    def __init__(self, editor: PresetEditor, game_description: GameDescription, window_manager: WindowManager):
        super().__init__(editor, game_description, window_manager)
        self.setupUi(self)
        self.goal_layout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop)
        # Combo data: two fixed modes, plus the `int` type itself as a
        # sentinel meaning "custom key count taken from the slider".
        self.skytemple_combo.setItemData(0, LayoutSkyTempleKeyMode.ALL_BOSSES)
        self.skytemple_combo.setItemData(1, LayoutSkyTempleKeyMode.ALL_GUARDIANS)
        self.skytemple_combo.setItemData(2, int)
        self.skytemple_combo.currentIndexChanged.connect(self._on_sky_temple_key_combo_changed)
        self.skytemple_slider.valueChanged.connect(self._on_sky_temple_key_combo_slider_changed)
        self._set_slider_visible(False)

    def tab_title(cls) -> str:
        return 'Goal'

    def uses_patches_tab(cls) -> bool:
        return False

    def _set_slider_visible(self, visible: bool):
        # The slider (and its label) only matter for the custom-count mode.
        for w in [self.skytemple_slider, self.skytemple_slider_label]:
            w.setVisible(visible)

    def _on_sky_temple_key_combo_changed(self):
        combo_enum = self.skytemple_combo.currentData()
        with self._editor as editor:
            if (combo_enum is int):
                # Custom-count sentinel: value comes from the slider.
                new_value = LayoutSkyTempleKeyMode(self.skytemple_slider.value())
                self._set_slider_visible(True)
            else:
                new_value = combo_enum
                self._set_slider_visible(False)
            editor.set_configuration_field('sky_temple_keys', new_value)

    def _on_sky_temple_key_combo_slider_changed(self):
        self.skytemple_slider_label.setText(str(self.skytemple_slider.value()))
        self._on_sky_temple_key_combo_changed()

    def on_preset_changed(self, preset: Preset):
        # Reflect the preset's sky_temple_keys setting back into the widgets.
        assert isinstance(preset.configuration, EchoesConfiguration)
        keys = preset.configuration.sky_temple_keys
        if isinstance(keys.value, int):
            self.skytemple_slider.setValue(keys.value)
            data = int
        else:
            data = keys
        set_combo_with_value(self.skytemple_combo, data)
def point_adjustment(y_true, y_score):
    """Apply point adjustment: within every contiguous anomalous segment of
    `y_true`, replace the scores by the segment's maximum score.

    Returns an adjusted copy of `y_score`; the input array is not modified.
    """
    adjusted = y_score.copy()
    assert len(adjusted) == len(y_true)
    # Segment boundaries: positions where the label flips, plus both ends.
    change_points = np.flatnonzero(y_true[1:] != y_true[:(- 1)]) + 1
    boundaries = [0] + list(change_points) + [len(y_true)]
    anomalous = (y_true[0] == 1)
    for start, stop in zip(boundaries[:(- 1)], boundaries[1:]):
        if anomalous:
            adjusted[start:stop] = np.max(adjusted[start:stop])
        anomalous = not anomalous
    return adjusted
def voc_palette():
    """Return the 21-entry Pascal VOC segmentation color palette.

    Index 0 is the background color; indices 1-20 are the RGB colors of the
    20 VOC object classes.
    """
    return [
        [0, 0, 0],        # 0: background
        [128, 0, 0],      # 1
        [0, 128, 0],      # 2
        [128, 128, 0],    # 3
        [0, 0, 128],      # 4
        [128, 0, 128],    # 5
        [0, 128, 128],    # 6
        [128, 128, 128],  # 7
        [64, 0, 0],       # 8
        [192, 0, 0],      # 9
        [64, 128, 0],     # 10
        [192, 128, 0],    # 11
        [64, 0, 128],     # 12
        [192, 0, 128],    # 13
        [64, 128, 128],   # 14
        [192, 128, 128],  # 15
        [0, 64, 0],       # 16
        [128, 64, 0],     # 17
        [0, 192, 0],      # 18
        [128, 192, 0],    # 19
        [0, 64, 128],     # 20
    ]
def replace_natural_gas_technology(df: pd.DataFrame):
    """Normalize the `Technology` of natural-gas plants to CCGT/OCGT and
    mirror the result into `Fueltype`.

    Mutates `df` in place and returns it. Gas rows whose technology is
    unmapped or missing default to 'CCGT'; non-gas rows are left untouched.
    """
    mapping = {'Steam Turbine': 'CCGT', 'Combustion Engine': 'OCGT', 'NG': 'CCGT',
               'Ng': 'CCGT', 'NG/FO': 'OCGT', 'Ng/Fo': 'OCGT', 'NG/D': 'OCGT',
               'LNG': 'OCGT', 'CCGT/D': 'CCGT', 'CCGT/FO': 'CCGT', 'LCCGT': 'CCGT',
               'CCGT/Fo': 'CCGT'}
    fueltype = df['Fueltype'] == 'Natural Gas'
    df.loc[fueltype, 'Technology'] = (
        df.loc[fueltype, 'Technology'].replace(mapping).fillna('CCGT')
    )
    # Any technology still not CCGT/OCGT on a gas row falls back to CCGT.
    unique_tech_with_ng = df.loc[fueltype, 'Technology'].unique()
    unknown_techs = np.setdiff1d(unique_tech_with_ng, ['CCGT', 'OCGT'])
    if len(unknown_techs) > 0:
        # BUG FIX: the previous `df.Technology.where(fueltype, ..., inplace=True)`
        # replaced the technology of every NON-gas row (where the condition is
        # False) — mostly with NaN — instead of fixing the gas rows. Restrict
        # the fallback to the gas rows themselves.
        df.loc[fueltype, 'Technology'] = df.loc[fueltype, 'Technology'].replace(
            {t: 'CCGT' for t in unknown_techs}
        )
    # Gas rows report their normalized technology (CCGT/OCGT) as fuel type.
    df['Fueltype'] = np.where(fueltype, df['Technology'], df['Fueltype'])
    return df
class LeafPrinter(Printer):
    """Printer for leaf nodes: renders an output by its Greek symbol when one
    exists for its name, otherwise by its plain string form. Results are
    memoized on the printer state."""

    def process(self, output, pstate):
        memo = pstate.memo
        if output in memo:
            return memo[output]
        rendered = greek[output.name] if output.name in greek else str(output)
        memo[output] = rendered
        return rendered
class BaseOptions():
    """Shared command-line options; subclasses call `initialize` and extend
    the parser with task-specific arguments."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        # Guard so initialize() only registers arguments once.
        self.initialized = False

    def initialize(self):
        # Experiment identification / model selection.
        self.parser.add_argument('--name', type=str, default=None, help='name of the experiment. It decides where to store samples and models')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        self.parser.add_argument('--model', type=str, default='eld_model', help='chooses which model to use.', choices=model_names)
        # Checkpointing / resuming.
        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        self.parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
        self.parser.add_argument('--resume_epoch', '-re', type=int, default=None, help='checkpoint to use. (default: latest')
        # Reproducibility and data loading.
        self.parser.add_argument('--seed', type=int, default=2018, help='random seed to use. Default=2018')
        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--nThreads', default=8, type=int, help='# threads for loading data')
        # Runtime behavior toggles.
        self.parser.add_argument('--chop', action='store_true', help='enable forward_chop')
        self.parser.add_argument('--no-log', action='store_true', help='disable tf logger?')
        self.parser.add_argument('--no-verbose', action='store_true', help='disable verbose info?')
        self.parser.add_argument('--debug', action='store_true', help='debugging mode')
        self.initialized = True
class YOLOX(nn.Module):
    """YOLOX detector: a feature-pyramid backbone followed by a detection head.

    ``mode='off_pipe'`` is the normal single-clip path (losses in training,
    decoded outputs in eval); ``mode='on_pipe'`` is the streaming path that
    threads a feature ``buffer`` through consecutive calls.
    """

    def __init__(self, backbone=None, head=None):
        super().__init__()
        # Default components are only built when none are supplied.
        self.backbone = backbone if backbone is not None else DFPPAFPN()
        self.head = head if head is not None else TALHead(20)

    def forward(self, x, targets=None, buffer=None, mode='off_pipe'):
        assert mode in ['off_pipe', 'on_pipe']
        if mode == 'on_pipe':
            # Streaming inference: backbone also returns the updated buffer.
            fpn_outs, new_buffer = self.backbone(x, buffer=buffer, mode='on_pipe')
            return self.head(fpn_outs), new_buffer
        fpn_outs = self.backbone(x, buffer=buffer, mode='off_pipe')
        if not self.training:
            return self.head(fpn_outs)
        # Training: the head computes the individual loss terms.
        assert targets is not None
        loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(fpn_outs, targets, x)
        return {'total_loss': loss, 'iou_loss': iou_loss, 'l1_loss': l1_loss, 'conf_loss': conf_loss, 'cls_loss': cls_loss, 'num_fg': num_fg}
# NOTE(review): `_module()` looks like a registration decorator that lost its
# leading `@` when the file was flattened (likely `@_module()` above the
# class) — confirm against the original file; as written it is a bare call.
_module()

class ResNet(nn.Module):
    """Torchvision-style ResNet backbone built from a residual ``block``
    class and per-stage ``layers`` counts, ending in a linear classifier."""

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        # Channel count entering the next stage; grows as stages are built.
        self.inplanes = 64
        self.dilation = 1
        # Each element replaces the 2x stride of a later stage with dilation.
        if (replace_stride_with_dilation is None):
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Stem: 7x7 stride-2 conv + norm + relu + 3x3 stride-2 max-pool.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve resolution (or dilate).
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # Standard He init for convs, unit/zero affine for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-init the last norm of each residual branch so blocks start as
        # identity mappings (helps training, per the original ResNet paper).
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, SABottleneck):
                    nn.init.constant_(m.bn3.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Build one residual stage of ``blocks`` blocks at width ``planes``."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the stage's stride for dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        # A projection shortcut is needed when shape or channels change.
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem -> four stages -> global average pool -> classifier logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x
def get_parser(desc, default_task='translation'):
    """Build the root fairseq argument parser.

    ``desc`` is accepted for API compatibility; ``default_task`` seeds the
    ``--task`` option. Returns the configured ``argparse.ArgumentParser``.
    """
    # Parse --user-dir first so user-supplied modules can register
    # themselves before the registries below are consulted.
    pre_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    pre_parser.add_argument('--user-dir', default=None)
    pre_args, _ = pre_parser.parse_known_args()
    utils.import_user_module(pre_args)

    parser = argparse.ArgumentParser(allow_abbrev=False)
    gen_parser_from_dataclass(parser, CommonParams())

    # One CLI flag per registry (e.g. --optimizer, --lr-scheduler), with its
    # registered default and choices.
    from fairseq.registry import REGISTRIES
    for registry_name, registry in REGISTRIES.items():
        flag = '--' + registry_name.replace('_', '-')
        parser.add_argument(flag, default=registry['default'], choices=registry['registry'].keys())

    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument('--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(), help='task')
    return parser
class CLIPConverter(Converter):
    """Convert the slow CLIP tokenizer into an equivalent fast tokenizer."""

    def converted(self) -> Tokenizer:
        """Build and return the fast ``Tokenizer`` mirroring CLIP's BPE."""
        slow = self.original_tokenizer
        unk_token = slow.unk_token
        bpe_model = BPE(vocab=slow.encoder, merges=list(slow.bpe_ranks.keys()), dropout=None, continuing_subword_prefix='', end_of_word_suffix='</w>', fuse_unk=False, unk_token=str(unk_token))
        tokenizer = Tokenizer(bpe_model)
        # CLIP normalises to NFC, collapses whitespace runs and lowercases.
        tokenizer.normalizer = normalizers.Sequence([normalizers.NFC(), normalizers.Replace(Regex('\\s+'), ' '), normalizers.Lowercase()])
        # CLIP's GPT-2-style split pattern, then byte-level encoding.
        split_pattern = Regex("'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+")
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence([pre_tokenizers.Split(split_pattern, behavior='removed', invert=True), pre_tokenizers.ByteLevel(add_prefix_space=False)])
        tokenizer.decoder = decoders.ByteLevel()
        # Wrap sequences as <bos> ... <eos>, RoBERTa-style.
        tokenizer.post_processor = processors.RobertaProcessing(sep=(slow.eos_token, slow.eos_token_id), cls=(slow.bos_token, slow.bos_token_id), add_prefix_space=False, trim_offsets=False)
        return tokenizer
def test_extract_header_comment(monkeypatch, tmp_path):
    """The --header-comment option text must appear in the generated POT."""
    output_pot = tmp_path / 'temp.pot'
    monkeypatch.chdir(project_dir)
    command = configure_cli_command(f"extract . -o '{output_pot}' --header-comment 'Boing' ")
    command.run()
    assert 'Boing' in output_pot.read_text()
def _load_and_check_geolocation(scene, resolution, exp_res, exp_shape, has_res, check_callback=_check_shared_metadata):
    """Load lon/lat at *resolution* and validate shape, dtype and metadata.

    When *has_res* is False the datasets must be absent (KeyError).
    """
    scene.load(['longitude', 'latitude'], resolution=resolution)
    lon_id = make_dataid(name='longitude', resolution=exp_res)
    lat_id = make_dataid(name='latitude', resolution=exp_res)
    if not has_res:
        # Neither dataset should be retrievable at this resolution.
        pytest.raises(KeyError, scene.__getitem__, lon_id)
        pytest.raises(KeyError, scene.__getitem__, lat_id)
        return
    lon_arr = scene[lon_id]
    lat_arr = scene[lat_id]
    assert lon_arr.shape == exp_shape
    assert lat_arr.shape == exp_shape
    # Compute both lazily-built arrays in one dask pass.
    lon_vals, lat_vals = dask.compute(lon_arr, lat_arr)
    assert lon_arr.dtype == lat_arr.dtype
    assert lon_arr.dtype == np.float32
    assert lon_vals.dtype == lon_arr.dtype
    assert lat_vals.dtype == lat_arr.dtype
    # Test data lies in the western/northern hemisphere.
    np.testing.assert_array_less(lon_vals, 0)
    np.testing.assert_array_less(0, lat_vals)
    check_callback(lon_arr)
    check_callback(lat_arr)
def gen_imgs_classifier(samples, patches_dir):
    """Cut 256x256 patches from whole-slide images and write them to disk,
    sorted into tumor/normal (plus mask) folders for classifier training.

    ``samples`` is a DataFrame with columns ``slide_path``, ``tile_loc`` and
    ``is_tumor``; ``patches_dir`` is the output root.
    """
    num_samples = len(samples)
    print('gen_imgs_classifier ', num_samples)
    for (counter, batch_sample) in samples.iterrows():
        with openslide.open_slide(batch_sample.slide_path) as slide:
            tiles = DeepZoomGenerator(slide, tile_size=256, overlap=0, limit_bounds=False)
            # tile_loc is reversed ([::-1]) to match DeepZoomGenerator's
            # (col, row) addressing; the highest level is full resolution.
            img = tiles.get_tile((tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
            if batch_sample.is_tumor:
                # Ground-truth mask lives next to the slide as *_Mask.tif.
                truth_slide_path = (MASK_TRUTH_DIR / osp.basename(batch_sample.slide_path).replace('.tif', '_Mask.tif'))
                with openslide.open_slide(str(truth_slide_path)) as truth:
                    truth_tiles = DeepZoomGenerator(truth, tile_size=256, overlap=0, limit_bounds=False)
                    mask = truth_tiles.get_tile((truth_tiles.level_count - 1), batch_sample.tile_loc[::(- 1)])
                    mask_n = np.array(mask)
                    # Keep only tumor patches whose centre pixel is annotated
                    # tumor (first channel == 255) — assumes an RGB mask where
                    # tumor is white; TODO confirm mask encoding.
                    mask_center = mask_n[(128, 128)]
                    if (mask_center[0] == 255):
                        # NOTE(review): output paths are built by raw string
                        # concatenation (str(patches_dir) + 'mask/') — this
                        # only works if patches_dir already ends with a path
                        # separator; confirm against callers.
                        cv2.imwrite((((str(patches_dir) + 'mask/') + str(batch_sample.tile_loc[::(- 1)])) + '.png'), cv2.cvtColor(np.array(mask_n), cv2.COLOR_RGB2BGR))
                        cv2.imwrite((((str(patches_dir) + 'tumor/') + str(batch_sample.tile_loc[::(- 1)])) + '.png'), cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
            else:
                cv2.imwrite((((str(patches_dir) + 'normal/') + str(batch_sample.tile_loc[::(- 1)])) + '.png'), cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR))
class BaseProcessingNet(nn.Sequential):
    """MLP-style stack: an un-normalised input block, ``num_layers`` hidden
    blocks of width ``mid_dim``, and an un-normalised output head.

    Weights are initialised with Xavier init via ``init_weights_xavier``.
    """

    def __init__(self, in_dim, mid_dim, out_dim, num_layers, block=FCBlock, final_activation=None, normalization='batch'):
        super(BaseProcessingNet, self).__init__()
        self.add_module('input', block(in_dim=in_dim, out_dim=mid_dim, normalization=None))
        for i in range(num_layers):
            self.add_module('pyramid-{}'.format(i), block(in_dim=mid_dim, out_dim=mid_dim, normalization=normalization))
        # BUG FIX: this used "'head'.format((i + 1))" — format() on a string
        # with no placeholders is a no-op, and `i` is undefined when
        # num_layers == 0 (NameError). The module name is simply 'head'.
        self.add_module('head', block(in_dim=mid_dim, out_dim=out_dim, normalization=None, activation=final_activation))
        self.apply(init_weights_xavier)
def _introspect_attributes(program_id: int) -> dict:
    """Query the GL driver for every active attribute of a linked program.

    Returns a mapping of attribute name -> dict with its GL type, array
    size, bound location, component count and vertex format string.
    """
    attributes = {}
    active_count = _get_number(program_id, GL_ACTIVE_ATTRIBUTES)
    for index in range(active_count):
        a_name, a_type, a_size = _query_attribute(program_id, index)
        # glGetAttribLocation wants a C string buffer.
        name_buffer = create_string_buffer(a_name.encode('utf-8'))
        location = glGetAttribLocation(program_id, name_buffer)
        count, fmt = _attribute_types[a_type]
        attributes[a_name] = dict(type=a_type, size=a_size, location=location, count=count, format=fmt)
    if _debug_gl_shaders:
        for attribute in attributes.values():
            print(f' Found attribute: {attribute}')
    return attributes
def main():
    """Entry point: parse args, set up (optionally distributed) environment,
    logging and batch sizes, then dispatch to the requested run mode.

    NOTE(review): block structure reconstructed from a flattened source —
    confirm nesting (notably the distributed-init `else` branch) upstream.
    """
    args = parser.get_args()
    args.use_gpu = torch.cuda.is_available()
    if args.use_gpu:
        # Let cuDNN pick the fastest algorithms for fixed-size inputs.
        torch.backends.cudnn.benchmark = True
    if (args.launcher == 'none'):
        args.distributed = False
    else:
        args.distributed = True
        dist_utils.init_dist(args.launcher)
        (_, world_size) = dist_utils.get_dist_info()
        args.world_size = world_size
    # Timestamped log file per run.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = os.path.join(args.experiment_path, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, name=args.log_name)
    if (not args.test):
        # Only rank 0 writes TensorBoard summaries.
        if (args.local_rank == 0):
            train_writer = SummaryWriter(os.path.join(args.tfboard_path, 'train'))
            val_writer = SummaryWriter(os.path.join(args.tfboard_path, 'test'))
        else:
            train_writer = None
            val_writer = None
    config = get_config(args, logger=logger)
    if args.distributed:
        # Split the global batch size evenly across ranks.
        assert ((config.total_bs % world_size) == 0)
        config.dataset.train.others.bs = (config.total_bs // world_size)
        if config.dataset.get('extra_train'):
            config.dataset.extra_train.others.bs = (config.total_bs // world_size)
        config.dataset.val.others.bs = (config.total_bs // world_size)
        if config.dataset.get('test'):
            config.dataset.test.others.bs = (config.total_bs // world_size)
    else:
        config.dataset.train.others.bs = config.total_bs
        if config.dataset.get('extra_train'):
            config.dataset.extra_train.others.bs = config.total_bs
        if config.dataset.get('extra_val'):
            config.dataset.extra_val.others.bs = config.total_bs
        config.dataset.val.others.bs = config.total_bs
        if config.dataset.get('test'):
            config.dataset.test.others.bs = config.total_bs
    log_args_to_file(args, 'args', logger=logger)
    log_config_to_file(config, 'config', logger=logger)
    logger.info(f'Distributed training: {args.distributed}')
    if (args.seed is not None):
        logger.info(f'Set random seed to {args.seed}, deterministic: {args.deterministic}')
        # Offset the seed by rank so each process draws different data.
        misc.set_random_seed((args.seed + args.local_rank), deterministic=args.deterministic)
    if args.distributed:
        assert (args.local_rank == torch.distributed.get_rank())
    # Few-shot settings: -1 means "not a few-shot run".
    if (args.shot != (- 1)):
        config.dataset.train.others.shot = args.shot
        config.dataset.train.others.way = args.way
        config.dataset.train.others.fold = args.fold
        config.dataset.val.others.shot = args.shot
        config.dataset.val.others.way = args.way
        config.dataset.val.others.fold = args.fold
    # Dispatch to the requested run mode.
    if args.test:
        test_net(args, config)
    elif args.zeroshot:
        zeroshot(args, config)
    elif args.svm:
        svm(args, config)
    elif (args.finetune_model or args.scratch_model):
        finetune(args, config, train_writer, val_writer)
    else:
        pretrain(args, config, train_writer, val_writer)
class _job_state_monitor(threading.Thread):
    """Daemon thread that polls the job service and propagates state changes
    to job objects until :meth:`stop` is called.

    Gives up after three errors of the same type to avoid spinning forever
    on a broken connection.
    """

    def __init__(self, job_service):
        self.logger = job_service._logger
        self.js = job_service
        self._term = threading.Event()
        super(_job_state_monitor, self).__init__()
        self.setDaemon(True)

    def stop(self):
        """Signal the monitoring loop to exit after its current iteration."""
        self._term.set()

    def run(self):
        # Per-error-type occurrence counter, used to bail out after repeats.
        error_type_count = dict()
        while (not self._term.is_set()):
            try:
                jobs = self.js.jobs
                for job_id in list(jobs.keys()):
                    job_info = jobs[job_id]
                    # Only poll jobs that are not yet in a final state.
                    if (job_info['state'] not in [api.DONE, api.FAILED, api.CANCELED]):
                        new_job_info = self.js._job_get_info(job_id, reconnect=False)
                        self.logger.info(('update Job %s (state: %s)' % (job_id, new_job_info['state'])))
                        if (new_job_info['state'] != job_info['state']):
                            # Push the new state onto the job object's
                            # attribute interface (triggers callbacks).
                            job_obj = job_info['obj']
                            job_obj._attributes_i_set('state', new_job_info['state'], job_obj._UP, True)
                        jobs[job_id] = new_job_info
            except Exception as e:
                import traceback
                traceback.print_exc()
                self.logger.warning(('Exception in monitoring thread: %s' % e))
                # BUG FIX: the original reset the WHOLE dict
                # (error_type_count = dict()) whenever a new error type
                # appeared, wiping the counts of all other types. Only the
                # new type's counter should be initialised.
                error_type = str(e)
                error_type_count[error_type] = error_type_count.get(error_type, 0) + 1
                if (error_type_count[error_type] >= 3):
                    self.logger.error('too many monitoring errors - stop')
                    return
            finally:
                # Throttle polling regardless of success or failure.
                time.sleep(MONITOR_UPDATE_INTERVAL)
class ConditionalFix(BaseFix):
    """Base for fixers that skip themselves when a "new" name is already
    imported; ``skip_on`` holds that name as a dotted path."""

    skip_on = None

    def start_tree(self, *args):
        """Reset the cached skip decision for each new tree."""
        super(ConditionalFix, self).start_tree(*args)
        self._should_skip = None

    def should_skip(self, node):
        """Return (and cache) whether this tree already imports ``skip_on``."""
        if self._should_skip is None:
            pkg, _, name = self.skip_on.rpartition('.')
            self._should_skip = does_tree_import(pkg, name, node)
        return self._should_skip
class FakeDisplayItem(dict): def get(self, key, default='', connector=' - '): if ((key[:1] == '~') and ('~' in key[1:])): return connector.join(map(self.get, util.tagsplit(key))) elif ((key[:1] == '~') and (key[(- 4):(- 3)] == ':')): func = key[(- 3):] key = key[:(- 4)] return f'{util.tag(key)}<{func}>' elif (key in self): return self[key] return util.tag(key) __call__ = get def comma(self, key): value = self.get(key) if isinstance(value, (int | float)): return value return value.replace('\n', ', ')
class DependenciesWidget(QtWidgets.QTableView):
    """Table view listing dependencies, sortable case-insensitively by the
    first column."""

    def __init__(self):
        super().__init__(None)
        self.root_model = DependenciesModel(self)
        # Sort through a proxy model so the source model's order is untouched.
        self.proxy_model = QtCore.QSortFilterProxyModel(self)
        self.proxy_model.setSourceModel(self.root_model)
        self.proxy_model.setSortCaseSensitivity(Qt.CaseSensitivity.CaseInsensitive)
        self.setModel(self.proxy_model)
        self.horizontalHeader().setStretchLastSection(True)
        self.setSortingEnabled(True)
        # Default sort: first column, ascending.
        self.sortByColumn(0, Qt.AscendingOrder)
# NOTE(review): these two leading lines look like decorators that lost their
# prefix during extraction (likely `@<marker>.unit()` and
# `@<suite>.parametrize(...)`); as written they are not valid Python —
# restore the original decorator spellings from upstream.
.unit()
.parametrize(('prefix_tree', 'full_tree', 'strict', 'expected'), [(1, 1, True, False), (1, 1, False, True), ({'a': 1, 'b': 1}, {'a': 1, 'b': {'c': 1, 'd': 1}}, False, True), ({'a': 1, 'b': 1}, {'a': 1, 'b': {'c': 1, 'd': 1}}, True, True)])
def test_is_prefix(prefix_tree, full_tree, strict, expected):
    """is_prefix(): a leaf is a (non-strict) prefix of itself, and a shallow
    structure is a prefix of a deeper one regardless of strictness."""
    prefix_structure = tree_structure(prefix_tree)
    full_tree_structure = tree_structure(full_tree)
    assert (prefix_structure.is_prefix(full_tree_structure, strict=strict) is expected)
def test_affixes():
    """Lexer affix handling: tokens whose rules allow optional surrounding
    whitespace match with or without it, while tokens that REQUIRE
    surrounding whitespace fall apart into per-character errors when the
    required space is missing."""
    s = '\nspace-allowed-after-this |\nspace-allowed-before-this\nspace-allowed-after-this\n space-required-before-and-after-this |\nspace-required-before-and-after-this |\n space-required-before-and-after-this<= no space after\n'
    # Expected token stream: the first four lines tokenize cleanly; the two
    # violating lines degrade to Error tokens (with '-' still matching Name).
    assert (list(MyLexer().get_tokens(s)) == [(Token.Name, 'space-allowed-after-this '), (Token.Name, '|'), (Token.Text, '\n'), (Token.Name, 'space-allowed-before-this'), (Token.Text, '\n'), (Token.Name, 'space-allowed-after-this'), (Token.Text, '\n'), (Token.Name, ' space-required-before-and-after-this '), (Token.Name, '|'), (Token.Text, '\n'), (Token.Error, 's'), (Token.Error, 'p'), (Token.Error, 'a'), (Token.Error, 'c'), (Token.Error, 'e'), (Token.Name, '-'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Error, 'q'), (Token.Error, 'u'), (Token.Error, 'i'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Error, 'd'), (Token.Name, '-'), (Token.Error, 'b'), (Token.Error, 'e'), (Token.Error, 'f'), (Token.Error, 'o'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Name, '-'), (Token.Error, 'a'), (Token.Error, 'n'), (Token.Error, 'd'), (Token.Name, '-'), (Token.Error, 'a'), (Token.Error, 'f'), (Token.Error, 't'), (Token.Error, 'e'), (Token.Error, 'r'), (Token.Name, '-'), (Token.Error, 't'), (Token.Error, 'h'), (Token.Error, 'i'), (Token.Error, 's'), (Token.Error, ' '), (Token.Name, '|'), (Token.Text, '\n'), (Token.Error, ' '), (Token.Error, 's'), (Token.Error, 'p'), (Token.Error, 'a'), (Token.Error, 'c'), (Token.Error, 'e'), (Token.Name, '-'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Error, 'q'), (Token.Error, 'u'), (Token.Error, 'i'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Error, 'd'), (Token.Name, '-'), (Token.Error, 'b'), (Token.Error, 'e'), (Token.Error, 'f'), (Token.Error, 'o'), (Token.Error, 'r'), (Token.Error, 'e'), (Token.Name, '-'), (Token.Error, 'a'), (Token.Error, 'n'), (Token.Error, 'd'), (Token.Name, '-'), (Token.Error, 'a'), (Token.Error, 'f'), (Token.Error, 't'), (Token.Error, 'e'), (Token.Error, 'r'), (Token.Name, '-'), (Token.Error, 't'), (Token.Error, 'h'), (Token.Error, 'i'), (Token.Error, 's'), (Token.Error, '<'), (Token.Error, '='), (Token.Error, ' '), (Token.Error, 'n'), (Token.Error, 'o'), (Token.Error, ' '), (Token.Error, 's'), (Token.Error, 'p'), (Token.Error, 'a'), (Token.Error, 'c'), (Token.Error, 'e'), (Token.Error, ' '), (Token.Error, 'a'), (Token.Error, 'f'), (Token.Error, 't'), (Token.Error, 'e'), (Token.Error, 'r'), (Token.Text, '\n')])
def _get_pak_name(locale_name: str) -> str: if (locale_name in {'en', 'en-PH', 'en-LR'}): return 'en-US' elif locale_name.startswith('en-'): return 'en-GB' elif locale_name.startswith('es-'): return 'es-419' elif (locale_name == 'pt'): return 'pt-BR' elif locale_name.startswith('pt-'): return 'pt-PT' elif (locale_name in {'zh-HK', 'zh-MO'}): return 'zh-TW' elif ((locale_name == 'zh') or locale_name.startswith('zh-')): return 'zh-CN' return locale_name.split('-')[0]
def new_onion_packet(payment_path_pubkeys: Sequence[bytes], session_key: bytes, hops_data: Sequence[OnionHopsDataSingle], associated_data: bytes) -> OnionPacket:
    """Construct a BOLT-04 onion routing packet for the given payment path.

    Mutates ``hops_data`` in place (sets each hop's ``hmac``) and returns
    the assembled :class:`OnionPacket`.
    """
    num_hops = len(payment_path_pubkeys)
    assert (num_hops == len(hops_data))
    # One ECDH shared secret per hop, derived from the session key.
    hop_shared_secrets = get_shared_secrets_along_route(payment_path_pubkeys, session_key)
    filler = _generate_filler(b'rho', hops_data, hop_shared_secrets)
    # The final hop's HMAC is all-zero by construction.
    next_hmac = bytes(PER_HOP_HMAC_SIZE)
    # Initialise the header with a pseudo-random pad stream so unused space
    # is indistinguishable from encrypted payload.
    pad_key = get_bolt04_onion_key(b'pad', session_key)
    mix_header = generate_cipher_stream(pad_key, HOPS_DATA_SIZE)
    # Wrap encryption layers from the final hop back to the first.
    for i in range((num_hops - 1), (- 1), (- 1)):
        # Per-hop keys: 'rho' encrypts the routing info, 'mu' keys the HMAC.
        rho_key = get_bolt04_onion_key(b'rho', hop_shared_secrets[i])
        mu_key = get_bolt04_onion_key(b'mu', hop_shared_secrets[i])
        hops_data[i].hmac = next_hmac
        stream_bytes = generate_cipher_stream(rho_key, HOPS_DATA_SIZE)
        hop_data_bytes = hops_data[i].to_bytes()
        # Right-shift the header by this hop's payload size, prepend the
        # payload, then encrypt the whole header with this hop's stream.
        mix_header = mix_header[:(- len(hop_data_bytes))]
        mix_header = (hop_data_bytes + mix_header)
        mix_header = xor_bytes(mix_header, stream_bytes)
        if ((i == (num_hops - 1)) and (len(filler) != 0)):
            # For the outermost wrap of the last hop, the tail is replaced
            # with the precomputed filler so intermediate hops verify.
            mix_header = (mix_header[:(- len(filler))] + filler)
        # HMAC covers the header plus the associated data.
        packet = (mix_header + associated_data)
        next_hmac = hmac_oneshot(mu_key, msg=packet, digest=hashlib.sha256)
    return OnionPacket(public_key=ecc.ECPrivkey(session_key).get_public_key_bytes(), hops_data=mix_header, hmac=next_hmac)
class BaseEvaluator():
    """Runs a policy in a gym environment over a list of episode configs,
    collecting per-episode success metrics and exporting aggregates."""

    env: gym.Env
    policy: BasePolicy
    # Hard cap on steps per episode so a stuck policy cannot hang evaluation.
    MAX_EPISODE_STEPS = 1000

    def setup(self, env_id: str, policy_cls: Type[BasePolicy], env_kwargs=None):
        """Instantiate the environment and the policy for *env_id*."""
        self.env_id = env_id
        self.env_kwargs = ({} if (env_kwargs is None) else env_kwargs)
        # The policy decides which observation/control modes the env uses.
        obs_mode = policy_cls.get_obs_mode(env_id)
        control_mode = policy_cls.get_control_mode(env_id)
        self.env: BaseEnv = gym.make(self.env_id, obs_mode=obs_mode, control_mode=control_mode, **self.env_kwargs)
        self.policy = policy_cls(self.env_id, self.env.observation_space, self.env.action_space)
        self.result = OrderedDict()

    def evaluate_episode(self, reset_kwargs, render_mode=None):
        """Roll out one episode; return its scalar metrics, or None when it
        never terminates within MAX_EPISODE_STEPS."""
        env = self.env
        policy = self.policy
        obs = env.reset(**reset_kwargs)
        policy.reset(obs)
        for _ in range(self.MAX_EPISODE_STEPS):
            action = policy.act(obs)
            if (render_mode is not None):
                env.render(mode=render_mode)
            (obs, reward, done, info) = env.step(action)
            if done:
                if (render_mode is not None):
                    env.render(mode=render_mode)
                assert ('success' in info), sorted(info.keys())
                # Drop the TimeLimit flag; keep only scalar metrics.
                metrics = extract_scalars_from_info(info, 'TimeLimit.truncated')
                return metrics

    def evaluate_episodes(self, episode_cfgs: List[dict], callback: Callable=None):
        """Evaluate every episode config, storing metrics under episode_id.

        *callback*, if given, is invoked as callback(num_done, metrics)
        after each episode.
        """
        for (i, episode_cfg) in enumerate(episode_cfgs):
            episode_id = episode_cfg.get('episode_id', i)
            reset_kwargs = episode_cfg.get('reset_kwargs', {})
            metrics = self.evaluate_episode(reset_kwargs)
            if (metrics is None):
                raise RuntimeError('Episode {}: check whether time limit is set'.format(episode_id))
            if (episode_id in self.result):
                raise RuntimeError('Episode id {} is not unique.'.format(episode_id))
            self.result[episode_id] = metrics
            if (callback is not None):
                callback((i + 1), metrics)

    def close(self):
        self.env.close()

    def generate_dummy_config(self, env_id, num_episodes: int):
        """Build a minimal evaluation config with sequential episode ids."""
        env_info = dict(env_id=env_id)
        episodes = [dict(episode_id=i) for i in range(num_episodes)]
        return dict(env_info=env_info, episodes=episodes)

    def merge_result(self):
        """Average each metric across all evaluated episodes."""
        merged_result = merge_dicts(self.result.values())
        merged_metrics = {k: np.mean(v) for (k, v) in merged_result.items()}
        return merged_metrics

    def export_to_csv(self, path):
        """Print the aggregated metrics as a table and write them to *path*."""
        import csv
        # BUG FIX: the original did `import tabulate` and then called
        # `tabulate(...)` — calling the module object raises TypeError.
        # The callable is the `tabulate` function inside the module.
        from tabulate import tabulate
        merged_metrics = self.merge_result()
        headers = (['env_id'] + list(merged_metrics.keys()))
        data = [([self.env_id] + list(merged_metrics.values()))]
        print(tabulate(data, headers=headers, tablefmt='psql', floatfmt='.4f'))
        with open(path, 'w') as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(headers)
            csv_writer.writerows(data)
        print('The evaluation result is saved to {}.'.format(path))

    def submit(self):
        raise NotImplementedError

    def error(self, *args, **kwargs):
        raise NotImplementedError
class PointCompoundSource(SandboxSource):
    """Point compound dislocation model (pCDM) sandbox source: three
    orthogonal volume-change components with an ellipsoid rotation."""

    __implements__ = 'CompoundModel'
    rotation_x = Float.T(help='Clockwise rotation of ellipsoid around x-axis in [deg]', default=0.0)
    rotation_y = Float.T(help='Clockwise rotation of ellipsoid around y-axis in [deg]', default=0.0)
    rotation_z = Float.T(help='Clockwise rotation of ellipsoid around z-axis in [deg]', default=0.0)
    dVx = Float.T(help='Volume change in x-plane in [m3]', default=1.0)
    dVy = Float.T(help='Volume change in y-plane in [m3]', default=1.0)
    dVz = Float.T(help='Volume change in z-plane in [m3]', default=1.0)
    nu = Float.T(help="Poisson's ratio, typically 0.25", default=0.25)

    def volume(self):
        # NOTE(review): total volume is the component sum divided by 1.8 —
        # this factor is not derivable from the code here; confirm against
        # the pCDM reference formulation.
        return (((self.dVx + self.dVy) + self.dVz) / 1.8)

    def pointCDMParameters(self):
        """Assemble the parameter dict for the pCDM backend, shifting
        lat/lon into local frame (east/north) coordinates when attached to
        a sandbox."""
        if self._sandbox:
            (east_shift, north_shift) = od.latlon_to_ne_numpy(self._sandbox.frame.llLat, self._sandbox.frame.llLon, self.lat, self.lon)
        else:
            east_shift = 0.0
            north_shift = 0.0
        # NOTE(review): dVx/dVy/dVz are documented as [m3] yet cubed below
        # (self.dVx ** 3) — looks inconsistent with the declared units;
        # confirm the backend's expected parameterisation.
        params = {'x0': (self.easting + east_shift), 'y0': (self.northing + north_shift), 'z0': self.depth, 'rotx': self.rotation_x, 'roty': self.rotation_y, 'rotz': self.rotation_z, 'dVx': (self.dVx ** 3), 'dVy': (self.dVy ** 3), 'dVz': (self.dVz ** 3), 'nu': self.nu}
        return params
class Invoice(TimeStampedModel):
    """Electronic invoice record with sender, amounts, transmission details
    and recipient identification, exportable as XML."""

    sender = models.ForeignKey(Sender, verbose_name=_('Sender'), on_delete=models.PROTECT)
    # True when the recipient is a business (affects which recipient fields
    # below are expected to be filled).
    is_business = models.BooleanField(default=False)
    invoice_number = models.CharField(_('Invoice number'), max_length=20)
    invoice_type = models.CharField(_('Invoice type'), choices=INVOICE_TYPES, max_length=4)
    invoice_currency = models.CharField(_('Invoice currency'), choices=CURRENCIES, max_length=4)
    invoice_date = models.DateField(_('Invoice date'))
    invoice_deadline = models.DateField(_('Invoice deadline'))
    invoice_tax_rate = models.DecimalField(_('Invoice tax rate'), max_digits=5, decimal_places=2)
    invoice_amount = models.DecimalField(_('Invoice amount'), max_digits=10, decimal_places=2)
    invoice_tax_amount = models.DecimalField(_('Invoice tax'), max_digits=10, decimal_places=2)
    transmission_format = models.CharField(_('Transmission format'), choices=TRANSMISSION_FORMATS, max_length=5)
    payment_condition = models.CharField(_('Payment condition'), choices=PAYMENT_CONDITIONS, max_length=5)
    payment_method = models.CharField(_('Payment method'), choices=PAYMENT_METHODS, max_length=5)
    causal = models.TextField(_('Causal'), blank=True)
    # Recipient identification: either a person (first/last name) or a
    # denomination plus code/PEC, depending on is_business.
    recipient_tax_code = models.CharField(_('Tax code'), blank=True, max_length=16)
    recipient_denomination = models.CharField(_('Recipient denomination'), blank=True, max_length=80)
    recipient_first_name = models.CharField(_('Recipient first name'), blank=True, max_length=60)
    recipient_last_name = models.CharField(_('Recipient last name'), blank=True, max_length=60)
    recipient_code = models.CharField(_('Recipient code'), blank=True, max_length=7)
    recipient_pec = models.EmailField(_('Recipient PEC'), blank=True)
    recipient_address = models.ForeignKey(Address, models.PROTECT, verbose_name=_('Recipient Address'))

    def invoice_summary(self):
        """Return the invoice lines as plain dicts, ordered by row number,
        with monetary fields coerced to Decimal."""
        result = list()
        for item in sorted(self.items.iterator(), key=(lambda i: i.row)):
            result.append({'row': item.row, 'description': item.description, 'quantity': item.quantity, 'unit_price': Decimal(item.unit_price), 'total_price': Decimal(item.total_price), 'vat_rate': Decimal(item.vat_rate)})
        return result

    def to_xml(self):
        """Serialise this invoice to its XML transmission format."""
        return invoice_to_xml(self)

    def get_filename(self):
        """File name used when exporting the XML document."""
        return f'{self.invoice_number}.xml'

    def __str__(self):
        # "[Type/Number] Recipient[: causal]" — person name when available,
        # otherwise the recipient code.
        return ((f'[{INVOICE_TYPES[self.invoice_type].title()}/{self.invoice_number}] ' + (f"{f'{self.recipient_first_name} {self.recipient_last_name}'}" if (self.recipient_first_name and self.recipient_last_name) else f'{self.recipient_code}')) + f"{(f': {self.causal}' if self.causal else '')}")
def get_module_summary(module: torch.nn.Module, module_args: Optional[Tuple[Any, ...]] = None, module_kwargs: Optional[MutableMapping[str, Any]] = None) -> ModuleSummary:
    """Summarize *module*, including FLOPs and activation sizes when tensor
    inputs are supplied.

    FLOP/activation analysis is skipped when the module still has
    uninitialized (lazy) parameters or when no tensor inputs are given.
    """
    summary_data = _ModuleSummaryData()
    if not _has_uninitialized_param(module):
        args_have_tensor = _has_tensor(module_args)
        kwargs_have_tensor = _has_tensor(module_kwargs)
        if kwargs_have_tensor:
            # Forward hooks only see positional args, so tensors passed as
            # kwargs make the activation-size estimate unreliable.
            warnings.warn('A tensor in module_kwargs was found. This may lead to an inaccurately computed activation size, as keyword arguments are not passed into forward hooks for modules. For best results, please input tensors though module_args.')
        if args_have_tensor or kwargs_have_tensor:
            summary_data = _get_module_flops_and_activation_sizes(module, module_args, module_kwargs)
    return _generate_module_summary(module, '', summary_data)
class SwinUnet(nn.Module):
    """Swin-Transformer U-Net segmentation wrapper around
    ``SwinTransformerSys``, with pretrained-checkpoint loading."""

    def __init__(self, config, img_size=224, num_classes=21843, zero_head=False, vis=False):
        super(SwinUnet, self).__init__()
        self.num_classes = num_classes
        self.zero_head = zero_head
        self.config = config
        # All architecture hyper-parameters come from the yacs-style config;
        # the img_size/vis constructor args are not used here.
        self.swin_unet = SwinTransformerSys(img_size=config.DATA.IMG_SIZE, patch_size=config.MODEL.SWIN.PATCH_SIZE, in_chans=config.MODEL.SWIN.IN_CHANS, num_classes=self.num_classes, embed_dim=config.MODEL.SWIN.EMBED_DIM, depths=config.MODEL.SWIN.DEPTHS, num_heads=config.MODEL.SWIN.NUM_HEADS, window_size=config.MODEL.SWIN.WINDOW_SIZE, mlp_ratio=config.MODEL.SWIN.MLP_RATIO, qkv_bias=config.MODEL.SWIN.QKV_BIAS, qk_scale=config.MODEL.SWIN.QK_SCALE, drop_rate=config.MODEL.DROP_RATE, drop_path_rate=config.MODEL.DROP_PATH_RATE, ape=config.MODEL.SWIN.APE, patch_norm=config.MODEL.SWIN.PATCH_NORM, use_checkpoint=config.TRAIN.USE_CHECKPOINT)

    def forward(self, x):
        # Replicate grayscale inputs to 3 channels for the patch embedding.
        if (x.size()[1] == 1):
            x = x.repeat(1, 3, 1, 1)
        logits = self.swin_unet(x)
        return logits

    def load_from(self, config):
        """Load pretrained weights from ``config.MODEL.PRETRAIN_CKPT``.

        Checkpoints without a 'model' key are loaded key-stripped; encoder
        checkpoints additionally have their ``layers.N`` weights mirrored
        into the decoder (``layers_up``) in reverse order.
        """
        pretrained_path = config.MODEL.PRETRAIN_CKPT
        if (pretrained_path is not None):
            print('pretrained_path:{}'.format(pretrained_path))
            device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
            pretrained_dict = torch.load(pretrained_path, map_location=device)
            if ('model' not in pretrained_dict):
                print('---start load pretrained modle by splitting---')
                # Strip a fixed 17-character prefix from every key —
                # presumably a wrapper module prefix; TODO confirm.
                pretrained_dict = {k[17:]: v for (k, v) in pretrained_dict.items()}
                for k in list(pretrained_dict.keys()):
                    if ('output' in k):
                        print('delete key:{}'.format(k))
                        del pretrained_dict[k]
                msg = self.swin_unet.load_state_dict(pretrained_dict, strict=False)
                return
            pretrained_dict = pretrained_dict['model']
            print('---start load pretrained modle of swin encoder---')
            model_dict = self.swin_unet.state_dict()
            full_dict = copy.deepcopy(pretrained_dict)
            # Mirror encoder stage weights into the decoder: layers.N maps
            # onto layers_up.(3-N).
            for (k, v) in pretrained_dict.items():
                if ('layers.' in k):
                    current_layer_num = (3 - int(k[7:8]))
                    current_k = (('layers_up.' + str(current_layer_num)) + k[8:])
                    full_dict.update({current_k: v})
            # Drop any entries whose shapes do not match the model.
            for k in list(full_dict.keys()):
                if (k in model_dict):
                    if (full_dict[k].shape != model_dict[k].shape):
                        # NOTE(review): prints v.shape — the LAST loop
                        # variable from the mirroring loop above, not
                        # full_dict[k].shape; looks like a logging bug.
                        print('delete:{};shape pretrain:{};shape model:{}'.format(k, v.shape, model_dict[k].shape))
                        del full_dict[k]
            msg = self.swin_unet.load_state_dict(full_dict, strict=False)
        else:
            print('none pretrain')
def delete_redundant_edges_and_ids(graph):
    """Remove structural scene nodes and transitively implied INSIDE edges.

    Drops nodes of structural classes (walls, floors, ...) together with
    their edges, then deletes any "A INSIDE C" edge that is implied by an
    intermediate container ("A INSIDE B" and "B INSIDE C"). Mutates and
    returns *graph*.
    """
    structural_classes = ['wall', 'floor', 'ceiling', 'door', 'curtain', 'window', 'doorjamb']
    removed_ids = [node['id'] for node in graph['nodes'] if node['class_name'] in structural_classes]
    graph['nodes'] = [node for node in graph['nodes'] if node['id'] not in removed_ids]
    graph['edges'] = [edge for edge in graph['edges'] if edge['from_id'] not in removed_ids and edge['to_id'] not in removed_ids]

    members_of = {}     # container id -> ids of nodes directly INSIDE it
    containers_of = {}  # node id -> ids of containers it is directly INSIDE
    for edge in graph['edges']:
        if edge['relation_type'] != 'INSIDE':
            continue
        members_of.setdefault(edge['to_id'], []).append(edge['from_id'])
        containers_of.setdefault(edge['from_id'], []).append(edge['to_id'])

    kept_edges = []
    for edge in graph['edges']:
        if edge['relation_type'] == 'INSIDE':
            # An intermediate B with A INSIDE B and B INSIDE C makes the
            # direct edge A INSIDE C redundant.
            intermediates = set(containers_of[edge['from_id']]).intersection(members_of[edge['to_id']])
            if intermediates:
                continue
        kept_edges.append(edge)
    graph['edges'] = kept_edges
    return graph
def test_fips_hash_manager_md5(monkeypatch):
    """On FIPS-enabled systems hashlib.md5 raises; HashManager must still
    produce the remaining digests with md5 set to None."""
    md5_raiser = pretend.raiser(ValueError('fipsmode'))
    monkeypatch.setattr(package_file.hashlib, 'md5', md5_raiser)
    wheel_path = 'tests/fixtures/twine-1.5.0-py2.py3-none-any.whl'
    hasher = package_file.HashManager(wheel_path)
    hasher.hash()
    expected = TWINE_1_5_0_WHEEL_HEXDIGEST._replace(md5=None)
    assert hasher.hexdigest() == expected
def register_task(name, dataclass=None):
    """Decorator factory that registers a FairseqTask subclass under *name*,
    optionally with an associated FairseqDataclass configuration."""

    def _register(cls):
        # Reject duplicate registrations and non-task classes up front.
        if name in TASK_REGISTRY:
            raise ValueError(f'Cannot register duplicate task ({name})')
        if not issubclass(cls, FairseqTask):
            raise ValueError(f'Task ({name}: {cls.__name__}) must extend FairseqTask')
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError(f'Cannot register task with duplicate class name ({cls.__name__})')
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
            raise ValueError(f'Dataclass {dataclass} must extend FairseqDataclass')
        cls.__dataclass = dataclass
        TASK_DATACLASS_REGISTRY[name] = dataclass
        return cls

    return _register
def get_platforms_filepath():
    """Locate ``platforms.txt``: the user config directory wins, then the
    packaged default; raise OSError when neither exists."""
    user_file = os.path.join(_get_config_path(), 'platforms.txt')
    if os.path.isfile(user_file):
        return user_file
    # Fall back to the file shipped with the package.
    fallback_file = os.path.join(PKG_CONFIG_DIR, 'platforms.txt')
    if os.path.isfile(fallback_file):
        return fallback_file
    raise OSError('Platform file {filepath} does not exist!'.format(filepath=fallback_file))
class Effect1035(BaseEffect):
    """Passive effect: Logistics Cruisers elite bonus reducing the capacitor
    need of modules that require Shield Emission Systems."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        def requires_shield_emission(mod):
            return mod.item.requiresSkill('Shield Emission Systems')

        bonus = src.getModifiedItemAttr('eliteBonusLogistics2')
        fit.modules.filteredItemBoost(requires_shield_emission, 'capacitorNeed', bonus, skill='Logistics Cruisers', **kwargs)
# NOTE(review): `_cache` looks like a decorator that lost its leading `@`
# when the file was flattened (probably `@_cache` or `@functools.cache`) —
# confirm upstream; as a bare expression statement it does nothing.
_cache

def load_ld_paths(root: str='/', prefix: str='') -> dict[(str, list[str])]:
    """Collect dynamic-linker search paths for the system rooted at *root*.

    Returns a dict with keys 'env' (from LD_LIBRARY_PATH), 'conf' (from
    ld.so.conf or musl's ld-musl-*.path) and 'interp' (left empty here).
    """
    ldpaths: dict = {'conf': [], 'env': [], 'interp': []}
    env_ldpath = os.environ.get('LD_LIBRARY_PATH')
    if (env_ldpath is not None):
        if (root != '/'):
            # LD_LIBRARY_PATH refers to the host, not the alternate root.
            log.warning('ignoring LD_LIBRARY_PATH due to ROOT usage')
        else:
            ldpaths['env'] = parse_ld_paths(env_ldpath, path='')
    libc = get_libc()
    if (libc == Libc.MUSL):
        # musl reads its search path from /etc/ld-musl-<arch>.path.
        root_prefix = (Path(root) / prefix)
        ld_musl = list((root_prefix / 'etc').glob('ld-musl-*.path'))
        assert (len(ld_musl) <= 1)
        if (len(ld_musl) == 0):
            # No path file: musl's built-in default search path.
            ldpaths['conf'] = [(root + '/lib'), (root + '/usr/local/lib'), (root + '/usr/lib')]
        else:
            ldpaths['conf'] = []
            # The path file is a colon-separated list; skip empty entries.
            for ldpath in ld_musl[0].read_text().split(':'):
                ldpath_stripped = ldpath.strip()
                if (ldpath_stripped == ''):
                    continue
                ldpaths['conf'].append((root + ldpath_stripped))
    else:
        # glibc: parse ld.so.conf, then append the standard trusted dirs.
        ldpaths['conf'] = parse_ld_so_conf(((root + prefix) + '/etc/ld.so.conf'), root=root)
        ldpaths['conf'].extend(['/lib', '/lib64/', '/usr/lib', '/usr/lib64'])
    log.debug('linker ldpaths: %s', ldpaths)
    return ldpaths
def rmepsilon(ifst, connect=True, reverse=False, queue_type='auto', delta=_weight.DELTA, weight=None, nstate=_fst.NO_STATE_ID):
    """Return a new mutable FST equal to *ifst* with epsilon arcs removed.

    Raises ValueError when *queue_type* does not name a known queue.
    """
    try:
        resolved_queue = _getters.GetQueueType(queue_type)
    except ValueError:
        raise ValueError('Unknown queue type: {!r}'.format(queue_type))
    resolved_weight = _get_weight_or_default(ifst._weight_factory, weight, False)
    result = ifst._mutable_fst_type()
    ifst._ops.rmepsilon_cons(ifst, result, connect, reverse, resolved_queue, delta, resolved_weight, nstate)
    return result
class SignalReference(XodrBase):
    """OpenDRIVE ``<signalReference>``: a road-side reference to a signal
    defined elsewhere.

    Class-level registries track ids per subclass so auto-generated ids are
    unique across all instances of the same class.
    """

    # class name -> list of ids already handed out (shared across instances)
    _usedIDs = {}
    # class name -> next candidate counter for auto-generated ids
    _IDCounter = {}

    def __init__(self, s, t, id=None, orientation=Orientation.positive):
        """
        Parameters
        ----------
        s : s-coordinate along the road reference line
        t : lateral offset from the reference line
        id : explicit id, or None to auto-generate on _update_id()
        orientation : Orientation of validity for the reference
        """
        super().__init__()
        self.s = s
        self.t = t
        self.orientation = orientation
        self.validity = None
        self.id = id

    def __eq__(self, other):
        if isinstance(other, SignalReference) and super().__eq__(other):
            if self.get_attributes() == other.get_attributes():
                return True
        return False

    def _update_id(self):
        """Ensure ``self.id`` is unique for this class, auto-generating one
        when missing or already taken."""
        try:
            if str(self.id) in self._usedIDs[self.__class__.__name__]:
                print('Warning: id', self.id, 'has already been used for another', self.__class__.__name__, '...auto-generating unique id.')
        except KeyError:
            # First instance of this subclass: initialise its registries.
            self._usedIDs[self.__class__.__name__] = []
            self._IDCounter[self.__class__.__name__] = 0
        # Fix: compare to None with `is`, not `==`.
        if (self.id is None) or (str(self.id) in self._usedIDs[self.__class__.__name__]):
            # Advance the counter past every id already taken.
            while str(self._IDCounter[self.__class__.__name__]) in self._usedIDs[self.__class__.__name__]:
                self._IDCounter[self.__class__.__name__] += 1
            self.id = str(self._IDCounter[self.__class__.__name__])
        self._usedIDs[self.__class__.__name__].append(str(self.id))

    def get_attributes(self):
        """Return the XML attributes of the signalReference as strings."""
        retdict = {}
        retdict['id'] = str(self.id)
        retdict['s'] = str(self.s)
        retdict['t'] = str(self.t)
        if self.orientation == Orientation.positive:
            retdict['orientation'] = '+'
        elif self.orientation == Orientation.negative:
            retdict['orientation'] = '-'
        else:
            retdict['orientation'] = enum2str(self.orientation)
        return retdict

    def add_validity(self, fromLane, toLane):
        """Attach a lane-validity record (at most one allowed); returns self."""
        if self.validity:
            raise ValueError('only one validity is allowed')
        self.validity = Validity(fromLane, toLane)
        return self

    def get_element(self):
        """Build and return the ElementTree element for this reference."""
        element = ET.Element('signalReference', attrib=self.get_attributes())
        self._add_additional_data_to_element(element)
        if self.validity:
            element.append(self.validity.get_element())
        return element
def Save_info(fun):
    """Decorator: persist a truthy iterable result to a timestamped file
    under ``Output/`` (one stripped line per item).

    Fixes over the previous version:
    - the wrapper now returns the wrapped function's result (it used to
      swallow it and return None);
    - ``functools.wraps`` preserves the wrapped function's metadata;
    - the output file is opened once instead of once per line;
    - only I/O errors are silenced (best-effort persistence), not every
      exception.
    """

    @functools.wraps(fun)
    def work(*args, **kwargs):
        result = fun(*args, **kwargs)
        if result:
            timetoken = str(int(time.time()))
            filename = 'Output/{}_result_{}.rabbit'.format(fun.__name__, timetoken)
            try:
                with open(filename, 'a') as fw:
                    for item in result:
                        # One record per line; strip embedded newlines first.
                        fw.write(item.replace('\n', '') + '\n')
            except OSError:
                # Best-effort: keep going even if Output/ is missing or
                # unwritable, mirroring the old silent behavior.
                pass
            print(':' + filename)
        return result

    return work
class ForestToPyDotVisitor(ForestVisitor):
    """Debugging visitor: renders an SPPF to a PNG via pydot/graphviz.

    WARNING: graphs for non-trivial parses get very large very quickly.
    """

    def __init__(self, rankdir='TB'):
        super(ForestToPyDotVisitor, self).__init__(single_visit=True)
        self.pydot = import_module('pydot')
        self.graph = self.pydot.Dot(graph_type='digraph', rankdir=rankdir)

    def visit(self, root, filename):
        """Walk the forest rooted at *root*, then write the PNG to *filename*."""
        super(ForestToPyDotVisitor, self).visit(root)
        try:
            self.graph.write_png(filename)
        except FileNotFoundError as e:
            # Fix: the old call passed `e` as a logging positional arg with no
            # format placeholder, so the exception text was silently dropped.
            logger.error('Could not write png: %s', e)

    def visit_token_node(self, node):
        graph_node_id = str(id(node))
        # Escape embedded quotes so the dot label stays well-formed.
        graph_node_label = '"{}"'.format(node.value.replace('"', '\\"'))
        graph_node_color = 0x808080  # grey (8421504)
        graph_node_style = '"filled,rounded"'
        graph_node_shape = 'diamond'
        graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor='#{:06x}'.format(graph_node_color), shape=graph_node_shape, label=graph_node_label)
        self.graph.add_node(graph_node)

    def visit_packed_node_in(self, node):
        graph_node_id = str(id(node))
        graph_node_label = repr(node)
        graph_node_color = 0x808080
        graph_node_style = 'filled'
        graph_node_shape = 'diamond'
        graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor='#{:06x}'.format(graph_node_color), shape=graph_node_shape, label=graph_node_label)
        self.graph.add_node(graph_node)
        # Yield both alternatives so the visitor recurses into them.
        (yield node.left)
        (yield node.right)

    def visit_packed_node_out(self, node):
        graph_node_id = str(id(node))
        graph_node = self.graph.get_node(graph_node_id)[0]
        for child in [node.left, node.right]:
            if child is not None:
                # Token children are graphed by their underlying token object.
                child_graph_node_id = str(id(child.token if isinstance(child, TokenNode) else child))
                child_graph_node = self.graph.get_node(child_graph_node_id)[0]
                self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node))
            else:
                # Invisible placeholder keeps left/right layout stable when a
                # child is absent. Fix: the random-id range had been lost from
                # the source; restored to a span wide enough that collisions
                # are negligible.
                child_graph_node_id = str(randint(100000000000000000000000, 123456789012345678901234567890))
                child_graph_node_style = 'invis'
                child_graph_node = self.pydot.Node(child_graph_node_id, style=child_graph_node_style, label='None')
                child_edge_style = 'invis'
                self.graph.add_node(child_graph_node)
                self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node, style=child_edge_style))

    def visit_symbol_node_in(self, node):
        graph_node_id = str(id(node))
        graph_node_label = repr(node)
        graph_node_color = 0x808080
        graph_node_style = '"filled"'
        # Intermediate nodes render as ellipses, symbol nodes as rectangles.
        if node.is_intermediate:
            graph_node_shape = 'ellipse'
        else:
            graph_node_shape = 'rectangle'
        graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor='#{:06x}'.format(graph_node_color), shape=graph_node_shape, label=graph_node_label)
        self.graph.add_node(graph_node)
        return iter(node.children)

    def visit_symbol_node_out(self, node):
        graph_node_id = str(id(node))
        graph_node = self.graph.get_node(graph_node_id)[0]
        for child in node.children:
            child_graph_node_id = str(id(child))
            child_graph_node = self.graph.get_node(child_graph_node_id)[0]
            self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node))
def downsample_avg(in_chs, out_chs, kernel_size=1, stride=1, dilation=1, norm_layer=None, preact=False):
    """Downsample path: average pool (when striding/dilating) + 1x1 projection.

    `kernel_size` is accepted for signature compatibility; the projection is
    always 1x1. With `preact` the conv is bare; otherwise conv + norm, no act.
    """
    norm_layer = norm_layer or nn.BatchNorm2d
    if stride > 1 or dilation > 1:
        avg_stride = stride if dilation == 1 else 1
        # Same-padded pooling is only needed when dilating without striding.
        pool_cls = AvgPool2dSame if (avg_stride == 1 and dilation > 1) else nn.AvgPool2d
        pool = pool_cls(2, avg_stride, ceil_mode=True, count_include_pad=False)
    else:
        pool = nn.Identity()
    if preact:
        proj = create_conv2d(in_chs, out_chs, 1, stride=1)
    else:
        proj = ConvNormAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, apply_act=False)
    return nn.Sequential(pool, proj)
def test_utime_as_datetime():
    """utime (microseconds since the epoch) converts to tz-aware datetimes."""
    # Fix: the expected-instant constant had been lost from the source; this
    # is 2022-10-31T18:02:03.123456Z expressed in microseconds since the
    # epoch, matching every assertion below.
    the_utime = 1667239323123456
    actual_dt1 = qcore.utime_as_datetime(the_utime)
    assert_eq(actual_dt1.tzname(), 'UTC')
    assert_eq(actual_dt1, datetime(2022, 10, 31, 18, 2, 3, 123456, tzinfo=timezone.utc))
    # Same instant rendered in a fixed +07:00 zone.
    actual_dt2 = qcore.utime_as_datetime(the_utime, tz=PLUS_7_TZ)
    assert_eq(actual_dt2.tzname(), 'UTC+07:00')
    assert_eq(actual_dt2, datetime(2022, 11, 1, 1, 2, 3, 123456, tzinfo=PLUS_7_TZ))
    # Aware datetimes compare by instant, regardless of zone.
    assert_eq(actual_dt2, actual_dt1)
    assert_eq(actual_dt2, datetime(2022, 10, 31, 18, 2, 3, 123456, tzinfo=timezone.utc))
def test_matrix(hatch, helpers, temp_dir_data, config_file):
    # End-to-end: creating a matrix env should build the compatible variant
    # (test.42) and report the platform-restricted one (test.9000) as skipped.
    # NOTE(review): the expected-output string literals below look
    # whitespace-mangled (original indentation collapsed to single spaces) —
    # verify against the upstream test before relying on them.
    config_file.model.template.plugins['default']['tests'] = False  # no test scaffolding in the generated project
    config_file.save()
    project_name = 'My.App'
    with temp_dir_data.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir_data / 'my-app')
    project = Project(project_path)
    helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
    # Matrix of two versions; the override pins version '9000' to platform
    # 'foo', which never matches, so that variant must be skipped.
    helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}], 'overrides': {'matrix': {'version': {'platforms': [{'value': 'foo', 'if': ['9000']}]}}}})
    with project_path.as_cwd():
        result = hatch('env', 'create', 'test')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent('\n Creating environment: test.42\n Checking dependencies\n Skipped 1 incompatible environment:\n test.9000 -> unsupported platform\n '))
    # One storage directory should have been created for the project.
    env_data_path = (((temp_dir_data / 'data') / 'env') / 'virtual')
    assert env_data_path.is_dir()
    project_data_path = (env_data_path / project_path.name)
    assert project_data_path.is_dir()
    storage_dirs = list(project_data_path.iterdir())
    assert (len(storage_dirs) == 1)
    storage_path = storage_dirs[0]
    # `env find` still lists both variants, including the skipped one.
    with project_path.as_cwd():
        result = hatch('env', 'find', 'test')
    assert (result.exit_code == 0), result.output
    assert (result.output == helpers.dedent(f''' {(storage_path / 'test.9000')} {(storage_path / 'test.42')} '''))
class SawyerPegUnplugSideV2Policy(Policy):
    """Scripted policy for the peg-unplug-side task: hover behind the peg,
    descend to grasp height, grip, then pull along +x.

    Fix: `_parse_obs`, `_desired_pos` and `_grab_effort` take no ``self`` yet
    are invoked as ``self._...`` — without ``@staticmethod`` that raises
    TypeError; the decorators (apparently stripped during extraction) are
    restored.
    """

    @staticmethod
    @_fully_parsed  # NOTE(review): decorator name looks truncated in the source (likely `assert_fully_parsed`) — verify
    def _parse_obs(obs):
        # Fixed observation layout: hand xyz, gripper, peg xyz, remainder unused.
        return {
            'hand_pos': obs[:3],
            'unused_gripper': obs[3],
            'peg_pos': obs[4:7],
            'unused_info': obs[7:],
        }

    def get_action(self, obs):
        """Map an observation to a 4-dof action (delta position + grip)."""
        o_d = self._parse_obs(obs)
        action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
        # Proportional controller toward the scripted target.
        action['delta_pos'] = move(o_d['hand_pos'], to_xyz=self._desired_pos(o_d), p=25.0)
        action['grab_effort'] = self._grab_effort(o_d)
        return action.array

    @staticmethod
    def _desired_pos(o_d):
        pos_curr = o_d['hand_pos']
        # Grasp point slightly behind (-x) and above the observed peg.
        pos_peg = o_d['peg_pos'] + np.array([-0.02, 0.0, 0.035])
        if np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > 0.04:
            return pos_peg + np.array([0.0, 0.0, 0.2])  # first hover above the peg
        elif abs(pos_curr[2] - 0.15) > 0.02:
            return np.array([*pos_peg[:2], 0.15])  # descend to grasp height
        else:
            return pos_curr + np.array([0.01, 0.0, 0.0])  # pull the peg out along +x

    @staticmethod
    def _grab_effort(o_d):
        pos_curr = o_d['hand_pos']
        pos_peg = o_d['peg_pos'] + np.array([-0.02, 0.0, 0.035])
        # Open the gripper until aligned and close enough, then grip lightly.
        if (np.linalg.norm(pos_curr[:2] - pos_peg[:2]) > 0.04) or (abs(pos_curr[2] - pos_peg[2]) > 0.15):
            return -1.0
        else:
            return 0.1
class _Looks(BaseSprite):
    """Implementations of Scratch "Looks" blocks for the stage/backdrop.

    Most graphic-effect methods are intentionally empty placeholders; only
    the ghost effect keeps state (``self.ghost``). The function attributes
    (opcode / param / value / translation / return_type) attached after each
    ``def`` are metadata the block-to-method mapper uses to resolve a Scratch
    opcode plus EFFECT field to the right method.
    """

    def __init__(self):
        super().__init__()

    def looks_switchbackdropto(self, backdrop):
        # Switch the stage backdrop via the shared costume manager.
        self.stage.costume_manager.switch_costume(backdrop)

    def looks_nextbackdrop(self):
        self.stage.costume_manager.next_costume()

    # --- looks_seteffectto: one method per EFFECT dropdown value ---

    def looks_seteffectto_color(self, value):
        pass  # effect not implemented
    looks_seteffectto_color.opcode = 'looks_seteffectto'
    looks_seteffectto_color.param = 'EFFECT'
    looks_seteffectto_color.value = 'COLOR'
    looks_seteffectto_color.translation = 'looks_effect_color'

    def looks_seteffectto_fisheye(self, value):
        pass  # effect not implemented
    looks_seteffectto_fisheye.opcode = 'looks_seteffectto'
    looks_seteffectto_fisheye.param = 'EFFECT'
    looks_seteffectto_fisheye.value = 'FISHEYE'
    looks_seteffectto_fisheye.translation = 'looks_effect_fisheye'

    def looks_seteffectto_whirl(self, value):
        pass  # effect not implemented
    looks_seteffectto_whirl.opcode = 'looks_seteffectto'
    looks_seteffectto_whirl.param = 'EFFECT'
    looks_seteffectto_whirl.value = 'WHIRL'
    looks_seteffectto_whirl.translation = 'looks_effect_whirl'

    def looks_seteffectto_pixelate(self, value):
        pass  # effect not implemented
    looks_seteffectto_pixelate.opcode = 'looks_seteffectto'
    looks_seteffectto_pixelate.param = 'EFFECT'
    looks_seteffectto_pixelate.value = 'PIXELATE'
    looks_seteffectto_pixelate.translation = 'looks_effect_pixelate'

    def looks_seteffectto_mosaic(self, value):
        pass  # effect not implemented
    looks_seteffectto_mosaic.opcode = 'looks_seteffectto'
    looks_seteffectto_mosaic.param = 'EFFECT'
    looks_seteffectto_mosaic.value = 'MOSAIC'
    looks_seteffectto_mosaic.translation = 'looks_effect_mosaic'

    def looks_seteffectto_brightness(self, value):
        pass  # effect not implemented
    looks_seteffectto_brightness.opcode = 'looks_seteffectto'
    looks_seteffectto_brightness.param = 'EFFECT'
    looks_seteffectto_brightness.value = 'BRIGHTNESS'
    looks_seteffectto_brightness.translation = 'looks_effect_brightness'

    def looks_seteffectto_ghost(self, value):
        # Ghost (transparency) is the only effect with real state.
        self.ghost = value
    looks_seteffectto_ghost.opcode = 'looks_seteffectto'
    looks_seteffectto_ghost.param = 'EFFECT'
    looks_seteffectto_ghost.value = 'GHOST'
    looks_seteffectto_ghost.translation = 'looks_effect_ghost'

    # --- looks_changeeffectby: relative counterparts of the above ---

    def looks_changeeffectby_color(self, value):
        pass  # effect not implemented
    looks_changeeffectby_color.opcode = 'looks_changeeffectby'
    looks_changeeffectby_color.param = 'EFFECT'
    looks_changeeffectby_color.value = 'COLOR'
    looks_changeeffectby_color.translation = 'looks_effect_color'

    def looks_changeeffectby_fisheye(self, value):
        pass  # effect not implemented
    looks_changeeffectby_fisheye.opcode = 'looks_changeeffectby'
    looks_changeeffectby_fisheye.param = 'EFFECT'
    looks_changeeffectby_fisheye.value = 'FISHEYE'
    looks_changeeffectby_fisheye.translation = 'looks_effect_fisheye'

    def looks_changeeffectby_whirl(self, value):
        pass  # effect not implemented
    looks_changeeffectby_whirl.opcode = 'looks_changeeffectby'
    looks_changeeffectby_whirl.param = 'EFFECT'
    looks_changeeffectby_whirl.value = 'WHIRL'
    looks_changeeffectby_whirl.translation = 'looks_effect_whirl'

    def looks_changeeffectby_pixelate(self, value):
        pass  # effect not implemented
    looks_changeeffectby_pixelate.opcode = 'looks_changeeffectby'
    looks_changeeffectby_pixelate.param = 'EFFECT'
    looks_changeeffectby_pixelate.value = 'PIXELATE'
    looks_changeeffectby_pixelate.translation = 'looks_effect_pixelate'

    def looks_changeeffectby_mosaic(self, value):
        pass  # effect not implemented
    looks_changeeffectby_mosaic.opcode = 'looks_changeeffectby'
    looks_changeeffectby_mosaic.param = 'EFFECT'
    looks_changeeffectby_mosaic.value = 'MOSAIC'
    looks_changeeffectby_mosaic.translation = 'looks_effect_mosaic'

    def looks_changeeffectby_brightness(self, value):
        pass  # effect not implemented
    looks_changeeffectby_brightness.opcode = 'looks_changeeffectby'
    looks_changeeffectby_brightness.param = 'EFFECT'
    looks_changeeffectby_brightness.value = 'BRIGHTNESS'
    looks_changeeffectby_brightness.translation = 'looks_effect_brightness'

    def looks_changeeffectby_ghost(self, value):
        # NOTE(review): assumes self.ghost was initialised elsewhere
        # (presumably in BaseSprite) — confirm.
        self.ghost += value
    looks_changeeffectby_ghost.opcode = 'looks_changeeffectby'
    looks_changeeffectby_ghost.param = 'EFFECT'
    looks_changeeffectby_ghost.value = 'GHOST'
    looks_changeeffectby_ghost.translation = 'looks_effect_ghost'

    def looks_cleargraphiceffects(self):
        pass  # effects other than ghost are not tracked, so nothing to clear

    # --- reporters for the current backdrop's number / name ---

    def looks_backdropnumbername_number(self):
        # Scratch reports 1-based backdrop numbers.
        return (self.stage.costume_manager.current_costume + 1)
    looks_backdropnumbername_number.opcode = 'looks_backdropnumbername'
    looks_backdropnumbername_number.param = 'NUMBER_NAME'
    looks_backdropnumbername_number.value = 'number'
    looks_backdropnumbername_number.translation = 'looks_numbername_number'
    looks_backdropnumbername_number.return_type = int

    def looks_backdropnumbername_name(self):
        costume = self.stage.costume_manager.get_costume()
        if (not costume):
            return ''  # no current costume -> empty name
        return costume.name
    looks_backdropnumbername_name.opcode = 'looks_backdropnumbername'
    looks_backdropnumbername_name.param = 'NUMBER_NAME'
    looks_backdropnumbername_name.value = 'name'
    looks_backdropnumbername_name.translation = 'looks_numbername_name'
    looks_backdropnumbername_name.return_type = str
@register_task('multilingual_masked_lm')
class MultiLingualMaskedLMTask(FairseqTask):
    """Masked-LM training over several languages at once.

    Each language lives in its own subdirectory of the data path; per-language
    datasets are masked independently and concatenated, with optional
    temperature-based resampling of the training split.

    Fix: restored the decorators the call sites rely on — ``@register_task``
    (the registry decorator is defined in this file), ``@staticmethod`` on
    ``add_args``, ``@classmethod`` on ``setup_task`` (it takes ``cls`` and
    calls ``cls(...)``), and ``@property`` on the dictionaries (``load_dataset``
    reads ``self.source_dictionary`` as an attribute, not a call).
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to *parser*."""
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample tokens. If set to "complete", splits samples only at the end of sentence, but may include multiple sentences per sample. "complete_doc" is similar but respects doc boundaries. If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=512, type=int, help='max number of total tokens over all segments per sample for BERT dataset')
        parser.add_argument('--mask-prob', default=0.15, type=float, help='probability of replacing a token with mask')
        parser.add_argument('--leave-unmasked-prob', default=0.1, type=float, help='probability that a masked token is unmasked')
        parser.add_argument('--random-token-prob', default=0.1, type=float, help='probability of replacing a token with a random token')
        parser.add_argument('--freq-weighted-replacement', action='store_true', help='sample random replacement words based on word frequencies')
        parser.add_argument('--mask-whole-words', default=False, action='store_true', help='mask whole words; you may also want to set --bpe')
        parser.add_argument('--multilang-sampling-alpha', type=float, default=1.0, help='smoothing alpha for sample rations across multiple datasets')

    def __init__(self, args, dictionary):
        super().__init__(args)
        self.dictionary = dictionary
        self.seed = args.seed
        # <mask> is appended to the dictionary so existing indices stay valid.
        self.mask_idx = dictionary.add_symbol('<mask>')

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Build the task from args; loads the dictionary shared by all languages."""
        paths = args.data.split(':')
        assert (len(paths) > 0)
        dictionary = Dictionary.load(os.path.join(paths[0], 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))
        return cls(args, dictionary)

    def _get_whole_word_mask(self):
        """Per-vocab-index tensor marking word starts (or None when disabled)."""
        if self.args.mask_whole_words:
            bpe = encoders.build_bpe(self.args)
            if (bpe is not None):

                def is_beginning_of_word(i):
                    if (i < self.source_dictionary.nspecial):
                        return True  # special tokens count as word starts
                    tok = self.source_dictionary[i]
                    if tok.startswith('madeupword'):
                        return True
                    try:
                        return bpe.is_beginning_of_word(tok)
                    except ValueError:
                        return True

                mask_whole_words = torch.ByteTensor(list(map(is_beginning_of_word, range(len(self.source_dictionary)))))
            # NOTE(review): if --mask-whole-words is set but no bpe is
            # configured, mask_whole_words is left unbound and the return
            # below raises UnboundLocalError — confirm whether that is an
            # intentional hard failure.
        else:
            mask_whole_words = None
        return mask_whole_words

    def _get_sample_prob(self, dataset_lens):
        """Temperature-smoothed sampling probabilities per language
        (alpha < 1 upweights low-resource languages)."""
        prob = (dataset_lens / dataset_lens.sum())
        smoothed_prob = (prob ** self.args.multilang_sampling_alpha)
        smoothed_prob = (smoothed_prob / smoothed_prob.sum())
        return smoothed_prob

    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load *split* for every language directory under the epoch's data path."""
        paths = self.args.data.split(':')
        assert (len(paths) > 0)
        data_path = paths[(epoch % len(paths))]  # round-robin over data dirs
        languages = [name for name in os.listdir(data_path) if os.path.isdir(os.path.join(data_path, name))]
        print('| Training on {0} languages: {1}'.format(len(languages), languages))
        print('| Language to id mapping: ', {lang: id for (id, lang) in enumerate(languages)})
        mask_whole_words = self._get_whole_word_mask()
        lang_datasets = []
        for (lang_id, language) in enumerate(languages):
            split_path = os.path.join(data_path, language, split)
            dataset = data_utils.load_indexed_dataset(split_path, self.source_dictionary, self.args.dataset_impl, combine=combine)
            if (dataset is None):
                raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
            # Reserve one position for the prepended <bos>.
            dataset = TokenBlockDataset(dataset, dataset.sizes, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode=self.args.sample_break_mode)
            print('| loaded {} blocks from: {}'.format(len(dataset), split_path))
            dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
            (src_dataset, tgt_dataset) = MaskTokensDataset.apply_mask(dataset, self.source_dictionary, pad_idx=self.source_dictionary.pad(), mask_idx=self.mask_idx, seed=self.args.seed, mask_prob=self.args.mask_prob, leave_unmasked_prob=self.args.leave_unmasked_prob, random_token_prob=self.args.random_token_prob, freq_weighted_replacement=self.args.freq_weighted_replacement, mask_whole_words=mask_whole_words)
            lang_dataset = NestedDictionaryDataset({'net_input': {'src_tokens': PadDataset(src_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'src_lengths': NumelDataset(src_dataset, reduce=False)}, 'target': PadDataset(tgt_dataset, pad_idx=self.source_dictionary.pad(), left_pad=False), 'nsentences': NumSamplesDataset(), 'ntokens': NumelDataset(src_dataset, reduce=True), 'lang_id': RawLabelDataset(([lang_id] * src_dataset.sizes.shape[0]))}, sizes=[src_dataset.sizes])
            lang_datasets.append(lang_dataset)
        if (split == self.args.train_subset):
            # Resample the training split per language by the smoothed probs.
            dataset_lengths = np.array([len(d) for d in lang_datasets], dtype=float)
            sample_probs = self._get_sample_prob(dataset_lengths)
            print('| Sample probability by language: ', {lang: '{0:.4f}'.format(sample_probs[id]) for (id, lang) in enumerate(languages)})
            size_ratio = ((sample_probs * dataset_lengths.sum()) / dataset_lengths)
            print('| Up/Down Sampling ratio by language: ', {lang: '{0:.2f}'.format(size_ratio[id]) for (id, lang) in enumerate(languages)})
            resampled_lang_datasets = [ResamplingDataset(lang_datasets[i], size_ratio=size_ratio[i], seed=self.args.seed, epoch=epoch, replace=(size_ratio[i] >= 1.0)) for (i, d) in enumerate(lang_datasets)]
            dataset = ConcatDataset(resampled_lang_datasets)
        else:
            # valid/test: also register one split per language so each
            # language can be evaluated separately.
            dataset = ConcatDataset(lang_datasets)
            lang_splits = [split]
            for (lang_id, lang_dataset) in enumerate(lang_datasets):
                split_name = ((split + '_') + languages[lang_id])
                lang_splits.append(split_name)
                self.datasets[split_name] = lang_dataset
            if (split in self.args.valid_subset):
                self.args.valid_subset = self.args.valid_subset.replace(split, ','.join(lang_splits))
        # Deterministic shuffle keyed on seed + epoch, then sort by length.
        with data_utils.numpy_seed((self.args.seed + epoch)):
            shuffle = np.random.permutation(len(dataset))
        self.datasets[split] = SortDataset(dataset, sort_order=[shuffle, dataset.sizes])

    def build_dataset_for_inference(self, src_tokens, src_lengths, sort=True):
        """Wrap raw token tensors into the net_input structure used at inference."""
        src_dataset = PadDataset(TokenBlockDataset(src_tokens, src_lengths, (self.args.tokens_per_sample - 1), pad=self.source_dictionary.pad(), eos=self.source_dictionary.eos(), break_mode='eos'), pad_idx=self.source_dictionary.pad(), left_pad=False)
        src_dataset = PrependTokenDataset(src_dataset, self.source_dictionary.bos())
        src_dataset = NestedDictionaryDataset({'id': IdDataset(), 'net_input': {'src_tokens': src_dataset, 'src_lengths': NumelDataset(src_dataset, reduce=False)}}, sizes=src_lengths)
        if sort:
            src_dataset = SortDataset(src_dataset, sort_order=[src_lengths])
        return src_dataset

    def get_batch_iterator(self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0):
        # Invalidate the cached epoch iterator so each call rebuilds batches.
        self.dataset_to_epoch_iter = None
        return super().get_batch_iterator(dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch)

    @property
    def source_dictionary(self):
        return self.dictionary

    @property
    def target_dictionary(self):
        return self.dictionary
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone (LARGE or SMALL) with optional intermediate outputs.

    ``forward`` returns the final logits for the default configuration, or an
    OrderedDict of the named intermediate activations listed in
    ``output_layers``.
    """

    def __init__(self, model_mode='LARGE', num_classes=1000, multiplier=1.0, dropout_rate=0.0, output_layers=None):
        """
        Args:
            model_mode: 'LARGE' or 'SMALL' variant.
            num_classes: size of the final classifier output.
            multiplier: width multiplier applied to channel counts.
            dropout_rate: dropout before the final 1x1 classifier conv.
            output_layers: names of layers forward should return
                (default ['default'] -> return final logits only).
        """
        super(MobileNetV3, self).__init__()
        self.num_classes = num_classes
        # Fix: avoid a mutable default argument shared between instances.
        self.output_layers = ['default'] if output_layers is None else output_layers
        if (model_mode == 'LARGE'):
            # (a spec table previously kept here was unused in this branch;
            # the blocks below encode the same configuration explicitly)
            init_conv_out = _make_divisible((16 * multiplier))
            self.init_conv = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=init_conv_out, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(init_conv_out), h_swish(inplace=True))
            self.layer1 = MobileBlock(16, 16, 3, 1, 'RE', False, 16)
            self.layer2 = nn.Sequential(MobileBlock(16, 24, 3, 2, 'RE', False, 64), MobileBlock(24, 24, 3, 1, 'RE', False, 72))
            self.layer3 = nn.Sequential(MobileBlock(24, 40, 5, 2, 'RE', True, 72), MobileBlock(40, 40, 5, 1, 'RE', True, 120), MobileBlock(40, 40, 5, 1, 'RE', True, 120))
            self.layer4 = nn.Sequential(MobileBlock(40, 80, 3, 2, 'HS', False, 240), MobileBlock(80, 80, 3, 1, 'HS', False, 200), MobileBlock(80, 80, 3, 1, 'HS', False, 184), MobileBlock(80, 80, 3, 1, 'HS', False, 184))
            self.layer5 = nn.Sequential(MobileBlock(80, 112, 3, 1, 'HS', True, 480), MobileBlock(112, 112, 3, 1, 'HS', True, 672))
            self.layer6 = nn.Sequential(MobileBlock(112, 160, 5, 1, 'HS', True, 672), MobileBlock(160, 160, 5, 2, 'HS', True, 672), MobileBlock(160, 160, 5, 1, 'HS', True, 960))
            out_conv1_in = _make_divisible((160 * multiplier))
            out_conv1_out = _make_divisible((960 * multiplier))
            self.out_conv1 = nn.Sequential(nn.Conv2d(out_conv1_in, out_conv1_out, kernel_size=1, stride=1), nn.BatchNorm2d(out_conv1_out), h_swish(inplace=True))
            out_conv2_in = _make_divisible((960 * multiplier))
            out_conv2_out = _make_divisible((1280 * multiplier))
            # Classifier head: 1x1 expand, dropout, 1x1 to num_classes.
            self.out_conv2 = nn.Sequential(nn.Conv2d(out_conv2_in, out_conv2_out, kernel_size=1, stride=1), h_swish(inplace=True), nn.Dropout(dropout_rate), nn.Conv2d(out_conv2_out, self.num_classes, kernel_size=1, stride=1))
        elif (model_mode == 'SMALL'):
            layers = [[16, 16, 3, 2, 'RE', True, 16], [16, 24, 3, 2, 'RE', False, 72], [24, 24, 3, 1, 'RE', False, 88], [24, 40, 5, 2, 'RE', True, 96], [40, 40, 5, 1, 'RE', True, 240], [40, 40, 5, 1, 'RE', True, 240], [40, 48, 5, 1, 'HS', True, 120], [48, 48, 5, 1, 'HS', True, 144], [48, 96, 5, 2, 'HS', True, 288], [96, 96, 5, 1, 'HS', True, 576], [96, 96, 5, 1, 'HS', True, 576]]
            init_conv_out = _make_divisible((16 * multiplier))
            self.init_conv = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=init_conv_out, kernel_size=3, stride=2, padding=1), nn.BatchNorm2d(init_conv_out), h_swish(inplace=True))
            # NOTE(review): these blocks sit in a plain Python list, so they
            # are NOT registered as submodules (parameters won't be trained or
            # moved by .to()), and forward() below never consumes them — it
            # expects layer1..layer6, which only the LARGE branch defines.
            # SMALL mode therefore looks broken as shipped; left structurally
            # unchanged pending confirmation.
            self.block = []
            for (in_channels, out_channels, kernal_size, stride, nonlinear, se, exp_size) in layers:
                in_channels = _make_divisible((in_channels * multiplier))
                out_channels = _make_divisible((out_channels * multiplier))
                exp_size = _make_divisible((exp_size * multiplier))
                self.block.append(MobileBlock(in_channels, out_channels, kernal_size, stride, nonlinear, se, exp_size))
            out_conv1_in = _make_divisible((96 * multiplier))
            out_conv1_out = _make_divisible((576 * multiplier))
            self.out_conv1 = nn.Sequential(nn.Conv2d(out_conv1_in, out_conv1_out, kernel_size=1, stride=1), SqueezeBlock(out_conv1_out), nn.BatchNorm2d(out_conv1_out), h_swish(inplace=True))
            out_conv2_in = _make_divisible((576 * multiplier))
            out_conv2_out = _make_divisible((1280 * multiplier))
            self.out_conv2 = nn.Sequential(nn.Conv2d(out_conv2_in, out_conv2_out, kernel_size=1, stride=1), h_swish(inplace=True), nn.Dropout(dropout_rate), nn.Conv2d(out_conv2_out, self.num_classes, kernel_size=1, stride=1))
        self.apply(_weights_init)

    def _add_output_and_check(self, name, x, outputs, output_layers):
        # Record *x* when its stage is requested; return True once every
        # requested output has been collected so forward can exit early.
        if (name in output_layers):
            outputs[name] = x
        return (len(output_layers) == len(outputs))

    def forward(self, x, output_layers=None):
        """Run the backbone; see class docstring for the return contract."""
        outputs = OrderedDict()
        if (output_layers is None):
            output_layers = self.output_layers
        out = self.init_conv(x)
        if self._add_output_and_check('init_conv', out, outputs, output_layers):
            return outputs
        out = self.layer1(out)
        if self._add_output_and_check('layer1', out, outputs, output_layers):
            return outputs
        out = self.layer2(out)
        if self._add_output_and_check('layer2', out, outputs, output_layers):
            return outputs
        out = self.layer3(out)
        if self._add_output_and_check('layer3', out, outputs, output_layers):
            return outputs
        out = self.layer4(out)
        if self._add_output_and_check('layer4', out, outputs, output_layers):
            return outputs
        out = self.layer5(out)
        if self._add_output_and_check('layer5', out, outputs, output_layers):
            return outputs
        out = self.layer6(out)
        if self._add_output_and_check('layer6', out, outputs, output_layers):
            return outputs
        out = self.out_conv1(out)
        if self._add_output_and_check('layer_out', out, outputs, output_layers):
            return outputs
        # Global average pool down to 1x1, then the 1x1-conv classifier head.
        (batch, channels, height, width) = out.size()
        out = F.avg_pool2d(out, kernel_size=[height, width])
        out = self.out_conv2(out).view(batch, (- 1))
        if ((len(output_layers) == 1) and (output_layers[0] == 'default')):
            return out
        return outputs