code
stringlengths
281
23.7M
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run a hyperparameter search with Ray Tune and return the best run.

    Args:
        trainer: a HF-style Trainer providing `hp_space`, `evaluate`, etc.
        n_trials: number of Ray Tune samples to draw.
        direction: 'minimize'/'maximize' (only the first 3 chars are used).
        **kwargs: forwarded to `ray.tune.run` (resources, scheduler, ...).
    """
    import ray

    def _objective(trial, local_trainer, checkpoint_dir=None):
        try:
            from transformers.utils.notebook import NotebookProgressCallback

            # Ray workers are not notebooks: swap the notebook progress bar
            # for the console callback if it was installed.
            if local_trainer.pop_callback(NotebookProgressCallback):
                local_trainer.add_callback(ProgressCallback)
        except ModuleNotFoundError:
            pass
        checkpoint = None
        if checkpoint_dir:
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    checkpoint = os.path.join(checkpoint_dir, subdir)
        local_trainer.objective = None
        local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
        # If evaluation never ran during training, evaluate now and report.
        if getattr(local_trainer, 'objective', None) is None:
            metrics = local_trainer.evaluate()
            local_trainer.objective = local_trainer.compute_objective(metrics)
            local_trainer._tune_save_checkpoint()
            ray.tune.report(objective=local_trainer.objective, **metrics, done=True)

    if not trainer._memory_tracker.skip_memory_metrics:
        from .trainer_utils import TrainerMemoryTracker

        logger.warning('Memory tracking for your Trainer is currently enabled. Automatically disabling the memory tracker since the memory tracker is not serializable.')
        trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)
    # The TensorBoard writer does not pickle; detach it and re-attach at the end.
    _tb_writer = trainer.pop_callback(TensorBoardCallback)
    trainer.model = None
    if 'resources_per_trial' not in kwargs:
        kwargs['resources_per_trial'] = {'cpu': 1}
        if trainer.args.n_gpu > 0:
            kwargs['resources_per_trial']['gpu'] = 1
        resource_msg = '1 CPU' + (' and 1 GPU' if trainer.args.n_gpu > 0 else '')
        # BUG FIX: this message was a single-quoted literal broken across a
        # physical newline (a syntax error); rejoined into one line.
        logger.info(f'No `resources_per_trial` arg was passed into `hyperparameter_search`. Setting it to a default value of {resource_msg} for each trial.')
    gpus_per_trial = kwargs['resources_per_trial'].get('gpu', 0)
    trainer.args._n_gpu = gpus_per_trial
    if 'progress_reporter' not in kwargs:
        from ray.tune import CLIReporter

        kwargs['progress_reporter'] = CLIReporter(metric_columns=['objective'])
    if 'keep_checkpoints_num' in kwargs and kwargs['keep_checkpoints_num'] > 0:
        trainer.use_tune_checkpoints = True
        if kwargs['keep_checkpoints_num'] > 1:
            logger.warning(f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. Checkpoints are usually huge, consider setting `keep_checkpoints_num=1`.")
    if 'scheduler' in kwargs:
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining

        if isinstance(kwargs['scheduler'], PopulationBasedTraining):
            if not trainer.use_tune_checkpoints:
                logger.warning("You are using PopulationBasedTraining but you haven't enabled checkpointing. This means your trials will train from scratch everytime they are exploiting new configurations. Consider enabling checkpointing by passing `keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`.")
        # Early-stopping schedulers need intermediate reports, which only
        # happen when evaluation runs during training.
        if isinstance(kwargs['scheduler'], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == IntervalStrategy.NO):
            raise RuntimeError("You are using {cls} as a scheduler but you haven't enabled evaluation during training. This means your trials will not report intermediate results to Ray Tune, and can thus not be stopped early or used to exploit other trials parameters. If this is what you want, do not use {cls}. If you would like to use {cls}, make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the Trainer `args`.".format(cls=type(kwargs['scheduler']).__name__))
    trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)

    # BUG FIX: the decorator here had been reduced to a bare `(trainable)`
    # expression; restore functools.wraps so the wrapper keeps its metadata.
    import functools

    @functools.wraps(trainable)
    def dynamic_modules_import_trainable(*args, **kwargs):
        # `datasets` registers user modules dynamically; re-import them inside
        # the Ray worker process before running the real trainable.
        if is_datasets_available():
            import datasets.load

            dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), '__init__.py')
            spec = importlib.util.spec_from_file_location('datasets_modules', dynamic_modules_path)
            datasets_modules = importlib.util.module_from_spec(spec)
            sys.modules[spec.name] = datasets_modules
            spec.loader.exec_module(datasets_modules)
        return trainable(*args, **kwargs)

    if hasattr(trainable, '__mixins__'):
        dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__
    analysis = ray.tune.run(dynamic_modules_import_trainable, config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric='objective', mode=direction[:3], scope=trainer.args.ray_scope)
    best_run = BestRun(best_trial.trial_id, best_trial.last_result['objective'], best_trial.config)
    if _tb_writer is not None:
        trainer.add_callback(_tb_writer)
    return best_run
# NOTE(review): this bare expression looks like a stripped decorator (e.g.
# `@pytest.mark.skipif(not HAS_ANNOTATED, ...)`) -- confirm against the
# original source; as written it has no effect.
(HAS_ANNOTATED)

def test_annotated():
    """Shape introspection must expose `Annotated` types, metadata and
    required-ness for both the input and output side of a class field."""

    class WithAnnotated():
        # Single field carrying PEP 593 metadata.
        annotated_field: typing.Annotated[(int, 'metadata')]

    # The expected Shape mirrors the field on both sides: same Annotated type,
    # no default, required, empty metadata mapping, field listed as overridden.
    assert (get_dataclass_shape(WithAnnotated) == Shape(input=InputShape(constructor=WithAnnotated, kwargs=None, fields=(InputField(type=typing.Annotated[(int, 'metadata')], id='annotated_field', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY),), params=(Param(field_id='annotated_field', name='annotated_field', kind=ParamKind.POS_OR_KW),), overriden_types=frozenset({'annotated_field'})), output=OutputShape(fields=(OutputField(type=typing.Annotated[(int, 'metadata')], id='annotated_field', default=NoDefault(), accessor=create_attr_accessor('annotated_field', is_required=True), metadata=MappingProxyType({}), original=ANY),), overriden_types=frozenset({'annotated_field'}))))
def initialize_dict(dict, entries, separator):
    """Populate *dict* with separator-boundary patterns for each entry.

    The key is the entry lower-cased with every separator removed.  The value
    is a 0/1 list with one slot per character of the original entry plus a
    trailing sentinel: slot 0 is always 1, each later slot is 1 exactly where
    the entry holds the separator, and the final sentinel slot is 1.
    """
    for entry in entries:
        key = entry.replace(separator, '').lower()
        markers = [1]
        markers.extend(1 if ch == separator else 0 for ch in entry[1:])
        markers.append(1)
        dict[key] = markers
def test_index_query_scan() -> None:
    """Type-checking test: query/scan on an untyped GlobalSecondaryIndex
    yields `Any`, while a `GlobalSecondaryIndex['MyModel']` propagates the
    model type through its result iterators."""
    from pynamodb.attributes import NumberAttribute
    from pynamodb.models import Model
    from pynamodb.indexes import GlobalSecondaryIndex
    from pynamodb.pagination import ResultIterator

    class UntypedIndex(GlobalSecondaryIndex):
        bar = NumberAttribute(hash_key=True)

    class TypedIndex(GlobalSecondaryIndex['MyModel']):
        bar = NumberAttribute(hash_key=True)

    class MyModel(Model):
        foo = NumberAttribute(hash_key=True)
        bar = NumberAttribute()
        untyped_index = UntypedIndex()
        typed_index = TypedIndex()
    # Unparametrized index: items come back as Any.
    untyped_query_result: ResultIterator = MyModel.untyped_index.query(123)
    assert_type(next(untyped_query_result), Any)
    # Parametrized index: items are typed as the model.
    typed_query_result: ResultIterator[MyModel] = MyModel.typed_index.query(123)
    assert_type(next(typed_query_result), MyModel)
    untyped_scan_result = MyModel.untyped_index.scan()
    assert_type(next(untyped_scan_result), Any)
    typed_scan_result = MyModel.typed_index.scan()
    assert_type(next(typed_scan_result), MyModel)
class TestShellCommand():
    """Tests for `configtypes.ShellCommand` parsing and placeholder validation."""

    # BUG FIX: the pytest decorators in this class had lost their
    # `@pytest.fixture` / `@pytest.mark.` prefixes (leaving bare
    # `.parametrize(...)` fragments, a syntax error); restored throughout.
    @pytest.fixture
    def klass(self):
        return configtypes.ShellCommand

    @pytest.mark.parametrize('kwargs, val, expected', [({}, '[foobar]', ['foobar']), ({'placeholder': True}, '[foo, "{}", bar]', ['foo', '{}', 'bar']), ({'placeholder': True}, '["foo{}bar"]', ['foo{}bar']), ({'placeholder': True}, '[foo, "bar {}"]', ['foo', 'bar {}']), ({'placeholder': True}, '[f, "{file}", b]', ['f', '{file}', 'b']), ({'placeholder': True}, '["f{file}b"]', ['f{file}b']), ({'placeholder': True}, '[f, "b {file}"]', ['f', 'b {file}'])])
    def test_valid(self, klass, kwargs, val, expected):
        cmd = klass(**kwargs)
        # from_str parses the YAML-ish list; to_py round-trips the result.
        assert (cmd.from_str(val) == expected)
        assert (cmd.to_py(expected) == expected)

    @pytest.mark.parametrize('kwargs, val', [({'placeholder': True}, '[foo, bar]'), ({'placeholder': True}, '[foo, "{", "}", bar'), ({'placeholder': True}, '[foo, "{fi", "le}", bar'), ({'placeholder': True}, '[f, "{wrong}", b]'), ({'placeholder': True}, '["f{wrong}b"]'), ({'placeholder': True}, '[f, "b {wrong}"]')])
    def test_from_str_invalid(self, klass, kwargs, val):
        # Missing or malformed placeholders must be rejected.
        with pytest.raises(configexc.ValidationError):
            klass(**kwargs).from_str(val)
# BUG FIX: the decorator had lost its prefix (it read `.change_flags(...)`,
# a syntax error); restored as a pytensor config override for this test.
@pytensor.config.change_flags(vm__lazy=True)
def test_ifelse_lazy_c():
    """With a lazy VM, `ifelse` must only evaluate the taken branch."""
    a = scalar()
    b = generic()
    c = generic()
    notimpl = NotImplementedOp()
    cloops = [True, False]
    # The C loop needs a working C compiler.
    if pytensor.config.cxx == '':
        cloops = [False]
    for use_cloop in cloops:
        for lazy in [True, None]:
            linker = pytensor.link.vm.VMLinker(use_cloop=use_cloop, lazy=lazy)
            f = function([a, b, c], ifelse(a, notimpl(b), c), mode=Mode(linker=linker, optimizer='fast_run'))
            # Taking the true branch must hit the not-implemented op ...
            with pytest.raises(NotImplementedOpException):
                f(1, 'a', 'b')
            # ... while the false branch never evaluates it.
            assert (f(0, 'a', 'b') == 'b')
def cache_features(features, num_shards):
    """Split a feature dict into `num_shards` slices fed through a FIFOQueue.

    Returns (new_features, init_op): `new_features` dequeues one shard per
    step; `init_op` enqueues all shards.  With a single shard the input is
    returned unchanged with a no-op initializer.

    BUG FIX: the Python-2-only `dict.itervalues()` / `dict.iterkeys()` raise
    AttributeError on Python 3; replaced with `values()` / `keys()` (both
    calls iterate the same dict, so the ordering stays consistent).
    """
    if num_shards == 1:
        return (features, tf.no_op(name='init_queue'))
    flat_features = list(features.values())
    queue = tf.FIFOQueue(num_shards, dtypes=[v.dtype for v in flat_features])
    # Split each tensor along the batch axis, then regroup per shard.
    flat_features = [tf.split(v, num_shards, axis=0) for v in flat_features]
    flat_features = list(zip(*flat_features))
    init_ops = [queue.enqueue(v, name='enqueue_%d' % i) for i, v in enumerate(flat_features)]
    flat_feature = queue.dequeue()
    new_features = {}
    for k, v in zip(features.keys(), flat_feature):
        # Dequeued tensors lose static shape info; restore it.
        v.set_shape(features[k].shape)
        new_features[k] = v
    return (new_features, tf.group(*init_ops))
class BertDataset(Dataset):
    """Dataset yielding masked-LM training samples built from an indexed corpus."""

    def __init__(self, name, indexed_dataset, data_prefix, num_epochs, max_num_samples, masked_lm_prob, max_seq_length, short_seq_prob, seed):
        # Basic configuration.
        self.name = name
        self.seed = seed
        self.masked_lm_prob = masked_lm_prob
        self.max_seq_length = max_seq_length
        self.indexed_dataset = indexed_dataset
        # Precomputed mapping of sample index -> (start doc, end doc, target
        # sequence length); built (or loaded from cache) by the helper.
        self.samples_mapping = get_samples_mapping_(self.indexed_dataset, data_prefix, num_epochs, max_num_samples, self.max_seq_length, short_seq_prob, self.seed, self.name)
        # Vocabulary and special-token ids used when masking.
        tokenizer = get_tokenizer()
        self.vocab_id_list = list(tokenizer.inv_vocab.keys())
        self.vocab_id_to_token_dict = tokenizer.inv_vocab
        self.cls_id = tokenizer.cls
        self.sep_id = tokenizer.sep
        self.mask_id = tokenizer.mask
        self.pad_id = tokenizer.pad

    def __len__(self):
        # One entry per precomputed sample.
        return self.samples_mapping.shape[0]

    def __getitem__(self, idx):
        (start_idx, end_idx, seq_length) = self.samples_mapping[idx]
        sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
        # Per-sample RNG keyed on seed + index so masking is reproducible.
        np_rng = np.random.RandomState(seed=(self.seed + idx))
        return build_training_sample(sample, seq_length, self.max_seq_length, self.vocab_id_list, self.vocab_id_to_token_dict, self.cls_id, self.sep_id, self.mask_id, self.pad_id, self.masked_lm_prob, np_rng)
# BUG FIX: the fixture decorator had been reduced to its bare argument list;
# restored as a module-scoped, parametrized pytest fixture.
@pytest.fixture(scope='module', params=[(Arc, (0, 0, 5)), (Circle, (0, 0, 5)), (Ellipse, (0, 0, 0, 5)), (Sector, (0, 0, 3)), (Line, (0, 0, 7, 7)), (Rectangle, (0, 0, 20, 20)), (BorderedRectangle, (0, 0, 30, 10)), (Triangle, (0, 0, 2, 2, 5, 5)), (Star, (1, 1, 20, 11, 5)), (Polygon, ((0, 0), (1, 1), (2, 2)))])
def shape_and_positionals(request):
    """Yield each (shape class, valid positional constructor args) pair."""
    return request.param
class ReplicaSetTelemetry(BaseModel, extra='forbid'):
    """Telemetry snapshot for one replica set (unknown fields are rejected)."""
    # Numeric identifier of the replica set.
    id: int = Field(..., description='')
    # Telemetry for the locally hosted shard, if any.
    local: Optional['LocalShardTelemetry'] = Field(default=None, description='')
    # Telemetry entries for the remote shards.
    remote: List['RemoteShardTelemetry'] = Field(..., description='')
    # Replication state per peer (presumably keyed by peer id -- confirm).
    replicate_states: Dict[(str, 'ReplicaState')] = Field(..., description='')
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
    """Build a VisionTransformer encoder and return (encoder, embed width).

    Args:
        vit: 'base' (width 768, depth 12) or 'large' (width 1024, depth 24).
        image_size: input image side length.
        use_grad_checkpointing / ckpt_layer: gradient-checkpointing options.
        drop_path_rate: stochastic-depth rate; for 'large' a falsy value
            falls back to 0.1.
    """
    assert vit in ['base', 'large'], 'vit parameter must be base or large'
    if vit == 'base':
        vision_width = 768
        # BUG FIX: `(0 or drop_path_rate)` is just `drop_path_rate`.
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12, num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=drop_path_rate)
    elif vit == 'large':
        vision_width = 1024
        # BUG FIX: `(0.1 or drop_path_rate)` always evaluated to 0.1 and
        # silently ignored the caller's rate; honor the argument and use 0.1
        # only as the fallback default.
        visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24, num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer, drop_path_rate=(drop_path_rate or 0.1))
    return (visual_encoder, vision_width)
# BUG FIX: the pytest decorators in this class had lost their
# `@pytest.mark.` / `@pytest.fixture` prefixes; restored throughout.
@pytest.mark.usefixtures('config_stub', 'key_config_stub')
class TestConfigPyModules():
    """Tests for importing helper modules from config.py (sys.path hygiene)."""

    @pytest.fixture
    def qbmodulepy(self, tmp_path):
        return ConfPy(tmp_path, filename='qbmodule.py')

    @pytest.fixture(autouse=True)
    def restore_sys_path(self):
        # Snapshot sys.path around every test so imports cannot leak.
        old_path = sys.path.copy()
        yield
        sys.path = old_path

    def test_bind_in_module(self, confpy, qbmodulepy, tmp_path):
        qbmodulepy.write('def run(config):', ' config.bind(",a", "message-info foo", mode="normal")')
        confpy.write_qbmodule()
        confpy.read()
        expected = {'normal': {',a': 'message-info foo'}}
        assert (config.instance.get_obj('bindings.commands') == expected)
        # Neither the module nor the temp dir may remain registered.
        assert ('qbmodule' not in sys.modules)
        assert (tmp_path not in sys.path)

    def test_restore_sys_on_err(self, confpy, qbmodulepy, tmp_path):
        confpy.write_qbmodule()
        qbmodulepy.write('def run(config):', ' 1/0')
        error = confpy.read(error=True)
        assert (error.text == 'Unhandled exception')
        assert isinstance(error.exception, ZeroDivisionError)
        assert ('qbmodule' not in sys.modules)
        assert (tmp_path not in sys.path)

    def test_fail_on_nonexistent_module(self, confpy, qbmodulepy, tmp_path):
        qbmodulepy.write('def run(config):', ' pass')
        confpy.write('import foobar', 'foobar.run(config)')
        error = confpy.read(error=True)
        assert (error.text == 'Unhandled exception')
        assert isinstance(error.exception, ImportError)
        tblines = error.traceback.strip().splitlines()
        assert (tblines[0] == 'Traceback (most recent call last):')
        assert tblines[-1].endswith("Error: No module named 'foobar'")

    def test_no_double_if_path_exists(self, confpy, qbmodulepy, tmp_path):
        # If the dir is already on sys.path it must not be added again.
        sys.path.insert(0, tmp_path)
        confpy.write('import sys', 'if sys.path[0] in sys.path[1:]:', ' raise Exception("Path not expected")')
        confpy.read()
        assert (sys.path.count(tmp_path) == 1)
class AddNewsForm(forms.ModelForm):
    """News submission form with an anti-spam honeypot field."""
    name = HoneypotField()

    class Meta():
        model = Item
        fields = ('link', 'section', 'title', 'language', 'description')

    def __init__(self, *args, **kwargs):
        # Pre-select section 6 as the default.
        kwargs['initial'] = {'section': 6}
        super().__init__(*args, **kwargs)
        # Apply Bootstrap CSS classes uniformly.
        css_by_field = {
            'title': 'form-control small',
            'link': 'form-control small',
            'language': 'form-control',
            'description': 'form-control',
            'section': 'form-control',
        }
        for field_name, css_class in css_by_field.items():
            self.fields[field_name].widget.attrs = {'class': css_class}
        self.fields['title'].required = False
class ValidationException(DomainException):
    """Raised when one or more validation constraints fail."""

    # BUG FIX: the factory takes `cls` but had lost its @classmethod
    # decorator, so calling it on the class would have bound the constraint
    # list as `cls`; restored.
    @classmethod
    def for_failed_validations(cls, failed_validation_constraints):
        """Build the exception with a pluralized summary and one detail
        message per failed constraint."""
        detail_messages = [i.message for i in failed_validation_constraints]
        return cls(message=_.ngettext('An error occurred', 'Some errors occurred', len(detail_messages)), detail_messages=detail_messages)
class SequentialNodeRewriter(NodeRewriter):
    """Apply a sequence of node rewriters; the first one that fires wins.

    With ``apply_all_rewrites=True`` the replacement is rewritten again until
    no member rewriter fires.  With ``profile=True`` per-rewriter timing and
    count statistics are accumulated.
    """

    def __init__(self, *rewriters: Rewriter, apply_all_rewrites: bool = False, profile: bool = False):
        super().__init__()
        self.rewrites: Sequence[Rewriter] = rewriters
        assert isinstance(self.rewrites, tuple)
        # The sequence is reentrant if any member is (members default to True).
        self.reentrant = any(getattr(rewrite, 'reentrant', True) for rewrite in rewriters)
        # Inputs are retained only if every member retains them.
        self.retains_inputs = all(getattr(rewrite, 'retains_inputs', False) for rewrite in rewriters)
        self.apply_all_rewrites = apply_all_rewrites
        self.profile = profile
        if self.profile:
            self.time_rewrites: dict[(Rewriter, float)] = {}
            self.process_count: dict[(Rewriter, int)] = {}
            self.applied_true: dict[(Rewriter, int)] = {}
            self.node_created: dict[(Rewriter, int)] = {}
        self.tracker = OpToRewriterTracker()
        for o in self.rewrites:
            self.tracker.add_tracker(o)
            if self.profile:
                self.time_rewrites.setdefault(o, 0.0)
                self.process_count.setdefault(o, 0)
                self.applied_true.setdefault(o, 0)
                self.node_created.setdefault(o, 0)

    def __str__(self):
        return getattr(self, '__name__', f"{type(self).__name__}({','.join([str(o) for o in self.rewrites])})")

    def tracks(self):
        """Union of the tracked Op types of all member rewriters."""
        t = []
        for l in self.rewrites:
            at = l.tracks()
            if at:
                t.extend(at)
        return t

    def transform(self, fgraph, node):
        """Try member rewriters on *node*; return the final replacement (or None)."""
        if len(self.rewrites) == 0:
            return
        repl = None
        while True:
            rewrites = self.tracker.get_trackers(node.op)
            new_repl = None
            for rewrite in rewrites:
                rewrite_start = time.perf_counter()
                new_repl = rewrite.transform(fgraph, node)
                rewrite_finish = time.perf_counter()
                if self.profile:
                    # BUG FIX: was `rewrite_start - rewrite_finish`, which
                    # accumulated *negative* durations in the profile.
                    self.time_rewrites[rewrite] += (rewrite_finish - rewrite_start)
                    self.process_count[rewrite] += 1
                if not new_repl:
                    continue
                if isinstance(new_repl, (tuple, list)):
                    new_vars = new_repl
                else:
                    # A dict replacement maps old -> new variables.
                    new_vars = list(new_repl.values())
                if config.optimizer_verbose:
                    print(f'rewriting: rewrite {rewrite} replaces node {node} with {new_repl}')
                if self.profile:
                    self.node_created[rewrite] += len(list(applys_between(fgraph.variables, new_vars)))
                    self.applied_true[rewrite] += 1
                break
            if not new_repl:
                # No rewriter fired this round: return the last success (or None).
                return repl
            if not self.apply_all_rewrites:
                return new_repl
            if not new_vars[0].owner:
                # Replacement has no owner (e.g. a constant): nothing left to rewrite.
                return new_repl
            if len(new_repl) > 1:
                # All replacement outputs must come from a single node.
                s = {v.owner for v in new_repl}
                assert len(s) == 1
            repl = new_repl
            node = new_vars[0].owner

    # BUG FIX: takes `cls` but had lost its @classmethod decorator; restored.
    @classmethod
    def print_profile(cls, stream, prof, level=0):
        """Pretty-print the profiling statistics gathered by `transform`."""
        (time_rewrites, process_count, applied_true, node_created, profile) = prof
        if not profile:
            return
        blanc = ' ' * int(level)
        print(blanc, f'{cls.__name__}', file=stream)
        print(blanc, '', file=stream)
        count_rewrite = []
        not_used = []
        not_used_time = 0
        for o, count in process_count.items():
            if count > 0:
                count_rewrite.append((time_rewrites[o], applied_true[o], count, o, node_created[o]))
            else:
                not_used.append((time_rewrites[o], o))
                not_used_time += time_rewrites[o]
        if count_rewrite:
            print(blanc, ' time taken - times applied - times tried - name - node_created:', file=stream)
            count_rewrite.sort()
            for t, a_t, count, o, n_c in count_rewrite[::-1]:
                print(blanc, f' {t:.3f}s - {int(a_t)} - {int(count)} - {o} - {int(n_c)}', file=stream)
            print(blanc, f' {not_used_time:.3f}s - in {len(not_used)} rewrite(s) that were not used (displaying only those with a runtime greater than 0)', file=stream)
            not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
            for t, o in not_used[::-1]:
                if t > 0:
                    print(blanc + ' ', f' {t:.3f}s - {o}', file=stream)
        else:
            print(blanc, " The rewriter wasn't successful ", file=stream)
        print(file=stream)

    # BUG FIX: has no self/cls parameter; restored as a @staticmethod.
    @staticmethod
    def merge_profile(prof1, prof2):
        raise NotImplementedError

    def print_summary(self, stream=sys.stdout, level=0, depth=-1):
        print(f"{' ' * level}{self.__class__.__name__} id={id(self)}", file=stream)
        if depth != 0:
            depth -= 1
            for lrewrite in self.rewrites:
                lrewrite.print_summary(stream, level=level + 2, depth=depth)

    def add_requirements(self, fgraph):
        for rewrite in self.rewrites:
            rewrite.add_requirements(fgraph)
def eval_directory(path):
    """Evaluate every saved one-shot architecture under *path* and pickle
    the resulting validation/test error curves next to them."""
    with open(os.path.join(path, 'config.json')) as fp:
        config = json.load(fp)
    # Architectures are evaluated in natural (numeric) filename order.
    architecture_files = glob.glob(os.path.join(path, 'one_shot_architecture_*.obj'))
    architecture_files.sort(key=natural_keys)
    test_errors = []
    valid_errors = []
    for model in architecture_files:
        test_err, valid_err, _, _ = eval_one_shot_model(config=config, model=model)
        test_errors.append(test_err)
        valid_errors.append(valid_err)
    with open(os.path.join(path, 'one_shot_validation_errors.obj'), 'wb') as fp:
        pickle.dump(valid_errors, fp)
    with open(os.path.join(path, 'one_shot_test_errors.obj'), 'wb') as fp:
        pickle.dump(test_errors, fp)
class AllowedSmilesCharDictionary(object):
    """Filter for SMILES strings containing disallowed atom/element symbols."""

    def __init__(self) -> None:
        # Symbols (mostly metals and stray lowercase letters) whose presence
        # marks a SMILES string as out of scope.
        self.forbidden_symbols = {'Ag', 'Al', 'Am', 'Ar', 'At', 'Au', 'D', 'E', 'Fe', 'G', 'K', 'L', 'M', 'Ra', 'Re', 'Rf', 'Rg', 'Rh', 'Ru', 'T', 'U', 'V', 'W', 'Xe', 'Y', 'Zr', 'a', 'd', 'f', 'g', 'h', 'k', 'm', 'si', 't', 'te', 'u', 'v', 'y'}

    def allowed(self, smiles: str) -> bool:
        """Return True iff no forbidden symbol occurs in *smiles*; the first
        offender found is logged to stdout."""
        offender = next((sym for sym in self.forbidden_symbols if sym in smiles), None)
        if offender is not None:
            print('Forbidden symbol {:<2} in {}'.format(offender, smiles))
            return False
        return True
class QuestionSuggestionChainBase(Chain, BaseModel):
    """Chain that asks an LLM for follow-up questions and parses the
    numbered list it returns."""

    llm_chain: LLMChain
    output_key: str = 'questions'

    class Config():
        extra = Extra.forbid
        arbitrary_types_allowed = True

    # BUG FIX: `input_keys` / `output_keys` are property accessors on Chain
    # and `from_prompt` is an alternate constructor; the decorators had been
    # stripped. Also quoted the forward reference in `from_prompt`'s return
    # annotation, which would otherwise raise NameError at class creation.
    @property
    def input_keys(self) -> List[str]:
        return self.llm_chain.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def extract_questions(self, s: str) -> List[str]:
        """Parse a numbered list ('1. ...', '2. ...') out of raw LLM text."""
        components = s.split('\n')
        questions = []
        count = 1
        for c in components:
            if c.startswith(f'{count}'):
                questions.append(c.replace(f'{count}.', '').replace(f'{count}', '').strip())
                count += 1
        return questions

    def _call(self, inputs: Dict[(str, str)], run_manager: Optional[CallbackManagerForChainRun] = None) -> Dict[(str, List[str])]:
        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        questions = self.llm_chain.predict(**inputs)
        _run_manager.on_text(questions, color='green', end='\n', verbose=False)
        return {self.output_keys[0]: self.extract_questions(questions)}

    @classmethod
    def from_prompt(cls, llm: BaseLanguageModel) -> 'QuestionSuggestionChainBase':
        """Build the chain from an LLM using the default suggestion prompt."""
        llm_chain = LLMChain(llm=llm, prompt=QUESTION_SUGGESTION_PROMPT_BASE)
        return cls(llm_chain=llm_chain)
def test_db_reuse_simple(django_pytester: DjangoPytester) -> None:
    """With --reuse-db, a `django_db`-marked test can access the database."""
    # BUG FIX: the generated test module had lost the `@pytest.mark.` prefix
    # on `django_db` (and its indentation), which would have been a syntax
    # error when pytest collected the written file; restored.
    django_pytester.create_test_module('\n        import pytest\n\n        from .app.models import Item\n\n        @pytest.mark.django_db\n        def test_db_can_be_accessed():\n            assert Item.objects.count() == 0\n        ')
    result = django_pytester.runpytest_subprocess('-v', '--reuse-db')
    assert (result.ret == 0)
    result.stdout.fnmatch_lines(['*test_db_can_be_accessed PASSED*'])
def interpolateables(state_a, state_b):
    """Collect (path, old_value, new_value) triples for every 'set' diff
    between two states whose old and new values share an animatable type
    (float, Color or Background)."""
    animatable = []
    for tag, path, values in state_b.diff(state_a):
        if tag != 'set':
            continue
        ypath = path_to_str(path)
        v_new = get_elements(state_b, ypath)[0]
        v_old = values
        # A pair may match several types (e.g. via subclassing), mirroring
        # one appended triple per matching type.
        for candidate in [float, Color, Background]:
            if isinstance(v_old, candidate) and isinstance(v_new, candidate):
                animatable.append((ypath, v_old, v_new))
    return animatable
class RSSReader(Session):
    """Session that polls an RSS feed and forwards new entries to the
    session handler."""

    def __init__(self, factory, url, rate):
        # NOTE(review): Session.__init__ is not called here -- confirm the
        # base class tolerates that.
        self.url = url
        self.rate = rate
        self.factory = factory
        # Cache of already-seen entries, keyed by id + last-updated stamp.
        self.old_entries = {}

    def get_new(self):
        """Parse the feed and return entries not seen before (remembering them)."""
        feed = feedparser.parse(self.url)
        new_entries = []
        for entry in feed['entries']:
            # Keying on id + updated means an edited entry counts as new.
            idval = (entry['id'] + entry.get('updated', ''))
            if (idval not in self.old_entries):
                self.old_entries[idval] = entry
                new_entries.append(entry)
        return new_entries

    def disconnect(self, reason=None):
        """Stop the polling task (if running) and unregister the session."""
        if (self.factory.task and self.factory.task.running):
            self.factory.task.stop()
        self.sessionhandler.disconnect(self)

    def _callback(self, new_entries, init):
        # The initial poll only primes old_entries; nothing is emitted.
        if (not init):
            # Reversed so the oldest new entry is dispatched first.
            for entry in reversed(new_entries):
                self.data_in(entry)

    def data_in(self, text=None, **kwargs):
        self.sessionhandler.data_in(self, bot_data_in=text, **kwargs)

    def _errback(self, fail):
        logger.log_err(('RSS feed error: %s' % fail.value))

    def update(self, init=False):
        """Fetch the feed in a worker thread; dispatch new entries unless priming."""
        return threads.deferToThread(self.get_new).addCallback(self._callback, init).addErrback(self._errback)
def init_ctx(f):
    """Decorator for manager hooks: lazily allocates and persists the label,
    credential and vnode structures in emulated heap memory before
    delegating to *f*."""
    import functools

    # BUG FIX: the decorator line had been reduced to a bare `(f)`
    # expression; restored as functools.wraps so the wrapper keeps f's
    # metadata.
    @functools.wraps(f)
    def wrapper(self, *args, **kw):
        if self.manager.label is None:
            label_addr = self.ql.os.heap.alloc(ctypes.sizeof(label_t))
            self.manager.label = label_t(self.ql, label_addr)
            self.manager.label.l_flags = 1
            self.manager.label.updateToMem()
        if self.manager.cred is None:
            cred_addr = self.ql.os.heap.alloc(ctypes.sizeof(ucred_t))
            self.manager.cred = ucred_t(self.ql, cred_addr)
            self.manager.cred.cr_ref = 2
            # Group list for the fake credential (presumably default macOS
            # groups for uid 501 -- confirm).
            pyarr = [20, 12, 61, 79, 80, 81, 98, 33, 100, 204, 250, 395, 398, 399, 701, 0]
            self.manager.cred.cr_posix = ucred_t.posix_cred_t(501, 501, 501, 15, (ctypes.c_uint32 * len(pyarr))(*pyarr), 20, 20, 501, 2)
            self.manager.cred.cr_label = POINTER64(self.manager.label.base)
            self.manager.cred.updateToMem()
        if self.manager.vnode is None:
            tmp_addr = self.ql.os.heap.alloc(ctypes.sizeof(vnode_t))
            self.manager.vnode = vnode_t(self.ql, tmp_addr)
            tmp_name = self.ql.os.heap.alloc(len(self.manager.current_proc))
            self.manager.ql.mem.write(tmp_name, self.manager.current_proc.encode())
            self.manager.vnode.v_name = POINTER64(tmp_name)
            self.manager.vnode.updateToMem()
        return f(self, *args, **kw)
    return wrapper
class DownSample(nn.Module):
    """Halve spatial resolution via bilinear resampling, then grow channels
    from `in_channels` to `in_channels + s_factor` with a 1x1 conv."""

    def __init__(self, in_channels, s_factor):
        super(DownSample, self).__init__()
        resize = nn.Upsample(scale_factor=0.5, mode='bilinear', align_corners=False)
        project = nn.Conv2d(in_channels, (in_channels + s_factor), 1, stride=1, padding=0, bias=False)
        self.down = nn.Sequential(resize, project)

    def forward(self, x):
        return self.down(x)
def test_matrix_variable_selection_duplicate_exclusion(hatch, helpers, temp_dir, config_file):
    """`hatch run` must abort when the same matrix variable is excluded twice."""
    # Disable test scaffolding in the project template.
    config_file.model.template.plugins['default']['tests'] = False
    config_file.save()
    project_name = 'My.App'
    with temp_dir.as_cwd():
        result = hatch('new', project_name)
    assert (result.exit_code == 0), result.output
    project_path = (temp_dir / 'my-app')
    data_path = (temp_dir / 'data')
    data_path.mkdir()
    project = Project(project_path)
    helpers.update_project_environment(project, 'default', {'skip-install': True, **project.config.envs['default']})
    # 'test' env gets a matrix with two versions to select between.
    helpers.update_project_environment(project, 'test', {'matrix': [{'version': ['9000', '42']}]})
    with project_path.as_cwd(env_vars={ConfigEnvVars.DATA: str(data_path)}):
        # Passing `-version=...` twice must fail before the command runs.
        result = hatch('run', '-version=9000', '-version=42', 'python', '-c', "import pathlib,sys;pathlib.Path('test.txt').write_text(sys.executable)")
    assert (result.exit_code == 1), result.output
    assert (result.output == helpers.dedent('\n Duplicate excluded variable: version\n '))
class SimpleOxfordPetDataset(OxfordPetDataset):
    """Oxford-IIIT Pet variant resizing everything to 256x256 and returning
    channel-first numpy arrays."""

    def __getitem__(self, *args, **kwargs):
        sample = super().__getitem__(*args, **kwargs)
        # BUG FIX: `Image.LINEAR` was an undocumented alias removed in
        # Pillow 10; `Image.BILINEAR` is the canonical name for the same
        # resampling filter.
        image = np.array(Image.fromarray(sample['image']).resize((256, 256), Image.BILINEAR))
        # Masks and trimaps are label maps: nearest-neighbour keeps the
        # discrete class values intact.
        mask = np.array(Image.fromarray(sample['mask']).resize((256, 256), Image.NEAREST))
        trimap = np.array(Image.fromarray(sample['trimap']).resize((256, 256), Image.NEAREST))
        # HWC -> CHW for the image; add a channel axis to the label maps.
        sample['image'] = np.moveaxis(image, (- 1), 0)
        sample['mask'] = np.expand_dims(mask, 0)
        sample['trimap'] = np.expand_dims(trimap, 0)
        return sample
class ViewStageBase(object):
    """Base class for workflow view stages: subclasses decide readiness
    (`ready`) and scheduling (`schedule`) against a flow view."""

    def __init__(self, name, state_provider):
        self.name = name
        self.state_provider = state_provider
        self.datamodel = _datamodel
        self.view = None

    def schedule(self):
        raise NotImplementedError()

    def ready(self):
        raise NotImplementedError()

    def applicable(self, flowview):
        """Attach the view and report whether the stage can run."""
        self.view = flowview
        return self.ready()

    def apply(self, flowview):
        """Attach the view and schedule the stage's work."""
        self.view = flowview
        self.schedule()

    def addStep(self, step):
        # Dependencies are the DAG nodes producing each of the step's inputs.
        parents = [self.view.dag.getNode(k.stepid) for k in step.inputs]
        return self.view.addStep(step, stage=self.name, depends_on=parents)

    def addWorkflow(self, rules, isolate=True):
        stage_name = self.name if isolate else None
        self.view.addWorkflow(rules, stage=stage_name)

    def json(self):
        provider_json = self.state_provider.json() if self.state_provider else None
        return {'name': self.name, 'state_provider': provider_json}
def main(argv):
    """CLI entry point: convert a json annotation file to an instance image.

    Flags: -h prints help, -t writes trainIds instead of regular ids.
    Positional args: input json path, output image path.
    """
    use_train_ids = False
    try:
        opts, args = getopt.getopt(argv, 'ht')
    except getopt.GetoptError:
        printError('Invalid arguments')
    for opt, arg in opts:
        if opt == '-h':
            printHelp()
            sys.exit(0)
        elif opt == '-t':
            use_train_ids = True
        else:
            printError("Handling of argument '{}' not implementend".format(opt))
    # Exactly two positional arguments are required.
    if len(args) == 0:
        printError('Missing input json file')
    elif len(args) == 1:
        printError('Missing output image filename')
    elif len(args) > 2:
        printError('Too many arguments')
    inJson = args[0]
    outImg = args[1]
    if use_train_ids:
        json2instanceImg(inJson, outImg, 'trainIds')
    else:
        json2instanceImg(inJson, outImg)
def test_imbalance_penalty_at_insufficent_payer_balance():
    """No transfer pair is produced when the payer balance cannot cover the
    amount once imbalance fees are applied."""
    imbalance_penalty = calculate_imbalance_fees(channel_capacity=TokenAmount(20), proportional_imbalance_fee=ProportionalFeeAmount(1))
    # Both channels have balance 11 for a transfer of 10; the imbalance
    # penalty pushes the required amount above what is available.
    (pair, _) = _foward_transfer_pair(TokenAmount(10), NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(11)), fee_schedule=FeeScheduleState(flat=FeeAmount(0), imbalance_penalty=imbalance_penalty)), NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(11)), fee_schedule=FeeScheduleState(flat=FeeAmount(0), imbalance_penalty=imbalance_penalty)))
    assert (not pair)
def _to_sequence_example(set_info, decoder, vocab):
    """Convert one outfit set to a tf.SequenceExample (None on bad JPEG data).

    Context features hold up to 8 images (short sets pad by repeating the
    last image) plus set metadata; feature lists hold captions, their
    vocabulary ids and image indices.
    """
    set_id = set_info['set_id']
    image_data = []
    image_ids = []
    caption_data = []
    caption_ids = []
    for image_info in set_info['items']:
        filename = os.path.join(FLAGS.image_dir, set_id, (str(image_info['index']) + '.jpg'))
        # BUG FIX: JPEG files must be read in binary mode; text mode corrupts
        # the bytes on Python 3 (and on Windows under Python 2).
        with open(filename, 'rb') as f:
            encoded_image = f.read()
        try:
            decoded_image = decoder.decode_jpeg(encoded_image)
        except (tf.errors.InvalidArgumentError, AssertionError):
            print(('Skipping file with invalid JPEG data: %s' % filename))
            return
        image_data.append(encoded_image)
        image_ids.append(image_info['index'])
        caption = image_info['name'].encode('utf-8')
        caption_data.append(caption)
        # Vocabulary ids are shifted by +1 (0 presumably reserved for
        # padding -- confirm against the vocab builder).
        caption_id = [(vocab.word_to_id(word) + 1) for word in caption.split()]
        caption_ids.append(caption_id)
    feature = {}
    for index in range(8):
        # Pad short sets by repeating the last image.
        if index >= len(image_data):
            feature[('images/' + str(index))] = _bytes_feature(image_data[-1])
        else:
            feature[('images/' + str(index))] = _bytes_feature(image_data[index])
    feature['set_id'] = _bytes_feature(set_id)
    feature['set_url'] = _bytes_feature(set_info['set_url'])
    feature['likes'] = _int64_feature(set_info['likes'])
    feature['views'] = _int64_feature(set_info['views'])
    context = tf.train.Features(feature=feature)
    feature_lists = tf.train.FeatureLists(feature_list={'caption': _bytes_feature_list(caption_data), 'caption_ids': _int64_list_feature_list(caption_ids), 'image_index': _int64_feature_list(image_ids)})
    sequence_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
    return sequence_example
def time_info(exp, file_name='log.txt', runs=1, nbins=10, max_line_length=10000):
    """Collect per-run wall-clock times from log tails and plot a histogram.

    Reads the last line of each run's log (seeking at most
    *max_line_length* bytes from the end), parses the second-to-last token
    as a duration in minutes, prints min/mean/max and saves a histogram.
    Missing or malformed logs are skipped (best-effort scan).
    """
    time_list = []
    config_file = f'./configs/{exp}.json'
    sweeper = Sweeper(config_file)
    for i in range(runs * sweeper.config_dicts['num_combinations']):
        log_file = f'./logs/{exp}/{(i + 1)}/{file_name}'
        # BUG FIX: the two bare `except:` clauses (which also swallowed
        # KeyboardInterrupt/SystemExit) are narrowed to the exceptions this
        # best-effort scan actually expects.
        try:
            with open(log_file, 'r') as f:
                try:
                    f.seek(-max_line_length, os.SEEK_END)
                except IOError:
                    # File shorter than the window: read from the start.
                    f.seek(0)
                last_line = f.readlines()[-1]
                try:
                    t = float(last_line.split(' ')[-2])
                    time_list.append(t)
                except (ValueError, IndexError):
                    print('No time info in file: ' + log_file)
                    continue
        except (OSError, IndexError):
            continue
    if len(time_list) > 0:
        time_list = np.array(time_list)
        print(f'{exp} max time: {np.max(time_list):.2f} minutes')
        print(f'{exp} mean time: {np.mean(time_list):.2f} minutes')
        print(f'{exp} min time: {np.min(time_list):.2f} minutes')
        from utils.helper import make_dir
        make_dir(f'./logs/{exp}/0/')
        (num, bins, patches) = plt.hist(time_list, nbins)
        plt.xlabel('Time (min)')
        plt.ylabel('Counts in the bin')
        plt.savefig(f'./logs/{exp}/0/time_info.png')
        plt.clf()
        plt.cla()
        plt.close()
    else:
        print(f'{exp}: no time info!')
def test_abi3(tmp_path):
    """Building the limited-API sample project must yield abi3 wheel names."""
    project_dir = tmp_path / 'project'
    limited_api_project.generate(project_dir)
    actual_wheels = utils.cibuildwheel_run(project_dir, add_env={'CIBW_SKIP': 'pp* '})
    # Drop PyPy and post-3.8 CPython wheels; cp38 wheels become abi3.
    expected_wheels = []
    for wheel in utils.expected_wheels('spam', '0.1.0'):
        if ('-pp' in wheel) or ('-cp39' in wheel) or ('-cp31' in wheel):
            continue
        expected_wheels.append(wheel.replace('cp38-cp38', 'cp38-abi3'))
    assert set(actual_wheels) == set(expected_wheels)
class BM25Search():
    """BM25 lexical retrieval over an Elasticsearch index (BEIR-style API)."""

    def __init__(self, index_name: str, hostname: str='localhost', keys: Dict[(str, str)]=None, language: str='english', batch_size: int=128, timeout: int=100, retry_on_timeout: bool=True, maxsize: int=24, number_of_shards: int='default', initialize: bool=True):
        # BUG FIX: `keys` used a mutable dict literal as its default argument
        # (shared across all instances); replaced with the None-sentinel idiom.
        if keys is None:
            keys = {'title': 'title', 'body': 'txt'}
        # NOTE(review): `number_of_shards` is annotated `int` but defaults to
        # the string 'default' -- confirm the intended type with ElasticSearch.
        self.results = {}
        self.batch_size = batch_size
        self.initialize = initialize
        self.config = {'hostname': hostname, 'index_name': index_name, 'keys': keys, 'timeout': timeout, 'retry_on_timeout': retry_on_timeout, 'maxsize': maxsize, 'number_of_shards': number_of_shards, 'language': language}
        self.es = ElasticSearch(self.config)
        if self.initialize:
            self.initialise()

    def initialise(self):
        """Drop and recreate the index from scratch."""
        self.es.delete_index()
        self.es.create_index()

    def search(self, corpus: Dict[(str, Dict[(str, str)])], queries: Dict[(str, str)], top_k: List[int], *args, **kwargs) -> Dict[(str, Dict[(str, float)])]:
        """Index the corpus (if configured) and run batched multisearch queries.

        Returns {query_id: {corpus_id: score}} excluding self-hits.
        """
        if self.initialize:
            self.index(corpus)
            # Optional pause to let Elasticsearch finish refreshing.
            if kwargs.get('sleep_for', None):
                time.sleep(kwargs.get('sleep_for'))
        query_ids = list(queries.keys())
        queries = [queries[qid] for qid in query_ids]
        for start_idx in tqdm.trange(0, len(queries), self.batch_size, desc='que'):
            query_ids_batch = query_ids[start_idx:(start_idx + self.batch_size)]
            # Fetch one extra hit so a self-hit can be dropped and top_k kept.
            results = self.es.lexical_multisearch(texts=queries[start_idx:(start_idx + self.batch_size)], top_hits=(top_k + 1))
            for (query_id, hit) in zip(query_ids_batch, results):
                scores = {}
                for (corpus_id, score) in hit['hits']:
                    if corpus_id != query_id:
                        scores[corpus_id] = score
                self.results[query_id] = scores
        return self.results

    def index(self, corpus: Dict[(str, Dict[(str, str)])]):
        """Bulk-add every corpus document under the configured field keys."""
        progress = tqdm.tqdm(unit='docs', total=len(corpus))
        dictionary = {idx: {self.config['keys']['title']: corpus[idx].get('title', None), self.config['keys']['body']: corpus[idx].get('text', None)} for idx in list(corpus.keys())}
        self.es.bulk_add_to_index(generate_actions=self.es.generate_actions(dictionary=dictionary, update=False), progress=progress)
class ZeroConfProcess(multiprocessing.Process):
    """Background process that discovers signalk-server instances via zeroconf.

    Discovered 'host:port' strings (or 'disconnect') are pushed through a
    NonBlockingPipe and collected by poll().
    """

    def __init__(self, signalk):
        # NOTE(review): the `signalk` argument is never stored or used here.
        self.name_type = False
        self.pipe = NonBlockingPipe('zeroconf', True)
        super(ZeroConfProcess, self).__init__(target=self.process, daemon=True)
        self.start()

    def remove_service(self, zc, type, name):
        print(('signalk zeroconf ' + _('service removed')), name, type)
        # Only report a disconnect for the server currently being tracked.
        if (self.name_type == (name, type)):
            self.pipe[1].send('disconnect')
            print(('signalk zeroconf ' + _('server lost')))

    def update_service(self, zc, type, name):
        self.add_service(zc, type, name)

    def add_service(self, zc, type, name):
        print(('signalk zeroconf ' + _('service add')), name, type)
        info = zc.get_service_info(type, name)
        if (not info):
            return
        properties = {}
        # NOTE(review): the loop variable shadows the `name` parameter.
        for (name, value) in info.properties.items():
            try:
                properties[name.decode()] = value.decode()
            except Exception as e:
                print('signalk zeroconf exception', e, name, value)
        if (('swname' in properties) and (properties['swname'] == 'signalk-server')):
            try:
                host_port = ((socket.inet_ntoa(info.addresses[0]) + ':') + str(info.port))
            except Exception as e:
                # Older zeroconf versions expose a single `address` attribute.
                host_port = ((socket.inet_ntoa(info.address) + ':') + str(info.port))
            self.name_type = (name, type)
            self.pipe[1].send(host_port)

    def process(self):
        # Wait (retrying forever) until the zeroconf package is importable.
        warned = False
        while True:
            try:
                import zeroconf
                if warned:
                    print((('signalk:' + _('succeeded')) + ' import zeroconf'))
                break
            except Exception as e:
                if (not warned):
                    print(((('signalk: ' + _('failed to')) + ' import zeroconf, ') + _('autodetection not possible')))
                    print((((_('try') + ' pip3 install zeroconf') + _('or')) + ' apt install python3-zeroconf'))
                    warned = True
                time.sleep(20)
        # Recreate the browser whenever the machine's IP addresses change.
        current_ip_address = []
        zc = None
        while True:
            new_ip_address = zeroconf.get_all_addresses()
            if (current_ip_address != new_ip_address):
                debug('IP address changed from ', current_ip_address, 'to', new_ip_address)
                current_ip_address = new_ip_address
                if (zc != None):
                    zc.close()
                zc = zeroconf.Zeroconf()
                # NOTE(review): the service-type string below was garbled in
                # extraction; presumably '_http._tcp.local.' — confirm and restore.
                self.browser = zeroconf.ServiceBrowser(zc, '_ self)
            time.sleep(5)

    def poll(self):
        # Drain the pipe and return the most recent message (or False if none).
        last = False
        while True:
            p = self.pipe[0].recv()
            if (not p):
                return last
            last = p
def get_bandpath_fcc(ase_atom, npoints=30):
    """Return the L-Gamma-X-W-K-Gamma band path for an fcc cell.

    Returns (reduced k-points, cartesian k-points, path coordinate, special points).
    """
    from ase.dft.kpoints import ibz_points, kpoint_convert, get_bandpath
    fcc = ibz_points['fcc']
    # Same traversal order as the classic fcc band-structure plot.
    path = [fcc[label] for label in ('L', 'Gamma', 'X', 'W', 'K', 'Gamma')]
    kpts_reduced, kpath, sp_points = get_bandpath(path, ase_atom.cell, npoints=npoints)
    kpts_cartes = kpoint_convert(ase_atom.cell, skpts_kc=kpts_reduced)
    return kpts_reduced, kpts_cartes, kpath, sp_points
def _horizontal_datum_from_params(cf_params):
    """Build a pyproj Datum from CF grid-mapping parameters, or return None.

    Resolution order: a recognized datum name wins; otherwise a custom datum
    is assembled from ellipsoid and prime-meridian parameters, each of which
    falls back to a well-known name when explicit parameters are unusable.
    """
    # 1) A usable datum name short-circuits everything else.
    datum_name = cf_params.get('horizontal_datum_name')
    if (datum_name and (datum_name not in ('undefined', 'unknown'))):
        try:
            return Datum.from_name(datum_name)
        except CRSError:
            # Unknown name: fall through and try to build one from parts.
            pass
    # 2) Ellipsoid: prefer explicit axis/flattening/radius parameters ...
    ellipsoid = None
    ellipsoid_name = cf_params.get('reference_ellipsoid_name')
    try:
        ellipsoid = CustomEllipsoid(name=(ellipsoid_name or 'undefined'), semi_major_axis=cf_params.get('semi_major_axis'), semi_minor_axis=cf_params.get('semi_minor_axis'), inverse_flattening=cf_params.get('inverse_flattening'), radius=cf_params.get('earth_radius'))
    except CRSError:
        # ... falling back to a well-known ellipsoid name if one was given.
        if (ellipsoid_name and (ellipsoid_name not in ('undefined', 'unknown'))):
            ellipsoid = Ellipsoid.from_name(ellipsoid_name)
    # 3) Prime meridian: an explicit longitude wins, else a well-known name.
    prime_meridian = None
    prime_meridian_name = cf_params.get('prime_meridian_name')
    try:
        prime_meridian = CustomPrimeMeridian(name=(prime_meridian_name or 'undefined'), longitude=cf_params['longitude_of_prime_meridian'])
    except KeyError:
        if (prime_meridian_name and (prime_meridian_name not in ('undefined', 'unknown'))):
            prime_meridian = PrimeMeridian.from_name(prime_meridian_name)
    # 4) Assemble a custom datum when either component was resolved; WGS 84 /
    #    Greenwich fill in whichever part is still missing.
    if (ellipsoid or prime_meridian):
        return CustomDatum(name=(datum_name or 'undefined'), ellipsoid=(ellipsoid or 'WGS 84'), prime_meridian=(prime_meridian or 'Greenwich'))
    return None
def euler2mat_tf(point_cloud, rotations):
    """Rotate each point cloud in a batch by per-sample Euler angles.

    Args:
        point_cloud: tensor of shape (batch, n_points, 3).
        rotations: tensor of shape (batch, 3) holding (x, y, z) angles
            in radians (TF1 static-shape API is used).

    Returns:
        Tensor of shape (batch, n_points, 3) with rotated points.
    """
    batch_size = rotations.get_shape()[0].value
    assert (rotations.get_shape()[1].value == 3)
    rotated_list = []
    one = tf.constant([1.0])
    zero = tf.constant([0.0])
    for i in range(batch_size):
        x = rotations[(i, 0)]
        y = rotations[(i, 1)]
        z = rotations[(i, 2)]
        # Rotation about the z axis.
        cosz = tf.cos([z])
        sinz = tf.sin([z])
        Mz = tf.stack([[cosz, (- sinz), zero], [sinz, cosz, zero], [zero, zero, one]])
        Mz = tf.squeeze(Mz)
        # Rotation about the y axis.
        cosy = tf.cos([y])
        siny = tf.sin([y])
        My = tf.stack([[cosy, zero, siny], [zero, one, zero], [(- siny), zero, cosy]])
        My = tf.squeeze(My)
        # Rotation about the x axis.
        cosx = tf.cos([x])
        sinx = tf.sin([x])
        Mx = tf.stack([[one, zero, zero], [zero, cosx, (- sinx)], [zero, sinx, cosx]])
        Mx = tf.squeeze(Mx)
        # BUGFIX: the composition previously reused Mz in place of Mx
        # (tf.matmul(My, Mz)), leaving the x-axis rotation entirely unused.
        rotate_mat = tf.matmul(Mz, tf.matmul(My, Mx))
        rotated_list.append(tf.matmul(point_cloud[i], rotate_mat))
    return tf.stack(rotated_list)
class PointGroup(BaseModel, extra='forbid'):
    """A group of scored points that share one value of the `group_by` key.

    `extra='forbid'` makes validation reject unknown fields.
    """
    # Scored points that share the group key.
    hits: List['ScoredPoint'] = Field(..., description='Scored points that have the same value of the group_by key')
    # The value of the group_by key identifying this group.
    id: 'GroupId' = Field(..., description='')
    # Optional record fetched from the lookup collection using the group id.
    lookup: Optional['Record'] = Field(default=None, description='Record that has been looked up using the group id')
class TestMsgFmt(unittest.TestCase):
    """Checks for the msgfmt-based .po entry validator `test_msgfmt`."""

    def test_ok(self):
        # A plain, well-formed entry must validate cleanly.
        with pofile_from_entry(msgid='test string', msgstr='estay ingstray') as p:
            test_msgfmt(p.name)

    def test_busted_newlines(self):
        # Translation adds a trailing newline the msgid lacks -> must fail.
        with pofile_from_entry(msgid='multi\nline\nstring', msgstr='ultimay\ninelay\ningstray\n') as p:
            self.assertRaises(AssertionError, test_msgfmt, p.name)

    def test_busted_format(self):
        # python-format entry whose translation drops the placeholder -> fail.
        with pofile_from_entry(msgid='test %(type)s', msgstr='estay', flags=['python-format']) as p:
            self.assertRaises(AssertionError, test_msgfmt, p.name)

    def test_translated_format(self):
        # Placeholder renamed (and missing its conversion) in translation -> fail.
        with pofile_from_entry(msgid='test %(type)s', msgstr='estay %(ypetay)', flags=['python-format']) as p:
            self.assertRaises(AssertionError, test_msgfmt, p.name)
def test_deprecated_decorator(recwarn_always: pytest.WarningsRecorder) -> None:
    """A @deprecated function still returns its value and emits one warning."""
    assert (deprecated_old() == 3)
    got = recwarn_always.pop(TrioDeprecationWarning)
    assert isinstance(got.message, Warning)
    # The message names the deprecated symbol, the removal version, the
    # replacement, and the tracking issue.
    assert ('test_deprecate.deprecated_old is deprecated' in got.message.args[0])
    assert ('1.5' in got.message.args[0])
    assert ('test_deprecate.new' in got.message.args[0])
    assert ('issues/123' in got.message.args[0])
def gpt_collate_fn(data, tokenizer):
    """Collate per-sample dicts into one batch dict and tokenize 'output_text'.

    The keys of the first sample define the batch layout; tokenized ids are
    added under 'input_ids'.
    """
    # Transpose list-of-dicts into dict-of-lists.
    batch = {key: [sample[key] for sample in data] for key in data[0]}
    encoded = tokenizer(
        batch['output_text'],
        padding=True,
        return_tensors='pt',
        add_special_tokens=False,
        return_attention_mask=False,
        truncation=True,
        max_length=1000,
    )
    batch['input_ids'] = encoded['input_ids']
    return batch
class PipelinePackIterator(PipelineIterator):
    """PipelineIterator variant that accumulates items until `is_last` is seen.

    Each __next__ returns the full list of items belonging to one logical
    input (e.g. all chunks of one long document), rather than a single item.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        # Drain any partially-consumed loader batch left over from the
        # previous call before pulling new data from the underlying iterator.
        if ((self._loader_batch_index is not None) and (self._loader_batch_index < self.loader_batch_size)):
            while (self._loader_batch_index < self.loader_batch_size):
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while (not is_last):
            processed = self.infer(next(self.iterator), **self.params)
            if (self.loader_batch_size is not None):
                # Inspect the first tensor to learn the actual batch size; the
                # final batch of a dataset may be smaller than loader_batch_size.
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if (0 < observed_batch_size < self.loader_batch_size):
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while (self._loader_batch_index < self.loader_batch_size):
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
def test_index_bits():
    """Indexing a Bits value with another Bits selects a bit or raises IndexError."""
    data = Bits(8, 202)  # 0b11001010
    x = Bits(4, 3)
    assert (data[x] == 1)  # bit 3 of 0b11001010 is set
    # presumably a negative index value is rejected — confirm Bits semantics
    y = Bits(4, (- 8))
    with pytest.raises(IndexError):
        data[y]
    a = Bits(8, 4)
    assert (data[a] == 0)  # bit 4 of 0b11001010 is clear
    b = Bits(8, 20)
    with pytest.raises(IndexError):
        data[b]  # index beyond the 8-bit width
    c = Bits(8, (- 1))
    with pytest.raises(IndexError):
        data[c]
def down_pic(pic_urls, keyword):
    """Download every URL in pic_urls as <n>.jpg into the keyword's directory.

    Failures are reported and skipped; a 1-second pause follows each success.
    """
    target_dir = judge_filepath(keyword)
    for index, url in enumerate(pic_urls, start=1):
        try:
            response = requests.get(url, timeout=15)
            destination = os.path.join(target_dir, str(index) + '.jpg')
            with open(destination, 'wb') as fh:
                fh.write(response.content)
            print('%s: %s' % (str(index), str(url)))
        except Exception as exc:
            # Best-effort: report the failing URL and move on (no sleep).
            print('%s: %s' % (str(index), str(url)))
            print(exc)
            continue
        time.sleep(1)
class TestDSSSSHSerialization():
    """Tests for loading OpenSSH-format ('ssh-dss ...') DSA public keys."""

    def test_load_ssh_public_key_dss_too_short(self, backend):
        # A bare key-type tag with no base64 payload must be rejected.
        ssh_key = b'ssh-dss'
        with pytest.raises(ValueError):
            load_ssh_public_key(ssh_key, backend)

    def test_load_ssh_public_key_dss_comment_with_spaces(self, backend):
        # Trailing comment text after the payload is tolerated; DSA keys load
        # with a deprecation warning.
        ssh_key = b'ssh-dss AAAAB3NzaC1kc3MAAACBALmwUtfwdjAUjU2Dixd5DvT0NDcjjr69UDLqSD/Xt5Al7D3GXr1WOrWGpjO0NE9qzRCvMTU7zykRH6XjuNXB6Hvv48Zfm4vmnHQHFmmMg2bI75JbnOwdzWnnPZJrVU4rS23dFFPqs5ug+EbhVVrcwzxahjcSjJ7WEQSkVQWnSPbbAAAAFQDXmpD3DIkGvLSBf1GdUF4PHKtUrQAAAIB/bJFwss+2fngmfG/Li5OyL7A9iVoGdkUaFaxEUROTp7wkm2z49fXFAir+/U31v50Tu98YLfWvKlxdHcdgQYV9Ww5LIrhWwwD4UKOwC6w5S3KHVbi3pWUi7vxJFXOWfeu1mC/JTWqMKR91j+rmOtdppWIZRyIVIqLcMdGO3m+2VgAAAIANFDz5KQH5NvoljpoRQiRgyPjxWXiE7vjLElKj4v8KrpanAywBzdhIW1y/tzpGuwRwj5ihi8iNTHgSsoTaj5AG5HPomJf5vJElxpu/2O9pHA52wcNObIQ7j+JA5uWusxNIbl+pF6sSiP8abrz53N7tPF/IhHTjBHb1Ol7IFu9p9A== extra'
        with pytest.warns(utils.DeprecatedIn40):
            load_ssh_public_key(ssh_key, backend)

    def test_load_ssh_public_key_dss_extra_data_after_modulo(self, backend):
        # Extra bytes after the final MPI must be rejected.
        ssh_key = b'ssh-dss AAAAB3NzaC1kc3MAAACBALmwUtfwdjAUjU2Dixd5DvT0NDcjjr69UDLqSD/Xt5Al7D3GXr1WOrWGpjO0NE9qzRCvMTU7zykRH6XjuNXB6Hvv48Zfm4vmnHQHFmmMg2bI75JbnOwdzWnnPZJrVU4rS23dFFPqs5ug+EbhVVrcwzxahjcSjJ7WEQSkVQWnSPbbAAAAFQDXmpD3DIkGvLSBf1GdUF4PHKtUrQAAAIB/bJFwss+2fngmfG/Li5OyL7A9iVoGdkUaFaxEUROTp7wkm2z49fXFAir+/U31v50Tu98YLfWvKlxdHcdgQYV9Ww5LIrhWwwD4UKOwC6w5S3KHVbi3pWUi7vxJFXOWfeu1mC/JTWqMKR91j+rmOtdppWIZRyIVIqLcMdGO3m+2VgAAAIANFDz5KQH5NvoljpoRQiRgyPjxWXiE7vjLElKj4v8KrpanAywBzdhIW1y/tzpGuwRwj5ihi8iNTHgSsoTaj5AG5HPomJf5vJElxpu/2O9pHA52wcNObIQ7j+JA5uWusxNIbl+pF6sSiP8abrz53N7tPF/IhHTjBHb1Ol7IFu9p9AAwMD== '
        with pytest.raises(ValueError):
            load_ssh_public_key(ssh_key, backend)

    def test_load_ssh_public_key_dss_different_string(self, backend):
        # Payload whose inner key-type string does not match 'ssh-dss' fails.
        ssh_key = b'ssh-dss AAAAB3NzAC1kc3MAAACBALmwUtfwdjAUjU2Dixd5DvT0NDcjjr69UDLqSD/Xt5Al7D3GXr1WOrWGpjO0NE9qzRCvMTU7zykRH6XjuNXB6Hvv48Zfm4vmnHQHFmmMg2bI75JbnOwdzWnnPZJrVU4rS23dFFPqs5ug+EbhVVrcwzxahjcSjJ7WEQSkVQWnSPbbAAAAFQDXmpD3DIkGvLSBf1GdUF4PHKtUrQAAAIB/bJFwss+2fngmfG/Li5OyL7A9iVoGdkUaFaxEUROTp7wkm2z49fXFAir+/U31v50Tu98YLfWvKlxdHcdgQYV9Ww5LIrhWwwD4UKOwC6w5S3KHVbi3pWUi7vxJFXOWfeu1mC/JTWqMKR91j+rmOtdppWIZRyIVIqLcMdGO3m+2VgAAAIANFDz5KQH5NvoljpoRQiRgyPjxWXiE7vjLElKj4v8KrpanAywBzdhIW1y/tzpGuwRwj5ihi8iNTHgSsoTaj5AG5HPomJf5vJElxpu/2O9pHA52wcNObIQ7j+JA5uWusxNIbl+pF6sSiP8abrz53N7tPF/IhHTjBHb1Ol7IFu9p9A== '
        with pytest.raises(ValueError):
            load_ssh_public_key(ssh_key, backend)

    def test_load_ssh_public_key_dss(self, backend):
        # Happy path: key loads (with deprecation warning) and its public
        # numbers match the known values.
        ssh_key = b'ssh-dss AAAAB3NzaC1kc3MAAACBALmwUtfwdjAUjU2Dixd5DvT0NDcjjr69UDLqSD/Xt5Al7D3GXr1WOrWGpjO0NE9qzRCvMTU7zykRH6XjuNXB6Hvv48Zfm4vmnHQHFmmMg2bI75JbnOwdzWnnPZJrVU4rS23dFFPqs5ug+EbhVVrcwzxahjcSjJ7WEQSkVQWnSPbbAAAAFQDXmpD3DIkGvLSBf1GdUF4PHKtUrQAAAIB/bJFwss+2fngmfG/Li5OyL7A9iVoGdkUaFaxEUROTp7wkm2z49fXFAir+/U31v50Tu98YLfWvKlxdHcdgQYV9Ww5LIrhWwwD4UKOwC6w5S3KHVbi3pWUi7vxJFXOWfeu1mC/JTWqMKR91j+rmOtdppWIZRyIVIqLcMdGO3m+2VgAAAIANFDz5KQH5NvoljpoRQiRgyPjxWXiE7vjLElKj4v8KrpanAywBzdhIW1y/tzpGuwRwj5ihi8iNTHgSsoTaj5AG5HPomJf5vJElxpu/2O9pHA52wcNObIQ7j+JA5uWusxNIbl+pF6sSiP8abrz53N7tPF/IhHTjBHb1Ol7IFu9p9A== '
        with pytest.warns(utils.DeprecatedIn40):
            key = load_ssh_public_key(ssh_key, backend)
        assert (key is not None)
        assert isinstance(key, dsa.DSAPublicKey)
        numbers = key.public_numbers()
        expected_y = int('d143cf92901f936fa258e9ac8f8f1597884eef8cb1252a3e2ff0aae96a7032c01cdd8485b5cbfb73a46bb04708f98a18bc88d4c7812b284da8f9006e473e89897f9bc9125c69bbfd8ef691c0e76c1c34e6c843b8fe240e6e5aeb313486e5fa917ab1288ff1a6ebcf9dcdeed3c5fc88474e30476f53a5ec816ef69f4', 16)
        # NOTE(review): the constants below look truncated/merged by extraction
        # (`expected_q = expected_g` sharing one value is suspicious, and the
        # hex strings are shorter than a 1024-bit p would require) — restore
        # the original p, q and g values from the upstream test.
        expected_p = int('b9b052d7fd4d838b17790ef4febebd5032ea483fd7b79025ec3dc65ebd563ab586a633b4344f6acd10af31353bcf29111fa5e3b8d5c1e87befe3c65f9b8be69cc8366c8ef925b9cec1dcd69e73d926b554e2b4b6ddd1453eab39ba0f846e1555adcc33c5a8637128c9ed61104a45505a748f6db', 16)
        expected_q = expected_g = int('7f6c9170b2cfb67e78267c6fcb8b93b22fb03d895a0676451a15aca7bc249b6cf8f5f5c5022afefd4df5bf9d13bbdf182df5af2a5c5d1dcd5b0e4b22b856c300f850a3b00bac394b728755b8b7a56522eefcdebb5982fc94d6a8c291f758feae63ad769aa2dc31d18ede6fb656', 16)
        expected = dsa.DSAPublicNumbers(expected_y, dsa.DSAParameterNumbers(expected_p, expected_q, expected_g))
        assert (numbers == expected)
def build(opt):
    """Prepare the MSR Paraphrase data directory tree if not already built."""
    root = opt['datapath']
    dpath = os.path.join(root, opt['dataset'])
    version = None
    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        # An out-of-date partial build is removed before rebuilding.
        if build_data.built(dpath):
            build_data.remove_dir(dpath)
        for directory in (dpath,
                          os.path.join(root, 'embeddings'),
                          os.path.join(root, 'logs'),
                          os.path.join(root, 'models')):
            build_data.make_dir(directory)
        dpext = os.path.join(dpath, 'MSRParaphraseCorpus')
        # Reformat each downloaded source file into its tsv split.
        split_names = ('msr-para-train.tsv', 'msr-para-val.tsv', 'msr-para-test.tsv')
        for source, split_name in zip(files, split_names):
            reformat_split(dpath, source, os.path.join(dpext, split_name))
        build_data.mark_done(dpath, version_string=version)
class QuantLinear(nn.Linear):
    """nn.Linear whose weights pass through a 4-bit power-of-two quantizer."""

    def __init__(self, in_features, out_features, bias=True):
        super(QuantLinear, self).__init__(in_features, out_features, bias)
        self.layer_type = 'QuantLinear'
        self.bit = 4
        # The quantizer owns the learnable clipping threshold (wgt_alpha).
        self.weight_quant = weight_quantize_fn(w_bit=self.bit, power=True)

    def forward(self, x):
        # Quantize weights on the fly; the float master weights stay intact.
        quantized = self.weight_quant(self.weight)
        return F.linear(x, quantized, self.bias)

    def show_params(self):
        """Print the learned clipping threshold."""
        threshold = round(self.weight_quant.wgt_alpha.data.item(), 3)
        print('clipping threshold weight alpha: {:.2f}'.format(threshold))
class CacheMixin():
    """View mixin that wraps dispatch() with Django per-view page caching."""

    cache_timeout = 60  # seconds

    def get_cache_timeout(self):
        # Hook for subclasses that want a dynamic timeout.
        return self.cache_timeout

    def dispatch(self, *args, **kwargs):
        parent_dispatch = super().dispatch
        # Caching can be disabled globally via settings.
        if not settings.CACHE_PAGE_ENABLED:
            return parent_dispatch(*args, **kwargs)
        cached = cache_page(self.get_cache_timeout())(parent_dispatch)
        return cached(*args, **kwargs)
def open_database_from_options_or_exit(db_options, quiet=False, apsw_warning=False):
    """Open the database named in db_options, terminating via die() on failure."""
    # Local import to avoid import cost / cycles at module load time.
    from .. import dbutils
    dbinfo = dbutils.get_dbinfo(db_options.database)
    try:
        return dbinfo.open_database(copy_to_memory=db_options.copy_to_memory, quiet=quiet, apsw_warning=apsw_warning)
    except dbutils.DBError as err:
        # NOTE(review): internal line breaks of this message were lost in
        # extraction; verify against the original formatting.
        die(f'''Cannot connect to {dbinfo.get_human_name()}: {err} ''')
def test(model, args):
    """Run inference over the test split and save predictions/labels as NIfTI files."""
    print('Test')
    join = os.path.join
    # Output directories for predicted masks and ground-truth labels.
    if (not os.path.exists(join(args.save_dir, 'infer'))):
        os.mkdir(join(args.save_dir, 'infer'))
    if (not os.path.exists(join(args.save_dir, 'label'))):
        os.mkdir(join(args.save_dir, 'label'))
    split_dir = os.path.join(args.src_dir, 'splits.pkl')
    with open(split_dir, 'rb') as f:
        splits = pickle.load(f)
    test_keys = splits[args.fold]['test']
    model.eval()
    for key in test_keys:
        preds = []
        labels = []
        data_loader = generate_test_loader(key, args)
        with torch.no_grad():
            for (i, tup) in enumerate(data_loader):
                if (args.gpu is not None):
                    img = tup[0].float().cuda(args.gpu, non_blocking=True)
                    label = tup[1].long().cuda(args.gpu, non_blocking=True)
                else:
                    img = tup[0]
                    label = tup[1]
                mask = model(img)
                # Class prediction = argmax over softmax probabilities.
                mask_softmax = F.softmax(mask, dim=1)
                mask = torch.argmax(mask_softmax, dim=1)
                preds.append(mask.cpu().numpy())
                labels.append(label.cpu().numpy())
        preds = np.concatenate(preds, axis=0)
        labels = np.concatenate(labels, axis=0).squeeze()
        print(preds.shape, labels.shape)
        # Strip a file extension from the key, if any.
        if ('.' in key):
            key = key.split('.')[0]
        # Identity affine: volumes are saved in index space.
        ni_pred = nib.Nifti1Image(preds.astype(np.int8), affine=np.eye(4))
        ni_lb = nib.Nifti1Image(labels.astype(np.int8), affine=np.eye(4))
        nib.save(ni_pred, join(args.save_dir, 'infer', (key + '.nii')))
        nib.save(ni_lb, join(args.save_dir, 'label', (key + '.nii')))
        print('finish saving file:', key)
def get_markdown_header(category):
    """Return the release-notes worksheet markdown header for `category`, as a one-element list.

    NOTE(review): several markdown link URLs in this template appear to have
    been stripped during extraction ("[Detailed intructions here](" and
    "(guidelines here:"), and the internal line breaks below are a best-effort
    reconstruction — restore both from the original template.
    """
    header = f'''
# Release Notes worksheet {category}
The main goal of this process is to rephrase all the commit messages below to make them clear and easy to read by the end user. You should follow the following instructions to do so:
* **Please cleanup, and format commit titles to be readable by the general pytorch user.** [Detailed intructions here](
* Please sort commits into the following categories (you should not rename the categories!), I tried to pre-sort these to ease your work, feel free to move commits around if the current categorization is not good.
* Please drop any commits that are not user-facing.
* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
The categories below are as follows:
* BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rational behind the change as well as an example for how to update user code (guidelines here:
* Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code.
* new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc)
* improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability)
* bug fixes: All commits that fix bugs and behaviors that do not match the documentation
* performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it)
* documentation: All commits that add/update documentation
* Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc
'''
    return [header]
def att_block_model(x_train):
    """Build and compile a 7-way softmax classifier on top of att_block features.

    NOTE(review): `input=`/`output=` and `lr=` are legacy Keras 1.x argument
    names; modern Keras expects `inputs=`/`outputs=` and `learning_rate=`.
    """
    inputs = Input((x_train.shape[1],))
    x = att_block(inputs)
    # Softmax head with very light (1e-10) L2 regularization and unit-norm
    # constraints on both kernel and bias; fixed seed for reproducible init.
    predictions = Dense(7, kernel_initializer=initializers.glorot_normal(seed=1), kernel_regularizer=regularizers.l2(1e-10), kernel_constraint=unit_norm(), activity_regularizer=regularizers.l2(1e-10), use_bias=True, bias_initializer=initializers.glorot_normal(seed=1), bias_constraint=unit_norm(), activation='softmax', name=('fc_' + str(1)))(x)
    model = Model(input=inputs, output=predictions)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001), metrics=['acc'])
    return model
class StreamingPlayer():
    """Home-theater facade component that simulates a streaming movie player.

    State is printed to stdout; `movie`/`currentChapter` track what is playing.
    """

    # Class-level defaults; play()/stop() manage the per-instance state.
    description: str
    currentChapter: int = 0
    movie: str = None

    def __init__(self, description: str, amplifier):
        self.description = description
        self.amplifier = amplifier

    def on(self) -> None:
        print(f'{self.description} on')

    def off(self) -> None:
        print(f'{self.description} off')

    def play(self, movie: str) -> None:
        """Start the given movie from the beginning."""
        self.movie = movie
        self.currentChapter = 0
        print(f'{self.description} playing "{movie}"')

    def play_chapter(self, chapter: int) -> None:
        """Jump to a chapter; refuses (with a message) when no movie is selected."""
        if self.movie is None:  # FIX: identity comparison instead of `== None`
            print(f"{self.description} can't play chapter {chapter} no movie selected")
        else:
            self.currentChapter = chapter
            print(f'{self.description} playing chapter {self.currentChapter} of "{self.movie}"')

    def stop(self) -> None:
        self.currentChapter = 0
        print(f'{self.description} stopped "{self.movie}"')

    def pause(self) -> None:
        # NOTE: pausing resets the chapter, matching the original behavior.
        self.currentChapter = 0
        print(f'{self.description} paused "{self.movie}"')

    def setTwoChannelAudio(self) -> None:
        print(f'{self.description} set two channel audio')

    def setSurroundAudio(self) -> None:
        print(f'{self.description} set surround audio')

    def toString(self) -> str:
        return self.description
class AsyncTakeTest(unittest.TestCase):
    """Multi-process tests for torchsnapshot.Snapshot.async_take failure modes.

    The `_test_*` helpers run inside torchelastic worker processes; the public
    test methods launch them with varying process counts and then check
    whether the snapshot metadata file exists.
    NOTE(review): the helpers take a bare `path` (no self) but are referenced
    as `self._test_...`; a @staticmethod decorator was likely lost in
    extraction — confirm against the original file.
    """

    def _test_async_take_with_error(path: str) -> None:
        tc = unittest.TestCase()
        dist.init_process_group(backend='gloo')
        with patch('torchsnapshot.storage_plugin.FSStoragePlugin', FaultyFSStoragePlugin):
            future = torchsnapshot.Snapshot.async_take(path, {'foo': torch.nn.Linear(128, 64)})
            tc.assertFalse(future.done())
            # The faulty plugin raises 'sorry'; it must surface from wait().
            with tc.assertRaisesRegex(RuntimeError, 'sorry'):
                future.wait()

    def test_async_take_with_error(self) -> None:
        for nproc in [2, 4]:
            with tempfile.TemporaryDirectory() as path:
                lc = get_pet_launch_config(nproc=nproc)
                pet.elastic_launch(lc, entrypoint=self._test_async_take_with_error)(path)
                # A failed snapshot must not leave metadata behind.
                metadata_path = os.path.join(path, SNAPSHOT_METADATA_FNAME)
                self.assertFalse(os.path.isfile(metadata_path))

    def _test_unwaited_async_take(path: str) -> None:
        tc = unittest.TestCase()
        dist.init_process_group(backend='gloo')
        with patch('torchsnapshot.storage_plugin.FSStoragePlugin', SlowFSStoragePlugin):
            future = torchsnapshot.Snapshot.async_take(path, {'foo': torch.nn.Linear(128, 64)})
            # Deliberately never wait() on the future.
            tc.assertFalse(future.done())

    # NOTE(review): the bare string below looks like a stripped @unittest.skip
    # decorator (reason text survived; the call did not) — restore it.
    ('Skipping due to inconsistent behavior between Python3.8 and Python3.9')
    def test_unwaited_async_take(self) -> None:
        for nproc in [1, 2, 4]:
            with tempfile.TemporaryDirectory() as path:
                lc = get_pet_launch_config(nproc=nproc)
                pet.elastic_launch(lc, entrypoint=self._test_unwaited_async_take)(path)
                # Even without wait(), a successful snapshot leaves metadata.
                metadata_path = os.path.join(path, SNAPSHOT_METADATA_FNAME)
                self.assertTrue(os.path.isfile(metadata_path))

    def _test_unwaited_async_take_with_error(path: str) -> None:
        tc = unittest.TestCase()
        dist.init_process_group(backend='gloo')
        with patch('torchsnapshot.storage_plugin.FSStoragePlugin', FaultyFSStoragePlugin):
            future = torchsnapshot.Snapshot.async_take(path, {'foo': torch.nn.Linear(128, 64)})
            tc.assertFalse(future.done())

    def test_unwaited_async_take_with_error(self) -> None:
        for nproc in [1, 2, 4]:
            with tempfile.TemporaryDirectory() as path:
                lc = get_pet_launch_config(nproc=nproc)
                pet.elastic_launch(lc, entrypoint=self._test_unwaited_async_take_with_error)(path)
                metadata_path = os.path.join(path, SNAPSHOT_METADATA_FNAME)
                self.assertFalse(os.path.isfile(metadata_path))
def unite_values(*values: Value) -> Value:
    """Combine values into a single Value, flattening unions and deduplicating."""
    if (not values):
        return NO_RETURN_VALUE
    # Dict used as an ordered set; unhashable values are kept separately so
    # deduplication never raises.
    hashable_vals = {}
    unhashable_vals = []
    for value in values:
        # Flatten nested unions, including unions wrapped in AnnotatedValue
        # (re-applying the annotation metadata to each member).
        if isinstance(value, MultiValuedValue):
            subvals = value.vals
        elif (isinstance(value, AnnotatedValue) and isinstance(value.value, MultiValuedValue)):
            subvals = [annotate_value(subval, value.metadata) for subval in value.value.vals]
        else:
            subvals = [value]
        for subval in subvals:
            try:
                if (subval not in hashable_vals):
                    hashable_vals[subval] = None
            except Exception:
                # __hash__/__eq__ failed; keep the value without deduplication.
                unhashable_vals.append(subval)
    existing = (list(hashable_vals) + unhashable_vals)
    # Drop unreachable members unless they are all that remains.
    reachabilities = [_is_unreachable(val) for val in existing]
    num_unreachable = sum(reachabilities)
    num = (len(existing) - num_unreachable)
    if (num == 0):
        if num_unreachable:
            return AnyValue(AnySource.unreachable)
        return NO_RETURN_VALUE
    if num_unreachable:
        existing = [val for (i, val) in enumerate(existing) if (not reachabilities[i])]
    if (num == 1):
        # A single surviving member is returned unwrapped.
        return existing[0]
    else:
        return MultiValuedValue(existing)
def _get_jupyter_python_script(subcommand='notebook'):
    """Build a `python -c` body that launches the jupyter-<subcommand> entry point.

    Returns the script string, or None when the entry point cannot be found
    or loaded (details go to the debug log).
    """
    # The lab subcommand ships in the jupyterlab distribution; everything
    # else lives in notebook.
    dist = ('jupyterlab' if (subcommand == 'lab') else 'notebook')
    try:
        ep = _find_entry_point(dist, 'console_scripts', f'jupyter-{subcommand}')
        if (ep is None):
            _log.debug(f'Entry point {dist}.console_scripts.jupyter-{subcommand} not found.')
            return
    except Exception:  # FIX: bare `except:` also swallowed SystemExit/KeyboardInterrupt
        _log.debug(f'Error loading {dist}.console_scripts.jupyter-{subcommand} entry point.', exc_info=True)
        return
    try:
        return '; '.join(('import sys', f'from {__name__} import _run_entry_point', f"sys.exit(_run_entry_point('{dist}', '{ep.group}', '{ep.name}'))"))
    except Exception:  # FIX: narrowed from bare `except:`
        _log.debug(f'Unexpected error getting jupyter-{subcommand} entry point', exc_info=True)
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False, output_loading_info=False):
    """Load PyTorch checkpoint file(s) into a TF 2.x transformers model.

    `pytorch_checkpoint_path` may be a single path or a list of sharded
    checkpoint paths; all shards are merged into one state dict.
    """
    try:
        import tensorflow as tf
        import torch
    except ImportError:
        # NOTE(review): the installation URLs in this message were stripped
        # ('Please see and for') — restore them from upstream.
        logger.error('Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see and for installation instructions.')
        raise
    if isinstance(pytorch_checkpoint_path, str):
        pytorch_checkpoint_path = [pytorch_checkpoint_path]
    pt_state_dict = {}
    for path in pytorch_checkpoint_path:
        pt_path = os.path.abspath(path)
        logger.info(f'Loading PyTorch weights from {pt_path}')
        # map_location='cpu' avoids requiring a GPU just to read the weights.
        pt_state_dict.update(torch.load(pt_path, map_location='cpu'))
    logger.info(f'PyTorch checkpoint contains {sum((t.numel() for t in pt_state_dict.values())):,} parameters')
    return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys, output_loading_info=output_loading_info)
class DownsampleAvg(nn.Module):
    """Shortcut downsample: average pool followed by a 1x1 conv-norm-act."""

    def __init__(self, in_chs, out_chs, stride=1, dilation=1, apply_act=False, layers: LayerFn=None):
        super(DownsampleAvg, self).__init__()
        layers = (layers or LayerFn())
        # Under dilation the spatial reduction is handled elsewhere, so the
        # pool stride collapses to 1.
        avg_stride = (stride if (dilation == 1) else 1)
        if ((stride > 1) or (dilation > 1)):
            # AvgPool2dSame is used to preserve 'SAME'-style padding when
            # pooling with stride 1 under dilation.
            avg_pool_fn = (AvgPool2dSame if ((avg_stride == 1) and (dilation > 1)) else nn.AvgPool2d)
            self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
        else:
            self.pool = nn.Identity()
        self.conv = layers.conv_norm_act(in_chs, out_chs, 1, apply_act=apply_act)

    def forward(self, x):
        return self.conv(self.pool(x))
class HourglassAEModule(nn.Module):
    """One recursive stage of an Associative-Embedding hourglass network.

    Each stage keeps a full-resolution branch (up1) and a pooled branch that
    recurses one level deeper (or bottoms out in a ConvModule), then merges
    the two by addition after upsampling.
    """

    def __init__(self, depth, stage_channels, norm_cfg=dict(type='BN', requires_grad=True)):
        # Copy so callers' dicts are never mutated downstream.
        norm_cfg = copy.deepcopy(norm_cfg)
        super().__init__()
        self.depth = depth
        cur_channel = stage_channels[0]
        next_channel = stage_channels[1]
        self.up1 = ConvModule(cur_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg)
        self.pool1 = MaxPool2d(2, 2)
        self.low1 = ConvModule(cur_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg)
        if (self.depth > 1):
            # BUGFIX: the recursive stage previously fell back to the default
            # norm_cfg, silently dropping any caller-supplied config.
            self.low2 = HourglassAEModule((depth - 1), stage_channels[1:], norm_cfg=norm_cfg)
        else:
            self.low2 = ConvModule(next_channel, next_channel, 3, padding=1, norm_cfg=norm_cfg)
        self.low3 = ConvModule(next_channel, cur_channel, 3, padding=1, norm_cfg=norm_cfg)
        self.up2 = nn.UpsamplingNearest2d(scale_factor=2)

    def forward(self, x):
        """Model forward: full-res branch + upsampled recursive branch."""
        up1 = self.up1(x)
        pool1 = self.pool1(x)
        low1 = self.low1(pool1)
        low2 = self.low2(low1)
        low3 = self.low3(low2)
        up2 = self.up2(low3)
        return (up1 + up2)
class MaxExcessReturnPortfolio(Portfolio):
    """Portfolio that maximizes excess return via a quadratic program."""

    def __init__(self, cov_matrix: QFDataFrame, variance_of_assets: QFSeries, upper_constraint: Union[(float, Sequence[float])]=None):
        self.cov_matrix = cov_matrix
        self.variance_of_assets = variance_of_assets
        self.upper_constraint = upper_constraint

    def get_weights(self) -> QFSeries:
        """Solve the QP and return asset weights indexed like the covariance matrix."""
        # Quadratic term comes from the covariance matrix; the linear term is
        # minus half the individual asset variances.
        quadratic_term = self.cov_matrix.values
        linear_term = (- 0.5) * self.variance_of_assets.values
        optimal = QuadraticOptimizer.get_optimal_weights(
            quadratic_term, linear_term, upper_constraints=self.upper_constraint)
        return QFSeries(data=optimal, index=self.cov_matrix.columns)
class _LRScheduler(object):
    """Base learning-rate scheduler (PyTorch-style).

    Wraps optimizer.step with a call counter so step-ordering mistakes
    (scheduler stepped before the optimizer) can be detected and warned about.
    """

    def __init__(self, optimizer, last_epoch=(- 1)):
        self.optimizer = optimizer
        # Fresh run: record each group's current lr as its initial lr.
        # Resumed run: initial_lr must already be present in every group.
        if (last_epoch == (- 1)):
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
        else:
            for (i, group) in enumerate(optimizer.param_groups):
                if ('initial_lr' not in group):
                    raise KeyError("param 'initial_lr' is not specified in param_groups[{}] when resuming an optimizer".format(i))
        self.base_lrs = list(map((lambda group: group['initial_lr']), optimizer.param_groups))
        self.last_epoch = last_epoch

        def with_counter(method):
            # Idempotent: an already-wrapped bound method is returned as-is.
            if getattr(method, '_with_counter', False):
                return method
            # A weakref to the bound instance avoids a reference cycle
            # scheduler -> optimizer.step -> optimizer -> ... -> scheduler.
            instance_ref = weakref.ref(method.__self__)
            func = method.__func__
            cls = instance_ref().__class__
            del method
            # NOTE(review): the bare `(func)` below looks like a stripped
            # `@wraps(func)` decorator — restore it from upstream.
            (func)
            def wrapper(*args, **kwargs):
                instance = instance_ref()
                instance._step_count += 1
                wrapped = func.__get__(instance, cls)
                return wrapped(*args, **kwargs)
            wrapper._with_counter = True
            return wrapper
        self.optimizer.step = with_counter(self.optimizer.step)
        self.optimizer._step_count = 0
        self._step_count = 0
        # Initialize learning rates for epoch -1 -> 0.
        self.step()

    def state_dict(self):
        # Everything except the optimizer itself is serializable state.
        return {key: value for (key, value) in self.__dict__.items() if (key != 'optimizer')}

    def load_state_dict(self, state_dict):
        self.__dict__.update(state_dict)

    def get_last_lr(self):
        """Return the learning rates computed by the most recent step()."""
        return self._last_lr

    def get_lr(self):
        # Subclasses must implement the actual schedule.
        raise NotImplementedError

    def step(self, epoch=None):
        # Warn once, on the first scheduler step, about suspicious ordering
        # relative to optimizer.step().
        # NOTE(review): the documentation URLs inside both warning strings
        # were stripped during extraction ('See more details at ') — restore.
        if (self._step_count == 1):
            if (not hasattr(self.optimizer.step, '_with_counter')):
                warnings.warn('Seems like `optimizer.step()` has been overridden after learning rate scheduler initialization. Please, make sure to call `optimizer.step()` before `lr_scheduler.step()`. See more details at ', UserWarning)
            elif (self.optimizer._step_count < 1):
                warnings.warn('Detected call of `lr_scheduler.step()` before `optimizer.step()`. In PyTorch 1.1.0 and later, you should call them in the opposite order: `optimizer.step()` before `lr_scheduler.step()`. Failure to do this will result in PyTorch skipping the first value of the learning rate schedule. See more details at ', UserWarning)
        self._step_count += 1

        class _enable_get_lr_call():
            # Context manager flagging that get_lr() is being called by step(),
            # so schedulers can warn about direct get_lr() calls.
            def __init__(self, o):
                self.o = o
            def __enter__(self):
                self.o._get_lr_called_within_step = True
                return self
            def __exit__(self, type, value, traceback):
                self.o._get_lr_called_within_step = False

        with _enable_get_lr_call(self):
            if (epoch is None):
                self.last_epoch += 1
                values = self.get_lr()
            else:
                # Passing epoch explicitly is deprecated.
                warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)
                self.last_epoch = epoch
                if hasattr(self, '_get_closed_form_lr'):
                    values = self._get_closed_form_lr()
                else:
                    values = self.get_lr()
        for (param_group, lr) in zip(self.optimizer.param_groups, values):
            param_group['lr'] = lr
        self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def test_valid_oauth(app):
    """A bearer header carrying a known OAuth token authenticates its user."""
    user = model.user.get_user('devtable')
    # NOTE: rebinds (shadows) the `app` fixture with the org's first OAuth app.
    app = model.oauth.list_applications_for_org(model.user.get_user_or_org('buynlarge'))[0]
    # Deterministic 40-character token string.
    token_string = ('%s%s' % (('a' * 20), ('b' * 20)))
    (oauth_token, _) = model.oauth.create_user_access_token(user, app.client_id, 'repo:read', access_token=token_string)
    result = validate_bearer_auth(('bearer ' + token_string))
    assert (result.context.oauthtoken == oauth_token)
    assert (result.authed_user == user)
    assert result.auth_valid
class Migration(migrations.Migration):
    """Drop AdType.publisher; add AdType.default_enabled and the Advertisement.ad_types M2M."""

    dependencies = [('adserver', '0021_publisher_record_ad_views')]

    operations = [
        # Ad types are no longer scoped to a single publisher.
        migrations.RemoveField(model_name='adtype', name='publisher'),
        migrations.AddField(model_name='adtype', name='default_enabled', field=models.BooleanField(default=False, help_text='Whether this ad type should default to checked when advertisers are creating ads')),
        # An advertisement can now be displayed in multiple ad types.
        migrations.AddField(model_name='advertisement', name='ad_types', field=models.ManyToManyField(blank=True, help_text='Possible ways this ad will be displayed', related_name='advertisements', to='adserver.AdType'))]
class KnownValues(unittest.TestCase):
    """Space-group detection tests comparing the pyscf backend against spglib.

    NOTE(review): the bare `((not has_spglib), 'spglib not found')` tuples
    before several tests look like stripped @unittest.skipIf decorators —
    restore them from the original file.
    """

    ((not has_spglib), 'spglib not found')
    def test_D4h_vs_spglib(self):
        dim = 3
        magmom = [1, 1, (- 1), (- 1), 1, (- 1), 1, 1, (- 1), (- 1), 1, (- 1)]
        cell = make_cell_D4h(dim, magmom)
        sg = spg.SpaceGroup(cell)
        sg.backend = 'spglib'
        sg.build()
        ops = sg.ops
        pg = sg.groupname['point_group_symbol']
        sg.backend = 'pyscf'
        sg.build()
        ops1 = sg.ops
        pg1 = sg.groupname['point_group_symbol']
        # Both backends must agree on the point group and every operation.
        self.assertTrue((pg == pg1))
        for (op, op1) in zip(ops, ops1):
            self.assertTrue((op == op1))

    def test_D4h_2d(self):
        dim = 3
        cell = make_cell_D4h(dim)
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops3 = sg.ops
        pg3 = sg.groupname['point_group_symbol']
        self.assertTrue((pg3 == '4/mmm'))
        # Keep only operations acting purely in-plane: no x/y <-> z mixing
        # and no z inversion.
        ops2 = []
        for op in ops3:
            rot = op.rot
            if ((rot[(2, 0)] == 0) and (rot[(2, 1)] == 0) and (rot[(0, 2)] == 0) and (rot[(1, 2)] == 0) and (rot[(2, 2)] != (- 1))):
                ops2.append(op)
        ops2.sort()
        # The 2D cell's group must match the in-plane subset of the 3D group.
        dim = 2
        cell = make_cell_D4h(dim)
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops = sg.ops
        pg = sg.groupname['point_group_symbol']
        self.assertTrue((pg == '4mm'))
        for (op, op0) in zip(ops, ops2):
            self.assertTrue((op == op0))

    ((not has_spglib), 'spglib not found')
    def test_Oh_vs_spglib(self):
        # Diamond-structure Si: point group m-3m.
        cell = gto.Cell()
        cell.atom = '\n Si 0.0 0.0 0.0\n Si 1. 1. 1.\n '
        cell.a = [[0.0, 2., 2.], [2., 0.0, 2.], [2., 2., 0.0]]
        cell.build()
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops = sg.ops
        self.assertTrue((sg.groupname['point_group_symbol'] == 'm-3m'))
        sg.backend = 'spglib'
        sg.build()
        ops0 = sg.ops
        for (op, op0) in zip(ops, ops0):
            self.assertTrue((op == op0))

    ((not has_spglib), 'spglib not found')
    def test_D6h_vs_spglib(self):
        # Hexagonal Zn: point group 6/mmm.
        cell = gto.Cell()
        cell.atom = '\n Zn 0.000000 1.516699 1.301750\n Zn 1.313500 0.758350 3.905250\n '
        cell.a = [[2.627, 0.0, 0.0], [(- 1.3135), 2.275049, 0.0], [0.0, 0.0, 5.207]]
        cell.build()
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops = sg.ops
        self.assertTrue((sg.groupname['point_group_symbol'] == '6/mmm'))
        sg.backend = 'spglib'
        sg.build()
        ops0 = sg.ops
        for (op, op0) in zip(ops, ops0):
            self.assertTrue((op == op0))

    ((not has_spglib), 'spglib not found')
    def test_C2h_vs_spglib(self):
        # Monoclinic Cu cell: point group 2/m.
        cell = gto.Cell()
        cell.atom = '\n Cu 1.145000 8.273858 11.128085\n '
        cell.a = [[2., 0.0, 0.0], [0.0, 11., 0.0], [0.0, 4., 22.]]
        cell.build()
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops = sg.ops
        self.assertTrue((sg.groupname['point_group_symbol'] == '2/m'))
        sg.backend = 'spglib'
        sg.build()
        ops0 = sg.ops
        for (op, op0) in zip(ops, ops0):
            self.assertTrue((op == op0))

    ((not has_spglib), 'spglib not found')
    def test_D3d_vs_spglib(self):
        # Rhombohedral Li cell: point group -3m.
        cell = gto.Cell()
        cell.atom = '\n Li 5.281899 1.082489 0.642479\n Li 18.499727 3.791392 2.250266\n '
        cell.a = [[8., 0.0, 0.0], [7., 3., 0.0], [7., 1., 2.]]
        cell.build()
        sg = spg.SpaceGroup(cell)
        sg.build()
        ops = sg.ops
        self.assertTrue((sg.groupname['point_group_symbol'] == '-3m'))
        sg.backend = 'spglib'
        sg.build()
        ops0 = sg.ops
        for (op, op0) in zip(ops, ops0):
            self.assertTrue((op == op0))

    def test_spg_elment_hash(self):
        # Round-trip a random hash id through (rotation, translation) and back:
        # hash(SPGElement(rot, trans)) must reproduce the id.
        num = np.random.randint(0, ((3 ** 9) * (12 ** 3)))
        rot = np.empty([3, 3], dtype=int)
        trans = np.empty([3], dtype=float)
        r = (num % (3 ** 9))
        # NOTE(review): these base-3 digit strings were emptied by extraction;
        # int('', 3) raises ValueError at runtime — restore the original
        # literals from upstream.
        id_eye = int('', 3)
        id_max = int('', 3)
        r += id_eye
        if (r > id_max):
            r -= (id_max + 1)
        # Decode the rotation: base-3 digits shifted to {-1, 0, 1} entries.
        rot = (np.asarray(lib.base_repr_int(r, 3, 9)) - 1)
        rot = rot.reshape(3, 3)
        # Decode the translation: three base-12 digits, each scaled to [0, 1).
        t = (num // (3 ** 9))
        degit = (12 ** 2)
        for i in range(3):
            trans[i] = (float(((t % (degit * 12)) // degit)) / 12.0)
            degit = (degit // 12)
        op = spg.SPGElement(rot, trans)
        self.assertTrue((hash(op) == num))
def _torch_alloc(size, device_id):
    """Allocate GPU memory through PyTorch's caching allocator so CuPy can use it.

    Raises RuntimeError when PyTorch and CuPy are not operating on the same
    CUDA stream, since allocations tied to different streams are unsafe.
    """
    stream_ptr_torch = torch.cuda.current_stream().cuda_stream
    stream_ptr_cupy = cupy.cuda.get_current_stream().ptr
    if stream_ptr_torch == stream_ptr_cupy:
        return torch.cuda.caching_allocator_alloc(size, device_id, stream_ptr_torch)
    raise RuntimeError('The current stream set in PyTorch and CuPy must be same. Use `pytorch_pfn_extras.cuda.stream` instead of `torch.cuda.stream`.')
def main():
    """Collect Firefox browser artifacts via the framework's background job runner.

    Side effects only: echoes status to the operator console and launches a
    background python task; IE/Chrome collection is explicitly not available.
    """
    dsz.ui.Echo('Pulling Firefox browser data...', dsz.GOOD)
    # Silence command echo while the background job is started, then restore it.
    dsz.control.echo.Off()
    dsz.cmd.Run('background log python windows/firefoxrip.py -args "-s 1000000"', dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()
    # No collectors exist for these browsers; warn the operator instead.
    dsz.ui.Echo('\tNo IE Capability Available!', dsz.WARNING)
    dsz.ui.Echo('\tNo Chrome Capability Available!', dsz.WARNING)
def create_sky_temple_key(key_number: int, resource_database: ResourceDatabase) -> PickupEntry:
    """Build the PickupEntry for the Sky Temple Key with the given 0-based index."""
    key_item = resource_database.get_item(echoes_items.SKY_TEMPLE_KEY_ITEMS[key_number])
    key_model = PickupModel(
        game=resource_database.game_enum,
        name=echoes_items.SKY_TEMPLE_KEY_MODEL,
    )
    generation = PickupGeneratorParams(
        preferred_location_category=LocationCategory.MAJOR,
        probability_offset=3,
    )
    return PickupEntry(
        name=f'Sky Temple Key {key_number + 1}',
        progression=((key_item, 1),),
        model=key_model,
        pickup_category=SKY_TEMPLE_KEY_CATEGORY,
        broad_category=pickup_category.GENERIC_KEY_CATEGORY,
        generator_params=generation,
    )
class InvalidBodyLengthError(ProtocolError):
    """Raised when a message body's actual length differs from the declared one.

    Attributes:
        expected_length: number of bytes the peer declared.
        actual_length: number of bytes actually received.
    """

    def __init__(self, expected, actual):
        # Fix: forward to the base class so BaseException.args carries the
        # mismatch details (the original left args empty, which hid the
        # values from generic repr()/pickling of the exception).
        super().__init__(expected, actual)
        self.expected_length = expected
        self.actual_length = actual

    def __str__(self):
        return ('InvalidBodyLengthError: Expected %d bytes, received %d' % (self.expected_length, self.actual_length))
def pblock_053(content):
    """Parse SEED blockette 53 (poles & zeros) into a StationXML stage.

    Returns a (stage_number, PolesZeros) tuple; every pole/zero gets a
    sequential ``number`` index as required by StationXML.
    """
    stage_number = int(get1(content, b'04'))
    zeros = [ppolezero(field) for field in getn(content, b'10-13')]
    poles = [ppolezero(field) for field in getn(content, b'15-18')]
    pzs = sxml.PolesZeros(
        pz_transfer_function_type=ptftype(get1(content, b'03')),
        input_units=sxml.Units(name=punit(get1(content, b'05'))),
        output_units=sxml.Units(name=punit(get1(content, b'06'))),
        normalization_factor=float(get1(content, b'07')),
        normalization_frequency=sxml.Frequency(value=float(get1(content, b'08'))),
        zero_list=zeros,
        pole_list=poles,
    )
    # Number the entries in order of appearance (zeros and poles separately).
    for seq, zero in enumerate(zeros):
        zero.number = seq
    for seq, pole in enumerate(poles):
        pole.number = seq
    return (stage_number, pzs)
def train_model(model, train, test, num_classes):
    """Compile, fit and evaluate *model* on (x, y) train/test pairs.

    Relies on the module-level ``input_shape``, ``batch_size``, ``epochs``
    and ``now`` definitions. Prints dataset shapes, training time and the
    final test score/accuracy.
    """
    raw_train_x, raw_train_y = train[0], train[1]
    raw_test_x, raw_test_y = test[0], test[1]
    # Reshape to the network input and scale pixel values into [0, 1].
    x_train = raw_train_x.reshape((raw_train_x.shape[0],) + input_shape).astype('float32') / 255
    x_test = raw_test_x.reshape((raw_test_x.shape[0],) + input_shape).astype('float32') / 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # One-hot encode the labels for categorical cross-entropy.
    y_train = keras.utils.to_categorical(raw_train_y, num_classes)
    y_test = keras.utils.to_categorical(raw_test_y, num_classes)
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
    started_at = now()
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    print(('Training time: %s' % (now() - started_at)))
    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
def readData(segmentation_input):
    """Load a ball-tracking CSV and derive per-frame motion features.

    Populates module-level globals:
      numFrame    -- frame count taken from the last row's 'Frame' value
      df_complete -- the first numFrame rows, including invisible detections
      df          -- visible rows only, extended with 'Dup', 'vecX', 'vecY'
      time        -- the 'Time' column of the visible rows
    """
    global numFrame, df, df_complete, time
    df = pd.read_csv(segmentation_input)
    # Cast to a plain int so slicing below is well-defined even if pandas
    # hands back a numpy scalar / float here.
    numFrame = int(df.iloc[(- 1)]['Frame'])
    df = df[0:numFrame]
    df_complete = df[0:numFrame]
    # Keep only frames where the ball was actually detected.
    df = df[(df.Visibility == 1)].reset_index(drop=True)
    time = df[['Time']]
    df = df[['Frame', 'Visibility', 'X', 'Y']]
    # Fix: initialize with a list literal and write cells through .at --
    # the original chained assignment (df['Dup'][i] = ...) operates on a
    # temporary and may silently fail to write under pandas copy-on-write.
    df['Dup'] = [0] * len(df)
    i = 0
    # Mark runs of five identical consecutive (X, Y) positions as duplicates.
    while i < (len(df) - 4):
        frozen = all(
            (df.at[i, 'X'] == df.at[i + k, 'X']) and (df.at[i, 'Y'] == df.at[i + k, 'Y'])
            for k in range(1, 5)
        )
        if frozen:
            for k in range(5):
                df.at[i + k, 'Dup'] = 1
            i += 5
        else:
            df.at[i, 'Dup'] = 0
            i += 1
    print(np.shape(df))
    print('# of frame = ', numFrame)
    # Frame-to-frame displacement vectors; the final row gets zero motion.
    X = df['X']
    Y = df['Y']
    vecX = [(X[i + 1] - X[i]) for i in range(len(X) - 1)]
    vecY = [(Y[i + 1] - Y[i]) for i in range(len(Y) - 1)]
    vecX.append(0)
    vecY.append(0)
    df['vecX'] = vecX
    df['vecY'] = vecY
def follow_redirects(link, sites=None):
    """Resolve *link* through HTTP redirects using HEAD requests.

    Redirects are only followed while the target hostname is in *sites*
    (None means follow everywhere). Returns the final URL; on failure
    (e.g. timeout) returns the last redirect target seen, or the original
    link when nothing could be followed.
    """

    def follow(url):
        # Fix: identity test against the None sentinel, not `== None`.
        return (sites is None) or (urlparse.urlparse(url).hostname in sites)

    class RedirectHandler(urllib2.HTTPRedirectHandler):

        def __init__(self):
            self.last_url = None

        def redirect_request(self, req, fp, code, msg, hdrs, newurl):
            self.last_url = newurl
            if (not follow(newurl)):
                return None  # stop the redirect chain outside allowed sites
            r = urllib2.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, hdrs, newurl)
            r.get_method = (lambda: 'HEAD')  # keep using HEAD on each hop
            return r

    if (not follow(link)):
        return link
    redirect_handler = RedirectHandler()
    opener = urllib2.build_opener(redirect_handler)
    req = urllib2.Request(link)
    req.get_method = (lambda: 'HEAD')
    try:
        with contextlib.closing(opener.open(req, timeout=1)) as site:
            return site.url
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; otherwise keep the original
        # best-effort fallback behaviour.
        return (redirect_handler.last_url if redirect_handler.last_url else link)
class Effect3774(BaseEffect):
    """Passive hull effect adjusting the ship's high/mid/low slot counts by
    the module's corresponding slot-modifier attributes."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        # (ship attribute to bump, module attribute providing the delta)
        slot_pairs = (
            ('hiSlots', 'hiSlotModifier'),
            ('medSlots', 'medSlotModifier'),
            ('lowSlots', 'lowSlotModifier'),
        )
        for ship_attr, modifier_attr in slot_pairs:
            fit.ship.increaseItemAttr(ship_attr, module.getModifiedItemAttr(modifier_attr), **kwargs)
def get_cuda_info(device=None, unit='G', number_only=True):
    """Snapshot GPU memory and utilization statistics for *device*.

    Returns a dict with the overall memory-usage ratio, memory used overall,
    memory used by this process, and GPU utilization (suffixed with '%'
    unless number_only is True).
    """
    process_mem = get_gpu_memory_usage_by_current_program(device, unit, number_only)
    (_, used_mem, _, used_ratio) = get_gpu_memory_info(device, unit, number_only)
    util = get_gpu_utilization(device)
    if not number_only:
        util = f'{util}%'
    return {
        'gpu_mem_ratio': used_ratio,
        'gpu_mem': used_mem,
        'gpu_mem_this': process_mem,
        'gpu_util': util,
    }
def update():
    # Run 100 training episodes against the module-level `env` and agent `RL`.
    # The next action is chosen from the current policy *before* learning, and
    # the update consumes (s, a, r, s', a') -- the on-policy SARSA-style shape.
    for episode in range(100):
        observation = env.reset()
        action = RL.choose_action(str(observation))
        while True:
            env.render()
            (observation_, reward, done) = env.step(action)
            # Pick the follow-up action first so learn() sees the action
            # that will actually be taken next.
            action_ = RL.choose_action(str(observation_))
            RL.learn(str(observation), action, reward, str(observation_), action_)
            # Advance: the chosen next state/action become the current ones.
            observation = observation_
            action = action_
            if done:
                break
    print('game over')
    env.destroy()
def test_get_state_dict():
    """get_state_dict must strip DDP/DataParallel wrappers at any nesting depth
    and return tensors identical to those of the underlying modules."""
    tracked = (torch.__version__ != 'parrots')
    keys = [
        'block.conv.weight', 'block.conv.bias',
        'block.norm.weight', 'block.norm.bias',
        'block.norm.running_mean', 'block.norm.running_var',
        'conv.weight', 'conv.bias',
    ]
    if tracked:
        # parrots' BatchNorm carries no num_batches_tracked buffer.
        keys.append('block.norm.num_batches_tracked')
    expected_keys = set(keys)

    def lookup(root, dotted):
        # Follow a dotted attribute path from `root`.
        value = root
        for part in dotted.split('.'):
            value = getattr(value, part)
        return value

    def check(model_obj, key_to_path):
        state_dict = get_state_dict(model_obj)
        assert isinstance(state_dict, OrderedDict)
        assert (set(state_dict.keys()) == expected_keys)
        for key in keys:
            assert_tensor_equal(state_dict[key], lookup(model_obj, key_to_path(key)))

    # 1. Plain model: state-dict keys map straight onto attribute paths.
    model = Model()
    check(model, lambda key: key)

    # 2. DDP-wrapped model: the 'module.' indirection must be stripped.
    wrapped_model = DDPWrapper(model)
    check(wrapped_model, lambda key: 'module.' + key)

    # 3. Additionally wrap each child in DataParallel: a second 'module.'
    #    level appears after the first path segment.
    for name, child in wrapped_model.module._modules.items():
        wrapped_model.module._modules[name] = DataParallel(child)

    def nested_path(key):
        head, _, tail = key.partition('.')
        return '.'.join(['module', head, 'module', tail])

    check(wrapped_model, nested_path)
def _start_all_stats_collection_from_deltas(deltas: List[Delta], partition_value_string: Optional[str], partition_canonical_string: Optional[str], columns: Optional[List[str]]=None, trace_id: Optional[str]=None, file_count_per_cpu: Optional[int]=MANIFEST_FILE_COUNT_PER_CPU, cpus_per_instance: Optional[int]=DEFAULT_CPUS_PER_INSTANCE_R5_8XLARGE, stat_results_s3_bucket: Optional[str]=None, metastats_results_s3_bucket: Optional[str]=None, deltacat_storage=unimplemented_deltacat_storage) -> Dict[(int, DeltaStats)]:
    """Collect column stats for *deltas*, serving cache hits from S3 and
    scheduling computation for everything else.

    Returns a mapping of delta stream position -> DeltaStats, merging cached
    results with freshly computed ones (computed results win on conflict).
    """
    delta_stats_compute_list: List[DeltaLocator] = []
    meta_stats_list_ready: List[DeltaLocator] = []
    meta_stats_list_to_compute: List[DeltaLocator] = []
    delta_cache_res: List[DeltaStats] = []
    # Fix: the original only examined `deltas` inside the s3-bucket branch,
    # so without a cache bucket `delta_cache_res` was never defined (NameError
    # below) and no deltas were ever queued for computation. The cache lookup
    # is now optional while the per-delta loop always runs.
    found_columns_stats_map: Dict[(int, List[DeltaStatsCacheResult])] = {}
    if stat_results_s3_bucket:
        found_columns_stats_map = read_cached_partition_stats(partition_canonical_string, stat_results_s3_bucket)
    for delta in deltas:
        if (found_columns_stats_map and (delta.stream_position in found_columns_stats_map)):
            cached_result = found_columns_stats_map[delta.stream_position]
            if cached_result.hits:
                delta_cache_res.append(cached_result.hits)
                meta_stats_list_ready.append(cached_result.hits.column_stats[0].manifest_stats.delta_locator)
            if cached_result.misses:
                delta_locator: DeltaLocator = cached_result.misses.delta_locator
                delta_stats_compute_list.append(delta_locator)
                meta_stats_list_to_compute.append(delta_locator)
        else:
            # No cached entry at all: compute everything for this delta.
            delta_stats_compute_list.append(delta.locator)
            meta_stats_list_to_compute.append(delta.locator)
    logger.info(f'Collecting stats on {len(delta_stats_compute_list)} deltas!')
    delta_stats_compute_res: Dict[(int, DeltaStats)] = {}
    if delta_stats_compute_list:
        delta_stats_compute_res = _start_metadata_stats_collection(delta_stats_compute_list=delta_stats_compute_list, meta_stats_list_ready=meta_stats_list_ready, meta_stats_list_to_compute=meta_stats_list_to_compute, partition_value_string=partition_value_string, partition_canonical_string=partition_canonical_string, columns=columns, trace_id=trace_id, file_count_per_cpu=file_count_per_cpu, cpus_per_instance=cpus_per_instance, stat_results_s3_bucket=stat_results_s3_bucket, metastats_results_s3_bucket=metastats_results_s3_bucket, deltacat_storage=deltacat_storage)
    # Key the cached stats by stream position so they merge with computed ones.
    delta_stream_range_stats: Dict[(int, DeltaStats)] = {}
    for delta_column_stats in delta_cache_res:
        assert (len(delta_column_stats.column_stats) > 0), f'Expected columns of `{delta_column_stats}` to be non-empty'
        stream_position = delta_column_stats.column_stats[0].manifest_stats.delta_locator.stream_position
        delta_stream_range_stats[stream_position] = delta_column_stats
    stats_collection_res: Dict[(int, DeltaStats)] = {**delta_stream_range_stats, **delta_stats_compute_res}
    return stats_collection_res
# Electrum hardware-wallet plugin for the Coinkite Coldcard: registers the
# device IDs with the device manager, creates CKCCClient connections (USB or
# the local simulator socket), fetches xpubs for the supported script types,
# exports multisig wallet descriptions in Coldcard's text format, and shows
# addresses (standard and multisig/P2SH) on the device screen.
# NOTE(review): the bare `_in_hwd_thread` tokens before create_client and
# get_client look like mangled `@runs_in_hwd_thread` decorators, and
# export_ms_wallet is missing its `self`/`@staticmethod` marker -- confirm
# against the upstream Electrum coldcard plugin before relying on this text.
class ColdcardPlugin(HW_PluginBase): keystore_class = Coldcard_KeyStore minimum_library = (0, 7, 7) DEVICE_IDS = [(COINKITE_VID, CKCC_PID), (COINKITE_VID, CKCC_SIMULATED_PID)] SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh') def __init__(self, parent, config, name): HW_PluginBase.__init__(self, parent, config, name) self.libraries_available = self.check_libraries_available() if (not self.libraries_available): return self.device_manager().register_devices(self.DEVICE_IDS, plugin=self) self.device_manager().register_enumerate_func(self.detect_simulator) def get_library_version(self): import ckcc try: version = ckcc.__version__ except AttributeError: version = 'unknown' if requirements_ok: return version else: raise LibraryFoundButUnusable(library_version=version) def detect_simulator(self): fn = CKCC_SIMULATOR_PATH if os.path.exists(fn): return [Device(path=fn, interface_number=(- 1), id_=fn, product_key=(COINKITE_VID, CKCC_SIMULATED_PID), usage_page=0, transport_ui_string='simulator')] return [] _in_hwd_thread def create_client(self, device, handler): if handler: self.handler = handler try: rv = CKCCClient(self, handler, device.path, is_simulator=(device.product_key[1] == CKCC_SIMULATED_PID)) return rv except Exception as e: self.logger.exception('late failure connecting to device?') return None def setup_device(self, device_info, wizard, purpose): device_id = device_info.device.id_ client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard) return client def get_xpub(self, device_id, derivation, xtype, wizard): if (xtype not in self.SUPPORTED_XTYPES): raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device)) client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard) client.ping_check() xpub = client.get_xpub(derivation, xtype) return xpub _in_hwd_thread def get_client(self, keystore, force_pair=True, *, devices=None, 
allow_user_interaction=True) -> Optional['CKCCClient']: client = super().get_client(keystore, force_pair, devices=devices, allow_user_interaction=allow_user_interaction) if (client is not None): client.ping_check() return client def export_ms_wallet(wallet: Multisig_Wallet, fp, name): assert isinstance(wallet, Multisig_Wallet) print('# Exported from Electrum', file=fp) print(f'Name: {name:.20s}', file=fp) print(f'Policy: {wallet.m} of {wallet.n}', file=fp) print(f'Format: {wallet.txin_type.upper()}', file=fp) xpubs = [] derivs = set() for (xpub, ks) in zip(wallet.get_master_public_keys(), wallet.get_keystores()): (fp_bytes, der_full) = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix=[], only_der_suffix=False) fp_hex = fp_bytes.hex().upper() der_prefix_str = bip32.convert_bip32_intpath_to_strpath(der_full) xpubs.append((fp_hex, xpub, der_prefix_str)) derivs.add(der_prefix_str) if (len(derivs) == 1): print(('Derivation: ' + derivs.pop()), file=fp) print('', file=fp) assert (len(xpubs) == wallet.n) for (xfp, xpub, der_prefix) in xpubs: if derivs: print(f'# derivation: {der_prefix}', file=fp) print(f'''{xfp}: {xpub}
''', file=fp) def show_address(self, wallet, address, keystore: 'Coldcard_KeyStore'=None): if (keystore is None): keystore = wallet.get_keystore() if (not self.show_address_helper(wallet, address, keystore)): return txin_type = wallet.get_txin_type(address) if (type(wallet) is Standard_Wallet): sequence = wallet.get_address_index(address) keystore.show_address(sequence, txin_type) elif (type(wallet) is Multisig_Wallet): assert isinstance(wallet, Multisig_Wallet) pubkey_deriv_info = wallet.get_public_keys_with_deriv_info(address) pubkey_hexes = sorted([pk.hex() for pk in list(pubkey_deriv_info)]) xfp_paths = [] for pubkey_hex in pubkey_hexes: pubkey = bytes.fromhex(pubkey_hex) (ks, der_suffix) = pubkey_deriv_info[pubkey] (fp_bytes, der_full) = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix, only_der_suffix=False) xfp_int = 
xfp_int_from_xfp_bytes(fp_bytes) xfp_paths.append(([xfp_int] + list(der_full))) script = bfh(wallet.pubkeys_to_scriptcode(pubkey_hexes)) keystore.show_p2sh_address(wallet.m, script, xfp_paths, txin_type) else: keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device)) return
# Few-shot meta-learning trainer. __init__ selects the dataset class by name,
# builds the MetaModel in 'pre' or 'meta' mode (loading a downloaded
# pre-trained encoder checkpoint for meta mode), prepares episode samplers
# and data loaders, and configures SGD + StepLR (with extra hyperprior
# parameter groups for the meta phase). train()/pre_train() run the epoch
# loops with TensorBoard logging and best-checkpoint tracking; eval() reports
# a 95% confidence interval over 3000 test episodes.
# NOTE(review): `val_loader = [x for x in self.val_loader]` in __init__ is
# never used, and the log message 'pre-trainrd' looks like a typo -- confirm
# against the original repository before changing either.
class MetaTrainer(object): def __init__(self, args): self.args = args if (args.dataset == 'miniimagenet'): from dataloader.mini_imagenet import MiniImageNet as Dataset args.num_class = 64 print('Using dataset: miniImageNet, base class num:', args.num_class) elif (args.dataset == 'cub'): from dataloader.cub import CUB as Dataset args.num_class = 100 print('Using dataset: CUB, base class num:', args.num_class) elif (args.dataset == 'tieredimagenet'): from dataloader.tiered_imagenet import tieredImageNet as Dataset args.num_class = 351 print('Using dataset: tieredImageNet, base class num:', args.num_class) elif (args.dataset == 'fc100'): from dataloader.fc100 import DatasetLoader as Dataset args.num_class = 60 print('Using dataset: FC100, base class num:', args.num_class) elif (args.dataset == 'cifar_fs'): from dataloader.cifar_fs import DatasetLoader as Dataset args.num_class = 64 print('Using dataset: CIFAR-FS, base class num:', args.num_class) else: raise ValueError('Please set the correct dataset.') self.Dataset = Dataset if (args.mode == 'pre_train'): print('Building pre-train model.') self.model = importlib.import_module('model.meta_model').MetaModel(args, dropout=args.dropout, mode='pre') else: print('Building meta model.') self.model = importlib.import_module('model.meta_model').MetaModel(args, dropout=args.dropout, mode='meta') if (args.mode == 'pre_train'): print('Initialize the model for pre-train phase.') else: args.dir = ('pretrain_model/%s/%s/max_acc.pth' % (args.dataset, args.backbone)) if (not os.path.exists(args.dir)): os.system('sh scripts/download_pretrain_model.sh') print('Loading pre-trainrd model from:\n', args.dir) model_dict = self.model.state_dict() pretrained_dict = torch.load(args.dir)['params'] pretrained_dict = {('encoder.' 
+ k): v for (k, v) in pretrained_dict.items()} for (k, v) in pretrained_dict.items(): model_dict[k] = pretrained_dict[k] self.model.load_state_dict(model_dict) if (self.args.num_gpu > 1): self.model = nn.DataParallel(self.model, list(range(args.num_gpu))) self.model = self.model.cuda() print('Building model finished.') if (args.mode == 'pre_train'): args.save_path = ('pre_train/%s-%s' % (args.dataset, args.backbone)) else: args.save_path = ('meta_train/%s-%s-%s-%dway-%dshot' % (args.dataset, args.backbone, args.meta_update, args.way, args.shot)) args.save_path = osp.join('logs', args.save_path) ensure_path(args.save_path) trainset = Dataset('train', args) if (args.mode == 'pre_train'): self.train_loader = DataLoader(dataset=trainset, batch_size=args.bs, shuffle=True, num_workers=args.num_workers, pin_memory=True) else: train_sampler = CategoriesSampler(trainset.label, (args.val_frequency * args.bs), args.way, (args.shot + args.query)) self.train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler, num_workers=args.num_workers, pin_memory=True) valset = Dataset(args.set, args) val_sampler = CategoriesSampler(valset.label, args.val_episode, args.way, (args.shot + args.query)) self.val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler, num_workers=args.num_workers, pin_memory=True) val_loader = [x for x in self.val_loader] if (args.mode == 'pre_train'): self.optimizer = torch.optim.SGD([{'params': self.model.encoder.parameters(), 'lr': args.lr}, {'params': self.model.fc.parameters(), 'lr': args.lr}], momentum=0.9, nesterov=True, weight_decay=0.0005) self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=args.step_size, gamma=args.gamma) else: if (args.meta_update == 'mtl'): new_para = filter((lambda p: p.requires_grad), self.model.encoder.parameters()) else: new_para = self.model.encoder.parameters() self.optimizer = torch.optim.SGD([{'params': new_para, 'lr': args.lr}, {'params': self.model.base_learner.parameters(), 
'lr': self.args.lr}, {'params': self.model.get_hyperprior_combination_initialization_vars(), 'lr': self.args.lr_combination}, {'params': self.model.get_hyperprior_combination_mapping_vars(), 'lr': self.args.lr_combination_hyperprior}, {'params': self.model.get_hyperprior_basestep_initialization_vars(), 'lr': self.args.lr_basestep}, {'params': self.model.get_hyperprior_stepsize_mapping_vars(), 'lr': self.args.lr_basestep_hyperprior}], lr=args.lr, momentum=0.9, nesterov=True, weight_decay=0.0005) self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=args.step_size, gamma=args.gamma) def save_model(self, name): torch.save(dict(params=self.model.state_dict()), osp.join(self.args.save_path, (name + '.pth'))) def train(self): args = self.args model = self.model trlog = {} trlog['args'] = vars(args) trlog['train_loss'] = [] trlog['val_loss'] = [] trlog['train_acc'] = [] trlog['val_acc'] = [] trlog['max_acc'] = 0.0 trlog['max_acc_epoch'] = 0 timer = Timer() global_count = 0 writer = SummaryWriter(osp.join(args.save_path, 'tf')) label = torch.arange(args.way, dtype=torch.int8).repeat(args.query) label = label.type(torch.LongTensor) if torch.cuda.is_available(): label = label.cuda() SLEEP(args) for epoch in range(1, (args.max_epoch + 1)): print(args.save_path) start_time = time.time() tl = Averager() ta = Averager() tqdm_gen = tqdm.tqdm(self.train_loader) model.train() for (i, batch) in enumerate(tqdm_gen, 1): global_count = (global_count + 1) if torch.cuda.is_available(): (data, _) = [_.cuda() for _ in batch] else: data = batch[0] p = (args.shot * args.way) (data_shot, data_query) = (data[:p], data[p:]) data_shot = data_shot.unsqueeze(0).repeat(args.num_gpu, 1, 1, 1, 1) logits = model((data_shot, data_query)) loss = F.cross_entropy(logits, label) acc = count_acc(logits, label) writer.add_scalar('data/loss', float(loss), global_count) writer.add_scalar('data/acc', float(acc), global_count) total_loss = (loss / args.bs) 
writer.add_scalar('data/total_loss', float(total_loss), global_count) tqdm_gen.set_description('Epoch {}, Total loss={:.4f}, Acc={:.4f}.'.format(epoch, total_loss.item(), acc)) tl.add(total_loss.item()) ta.add(acc) total_loss.backward() if ((i % args.bs) == 0): self.optimizer.step() self.optimizer.zero_grad() tl = tl.item() ta = ta.item() vl = Averager() va = Averager() model.eval() tqdm_gen = tqdm.tqdm(self.val_loader) for (i, batch) in enumerate(tqdm_gen, 1): if torch.cuda.is_available(): (data, _) = [_.cuda() for _ in batch] else: data = batch[0] p = (args.shot * args.way) (data_shot, data_query) = (data[:p], data[p:]) data_shot = data_shot.unsqueeze(0).repeat(args.num_gpu, 1, 1, 1, 1) logits = model((data_shot, data_query)) loss = F.cross_entropy(logits, label) acc = count_acc(logits, label) vl.add(loss.item()) va.add(acc) tqdm_gen.set_description('Episode {}: {:.2f}({:.2f})'.format(i, (va.item() * 100), (acc * 100))) vl = vl.item() va = va.item() writer.add_scalar('data/val_loss', float(vl), epoch) writer.add_scalar('data/val_acc', float(va), epoch) print(('Validation acc:%.4f' % va)) if (va >= trlog['max_acc']): print(' New best model!!! ') trlog['max_acc'] = va trlog['max_acc_epoch'] = epoch self.save_model('max_acc') trlog['train_loss'].append(tl) trlog['train_acc'].append(ta) trlog['val_loss'].append(vl) trlog['val_acc'].append(va) torch.save(trlog, osp.join(args.save_path, 'trlog')) if args.save_all: self.save_model(('epoch-%d' % epoch)) torch.save(self.optimizer.state_dict(), osp.join(args.save_path, 'optimizer_latest.pth')) print('Best epoch {}, best val acc={:.4f}.'.format(trlog['max_acc_epoch'], trlog['max_acc'])) print(('This epoch takes %d seconds.' % (time.time() - start_time)), ('\nStill need %.2f hour to finish.' 
% (((time.time() - start_time) * (args.max_epoch - epoch)) / 3600))) self.lr_scheduler.step() writer.close() def eval(self): model = self.model args = self.args result_list = [args.save_path] trlog = torch.load(osp.join(args.save_path, 'trlog')) test_set = self.Dataset('test', args) sampler = CategoriesSampler(test_set.label, 3000, args.way, (args.shot + args.query)) loader = DataLoader(test_set, batch_sampler=sampler, num_workers=args.num_workers, pin_memory=True) test_acc_record = np.zeros((3000,)) model.load_state_dict(torch.load(osp.join(args.save_path, ('max_acc' + '.pth')))['params']) model.eval() ave_acc = Averager() label = torch.arange(args.way).repeat(args.query) if torch.cuda.is_available(): label = label.type(torch.cuda.LongTensor) else: label = label.type(torch.LongTensor) tqdm_gen = tqdm.tqdm(loader) for (i, batch) in enumerate(tqdm_gen, 1): if torch.cuda.is_available(): (data, _) = [_.cuda() for _ in batch] else: data = batch[0] k = (args.way * args.shot) (data_shot, data_query) = (data[:k], data[k:]) data_shot = data_shot.unsqueeze(0).repeat(args.num_gpu, 1, 1, 1, 1) logits = model((data_shot, data_query)) acc = count_acc(logits, label) ave_acc.add(acc) test_acc_record[(i - 1)] = acc tqdm_gen.set_description('Episode {}: {:.2f}({:.2f})'.format(i, (ave_acc.item() * 100), (acc * 100))) (m, pm) = compute_confidence_interval(test_acc_record) result_list.append('Best validation epoch {},\nbest validation acc {:.4f}, \nbest test acc {:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc'], ave_acc.item())) result_list.append('Test acc {:.4f} + {:.4f}'.format(m, pm)) print(result_list[(- 2)]) print(result_list[(- 1)]) save_list_to_txt(os.path.join(args.save_path, 'results.txt'), result_list) def pre_train(self): model = self.model args = self.args lr_scheduler = self.lr_scheduler optimizer = self.optimizer train_loader = self.train_loader val_loader = self.val_loader trlog = {} trlog['args'] = vars(args) trlog['train_loss'] = [] trlog['val_loss'] = [] 
trlog['train_acc'] = [] trlog['val_acc'] = [] trlog['max_acc'] = 0.0 trlog['max_acc_epoch'] = 0 timer = Timer() global_count = 0 writer = SummaryWriter(osp.join(args.save_path, 'tf')) label = torch.arange(args.way).repeat(args.query) if torch.cuda.is_available(): label = label.type(torch.cuda.LongTensor) else: label = label.type(torch.LongTensor) SLEEP(args) for epoch in range(1, (args.max_epoch + 1)): print(args.save_path) start_time = time.time() model = model.train() model.mode = 'pre' tl = Averager() ta = Averager() tqdm_gen = tqdm.tqdm(train_loader) for (i, batch) in enumerate(tqdm_gen, 1): global_count = (global_count + 1) if torch.cuda.is_available(): (data, train_label) = [_.cuda() for _ in batch] else: data = batch[0] logits = model(data) loss = F.cross_entropy(logits, train_label) acc = count_acc(logits, train_label) writer.add_scalar('data/loss', float(loss), global_count) writer.add_scalar('data/acc', float(acc), global_count) total_loss = loss writer.add_scalar('data/total_loss', float(total_loss), global_count) tqdm_gen.set_description('Epoch {}, total loss={:.4f} acc={:.4f}'.format(epoch, total_loss.item(), acc)) tl.add(total_loss.item()) ta.add(acc) optimizer.zero_grad() total_loss.backward() optimizer.step() tl = tl.item() ta = ta.item() model = model.eval() model.mode = 'meta' vl = Averager() va = Averager() if (epoch < args.val_epoch): vl = 0 va = 0 else: tqdm_gen = tqdm.tqdm(val_loader) for (i, batch) in enumerate(tqdm_gen, 1): if torch.cuda.is_available(): (data, _) = [_.cuda() for _ in batch] else: data = batch[0] p = (args.shot * args.way) (data_shot, data_query) = (data[:p], data[p:]) data_shot = data_shot.unsqueeze(0).repeat(args.num_gpu, 1, 1, 1, 1) logits = model.preval_forward(data_shot, data_query) loss = F.cross_entropy(logits, label) acc = count_acc(logits, label) vl.add(loss.item()) va.add(acc) vl = vl.item() va = va.item() writer.add_scalar('data/val_loss', float(vl), epoch) writer.add_scalar('data/val_acc', float(va), epoch) 
tqdm_gen.set_description('epo {}, val, loss={:.4f} acc={:.4f}'.format(epoch, vl, va)) if (va >= trlog['max_acc']): print(' New best model!!! ') trlog['max_acc'] = va trlog['max_acc_epoch'] = epoch self.save_model('max_acc') torch.save(optimizer.state_dict(), osp.join(args.save_path, 'optimizer_best.pth')) trlog['train_loss'].append(tl) trlog['train_acc'].append(ta) trlog['val_loss'].append(vl) trlog['val_acc'].append(va) torch.save(trlog, osp.join(args.save_path, 'trlog')) if args.save_all: self.save_model(('epoch-%d' % epoch)) torch.save(optimizer.state_dict(), osp.join(args.save_path, 'optimizer_latest.pth')) print('Best epoch {}, best val acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc'])) print(('This epoch takes %d seconds' % (time.time() - start_time)), ('\nStill need %.2f hour to finish' % (((time.time() - start_time) * (args.max_epoch - epoch)) / 3600))) lr_scheduler.step() writer.close() result_list = ['Best validation epoch {},\nbest val Acc {:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc'])] save_list_to_txt(os.path.join(args.save_path, 'results.txt'), result_list)
def test_is_debugging(monkeypatch):
    """is_debugging should be False normally, and True when handed a trace
    function attributed to a debugger-looking module (here a fake pydevd)."""
    import pytest_timeout

    # No debugger trace function installed: not debugging.
    assert (not pytest_timeout.is_debugging())

    # Fabricate a module whose name matches the pydevd detection heuristic
    # and attach a trace function attributed to it. The function keeps the
    # name `custom_trace` so the module attribute and __name__ line up.
    from types import ModuleType
    module_name = 'custom.pydevd'
    fake_module = ModuleType(module_name)
    monkeypatch.setitem(sys.modules, module_name, fake_module)

    def custom_trace(*args):
        pass

    custom_trace.__module__ = module_name
    fake_module.custom_trace = custom_trace
    assert pytest_timeout.is_debugging(custom_trace)
# End-to-end check that a task marked persist reports PERSISTENCE (not a
# re-run) when its products already match, raising Persisted internally.
# NOTE(review): the leading `.end_to_end()` and the `.persist` /
# `.depends_on(...)` / `.produces(...)` lines inside the task source look
# like `@pytest.mark.` / `@pytask.mark.` decorators whose prefixes were lost
# in extraction -- confirm against the original pytask test module.
.end_to_end()
def test_migrating_a_whole_task_with_persist(tmp_path):
    source = '\n import pytask\n\n .persist\n .depends_on("in.txt")\n .produces("out.txt")\n def task_dummy(depends_on, produces):\n produces.write_text(depends_on.read_text())\n '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    # Pre-create both the dependency and the product with identical content,
    # so the task has nothing left to do.
    for name in ('in.txt', 'out.txt'):
        tmp_path.joinpath(name).write_text('They say oh my god I see the way you shine.')
    session = build(paths=tmp_path)
    assert (session.exit_code == ExitCode.OK)
    assert (len(session.execution_reports) == 1)
    # The single report must record persistence, carried by the Persisted exception.
    assert (session.execution_reports[0].outcome == TaskOutcome.PERSISTENCE)
    assert isinstance(session.execution_reports[0].exc_info[1], Persisted)
class DataConfig():
    """Resolve dataset and model paths declared in a JSON config file.

    Relative paths in the JSON are interpreted relative to the directory
    containing the config file itself.
    """

    def __init__(self, path_config_json):
        path_config_json = Path(path_config_json)
        config_dict = utils.Json.load(path_config_json)
        dir_config = path_config_json.parent

        def resolve(key):
            # Anchor a config-relative path at the config file's directory.
            return dir_config / config_dict[key]

        self.lm_name = config_dict['lm_name']
        self.path_test = resolve('path_test')
        self.path_train = resolve('path_train')
        self.path_phrase = resolve('path_phrase')
        self.path_tagging_docs = resolve('path_tagging_docs')
        self.paths_tagging_human = [dir_config / p for p in config_dict['paths_tagging_human']]
        self.path_stem_test = resolve('path_stem_test')
        self.path_stem_train = resolve('path_stem_train')
        self.path_stem_doc2references = resolve('path_stem_doc2references')
        self.dir_config = dir_config
        self.kp_num_candidates_per_doc = config_dict['kp_num_candidates_per_doc']

    def todict(self):
        """Return all attributes as a plain dict with stringified values."""
        return {key: str(val) for (key, val) in self.__dict__.items()}
def conv_functional():
    """Build a small functional-API CNN for 28x28x1 inputs:
    conv -> transposed conv -> depthwise conv -> flatten -> dropout -> softmax."""
    input_shape = (128, 28, 28, 1)  # leading dim is a nominal batch size, dropped below
    inputs = tf.keras.Input(shape=input_shape[1:])
    hidden = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu')(inputs)
    hidden = tf.keras.layers.Conv2DTranspose(32, kernel_size=(3, 3), activation='relu')(hidden)
    hidden = tf.keras.layers.DepthwiseConv2D(depth_multiplier=1, kernel_size=(3, 3), activation='relu')(hidden)
    hidden = tf.keras.layers.Flatten()(hidden)
    hidden = tf.keras.layers.Dropout(0.5, trainable=False)(hidden)
    outputs = tf.keras.layers.Dense(10, activation='softmax')(hidden)
    return tf.keras.Model(inputs=inputs, outputs=outputs, name='conv_functional')
def test_addbug() -> None:
    """Regression test: concatenating an FSM accepting '' with one accepting
    'c' must yield an FSM that accepts 'c'."""
    a, b, c = Charclass('a'), Charclass('b'), Charclass('c')
    other = ~Charclass('abc')
    sigma = {a, b, c, other}

    # Accepts the empty string (its initial state 1 is final).
    int5A = Fsm(
        alphabet=sigma,
        states={0, 1},
        initial=1,
        finals={1},
        map={
            0: {other: 0, a: 0, b: 0, c: 0},
            1: {other: 0, a: 0, b: 1, c: 1},
        },
    )
    assert int5A.accepts('')

    # Accepts exactly one 'c' from its initial state.
    int5B = Fsm(
        alphabet=sigma,
        states={0, 1, 2},
        initial=1,
        finals={0},
        map={
            0: {other: 2, a: 2, b: 2, c: 2},
            1: {other: 2, a: 2, b: 2, c: 0},
            2: {other: 2, a: 2, b: 2, c: 2},
        },
    )
    assert int5B.accepts('c')

    # {''} concatenated with {'c'} must still accept 'c'.
    int5C = int5A.concatenate(int5B)
    assert int5C.accepts('c')
@patch('beeref.widgets.SceneToPixmapExporterDialog.exec')
@patch('beeref.widgets.SceneToPixmapExporterDialog.value')
@patch('PyQt6.QtWidgets.QFileDialog.getSaveFileName')
def test_on_action_export_scene(file_mock, value_mock, exec_mock, view, tmpdir, qtbot):
    """Exporting the scene writes a PNG of the requested size and then calls
    the export-finished callback with no errors."""
    # NOTE(review): the @patch decorators were garbled to bare call
    # expressions by an extraction step; restored assuming
    # `from unittest.mock import patch` — confirm against the test module.
    item = BeeTextItem('foo')
    view.scene.addItem(item)
    filename = os.path.join(tmpdir, 'test.png')
    assert (os.path.exists(filename) is False)
    # Simulate the user picking a file name, confirming the dialog, and
    # requesting a 100x100 export.
    file_mock.return_value = (filename, None)
    exec_mock.return_value = 1
    value_mock.return_value = QtCore.QSize(100, 100)
    view.on_export_finished = MagicMock()
    view.on_action_export_scene()
    # The export runs asynchronously; wait for the finished callback.
    qtbot.waitUntil(lambda: (view.on_export_finished.called is True))
    view.on_export_finished.assert_called_once_with(filename, [])
    img = QtGui.QImage(filename)
    assert (img.size() == QtCore.QSize(100, 100))
def group_connections(connections):
    """Yield (language, connections) pairs, grouping contacts by language.

    Accepts either a Django QuerySet (grouped via queryset filtering so the
    database does the work) or any plain iterable of connection objects.
    """
    by_language = defaultdict(list)
    if isinstance(connections, QuerySet):
        # One filtered queryset per distinct contact language.
        all_languages = connections.values_list('contact__language', flat=True)
        for lang in all_languages.distinct():
            by_language[lang].extend(connections.filter(contact__language=lang))
    else:
        for conn in connections:
            by_language[conn.contact.language].append(conn)
    yield from by_language.items()
class Pirate(cmd2.Cmd):
    """Piratical example cmd2 application: multiline ``sing`` command, gold
    bookkeeping around every command, a ``~`` shortcut, and an
    argparse-backed ``yo`` command."""

    def __init__(self):
        shortcuts = dict(cmd2.DEFAULT_SHORTCUTS)
        shortcuts.update({'~': 'sing'})
        super().__init__(
            multiline_commands=['sing'],
            terminators=[MULTILINE_TERMINATOR, '...'],
            shortcuts=shortcuts,
        )
        self.default_to_shell = True
        self.songcolor = 'blue'
        # Expose songcolor through cmd2's `set` command.
        self.add_settable(cmd2.Settable('songcolor', str, 'Color to ``sing``', self, choices=color_choices))
        self.gold = 0
        self.initial_gold = self.gold
        self.prompt = 'arrr> '

    def precmd(self, line):
        # Remember gold before each command so postcmd can report changes.
        self.initial_gold = self.gold
        return line

    def postcmd(self, stop, line):
        # Report gold changes; going negative ends the session with exit code 1.
        if (self.gold != self.initial_gold):
            self.poutput('Now we gots {0} doubloons'.format(self.gold))
        if (self.gold < 0):
            self.poutput("Off to debtorrr's prison.")
            self.exit_code = 1
            stop = True
        return stop

    def do_loot(self, arg):
        self.gold += 1

    def do_drink(self, arg):
        # A non-numeric argument still costs one gold piece.
        try:
            self.gold -= int(arg)
        except ValueError:
            if arg:
                self.poutput('What\'s "{0}"? I\'ll take rrrum.'.format(arg))
            self.gold -= 1

    def do_quit(self, arg):
        self.poutput('Quiterrr!')
        return True

    def do_sing(self, arg):
        self.poutput(cmd2.ansi.style(arg, fg=Fg[self.songcolor.upper()]), apply_style=False)

    yo_parser = cmd2.Cmd2ArgumentParser()
    yo_parser.add_argument('--ho', type=int, default=2, help="How often to chant 'ho'")
    yo_parser.add_argument('-c', '--commas', action='store_true', help='Intersperse commas')
    yo_parser.add_argument('beverage', help='beverage to drink with the chant')

    # NOTE(review): this decorator was garbled to a bare `.with_argparser(...)`
    # by an extraction step; restored to cmd2's documented form.
    @cmd2.with_argparser(yo_parser)
    def do_yo(self, args):
        chant = (['yo'] + (['ho'] * args.ho))
        separator = (', ' if args.commas else ' ')
        chant = separator.join(chant)
        self.poutput('{0} and a bottle of {1}'.format(chant, args.beverage))
def save_checkpoint(model, args, is_best=False):
    """Save the model's state dict, optionally copying it as the best checkpoint.

    The file is named '<model>_<backbone>_<dataset>.pth' under args.save_dir;
    when is_best is True a '..._best_model.pth' copy is created alongside it.
    """
    save_dir = os.path.expanduser(args.save_dir)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    stem = '{}_{}_{}'.format(args.model, args.backbone, args.dataset)
    ckpt_path = os.path.join(save_dir, stem + '.pth')
    # DistributedDataParallel wraps the real model in `.module`.
    if args.distributed:
        model = model.module
    torch.save(model.state_dict(), ckpt_path)
    if is_best:
        best_path = os.path.join(save_dir, stem + '_best_model.pth')
        shutil.copyfile(ckpt_path, best_path)
def swap_gauge(stdscr, pos_y, pos_x, size, mem_data):
    """Draw the swap usage gauge at (pos_y, pos_x).

    Shows used (red) and cached (yellow) fractions of total swap, with a
    'used/total' label on the right; renders as offline when no swap is
    configured (total == 0).
    """
    total = mem_data['tot']
    has_swap = total > 0
    if has_swap:
        segments = [
            (mem_data['used'] / total * 100.0, NColors.red()),
            (mem_data['cached'] / total * 100.0, NColors.yellow()),
        ]
    else:
        segments = []
    used_str = size_to_string(mem_data['used'], 'k')
    total_str = size_to_string(total, 'k')
    gauge_data = {
        'name': 'Swp',
        'color': NColors.cyan(),
        'values': segments,
        'online': has_swap,
        'mright': '{used}/{total}'.format(used=used_str, total=total_str),
    }
    basic_gauge(stdscr, pos_y, pos_x, size - 1, gauge_data)
class LegacyIndex(Index):
    """Index in the original DPR release format: an hnswSQ8 faiss index plus a
    pickled passage store, loaded from a local directory or a remote URL."""

    INDEX_FILENAME = 'hf_bert_base.hnswSQ8_correct_phi_128.c_index'
    PASSAGE_FILENAME = 'psgs_w100.tsv.pkl'

    def __init__(self, vector_size, index_path):
        self.index_id_to_db_id = []
        self.index_path = index_path
        self.passages = self._load_passages()
        self.vector_size = vector_size
        self.index = None
        self._index_initialized = False

    def _resolve_path(self, index_path, filename):
        """Resolve `filename` under `index_path` (local dir or remote URL) to a
        local file path, downloading through the cache if necessary.

        Raises:
            EnvironmentError: if the file cannot be found or downloaded.
        """
        assert (os.path.isdir(index_path) or is_remote_url(index_path)), 'Please specify a valid `index_path`.'
        archive_file = os.path.join(index_path, filename)
        try:
            resolved_archive_file = cached_path(archive_file)
        except EnvironmentError:
            # BUG FIX: the message previously interpolated a literal
            # "(unknown)" placeholder instead of the requested file name.
            msg = (
                f"Can't load '{archive_file}'. Make sure that:\n\n"
                f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
                f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
            )
            raise EnvironmentError(msg)
        if (resolved_archive_file == archive_file):
            logger.info(f'loading file {archive_file}')
        else:
            logger.info(f'loading file {archive_file} from cache at {resolved_archive_file}')
        return resolved_archive_file

    def _load_passages(self):
        """Load and return the pickled passage store."""
        logger.info(f'Loading passages from {self.index_path}')
        passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
        with open(passages_path, 'rb') as passages_file:
            passages = pickle.load(passages_file)
        return passages

    def _deserialize_index(self):
        """Read the serialized faiss index and its index-id -> db-id mapping."""
        logger.info(f'Loading index from {self.index_path}')
        resolved_index_path = self._resolve_path(self.index_path, (self.INDEX_FILENAME + '.index.dpr'))
        self.index = faiss.read_index(resolved_index_path)
        resolved_meta_path = self._resolve_path(self.index_path, (self.INDEX_FILENAME + '.index_meta.dpr'))
        with open(resolved_meta_path, 'rb') as metadata_file:
            self.index_id_to_db_id = pickle.load(metadata_file)
        assert (len(self.index_id_to_db_id) == self.index.ntotal), 'Deserialized index_id_to_db_id should match faiss index size'

    def is_initialized(self):
        return self._index_initialized

    def init_index(self):
        """Create the HNSW index shell, then populate it from disk."""
        # The +1 dimension holds the auxiliary component appended to queries
        # (see get_top_docs).
        index = faiss.IndexHNSWFlat((self.vector_size + 1), 512)
        index.hnsw.efSearch = 128
        index.hnsw.efConstruction = 200
        self.index = index
        self._deserialize_index()
        self._index_initialized = True

    def get_doc_dicts(self, doc_ids: np.array):
        """Return one {'title': [...], 'text': [...]} dict per row of doc_ids."""
        doc_list = []
        for doc_ids_i in doc_ids:
            ids = [str(int(doc_id)) for doc_id in doc_ids_i]
            docs = [self.passages[doc_id] for doc_id in ids]
            doc_list.append(docs)
        doc_dicts = []
        for docs in doc_list:
            doc_dict = {}
            # Each stored passage looks like (text, title, ...) given this
            # indexing — confirm against the passage pickle's layout.
            doc_dict['title'] = [doc[1] for doc in docs]
            doc_dict['text'] = [doc[0] for doc in docs]
            doc_dicts.append(doc_dict)
        return doc_dicts

    def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5) -> Tuple[np.ndarray, np.ndarray]:
        """Search the index; return (ids, vectors) for the top n_docs per query."""
        # Append a zero auxiliary dimension so queries match the
        # (vector_size + 1)-dimensional index.
        aux_dim = np.zeros(len(question_hidden_states), dtype='float32').reshape(-1, 1)
        query_nhsw_vectors = np.hstack((question_hidden_states, aux_dim))
        (_, docs_ids) = self.index.search(query_nhsw_vectors, n_docs)
        # Drop the auxiliary dimension when reconstructing stored vectors.
        vectors = [[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids]
        ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
        return (np.array(ids), np.array(vectors))
def seed_everything(seed, cudnn_deterministic=False):
    """Seed Python, NumPy and torch RNGs; optionally force deterministic cuDNN.

    Does nothing when seed is None. Enabling cudnn_deterministic trades
    training speed for reproducibility.
    """
    if seed is not None:
        print(f'Global seed set to {seed}')
        for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
            seeder(seed)
    if cudnn_deterministic:
        torch.backends.cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. This will turn on the CUDNN deterministic setting, which can slow down your training considerably! You may see unexpected behavior when restarting from checkpoints.')
class ISIC2018DatasetFast(Dataset):
    """In-memory ISIC-2018 skin-lesion segmentation dataset.

    All images and masks are prepared once by `PrepareISIC2018` and kept as
    tensors; `__getitem__` only slices, augments and normalizes one sample.
    Splits are fixed by index: the first `tr_length` samples are train ('tr'),
    the next 259 are validation ('vl'), the remainder test ('te').
    """

    def __init__(self, mode, data_dir=None, one_hot=True, image_size=224, aug=None, aug_empty=None, transform=None, img_transform=None, msk_transform=None, add_boundary_mask=False, add_boundary_dist=False, logger=None, **kwargs):
        # Route progress messages through the provided logger when available.
        self.print = (logger.info if logger else print)
        self.data_dir = (data_dir if data_dir else '/path/to/datasets/ISIC2018')
        self.one_hot = one_hot
        self.image_size = image_size
        # `aug` is applied to training samples; `aug_empty` to the other splits.
        self.aug = aug
        self.aug_empty = aug_empty
        self.transform = transform
        self.img_transform = img_transform
        self.msk_transform = msk_transform
        self.mode = mode
        # Optional extra mask channels: lesion boundary and distance-to-boundary.
        self.add_boundary_mask = add_boundary_mask
        self.add_boundary_dist = add_boundary_dist

        # Load the full dataset into memory once.
        data_preparer = PrepareISIC2018(data_dir=self.data_dir, image_size=self.image_size, logger=logger)
        data = data_preparer.get_data()
        (X, Y) = (data['x'], data['y'])
        X = torch.tensor(X)
        Y = torch.tensor(Y)

        # `data_scale` shrinks only the training split; validation stays at 259.
        if (kwargs.get('data_scale', 'full') == 'full'):
            (tr_length, vl_length) = (1815, 259)
        elif (kwargs.get('data_scale') == 'medium'):
            (tr_length, vl_length) = ((1815 // 2), 259)
        elif (kwargs.get('data_scale') == 'lite'):
            (tr_length, vl_length) = ((1815 // 5), 259)
        elif (kwargs.get('data_scale') == 'ultra-lite'):
            (tr_length, vl_length) = ((1815 // 10), 259)
        else:
            raise ValueError(f"the value of <data_scale> param ({kwargs.get('data_scale')}) is dataset is invalid. \nvalid in (full, medium, lite, ultra-lite)")

        if (mode == 'tr'):
            self.imgs = X[:tr_length]
            self.msks = Y[:tr_length]
        elif (mode == 'vl'):
            # Validation always starts at the full-scale train length (1815),
            # even when the training split itself was shrunk.
            self.imgs = X[1815:(1815 + vl_length)]
            self.msks = Y[1815:(1815 + vl_length)]
        elif (mode == 'te'):
            self.imgs = X[(1815 + vl_length):]
            self.msks = Y[(1815 + vl_length):]
        else:
            raise ValueError()

    def __len__(self):
        # Number of samples in the selected split.
        return len(self.imgs)

    def __getitem__(self, idx):
        """Return {'image', 'mask', 'id'} for one sample, with optional
        augmentation, one-hot mask encoding, boundary channels and
        normalization applied."""
        data_id = idx
        img = self.imgs[idx]
        msk = self.msks[idx]
        if self.one_hot:
            # Min-max scale the mask to [0, 1], then one-hot encode it
            # channel-first as float.
            msk = ((msk - msk.min()) / (msk.max() - msk.min()))
            msk = F.one_hot(torch.squeeze(msk).to(torch.int64))
            msk = torch.moveaxis(msk, (- 1), 0).to(torch.float)
        if self.aug:
            if (self.mode == 'tr'):
                # The augmenter expects uint8 HWC arrays (albumentations-style
                # call signature — confirm against the aug object passed in).
                img_ = np.uint8(torch.moveaxis((img * 255), 0, (- 1)).detach().numpy())
                msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
                augmented = self.aug(image=img_, mask=msk_)
                img = torch.moveaxis(torch.tensor(augmented['image'], dtype=torch.float32), (- 1), 0)
                msk = torch.moveaxis(torch.tensor(augmented['mask'], dtype=torch.float32), (- 1), 0)
            elif self.aug_empty:
                # Non-training splits get the "empty" (e.g. resize-only) pipeline.
                img_ = np.uint8(torch.moveaxis((img * 255), 0, (- 1)).detach().numpy())
                msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
                augmented = self.aug_empty(image=img_, mask=msk_)
                img = torch.moveaxis(torch.tensor(augmented['image'], dtype=torch.float32), (- 1), 0)
                msk = torch.moveaxis(torch.tensor(augmented['mask'], dtype=torch.float32), (- 1), 0)
        # Guard against NaNs introduced upstream, then rescale.
        # NOTE(review): indentation reconstructed from a flattened source —
        # confirm these four lines sit at method level, not inside `if self.aug`.
        img = img.nan_to_num(127)
        img = normalize(img)
        msk = msk.nan_to_num(0)
        msk = normalize(msk)
        if (self.add_boundary_mask or self.add_boundary_dist):
            msk_ = np.uint8(torch.moveaxis((msk * 255), 0, (- 1)).detach().numpy())
            if self.add_boundary_mask:
                # Extra channel: the lesion boundary (Canny edges of the mask).
                boundary_mask = calc_edge(msk_, mode='canny')
                msk = torch.concatenate([msk, torch.tensor(boundary_mask).unsqueeze(0)], dim=0)
            if self.add_boundary_dist:
                boundary_mask = (boundary_mask if self.add_boundary_mask else calc_edge(msk_, mode='canny'))
                distance_map = calc_distance_map(boundary_mask, mode='l2')
                # Normalize by the image diagonal (side * sqrt(2)), clip far
                # distances, and invert so the boundary itself is brightest.
                distance_map = (distance_map / (self.image_size * 1.4142))
                distance_map = np.clip(distance_map, a_min=0, a_max=0.2)
                distance_map = ((1 - np_normalize(distance_map)) * 255)
                msk = torch.concatenate([msk, torch.tensor(distance_map).unsqueeze(0)], dim=0)
        if self.img_transform:
            img = self.img_transform(img)
        if self.msk_transform:
            msk = self.msk_transform(msk)
        # Final NaN guards before handing the sample to the model.
        img = img.nan_to_num(0.5)
        msk = msk.nan_to_num((- 1))
        sample = {'image': img, 'mask': msk, 'id': data_id}
        return sample
class CaptureBase(abc.ABC, Generic[AnyStr]):
    """Abstract interface for capturing the output of one file descriptor.

    Concrete subclasses implement the capture lifecycle (start/done),
    temporary suspension (suspend/resume), pass-through writes to the
    original stream (writeorg), and draining captured data (snap).
    """

    # Value representing "nothing captured" for this stream type (str or bytes).
    EMPTY_BUFFER: AnyStr

    def __init__(self, fd: int) -> None:
        raise NotImplementedError()

    def start(self) -> None:
        """Begin capturing the file descriptor."""
        raise NotImplementedError()

    def done(self) -> None:
        """Stop capturing and restore the original file descriptor."""
        raise NotImplementedError()

    def suspend(self) -> None:
        """Temporarily stop capturing without tearing down."""
        raise NotImplementedError()

    def resume(self) -> None:
        """Resume capturing after a suspend()."""
        raise NotImplementedError()

    def writeorg(self, data: AnyStr) -> None:
        """Write data to the original (uncaptured) stream."""
        raise NotImplementedError()

    def snap(self) -> AnyStr:
        """Return and reset the captured content."""
        raise NotImplementedError()
def get_patterns(graph_file, base_pattern):
    """Pair the base pattern with every other prompt pattern in the graph.

    Returns a list of [base_pattern, other_pattern] two-element lists; the
    base pattern itself is excluded, tolerating ' .' vs '.' spacing before
    the final period.
    """
    patterns_graph = read_graph(graph_file)
    prompts = [node.lm_pattern for node in list(patterns_graph.nodes)]
    normalized_base = base_pattern.replace(' .', '.')
    others = [p for p in prompts if p.replace(' .', '.') != normalized_base]
    # Exactly one prompt (the base) must have been filtered out.
    assert (len(prompts) - 1) == len(others)
    return [list(pair) for pair in itertools.product([base_pattern], others)]
@pytest.mark.needs_connection
def test_HITRAN_molecules_list(verbose=True, *args, **kwargs):
    """Check that RADIS's hardcoded HITRAN molecule list matches the list
    fetched live from the HITRAN website."""
    # NOTE(review): the marker above was garbled to a bare `.needs_connection`
    # by an extraction step; restored assuming a pytest marker — confirm
    # against the project's conftest.
    from radis.db.classes import HITRAN_MOLECULES
    from radis.misc.basics import compare_lists

    molecules = fetch_HITRAN_molecules()
    if verbose:
        print('HITRAN molecules, fetched online ')
        print(molecules)
        print('Comparing Radis hardcoded HITRAN molecules list to the HITRAN website: ')
    # compare_lists returns 1 only when both lists match exactly.
    assert (compare_lists(HITRAN_MOLECULES, molecules, l1_str='Radis molecules', l2_str='Fetched from HITRAN website', print_index=True) == 1)
def _create_unless(terminals, g_regex_flags, re_, use_bytes):
    """Build 'unless' callbacks for regex terminals shadowed by string literals.

    For every regex terminal, collect the equal-priority string terminals that
    the regex matches in full; those literals get matched first by a dedicated
    whole-match scanner. String terminals whose flags are subsumed by the
    regex terminal's flags are removed from the terminal list entirely.
    Returns (remaining_terminals, callbacks_by_terminal_name).
    """
    by_pattern_type = classify(terminals, lambda t: type(t.pattern))
    assert len(by_pattern_type) <= 2, by_pattern_type.keys()

    shadowed = set()
    callbacks = {}
    for regex_tok in by_pattern_type.get(PatternRE, []):
        unless = []
        for str_tok in by_pattern_type.get(PatternStr, []):
            if str_tok.priority != regex_tok.priority:
                continue
            literal = str_tok.pattern.value
            # Does the regex terminal match this literal string in full?
            if literal == _get_match(re_, regex_tok.pattern.to_regexp(), literal, g_regex_flags):
                unless.append(str_tok)
                if str_tok.pattern.flags <= regex_tok.pattern.flags:
                    shadowed.add(str_tok)
        if unless:
            callbacks[regex_tok.name] = UnlessCallback(
                Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)
            )

    remaining = [t for t in terminals if t not in shadowed]
    return (remaining, callbacks)
class SingleAvatarPromotion(Struct):
    """One avatar promotion tier: its cost, level caps/gates, and the base and
    per-level-growth stat values in effect at this tier."""

    AvatarID: int
    # Promotion tier index this record describes.
    Promotion: int
    PromotionCostList: List[PromotionCost]
    MaxLevel: int
    # Unlock requirements; None when the tier has no such gate —
    # presumably early tiers, verify against the source data.
    PlayerLevelRequire: Union[(int, None)]
    WorldLevelRequire: Union[(int, None)]
    # Paired Base/Add entries: base stat value and its per-level increment.
    AttackBase: PromotionAttr
    AttackAdd: PromotionAttr
    DefenceBase: PromotionAttr
    DefenceAdd: PromotionAttr
    HPBase: PromotionAttr
    HPAdd: PromotionAttr
    SpeedBase: PromotionAttr
    CriticalChance: PromotionAttr
    CriticalDamage: PromotionAttr
    BaseAggro: PromotionAttr
class BddOptions(SolverOptions):
    """Options for the CUDD-based BDD solver.

    Supported solver_options:
      - static_ordering: list of Boolean symbols fixing the initial variable order
      - dynamic_reordering: bool, enable CUDD automatic reordering
      - reordering_algorithm: one of the CUDD_REORDER_* constants
        (requires dynamic_reordering=True)
    """

    # Symbolic names for CUDD's reordering algorithms (values 1..22).
    CUDD_ALL_REORDERING_ALGORITHMS = range(1, 23)
    (CUDD_REORDER_SAME,
     CUDD_REORDER_NONE,
     CUDD_REORDER_RANDOM,
     CUDD_REORDER_RANDOM_PIVOT,
     CUDD_REORDER_SIFT,
     CUDD_REORDER_SIFT_CONVERGE,
     CUDD_REORDER_SYMM_SIFT,
     CUDD_REORDER_SYMM_SIFT_CONV,
     CUDD_REORDER_WINDOW2,
     CUDD_REORDER_WINDOW3,
     CUDD_REORDER_WINDOW4,
     CUDD_REORDER_WINDOW2_CONV,
     CUDD_REORDER_WINDOW3_CONV,
     CUDD_REORDER_WINDOW4_CONV,
     CUDD_REORDER_GROUP_SIFT,
     CUDD_REORDER_GROUP_SIFT_CONV,
     CUDD_REORDER_ANNEALING,
     CUDD_REORDER_GENETIC,
     CUDD_REORDER_LINEAR,
     CUDD_REORDER_LINEAR_CONVERGE,
     CUDD_REORDER_LAZY_SIFT,
     CUDD_REORDER_EXACT) = CUDD_ALL_REORDERING_ALGORITHMS

    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)
        if (self.random_seed is not None):
            raise PysmtValueError("'random_seed' option not supported.")
        if (self.unsat_cores_mode is not None):
            raise PysmtValueError("'unsat_cores_mode' option not supported.")

        for (k, v) in self.solver_options.items():
            if (k == 'static_ordering'):
                if (v is not None):
                    # A valid ordering is any iterable of Boolean symbols.
                    try:
                        valid = all((x.is_symbol(types.BOOL) for x in v))
                    except Exception:  # narrowed from a bare `except:`
                        valid = False
                    if (not valid):
                        raise PysmtValueError('The BDD static ordering must be a list of Boolean variables')
            elif (k == 'dynamic_reordering'):
                if (v not in (True, False)):
                    # BUG FIX: the %-args were swapped (key printed as the
                    # value and vice versa).
                    raise PysmtValueError(("Invalid value %s for '%s'" % (str(v), str(k))))
            elif (k == 'reordering_algorithm'):
                if (v not in BddOptions.CUDD_ALL_REORDERING_ALGORITHMS):
                    # BUG FIX: same swapped %-args as above.
                    raise PysmtValueError(("Invalid value %s for '%s'" % (str(v), str(k))))
            else:
                raise PysmtValueError(("Unrecognized option '%s'." % k))
            setattr(self, k, v)

        # Defaults for options that were not provided.
        if (not hasattr(self, 'dynamic_reordering')):
            self.dynamic_reordering = False
        if (not hasattr(self, 'reordering_algorithm')):
            if (not self.dynamic_reordering):
                self.reordering_algorithm = None
            else:
                self.reordering_algorithm = BddOptions.CUDD_REORDER_SIFT
        if (not hasattr(self, 'static_ordering')):
            self.static_ordering = None
        # A reordering algorithm only takes effect with dynamic reordering on.
        if ((not self.dynamic_reordering) and (self.reordering_algorithm is not None)):
            raise PysmtValueError('reordering_algorithm requires dynamic_reordering.')

    def __call__(self, solver):
        """Apply these options to a solver instance."""
        if (self.static_ordering is not None):
            # Declaring variables in order fixes the initial BDD ordering.
            for var in self.static_ordering:
                solver.declare_variable(var)
        if self.dynamic_reordering:
            solver.ddmanager.AutodynEnable(self.reordering_algorithm)
        else:
            solver.ddmanager.AutodynDisable()
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """Run hyperparameter search with Ray Tune and return the best run.

    Args:
        trainer: Trainer whose `hp_space`/`compute_objective` drive the search.
        n_trials: number of trials to sample.
        direction: 'minimize' or 'maximize' (first three chars used as mode).
        **kwargs: forwarded to `ray.tune.run` (resources_per_trial,
            progress_reporter, keep_checkpoints_num, scheduler, ...).
    """
    import functools

    import ray

    def _objective(trial, local_trainer, checkpoint_dir=None):
        # Runs inside a Ray worker: train, then report the objective.
        try:
            from transformers.utils.notebook import NotebookProgressCallback
            # Notebook progress bars don't render in Ray workers; swap them out.
            if local_trainer.pop_callback(NotebookProgressCallback):
                local_trainer.add_callback(ProgressCallback)
        except ModuleNotFoundError:
            pass
        checkpoint = None
        if checkpoint_dir:
            for subdir in os.listdir(checkpoint_dir):
                if subdir.startswith(PREFIX_CHECKPOINT_DIR):
                    checkpoint = os.path.join(checkpoint_dir, subdir)
        local_trainer.objective = None
        local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
        # If there was no in-training evaluation, evaluate once now and report.
        if (getattr(local_trainer, 'objective', None) is None):
            metrics = local_trainer.evaluate()
            local_trainer.objective = local_trainer.compute_objective(metrics)
            local_trainer._tune_save_checkpoint()
            ray.tune.report(objective=local_trainer.objective, **metrics, done=True)

    if (not trainer._memory_tracker.skip_memory_metrics):
        from .trainer_utils import TrainerMemoryTracker
        logger.warning('Memory tracking for your Trainer is currently enabled. Automatically disabling the memory tracker since the memory tracker is not serializable.')
        trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)

    # The TensorBoard writer does not pickle; detach it and re-attach after the
    # search. The model is dropped so the trainer itself stays serializable.
    _tb_writer = trainer.pop_callback(TensorBoardCallback)
    trainer.model = None

    if ('resources_per_trial' not in kwargs):
        kwargs['resources_per_trial'] = {'cpu': 1}
        if (trainer.args.n_gpu > 0):
            kwargs['resources_per_trial']['gpu'] = 1
        resource_msg = ('1 CPU' + (' and 1 GPU' if (trainer.args.n_gpu > 0) else ''))
        logger.info(f'No `resources_per_trial` arg was passed into `hyperparameter_search`. Setting it to a default value of {resource_msg} for each trial.')
    gpus_per_trial = kwargs['resources_per_trial'].get('gpu', 0)
    trainer.args._n_gpu = gpus_per_trial

    if ('progress_reporter' not in kwargs):
        from ray.tune import CLIReporter
        kwargs['progress_reporter'] = CLIReporter(metric_columns=['objective'])
    if (('keep_checkpoints_num' in kwargs) and (kwargs['keep_checkpoints_num'] > 0)):
        trainer.use_tune_checkpoints = True
        if (kwargs['keep_checkpoints_num'] > 1):
            logger.warning(f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. Checkpoints are usually huge, consider setting `keep_checkpoints_num=1`.")
    if ('scheduler' in kwargs):
        from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
        # PBT needs checkpoints to exploit other trials' configurations.
        if isinstance(kwargs['scheduler'], PopulationBasedTraining):
            if (not trainer.use_tune_checkpoints):
                logger.warning("You are using PopulationBasedTraining but you haven't enabled checkpointing. This means your trials will train from scratch everytime they are exploiting new configurations. Consider enabling checkpointing by passing `keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`.")
        # Early-stopping schedulers require intermediate evaluation reports.
        if (isinstance(kwargs['scheduler'], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)) and ((not trainer.args.do_eval) or (trainer.args.evaluation_strategy == IntervalStrategy.NO))):
            raise RuntimeError("You are using {cls} as a scheduler but you haven't enabled evaluation during training. This means your trials will not report intermediate results to Ray Tune, and can thus not be stopped early or used to exploit other trials parameters. If this is what you want, do not use {cls}. If you would like to use {cls}, make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the Trainer `args`.".format(cls=type(kwargs['scheduler']).__name__))

    trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)

    # NOTE(review): this decorator was garbled to a bare `(trainable)` by an
    # extraction step; restored to @functools.wraps as in the upstream source.
    @functools.wraps(trainable)
    def dynamic_modules_import_trainable(*args, **kwargs):
        """Wrapper that re-registers `datasets` dynamic modules inside each Ray
        worker process before running the trainable."""
        if is_datasets_available():
            import datasets.load
            dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), '__init__.py')
            spec = importlib.util.spec_from_file_location('datasets_modules', dynamic_modules_path)
            datasets_modules = importlib.util.module_from_spec(spec)
            sys.modules[spec.name] = datasets_modules
            spec.loader.exec_module(datasets_modules)
        return trainable(*args, **kwargs)

    # functools.wraps doesn't copy Ray's mixin attribute; carry it over by hand.
    if hasattr(trainable, '__mixins__'):
        dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__

    analysis = ray.tune.run(dynamic_modules_import_trainable, config=trainer.hp_space(None), num_samples=n_trials, **kwargs)
    best_trial = analysis.get_best_trial(metric='objective', mode=direction[:3], scope=trainer.args.ray_scope)
    best_run = BestRun(best_trial.trial_id, best_trial.last_result['objective'], best_trial.config, analysis)
    if (_tb_writer is not None):
        trainer.add_callback(_tb_writer)
    return best_run
def save_command_run_params(args):
    """Persist the parsed arguments (args.json) and the exact shell command
    (command.sh) into the output directory `args.out`."""
    out_dir = args.out
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, 'args.json'), 'w') as f:
        json.dump(args.__dict__, f)
    with open(os.path.join(out_dir, 'command.sh'), 'w') as f:
        f.write(' '.join(sys.argv) + '\n')
def get_runtime_info(parsed=None):
    """Collect a snapshot of the running tool, interpreter and platform.

    `version` and `api_version` are module-level constants of this tool;
    `parsed` is included verbatim under exec.parsed.
    """
    exec_info = {
        'version': version,
        'api_version': api_version,
        'argv': sys.argv,
        'parsed': parsed,
    }
    python_info = {
        'name': sys.implementation.name,
        'executable': sys.executable,
        'version': platform.python_version(),
    }
    system_info = {
        'platform': platform.platform(),
        'fs_encoding': sys.getfilesystemencoding(),
    }
    return {'exec': exec_info, 'python': python_info, 'system': system_info}