def initialize_prom_client(distribution, prometheus_url, prometheus_bearer_token):
    global prom_cli
    prometheus_url, prometheus_bearer_token = instance(distribution, prometheus_url, prometheus_bearer_token)
    if prometheus_url and prometheus_bearer_token:
        bearer = 'Bearer ' + prometheus_bearer_token
        headers = {'Authorization': bearer}
        try:
            prom_cli = prometheus_api_client.PrometheusConnect(url=prometheus_url, headers=headers, disable_ssl=True)
        except Exception as e:
            logging.error('Not able to initialize the client %s' % e)
            sys.exit(1)
    else:
        prom_cli = None
@contextlib.contextmanager  # the bare (yield) in try/finally implies this decorator was stripped
def disable_import(prefix):
    # Temporarily replace builtins.__import__ so imports under `prefix` fail.
    realimport = builtins.__import__

    def my_import(name, *args, **kwargs):
        if name.startswith(prefix):
            raise ImportError
        return realimport(name, *args, **kwargs)

    try:
        builtins.__import__ = my_import
        yield
    finally:
        builtins.__import__ = realimport
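A quick usage sketch for `disable_import` above (assumes `builtins` and `contextlib` are imported where the helper lives): inside the `with` block, any import matching the prefix raises ImportError, which is handy for testing optional-dependency fallbacks.

with disable_import('numpy'):
    try:
        import numpy  # blocked by the context manager
    except ImportError:
        print('numpy import blocked as expected')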
class Explara(object):
    def __init__(self, access_token):
        self.access_token = access_token
        self.headers = {'Authorization': 'Bearer ' + self.access_token}
        # NOTE: the API base URL literal was cut off in the source; it must be
        # a format string with one placeholder for the endpoint name.
        self.base_url = ''

    def get_events(self):
        events = requests.post(self.base_url.format('get-all-events'), headers=self.headers).json()
        return [{'title': e.get('eventTitle'), 'eventId': e.get('eventId')} for e in events.get('events')]

    def get_ticket_types(self, explara_eventid):
        ticket_types = requests.post(self.base_url.format('get-tickets'), headers=self.headers, data={'eventId': explara_eventid}).json()
        return ticket_types

    def get_orders(self, explara_eventid):
        ticket_orders = []
        completed = False
        from_record = 0
        to_record = 50
        while not completed:
            payload = {'eventId': explara_eventid, 'fromRecord': from_record, 'toRecord': to_record}
            attendee_response = requests.post(self.base_url.format('attendee-list'), headers=self.headers, data=payload).json()
            if not attendee_response.get('attendee'):
                completed = True
            elif isinstance(attendee_response.get('attendee'), list):
                ticket_orders.extend(attendee_response.get('attendee'))
            elif isinstance(attendee_response.get('attendee'), dict):
                ticket_orders.extend(attendee_response.get('attendee').values())
            print('Synced {} records'.format(to_record))
            from_record = to_record
            to_record += 50
        return ticket_orders
def test_fixtures_nose_setup_issue8394(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
        def setup_module():
            pass

        def teardown_module():
            pass

        def setup_function(func):
            pass

        def teardown_function(func):
            pass

        def test_world():
            pass

        class Test(object):
            def setup_class(cls):
                pass

            def teardown_class(cls):
                pass

            def setup_method(self, meth):
                pass

            def teardown_method(self, meth):
                pass

            def test_method(self):
                pass
        """
    )
    match = '*no docstring available*'
    result = pytester.runpytest('--fixtures')
    assert result.ret == 0
    result.stdout.no_fnmatch_line(match)
    result = pytester.runpytest('--fixtures', '-v')
    assert result.ret == 0
    result.stdout.fnmatch_lines([match, match, match, match])
def add_new_params(old_grid, new_grid, old_name, new_name):
    if new_grid:
        new_params = set(new_grid.keys())
        old_params = set(old_grid.keys())
        if len(old_params.intersection(new_params)) > 0:
            raise ValueError('Overlap in parameters between {} and {} of the chosen pipeline.'.format(old_name, new_name))
        old_grid.update(new_grid)
    return
@pytest.mark.usefixtures('session_app_data')
def test_pick_periodic_update(tmp_path, mocker, for_py_version):
    embed, current = get_embed_wheel('setuptools', '3.6'), get_embed_wheel('setuptools', for_py_version)
    mocker.patch('virtualenv.seed.wheels.bundle.load_embed_wheel', return_value=embed)
    completed = datetime.now(tz=timezone.utc) - timedelta(days=29)
    u_log = UpdateLog(
        started=datetime.now(tz=timezone.utc) - timedelta(days=30),
        completed=completed,
        versions=[NewVersion(filename=current.path, found_date=completed, release_date=completed, source='periodic')],
        periodic=True,
    )
    read_dict = mocker.patch('virtualenv.app_data.via_disk_folder.JSONStoreDisk.read', return_value=u_log.to_dict())
    result = cli_run([str(tmp_path), '--activators', '', '--no-periodic-update', '--no-wheel', '--no-pip', '--setuptools', 'bundle', '--wheel', 'bundle'])
    assert read_dict.call_count == 1
    installed = [i.name for i in result.creator.purelib.iterdir() if i.suffix == '.dist-info']
    assert f'setuptools-{current.version}.dist-info' in installed
def get_time_display(prev_record: Optional[Record], record: Record) -> Tuple[str, str, str]:
    time_absolute = record.timestamp.isoformat()
    time_color = ''
    if prev_record:
        time_display, delta_seconds = nice_time_diff(prev_record.timestamp, record.timestamp)
        if delta_seconds <= 10:
            if delta_seconds < 0.0001:
                time_color_value = COLORMAP[0]
            elif delta_seconds < 1:
                duration_value = delta_seconds * 1000000 / 100
                time_color_value = COLORMAP[int(log10(duration_value) / 4 * 255)]
            else:
                time_color_value = COLORMAP[-1]
            time_color = f'color: {time_color_value}'
    else:
        time_display = time_absolute
    return time_absolute, time_color, time_display
class Effect5870(BaseEffect):
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Shield Operation'),
            'shieldBonus',
            ship.getModifiedItemAttr('shipBonusCI2'),
            skill='Caldari Hauler',
            **kwargs,
        )
def get_total_memory(unit='G', number_only=False, init_pid=None):
    from pyrl.utils.data import num_to_str
    if init_pid is None:
        init_pid = os.getpid()
    process = psutil.Process(init_pid)
    ret = process.memory_full_info().uss
    for proc in process.children():
        process_info = proc.memory_full_info()
        ret += process_info.uss
    return num_to_str(ret, unit, number_only=number_only)
def test_indent(caplog):
    caplog.set_level(logging.INFO)
    lg = logger.copy()
    nesting = lg.nesting
    name = uniqstr()
    with lg.indent():
        lg.report('make', name)
    assert any(
        match_report(r, activity='make', content=name, spacing=ReportFormatter.SPACING * (nesting + 2))
        for r in caplog.records
    )
    count = 5
    name = uniqstr()
    with lg.indent(count):
        lg.report('make', name)
    assert any(
        match_report(r, activity='make', content=name, spacing=ReportFormatter.SPACING * (nesting + count + 1))
        for r in caplog.records
    )
    count = 3
    name = uniqstr()
    with lg.indent(count):
        lg.info(name)
    logs = caplog.text
    assert ReportFormatter.SPACING * (nesting + count) + name in logs
def first_stage():
    ql = Qiling(['rootfs/8086/doogie/doogie.DOS_MBR'], 'rootfs/8086', console=False)
    ql.add_fs_mapper(128, QlDisk('rootfs/8086/doogie/doogie.DOS_MBR', 128))
    ql.os.set_api((26, 4), set_required_datetime, QL_INTERCEPT.EXIT)
    hk = ql.hook_code(stop, begin=32792, end=32792)
    ql.run()
    ql.hook_del(hk)
    return ql
def _toWindowsPath(p):
    pp = p.split('/')
    if getFoamRuntime() == 'BashWSL':
        if p.startswith('/mnt/'):
            # /mnt/c/path -> C:\path (str.upper; str has no .toupper method)
            return pp[2].upper() + ':\\' + '\\'.join(pp[3:])
        else:
            return p.replace('/', '\\')
    elif getFoamRuntime() == 'BlueCFD':
        if p.startswith('/home/ofuser/blueCFD'):
            return getFoamDir() + '\\' + '..' + '\\' + '\\'.join(pp[4:])
        elif p.startswith('/'):
            return pp[1].upper() + ':\\' + '\\'.join(pp[2:])
        else:
            return p.replace('/', '\\')
    else:
        return p
def _check_resultsets_equal(res1, res2):
    try:
        assert np.allclose(res1.species, res2.species)
    except ValueError:
        pass
    assert np.allclose(res1.tout, res2.tout)
    assert np.allclose(res1.param_values, res2.param_values)
    if isinstance(res1.initials, np.ndarray):
        assert np.allclose(res1.initials, res2.initials)
    else:
        for k, v in res1.initials.items():
            # compare against res2; the original compared res1 to itself
            assert np.allclose(v, res2.initials[k])
    assert np.allclose(res1._yobs_view, res2._yobs_view)
    if res1._model.expressions_dynamic():
        assert np.allclose(res1._yexpr_view, res2._yexpr_view)
    assert res1.squeeze == res2.squeeze
    assert res1.simulator_class == res2.simulator_class
    assert res1.init_kwargs == res2.init_kwargs
    assert res1.run_kwargs == res2.run_kwargs
    assert res1.n_sims_per_parameter_set == res2.n_sims_per_parameter_set
    assert res1._model.name == res2._model.name
    assert res1.timestamp == res2.timestamp
    assert res1.pysb_version == res2.pysb_version
    assert res1.custom_attrs == res2.custom_attrs
def download_file(url, path):
    print('Downloading: {} (into {})'.format(url, path))
    progress = [0, 0]

    def report(count, size, total):
        progress[0] = count * size
        if progress[0] - progress[1] > 1000000:
            progress[1] = progress[0]
            print('Downloaded {:,}/{:,} ...'.format(progress[1], total))

    dest, _ = urlretrieve(url, path, reporthook=report)
    return dest
class ViewRecords(db.Model, AuditTimeMixin):
    __tablename__ = 'tb_view_record'
    id = db.Column(db.Integer, primary_key=True)
    domain_name = db.Column(db.String(256), nullable=False)
    record = db.Column(db.String(256), nullable=False)
    record_type = db.Column(db.String(32), nullable=False)
    ttl = db.Column(db.Integer, nullable=False, default=60)
    property = db.Column(db.String(256), default='none')
    zone_name = db.Column(db.String(50), nullable=False)
class Migration(migrations.Migration):
    dependencies = [('proposals', '0027_auto__0540')]
    operations = [
        migrations.AlterField(
            model_name='historicalproposal',
            name='video_url',
            field=models.URLField(blank=True, default='', help_text='Short 1-2 min video describing your talk'),
        ),
        migrations.AlterField(
            model_name='proposal',
            name='video_url',
            field=models.URLField(blank=True, default='', help_text='Short 1-2 min video describing your talk'),
        ),
    ]
def test_feature_all_scenarios(mocker):
    feature = Feature(1, 'Feature', 'I am a feature', 'foo.feature', 1, tags=None)
    feature.scenarios.extend([mocker.MagicMock(id=1), mocker.MagicMock(id=2)])
    feature.scenarios.append(mocker.MagicMock(spec=ScenarioOutline, id=3, scenarios=[mocker.MagicMock(id=4), mocker.MagicMock(id=5)]))
    feature.scenarios.append(mocker.MagicMock(spec=ScenarioLoop, id=6, scenarios=[mocker.MagicMock(id=7), mocker.MagicMock(id=8)]))
    all_scenarios = feature.all_scenarios
    assert len(all_scenarios) == 8
    # outlines and loops are flattened in declaration order, ids 1..8
    for idx, expected_id in enumerate(range(1, 9)):
        assert all_scenarios[idx].id == expected_id
class CORALRegularizer(Regularizer):
    def __init__(self, l=1):
        self.uses_learning_phase = 1
        self.l = l

    def set_layer(self, layer):
        self.layer = layer

    def __call__(self, loss):
        if not hasattr(self, 'layer'):
            raise Exception('Need to call `set_layer` on CORALRegularizer instance before calling the instance.')
        regularizer_loss = loss
        sim = 0
        if len(self.layer.inbound_nodes) > 1:
            sim = coral(self.layer.get_output_at(0), self.layer.get_output_at(1))
        add_loss = K.switch(K.equal(len(self.layer.inbound_nodes), 2), sim, 0)
        regularizer_loss += self.l * add_loss
        return K.in_train_phase(regularizer_loss, loss)

    def get_config(self):
        return {'name': self.__class__.__name__, 'l': float(self.l)}
def decimal_to_binary(decimal_val, max_num_digits=20, fractional_part_only=False):
    decimal_val_fractional_part = abs(decimal_val - int(decimal_val))
    current_binary_position_val = 1 / 2
    binary_fractional_part_digits = []
    while decimal_val_fractional_part >= 0 and len(binary_fractional_part_digits) < max_num_digits:
        if decimal_val_fractional_part >= current_binary_position_val:
            binary_fractional_part_digits.append('1')
            decimal_val_fractional_part -= current_binary_position_val
        else:
            binary_fractional_part_digits.append('0')
        current_binary_position_val /= 2
    binary_repr_fractional_part = ''.join(binary_fractional_part_digits)
    if fractional_part_only:
        return binary_repr_fractional_part
    return binary_repr(int(decimal_val)) + '.' + binary_repr_fractional_part
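A small sanity check for `decimal_to_binary` above (assumes `binary_repr` is numpy's `binary_repr`, imported in the same module): 2.625 is 10.101 in binary (2 + 1/2 + 1/8), so three fractional digits reproduce it exactly.

print(decimal_to_binary(2.625, max_num_digits=3))                             # -> '10.101'
print(decimal_to_binary(2.625, max_num_digits=3, fractional_part_only=True))  # -> '101'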
def rtn_sprintf(se: 'SymbolicExecutor', pstate: 'ProcessState'):
    logger.debug('sprintf hooked')
    buff = pstate.get_argument_value(0)
    arg0 = pstate.get_argument_value(1)
    try:
        arg0f = pstate.get_format_string(arg0)
        nbArgs = arg0f.count('{')
        args = pstate.get_format_arguments(arg0, [pstate.get_argument_value(x) for x in range(2, nbArgs + 2)])
        s = arg0f.format(*args)
    except Exception:  # narrowed from a bare except
        logger.warning('Something wrong, probably UTF-8 string')
        s = ''
    return len(s)
def test_horovod_example(start_ray_client_server_2_cpus):
    assert ray.util.client.ray.is_connected()
    from ray_lightning.examples.ray_horovod_example import train_mnist
    data_dir = os.path.join(tempfile.gettempdir(), 'mnist_data_')
    config = {'layer_1': 32, 'layer_2': 64, 'lr': 0.1, 'batch_size': 32}
    train_mnist(config, data_dir, num_epochs=1, num_workers=1, use_gpu=False)
class CommonBaseTesting(CommonBase):
    def __init__(self, parent, id=None, *args, **kwargs):
        if 'test' in kwargs:
            self.test = kwargs.pop('test')
        super().__init__(*args, **kwargs)
        self.parent = parent
        self.id = id
        self.args = args
        self.kwargs = kwargs

    def wait_for(self, query_delay=0):
        pass

    def write(self, command):
        self.parent.write(command)

    def read(self):
        return self.parent.read()
class Simple_Header_TestCase(ParserTest):
    def __init__(self, *args, **kwargs):
        ParserTest.__init__(self, *args, **kwargs)
        self.ks = """
%pre-install --interpreter /usr/bin/python --erroronfail --log=/tmp/blah
ls /tmp
%end
"""

    def runTest(self):
        self.parser.readKickstartFromString(self.ks)
        self.assertEqual(len(self.handler.scripts), 1)
        script = self.handler.scripts[0]
        self.assertEqual(script.interp, '/usr/bin/python')
        self.assertFalse(script.inChroot)
        self.assertTrue(script.errorOnFail)
        self.assertEqual(script.lineno, 2)
        self.assertEqual(script.type, constants.KS_SCRIPT_PREINSTALL)
        self.assertEqual(script.logfile, '/tmp/blah')
        self.assertEqual(script.script.rstrip(), 'ls /tmp')
def get_cql_models(app, connection=None, keyspace=None):
    from .models import DjangoCassandraModel
    models = []
    single_cassandra_connection = len(list(get_cassandra_connections())) == 1
    is_default_connection = connection == DEFAULT_DB_ALIAS or single_cassandra_connection
    for name, obj in inspect.getmembers(app):
        cql_model_types = (cqlengine.models.Model, DjangoCassandraModel)
        if inspect.isclass(obj) and issubclass(obj, cql_model_types) and not obj.__abstract__:
            if (obj.__connection__ == connection
                    or (obj.__connection__ is None and is_default_connection)
                    or (obj.__connection__ is None and obj.__keyspace__ is not None and obj.__keyspace__ == keyspace)):
                models.append(obj)
    return models
class KeywordProbInferenceDataset(InferenceDataset): def __init__(self, features: Dict, transforms: Dict, keyword_prob: str, load_into_mem: bool=False, audio_ids: List=None, threshold: Union[(float, str)]=None): super().__init__(features, transforms, load_into_mem=load_into_mem, audio_ids=audio_ids) self.aid_to_h5['keyword'] = load_dict_from_csv(keyword_prob, ('audio_id', 'hdf5_path')) self.threshold = threshold def load_keyword(self, audio_id): keyword = read_from_h5(audio_id, self.aid_to_h5['keyword'], self.dataset_cache) if (self.threshold is not None): if isinstance(self.threshold, float): keyword = np.where((keyword < self.threshold), 0, 1) elif isinstance(self.threshold, str): if self.threshold.startswith('top'): k = int(self.threshold[3:]) ind = keyword.argsort() keyword[ind[(- k):]] = 1.0 keyword[ind[:(- k)]] = 0.0 else: (threshold, topk) = self.threshold.split('_') threshold = float(threshold) onehot = np.where((keyword < threshold), 0, 1) k = int(topk[3:]) if (np.where((onehot == 1))[0].shape[0] > k): ind = keyword.argsort() keyword[ind[(- k):]] = 1.0 keyword[ind[:(- k)]] = 0.0 else: keyword = onehot return keyword def __getitem__(self, index): output = super().__getitem__(index) audio_id = output['audio_id'] output['keyword'] = self.load_keyword(audio_id) return output
class Decoder(nn.Module):
    def __init__(self, d_model, d_ff, d_k, d_v, n_layers, n_heads, len_q):
        super(Decoder, self).__init__()
        self.layers = nn.ModuleList([DecoderLayer(d_model, d_ff, d_k, d_v, n_heads, len_q) for _ in range(n_layers)])

    def forward(self, dec_inputs, enc_outputs):
        dec_outputs = dec_inputs
        dec_enc_attns = []
        for layer in self.layers:
            dec_outputs, dec_enc_attn = layer(dec_outputs, enc_outputs)
            dec_enc_attns.append(dec_enc_attn)
        return dec_outputs
def error_description(error_code):
    err_message = {
        'notLink': "Check the 'link' parameter (Empty or bad)",
        'notDebrid': 'Maybe the filehoster is down or the link is not online',
        'badFileUrl': 'The link format is not valid',
        'hostNotValid': 'The filehoster is not supported',
        'notFreeHost': 'This filehoster is not available for the free member',
        'disabledHost': 'The filehoster is disabled',
        'noGetFilename': 'Unable to retrieve the file name',
        'maxLink': 'Limitation of number links per day reached',
        'maxLinkHost': 'Limitation of number links per day for this host reached',
        'notAddTorrent': 'Unable to add the torrent, check url',
        'torrentTooBig': 'The torrent is too big or has too many files',
        'maxTorrent': 'Limitation of torrents per day reached',
    }.get(error_code)
    return err_message or "Unknown error: '{}'".format(error_code)
class Observer:
    def __init__(self):
        self.id = rpc.get_worker_info().id
        self.env = gym.make('CartPole-v1')
        self.env.reset(seed=args.seed)

    def run_episode(self, agent_rref, n_steps):
        state, ep_reward = self.env.reset()[0], 0
        for step in range(n_steps):
            action = _remote_method(Agent.select_action, agent_rref, self.id, state)
            state, reward, terminated, truncated, _ = self.env.step(action)
            _remote_method(Agent.report_reward, agent_rref, self.id, reward)
            if terminated or truncated:
                break
def tscore(sample1, sample2):
    if len(sample1) != len(sample2):
        raise ValueError('different number of values')
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
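A minimal usage sketch for `tscore` above (assumes `pooled_sample_variance`, `statistics`, and `math` are available in the same module; both samples must be equally sized):

a = [10.0, 10.2, 9.9, 10.1]
b = [10.5, 10.6, 10.4, 10.7]
print(tscore(a, b))  # negative t statistic, since sample `a` has the smaller mean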
class CapturableArgumentParser(argparse.ArgumentParser):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        self.stdout = kwargs.pop('stdout', sys.stdout)
        self.stderr = kwargs.pop('stderr', sys.stderr)
        super().__init__(*args, **kwargs)

    def print_usage(self, file: IO[str] | None = None) -> None:
        if file is None:
            file = self.stdout
        self._print_message(self.format_usage(), file)

    def print_help(self, file: IO[str] | None = None) -> None:
        if file is None:
            file = self.stdout
        self._print_message(self.format_help(), file)

    def _print_message(self, message: str, file: IO[str] | None = None) -> None:
        if message:
            if file is None:
                file = self.stderr
            file.write(message)

    def exit(self, status: int = 0, message: str | None = None) -> NoReturn:
        if message:
            self._print_message(message, self.stderr)
        sys.exit(status)

    def error(self, message: str) -> NoReturn:
        self.print_usage(self.stderr)
        args = {'prog': self.prog, 'message': message}
        self.exit(2, gettext('%(prog)s: error: %(message)s\n') % args)
@pytest.mark.usefixtures('repo_with_no_tags_angular_commits')
def test_errors_when_config_file_invalid_configuration(cli_runner: 'CliRunner', update_pyproject_toml: 'UpdatePyprojectTomlFn'):
    update_pyproject_toml('tool.semantic_release.remote.type', 'invalidType')
    result = cli_runner.invoke(main, ['--config', 'pyproject.toml', 'version'])
    stderr_lines = result.stderr.splitlines()
    assert result.exit_code == 1
    assert '1 validation error for RawConfig' in stderr_lines[0]
    assert 'remote.type' in stderr_lines[1]
class ReadHoldingRegistersRequest(ReadRegistersRequestBase):
    function_code = 3
    function_code_name = 'read_holding_registers'

    def __init__(self, address=None, count=None, slave=0, **kwargs):
        super().__init__(address, count, slave, **kwargs)

    def execute(self, context):
        if not 1 <= self.count <= 125:
            return self.doException(merror.IllegalValue)
        if not context.validate(self.function_code, self.address, self.count):
            return self.doException(merror.IllegalAddress)
        values = context.getValues(self.function_code, self.address, self.count)
        if isinstance(values, ExceptionResponse):
            return values
        return ReadHoldingRegistersResponse(values)
@pytest.mark.end_to_end()
@pytest.mark.parametrize('flag', ['-e', '--exclude'])
@pytest.mark.parametrize('pattern', ['*_1.txt', 'to_be_deleted_file_[1]*'])
def test_clean_with_excluded_file_via_config(project, runner, flag, pattern):
    project.joinpath('pyproject.toml').write_text(
        f"""[tool.pytask.ini_options]
exclude = [{pattern!r}]"""
    )
    result = runner.invoke(cli, ['clean', flag, pattern, project.as_posix()])
    assert result.exit_code == ExitCode.OK
    text_without_linebreaks = result.output.replace('\n', '')
    assert 'to_be_deleted_file_1.txt' not in text_without_linebreaks
    assert 'to_be_deleted_file_2.txt' in text_without_linebreaks
    assert 'pyproject.toml' in text_without_linebreaks
class AutoencoderTinyBlock(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, act_fn: str):
        super().__init__()
        act_fn = get_activation(act_fn)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
        )
        self.skip = (
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
            if in_channels != out_channels
            else nn.Identity()
        )
        self.fuse = nn.ReLU()

    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))
class SKOptLearner(Optimizer, BaseLearner):
    def __init__(self, function, **kwargs):
        self.function = function
        self.pending_points = set()
        self.data = collections.OrderedDict()
        self._kwargs = kwargs
        super().__init__(**kwargs)

    def new(self) -> SKOptLearner:
        return SKOptLearner(self.function, **self._kwargs)

    def tell(self, x, y, fit=True):
        if isinstance(x, collections.abc.Iterable):
            self.pending_points.discard(tuple(x))
            self.data[tuple(x)] = y
            super().tell(x, y, fit)
        else:
            self.pending_points.discard(x)
            self.data[x] = y
            super().tell([x], y, fit)

    def tell_pending(self, x):
        self.pending_points.add(tuple(x))

    def remove_unfinished(self):
        pass

    @cache_latest  # assumed: the stray `_latest` token was the tail of this stripped decorator
    def loss(self, real=True):
        if not self.models:
            return np.inf
        model = self.models[-1]
        return 1 - model.score(self.Xi, self.yi)

    def ask(self, n, tell_pending=True):
        if not tell_pending:
            raise NotImplementedError('Asking points is an irreversible action, so use `ask(n, tell_pending=True)`.')
        points = super().ask(n)
        if self.space.n_dims > 1:
            return points, [self.loss() / n] * n
        return [p[0] for p in points], [self.loss() / n] * n

    @property  # assumed: decorators were stripped; `npoints` reads as a property
    def npoints(self):
        return len(self.Xi)

    def plot(self, nsamples=200):
        hv = ensure_holoviews()
        if self.space.n_dims > 1:
            raise ValueError('Can only plot 1D functions')
        bounds = self.space.bounds[0]
        if not self.Xi:
            p = hv.Scatter([]) * hv.Curve([]) * hv.Area([])
        else:
            scatter = hv.Scatter(([p[0] for p in self.Xi], self.yi))
            if self.models:
                model = self.models[-1]
                xs = np.linspace(*bounds, nsamples)
                xsp = self.space.transform(xs.reshape(-1, 1).tolist())
                y_pred, sigma = model.predict(xsp, return_std=True)
                curve = hv.Curve((xs, y_pred)).opts(line_dash='dashed')
                area = hv.Area(
                    (xs, y_pred - 1.96 * sigma, y_pred + 1.96 * sigma),
                    vdims=['y', 'y2'],
                ).opts(alpha=0.5, line_alpha=0)
            else:
                area = hv.Area([])
                curve = hv.Curve([])
            p = scatter * curve * area
        margin = 0.05 * (bounds[1] - bounds[0])
        plot_bounds = (bounds[0] - margin, bounds[1] + margin)
        return p.redim(x={'range': plot_bounds})

    def _get_data(self):
        return [x[0] for x in self.Xi], self.yi

    def _set_data(self, data):
        xs, ys = data
        self.tell_many(xs, ys)

    def to_dataframe(self, with_default_function_args: bool = True, function_prefix: str = 'function.', seed_name: str = 'seed', y_name: str = 'y') -> pandas.DataFrame:
        raise NotImplementedError

    def load_dataframe(self, df: pandas.DataFrame, with_default_function_args: bool = True, function_prefix: str = 'function.', seed_name: str = 'seed', y_name: str = 'y'):
        raise NotImplementedError
class PoseResNet(nn.Module): def __init__(self, block, layers, cfg, **kwargs): self.inplanes = 64 self.deconv_with_bias = cfg.POSE_RESNET.DECONV_WITH_BIAS self.input_channels = (kwargs['input_channels'] if ('input_channels' in kwargs) else cfg.POSE_RESNET.INPUT_CHANNELS) self.keep_scale = (('keep_scale' in kwargs) and kwargs['keep_scale']) super(PoseResNet, self).__init__() self.conv1 = (nn.Conv2d(self.input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False) if (not self.keep_scale) else nn.Conv2d(self.input_channels, 64, kernel_size=7, stride=1, padding=3, bias=False)) self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM) self.relu = nn.ReLU(inplace=True) self.maxpool = (nn.MaxPool2d(kernel_size=3, stride=2, padding=1) if (not self.keep_scale) else (lambda x: x)) self.layer1 = self._make_layer(block, 64, layers[0]) self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.deconv_layers = self._make_deconv_layer(cfg.POSE_RESNET.NUM_DECONV_LAYERS, cfg.POSE_RESNET.NUM_DECONV_FILTERS, cfg.POSE_RESNET.NUM_DECONV_KERNELS) self.final_layer = nn.Conv2d(in_channels=cfg.POSE_RESNET.NUM_DECONV_FILTERS[(- 1)], out_channels=cfg.NETWORK.NUM_JOINTS, kernel_size=cfg.POSE_RESNET.FINAL_CONV_KERNEL, stride=1, padding=(1 if (cfg.POSE_RESNET.FINAL_CONV_KERNEL == 3) else 0)) def _make_layer(self, block, planes, blocks, stride=1): downsample = None if ((stride != 1) or (self.inplanes != (planes * block.expansion))): downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM)) layers = [] layers.append(block(self.inplanes, planes, stride, downsample)) self.inplanes = (planes * block.expansion) for i in range(1, blocks): layers.append(block(self.inplanes, planes)) return nn.Sequential(*layers) def _get_deconv_cfg(self, deconv_kernel, index): if (deconv_kernel == 4): padding = 1 output_padding = 0 elif (deconv_kernel == 3): padding = 1 output_padding = 1 elif (deconv_kernel == 2): padding = 0 output_padding = 0 return (deconv_kernel, padding, output_padding) def _make_deconv_layer(self, num_layers, num_filters, num_kernels): assert (num_layers == len(num_filters)), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' assert (num_layers == len(num_kernels)), 'ERROR: num_deconv_layers is different len(num_deconv_filters)' layers = [] for i in range(num_layers): (kernel, padding, output_padding) = self._get_deconv_cfg(num_kernels[i], i) planes = num_filters[i] layers.append(nn.ConvTranspose2d(in_channels=self.inplanes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias)) layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)) layers.append(nn.ReLU(inplace=True)) self.inplanes = planes return nn.Sequential(*layers) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.deconv_layers(x) x = self.final_layer(x) return x def init_weights(self, pretrained=''): this_dir = os.path.dirname(__file__) pretrained = os.path.join(this_dir, '../..', pretrained) if os.path.isfile(pretrained): pretrained_state_dict = torch.load(pretrained, map_location=torch.device('cpu')) logger.info('=> loading pretrained models {}'.format(pretrained)) 
model_state_dict = self.state_dict() self.load_state_dict(pretrained_state_dict, strict=False) logger.info('=> init deconv weights from normal distribution') for (name, m) in self.deconv_layers.named_modules(): if isinstance(m, nn.ConvTranspose2d): logger.info('=> init {}.weight as normal(0, 0.001)'.format(name)) logger.info('=> init {}.bias as 0'.format(name)) nn.init.normal_(m.weight, std=0.001) if self.deconv_with_bias: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.BatchNorm2d): logger.info('=> init {}.weight as 1'.format(name)) logger.info('=> init {}.bias as 0'.format(name)) nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) logger.info('=> init final conv weights from normal distribution') for m in self.final_layer.modules(): if isinstance(m, nn.Conv2d): logger.info('=> init {}.weight as normal(0, 0.001)'.format(name)) logger.info('=> init {}.bias as 0'.format(name)) nn.init.normal_(m.weight, std=0.001) nn.init.constant_(m.bias, 0) else: logger.info('=> init weights from normal distribution') for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight, std=0.001) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.ConvTranspose2d): nn.init.normal_(m.weight, std=0.001) if self.deconv_with_bias: nn.init.constant_(m.bias, 0)
class Battleship(commands.Cog): def __init__(self, bot: Bot): self.bot = bot self.games: list[Game] = [] self.waiting: list[discord.Member] = [] def predicate(self, ctx: commands.Context, announcement: discord.Message, reaction: discord.Reaction, user: discord.Member) -> bool: if self.already_playing(ctx.author): return True if ((user.id not in (ctx.me.id, ctx.author.id)) and (str(reaction.emoji) == HAND_RAISED_EMOJI) and (reaction.message.id == announcement.id)): if self.already_playing(user): self.bot.loop.create_task(ctx.send(f"{user.mention} You're already playing a game!")) self.bot.loop.create_task(announcement.remove_reaction(reaction, user)) return False if (user in self.waiting): self.bot.loop.create_task(ctx.send(f'{user.mention} Please cancel your game first before joining another one.')) self.bot.loop.create_task(announcement.remove_reaction(reaction, user)) return False return True if ((user.id == ctx.author.id) and (str(reaction.emoji) == CROSS_EMOJI) and (reaction.message.id == announcement.id)): return True return False def already_playing(self, player: discord.Member) -> bool: return any(((player in (game.p1.user, game.p2.user)) for game in self.games)) (invoke_without_command=True) async def battleship(self, ctx: commands.Context) -> None: if self.already_playing(ctx.author): (await ctx.send("You're already playing a game!")) return if (ctx.author in self.waiting): (await ctx.send("You've already sent out a request for a player 2.")) return announcement = (await ctx.send(f'''**Battleship**: A new game is about to start! Press {HAND_RAISED_EMOJI} to play against {ctx.author.mention}! (Cancel the game with {CROSS_EMOJI}.)''')) self.waiting.append(ctx.author) (await announcement.add_reaction(HAND_RAISED_EMOJI)) (await announcement.add_reaction(CROSS_EMOJI)) try: (reaction, user) = (await self.bot.wait_for('reaction_add', check=partial(self.predicate, ctx, announcement), timeout=60.0)) except TimeoutError: self.waiting.remove(ctx.author) (await announcement.delete()) (await ctx.send(f"{ctx.author.mention} Seems like there's no one here to play...")) return if (str(reaction.emoji) == CROSS_EMOJI): self.waiting.remove(ctx.author) (await announcement.delete()) (await ctx.send(f'{ctx.author.mention} Game cancelled.')) return (await announcement.delete()) self.waiting.remove(ctx.author) if self.already_playing(ctx.author): return game = Game(self.bot, ctx.channel, ctx.author, user) self.games.append(game) try: (await game.start_game()) self.games.remove(game) except discord.Forbidden: (await ctx.send(f'{ctx.author.mention} {user.mention} Game failed. This is likely due to you not having your DMs open. Check and try again.')) self.games.remove(game) except Exception: (await ctx.send(f'{ctx.author.mention} {user.mention} An error occurred. Game failed.')) self.games.remove(game) raise (name='ships', aliases=('boats',)) async def battleship_ships(self, ctx: commands.Context) -> None: embed = discord.Embed(colour=Colours.blue) embed.add_field(name='Name', value='\n'.join(SHIPS)) embed.add_field(name='Size', value='\n'.join((str(size) for size in SHIPS.values()))) (await ctx.send(embed=embed))
class TestEncodedId:
    def test_init_str(self):
        obj = utils.EncodedId('Hello')
        assert 'Hello' == obj
        assert 'Hello' == str(obj)
        assert 'Hello' == f'{obj}'
        assert isinstance(obj, utils.EncodedId)
        obj = utils.EncodedId('this/is a/path')
        assert 'this%2Fis%20a%2Fpath' == str(obj)
        assert 'this%2Fis%20a%2Fpath' == f'{obj}'
        assert isinstance(obj, utils.EncodedId)

    def test_init_int(self):
        obj = utils.EncodedId(23)
        assert '23' == obj
        assert '23' == f'{obj}'
        assert isinstance(obj, utils.EncodedId)

    def test_init_invalid_type_raises(self):
        with pytest.raises(TypeError):
            utils.EncodedId(None)

    def test_init_encodeid_str(self):
        value = 'Goodbye'
        obj_init = utils.EncodedId(value)
        obj = utils.EncodedId(obj_init)
        assert value == str(obj)
        assert value == f'{obj}'
        value = 'we got/a/path'
        expected = 'we%20got%2Fa%2Fpath'
        obj_init = utils.EncodedId(value)
        assert expected == str(obj_init)
        assert expected == f'{obj_init}'
        obj = utils.EncodedId(utils.EncodedId(utils.EncodedId(utils.EncodedId(utils.EncodedId(obj_init)))))
        assert expected == str(obj)
        assert expected == f'{obj}'
        obj2 = obj
        assert expected == str(obj2)
        assert expected == f'{obj2}'

    def test_init_encodeid_int(self):
        value = 23
        expected = f'{value}'
        obj_init = utils.EncodedId(value)
        obj = utils.EncodedId(obj_init)
        assert expected == str(obj)
        assert expected == f'{obj}'

    def test_json_serializable(self):
        obj = utils.EncodedId('someone')
        assert '"someone"' == json.dumps(obj)
        obj = utils.EncodedId('we got/a/path')
        assert '"we%20got%2Fa%2Fpath"' == json.dumps(obj)
class Solution(object):
    def getSum(self, a, b):
        # Add two integers without `+`, using 32-bit two's-complement carries.
        import ctypes
        carry = ctypes.c_int32(b)
        while carry.value != 0:
            partial = a ^ carry.value               # sum without carry bits
            carry = ctypes.c_int32(a & carry.value)
            carry.value <<= 1                       # carry shifted into position
            a = partial
        return a                                    # also correct when b == 0
class Softplus(UnaryScalarOp): def static_impl(x): not_int8 = (str(getattr(x, 'dtype', '')) not in ('int8', 'uint8')) if (x < (- 37.0)): return (np.exp(x) if not_int8 else np.exp(x, signature='f')) elif (x < 18.0): return (np.log1p(np.exp(x)) if not_int8 else np.log1p(np.exp(x, signature='f'))) elif (x < 33.3): return ((x + np.exp((- x))) if not_int8 else (x + np.exp((- x), signature='f'))) else: return x def impl(self, x): return Softplus.static_impl(x) def grad(self, inp, grads): (x,) = inp (gz,) = grads return [(gz * sigmoid(x))] def c_code(self, node, name, inp, out, sub): (x,) = inp (z,) = out if (node.inputs[0].type in float_types): if (node.inputs[0].type == float64): return dedent(f''' {z} = ( {x} < -37.0 ? exp({x}) : {x} < 18.0 ? log1p(exp({x})) : {x} < 33.3 ? {x} + exp(-{x}) : {x} ); ''') else: return dedent(f''' {z} = ( {x} < -37.0f ? exp({x}) : {x} < 18.0f ? log1p(exp({x})) : {x} < 33.3f ? {x} + exp(-{x}) : {x} ); ''') else: raise NotImplementedError('only floatingpoint is implemented') def c_code_cache_version(self): v = super().c_code_cache_version() if v: return ((3,) + v) else: return v
def test_a_decorated_singleton_should_not_override_a_child_provider():
    parent_injector = Injector()
    provided_instance = SingletonB()

    class MyModule(Module):
        def provide_name(self) -> SingletonB:
            return provided_instance

    child_injector = parent_injector.create_child_injector([MyModule])
    assert child_injector.get(SingletonB) is provided_instance
    assert parent_injector.get(SingletonB) is not provided_instance
    assert parent_injector.get(SingletonB) is parent_injector.get(SingletonB)
def make_reader():
    field_names = None

    def read_line(line):
        nonlocal field_names
        print('\t\t\t\t\t', line)
        reader = csv.reader([line])
        row = next(reader)
        if field_names is None:
            field_names = row
            return None
        return dict(zip(field_names, row))

    return read_line
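A short usage sketch for `make_reader` above (assumes `csv` is imported where it is defined): the first call consumes the header and returns None, later calls return dicts keyed by those header fields, which the closure keeps between calls.

reader = make_reader()
reader('name,age')         # header row, returns None
print(reader('alice,30'))  # -> {'name': 'alice', 'age': '30'}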
def test__symmetrical_torque_driven_ocp__symmetry_by_constraint():
    from bioptim.examples.symmetrical_torque_driven_ocp import symmetry_by_constraint as ocp_module
    bioptim_folder = os.path.dirname(ocp_module.__file__)
    ocp_module.prepare_ocp(
        biorbd_model_path=bioptim_folder + '/models/cubeSym.bioMod',
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
class Zabbix:
    def __init__(self, server, user, password, verify=True):
        self.server = server
        self.user = user
        self.password = password
        s = requests.Session()
        s.auth = (user, password)
        self.zapi = ZabbixAPI(server, s)
        self.zapi.session.verify = verify
        self.zapi.login(user, password)
        self.version = self.get_version()

    @_safe()  # assumed: the bare `_safe(...)` tokens were stripped decorators
    def get_version(self):
        version = self.zapi.apiinfo.version()
        return version

    @_safe({})
    def get_trigger(self, triggerid):
        trigger = self.zapi.trigger.get(expandComment='true', expandDescription='true', triggerids=triggerid)
        return trigger[0]

    @_safe({})
    def get_event(self, triggerid):
        zbx_event = self.zapi.event.get(select_acknowledges='extend', expandDescription='true', object=0, value=1, objectids=triggerid)
        if len(zbx_event) >= 1:
            return zbx_event[-1]
        return zbx_event

    @_safe([])
    def get_itservices(self, root=None):
        if root:
            root_service = self.zapi.service.get(selectDependencies='extend', filter={'name': root})
            try:
                root_service = root_service[0]
            except IndexError:
                logging.error('Can not find "{}" service in Zabbix'.format(root))
                sys.exit(1)
            service_ids = [dependency['serviceid'] for dependency in root_service['dependencies']]
            services = self.zapi.service.get(selectDependencies='extend', serviceids=service_ids)
        else:
            services = self.zapi.service.get(selectDependencies='extend', output='extend')
        if not services:
            logging.error('Can not find any child service for "{}"'.format(root))
            return []
        known_ids = []
        service_tree = [i for i in services if i['dependencies']]
        for idx, service in enumerate(service_tree):
            child_services_ids = [dependency['serviceid'] for dependency in service['dependencies']]
            child_services = self.zapi.service.get(selectDependencies='extend', serviceids=child_services_ids)
            service_tree[idx]['dependencies'] = child_services
            known_ids = known_ids + child_services_ids
            known_ids.append(service['serviceid'])
        singers_services = [i for i in services if i['serviceid'] not in known_ids]
        if singers_services:
            service_tree = service_tree + singers_services
        return service_tree
class SawyerDoorCloseEnvV2(SawyerDoorEnvV2):
    def __init__(self):
        goal_low = (0.2, 0.65, 0.1499)
        goal_high = (0.3, 0.75, 0.1501)
        super().__init__()
        self.init_config = {
            'obj_init_angle': 0.3,
            'obj_init_pos': np.array([0.1, 0.95, 0.15], dtype=np.float32),
            'hand_init_pos': np.array([-0.5, 0.6, 0.2], dtype=np.float32),
        }
        self.goal = np.array([0.2, 0.8, 0.15])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def reset_model(self):
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        if self.random_init:
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos
            goal_pos = obj_pos.copy() + np.array([0.2, -0.2, 0.0])
            self._target_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('door')] = self.obj_init_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        self._set_obj_xyz(-1.5708)
        return self._get_obs()

    @_assert_task_is_set  # assumed: the bare token was this stripped decorator
    def evaluate_state(self, obs, action):
        reward, obj_to_target, in_place = self.compute_reward(action, obs)
        info = {
            'obj_to_target': obj_to_target,
            'in_place_reward': in_place,
            'success': float(obj_to_target <= 0.08),
            'near_object': 0.0,
            'grasp_success': 1.0,
            'grasp_reward': 1.0,
            'unscaled_reward': reward,
        }
        return reward, info

    def compute_reward(self, actions, obs):
        _TARGET_RADIUS = 0.05
        tcp = self.tcp_center
        obj = obs[4:7]
        target = self._target_pos
        tcp_to_target = np.linalg.norm(tcp - target)
        tcp_to_obj = np.linalg.norm(tcp - obj)
        obj_to_target = np.linalg.norm(obj - target)
        in_place_margin = np.linalg.norm(self.obj_init_pos - target)
        in_place = reward_utils.tolerance(obj_to_target, bounds=(0, _TARGET_RADIUS), margin=in_place_margin, sigmoid='gaussian')
        hand_margin = np.linalg.norm(self.hand_init_pos - obj) + 0.1
        hand_in_place = reward_utils.tolerance(tcp_to_target, bounds=(0, 0.25 * _TARGET_RADIUS), margin=hand_margin, sigmoid='gaussian')
        reward = 3 * hand_in_place + 6 * in_place
        if obj_to_target < _TARGET_RADIUS:
            reward = 10
        return [reward, obj_to_target, hand_in_place]
def get_dependency_urls(package_name, dependency_list_file):
    url_delimiter = '-f '
    dependency_file_full_path = prepend_bin_path(package_name, dependency_list_file)
    dependency_list_array = open(dependency_file_full_path).read().splitlines()
    dependency_urls_list = []
    for dependency_line in dependency_list_array:
        if dependency_line.strip().startswith('#'):
            continue
        dependency_line_split_list = dependency_line.split(url_delimiter)
        if len(dependency_line_split_list) > 1:
            dependency_url = dependency_line_split_list[1].strip()
            if dependency_url not in dependency_urls_list:
                dependency_urls_list.append(dependency_url)
    return dependency_urls_list
class LoggingPlugin(): def __init__(self, config: Config) -> None: self._config = config self.formatter = self._create_formatter(get_option_ini(config, 'log_format'), get_option_ini(config, 'log_date_format'), get_option_ini(config, 'log_auto_indent')) self.log_level = get_log_level_for_setting(config, 'log_level') self.caplog_handler = LogCaptureHandler() self.caplog_handler.setFormatter(self.formatter) self.report_handler = LogCaptureHandler() self.report_handler.setFormatter(self.formatter) self.log_file_level = get_log_level_for_setting(config, 'log_file_level', 'log_level') log_file = (get_option_ini(config, 'log_file') or os.devnull) if (log_file != os.devnull): directory = os.path.dirname(os.path.abspath(log_file)) if (not os.path.isdir(directory)): os.makedirs(directory) self.log_file_handler = _FileHandler(log_file, mode='w', encoding='UTF-8') log_file_format = get_option_ini(config, 'log_file_format', 'log_format') log_file_date_format = get_option_ini(config, 'log_file_date_format', 'log_date_format') log_file_formatter = DatetimeFormatter(log_file_format, datefmt=log_file_date_format) self.log_file_handler.setFormatter(log_file_formatter) self.log_cli_level = get_log_level_for_setting(config, 'log_cli_level', 'log_level') if self._log_cli_enabled(): terminal_reporter = config.pluginmanager.get_plugin('terminalreporter') assert (terminal_reporter is not None) capture_manager = config.pluginmanager.get_plugin('capturemanager') self.log_cli_handler: Union[(_LiveLoggingStreamHandler, _LiveLoggingNullHandler)] = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) else: self.log_cli_handler = _LiveLoggingNullHandler() log_cli_formatter = self._create_formatter(get_option_ini(config, 'log_cli_format', 'log_format'), get_option_ini(config, 'log_cli_date_format', 'log_date_format'), get_option_ini(config, 'log_auto_indent')) self.log_cli_handler.setFormatter(log_cli_formatter) self._disable_loggers(loggers_to_disable=config.option.logger_disable) def _disable_loggers(self, loggers_to_disable: List[str]) -> None: if (not loggers_to_disable): return for name in loggers_to_disable: logger = logging.getLogger(name) logger.disabled = True def _create_formatter(self, log_format, log_date_format, auto_indent): color = getattr(self._config.option, 'color', 'no') if ((color != 'no') and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_format)): formatter: logging.Formatter = ColoredLevelFormatter(create_terminal_writer(self._config), log_format, log_date_format) else: formatter = DatetimeFormatter(log_format, log_date_format) formatter._style = PercentStyleMultiline(formatter._style._fmt, auto_indent=auto_indent) return formatter def set_log_path(self, fname: str) -> None: fpath = Path(fname) if (not fpath.is_absolute()): fpath = (self._config.rootpath / fpath) if (not fpath.parent.exists()): fpath.parent.mkdir(exist_ok=True, parents=True) stream: io.TextIOWrapper = fpath.open(mode='w', encoding='UTF-8') old_stream = self.log_file_handler.setStream(stream) if old_stream: old_stream.close() def _log_cli_enabled(self) -> bool: enabled = ((self._config.getoption('--log-cli-level') is not None) or self._config.getini('log_cli')) if (not enabled): return False terminal_reporter = self._config.pluginmanager.get_plugin('terminalreporter') if (terminal_reporter is None): return False return True (wrapper=True, tryfirst=True) def pytest_sessionstart(self) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('sessionstart') with catching_logs(self.log_cli_handler, 
level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): return (yield) (wrapper=True, tryfirst=True) def pytest_collection(self) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('collection') with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): return (yield) (wrapper=True) def pytest_runtestloop(self, session: Session) -> Generator[(None, object, object)]: if session.config.option.collectonly: return (yield) if (self._log_cli_enabled() and (self._config.getoption('verbose') < 1)): self._config.option.verbose = 1 with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): return (yield) def pytest_runtest_logstart(self) -> None: self.log_cli_handler.reset() self.log_cli_handler.set_when('start') def pytest_runtest_logreport(self) -> None: self.log_cli_handler.set_when('logreport') def _runtest_for(self, item: nodes.Item, when: str) -> Generator[(None, None, None)]: with catching_logs(self.caplog_handler, level=self.log_level) as caplog_handler, catching_logs(self.report_handler, level=self.log_level) as report_handler: caplog_handler.reset() report_handler.reset() item.stash[caplog_records_key][when] = caplog_handler.records item.stash[caplog_handler_key] = caplog_handler try: (yield) finally: log = report_handler.stream.getvalue().strip() item.add_report_section(when, 'log', log) (wrapper=True) def pytest_runtest_setup(self, item: nodes.Item) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('setup') empty: Dict[(str, List[logging.LogRecord])] = {} item.stash[caplog_records_key] = empty (yield from self._runtest_for(item, 'setup')) (wrapper=True) def pytest_runtest_call(self, item: nodes.Item) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('call') (yield from self._runtest_for(item, 'call')) (wrapper=True) def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('teardown') try: (yield from self._runtest_for(item, 'teardown')) finally: del item.stash[caplog_records_key] del item.stash[caplog_handler_key] def pytest_runtest_logfinish(self) -> None: self.log_cli_handler.set_when('finish') (wrapper=True, tryfirst=True) def pytest_sessionfinish(self) -> Generator[(None, None, None)]: self.log_cli_handler.set_when('sessionfinish') with catching_logs(self.log_cli_handler, level=self.log_cli_level): with catching_logs(self.log_file_handler, level=self.log_file_level): return (yield) def pytest_unconfigure(self) -> None: self.log_file_handler.close()
def check_improper_torsion(improper: Tuple[int, int, int, int], molecule: 'Ligand') -> Tuple[int, int, int, int]:
    for atom_index in improper:
        try:
            atom = molecule.atoms[atom_index]
            bonded_atoms = set()
            for bonded in atom.bonds:
                bonded_atoms.add(bonded)
            if len(bonded_atoms.intersection(set(improper))) == 3:
                if improper[0] == atom_index:
                    return improper
                return (atom_index, *atom.bonds)
        except IndexError:
            continue
    raise TopologyMismatch(f'The improper {improper} is not valid for this molecule.')
def preprocess_bilingual_corpora(args: argparse.Namespace, source_dict: Dictionary, char_source_dict: Optional[Dictionary], target_dict: Dictionary, char_target_dict: Optional[Dictionary]): embed_bytes = getattr(args, 'embed_bytes', False) if args.train_source_text_file: args.train_source_binary_path = binarize_text_file(text_file=args.train_source_text_file, dictionary=source_dict, output_path=args.train_source_binary_path, append_eos=args.append_eos_to_source, reverse_order=args.reverse_source, embed_bytes=embed_bytes, char_dictionary=char_source_dict) if args.eval_source_text_file: args.eval_source_binary_path = binarize_text_file(text_file=args.eval_source_text_file, dictionary=source_dict, output_path=args.eval_source_binary_path, append_eos=args.append_eos_to_source, reverse_order=args.reverse_source, embed_bytes=embed_bytes, char_dictionary=char_source_dict) if args.train_target_text_file: args.train_target_binary_path = binarize_text_file(text_file=args.train_target_text_file, dictionary=target_dict, output_path=args.train_target_binary_path, append_eos=True, reverse_order=False, embed_bytes=embed_bytes, char_dictionary=char_target_dict) if args.eval_target_text_file: args.eval_target_binary_path = binarize_text_file(text_file=args.eval_target_text_file, dictionary=target_dict, output_path=args.eval_target_binary_path, append_eos=True, reverse_order=False, embed_bytes=embed_bytes, char_dictionary=char_target_dict)
class BridgeTowerImageProcessingTester(unittest.TestCase): def __init__(self, parent, do_resize: bool=True, size: Dict[(str, int)]=None, size_divisor: int=32, do_rescale: bool=True, rescale_factor: Union[(int, float)]=(1 / 255), do_normalize: bool=True, do_center_crop: bool=True, image_mean: Optional[Union[(float, List[float])]]=[0., 0.4578275, 0.], image_std: Optional[Union[(float, List[float])]]=[0., 0., 0.], do_pad: bool=True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3): self.parent = parent self.do_resize = do_resize self.size = (size if (size is not None) else {'shortest_edge': 288}) self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_center_crop = do_center_crop self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution def prepare_image_processor_dict(self): return {'image_mean': self.image_mean, 'image_std': self.image_std, 'do_normalize': self.do_normalize, 'do_resize': self.do_resize, 'size': self.size, 'size_divisor': self.size_divisor} def get_expected_values(self, image_inputs, batched=False): if (not batched): size = self.size['shortest_edge'] image = image_inputs[0] if isinstance(image, Image.Image): (w, h) = image.size else: (h, w) = (image.shape[1], image.shape[2]) scale = (size / min(w, h)) if (h < w): (newh, neww) = (size, (scale * w)) else: (newh, neww) = ((scale * h), size) max_size = int(((1333 / 800) * size)) if (max(newh, neww) > max_size): scale = (max_size / max(newh, neww)) newh = (newh * scale) neww = (neww * scale) (newh, neww) = (int((newh + 0.5)), int((neww + 0.5))) (expected_height, expected_width) = (((newh // self.size_divisor) * self.size_divisor), ((neww // self.size_divisor) * self.size_divisor)) else: expected_values = [] for image in image_inputs: (expected_height, expected_width) = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=(lambda item: item[0]))[0] expected_width = max(expected_values, key=(lambda item: item[1]))[1] return (expected_height, expected_width)
class HotpotStratifiedBinaryQuestionParagraphPairsDataset(QuestionAndParagraphsDataset): def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher, fixed_dataset=False, sample_seed=18, add_gold_distractor=True): self.questions = questions self.batcher = batcher self.fixed_dataset = fixed_dataset self.add_gold_distractor = add_gold_distractor self.random = np.random.RandomState(seed=sample_seed) self.gold_samples = self._build_gold_samples() self.epoch_samples = None def _build_gold_samples(self): gold_samples = [] for question in self.questions: pars = [flatten_iterable(question.supporting_facts[0].sentences), flatten_iterable(question.supporting_facts[1].sentences)] self.random.shuffle(pars) gold_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, pars, 1, num_distractors=0, question_id=question.question_id, q_type=question.q_type)) return gold_samples def get_batches(self, n_batches): if (len(self) < n_batches): raise ValueError() return itertools.islice(self.get_epoch(new_epoch=False), n_batches) def get_samples(self, n_samples: int): n_batches = self.batcher.epoch_size(n_samples) self.get_epoch() return (self.batcher.get_epoch(self.random.choice(self.epoch_samples, n_samples, replace=False)), n_batches) def get_epoch(self, new_epoch=True): if self.fixed_dataset: new_epoch = False if ((not new_epoch) and (self.epoch_samples is not None)): return self.batcher.get_epoch(self.epoch_samples) false_samples = [] for question in self.questions: two_distractors = [flatten_iterable(x.sentences) for x in self.random.choice(question.distractors, size=2, replace=False)] true_and_false_1 = [flatten_iterable(question.supporting_facts[0].sentences), two_distractors[0]] true_and_false_2 = [flatten_iterable(question.supporting_facts[1].sentences), two_distractors[1]] self.random.shuffle(true_and_false_1) self.random.shuffle(true_and_false_2) false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, true_and_false_1, 0, num_distractors=1, question_id=question.question_id, q_type=question.q_type)) false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, true_and_false_2, 0, num_distractors=1, question_id=question.question_id, q_type=question.q_type)) false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, [flatten_iterable(x.sentences) for x in self.random.choice(question.distractors, size=2, replace=False)], 0, num_distractors=2, question_id=question.question_id, q_type=question.q_type)) if self.add_gold_distractor: rand_q_idx = self.random.randint(len(self.gold_samples)) while (self.gold_samples[rand_q_idx].question_id == question.question_id): rand_q_idx = self.random.randint(len(self.gold_samples)) selected_q = self.gold_samples[rand_q_idx] self.random.shuffle(selected_q.paragraphs) false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, selected_q.paragraphs, label=0, num_distractors=2, question_id=question.question_id, q_type=question.q_type)) for gold in self.gold_samples: self.random.shuffle(gold.paragraphs) self.epoch_samples = (self.gold_samples + false_samples) np.random.shuffle(self.epoch_samples) return self.batcher.get_epoch(self.epoch_samples) def get_spec(self): batch_size = self.batcher.get_fixed_batch_size() num_contexts = 2 max_q_words = max((len(q.question_tokens) for q in self.questions)) max_c_words = max((max((c.num_tokens for c in (q.distractors + q.supporting_facts))) for q in self.questions)) return QuestionAndParagraphsSpec(batch_size=batch_size, 
max_num_contexts=num_contexts, max_num_question_words=max_q_words, max_num_context_words=max_c_words) def get_vocab(self): voc = set() for q in self.questions: voc.update(q.question_tokens) for para in (q.distractors + q.supporting_facts): voc.update(flatten_iterable(para.sentences)) return voc def get_word_counts(self): count = Counter() for q in self.questions: count.update(q.question_tokens) for para in (q.distractors + q.supporting_facts): count.update(flatten_iterable(para.sentences)) return count def __len__(self): len_mult = (5 if self.add_gold_distractor else 4) return self.batcher.epoch_size((len(self.gold_samples) * len_mult))
class CrowdCounter(nn.Module): def __init__(self, gpus, model_name, pretrained=True): super(CrowdCounter, self).__init__() if (model_name == 'CSRNet_LCM'): from .SCC_Model.CSRNet_LCM import CSRNet_LCM as net elif (model_name == 'VGG16_LCM'): from .SCC_Model.VGG16_LCM import VGG16_LCM as net elif (model_name == 'VGG16_LCM_REG'): from .SCC_Model.VGG16_LCM_REG import VGG16_LCM_REG as net self.CCN = net(pretrained) if (len(gpus) > 1): self.CCN = torch.nn.DataParallel(self.CCN, device_ids=gpus).cuda(gpus[0]) else: self.CCN = self.CCN.cuda() self.loss_sum_fn = nn.L1Loss().cuda() self.SumLoss = True def loss(self): return self.loss_total def loss_sum(self): return self.loss_sum def forward(self, img, gt_map): count_map = self.CCN(img) gt_map = torch.unsqueeze(gt_map, 1) (self.loss_total, self.loss_sum) = self.build_loss(count_map, gt_map) return count_map def build_loss(self, count_map, gt_map): loss_total = 0.0 if self.SumLoss: gt_map_ = (gt_map / cfg.LOG_PARA) (kernal3, kernal4, kernal5) = (2, 4, 8) filter5 = torch.ones(1, 1, kernal5, kernal5, requires_grad=False).cuda() gt_map_5 = F.conv2d(gt_map_, filter5, stride=kernal5) loss_sum_all = self.loss_sum_fn(count_map, gt_map_5) loss_total += loss_sum_all return (loss_total, loss_sum_all) def test_forward(self, img): count_map = self.CCN(img) return count_map
class FeatureDictNet(nn.ModuleDict): def __init__(self, model, out_indices=(0, 1, 2, 3, 4), out_map=None, feature_concat=False, flatten_sequential=False): super(FeatureDictNet, self).__init__() self.feature_info = _get_feature_info(model, out_indices) self.concat = feature_concat self.return_layers = {} return_layers = _get_return_layers(self.feature_info, out_map) modules = _module_list(model, flatten_sequential=flatten_sequential) remaining = set(return_layers.keys()) layers = OrderedDict() for (new_name, old_name, module) in modules: layers[new_name] = module if (old_name in remaining): self.return_layers[new_name] = str(return_layers[old_name]) remaining.remove(old_name) if (not remaining): break assert ((not remaining) and (len(self.return_layers) == len(return_layers))), f'Return layers ({remaining}) are not present in model' self.update(layers) def _collect(self, x) -> Dict[(str, torch.Tensor)]: out = OrderedDict() for (name, module) in self.items(): x = module(x) if (name in self.return_layers): out_id = self.return_layers[name] if isinstance(x, (tuple, list)): out[out_id] = (torch.cat(x, 1) if self.concat else x[0]) else: out[out_id] = x return out def forward(self, x) -> Dict[(str, torch.Tensor)]: return self._collect(x)
def find_resource_info_with_id(info_list: typing.Sequence[T], short_name: str, resource_type: ResourceType) -> T: for info in info_list: if (info.short_name == short_name): return info raise MissingResource(f"{resource_type.name} Resource with short_name '{short_name}' not found in {len(info_list)} resources")
class Subset(torch.utils.data.Dataset): def __init__(self, dataset, indices): self.dataset = dataset self.indices = indices self.group_array = self.get_group_array(re_evaluate=True) self.label_array = self.get_label_array(re_evaluate=True) def __getitem__(self, idx): return (self.dataset[self.indices[idx]] + (idx,)) def __len__(self): return len(self.indices) def get_group_array(self, re_evaluate=True): if re_evaluate: group_array = self.dataset.get_group_array()[self.indices] assert (len(group_array) == len(self)) return group_array else: return self.group_array def get_label_array(self, re_evaluate=True): if re_evaluate: label_array = self.dataset.get_label_array()[self.indices] assert (len(label_array) == len(self)) return label_array else: return self.label_array
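# Hedged usage sketch for the Subset wrapper above (not from the source repo):
# it assumes the wrapped dataset returns a tuple from __getitem__ and exposes
# get_group_array()/get_label_array() as numpy arrays, which is all the wrapper
# requires. ToyDataset is a hypothetical stand-in.
import numpy as np
import torch

class ToyDataset(torch.utils.data.Dataset):
    def __init__(self):
        self.x = np.arange(10, dtype=np.float32)

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        # return a tuple so Subset can append the subset-local index
        return (self.x[idx], int(idx) % 3)

    def get_group_array(self):
        return np.arange(10) % 2

    def get_label_array(self):
        return np.arange(10) % 3

subset = Subset(ToyDataset(), indices=np.array([0, 2, 4]))
x, y, subset_idx = subset[1]  # item 2 of the base dataset, plus its index within the subset
assert (subset_idx, len(subset)) == (1, 3)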
class GroupAttention(nn.Module): def __init__(self, d_model, dropout=0.8, no_cuda=False): super(GroupAttention, self).__init__() self.d_model = 256.0 self.linear_key = nn.Linear(d_model, d_model) self.linear_query = nn.Linear(d_model, d_model) self.norm = LayerNorm(d_model) self.dropout = nn.Dropout(dropout) self.no_cuda = no_cuda def forward(self, context, eos_mask, prior): (batch_size, seq_len) = context.size()[:2] context = self.norm(context) if self.no_cuda: a = torch.from_numpy(np.diag(np.ones((seq_len - 1), dtype=np.int32), 1)) b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0)) c = torch.from_numpy(np.diag(np.ones((seq_len - 1), dtype=np.int32), (- 1))) tri_matrix = torch.from_numpy(np.triu(np.ones([seq_len, seq_len], dtype=np.float32), 0)) else: a = torch.from_numpy(np.diag(np.ones((seq_len - 1), dtype=np.int32), 1)).cuda() b = torch.from_numpy(np.diag(np.ones(seq_len, dtype=np.int32), 0)).cuda() c = torch.from_numpy(np.diag(np.ones((seq_len - 1), dtype=np.int32), (- 1))).cuda() tri_matrix = torch.from_numpy(np.triu(np.ones([seq_len, seq_len], dtype=np.float32), 0)).cuda() mask = (eos_mask & (a + c)) key = self.linear_key(context) query = self.linear_query(context) scores = (torch.matmul(query, key.transpose((- 2), (- 1))) / self.d_model) scores = scores.masked_fill((mask == 0), (- 1000000000.0)) neibor_attn = F.softmax(scores, dim=(- 1)) neibor_attn = torch.sqrt(((neibor_attn * neibor_attn.transpose((- 2), (- 1))) + 1e-09)) neibor_attn = (prior + ((1.0 - prior) * neibor_attn)) t = torch.log((neibor_attn + 1e-09)).masked_fill((a == 0), 0).matmul(tri_matrix) g_attn = tri_matrix.matmul(t).exp().masked_fill(((tri_matrix.int() - b) == 0), 0) g_attn = ((g_attn + g_attn.transpose((- 2), (- 1))) + neibor_attn.masked_fill((b == 0), 1e-09)) return (g_attn, neibor_attn)
class StateFieldProperty(object): def __init__(self, field, parent_property): self.field = field self.parent_property = parent_property def __get__(self, instance, owner): if instance: if (self.parent_property and hasattr(self.parent_property, '__get__')): return self.parent_property.__get__(instance, owner) default = self.field.to_python(self.field.workflow.initial_state) return instance.__dict__.get(self.field.name, default) else: return self.field.workflow def __set__(self, instance, value): instance.__dict__[self.field.name] = self.field.to_python(value)
class ESRGAN_model(): def __init__(self, lr_shape, hr_shape, SCALE=4): self.SCALE = SCALE (self.lr_shape, self.hr_shape) = (lr_shape, hr_shape) (self.lr_height, self.lr_width, self.channels) = lr_shape (self.hr_height, self.hr_width, _) = hr_shape self.n_residual_in_residual_dense_block = 16 optimizer = Adam(0.0002, 0.5) self.vgg = self.build_vgg() self.vgg.trainable = False self.vgg.compile(loss='mse', optimizer=optimizer, metrics=['accuracy']) self.disc_patch = (30, 40, 1) self.gf = 64 self.df = 64 self.discriminator = self.build_discriminator() self.discriminator.compile(loss='mse', optimizer=optimizer, metrics=['accuracy']) self.generator = self.build_generator() img_hr = Input(shape=self.hr_shape) img_lr = Input(shape=self.lr_shape) fake_hr = self.generator(img_lr) fake_features = self.vgg(fake_hr) self.discriminator.trainable = False validity = self.discriminator(fake_hr) self.combined = Model([img_lr, img_hr], [validity, fake_features]) self.combined.compile(loss=['binary_crossentropy', 'mse'], loss_weights=[0.001, 1], optimizer=optimizer) def build_vgg(self): vgg = VGG19(weights='imagenet') vgg.outputs = [vgg.layers[9].output] img = Input(shape=self.hr_shape) img_features = vgg(img) return Model(img, img_features) def build_generator(self): def dense_block(layer_input, filters): d1 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input) d1 = LeakyReLU(alpha=0.2)(d1) c1 = concatenate([layer_input, d1], axis=(- 1)) d2 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(c1) d2 = LeakyReLU(alpha=0.2)(d2) c2 = concatenate([layer_input, d1, d2], axis=(- 1)) d3 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(c2) d3 = LeakyReLU(alpha=0.2)(d3) c3 = concatenate([layer_input, d1, d2, d3], axis=(- 1)) d4 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(c3) d4 = LeakyReLU(alpha=0.2)(d4) c4 = concatenate([layer_input, d1, d2, d3, d4], axis=(- 1)) d5 = Conv2D(filters, kernel_size=3, strides=1, padding='same')(c4) d = Lambda((lambda x: (x * 0.2)))(d5) d = Add()([d, layer_input]) return d def residual_in_residual_dense_block(layer_input, filters): d = dense_block(layer_input, filters) d = dense_block(d, filters) d = dense_block(d, filters) d = Lambda((lambda x: (x * 0.2)))(d) d = Add()([d, layer_input]) return d def deconv2d(layer_input): u = UpSampling2D(size=2)(layer_input) u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u) u = LeakyReLU(alpha=0.2)(u) return u img_lr = Input(shape=self.lr_shape) c1 = Conv2D(64, kernel_size=9, strides=1, padding='same')(img_lr) r = residual_in_residual_dense_block(c1, self.gf) for _ in range((self.n_residual_in_residual_dense_block - 1)): r = residual_in_residual_dense_block(r, self.gf) c2 = Conv2D(64, kernel_size=3, strides=1, padding='same')(r) c2 = Add()([c2, c1]) u1 = deconv2d(c2) u2 = (u1 if (self.SCALE < 4) else deconv2d(u1)) u3 = (u2 if (self.SCALE < 8) else deconv2d(u2)) gen_hr = Conv2D(self.channels, kernel_size=9, strides=1, padding='same')(u3) return Model(img_lr, gen_hr) def build_discriminator(self): def d_block(layer_input, filters, strides=1, bn=True): d = Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input) d = LeakyReLU(alpha=0.2)(d) if bn: d = BatchNormalization(momentum=0.8)(d) return d d0 = Input(shape=self.hr_shape) d1 = d_block(d0, self.df, bn=False) d2 = d_block(d1, self.df, strides=2) d3 = d_block(d2, (self.df * 2)) d4 = d_block(d3, (self.df * 2), strides=2) d5 = d_block(d4, (self.df * 4)) d6 = d_block(d5, (self.df * 4), strides=2) d7 = d_block(d6, (self.df * 8)) d8 = d_block(d7, (self.df * 8), strides=2) d9 = Dense((self.df * 16))(d8) d10 = LeakyReLU(alpha=0.2)(d9) validity = Dense(1, activation='sigmoid')(d10) return Model(d0, validity)
def _configure_optimizer(learning_rate): if (FLAGS.optimizer == 'adadelta'): optimizer = tf.train.AdadeltaOptimizer(learning_rate, rho=FLAGS.adadelta_rho, epsilon=FLAGS.opt_epsilon) elif (FLAGS.optimizer == 'adagrad'): optimizer = tf.train.AdagradOptimizer(learning_rate, initial_accumulator_value=FLAGS.adagrad_initial_accumulator_value) elif (FLAGS.optimizer == 'adam'): optimizer = tf.train.AdamOptimizer(learning_rate, beta1=FLAGS.adam_beta1, beta2=FLAGS.adam_beta2, epsilon=FLAGS.opt_epsilon) elif (FLAGS.optimizer == 'ftrl'): optimizer = tf.train.FtrlOptimizer(learning_rate, learning_rate_power=FLAGS.ftrl_learning_rate_power, initial_accumulator_value=FLAGS.ftrl_initial_accumulator_value, l1_regularization_strength=FLAGS.ftrl_l1, l2_regularization_strength=FLAGS.ftrl_l2) elif (FLAGS.optimizer == 'momentum'): optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=FLAGS.momentum, name='Momentum') elif (FLAGS.optimizer == 'rmsprop'): optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=FLAGS.rmsprop_decay, momentum=FLAGS.rmsprop_momentum, epsilon=FLAGS.opt_epsilon) elif (FLAGS.optimizer == 'sgd'): optimizer = tf.train.GradientDescentOptimizer(learning_rate) else: raise ValueError(('Optimizer [%s] was not recognized' % FLAGS.optimizer)) return optimizer
@pytest.mark.parametrize('ip, host', [('127.0.0.1', 'localhost'), ('127.0.0.1', 'localhost.localdomain'), ('127.0.0.1', 'local'), ('255.255.255.255', 'broadcasthost'), ('::1', 'localhost'), ('::1', 'ip6-localhost'), ('::1', 'ip6-loopback'), ('fe80::1%lo0', 'localhost'), ('ff00::0', 'ip6-localnet'), ('ff00::0', 'ip6-mcastprefix'), ('ff02::1', 'ip6-allnodes'), ('ff02::2', 'ip6-allrouters'), ('ff02::3', 'ip6-allhosts'), ('0.0.0.0', '0.0.0.0'), ('127.0.1.1', 'myhostname'), ('127.0.0.53', 'myhostname')]) def test_whitelisted_lines(host_blocker_factory, ip, host): host_blocker = host_blocker_factory() line = '{} {}'.format(ip, host).encode('ascii') parsed_hosts = host_blocker._read_hosts_line(line) assert (host not in parsed_hosts)
class TestRowAppendableArray(unittest.TestCase): def test_append_1d_arrays_and_trim_remaining_buffer(self): appendable = RowAppendableArray(7) appendable.append_row(np.zeros(3)) appendable.append_row(np.ones(3)) self.assertTrue(np.array_equal(appendable.to_array(), np.array([0, 0, 0, 1, 1, 1]))) def test_append_rows_of_nd_arrays_and_trim_remaining_buffer(self): appendable = RowAppendableArray(7) appendable.append_row(np.zeros((3, 2))) appendable.append_row(np.ones((3, 2))) self.assertTrue(np.array_equal(appendable.to_array(), np.vstack([np.zeros((3, 2)), np.ones((3, 2))]))) def test_append_more_1d_arrays_than_expected(self): appendable = RowAppendableArray(5) appendable.append_row(np.zeros(3)) appendable.append_row(np.ones(3)) self.assertTrue(np.array_equal(appendable.to_array(), np.array([0, 0, 0, 1, 1, 1]))) def test_append_more_rows_of_nd_arrays_than_expected(self): appendable = RowAppendableArray(2) appendable.append_row(np.zeros((3, 2))) appendable.append_row(np.ones((3, 2))) self.assertTrue(np.array_equal(appendable.to_array(), np.vstack([np.zeros((3, 2)), np.ones((3, 2))]))) def test_append_1d_arrays_pre_allocated_appendable_array(self): appendable = RowAppendableArray(6) appendable.append_row(np.zeros(3)) appendable.append_row(np.ones(3)) self.assertTrue(np.array_equal(appendable.to_array(), np.array([0, 0, 0, 1, 1, 1]))) def test_append_rows_of_nd_arrays_to_pre_allocated_appendable_array(self): appendable = RowAppendableArray(6) appendable.append_row(np.zeros((3, 2))) appendable.append_row(np.ones((3, 2))) self.assertTrue(np.array_equal(appendable.to_array(), np.vstack([np.zeros((3, 2)), np.ones((3, 2))]))) def test_pre_allocation_can_double_appending_performance(self): unallocated = RowAppendableArray(0) pre_allocated = RowAppendableArray(10000) unallocated_performance = timeit((lambda : unallocated.append_row(np.array([42]))), number=10000) pre_allocated_performance = timeit((lambda : pre_allocated.append_row(np.array([42]))), number=10000) self.assertGreater((unallocated_performance / pre_allocated_performance), 2)
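# A minimal sketch of the RowAppendableArray the tests above exercise, inferred
# only from their observable behavior (pre-allocation by an expected row count,
# appending 1d rows or stacked nd row blocks, growth past the estimate, and
# trimming in to_array); the real implementation may differ.
import numpy as np

class RowAppendableArray:
    def __init__(self, expected_rows):
        self._expected_rows = expected_rows
        self._buffer = None
        self._used = 0

    def append_row(self, row):
        row = np.asarray(row)
        if self._buffer is None:
            # pre-allocate using the caller's row-count estimate
            self._buffer = np.empty((self._expected_rows,) + row.shape[1:], dtype=row.dtype)
        n = row.shape[0]
        if self._used + n > self._buffer.shape[0]:
            # grow geometrically when the estimate was too small
            extra = max(self._buffer.shape[0], n)
            self._buffer = np.concatenate([self._buffer, np.empty((extra,) + self._buffer.shape[1:], dtype=self._buffer.dtype)])
        self._buffer[self._used:self._used + n] = row
        self._used += n

    def to_array(self):
        # trim the unused tail of the pre-allocated buffer
        if self._buffer is None:
            return np.empty(0)
        return self._buffer[:self._used]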
class Unfolding_Loss(Loss, ABC): def __init__(self, window_length, hop_length, **kwargs): super().__init__() self.window_length = window_length self.hop_length = hop_length def compute(self, model, mixture_signal, target_signal): target_signal_hat = model.separate(mixture_signal) target_signal_hat = self.auto_pad(target_signal_hat) target_signal_hat = target_signal_hat.unfold((- 2), self.window_length, self.hop_length) target_signal = self.auto_pad(target_signal) target_signal = target_signal.unfold((- 2), self.window_length, self.hop_length) return self.criterion(target_signal_hat, target_signal) def auto_pad(self, signal): n_step = math.floor((signal.shape[(- 2)] / self.hop_length)) padding = (((self.hop_length * n_step) + self.window_length) - signal.shape[(- 2)]) left_padding = (padding // 2) right_padding = ((padding // 2) + (padding % 2)) return f.pad(signal, (0, 0, left_padding, right_padding)) def criterion(self, target_signal_hat, target_signal): pass
class BertSmallModel(nn.Module): def __init__(self, config, train_embedding=False) -> None: super().__init__() self.model = Bert(config).to(device) self.embedding = copy.deepcopy(self.model.get_input_embeddings().requires_grad_(True)) self.model.set_input_embeddings(nn.Sequential()) def parameters(self): return (list(self.model.parameters()) + list(self.embedding.parameters())) def forward(self, x, mask): return self.model(x, mask)
def torch_persistent_save(obj, f): if isinstance(f, str): with PathManager.open(f, 'wb') as h: torch_persistent_save(obj, h) return for i in range(3): try: return torch.save(obj, f) except Exception: if (i == 2): logger.error(traceback.format_exc())
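# Hedged usage note for torch_persistent_save (assumes PathManager is the
# iopath-backed file handler imported in the same module, as in fairseq): with
# a string argument it reopens the path through PathManager and then retries
# torch.save up to three times before logging the final traceback.
torch_persistent_save({'step': 1, 'model': None}, '/tmp/checkpoint_last.pt')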
class Attempt(object): def __init__(self, value, attempt_number, has_exception): self.value = value self.attempt_number = attempt_number self.has_exception = has_exception def get(self, wrap_exception=False): if self.has_exception: if wrap_exception: raise RetryError(self) else: six.reraise(self.value[0], self.value[1], self.value[2]) else: return self.value def __repr__(self): if self.has_exception: return 'Attempts: {0}, Error:\n{1}'.format(self.attempt_number, ''.join(traceback.format_tb(self.value[2]))) else: return 'Attempts: {0}, Value: {1}'.format(self.attempt_number, self.value)
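# Hedged usage sketch for Attempt: capture one call of a possibly-failing
# function and defer the raise/return decision to get(), mirroring how retry
# loops built on this class consume it. Assumes six and RetryError are
# importable, as in the surrounding module; call_once is a hypothetical helper.
import sys

def call_once(fn, *args, **kwargs):
    try:
        return Attempt(fn(*args, **kwargs), attempt_number=1, has_exception=False)
    except Exception:
        # store exc_info so get() can re-raise or wrap it later
        return Attempt(sys.exc_info(), attempt_number=1, has_exception=True)

assert call_once(int, '42').get() == 42
print(call_once(int, 'oops'))  # "Attempts: 1, Error:" plus the formatted traceback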
class Command(BaseCommand): help = 'Run the ML model on the specified URLs. Results are not stored to the database.' def add_arguments(self, parser): parser.add_argument('urls', nargs='+', help=_('URL to run against')) def handle(self, *args, **kwargs): self.stdout.write((_('Using the model(s) from %s') % settings.ADSERVER_ANALYZER_BACKEND)) for url in kwargs['urls']: if (not URLValidator()(url)): self.handle_url(url) def handle_url(self, url): self.stdout.write((_('Running against %s') % url)) keywords = [] for backend in get_url_analyzer_backends(): backend_instance = backend(url) analyzed_keywords = backend_instance.analyze() self.stdout.write((_("Keywords from '%s': %s") % (backend.__name__, analyzed_keywords))) if analyzed_keywords: keywords.extend(analyzed_keywords) self.stdout.write((_('Keywords/topics: %s') % keywords))
def extract_multipart_formdata(data): _temp = [] REGEX_MULTIPART = '(?is)((Content-Disposition[^\\n]+?name\\s*=\\s*[\\"\']?(?P<name>(.*?))[\\"\']?)(?:;\\s*filename=[\\"\']?(?P<filename>(.*?))[\\"\']?)?(?:\\nContent-Type:\\s*(?P<contenttype>(.*?))\\n)?(?:\\s*)?(?P<value>[\\w\\.\\_\\-\\*\\+\\[\\]\\=\\>\\;\\:\\\'\\"\\?\\/\\<\\.\\,\\!\\\\#\\$\\%\\^\\&\\*\\(\\)\\_\\+\\`\\~\\{\\}\\|\\\\ ]*)?(\\s)+--)' for entry in re.finditer(REGEX_MULTIPART, data): _out = {} if entry: _gdict = entry.groupdict() key = _gdict.get('name') value = _gdict.get('value') _out.update({'key': key}) if ('contenttype' in _gdict.keys()): filename = _gdict.get('filename') filename = ('' if (not filename) else filename) content_type = _gdict.get('contenttype') content_type = ('' if (not content_type) else content_type) if content_type: value = '' _out.update({'value': value}) if _out: _out.update({'type': 'MULTIPART '}) _temp.append(_out) return _temp
class TestMPM(TestCase): def test_well_posed(self): options = {'thermal': 'isothermal', 'working electrode': 'positive'} model = pybamm.lithium_ion.MPM(options) model.check_well_posedness() model = pybamm.lithium_ion.MPM({'working electrode': 'positive'}, build=False) model.build_model() model.check_well_posedness() def test_default_parameter_values(self): model = pybamm.lithium_ion.MPM({'working electrode': 'positive'}) self.assertEqual(model.default_parameter_values['Positive minimum particle radius [m]'], 0.0) def test_lumped_thermal_model_1D(self): options = {'thermal': 'lumped', 'working electrode': 'positive'} model = pybamm.lithium_ion.MPM(options) model.check_well_posedness() def test_particle_uniform(self): options = {'particle': 'uniform profile', 'working electrode': 'positive'} model = pybamm.lithium_ion.MPM(options) model.check_well_posedness() def test_differential_surface_form(self): options = {'surface form': 'differential', 'working electrode': 'positive'} model = pybamm.lithium_ion.MPM(options) model.check_well_posedness()
class BetaIncInv(ScalarOp): nfunc_spec = ('scipy.special.betaincinv', 3, 1) def impl(self, a, b, x): return scipy.special.betaincinv(a, b, x) def grad(self, inputs, grads): (a, b, x) = inputs (gz,) = grads return [grad_not_implemented(self, 0, a), grad_not_implemented(self, 0, b), (((gz * exp(betaln(a, b))) * ((1 - betaincinv(a, b, x)) ** (1 - b))) * (betaincinv(a, b, x) ** (1 - a)))] def c_code(self, *args, **kwargs): raise NotImplementedError()
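# Numeric sanity check (not from the source) of the gradient formula used in
# grad() above: for y = betaincinv(a, b, x), the inverse-function rule gives
# dy/dx = B(a, b) * (1 - y)**(1 - b) * y**(1 - a), which is exactly the
# expression built from betaln/betaincinv in the Op. Compared here against a
# central finite difference.
import numpy as np
from scipy import special

a, b, x, eps = 2.0, 3.0, 0.4, 1e-6
y = special.betaincinv(a, b, x)
analytic = np.exp(special.betaln(a, b)) * (1 - y) ** (1 - b) * y ** (1 - a)
numeric = (special.betaincinv(a, b, x + eps) - special.betaincinv(a, b, x - eps)) / (2 * eps)
assert np.isclose(analytic, numeric, rtol=1e-4)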
class LxFdtDump(gdb.Command): def __init__(self): super(LxFdtDump, self).__init__('lx-fdtdump', gdb.COMMAND_DATA, gdb.COMPLETE_FILENAME) def fdthdr_to_cpu(self, fdt_header): fdt_header_be = '>IIIIIII' fdt_header_le = '<IIIIIII' if (utils.get_target_endianness() == 1): output_fmt = fdt_header_le else: output_fmt = fdt_header_be return unpack(output_fmt, pack(fdt_header_be, fdt_header['magic'], fdt_header['totalsize'], fdt_header['off_dt_struct'], fdt_header['off_dt_strings'], fdt_header['off_mem_rsvmap'], fdt_header['version'], fdt_header['last_comp_version'])) def invoke(self, arg, from_tty): if (not constants.LX_CONFIG_OF): raise gdb.GdbError('Kernel not compiled with CONFIG_OF\n') if (len(arg) == 0): filename = 'fdtdump.dtb' else: filename = arg py_fdt_header_ptr = gdb.parse_and_eval('(const struct fdt_header *) initial_boot_params') py_fdt_header = py_fdt_header_ptr.dereference() fdt_header = self.fdthdr_to_cpu(py_fdt_header) if (fdt_header[0] != constants.LX_OF_DT_HEADER): raise gdb.GdbError('No flattened device tree magic found\n') gdb.write('fdt_magic: 0x{:02X}\n'.format(fdt_header[0])) gdb.write('fdt_totalsize: 0x{:02X}\n'.format(fdt_header[1])) gdb.write('off_dt_struct: 0x{:02X}\n'.format(fdt_header[2])) gdb.write('off_dt_strings: 0x{:02X}\n'.format(fdt_header[3])) gdb.write('off_mem_rsvmap: 0x{:02X}\n'.format(fdt_header[4])) gdb.write('version: {}\n'.format(fdt_header[5])) gdb.write('last_comp_version: {}\n'.format(fdt_header[6])) inf = gdb.inferiors()[0] fdt_buf = utils.read_memoryview(inf, py_fdt_header_ptr, fdt_header[1]).tobytes() try: f = open(filename, 'wb') except gdb.error: raise gdb.GdbError('Could not open file to dump fdt') f.write(fdt_buf) f.close() gdb.write((('Dumped fdt blob to ' + filename) + '\n'))
def download_coco(path, overwrite=False): _DOWNLOAD_URLS = [(' '10ad623668ab00c62c096f0ed636d6aff41faca5'), (' '8551ee4bb5860311e79dace7e79cb91e432e78b3'), (' '4950dc9d00dbe1c933ee0170fd2a41')] mkdir(path) for (url, checksum) in _DOWNLOAD_URLS: filename = download(url, path=path, overwrite=overwrite, sha1_hash=checksum) with zipfile.ZipFile(filename) as zf: zf.extractall(path=path)
def init(): if dist.is_initialized(): return if ('MASTER_ADDR' not in os.environ): os.environ['MASTER_ADDR'] = 'localhost' if ('MASTER_PORT' not in os.environ): os.environ['MASTER_PORT'] = '29500' if ('RANK' not in os.environ): os.environ['RANK'] = '0' if ('LOCAL_RANK' not in os.environ): os.environ['LOCAL_RANK'] = '0' if ('WORLD_SIZE' not in os.environ): os.environ['WORLD_SIZE'] = '1' backend = ('gloo' if (not th.cuda.is_available()) else 'nccl') dist.init_process_group(backend=backend, timeout=datetime.timedelta(0, 3600)) th.cuda.set_device(int(os.environ.get('LOCAL_RANK', '0')))
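# Hedged usage sketch: the environment-variable defaults let init() bring up a
# single-process group, so rank/world-size queries run unmodified on one
# machine. Note init() as written unconditionally calls th.cuda.set_device, so
# it assumes a CUDA device even when the gloo backend is selected; dist and th
# are the torch.distributed/torch aliases the snippet already uses.
init()
print(dist.get_rank(), dist.get_world_size())  # 0 1 in a single process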
class RotationTransformer(): valid_reps = ['axis_angle', 'euler_angles', 'quaternion', 'rotation_6d', 'matrix'] def __init__(self, from_rep='axis_angle', to_rep='rotation_6d', from_convention=None, to_convention=None): assert (from_rep != to_rep) assert (from_rep in self.valid_reps) assert (to_rep in self.valid_reps) if (from_rep == 'euler_angles'): assert (from_convention is not None) if (to_rep == 'euler_angles'): assert (to_convention is not None) forward_funcs = list() inverse_funcs = list() if (from_rep != 'matrix'): funcs = [getattr(pt, f'{from_rep}_to_matrix'), getattr(pt, f'matrix_to_{from_rep}')] if (from_convention is not None): funcs = [functools.partial(func, convention=from_convention) for func in funcs] forward_funcs.append(funcs[0]) inverse_funcs.append(funcs[1]) if (to_rep != 'matrix'): funcs = [getattr(pt, f'matrix_to_{to_rep}'), getattr(pt, f'{to_rep}_to_matrix')] if (to_convention is not None): funcs = [functools.partial(func, convention=to_convention) for func in funcs] forward_funcs.append(funcs[0]) inverse_funcs.append(funcs[1]) inverse_funcs = inverse_funcs[::(- 1)] self.forward_funcs = forward_funcs self.inverse_funcs = inverse_funcs @staticmethod def _apply_funcs(x: Union[(np.ndarray, torch.Tensor)], funcs: list) -> Union[(np.ndarray, torch.Tensor)]: x_ = x if isinstance(x, np.ndarray): x_ = torch.from_numpy(x) x_: torch.Tensor for func in funcs: x_ = func(x_) y = x_ if isinstance(x, np.ndarray): y = x_.numpy() return y def forward(self, x: Union[(np.ndarray, torch.Tensor)]) -> Union[(np.ndarray, torch.Tensor)]: return self._apply_funcs(x, self.forward_funcs) def inverse(self, x: Union[(np.ndarray, torch.Tensor)]) -> Union[(np.ndarray, torch.Tensor)]: return self._apply_funcs(x, self.inverse_funcs)
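# Hedged usage sketch: assumes pt is pytorch3d.transforms (the module whose
# {rep}_to_matrix/matrix_to_{rep} helpers the constructor looks up) and that
# numpy/torch are imported as in the snippet. Numpy input yields numpy output.
import numpy as np

tf = RotationTransformer(from_rep='axis_angle', to_rep='rotation_6d')
aa = np.random.randn(4, 3).astype(np.float32)  # batch of axis-angle rotations
r6 = tf.forward(aa)        # shape (4, 6), continuous 6d representation
aa_back = tf.inverse(r6)   # shape (4, 3), the same rotations up to numerics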
class PlyData(object): def __init__(self, elements=[], text=False, byte_order='=', comments=[], obj_info=[]): if ((byte_order == '=') and (not text)): byte_order = _native_byte_order self.byte_order = byte_order self.text = text self.comments = list(comments) self.obj_info = list(obj_info) self.elements = elements def _get_elements(self): return self._elements def _set_elements(self, elements): self._elements = tuple(elements) self._index() elements = property(_get_elements, _set_elements) def _get_byte_order(self): return self._byte_order def _set_byte_order(self, byte_order): if (byte_order not in ['<', '>', '=']): raise ValueError("byte order must be '<', '>', or '='") self._byte_order = byte_order byte_order = property(_get_byte_order, _set_byte_order) def _index(self): self._element_lookup = dict(((elt.name, elt) for elt in self._elements)) if (len(self._element_lookup) != len(self._elements)): raise ValueError('two elements with same name') @staticmethod def _parse_header(stream): lines = [] comments = {'comment': [], 'obj_info': []} while True: line = stream.readline().decode('ascii').strip() fields = _split_line(line, 1) if (fields[0] == 'end_header'): break elif (fields[0] in comments.keys()): lines.append(fields) else: lines.append(line.split()) a = 0 if (lines[a] != ['ply']): raise PlyParseError("expected 'ply'") a += 1 while (lines[a][0] in comments.keys()): comments[lines[a][0]].append(lines[a][1]) a += 1 if (lines[a][0] != 'format'): raise PlyParseError("expected 'format'") if (lines[a][2] != '1.0'): raise PlyParseError("expected version '1.0'") if (len(lines[a]) != 3): raise PlyParseError("too many fields after 'format'") fmt = lines[a][1] if (fmt not in _byte_order_map): raise PlyParseError(("don't understand format %r" % fmt)) byte_order = _byte_order_map[fmt] text = (fmt == 'ascii') a += 1 while ((a < len(lines)) and (lines[a][0] in comments.keys())): comments[lines[a][0]].append(lines[a][1]) a += 1 return PlyData(PlyElement._parse_multi(lines[a:]), text, byte_order, comments['comment'], comments['obj_info']) @staticmethod def read(stream): (must_close, stream) = _open_stream(stream, 'read') try: data = PlyData._parse_header(stream) for elt in data: elt._read(stream, data.text, data.byte_order) finally: if must_close: stream.close() return data def write(self, stream): (must_close, stream) = _open_stream(stream, 'write') try: stream.write(self.header.encode('ascii')) stream.write(b'\r\n') for elt in self: elt._write(stream, self.text, self.byte_order) finally: if must_close: stream.close() @property def header(self): lines = ['ply'] if self.text: lines.append('format ascii 1.0') else: lines.append((('format ' + _byte_order_reverse[self.byte_order]) + ' 1.0')) for c in self.comments: lines.append(('comment ' + c)) for c in self.obj_info: lines.append(('obj_info ' + c)) lines.extend((elt.header for elt in self.elements)) lines.append('end_header') return '\r\n'.join(lines) def __iter__(self): return iter(self.elements) def __len__(self): return len(self.elements) def __contains__(self, name): return (name in self._element_lookup) def __getitem__(self, name): return self._element_lookup[name] def __str__(self): return self.header def __repr__(self): return ('PlyData(%r, text=%r, byte_order=%r, comments=%r, obj_info=%r)' % (self.elements, self.text, self.byte_order, self.comments, self.obj_info))
@dataclasses.dataclass class _ServerCapabilities(): actions: bool body_markup: bool body_hyperlinks: bool kde_origin_name: bool @classmethod def from_list(cls, capabilities: List[str]) -> '_ServerCapabilities': return cls(actions=('actions' in capabilities), body_markup=('body-markup' in capabilities), body_hyperlinks=('body-hyperlinks' in capabilities), kde_origin_name=('x-kde-origin-name' in capabilities))
class Effect8104(BaseEffect): type = 'passive' @staticmethod def handler(fit, src, context, projectionRange, **kwargs): lvl = src.level fit.drones.filteredItemIncrease((lambda mod: mod.item.requiresSkill('Salvage Drone Specialization')), 'accessDifficultyBonus', (src.getModifiedItemAttr('specAccessDifficultyBonus') * lvl), **kwargs) fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Salvage Drone Specialization')), 'maxVelocity', (src.getModifiedItemAttr('maxVelocityBonus') * lvl), **kwargs)
class SlackOAuth2Test(OAuth2Test): backend_path = 'social_core.backends.slack.SlackOAuth2' user_data_url = ' access_token_body = json.dumps({'access_token': 'foobar', 'token_type': 'bearer'}) user_data_body = json.dumps({'ok': True, 'user': {'email': '', 'name': 'Foo Bar', 'id': '123456'}, 'team': {'id': '456789'}, 'scope': 'identity.basic,identity.email'}) expected_username = 'foobar' def test_login(self): self.do_login() def test_partial_pipeline(self): self.do_partial_pipeline()
def test_inner_call_with_dynamic_argument() -> None: node = builder.extract_node('\n def f(x):\n return g(x)\n\n def g(y):\n return y + 2\n\n f(1) #@\n ') assert isinstance(node, nodes.NodeNG) inferred = node.inferred() assert (len(inferred) == 1) assert (inferred[0] is Uninferable)
class Font(BaseObject, EqNeAttrs): bold = 0 character_set = 0 colour_index = 0 escapement = 0 family = 0 font_index = 0 height = 0 italic = 0 name = UNICODE_LITERAL('') struck_out = 0 underline_type = 0 underlined = 0 weight = 400 outline = 0 shadow = 0
class DummyCondStage(): def __init__(self, conditional_key): self.conditional_key = conditional_key self.train = None def eval(self): return self @staticmethod def encode(c: Tensor): return (c, None, (None, None, c)) @staticmethod def decode(c: Tensor): return c @staticmethod def to_rgb(c: Tensor): return c
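# Hedged usage sketch: the dummy stage passes conditioning tensors through
# untouched while keeping the (encoded, loss, extras) return shape of a real
# conditioning stage. Assumes Tensor in the annotations is torch.Tensor.
import torch

stage = DummyCondStage(conditional_key='class_label')
c = torch.zeros(2, 4)
encoded, _, (_, _, same_c) = stage.encode(c)
assert stage.decode(encoded) is c and same_c is c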
class cifar_dataloader(): def __init__(self, dataset, r, noise_mode, batch_size, num_workers, root_dir, log, noise_file=''): self.dataset = dataset self.r = r self.noise_mode = noise_mode self.batch_size = batch_size self.num_workers = num_workers self.root_dir = root_dir self.log = log self.noise_file = noise_file normalize = transforms.Normalize(mean=[(x / 255.0) for x in [125.3, 123.0, 113.9]], std=[(x / 255.0) for x in [63.0, 62.1, 66.7]]) self.transform_train = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: F.pad(x.unsqueeze(0), (4, 4, 4, 4), mode='reflect').squeeze())), transforms.ToPILImage(), transforms.RandomCrop(32), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]) self.transform_test = transforms.Compose([transforms.ToTensor(), normalize]) def run(self, mode, losses=[]): if (mode == 'warmup'): all_dataset = cifar_dataset(dataset=self.dataset, noise_mode=self.noise_mode, r=self.r, root_dir=self.root_dir, transform=self.transform_train, mode='all', noise_file=self.noise_file) trainloader = DataLoader(dataset=all_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers) return trainloader elif (mode == 'test'): test_dataset = cifar_dataset(dataset=self.dataset, noise_mode=self.noise_mode, r=self.r, root_dir=self.root_dir, transform=self.transform_test, mode='test') test_loader = DataLoader(dataset=test_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers) return test_loader elif (mode == 'eval_train'): eval_dataset = cifar_dataset(dataset=self.dataset, noise_mode=self.noise_mode, r=self.r, root_dir=self.root_dir, transform=self.transform_test, mode='all', noise_file=self.noise_file) eval_loader = DataLoader(dataset=eval_dataset, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers) return eval_loader elif (mode == 'meta'): meta_dataset = cifar_dataset(dataset=self.dataset, noise_mode=self.noise_mode, r=self.r, root_dir=self.root_dir, transform=self.transform_train, mode='meta', noise_file=self.noise_file, losses=losses) meta_trainloader = DataLoader(dataset=meta_dataset, batch_size=self.batch_size, shuffle=True, num_workers=self.num_workers, pin_memory=True) return meta_trainloader
def tags_handler(ctx, param, value): retval = options.from_like_context(ctx, param, value) if ((retval is None) and value): try: retval = dict((p.split('=') for p in value)) except Exception: raise click.BadParameter(("'%s' contains a malformed tag." % value), param=param, param_hint='transform') return retval
class FlavaImageConfig(PretrainedConfig): model_type = 'flava_image_model' def __init__(self, hidden_size: int=768, num_hidden_layers: int=12, num_attention_heads: int=12, intermediate_size: int=3072, hidden_act: int='gelu', hidden_dropout_prob: float=0.0, attention_probs_dropout_prob: float=0.0, initializer_range: float=0.02, layer_norm_eps: float=1e-12, image_size: int=224, patch_size: int=16, num_channels: int=3, qkv_bias: bool=True, mask_token: bool=True, vocab_size: int=8192, **kwargs): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig': (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) if (config_dict.get('model_type') == 'flava'): config_dict = config_dict['image_config'] if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)): logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.") return cls.from_dict(config_dict, **kwargs)
class unit_tcn_G(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=9, stride=1): super(unit_tcn_G, self).__init__() pad = int(((kernel_size - 1) / 2)) inter_channels = (out_channels // 4) self.inter_c = inter_channels self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), stride=(stride, 1)) self.conv1 = nn.Conv2d(in_channels, inter_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), stride=(1, 1)) self.conv2 = nn.Conv2d(in_channels, inter_channels, kernel_size=(kernel_size, 1), padding=(pad, 0), stride=(1, 1)) self.soft = nn.Softmax((- 2)) self.bn = nn.BatchNorm2d(out_channels) conv_init(self.conv) conv_init(self.conv1) conv_init(self.conv2) bn_init(self.bn, 1) def forward(self, x): (N, C, T, V) = x.size() A1 = self.conv1(x).permute(0, 3, 1, 2).contiguous().view(N, V, (self.inter_c * T)) A2 = self.conv2(x).view(N, (self.inter_c * T), V) A1 = self.soft((torch.matmul(A1, A2) / A1.size((- 1)))) A2 = x.view(N, (C * T), V) x = torch.matmul(A2, A1).view(N, C, T, V) x = self.bn(self.conv(x)) return x
class SFTLayer_torch(nn.Module): def __init__(self): super(SFTLayer_torch, self).__init__() self.SFT_scale_conv0 = nn.Conv2d(32, 32, 1) self.SFT_scale_conv1 = nn.Conv2d(32, 64, 1) self.SFT_shift_conv0 = nn.Conv2d(32, 32, 1) self.SFT_shift_conv1 = nn.Conv2d(32, 64, 1) def forward(self, x): scale = self.SFT_scale_conv1(F.leaky_relu(self.SFT_scale_conv0(x[1]), 0.01, inplace=True)) shift = self.SFT_shift_conv1(F.leaky_relu(self.SFT_shift_conv0(x[1]), 0.01, inplace=True)) return ((x[0] * scale) + shift)
class TextAccumulator(): def __init__(self, separator: str=''): self._separator = separator self._texts: List[str] = [] def push(self, text: str) -> None: self._texts.append(text) def pop(self) -> Iterator[str]: if (not self._texts): return text = self._separator.join(self._texts) self._texts.clear() (yield text)
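# Usage sketch for TextAccumulator: pop() is a generator that yields the
# joined text at most once and clears the buffer, so callers can iterate over
# it without an explicit emptiness check.
acc = TextAccumulator(separator=', ')
acc.push('red')
acc.push('blue')
assert list(acc.pop()) == ['red, blue']
assert list(acc.pop()) == []  # already drained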
class Decorators(NodeNG): _astroid_fields = ('nodes',) nodes: list[NodeNG] def postinit(self, nodes: list[NodeNG]) -> None: self.nodes = nodes def scope(self) -> LocalsDictNodeNG: if (not self.parent): raise ParentMissingError(target=self) if (not self.parent.parent): raise ParentMissingError(target=self.parent) return self.parent.parent.scope() def get_children(self): (yield from self.nodes)
@patch('bot.constants.Channels.incidents', 123) class TestIsIncident(unittest.TestCase): def setUp(self) -> None: self.incident = MockMessage(channel=MockTextChannel(id=123), content='this is an incident', author=MockUser(bot=False), pinned=False, reference=None) def test_is_incident_true(self): self.assertTrue(incidents.is_incident(self.incident)) def check_false(self): self.assertFalse(incidents.is_incident(self.incident)) def test_is_incident_false_channel(self): self.incident.channel = MockTextChannel(id=456) self.check_false() def test_is_incident_false_content(self): self.incident.content = '# this is a comment message' self.check_false() def test_is_incident_false_author(self): self.incident.author = MockUser(bot=True) self.check_false() def test_is_incident_false_pinned(self): self.incident.pinned = True self.check_false()
def test_bound_crs__example(): proj_crs = ProjectedCRS(conversion=TransverseMercatorConversion(latitude_natural_origin=0, longitude_natural_origin=15, false_easting=2520000, false_northing=0, scale_factor_natural_origin=0.9996), geodetic_crs=GeographicCRS(datum=CustomDatum(ellipsoid='International 1924 (Hayford 1909, 1910)'))) bound_crs = BoundCRS(source_crs=proj_crs, target_crs='WGS 84', transformation=ToWGS84Transformation(proj_crs.geodetic_crs, (- 122.74), (- 34.27), (- 22.83), (- 1.884), (- 3.4), (- 3.03), (- 15.62))) with pytest.warns(UserWarning): assert (bound_crs.to_dict() == {'ellps': 'intl', 'k': 0.9996, 'lat_0': 0, 'lon_0': 15, 'no_defs': None, 'proj': 'tmerc', 'towgs84': [(- 122.74), (- 34.27), (- 22.83), (- 1.884), (- 3.4), (- 3.03), (- 15.62)], 'type': 'crs', 'units': 'm', 'x_0': 2520000, 'y_0': 0})
def build_valid_col_units(table_units, schema): col_ids = [table_unit[1] for table_unit in table_units if (table_unit[0] == TABLE_TYPE['table_unit'])] prefixs = [col_id[:(- 2)] for col_id in col_ids] valid_col_units = [] for value in schema.idMap.values(): if (('.' in value) and (value[:value.index('.')] in prefixs)): valid_col_units.append(value) return valid_col_units
def main(): opts = TrainOptions().parse() if os.path.exists(opts.exp_dir): raise Exception('Oops... {} already exists'.format(opts.exp_dir)) os.makedirs(opts.exp_dir) opts_dict = vars(opts) pprint.pprint(opts_dict) with open(os.path.join(opts.exp_dir, 'opt.json'), 'w') as f: json.dump(opts_dict, f, indent=4, sort_keys=True) coach = Coach(opts) coach.train()
def test_fix_finalizer_func_only(testdir): testdir.makepyfile("\n import time, pytest\n\n class TestFoo:\n\n @pytest.fixture\n def fix(self, request):\n print('fix setup')\n def fin():\n print('fix finaliser')\n time.sleep(0.1)\n request.addfinalizer(fin)\n\n @pytest.mark.timeout(func_only=True)\n def test_foo(self, fix):\n pass\n ") result = testdir.runpytest('--timeout=1', '-s') assert (result.ret == 0) assert ('Timeout' not in (result.stdout.str() + result.stderr.str()))
def test_multiple_channel_states(chain_state, token_network_state, channel_properties): open_block_number = 10 open_block_hash = factories.make_block_hash() pseudo_random_generator = random.Random() (properties, pkey) = channel_properties channel_state = factories.create(properties) channel_new_state_change = ContractReceiveChannelNew(transaction_hash=factories.make_transaction_hash(), channel_state=channel_state, block_number=open_block_number, block_hash=open_block_hash) channel_new_iteration = token_network.state_transition(token_network_state=token_network_state, state_change=channel_new_state_change, block_number=open_block_number, block_hash=open_block_hash, pseudo_random_generator=pseudo_random_generator) lock_amount = 30 lock_expiration = 20 lock_secret = keccak(b'test_end_state') lock_secrethash = sha256(lock_secret).digest() lock = HashTimeLockState(lock_amount, lock_expiration, lock_secrethash) mediated_transfer = make_receive_transfer_mediated(channel_state=channel_state, privkey=pkey, nonce=1, transferred_amount=0, lock=lock) from_hop = factories.make_hop_from_channel(channel_state) init_target = ActionInitTarget(from_hop=from_hop, transfer=mediated_transfer, balance_proof=mediated_transfer.balance_proof, sender=mediated_transfer.balance_proof.sender) node.state_transition(chain_state, init_target) closed_block_number = (open_block_number + 10) closed_block_hash = factories.make_block_hash() channel_close_state_change = ContractReceiveChannelClosed(transaction_hash=factories.make_transaction_hash(), transaction_from=channel_state.partner_state.address, canonical_identifier=channel_state.canonical_identifier, block_number=closed_block_number, block_hash=closed_block_hash) channel_closed_iteration = token_network.state_transition(token_network_state=channel_new_iteration.new_state, state_change=channel_close_state_change, block_number=closed_block_number, block_hash=closed_block_hash, pseudo_random_generator=pseudo_random_generator) settle_block_number = ((closed_block_number + channel_state.settle_timeout) + 1) channel_settled_state_change = ContractReceiveChannelSettled(transaction_hash=factories.make_transaction_hash(), canonical_identifier=channel_state.canonical_identifier, block_number=settle_block_number, block_hash=factories.make_block_hash(), our_onchain_locksroot=factories.make_32bytes(), our_transferred_amount=0, partner_onchain_locksroot=LOCKSROOT_OF_NO_LOCKS, partner_transferred_amount=0) channel_settled_iteration = token_network.state_transition(token_network_state=channel_closed_iteration.new_state, state_change=channel_settled_state_change, block_number=closed_block_number, block_hash=closed_block_hash, pseudo_random_generator=pseudo_random_generator) token_network_state_after_settle = channel_settled_iteration.new_state ids_to_channels = token_network_state_after_settle.channelidentifiers_to_channels assert (len(ids_to_channels) == 1) assert (channel_state.identifier in ids_to_channels) new_channel_properties = factories.create_properties(factories.NettingChannelStateProperties(canonical_identifier=factories.make_canonical_identifier()), defaults=properties) new_channel_state = factories.create(new_channel_properties) channel_new_state_change = ContractReceiveChannelNew(transaction_hash=factories.make_transaction_hash(), channel_state=new_channel_state, block_number=(closed_block_number + 1), block_hash=factories.make_block_hash()) channel_new_iteration = token_network.state_transition(token_network_state=token_network_state, state_change=channel_new_state_change, block_number=open_block_number, block_hash=open_block_hash, pseudo_random_generator=pseudo_random_generator) token_network_state_after_new_open = channel_new_iteration.new_state ids_to_channels = token_network_state_after_new_open.channelidentifiers_to_channels assert (len(ids_to_channels) == 2) assert (channel_state.identifier in ids_to_channels)
def _list_to_acl(entry_list, map_names=1): def char_to_acltag(typechar): if (typechar == 'U'): return posix1e.ACL_USER_OBJ elif (typechar == 'u'): return posix1e.ACL_USER elif (typechar == 'G'): return posix1e.ACL_GROUP_OBJ elif (typechar == 'g'): return posix1e.ACL_GROUP elif (typechar == 'M'): return posix1e.ACL_MASK elif (typechar == 'O'): return posix1e.ACL_OTHER else: raise ValueError('Unknown ACL character {achar} (must be one of [UuGgMO]).'.format(achar=typechar)) def warn_drop(name): global dropped_acl_names if Globals.never_drop_acls: log.Log.FatalError('--never-drop-acls specified but cannot map ACL name {an}'.format(an=name)) if (name in dropped_acl_names): return log.Log('ACL name {an} not found on system, dropping entry. Further ACL entries dropped with this name will not trigger further warnings'.format(an=name), log.WARNING) dropped_acl_names[name] = name acl = posix1e.ACL() for (typechar, owner_pair, perms) in entry_list: id = None if owner_pair: if map_names: if (typechar == 'u'): id = map_owners.map_acl_user(*owner_pair) elif (typechar == 'g'): id = map_owners.map_acl_group(*owner_pair) else: raise ValueError("Type '{tc}' must be one of 'u' or 'g'.".format(tc=typechar)) if (id is None): warn_drop(owner_pair[1]) continue else: assert (owner_pair[0] is not None), "First owner can't be None with type={tc}, owner pair={own}, perms={perms}".format(tc=typechar, own=owner_pair, perms=perms) id = owner_pair[0] entry = posix1e.Entry(acl) entry.tag_type = char_to_acltag(typechar) if (id is not None): entry.qualifier = id entry.permset.read = (perms >> 2) entry.permset.write = ((perms >> 1) & 1) entry.permset.execute = (perms & 1) return acl
def test_example(): parser = ArgumentParser() action = make_action(nargs=1) assert (interactive.example(parser, action, {}).strip() == '# --option OPTION') assert (interactive.example(parser, action, {'option': 32}).strip() == '--option 32') action = make_action(nargs=3) example = interactive.example(parser, action, {}).strip() assert (example == '# --option OPTION OPTION OPTION') example = interactive.example(parser, action, {'option': [32, 21, 5]}).strip() assert (example == '--option 32 21 5') action = make_action(nargs='*') example = interactive.example(parser, action, {}).strip() expected = ('# --option [OPTION [OPTION ...]]', '# --option [OPTION ...]') assert (example in expected) action = make_action(nargs='+') example = interactive.example(parser, action, {}).strip() assert (example == '# --option OPTION [OPTION ...]') action = make_action(nargs='?') assert (interactive.example(parser, action, {}).strip() == '# --option [OPTION]') action = argparse.Action([], 'arg', nargs=1, metavar='ARGUMENT') assert (interactive.example(parser, action, {}).strip() == '# ARGUMENT') assert (interactive.example(parser, action, {'arg': 'value'}).strip() == 'value')
def test_timeheadwaycondition(): cond = OSC.TimeHeadwayCondition('Ego', 20, OSC.Rule.equalTo, True, False) prettyprint(cond.get_element()) cond2 = OSC.TimeHeadwayCondition('Ego', 20, OSC.Rule.equalTo, True, False) cond3 = OSC.TimeHeadwayCondition('Ego', 20, OSC.Rule.equalTo, True, True, routing_algorithm=OSC.RoutingAlgorithm.assignedRoute) assert (cond == cond2) assert (cond != cond3) prettyprint(cond3.get_element()) cond4 = OSC.TimeHeadwayCondition.parse(cond.get_element()) assert (cond == cond4) assert (version_validation('EntityCondition', cond, 0) == ValidationResponse.OK) assert (version_validation('EntityCondition', cond, 1) == ValidationResponse.OK) assert (version_validation('EntityCondition', cond, 2) == ValidationResponse.OK) assert (version_validation('EntityCondition', cond3, 0) == ValidationResponse.OSC_VERSION) assert (version_validation('EntityCondition', cond3, 1) == ValidationResponse.OSC_VERSION) assert (version_validation('EntityCondition', cond3, 2) == ValidationResponse.OK)
def test_jaynes_cummings_zero_temperature_spectral_callable(): N = 10 a = qutip.tensor(qutip.destroy(N), qutip.qeye(2)) sp = qutip.tensor(qutip.qeye(N), qutip.sigmap()) psi0 = qutip.ket2dm(qutip.tensor(qutip.basis(N, 1), qutip.basis(2, 0))) kappa = 0.05 a_ops = [((a + a.dag()), (lambda w: (kappa * (w >= 0))))] e_ops = [(a.dag() * a), (sp.dag() * sp)] w0 = ((1.0 * 2) * np.pi) g = ((0.05 * 2) * np.pi) times = np.linspace(0, (((2 * 2) * np.pi) / g), 1000) c_ops = [(np.sqrt(kappa) * a)] H = ((((w0 * a.dag()) * a) + ((w0 * sp.dag()) * sp)) + ((g * (a + a.dag())) * (sp + sp.dag()))) me = qutip.mesolve(H, psi0, times, c_ops, e_ops) brme = brmesolve(H, psi0, times, a_ops, e_ops) for (me_expectation, brme_expectation) in zip(me.expect, brme.expect): np.testing.assert_allclose(me_expectation, brme_expectation, atol=0.05)
class ContextFormatter(ABC): @abstractmethod def get_formatters(self) -> MutableMapping: ... @classmethod def format_path(cls, path: str, modifier: str) -> str: if (not modifier): return os.path.normpath(path) modifiers = modifier.split(':')[::(- 1)] while (modifiers and (modifiers[(- 1)] == 'parent')): path = os.path.dirname(path) modifiers.pop() if (not modifiers): return path if (len(modifiers) > 1): message = f"Expected a single path modifier and instead got: {', '.join(reversed(modifiers))}" raise ValueError(message) modifier = modifiers[0] if (modifier == 'uri'): return path_to_uri(path) if (modifier == 'real'): return os.path.realpath(path) message = f'Unknown path modifier: {modifier}' raise ValueError(message)
class DescribeInlineShapes(): def it_knows_how_many_inline_shapes_it_contains(self, inline_shapes_fixture): (inline_shapes, expected_count) = inline_shapes_fixture assert (len(inline_shapes) == expected_count) def it_can_iterate_over_its_InlineShape_instances(self, inline_shapes_fixture): (inline_shapes, inline_shape_count) = inline_shapes_fixture actual_count = 0 for inline_shape in inline_shapes: assert isinstance(inline_shape, InlineShape) actual_count += 1 assert (actual_count == inline_shape_count) def it_provides_indexed_access_to_inline_shapes(self, inline_shapes_fixture): (inline_shapes, inline_shape_count) = inline_shapes_fixture for idx in range((- inline_shape_count), inline_shape_count): inline_shape = inline_shapes[idx] assert isinstance(inline_shape, InlineShape) def it_raises_on_indexed_access_out_of_range(self, inline_shapes_fixture): (inline_shapes, inline_shape_count) = inline_shapes_fixture too_low = ((- 1) - inline_shape_count) with pytest.raises(IndexError, match='inline shape index \\[-3\\] out of range'): inline_shapes[too_low] too_high = inline_shape_count with pytest.raises(IndexError, match='inline shape index \\[2\\] out of range'): inline_shapes[too_high] def it_knows_the_part_it_belongs_to(self, inline_shapes_with_parent_): (inline_shapes, parent_) = inline_shapes_with_parent_ part = inline_shapes.part assert (part is parent_.part) @pytest.fixture def inline_shapes_fixture(self): body = element('w:body/w:p/(w:r/w:drawing/wp:inline, w:r/w:drawing/wp:inline)') inline_shapes = InlineShapes(body, None) expected_count = 2 return (inline_shapes, expected_count) @pytest.fixture def inline_shapes_with_parent_(self, request): parent_ = loose_mock(request, name='parent_') inline_shapes = InlineShapes(None, parent_) return (inline_shapes, parent_)
def get_current_component_version_from_source_files(component: str, version_file: Optional[str]=None) -> str: all_version_files = get_component_version_files(component, abs_path=True) if version_file: all_version_files = {version_file: all_version_files[version_file]} version = '' if all_version_files.get(HELM_VERSION_FILE): with open(all_version_files.get(HELM_VERSION_FILE)) as f: chart_yaml = yaml.safe_load(f.read()) version = chart_yaml['version'] elif all_version_files.get(PYTHON_VERSION_FILE): spec = importlib.util.spec_from_file_location(component, all_version_files.get(PYTHON_VERSION_FILE)) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) version = module.__version__ elif all_version_files.get(JAVASCRIPT_VERSION_FILE): with open(all_version_files.get(JAVASCRIPT_VERSION_FILE)) as f: package_json = json.loads(f.read()) version = package_json['version'] elif all_version_files.get(OPENAPI_VERSION_FILE): with open(all_version_files.get(OPENAPI_VERSION_FILE)) as f: openapi_json = json.loads(f.read()) version = openapi_json.get('info', {}).get('version') return version