code
stringlengths
281
23.7M
class Emojis(commands.Cog):
    """Cog exposing commands that count and describe the guild's custom emojis."""

    def __init__(self, bot: Bot) -> None:
        self.bot = bot

    # NOTE(review): restored @staticmethod -- the original body never uses self/cls
    # and callers invoke it as self.embed_builder(...).
    @staticmethod
    def embed_builder(emoji: dict) -> tuple[Embed, list[str]]:
        """Build the "Emoji Count" embed plus one description line per category.

        `emoji` maps category name -> list of emojis; a random emoji from each
        category illustrates the line (or the thumbnail when there is only one
        category).
        """
        embed = Embed(
            color=Colours.orange,
            title='Emoji Count',
            timestamp=datetime.now(tz=UTC),
        )
        msg = []
        if len(emoji) == 1:
            # Single category: describe it and use a random member as thumbnail.
            for category_name, category_emojis in emoji.items():
                if len(category_emojis) == 1:
                    msg.append(f'There is **{len(category_emojis)}** emoji in the **{category_name}** category.')
                else:
                    msg.append(f'There are **{len(category_emojis)}** emojis in the **{category_name}** category.')
                embed.set_thumbnail(url=random.choice(category_emojis).url)
        else:
            # Multiple categories: prefix each line with a sample emoji mention,
            # using the animated mention syntax when required.
            for category_name, category_emojis in emoji.items():
                emoji_choice = random.choice(category_emojis)
                if len(category_emojis) > 1:
                    emoji_info = f'There are **{len(category_emojis)}** emojis in the **{category_name}** category.'
                else:
                    emoji_info = f'There is **{len(category_emojis)}** emoji in the **{category_name}** category.'
                if emoji_choice.animated:
                    msg.append(f'<a:{emoji_choice.name}:{emoji_choice.id}> {emoji_info}')
                else:
                    msg.append(f'<:{emoji_choice.name}:{emoji_choice.id}> {emoji_info}')
        return embed, msg

    @staticmethod
    def generate_invalid_embed(emojis: list[Emoji]) -> tuple[Embed, list[str]]:
        """Build an error embed listing the valid emoji categories.

        Categories are derived from the prefix of each emoji name before the
        first underscore.
        """
        embed = Embed(color=Colours.soft_red, title=random.choice(ERROR_REPLIES))
        msg = []
        emoji_dict = defaultdict(list)
        for emoji in emojis:
            emoji_dict[emoji.name.split('_')[0]].append(emoji)
        error_comp = ', '.join(emoji_dict)
        msg.append(f'These are the valid emoji categories:\n```\n{error_comp}\n```')
        return embed, msg

    # NOTE(review): decorators below reconstructed from the stripped fragments
    # "(name='emoji', ...)" and "_group.command(...)" -- confirm against history.
    @commands.group(name='emoji', invoke_without_command=True)
    async def emoji_group(self, ctx: commands.Context, emoji: Emoji | None) -> None:
        """Show info for `emoji` when given, otherwise fall back to the help command."""
        if emoji is not None:
            await ctx.invoke(self.info_command, emoji)
        else:
            await self.bot.invoke_help_command(ctx)

    @emoji_group.command(name='count', aliases=('c',))
    async def count_command(self, ctx: commands.Context, *, category_query: str | None = None) -> None:
        """Send per-category emoji counts, optionally filtered by `category_query`."""
        emoji_dict = defaultdict(list)
        if not ctx.guild.emojis:
            await ctx.send('No emojis found.')
            return
        log.trace(f"Emoji Category {'' if category_query else 'not '}provided by the user.")
        for emoji in ctx.guild.emojis:
            emoji_category = emoji.name.split('_')[0]
            # Skip emojis whose category is not part of the requested query.
            if category_query is not None and emoji_category not in category_query:
                continue
            emoji_dict[emoji_category].append(emoji)
        if not emoji_dict:
            log.trace('Invalid name provided by the user')
            embed, msg = self.generate_invalid_embed(ctx.guild.emojis)
        else:
            embed, msg = self.embed_builder(emoji_dict)
        await LinePaginator.paginate(lines=msg, ctx=ctx, embed=embed)

    @emoji_group.command(name='info', aliases=('i',))
    async def info_command(self, ctx: commands.Context, emoji: Emoji) -> None:
        """Send an embed with name, creation time, date and id of `emoji`."""
        emoji_information = Embed(
            title=f'Emoji Information: {emoji.name}',
            description=textwrap.dedent(f'''
                **Name:** {emoji.name}
                **Created:** {time_since(emoji.created_at.replace(tzinfo=None), precision='hours')}
                **Date:** {datetime.strftime(emoji.created_at.replace(tzinfo=None), '%d/%m/%Y')}
                **ID:** {emoji.id}
            '''),
            color=Color.og_blurple(),
            url=str(emoji.url),
        ).set_thumbnail(url=emoji.url)
        await ctx.send(embed=emoji_information)
def test_user_can_vote_if_has_ticket_for_a_previous_conference(user, conference, mocker, included_event_factory):
    """A user holding a ticket for an included past event is allowed to vote."""

    def fake_has_ticket(email, event_organizer, event_slug, additional_events):
        # Accept only when the configured included event is forwarded correctly.
        first = additional_events[0]
        return first['organizer_slug'] == 'organizer-slug' and first['event_slug'] == 'event-slug'

    mocker.patch('voting.helpers.user_has_admission_ticket', side_effect=fake_has_ticket)
    included_event_factory(
        conference=conference,
        pretix_organizer_id='organizer-slug',
        pretix_event_id='event-slug',
    )

    assert check_if_user_can_vote(user, conference) is True
def test_extensions_only(hatch, temp_dir, helpers, config_file):
    """Building with --ext runs only the wheel hooks and leaves dist/ empty."""
    config_file.model.template.plugins['default']['src-layout'] = False
    config_file.save()

    project_name = 'My.App'
    with temp_dir.as_cwd():
        new_result = hatch('new', project_name)
    assert new_result.exit_code == 0, new_result.output

    path = temp_dir / 'my-app'

    # Install a custom build hook that creates an extension artifact in place.
    build_script = path / DEFAULT_BUILD_SCRIPT
    build_script.write_text(helpers.dedent("\n import pathlib\n\n from hatchling.builders.hooks.plugin.interface import BuildHookInterface\n\n class CustomHook(BuildHookInterface):\n def initialize(self, version, build_data):\n if self.target_name == 'wheel':\n pathlib.Path('my_app', 'lib.so').touch()\n "))

    project = Project(path)
    config = dict(project.raw_config)
    config['tool']['hatch']['build'] = {'hooks': {'custom': {'path': build_script.name}}}
    project.save_config(config)

    with path.as_cwd():
        build_result = hatch('-v', 'build', '--ext')
    assert build_result.exit_code == 0, build_result.output

    # No wheel/sdist is written; only the in-place extension file appears.
    build_directory = path / 'dist'
    assert build_directory.is_dir()
    assert len(list(build_directory.iterdir())) == 0
    assert (path / 'my_app' / 'lib.so').is_file()

    assert build_result.output == helpers.dedent('\n wheel \n Setting up build environment\n Building `wheel` version `standard`\n Only ran build hooks for `wheel` version `standard`\n ')
class Observable():
    """Minimal subject of an observer pattern: holds observers and pushes values."""

    def __init__(self):
        self.observers: List[Observer] = []

    def setChanged(self):
        # Kept as a no-op for compatibility with the classic Observable API.
        pass

    def notifyObservers(self, value):
        """Call update(self, value) on every observer, in registration order."""
        for watcher in self.observers:
            watcher.update(self, value)

    def addObserver(self, observer: Observer):
        """Register `observer`; duplicates get notified once per registration."""
        self.observers.append(observer)
class Vector2():
    """A mutable 2-D vector with arithmetic, comparison and geometry helpers.

    Most binary operators also accept any length-2 sequence on the other side.
    Mixing a Vector2 with a different Vector2 subclass (e.g. Point2, defined
    elsewhere in this module) yields a Point2 for + and -.
    """

    # Fixed attribute set keeps instances small (no per-instance __dict__).
    __slots__ = ['x', 'y']
    # Instances are mutable, so they are explicitly unhashable.
    __hash__ = None

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __copy__(self):
        return self.__class__(self.x, self.y)
    copy = __copy__

    def __repr__(self):
        return ('Vector2(%.2f, %.2f)' % (self.x, self.y))

    def __eq__(self, other):
        # Component-wise comparison against another Vector2 or any 2-sequence.
        if isinstance(other, Vector2):
            return ((self.x == other.x) and (self.y == other.y))
        else:
            assert (hasattr(other, '__len__') and (len(other) == 2))
            return ((self.x == other[0]) and (self.y == other[1]))

    def __ne__(self, other):
        return (not self.__eq__(other))

    def __nonzero__(self):
        # Python 2 truthiness hook: true when the vector is non-zero.
        return bool(((self.x != 0) or (self.y != 0)))

    def __len__(self):
        return 2

    def __getitem__(self, key):
        # Index access: v[0] == v.x, v[1] == v.y; supports slices via tuple.
        return (self.x, self.y)[key]

    def __setitem__(self, key, value):
        items = [self.x, self.y]
        items[key] = value
        (self.x, self.y) = items

    def __iter__(self):
        return iter((self.x, self.y))

    def __getattr__(self, name):
        # Swizzle access: v.xy, v.yx, ... assembled from the letters of `name`.
        try:
            return tuple([(self.x, self.y)['xy'.index(c)] for c in name])
        except ValueError:
            raise AttributeError(name)

    def __add__(self, other):
        if isinstance(other, Vector2):
            # Vector + Vector -> Vector2; Vector + other subclass -> Point2.
            if (self.__class__ is other.__class__):
                _class = Vector2
            else:
                _class = Point2
            return _class((self.x + other.x), (self.y + other.y))
        else:
            assert (hasattr(other, '__len__') and (len(other) == 2))
            return Vector2((self.x + other[0]), (self.y + other[1]))
    __radd__ = __add__

    def __iadd__(self, other):
        # In-place addition mutates this instance and returns it.
        if isinstance(other, Vector2):
            self.x += other.x
            self.y += other.y
        else:
            self.x += other[0]
            self.y += other[1]
        return self

    def __sub__(self, other):
        if isinstance(other, Vector2):
            if (self.__class__ is other.__class__):
                _class = Vector2
            else:
                _class = Point2
            return _class((self.x - other.x), (self.y - other.y))
        else:
            assert (hasattr(other, '__len__') and (len(other) == 2))
            return Vector2((self.x - other[0]), (self.y - other[1]))

    def __rsub__(self, other):
        if isinstance(other, Vector2):
            return Vector2((other.x - self.x), (other.y - self.y))
        else:
            # NOTE(review): this branch reads other.x on a plain sequence while
            # indexing self -- looks inconsistent (other[0] - self.x expected?);
            # preserved as-is, confirm against upstream.
            assert (hasattr(other, '__len__') and (len(other) == 2))
            return Vector2((other.x - self[0]), (other.y - self[1]))

    def __mul__(self, other):
        # Scalar multiplication only (int/float).
        assert (type(other) in (int, float))
        return Vector2((self.x * other), (self.y * other))
    __rmul__ = __mul__

    def __imul__(self, other):
        assert (type(other) in (int, float))
        self.x *= other
        self.y *= other
        return self

    def __div__(self, other):
        # Python 2 division hooks; note op.div does not exist on Python 3's
        # operator module, so these would fail there if ever invoked.
        assert (type(other) in (int, float))
        return Vector2(op.div(self.x, other), op.div(self.y, other))

    def __rdiv__(self, other):
        assert (type(other) in (int, float))
        return Vector2(op.div(other, self.x), op.div(other, self.y))

    def __floordiv__(self, other):
        assert (type(other) in (int, float))
        return Vector2(op.floordiv(self.x, other), op.floordiv(self.y, other))

    def __rfloordiv__(self, other):
        assert (type(other) in (int, float))
        return Vector2(op.floordiv(other, self.x), op.floordiv(other, self.y))

    def __truediv__(self, other):
        assert (type(other) in (int, float))
        return Vector2(op.truediv(self.x, other), op.truediv(self.y, other))

    def __rtruediv__(self, other):
        assert (type(other) in (int, float))
        return Vector2(op.truediv(other, self.x), op.truediv(other, self.y))

    def __neg__(self):
        return Vector2((- self.x), (- self.y))
    __pos__ = __copy__

    def __abs__(self):
        # Euclidean length; also exposed as .magnitude().
        return math.sqrt(((self.x ** 2) + (self.y ** 2)))
    magnitude = __abs__

    def magnitude_squared(self):
        return ((self.x ** 2) + (self.y ** 2))

    def normalize(self):
        # In-place normalization; a zero vector is left unchanged.
        d = self.magnitude()
        if d:
            self.x /= d
            self.y /= d
        return self

    def normalized(self):
        # Non-mutating variant; returns a copy when the vector is zero.
        d = self.magnitude()
        if d:
            return Vector2((self.x / d), (self.y / d))
        return self.copy()

    def dot(self, other):
        assert isinstance(other, Vector2)
        return ((self.x * other.x) + (self.y * other.y))

    def cross(self):
        # Perpendicular vector (90-degree clockwise rotation of (x, y)).
        return Vector2(self.y, (- self.x))

    def reflect(self, normal):
        # Reflect about the line with the given normal; assumes `normal` is a
        # unit vector -- TODO confirm callers pass normalized vectors.
        assert isinstance(normal, Vector2)
        d = (2 * ((self.x * normal.x) + (self.y * normal.y)))
        return Vector2((self.x - (d * normal.x)), (self.y - (d * normal.y)))

    def angle(self, other):
        """Return the unsigned angle to `other` in radians."""
        return math.acos((self.dot(other) / (self.magnitude() * other.magnitude())))

    def project(self, other):
        """Return the projection of this vector onto `other`."""
        n = other.normalized()
        return (self.dot(n) * n)
def _process_attr_input_field(field: InputField, param_name_to_base_field: Dict[(str, BaseField)], has_custom_init: bool):
    """Merge an attrs __init__ parameter field with the class-level base field of the same id.

    Parameters that do not correspond to a declared attrs attribute are
    returned unchanged.
    """
    try:
        base_field = param_name_to_base_field[field.id]
    except KeyError:
        # No matching attrs attribute (e.g. an extra __init__ parameter of a
        # custom initializer) -- nothing to merge.
        return field
    if ((not has_custom_init) and (field.type == Any) and (base_field.type == NoneType)):
        # presumably: an untyped generated-__init__ parameter backed by a
        # None-typed attribute is narrowed to NoneType -- TODO confirm.
        field = replace(field, type=NoneType)
    # Take the base field's default when the parameter default is the attrs
    # NOTHING sentinel (i.e. no usable default was captured from the signature);
    # metadata and id always come from the base field.
    return replace(field, default=(base_field.default if (isinstance(field.default, DefaultValue) and (field.default.value is attrs.NOTHING)) else field.default), metadata=base_field.metadata, id=base_field.id)
# NOTE(review): the source began with a bare ".skipif(...)" fragment (a syntax
# error); restored the conventional @pytest.mark.skipif decorator.
@pytest.mark.skipif((not RAY_DATASET_AVAILABLE), reason='Ray datasets are not available in this version of Ray')
def test_simple_ray_dataset(start_client_server_5_cpus):
    """Smoke-test the simple Ray-dataset example against a client-mode cluster."""
    # The fixture connects a Ray client; the example relies on that connection.
    assert ray.util.client.ray.is_connected()
    from xgboost_ray.examples.simple_ray_dataset import main
    main(cpus_per_actor=1, num_actors=4)
def test_debug_not_used(django_pytester: DjangoPytester) -> None:
    """Running under --pdb must not invoke TestCase.debug()."""
    django_pytester.create_test_module('\n from django.test import TestCase\n\n pre_setup_count = 0\n\n\n class TestClass1(TestCase):\n\n def debug(self):\n assert 0, "should not be called"\n\n def test_method(self):\n pass\n ')

    outcome = django_pytester.runpytest_subprocess('--pdb')

    outcome.stdout.fnmatch_lines(['*= 1 passed*'])
    assert outcome.ret == 0
def test_mspn_backbone():
    """MSPN rejects invalid configs and emits two feature maps per stage."""
    # Invalid constructor arguments must be rejected up front.
    for bad_kwargs in (
        {'num_stages': 0},
        {'num_units': 1},
        {'num_units': 2, 'num_blocks': [2, 2, 2]},
    ):
        with pytest.raises(AssertionError):
            MSPN(**bad_kwargs)

    net = MSPN(num_stages=2, num_units=2, num_blocks=[2, 2])
    net.init_weights()
    net.train()

    out = net(torch.randn(1, 3, 511, 511))

    # Two stages, each producing two 256-channel maps at 64x64 and 128x128.
    assert len(out) == 2
    expected_shapes = [torch.Size([1, 256, 64, 64]), torch.Size([1, 256, 128, 128])]
    for stage_feats in out:
        assert len(stage_feats) == 2
        assert [f.shape for f in stage_feats] == expected_shapes
# NOTE(review): the source began with two bare ".parametrize(...)" fragments
# (syntax errors); restored the conventional @pytest.mark.parametrize decorators.
@pytest.mark.parametrize('spoiler', [False, True])
@pytest.mark.parametrize('layout', [{}, {'sky_temple_keys': LayoutSkyTempleKeyMode.ALL_GUARDIANS}, {'menu_mod': True, 'warp_to_start': False}])
def test_round_trip(spoiler: bool, layout: dict, default_echoes_preset, mocker):
    """GeneratorParameters must survive an as_bytes/from_bytes round trip."""
    # Pin uuid4 so the preset reconstructed from bytes compares equal.
    random_uuid = uuid.uuid4()
    mocker.patch('uuid.uuid4', return_value=random_uuid)
    preset = Preset(
        name=f'{default_echoes_preset.game.long_name} Custom',
        description='A customized preset.',
        uuid=random_uuid,
        game=default_echoes_preset.game,
        configuration=dataclasses.replace(default_echoes_preset.configuration, **layout),
    )
    params = GeneratorParameters(seed_number=1000, spoiler=spoiler, presets=[preset])

    after = GeneratorParameters.from_bytes(params.as_bytes)

    assert params == after
class EntryCreate(LoginRequiredMixin, EntryCreateMixin, FormView):
    """Entry creation view: renders the form and publishes new entries."""

    template_name = 'dictionary/edit/entry_create.html'

    def dispatch(self, request, *args, **kwargs):
        # Expose the submitted topic title to templates (empty string on GET).
        self.extra_context = {'title': self.request.POST.get('title', '')}
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Add the author's five most recently touched drafts (last 24h)."""
        context = super().get_context_data(**kwargs)
        # Drafts created OR edited within 24h, newest activity first; only the
        # columns the template needs are fetched.
        context['recent_drafts'] = Entry.objects_all.filter((Q(date_created__gte=time_threshold(hours=24)) | Q(date_edited__gte=time_threshold(hours=24))), is_draft=True, author=self.request.user).select_related('topic').only('topic__title', 'date_created', 'date_edited').alias(last_edited=Coalesce(F('date_edited'), F('date_created'))).order_by('-last_edited')[:5]
        return context

    def form_valid(self, form):
        # When not publishing an existing draft (no numeric pub_draft_pk),
        # resolve the target topic from the submitted title before the mixin
        # handles the actual entry creation.
        if (not self.request.POST.get('pub_draft_pk', '').isdigit()):
            self.topic = Topic.objects.get_or_pseudo(unicode_string=self.extra_context.get('title'))
        return super().form_valid(form)
def test_get_macro_completion_items(base_app):
    """Completion items must mirror the registered macros and their values."""
    run_cmd(base_app, 'macro create foo !echo foo')
    run_cmd(base_app, 'macro create bar !echo bar')

    items = base_app._get_macro_completion_items()

    assert len(items) == len(base_app.macros)
    for item in items:
        assert item in base_app.macros
        # Each item's description is the macro's command, trailing space stripped.
        assert item.description.rstrip() == base_app.macros[item].value
class MultiResidual(nn.Module):
    """Stack of scaled residual branches that all consume the same input.

    forward computes out_k = ReLU(out_{k-1} + scale * block_k(x)) starting
    from out_0 = x; note every branch sees the original input x, not the
    running sum.
    """

    def __init__(self, scale, res_block, num_blocks):
        super(MultiResidual, self).__init__()
        assert (num_blocks >= 1)
        self.scale = scale
        # res_block is a factory: instantiate one branch per block.
        self.res_blocks = nn.ModuleList(res_block() for _ in range(num_blocks))
        self.activ = nn.ReLU(inplace=False)

    def forward(self, x):
        result = x
        for branch in self.res_blocks:
            result = self.activ(result + self.scale * branch(x))
        return result
def test_method_dynamic_instance_attr_2() -> None:
    """An attribute assigned only inside a method still infers through a call."""
    node = builder.extract_node('\n class A:\n # Note: no initializer, so the only assignment happens in get_x\n\n def get_x(self, x):\n self.x = x\n return self.x\n\n A().get_x(1) #\n ')
    assert isinstance(node, nodes.NodeNG)

    results = node.inferred()

    assert len(results) == 1
    constant = results[0]
    assert isinstance(constant, nodes.Const)
    assert constant.value == 1
def read_wid1(f):
    """Read one GSE1 WID1 waveform header (two 80-column lines) from `f`.

    Returns a tuple of all parsed header values, starting with the absolute
    start time `tmin`. Raises EOF() on a blank line and GSE1LoadError when the
    'WID1' marker is missing.
    """
    line = f.readline()
    if (not line.strip()):
        # A blank line terminates the sequence of WID1 sections.
        raise EOF()
    # First header line: marker, start time, sample count, station/channel ids,
    # sample rate and data-format fields, parsed by fixed column widths.
    (wid1, stmin, imilli, nsamples, sta, channel_id, channel_name, sample_rate, system_type, data_format, diff_flag) = util.unpack_fixed('a4,x1,a17,x1,i3,x1,i8,x1,a6,x1,a8,x1,a2,x1,f11,x1,a6,x1,a4,x1,i1', line[:80])
    if (wid1 != 'WID1'):
        raise GSE1LoadError('"WID1" marker expected but not found.')
    # Start time is whole seconds (year + day-of-year format) plus a separate
    # millisecond field.
    tmin = (util.str_to_time(stmin, format='%Y%j %H %M %S') + (0.001 * imilli))
    # Second header line: calibration, station coordinates and beam parameters.
    line = f.readline()
    (gain, units, calib_period, lat, lon, elevation, depth, beam_azimuth, beam_slowness, horizontal_orientation) = util.unpack_fixed('f9,i1,f7,x1,f9,x1,f9,x1,f9,x1,f9,x1,f7,x1,f7,x1,f6', line[:80])
    return (tmin, nsamples, sta, channel_id, channel_name, sample_rate, system_type, data_format, diff_flag, gain, units, calib_period, lat, lon, elevation, depth, beam_azimuth, beam_slowness, horizontal_orientation)
class PyAnalogClockPlugin(QPyDesignerCustomWidgetPlugin):
    """Qt Designer plugin exposing the PyAnalogClock custom widget."""

    def __init__(self, parent=None):
        super(PyAnalogClockPlugin, self).__init__(parent)
        self.initialized = False

    def initialize(self, core):
        # Designer may call this repeatedly; only the first call matters.
        if not self.initialized:
            self.initialized = True

    def isInitialized(self):
        return self.initialized

    def createWidget(self, parent):
        return PyAnalogClock(parent)

    def name(self):
        return 'PyAnalogClock'

    def group(self):
        return 'PyQt Examples'

    def icon(self):
        return QIcon(_logo_pixmap)

    def toolTip(self):
        return ''

    def whatsThis(self):
        return ''

    def isContainer(self):
        return False

    def domXml(self):
        # Default property values shown in Designer's property editor.
        return '<widget class="PyAnalogClock" name="analogClock">\n <property name="toolTip">\n <string>The current time</string>\n </property>\n <property name="whatsThis">\n <string>The analog clock widget displays the current time.</string>\n </property>\n</widget>\n'

    def includeFile(self):
        return 'analogclock'
def _make_decorator(module: nn.Module, fun_name: str) -> Callable:
    """Build a stateless-aware replacement for ``module.<fun_name>``.

    The returned function accepts an extra optional ``params`` tensordict
    (positionally or as a keyword) and temporarily swaps those parameters into
    the module around the wrapped call when the module is stateless or params
    were supplied.
    """
    fun = getattr(module, fun_name)
    from tensordict.nn.common import TensorDictModuleBase
    # NOTE(review): the bare expression below has no effect and looks like a
    # stripped decorator (presumably @functools.wraps(fun)) -- confirm upstream.
    (fun)

    def new_fun(self, *args, **kwargs):
        _is_stateless = self.__dict__.get('_is_stateless', False)
        params = kwargs.pop('params', None)
        if isinstance(self, TensorDictModuleBase):
            # Two tensordict positionals mean (input, params): peel params off.
            if ((params is None) and (len(args) == 2) and all((_is_tensor_collection(item.__class__) for item in args))):
                params = args[1]
                args = args[:1]
        elif ((len(args) and _is_tensor_collection(args[0].__class__)) or ('tensordict' in kwargs)):
            warnings.warn('You are passing a tensordict/tensorclass instance to a module that does not inherit from TensorDictModuleBase. This may lead to unexpected behaviours with functional calls.')
        if (_is_stateless or (params is not None)):
            if (params is None):
                # Stateless call without explicit params: the last positional
                # argument is taken to be the parameter tensordict.
                params = args[(- 1)]
                args = args[:(- 1)]
            # Swap the given params in, keeping the old ones for restoration.
            old_params = _assign_params(self, params, make_stateless=False, return_old_tensordict=True)
            try:
                out = getattr(type(self), fun_name)(self, *args, **kwargs)
            finally:
                # Always restore previous parameters, even if the call raised.
                _assign_params(self, old_params, make_stateless=_is_stateless, return_old_tensordict=False)
            return out
        else:
            try:
                return getattr(type(self), fun_name)(self, *args, **kwargs)
            except TypeError as err:
                # Detect the common mistake of passing params positionally to a
                # stateful module and raise a clearer error for it.
                pattern = '.*takes \\d+ positional arguments but \\d+ were given|got multiple values for argument'
                pattern = re.compile(pattern)
                if (pattern.search(str(err)) and isinstance(args[(- 1)], TensorDictBase)):
                    raise TypeError('It seems you tried to provide the parameters as an argument to the module when the module was not stateless. If this is the case, this error should vanish by providing the parameters using the ``module(..., params=params)`` syntax.') from err
                else:
                    raise err

    # Rebuild the signature so a `params` parameter (default None) is inserted
    # at a position compatible with the original signature's parameter kinds.
    oldsig = inspect.signature(fun)
    if ('_forward_unimplemented' in fun.__name__):
        raise AttributeError('_forward_unimplemented not supported')
    params = list(oldsig.parameters.values())
    for (i, param) in enumerate(params):
        if (param.kind == inspect.Parameter.KEYWORD_ONLY):
            out_type = inspect.Parameter.POSITIONAL_OR_KEYWORD
            break
        if (param.kind == inspect.Parameter.VAR_POSITIONAL):
            # After *args, `params` can only be keyword-only; insert after it.
            out_type = inspect.Parameter.KEYWORD_ONLY
            i = (i + 1)
            break
        if (param.kind == inspect.Parameter.VAR_KEYWORD):
            out_type = inspect.Parameter.POSITIONAL_OR_KEYWORD
            break
        if ((param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD) and (param.default is not inspect._empty)):
            # Insert before the first defaulted positional parameter.
            out_type = inspect.Parameter.POSITIONAL_OR_KEYWORD
            break
    else:
        # No special parameter found: append after all existing parameters.
        out_type = inspect.Parameter.POSITIONAL_OR_KEYWORD
        i = len(params)
    # Avoid clashing with an existing parameter literally named `params`.
    name = 'params'
    while (name in oldsig.parameters):
        name += '_'
    newparam = inspect.Parameter(name, out_type, default=None)
    params.insert(i, newparam)
    sig = oldsig.replace(parameters=params)
    new_fun.__signature__ = sig
    return new_fun
class ConfigMultiRPaned(MultiRPaned):
    """A MultiRPaned that persists its relative pane widths to the config."""

    def __init__(self, section, option):
        super().__init__()
        self.section = section
        self.option = option

    def set_widgets(self, widgets):
        super().set_widgets(widgets)
        for paned in self._get_paneds():
            # Re-save whenever the user drags a divider.
            paned.connect('notify::position', self.__changed)
        self._restore_widths()

    def save_widths(self):
        """Write the current relative widths of all paneds to the config."""
        paneds = self._get_paneds()
        if len(paneds) == 1 and not paneds[0].get_child1():
            # A single empty paned has no meaningful width to record.
            widths = []
        else:
            widths = [str(p.get_relative()) for p in paneds]
        config.setstringlist(self.section, self.option, widths)

    def _restore_widths(self):
        """Apply previously stored relative widths, re-saving as we go."""
        stored = config.getstringlist(self.section, self.option, [])
        paneds = self._get_paneds()
        if not stored:
            self.__changed()
            return
        for index, fraction in enumerate(map(float, stored)):
            if index >= len(paneds):
                break
            paneds[index].set_relative(fraction)
            self.__changed()

    def __changed(self, widget=None, event=None):
        self.save_widths()
def generate_sparse_data(shape, num_points, num_channels, integer=False, data_range=(-1, 1), with_dense=True, dtype=np.float32):
    """Create random sparse tensor test data.

    Returns a dict with 'features' of shape (sum(num_points), num_channels),
    'indices' of shape (sum(num_points), len(shape) + 1) with the batch index
    in the trailing column, and, when `with_dense`, a dense
    (batch, channels, *shape) array holding the same values.
    """
    ndim = len(shape)
    num_points = np.array(num_points)
    batch_size = len(num_points)

    # Every coordinate of the dense grid, flattened to (prod(shape), ndim).
    all_coords = np.stack(np.meshgrid(*[np.arange(0, s) for s in shape]), axis=-1).reshape(-1, ndim)

    batch_indices = []
    for b in range(batch_size):
        # Shuffling and slicing guarantees unique coordinates within a batch.
        np.random.shuffle(all_coords)
        chosen = all_coords[:num_points[b]]
        # Append the batch index as a trailing coordinate column.
        batch_indices.append(np.pad(chosen, ((0, 0), (0, 1)), mode='constant', constant_values=b))

    total = num_points.sum()
    if integer:
        sparse_data = np.random.randint(data_range[0], data_range[1], size=[total, num_channels]).astype(dtype)
    else:
        sparse_data = np.random.uniform(data_range[0], data_range[1], size=[total, num_channels]).astype(dtype)

    res = {'features': sparse_data.astype(dtype)}
    if with_dense:
        dense_data = np.zeros([batch_size, num_channels, *shape], dtype=sparse_data.dtype)
        offset = 0
        for b, coords in enumerate(batch_indices):
            for row, coord in enumerate(coords):
                # Scatter one feature vector per (batch, *spatial) location.
                dense_data[(b, slice(None), *coord[:-1])] = sparse_data[offset + row]
            offset += len(coords)
        res['features_dense'] = dense_data.astype(dtype)

    res['indices'] = np.concatenate(batch_indices, axis=0).astype(np.int32)
    return res
# NOTE(review): the source began with a bare signature tuple (a no-op
# expression); restored the conventional @cuda.jit decorator around it.
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=(- 1)):
    """CUDA kernel: rotated-box IoU between N boxes and K query boxes.

    Each (blockIdx.x, blockIdx.y) block covers a 64x64 tile of the flattened
    NxK IoU matrix `dev_iou`; boxes are staged into shared memory 5 floats per
    box before the pairwise evaluation.
    """
    threadsPerBlock = (8 * 8)
    row_start = cuda.blockIdx.x
    col_start = cuda.blockIdx.y
    tx = cuda.threadIdx.x
    # Clamp tile sizes at the ragged right/bottom edges of the matrix.
    row_size = min((N - (row_start * threadsPerBlock)), threadsPerBlock)
    col_size = min((K - (col_start * threadsPerBlock)), threadsPerBlock)
    block_boxes = cuda.shared.array(shape=((64 * 5),), dtype=numba.float32)
    block_qboxes = cuda.shared.array(shape=((64 * 5),), dtype=numba.float32)
    dev_query_box_idx = ((threadsPerBlock * col_start) + tx)
    dev_box_idx = ((threadsPerBlock * row_start) + tx)
    # Stage this tile's query boxes and boxes into shared memory,
    # one 5-float box per thread.
    if (tx < col_size):
        for k in range(5):
            block_qboxes[((tx * 5) + k)] = dev_query_boxes[((dev_query_box_idx * 5) + k)]
    if (tx < row_size):
        for k in range(5):
            block_boxes[((tx * 5) + k)] = dev_boxes[((dev_box_idx * 5) + k)]
    cuda.syncthreads()
    if (tx < row_size):
        for i in range(col_size):
            # Flattened offset of element (row, col) in the NxK output.
            offset = (((((row_start * threadsPerBlock) * K) + (col_start * threadsPerBlock)) + (tx * K)) + i)
            dev_iou[offset] = devRotateIoUEval(block_qboxes[(i * 5):((i * 5) + 5)], block_boxes[(tx * 5):((tx * 5) + 5)], criterion)
class ProxyMixin(object):
    """Mixin for wrappers that forward behaviour to an `inner` target object.

    Relies on the host class providing `inner` and `properties` attributes.
    """

    def get_proxied(self):
        """Return the directly wrapped object."""
        return self.inner

    def is_proxy(self):
        return True

    def get_properties(self):
        return self.properties

    def get_property(self, prop, default=None):
        """Look up `prop` in the property mapping, falling back to `default`."""
        props = self.properties
        if props is None:
            return default
        return props.get(prop, default)

    def immutable(self):
        # Delegates to the innermost non-proxy object.
        return get_base_object(self.inner).immutable()

    def tostring(self):
        return get_base_object(self.inner).tostring()
def validate_conf(*args, **kwargs):
    """Validate the loaded configuration file structure.

    The config must contain a top-level `pyms` block, a nested `pyms.config`
    block, and only whitelisted keywords under `pyms`. Each violation raises
    ConfigErrorException with an example of a valid config.
    """
    config = ConfFile(*args, **kwargs)
    is_config_ok = True
    # Attribute access raises AttrDoesNotExistException when the key is absent.
    try:
        config.pyms
    except AttrDoesNotExistException:
        is_config_ok = False
    if (not is_config_ok):
        raise ConfigErrorException('Config file must start with `pyms` keyword, for example:\n pyms:\n services:\n metrics: true\n requests:\n data: data\n swagger:\n path: ""\n file: "swagger.yaml"\n tracer:\n client: "jaeger"\n host: "localhost"\n component_name: "Python Microservice"\n config:\n DEBUG: true\n TESTING: true')
    try:
        config.pyms.config
    except AttrDoesNotExistException:
        is_config_ok = False
    if (not is_config_ok):
        raise ConfigErrorException('`pyms` block must contain a `config` keyword in your Config file, for example:\n pyms:\n services:\n metrics: true\n requests:\n data: data\n swagger:\n path: ""\n file: "swagger.yaml"\n tracer:\n client: "jaeger"\n host: "localhost"\n component_name: "Python Microservice"\n config:\n DEBUG: true\n TESTING: true')
    # Reject any top-level keyword under `pyms` that is not whitelisted.
    wrong_keywords = [i for i in config.pyms if (i not in PYMS_CONFIG_WHITELIST_KEYWORDS)]
    if (len(wrong_keywords) > 0):
        raise ConfigErrorException('{} isn`t a valid keyword for pyms block, for example:\n pyms:\n services:\n metrics: true\n requests:\n data: data\n swagger:\n path: ""\n file: "swagger.yaml"\n tracer:\n client: "jaeger"\n host: "localhost"\n component_name: "Python Microservice"\n config:\n DEBUG: true\n TESTING: true'.format(wrong_keywords))
    # Deprecated environment variables are reported separately.
    __verify_deprecated_env_variables(config)
class LosDialog(QtWidgets.QDialog):
    """Dialog for editing the sandbox model's line-of-sight (LOS) angles."""

    def __init__(self, sandbox, *args, **kwargs):
        QtWidgets.QDialog.__init__(self, *args, **kwargs)
        loadUi(get_resource('dialog_los.ui'), self)
        self.setSizeGripEnabled(False)
        # Center the dialog over the parent window.
        self.move((self.parent().window().mapToGlobal(self.parent().window().rect().center()) - self.mapToGlobal(self.rect().center())))
        self.sandbox = sandbox
        # NOTE(review): bare attribute access with no effect -- probably a
        # leftover from a stripped assignment; confirm against upstream.
        self.sandbox.model
        self.applyButton.released.connect(self.updateValues)
        self.okButton.released.connect(self.updateValues)
        self.okButton.released.connect(self.close)
        self.setValues()

    def setValues(self):
        # NOTE(review): all statements below discard their results; presumably
        # the original pushed the model's LOS angles into the spin boxes --
        # confirm against upstream before relying on this method.
        self.sandbox.model
        np.deg2rad(self.spinlos_phi.value())
        np.deg2rad(self.spinlos_theta.value())
        ()

    def updateValues(self):
        """Write the spin-box phi/theta angles (degrees) back into the model."""
        print('updated los!')
        self.sandbox.model.setLOS(self.spinlos_phi.value(), self.spinlos_theta.value())
        self.setValues()
class TestPortaraDataProviderIntraday(TestCase):
    """Integration tests for PortaraDataProvider over 1-minute futures data."""

    # NOTE(review): restored @classmethod -- unittest requires setUpClass to be
    # a classmethod and the `cls` parameter confirms the intent.
    @classmethod
    def setUpClass(cls) -> None:
        cls.start_date = datetime(2021, 6, 11, 17, 13)
        cls.end_date = datetime(2021, 6, 14, 8, 46)
        cls.number_of_data_bars = 29
        cls.fields = PriceField.ohlcv()
        cls.ticker = PortaraTicker('AB', SecurityType.FUTURE, 1)
        cls.tickers = [PortaraTicker('AB', SecurityType.FUTURE, 1), PortaraTicker('ABCD', SecurityType.FUTURE, 7)]
        cls.future_ticker = PortaraFutureTicker('', 'AB{}', 1, 1)
        cls.future_tickers = [PortaraFutureTicker('', 'AB{}', 1, 1), PortaraFutureTicker('', 'ABCD{}', 1, 7)]
        cls.futures_path = str(((Path(__file__).parent / Path('input_data')) / Path('Futures')))

    def get_data_provider(self, tickers, fields) -> PortaraDataProvider:
        """Build a 1-minute provider over the shared test date range."""
        return PortaraDataProvider(self.futures_path, tickers, fields, self.start_date, self.end_date, Frequency.MIN_1)

    def test_get_price_single_ticker_many_fields_many_dates(self):
        data_provider = self.get_data_provider(self.ticker, self.fields)
        prices = data_provider.get_price(self.ticker, self.fields, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.fields)))
        self.assertEqual(Frequency.infer_freq(prices.index), Frequency.MIN_1)

    def test_get_price_single_ticker_many_fields_single_date(self):
        date = datetime(2021, 6, 11, 17, 28)
        data_provider = self.get_data_provider(self.ticker, self.fields)
        prices = data_provider.get_price(self.ticker, self.fields, date, date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (len(self.fields),))

    def test_get_price_single_ticker_single_field_many_dates(self):
        data_provider = self.get_data_provider(self.ticker, PriceField.Close)
        prices = data_provider.get_price(self.ticker, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (self.number_of_data_bars,))

    def test_get_price_single_ticker_single_field_single_date(self):
        date = datetime(2021, 6, 11, 17, 28)
        data_provider = self.get_data_provider(self.ticker, PriceField.Close)
        prices = data_provider.get_price(self.ticker, PriceField.Close, date, date, Frequency.MIN_1)
        # A single ticker/field/date collapses to the scalar price value.
        self.assertEqual(type(prices), int)
        self.assertEqual(prices, 61640)

    def test_get_price_many_tickers_many_fields_many_dates(self):
        data_provider = self.get_data_provider(self.tickers, self.fields)
        prices = data_provider.get_price(self.tickers, self.fields, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.tickers), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.MIN_1)

    def test_get_price_many_tickers_single_field_many_dates(self):
        data_provider = self.get_data_provider(self.tickers, PriceField.Close)
        prices = data_provider.get_price(self.tickers, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(self.tickers)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.index)), Frequency.MIN_1)

    def test_get_price_many_tickers_many_fields_single_date(self):
        date = datetime(2021, 6, 11, 17, 28)
        data_provider = self.get_data_provider(self.tickers, self.fields)
        prices = data_provider.get_price(self.tickers, self.fields, date, date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesDataFrame)
        self.assertEqual(prices.shape, (len(self.tickers), len(self.fields)))

    def test_get_price_many_tickers_single_field_single_date(self):
        date = datetime(2021, 6, 11, 17, 28)
        data_provider = self.get_data_provider(self.tickers, PriceField.Close)
        prices = data_provider.get_price(self.tickers, PriceField.Close, date, date, Frequency.MIN_1)
        self.assertEqual(type(prices), PricesSeries)
        self.assertEqual(prices.shape, (len(self.tickers),))

    def test_get_price_single_future_ticker_many_fields(self):
        data_provider = self.get_data_provider(self.future_ticker, self.fields)
        # The provider should expose the individual contracts of the chain.
        tickers_to_check = [PortaraTicker('AB2021M', SecurityType.FUTURE, 1), PortaraTicker('AB2021U', SecurityType.FUTURE, 1)]
        prices = data_provider.get_price(tickers_to_check, self.fields, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(tickers_to_check), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.MIN_1)
        self.assertCountEqual(prices.tickers.values, tickers_to_check)

    def test_get_price_many_future_tickers_many_fields(self):
        data_provider = self.get_data_provider(self.future_tickers, self.fields)
        tickers_to_check = [PortaraTicker('AB2021M', SecurityType.FUTURE, 1), PortaraTicker('AB2021U', SecurityType.FUTURE, 1), PortaraTicker('ABCD2021M', SecurityType.FUTURE, 7), PortaraTicker('ABCD2021N', SecurityType.FUTURE, 7), PortaraTicker('ABCD2021Q', SecurityType.FUTURE, 7)]
        prices = data_provider.get_price(tickers_to_check, self.fields, self.start_date, self.end_date, Frequency.MIN_1)
        self.assertEqual(type(prices), QFDataArray)
        self.assertEqual(prices.shape, (self.number_of_data_bars, len(tickers_to_check), len(self.fields)))
        self.assertEqual(Frequency.infer_freq(pd.to_datetime(prices.dates.values)), Frequency.MIN_1)
        self.assertCountEqual(prices.tickers.values, tickers_to_check)

    def test_get_fut_chain_single_future_ticker(self):
        data_provider = self.get_data_provider(self.future_ticker, self.fields)
        fut_chain = data_provider.get_futures_chain_tickers(self.future_ticker, ExpirationDateField.LastTradeableDate)
        self.assertTrue(fut_chain)
        self.assertEqual(type(fut_chain), dict)
        self.assertEqual(type(fut_chain[self.future_ticker]), QFDataFrame)
        self.assertEqual(fut_chain[self.future_ticker].shape, (4, 1))

    def test_get_fut_chain_many_future_tickers(self):
        data_provider = self.get_data_provider(self.future_tickers, self.fields)
        fut_chain = data_provider.get_futures_chain_tickers(self.future_tickers, ExpirationDateField.LastTradeableDate)
        self.assertTrue(fut_chain)
        self.assertEqual(type(fut_chain), dict)
        self.assertEqual(type(fut_chain[self.future_tickers[0]]), QFDataFrame)
        self.assertEqual(fut_chain[self.future_tickers[0]].shape, (4, 1))
        self.assertEqual(fut_chain[self.future_tickers[1]].shape, (3, 1))

    def test_get_price_aggregation_single_ticker(self):
        # The market-open trigger must match the first bar of the test data.
        MarketOpenEvent.set_trigger_time({'hour': 17, 'minute': 13, 'second': 0, 'microsecond': 0})
        dp = PortaraDataProvider(self.futures_path, self.ticker, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_1)
        prices = dp.get_price(self.ticker, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_1)
        prices5 = dp.get_price(self.ticker, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_5)
        prices15 = dp.get_price(self.ticker, PriceField.Close, self.start_date, self.end_date, Frequency.MIN_15)
        self.assertTrue(len(prices5))
        self.assertEqual(type(prices5), PricesSeries)
        self.assertEqual(Frequency.infer_freq(prices5.index), Frequency.MIN_5)
        # Aggregated series must equal a plain last-value resample of the
        # 1-minute data starting at the first full 5-minute bucket.
        assert_series_equal(prices5, prices.loc[datetime(2021, 6, 11, 17, 15):].resample('5T').last().dropna(), check_names=False)
        self.assertTrue(len(prices15))
        self.assertEqual(type(prices15), PricesSeries)
        assert_series_equal(prices15, prices.loc[datetime(2021, 6, 11, 17, 15):].resample('15T').last().dropna(), check_names=False)
class EarleyRegexpMatcher():
    """Matches terminals against the input text via their compiled regexps."""

    def __init__(self, lexer_conf):
        self.regexps = {}
        for terminal in lexer_conf.terminals:
            pattern = terminal.pattern.to_regexp()
            try:
                min_width = get_regexp_width(pattern)[0]
            except ValueError:
                raise GrammarError('Bad regexp in token %s: %s' % (terminal.name, pattern))
            # Zero-width terminals would let the dynamic lexer loop forever.
            if min_width == 0:
                raise GrammarError("Dynamic Earley doesn't allow zero-width regexps", terminal)
            if lexer_conf.use_bytes:
                pattern = pattern.encode('utf-8')
            self.regexps[terminal.name] = lexer_conf.re_module.compile(pattern, lexer_conf.g_regex_flags)

    def match(self, term, text, index=0):
        """Attempt to match *term*'s compiled regexp in *text* at *index*."""
        return self.regexps[term.name].match(text, index)
class Tab():
    """One tab of a tabbed panel: a title, a key identifying it in the query
    string, and a factory producing its contents on demand.

    NOTE(review): several no-argument methods below (query_arguments, contents,
    is_open, is_active, css_id, top_level_nav, data_toggle) are read elsewhere
    as plain attributes (e.g. ``self.query_arguments``, ``tab.contents``),
    which suggests their ``@property``-style decorators were stripped by
    extraction -- confirm against the upstream source.
    """

    def __init__(self, view, title, tab_key, contents_factory):
        self.title = title
        self.tab_key = tab_key
        self.contents_factory = contents_factory
        self.view = view
        self.panel = None        # set later via set_panel()
        self.menu_item = None    # set later via add_to_menu()

    def get_bookmark(self, view):
        """Bookmark that opens this tab (carries the tab key as a query argument)."""
        return view.as_bookmark(description=self.title, query_arguments=self.query_arguments)

    def top_level_nav(self):
        return self.panel.nav

    def data_toggle(self):
        return self.top_level_nav.layout.key

    def default_active_tab_key(self):
        return self.tab_key

    def query_arguments(self):
        return {'tab': self.tab_key}

    def contents(self):
        # Contents are created lazily from the factory.
        return self.contents_factory.create(self.view)

    def set_panel(self, tabbed_panel):
        self.panel = tabbed_panel

    def is_open(self):
        return self.panel.is_currently_open(self)

    def is_active(self):
        return self.is_open

    def add_to_menu(self, menu):
        """Add a menu item for this tab, wired with the ARIA attributes that
        reflect its open/active state."""
        menu_item = menu.add_bookmark(self.get_bookmark(menu.view))
        menu_item.determine_is_active_using((lambda : self.is_active))
        menu_item.a.set_css_id(('nav_%s_tab' % self.css_id))
        menu_item.a.add_attribute_source(TabContentAttributes(self))
        menu_item.a.add_attribute_source(ActiveStateAttributes(menu_item, attribute_name='aria-selected', active_value='true', inactive_value='false'))
        self.menu_item = menu_item
        return menu_item

    def css_id(self):
        return ('tab_%s' % self.tab_key)

    def add_contents_to(self, content_panel):
        return self.add_contents_of_tab_to(content_panel, self)

    def add_contents_of_tab_to(self, content_panel, tab):
        """Render *tab*'s contents into *content_panel* as an ARIA tabpanel div."""
        assert tab.menu_item, 'add the tab to a menu first before adding its contents'
        div = content_panel.add_child(Div(self.view))
        div.add_child(tab.contents)
        div.append_class('tab-pane')
        div.set_attribute('role', 'tabpanel')
        div.set_id(tab.css_id)
        div.add_attribute_source(ActiveStateAttributes(tab, active_value='active'))
        # Links the panel back to the menu item that controls it.
        div.set_attribute('aria-labelledby', ('%s' % tab.menu_item.a.css_id))
        return div
class Gaussian(object):
    """Diagonal Gaussian variational posterior parameterized by (mu, rho).

    The standard deviation is derived as sigma = softplus(rho), keeping it
    positive while rho stays unconstrained.
    """

    def __init__(self, mu, rho):
        super().__init__()
        self.mu = mu      # mean tensor
        self.rho = rho    # unconstrained parameter; sigma = softplus(rho)
        self.normal = torch.distributions.Normal(0, 1)

    @property
    def sigma(self):
        # FIX: restored @property -- the rest of the class reads `self.sigma`
        # as an attribute (e.g. `sigma * epsilon`), which fails on a bound method.
        return F.softplus(self.rho, beta=1)

    def sample(self, stochastic=False, return_log_prob=False):
        """Draw a weight sample.

        Returns (w, log_prob); log_prob is 0 unless return_log_prob is True.
        When stochastic is False, w is simply mu.
        """
        sigma = self.sigma
        if stochastic:
            # NOTE(review): torch.rand_like draws from U[0, 1); a Gaussian
            # reparameterization normally uses torch.randn_like (and self.normal
            # is otherwise unused) -- confirm intent before changing.
            epsilon = torch.rand_like(self.mu)
            var = sigma * epsilon
            w = self.mu + var
        else:
            w = self.mu
            var = 0
        if not return_log_prob:
            return (w, 0)
        sigma = sigma.float()
        log_prob = ((-log_sqrt_2pi) - torch.log(sigma) - (var ** 2) / (2 * (sigma ** 2))).sum()
        return (w, log_prob)

    def log_prob(self, input):
        """Log-density of *input* under N(mu, sigma^2), summed over elements."""
        sigma = self.sigma.float()
        input = input.float()
        # FIX: the constant term is -log(sqrt(2*pi)); the original added it with
        # a positive sign, disagreeing with the log-prob computed in sample().
        return ((-math.log(math.sqrt(2 * math.pi))) - torch.log(sigma)
                - ((input - self.mu) ** 2) / (2 * (sigma ** 2))).sum()
class TypeHexagonSymmetry(Enum):
    """Symmetries of a regular hexagon: five non-trivial rotations (R60..R300)
    and six reflection axes (L1..L6).

    NOTE(review): ``auto(11)`` assigning one value to eleven names looks
    mangled by extraction (``auto()`` takes no count) -- confirm upstream.
    Several methods below take no ``self``/``cls`` yet are called on the
    class, suggesting stripped ``@staticmethod`` decorators.
    """
    (R60, R120, R180, R240, R300, L1, L2, L3, L4, L5, L6) = auto(11)

    def rotations():
        # The five rotations in increasing 60-degree steps.
        return (TypeHexagonSymmetry.R60, TypeHexagonSymmetry.R120, TypeHexagonSymmetry.R180, TypeHexagonSymmetry.R240, TypeHexagonSymmetry.R300)

    def reflections():
        # The six mirror axes.
        return (TypeHexagonSymmetry.L1, TypeHexagonSymmetry.L2, TypeHexagonSymmetry.L3, TypeHexagonSymmetry.L4, TypeHexagonSymmetry.L5, TypeHexagonSymmetry.L6)

    def is_rotation(self):
        return (self in TypeHexagonSymmetry.rotations())

    def is_reflection(self):
        return (self in TypeHexagonSymmetry.reflections())

    def ring_cells(ring):
        # Enumerate the (row, col) cells of the hexagon's `ring`-th concentric
        # ring (ring >= 2) in a fixed traversal order around the ring.
        assert (ring >= 2)
        base = ((2 * ring) - 2)
        return (((([(0, j) for j in range(ring)] + [((i + 1), (ring + i)) for i in range((ring - 1))]) + [((ring + i), ((base - i) - 1)) for i in range((ring - 1))]) + [(base, ((ring - i) - 2)) for i in range((ring - 1))]) + [(((base - i) - 1), 0) for i in range((base - 1))])

    def apply_on(self, n):
        """For a hexagon of side length n, return a jagged table mapping each
        cell (i, j) to the cell it moves to under this symmetry.

        Results are memoized per (symmetry, n) in a class-level cache because
        rebuilding the ring decomposition is expensive.
        """
        if (not hasattr(TypeHexagonSymmetry, '_cache')):
            TypeHexagonSymmetry._cache = {}
        key = (self, n)
        if (key not in TypeHexagonSymmetry._cache):
            w = ((2 * n) - 1)  # number of rows in the hexagonal grid
            widths = [(w - abs(((n - i) - 1))) for i in range(w)]  # cells per row
            rings = ([[], [(0, 0)]] + [TypeHexagonSymmetry.ring_cells(ring) for ring in range(2, (n + 1))])
            # which_rings[i][j] = index of the concentric ring that cell (i, j) lies on.
            which_rings = [[(n - min(i, ((w - 1) - i), j, ((widths[i] - 1) - j))) for j in range(widths[i])] for i in range(w)]
            if self.is_rotation():

                def rot(i, j):
                    # Rotate by walking `skip` steps along the cell's own ring.
                    ring = which_rings[i][j]
                    skip = (coeff * (ring - 1))
                    gap = (n - ring)
                    t = rings[ring]
                    idx = t.index(((i - gap), (j - gap)))
                    (k, l) = t[((idx + skip) % len(t))]
                    return ((k + gap), (l + gap))
                coeff = (1 + TypeHexagonSymmetry.rotations().index(self))  # 1..5
                TypeHexagonSymmetry._cache[key] = [[rot(i, j) for j in range(widths[i])] for i in range(w)]
            else:

                def rot(i, j):
                    # Reflect by mirroring the cell's ring position around a
                    # per-axis pivot cell; `offset` corrects even-length rings.
                    ring = which_rings[i][j]
                    center = (((ring + 1) // 2) - 1)
                    if (self is TypeHexagonSymmetry.L1):
                        (pivot, offset) = (((ring - 1), 0), 0)
                    elif (self is TypeHexagonSymmetry.L2):
                        (pivot, offset) = ((center, 0), ((- 1) if ((ring % 2) == 0) else 0))
                    elif (self is TypeHexagonSymmetry.L3):
                        (pivot, offset) = ((0, 0), 0)
                    elif (self is TypeHexagonSymmetry.L4):
                        (pivot, offset) = ((0, center), (1 if ((ring % 2) == 0) else 0))
                    elif (self is TypeHexagonSymmetry.L5):
                        (pivot, offset) = ((0, (ring - 1)), 0)
                    else:
                        assert (self is TypeHexagonSymmetry.L6)
                        (pivot, offset) = ((center, ((ring - 1) + center)), (1 if ((ring % 2) == 0) else 0))
                    gap = (n - ring)
                    t = rings[ring]
                    ind_pivot = t.index(pivot)
                    ind_cell = t.index(((i - gap), (j - gap)))
                    diff = abs((ind_pivot - ind_cell))
                    if (ind_pivot <= ind_cell):
                        ind = ((ind_pivot - diff) + offset)
                    else:
                        ind = ((ind_pivot + diff) + offset)
                    (k, l) = t[((ind + len(t)) % len(t))]
                    return ((k + gap), (l + gap))
                TypeHexagonSymmetry._cache[key] = [[rot(i, j) for j in range(widths[i])] for i in range(w)]
        return TypeHexagonSymmetry._cache[key]
def test_config_pickling():
    """A config object survives a pickle round-trip; lambda validators break it."""
    config_root = configdefaults.config
    stream = io.BytesIO()
    pickle.dump(config_root, stream)
    stream.seek(0)
    restored = pickle.load(stream)
    # Every registered config variable must keep its value across the round-trip.
    for name in config_root._config_var_dict:
        v_original = getattr(config_root, name)
        v_restored = getattr(restored, name)
        assert (v_restored == v_original), f'{name} did not survive pickling ({v_restored} != {v_original})'
    # A lambda validator is a local object and therefore unpicklable.
    config_root = _create_test_config()
    config_root.add('test__lambda_kills_pickling', 'Lambda functions cause pickling problems.',
                    configparser.IntParam(5, (lambda i: i > 0)))
    with pytest.raises(AttributeError, match="Can't pickle local object"):
        pickle.dump(config_root, io.BytesIO())
def run_simulation(model, time=10000, points=200, cleanup=True, output_prefix=None, output_dir=None, flux_map=False, perturbation=None, seed=None, verbose=False):
    """Run a KaSim simulation of *model* and return its parsed trajectory.

    Deprecated in favor of pysb.simulator.KappaSimulator.

    `time`/`points` control the sampled trajectory; `flux_map` additionally
    records a dynamic influence network (DIN) dot file; `perturbation` is raw
    Kappa text appended to the generated model; `seed` fixes KaSim's RNG.
    Returns the parsed data, or a SimulationResult wrapping data and the flux
    graph when `flux_map` is set.  Raises KasimInterfaceError when KaSim exits
    non-zero, and ValueError when points == 0.
    """
    warnings.warn('run_simulation will be removed in a future version of PySB. Use pysb.simulator.KappaSimulator instead.', DeprecationWarning)
    gen = KappaGenerator(model)
    if (output_prefix is None):
        output_prefix = ('tmpKappa_%s_' % model.name)
    # All intermediate files live in a fresh temp directory so concurrent runs
    # cannot clash; it is removed at the end unless cleanup=False.
    base_directory = tempfile.mkdtemp(prefix=output_prefix, dir=output_dir)
    base_filename = os.path.join(base_directory, model.name)
    kappa_filename = (base_filename + '.ka')
    fm_filename = (base_filename + '_fm.dot')
    out_filename = (base_filename + '.out')
    if (points == 0):
        raise ValueError('The number of data points cannot be zero.')
    plot_period = ((float(time) / points) if (time > 0) else 1.0)
    args = ['-i', kappa_filename, '-u', 'time', '-l', str(time), '-p', ('%.5f' % plot_period), '-o', out_filename]
    if seed:
        args.extend(['-seed', str(seed)])
    with open(kappa_filename, 'w') as kappa_file:
        file_data = gen.get_content()
        if flux_map:
            # Ask KaSim to dump the dynamic influence network alongside the run.
            file_data += ('%%mod: [true] do $DIN "%s" [true];\n' % fm_filename)
        if perturbation:
            file_data += ('\n%s\n' % perturbation)
        logger.debug(('Kappa file contents:\n\n' + file_data))
        kappa_file.write(file_data)
    kasim_path = pf.get_path('kasim')
    p = subprocess.Popen(([kasim_path] + args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=base_directory)
    if verbose:
        # Stream KaSim's stdout live; communicate() below collects the rest.
        for line in iter(p.stdout.readline, b''):
            print('', line, end='')
    (p_out, p_err) = p.communicate()
    if p.returncode:
        raise KasimInterfaceError(((p_out.decode('utf8') + '\n') + p_err.decode('utf8')))
    data = _parse_kasim_outfile(out_filename)
    if flux_map:
        try:
            flux_graph = read_dot(fm_filename)
        except ImportError:
            if cleanup:
                # The dot file is about to be deleted, so there is nothing
                # useful to fall back to -- propagate the missing-pydot error.
                raise
            else:
                warnings.warn(('The pydot library could not be imported, so no MultiGraph object returned (returning None); flux map dot file available at %s' % fm_filename))
                flux_graph = None
    if cleanup:
        shutil.rmtree(base_directory)
    if flux_map:
        return SimulationResult(data, flux_graph)
    else:
        return data
class TestFlatLayoutPackageFinder():
    """Each example maps a list of files to the packages the flat-layout
    finder is expected to discover (and, implicitly, the directories it must
    skip: hidden, private, invalid-name, docs/tests/examples, tool-specific)."""

    EXAMPLES = {
        'hidden-folders': (['.pkg/__init__.py', 'pkg/__init__.py', 'pkg/nested/file.txt'], ['pkg', 'pkg.nested']),
        'private-packages': (['_pkg/__init__.py', 'pkg/_private/__init__.py'], ['pkg', 'pkg._private']),
        'invalid-name': (['invalid-pkg/__init__.py', 'other.pkg/__init__.py', 'yet,another/file.py'], []),
        'docs': (['pkg/__init__.py', 'docs/conf.py', 'docs/readme.rst'], ['pkg']),
        'tests': (['pkg/__init__.py', 'tests/test_pkg.py', 'tests/__init__.py'], ['pkg']),
        # NOTE(review): 'examples/file.pyexample/other_file.py' looks like two
        # paths fused by extraction ('examples/file.py', 'example/other_file.py')
        # -- confirm against upstream before relying on this fixture.
        'examples': (['pkg/__init__.py', 'examples/__init__.py', 'examples/file.pyexample/other_file.py', 'pkg/example/__init__.py', 'pkg/examples/__init__.py'], ['pkg', 'pkg.examples', 'pkg.example']),
        'tool-specific': (['htmlcov/index.html', 'pkg/__init__.py', 'tasks/__init__.py', 'tasks/subpackage/__init__.py', 'fabfile/__init__.py', 'fabfile/subpackage/__init__.py', 'pkg/tasks/__init__.py', 'pkg/fabfile/__init__.py'], ['pkg', 'pkg.tasks', 'pkg.fabfile']),
    }

    # FIX: the decorator was mangled to a bare '.parametrize(...)' (a syntax
    # error); restored the conventional pytest marker.
    @pytest.mark.parametrize('example', EXAMPLES.keys())
    def test_unwanted_directories_not_included(self, tmp_path, example):
        """Only real, wanted packages are found among the example's files."""
        (files, expected_packages) = self.EXAMPLES[example]
        ensure_files(tmp_path, files)
        found_packages = FlatLayoutPackageFinder.find(str(tmp_path))
        assert (set(found_packages) == set(expected_packages))
def main() -> None:
    """Entry point: dispatch 'run'/'test' to the site manager, everything else
    to Django's command line (with static-build environment handling)."""
    command = sys.argv[1] if len(sys.argv) > 1 else None
    if command in ('run', 'test'):
        manager = SiteManager(sys.argv)
        if command == 'run':
            manager.run_server()
        else:
            manager.run_tests()
        return
    is_static_build = command is not None and 'distill' in command
    if is_static_build:
        os.environ['STATIC_BUILD'] = 'True'
        if not os.getenv('PARENT_HOST'):
            os.environ['PARENT_HOST'] = 'REPLACE_THIS.HOST'
    execute_from_command_line(sys.argv)
    if is_static_build:
        # Clean up using the first non-flag argument as the output directory.
        for arg in sys.argv[2:]:
            if not arg.startswith('-'):
                clean_up_static_files(Path(arg))
                break
_HEADS_REGISTRY.register()  # NOTE(review): the leading '@' of this decorator appears stripped by extraction -- confirm upstream.
class BBoxIOUTracker(BaseTracker):
    """IoU-based tracker: a prediction in the current frame inherits the ID of
    the previous-frame box it overlaps above a threshold; unmatched previous
    boxes are carried forward as "lost" for a limited number of frames."""

    def __init__(self, *, video_height: int, video_width: int, max_num_instances: int=200, max_lost_frame_count: int=0, min_box_rel_dim: float=0.02, min_instance_period: int=1, track_iou_threshold: float=0.5, **kwargs):
        """
        Args:
            video_height / video_width: frame dimensions, used to judge
                relative box size.
            max_num_instances: stored limit on instances (not used in the
                visible code -- presumably enforced elsewhere).
            max_lost_frame_count: frames an unmatched instance may stay lost
                before being dropped.
            min_box_rel_dim: minimum box side relative to the frame.
            min_instance_period: minimum #frames an ID must persist to be
                carried forward.
            track_iou_threshold: IoU above which boxes are the same track.
        """
        super().__init__(**kwargs)
        self._video_height = video_height
        self._video_width = video_width
        self._max_num_instances = max_num_instances
        self._max_lost_frame_count = max_lost_frame_count
        self._min_box_rel_dim = min_box_rel_dim
        self._min_instance_period = min_instance_period
        self._track_iou_threshold = track_iou_threshold

    # NOTE(review): takes `cls` but has no visible @classmethod decorator --
    # it appears stripped by extraction.
    def from_config(cls, cfg: CfgNode_):
        """Translate a TRACKER_HEADS config node into constructor kwargs."""
        assert ('VIDEO_HEIGHT' in cfg.TRACKER_HEADS)
        assert ('VIDEO_WIDTH' in cfg.TRACKER_HEADS)
        video_height = cfg.TRACKER_HEADS.get('VIDEO_HEIGHT')
        video_width = cfg.TRACKER_HEADS.get('VIDEO_WIDTH')
        max_num_instances = cfg.TRACKER_HEADS.get('MAX_NUM_INSTANCES', 200)
        max_lost_frame_count = cfg.TRACKER_HEADS.get('MAX_LOST_FRAME_COUNT', 0)
        min_box_rel_dim = cfg.TRACKER_HEADS.get('MIN_BOX_REL_DIM', 0.02)
        min_instance_period = cfg.TRACKER_HEADS.get('MIN_INSTANCE_PERIOD', 1)
        track_iou_threshold = cfg.TRACKER_HEADS.get('TRACK_IOU_THRESHOLD', 0.5)
        return {'_target_': 'detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker', 'video_height': video_height, 'video_width': video_width, 'max_num_instances': max_num_instances, 'max_lost_frame_count': max_lost_frame_count, 'min_box_rel_dim': min_box_rel_dim, 'min_instance_period': min_instance_period, 'track_iou_threshold': track_iou_threshold}

    def update(self, instances: Instances) -> Instances:
        """Assign track IDs to this frame's instances and remember them for
        the next frame."""
        if instances.has('pred_keypoints'):
            raise NotImplementedError('Need to add support for keypoints')
        instances = self._initialize_extra_fields(instances)
        if (self._prev_instances is not None):
            # IoU of every (current, previous) box pair.
            iou_all = pairwise_iou(boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes)
            bbox_pairs = self._create_prediction_pairs(instances, iou_all)
            self._reset_fields()
            # Greedy one-to-one matching: skip pairs whose side is already
            # matched or whose IoU is below the tracking threshold.
            for bbox_pair in bbox_pairs:
                idx = bbox_pair['idx']
                prev_id = bbox_pair['prev_id']
                if ((idx in self._matched_idx) or (prev_id in self._matched_ID) or (bbox_pair['IoU'] < self._track_iou_threshold)):
                    continue
                instances.ID[idx] = prev_id
                instances.ID_period[idx] = (bbox_pair['prev_period'] + 1)
                instances.lost_frame_count[idx] = 0
                self._matched_idx.add(idx)
                self._matched_ID.add(prev_id)
                self._untracked_prev_idx.remove(bbox_pair['prev_idx'])
            instances = self._assign_new_id(instances)
            instances = self._merge_untracked_instances(instances)
        self._prev_instances = copy.deepcopy(instances)
        return instances

    def _create_prediction_pairs(self, instances: Instances, iou_all: np.ndarray) -> List:
        """Build the cross product of (current, previous) boxes with their IoU."""
        bbox_pairs = []
        for i in range(len(instances)):
            for j in range(len(self._prev_instances)):
                bbox_pairs.append({'idx': i, 'prev_idx': j, 'prev_id': self._prev_instances.ID[j], 'IoU': iou_all[(i, j)], 'prev_period': self._prev_instances.ID_period[j]})
        return bbox_pairs

    def _initialize_extra_fields(self, instances: Instances) -> Instances:
        """Ensure ID / ID_period / lost_frame_count fields exist; on the first
        frame, assign fresh IDs to everything."""
        if (not instances.has('ID')):
            instances.set('ID', ([None] * len(instances)))
        if (not instances.has('ID_period')):
            instances.set('ID_period', ([None] * len(instances)))
        if (not instances.has('lost_frame_count')):
            instances.set('lost_frame_count', ([None] * len(instances)))
        if (self._prev_instances is None):
            instances.ID = list(range(len(instances)))
            self._id_count += len(instances)
            instances.ID_period = ([1] * len(instances))
            instances.lost_frame_count = ([0] * len(instances))
        return instances

    def _reset_fields(self):
        """Clear per-frame matching bookkeeping."""
        self._matched_idx = set()
        self._matched_ID = set()
        self._untracked_prev_idx = set(range(len(self._prev_instances)))

    def _assign_new_id(self, instances: Instances) -> Instances:
        """Give every still-unmatched current instance a brand-new ID."""
        untracked_idx = set(range(len(instances))).difference(self._matched_idx)
        for idx in untracked_idx:
            instances.ID[idx] = self._id_count
            self._id_count += 1
            instances.ID_period[idx] = 1
            instances.lost_frame_count[idx] = 0
        return instances

    def _merge_untracked_instances(self, instances: Instances) -> Instances:
        """Carry forward previous instances that vanished this frame (as "lost"),
        unless they are too small, lost too long, or too short-lived."""
        untracked_instances = Instances(image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[])
        prev_bboxes = list(self._prev_instances.pred_boxes)
        prev_classes = list(self._prev_instances.pred_classes)
        prev_scores = list(self._prev_instances.scores)
        prev_ID_period = self._prev_instances.ID_period
        if instances.has('pred_masks'):
            prev_masks = list(self._prev_instances.pred_masks)
        for idx in self._untracked_prev_idx:
            (x_left, y_top, x_right, y_bot) = prev_bboxes[idx]
            # Filter out boxes below the relative-size threshold, lost for too
            # many frames, or whose track lived less than the minimum period.
            if ((((1.0 * (x_right - x_left)) / self._video_width) < self._min_box_rel_dim) or (((1.0 * (y_bot - y_top)) / self._video_height) < self._min_box_rel_dim) or (self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count) or (prev_ID_period[idx] <= self._min_instance_period)):
                continue
            untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))
            untracked_instances.pred_classes.append(int(prev_classes[idx]))
            untracked_instances.scores.append(float(prev_scores[idx]))
            untracked_instances.ID.append(self._prev_instances.ID[idx])
            untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])
            untracked_instances.lost_frame_count.append((self._prev_instances.lost_frame_count[idx] + 1))
            if instances.has('pred_masks'):
                untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))
        untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))
        untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)
        untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)
        if instances.has('pred_masks'):
            untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)
        else:
            untracked_instances.remove('pred_masks')
        return Instances.cat([instances, untracked_instances])
class TestFDCapture():
    """Tests for capture.FDCapture: snapshotting data written to raw file
    descriptors (temp files, stdout/stderr/stdin)."""

    def test_simple(self, tmpfile: BinaryIO) -> None:
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        data = b'hello'
        os.write(fd, data)
        # snap() before start() must be rejected.
        pytest.raises(AssertionError, cap.snap)
        cap.done()
        cap = capture.FDCapture(fd)
        cap.start()
        os.write(fd, data)
        s = cap.snap()
        cap.done()
        assert (s == 'hello')

    def test_simple_many(self, tmpfile: BinaryIO) -> None:
        # Repeated capture cycles on the same fd must keep working.
        for i in range(10):
            self.test_simple(tmpfile)

    def test_simple_many_check_open_files(self, pytester: Pytester) -> None:
        # Repeated capture cycles must not leak open file descriptors.
        with lsof_check():
            with pytester.makepyfile('').open('wb+') as tmpfile:
                self.test_simple_many(tmpfile)

    def test_simple_fail_second_start(self, tmpfile: BinaryIO) -> None:
        # start() after done() is an error.
        fd = tmpfile.fileno()
        cap = capture.FDCapture(fd)
        cap.done()
        pytest.raises(AssertionError, cap.start)

    def test_stderr(self) -> None:
        cap = capture.FDCapture(2)
        cap.start()
        print('hello', file=sys.stderr)
        s = cap.snap()
        cap.done()
        assert (s == 'hello\n')

    def test_stdin(self) -> None:
        cap = capture.FDCapture(0)
        cap.start()
        # Captured stdin reads as empty rather than blocking.
        x = os.read(0, 100).strip()
        cap.done()
        assert (x == b'')

    def test_writeorg(self, tmpfile: BinaryIO) -> None:
        (data1, data2) = (b'foo', b'bar')
        cap = capture.FDCapture(tmpfile.fileno())
        cap.start()
        tmpfile.write(data1)
        tmpfile.flush()
        # writeorg() writes to the original fd, bypassing the capture buffer.
        cap.writeorg(data2.decode('ascii'))
        scap = cap.snap()
        cap.done()
        assert (scap == data1.decode('ascii'))
        with open(tmpfile.name, 'rb') as stmp_file:
            stmp = stmp_file.read()
            assert (stmp == data2)

    def test_simple_resume_suspend(self) -> None:
        with saved_fd(1):
            cap = capture.FDCapture(1)
            cap.start()
            data = b'hello'
            os.write(1, data)
            sys.stdout.write('whatever')
            s = cap.snap()
            assert (s == 'hellowhatever')
            cap.suspend()
            # While suspended, writes go to the real fd and are not captured.
            os.write(1, b'world')
            sys.stdout.write('qlwkej')
            assert (not cap.snap())
            cap.resume()
            os.write(1, b'but now')
            sys.stdout.write(' yes\n')
            s = cap.snap()
            assert (s == 'but now yes\n')
            cap.suspend()
            cap.done()
            pytest.raises(AssertionError, cap.suspend)
            assert (repr(cap) == "<FDCapture 1 oldfd={} _state='done' tmpfile={!r}>".format(cap.targetfd_save, cap.tmpfile))
            assert isinstance(cap.syscapture, capture.SysCapture)
            assert (repr(cap.syscapture) == "<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(cap.syscapture.tmpfile))

    def test_capfd_sys_stdout_mode(self, capfd) -> None:
        # With capfd active, sys.stdout must remain a text-mode stream.
        assert ('b' not in sys.stdout.mode)
def main():
    """Two-stage differential-evolution optimization of a solar-cell design.

    Stage 1 searches layer parameters to maximize Jsc (the objective returns
    the negative of the maximized quantity, hence the sign flips below).
    Stage 2 re-optimizes around the stage-1 optimum with widened bounds and
    four additional n-type parameters.  Fitness histories are plotted after
    each stage.
    """
    maxiters = 300
    DE_class = calc_min_Jsc()
    PDE_obj = PDE(DE_class.evaluate, bounds=[[10, 150], [10, 105], [200, 1000], [500, 10000], [500, 10000]], maxiters=maxiters)
    res = PDE_obj.solve()
    # res = (best individual, best fitness, population history, best-fitness
    # history, mean-fitness history) -- inferred from the uses below.
    best_pop = res[0]
    print('parameters for best result:', best_pop, '\n', 'optimized Jsc value (mA/cm2):', (- res[1]))
    DE_class.plot(best_pop)
    best_pop_evo = res[2]
    best_fitn_evo = res[3]
    mean_fitn_evo = res[4]
    final_fitness = res[1]  # NOTE(review): assigned but unused
    plt.figure()
    plt.plot((- best_fitn_evo), '-k')
    plt.xlabel('iteration')
    plt.ylabel('fitness')
    plt.title('Best fitness')
    plt.show()
    plt.figure()
    plt.plot((- mean_fitn_evo), '-k')
    plt.xlabel('iteration')
    plt.ylabel('fitness')
    plt.title('Mean fitness')
    plt.show()
    # Stage 2: bounds are +/-25% around the stage-1 parameters (plus an
    # appended 200000 value), with four extra n-type parameter ranges.
    starting_params = np.append(best_pop[2:], [200000])
    lower = (0.75 * starting_params)
    upper = (1.25 * starting_params)
    lower_ntype = [20, 20, 20, 20]
    upper_ntype = [200, 300, 300, 500]
    all_lower = np.append(lower, lower_ntype)
    all_upper = np.append(upper, upper_ntype)
    all_bounds = np.stack((all_lower, all_upper)).T
    maxiters_DA = 10
    DE_class_DA = calc_min_Jsc_DA(best_pop[0:2])
    PDE_obj_DA = PDE(DE_class_DA.evaluate, bounds=all_bounds, maxiters=maxiters_DA)
    res_DA = PDE_obj_DA.solve()
    best_pop_DA = res_DA[0]
    # NOTE(review): unlike stage 1, the fitness is printed without negation --
    # confirm the intended sign convention of calc_min_Jsc_DA.evaluate.
    print('parameters for best result:', best_pop_DA, 'optimized efficiency (%)', (res_DA[1] * 100))
    DE_class_DA.plot(best_pop_DA)
    best_pop_evo = res_DA[2]
    best_fitn_evo = res_DA[3]
    mean_fitn_evo = res_DA[4]
    final_fitness = res_DA[1]  # NOTE(review): assigned but unused
    plt.figure()
    plt.plot((- best_fitn_evo), '-k')
    plt.xlabel('iteration')
    plt.ylabel('fitness')
    plt.title('Best fitness')
    plt.show()
    plt.figure()
    plt.plot((- mean_fitn_evo), '-k')
    plt.xlabel('iteration')
    plt.ylabel('fitness')
    plt.title('Mean fitness')
    plt.show()
def regex_match(text, pattern):
    """Return the unique substrings of *text* matched by *pattern*.

    Matching is case-insensitive, unicode-aware and multiline.  An invalid
    pattern is reported and yields an empty list.  The result order is
    unspecified because duplicates are removed via a set.
    """
    try:
        # FIX: flag constants are bit masks -- combine them with | rather than +,
        # and catch only re.error instead of BaseException (which would also
        # swallow KeyboardInterrupt / SystemExit).
        compiled = re.compile(pattern, flags=re.IGNORECASE | re.UNICODE | re.MULTILINE)
    except re.error:
        print('Regular expression failed to compile: %s' % pattern)
        return []
    matched = [m.group() for m in compiled.finditer(text)]
    return list(set(matched))
def get_xwayland_atoms(xwayland: xwayland.XWayland) -> dict[(int, str)]:
    """Map XWayland atom IDs of the _NET_WM_WINDOW_TYPE_* atoms to the
    corresponding window-type names used internally."""
    window_types = {
        '_NET_WM_WINDOW_TYPE_DESKTOP': 'desktop',
        '_NET_WM_WINDOW_TYPE_DOCK': 'dock',
        '_NET_WM_WINDOW_TYPE_TOOLBAR': 'toolbar',
        '_NET_WM_WINDOW_TYPE_MENU': 'menu',
        '_NET_WM_WINDOW_TYPE_UTILITY': 'utility',
        '_NET_WM_WINDOW_TYPE_SPLASH': 'splash',
        '_NET_WM_WINDOW_TYPE_DIALOG': 'dialog',
        '_NET_WM_WINDOW_TYPE_DROPDOWN_MENU': 'dropdown',
        '_NET_WM_WINDOW_TYPE_POPUP_MENU': 'menu',
        '_NET_WM_WINDOW_TYPE_TOOLTIP': 'tooltip',
        '_NET_WM_WINDOW_TYPE_NOTIFICATION': 'notification',
        '_NET_WM_WINDOW_TYPE_COMBO': 'combo',
        '_NET_WM_WINDOW_TYPE_DND': 'dnd',
        '_NET_WM_WINDOW_TYPE_NORMAL': 'normal',
    }
    # Resolve each atom name to its numeric ID once, at call time.
    return {xwayland.get_atom(atom_name): type_name for (atom_name, type_name) in window_types.items()}
class _UsageKind(enum.IntEnum): unused = 1 used_in_test = 2 used = 3 def classify(cls, module_name: str) -> '_UsageKind': if ('.' not in module_name): return cls.used own_name = module_name.rsplit('.', maxsplit=1)[1] if own_name.startswith('test'): return cls.used_in_test else: return cls.used def aggregate(cls, usages: Iterable['_UsageKind']) -> '_UsageKind': return max(usages, default=cls.unused) def aggregate_modules(cls, module_names: Iterable[str]) -> '_UsageKind': return cls.aggregate((cls.classify(module_name) for module_name in module_names))
def test_hierarchical_obs_logp():
    """The logp graph of hierarchically observed variables must contain no
    RandomVariable ops (they should all have been replaced by values)."""
    observed = np.array([0.5, 0.4, 5, 2])
    with pm.Model() as model:
        x = pm.Uniform('x', 0, 1, observed=observed)
        pm.Uniform('y', x, 2, observed=observed)
    graph_ops = {node.owner.op for node in ancestors([model.logp()]) if node.owner}
    assert graph_ops
    assert all(not isinstance(op, RandomVariable) for op in graph_ops)
class OpenMM(Engines):
    """Molecular-mechanics engine backed by OpenMM.

    Builds a System/Simulation for the molecule from a force-field XML file
    and evaluates single-point energies, OPLS combination rules and
    finite-difference Hessians on the Reference platform.
    """

    def __init__(self, molecule, pdb_file=None, xml_file=None):
        super().__init__(molecule)
        # File names default to the molecule's name; pdb_file is stored but not
        # used in the visible code.
        self.pdb_file = (pdb_file or f'{molecule.name}.pdb')
        self.xml_file = (xml_file or f'{molecule.name}.xml')
        self.system = None
        self.simulation = None
        self.create_system()

    def create_system(self):
        """Build the OpenMM System and a Reference-platform Simulation."""
        forcefield = app.ForceField(self.xml_file)
        top = self.molecule.to_openmm_topology()
        positions = self.molecule.openmm_coordinates()
        modeller = app.Modeller(topology=top, positions=positions)
        try:
            self.system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)
        except ValueError:
            # createSystem fails when the XML declares virtual sites the
            # topology lacks; add the extra particles and retry.
            print('Virtual sites were found in the xml file')
            modeller.addExtraParticles(forcefield)
            self.system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)
        if (self.molecule.combination == 'opls'):
            print('OPLS combination rules found in XML file')
            self.opls_lj()
        integrator = openmm.VerletIntegrator((1.0 * unit.femtoseconds))
        platform = openmm.Platform.getPlatformByName('Reference')
        self.simulation = app.Simulation(modeller.topology, self.system, integrator, platform)
        self.simulation.context.setPositions(modeller.positions)

    def get_energy(self, position):
        """Return the potential energy (kcal/mol) at *position*.

        Missing virtual-site coordinates are padded with dummies; their true
        positions are recomputed by computeVirtualSites().
        """
        extra_sites = (self.molecule.extra_sites if (self.molecule.extra_sites is not None) else [])
        if (len(position) != (len(self.molecule.atoms) + len(extra_sites))):
            for i in range(len(self.molecule.extra_sites)):
                position.append((0, 0, 0))
        self.simulation.context.setPositions(position)
        self.simulation.context.computeVirtualSites()
        state = self.simulation.context.getState(getEnergy=True)
        energy = state.getPotentialEnergy().value_in_unit(unit.kilocalories_per_mole)
        return energy

    def opls_lj(self):
        """Move Lennard-Jones interactions into a CustomNonbondedForce that
        applies OPLS geometric-mean combination rules."""
        forces = {self.system.getForce(index).__class__.__name__: self.system.getForce(index) for index in range(self.system.getNumForces())}
        nonbonded_force = forces['NonbondedForce']
        lorentz = openmm.CustomNonbondedForce('epsilon*((sigma/r)^12-(sigma/r)^6); sigma=sqrt(sigma1*sigma2); epsilon=sqrt(epsilon1*epsilon2)*4.0')
        lorentz.setNonbondedMethod(nonbonded_force.getNonbondedMethod())
        lorentz.setCutoffDistance(nonbonded_force.getCutoffDistance())
        lorentz.addPerParticleParameter('sigma')
        lorentz.addPerParticleParameter('epsilon')
        self.system.addForce(lorentz)
        l_j_set = {}
        for index in range(nonbonded_force.getNumParticles()):
            (charge, sigma, epsilon) = nonbonded_force.getParticleParameters(index)
            l_j_set[index] = AtomParams(charge, sigma, epsilon)
            lorentz.addParticle([sigma, epsilon])
            # Zero out LJ on the original force; the custom force handles it now.
            nonbonded_force.setParticleParameters(index, charge, 0, 0)
        exclusions = {}
        for i in range(nonbonded_force.getNumExceptions()):
            (p1, p2, q, _, eps) = nonbonded_force.getExceptionParameters(i)
            # Mirror every exception as an exclusion on the custom force.
            lorentz.addExclusion(p1, p2)
            exclusions[tuple(sorted((p1, p2)))] = i
            if (eps._value != 0.0):
                # Non-zero 1-4 terms keep a geometric-mean sigma on the
                # original force.
                sig14 = np.sqrt((l_j_set[p1].sigma * l_j_set[p2].sigma))
                nonbonded_force.setExceptionParameters(i, p1, p2, q, sig14, eps)
        if (self.molecule.extra_sites is not None):
            (excep_pairs, normal_pairs) = self.get_vsite_interactions()
            for pair in excep_pairs:
                atom1 = l_j_set[pair[0]]
                atom2 = l_j_set[pair[1]]
                # Presumably the OPLS 0.5 scaling of 1-4 electrostatics --
                # confirm against the force-field convention.
                q = ((atom1.charge * atom2.charge) * 0.5)
                if (pair not in exclusions):
                    lorentz.addExclusion(*pair)
                    nonbonded_force.addException(*pair, q, 0, 0, True)
            for pair in normal_pairs:
                atom1 = l_j_set[pair[0]]
                atom2 = l_j_set[pair[1]]
                q = (atom1.charge * atom2.charge)
                if (pair not in exclusions):
                    lorentz.addExclusion(*pair)
                    nonbonded_force.addException(*pair, q, 0, 0, True)

    def format_coords(self, coordinates):
        """Regroup a flat [x0, y0, z0, x1, ...] array into (x, y, z) tuples."""
        coords = []
        for i in range(0, len(coordinates), 3):
            coords.append(tuple(coordinates[i:(i + 3)]))
        return coords

    def calculate_hessian(self, finite_step):
        """Mass-weighted Hessian via finite differences of the energy.

        NOTE(review): each element is divided only by the mass of the atom of
        the row index, and the diagonal uses (E(+2h)+E(-2h))/(4h^2) rather than
        the usual (E(+h)-2E(0)+E(-h))/h^2 -- confirm both against the intended
        formulation.
        """
        input_coords = (self.molecule.coordinates.flatten() * constants.ANGS_TO_NM)
        hessian = np.zeros(((3 * len(self.molecule.atoms)), (3 * len(self.molecule.atoms))))
        # Only the upper triangle (j >= i) is computed; it is mirrored below.
        for i in range((3 * len(self.molecule.atoms))):
            for j in range(i, (3 * len(self.molecule.atoms))):
                if (i == j):
                    coords = deepcopy(input_coords)
                    coords[i] += (2 * finite_step)
                    e1 = self.get_energy(self.format_coords(coords))
                    coords = deepcopy(input_coords)
                    coords[i] -= (2 * finite_step)
                    e2 = self.get_energy(self.format_coords(coords))
                    hessian[(i, j)] = ((e1 + e2) / ((4 * (finite_step ** 2)) * self.molecule.atoms[(i // 3)].atomic_mass))
                else:
                    # 4-point central difference for the mixed partial d2E/dxi dxj.
                    coords = deepcopy(input_coords)
                    coords[i] += finite_step
                    coords[j] += finite_step
                    e1 = self.get_energy(self.format_coords(coords))
                    coords = deepcopy(input_coords)
                    coords[i] -= finite_step
                    coords[j] -= finite_step
                    e2 = self.get_energy(self.format_coords(coords))
                    coords = deepcopy(input_coords)
                    coords[i] += finite_step
                    coords[j] -= finite_step
                    e3 = self.get_energy(self.format_coords(coords))
                    coords = deepcopy(input_coords)
                    coords[i] -= finite_step
                    coords[j] += finite_step
                    e4 = self.get_energy(self.format_coords(coords))
                    hessian[(i, j)] = ((((e1 + e2) - e3) - e4) / ((4 * (finite_step ** 2)) * self.molecule.atoms[(i // 3)].atomic_mass))
        # Mirror the upper triangle into the lower one.
        sym_hessian = ((hessian + hessian.T) - np.diag(hessian.diagonal()))
        return sym_hessian

    def get_vsite_interactions(self):
        """Classify virtual-site/atom pairs by bond-graph distance.

        Returns (exception_pairs, normal_pairs): pairs exactly 3 bonds apart
        (1-4-like) and pairs further apart (full interaction).
        """
        (exception_pairs, normal_pairs) = ([], [])
        topology = self.molecule.to_topology()
        # Graft each virtual site onto the bond graph via its parent atom;
        # sites are numbered after the real atoms.
        for (site_key, site) in self.molecule.extra_sites.items():
            site_no = (site_key + len(self.molecule.atoms))
            topology.add_node(site_no)
            topology.add_edge(site_no, site.parent_index)
        for (site_key, site) in self.molecule.extra_sites.items():
            site_no = (site_key + len(self.molecule.atoms))
            path_lengths = nx.single_source_shortest_path_length(topology, site_no)
            for (atom, length) in path_lengths.items():
                if (length == 3):
                    exception_pairs.append(tuple(sorted([site_no, atom])))
                elif (length > 3):
                    normal_pairs.append(tuple(sorted([site_no, atom])))
        return (set(exception_pairs), set(normal_pairs))
class Request(object):
    """Thin wrapper around requests.Session with hard-coded browser-like
    headers, used to fetch kinopoisk pages while avoiding trivial blocks."""

    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686; ru; rv:1.9.1.8) Gecko/ Linux Mint/8 (Helena) Firefox/3.5.8',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'ru,en-us;q=0.7,en;q=0.3',
        'Accept-Encoding': 'deflate',
        'Accept-Charset': 'windows-1251,utf-8;q=0.7,*;q=0.7',
        'Keep-Alive': '300',
        'Connection': 'keep-alive',
        # FIX: the original 'Referer' entry was mangled (its URL was scrubbed,
        # leaving a syntax error); restored with the site root -- confirm value.
        'Referer': 'https://www.kinopoisk.ru/',
        'Cookie': 'users_info[check_sh_bool]=none; search_last_date=2010-02-19; search_last_month=2010-02; PHPSESSID=b6df76a958983da150476d9cfa0aab18',
    }

    def __init__(self):
        # Imported lazily so the class can be defined without requests installed.
        import requests
        self.session = requests.Session()

    def get(self, *args, **kwargs):
        """Perform a GET with the spoofed headers; closes the connection after."""
        kwargs['headers'] = self.headers
        response = self.session.get(*args, **kwargs)
        response.connection.close()
        return response

    def get_content(self, *args, **kwargs):
        """GET a page and return its decoded body, raising on a captcha block."""
        response = self.get(*args, **kwargs)
        content = response.content.decode(response.encoding)
        self.raise_for_errors(content)
        return content

    def raise_for_errors(self, content):
        """Raise ValueError if the body indicates this IP is captcha-blocked."""
        if ('captcha' in content):
            raise ValueError('Kinopoisk block this IP. Too many requests')
class NoOpOptimizer(torch.optim.Optimizer):
    """Optimizer whose step() does nothing: it mirrors torch.optim.Optimizer's
    bookkeeping (state, param_groups) without ever updating parameters."""

    def __init__(self, params, defaults):
        torch._C._log_api_usage_once('python.optimizer')
        self.defaults = defaults
        self._hook_for_profile()
        if isinstance(params, torch.Tensor):
            raise TypeError('params argument given to the optimizer should be an iterable of Tensors or dicts, but got ' + torch.typename(params))
        self.state = defaultdict(dict)
        self.param_groups = []
        groups = list(params)
        if not groups:
            return
        # A bare list of tensors becomes a single anonymous parameter group.
        if not isinstance(groups[0], dict):
            groups = [{'params': groups}]
        for group in groups:
            self.add_param_group(group)

    def step(self, **kwargs):
        """Intentionally a no-op."""
        pass
class StoryboardElementStateCondition(_ValueTriggerType):
    """Condition that triggers when a storyboard element reaches a given state."""

    def __init__(self, element, reference, state):
        """
        Args:
            element: storyboard element type (converted to StoryboardElementType).
            reference: name of the referenced storyboard element.
            state: target state (converted to StoryboardElementState).
        """
        self.element = convert_enum(element, StoryboardElementType)
        self.reference = reference
        self.state = convert_enum(state, StoryboardElementState)

    def __eq__(self, other):
        # FIX: return an explicit bool in every case; the original implicitly
        # returned None for non-matching types.
        if isinstance(other, StoryboardElementStateCondition):
            return self.get_attributes() == other.get_attributes()
        return False

    # FIX: restored the missing @staticmethod decorator (parse takes no self).
    @staticmethod
    def parse(element):
        """Build a condition from an XML <StoryboardElementStateCondition> element."""
        ref = element.attrib['storyboardElementRef']
        sbet = convert_enum(element.attrib['storyboardElementType'], StoryboardElementType)
        state = convert_enum(element.attrib['state'], StoryboardElementState)
        return StoryboardElementStateCondition(sbet, ref, state)

    def get_attributes(self):
        """Return the XML attributes of the condition."""
        return {'storyboardElementType': self.element.get_name(), 'storyboardElementRef': self.reference, 'state': self.state.get_name()}

    def get_element(self):
        """Return the ElementTree element of the condition."""
        return ET.Element('StoryboardElementStateCondition', attrib=self.get_attributes())
class AddressBookPanel(Div):
    """Panel listing all addresses in a table with per-row edit links,
    selection checkboxes and a bulk 'Delete Selected' button."""

    def __init__(self, view, address_book_ui):
        super().__init__(view)
        self.rows = self.initialise_rows()
        self.add_child(H(view, 1, text='Addresses'))
        form = self.add_child(Form(view, 'address_table_form'))
        self.define_event_handler(self.events.delete_selected)

        # Widget factories used by the dynamic table columns below.
        def make_link_widget(view, row):
            return A.from_bookmark(view, address_book_ui.get_edit_bookmark(row.address, description='Edit'))

        def make_checkbox_widget(view, row):
            # Discriminate each checkbox by the address id so submitted values
            # map back to the right row.
            return PrimitiveCheckboxInput(form, row.fields.selected_by_user.with_discriminator(str(row.address.id)))

        def make_delete_selected_button(view):
            return Button(form, self.events.delete_selected)

        def make_total(view, item):
            return TextNode(view, str(item.total_rows))
        columns = [StaticColumn(Field(label='Name'), 'name', footer_label='Total friends'), StaticColumn(EmailField(label='Email'), 'email_address'), DynamicColumn('', make_link_widget), DynamicColumn(make_delete_selected_button, make_checkbox_widget, make_footer_widget=make_total)]
        table = Table(view, caption_text='All my friends', summary='Summary for screen reader')
        table.use_layout(TableLayout(striped=True))
        table.with_data(columns, self.rows, footer_items=[TotalRow(self.rows)])
        form.add_child(table)

    def initialise_rows(self):
        # One Row wrapper per persisted Address.
        return [Row(address) for address in Session.query(Address).all()]

    events = ExposedNames()
    events.delete_selected = (lambda i: Event(label='Delete Selected', action=Action(i.delete_selected)))

    def delete_selected(self):
        """Delete every address whose row checkbox the user ticked."""
        for row in self.rows:
            if row.selected_by_user:
                Session.delete(row.address)
class CmdDrop(MuxCommand):
    """Drop a carried object onto the floor of the current room."""
    key = 'drop'
    locks = 'cmd:all()'
    arg_regex = '\\s|$'

    def func(self):
        """Locate the named carried object and drop it, with feedback."""
        dropper = self.caller
        target_name = self.args
        if not target_name:
            dropper.msg('Drop what?')
            return
        obj = dropper.search(
            target_name,
            location=dropper,
            nofound_string=("You aren't carrying %s." % target_name),
            multimatch_string=('You carry more than one %s:' % target_name),
        )
        if not obj:
            # search() already reported the failure to the caller.
            return
        covering = obj.db.covered_by
        if covering:
            dropper.msg("You can't drop that because it's covered by %s." % covering)
            return
        # Worn items are taken off quietly before being dropped.
        if obj.db.worn:
            obj.remove(dropper, quiet=True)
        obj.move_to(dropper.location, quiet=True)
        dropper.msg('You drop %s.' % obj.name)
        dropper.location.msg_contents('%s drops %s.' % (dropper.name, obj.name), exclude=dropper)
        obj.at_drop(dropper)
class ReadBitsRequestBase(ModbusRequest):
    """Base PDU for the Modbus bit-oriented read requests (coils / discrete inputs)."""
    _rtu_frame_size = 8

    def __init__(self, address, count, slave=0, **kwargs):
        """Store the starting bit address and the number of bits to read."""
        ModbusRequest.__init__(self, slave, **kwargs)
        self.address = address
        self.count = count

    def encode(self):
        """Pack address and count as two big-endian 16-bit words."""
        return struct.pack('>HH', self.address, self.count)

    def decode(self, data):
        """Unpack address and count from the 4-byte request payload."""
        self.address, self.count = struct.unpack('>HH', data)

    def get_response_pdu_size(self):
        """Expected response size: func code + byte count + ceil(count/8) data bytes."""
        data_bytes = (self.count + 7) // 8
        return 1 + 1 + data_bytes

    def __str__(self):
        return f'ReadBitRequest({self.address},{self.count})'
class resnet(torch.nn.Module):
    """Torchvision ResNet wrapper exposing five intermediate feature maps.

    `requires_grad` is accepted for interface compatibility but not consumed
    here (matching the original behaviour).
    """

    def __init__(self, requires_grad=False, pretrained=True, num=18):
        super(resnet, self).__init__()
        if (num == 18):
            self.net = models.resnet18(pretrained=pretrained)
        elif (num == 34):
            self.net = models.resnet34(pretrained=pretrained)
        elif (num == 50):
            self.net = models.resnet50(pretrained=pretrained)
        elif (num == 101):
            self.net = models.resnet101(pretrained=pretrained)
        elif (num == 152):
            self.net = models.resnet152(pretrained=pretrained)
        self.N_slices = 5
        # Mirror the backbone's stages as direct attributes of this module.
        for stage in ('conv1', 'bn1', 'relu', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, stage, getattr(self.net, stage))

    def forward(self, X):
        """Return the stem activation and the four residual-stage outputs."""
        stem = self.relu(self.bn1(self.conv1(X)))
        feat1 = self.layer1(self.maxpool(stem))
        feat2 = self.layer2(feat1)
        feat3 = self.layer3(feat2)
        feat4 = self.layer4(feat3)
        outputs = namedtuple('Outputs', ['relu1', 'conv2', 'conv3', 'conv4', 'conv5'])
        return outputs(stem, feat1, feat2, feat3, feat4)
class CILogonOAuth2Test(OAuth2Test):
    # Test case for the CILogon OAuth2 backend (social-core style).
    # NOTE(review): several string literals below are visibly truncated —
    # unterminated quotes where http(s) URLs / tokens were stripped during
    # extraction. Restore them from the upstream test module before running;
    # they are preserved here verbatim rather than guessed at.
    backend_path = 'social_core.backends.cilogon.CILogonOAuth2'
    user_data_url = ' user_data_url_post = True expected_username = ''
    access_token_body = json.dumps({'access_token': ' 'refresh_token': ' 'id_token': 'aBigStringOfRandomChars.123abc', 'token_type': 'Bearer', 'expires_in': 900})
    user_data_body = json.dumps({'sub': ' 'idp_name': 'University of Illinois at Urbana-Champaign', 'idp': 'urn:mace:incommon:uiuc.edu', 'affiliation': ';;', 'eppn': '', 'eptid': 'urn:mace:incommon:uiuc.edu! 'name': 'James Alan Basney', 'given_name': 'James', 'family_name': 'Basney', 'email': ''})

    def test_login(self):
        # Full login flow against the mocked endpoints.
        self.do_login()

    def test_partial_pipeline(self):
        # Resuming a partially-completed auth pipeline.
        self.do_partial_pipeline()
# NOTE(review): the leading ".parametrize(...)" looks like a stripped
# "@pytest.mark.parametrize" decorator — restore before running.
.parametrize('strict', [False, True])
def test_loading(strict):
    # Round-trip: msgspec.convert must rebuild both the GC and no-GC response
    # struct trees from the dumped payload, in strict and lax modes alike.
    assert (msgspec.convert(create_dumped_response(), GetRepoIssuesResponse, strict=strict) == create_response(GetRepoIssuesResponse, Issue, Reactions, PullRequest, Label, SimpleUser))
    assert (msgspec.convert(create_dumped_response(), GetRepoIssuesResponseNoGC, strict=strict) == create_response(GetRepoIssuesResponseNoGC, IssueNoGC, ReactionsNoGC, PullRequestNoGC, LabelNoGC, SimpleUserNoGC))
class CardUser(QWidget, Ui_CardUser):
    """Card widget showing one user account with enable/disable, delete,
    rename and per-user notification toggles."""
    emitDisableUser = pyqtSignal(User)
    emitDeleteUser = pyqtSignal(User)
    emitEditUser = pyqtSignal(User)

    def __init__(self, user):
        super(CardUser, self).__init__()
        self.setupUi(self)
        self.user = user
        self.settings = QSettings(zapzap.__appname__, zapzap.__appname__)
        # User id 1 is treated as the primary account: it cannot be disabled
        # or deleted, so those buttons are hidden instead of wired up.
        if (self.user.id == 1):
            self.btnDisable.hide()
            self.btnDelete.hide()
        else:
            self.btnDisable.clicked.connect(self.buttonClick)
            self.btnDelete.clicked.connect(self.buttonClick)
        self.name.editingFinished.connect(self.editingFinished)
        self.showNotifications.clicked.connect(self.checkBoxClick)
        self.loadCard()

    def editingFinished(self):
        # Persist the edited display name via the parent through a signal.
        self.user.name = self.name.text()
        self.emitEditUser.emit(self.user)

    def loadCard(self):
        """Refresh the card's name, icon and notification checkbox from state."""
        self.name.setText(self.user.name)
        svg = self.user.icon
        if self.user.enable:
            self.name.setEnabled(True)
            self.btnDisable.setText(_('Disable'))
        else:
            # Disabled users get a greyed-out icon variant.
            self.name.setEnabled(False)
            self.btnDisable.setText(_('Enable'))
            svg = svg.format(IMAGE_DISABLE)
        self.icon.setPixmap(getImageQPixmap(svg))
        self.showNotifications.setChecked(self.settings.value(f'{str(self.user.getId())}/notification', True, bool))

    def buttonClick(self):
        # Shared slot: dispatch on the clicked button's object name.
        btn = self.sender()
        btnName = btn.objectName()
        if (btnName == 'btnDisable'):
            self.user.enable = (not self.user.enable)
            self.emitDisableUser.emit(self.user)
            self.loadCard()
        if (btnName == 'btnDelete'):
            # Detach the card from its parent before notifying the owner.
            self.setParent(None)
            self.emitDeleteUser.emit(self.user)

    def checkBoxClick(self):
        # Notification preference is stored per-user-id in QSettings.
        self.settings.setValue(f'{str(self.user.getId())}/notification', self.showNotifications.isChecked())
def test_precmd_hook_emptystatement_first(capsys):
    """A precmd hook raising EmptyStatement stops command processing silently;
    the same holds after a second precmd hook is registered behind it, since
    the first hook short-circuits the chain (called_precmd stays 1)."""
    app = PluggedApp()
    app.register_precmd_hook(app.precmd_hook_emptystatement)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_precmd == 1

    app.reset_counters()
    # BUG FIX: the original assigned `stop` from register_precmd_hook() (which
    # returns None), so the assertion below passed vacuously and the command's
    # actual return value was never checked.
    app.register_precmd_hook(app.precmd_hook)
    stop = app.onecmd_plus_hooks('say hello')
    out, err = capsys.readouterr()
    assert not stop
    assert not out
    assert not err
    assert app.called_precmd == 1
class TestTrainingExtensionsTensorReduction(unittest.TestCase):
    """Checks reduce_tensor/PolySlice on a 3x2x4 int8 tensor filled 101..124."""

    def test_tensor_reduction(self):
        shape = [3, 2, 4]
        tensor = torch.zeros(shape, dtype=torch.int8)
        # Fill with the distinct values 101..124 through a flat view, so every
        # element can be identified after reduction.
        view = tensor.reshape([(- 1)])
        for i in range(tensor.numel()):
            view[i] = (101 + i)
        # Removing index 1 along dim 0 must keep slabs [101..108] and
        # [117..124] and drop [109..116].
        reduct = PolySlice(dim=0, index=1)
        result = reduce_tensor(tensor, reduct)
        assert (list(result.shape) == [2, 2, 4])
        assert tensor_contains(result, 101)
        assert tensor_contains(result, 108)
        assert (not tensor_contains(result, 109))
        assert (not tensor_contains(result, 116))
        assert tensor_contains(result, 117)
        assert tensor_contains(result, 124)
        # Additionally drop columns 0 and 3 along dim 2 (set() then add()).
        reduct.set(dim=2, index=[0])
        reduct.add(dim=2, index=3)
        result = reduce_tensor(tensor, reduct)
        assert (list(result.shape) == [2, 2, 2])
        assert (not tensor_contains(result, 101))
        assert tensor_contains(result, 102)
        assert tensor_contains(result, 103)
        assert (not tensor_contains(result, 104))
        assert (not tensor_contains(result, 117))
        assert (not tensor_contains(result, 121))
        assert tensor_contains(result, 122)
        assert tensor_contains(result, 123)
        assert (not tensor_contains(result, 124))
class ParallelMaxxVitBlock(nn.Module):
    """MaxxVit block: one or more conv blocks followed by parallel partition
    attention. NCHW <-> NHWC permutes bracket the attention stage."""

    def __init__(self, dim, dim_out, stride=1, num_conv=2, conv_cfg: MaxxVitConvCfg=MaxxVitConvCfg(), transformer_cfg: MaxxVitTransformerCfg=MaxxVitTransformerCfg(), drop_path=0.0):
        super().__init__()
        conv_cls = (ConvNeXtBlock if (conv_cfg.block_type == 'convnext') else MbConvBlock)
        if (num_conv > 1):
            # First conv handles the stride / channel change; the remainder
            # keep shape.
            # NOTE(review): list multiplication repeats the SAME module
            # instance (num_conv - 1) times inside the Sequential, i.e. the
            # extra convs share weights — confirm this is intended upstream.
            convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
            convs += ([conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path)] * (num_conv - 1))
            self.conv = nn.Sequential(*convs)
        else:
            self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
        self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)

    def init_weights(self, scheme=''):
        # Transformer and conv parts use separate init schemes.
        named_apply(partial(_init_transformer, scheme=scheme), self.attn)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        x = self.conv(x)
        # Attention operates channels-last; permute there and back.
        x = x.permute(0, 2, 3, 1)
        x = self.attn(x)
        x = x.permute(0, 3, 1, 2)
        return x
# NOTE(review): "_criterion('ctc', ...)" looks like a stripped
# "@register_criterion('ctc', dataclass=CtcCriterionConfig)" decorator —
# restore it before use.
_criterion('ctc', dataclass=CtcCriterionConfig)
class CtcCriterion(FairseqCriterion):
    """CTC loss criterion with optional KenLM decoding for WER at eval time."""

    def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask):
        super().__init__(task)
        # Blank index falls back to 0 when the task defines no blank symbol.
        self.blank_idx = (task.target_dictionary.index(task.blank_symbol) if hasattr(task, 'blank_symbol') else 0)
        self.pad_idx = task.target_dictionary.pad()
        self.eos_idx = task.target_dictionary.eos()
        self.post_process = cfg.post_process
        # Legacy combined wer_args string overrides the individual cfg fields.
        # NOTE(review): eval() on a config string — trusted input only.
        if (cfg.wer_args is not None):
            (cfg.wer_kenlm_model, cfg.wer_lexicon, cfg.wer_lm_weight, cfg.wer_word_score) = eval(cfg.wer_args)
        if (cfg.wer_kenlm_model is not None):
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
            dec_args = Namespace()
            dec_args.nbest = 1
            dec_args.criterion = 'ctc'
            dec_args.kenlm_model = cfg.wer_kenlm_model
            dec_args.lexicon = cfg.wer_lexicon
            dec_args.beam = 50
            dec_args.beam_size_token = min(50, len(task.target_dictionary))
            dec_args.beam_threshold = min(50, len(task.target_dictionary))
            dec_args.lm_weight = cfg.wer_lm_weight
            dec_args.word_score = cfg.wer_word_score
            dec_args.unk_weight = (- math.inf)
            dec_args.sil_weight = 0
            self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
        else:
            self.w2l_decoder = None
        self.zero_infinity = cfg.zero_infinity
        self.sentence_avg = cfg.sentence_avg

    def forward(self, model, sample, reduce=True):
        """Compute the summed CTC loss for a batch; during evaluation also
        accumulate character/word edit distances for UER/WER reporting."""
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True).contiguous()
        # Input lengths: explicit src_lengths, else derived from the padding
        # mask, else assume full length for every sequence in the batch.
        if ('src_lengths' in sample['net_input']):
            input_lengths = sample['net_input']['src_lengths']
        elif (net_output['padding_mask'] is not None):
            non_padding_mask = (~ net_output['padding_mask'])
            input_lengths = non_padding_mask.long().sum((- 1))
        else:
            input_lengths = lprobs.new_full((lprobs.size(1),), lprobs.size(0), dtype=torch.long)
        # Strip pad and eos from targets before flattening for ctc_loss.
        pad_mask = ((sample['target'] != self.pad_idx) & (sample['target'] != self.eos_idx))
        targets_flat = sample['target'].masked_select(pad_mask)
        if ('target_lengths' in sample):
            target_lengths = sample['target_lengths']
        else:
            target_lengths = pad_mask.sum((- 1))
        # cudnn CTC is disabled here (flags(enabled=False)) for this call.
        with torch.backends.cudnn.flags(enabled=False):
            loss = F.ctc_loss(lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction='sum', zero_infinity=self.zero_infinity)
        ntokens = (sample['ntokens'] if ('ntokens' in sample) else target_lengths.sum().item())
        sample_size = (sample['target'].size(0) if self.sentence_avg else ntokens)
        logging_output = {'loss': utils.item(loss.data), 'ntokens': ntokens, 'nsentences': sample['id'].numel(), 'sample_size': sample_size}
        if (not model.training):
            # Eval-only: compute greedy (and optionally LM-decoded) error rates.
            import editdistance
            with torch.no_grad():
                lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
                c_err = 0
                c_len = 0
                w_errs = 0
                w_len = 0
                wv_errs = 0
                for (lp, t, inp_l) in zip(lprobs_t, (sample['target_label'] if ('target_label' in sample) else sample['target']), input_lengths):
                    lp = lp[:inp_l].unsqueeze(0)
                    decoded = None
                    if (self.w2l_decoder is not None):
                        # Unwrap nbest list-of-lists; empty results mean no
                        # hypothesis and fall back to greedy words.
                        decoded = self.w2l_decoder.decode(lp)
                        if (len(decoded) < 1):
                            decoded = None
                        else:
                            decoded = decoded[0]
                            if (len(decoded) < 1):
                                decoded = None
                            else:
                                decoded = decoded[0]
                    p = ((t != self.task.target_dictionary.pad()) & (t != self.task.target_dictionary.eos()))
                    targ = t[p]
                    targ_units = self.task.target_dictionary.string(targ)
                    targ_units_arr = targ.tolist()
                    # Greedy CTC collapse: argmax, merge repeats, drop blanks.
                    toks = lp.argmax(dim=(- 1)).unique_consecutive()
                    pred_units_arr = toks[(toks != self.blank_idx)].tolist()
                    c_err += editdistance.eval(pred_units_arr, targ_units_arr)
                    c_len += len(targ_units_arr)
                    targ_words = post_process(targ_units, self.post_process).split()
                    pred_units = self.task.target_dictionary.string(pred_units_arr)
                    pred_words_raw = post_process(pred_units, self.post_process).split()
                    if ((decoded is not None) and ('words' in decoded)):
                        pred_words = decoded['words']
                        w_errs += editdistance.eval(pred_words, targ_words)
                        wv_errs += editdistance.eval(pred_words_raw, targ_words)
                    else:
                        dist = editdistance.eval(pred_words_raw, targ_words)
                        w_errs += dist
                        wv_errs += dist
                    w_len += len(targ_words)
                logging_output['wv_errors'] = wv_errs
                logging_output['w_errors'] = w_errs
                logging_output['w_total'] = w_len
                logging_output['c_errors'] = c_err
                logging_output['c_total'] = c_len
        return (loss, sample_size, logging_output)

    def reduce_metrics(logging_outputs) -> None:
        """Aggregate per-worker logging outputs into scalar/derived metrics.

        NOTE(review): takes logging_outputs (not self) — presumably a stripped
        @staticmethod decorator; confirm against the original source.
        """
        loss_sum = utils.item(sum((log.get('loss', 0) for log in logging_outputs)))
        ntokens = utils.item(sum((log.get('ntokens', 0) for log in logging_outputs)))
        nsentences = utils.item(sum((log.get('nsentences', 0) for log in logging_outputs)))
        sample_size = utils.item(sum((log.get('sample_size', 0) for log in logging_outputs)))
        # Losses are reported in base-2 (bits) per token/sentence.
        metrics.log_scalar('loss', ((loss_sum / sample_size) / math.log(2)), sample_size, round=3)
        metrics.log_scalar('ntokens', ntokens)
        metrics.log_scalar('nsentences', nsentences)
        if (sample_size != ntokens):
            metrics.log_scalar('nll_loss', ((loss_sum / ntokens) / math.log(2)), ntokens, round=3)
        c_errors = sum((log.get('c_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_c_errors', c_errors)
        c_total = sum((log.get('c_total', 0) for log in logging_outputs))
        metrics.log_scalar('_c_total', c_total)
        w_errors = sum((log.get('w_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_w_errors', w_errors)
        wv_errors = sum((log.get('wv_errors', 0) for log in logging_outputs))
        metrics.log_scalar('_wv_errors', wv_errors)
        w_total = sum((log.get('w_total', 0) for log in logging_outputs))
        metrics.log_scalar('_w_total', w_total)
        # Derived percentage rates, guarded against empty denominators.
        if (c_total > 0):
            metrics.log_derived('uer', (lambda meters: (safe_round(((meters['_c_errors'].sum * 100.0) / meters['_c_total'].sum), 3) if (meters['_c_total'].sum > 0) else float('nan'))))
        if (w_total > 0):
            metrics.log_derived('wer', (lambda meters: (safe_round(((meters['_w_errors'].sum * 100.0) / meters['_w_total'].sum), 3) if (meters['_w_total'].sum > 0) else float('nan'))))
            metrics.log_derived('raw_wer', (lambda meters: (safe_round(((meters['_wv_errors'].sum * 100.0) / meters['_w_total'].sum), 3) if (meters['_w_total'].sum > 0) else float('nan'))))

    def logging_outputs_can_be_summed() -> bool:
        # Summing across workers is valid for all metrics logged above.
        # NOTE(review): also looks like a stripped @staticmethod.
        return True
# NOTE(review): the leading ".usefixtures(...)" looks like a stripped
# "@pytest.mark.usefixtures('_force_blank_two_way')" decorator.
.usefixtures('_force_blank_two_way')
def test_distribute_pre_fill_weaknesses_swap_force_two_way(empty_patches):
    # In WEAKNESSES mode with two-way forced, every door in the fixture world
    # must be assigned 'Back-Only Door' and nothing remains to shuffle.
    rng = Random(10000)  # fixed seed keeps the expected assignment deterministic
    patches = dataclasses.replace(empty_patches, configuration=dataclasses.replace(empty_patches.configuration, dock_rando=dataclasses.replace(empty_patches.configuration.dock_rando, mode=DockRandoMode.WEAKNESSES)))
    result = dock_weakness_distributor.distribute_pre_fill_weaknesses(patches, rng)
    docks = {(n.identifier.area_name, n.name): w.name for (n, w) in result.all_dock_weaknesses()}
    assert (docks == {('Back-Only Lock Room', 'Door to Starting Area'): 'Back-Only Door', ('Blue Key Room', 'Door to Starting Area (Entrance)'): 'Back-Only Door', ('Blue Key Room', 'Door to Starting Area (Exit)'): 'Back-Only Door', ('Boss Arena', 'Door to Starting Area'): 'Back-Only Door', ('Explosive Depot', 'Door to Hint Room'): 'Back-Only Door', ('Explosive Depot', 'Door to Starting Area'): 'Back-Only Door', ('Heated Room', 'Door to Starting Area'): 'Back-Only Door', ('Hint Room', 'Door to Explosive Depot'): 'Back-Only Door', ('Starting Area', 'Door to Back-Only Lock Room'): 'Back-Only Door', ('Starting Area', 'Door to Blue Key Room (Entrance)'): 'Back-Only Door', ('Starting Area', 'Door to Blue Key Room (Exit)'): 'Back-Only Door', ('Starting Area', 'Door to Boss Arena'): 'Back-Only Door', ('Starting Area', 'Door to Explosive Depot'): 'Back-Only Door', ('Starting Area', 'Door to Heated Room'): 'Back-Only Door'})
    assert (list(result.all_weaknesses_to_shuffle()) == [])
# NOTE(review): the bare "_db" below looks like a stripped pytest marker
# (e.g. "@pytest.mark.django_db") — restore before running.
_db
def test_query_single_page(rf, graphql_client):
    # A page query with matching conference code returns all requested fields;
    # a non-matching code yields null.
    request = rf.get('/')
    page = PageFactory(slug=LazyI18nString({'en': 'demo'}), published=True, image=None, conference__code='pycon11')
    resp = graphql_client.query('query {\n page(code: "pycon11", slug: "demo") {\n id\n title\n slug\n content\n image\n }\n } ')
    assert (not resp.get('errors'))
    assert ({'id': str(page.id), 'title': str(page.title), 'slug': str(page.slug), 'content': str(page.content), 'image': _get_image_url(request, page.image)} == resp['data']['page'])
    # Wrong conference code: page must not be found.
    resp = graphql_client.query('query {\n page(slug: "demo", code: "pyconb") {\n id\n }\n } ')
    assert (resp['data']['page'] is None)
def assert_source_added(tester: CommandTester, poetry: Poetry, source_existing: Source, source_added: Source) -> None:
    """Assert that `source add` reported the new source, persisted it to
    pyproject after the existing one, and exited with status 0."""
    assert (tester.io.fetch_output().strip() == f'Adding source with name {source_added.name}.')
    # Reload from disk so we check the persisted state, not in-memory config.
    poetry.pyproject.reload()
    sources = poetry.get_sources()
    assert (sources == [source_existing, source_added])
    assert (tester.status_code == 0)
def safe_join(t: Type, s: Type) -> Type:
    """Join two types while treating TypeVarTuple Unpack items specially:
    two unpacks join inside the Unpack wrapper, a mixed pair falls back to a
    safe supertype derived from `t`."""
    t_is_unpack = isinstance(t, UnpackType)
    s_is_unpack = isinstance(s, UnpackType)
    if t_is_unpack and s_is_unpack:
        return UnpackType(join_types(t.type, s.type))
    if not (t_is_unpack or s_is_unpack):
        return join_types(t, s)
    # Exactly one side is an unpack: no meaningful join exists.
    return object_or_any_from_type(get_proper_type(t))
def test_rcs_bistatic():
    """Bistatic RCS of a 5x5 plate model at three observation azimuths.

    NOTE(review): `freq = .0` (0 Hz) looks like a stripped numeric literal —
    confirm the intended frequency against the original test.
    NOTE(review): `np.zeros_like(phi)` inherits phi's integer dtype, so the
    float dB results are truncated on assignment; verify this is intended
    (the decimal=0 comparison may be relying on it).
    """
    phi = np.array([(- 30), (- 24), 65])
    theta = 90
    inc_phi = 30
    inc_theta = 90
    freq = .0
    pol = np.array([0, 0, 1])  # vertical polarization vector
    density = 1
    rcs = np.zeros_like(phi)
    target = {'model': './models/plate5x5.stl', 'location': (0, 0, 0)}
    for (phi_idx, phi_ang) in enumerate(phi):
        # Shooting-and-bouncing-rays solver; result converted to dB.
        rcs[phi_idx] = (10 * np.log10(rcs_sbr([target], freq, phi_ang, theta, inc_phi=inc_phi, inc_theta=inc_theta, pol=pol, density=density)))
    npt.assert_almost_equal(rcs, np.array([47, 34, 6]), decimal=0)
class TestOnlineContrastiveLoss():
    """OnlineContrastiveLoss must reduce a batch of embeddings to a scalar
    for both 'all'-pairs and hard-pair mining strategies."""
    # Six 3-d embeddings; groups pair up indices (0,5), (1,4) and (2,3).
    embeddings = torch.Tensor([[0.0, (- 1.0), 0.5], [0.1, 2.0, 0.5], [0.0, 0.3, 0.2], [1.0, 0.0, 0.9], [1.2, (- 1.2), 0.01], [(- 0.7), 0.0, 1.5]])
    groups = torch.LongTensor([1, 2, 3, 3, 2, 1])

    def test_batch_all(self):
        loss = OnlineContrastiveLoss(mining='all')
        loss_res = loss.forward(embeddings=self.embeddings, groups=self.groups)
        # A 0-dim tensor means the loss was fully reduced to a scalar.
        assert (loss_res.shape == torch.Size([]))

    def test_batch_hard(self):
        loss = OnlineContrastiveLoss(mining='hard')
        loss_res = loss.forward(embeddings=self.embeddings, groups=self.groups)
        # Fixed: removed a leftover debug print(loss_res).
        assert (loss_res.shape == torch.Size([]))
class EncodeProcessDecode(nn.Module):
    """Encode-Process-Decode GraphNet core (MeshGraphNets style), optionally
    augmented with ripple-generated meta edges before encoding."""

    def __init__(self, output_size, latent_size, num_layers, message_passing_aggregator, message_passing_steps, attention, ripple_used, ripple_generation=None, ripple_generation_number=None, ripple_node_selection=None, ripple_node_selection_random_top_n=None, ripple_node_connection=None, ripple_node_ncross=None):
        super().__init__()
        self._latent_size = latent_size
        self._output_size = output_size
        self._num_layers = num_layers
        self._message_passing_steps = message_passing_steps
        self._message_passing_aggregator = message_passing_aggregator
        self._attention = attention
        self._ripple_used = ripple_used
        # Ripple parameters are only stored (and the machine built) when the
        # ripple mechanism is enabled.
        if self._ripple_used:
            self._ripple_generation = ripple_generation
            self._ripple_generation_number = ripple_generation_number
            self._ripple_node_selection = ripple_node_selection
            self._ripple_node_selection_random_top_n = ripple_node_selection_random_top_n
            self._ripple_node_connection = ripple_node_connection
            self._ripple_node_ncross = ripple_node_ncross
            self._ripple_machine = ripple_machine.RippleMachine(ripple_generation, ripple_generation_number, ripple_node_selection, ripple_node_selection_random_top_n, ripple_node_connection, ripple_node_ncross)
        self.encoder = Encoder(make_mlp=self._make_mlp, latent_size=self._latent_size)
        self.processor = Processor(make_mlp=self._make_mlp, output_size=self._latent_size, message_passing_steps=self._message_passing_steps, message_passing_aggregator=self._message_passing_aggregator, attention=self._attention, stochastic_message_passing_used=False)
        # The decoder MLP skips the final LayerNorm so raw outputs are produced.
        self.decoder = Decoder(make_mlp=functools.partial(self._make_mlp, layer_norm=False), output_size=self._output_size)

    def _make_mlp(self, output_size, layer_norm=True):
        """Build a LazyMLP with num_layers hidden layers of latent_size and the
        given output width, optionally followed by LayerNorm."""
        widths = (([self._latent_size] * self._num_layers) + [output_size])
        network = LazyMLP(widths)
        if layer_norm:
            network = nn.Sequential(network, nn.LayerNorm(normalized_shape=widths[(- 1)]))
        return network

    def forward(self, graph, is_training, world_edge_normalizer=None):
        """Optionally add ripple meta edges, then encode -> process -> decode."""
        if self._ripple_used:
            graph = self._ripple_machine.add_meta_edges(graph, world_edge_normalizer, is_training)
        latent_graph = self.encoder(graph)
        latent_graph = self.processor(latent_graph)
        return self.decoder(latent_graph)
def _create_parametrization(data: dict[str, dict]) -> dict[str, dict]:
    """Build pytest parametrization kwargs keyed by dataset name.

    Fixed annotations: the parameter was declared ``list[str]`` although the
    body iterates ``data.items()`` (a mapping of dataset name -> extra
    kwargs), and the return values are kwargs dicts, not Path objects.

    Each entry combines the input/processed data paths for the dataset with
    any dataset-specific overrides from ``kwargs``.
    """
    id_to_kwargs = {}
    for data_name, kwargs in data.items():
        id_to_kwargs[data_name] = {
            'path_to_input_data': path_to_input_data(data_name),
            'path_to_processed_data': path_to_processed_data(data_name),
            **kwargs,
        }
    return id_to_kwargs
def add_testvalue_and_checking_configvars():
    """Register PyTensor config variables for test values, input checking,
    NanGuardMode, DebugMode and profiling.

    All flags use in_c_key=False except check_input, which affects generated
    C code and therefore participates in the compilation cache key.
    """
    config.add('print_test_value', "If 'True', the __eval__ of an PyTensor variable will return its test_value when this is available. This has the practical consequence that, e.g., in debugging `my_var` will print the same as `my_var.tag.test_value` when a test value is defined.", BoolParam(False), in_c_key=False)
    config.add('compute_test_value', "If 'True', PyTensor will run each op at graph build time, using Constants, SharedVariables and the tag 'test_value' as inputs to the function. This helps the user track down problems in the graph before it gets optimized.", EnumStr('off', ['ignore', 'warn', 'raise', 'pdb']), in_c_key=False)
    config.add('compute_test_value_opt', 'For debugging PyTensor optimization only. Same as compute_test_value, but is used during PyTensor optimization', EnumStr('off', ['ignore', 'warn', 'raise', 'pdb']), in_c_key=False)
    config.add('check_input', 'Specify if types should check their input in their C code. It can be used to speed up compilation, reduce overhead (particularly for scalars) and reduce the number of generated C files.', BoolParam(True), in_c_key=True)
    config.add('NanGuardMode__nan_is_error', 'Default value for nan_is_error', BoolParam(True), in_c_key=False)
    config.add('NanGuardMode__inf_is_error', 'Default value for inf_is_error', BoolParam(True), in_c_key=False)
    config.add('NanGuardMode__big_is_error', 'Default value for big_is_error', BoolParam(True), in_c_key=False)
    config.add('NanGuardMode__action', 'What NanGuardMode does when it finds a problem', EnumStr('raise', ['warn', 'pdb']), in_c_key=False)
    config.add('DebugMode__patience', 'Optimize graph this many times to detect inconsistency', IntParam(10, _is_gt_0), in_c_key=False)
    config.add('DebugMode__check_c', 'Run C implementations where possible', BoolParam(_has_cxx), in_c_key=False)
    config.add('DebugMode__check_py', 'Run Python implementations where possible', BoolParam(True), in_c_key=False)
    config.add('DebugMode__check_finite', 'True -> complain about NaN/Inf results', BoolParam(True), in_c_key=False)
    config.add('DebugMode__check_strides', 'Check that Python- and C-produced ndarrays have same strides. On difference: (0) - ignore, (1) warn, or (2) raise error', IntParam(0, _is_valid_check_strides), in_c_key=False)
    config.add('DebugMode__warn_input_not_reused', 'Generate a warning when destroy_map or view_map says that an op works inplace, but the op did not reuse the input for its output.', BoolParam(True), in_c_key=False)
    config.add('DebugMode__check_preallocated_output', 'Test thunks with pre-allocated memory as output storage. This is a list of strings separated by ":". Valid values are: "initial" (initial storage in storage map, happens with Scan),"previous" (previously-returned memory), "c_contiguous", "f_contiguous", "strided" (positive and negative strides), "wrong_size" (larger and smaller dimensions), and "ALL" (all of the above).', StrParam('', validate=_is_valid_check_preallocated_output_param), in_c_key=False)
    # NOTE(review): the next help string appeared split mid-literal in the
    # extracted source; it is re-joined here — confirm exact whitespace
    # against the original.
    config.add('DebugMode__check_preallocated_output_ndim', 'When testing with "strided" preallocated output memory, test all combinations of strides over that number of (inner-most) dimensions. You may want to reduce that number to reduce memory or time usage, but it is advised to keep a minimum of 2.', IntParam(4, _is_gt_0), in_c_key=False)
    config.add('profiling__time_thunks', 'Time individual thunks when profiling', BoolParam(True), in_c_key=False)
    config.add('profiling__n_apply', 'Number of Apply instances to print by default', IntParam(20, _is_gt_0), in_c_key=False)
    config.add('profiling__n_ops', 'Number of Ops to print by default', IntParam(20, _is_gt_0), in_c_key=False)
    config.add('profiling__output_line_width', 'Max line width for the profiling output', IntParam(512, _is_gt_0), in_c_key=False)
    config.add('profiling__min_memory_size', 'For the memory profile, do not print Apply nodes if the size\n of their outputs (in bytes) is lower than this threshold', IntParam(1024, _is_greater_or_equal_0), in_c_key=False)
    config.add('profiling__min_peak_memory', 'The min peak memory usage of the order', BoolParam(False), in_c_key=False)
    config.add('profiling__destination', 'File destination of the profiling output', StrParam('stderr'), in_c_key=False)
    config.add('profiling__debugprint', 'Do a debugprint of the profiled functions', BoolParam(False), in_c_key=False)
    config.add('profiling__ignore_first_call', 'Do we ignore the first call of an PyTensor function.', BoolParam(False), in_c_key=False)
    config.add('on_shape_error', 'warn: print a warning and use the default value. raise: raise an error', EnumStr('warn', ['raise']), in_c_key=False)
def test_run_pyscript_stop(base_app, request):
    """A normal script (help.py) must not request app exit; a quitting script
    (stop.py) must propagate the stop request through run_pyscript."""
    script_dir = os.path.join(os.path.dirname(request.module.__file__), 'pyscript')

    stop = base_app.onecmd_plus_hooks('run_pyscript {}'.format(os.path.join(script_dir, 'help.py')))
    assert not stop

    stop = base_app.onecmd_plus_hooks('run_pyscript {}'.format(os.path.join(script_dir, 'stop.py')))
    assert stop
def test_project_issue_label_events(project, resp_project_issue_label_events):
    """Label events of a project's first issue are listed as
    ProjectIssueResourceLabelEvent objects with resource_type 'Issue'."""
    issue = project.issues.list()[0]
    label_events = issue.resourcelabelevents.list()
    assert isinstance(label_events, list)
    label_event = label_events[0]
    assert isinstance(label_event, ProjectIssueResourceLabelEvent)
    assert (label_event.resource_type == 'Issue')
def deser_compact_size(f) -> Optional[int]:
    """Read a Bitcoin CompactSize integer from binary stream *f*.

    A single-byte prefix < 253 is the value itself; prefixes 253/254/255
    announce a following little-endian 16/32/64-bit value. Returns None when
    the stream is exhausted before the prefix byte can be read.
    """
    prefix = f.read(1)
    if not prefix:
        # End of stream: no prefix byte available.
        return None
    value = prefix[0]
    if value == 253:
        return struct.unpack('<H', f.read(2))[0]
    if value == 254:
        return struct.unpack('<I', f.read(4))[0]
    if value == 255:
        return struct.unpack('<Q', f.read(8))[0]
    return value
class TestInstall():
    """cookies.install_filter must accept both a real QWebEngineProfile and a
    stubbed profile, wiring the module's accept filter into the cookie store."""

    def test_real_profile(self):
        # Smoke test against a real profile: must not raise.
        profile = QWebEngineProfile()
        cookies.install_filter(profile)

    def test_fake_profile(self, stubs):
        store = stubs.FakeCookieStore()
        profile = stubs.FakeWebEngineProfile(cookie_store=store)
        cookies.install_filter(profile)
        # The stub exposes the installed filter so the wiring can be asserted.
        assert (store.cookie_filter is cookies._accept_cookie)
def test_crop_item(item):
    """CropItem applies the new crop rect on redo and restores the previous
    one on undo, leaving the item's position untouched either way."""
    item.crop = QtCore.QRectF(0, 0, 100, 80)
    command = commands.CropItem(item, QtCore.QRectF(10, 20, 30, 40))
    command.redo()
    assert (item.crop == QtCore.QRectF(10, 20, 30, 40))
    assert (item.pos() == QtCore.QPointF(0, 0))
    command.undo()
    assert (item.crop == QtCore.QRectF(0, 0, 100, 80))
    assert (item.pos() == QtCore.QPointF(0, 0))
def set_default_units(system=None, currency=None, current=None, information=None, length=None, luminous_intensity=None, mass=None, substance=None, temperature=None, time=None):
    """Set process-wide default units.

    If *system* is given ('SI' or 'cgs', case-insensitive), the base units of
    that system are installed first; the remaining keyword arguments then
    override individual dimensions.

    Raises:
        ValueError: if *system* is neither 'SI' nor 'cgs'.
    """
    if system is not None:
        system = system.lower()
        # Fixed: validation previously used `assert` wrapped in
        # try/except AssertionError — asserts are stripped under -O, so the
        # check could silently disappear. Raise ValueError directly instead
        # (same message as before).
        if system not in ('si', 'cgs'):
            raise ValueError('system must be "SI" or "cgs", got "%s"' % system)
        if system == 'si':
            UnitCurrent.set_default_unit('A')
            UnitLength.set_default_unit('m')
            UnitMass.set_default_unit('kg')
        elif system == 'cgs':
            UnitLength.set_default_unit('cm')
            UnitMass.set_default_unit('g')
        # Units shared by both systems.
        UnitLuminousIntensity.set_default_unit('cd')
        UnitSubstance.set_default_unit('mol')
        UnitTemperature.set_default_unit('degK')
        UnitTime.set_default_unit('s')
    # Per-dimension overrides (None arguments leave the default in place,
    # per set_default_unit's handling).
    UnitCurrency.set_default_unit(currency)
    UnitCurrent.set_default_unit(current)
    UnitInformation.set_default_unit(information)
    UnitLength.set_default_unit(length)
    UnitLuminousIntensity.set_default_unit(luminous_intensity)
    UnitMass.set_default_unit(mass)
    UnitSubstance.set_default_unit(substance)
    UnitTemperature.set_default_unit(temperature)
    UnitTime.set_default_unit(time)
# NOTE(review): the leading ".end_to_end()" looks like a stripped
# "@pytest.mark.end_to_end()" decorator.
.end_to_end()
def test_task_function_with_partialed_args_and_task_decorator(tmp_path, runner):
    # A functools.partial wrapped by task() must run like a normal task and
    # write its partialed argument to the declared product.
    source = '\n from pytask import task\n import functools\n from pathlib import Path\n\n def func(content):\n return content\n\n task_func = task(produces=Path("out.txt"))(\n functools.partial(func, content="hello")\n )\n '
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    result = runner.invoke(cli, [tmp_path.as_posix()])
    assert (result.exit_code == ExitCode.OK)
    assert ('1 Succeeded' in result.output)
    assert (tmp_path.joinpath('out.txt').read_text() == 'hello')
def test_tpdm_opdm_mapping():
    """Dual-basis elements of the 2-PDM -> 1-PDM contraction must couple the
    'cckk' and 'ck' tensors with zero scalar/bias and the expected
    coefficient pattern (1.0 on diagonal geminal terms, 0.5 off-diagonal)."""
    db = tpdm_to_opdm_mapping(6, (1 / 2))
    for dbe in db:
        assert isinstance(dbe, DualBasisElement)
        assert (set(dbe.primal_tensors_names) == {'cckk', 'ck'})
        assert ((len(dbe.primal_elements) == 7) or (len(dbe.primal_elements) == 14))
        assert np.isclose(dbe.dual_scalar, 0)
        assert np.isclose(dbe.constant_bias, 0)
        # Fixed: removed a leftover debug print(vars(dbe)).
        for (idx, element) in enumerate(dbe.primal_elements):
            if (len(element) == 4):
                # 4-index (2-PDM) entries: trailing indices must match;
                # diagonal terms carry coefficient 1.0, off-diagonal 0.5.
                assert (element[1] == element[3])
                if (element[0] == element[2]):
                    assert np.isclose(dbe.primal_coeffs[idx], 1.0)
                else:
                    assert np.isclose(dbe.primal_coeffs[idx], 0.5)
                assert dbe.primal_coeffs
            elif (len(element) == 2):
                # 2-index (1-PDM) entries must match the indices of some
                # 4-index geminal term in the same element.
                gem_idx = [len(x) for x in dbe.primal_elements].index(4)
                assert (sorted(element) == sorted([dbe.primal_elements[gem_idx][0], dbe.primal_elements[gem_idx][2]]))
def test_log_file_cli_level(pytester: Pytester) -> None:
    """--log-file-level=INFO must set the file handler level so INFO messages
    reach the log file and DEBUG messages do not."""
    pytester.makepyfile('\n import pytest\n import logging\n def test_log_file(request):\n plugin = request.config.pluginmanager.getplugin(\'logging-plugin\')\n assert plugin.log_file_handler.level == logging.INFO\n logging.getLogger(\'catchlog\').debug("This log message won\'t be shown")\n logging.getLogger(\'catchlog\').info("This log message will be shown")\n print(\'PASSED\')\n ')
    log_file = str(pytester.path.joinpath('pytest.log'))
    # -s keeps the inner test's print() visible for the fnmatch below.
    result = pytester.runpytest('-s', f'--log-file={log_file}', '--log-file-level=INFO')
    result.stdout.fnmatch_lines(['test_log_file_cli_level.py PASSED'])
    assert (result.ret == 0)
    assert os.path.isfile(log_file)
    with open(log_file, encoding='utf-8') as rfh:
        contents = rfh.read()
        assert ('This log message will be shown' in contents)
        assert ("This log message won't be shown" not in contents)
class Solution():
    """Post-order (left, right, root) traversal of a binary tree."""

    def __init__(self):
        # Result accumulator; rebuilt on every call to postorderTraversal.
        self.list = []

    def postorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the post-order traversal of the tree rooted at `root`.

        Fixes over the original:
        - values were appended only when `root.val` was truthy, silently
          dropping nodes holding 0 (or any falsy value); every visited
          node's value is now recorded.
        - `self.list` was never reset, so repeated calls on the same
          Solution accumulated results across traversals.
        """
        self.list = []
        self._visit(root)
        return self.list

    def _visit(self, node):
        # Recursive post-order walk appending into self.list.
        if node:
            self._visit(node.left)
            self._visit(node.right)
            self.list.append(node.val)
class Trainer(object):
    """Epoch-based training driver: runs train/eval loops, steps LR and BN
    momentum schedulers, logs windowed statistics and saves checkpoints."""

    def __init__(self, cfg_trainer, model, optimizer, train_loader, test_loader, lr_scheduler, bnm_scheduler, logger):
        self.cfg = cfg_trainer
        self.model = model
        self.optimizer = optimizer
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.lr_scheduler = lr_scheduler
        self.bnm_scheduler = bnm_scheduler
        # NOTE(review): `decorator` is not an __init__ parameter — this reads
        # a module-level name (or a parameter is missing from the signature);
        # confirm against the original source. It is later called as
        # decorator(model, batch_data, cfg['decorator']) -> (loss, stat_dict).
        self.decorator = decorator
        self.logger = logger
        self.device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
        self.model.to(self.device)

    def train(self):
        """Run cfg['max_epoch'] epochs with periodic evaluation and saving."""
        for epoch in range(self.cfg['max_epoch']):
            # Schedulers step at the start of each epoch.
            if (self.lr_scheduler is not None):
                self.lr_scheduler.step()
            if (self.bnm_scheduler is not None):
                self.bnm_scheduler.step()
            self.logger.info(('------ TRAIN EPOCH %03d ------' % (epoch + 1)))
            self.logger.info(('Learning Rate: %f' % self.lr_scheduler.get_lr()[0]))
            self.logger.info(('BN Momentum: %f' % self.bnm_scheduler.lmbd(self.bnm_scheduler.last_epoch)))
            # Re-seeds numpy each epoch — presumably to vary dataloader
            # augmentation across epochs; confirm intent.
            np.random.seed((np.random.get_state()[1][0] + epoch))
            self.train_one_epoch()
            trained_epoch = (epoch + 1)
            if ((trained_epoch % self.cfg['eval_frequency']) == 0):
                self.logger.info(('------ EVAL EPOCH %03d ------' % trained_epoch))
                self.eval_one_epoch()
            if ((trained_epoch % self.cfg['save_frequency']) == 0):
                os.makedirs('checkpoints', exist_ok=True)
                ckpt_name = os.path.join('checkpoints', ('checkpoint_epoch_%d' % trained_epoch))
                save_checkpoint(get_checkpoint_state(self.model, self.optimizer, trained_epoch), ckpt_name, self.logger)
        return None

    def train_one_epoch(self):
        """One pass over train_loader with backprop and windowed stat logging."""
        self.model.train()
        disp_dict = {}
        for (batch_idx, batch_data) in enumerate(self.train_loader):
            batch_data = [item.to(self.device) for item in batch_data]
            self.optimizer.zero_grad()
            (loss, stat_dict) = self.decorator(self.model, batch_data, self.cfg['decorator'])
            loss.backward()
            self.optimizer.step()
            trained_batch = (batch_idx + 1)
            # Accumulate stats; every disp_frequency batches log the window
            # average and reset the accumulators.
            for key in stat_dict.keys():
                if (key not in disp_dict.keys()):
                    disp_dict[key] = 0
                disp_dict[key] += stat_dict[key]
            if ((trained_batch % self.cfg['disp_frequency']) == 0):
                log_str = ('BATCH[%04d/%04d]' % (trained_batch, len(self.train_loader)))
                for key in sorted(disp_dict.keys()):
                    disp_dict[key] = (disp_dict[key] / self.cfg['disp_frequency'])
                    log_str += (' %s:%.4f,' % (key, disp_dict[key]))
                    disp_dict[key] = 0
                self.logger.info(log_str)

    def eval_one_epoch(self):
        """One pass over test_loader (no gradients), logging averaged stats."""
        self.model.eval()
        disp_dict = {}
        progress_bar = tqdm.tqdm(total=len(self.test_loader), leave=True, desc='Evaluation Progress')
        with torch.no_grad():
            for (batch_idx, batch_data) in enumerate(self.test_loader):
                batch_data = [item.to(self.device) for item in batch_data]
                (loss, stat_dict) = self.decorator(self.model, batch_data, self.cfg['decorator'])
                for key in stat_dict.keys():
                    if (key not in disp_dict.keys()):
                        disp_dict[key] = 0
                    disp_dict[key] += stat_dict[key]
                progress_bar.update()
        progress_bar.close()
        # Average every stat over the full test set before logging.
        log_str = ''
        for key in sorted(disp_dict.keys()):
            disp_dict[key] /= len(self.test_loader)
            log_str += (' %s:%.4f,' % (key, disp_dict[key]))
        self.logger.info(log_str)
class mysql(object):
    """MySQL connection settings derived from the module-level ``mysql_url``.

    Each attribute falls back to a default when the corresponding URL
    component is missing or empty.
    """

    host = mysql_url.hostname if mysql_url.hostname else 'localhost'
    port = mysql_url.port if mysql_url.port else '3306'
    # Strip the leading '/' from the URL path to get the database name.
    database = mysql_url.path[1:] if mysql_url.path[1:] else 'qd'
    user = mysql_url.username if mysql_url.username else 'qd'
    passwd = mysql_url.password if mysql_url.password else None
    # Optional ?auth_plugin=... query parameter; empty string when absent.
    auth_plugin = parse_qs(mysql_url.query).get('auth_plugin', [''])[0]
# Fix: the '@with' prefix of the decorator was stripped in the degraded
# source ('_fixtures(...)'), leaving a bare expression before the def.
@with_fixtures(WebFixture, WidgetCreationScenarios)
def test_widget_factory_creates_widget_with_layout(web_fixture, widget_creation_scenarios):
    """A Layout passed via a WidgetFactory is applied to the created widget."""
    class MyLayout(Layout):
        def customise_widget(self):
            self.widget.add_child(P(self.view, text='This widget is using Mylayout'))
    layout_for_widget = MyLayout()

    # NOTE(review): this local MainUI is defined but the scenario's
    # fixture.MainUI is used below, as in the original — confirm intent.
    class MainUI(UserInterface):
        def assemble(self):
            self.define_view('/', title='Hello', page=HTML5Page.factory(use_layout=layout_for_widget))

    fixture = widget_creation_scenarios
    wsgi_app = web_fixture.new_wsgi_app(site_root=fixture.MainUI)
    browser = Browser(wsgi_app)
    browser.open('/')
    [p] = browser.lxml_html.xpath('//p')
    assert p.text == 'This widget is using Mylayout'
# Fix: the '@with' prefix of the decorator was stripped in the degraded
# source ('_fixtures(...)'), leaving a bare expression before the def.
@with_fixtures(WebFixture, CarouselFixture)
def test_active_state_of_items(web_fixture, carousel_fixture):
    """Only the first slide and its indicator start out in the active state."""
    fixture = carousel_fixture
    carousel = Carousel(web_fixture.view, 'my_carousel_id')
    carousel.add_slide(Img(web_fixture.view))
    carousel.add_slide(Img(web_fixture.view))
    main_div = fixture.get_main_div_for(carousel)
    [indicator_list, carousel_inner, left_control, right_control] = main_div.children
    [carousel_item_1, carousel_item_2] = carousel_inner.children
    assert carousel_item_1.get_attribute('class') == 'active carousel-item'
    assert carousel_item_2.get_attribute('class') == 'carousel-item'
    [indicator_0, indicator_1] = fixture.get_indicator_list_for(carousel)
    assert indicator_0.get_attribute('class') == 'active'
    assert not indicator_1.has_attribute('class')
    # Indicators address their slide by zero-based index.
    assert indicator_0.get_attribute('data-slide-to') == '0'
    assert indicator_1.get_attribute('data-slide-to') == '1'
def segmentation():
    """Detect badminton hit-points from ball trajectory data.

    Operates on the module-level DataFrame ``df``; expects columns 'X', 'Y',
    'vecX', 'vecY', 'Dup' and 'Frame'.  Writes a new 0/1 column
    'hitpoint' into ``df`` and prints the number of detections after
    pruning near-duplicate (within 10 frames) hits.
    """
    # Hard-coded pixel coordinates of the court corners in the video frame.
    court_top_left_x = 470
    court_top_left_y = 127
    court_top_right_x = 895
    court_top_right_y = 127
    court_down_left_x = 276
    court_down_left_y = 570
    court_down_right_x = 1000
    court_down_right_y = 570
    hitpoint = [0 for _ in range(len(df['vecY']))]
    # Skip the first/last two samples: the detectors below look at i-2..i+2.
    for i in range(2, (len(df['vecY']) - 2)):
        count = 0
        # Only consider points vertically inside the court band.
        if ((df['Y'][i] > court_top_left_y) and (df['Y'][i] < court_down_left_y)):
            point_x = 100
            point_y = df['Y'][i]
            # Left side line: solve for the x of the horizontal line
            # y = point_y intersected with the left court edge.
            m1 = ((court_down_left_y - court_top_left_y) / (court_down_left_x - court_top_left_x))
            a = np.array([[0, 1], [m1, (- 1)]])
            b = np.array([point_y, (- (court_top_left_y - (m1 * court_top_left_x)))])
            ans = np.linalg.solve(a, b)
            # Ball is to the right of the left edge (with a 100px margin).
            if ((ans[0] > 100) and (ans[0] < df['X'][i])):
                count += 1
            point_x = 1100
            # Right side line, same construction.
            m2 = ((court_down_right_y - court_top_right_y) / (court_down_right_x - court_top_right_x))
            a = np.array([[0, 1], [m2, (- 1)]])
            b = np.array([point_y, (- (court_top_right_y - (m2 * court_top_right_x)))])
            ans = np.linalg.solve(a, b)
            # Ball is to the left of the right edge (with a 1100px margin).
            if ((ans[0] < 1100) and (ans[0] > df['X'][i])):
                count += 1
        # count == 2 means the ball is horizontally inside the court.
        if (count == 2):
            # Require enough motion energy over samples i..i+2 to rule out
            # a stationary/slow ball.  Threshold 50 is empirical.
            if (((((((df['vecX'][i] ** 2) + (df['vecY'][i] ** 2)) + (df['vecX'][(i + 1)] ** 2)) + (df['vecY'][(i + 1)] ** 2)) + (df['vecX'][(i + 2)] ** 2)) + (df['vecY'][(i + 2)] ** 2)) >= 50):
                # Horizontal velocity jump: candidate hit unless the sign of
                # vecX is the same before and after (no direction change),
                # or the frame is flagged as a duplicate detection.
                if (abs((df['vecX'][i] - df['vecX'][(i - 1)])) >= 10):
                    if ((sum(df['vecX'][i:(i + 5)]) * sum(df['vecX'][(i - 5):i])) > 0):
                        pass
                    elif (df['Dup'][i] == 1):
                        pass
                    else:
                        hitpoint[i] = 1
                # Vertical velocity jump, analogous test with tighter windows.
                if (abs((df['vecY'][i] - df['vecY'][(i - 1)])) >= 8):
                    if ((sum(df['vecY'][i:(i + 3)]) * sum(df['vecY'][(i - 3):i])) > 0):
                        pass
                    elif (df['Dup'][i] == 1):
                        pass
                    else:
                        hitpoint[i] = 1
    df['hitpoint'] = hitpoint
    # Prune bursts: keep only the first detection within any 10-frame window.
    i = 0
    j = 0
    count = 0
    while (i < (len(df) - 10)):
        if (df['hitpoint'][i] == 1):
            j = (i + 1)
            count += 1
            while (j < len(df)):
                if ((df['Frame'][j] - df['Frame'][i]) < 10):
                    if (df['hitpoint'][j] == 1):
                        # NOTE(review): chained indexing assignment — pandas
                        # may warn (SettingWithCopy); verify it mutates df.
                        df['hitpoint'][j] = 0
                else:
                    break
                j += 1
        i += 1
    print(('After pruning the consecutive detections, number of detected hit-point = %d' % count))
def invert(g_ema, perceptual, real_img, device, args):
    """Project `real_img` into StyleGAN latent space.

    Two-stage GAN inversion: first optimise a single latent ``w``
    (initialised at ``args.mean_w``), then refine a per-layer latent ``wp``
    together with per-layer noise maps.  Intermediate and final
    reconstructions are written to ``args.result_dir`` when ``args.save`` is
    set, and the resulting latents are saved to
    ``<result_dir>/<image_name>.pt``.
    """
    save = args.save
    result = {}
    to_vgg = TO_VGG()
    # NOTE(review): gradients are enabled on both networks even though only
    # the latents are optimised; kept as-is to preserve behaviour.
    requires_grad(perceptual, True)
    requires_grad(g_ema, True)
    log_size = int(math.log(256, 2))
    num_layers = (log_size - 2) * 2 + 1
    # ---- stage 1: optimise a single latent w ----
    w = args.mean_w.clone().detach().to(device).unsqueeze(0)
    w.requires_grad = True
    wplr = args.wlr  # carried over as the initial LR for stage 2
    optimizer = torch.optim.Adam([w], lr=args.wlr)
    with torch.no_grad():
        sample, _ = g_ema([w], input_is_latent=True, randomize_noise=True)
        if save:
            utils.save_image(sample, args.result_dir + f'/{args.image_name}_recon_w_initial.png',
                             nrow=int(sample.shape[0] ** 0.5), normalize=True, range=(-1, 1))
            utils.save_image(real_img, args.result_dir + f'/{args.image_name}_input.png',
                             nrow=int(real_img.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    print('optimizing w')
    pbar = tqdm(range(args.w_iterations), initial=0, dynamic_ncols=True, smoothing=0.01)
    for idx in pbar:
        # Fix: was `(idx + (1 % (args.w_iterations // 2))) == 0`, i.e.
        # effectively `idx + 1 == 0`, so the learning rate never decayed.
        if (idx + 1) % (args.w_iterations // 2) == 0:
            for g in optimizer.param_groups:
                g['lr'] = g['lr'] * args.lr_decay_rate
            wplr = wplr * args.lr_decay_rate
        real_img_vgg = to_vgg(real_img)
        # NOTE(review): t is a constant 1 here (no annealing in stage 1,
        # unlike stage 2) — confirm this is intentional.
        t = 1
        w_tilde = w + torch.randn(w.shape, device=device) * t * t
        fake_img, _ = g_ema([w_tilde], input_is_latent=True, randomize_noise=True)
        fake_img_vgg = to_vgg(fake_img)
        fake_feature = perceptual(fake_img_vgg)
        real_feature = perceptual(real_img_vgg)
        loss_pixel = l2loss(fake_img, real_img)
        loss_feature = []
        for fake_feat, real_feat in zip(fake_feature.values(), real_feature.values()):
            loss_feature.append(l2loss(fake_feat, real_feat))
        loss_feature = torch.mean(torch.stack(loss_feature))
        loss = args.lambda_l2 * loss_pixel + args.lambda_p * loss_feature
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        pbar.set_description(f'optimizing w: loss_pixel: {loss_pixel:.4f}; loss_feature: {loss_feature:.4f}')
        if idx % (args.w_iterations // 3) == 0 and save:
            with torch.no_grad():
                sample, _ = g_ema([w], input_is_latent=True, randomize_noise=True)
                utils.save_image(sample, args.result_dir + f'/{args.image_name}_recon_w_{idx}.png',
                                 nrow=int(sample.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    result['w'] = w.squeeze().cpu()
    if save:
        with torch.no_grad():
            sample, _ = g_ema([w], input_is_latent=True, randomize_noise=True)
            utils.save_image(sample, args.result_dir + f'/{args.image_name}_recon_w_final.png',
                             nrow=int(sample.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    # ---- stage 2: optimise per-layer latents wp and noise maps ----
    print('optimizing wp')
    # NOTE(review): `args.num_layers` sizes wp while the locally computed
    # `num_layers` sizes the noise list — confirm the two agree (256px model).
    wp = w.unsqueeze(1).repeat(1, args.num_layers, 1).detach().clone()
    wp.requires_grad = True
    noises = []
    for layer_idx in range(num_layers):
        # Noise resolution doubles every other layer: 4x4, 8x8, ...
        res = (layer_idx + 5) // 2
        shape = [1, 1, 2 ** res, 2 ** res]
        noises.append(torch.randn(*shape, device=device).normal_())
        noises[layer_idx].requires_grad = True
    optimizer = torch.optim.Adam([wp] + noises, lr=wplr)
    if save:
        with torch.no_grad():
            sample, _ = g_ema(wp, noise=noises, input_is_w_plus=True, randomize_noise=False)
            utils.save_image(sample, args.result_dir + f'/{args.image_name}_recon_wp_initial.png',
                             nrow=int(sample.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    pbar = tqdm(range(args.wp_iterations), initial=0, dynamic_ncols=True, smoothing=0.01)
    for idx in pbar:
        # Fix: same precedence bug as stage 1 — decay the LR every
        # wp_iterations // 6 steps instead of never.
        if (idx + 1) % (args.wp_iterations // 6) == 0:
            for g in optimizer.param_groups:
                g['lr'] = g['lr'] * args.lr_decay_rate
        real_img_vgg = to_vgg(real_img)
        # Latent-noise amplitude annealed to 0 over the first third of the run.
        t = max(1 - 3 * idx / args.wp_iterations, 0)
        wp_tilde = wp + torch.randn(wp.shape, device=device) * t * t
        fake_img, _ = g_ema(wp_tilde, noise=noises, input_is_w_plus=True, randomize_noise=False)
        fake_img_vgg = to_vgg(fake_img)
        fake_feature = perceptual(fake_img_vgg)
        real_feature = perceptual(real_img_vgg)
        loss_pixel = l2loss(fake_img, real_img)
        loss_feature = []
        for fake_feat, real_feat in zip(fake_feature.values(), real_feature.values()):
            loss_feature.append(l2loss(fake_feat, real_feat))
        loss_feature = torch.mean(torch.stack(loss_feature))
        loss_noise = noise_regularize(noises)
        loss = args.lambda_l2 * loss_pixel + args.lambda_p * loss_feature + args.lambda_noise * loss_noise
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Keep noise maps zero-mean/unit-variance after each step.
        noise_normalize_(noises)
        pbar.set_description(f'optimizing wp: loss_pixel: {loss_pixel:.4f}; loss_feature: {loss_feature:.4f}')
        if idx % (args.wp_iterations // 3) == 0 and save:
            with torch.no_grad():
                sample, _ = g_ema(wp, noise=noises, input_is_w_plus=True, randomize_noise=False)
                utils.save_image(sample, args.result_dir + f'/{args.image_name}_recon_wp_{idx}.png',
                                 nrow=int(sample.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    with torch.no_grad():
        fake_img, _ = g_ema(wp, noise=noises, input_is_w_plus=True, randomize_noise=False)
    if save:
        utils.save_image(fake_img, args.result_dir + f'/{args.image_name}_recon_final.png',
                         nrow=int(fake_img.shape[0] ** 0.5), normalize=True, range=(-1, 1))
    result['wp'] = wp.squeeze().cpu()
    result['noise'] = [n.cpu() for n in noises]
    torch.save(result, args.result_dir + f'/{args.image_name}.pt')
def test_expression_not_string(temp_dir):
    """`get_version_data` raises TypeError when `expression` is not a string."""
    bad_options = {'path': 'a/b.py', 'expression': 23}
    source = CodeSource(str(temp_dir), bad_options)

    # Materialise the referenced file so only the bad option can fail.
    module_file = temp_dir / 'a' / 'b.py'
    module_file.ensure_parent_dir_exists()
    module_file.touch()

    with pytest.raises(TypeError, match='option `expression` must be a string'):
        source.get_version_data()
class _RequestCounter(): exp_cnt: int _keys: List[str] = ['limit', 'pause', 'reset_epoch', 'resume'] _cnt: Dict[(str, int)] _reached: Dict[(str, bool)] def __init__(self, exp_cnt: int): self.exp_cnt = exp_cnt self._cnt = {k: 0 for k in self._keys} self._reached = {k: False for k in self._keys} def increment(self, key: str) -> None: assert (key in self._reached) self._cnt[key] += 1 assert (self._cnt[key] <= self.exp_cnt) if (self._cnt[key] == self.exp_cnt): self._reached[key] = True def is_reached(self, key: str) -> bool: assert (key in self._reached) return self._reached[key] def reset(self, key: str) -> None: assert ((key in self._reached) and self._reached[key]) assert (self._cnt[key] >= 1) self._cnt[key] -= 1 if (self._cnt[key] == 0): self._reached[key] = False
# Fix: the `@attr.s` decorator was stripped in the degraded source, leaving
# `attr.ib()` as a meaningless bare class attribute; restored so `expected`
# becomes a constructor argument again.
@attr.s
class MatchesReraisedExcInfo(object):
    """testtools-style matcher: `actual` must be a re-raised copy of
    `expected` — same args, same type, and a matching traceback tail."""
    expected = attr.ib()

    def match(self, actual):
        """Return a mismatch object, or None when `actual` matches."""
        valcheck = Equals(self.expected.args).match(actual.args)
        if valcheck is not None:
            return valcheck
        typecheck = Equals(type(self.expected)).match(type(actual))
        if typecheck is not None:
            return typecheck
        # Compare formatted tracebacks, skipping the first formatted entry of
        # each, and allowing the new traceback extra leading frames (the
        # re-raise adds frames on top).
        expected = list(traceback.TracebackException.from_exception(self.expected).format())
        new = list(traceback.TracebackException.from_exception(actual).format())
        tail_equals = lambda a, b: a == b[-len(a):]
        if not tail_equals(expected[1:], new[1:]):
            return ReraisedTracebackMismatch(expected_tb=expected, got_tb=new)
class CharacterListView(LoginRequiredMixin, CharacterMixin, ListView):
    """Paginated, alphabetical list of the characters an account may view."""
    template_name = 'website/character_list.html'
    paginate_by = 100
    page_title = 'Character List'
    access_type = 'view'

    def get_queryset(self):
        """Return only characters the logged-in account can access, A-Z."""
        account = self.request.user
        # Lock checks cannot run in SQL, so filter in Python first, then
        # re-query by id to get a proper orderable queryset.
        visible_ids = [
            character.id
            for character in self.typeclass.objects.all()
            if character.access(account, self.access_type)
        ]
        return self.typeclass.objects.filter(id__in=visible_ids).order_by(Lower('db_key'))
def train(args, model, device, train_loader, optimizer, epoch):
    """Run one training epoch of the two-input (siamese) network.

    Batches are (images_1, images_2, targets); BCE loss on the squeezed
    model output.  Logs every `args.log_interval` batches and, when
    `args.dry_run` is set, stops right after the first logged batch.
    """
    model.train()
    criterion = nn.BCELoss()
    for batch_idx, (images_1, images_2, targets) in enumerate(train_loader):
        images_1 = images_1.to(device)
        images_2 = images_2.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()
        outputs = model(images_1, images_2).squeeze()
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()

        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch,
                batch_idx * len(images_1),
                len(train_loader.dataset),
                100.0 * batch_idx / len(train_loader),
                loss.item()))
            if args.dry_run:
                break
class TestEvaluate(TestCase):
    """Tests for pybamm.find_symbols / EvaluatorPython / EvaluatorJax.

    NOTE(review): every '@' character was stripped from this file by the
    extraction tool — both the matrix-multiplication operators (e.g.
    `A pybamm.StateVector(...)`) and the `unittest.skipIf` decorators on
    the jax tests.  They are restored below; `import unittest` is assumed
    to be available at module level (confirm against the file's imports).
    """

    def test_find_symbols(self):
        """find_symbols builds ordered constant/variable symbol tables."""
        a = pybamm.StateVector(slice(0, 1))
        b = pybamm.StateVector(slice(1, 2))
        # y = [a, b]
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = a + b
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        # test keys of variable_symbols
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.id)
        # test values of variable_symbols
        self.assertEqual(next(iter(variable_symbols.values())), 'y[0:1]')
        self.assertEqual(list(variable_symbols.values())[1], 'y[1:2]')
        var_a = pybamm.id_to_python_variable(a.id)
        var_b = pybamm.id_to_python_variable(b.id)
        self.assertEqual(list(variable_symbols.values())[2], f'{var_a} + {var_b}')
        # test identical subtree
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = a + b + b
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.children[0].id)
        self.assertEqual(list(variable_symbols.keys())[3], expr.id)
        self.assertEqual(next(iter(variable_symbols.values())), 'y[0:1]')
        self.assertEqual(list(variable_symbols.values())[1], 'y[1:2]')
        self.assertEqual(list(variable_symbols.values())[2], f'{var_a} + {var_b}')
        var_child = pybamm.id_to_python_variable(expr.children[0].id)
        self.assertEqual(list(variable_symbols.values())[3], f'{var_child} + {var_b}')
        # test unary op
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = pybamm.maximum(a, -b)
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.children[1].id)
        self.assertEqual(list(variable_symbols.keys())[3], expr.id)
        self.assertEqual(next(iter(variable_symbols.values())), 'y[0:1]')
        self.assertEqual(list(variable_symbols.values())[1], 'y[1:2]')
        self.assertEqual(list(variable_symbols.values())[2], f'-{var_b}')
        var_child = pybamm.id_to_python_variable(expr.children[1].id)
        self.assertEqual(list(variable_symbols.values())[3], f'np.maximum({var_a},{var_child})')
        # test function
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = pybamm.Function(test_function, a)
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(next(iter(constant_symbols.keys())), expr.id)
        self.assertEqual(next(iter(constant_symbols.values())), test_function)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], expr.id)
        self.assertEqual(next(iter(variable_symbols.values())), 'y[0:1]')
        var_funct = pybamm.id_to_python_variable(expr.id, True)
        self.assertEqual(list(variable_symbols.values())[1], f'{var_funct}({var_a})')
        # test dense matrix
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        A = pybamm.Matrix([[1, 2], [3, 4]])
        pybamm.find_symbols(A, constant_symbols, variable_symbols)
        self.assertEqual(len(variable_symbols), 0)
        self.assertEqual(next(iter(constant_symbols.keys())), A.id)
        np.testing.assert_allclose(next(iter(constant_symbols.values())), np.array([[1, 2], [3, 4]]))
        # test sparse matrix
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[0, 2], [0, 4]])))
        pybamm.find_symbols(A, constant_symbols, variable_symbols)
        self.assertEqual(len(variable_symbols), 0)
        self.assertEqual(next(iter(constant_symbols.keys())), A.id)
        np.testing.assert_allclose(next(iter(constant_symbols.values())).toarray(), A.entries.toarray())
        # test numpy concatenation
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = pybamm.NumpyConcatenation(a, b)
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.id)
        self.assertEqual(list(variable_symbols.values())[2], f'np.concatenate(({var_a},{var_b}))')
        # (duplicated block kept for fidelity with the original source)
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        expr = pybamm.NumpyConcatenation(a, b)
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.id)
        self.assertEqual(list(variable_symbols.values())[2], f'np.concatenate(({var_a},{var_b}))')
        # simple domain concatenation is not implemented without a mesh
        a = pybamm.StateVector(slice(0, 1), domain='test a')
        b = pybamm.StateVector(slice(1, 2), domain='test b')
        expr = pybamm.concatenation(a, b)
        with self.assertRaises(NotImplementedError):
            pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        # unprocessed symbols cannot be converted
        for expr in (pybamm.Variable('a'), pybamm.Parameter('a')):
            with self.assertRaises(NotImplementedError):
                pybamm.find_symbols(expr, constant_symbols, variable_symbols)

    def test_domain_concatenation(self):
        """DomainConcatenation converts to slices-and-concatenate code."""
        disc = get_discretisation_for_testing()
        mesh = disc.mesh
        a_dom = ['negative electrode']
        b_dom = ['positive electrode']
        a_pts = mesh[a_dom[0]].npts
        b_pts = mesh[b_dom[0]].npts
        a = pybamm.StateVector(slice(0, a_pts), domain=a_dom)
        b = pybamm.StateVector(slice(a_pts, a_pts + b_pts), domain=b_dom)
        y = np.empty((a_pts + b_pts, 1))
        for i in range(len(y)):
            y[i] = i
        expr = pybamm.DomainConcatenation([a, b], mesh)
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(next(iter(variable_symbols.keys())), a.id)
        self.assertEqual(list(variable_symbols.keys())[1], b.id)
        self.assertEqual(list(variable_symbols.keys())[2], expr.id)
        var_a = pybamm.id_to_python_variable(a.id)
        var_b = pybamm.id_to_python_variable(b.id)
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(list(variable_symbols.values())[2],
                         f'np.concatenate(({var_a}[0:{a_pts}],{var_b}[0:{b_pts}]))')
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(y=y)
        np.testing.assert_allclose(result, expr.evaluate(y=y))
        # check that concatenating a single domain is also possible
        expr = pybamm.DomainConcatenation([a], mesh)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(y=y)
        np.testing.assert_allclose(result, expr.evaluate(y=y))
        # check the reordering in case a child vector has to be split up
        a_dom = ['separator']
        b_dom = ['negative electrode', 'positive electrode']
        b0_pts = mesh[b_dom[0]].npts
        a0_pts = mesh[a_dom[0]].npts
        b1_pts = mesh[b_dom[1]].npts
        a = pybamm.StateVector(slice(0, a0_pts), domain=a_dom)
        b = pybamm.StateVector(slice(a0_pts, a0_pts + b0_pts + b1_pts), domain=b_dom)
        y = np.empty((a0_pts + b0_pts + b1_pts, 1))
        for i in range(len(y)):
            y[i] = i
        var_a = pybamm.id_to_python_variable(a.id)
        var_b = pybamm.id_to_python_variable(b.id)
        expr = pybamm.DomainConcatenation([a, b], mesh)
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        b0_str = f'{var_b}[0:{b0_pts}]'
        a0_str = f'{var_a}[0:{a0_pts}]'
        b1_str = f'{var_b}[{b0_pts}:{b0_pts + b1_pts}]'
        self.assertEqual(len(constant_symbols), 0)
        self.assertEqual(list(variable_symbols.values())[2],
                         f'np.concatenate(({a0_str},{b0_str},{b1_str}))')
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(y=y)
        np.testing.assert_allclose(result, expr.evaluate(y=y))

    def test_domain_concatenation_2D(self):
        """DomainConcatenation over a 1+1D mesh evaluates correctly."""
        disc = get_1p1d_discretisation_for_testing()
        a_dom = ['negative electrode']
        b_dom = ['separator']
        a = pybamm.Variable('a', domain=a_dom)
        b = pybamm.Variable('b', domain=b_dom)
        conc = pybamm.concatenation(2 * a, 3 * b)
        disc.set_variable_slices([a, b])
        expr = disc.process_symbol(conc)
        self.assertIsInstance(expr, pybamm.DomainConcatenation)
        y = np.empty((expr._size, 1))
        for i in range(len(y)):
            y[i] = i
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        pybamm.find_symbols(expr, constant_symbols, variable_symbols)
        self.assertEqual(len(constant_symbols), 0)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(y=y)
        np.testing.assert_allclose(result, expr.evaluate(y=y))
        # check that concatenating a single domain is also possible
        expr = disc.process_symbol(pybamm.concatenation(a))
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(y=y)
        np.testing.assert_allclose(result, expr.evaluate(y=y))

    def test_to_python(self):
        """to_python emits numbered var_ assignments for a simple sum."""
        a = pybamm.StateVector(slice(0, 1))
        b = pybamm.StateVector(slice(1, 2))
        expr = a + b
        constant_str, variable_str = pybamm.to_python(expr)
        expected_str = 'var_[0-9m]+ = y\\[0:1\\].*\\nvar_[0-9m]+ = y\\[1:2\\].*\\nvar_[0-9m]+ = var_[0-9m]+ \\+ var_[0-9m]+'
        self.assertRegex(variable_str, expected_str)

    def test_evaluator_python(self):
        """EvaluatorPython agrees with symbolic evaluate for many exprs."""
        a = pybamm.StateVector(slice(0, 1))
        b = pybamm.StateVector(slice(1, 2))
        y_tests = [np.array([[2], [3]]), np.array([[1], [3]])]
        t_tests = [1, 2]
        expr = a * b
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        self.assertEqual(result, 6)
        result = evaluator(t=None, y=np.array([[1], [3]]))
        self.assertEqual(result, 3)
        # test function(a*b)
        expr = pybamm.Function(test_function, a * b)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        self.assertEqual(result, 12)
        expr = pybamm.Function(test_function2, a, b)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        self.assertEqual(result, 5)
        # test a constant expression
        expr = pybamm.Scalar(2) * pybamm.Scalar(3)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator()
        self.assertEqual(result, 6)
        # test a larger expression
        expr = a * b + b + a ** 2 / b + 2 * a + b / 2 + 4
        evaluator = pybamm.EvaluatorPython(expr)
        for y in y_tests:
            result = evaluator(t=None, y=y)
            self.assertEqual(result, expr.evaluate(t=None, y=y))
        # test something with time
        expr = a * pybamm.t
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            self.assertEqual(result, expr.evaluate(t=t, y=y))
        # test matrix multiplication (restored '@' operators)
        A = pybamm.Matrix([[1, 2], [3, 4]])
        expr = A @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test heaviside
        a = pybamm.Vector([1, 2])
        expr = a <= pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = a > pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test minimum and maximum
        a = pybamm.Vector([1, 2])
        expr = pybamm.minimum(a, pybamm.StateVector(slice(0, 2)))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = pybamm.maximum(a, pybamm.StateVector(slice(0, 2)))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test Index
        expr = pybamm.Index(A @ pybamm.StateVector(slice(0, 2)), 0)
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            self.assertEqual(result, expr.evaluate(t=t, y=y))
        # test sparse and dense matrix products
        A = pybamm.Matrix([[1, 2], [3, 4]])
        B = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        C = pybamm.Matrix(scipy.sparse.coo_matrix(np.array([[1, 0], [0, 4]])))
        expr = A @ B @ C @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = B @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test numpy concatenation
        a = pybamm.StateVector(slice(0, 1))
        b = pybamm.StateVector(slice(1, 2))
        c = pybamm.StateVector(slice(2, 3))
        y_tests = [np.array([[2], [3], [4]]), np.array([[1], [3], [2]])]
        t_tests = [1, 2]
        expr = pybamm.NumpyConcatenation(a, b)
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = pybamm.NumpyConcatenation(a, c)
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test sparse stack
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        B = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[2, 0], [5, 0]])))
        a = pybamm.StateVector(slice(0, 1))
        expr = pybamm.SparseStack(A, a * B)
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y).toarray()
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y).toarray())
        expr = pybamm.SparseStack(A)
        evaluator = pybamm.EvaluatorPython(expr)
        result = evaluator().toarray()
        np.testing.assert_allclose(result, expr.evaluate().toarray())
        # test Inner
        expr = pybamm.Inner(a, b)
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        v = pybamm.StateVector(slice(0, 2))
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        for expr in [pybamm.Inner(A, v), pybamm.Inner(v, A)]:
            evaluator = pybamm.EvaluatorPython(expr)
            for t, y in zip(t_tests, y_tests):
                result = evaluator(t=t, y=y).toarray()
                np.testing.assert_allclose(result, expr.evaluate(t=t, y=y).toarray())
        # test a state vector with a non-contiguous slice
        y_tests = [np.array([[2], [3], [4], [5]]), np.array([[1], [3], [2], [1]])]
        t_tests = [1, 2]
        a = pybamm.StateVector(slice(0, 1), slice(3, 4))
        b = pybamm.StateVector(slice(1, 3))
        expr = a * b
        evaluator = pybamm.EvaluatorPython(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_find_symbols_jax(self):
        """Sparse matrices stay sparse when targeting the jax backend."""
        constant_symbols = OrderedDict()
        variable_symbols = OrderedDict()
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[0, 2], [0, 4]])))
        pybamm.find_symbols(A, constant_symbols, variable_symbols, output_jax=True)
        self.assertEqual(len(variable_symbols), 0)
        self.assertEqual(next(iter(constant_symbols.keys())), A.id)
        np.testing.assert_allclose(next(iter(constant_symbols.values())).toarray(), A.entries.toarray())

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_evaluator_jax(self):
        """EvaluatorJax agrees with symbolic evaluate for many exprs."""
        a = pybamm.StateVector(slice(0, 1))
        b = pybamm.StateVector(slice(1, 2))
        y_tests = [np.array([[2.0], [3.0]]), np.array([[1.0], [3.0]]), np.array([1.0, 3.0])]
        t_tests = [1.0, 2.0]
        expr = a * b
        evaluator = pybamm.EvaluatorJax(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        self.assertEqual(result, 6)
        result = evaluator(t=None, y=np.array([[1], [3]]))
        self.assertEqual(result, 3)
        # test function(a*b)
        expr = pybamm.Function(test_function, a * b)
        evaluator = pybamm.EvaluatorJax(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        self.assertEqual(result, 12)
        # test exp
        expr = pybamm.exp(a * b)
        evaluator = pybamm.EvaluatorJax(expr)
        result = evaluator(t=None, y=np.array([[2], [3]]))
        np.testing.assert_array_almost_equal(result, np.exp(6), decimal=15)
        # test a constant expression
        expr = pybamm.Scalar(2) * pybamm.Scalar(3)
        evaluator = pybamm.EvaluatorJax(expr)
        result = evaluator()
        self.assertEqual(result, 6)
        # test a larger expression
        expr = a * b + b + a ** 2 / b + 2 * a + b / 2 + 4
        evaluator = pybamm.EvaluatorJax(expr)
        for y in y_tests:
            result = evaluator(t=None, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=None, y=y))
        # test something with time
        expr = a * pybamm.t
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            self.assertEqual(result, expr.evaluate(t=t, y=y))
        # test matrix multiplication (restored '@' operators)
        A = pybamm.Matrix(np.array([[1, 2], [3, 4]]))
        expr = A @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test heaviside
        a = pybamm.Vector(np.array([1, 2]))
        expr = a <= pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = a > pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test minimum and maximum
        a = pybamm.Vector(np.array([1, 2]))
        expr = pybamm.minimum(a, pybamm.StateVector(slice(0, 2)))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        expr = pybamm.maximum(a, pybamm.StateVector(slice(0, 2)))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test Index
        expr = pybamm.Index(A @ pybamm.StateVector(slice(0, 2)), 0)
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            self.assertEqual(result, expr.evaluate(t=t, y=y))
        # test sparse and dense matrix products
        A = pybamm.Matrix(np.array([[1, 2], [3, 4]]))
        B = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        C = pybamm.Matrix(scipy.sparse.coo_matrix(np.array([[1, 0], [0, 4]])))
        expr = A @ B @ C @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # sparse matrix scaled by time
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        for expr in [(A * pybamm.t) @ pybamm.StateVector(slice(0, 2)),
                     (pybamm.t * A) @ pybamm.StateVector(slice(0, 2))]:
            evaluator = pybamm.EvaluatorJax(expr)
            for t, y in zip(t_tests, y_tests):
                result = evaluator(t=t, y=y)
                np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        expr = (A / (1.0 + pybamm.t)) @ pybamm.StateVector(slice(0, 2))
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # sparse stack is not supported by the jax backend
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        B = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[2, 0], [5, 0]])))
        a = pybamm.StateVector(slice(0, 1))
        expr = pybamm.SparseStack(A, a * B)
        with self.assertRaises(NotImplementedError):
            evaluator = pybamm.EvaluatorJax(expr)
        # matrix @ (state * sparse matrix) is not supported either
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1, 0], [0, 4]])))
        B = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[2, 0], [5, 0]])))
        a = pybamm.StateVector(slice(0, 1))
        expr = A @ (a * B)
        with self.assertRaises(NotImplementedError):
            evaluator = pybamm.EvaluatorJax(expr)
        # test numpy concatenation
        a = pybamm.Vector(np.array([[1], [2]]))
        b = pybamm.Vector(np.array([[3]]))
        expr = pybamm.NumpyConcatenation(a, b)
        evaluator = pybamm.EvaluatorJax(expr)
        for t, y in zip(t_tests, y_tests):
            result = evaluator(t=t, y=y)
            np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))
        # test Inner
        A = pybamm.Matrix(scipy.sparse.csr_matrix(np.array([[1]])))
        v = pybamm.StateVector(slice(0, 1))
        for expr in [pybamm.Inner(A, v) @ v, pybamm.Inner(v, A) @ v, pybamm.Inner(v, v) @ v]:
            evaluator = pybamm.EvaluatorJax(expr)
            for t, y in zip(t_tests, y_tests):
                result = evaluator(t=t, y=y)
                np.testing.assert_allclose(result, expr.evaluate(t=t, y=y))

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_evaluator_jax_jacobian(self):
        """get_jacobian matches the hand-derived symbolic jacobian."""
        a = pybamm.StateVector(slice(0, 1))
        y_tests = [np.array([[2.0]]), np.array([[1.0]]), np.array([1.0])]
        expr = a ** 2
        expr_jac = 2 * a
        evaluator = pybamm.EvaluatorJax(expr)
        evaluator_jac_test = evaluator.get_jacobian()
        evaluator_jac = pybamm.EvaluatorJax(expr_jac)
        for y in y_tests:
            result_test = evaluator_jac_test(t=None, y=y)
            result_true = evaluator_jac(t=None, y=y)
            np.testing.assert_allclose(result_test, result_true)

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_evaluator_jax_jvp(self):
        """get_jacobian_action matches jacobian @ v."""
        a = pybamm.StateVector(slice(0, 1))
        y_tests = [np.array([[2.0]]), np.array([[1.0]]), np.array([1.0])]
        v_tests = [np.array([[2.9]]), np.array([[0.9]]), np.array([1.3])]
        expr = a ** 2
        expr_jac = 2 * a
        evaluator = pybamm.EvaluatorJax(expr)
        evaluator_jac_test = evaluator.get_jacobian()
        evaluator_jac_action_test = evaluator.get_jacobian_action()
        evaluator_jac = pybamm.EvaluatorJax(expr_jac)
        for y, v in zip(y_tests, v_tests):
            result_test = evaluator_jac_test(t=None, y=y)
            result_test_times_v = evaluator_jac_action_test(t=None, y=y, v=v)
            result_true = evaluator_jac(t=None, y=y)
            result_true_times_v = evaluator_jac(t=None, y=y) @ v.reshape(-1, 1)
            np.testing.assert_allclose(result_test, result_true)
            np.testing.assert_allclose(result_test_times_v, result_true_times_v)

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_evaluator_jax_debug(self):
        """debug() runs without error on a simple expression."""
        a = pybamm.StateVector(slice(0, 1))
        expr = a ** 2
        y_test = np.array([2.0, 3.0])
        evaluator = pybamm.EvaluatorJax(expr)
        evaluator.debug(y=y_test)

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_evaluator_jax_inputs(self):
        """InputParameter values are supplied via the `inputs` dict."""
        a = pybamm.InputParameter('a')
        expr = a ** 2
        evaluator = pybamm.EvaluatorJax(expr)
        result = evaluator(inputs={'a': 2})
        self.assertEqual(result, 4)

    @unittest.skipIf(not pybamm.have_jax(), 'jax or jaxlib is not installed')
    def test_jax_coo_matrix(self):
        """JaxCooMatrix: dense conversion, matmul and scalar multiply."""
        A = pybamm.JaxCooMatrix([0, 1], [0, 1], [1.0, 2.0], (2, 2))
        Adense = jax.numpy.array([[1.0, 0], [0, 2.0]])
        v = jax.numpy.array([[2.0], [1.0]])
        np.testing.assert_allclose(A.toarray(), Adense)
        np.testing.assert_allclose(A @ v, Adense @ v)
        np.testing.assert_allclose(A.scalar_multiply(3.0).toarray(), Adense * 3.0)
        with self.assertRaises(NotImplementedError):
            A.multiply(v)
def get_logger(filename=None):
    """Return the shared 'logger' logger, optionally adding a file handler.

    Args:
        filename: Path for a DEBUG-level log file; if None, only the root
            logger's console output (configured at INFO) is used.

    Returns:
        logging.Logger: The named 'logger' instance at DEBUG level.
    """
    log = logging.getLogger('logger')
    log.setLevel(logging.DEBUG)
    # Console output is routed through the root logger at INFO level.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    if filename is None:
        return log
    # File output captures everything down to DEBUG, attached to the root
    # logger so records from all loggers end up in the file.
    file_handler = logging.FileHandler(filename)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
    logging.getLogger().addHandler(file_handler)
    return log
def _object_search_select(caller, obj_entry, **kwargs):
    """Handle the user picking one entry from the object-search listing.

    Looks up the matching object, verifies 'examine' access, and hands the
    object's prototype text to the examine node; on access failure it clears
    the stored search term and returns to the search node.
    """
    available = kwargs['available_choices']
    matches = caller.ndb._menutree.olc_search_object_matches
    selected = matches[available.index(obj_entry)]
    if not selected.access(caller, 'examine'):
        caller.msg("|rYou don't have 'examine' access on this object.|n")
        # Drop the stale search term so the search node starts fresh.
        del caller.ndb._menutree.olc_search_object_term
        return 'node_search_object'
    prototype = spawner.prototype_from_object(selected)
    prototype_text = protlib.prototype_to_str(prototype)
    return ('node_examine_entity',
            {'text': prototype_text, 'back': 'search_object'})
class RGBEncoder(nn.Module):
    """ResNet-50 feature extractor for RGB frames.

    Reuses the pretrained stem plus the first three residual stages and
    returns the three feature maps from deepest to shallowest.
    # assumes standard ResNet strides (4/8/16 for layer1/2/3) -- confirm
    # against mod_resnet.
    """

    def __init__(self):
        super().__init__()
        backbone = mod_resnet.resnet50(pretrained=True)
        # Stem: conv -> bn -> relu -> maxpool.
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool
        # First three residual stages; layer4 is intentionally unused.
        self.res2 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3

    def forward(self, f):
        stem_out = self.maxpool(self.relu(self.bn1(self.conv1(f))))
        feat_a = self.res2(stem_out)
        feat_b = self.layer2(feat_a)
        feat_c = self.layer3(feat_b)
        # Deepest feature first.
        return (feat_c, feat_b, feat_a)
class CPD_ResNet(nn.Module):
    """Cascaded Partial Decoder network on a two-branch ResNet backbone.

    The backbone (``B2_ResNet``) shares conv1..layer2 and then forks into two
    copies of layer3/layer4: the ``*_1`` branch produces a coarse attention
    map and the ``*_2`` branch produces the refined detection map.
    """

    def __init__(self, channel=32):
        super(CPD_ResNet, self).__init__()
        self.resnet = B2_ResNet()
        # Attention branch: reduce each backbone stage to `channel` dims.
        self.rfb2_1 = RFB(512, channel)
        self.rfb3_1 = RFB(1024, channel)
        self.rfb4_1 = RFB(2048, channel)
        self.agg1 = aggregation(channel)
        # Detection branch: a second, independent set of RFB/aggregation modules.
        self.rfb2_2 = RFB(512, channel)
        self.rfb3_2 = RFB(1024, channel)
        self.rfb4_2 = RFB(2048, channel)
        self.agg2 = aggregation(channel)
        # 8x bilinear upsample back to input resolution
        # (assumes the aggregated maps are at stride 8 -- confirm).
        self.upsample = nn.Upsample(scale_factor=8, mode='bilinear', align_corners=True)
        # Opaque helper; presumably reweights layer2 features by the
        # sigmoid attention map (see forward) -- confirm against HA's code.
        self.HA = HA()
        if self.training:
            self.initialize_weights()

    def forward(self, x):
        # Shared stem and early stages.
        x = self.resnet.conv1(x)
        x = self.resnet.bn1(x)
        x = self.resnet.relu(x)
        x = self.resnet.maxpool(x)
        x1 = self.resnet.layer1(x)
        x2 = self.resnet.layer2(x1)
        # Branch 1: coarse attention map from stages 2-4.
        x2_1 = x2
        x3_1 = self.resnet.layer3_1(x2_1)
        x4_1 = self.resnet.layer4_1(x3_1)
        x2_1 = self.rfb2_1(x2_1)
        x3_1 = self.rfb3_1(x3_1)
        x4_1 = self.rfb4_1(x4_1)
        attention_map = self.agg1(x4_1, x3_1, x2_1)
        # Branch 2: stage-2 features modulated by the sigmoid attention map,
        # then passed through the second copies of layer3/layer4.
        x2_2 = self.HA(attention_map.sigmoid(), x2)
        x3_2 = self.resnet.layer3_2(x2_2)
        x4_2 = self.resnet.layer4_2(x3_2)
        x2_2 = self.rfb2_2(x2_2)
        x3_2 = self.rfb3_2(x3_2)
        x4_2 = self.rfb4_2(x4_2)
        detection_map = self.agg2(x4_2, x3_2, x2_2)
        # Both maps are returned at input resolution.
        return (self.upsample(attention_map), self.upsample(detection_map))

    def initialize_weights(self):
        """Load ImageNet ResNet-50 weights into the forked backbone.

        Keys containing '_1' or '_2' (the duplicated layer3/layer4 copies)
        are mapped back to the corresponding single-branch key, so both
        branches start from identical pretrained weights.
        """
        res50 = models.resnet50(pretrained=True)
        pretrained_dict = res50.state_dict()
        all_params = {}
        for (k, v) in self.resnet.state_dict().items():
            if (k in pretrained_dict.keys()):
                v = pretrained_dict[k]
                all_params[k] = v
            elif ('_1' in k):
                # e.g. 'layer3_1.0.conv1.weight' -> 'layer3.0.conv1.weight'
                # (assumes '_1' occurs exactly once in the key -- confirm).
                name = (k.split('_1')[0] + k.split('_1')[1])
                v = pretrained_dict[name]
                all_params[k] = v
            elif ('_2' in k):
                name = (k.split('_2')[0] + k.split('_2')[1])
                v = pretrained_dict[name]
                all_params[k] = v
        # Every backbone parameter must have been matched above.
        assert (len(all_params.keys()) == len(self.resnet.state_dict().keys()))
        self.resnet.load_state_dict(all_params)
def test_rate_limits(gl):
    """Verify authenticated-API throttling: with a 1-request/3-second limit,
    rapid project creation with obey_rate_limit=False must raise
    GitlabCreateError mentioning 'Retry later'.

    Fix: the cleanup used a list comprehension purely for its side effects
    (`[project.delete() for project in projects]`); replaced with a plain
    loop. Also `range(0, 20)` simplified to `range(20)`.
    """
    settings = gl.settings.get()
    settings.throttle_authenticated_api_enabled = True
    settings.throttle_authenticated_api_requests_per_period = 1
    settings.throttle_authenticated_api_period_in_seconds = 3
    settings.save()

    projects = []
    # These creations respect the rate limit (default obey_rate_limit=True),
    # so they retry and succeed.
    for i in range(20):
        projects.append(gl.projects.create({'name': f'{i}ok'}))

    # Without obeying the rate limit, creation must fail fast.
    with pytest.raises(gitlab.GitlabCreateError) as e:
        for i in range(20, 40):
            projects.append(
                gl.projects.create({'name': f'{i}shouldfail'}, obey_rate_limit=False)
            )
    assert 'Retry later' in str(e.value)

    # Restore settings and clean up created projects.
    settings.throttle_authenticated_api_enabled = False
    settings.save()
    for project in projects:
        project.delete()
def MaximumArg(term, *others, rank=None, condition=None):
    """Build a MaximumArg constraint over *term* and *others*.

    The raw constraint is wrapped into a complete/partial constraint form
    when construction yields an actual ConstraintMaximumArg; otherwise the
    construction result is returned as-is.
    """
    terms = _extremum_terms(term, others)
    checkType(rank, (type(None), TypeRank))
    constraint = ConstraintMaximumArg(terms, rank, condition)
    if isinstance(constraint, ConstraintMaximumArg):
        return _wrapping_by_complete_or_partial_constraint(constraint)
    return constraint
class _AdditionalInformationPredict(): def __init__(self, directory, xml_file, method_name, model_type=('classification', None)): self.directory = directory self.xml_file = xml_file self.method_name = method_name self.model_type = model_type self.result_filename = os.path.join(directory, 'dump_predictions.pkl')
def test__optimal_time_ocp__multiphase_time_constraint():
    """Smoke-test building the multiphase time-constrained OCP example."""
    from bioptim.examples.optimal_time_ocp import multiphase_time_constraint as ocp_module

    model_path = os.path.dirname(ocp_module.__file__) + '/models/cube.bioMod'
    # Three phases with per-phase duration bounds and shooting-node counts.
    ocp_module.prepare_ocp(
        biorbd_model_path=model_path,
        final_time=(2, 5, 4),
        time_min=(1, 3, 0.1),
        time_max=(2, 4, 0.8),
        n_shooting=(20, 30, 20),
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
def test_run_with_dependencies_nested_extras(installer: Installer, locker: Locker, repo: Repository, package: ProjectPackage) -> None:
    """Installing A[B] must transitively activate B's own extra C.

    A depends on B via extra 'B'; B depends on C via its extra 'C'; the
    resulting lock data must match the recorded fixture.
    """
    pkg_a = get_package('A', '1.0')
    pkg_b = get_package('B', '1.0')
    pkg_c = get_package('C', '1.0')

    dep_c = Factory.create_dependency('C', {'version': '^1.0', 'optional': True})
    dep_b = Factory.create_dependency('B', {'version': '^1.0', 'optional': True, 'extras': ['C']})
    dep_a = Factory.create_dependency('A', {'version': '^1.0', 'extras': ['B']})

    # B exposes extra 'c' -> C; A exposes extra 'b' -> B.
    pkg_b.extras = {canonicalize_name('c'): [dep_c]}
    pkg_b.add_dependency(dep_c)
    pkg_a.add_dependency(dep_b)
    pkg_a.extras = {canonicalize_name('b'): [dep_b]}

    for pkg in (pkg_a, pkg_b, pkg_c):
        repo.add_package(pkg)
    package.add_dependency(dep_a)

    assert installer.run() == 0
    assert locker.written_data == fixture('with-dependencies-nested-extras')
()
# NOTE(review): the bare "()" above looks like a decorator whose name was
# stripped during extraction -- presumably "@pytest.fixture()"; confirm
# against the original file.
def skip_userid_validation(monkeypatch):
    """Stub out Matrix user-id signature validation for tests.

    Patches ``validate_userid_signature`` in every module that imports it so
    any user validates successfully and resolves to ``factories.HOP1``.
    """
    import raiden.network.transport.matrix
    import raiden.network.transport.matrix.utils

    def mock_validate_userid_signature(user):
        # Always "valid": every user maps to the HOP1 test address.
        return factories.HOP1

    # The real function is imported into several namespaces; patch each one
    # so all call sites see the stub.
    monkeypatch.setattr(raiden.network.transport.matrix, 'validate_userid_signature', mock_validate_userid_signature)
    monkeypatch.setattr(raiden.network.transport.matrix.transport, 'validate_userid_signature', mock_validate_userid_signature)
    monkeypatch.setattr(raiden.network.transport.matrix.utils, 'validate_userid_signature', mock_validate_userid_signature)
class ModelCompressor():
    """Facade for compressing a tf.keras model with a chosen scheme."""

    # Fix: the method's first parameter is the model, not `self`, so it must
    # be a @staticmethod; without the decorator, calling it on an instance
    # would silently bind the instance as `model`.
    @staticmethod
    def compress_model(model: tf.keras.Model, eval_callback: EvalFunction, eval_iterations,
                       compress_scheme: CompressionScheme, cost_metric: CostMetric,
                       parameters: Union[SpatialSvdParameters], trainer: Callable = None,
                       visualization_url: str = None) -> Tuple[(tf.keras.Model, CompressionStats)]:
        """Compress *model* and return ``(compressed_model, stats)``.

        Args:
            model: Model to compress.
            eval_callback: Scoring function for candidate models (wrapped
                for keras via ``keras_wrapper_func``).
            eval_iterations: Iterations to run per evaluation.
            compress_scheme: Compression algorithm; only spatial SVD is
                currently implemented.
            cost_metric: Cost metric guiding the compression search.
            parameters: Scheme-specific parameters; ``multiplicity`` must be >= 1.
            trainer: Optional fine-tuning callable handed to the algorithm.
            visualization_url: Optional Bokeh server URL for progress plots.

        Raises:
            ValueError: If ``parameters.multiplicity < 1`` or the scheme is unknown.
            NotImplementedError: For weight-SVD and channel-pruning schemes.
        """
        if not visualization_url:
            bokeh_session = None
        else:
            bokeh_session = BokehServerSession(url=visualization_url, session_id='compression')
        if parameters.multiplicity < 1:
            # NOTE(review): the message says "greater than 1" but the check
            # rejects only values < 1 (multiplicity must be >= 1) -- confirm
            # intent before tightening either side.
            raise ValueError('Rounding Multiplicity should be greater than 1')
        eval_callback = keras_wrapper_func(eval_callback)
        if compress_scheme == CompressionScheme.spatial_svd:
            algo = CompressionFactory.create_spatial_svd_algo(model, eval_callback, eval_iterations,
                                                              cost_metric, parameters, bokeh_session)
        elif compress_scheme == CompressionScheme.weight_svd:
            raise NotImplementedError('Not yet implemented for: {}'.format(compress_scheme))
        elif compress_scheme == CompressionScheme.channel_pruning:
            raise NotImplementedError('Not yet implemented for: {}'.format(compress_scheme))
        else:
            raise ValueError('Compression scheme not supported: {}'.format(compress_scheme))
        (compressed_layer_db, stats) = algo.compress_model(cost_metric, trainer)
        # Round-trip through a saved graph to materialise the compressed
        # layers, then drop disconnected ("hanging") nodes.
        tmp_dir = './data/saved_model'
        updated_model = keras_save_and_load_graph(tmp_dir, compressed_layer_db.model)
        updated_model = keras_remove_hanging_nodes(updated_model)
        return (updated_model, stats)
def rewrite_dyld_path(dylib: Path):
    """Rewrite dylib install names inside a 64-bit little-endian Mach-O file.

    Walks the file's load commands and, for every ID_DYLIB / LOAD_DYLIB
    entry whose path ends in ``libr_*.dylib``, invokes ``install_name_tool``
    to rewrite the reference as ``_path/<name>``.

    NOTE(review): the literal prefix ``_path/`` looks like a mangled
    ``@rpath/`` (the ``@`` appears lost in extraction) -- confirm against
    the original script.
    """

    def _read_until_zero(fp):
        # Read a NUL-terminated UTF-8 string, then restore the file position.
        cur = fp.tell()
        s = b''
        ch = fp.read(1)
        while ((ch != b'\x00') and (ch != b'')):
            s += ch
            ch = fp.read(1)
        fp.seek(cur, 0)
        return s.decode('utf-8')

    def _parse_libr_name(path):
        # Return the trailing 'libr_*.dylib' component, or None if absent.
        result = re.findall('(libr_.*\\.dylib$)', path)
        if (len(result) == 0):
            return None
        else:
            return result[0]

    def _verbose_call(*args, **kwargs):
        # subprocess.call with the command echoed for debugging.
        print(f'Calling: {args[0]}')
        return subprocess.call(*args, **kwargs)

    with open(dylib, 'rb+') as f:
        magic = f.read(4)
        if (magic != b'\xcf\xfa\xed\xfe'):
            # Not MH_MAGIC_64 (little-endian 64-bit Mach-O); nothing to do.
            return
        # mach_header_64: skip cputype/cpusubtype/filetype, keep ncmds.
        (_, _, _, load_num) = struct.unpack('<IIII', f.read(16))
        # Load commands start right after the 32-byte mach_header_64.
        f.seek(32, 0)
        for _ in range(load_num):
            section_start_pos = f.tell()
            section_header = f.read(8)
            if (len(section_header) != 8):
                # Truncated file: stop scanning.
                break
            (cmd, size) = struct.unpack('<II', section_header)
            if (cmd == 13):  # LC_ID_DYLIB: the library's own install name.
                # dylib_command stores a header-relative string offset.
                (offset,) = struct.unpack('<I', f.read(4))
                f.seek((section_start_pos + offset), 0)
                id_dylib = _read_until_zero(f)
                lib_name = _parse_libr_name(id_dylib)
                if (lib_name is not None):
                    print(f'Patching ID_DYLIB {id_dylib} for {str(dylib)}')
                    _verbose_call(['install_name_tool', '-id', f'_path/{lib_name}', str(dylib)])
            elif (cmd == 12):  # LC_LOAD_DYLIB: a dependency's install name.
                (offset,) = struct.unpack('<I', f.read(4))
                f.seek((section_start_pos + offset), 0)
                load_dylib = _read_until_zero(f)
                lib_name = _parse_libr_name(load_dylib)
                if (lib_name is not None):
                    print(f'Patching LOAD_DYLIB {lib_name} for {str(dylib)}')
                    _verbose_call(['install_name_tool', '-change', load_dylib, f'_path/{lib_name}', str(dylib)])
            # Advance to the next load command regardless of type.
            f.seek((section_start_pos + size), 0)
    return
_module()
# NOTE(review): the bare "_module()" above appears to be a decorator whose
# prefix was lost during extraction (likely a registry decorator such as
# "@...register_module()"); confirm against the original source.
class Res2Net(ResNet):
    """Res2Net backbone: a ResNet variant built from Bottle2neck blocks.

    ``arch_settings`` maps network depth -> (block class, per-stage repeats).
    """

    arch_settings = {50: (Bottle2neck, (3, 4, 6, 3)), 101: (Bottle2neck, (3, 4, 23, 3)), 152: (Bottle2neck, (3, 8, 36, 3))}

    def __init__(self, scales=4, base_width=26, style='pytorch', deep_stem=True, avg_down=True, **kwargs):
        # scales/base_width parameterise the Bottle2neck split widths.
        self.scales = scales
        self.base_width = base_width
        # NOTE(review): style/deep_stem/avg_down are accepted as parameters
        # but the super() call hardcodes style='pytorch', deep_stem=True,
        # avg_down=True, so caller-supplied values for these three are
        # silently ignored.
        super(Res2Net, self).__init__(style='pytorch', deep_stem=True, avg_down=True, **kwargs)

    def make_res_layer(self, **kwargs):
        # Build one stage of Bottle2neck blocks with this backbone's widths.
        return Res2Layer(scales=self.scales, base_width=self.base_width, base_channels=self.base_channels, **kwargs)

    def init_weights(self, pretrained=None):
        """Initialise weights from a checkpoint or with default schemes.

        Args:
            pretrained (str | None): Checkpoint path to load (non-strict),
                or None for Kaiming/constant initialisation. Any other type
                raises TypeError.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif (pretrained is None):
            # Kaiming for convs, constant 1 for norm layers.
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if (self.dcn is not None):
                # Zero the DCN offset convs so deformable convs start out
                # behaving like regular convs.
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        for n in m.convs:
                            if hasattr(n, 'conv_offset'):
                                constant_init(n.conv_offset, 0)
            if self.zero_init_residual:
                # Zero-init each block's last norm so residual branches start
                # as identity mappings.
                for m in self.modules():
                    if isinstance(m, Bottle2neck):
                        constant_init(m.norm3, 0)
        else:
            raise TypeError('pretrained must be a str or None')
class MsgSensors():
    """Raw sensor-readings message; every field starts at zero."""

    def __init__(self):
        # Rate-gyro axes.
        self.gyro_x = self.gyro_y = self.gyro_z = 0
        # Accelerometer axes.
        self.accel_x = self.accel_y = self.accel_z = 0
        # Magnetometer axes.
        self.mag_x = self.mag_y = self.mag_z = 0
        # Absolute and differential pressure readings.
        self.abs_pressure = 0
        self.diff_pressure = 0
        # GPS position (north/east/height).
        self.gps_n = self.gps_e = self.gps_h = 0
        # GPS ground speed and course.
        self.gps_Vg = 0
        self.gps_course = 0
class Graph(object):
    """Thin graph wrapper storing node objects in insertion order.

    Node objects are expected to expose ``idx``, ``neighbors``, ``ins``
    and ``outs`` attributes.
    """

    def __init__(self, idx, num_nodes):
        self.g_id = idx            # graph identifier
        self.num_nodes = num_nodes # declared node count
        self.node = []             # node objects, in insertion order

    def add_node(self, one_node):
        """Append a node object to the graph."""
        self.node.append(one_node)

    def neighbors(self, idx):
        """Neighbor list of the node stored at position *idx*."""
        return self.node[idx].neighbors

    def ins(self, idx):
        """Incoming edges of the node stored at position *idx*."""
        return self.node[idx].ins

    def outs(self, idx):
        """Outgoing edges of the node stored at position *idx*."""
        return self.node[idx].outs

    def nodes(self):
        """The ``idx`` of every stored node, in insertion order."""
        return [each.idx for each in self.node]