def test_jsonrepresenter_loads():
    representer = filesystem.JsonRepresenter()
    with open('./tests/testfiles/test.json', representer.read_mode) as file:
        obj = representer.load(file)
    assert obj
    assert obj['key1'] == 'value1'
    assert obj['key2'] == 'value2'
    assert obj['key3'] == 'value3'
def delete_pod(cli, name, namespace):
    try:
        cli.delete_namespaced_pod(name=name, namespace=namespace)
        while cli.read_namespaced_pod(name=name, namespace=namespace):
            time.sleep(1)
    except ApiException as e:
        if e.status == 404:
            logging.info('Pod deleted')
        else:
            logging.error('Failed to delete pod %s' % e)
            raise e
class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, classify=False):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        out3 = self.layer2(x)
        out4 = self.layer3(out3)
        out5 = self.layer4(out4)
        if classify:
            x = self.avgpool(out5)
            x = x.view(x.size(0), -1)
            x = self.fc(x)
            return x
        else:
            return (out3, out4, out5)

    def freeze_bn(self):
        for layer in self.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eval()

    def freeze_stages(self, stage):
        if stage >= 0:
            self.bn1.eval()
            for m in [self.conv1, self.bn1]:
                for param in m.parameters():
                    param.requires_grad = False
        for i in range(1, stage + 1):
            layer = getattr(self, 'layer{}'.format(i))
            layer.eval()
            for param in layer.parameters():
                param.requires_grad = False
def dict_map(
    fn: Callable[[T], Any],
    dic: Dict[Any, Union[dict, list, tuple, T]],
    leaf_type: Type[T],
) -> Dict[Any, Union[dict, list, tuple, Any]]:
    new_dict: Dict[Any, Union[dict, list, tuple, Any]] = {}
    for k, v in dic.items():
        if isinstance(v, dict):
            new_dict[k] = dict_map(fn, v, leaf_type)
        else:
            new_dict[k] = tree_map(fn, v, leaf_type)
    return new_dict
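A minimal usage sketch for dict_map. The tree_map helper it calls is defined elsewhere in the source; the stub below is a hypothetical stand-in that recurses into list/tuple containers and applies fn to bare leaves:

from typing import Any, Callable, Dict, Type, TypeVar, Union

T = TypeVar('T')

def tree_map(fn, value, leaf_type):
    # Hypothetical stand-in for the real helper referenced by dict_map.
    if isinstance(value, (list, tuple)):
        return type(value)(tree_map(fn, v, leaf_type) for v in value)
    return fn(value) if isinstance(value, leaf_type) else value

doubled = dict_map(lambda x: x * 2, {'a': 1, 'b': {'c': [2, 3]}}, int)
assert doubled == {'a': 2, 'b': {'c': [4, 6]}}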
class ArchARM(Arch):

    def __init__(self):
        super().__init__()
        self._regs = ('r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7',
                      'r8', 'r9', 'r10', 'r11', 'r12', 'sp', 'lr', 'pc')

    # The property decorators were lost in the source; the duplicate
    # regs definitions imply a getter/setter pair.
    @property
    def regs(self):
        return self._regs

    @regs.setter
    def regs(self, regs):
        self._regs += regs

    def regs_need_swapped(self):
        return {'sl': 'r10', 'ip': 'r12', 'fp': 'r11'}

    @staticmethod
    def get_flags(bits: int) -> Mapping[str, bool]:

        def get_mode(bits: int) -> str:
            return {
                16: 'User', 17: 'FIQ', 18: 'IRQ', 19: 'Supervisor',
                22: 'Monitor', 23: 'Abort', 26: 'Hypervisor',
                27: 'Undefined', 31: 'System',
            }.get(bits & 31)

        # The NZCV mask constants were garbled in the source; restored as the
        # standard ARM CPSR flag bits (N=31, Z=30, C=29, V=28).
        return {
            'mode': get_mode(bits),
            'thumb': (bits & 32) != 0,
            'fiq': (bits & 64) != 0,
            'irq': (bits & 128) != 0,
            'neg': (bits & (1 << 31)) != 0,
            'zero': (bits & (1 << 30)) != 0,
            'carry': (bits & (1 << 29)) != 0,
            'overflow': (bits & (1 << 28)) != 0,
        }

    @property
    def thumb_mode(self) -> bool:
        return self.ql.arch.is_thumb

    def read_insn(self, address: int) -> bytes:

        def thumb_read(address: int) -> bytes:
            first_two = self.ql.mem.read_ptr(address, 2)
            result = self.ql.pack16(first_two)
            # Wide (32-bit) Thumb instructions carry one of these prefixes.
            if any([
                (first_two & 61440) == 61440,
                (first_two & 63488) == 63488,
                (first_two & 59392) == 59392,
            ]):
                latter_two = self.ql.mem.read_ptr(address + 2, 2)
                result += self.ql.pack16(latter_two)
            return result

        return super().read_insn(address) if not self.thumb_mode else thumb_read(address)
def test_dry_run_does_not_build(tester: CommandTester, mocker: MockerFixture) -> None:
    assert isinstance(tester.command, InstallerCommand)
    mocker.patch.object(tester.command.installer, 'run', return_value=0)
    mocked_editable_builder = mocker.patch('poetry.masonry.builders.editable.EditableBuilder')
    tester.execute('--dry-run')
    assert mocked_editable_builder.return_value.build.call_count == 0
def load_and_broadcast_checkpoint(checkpoint_path: str,
                                  device: torch.device = CPU_DEVICE) -> Optional[Dict]:
    if is_primary():
        checkpoint = load_checkpoint(checkpoint_path, device)
    else:
        checkpoint = None
    logging.info(f'Broadcasting checkpoint loaded from {checkpoint_path}')
    return broadcast_object(checkpoint)
def keras_model_functional_with_non_fused_batchnorms_for_tf2():
    inputs = tf.keras.Input(shape=(32, 32, 3))
    x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    x = tf.keras.layers.BatchNormalization(momentum=0.3, epsilon=0.65, fused=False)(x, training=True)
    with tf.compat.v1.variable_scope('scope_1'):
        x = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.4, epsilon=0.25, fused=False)(x, training=False)
        x = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(x)
        x = tf.keras.layers.BatchNormalization(momentum=0.5, epsilon=0.35, fused=False)(x, training=False)
        x = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(x)
        x = tf.keras.layers.Flatten()(x)
        outputs = tf.keras.layers.Dense(
            10, activation=tf.nn.softmax,
            name='keras_model_functional_with_non_fused_batchnorms')(x)
    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return model
class WithDescriptors(Serialisable):
    descriptor = Descriptor[str]()

    typed_default = Typed(expected_type=str)
    typed_not_none = Typed(expected_type=str, allow_none=False)
    typed_none = Typed(expected_type=str, allow_none=True)

    set_tuple = Set(values=('a', 1, 0.0))
    set_list = Set(values=['a', 1, 0.0])
    set_tuple_none = Set(values=('a', 1, 0.0, None))
    noneset_tuple = NoneSet(values=('a', 1, 0.0))
    noneset_list = NoneSet(values=['a', 1, 0.0])

    length_tuple = Length[Tuple[str, str]](length=1)
    length_list = Length[List[str]](length=1)
    length_invalid = Length[object](length=1)

    match_pattern_str_default = MatchPattern(pattern='')
    match_pattern_str = MatchPattern(pattern='', allow_none=False)
    match_pattern_str_none = MatchPattern(pattern='', allow_none=True)
    match_pattern_bytes_default = MatchPattern(pattern=b'')
    match_pattern_bytes = MatchPattern(pattern=b'', allow_none=False)
    match_pattern_bytes_none = MatchPattern(pattern=b'', allow_none=True)

    convertible_default = Convertible(expected_type=int)
    convertible_not_none = Convertible(expected_type=int, allow_none=False)
    convertible_none = Convertible(expected_type=int, allow_none=True)

    minmax_default = MinMax(min=0, max=0)
    minmax_float = MinMax(min=0, max=0, expected_type=float, allow_none=False)
    minmax_float_none = MinMax(min=0, max=0, expected_type=float, allow_none=True)
    minmax_int = MinMax(min=0.0, max=0.0, expected_type=int, allow_none=False)
    minmax_int_none = MinMax(min=0.0, max=0.0, expected_type=int, allow_none=True)

    bool_default = Bool()
    bool_not_none = Bool(allow_none=False)
    bool_none = Bool(allow_none=True)

    datetime_default = DateTime()
    datetime_not_none = DateTime(allow_none=False)
    datetime_none = DateTime(allow_none=True)

    string_default = String()
    string_not_none = String(allow_none=False)
    string_none = String(allow_none=True)

    float_default = Float()
    float_not_none = Float(allow_none=False)
    float_none = Float(allow_none=True)

    integer_default = Integer()
    integer_not_none = Integer(allow_none=False)
    integer_none = Integer(allow_none=True)

    assert_type(descriptor, Descriptor[str])
    assert_type(typed_default, Typed[str, Literal[False]])
    assert_type(typed_not_none, Typed[str, Literal[False]])
    assert_type(typed_none, Typed[str, Literal[True]])
    assert_type(set_tuple, Set[Union[Literal['a', 1], float]])
    assert_type(set_list, Set[Union[str, int, float]])
    assert_type(set_tuple_none, Set[Union[Literal['a', 1, None], float]])
    assert_type(noneset_tuple, NoneSet[Union[Literal['a', 1], float]])
    assert_type(noneset_list, NoneSet[Union[str, float]])
    assert_type(length_tuple, Length[Tuple[str, str]])
    assert_type(length_list, Length[List[str]])
    assert_type(match_pattern_str_default, MatchPattern[str, Literal[False]])
    assert_type(match_pattern_str, MatchPattern[str, Literal[False]])
    assert_type(match_pattern_str_none, MatchPattern[str, Literal[True]])
    assert_type(match_pattern_bytes_default, MatchPattern[ReadableBuffer, Literal[False]])
    assert_type(match_pattern_bytes, MatchPattern[ReadableBuffer, Literal[False]])
    assert_type(match_pattern_bytes_none, MatchPattern[ReadableBuffer, Literal[True]])
    assert_type(convertible_default, Convertible[int, Literal[False]])
    assert_type(convertible_not_none, Convertible[int, Literal[False]])
    assert_type(convertible_none, Convertible[int, Literal[True]])
    assert_type(minmax_default, MinMax[float, Literal[False]])
    assert_type(minmax_float, MinMax[float, Literal[False]])
    assert_type(minmax_float_none, MinMax[float, Literal[True]])
    assert_type(minmax_int, MinMax[int, Literal[False]])
    assert_type(minmax_int_none, MinMax[int, Literal[True]])
    assert_type(bool_default, Bool[Literal[False]])
    assert_type(bool_not_none, Bool[Literal[False]])
    assert_type(bool_none, Bool[Literal[True]])
    assert_type(datetime_default, DateTime[Literal[False]])
    assert_type(datetime_not_none, DateTime[Literal[False]])
    assert_type(datetime_none, DateTime[Literal[True]])
    assert_type(string_default, String[Literal[False]])
    assert_type(string_not_none, String[Literal[False]])
    assert_type(string_none, String[Literal[True]])
    assert_type(float_default, Float[Literal[False]])
    assert_type(float_not_none, Float[Literal[False]])
    assert_type(float_none, Float[Literal[True]])
    assert_type(integer_default, Integer[Literal[False]])
    assert_type(integer_not_none, Integer[Literal[False]])
    assert_type(integer_none, Integer[Literal[True]])
class Float24(Codec):
    codec_id = 'imagecodecs_float24'

    def __init__(self, byteorder=None, rounding=None):
        self.byteorder = byteorder
        self.rounding = rounding

    def encode(self, buf):
        buf = protective_squeeze(numpy.asarray(buf))
        return imagecodecs.float24_encode(buf, byteorder=self.byteorder, rounding=self.rounding)

    def decode(self, buf, out=None):
        return imagecodecs.float24_decode(buf, byteorder=self.byteorder, out=out)
def test_colored_captured_log(pytester: Pytester) -> None:
    pytester.makepyfile(
        """
        import logging

        logger = logging.getLogger(__name__)

        def test_foo():
            logger.info('text going to logger from call')
            assert False
        """
    )
    result = pytester.runpytest('--log-level=INFO', '--color=yes')
    assert result.ret == 1
    result.stdout.fnmatch_lines([
        '*-- Captured log call --*',
        '\x1b[32mINFO \x1b[0m*text going to logger from call',
    ])
# The registration decorator was truncated in the source; mmcv-style hooks
# are registered with @HOOKS.register_module().
@HOOKS.register_module()
class CyclicLrUpdaterHook(LrUpdaterHook):

    def __init__(self, by_epoch=False, target_ratio=(10, 0.0001), cyclic_times=1,
                 step_ratio_up=0.4, anneal_strategy='cos', **kwargs):
        if isinstance(target_ratio, float):
            target_ratio = (target_ratio, target_ratio / 100000.0)
        elif isinstance(target_ratio, tuple):
            target_ratio = ((target_ratio[0], target_ratio[0] / 100000.0)
                            if len(target_ratio) == 1 else target_ratio)
        else:
            raise ValueError(f'target_ratio should be either float or tuple, got {type(target_ratio)}')
        assert len(target_ratio) == 2, '"target_ratio" must be list or tuple of two floats'
        assert 0 <= step_ratio_up < 1.0, '"step_ratio_up" must be in range [0,1)'
        self.target_ratio = target_ratio
        self.cyclic_times = cyclic_times
        self.step_ratio_up = step_ratio_up
        self.lr_phases = []
        if anneal_strategy not in ['cos', 'linear']:
            raise ValueError(f'anneal_strategy must be one of "cos" or "linear", instead got {anneal_strategy}')
        elif anneal_strategy == 'cos':
            self.anneal_func = annealing_cos
        elif anneal_strategy == 'linear':
            self.anneal_func = annealing_linear
        assert not by_epoch, 'currently only support "by_epoch" = False'
        super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)

    def before_run(self, runner):
        super(CyclicLrUpdaterHook, self).before_run(runner)
        max_iter_per_phase = runner.max_iters // self.cyclic_times
        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
        self.lr_phases.append([0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
        self.lr_phases.append([iter_up_phase, max_iter_per_phase, max_iter_per_phase,
                               self.target_ratio[0], self.target_ratio[1]])

    def get_lr(self, runner, base_lr):
        curr_iter = runner.iter
        for (start_iter, end_iter, max_iter_per_phase, start_ratio, end_ratio) in self.lr_phases:
            curr_iter %= max_iter_per_phase
            if start_iter <= curr_iter < end_iter:
                progress = curr_iter - start_iter
                return self.anneal_func(base_lr * start_ratio, base_lr * end_ratio,
                                        progress / (end_iter - start_iter))
# The decorator name was truncated in the source; restored here as the
# railing module's @map_new_faces(...) convention.
@map_new_faces(MaterialGroup.RAILING_RAILS)
def create_railing_bottom(bm, bot_edge, prop):
    initial_loc = prop.corner_post_width * 1.5
    clamped_offset = clamp(
        prop.bottom_rail_offset,
        -initial_loc + prop.corner_post_width / 2,
        prop.corner_post_height - initial_loc * 2,
    )
    bmesh.ops.translate(bm, verts=bot_edge.verts, vec=(0, 0, initial_loc + clamped_offset))
    create_railing_cylinder(bm, bot_edge, prop)
    bmesh.ops.translate(bm, verts=bot_edge.verts, vec=(0, 0, prop.corner_post_width / 2))
def render_pep8_errors_e227(msg, _node, source_lines=None):
    line = msg.line
    res = re.search('column (\\d+)', msg.msg)
    col = int(res.group().split()[-1])
    operators = {'>>', '<<'}
    end_idx = col + 1
    end_idx = end_idx + 1 if source_lines[line - 1][col:col + 2] in operators else end_idx
    yield from render_context(line - 2, line, source_lines)
    yield (line, slice(col, end_idx), LineType.ERROR, source_lines[line - 1])
    yield from render_context(line + 1, line + 3, source_lines)
def _split_numeric_sortkey(s, limit=10,
                           reg=re.compile('[0-9][0-9]*\\.?[0-9]*').search,
                           join=' '.join):
    result = reg(s)
    if not result or not limit:
        text = join(s.split())
        return (text,) if text else ()
    else:
        start, end = result.span()
        return (join(s[:start].split()),
                float(result.group()),
                _split_numeric_sortkey(s[end:], limit - 1))
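A quick usage sketch, assuming re is imported as the function's defaults require: the key tuples make numeric runs compare as floats, so 'track2' sorts before 'track10'.

names = ['track10', 'track2', 'track1']
assert sorted(names, key=_split_numeric_sortkey) == ['track1', 'track2', 'track10']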
class Effect5957(BaseEffect):
    type = 'passive'

    # handler takes no self; @staticmethod appears to have been lost in the source.
    @staticmethod
    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Medium Energy Turret'),
            'maxRange',
            ship.getModifiedItemAttr('eliteBonusHeavyInterdictors1'),
            skill='Heavy Interdiction Cruisers',
            **kwargs,
        )
class DefaultProvider(EggProvider):

    def _has(self, path):
        return os.path.exists(path)

    def _isdir(self, path):
        return os.path.isdir(path)

    def _listdir(self, path):
        return os.listdir(path)

    def get_resource_stream(self, manager, resource_name):
        return open(self._fn(self.module_path, resource_name), 'rb')

    def _get(self, path):
        with open(path, 'rb') as stream:
            return stream.read()

    @classmethod  # decorator lost in the source; the cls signature implies it
    def _register(cls):
        loader_names = ('SourceFileLoader', 'SourcelessFileLoader')
        for name in loader_names:
            loader_cls = getattr(importlib_machinery, name, type(None))
            register_loader_type(loader_cls, cls)
class TopPoolFunction(Function):

    # autograd.Function methods take ctx, not self; the @staticmethod
    # decorators appear to have been lost in the source.
    @staticmethod
    def forward(ctx, input):
        output = top_pool.forward(input)[0]
        ctx.save_for_backward(input)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input = ctx.saved_variables[0]
        output = top_pool.backward(input, grad_output)[0]
        return output
class Matchmaking:

    def __init__(self, p2p: P2P, schema_hash: bytes, dht: DHT, *, servicer_type: Type[ServicerBase],
                 prefix: str, target_group_size: int, min_group_size: int, request_timeout: float,
                 client_mode: bool, initial_group_bits: str = '', averaging_expiration: float = 15):
        assert '.' not in prefix, 'group prefix must be a string without .'
        if request_timeout is None or request_timeout >= averaging_expiration:
            logger.warning('It is recommended to use request_timeout smaller than averaging_expiration. '
                           'Otherwise, matchmaking can cause deadlocks in some rare cases. '
                           'Please see Matchmaking docstring.')
        super().__init__()
        self._p2p = p2p
        if not issubclass(servicer_type, ServicerBase):
            raise TypeError('`servicer_type` is expected to be a ServicerBase subclass')
        self._servicer_type = servicer_type
        self._prefix = prefix
        self.peer_id = p2p.peer_id
        self.schema_hash = schema_hash
        self.group_key_manager = GroupKeyManager(dht, prefix, initial_group_bits, target_group_size)
        self.target_group_size, self.min_group_size = target_group_size, min_group_size
        self.averaging_expiration, self.request_timeout = averaging_expiration, request_timeout
        self.client_mode = client_mode
        self.lock_looking_for_group = asyncio.Lock()
        self.lock_request_join_group = asyncio.Lock()
        self.follower_was_discarded = asyncio.Event()
        self.was_accepted_to_group = asyncio.Event()
        self.assembled_group = asyncio.Future()
        self.current_leader: Optional[PeerID] = None
        self.current_followers: Dict[PeerID, averaging_pb2.JoinRequest] = {}
        self.potential_leaders = PotentialLeaders(self.peer_id, averaging_expiration, target_group_size)
        self.data_for_gather: Optional[bytes] = None

    # is_looking_for_group is read without parentheses throughout the class,
    # so the @property decorator (lost in the source) is restored here.
    @property
    def is_looking_for_group(self):
        return self.lock_looking_for_group.locked()

    def __repr__(self):
        lfg_status = 'looking for group,' if self.is_looking_for_group else 'not looking for group,'
        if self.is_looking_for_group:
            if self.current_leader:
                lfg_status += f' following {self.current_leader},'
            if len(self.current_followers):
                lfg_status += f' leading {len(self.current_followers)} followers,'
        schema_hash_repr = f'{self.schema_hash[0]}...{self.schema_hash[-8:]}'
        return (f'{self.__class__.__name__}(peer_id={self.peer_id}, schema={schema_hash_repr}, {lfg_status} '
                f'current key = {self.group_key_manager.current_key}, client_mode={self.client_mode})')

    async def look_for_group(self, *, data_for_gather: bytes,
                             timeout: Optional[float] = None) -> Optional[GroupInfo]:
        if self.is_looking_for_group:
            logger.info('Another look_for_group is already in progress. The current run will be scheduled '
                        'after the existing group is either assembled or disbanded.')
        async with self.lock_looking_for_group:
            self.data_for_gather = data_for_gather
            request_leaders_task = asyncio.create_task(self._request_join_potential_leaders(timeout))
            try:
                return await asyncio.wait_for(self.assembled_group, timeout=timeout)
            except asyncio.TimeoutError:
                return None
            except BaseException as e:
                if len(self.current_followers) > 0:
                    async with self.lock_request_join_group:
                        await self.leader_disband_group()
                if not self.assembled_group.done():
                    self.assembled_group.set_exception(e)
                raise
            finally:
                await cancel_and_wait(request_leaders_task)
                self.assembled_group.cancel()
                while len(self.current_followers) > 0:
                    await self.follower_was_discarded.wait()
                    self.follower_was_discarded.clear()
                self.assembled_group = asyncio.Future()
                self.was_accepted_to_group.clear()
                self.data_for_gather = None

    async def _request_join_potential_leaders(self, timeout: Optional[float]) -> GroupInfo:
        async with self.potential_leaders.begin_search(self.group_key_manager, timeout,
                                                       declare=not self.client_mode):
            while True:
                try:
                    next_leader = await self.potential_leaders.pop_next_leader()
                    group = await self.request_join_group(next_leader,
                                                          self.potential_leaders.request_expiration_time)
                    if group is not None:
                        return group
                except asyncio.TimeoutError:
                    async with self.lock_request_join_group:
                        if self.assembled_group.done():
                            return self.assembled_group.result()
                        elif len(self.current_followers) + 1 >= self.min_group_size:
                            return await self.leader_assemble_group()
                        elif len(self.current_followers) > 0:
                            await self.leader_disband_group()
                        continue
                except (concurrent.futures.CancelledError, asyncio.CancelledError):
                    break
                except Exception as e:
                    if not self.assembled_group.done():
                        self.assembled_group.set_exception(e)
                    raise e

    async def request_join_group(self, leader: PeerID,
                                 expiration_time: DHTExpiration) -> Optional[GroupInfo]:
        assert self.is_looking_for_group and self.current_leader is None
        stream: AsyncIterator[averaging_pb2.MessageFromLeader] = None
        try:
            async with self.lock_request_join_group:
                leader_stub = self._servicer_type.get_stub(self._p2p, leader, namespace=self._prefix)
                stream = leader_stub.rpc_join_group(averaging_pb2.JoinRequest(
                    schema_hash=self.schema_hash, expiration=expiration_time,
                    client_mode=self.client_mode, gather=self.data_for_gather,
                    group_key=self.group_key_manager.current_key))
                message = await asyncio.wait_for(anext(stream), timeout=self.request_timeout)
                if message.code == averaging_pb2.ACCEPTED:
                    logger.debug(f'{self.peer_id} - joining the group of {leader}; waiting for peers')
                    self.current_leader = leader
                    self.was_accepted_to_group.set()
                    if len(self.current_followers) > 0:
                        await self.leader_disband_group()
            if message.code != averaging_pb2.ACCEPTED:
                code = averaging_pb2.MessageCode.Name(message.code)
                logger.debug(f'{self.peer_id} - requested {leader} to be my leader, but got rejected with {code}')
                return None
            async with self.potential_leaders.pause_search():
                time_to_expiration = max(expiration_time - get_dht_time(), 0.0)
                message = await asyncio.wait_for(anext(stream), time_to_expiration + self.request_timeout)
                if message.code == averaging_pb2.BEGIN_ALLREDUCE:
                    async with self.lock_request_join_group:
                        return await self.follower_assemble_group(leader, message)
            if message.code in (averaging_pb2.GROUP_DISBANDED, averaging_pb2.CANCELLED):
                if message.suggested_leader:
                    suggested_leader = PeerID(message.suggested_leader)
                    if suggested_leader != self.peer_id:
                        logger.debug(f'{self} - leader disbanded group and redirected us to {suggested_leader}')
                        self.current_leader = None
                        await stream.aclose()
                        return await self.request_join_group(suggested_leader, expiration_time)
                logger.debug(f'{self} - leader disbanded group')
                return None
            logger.debug(f'{self} - unexpected message from leader: '
                         f'{averaging_pb2.MessageCode.Name(message.code)}')
            return None
        except asyncio.TimeoutError:
            logger.debug(f'{self} - potential leader {leader} did not respond within {self.request_timeout}')
            return None
        except (P2PHandlerError, StopAsyncIteration) as e:
            logger.exception(f'{self} - failed to request potential leader {leader}:')
            return None
        finally:
            self.was_accepted_to_group.clear()
            self.current_leader = None
            if stream is not None:
                await stream.aclose()

    async def rpc_join_group(self, request: averaging_pb2.JoinRequest, context: P2PContext
                             ) -> AsyncIterator[averaging_pb2.MessageFromLeader]:
        try:
            async with self.lock_request_join_group:
                reason_to_reject = self._check_reasons_to_reject(request, context)
                if reason_to_reject is not None:
                    yield reason_to_reject
                    return
                self.current_followers[context.remote_id] = request
                yield averaging_pb2.MessageFromLeader(code=averaging_pb2.ACCEPTED)
                if (len(self.current_followers) + 1 >= self.target_group_size
                        and not self.assembled_group.done()):
                    await self.leader_assemble_group()
            timeout = max(0.0, self.potential_leaders.declared_expiration_time - get_dht_time())
            await asyncio.wait({self.assembled_group, self.was_accepted_to_group.wait()},
                               return_when=asyncio.FIRST_COMPLETED, timeout=timeout)
            if not self.assembled_group.done() and not self.was_accepted_to_group.is_set():
                async with self.lock_request_join_group:
                    if self.assembled_group.done():
                        pass
                    elif (len(self.current_followers) + 1 >= self.min_group_size
                          and self.is_looking_for_group):
                        await self.leader_assemble_group()
                    else:
                        await self.leader_disband_group()
            if (self.was_accepted_to_group.is_set() or not self.assembled_group.done()
                    or self.assembled_group.cancelled()
                    or context.remote_id not in self.assembled_group.result()):
                if self.current_leader is not None:
                    yield averaging_pb2.MessageFromLeader(
                        code=averaging_pb2.GROUP_DISBANDED,
                        suggested_leader=self.current_leader.to_bytes())
                    return
                else:
                    yield averaging_pb2.MessageFromLeader(code=averaging_pb2.GROUP_DISBANDED)
                    return
            group_info = self.assembled_group.result()
            yield averaging_pb2.MessageFromLeader(
                code=averaging_pb2.BEGIN_ALLREDUCE, group_id=group_info.group_id,
                ordered_peer_ids=[item.to_bytes() for item in group_info.peer_ids],
                gathered=group_info.gathered)
        except (concurrent.futures.CancelledError, asyncio.CancelledError):
            return
        except Exception as e:
            logger.exception(e)
            yield averaging_pb2.MessageFromLeader(code=averaging_pb2.INTERNAL_ERROR)
        finally:
            self.current_followers.pop(context.remote_id, None)
            self.follower_was_discarded.set()

    def _check_reasons_to_reject(self, request: averaging_pb2.JoinRequest, context: P2PContext
                                 ) -> Optional[averaging_pb2.MessageFromLeader]:
        if not self.is_looking_for_group or self.assembled_group.done():
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.NOT_LOOKING_FOR_GROUP)
        if ((request.ListFields() == 3 and not isinstance(request.schema_hash, bytes))
                or len(request.schema_hash) == 0
                or not isinstance(request.expiration, DHTExpiration)
                or not isfinite(request.expiration)
                or self.client_mode
                or not isinstance(request.group_key, GroupKey)):
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.PROTOCOL_VIOLATION)
        elif request.schema_hash != self.schema_hash:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.BAD_SCHEMA_HASH)
        elif request.group_key != self.group_key_manager.current_key:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.BAD_GROUP_KEY)
        elif self.potential_leaders.declared_group_key is None:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.NOT_DECLARED)
        elif self.potential_leaders.declared_expiration_time > (request.expiration or float('inf')):
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.BAD_EXPIRATION_TIME)
        elif self.current_leader is not None:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.NOT_A_LEADER,
                                                   suggested_leader=self.current_leader.to_bytes())
        elif context.remote_id == self.peer_id or context.remote_id in self.current_followers:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.DUPLICATE_PEER_ID)
        elif len(self.current_followers) + 1 >= self.target_group_size:
            return averaging_pb2.MessageFromLeader(code=averaging_pb2.GROUP_IS_FULL)
        else:
            return None

    async def leader_assemble_group(self) -> GroupInfo:
        assert (self.lock_looking_for_group.locked() and self.lock_request_join_group.locked()
                and not self.client_mode)
        assert not self.assembled_group.done()
        group_id = DHTID.generate().to_bytes()
        ordered_peer_ids = list(self.current_followers)
        ordered_peer_ids.append(self.peer_id)
        random.shuffle(ordered_peer_ids)
        gathered = tuple(self.data_for_gather if peer_id == self.peer_id
                         else self.current_followers[peer_id].gather
                         for peer_id in ordered_peer_ids)
        logger.debug(f'{self.peer_id} - assembled group of {len(ordered_peer_ids)} peers.')
        group_info = GroupInfo(group_id, tuple(ordered_peer_ids), gathered)
        await self.group_key_manager.update_key_on_group_assembled(group_info, is_leader=True)
        self.assembled_group.set_result(group_info)
        return group_info

    async def follower_assemble_group(self, leader: PeerID,
                                      msg: averaging_pb2.MessageFromLeader) -> GroupInfo:
        assert self.lock_looking_for_group.locked() and self.lock_request_join_group.locked()
        assert not self.assembled_group.done()
        assert self.current_leader == leader, \
            f'averager does not follow {leader} (actual: {self.current_leader})'
        group_id = msg.group_id
        ordered_peer_ids = [PeerID(item) for item in msg.ordered_peer_ids]
        assert self.peer_id in ordered_peer_ids, 'Leader sent us group_peer_ids that does not contain us!'
        assert len(ordered_peer_ids) == len(msg.gathered)
        logger.debug(f'{self.peer_id} - follower assembled group with leader {leader}.')
        group_info = GroupInfo(group_id, tuple(ordered_peer_ids), tuple(msg.gathered))
        await self.group_key_manager.update_key_on_group_assembled(group_info)
        self.assembled_group.set_result(group_info)
        return group_info

    async def leader_disband_group(self):
        assert self.lock_request_join_group.locked() and not self.client_mode
        self.current_followers.clear()
class SingleQubitCompare(GateWithRegisters):
    adjoint: bool = False

    @cached_property  # decorator name truncated in the source; assumed cached_property
    def signature(self) -> Signature:
        one_side = Side.RIGHT if not self.adjoint else Side.LEFT
        return Signature([
            Register('a', 1),
            Register('b', 1),
            Register('less_than', 1, side=one_side),
            Register('greater_than', 1, side=one_side),
        ])

    def decompose_from_registers(self, *, context: cirq.DecompositionContext,
                                 **quregs: NDArray[cirq.Qid]) -> cirq.OP_TREE:
        a = quregs['a']
        b = quregs['b']
        less_than = quregs['less_than']
        greater_than = quregs['greater_than']

        def _decomposition() -> Iterator[cirq.Operation]:
            yield And(0, 1, adjoint=self.adjoint).on(*a, *b, *less_than)
            yield cirq.CNOT(*less_than, *greater_than)
            yield cirq.CNOT(*b, *greater_than)
            yield cirq.CNOT(*a, *b)
            yield cirq.CNOT(*a, *greater_than)
            yield cirq.X(*b)

        if self.adjoint:
            yield from reversed(tuple(_decomposition()))
        else:
            yield from _decomposition()

    def __pow__(self, power: int) -> cirq.Gate:
        if not isinstance(power, int):
            raise ValueError('SingleQubitCompare is only defined for integer powers.')
        if power % 2 == 0:
            return cirq.IdentityGate(4)
        if power < 0:
            return SingleQubitCompare(adjoint=not self.adjoint)
        return self

    def _t_complexity_(self) -> TComplexity:
        if self.adjoint:
            return TComplexity(clifford=11)
        return TComplexity(t=4, clifford=16)
class TerminusPasteTextCommand(sublime_plugin.TextCommand):

    def run(self, edit, text, bracketed=True):
        view = self.view
        terminal = Terminal.from_id(view.id())
        if not terminal:
            return
        bracketed = bracketed and terminal.bracketed_paste_mode_enabled()
        if bracketed:
            terminal.send_key('bracketed_paste_mode_start')
        self.view.run_command('terminus_show_cursor')
        terminal.send_string(text)
        if bracketed:
            terminal.send_key('bracketed_paste_mode_end')
def samples_from_source(sample_source, buffering=BUFFER_SIZE, labeled=None, reverse=False):
    ext = os.path.splitext(sample_source)[1].lower()
    if ext == '.sdb':
        return SDB(sample_source, buffering=buffering, labeled=labeled, reverse=reverse)
    if ext == '.csv':
        return CSV(sample_source, labeled=labeled, reverse=reverse)
    raise ValueError('Unknown file type: "{}"'.format(ext))
def block6():
    for i in range(11):
        re.sub('(?i)##yv0##', '', strings[27], 0)
        regexs[57].sub('', strings[27], subcount[57])
        regexs[58].sub('', strings[28], subcount[58])
        regexs[59].sub('', strings[29], subcount[59])
        re.sub('(?i)##\\/o##', '', strings[30], 0)
        re.sub('(?i)##\\/v##', '', strings[30], 0)
        re.sub('(?i)##\\/h##', '', strings[30], 0)
        re.sub('(?i)##o##', '', strings[30], 0)
        re.sub('(?i)##oe##', '', strings[30], 0)
        re.sub('(?i)##v##', '', strings[30], 0)
        re.sub('(?i)##h##', '', strings[30], 0)
        re.sub('(?i)##n##', '', strings[31], 0)
        re.sub('(?i)##\\/n##', '', strings[32], 0)
        re.sub('#~#argjbexybtb#~#', '', strings[33], 0)
        re.search(' Zbovyr\\/', strings[0])
        re.search('##yv1##', strings[27], re.IGNORECASE)
        re.search('##yv10##', strings[28], re.IGNORECASE)
        re.search('##yv11##', strings[28], re.IGNORECASE)
        re.search('##yv12##', strings[28], re.IGNORECASE)
        re.search('##yv13##', strings[28], re.IGNORECASE)
        re.search('##yv14##', strings[28], re.IGNORECASE)
        re.search('##yv15##', strings[28], re.IGNORECASE)
        regexs[58].search(strings[28])
        re.search('##yv17##', strings[29], re.IGNORECASE)
        re.search('##yv18##', strings[29], re.IGNORECASE)
        regexs[59].search(strings[29])
        re.search('##yv2##', strings[27], re.IGNORECASE)
        re.search('##yv20##', strings[30], re.IGNORECASE)
        re.search('##yv21##', strings[30], re.IGNORECASE)
        re.search('##yv22##', strings[30], re.IGNORECASE)
        re.search('##yv23##', strings[30], re.IGNORECASE)
        re.search('##yv3##', strings[27], re.IGNORECASE)
        regexs[57].search(strings[27])
        re.search('##yv5##', strings[28], re.IGNORECASE)
        re.search('##yv6##', strings[28], re.IGNORECASE)
        re.search('##yv7##', strings[28], re.IGNORECASE)
        re.search('##yv8##', strings[28], re.IGNORECASE)
        re.search('##yv9##', strings[28], re.IGNORECASE)
        regexs[8].search('473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29')
        regexs[8].search('SbeprqRkcvengvba=')
        regexs[8].search('FrffvbaQQS2=473qq1rs0n2r70q9qo1pq48n021s9468ron90nps048p4p29')
        re.search('AbxvnA[^\\/]*', strings[0])
    for i in range(10):
        re.sub('(?:^|\\s+)bss(?:\\s+|$)', '', ' bss', 0)
        re.sub('(\\$\\{0\\})|(\\$0\\b)', '', strings[34], 0)
        re.sub('(\\$\\{1\\})|(\\$1\\b)', '', strings[34], 0)
        re.sub('(\\$\\{pbzcyrgr\\})|(\\$pbzcyrgr\\b)', '', strings[34], 0)
        re.sub('(\\$\\{sentzrag\\})|(\\$sentzrag\\b)', '', strings[34], 0)
        re.sub('(\\$\\{ubfgcbeg\\})|(\\$ubfgcbeg\\b)', '', strings[34], 0)
        regexs[56].sub('', strings[34], subcount[56])
        re.sub('(\\$\\{cebgbpby\\})|(\\$cebgbpby\\b)', '', strings[34], 0)
        re.sub('(\\$\\{dhrel\\})|(\\$dhrel\\b)', '', strings[34], 0)
        regexs[29].sub('', 'nqfvmr', subcount[29])
        regexs[30].sub('', 'nqfvmr', subcount[30])
        re.sub('(\\$\\{2\\})|(\\$2\\b)', '', 'uggc://${2}${3}${4}${5}', 0)
        re.sub('(\\$\\{3\\})|(\\$3\\b)', '', 'uggc://wf.hv-cbegny.qr${3}${4}${5}', 0)
        regexs[40].sub('', 'arjf', subcount[40])
        regexs[41].sub('', 'arjf', subcount[41])
        regexs[42].sub('', 'arjf', subcount[42])
        regexs[43].sub('', 'arjf', subcount[43])
        regexs[44].sub('', 'arjf', subcount[44])
        regexs[45].sub('', 'arjf', subcount[45])
        regexs[46].sub('', 'arjf', subcount[46])
        regexs[47].sub('', 'arjf', subcount[47])
        regexs[48].sub('', 'arjf', subcount[48])
        re.search(' PC=i=(\\d+)&oe=(.)', strings[35])
        regexs[60].search(' ')
        regexs[60].search(' bss')
        regexs[60].search('')
        regexs[19].search(' ')
        regexs[19].search('svefg ba')
        regexs[19].search('ynfg vtaber')
        regexs[19].search('ba')
        regexs[9].search('scnq so ')
        regexs[9].search('zrqvgobk')
        regexs[9].search('hsgy')
        regexs[9].search('lhv-h')
        re.search('Fnsnev|Xbadhrebe|XUGZY', strings[0], re.IGNORECASE)
        regexs[61].search('uggc://wf.hv-cbegny.qr/tzk/ubzr/wf//onfr.wf')
        regexs[62].search('#Ybtva_rznvy')
def cache_size(mb=True):
    numtotal = [0]
    classdict = {}

    def get_recurse(submodels):
        for submodel in submodels:
            subclasses = submodel.__subclasses__()
            if not subclasses:
                num = len(submodel.get_all_cached_instances())
                numtotal[0] += num
                classdict[submodel.__dbclass__.__name__] = num
            else:
                get_recurse(subclasses)

    get_recurse(SharedMemoryModel.__subclasses__())
    return (numtotal[0], classdict)
class RRDB(nn.Module):

    def __init__(self, nc, kernel_size=3, gc=32, stride=1, bias=True, pad_type='zero',
                 norm_type=None, act_type='leakyrelu', mode='CNA'):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
                                          norm_type, act_type, mode)
        self.RDB2 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
                                          norm_type, act_type, mode)
        self.RDB3 = ResidualDenseBlock_5C(nc, kernel_size, gc, stride, bias, pad_type,
                                          norm_type, act_type, mode)

    def forward(self, x):
        out = self.RDB1(x)
        out = self.RDB2(out)
        out = self.RDB3(out)
        return out.mul(0.2) + x
def create_nested(dirname, s, depth, branch_factor):

    def write(rp):
        fp = rp.open('w')
        fp.write(s)
        fp.close()

    def helper(rp, depth):
        if not rp.isdir():
            rp.mkdir()
        sub_rps = [rp.append('file_%d' % i) for i in range(branch_factor)]
        if depth == 1:
            list(map(write, sub_rps))
        else:
            list(map(lambda rp: helper(rp, depth - 1), sub_rps))

    re_init_subdir(abs_test_dir, b'nested_out')
    helper(rpath.RPath(Globals.local_connection, dirname), depth)
def test_exporter_handles_overlapping_python_versions(tmp_path: Path, poetry: Poetry) -> None:
    poetry.locker.mock_lock_data({
        'package': [
            {'name': 'ipython', 'python-versions': '>=3.6', 'version': '7.16.3',
             'optional': False, 'dependencies': {}},
            {'name': 'ipython', 'python-versions': '>=3.7', 'version': '7.34.0',
             'optional': False, 'dependencies': {}},
            {'name': 'slash', 'python-versions': '>=3.6.*', 'version': '1.13.0',
             'optional': False,
             'dependencies': {'ipython': [
                 {'version': '*',
                  'markers': 'python_version >= "3.6" and implementation_name != "pypy"'},
                 {'version': '<7.17.0',
                  'markers': 'python_version < "3.6" and implementation_name != "pypy"'},
             ]}},
        ],
        'metadata': {
            'lock-version': '1.1',
            'python-versions': '^3.6',
            'content-hash': '832b13a88e5020c27cbcd95faa577bf0dbf054a65c023b45dc9442b640d414e6',
            'files': {'ipython': [], 'slash': []},
        },
    })
    root = poetry.package.with_dependency_groups([], only=True)
    root.python_versions = '^3.6'
    root.add_dependency(Factory.create_dependency(
        name='ipython', constraint={'version': '*', 'python': '~3.6'}))
    root.add_dependency(Factory.create_dependency(
        name='ipython', constraint={'version': '^7.17', 'python': '^3.7'}))
    root.add_dependency(Factory.create_dependency(
        name='slash', constraint={'version': '^1.12', 'markers': "implementation_name == 'cpython'"}))
    poetry._package = root
    exporter = Exporter(poetry, NullIO())
    io = BufferedIO()
    exporter.export('requirements.txt', tmp_path, io)
    expected = f"""\
ipython==7.16.3 ; {MARKER_PY36_ONLY}
ipython==7.34.0 ; {MARKER_PY37}
slash==1.13.0 ; {MARKER_PY36} and {MARKER_CPYTHON}
"""
    assert io.fetch_output() == expected
def normalize_outbound_headers(headers, hdr_validation_flags, should_split_outbound_cookies):
    headers = _lowercase_header_names(headers, hdr_validation_flags)
    if should_split_outbound_cookies:
        headers = _split_outbound_cookie_fields(headers, hdr_validation_flags)
    headers = _strip_surrounding_whitespace(headers, hdr_validation_flags)
    headers = _strip_connection_headers(headers, hdr_validation_flags)
    headers = _secure_headers(headers, hdr_validation_flags)
    return headers
def test_skip_fails_with_msg_and_reason(pytester: Pytester) -> None:
    p = pytester.makepyfile(
        """
        import pytest

        def test_skip_both_arguments():
            pytest.skip(reason="foo", msg="bar")
        """
    )
    result = pytester.runpytest(p)
    result.stdout.fnmatch_lines(
        '*UsageError: Passing both ``reason`` and ``msg`` to pytest.skip(...) is not permitted.*'
    )
    result.assert_outcomes(failed=1)
def _pad_or_crop_to_shape(x, in_shape, tgt_shape):
    if len(in_shape) == 2:
        in_shape = np.asarray(in_shape)
        tgt_shape = np.asarray(tgt_shape)
        print('Padding input from {} to {}'.format(in_shape, tgt_shape))
        im_diff = in_shape - tgt_shape
        if im_diff[0] < 0:
            pad_amt = (int(np.ceil(abs(im_diff[0]) / 2.0)), int(np.floor(abs(im_diff[0]) / 2.0)))
            x = ZeroPadding2D((pad_amt, (0, 0)))(x)
        if im_diff[1] < 0:
            pad_amt = (int(np.ceil(abs(im_diff[1]) / 2.0)), int(np.floor(abs(im_diff[1]) / 2.0)))
            x = ZeroPadding2D(((0, 0), pad_amt))(x)
        if im_diff[0] > 0:
            crop_amt = (int(np.ceil(im_diff[0] / 2.0)), int(np.floor(im_diff[0] / 2.0)))
            x = Cropping2D((crop_amt, (0, 0)))(x)
        if im_diff[1] > 0:
            crop_amt = (int(np.ceil(im_diff[1] / 2.0)), int(np.floor(im_diff[1] / 2.0)))
            x = Cropping2D(((0, 0), crop_amt))(x)
        return x
    else:
        return _pad_or_crop_to_shape_3D(x, in_shape, tgt_shape)
def urlunparse(parts):
    scheme, netloc, path, params, query, fragment = parts
    if RE_DRIVE_LETTER_PATH.match(path):
        quoted_path = path[:3] + parse.quote(path[3:])
    else:
        quoted_path = parse.quote(path)
    return parse.urlunparse((
        parse.quote(scheme),
        parse.quote(netloc),
        quoted_path,
        parse.quote(params),
        parse.quote(query),
        parse.quote(fragment),
    ))
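A small usage sketch. RE_DRIVE_LETTER_PATH and the parse alias live elsewhere in the module, so the pattern below is only an assumed approximation for Windows drive-letter paths:

import re
from urllib import parse

RE_DRIVE_LETTER_PATH = re.compile('^/[a-zA-Z]:')  # assumed definition

parts = ('file', '', '/c:/Program Files/app', '', '', '')
print(urlunparse(parts))  # file:/c:/Program%20Files/app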
def histogram(returns, benchmark=None, resample='M', fontname='Arial', grayscale=False,
              figsize=(10, 5), ylabel=True, subtitle=True, compounded=True,
              savefig=None, show=True, prepare_returns=True):
    if prepare_returns:
        returns = _utils._prepare_returns(returns)
        if benchmark is not None:
            benchmark = _utils._prepare_returns(benchmark)
    if resample == 'W':
        title = 'Weekly '
    elif resample == 'M':
        title = 'Monthly '
    elif resample == 'Q':
        title = 'Quarterly '
    elif resample == 'A':
        title = 'Annual '
    else:
        title = ''
    return _core.plot_histogram(returns, benchmark, resample=resample, grayscale=grayscale,
                                fontname=fontname, title='Distribution of %sReturns' % title,
                                figsize=figsize, ylabel=ylabel, subtitle=subtitle,
                                compounded=compounded, savefig=savefig, show=show)
# Decorators truncated in the source; reconstructed as the usual pytest pair.
@pytest.mark.django_db
@pytest.mark.parametrize('cfp_open', (True, False))
def test_is_cfp_open(graphql_client, conference_factory, deadline_factory, cfp_open):
    now = timezone.now()
    conference = conference_factory(timezone=pytz.timezone('America/Los_Angeles'))
    deadline_factory(
        start=now - timezone.timedelta(days=1),
        end=(now + timezone.timedelta(days=1)) if cfp_open else now,
        conference=conference,
        type='cfp',
    )
    resp = graphql_client.query(
        """
        query($code: String!) {
            conference(code: $code) {
                isCFPOpen
            }
        }
        """,
        variables={'code': conference.code},
    )
    assert resp['data']['conference']['isCFPOpen'] is cfp_open
class RobertaPreLayerNormOnnxConfig(OnnxConfig):

    @property  # decorator lost in the source; OnnxConfig.inputs is a property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
def get_data(input_path):
    all_imgs = []
    classes_count = {}
    class_mapping = {}
    visualise = False
    data_paths = [os.path.join(input_path, s) for s in ['VOC2012']]
    print('Parsing annotation files')
    for data_path in data_paths:
        annot_path = os.path.join(data_path, 'Annotations')
        imgs_path = os.path.join(data_path, 'JPEGImages')
        imgsets_path_trainval = os.path.join(data_path, 'ImageSets', 'Main', 'trainval.txt')
        imgsets_path_test = os.path.join(data_path, 'ImageSets', 'Main', 'test.txt')
        trainval_files = []
        test_files = []
        try:
            with open(imgsets_path_trainval) as f:
                for line in f:
                    trainval_files.append(line.strip() + '.jpg')
        except Exception as e:
            print(e)
        try:
            with open(imgsets_path_test) as f:
                for line in f:
                    test_files.append(line.strip() + '.jpg')
        except Exception as e:
            # VOC2012 ships no test annotations, so a missing file is expected there.
            if data_path[-7:] == 'VOC2012':
                pass
            else:
                print(e)
        annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
        idx = 0
        for annot in annots:
            try:
                idx += 1
                et = ET.parse(annot)
                element = et.getroot()
                element_objs = element.findall('object')
                element_filename = element.find('filename').text
                element_width = int(element.find('size').find('width').text)
                element_height = int(element.find('size').find('height').text)
                if len(element_objs) > 0:
                    annotation_data = {'filepath': os.path.join(imgs_path, element_filename),
                                       'width': element_width, 'height': element_height,
                                       'bboxes': []}
                    if element_filename in trainval_files:
                        annotation_data['imageset'] = 'trainval'
                    elif element_filename in test_files:
                        annotation_data['imageset'] = 'test'
                    else:
                        annotation_data['imageset'] = 'trainval'
                    for element_obj in element_objs:
                        class_name = element_obj.find('name').text
                        if class_name not in classes_count:
                            classes_count[class_name] = 1
                        else:
                            classes_count[class_name] += 1
                        if class_name not in class_mapping:
                            class_mapping[class_name] = len(class_mapping)
                        obj_bbox = element_obj.find('bndbox')
                        x1 = int(round(float(obj_bbox.find('xmin').text)))
                        y1 = int(round(float(obj_bbox.find('ymin').text)))
                        x2 = int(round(float(obj_bbox.find('xmax').text)))
                        y2 = int(round(float(obj_bbox.find('ymax').text)))
                        difficulty = int(element_obj.find('difficult').text) == 1
                        annotation_data['bboxes'].append({'class': class_name, 'x1': x1, 'x2': x2,
                                                          'y1': y1, 'y2': y2,
                                                          'difficult': difficulty})
                    all_imgs.append(annotation_data)
                    if visualise:
                        img = cv2.imread(annotation_data['filepath'])
                        for bbox in annotation_data['bboxes']:
                            cv2.rectangle(img, (bbox['x1'], bbox['y1']),
                                          (bbox['x2'], bbox['y2']), (0, 0, 255))
                        cv2.imshow('img', img)
                        cv2.waitKey(0)
            except Exception as e:
                print(e)
                continue
    return (all_imgs, classes_count, class_mapping)
def test_video():
    video = 'BAACAgIAAx0CAAGgr9AAAgmRX7b4Xv9f-4BK5VR_5ppIOF6UIp0AAgYAA4GkuUmhnZz2xC37wR4E'
    video_unique = 'AgADBgADgaS5SQ'
    video_thumb = 'AAMCAgADHQIAAaCv0AACCZFftvhe_1_7gErlVH_mmkg4XpQinQACBgADgaS5SaGdnPbELfvBIH3qihAAAwEAB20AA_WeAQABHgQ'
    video_thumb_unique = 'AQADIH3qihAAA_WeAQAB'
    check(video, FileType.VIDEO)
    check_unique(video_unique, FileUniqueType.DOCUMENT)
    check(video_thumb, FileType.THUMBNAIL)
    check_unique(video_thumb_unique, FileUniqueType.PHOTO)
class TempStoreTestCase(SqlAlchemyTestCase):

    @classmethod  # decorator lost in the source; the cls signature implies it
    def setUpClass(cls):
        cls.this_dir = abspath(join(dirname(__file__), '..'))
        cls.stuff_path = join(cls.this_dir, 'stuff')
        cls.dog_jpeg = join(cls.stuff_path, 'dog.jpg')
        cls.cat_jpeg = join(cls.stuff_path, 'cat.jpg')
        cls.dog_png = join(cls.stuff_path, 'dog.png')
        cls.cat_png = join(cls.stuff_path, 'cat.png')
        super().setUpClass()

    def setUp(self):
        self.temp_path = join(self.this_dir, 'temp', self.__class__.__name__, self._testMethodName)
        self.sys_temp_path = join('/tmp/sa-media-tests', self.__class__.__name__, self._testMethodName)
        # The original URL literal was scrubbed from the source; any placeholder
        # base URL works for these tests.
        self.base_url = 'http://localhost'
        if exists(self.temp_path):
            shutil.rmtree(self.temp_path)
        makedirs(self.temp_path)
        StoreManager.register('fs',
                              functools.partial(FileSystemStore, self.temp_path, self.base_url),
                              default=True)
        StoreManager.register('temp_fs',
                              functools.partial(FileSystemStore, self.sys_temp_path, self.base_url))
        super().setUp()
class ClarisRandomizerExportError(UnableToExportError):

    def __init__(self, reason: str, output: str | None):
        super().__init__(reason)
        self.output = output

    def detailed_text(self) -> str:
        result = []
        if self.output is not None:
            result.append(self.output)
        return '\n'.join(result)
def test_traversal(simple_chart, rich_chart):
    _, values = simple_chart
    simple_output = values
    assert len(simple_output) == 17
    assert ['replicaCount', '', '1'] in simple_output
    _, values = rich_chart
    rich_output = values
    assert ['replicaCount', 'number of nginx pod replicas to create', '1'] in rich_output
class StopReg(ScrimsButton):

    def __init__(self):
        super().__init__(label='Stop Reg', style=discord.ButtonStyle.red, row=2)

    async def callback(self, interaction: discord.Interaction):
        await interaction.response.defer()
        if not self.view.record.opened_at:
            return await self.view.ctx.error('Registration is already closed.', 5)
        try:
            await self.view.record.close_registration()
        except Exception as e:
            return await self.view.ctx.error(e, 10)
        else:
            await self.view.record.refresh_from_db()
            await self.view.ctx.success(f'Registration closed {self.view.record}.', 5)
def draw_mask(mask, draw, random_color=False):
    if random_color:
        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 153)
    else:
        color = (30, 144, 255, 153)
    nonzero_coords = np.transpose(np.nonzero(mask))
    for coord in nonzero_coords:
        draw.point(coord[::-1], fill=color)
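A minimal sketch of how draw_mask might be driven, assuming PIL and numpy; the mask is any boolean array aligned with the image (coordinates are flipped from row/col to x/y inside the function):

import numpy as np
from PIL import Image, ImageDraw

img = Image.new('RGBA', (8, 8), (255, 255, 255, 255))
draw = ImageDraw.Draw(img, 'RGBA')
mask = np.zeros((8, 8), dtype=bool)
mask[2:5, 3:6] = True  # highlight a small rectangle
draw_mask(mask, draw)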
def test_cmd_list_input_with_complex_args_error_on_first_save():
    cmd1 = get_cmd('tests/testfiles/cmds/args.sh', 'tests\\testfiles\\cmds\\args.bat')
    cmd2 = get_cmd('tests/testfiles/cmds/args2.sh', 'tests\\testfiles\\cmds\\args2.bat')
    context = Context({
        'a': 'WRONG',
        'b': 'two two',
        'c': 'three',
        'd': cmd1,
        'e': cmd2,
        'cmd': [{'run': '{d} {a} "{b}" {c}', 'save': True}, '{e} WRONG "five six" seven'],
    })
    with pytest.raises(subprocess.CalledProcessError) as err:
        pypyr.steps.cmd.run_step(context)
    if is_windows:
        assert err.value.cmd == f'{cmd1} WRONG "two two" three'
    else:
        assert err.value.cmd == [cmd1, 'WRONG', 'two two', 'three']
    out = context['cmdOut']
    assert out.returncode == 1
    assert out.stdout == ''
    assert out.stderr == 'assert failed'
class CosPlus_Classifier(nn.Module):

    def __init__(self, num_classes=10, in_dim=640, scale=16, bias=False, gamma=0.03125,
                 eta=1, moving_avg=True, mu=0.9, **kwargs):
        super(CosPlus_Classifier, self).__init__()
        self.num_classes = num_classes
        self.moving_avg = moving_avg
        self.in_dim = in_dim
        self.scale = scale
        self.gamma = gamma
        self.eta = eta
        self.mu = mu
        self.weight = Parameter(torch.Tensor(num_classes, in_dim).cuda())
        self.bias = Parameter(torch.Tensor(num_classes).cuda(), requires_grad=bias)
        if self.moving_avg:
            self.moving_ed = Parameter(torch.Tensor(1, in_dim).cuda(), requires_grad=False)
        self.init_weights()
        print('>> CosPlus Classifier built!')

    def init_weights(self):
        self.bias.data.fill_(0.0)
        stdv = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)

    def forward(self, x, **kwargs):
        if self.eta != 1:
            ex = x / (torch.norm(x.clone(), 2, 1, keepdim=True) ** self.eta)
            ew = self.weight / ((torch.norm(self.weight, 2, 1, keepdim=True) ** self.eta) + self.gamma)
        else:
            ex = x / torch.norm(x.clone(), 2, 1, keepdim=True)
            ew = self.weight / (torch.norm(self.weight, 2, 1, keepdim=True) + self.gamma)
        x = self.scale * torch.mm(ex, ew.t()) + self.bias
        if self.training and self.moving_avg:
            self.moving_ed.data = self.moving_ed.data * self.mu + torch.mean(ex, dim=0)
        return x
def install_atlas_from_zipfile(zip_file_path, atlas_path):
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_atlas_path = Path(temp_dir).joinpath('test_atlas')
        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
            zip_ref.extractall(temp_atlas_path)
        if not atlas_path.parent.exists():
            atlas_path.parent.mkdir(parents=True)
        shutil.copytree(temp_atlas_path, atlas_path)
class Source(Stream):
    _graphviz_shape = 'doubleoctagon'

    def __init__(self, start=False, **kwargs):
        self.stopped = True
        super().__init__(ensure_io_loop=True, **kwargs)
        self.started = False
        if start:
            self.start()

    def stop(self):
        if not self.stopped:
            self.stopped = True

    def start(self):
        if self.stopped:
            self.stopped = False
            self.started = True
            self.loop.add_callback(self.run)

    async def run(self):
        while not self.stopped:
            await self._run()

    async def _run(self):
        raise NotImplementedError
def _default_implementation() -> BackendType[Any]:
    global _DEFAULT_IMPLEMENTATION
    if _DEFAULT_IMPLEMENTATION is not None:
        return _DEFAULT_IMPLEMENTATION
    try:
        implementation = next(all_implementations())
    except StopIteration:
        logger.debug('Backend implementation import failed', exc_info=exc_info())
        supported_backends = ', '.join(SUPPORTED_BACKENDS)
        msg = f"""It seems you haven't installed a backend. To resolve this issue, you can install a backend by running:

pip install "reactpy[starlette]"

Other supported backends include: {supported_backends}."""
        raise RuntimeError(msg) from None
    else:
        _DEFAULT_IMPLEMENTATION = implementation
        return implementation
class SDIO_ICR(IntEnum):
    CCRCFAILC = 1 << 0
    DCRCFAILC = 1 << 1
    CTIMEOUTC = 1 << 2
    DTIMEOUTC = 1 << 3
    TXUNDERRC = 1 << 4
    RXOVERRC = 1 << 5
    CMDRENDC = 1 << 6
    CMDSENTC = 1 << 7
    DATAENDC = 1 << 8
    STBITERRC = 1 << 9
    DBCKENDC = 1 << 10
    SDIOITC = 1 << 22
    CEATAENDC = 1 << 23
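Since the members are IntEnum bit flags, several interrupt-clear bits can be OR-ed into a single register write; a small sketch:

clear_mask = SDIO_ICR.CCRCFAILC | SDIO_ICR.DTIMEOUTC | SDIO_ICR.DATAENDC
assert clear_mask == 0x109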
class ResNeXt(nn.Module):

    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        self.linear = nn.Linear(cardinality * bottleneck_width * 8, num_classes)

    def _make_layer(self, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        self.bottleneck_width *= 2
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
class BaseModel(pybamm.BaseSubModel):

    def __init__(self, param, domain, options):
        super().__init__(param, domain, options=options)

    def _get_standard_interface_utilisation_variables(self, u_var):
        domain, Domain = self.domain_Domain
        u = pybamm.maximum(u_var, 1e-08)
        u_var_av = pybamm.x_average(u_var)
        u_av = pybamm.maximum(u_var_av, 1e-08)
        if self.options.electrode_types[self.domain] == 'planar':
            variables = {
                'Lithium metal interface utilisation variable': u_var_av,
                'Lithium metal interface utilisation': u_av,
            }
        else:
            variables = {
                f'{Domain} electrode interface utilisation variable': u_var,
                f'X-averaged {domain} electrode interface utilisation variable': u_var_av,
                f'{Domain} electrode interface utilisation': u,
                f'X-averaged {domain} electrode interface utilisation': u_av,
            }
        return variables
def test_upload_time(s3_mock: S3Path) -> None:
    backend = s3.S3Storage()
    backend.PATH_BACKEND(f'/{s3_mock.bucket}/folder1/file1').touch()
    assert backend.get_upload_time(f'/{s3_mock.bucket}/folder1/file1').second == 0
    assert backend.get_upload_time(f'/{s3_mock.bucket}/folder1/file1').year == 1970
    dt = datetime(2008, 8, 8, 10, 10, 0)
    backend.set_upload_time(f'/{s3_mock.bucket}/folder1/file1', dt)
    assert (datetime.timestamp(backend.get_upload_time(f'/{s3_mock.bucket}/folder1/file1'))
            == datetime.timestamp(dt))
class OurModelVAE(Model):

    def __init__(self, placeholders, num_features, num_nodes, features_nonzero, **kwargs):
        super(OurModelVAE, self).__init__(**kwargs)
        self.inputs = placeholders['features']
        self.input_dim = num_features
        self.features_nonzero = features_nonzero
        self.n_samples = num_nodes
        self.adj = placeholders['adj']
        self.dropout = placeholders['dropout']
        self.build()

    def _build(self):
        self.hidden1 = GraphConvolutionSparse(input_dim=self.input_dim, output_dim=FLAGS.hidden1,
                                              adj=self.adj, features_nonzero=self.features_nonzero,
                                              act=tf.nn.relu, dropout=self.dropout,
                                              logging=self.logging)(self.inputs)
        self.z_mean = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
                                       adj=self.adj, act=lambda x: x, dropout=self.dropout,
                                       logging=self.logging)(self.hidden1)
        self.z_log_std = GraphConvolution(input_dim=FLAGS.hidden1, output_dim=FLAGS.hidden2,
                                          adj=self.adj, act=lambda x: x, dropout=self.dropout,
                                          logging=self.logging)(self.hidden1)
        # Reparameterization trick: z = mean + noise * std.
        self.z = self.z_mean + tf.random_normal([self.n_samples, FLAGS.hidden2]) * tf.sqrt(tf.exp(self.z_log_std))
        self.soft_z = tf.nn.softmax(self.z, axis=-1)
        self.reconstructions = InnerProductDecoder(input_dim=FLAGS.hidden2, act=lambda x: x,
                                                   logging=self.logging)(self.z)
class AppConfig(DjangoAppConfig):
    name = 'django_cassandra_engine'

    def connect(self):
        from django_cassandra_engine.utils import get_cassandra_connections
        for _, conn in get_cassandra_connections():
            conn.connect()

    def import_models(self, *args, **kwargs):
        self.connect()
        return super().import_models(*args, **kwargs)
class _EvalManager:

    def __init__(self, quantsim_factory: Callable,
                 eval_func: Callable[[ort.InferenceSession], float],
                 results_dir: str, strict_validation: bool):
        self._quantsim_factory = quantsim_factory
        self._eval_func = eval_func
        self._results_dir = results_dir
        self._strict_validation = strict_validation
        os.makedirs(self._results_dir, exist_ok=True)
        self._all_sessions = OrderedDict()

    def clear(self):
        for sess in self._all_sessions.values():
            sess.reset_status()

    def get_best_ptq_result(self) -> Optional[PtqResult]:
        ptq_results = [sess.ptq_result for sess in self._all_sessions.values()
                       if sess.ptq_result is not None and sess._ptq]
        if not ptq_results:
            return None
        return max(ptq_results, key=lambda ptq_result: ptq_result.accuracy)

    def session(self, title: str, ptq: bool = False):
        if title not in self._all_sessions:
            session = _EvalSession(title, self._quantsim_factory, self._eval_func,
                                   results_dir=os.path.join(self._results_dir, '.trace'),
                                   strict_validation=self._strict_validation, ptq=ptq)
            self._all_sessions[title] = session
        return self._all_sessions[title]

    HTML_TEMPLATE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                      'auto_quant_v2_diagnostics_template.html')

    def export_diagnostics(self) -> str:
        loader = jinja2.FileSystemLoader(os.path.dirname(self.HTML_TEMPLATE_FILE))
        env = jinja2.Environment(loader=loader)
        template = env.get_template(os.path.basename(self.HTML_TEMPLATE_FILE))
        if any(sess.diagnostics.contains_bokeh() for sess in self._all_sessions.values()):
            from bokeh.resources import CDN
            head = CDN.render()
        else:
            head = ''
        log = io.StringIO()
        for sess in self._all_sessions.values():
            if sess.diagnostics.is_empty():
                continue
            log.write(f'<h1> {sess.title} </h1>\n')
            content = '\n'.join(line.get_html_elem() for line in sess.diagnostics)
            log.write(f'{content}\n')
        result = OrderedDict()
        result['ptq_techniques'] = OrderedDict()
        for sess in self._all_sessions.values():
            if sess.is_ptq_session():
                result['ptq_techniques'][sess.title_lowercase] = sess.result
            else:
                result[sess.title_lowercase] = sess.result
        flowchart_metadata = _build_flowchart_metadata(result)
        html = template.render(head=head, log=log.getvalue(), **flowchart_metadata)
        filename = os.path.join(self._results_dir, 'diagnostics.html')
        with open(filename, 'w') as f:
            f.write(html)
        return html
class ScikitChebyshev2DSubMesh(ScikitSubMesh2D):

    def __init__(self, lims, npts):
        spatial_vars, tabs = self.read_lims(lims)
        coord_sys = spatial_vars[0].coord_sys
        edges = {}
        for var in spatial_vars:
            if var.name not in ['y', 'z']:
                raise pybamm.DomainError(f'spatial variable must be y or z not {var.name}')
            else:
                N = npts[var.name] - 2
                ii = np.array(range(1, N + 1))
                a = lims[var.name]['min']
                b = lims[var.name]['max']
                # Chebyshev nodes mapped from [-1, 1] onto [a, b].
                x_cheb = (a + b) / 2 + (b - a) / 2 * np.cos((2 * ii - 1) * np.pi / 2 / N)
                edges[var.name] = np.concatenate(([a], np.flip(x_cheb), [b]))
        super().__init__(edges, coord_sys, tabs)
@lower_constant(MultiVectorType) def lower_constant_MultiVector(context, builder, typ: MultiVectorType, pyval: MultiVector) -> llvmlite.ir.Value: mv = cgutils.create_struct_proxy(typ)(context, builder) mv.value = context.get_constant_generic(builder, typ.value_type, pyval.value) mv.layout = context.get_constant_generic(builder, typ.layout_type, pyval.layout) return mv._getvalue()
class PDFExporter(DocumentExporter): DEFAULT_CSS_DIR_NAME = 'default_css' def __init__(self, settings: Settings): super().__init__(settings) if hasattr(settings, 'document_css_directory'): self._document_css_dir = join(get_starting_dir_abs_path(), settings.document_css_directory) else: this_dir_abs_path = abspath(dirname(__file__)) self._document_css_dir = join(this_dir_abs_path, self.DEFAULT_CSS_DIR_NAME) self.logger = qf_logger.getChild(self.__class__.__name__) def set_default_directory_level_up(self): self._document_css_dir = abspath(dirname(__file__)) def generate(self, documents: List[Document], export_dir: str, filename: str, include_table_of_contents=False, css_file_names: List[str]=None) -> str: css_file_paths = [] documents = [self._merge_documents(documents, filename)] output_dir = self.get_output_dir(export_dir) output_filename = os.path.join(output_dir, filename) for document in documents: if include_table_of_contents: self._add_table_of_contents(document) self.logger.info('Generating HTML for PDF...') html = document.generate_html() base_css = os.listdir(self._document_css_dir) for name in base_css: path = os.path.join(self._document_css_dir, name) if os.path.isfile(path): css_file_paths.append(CSS(path)) if (css_file_names is not None): for name in css_file_names: css_file_paths.append(CSS(os.path.join(self._document_css_dir, (name + '.css')))) html = HTML(string=html) self.logger.info('Rendering PDF in {}...'.format(output_filename)) html.write_pdf(output_filename, css_file_paths) return output_filename
def postprocess_args(args): ROOTDIR = args.root_dir ft_file_map = {'vitbase': 'pth_vit_base_patch16_224_imagenet.hdf5'} args.img_ft_file = os.path.join(ROOTDIR, 'R2R', 'features', ft_file_map[args.features]) args.connectivity_dir = os.path.join(ROOTDIR, 'R2R', 'connectivity') args.scan_data_dir = os.path.join(ROOTDIR, 'Matterport3D', 'v1_unzip_scans') args.anno_dir = os.path.join(ROOTDIR, 'R2R', 'annotations') args.ckpt_dir = os.path.join(args.output_dir, 'ckpts') args.log_dir = os.path.join(args.output_dir, 'logs') args.pred_dir = os.path.join(args.output_dir, 'preds') os.makedirs(args.output_dir, exist_ok=True) os.makedirs(args.ckpt_dir, exist_ok=True) os.makedirs(args.log_dir, exist_ok=True) os.makedirs(args.pred_dir, exist_ok=True) return args
class QAOA(VQE): def __init__(self, operator: Union[(OperatorBase, LegacyBaseOperator)]=None, optimizer: Optimizer=None, p: int=1, initial_state: Optional[Union[(QuantumCircuit, InitialState)]]=None, mixer: Union[(QuantumCircuit, OperatorBase, LegacyBaseOperator)]=None, initial_point: Optional[np.ndarray]=None, gradient: Optional[Union[(GradientBase, Callable[([Union[(np.ndarray, List)]], List)])]]=None, expectation: Optional[ExpectationBase]=None, include_custom: bool=False, max_evals_grouped: int=1, aux_operators: Optional[List[Optional[Union[(OperatorBase, LegacyBaseOperator)]]]]=None, callback: Optional[Callable[([int, np.ndarray, float, float], None)]]=None, quantum_instance: Optional[Union[(QuantumInstance, BaseBackend, Backend)]]=None) -> None: validate_min('p', p, 1) self._p = p self._mixer = (mixer.to_opflow() if isinstance(mixer, LegacyBaseOperator) else mixer) self._initial_state = initial_state super().__init__(operator, None, optimizer, initial_point=initial_point, gradient=gradient, expectation=expectation, include_custom=include_custom, max_evals_grouped=max_evals_grouped, callback=callback, quantum_instance=quantum_instance, aux_operators=aux_operators) @VQE.operator.setter def operator(self, operator: Union[(OperatorBase, LegacyBaseOperator)]) -> None: self.var_form = None super(QAOA, self.__class__).operator.__set__(self, operator) self.var_form = QAOAVarForm(self.operator, self._p, initial_state=self._initial_state, mixer_operator=self._mixer) @property def initial_state(self) -> Optional[Union[(QuantumCircuit, InitialState)]]: return self._initial_state @initial_state.setter def initial_state(self, initial_state: Optional[Union[(QuantumCircuit, InitialState)]]) -> None: self._initial_state = initial_state @property def mixer(self) -> Union[(QuantumCircuit, OperatorBase, LegacyBaseOperator)]: return self._mixer @mixer.setter def mixer(self, mixer: Union[(QuantumCircuit, OperatorBase, LegacyBaseOperator)]) -> None: self._mixer = mixer
def test_prune_projects_output2(db, settings): (stdout, stderr) = (io.StringIO(), io.StringIO()) instances = Project.objects.filter(id__in=projects_without_owner) call_command('prune_projects', stdout=stdout, stderr=stderr) assert (stdout.getvalue() == ("Found projects without ['owner']:\n%s" % get_prune_output(instances))) assert (not stderr.getvalue())
def merge_edges(edges): base_e = edges[0][1] merged_edges = [edges[0]] base_len = np.sqrt((((base_e[1][0] - base_e[0][0]) ** 2) + ((base_e[1][1] - base_e[0][1]) ** 2))) base_unit_v = (((base_e[1][0] - base_e[0][0]) / base_len), ((base_e[1][1] - base_e[0][1]) / base_len)) for edge in edges[1:]: room_idx = edge[0] e = edge[1] v_b0e0 = ((e[0][0] - base_e[0][0]), (e[0][1] - base_e[0][1])) proj_len = ((v_b0e0[0] * base_unit_v[0]) + (v_b0e0[1] * base_unit_v[1])) proj_e0 = (int((base_e[0][0] + (base_unit_v[0] * proj_len))), int((base_e[0][1] + (base_unit_v[1] * proj_len)))) proj_e1 = (int(((proj_e0[0] + e[1][0]) - e[0][0])), int(((proj_e0[1] + e[1][1]) - e[0][1]))) new_e = (proj_e0, proj_e1) new_edge = (room_idx, new_e) merged_edges.append(new_edge) adjusted_merged_edges = adjust_colinear_edges(merged_edges) return adjusted_merged_edges
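# Hedged usage sketch (not from the source): merge_edges re-anchors every edge
# onto the line of the first ("base") edge by projecting its start point onto
# the base direction. Toy input in the (room_idx, ((x0, y0), (x1, y1))) format
# used above; adjust_colinear_edges is stubbed out for the demo:
def adjust_colinear_edges(edges):  # stand-in for the real post-processing step
    return edges

edges = [(0, ((0, 0), (10, 0))),  # base edge along +x
         (1, ((2, 3), (7, 3)))]   # parallel edge, offset in y
print(merge_edges(edges))
# -> [(0, ((0, 0), (10, 0))), (1, ((2, 0), (7, 0)))]: the offset edge is
#    projected onto the base line with its length preserved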
def get_oggz_validate_version(): process = subprocess.Popen(['oggz-validate', '--version'], stdout=subprocess.PIPE) (output, unused_err) = process.communicate() retcode = process.poll() if (retcode != 0): return (0,) lines = output.splitlines() if (not lines): return (0,) parts = lines[0].split() if (not parts): return (0,) try: return tuple(map(int, parts[(- 1)].split(b'.'))) except ValueError: return (0,)
@frozen class SelectTHC(SelectOracle): num_mu: int num_spin_orb: int num_bits_theta: int kr1: int = 1 kr2: int = 1 control_val: Optional[int] = None @cached_property def control_registers(self) -> Tuple[(Register, ...)]: return (() if (self.control_val is None) else (Register('control', 1),)) @cached_property def selection_registers(self) -> Tuple[(SelectionRegister, ...)]: return (SelectionRegister('succ', bitsize=1), SelectionRegister('nu_eq_mp1', bitsize=1), SelectionRegister('mu', bitsize=self.num_mu.bit_length(), iteration_length=(self.num_mu + 1)), SelectionRegister('nu', bitsize=self.num_mu.bit_length(), iteration_length=(self.num_mu + 1)), SelectionRegister('plus_mn', bitsize=1), SelectionRegister('plus_a', bitsize=1), SelectionRegister('plus_b', bitsize=1)) @cached_property def target_registers(self) -> Tuple[(Register, ...)]: return (Register('sys_a', bitsize=(self.num_spin_orb // 2)), Register('sys_b', bitsize=(self.num_spin_orb // 2))) def build_composite_bloq(self, bb: 'BloqBuilder', succ: SoquetT, nu_eq_mp1: SoquetT, mu: SoquetT, nu: SoquetT, plus_mn: SoquetT, plus_a: SoquetT, plus_b: SoquetT, sys_a: SoquetT, sys_b: SoquetT) -> Dict[(str, 'SoquetT')]: (plus_b, sys_a, sys_b) = bb.add(CSwap((self.num_spin_orb // 2)), ctrl=plus_b, x=sys_a, y=sys_b) data = bb.allocate(self.num_bits_theta) (nu_eq_mp1, data, mu, sys_a) = bb.add(THCRotations(num_mu=self.num_mu, num_spin_orb=self.num_spin_orb, num_bits_theta=self.num_bits_theta, kr1=self.kr1, kr2=self.kr2), nu_eq_mp1=nu_eq_mp1, data=data, sel=mu, trg=sys_a) ((succ,), sys_b) = bb.add(ApplyControlledZs(cvs=(1,), bitsize=(self.num_spin_orb // 2)), ctrls=(succ,), system=sys_b) (nu_eq_mp1, data, mu, sys_a) = bb.add(THCRotations(num_mu=self.num_mu, num_spin_orb=self.num_spin_orb, num_bits_theta=self.num_bits_theta, kr1=self.kr1, kr2=self.kr2, adjoint=True), nu_eq_mp1=nu_eq_mp1, data=data, sel=mu, trg=sys_a) (plus_b, sys_a, sys_b) = bb.add(CSwap((self.num_spin_orb // 2)), ctrl=plus_b, x=sys_a, y=sys_b) plus_mn = bb.add(XGate(), q=plus_mn) (nu_eq_mp1, plus_a, plus_b) = bb.add(CSwap(1), ctrl=nu_eq_mp1, x=plus_a, y=plus_b) (nu_eq_mp1, mu, nu) = bb.add(CSwap(self.num_mu.bit_length()), ctrl=nu_eq_mp1, x=mu, y=nu) (plus_b, sys_a, sys_b) = bb.add(CSwap((self.num_spin_orb // 2)), ctrl=plus_b, x=sys_a, y=sys_b) (nu_eq_mp1, data, mu, sys_a) = bb.add(THCRotations(num_mu=self.num_mu, num_spin_orb=self.num_spin_orb, num_bits_theta=self.num_bits_theta, kr1=self.kr1, kr2=self.kr2, two_body_only=True), nu_eq_mp1=nu_eq_mp1, data=data, sel=mu, trg=sys_a) ((succ, nu_eq_mp1), sys_b) = bb.add(ApplyControlledZs(cvs=(1, 0), bitsize=(self.num_spin_orb // 2)), ctrls=(succ, nu_eq_mp1), system=sys_b) (nu_eq_mp1, data, mu, sys_a) = bb.add(THCRotations(num_mu=self.num_mu, num_spin_orb=self.num_spin_orb, num_bits_theta=self.num_bits_theta, kr1=self.kr1, kr2=self.kr2, two_body_only=True, adjoint=True), nu_eq_mp1=nu_eq_mp1, data=data, sel=mu, trg=sys_a) (plus_b, sys_a, sys_b) = bb.add(CSwap((self.num_spin_orb // 2)), ctrl=plus_b, x=sys_a, y=sys_b) bb.free(data) return {'succ': succ, 'nu_eq_mp1': nu_eq_mp1, 'mu': mu, 'nu': nu, 'plus_mn': plus_mn, 'plus_a': plus_a, 'plus_b': plus_b, 'sys_a': sys_a, 'sys_b': sys_b}
class TestTrainingExtensionsSpatialSvdCostCalculator(unittest.TestCase): def test_calculate_spatial_svd_cost(self): inp_tensor = tf.Variable(tf.random.normal([1, 32, 28, 28])) filter_tensor = tf.Variable(tf.random.normal([5, 5, 32, 64])) conv = tf.nn.conv2d(inp_tensor, filter_tensor, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW', name='Conv2D') conv_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv2D') sess = tf.compat.v1.Session() sess.run(tf.compat.v1.global_variables_initializer()) shape = conv_op.outputs[0].get_shape().as_list() self.assertEqual(shape, [1, 64, 28, 28]) layer = Layer(model=sess, op=conv_op, output_shape=shape) self.assertEqual((32 * 5), cc.SpatialSvdCostCalculator.calculate_max_rank(layer)) comp_ratios_to_check = [0.8, 0.75, 0.5, 0.25, 0.125] original_cost = cc.CostCalculator.compute_layer_cost(layer) for comp_ratio in comp_ratios_to_check: rank = cc.SpatialSvdCostCalculator.calculate_rank_given_comp_ratio(layer, comp_ratio, CostMetric.mac) print('Rank = {}, for compression_ratio={}'.format(rank, comp_ratio)) compressed_cost = cc.SpatialSvdCostCalculator.calculate_cost_given_rank(layer, rank) self.assertTrue(math.isclose((compressed_cost.mac / original_cost.mac), comp_ratio, abs_tol=0.01)) for comp_ratio in comp_ratios_to_check: compressed_cost = cc.SpatialSvdCostCalculator.calculate_per_layer_compressed_cost(layer, comp_ratio, CostMetric.mac) self.assertTrue(math.isclose((compressed_cost.mac / original_cost.mac), comp_ratio, abs_tol=0.01)) tf.compat.v1.reset_default_graph() sess.close() def test_calculate_spatial_svd_cost_with_stride(self): inp_tensor = tf.Variable(tf.random.normal([1, 28, 28, 32])) filter_tensor = tf.Variable(tf.random.normal([5, 5, 32, 64])) conv = tf.nn.conv2d(inp_tensor, filter_tensor, strides=[1, 2, 2, 1], padding='SAME', data_format='NHWC', name='Conv2D') conv_op = tf.compat.v1.get_default_graph().get_operation_by_name('Conv2D') sess = tf.compat.v1.Session() sess.run(tf.compat.v1.global_variables_initializer()) shape = conv_op.outputs[0].get_shape().as_list() self.assertEqual(shape, [1, 14, 14, 64]) shape = (shape[0], shape[3], shape[1], shape[2]) layer = Layer(model=sess, op=conv_op, output_shape=shape) original_cost = cc.CostCalculator.compute_layer_cost(layer) compressed_cost = cc.SpatialSvdCostCalculator.calculate_cost_given_rank(layer, 40) self.assertEqual(10035200, original_cost.mac) self.assertEqual(5017600, compressed_cost.mac) tf.compat.v1.reset_default_graph() sess.close() def test_calculate_spatial_svd_cost_all_layers(self): config = tf.compat.v1.ConfigProto() config.gpu_options.allow_growth = True sess = tf.compat.v1.Session(graph=tf.Graph(), config=config) with sess.graph.as_default(): _ = mnist_tf_model.create_model(data_format='channels_last') sess.run(tf.compat.v1.global_variables_initializer()) layer_database = LayerDatabase(model=sess, input_shape=(1, 28, 28, 1), working_dir=None) layer_ratio_list = [] for layer in layer_database: if (layer.module.type == 'Conv2D'): layer_ratio_list.append(LayerCompRatioPair(layer, Decimal(0.5))) else: layer_ratio_list.append(LayerCompRatioPair(layer, None)) compressed_cost = cc.SpatialSvdCostCalculator.calculate_compressed_cost(layer_database, layer_ratio_list, CostMetric.mac) self.assertEqual(8466464, compressed_cost.mac) tf.compat.v1.reset_default_graph() sess.close() shutil.rmtree(str('./temp_meta/'))
class OpMat(object): def __init__(self, name, array, nelem=1, type=None, asym=False, dimens=None): if isinstance(name, str): self.name = name else: raise TypeError if isinstance(array, np.ndarray): self.array = array else: raise TypeError if isinstance(nelem, int): self.nelem = nelem else: raise TypeError if (type is None): if (self.array.dtype == np.int32): self.type = 'i' elif (self.array.dtype == np.int64): self.type = 'i' elif (self.array.dtype == np.float64): self.type = 'd' elif (self.array.dtype == np.complex128): self.type = 'c' else: raise TypeError elif (not isinstance(type, str)): raise TypeError else: self.type = type if asym: self.asym = True else: self.asym = False if (dimens is None): self.dimens = ((self.array.size // self.nelem),) elif (not isinstance(dimens, tuple)): raise TypeError else: self.dimens = dimens @property def lenarray(self): return _lenarray(self.dimens) @property def labpars(self): if (self.type == 'c'): ni = 0 nr = self.nelem nri = 2 elif (self.type == 'd'): ni = 0 nr = self.nelem nri = 1 else: ni = self.nelem nr = 0 nri = 1 ntot = self.lenarray n1 = self.dimens[0] n2 = (self.dimens[1] if (len(self.dimens) >= 2) else 1) n3 = (self.dimens[2] if (len(self.dimens) >= 3) else 1) n4 = (self.dimens[3] if (len(self.dimens) >= 4) else 1) n5 = (self.dimens[4] if (len(self.dimens) >= 5) else 1) return (self.name, ni, nr, nri, ntot, n1, n2, n3, n4, n5, self.asym) def print_mat(self, wid=1, doinp=False, **kwargs): (name, ni, nr, nri, ntot, n1, n2, n3, n4, n5, asym) = self.labpars if doinpprt(name, self.array, doinp=doinp, **kwargs): return printlab(name, ni, nr, nri, ntot, 0, n1, n2, n3, n4, n5, asym, **kwargs) if qcmio.aoints(name): print2e(self.name, self.dimens[3], self.array, **kwargs) elif (len(self.dimens) == 1): if re.match('GAUSSIAN SCALARS', self.name): print1d(True, self.type, 5, ' ', self.array, **kwargs) else: print1d(False, self.type, wid, ' ', self.array, **kwargs) elif (len(self.dimens) == 2): if (self.dimens[0] < 0): ltout(' ', self.dimens[1], self.array, 0, 0, **kwargs) else: sqout(' ', self.dimens[0], self.dimens[1], self.array, 0, 0, **kwargs) elif (len(self.dimens) >= 3): allpos = True nmat = 1 for i in range(2, len(self.dimens)): nmat = (nmat * self.dimens[i]) allpos = (allpos and (self.dimens[i] > 0)) if ((self.dimens[0] < 0) and (self.dimens[1] > 0) and allpos): for im in range(nmat): ltout(name, self.dimens[1], self.array, 0, (im + 1), **kwargs) elif ((self.dimens[0] > 0) and (self.dimens[1] > 0) and allpos): for im in range(self.dimens[2]): sqout(name, self.dimens[0], self.dimens[1], self.array, 0, (im + 1), **kwargs) elif ((len(self.dimens) >= 4) and (self.dimens[0] == (- self.dimens[1])) and (self.dimens[2] == (- self.dimens[3]))): nmat = ((self.dimens[3] * (self.dimens[3] + 1)) // 2) if (len(self.dimens) >= 5): nmat = (self.dimens[4] * nmat) for im in range(nmat): ltout(name, self.dimens[1], self.array, 0, (im + 1), **kwargs) else: print1d(False, self.type, 1, ' ', self.array, **kwargs) else: print('cannot print dims', self.dimens, **kwargs) def __str__(self): stream = io.StringIO() self.print_mat(file=stream) str = stream.getvalue() return str[:(- 1)] def make_indxf(self, *args): return _makeindx(self.dimens, self.asym, args) def make_indxc(self, *args): return _makeindxc(self.dimens, self.asym, args) def get_elemf(self, *args): (indx, sign) = _makeindx(self.dimens, self.asym, args) val = self.array[indx] if (sign < 0): if (self.type == 'c'): val = val.conjugate() if self.asym: val = (- val) return val def get_elemc(self, *args): (indx, sign) = _makeindxc(self.dimens, self.asym, args) val = self.array[indx] if (sign < 0): if (self.type == 'c'): val = val.conjugate() if self.asym: val = (- val) return val def set_elemf(self, value, *args): (indx, sign) = _makeindx(self.dimens, self.asym, args) val = value if (sign < 0): if (self.type == 'c'): val = val.conjugate() if self.asym: val = (- val) self.array[indx] = val return self.array[indx] def set_elemc(self, value, *args): (indx, sign) = _makeindxc(self.dimens, self.asym, args) val = value if (sign < 0): if (self.type == 'c'): val = val.conjugate() if self.asym: val = (- val) self.array[indx] = val return self.array[indx] def expand(self): d = tuple(reversed([abs(num) for num in self.dimens])) if qcmio.aoints(self.name): if (self.dimens[0] < 0): n = self.dimens[3] lr = (self.array.size // self.nelem) if (self.nelem == 1): narr = qcmio.expao1(n, self.array) else: narr = qcmio.expaon(n, self.array) else: narr = self.array else: narr = np.empty(d, dtype=type(self.array[0])) for i in np.ndindex(*d): narr[i] = self.get_elemc(*i) self.array = narr.reshape(_lenarray(d)) self.dimens = tuple(reversed(d)) return self.array def wr_lbuf(self, iu, lenbuf): (label, ni, nr, nri, ntot, n1, n2, n3, n4, n5, asym) = self.labpars lenbx = (lenbuf - (lenbuf % (nri * self.nelem))) lenbx = (lenbx // nri) qcmio.wr_labl(iu, label, ni, nr, ntot, lenbx, n1, n2, n3, n4, n5, asym) if (self.type == 'i'): qcmio.wr_ibuf(iu, lenbx, self.array) elif (self.type == 'c'): qcmio.wr_cbuf(iu, lenbx, self.array) else: qcmio.wr_rbuf(iu, lenbx, self.array) def wr_lrind(self, iu, lenbuf): (label, ni, nr, nri, ntot, n1, n2, n3, n4, n5, asym) = self.labpars ntot = self.lenarray lenbx = (lenbuf // self.nelem) y = self.array.reshape((self.nelem, ntot), order='F') nnz = qcmio.numnzr(y) qcmio.wr_labl(iu, self.name, 1, nr, nnz, lenbx, ntot, 1, 1, 1, 1, 0) qcmio.wr_rind(iu, nnz, lenbx, y) def wr_lao2e(self, iu, lenbuf): (label, ni, nr, nri, ntot, n1, n2, n3, n4, n5, asym) = self.labpars ntot = self.lenarray if (((ntot * self.nelem) != self.array.size) or (self.nelem > 3)): print('2e write error NTot=', ntot, 'nelem=', self.nelem, 'size', self.array.size) raise TypeError lenbx = (lenbuf // (2 + self.nelem)) nnz = qcmio.numnza(self.array) qcmio.wr_labl(iu, label, 4, nr, nnz, lenbx, n1, n2, n3, n4, n5, asym) qcmio.wr_2e(iu, nnz, self.dimens[3], lenbx, self.array) def write(self, iu, lenbuf): if qcmio.aoints(self.name): self.wr_lao2e(iu, lenbuf) else: self.wr_lbuf(iu, lenbuf)
def spice_junction(jc, nc, isc, j01, j02, n1, n2, Eg, rsh): isource = 'i{0} {1} {2} dc {3}\n'.format(jc, nc, (nc + 1), isc) d1 = 'd{0} {1} {2} diode{3} OFF\n'.format(((2 * jc) - 1), (nc + 1), nc, ((2 * jc) - 1)) d1deff = '.model diode{0} d(is={1},n={2},eg={3})\n'.format(((2 * jc) - 1), j01, n1, Eg) d2 = 'd{0} {1} {2} diode{3} OFF\n'.format((2 * jc), (nc + 1), nc, (2 * jc)) d2deff = '.model diode{0} d(is={1},n={2},eg={3})\n'.format((2 * jc), j02, n2, Eg) rshunt = 'r{0} {1} {2} {3}\n'.format((2 * jc), (nc + 1), nc, rsh) junction = (((((isource + d1) + d1deff) + d2) + d2deff) + rshunt) return junction
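# Hedged usage sketch (not from the source): spice_junction emits one SPICE
# netlist fragment per sub-cell (current source, two diodes, shunt resistor)
# between nodes nc and nc+1. Toy call with made-up device parameters:
print(spice_junction(jc=1, nc=1, isc=0.014, j01=1e-19, j02=1e-12,
                     n1=1, n2=2, Eg=1.42, rsh=1e6))
# i1 1 2 dc 0.014
# d1 2 1 diode1 OFF
# .model diode1 d(is=1e-19,n=1,eg=1.42)
# d2 2 1 diode2 OFF
# .model diode2 d(is=1e-12,n=2,eg=1.42)
# r2 2 1 1000000.0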
@torch.inference_mode() def main(): parser = argparse.ArgumentParser() parser.add_argument('checkpoint', help="Model checkpoint (or 'pretrained=<model_id>')") parser.add_argument('--data_root', default='data') parser.add_argument('--batch_size', type=int, default=512) parser.add_argument('--num_workers', type=int, default=4) parser.add_argument('--cased', action='store_true', default=False, help='Cased comparison') parser.add_argument('--punctuation', action='store_true', default=False, help='Check punctuation') parser.add_argument('--rotation', type=int, default=0, help='Angle of rotation (counter clockwise) in degrees.') parser.add_argument('--device', default='cuda') parser.add_argument('--visualize', action='store_true', default=False) parser.add_argument('--test_set', default='base') (args, unknown) = parser.parse_known_args() kwargs = parse_model_args(unknown) charset_test = (string.digits + string.ascii_lowercase) if args.cased: charset_test += string.ascii_uppercase if args.punctuation: charset_test += string.punctuation kwargs.update({'charset_test': charset_test}) print(f'Additional keyword arguments: {kwargs}') model_paths = [] if os.path.isfile(args.checkpoint): model_paths.append(args.checkpoint) else: for model_path in os.listdir(args.checkpoint): if model_path.endswith('.ckpt'): model_paths.append(os.path.join(args.checkpoint, model_path)) if args.visualize: assert (len(model_paths) == 1) for model_path in model_paths: model = load_from_checkpoint(model_path, **kwargs).eval().to(args.device) hp = model.hparams datamodule = SceneTextDataModule(args.data_root, '_unused_', hp.img_size, hp.max_label_length, hp.charset_train, hp.charset_test, args.batch_size, args.num_workers, False, rotation=args.rotation) if (args.test_set == 'base'): test_set = (SceneTextDataModule.TEST_BENCHMARK_SUB + SceneTextDataModule.TEST_BENCHMARK) elif (args.test_set == 'new'): test_set = SceneTextDataModule.TEST_NEW test_set = sorted(set(test_set)) results = {} max_width = max(map(len, test_set)) for (name, dataloader) in datamodule.test_dataloaders(test_set).items(): total = 0 correct = 0 ned = 0 confidence = 0 label_length = 0 if args.visualize: visualize_dir = os.path.join(model_path.split('run')[0], 'val', name) if (not os.path.exists(visualize_dir)): os.makedirs(visualize_dir) else: visualize_dir = None for (imgs, labels) in tqdm(iter(dataloader), desc=f'{name:>{max_width}}'): res = model.test_step((imgs.to(model.device), labels), (- 1), visualize_dir=visualize_dir)['output'] total += res.num_samples correct += res.correct ned += res.ned confidence += res.confidence label_length += res.label_length accuracy = ((100 * correct) / total) mean_ned = (100 * (1 - (ned / total))) mean_conf = ((100 * confidence) / total) mean_label_length = (label_length / total) results[name] = Result(name, total, accuracy, mean_ned, mean_conf, mean_label_length) if (args.test_set == 'base'): result_groups = {'Benchmark (Subset)': SceneTextDataModule.TEST_BENCHMARK_SUB, 'Benchmark': SceneTextDataModule.TEST_BENCHMARK} elif (args.test_set == 'new'): result_groups = {'New': SceneTextDataModule.TEST_NEW} with open((((model_path + '_') + args.test_set) + '.log.txt'), 'w') as f: for out in [f, sys.stdout]: for (group, subset) in result_groups.items(): print(f'{group} set:', file=out) print_results_table([results[s] for s in subset], out) print('\n', file=out)
def prime2_hint_text(): from randovania.games.prime2.generator.pickup_pool import dark_temple_keys, sky_temple_keys db = default_database.resource_database_for(RandovaniaGame.METROID_PRIME_ECHOES) result = [] for temple in range(3): key = dark_temple_keys.create_dark_temple_key(0, temple, db) result.append((key.name.replace(' 1', '').strip(), key.pickup_category, key.broad_category)) key = sky_temple_keys.create_sky_temple_key(0, db) result.append(('Sky Temple Key', key.pickup_category, key.broad_category)) return result
@pytest.mark.parametrize('username,password', users) def test_detail_export(db, client, username, password): client.login(username=username, password=password) instances = Attribute.objects.all() for instance in instances: url = reverse(urlnames['detail_export'], args=[instance.pk]) response = client.get(url) assert (response.status_code == status_map['detail'][username]), response.content if (response.status_code == 200): root = et.fromstring(response.content) assert (root.tag == 'rdmo') for child in root: assert (child.tag in ['attribute'])
def test_interactive(hatch, helpers, temp_dir): project_name = 'My.App' description = 'foo ' with temp_dir.as_cwd(): result = hatch('new', '-i', input=f'''{project_name} {description}''') path = (temp_dir / 'my-app') expected_files = helpers.get_template_files('new.default', project_name, description=description) helpers.assert_files(path, expected_files) assert (result.exit_code == 0), result.output assert (remove_trailing_spaces(result.output) == helpers.dedent(f''' Project name: {project_name} Description []: {description} my-app src my_app __about__.py __init__.py tests __init__.py LICENSE.txt README.md pyproject.toml '''))
def test_all_partitions(): (mechanism, purview) = ((0, 1), (2,)) assert (set(all_partitions(mechanism, purview)) == set([KPartition(Part((0, 1), ()), Part((), (2,))), KPartition(Part((0,), ()), Part((1,), ()), Part((), (2,))), KPartition(Part((0,), (2,)), Part((1,), ()), Part((), ())), KPartition(Part((0,), ()), Part((1,), (2,)), Part((), ()))])) (mechanism, purview) = ((0, 1), (2, 3)) assert (set(all_partitions(mechanism, purview)) == set([KPartition(Part((0, 1), ()), Part((), (2, 3))), KPartition(Part((0,), ()), Part((1,), (2, 3)), Part((), ())), KPartition(Part((0,), (2, 3)), Part((1,), ()), Part((), ())), KPartition(Part((0,), ()), Part((1,), ()), Part((), (2, 3))), KPartition(Part((0,), ()), Part((1,), (3,)), Part((), (2,))), KPartition(Part((0,), (2,)), Part((1,), ()), Part((), (3,))), KPartition(Part((0,), ()), Part((1,), (2,)), Part((), (3,))), KPartition(Part((0,), (3,)), Part((1,), (2,)), Part((), ())), KPartition(Part((0,), (3,)), Part((1,), ()), Part((), (2,))), KPartition(Part((0,), (2,)), Part((1,), (3,)), Part((), ()))]))
def remove_na(x, y=None, paired=False, axis='rows'): x = np.asarray(x) assert (axis in ['rows', 'columns']), 'axis must be rows or columns.' if (y is None): return _remove_na_single(x, axis=axis) elif isinstance(y, (int, float, str)): return (_remove_na_single(x, axis=axis), y) else: y = np.asarray(y) assert (y.size != 0), 'y cannot be an empty list or array.' if (y.size == 1): return (_remove_na_single(x, axis=axis), y) if ((x.ndim != y.ndim) or (paired is False)): x_no_nan = _remove_na_single(x, axis=axis) y_no_nan = _remove_na_single(y, axis=axis) return (x_no_nan, y_no_nan) if (x.ndim == 1): x_mask = (~ np.isnan(x)) y_mask = (~ np.isnan(y)) else: ax = (1 if (axis == 'rows') else 0) x_mask = (~ np.any(np.isnan(x), axis=ax)) y_mask = (~ np.any(np.isnan(y), axis=ax)) if ((~ x_mask.all()) or (~ y_mask.all())): ax = (0 if (axis == 'rows') else 1) ax = (0 if (x.ndim == 1) else ax) both = np.logical_and(x_mask, y_mask) x = x.compress(both, axis=ax) y = y.compress(both, axis=ax) return (x, y)
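# Hedged usage sketch (not from the source): with paired=True, remove_na drops
# an index from both arrays whenever either one is NaN at that position:
import numpy as np

x = np.array([1.0, np.nan, 3.0, 4.0])
y = np.array([5.0, 6.0, np.nan, 8.0])
x_clean, y_clean = remove_na(x, y, paired=True)
print(x_clean, y_clean)  # [1. 4.] [5. 8.]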
@pytest.mark.skipif((literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True), reason='Skipping test because should only be run when releasing minor transformers version') @pytest.mark.usefixtures('sm_env') @parameterized_class([{'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9}}, {'framework': 'tensorflow', 'script': 'run_tf.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.g4dn.xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9}}]) class SingleNodeTest(unittest.TestCase): def setUp(self): if (self.framework == 'pytorch'): subprocess.run(f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding='utf-8', check=True) assert hasattr(self, 'env') def create_estimator(self, instance_count=1): return HuggingFace(entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-single', instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version='py36') def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv') def test_glue(self): estimator = self.create_estimator() estimator.fit() result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() eval_accuracy = list(result_metrics_df[(result_metrics_df.metric_name == 'eval_accuracy')]['value']) eval_loss = list(result_metrics_df[(result_metrics_df.metric_name == 'eval_loss')]['value']) train_runtime = Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999) assert (train_runtime <= self.results['train_runtime']) assert all(((t >= self.results['eval_accuracy']) for t in eval_accuracy)) assert all(((t <= self.results['eval_loss']) for t in eval_loss)) with open(f'{estimator.latest_training_job.name}.json', 'w') as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
def make_pin(pin, i, lcd): global noisr if (pin in keypad_pullup): pin = Pin(pin, Pin.IN, Pin.PULL_UP) else: pin = Pin(pin, Pin.IN, Pin.PULL_DOWN) def cbr(pin): handle_pin(pin, i, lcd) if (not noisr): try: pin.irq(handler=cbr, trigger=(Pin.IRQ_FALLING | Pin.IRQ_RISING)) except: print('no Pin.irq!! keypresses will lag') noisr = True return pin
def main(): parser = argparse.ArgumentParser(description='Command line interface for P-Tuning.') parser.add_argument('--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the data files for the task.') parser.add_argument('--model_type', default='albert', type=str, required=True, choices=MODEL_CLASSES.keys(), help='The type of the pretrained language model to use') parser.add_argument('--model_name_or_path', default='roberta-large', type=str, required=True, help='Path to the pre-trained model or shortcut name') parser.add_argument('--task_type', default='single_task', type=str, required=False, choices=['single_task', 'cross_task'], help='The type of the task to train/evaluate on') parser.add_argument('--task_name', default=None, type=str, required=True, choices=PROCESSORS.keys(), help='The name of the task to train/evaluate on') parser.add_argument('--k', default=16, type=int, required=False, help='The number of examples of each label') parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written') parser.add_argument('--pattern_ids', default=[1], type=int, nargs='+', help='The ids of the PVPs to be used (only for PET)') parser.add_argument('--cross_prompt', action='store_true', help='If true, when task_type is cross-task, each task in one group has different specific PVPs,If false, all the task in one group share the same PVPs') parser.add_argument('--alpha', default=0.9999, type=float, help='Weighting term for the auxiliary language modeling task (only for PET)') parser.add_argument('--pet_repetitions', default=3, type=int, help='The number of times to repeat PET training and testing with different seeds.') parser.add_argument('--pet_max_seq_length', default=256, type=int, help='The maximum total input sequence length after tokenization for PET. Sequences longer than this will be truncated, sequences shorter will be padded.') parser.add_argument('--pet_per_gpu_train_batch_size', default=4, type=int, help='Batch size per GPU/CPU for PET training.') parser.add_argument('--pet_per_gpu_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for PET evaluation.') parser.add_argument('--pet_gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass in PET.') parser.add_argument('--pet_num_train_epochs', default=3, type=float, help='Total number of training epochs to perform in PET.') parser.add_argument('--pet_max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform in PET. 
Override num_train_epochs.') parser.add_argument('--train_examples', default=(- 1), type=int, help='The total number of train examples to use, where -1 equals all examples.') parser.add_argument('--eval_examples', default=(- 1), type=int, help='The total number of test examples to use, where -1 equals all examples.') parser.add_argument('--dev32_examples', default=(- 1), type=int, help='The total number of dev32 examples to use, where -1 equals all examples.') parser.add_argument('--split_examples_evenly', action='store_true', help='If true, train examples are not chosen randomly, but split evenly across all labels.') parser.add_argument('--cache_dir', default='', type=str, help='Where to store the pre-trained models downloaded from S3.') parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.') parser.add_argument('--weight_decay', default=0.1, type=float, help='Weight decay if we apply some.') parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.') parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.') parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.') parser.add_argument('--logging_steps', type=int, default=50, help='Log every X updates steps.') parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available') parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory') parser.add_argument('--seed', type=int, default=42, help='random seed for initialization') parser.add_argument('--do_train', action='store_true', help='Whether to perform training') parser.add_argument('--do_eval', action='store_true', help='Whether to perform evaluation') parser.add_argument('--eval_set', choices=['dev', 'test'], default='dev', help='Whether to perform evaluation on the dev set or the test set') parser.add_argument('--embed_size', default=128, type=int, help='albert: 128, roberta-large:1024, roberta-base:768') parser.add_argument('--prompt_encoder_type', type=str, default='lstm', choices=['lstm', 'mlp']) parser.add_argument('--eval_every_step', default=20, type=int, help='') args = parser.parse_args() logger.info('Parameters: {}'.format(args)) args.device = ('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu') args.n_gpu = torch.cuda.device_count() args.task_name = args.task_name.lower() if (args.task_name not in PROCESSORS): raise ValueError("Task '{}' not found".format(args.task_name)) processor = PROCESSORS[args.task_name](args.task_name) args.label_list = processor.get_labels() (train_ex_per_label, eval_ex_per_label, dev32_ex_per_label) = (None, None, None) (train_ex, eval_ex, dev32_ex) = (args.train_examples, args.eval_examples, args.dev32_examples) if args.split_examples_evenly: train_ex_per_label = (eq_div(args.train_examples, len(args.label_list)) if (args.train_examples != (- 1)) else (- 1)) eval_ex_per_label = (eq_div(args.eval_examples, len(args.label_list)) if (args.eval_examples != (- 1)) else (- 1)) dev32_ex_per_label = (eq_div(args.dev32_examples, len(args.label_list)) if (args.dev32_examples != (- 1)) else (- 1)) (train_ex, eval_ex, dev32_ex) = (None, None, None) eval_set = (TEST_SET if (args.eval_set == 'test') else DEV_SET) if ((args.task_type is None) or (args.task_type == 'single_task')): train_data = load_examples(args.task_name, args.data_dir, TRAIN_SET, num_examples=(- 1), 
num_examples_per_label=None) eval_data = load_examples(args.task_name, args.data_dir, TEST_SET, num_examples=(- 1), num_examples_per_label=None) dev32_data = load_examples(args.task_name, args.data_dir, DEV_SET, num_examples=(- 1), num_examples_per_label=None) args.metrics = METRICS.get(args.task_name, DEFAULT_METRICS) (pet_model_cfg, pet_train_cfg, pet_eval_cfg) = load_pet_configs(args) logger.info('Training Example:') logger.info('text_a={}'.format(train_data[0].text_a)) logger.info('text_b={}'.format(train_data[0].text_b)) logger.info('task={}'.format(train_data[0].task)) logger.info('label={}'.format(train_data[0].label)) logger.info('') train_pet(eval_data=eval_data, dev32_data=dev32_data, train_data=train_data, train_config=pet_train_cfg, eval_config=pet_eval_cfg, model_config=pet_model_cfg, pattern_ids=args.pattern_ids, output_dir=args.output_dir, repetitions=args.pet_repetitions, do_train=args.do_train, do_eval=args.do_eval, seed=args.seed) elif (args.task_type == 'cross_task'): train_data = load_examples(args.task_name, args.data_dir, TRAIN_SET, num_examples=(- 1), num_examples_per_label=None) dev_data = load_examples(args.task_name, args.data_dir, DEV_SET, num_examples=(- 1), num_examples_per_label=None) args.metrics = METRICS.get(args.task_name, DEFAULT_METRICS) (pet_model_cfg, pet_train_cfg, pet_eval_cfg) = load_pet_configs(args) logger.info('Training Example:') logger.info('text_a={}'.format(train_data[0].text_a)) logger.info('text_b={}'.format(train_data[0].text_b)) logger.info('task={}'.format(train_data[0].task)) logger.info('label={}'.format(train_data[0].label)) logger.info('') train_pet_cross(dev32_data=dev_data, train_data=train_data, train_config=pet_train_cfg, eval_config=pet_eval_cfg, model_config=pet_model_cfg, pattern_ids=args.pattern_ids, output_dir=args.output_dir, repetitions=args.pet_repetitions, do_train=args.do_train, do_eval=args.do_eval, seed=args.seed)
@with_fixtures(ConfigWithFiles) def test_incorrect_replacement_of_configuration(config_with_files): fixture = config_with_files config_file = fixture.new_config_file(filename=ConfigWithSetting.filename, contents='from reahl.component.config import Configuration; some_key = Configuration()') fixture.set_config_spec(easter_egg, 'reahl.component_dev.test_config:ConfigWithSetting') config = StoredConfiguration(fixture.config_dir.name) with expected(ConfigurationException): config.configure()
class MediatorMixin(): address_to_privkey: Dict[(Address, PrivateKey)] address_to_client: Dict[(Address, Client)] block_number: BlockNumber token_id: TokenAddress def __init__(self): super().__init__() self.partner_to_balance_proof_data: Dict[(Address, BalanceProofData)] = {} self.secrethash_to_secret: Dict[(SecretHash, Secret)] = {} self.waiting_for_unlock: Dict[(Secret, Address)] = {} self.initial_number_of_channels = 2 def _get_balance_proof_data(self, partner, client_address): if (partner not in self.partner_to_balance_proof_data): client = self.address_to_client[client_address] partner_channel = client.address_to_channel[partner] self.partner_to_balance_proof_data[partner] = BalanceProofData(canonical_identifier=partner_channel.canonical_identifier) return self.partner_to_balance_proof_data[partner] def _update_balance_proof_data(self, partner, amount, expiration, secret, our_address): expected = self._get_balance_proof_data(partner, our_address) lock = HashTimeLockState(amount=amount, expiration=expiration, secrethash=sha256_secrethash(secret)) expected.update(amount, lock) return expected def _new_mediator_transfer(self, initiator_address, target_address, payment_id, amount, secret, our_address) -> LockedTransferSignedState: initiator_pkey = self.address_to_privkey[initiator_address] balance_proof_data = self._update_balance_proof_data(initiator_address, amount, (self.block_number + 10), secret, our_address) self.secrethash_to_secret[sha256_secrethash(secret)] = secret return factories.create(factories.LockedTransferSignedStateProperties(**balance_proof_data.properties.__dict__, amount=amount, expiration=BlockExpiration((self.block_number + 10)), payment_identifier=payment_id, secret=secret, initiator=initiator_address, target=target_address, token=self.token_id, sender=initiator_address, recipient=our_address, pkey=initiator_pkey, message_identifier=MessageID(1))) def _action_init_mediator(self, transfer: LockedTransferSignedState, client_address) -> WithOurAddress: client = self.address_to_client[client_address] initiator_channel = client.address_to_channel[Address(transfer.initiator)] target_channel = client.address_to_channel[Address(transfer.target)] assert isinstance(target_channel, NettingChannelState) action = ActionInitMediator(candidate_route_states=[factories.make_route_from_channel(target_channel)], from_hop=factories.make_hop_to_channel(initiator_channel), from_transfer=transfer, balance_proof=transfer.balance_proof, sender=transfer.balance_proof.sender) return WithOurAddress(our_address=client_address, data=action) def _unwrap(self, with_our_address: WithOurAddress): our_address = with_our_address.our_address data = with_our_address.data client = self.address_to_client[our_address] return (data, client, our_address) @rule(target=init_mediators, payment_id=payment_id(), amount=integers(min_value=1, max_value=100), secret=secret()) def valid_init_mediator(self, from_channel, to_channel, payment_id, amount, secret): our_address = from_channel.our_address assume((to_channel.our_address == our_address)) client = self.address_to_client[our_address] from_partner = from_channel.partner_address to_partner = to_channel.partner_address assume((from_partner != to_partner)) transfer = self._new_mediator_transfer(from_partner, to_partner, payment_id, amount, secret, our_address) client_data = self._action_init_mediator(transfer, our_address) result = node.state_transition(client.chain_state, client_data.data) assert event_types_match(result.events, SendProcessed, SendLockedTransfer)
return client_data @rule(target=secret_requests, previous_action_with_address=consumes(init_mediators)) def valid_receive_secret_reveal(self, previous_action_with_address): (previous_action, client, our_address) = self._unwrap(previous_action_with_address) secret = self.secrethash_to_secret[previous_action.from_transfer.lock.secrethash] sender = previous_action.from_transfer.target recipient = previous_action.from_transfer.initiator action = ReceiveSecretReveal(secret=secret, sender=sender) result = node.state_transition(client.chain_state, action) expiration = previous_action.from_transfer.lock.expiration in_time = (self.block_number < (expiration - DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS)) still_waiting = (self.block_number < (expiration + DEFAULT_WAIT_BEFORE_LOCK_REMOVAL)) if (in_time and self.channel_opened(sender, our_address) and self.channel_opened(recipient, our_address)): assert event_types_match(result.events, SendSecretReveal, SendUnlock, EventUnlockSuccess) self.event('Unlock successful.') self.waiting_for_unlock[secret] = recipient elif (still_waiting and self.channel_opened(recipient, our_address)): assert event_types_match(result.events, SendSecretReveal) self.event('Unlock failed, secret revealed too late.') else: assert (not result.events) self.event('ReceiveSecretRevealed after removal of lock - dropped.') return WithOurAddress(our_address=our_address, data=action) @rule(previous_action_with_address=secret_requests) def replay_receive_secret_reveal(self, previous_action_with_address): (previous_action, client, _) = self._unwrap(previous_action_with_address) result = node.state_transition(client.chain_state, previous_action) assert (not result.events) @rule(previous_action_with_address=secret_requests, invalid_sender=address()) def replay_receive_secret_reveal_scrambled_sender(self, previous_action_with_address, invalid_sender): (previous_action, client, _) = self._unwrap(previous_action_with_address) action = ReceiveSecretReveal(previous_action.secret, invalid_sender) result = node.state_transition(client.chain_state, action) assert (not result.events) @rule(previous_action_with_address=init_mediators, secret=secret()) def wrong_secret_receive_secret_reveal(self, previous_action_with_address, secret): (previous_action, client, _) = self._unwrap(previous_action_with_address) sender = previous_action.from_transfer.target action = ReceiveSecretReveal(secret, sender) result = node.state_transition(client.chain_state, action) assert (not result.events) @rule(target=secret_requests, previous_action_with_address=consumes(init_mediators), invalid_sender=address()) def wrong_address_receive_secret_reveal(self, previous_action_with_address, invalid_sender): (previous_action, client, our_address) = self._unwrap(previous_action_with_address) secret = self.secrethash_to_secret[previous_action.from_transfer.lock.secrethash] invalid_action = ReceiveSecretReveal(secret, invalid_sender) result = node.state_transition(client.chain_state, invalid_action) assert (not result.events) valid_sender = previous_action.from_transfer.target valid_action = ReceiveSecretReveal(secret, valid_sender) return WithOurAddress(our_address=our_address, data=valid_action)
class StsbProcessor(DataProcessor): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) warnings.warn(DEPRECATION_WARNING.format('processor'), FutureWarning) def get_example_from_tensor_dict(self, tensor_dict): return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy())) def get_train_examples(self, data_dir): return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train') def get_dev_examples(self, data_dir): return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev') def get_test_examples(self, data_dir): return self._create_examples(self._read_tsv(os.path.join(data_dir, 'test.tsv')), 'test') def get_labels(self): return [None] def _create_examples(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): if (i == 0): continue guid = f'{set_type}-{line[0]}' text_a = line[7] text_b = line[8] label = (None if (set_type == 'test') else line[(- 1)]) examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples
class ResBlock(nn.Module): def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, groups=1): super(ResBlock, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=get_same_padding(kernel_size, dilation), dilation=dilation, groups=groups) self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size, stride=1, padding=get_same_padding(kernel_size, dilation), dilation=dilation, groups=groups) self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True) self.res_translate = None if ((not (inplanes == planes)) or (not (stride == 1))): self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride) def forward(self, x): residual = x out = self.relu(self.conv1(x)) out = self.conv2(out) if (self.res_translate is not None): residual = self.res_translate(residual) out += residual return out
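# Hedged usage sketch (not from the source): ResBlock adds a 1x1 res_translate
# conv whenever channels or stride change so the skip connection matches. The
# external helper get_same_padding is assumed; a common definition is used here:
import torch
import torch.nn as nn

def get_same_padding(kernel_size, dilation):  # assumed helper, not from source
    return dilation * (kernel_size - 1) // 2

block = ResBlock(inplanes=16, planes=32, kernel_size=3, stride=2)
x = torch.randn(1, 16, 64, 64)
print(block(x).shape)  # torch.Size([1, 32, 32, 32])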
def _create_delegate_for(item: ItemResourceInfo): factory = QtWidgets.QItemEditorFactory() factory.registerEditor(QtCore.QMetaType.Int.value, RangeSpinBoxItemEditorCreator(0, item.max_capacity)) delegate = QtWidgets.QStyledItemDelegate() delegate.setItemEditorFactory(factory) return delegate
@pytest.fixture(params=[{'encoded': b'\x00\x00', 'bit_count': 15, 'json': {'minimal_logic': False, 'specific_levels': {}}}, {'encoded': b'\x80', 'bit_count': 1, 'json': {'minimal_logic': True, 'specific_levels': {}}}, {'encoded': b'X\x00\x00', 'bit_count': 18, 'json': {'minimal_logic': False, 'specific_levels': {'Dash': 'expert'}}}, {'encoded': b'f3\x00\x00', 'bit_count': 27, 'json': {'minimal_logic': False, 'specific_levels': {i: 'hypermode' for i in ['BombJump', 'BSJ', 'Dash', 'Movement']}}}]) def trick_level_data(request, mocker, echoes_game_description): tricks = echoes_game_description.resource_database.trick[:14] mocker.patch('randovania.layout.base.trick_level_configuration._all_tricks', return_value=tricks) return (request.param['encoded'], request.param['bit_count'], TrickLevelConfiguration.from_json(request.param['json'], game=RandovaniaGame.METROID_PRIME_ECHOES))
@dataclass class TrainingArguments(): model_ckpt: Optional[str] = field(default='lvwerra/codeparrot', metadata={'help': 'Model name or path of model to be trained.'}) save_dir: Optional[str] = field(default='./', metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'}) dataset_name_train: Optional[str] = field(default='lvwerra/codeparrot-clean-train', metadata={'help': 'Name or path of training dataset.'}) dataset_name_valid: Optional[str] = field(default='lvwerra/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'}) train_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for training.'}) valid_batch_size: Optional[int] = field(default=2, metadata={'help': 'Batch size for evaluation.'}) weight_decay: Optional[float] = field(default=0.1, metadata={'help': 'Value of weight decay.'}) shuffle_buffer: Optional[int] = field(default=1000, metadata={'help': 'Size of buffer used to shuffle streaming dataset.'}) learning_rate: Optional[float] = field(default=0.0002, metadata={'help': 'Learning rate for training.'}) lr_scheduler_type: Optional[str] = field(default='cosine', metadata={'help': 'Learning rate.'}) num_warmup_steps: Optional[int] = field(default=750, metadata={'help': 'Number of warmup steps in the learning rate schedule.'}) gradient_accumulation_steps: Optional[int] = field(default=16, metadata={'help': 'Number of gradient accumulation steps.'}) gradient_checkpointing: Optional[bool] = field(default=True, metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'}) max_train_steps: Optional[int] = field(default=50000, metadata={'help': 'Maximum number of training steps.'}) max_eval_steps: Optional[int] = field(default=(- 1), metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'}) seq_length: Optional[int] = field(default=1024, metadata={'help': 'Sequence lengths used for training.'}) seed: Optional[int] = field(default=1, metadata={'help': 'Training seed.'}) save_checkpoint_steps: Optional[int] = field(default=1024, metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'})
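# Hedged usage sketch (not from the source): these dataclass fields carry
# argparse help text in their metadata, which transformers' HfArgumentParser
# can turn into a CLI directly:
from transformers import HfArgumentParser

parser = HfArgumentParser(TrainingArguments)
args = parser.parse_args_into_dataclasses(args=['--train_batch_size', '8'])[0]
print(args.train_batch_size, args.learning_rate)  # 8 0.0002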
class FileuploadCom(XFSDownloader): __name__ = 'FileuploadCom' __type__ = 'downloader' __version__ = '0.02' __status__ = 'testing' __pattern__ = ' __config__ = [('enabled', 'bool', 'Activated', True), ('use_premium', 'bool', 'Use premium account if available', True), ('fallback', 'bool', 'Fallback to free download if premium fails', True), ('chk_filesize', 'bool', 'Check file size', True), ('max_wait', 'int', 'Reconnect if waiting time is greater than minutes', 10)] __description__ = 'Fileupload.com downloader plugin' __license__ = 'GPLv3' __authors__ = [('OzzieIsaacs', 'ozzie.fernandez.')] PLUGIN_DOMAIN = 'fileupload.com' NAME_PATTERN = '<input type="hidden" name="fname" value="(?P<N>.+?)">' SIZE_PATTERN = '</span> ((?P<S>[\\d.,]+) (?P<U>[\\w^_]+))</p>' WAIT_PATTERN = '<span class="label label-danger seconds">(\\d+)</span>' LINK_PATTERN = '<a id="download-btn" class="btn btn-sm btn-success" href="(.+?)">Click here to download</a>'
class TargetAssigner(object): def __init__(self, similarity_calc, matcher, box_coder, positive_class_weight=1.0, negative_class_weight=1.0, unmatched_cls_target=None): if (not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator)): raise ValueError('similarity_calc must be a RegionSimilarityCalculator') if (not isinstance(matcher, mat.Matcher)): raise ValueError('matcher must be a Matcher') if (not isinstance(box_coder, bcoder.BoxCoder)): raise ValueError('box_coder must be a BoxCoder') self._similarity_calc = similarity_calc self._matcher = matcher self._box_coder = box_coder self._positive_class_weight = positive_class_weight self._negative_class_weight = negative_class_weight if (unmatched_cls_target is None): self._unmatched_cls_target = tf.constant([0], tf.float32) else: self._unmatched_cls_target = unmatched_cls_target @property def box_coder(self): return self._box_coder def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None, **params): if (not isinstance(anchors, box_list.BoxList)): raise ValueError('anchors must be an BoxList') if (not isinstance(groundtruth_boxes, box_list.BoxList)): raise ValueError('groundtruth_boxes must be an BoxList') if (groundtruth_labels is None): groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(), 0)) groundtruth_labels = tf.expand_dims(groundtruth_labels, (- 1)) shape_assert = tf.assert_equal(tf.shape(groundtruth_labels)[1:], tf.shape(self._unmatched_cls_target)) with tf.control_dependencies([shape_assert]): match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes, anchors) match = self._matcher.match(match_quality_matrix, **params) reg_targets = self._create_regression_targets(anchors, groundtruth_boxes, match) cls_targets = self._create_classification_targets(groundtruth_labels, match) reg_weights = self._create_regression_weights(match) cls_weights = self._create_classification_weights(match, self._positive_class_weight, self._negative_class_weight) num_anchors = anchors.num_boxes_static() if (num_anchors is not None): reg_targets = self._reset_target_shape(reg_targets, num_anchors) cls_targets = self._reset_target_shape(cls_targets, num_anchors) reg_weights = self._reset_target_shape(reg_weights, num_anchors) cls_weights = self._reset_target_shape(cls_weights, num_anchors) return (cls_targets, cls_weights, reg_targets, reg_weights, match) def _reset_target_shape(self, target, num_anchors): target_shape = target.get_shape().as_list() target_shape[0] = num_anchors target.set_shape(target_shape) return target def _create_regression_targets(self, anchors, groundtruth_boxes, match): matched_anchor_indices = match.matched_column_indices() unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices() matched_gt_indices = match.matched_row_indices() matched_anchors = box_list_ops.gather(anchors, matched_anchor_indices) matched_gt_boxes = box_list_ops.gather(groundtruth_boxes, matched_gt_indices) matched_reg_targets = self._box_coder.encode(matched_gt_boxes, matched_anchors) unmatched_ignored_reg_targets = tf.tile(self._default_regression_target(), tf.stack([tf.size(unmatched_ignored_anchor_indices), 1])) reg_targets = tf.dynamic_stitch([matched_anchor_indices, unmatched_ignored_anchor_indices], [matched_reg_targets, unmatched_ignored_reg_targets]) return reg_targets def _default_regression_target(self): return tf.constant([(self._box_coder.code_size * [0])], tf.float32) def _create_classification_targets(self, groundtruth_labels, match): matched_anchor_indices = match.matched_column_indices() unmatched_ignored_anchor_indices = match.unmatched_or_ignored_column_indices() matched_gt_indices = match.matched_row_indices() matched_cls_targets = tf.gather(groundtruth_labels, matched_gt_indices) ones = (self._unmatched_cls_target.shape.ndims * [1]) unmatched_ignored_cls_targets = tf.tile(tf.expand_dims(self._unmatched_cls_target, 0), tf.stack(([tf.size(unmatched_ignored_anchor_indices)] + ones))) cls_targets = tf.dynamic_stitch([matched_anchor_indices, unmatched_ignored_anchor_indices], [matched_cls_targets, unmatched_ignored_cls_targets]) return cls_targets def _create_regression_weights(self, match): reg_weights = tf.cast(match.matched_column_indicator(), tf.float32) return reg_weights def _create_classification_weights(self, match, positive_class_weight=1.0, negative_class_weight=1.0): matched_indicator = tf.cast(match.matched_column_indicator(), tf.float32) ignore_indicator = tf.cast(match.ignored_column_indicator(), tf.float32) unmatched_indicator = ((1.0 - matched_indicator) - ignore_indicator) cls_weights = ((positive_class_weight * matched_indicator) + (negative_class_weight * unmatched_indicator)) return cls_weights def get_box_coder(self): return self._box_coder
@pytest.mark.parametrize('vcf_file, encoding, generate_header', [('1kg_target_chr20_38_imputed_chr20_1000.vcf', {'variant_AF': {'filters': [FixedScaleOffset(offset=0, scale=10000, dtype='f4', astype='u2')]}, 'call_DS': {'filters': [FixedScaleOffset(offset=0, scale=100, dtype='f4', astype='u1')]}, 'variant_DR2': {'filters': [FixedScaleOffset(offset=0, scale=100, dtype='f4', astype='u1')]}}, True), ('all_fields.vcf', None, True), ('CEUTrio.20.21.gatk3.4.g.vcf.bgz', None, True), ('Homo_sapiens_assembly38.headerOnly.vcf.gz', None, False), ('mixed.vcf.gz', None, True), ('no_genotypes.vcf', None, True), ('no_genotypes_with_gt_header.vcf', None, True), ('sample_multiple_filters.vcf.gz', None, True), ('sample.vcf.gz', None, True)]) @pytest.mark.filterwarnings('ignore::sgkit.io.vcf.FloatFormatFieldWarning', 'ignore::sgkit.io.vcfzarr_reader.DimensionNameForFixedFormatFieldWarning') def test_vcf_to_zarr_to_vcf__real_files(shared_datadir, tmp_path, vcf_file, encoding, generate_header): path = path_for_test(shared_datadir, vcf_file) intermediate = tmp_path.joinpath('intermediate.vcf.zarr').as_posix() output = tmp_path.joinpath('output.vcf').as_posix() kwargs = zarr_array_sizes(path) vcf_to_zarr(path, intermediate, fields=['INFO/*', 'FORMAT/*'], mixed_ploidy=True, encoding=encoding, **kwargs) zarr_to_vcf(intermediate, output) assert_vcfs_close(path, output) if generate_header: ds = load_dataset(intermediate) del ds.attrs['vcf_header'] write_vcf(ds, output)
class TestFileHandlerCalibrationBase:
    platform_id = 324
    gains_nominal = np.arange(1, 13)
    offsets_nominal = np.arange(-1, -13, -1)
    gains_gsics = [0, 0, 0, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 0]
    offsets_gsics = [0, 0, 0, -0.4, -0.5, -0.6, -0.7, -0.8, -0.9, -1.0, -1.1, 0]
    radiance_types = 2 * np.ones(12)
    scan_time = datetime(2020, 1, 1)
    external_coefs = {
        'VIS006': {'gain': 10, 'offset': -10},
        'IR_108': {'gain': 20, 'offset': -20},
        'HRV': {'gain': 5, 'offset': -5},
    }
    spectral_channel_ids = {'VIS006': 1, 'IR_108': 9, 'HRV': 12}
    expected = {
        'VIS006': {
            'counts': {'NOMINAL': xr.DataArray([[0, 10], [100, 255]], dims=('y', 'x'))},
            'radiance': {
                'NOMINAL': xr.DataArray([[np.nan, 9], [99, 254]], dims=('y', 'x')),
                'GSICS': xr.DataArray([[np.nan, 9], [99, 254]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 90], [990, 2540]], dims=('y', 'x')),
            },
            'reflectance': {
                'NOMINAL': xr.DataArray([[np.nan, 41.88985], [460.7884, 1182.2247]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 418.89853], [4607.8843, 11822.249]], dims=('y', 'x')),
            },
        },
        'IR_108': {
            'counts': {'NOMINAL': xr.DataArray([[0, 10], [100, 255]], dims=('y', 'x'))},
            'radiance': {
                'NOMINAL': xr.DataArray([[np.nan, 81], [891, 2286]], dims=('y', 'x')),
                'GSICS': xr.DataArray([[np.nan, 8.19], [89.19, 228.69]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 180], [1980, 5080]], dims=('y', 'x')),
            },
            'brightness_temperature': {
                'NOMINAL': xr.DataArray([[np.nan, 279.82318], [543.2585, 812.77167]], dims=('y', 'x')),
                'GSICS': xr.DataArray([[np.nan, 189.20985], [285.53293, 356.06668]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 335.14236], [758.6249, 1262.7567]], dims=('y', 'x')),
            },
        },
        'HRV': {
            'counts': {'NOMINAL': xr.DataArray([[0, 10], [100, 255]], dims=('y', 'x'))},
            'radiance': {
                'NOMINAL': xr.DataArray([[np.nan, 108], [1188, 3048]], dims=('y', 'x')),
                'GSICS': xr.DataArray([[np.nan, 108], [1188, 3048]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 45], [495, 1270]], dims=('y', 'x')),
            },
            'reflectance': {
                'NOMINAL': xr.DataArray([[np.nan, 415.26767], [4567.944, 11719.775]], dims=('y', 'x')),
                'EXTERNAL': xr.DataArray([[np.nan, 173.02817], [1903.31, 4883.2397]], dims=('y', 'x')),
            },
        },
    }

    @pytest.fixture(name='counts')
    def counts(self):
        return xr.DataArray([[0, 10], [100, 255]], dims=('y', 'x'))

    def _get_expected(self, channel, calibration, calib_mode, use_ext_coefs):
        if use_ext_coefs:
            return self.expected[channel][calibration]['EXTERNAL']
        return self.expected[channel][calibration][calib_mode]
from typing import Any, Callable, Hashable, List, Tuple

import torch


class TensorKey:
    """Hashable summary of a tensor: (device, dtype, shape) plus rounded stats."""

    def __init__(self, x: torch.Tensor, precision: int = 4) -> None:
        x = x.detach()
        self._key = (*self._extract_meta(x), *self._calculate_stats(x, precision))

    @staticmethod
    def _extract_meta(x: torch.Tensor) -> Tuple[Hashable, ...]:
        return (x.device, x.dtype, x.size())

    @staticmethod
    def _calculate_stats(x: torch.Tensor, precision: int) -> List[str]:
        stat_fns: Tuple[Callable[[torch.Tensor], torch.Tensor], ...] = (torch.min, torch.max, torch.norm)
        return [f'{stat_fn(x).item():.{precision}e}' for stat_fn in stat_fns]

    @property
    def key(self) -> Tuple[Hashable, ...]:
        return self._key

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, torch.Tensor):
            other = TensorKey(other)
        return self.key == other.key if isinstance(other, TensorKey) else False

    def __hash__(self) -> int:
        return hash(self.key)

    def __repr__(self) -> str:
        return str(self.key)
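# Usage sketch for TensorKey (assumes the class above is in scope): two tensors
# with the same metadata and statistics collapse to the same key, so they can
# be deduplicated in a set or dict.
t1 = torch.ones(2, 3)
t2 = torch.ones(2, 3)
assert TensorKey(t1) == TensorKey(t2)
assert hash(TensorKey(t1)) == hash(TensorKey(t2))

seen = {TensorKey(t1)}
assert TensorKey(t2) in seen  # __eq__ also accepts raw tensors via wrapping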
class ParallelAllErrorsTests(TestCase):

    def test_parallel_all_errors(self):
        exc1 = EquitableException(message='foo')
        reraise1 = partial(raise_, exc1)
        exc2 = EquitableException(message='bar')
        reraise2 = partial(raise_, exc2)
        dispatcher = ComposedDispatcher([
            TypeDispatcher({ParallelEffects: perform_parallel_async}),
            base_dispatcher,
        ])
        es = [Effect(Func(reraise1)), Effect(Constant(1)), Effect(Func(reraise2))]
        eff = parallel_all_errors(es)
        self.assertThat(
            sync_perform(dispatcher, eff),
            MatchesListwise([
                MatchesListwise([Equals(True), MatchesReraisedExcInfo(exc1)]),
                Equals((False, 1)),
                MatchesListwise([Equals(True), MatchesReraisedExcInfo(exc2)]),
            ]))
class ComplexDecoder(json.JSONDecoder):

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, *args, object_hook=self.object_hook, **kwargs)

    def object_hook(self, obj):
        if isinstance(obj, dict) and 'type' in obj and 'keys' in obj:
            return GroundingKey(grounding_type=obj['type'], keys=obj['keys'])
        elif isinstance(obj, dict) and 'value' in obj and 'from_qdmr' in obj:
            return ValueUnit(**obj)
        elif isinstance(obj, dict) and 'arg_type' in obj and 'arg' in obj:
            obj['arg'] = self.object_hook(obj['arg'])
            return QDMRStepArg(**obj)
        return obj
def test_remove_row_button():
    widget = QgridWidget(df=create_df())
    event_history = init_event_history(['row_removed', 'selection_changed'], widget=widget)
    selected_rows = [1, 2]
    widget._handle_qgrid_msg_helper({'rows': selected_rows, 'type': 'change_selection'})
    widget._handle_qgrid_msg_helper({'type': 'remove_row'})
    assert event_history == [
        {'name': 'selection_changed', 'old': [], 'new': selected_rows, 'source': 'gui'},
        {'name': 'row_removed', 'indices': selected_rows, 'source': 'gui'},
    ]
class LDSR(Unfolding_Loss):

    def __init__(self, window_length, hop_length, **kwargs):
        super().__init__(window_length, hop_length)

    def criterion(self, target_signal_hat, target_signal):
        # Project the estimate onto the target: s_target is the scaled target
        # component, and the remainder is treated as distortion
        # (an SI-SDR-style log-ratio loss).
        s_target = (
            ((target_signal_hat * target_signal).sum(-1, keepdim=True) + 1e-08)
            / ((target_signal ** 2).sum(-1, keepdim=True) + 1e-08)
        ) * target_signal
        distortion = target_signal_hat - s_target
        loss = (
            ((distortion ** 2).sum(-1) + 1e-08).log()
            - ((s_target ** 2).sum(-1) + 1e-08).log()
        )
        return loss.mean()
@attr.s(slots=True)
class RPC:
    """Rational Polynomial Coefficients used to georeference imagery."""

    height_off = attr.ib()
    height_scale = attr.ib()
    lat_off = attr.ib()
    lat_scale = attr.ib()
    line_den_coeff = attr.ib()
    line_num_coeff = attr.ib()
    line_off = attr.ib()
    line_scale = attr.ib()
    long_off = attr.ib()
    long_scale = attr.ib()
    samp_den_coeff = attr.ib()
    samp_num_coeff = attr.ib()
    samp_off = attr.ib()
    samp_scale = attr.ib()
    err_bias = attr.ib(default=None)
    err_rand = attr.ib(default=None)

    def to_dict(self):
        return attr.asdict(self)

    def to_gdal(self):
        out = {
            'HEIGHT_OFF': str(self.height_off),
            'HEIGHT_SCALE': str(self.height_scale),
            'LAT_OFF': str(self.lat_off),
            'LAT_SCALE': str(self.lat_scale),
            'LINE_DEN_COEFF': ' '.join(map(str, self.line_den_coeff)),
            'LINE_NUM_COEFF': ' '.join(map(str, self.line_num_coeff)),
            'LINE_OFF': str(self.line_off),
            'LINE_SCALE': str(self.line_scale),
            'LONG_OFF': str(self.long_off),
            'LONG_SCALE': str(self.long_scale),
            'SAMP_DEN_COEFF': ' '.join(map(str, self.samp_den_coeff)),
            'SAMP_NUM_COEFF': ' '.join(map(str, self.samp_num_coeff)),
            'SAMP_OFF': str(self.samp_off),
            'SAMP_SCALE': str(self.samp_scale),
        }
        if self.err_bias:
            out.update(ERR_BIAS=str(self.err_bias))
        if self.err_rand:
            out.update(ERR_RAND=str(self.err_rand))
        return out

    @classmethod
    def from_gdal(cls, rpcs):
        out = {}
        for key, val in rpcs.items():
            if key in {'LINE_NUM_COEFF', 'LINE_DEN_COEFF', 'SAMP_NUM_COEFF', 'SAMP_DEN_COEFF'}:
                out[key] = [float(v) for v in val.split(maxsplit=20)[:20]]
            else:
                out[key] = float(val.split(maxsplit=1)[0])
        return cls(
            err_bias=out.get('ERR_BIAS'),
            err_rand=out.get('ERR_RAND'),
            height_off=out['HEIGHT_OFF'],
            height_scale=out['HEIGHT_SCALE'],
            lat_off=out['LAT_OFF'],
            lat_scale=out['LAT_SCALE'],
            line_den_coeff=out['LINE_DEN_COEFF'],
            line_num_coeff=out['LINE_NUM_COEFF'],
            line_off=out['LINE_OFF'],
            line_scale=out['LINE_SCALE'],
            long_off=out['LONG_OFF'],
            long_scale=out['LONG_SCALE'],
            samp_den_coeff=out['SAMP_DEN_COEFF'],
            samp_num_coeff=out['SAMP_NUM_COEFF'],
            samp_off=out['SAMP_OFF'],
            samp_scale=out['SAMP_SCALE'],
        )
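# Round-trip sketch for the attrs-based RPC above (assumes `attr` is installed);
# the coefficients are dummy placeholder values, not real RPC data.
coeffs = list(range(1, 21))  # 20 polynomial coefficients
rpc = RPC(
    height_off=0.0, height_scale=1.0, lat_off=0.0, lat_scale=1.0,
    line_den_coeff=coeffs, line_num_coeff=coeffs, line_off=0.0, line_scale=1.0,
    long_off=0.0, long_scale=1.0,
    samp_den_coeff=coeffs, samp_num_coeff=coeffs, samp_off=0.0, samp_scale=1.0,
)
# to_gdal serializes everything to strings; from_gdal parses them back, so the
# attrs-generated equality holds across the round trip.
assert RPC.from_gdal(rpc.to_gdal()) == rpc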
class MockErrorDataset:

    def __init__(self, dataset):
        self.rebatch_map = {}
        self.dataset = dataset
        self.batchsize_per_replica = dataset.batchsize_per_replica

    def __getitem__(self, idx):
        batch = self.dataset[idx]
        if idx in self.rebatch_map:
            num_samples = self.rebatch_map[idx]
            if num_samples < batch['input'].size()[0]:
                batch['input'] = batch['input'][:num_samples]
                batch['target'] = batch['target'][:num_samples]
        return batch

    def __len__(self):
        return len(self.dataset)
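# Minimal sketch of how rebatch_map truncates a batch; _TinyDataset is a
# hypothetical stand-in for the real wrapped dataset, used only to illustrate.
import torch


class _TinyDataset:
    batchsize_per_replica = 4

    def __getitem__(self, idx):
        return {'input': torch.zeros(4, 3), 'target': torch.zeros(4)}

    def __len__(self):
        return 2


ds = MockErrorDataset(_TinyDataset())
ds.rebatch_map[0] = 2  # pretend batch 0 only produced 2 samples
assert ds[0]['input'].size(0) == 2
assert ds[1]['input'].size(0) == 4  # untouched batches keep their full size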
import datetime

# Assumed module-level constant used by dst() below (timezone has no DST).
ZERO = datetime.timedelta(0)


class FixedOffsetTimezone(datetime.tzinfo):

    def __init__(self, offset: float, name: str | None = None) -> None:
        self._offset = datetime.timedelta(minutes=offset)
        if name is None:
            name = 'Etc/GMT%+d' % offset
        self.zone = name

    def __str__(self) -> str:
        return self.zone

    def __repr__(self) -> str:
        return f'<FixedOffset "{self.zone}" {self._offset}>'

    def utcoffset(self, dt: datetime.datetime) -> datetime.timedelta:
        return self._offset

    def tzname(self, dt: datetime.datetime) -> str:
        return self.zone

    def dst(self, dt: datetime.datetime) -> datetime.timedelta:
        return ZERO
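# Brief usage sketch of the tzinfo subclass above: a fixed +330-minute offset
# (IST-like), attached to an aware datetime.
ist = FixedOffsetTimezone(330, name='IST')
dt = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=ist)
assert dt.utcoffset() == datetime.timedelta(minutes=330)
assert dt.tzname() == 'IST'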
def add_send_to_generator_class(builder: IRBuilder, fn_info: FuncInfo, fn_decl: FuncDecl, sig: FuncSignature) -> None:
    with builder.enter_method(fn_info.generator_class.ir, 'send', object_rprimitive, fn_info):
        arg = builder.add_argument('arg', object_rprimitive)
        none_reg = builder.none_object()
        result = builder.add(
            Call(fn_decl,
                 [builder.self(), none_reg, none_reg, none_reg, builder.read(arg)],
                 fn_info.fitem.line))
        builder.add(Return(result))
def parse_diff(diff):
    hunks = []
    hunk = None
    for line in diff:
        # The hunk-header prefix was lost in extraction; unified-diff hunk
        # headers start with '@@', which is assumed here.
        if line.startswith('@@'):
            if hunk:
                hunks.append(hunk)
            hunk = DiffHunk(line)
        elif hunk is not None:
            hunk.append(line)
    if hunk:
        hunks.append(hunk)
    return hunks
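# Hypothetical smoke test for parse_diff. DiffHunk is stubbed here because its
# real definition lives elsewhere in the project; the stub only needs a
# constructor taking the header line and an append method.
class DiffHunk(list):
    def __init__(self, header):
        super().__init__()
        self.header = header


lines = [
    '--- a/f.txt', '+++ b/f.txt',          # preamble, skipped (no hunk yet)
    '@@ -1,2 +1,2 @@', '-old', '+new',     # first hunk
    '@@ -10,1 +10,1 @@', ' ctx',           # second hunk
]
hunks = parse_diff(lines)
assert len(hunks) == 2 and hunks[0].header.startswith('@@')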
class BertLMHead(OptimusModule):

    def __init__(self, mpu_vocab_size, hidden_size, init_method, layernorm_epsilon, parallel_output):
        super(BertLMHead, self).__init__()
        args = get_args()
        self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
        self.bias.model_parallel = True
        self.bias.partition_dim = 0
        self.bias.stride = 1
        self.parallel_output = parallel_output
        self.dense = get_linear_layer(hidden_size, hidden_size, init_method)
        self.layernorm = LayerNorm(hidden_size, eps=layernorm_epsilon)
        self.gelu = torch.nn.functional.gelu
        if args.openai_gelu:
            self.gelu = openai_gelu
        elif args.onnx_safe:
            self.gelu = erf_gelu

    def forward(self, hidden_states, word_embeddings_weight):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.gelu(hidden_states)
        hidden_states = self.layernorm(hidden_states)
        output = parallel_lm_logits(hidden_states, word_embeddings_weight, self.parallel_output, bias=self.bias)
        return output
class PFSFeedbackEventHandler(RaidenEventHandler):

    def __init__(self, wrapped_handler: EventHandler) -> None:
        self.wrapped = wrapped_handler

    def on_raiden_events(self, raiden: 'RaidenService', chain_state: ChainState, events: List[Event]) -> None:
        for event in events:
            if type(event) == EventRouteFailed:
                assert isinstance(event, EventRouteFailed), MYPY_ANNOTATION
                self.handle_routefailed(raiden, event)
            elif type(event) == EventPaymentSentSuccess:
                assert isinstance(event, EventPaymentSentSuccess), MYPY_ANNOTATION
                self.handle_paymentsentsuccess(raiden, event)
        self.wrapped.on_raiden_events(raiden, chain_state, events)

    @staticmethod
    def handle_routefailed(raiden: 'RaidenService', route_failed_event: EventRouteFailed) -> None:
        feedback_token = raiden.route_to_feedback_token.get(tuple(route_failed_event.route))
        pfs_config = raiden.config.pfs_config
        if feedback_token and pfs_config:
            log.debug(
                'Received event for failed route',
                route=[to_checksum_address(node) for node in route_failed_event.route],
                secrethash=encode_hex(route_failed_event.secrethash),
                feedback_token=feedback_token,
            )
            post_pfs_feedback(
                routing_mode=raiden.routing_mode,
                pfs_config=pfs_config,
                token_network_address=route_failed_event.token_network_address,
                route=route_failed_event.route,
                token=feedback_token,
                successful=False,
            )

    @staticmethod
    def handle_paymentsentsuccess(raiden: 'RaidenService', payment_sent_success_event: EventPaymentSentSuccess) -> None:
        feedback_token = raiden.route_to_feedback_token.get(tuple(payment_sent_success_event.route))
        pfs_config = raiden.config.pfs_config
        if feedback_token and pfs_config:
            log.debug(
                'Received payment success event',
                route=[to_checksum_address(node) for node in payment_sent_success_event.route],
                feedback_token=feedback_token,
            )
            post_pfs_feedback(
                routing_mode=raiden.routing_mode,
                pfs_config=pfs_config,
                token_network_address=payment_sent_success_event.token_network_address,
                route=payment_sent_success_event.route,
                token=feedback_token,
                successful=True,
            )
class EfficientNetEncoder(nn.Module):

    def __init__(self, config: EfficientNetConfig):
        super().__init__()
        self.config = config
        self.depth_coefficient = config.depth_coefficient

        def round_repeats(repeats):
            return int(math.ceil(self.depth_coefficient * repeats))

        num_base_blocks = len(config.in_channels)
        num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)
        curr_block_num = 0
        blocks = []
        for i in range(num_base_blocks):
            in_dim = round_filters(config, config.in_channels[i])
            out_dim = round_filters(config, config.out_channels[i])
            stride = config.strides[i]
            kernel_size = config.kernel_sizes[i]
            expand_ratio = config.expand_ratios[i]
            for j in range(round_repeats(config.num_block_repeats[i])):
                id_skip = j == 0
                stride = 1 if j > 0 else stride
                in_dim = out_dim if j > 0 else in_dim
                adjust_padding = curr_block_num not in config.depthwise_padding
                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
                block = EfficientNetBlock(
                    config=config,
                    in_dim=in_dim,
                    out_dim=out_dim,
                    stride=stride,
                    kernel_size=kernel_size,
                    expand_ratio=expand_ratio,
                    drop_rate=drop_rate,
                    id_skip=id_skip,
                    adjust_padding=adjust_padding,
                )
                blocks.append(block)
                curr_block_num += 1
        self.blocks = nn.ModuleList(blocks)
        self.top_conv = nn.Conv2d(
            in_channels=out_dim,
            out_channels=round_filters(config, 1280),
            kernel_size=1,
            padding='same',
            bias=False,
        )
        self.top_bn = nn.BatchNorm2d(num_features=config.hidden_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.top_activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True) -> BaseModelOutputWithNoAttention:
        all_hidden_states = (hidden_states,) if output_hidden_states else None
        for block in self.blocks:
            hidden_states = block(hidden_states)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
        hidden_states = self.top_conv(hidden_states)
        hidden_states = self.top_bn(hidden_states)
        hidden_states = self.top_activation(hidden_states)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
from dataclasses import dataclass


@dataclass
class KJTSplitsAllToAllMeta:
    pg: dist.ProcessGroup
    _input: KeyedJaggedTensor
    splits: List[int]
    splits_tensors: List[torch.Tensor]
    input_splits: List[List[int]]
    input_tensors: List[torch.Tensor]
    labels: List[str]
    keys: List[str]
    device: torch.device
    stagger: int
    splits_cumsum: List[int]
from collections import OrderedDict


def parse_inp_section_config(raw_conf):
    """Normalize a raw INP-section config into an OrderedDict with a 'columns' entry."""
    conf = OrderedDict()
    if isinstance(raw_conf, list):
        conf['columns'] = raw_conf
    elif isinstance(raw_conf, (dict, OrderedDict)):
        if 'keys' in raw_conf:
            conf.update(raw_conf)
            conf['columns'] = ['Key', 'Value']
        else:
            conf.update(raw_conf)
    return conf
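# Hedged examples of the two accepted input shapes (a plain column list, and a
# dict containing 'keys'); the field names are illustrative only.
assert parse_inp_section_config(['Name', 'Elev'])['columns'] == ['Name', 'Elev']

conf = parse_inp_section_config({'keys': ['ROUTING_STEP'], 'sep': ' '})
assert conf['columns'] == ['Key', 'Value']  # key/value sections get fixed columns
assert conf['keys'] == ['ROUTING_STEP']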