code
stringlengths
281
23.7M
def test_create_lane_links_normalroad1():
    """Build a 3-road chain (line, arc, line), auto-adjust it, and verify the
    lane links that pyodrx generates between consecutive roads."""
    planview = []
    lanec = []
    lanel = []
    laner = []
    lanesec = []
    lanes = []
    rm = pyodrx.RoadMark(pyodrx.RoadMarkType.solid, 0.2, rule=pyodrx.MarkRule.no_passing)
    # One geometry per road: straight 50m, quarter arc, straight 50m.
    geom = []
    geom.append(pyodrx.Line(50))
    geom.append(pyodrx.Arc(0.01, angle=(np.pi / 2)))
    geom.append(pyodrx.Line(50))
    for i in range(len(geom)):
        planview.append(pyodrx.PlanView())
        planview[i].add_geometry(geom[i])
    # Per road: one center lane plus one lane on each side, all 3m wide.
    for i in range(len(geom)):
        lanec.append(pyodrx.Lane(a=3))
        lanel.append(pyodrx.Lane(a=3))
        laner.append(pyodrx.Lane(a=3))
    for i in range(len(geom)):
        lanec[i].add_roadmark(rm)
        lanel[i].add_roadmark(rm)
        laner[i].add_roadmark(rm)
    for i in range(len(geom)):
        lanesec.append(pyodrx.LaneSection(0, lanec[i]))
        # NOTE(review): 'lanel' goes to the right and 'laner' to the left --
        # names look swapped, but only the lane counts matter to this test.
        lanesec[i].add_right_lane(lanel[i])
        lanesec[i].add_left_lane(laner[i])
    for i in range(len(geom)):
        lanes.append(pyodrx.Lanes())
        lanes[i].add_lanesection(lanesec[i])
    # Chain the roads 1 -> 2 -> 3 via successor/predecessor links.
    road1 = pyodrx.Road(1, planview[0], lanes[0])
    road1.add_successor(pyodrx.ElementType.road, 2, pyodrx.ContactPoint.start)
    road2 = pyodrx.Road(2, planview[1], lanes[1])
    road2.add_predecessor(pyodrx.ElementType.road, 1, pyodrx.ContactPoint.end)
    road2.add_successor(pyodrx.ElementType.road, 3, pyodrx.ContactPoint.start)
    road3 = pyodrx.Road(3, planview[2], lanes[2])
    road3.add_predecessor(pyodrx.ElementType.road, 2, pyodrx.ContactPoint.end)
    odr = pyodrx.OpenDrive('myroad')
    odr.add_road(road1)
    odr.add_road(road2)
    odr.add_road(road3)
    # Computes geometry offsets and creates the lane links checked below.
    odr.adjust_roads_and_lanes()
    # road1 is first in the chain: no predecessors, successors into road2.
    assert (road1.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id() == None)
    assert (int(road1.lanes.lanesections[0].rightlanes[0].links.get_successor_id()) == (- 1))
    assert (road1.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id() == None)
    assert (int(road1.lanes.lanesections[0].leftlanes[0].links.get_successor_id()) == 1)
    # road2 is in the middle: links on both ends (right lane id -1, left id 1).
    assert (int(road2.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id()) == (- 1))
    assert (int(road2.lanes.lanesections[0].rightlanes[0].links.get_successor_id()) == (- 1))
    assert (int(road2.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id()) == 1)
    assert (int(road2.lanes.lanesections[0].leftlanes[0].links.get_successor_id()) == 1)
    # road3 is last: predecessors only, no successors.
    assert (int(road3.lanes.lanesections[0].rightlanes[0].links.get_predecessor_id()) == (- 1))
    assert (road3.lanes.lanesections[0].rightlanes[0].links.get_successor_id() == None)
    assert (int(road3.lanes.lanesections[0].leftlanes[0].links.get_predecessor_id()) == 1)
    assert (road3.lanes.lanesections[0].leftlanes[0].links.get_successor_id() == None)
    # Finally the whole document must validate against the xodr schema.
    assert (version_validation(None, odr, wanted_schema='xodr') == ValidationResponse.OK)
def _pickup_assignment_to_item_locations(region_list: RegionList, pickup_assignment: PickupAssignment, num_players: int) -> dict[(str, dict[(str, str)])]:
    """Map every pickup location to the name of the item placed there.

    Returns ``{region name: {location name: item name}}`` with both levels
    sorted alphabetically; locations with no assignment get ``_ETM_NAME``.
    """
    items_locations: collections.defaultdict[(str, dict[(str, str)])] = collections.defaultdict(dict)
    for (region, area, node) in region_list.all_regions_areas_nodes:
        # Only resource nodes that are pickup nodes can hold items.
        if ((not node.is_resource_node) or (not isinstance(node, PickupNode))):
            continue
        if (node.pickup_index in pickup_assignment):
            target = pickup_assignment[node.pickup_index]
            if (num_players > 1):
                # Multiworld: qualify with the owner (player index shown 1-based).
                item_name = f'{target.pickup.name} for Player {(target.player + 1)}'
            else:
                item_name = f'{target.pickup.name}'
        else:
            item_name = _ETM_NAME
        items_locations[region.correct_name(area.in_dark_aether)][region_list.node_name(node)] = item_name
    # Sort regions and, within each region, the location names for stable output.
    return {region: dict(sorted(items_locations[region].items())) for region in sorted(items_locations.keys())}
class PopcornPopper():
    """A remote-controllable popcorn popper (Head First style device class).

    Attributes:
        description: Human-readable name printed by every action.
    """
    description: str

    def __init__(self, description: str):
        self.description = description

    def on(self) -> None:
        """Switch the popper on (prints a status line)."""
        print(f'{self.description} on')

    def off(self) -> None:
        """Switch the popper off (prints a status line)."""
        print(f'{self.description} off')

    def pop(self) -> None:
        """Start popping popcorn (prints a status line)."""
        print(f'{self.description} popping popcorn!')

    def toString(self) -> str:
        # Fixed: was annotated `-> None` although it returns the description.
        return self.description
class EnsurePackagesDiscovered():
    """Context manager that lazily triggers package auto-discovery on a
    distribution, running it at most once, and finishes the derived name
    analysis on exit only if discovery actually ran."""

    def __init__(self, distribution: 'Distribution'):
        self._dist = distribution
        self._called = False  # guard: discovery must run at most once

    def __call__(self):
        # Idempotent trigger: first call runs set_defaults, later calls no-op.
        if (not self._called):
            self._called = True
            # name=False: defer name inference; it is completed in __exit__.
            self._dist.set_defaults(name=False)

    def __enter__(self):
        return self

    def __exit__(self, _exc_type, _exc_value, _traceback):
        # Only analyse the name if discovery was actually triggered.
        if self._called:
            self._dist.set_defaults.analyse_name()

    def _get_package_dir(self) -> Mapping[(str, str)]:
        # Force discovery before reading package_dir so the mapping is final.
        self()
        pkg_dir = self._dist.package_dir
        return ({} if (pkg_dir is None) else pkg_dir)

    def package_dir(self) -> Mapping[(str, str)]:
        # NOTE(review): likely a @property upstream (decorator may have been
        # lost in this copy) -- returns a lazy proxy so discovery is deferred
        # until the mapping is actually read; confirm against the original.
        return LazyMappingProxy(self._get_package_dir)
class GodelTNormSolver(TNormSolver):
    """Godel t-norm semantics: conjunction is min, disjunction is max."""

    def gettnorm(self, args, function, probs):
        """Return the fuzzy value of `function` applied to the operand
        values produced by ``self.base_tnorm``.

        Dispatches on the torch operation object via a dict of lambdas;
        `lv`/`rv` are the left/right operand truth values (for variadic ops,
        `rv` is a sequence of operands).
        """
        # Reductions: `dim=None` means reduce over all elements.
        def AND(t, dim):
            return (t.min(dim)[0] if (dim is not None) else t.min())

        def OR(t, dim):
            return (t.max(dim)[0] if (dim is not None) else t.max())

        (tnorm_dict, lv, rv) = self.base_tnorm(args, function, probs)
        # Merge the shared base table with the Godel-specific entries, then
        # immediately evaluate the lambda selected by `function`.
        return {**tnorm_dict,
                torch.Tensor.logical_and: (lambda : torch.min(lv, rv)),
                torch.Tensor.logical_or: (lambda : torch.max(lv, rv)),
                torch.logical_and: (lambda : torch.min(rv[0], rv[1])),
                torch.logical_or: (lambda : torch.max(rv[0], rv[1])),
                # Equality as overlap of distributions over the last axis.
                torch.eq: (lambda : (lv * rv).sum(dim=(- 1))),
                torch.ne: (lambda : (1 - (lv * rv)).sum(dim=(- 1))),
                torch.Tensor.__getitem__: (lambda : lv[rv]),
                # le/ge: 1 - relu(difference) gives a soft comparison in [0, 1].
                torch.le: (lambda : (1 - torch.relu((lv - rv)))),
                torch.ge: (lambda : (1 - torch.relu((rv - lv)))),
                torch.Tensor.all: (lambda : AND(lv, rv)),
                torch.Tensor.any: (lambda : OR(lv, rv)),
                torch.all: (lambda : AND(rv[0], rv[1])),
                torch.any: (lambda : OR(rv[0], rv[1]))}[function]()
def get_bn_params(model: ModelProto, bn: NodeProto, channels: int) -> libpymo.BNParams:
    """Extract BatchNorm parameters from ONNX node `bn` into a
    ``libpymo.BNParams``, tiling each per-channel vector up to `channels`.

    Note: runningVar is stored as sigma = sqrt(var + epsilon), not variance.
    """
    bn_params = libpymo.BNParams()
    gamma = numpy_helper.to_array(ParamUtils.get_param(model, bn, WEIGHT_INDEX)).reshape((- 1))
    # Tile factor so BN vectors match the target channel count.
    # NOTE(review): this is a float; np.repeat expects integer repeats --
    # assumes `channels` is an exact multiple of len(gamma). TODO confirm.
    resize = (channels / len(gamma))
    bn_params.gamma = np.repeat(gamma, resize)
    bn_params.beta = np.repeat(numpy_helper.to_array(ParamUtils.get_param(model, bn, BIAS_INDEX)).reshape((- 1)), resize)
    bn_params.runningMean = np.repeat(numpy_helper.to_array(ParamUtils.get_param(model, bn, RUNNING_MEAN_INDEX)).reshape((- 1)), resize)
    runningVar = numpy_helper.to_array(ParamUtils.get_param(model, bn, RUNNING_VAR_INDEX))
    epsilon = get_node_attribute(bn, 'epsilon')
    # Pre-fold epsilon into the standard deviation.
    sigma = np.sqrt((runningVar + epsilon))
    bn_params.runningVar = np.repeat(sigma.reshape((- 1)), resize)
    return bn_params
# NOTE(review): stray statement -- almost certainly a mangled decorator such
# as `@map_new_faces(MaterialGroup.WALLS)` whose prefix was lost; confirm
# against the original module before relying on this line.
_new_faces(MaterialGroup.WALLS)
def create_window_split(bm, face, prop):
    """Split `face` into a 3x3 grid and return the centre face: a window of
    prop.size, shifted from the wall centre by prop.offset."""
    (wall_w, wall_h) = calc_face_dimensions(face)
    (width, height, offset) = (*prop.size, prop.offset)
    # Horizontal widths: [left margin, window width, right margin].
    h_widths = [(((wall_w / 2) - offset.x) - (width / 2)), width, (((wall_w / 2) + offset.x) - (width / 2))]
    h_faces = subdivide_face_horizontally(bm, face, h_widths)
    # Vertical split of the middle strip: [bottom margin, window height, top margin].
    v_width = [(((wall_h / 2) + offset.y) - (height / 2)), height, (((wall_h / 2) - offset.y) - (height / 2))]
    v_faces = subdivide_face_vertically(bm, h_faces[1], v_width)
    return v_faces[1]
def _version_logger(save_dir, logger_name=''): if logger_name: path = os.path.join(save_dir, logger_name) else: path = save_dir if ((not os.path.exists(path)) or (not os.listdir(path))): version = 0 else: try: versions = [int(v.split('_')[(- 1)]) for v in os.listdir(path)] version = (max(versions) + 1) except: version = 0 return version
def get_random_ddf(chunk_size, num_chunks, frac_match, chunk_type, args):
    """Build a dask dataframe of `num_chunks` random partitions of
    `chunk_size` rows each, generated lazily by `generate_chunk`.

    For chunk_type == 'build': either shuffled via set_index('shuffle') or,
    with --no-shuffle, the helper 'shuffle' column is dropped.
    """
    parts = [chunk_size for _ in range(num_chunks)]
    device_type = (True if (args.type == 'gpu') else False)
    # Tiny sample chunk used only as dask metadata (schema/dtypes).
    meta = generate_chunk(0, 4, 1, chunk_type, None, device_type)
    divisions = ([None] * (len(parts) + 1))
    # Deterministic task-name token so identical inputs share a graph key.
    name = ('generate-data-' + tokenize(chunk_size, num_chunks, frac_match, chunk_type))
    # One delayed generate_chunk call per partition.
    graph = {(name, i): (generate_chunk, i, part, len(parts), chunk_type, frac_match, device_type) for (i, part) in enumerate(parts)}
    ddf = new_dd_object(graph, name, meta, divisions)
    if (chunk_type == 'build'):
        if (not args.no_shuffle):
            # Known divisions 0..num_chunks make set_index cheap.
            divisions = ([i for i in range(num_chunks)] + [num_chunks])
            return ddf.set_index('shuffle', divisions=tuple(divisions))
        else:
            del ddf['shuffle']
    return ddf
class DebertaV2Tokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for DeBERTa-v2, delegating the actual
    tokenization to an internal ``SPMTokenizer``."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=False, split_by_punct=False, bos_token='[CLS]', eos_token='[SEP]', unk_token='[UNK]', sep_token='[SEP]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', sp_model_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> None:
        self.sp_model_kwargs = ({} if (sp_model_kwargs is None) else sp_model_kwargs)
        super().__init__(do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        if (not os.path.isfile(vocab_file)):
            raise ValueError(f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self._tokenizer = SPMTokenizer(vocab_file, split_by_punct=split_by_punct, sp_model_kwargs=self.sp_model_kwargs)

    # NOTE(review): vocab_size/vocab read like @property accessors and are
    # used as attributes below (self.vocab, self.vocab_size) -- the decorators
    # appear lost in this copy; confirm against the upstream source.
    def vocab_size(self):
        return len(self.vocab)

    def vocab(self):
        return self._tokenizer.vocab

    def get_vocab(self):
        # Base vocabulary plus any tokens added after loading.
        vocab = self.vocab.copy()
        vocab.update(self.get_added_vocab())
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize `text` into SentencePiece pieces (lowercasing first if configured)."""
        if self.do_lower_case:
            text = text.lower()
        return self._tokenizer.tokenize(text)

    def _convert_token_to_id(self, token):
        return self._tokenizer.spm.PieceToId(token)

    def _convert_id_to_token(self, index):
        # Out-of-range ids map to the unknown token.
        return (self._tokenizer.spm.IdToPiece(index) if (index < self.vocab_size) else self.unk_token)

    def convert_tokens_to_string(self, tokens):
        return self._tokenizer.decode(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]."""
        if (token_ids_1 is None):
            return (([self.cls_token_id] + token_ids_0) + [self.sep_token_id])
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return ((((cls + token_ids_0) + sep) + token_ids_1) + sep)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if (token_ids_1 is not None):
            return (((([1] + ([0] * len(token_ids_0))) + [1]) + ([0] * len(token_ids_1))) + [1])
        return (([1] + ([0] * len(token_ids_0))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if (token_ids_1 is None):
            return (len(((cls + token_ids_0) + sep)) * [0])
        return ((len(((cls + token_ids_0) + sep)) * [0]) + (len((token_ids_1 + sep)) * [1]))

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        # Optionally prepend a space so the first word is tokenized like a
        # mid-sentence word.
        add_prefix_space = kwargs.pop('add_prefix_space', False)
        if (is_split_into_words or add_prefix_space):
            text = (' ' + text)
        return (text, kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        return self._tokenizer.save_pretrained(save_directory, filename_prefix=filename_prefix)
class Scope():
    """Factory/configuration scope for columns and dataframes.

    Holds a config (device, tracing) and provides the low-level constructors
    (_Column, _DataFrame, ...) that dispatch on dtype typecode and device.

    NOTE(review): several methods below (device, tracing, the _Column/_DataFrame
    family) read like @property/@staticmethod definitions whose decorators were
    lost in this copy -- confirm against the upstream source.
    """

    default: ty.ClassVar['Scope']
    # Base config, overlaid by any user-supplied dict or JSON file.
    default_config: ty.Dict = {'device': 'cpu', 'tracing': False, 'types_to_trace': []}

    def __init__(self, config: ty.Union[(dict, str, None)]=None):
        if (config is None):
            self.config = type(self).default_config
        elif isinstance(config, str):
            # A string config is a path to a JSON file overlaying the defaults.
            path = config
            self.config = {**type(self).default_config, **json.load(open(path))}
        elif isinstance(config, dict):
            self.config = {**type(self).default_config, **config}
        self.ct = Counter()
        self.id = 's0'
        self._scope = self
        tracing = ty.cast(bool, self.config['tracing'])
        types = ty.cast(ty.Iterable[ty.Type], self.config['types_to_trace'])
        self.trace = Trace(tracing, tuple(types))

    def device(self):
        return self.config['device']

    def tracing(self):
        return self.config['tracing']

    def is_same(self, other):
        # Scopes compare by identity, not by config equality.
        return (id(self) == id(other))

    def check_is_same(self, other):
        if ((id(self) != id(other)) or (self.device != other.device)):
            raise TypeError('scope and device must be the same')

    def check_are_same(self, others):
        if (not all(((self.is_same(other) and (self.device == other.device)) for other in others))):
            raise TypeError('scope and device must be the same')

    def _require_column_constructors_to_be_registered():
        # Importing these modules registers their constructors with Dispatcher
        # as a side effect; the imported names themselves are unused.
        from .idataframe import dataframe
        from .ilist_column import ListColumn
        from .imap_column import MapColumn
        from .istring_column import StringColumn
        from .velox_rt import NumericalColumnCpu

    def _EmptyColumn(dtype, device=''):
        """Create an empty, appendable column for `dtype` on `device`."""
        Scope._require_column_constructors_to_be_registered()
        device = (device or Scope.default.device)
        call = Dispatcher.lookup(((dtype.typecode + '_empty'), device))
        return call(device, dtype)

    def _FullColumn(data, dtype, device='', mask=None):
        """Create a column directly from fully-materialized `data`."""
        Scope._require_column_constructors_to_be_registered()
        device = (device or Scope.default.device)
        call = Dispatcher.lookup(((dtype.typecode + '_full'), device))
        return call(device, data, dtype, mask)

    def _FromPySequence(data: ty.Sequence, dtype: dt.DType, device=''):
        """Create a column from a Python sequence (no null checking)."""
        Scope._require_column_constructors_to_be_registered()
        device = (device or Scope.default.device)
        call = Dispatcher.lookup(((dtype.typecode + '_from_pysequence'), device))
        return call(device, data, dtype)

    def null_check_from_pysequence(data: ty.Sequence, dtype: dt.DType, device=''):
        """Like _FromPySequence, but rejects None entries for non-nullable dtypes."""
        result = Scope._FromPySequence(data, dtype, device)
        if ((not dtype.nullable) and (result.null_count != 0)):
            raise ValueError(f'None found in the list for non-nullable type: {dtype}')
        return result

    def _Column(data=None, dtype: ty.Optional[dt.DType]=None, device: Device=''):
        """Generic column constructor: accepts lists/tuples, existing columns,
        iterables, or just a dtype (empty column), inferring dtype as needed."""
        device = (device or Scope.default.device)
        if ((data is None) and (dtype is None)):
            raise TypeError(f'Column requires data and/or dtype parameter {data} {dtype}')
        if (isinstance(data, dt.DType) and isinstance(dtype, dt.DType)):
            raise TypeError('Column can only have one dtype parameter')
        # Allow Column(dtype) positionally: swap into the dtype slot.
        if isinstance(data, dt.DType):
            (data, dtype) = (dtype, data)
        if (isinstance(data, ty.List) or isinstance(data, ty.Tuple)):
            # Infer from the first few elements when no dtype was given.
            dtype = (dtype or dt.infer_dtype_from_prefix(data[:7]))
            if ((dtype is None) or dt.is_any(dtype)):
                raise ValueError('Column cannot infer type from data')
            if dt.contains_tuple(dtype):
                raise TypeError('Cannot infer type from nested Python tuple')
            return Scope.null_check_from_pysequence(data, dtype, device)
        if Scope._is_column(data):
            dtype = (dtype or data.dtype)
            if ((data.device == device) and (data.dtype == dtype)):
                return data  # already matches: no copy
            else:
                # Round-trip through a Python list to change device/dtype.
                return ta.from_pysequence(data.to_pylist(), dtype=dtype, device=device)
        if (data is not None):
            warnings.warn('Constructing column from non Python list/Column may result in degenerated performance')
        if isinstance(dtype, dt.DType):
            # Known dtype: build empty then append element by element.
            col = Scope._EmptyColumn(dtype, device)
            if (data is not None):
                for i in data:
                    col._append(i)
            return col._finalize()
        if (data is not None):
            if isinstance(data, ty.Iterable):
                # Unknown dtype: peek at a prefix of the iterator to infer it,
                # then append the prefix followed by the rest of the stream.
                data = iter(data)
                prefix = []
                for (i, v) in enumerate(data):
                    prefix.append(v)
                    if (i > 5):
                        break
                dtype = dt.infer_dtype_from_prefix(prefix)
                if ((dtype is None) or dt.is_any(dtype)):
                    raise ValueError('Column cannot infer type from data')
                if dt.is_tuple(dtype):
                    raise TypeError('Column cannot be used to created structs, use Dataframe constructor instead')
                col = Scope._EmptyColumn(dtype, device=device)
                for p in prefix:
                    col._append(p)
                for i in data:
                    col._append(i)
                return col._finalize()
            else:
                raise TypeError(f'data parameter of ty.Iterable type expected (got {type(dtype).__name__})')
        else:
            raise AssertionError('unexpected case')

    def _DataFrame(data=None, dtype=None, columns=None, device=''):
        """Generic dataframe constructor: accepts None, a dtype, another
        dataframe, sequences of tuples, or mappings of column name -> data."""
        if ((data is None) and (dtype is None)):
            assert (columns is None)
            # Completely empty dataframe.
            return Scope._EmptyColumn(dt.Struct([]), device=device)._finalize()
        if ((data is not None) and isinstance(data, dt.DType)):
            # Allow DataFrame(dtype) positionally.
            if ((dtype is not None) and isinstance(dtype, dt.DType)):
                raise TypeError('Dataframe can only have one dtype parameter')
            dtype = data
            data = None
        if Scope._is_dataframe(data):
            dtype = (dtype or data.dtype)
            if ((data.device == device) and (data.dtype == dtype)):
                return data  # already matches: no copy
            else:
                # Re-cast column by column after checking field-name agreement.
                dtype_fields = {f.name for f in dtype.fields}
                data_fields = {f.name for f in data.dtype.fields}
                if (dtype_fields != data_fields):
                    raise TypeError(f'data fields are {data_fields} while dtype fields are {dtype_fields}')
                res = {n: Scope._Column(data[n], dtype=dtype.get(n), device=device) for n in data.columns}
                return Scope._DataFrame(res, dtype=dtype, device=device)
        if (dtype is not None):
            if (not dt.is_struct(dtype)):
                raise TypeError(f'Dataframe takes a dt.Struct dtype as parameter (got {dtype})')
            dtype = ty.cast(dt.Struct, dtype)
            if (data is None):
                return Scope._EmptyColumn(dtype, device=device)._finalize()
            elif isinstance(data, ty.Sequence):
                return Scope._Column(data, dtype, device)
            elif isinstance(data, ty.Mapping):
                # Mapping + explicit dtype: validate every column against the
                # declared field types.
                res = {}
                dtype_fields = {f.name: f.dtype for f in dtype.fields}
                if (len(data) != len(dtype_fields)):
                    raise TypeError(f'''dtype provides {len(dtype.fields)} fields: {dtype_fields.keys()} but data only provides {len(data)} fields: {data.keys()} ''')
                for (n, c) in data.items():
                    if (n not in dtype_fields):
                        raise AttributeError(f'Column {n} is present in the data but absent in explicitly provided dtype')
                    if Scope._is_column(c):
                        if (c.dtype != dtype_fields[n]):
                            raise TypeError(f'Wrong type for column {n}: dtype specifies {dtype_fields[n]} while column of {c.dtype} is provided')
                    else:
                        c = Scope._Column(c, dtype_fields[n])
                    res[n] = c
                return Scope._FullColumn(res, dtype)
            else:
                raise TypeError(f'Dataframe does not support constructor for data of type {type(data).__name__}')
        if (data is not None):
            if isinstance(data, ty.Sequence):
                # Sequence of rows without dtype: infer a tuple type from a
                # prefix, then derive column names from `columns` or from the
                # first namedtuple row's _fields.
                prefix = []
                for (i, v) in enumerate(data):
                    prefix.append(v)
                    if (i > 5):
                        break
                dtype = dt.infer_dtype_from_prefix(prefix)
                if ((dtype is None) or (not dt.is_tuple(dtype))):
                    raise TypeError('Dataframe cannot infer struct type from data')
                dtype = ty.cast(dt.Tuple, dtype)
                if (columns is not None):
                    if (len(dtype.fields) != len(columns)):
                        raise TypeError('Dataframe column length must equal row length')
                else:
                    first_tuple_columns = None
                    # Only the first non-None row is inspected for _fields.
                    for v in data:
                        if (v is not None):
                            if hasattr(v, '_fields'):
                                first_tuple_columns = v._fields
                            break
                    if (first_tuple_columns is None):
                        raise TypeError('DataFrame construction from tuples requires dtype or columns to be given')
                    columns = list(first_tuple_columns)
                dtype = dt.Struct([dt.Field(n, t) for (n, t) in zip(columns, dtype.fields)])
                return Scope._Column(data, dtype, device)
            elif isinstance(data, ty.Mapping):
                # Mapping without dtype: build each column, then assemble the
                # struct dtype from the resulting column dtypes.
                res = {}
                for (n, c) in data.items():
                    if Scope._is_column(c):
                        res[n] = c
                    elif isinstance(c, ty.Sequence):
                        res[n] = Scope._Column(c, device=device)
                    else:
                        raise TypeError(f'dataframe does not support constructor for column data of type {type(c).__name__}')
                return Scope._FullColumn(res, dtype=dt.Struct([dt.Field(n, c.dtype) for (n, c) in res.items()]), device=device)
            else:
                raise TypeError(f'dataframe does not support constructor for data of type {type(data).__name__}')
        else:
            raise AssertionError('unexpected case')

    def _is_column(c):
        # Duck-typed check to avoid importing the column classes here.
        return ((c is not None) and hasattr(c, '_dtype') and hasattr(c, '_device'))

    def _is_dataframe(c):
        # A dataframe is a column that additionally exposes `columns`.
        return ((c is not None) and hasattr(c, '_dtype') and hasattr(c, '_device') and hasattr(c, 'columns'))
class HVT(nn.Module):
    """Hierarchical Vision Transformer: a ViT backbone whose token sequence
    is periodically shortened with 1-D max pooling between blocks.

    The interface matches timm-style ViTs (forward_features / forward,
    get_classifier / reset_classifier, no_weight_decay).

    NOTE(review): reads the module-level `args` object (pool_block_width,
    pool_kernel_size) -- confirm it is defined before instantiation.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4.0, qkv_bias=False, qk_scale=None, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, norm_layer=nn.LayerNorm, **kwargs):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        self.num_patches = self.patch_embed.num_patches
        # Learned position embedding; no CLS token (features are mean-pooled).
        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim))
        self.pos_drop = nn.Dropout(p=drop_rate)
        # Stochastic-depth rates increase linearly with block index.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        seq_len = self.num_patches
        self.blocks = nn.ModuleList([])
        for i in range(depth):
            if ((i % args.pool_block_width) == 0):
                # Every pool_block_width-th block (including block 0) halves
                # the sequence: L_out = floor((L - k) / 2 + 1) for MaxPool1d(k, stride=2).
                seq_len = math.floor((((seq_len - args.pool_kernel_size) / 2) + 1))
                downsample = nn.MaxPool1d(kernel_size=args.pool_kernel_size, stride=2)
            else:
                downsample = None
            self.blocks.append(Block(seq_len, dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, downsample=downsample, **kwargs))
        self.norm = norm_layer(embed_dim)
        self.head = (nn.Linear(embed_dim, num_classes) if (num_classes > 0) else nn.Identity())
        trunc_normal_(self.pos_embed, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """timm-style init: trunc-normal linear weights, zero biases, unit LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if (isinstance(m, nn.Linear) and (m.bias is not None)):
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    # Fixed: the source had a mangled bare `.ignore` here -- restored the
    # standard timm/ViT `@torch.jit.ignore` decorator.
    @torch.jit.ignore
    def no_weight_decay(self):
        """Return parameter names excluded from weight decay (position embeddings)."""
        skip = []
        for (name, param) in self.named_parameters():
            if ('pos_embed' in name):
                skip.append(name)
        return skip

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for a new number of classes."""
        self.num_classes = num_classes
        self.head = (nn.Linear(self.embed_dim, num_classes) if (num_classes > 0) else nn.Identity())

    def forward_features(self, x):
        # Patchify, add position embedding, run blocks (which may pool the
        # sequence), then global-average-pool over the token dimension.
        x = self.patch_embed(x)
        x = (x + self.pos_embed)
        x = self.pos_drop(x)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        return x.mean(dim=1)

    def forward(self, x):
        x = self.forward_features(x)
        x = self.head(x)
        return x
class Slither(ProblemDetector):
    """Runs the Slither static analyzer inside a Docker container and maps
    its JSON findings to DetectedVulnerability records."""

    name: str = 'slither'
    docker_image: str
    dockerCl: Any
    # Shared executor used to run the blocking docker calls off the event loop.
    threadPool: ClassVar[concurrent.futures.ThreadPoolExecutor] = concurrent.futures.ThreadPoolExecutor()
    # Normalizes Slither check ids to this project's vulnerability titles.
    titleVulDict: ClassVar[Dict[(str, str)]] = {'reentrancy-eth': 'reentrancy', 'reentrancy-no-eth': 'reentrancy', 'unused-return': 'unchecked_call', 'unchecked-lowlevel': 'unchecked_call', 'unchecked-send': 'unchecked_call'}

    def __init__(self, args):
        super().__init__(args)
        self.docker_image = args['slither-docker-image']
        self.dockerCl = docker.from_env()

    async def detect(self, path_source: Sequence[Path], targetContractName: Optional[str]=None, targetLocations: Optional[Sequence[CodeRange]]=None, targetedVul: Optional[Sequence[str]]=None, fastFail: bool=True, **extra_args) -> ProblemDetectorResult:
        """Analyze path_source[0] with Slither in a network-disabled container
        and return the parsed findings. Raises AssertionError on any stderr
        output from the container."""
        logger.debug('Going to start Slither container')
        cmd = ['/bin/bash', '-c', 'slither /tmp/subject.sol --json -']

        def runContainer():
            # Blocking helper executed on the thread pool: mounts the source
            # read-only, waits for completion, collects stdout/stderr.
            container = self.dockerCl.containers.run(image=self.docker_image, command=cmd, detach=True, auto_remove=False, network_disabled=True, network_mode='none', tty=False, stdin_open=False, volumes={path_source[0]: {'bind': '/tmp/subject.sol', 'mode': 'ro'}})
            container.wait()
            output = ''.join((line.decode(utf_8.getregentry().name) for line in container.logs(stdout=True, stderr=False, stream=True, tail='all')))
            STDERROut = ''.join((line.decode(utf_8.getregentry().name) for line in container.logs(stdout=False, stderr=True, stream=True, tail='all')))
            container.remove()
            return (output, STDERROut)

        (output, STDERROut) = (await asyncio.get_event_loop().run_in_executor(self.threadPool, runContainer))
        assert (len(STDERROut) == 0), f'Slither STDERR output is non-empty!{os.linesep}STDERR:{os.linesep}{STDERROut}'
        logger.debug(f'Slither STDERR:{os.linesep}{STDERROut}')
        logger.trace(f'Slither output:{os.linesep}{output}')
        return self.__processOutput(output, targetedVul)

    def __processOutput(self, output, targetedVul: Optional[Sequence[str]]=None):
        """Parse Slither's JSON output into DetectedVulnerability objects."""
        try:
            rst = cast(Dict[(str, Any)], json.loads(output))
        except Exception:
            logger.critical(f'The following Slither output parsing failed...{os.linesep}{output}')
            raise
        if (not rst['success']):
            raise RuntimeError(f"Slither processed unsuccessfully. Error: {rst['error']}")
        ret: List[DetectedVulnerability] = []
        for issue in rst['results']['detectors']:
            # 'node' elements carry source ranges; columns become 0-based.
            relevantElements_Node = tuple((e for e in issue['elements'] if (e['type'] == 'node')))
            occurrence = tuple(sorted((CodeRange(start=Location(line=min(e_sourceMap['lines']), column=(e_sourceMap['starting_column'] - 1)), end=Location(line=max(e_sourceMap['lines']), column=(e_sourceMap['ending_column'] - 1))) for e_sourceMap in (e['source_mapping'] for e in relevantElements_Node))))
            # 'function' elements are sorted by (contract, function) so the
            # fault-localization info is deterministic.
            relevantElements_Function = sorted((e for e in issue['elements'] if (e['type'] == 'function')), key=(lambda e: (e['type_specific_fields']['parent']['name'], e['name'])))
            faultLocalizationInfo = tuple(chain((FaultElement_NodeType(contractName=func['type_specific_fields']['parent']['name'], functionName=func['name'], nodeType='Block') for func in relevantElements_Function), (FaultElement_CodeRange(codeRange=occur) for occur in occurrence)))
            ret.append(DetectedVulnerability(name=self.titleVulDict.get(issue['check'], issue['check']), faultLocalizationInfo=faultLocalizationInfo))
        return ret
# NOTE(review): `(tryfirst=True)` is almost certainly a mangled
# `@pytest.hookimpl(tryfirst=True)` decorator -- confirm against upstream.
(tryfirst=True)
def pytest_load_initial_conftests(args: list[str], early_config: pytest.Config, parser: pytest.Parser) -> None:
    """Seed os.environ from configured entries before conftests are loaded."""
    for entry in _load_values(early_config):
        # Respect pre-existing variables when the entry opts out of overriding.
        if (entry.skip_if_set and (entry.key in os.environ)):
            continue
        # transform=True expands {VAR} placeholders against the current environment.
        os.environ[entry.key] = (entry.value.format(**os.environ) if entry.transform else entry.value)
def define_G(opt):
    """Construct the generator network selected by ``opt['network_G']``.

    Supported ``which_model_G`` values: 'sr_resnet', 'sft_arch', 'RRDB_net'.
    Applies small-scale Kaiming init when training and wraps the model in
    DataParallel when GPU ids are configured.

    Raises:
        NotImplementedError: for an unrecognised ``which_model_G``.
    """
    net_opt = opt['network_G']
    model_name = net_opt['which_model_G']

    if model_name == 'sr_resnet':
        netG = arch.SRResNet(
            in_nc=net_opt['in_nc'], out_nc=net_opt['out_nc'], nf=net_opt['nf'],
            nb=net_opt['nb'], upscale=net_opt['scale'],
            norm_type=net_opt['norm_type'], act_type='relu',
            mode=net_opt['mode'], upsample_mode='pixelshuffle')
    elif model_name == 'sft_arch':
        netG = sft_arch.SFT_Net()
    elif model_name == 'RRDB_net':
        netG = arch.RRDBNet(
            in_nc=net_opt['in_nc'], out_nc=net_opt['out_nc'], nf=net_opt['nf'],
            nb=net_opt['nb'], gc=net_opt['gc'], upscale=net_opt['scale'],
            norm_type=net_opt['norm_type'], act_type='leakyrelu',
            mode=net_opt['mode'], upsample_mode='upconv')
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(model_name))

    if opt['is_train']:
        # Small init scale stabilises training of very deep generators.
        init_weights(netG, init_type='kaiming', scale=0.1)
    if opt['gpu_ids']:
        assert torch.cuda.is_available()
        netG = nn.DataParallel(netG)
    return netG
class XdgStatic(Static[XdgSurface]):
    """A 'static' (unmanaged-layout) window backed by an XDG toplevel surface."""

    def __init__(self, core: Core, qtile: Qtile, win: XdgWindow, idle_inhibitor_count: int):
        surface = win.surface
        Static.__init__(self, core, qtile, surface, win.wid, idle_inhibitor_count=idle_inhibitor_count)
        if surface.toplevel.title:
            self.name = surface.toplevel.title
        self._wm_class = surface.toplevel.app_id
        # Wire up lifecycle and metadata signals from the wlroots surface.
        self.add_listener(surface.map_event, self._on_map)
        self.add_listener(surface.unmap_event, self._on_unmap)
        self.add_listener(surface.destroy_event, self._on_destroy)
        self.add_listener(surface.toplevel.set_title_event, self._on_set_title)
        self.add_listener(surface.toplevel.set_app_id_event, self._on_set_app_id)
        # Adopt the scene-graph container from the managed window.
        self.container = win.container
        self.container.node.data = self.data_handle
        self.tree = win.tree

    # NOTE(review): `_command()` looks like a mangled `@expose_command()`
    # decorator for kill() -- confirm against the original source.
    _command()
    def kill(self) -> None:
        # Politely ask the client to close (it may refuse).
        self.surface.send_close()

    def hide(self) -> None:
        super().hide()
        self.container.node.set_enabled(enabled=False)

    def unhide(self) -> None:
        self.container.node.set_enabled(enabled=True)

    def place(self, x: int, y: int, width: int, height: int, borderwidth: int, bordercolor: (ColorsType | None), above: bool=False, margin: ((int | list[int]) | None)=None, respect_hints: bool=False) -> None:
        """Move/resize the window; border, margin and hint arguments are accepted
        for interface compatibility but not applied here."""
        self.x = x
        self.y = y
        self._width = width
        self._height = height
        self.surface.set_size(width, height)
        self.surface.set_bounds(width, height)
        self.container.node.set_position(x, y)

    def _on_set_title(self, _listener: Listener, _data: Any) -> None:
        logger.debug('Signal: xdgstatic set_title')
        title = self.surface.toplevel.title
        # Only react to real changes to avoid redundant hook firings.
        if (title and (title != self.name)):
            self.name = title
            if self.ftm_handle:
                self.ftm_handle.set_title(self.name)
            hook.fire('client_name_updated', self)

    def _on_set_app_id(self, _listener: Listener, _data: Any) -> None:
        logger.debug('Signal: xdgstatic set_app_id')
        self._wm_class = self.surface.toplevel.app_id
        if self.ftm_handle:
            self.ftm_handle.set_app_id((self._wm_class or ''))
def generate_app(appname, force=False, outpath='..', dbtype='sql', update_only=False, view_type=None):
    """Scaffold a new application under ``outpath/appname`` from the bundled
    'start' template tree, rendering templates and copying static files.

    update_only skips config/db/view files; view_type selects the front-end
    flavour ('bs4', 'sui', 'api-only', ...).

    NOTE(review): view_type.lower() below crashes when view_type is left at
    its default of None -- callers apparently always pass a string; confirm.
    """
    print((' generating app:' + str(appname)))
    import os, sys
    base = os.path.normpath(outpath)
    print((' base for app: ' + base))
    # Template tree shipped next to this module.
    root = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'start')
    print((' root: ' + root))
    outdir = os.path.normpath(os.path.join(base, appname))
    print((50 * '-'))
    print((' ..creating in: ' + os.path.abspath(outdir)))
    print((50 * '-'))
    os.makedirs(outdir, exist_ok=True)
    # Files with these extensions are rendered as templates; others copied.
    template_exts = ['.py', '.tmpl']
    skip_extensions = []
    exclude_dirs = ['static', 'stubs', 'views']
    skip_dirs = ['stuff', 'werkzeug']
    if (view_type.lower() != 'sui'):
        # Not using SemanticUI: drop its assets entirely.
        skip_dirs.append('sui')
        skip_extensions.append('.sui')
    exclude_files = []
    if update_only:
        # Never overwrite user config, databases or models on update.
        exclude_files.extend(['alembic.ini', 'db.sqlite', 'tiny.db', 'env.py', 'shorties.py', 'config.py', 'powhandler.py', 'powmodel.py', 'tinymodel.py', 'mongomodel.py'])
        skip_dirs.extend(['migrations', 'views', 'static'])
    elif (view_type == 'api-only'):
        skip_dirs.extend(['static', 'views'])
        # Exclude all scaffold/view/dashboard stubs from an API-only app.
        filelist = [l for l in os.listdir(os.path.join(root, 'stubs')) if str.startswith(l, 'scaffold')]
        filelist += [l for l in os.listdir(os.path.join(root, 'stubs')) if str.startswith(l, 'view')]
        filelist += [l for l in os.listdir(os.path.join(root, 'stubs')) if str.startswith(l, 'dash')]
        exclude_files.extend(filelist)
        print(f'API ONLY:')
        print((40 * '-'))
        print(f' .. skipping files: {filelist}')
    # Absolute DB paths substituted into generated config files.
    db_base_path = os.path.normpath(os.path.abspath(outdir))
    tinydb_path = os.path.normpath(os.path.abspath(os.path.join(outdir, 'tiny.db')))
    sqlite_path = os.path.normpath(os.path.abspath(os.path.join(outdir, 'db.sqlite')))
    if (sys.platform == 'win32'):
        # Escape backslashes so the paths survive template substitution.
        sqlite_path = sqlite_path.replace('\\', '\\\\')
        tinydb_path = tinydb_path.replace('\\', '\\\\')
        db_base_path = db_base_path.replace('\\', '\\\\')
    elif (sys.platform in ['linux', 'darwin']):
        # sqlite URLs need an extra leading slash for absolute paths.
        sqlite_path = ('/' + sqlite_path)
        tinydb_path = ('/' + tinydb_path)
        db_base_path = ('/' + db_base_path)
    else:
        sqlite_path = (('Unknown system platform (' + sys.platform) + '). Please set sqlite connection string yourself accordingly')
    cookie_secret = uuid.uuid4()
    # Walk the template tree, mirroring it into outdir.
    for (dirname, dirs, files) in os.walk(root):
        # Prune skipped directories in place so os.walk does not descend.
        dirs[:] = [d for d in dirs if (d not in skip_dirs)]
        for f in files:
            if (not (f in exclude_files)):
                (filename, file_extension) = os.path.splitext(f)
                if (file_extension in skip_extensions):
                    continue
                print(f' processing: {f:<40} ....', end=' ')
                path = Path(dirname)
                # Rebase the path relative to the 'start' template root.
                index = path.parts.index('start')
                opath = Path(outdir).joinpath(*path.parts[(index + 1):])
                (filename, file_extension) = os.path.splitext(f)
                if (path.parts[(- 1)] in skip_dirs):
                    print(f' skipped: {f} ...')
                else:
                    if (not os.path.exists(str(opath))):
                        os.makedirs(str(opath), exist_ok=True)
                    if ((file_extension in template_exts) and (not (path.parts[(- 1)] in exclude_dirs))):
                        # Template file: render with substitutions (copy=False).
                        copy_or_pump(os.path.normpath(os.path.join(dirname, f)), os.path.normpath(os.path.join(str(opath), f)), copy=False, appname=appname, sqlite_path=sqlite_path, tinydb_path=tinydb_path, db_base_path=db_base_path, dbtype=dbtype, cookie_secret=str(cookie_secret), data='{{data}}', force=force)
                    else:
                        # Plain file: byte copy (copy=True).
                        copy_or_pump(os.path.normpath(os.path.join(dirname, f)), os.path.normpath(os.path.join(str(opath), f)), copy=True, appname=appname, sqlite_path=sqlite_path, tinydb_path=tinydb_path, db_base_path=db_base_path, dbtype=dbtype, cookie_secret=str(cookie_secret), data='{{data}}', force=force)
            else:
                print((' skipped in update_only: ' + str(f)))
    # Post-process views for the selected front-end flavour.
    if (view_type and (view_type != 'api-only')):
        print((50 * '-'))
        if (view_type == 'bs4'):
            print('preparing app for view_type: Bootstrap 4')
        elif (view_type == 'sui'):
            print('preparing app for view_type: SemanticUI ')
        else:
            print(('preparing app for view_type: ' + str(view_type)))
        print((50 * '-'))
        if (view_type == 'bs4'):
            folder = os.path.normpath(os.path.join(outdir, 'views'))
            # Promote the flavour-specific view files to .tmpl.
            rename_extensions(folder, ('.' + view_type), '.tmpl', files=['index', 'error', '404'])
            print(' ... Done. Bootstrap4 is the default')
        else:
            print(('outdir: ' + outdir))
            import os, sys
            folder = os.path.normpath(os.path.join(outdir, 'views'))
            rename_extensions(folder, ('.' + view_type), '.tmpl', files=['index', 'error', '404'])
    elif (view_type != 'api-only'):
        print('Error: viewtype not set and apparantly no Default set either!')
    else:
        print('API Only: Created a headless API only version (No views, css, js or static files included)')
    if update_only:
        # Summarize what the update deliberately left untouched.
        print((40 * '-'))
        print(' Update only. ')
        print((40 * '-'))
        print(' I did not touch: ')
        print(exclude_files)
        print(skip_dirs)
def get_parser():
    """Build and return the argparse parser for the pypyr CLI."""
    cli_parser = argparse.ArgumentParser(
        allow_abbrev=True,
        description='pypyr pipeline runner',
        formatter_class=argparse.RawTextHelpFormatter)
    # Positional: which pipeline to run.
    cli_parser.add_argument(
        'pipeline_name',
        help=wrap('Name of pipeline to run. Don`t add the .yaml at the end.'))
    # Remaining positionals feed the pipeline's own context parser.
    cli_parser.add_argument(
        dest='context_args',
        nargs='*',
        default=None,
        help=wrap("Initialize context with this. Parsed by the pipeline's context_parser function.\nSeparate multiple args with spaces."))
    cli_parser.add_argument(
        '--groups',
        dest='groups',
        nargs='*',
        default=None,
        help=wrap('Step-Groups to run. defaults to "steps".\nYou probably want to order --groups AFTER the pipeline name and context positional args. e.g\npypyr pipename context --groups group1 group2\nIf you prefer putting them before, use a -- to separate groups from the pipeline name, e.g\npypyr --groups group1 group2 -- pipename context'))
    cli_parser.add_argument(
        '--success',
        dest='success_group',
        default=None,
        help=wrap('Step-Group to run on successful completion of pipeline.\nDefaults to "on_success"'))
    cli_parser.add_argument(
        '--failure',
        dest='failure_group',
        default=None,
        help=wrap('Step-Group to run on error completion of pipeline.\nDefaults to "on_failure"'))
    cli_parser.add_argument(
        '--dir',
        dest='py_dir',
        default=config.cwd,
        help=wrap('Load custom python modules from this directory.\nDefaults to cwd (the current dir).'))
    cli_parser.add_argument(
        '--log',
        '--loglevel',
        dest='log_level',
        type=int,
        default=None,
        help=wrap('Integer log level. Defaults to 25 (NOTIFY).\n10=DEBUG \n20=INFO\n25=NOTIFY\n30=WARNING\n40=ERROR\n50=CRITICAL\nLog Level < 10 gives full traceback on errors.'))
    cli_parser.add_argument(
        '--logpath',
        dest='log_path',
        help=wrap('Log-file path. Append log output to this path.'))
    cli_parser.add_argument(
        '--version',
        action='version',
        help='Echo version number.',
        version=f'{pypyr.version.get_version()}')
    return cli_parser
class TransformerSentenceEncoderLayer(nn.Module):
    """One Transformer encoder block: multi-head self-attention followed by a
    position-wise feed-forward network, each with a residual connection.

    ``layer_norm_first`` selects pre-LN (normalize before each sub-layer) vs
    post-LN (normalize after the residual add) in :meth:`forward`.
    """

    def __init__(self, embedding_dim: int = 768, ffn_embedding_dim: int = 3072, num_attention_heads: int = 8, dropout: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, activation_fn: str = 'relu', layer_norm_first: bool = False) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        # Resolve the activation function by name (e.g. 'relu', 'gelu').
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, self_attention=True)
        self.dropout1 = nn.Dropout(dropout)                  # after self-attention
        self.dropout2 = nn.Dropout(self.activation_dropout)  # after FFN activation
        self.dropout3 = nn.Dropout(dropout)                  # after FFN output projection
        self.layer_norm_first = layer_norm_first
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        self.final_layer_norm = LayerNorm(self.embedding_dim)

    def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor = None, self_attn_padding_mask: torch.Tensor = None, need_weights: bool = False, att_args=None):
        """Apply the block to ``x`` and return ``(x, attn)``.

        ``need_weights`` and ``att_args`` are accepted but unused here.
        NOTE(review): the post-LN branch does not forward ``self_attn_mask``
        to the attention call — confirm this asymmetry is intentional.
        """
        residual = x
        if self.layer_norm_first:
            # Pre-LN: norm -> attention -> residual add.
            x = self.self_attn_layer_norm(x)
            (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, attn_mask=self_attn_mask)
            x = self.dropout1(x)
            x = (residual + x)
            # Pre-LN feed-forward: norm -> fc1 -> act -> fc2 -> residual add.
            residual = x
            x = self.final_layer_norm(x)
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
        else:
            # Post-LN: attention -> residual add -> norm.
            (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask)
            x = self.dropout1(x)
            x = (residual + x)
            x = self.self_attn_layer_norm(x)
            # Post-LN feed-forward: fc1 -> act -> fc2 -> residual add -> norm.
            residual = x
            x = self.activation_fn(self.fc1(x))
            x = self.dropout2(x)
            x = self.fc2(x)
            x = self.dropout3(x)
            x = (residual + x)
            x = self.final_layer_norm(x)
        return (x, attn)
class Migration(migrations.Migration):
    """Add the allow_multiple_placements flag to Publisher and its history table."""

    dependencies = [('adserver', '0086_region_topic_pricing')]

    # Same field added to the live model and its simple-history shadow model.
    operations = [
        migrations.AddField(
            model_name=target_model,
            name='allow_multiple_placements',
            field=models.BooleanField(default=False, help_text='Can this publisher have multiple placements on the same pageview'),
        )
        for target_model in ('historicalpublisher', 'publisher')
    ]
def filter_rop(ops):
    """Search radare2 ROP gadgets for one whose tail exactly matches *ops*.

    Returns the address of the matching instruction, or 0 when no gadget
    (searched from the last result backwards) ends with the opcode sequence.
    """
    addr = 0
    gadget_list = r2p.cmdj('/Rj %s' % ops[0])
    # Prefer gadgets found later in the binary (search the list backwards).
    gadget_list.reverse()
    for gadget in gadget_list:
        instr_list = gadget['opcodes']
        for idx, instr in enumerate(instr_list):
            tail = [entry['opcode'] for entry in instr_list[idx:]]
            if tail == ops:
                addr = instr['offset']
                break
        if addr:
            break
    return addr
def save_trees(trees, path, mode='all', replace_newline=True, joiner='***', short_long_sep=''):
    """Serialize a forest of expansion trees to a CSV file.

    mode:
      'final_short' - one row per root with its short_text.
      'final_long'  - one row per root with its joined full_text.
      'all'         - one column per tree level; each cell joins the level's
                      short texts, then short_long_sep, then its long texts.
    replace_newline escapes literal newlines as the two characters '\\n'.
    """
    assert mode in ['all', 'final_long', 'final_short']
    # Deepest tree determines how many iteration columns 'all' mode needs.
    depth = max(root.max_depth_from_self() for root in trees)
    os.makedirs(os.path.dirname(path), exist_ok=True)

    def _clean(text):
        # Optionally escape embedded newlines so each record stays one CSV row.
        return text.replace('\n', '\\n') if replace_newline else text

    with open(path, 'w') as handle:
        out = csv.writer(handle)
        if mode == 'final_short':
            out.writerow(['final_short'])
            for root in trees:
                out.writerow([_clean(root.short_text)])
        elif mode == 'final_long':
            out.writerow(['final_long'])
            for root in trees:
                out.writerow([_clean(root.full_text(joiner=joiner))])
        else:
            out.writerow(['iter%d' % i for i in range(depth + 1)])
            for root in trees:
                cells = []
                frontier = [root]
                # Breadth-first walk: one cell per level of this tree.
                while frontier:
                    shorts = joiner.join(node.short_text for node in frontier)
                    longs = joiner.join(node.long_text for node in frontier)
                    cells.append(shorts + short_long_sep + longs)
                    frontier = [child for node in frontier for child in node.children]
                out.writerow([_clean(t) for t in cells])
def build_discriminator():
    """Build the AC-GAN discriminator for 1x28x28 images.

    Returns a Model mapping an image to two heads: 'generation' (real/fake
    sigmoid) and 'auxiliary' (10-way class softmax).
    """
    cnn = Sequential()
    # First conv block carries the input shape.
    cnn.add(Conv2D(32, 3, padding='same', strides=2, input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))
    # Remaining conv blocks: (filters, stride), all 3x3 same-padded.
    for n_filters, stride in ((64, 1), (128, 2), (256, 1)):
        cnn.add(Conv2D(n_filters, 3, padding='same', strides=stride))
        cnn.add(LeakyReLU())
        cnn.add(Dropout(0.3))
    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))
    features = cnn(image)
    # Two output heads share the convolutional feature extractor.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)
    return Model(image, [fake, aux])
class BotShortDescription(TelegramObject):
    """Represents a Telegram bot's short description.

    Instances compare equal when their ``short_description`` matches
    (presumably via ``TelegramObject``'s ``_id_attrs`` mechanism — the base
    class is outside this view).
    """

    __slots__ = ('short_description',)

    def __init__(self, short_description: str, *, api_kwargs: Optional[JSONDict] = None):
        super().__init__(api_kwargs=api_kwargs)
        # The short description text shown on the bot's profile page.
        self.short_description: str = short_description
        self._id_attrs = (self.short_description,)
        # Make the instance immutable, per TelegramObject convention.
        self._freeze()
class Voxelization_Idx(Function):
    """Autograd wrapper around the native ``PG_OP.voxelize_idx`` kernel.

    Maps point coordinates to voxel index structures. The op is an indexing
    operation, so no gradient flows through it (``backward`` returns None).
    """

    def forward(ctx, coords, batchsize, mode=4):
        """Run the native voxelization.

        coords: (N, ...) coordinate tensor; must be contiguous.
        Returns (output_coords, input_map, output_map) as filled in by the
        native op — exact semantics defined by PG_OP.voxelize_idx, which is
        outside this view.
        """
        assert coords.is_contiguous()
        N = coords.size(0)
        # Empty tensors of the right type for the native op to populate.
        output_coords = coords.new()
        input_map = torch.IntTensor(N).zero_()
        output_map = input_map.new()
        PG_OP.voxelize_idx(coords, output_coords, input_map, output_map, batchsize, mode)
        return (output_coords, input_map, output_map)

    def backward(ctx, a=None, b=None, c=None):
        # Non-differentiable indexing op.
        return None
class TooManyStoppingSequences(ErrorReason):
    """Error reason for a request that supplies more stopping sequences than
    the service allows."""

    def __init__(self, num_stopping_sequences: int, max_num_stopping_sequences: int) -> None:
        self.num_stopping_sequences = num_stopping_sequences
        self.max_num_stopping_sequences = max_num_stopping_sequences

    def get_message(self) -> str:
        """Return the human-readable explanation of the failure."""
        # Fixed user-facing message: 'Recieved' -> 'Received' and the
        # missing space after the comma ('sequences,but').
        return (
            f'Too many stopping sequences. Received {self.num_stopping_sequences} '
            f'stopping sequences, but the maximum is {self.max_num_stopping_sequences}. '
            f'Please reduce the number of provided stopping sequences.'
        )

    def exception(self) -> Exception:
        """Wrap the message in the concrete exception type callers raise/catch."""
        return TooManyStoppingSequencesError(self.get_message())
class NNVFunction(MLPFunction):
    """State-value function V(s) implemented as an MLP over flattened observations."""

    def __init__(self, env_spec, hidden_layer_sizes=(100, 100), name='vf', batchnormvf=False, dropoutvf_keep_prob=1.0):
        # Required by rllab-style Serializable so the object can be re-created
        # from its constructor arguments.
        Serializable.quick_init(self, locals())
        # Flattened observation dimensionality.
        self._Do = env_spec.observation_space.flat_dim
        # Placeholder for a batch of flattened observations.
        self._obs_pl = tf.placeholder(tf.float32, shape=[None, self._Do], name='observation')
        # Scalar keep-probability placeholder; only wired into the MLP when
        # dropout is actually active (keep prob < 1.0), otherwise None.
        self.dropoutvf_placeholder = tf.placeholder(shape=[], dtype=tf.float32)
        super(NNVFunction, self).__init__(name, (self._obs_pl,), hidden_layer_sizes, batchnorm=batchnormvf, dropoutvf_placeholder=(self.dropoutvf_placeholder if (dropoutvf_keep_prob < 1.0) else None), dropoutvf_keep_prob=dropoutvf_keep_prob)
def train(model, train_loader, myloss, optimizer, epoch):
    """Run one training epoch on the GPU and return the last batch's loss.

    NOTE(review): batch geometry is hard-coded — every batch is reshaped to
    (175, 50, 34) then permuted to (50, 175, 34), so the loader must deliver
    exactly 175*50*34 elements per batch; confirm against the DataLoader.
    """
    model.train()
    for (batch_idx, train_data) in enumerate(train_loader):
        # Move to GPU as double precision, then reshape/permute as above.
        train_data = Variable(train_data).type(torch.cuda.DoubleTensor).squeeze().view(175, 50, 34).permute(1, 0, 2)
        optimizer.zero_grad()
        output = model(train_data)
        # myloss takes only the model output (no targets) — presumably an
        # unsupervised/self-defined objective; verify against its definition.
        loss = myloss(output)
        loss.backward()
        optimizer.step()
        if ((batch_idx % 100) == 0):
            # The 1e4 factor scales the loss purely for display.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tloss: {:.6f}'.format(epoch, (batch_idx * len(train_data)), len(train_loader.dataset), ((100.0 * batch_idx) / len(train_loader)), (10000.0 * loss.data.cpu().numpy())))
    return loss
# NOTE(review): the bare call below looks like a truncated registration
# decorator (e.g. "@register_attention('dot')") that lost its prefix in this
# copy — confirm against version control.
_attention('dot')

class DotAttention(BaseAttention):
    """Luong-style dot-product attention.

    Scores each source position by the dot product between the (optionally
    projected) decoder state and the encoder hidden state, then returns the
    attention-weighted context vector and the normalized scores.
    """

    def __init__(self, decoder_hidden_state_dim, context_dim, **kwargs):
        super().__init__(decoder_hidden_state_dim, context_dim)
        self.input_proj = None
        # Project the decoder state into the context dimension when the two
        # sizes differ, or when explicitly forced via kwargs.
        force_projection = kwargs.get('force_projection', False)
        if (force_projection or (decoder_hidden_state_dim != context_dim)):
            self.input_proj = Linear(decoder_hidden_state_dim, context_dim, bias=True)
        self.src_length_masking = kwargs.get('src_length_masking', True)

    def prepare_for_onnx_export_(self, **kwargs):
        # Length masking uses data-dependent control flow that ONNX export
        # cannot trace, so disable it for export.
        self.src_length_masking = False

    def forward(self, decoder_state, source_hids, src_lengths):
        """Compute (attn_weighted_context, attn_scores.T).

        Assumes source_hids is (srclen, bsz, context_dim) and decoder_state
        is (bsz, decoder_hidden_state_dim) — TODO confirm against callers.
        """
        source_hids = source_hids.transpose(0, 1)  # -> batch-first
        if (self.input_proj is not None):
            decoder_state = self.input_proj(decoder_state)
        # (bsz, srclen): similarity of every source position to the query.
        attn_scores = torch.bmm(source_hids, decoder_state.unsqueeze(2)).squeeze(2)
        normalized_masked_attn_scores = attention_utils.masked_softmax(attn_scores, src_lengths, self.src_length_masking)
        # Weighted sum of source states -> (bsz, context_dim).
        attn_weighted_context = (source_hids * normalized_masked_attn_scores.unsqueeze(2)).contiguous().sum(1)
        return (attn_weighted_context, normalized_masked_attn_scores.t())
def _get_text_feedback(schedule_item):
    """Collect the text-feedback questions for a schedule item's type, each
    paired with the answers submitted for that item."""
    matching_questions = TextFeedbackQuestion.objects.filter(schedule_item_type__title=schedule_item.type)
    collected = []
    for question in matching_questions:
        answers = ScheduleItemTextFeedback.objects.filter(question=question, schedule_item=schedule_item)
        collected.append({'question': question, 'values': answers})
    return collected
class BatchEasyHardMiner(BaseTupleMiner):
    """Miner that picks, per anchor, a positive and a negative by difficulty.

    Each side can use one of four strategies: 'easy', 'hard', 'semihard',
    or 'all' (pass every pair through unmined). Restrictions enforced in
    __init__: both sides cannot be 'semihard', and 'semihard' cannot be
    combined with 'all'. Optional (low, high) distance windows further
    restrict candidate pairs.
    """

    HARD = 'hard'
    SEMIHARD = 'semihard'
    EASY = 'easy'
    ALL = 'all'
    all_batch_mining_strategies = [HARD, SEMIHARD, EASY, ALL]

    def __init__(self, pos_strategy=EASY, neg_strategy=SEMIHARD, allowed_pos_range=None, allowed_neg_range=None, **kwargs):
        super().__init__(**kwargs)
        # Validate the strategy combination up front; see class docstring.
        if (not ((pos_strategy in self.all_batch_mining_strategies) and (neg_strategy in self.all_batch_mining_strategies))):
            raise ValueError('\npos_strategy must be one of "{0}"\nneg_strategy must be one of "{0}"'.format('" or "'.join(self.all_batch_mining_strategies)))
        if (pos_strategy == neg_strategy == self.SEMIHARD):
            raise ValueError('pos_strategy and neg_strategy cannot both be "semihard"')
        if (((pos_strategy == self.ALL) and (neg_strategy == self.SEMIHARD)) or ((neg_strategy == self.ALL) and (pos_strategy == self.SEMIHARD))):
            raise ValueError('"semihard" cannot be used in combination with "all"')
        self.pos_strategy = pos_strategy
        self.neg_strategy = neg_strategy
        # Optional (lower, upper) allowed distance windows per side.
        self.allowed_pos_range = allowed_pos_range
        self.allowed_neg_range = allowed_neg_range
        self.add_to_recordable_attributes(list_of_names=['easiest_triplet', 'hardest_triplet', 'easiest_pos_pair', 'hardest_pos_pair', 'easiest_neg_pair', 'hardest_neg_pair'], is_stat=True)

    def mine(self, embeddings, labels, ref_emb, ref_labels):
        """Return mined (anchor, positive, anchor, negative) index tensors."""
        mat = self.distance(embeddings, ref_emb)
        (a1_idx, p_idx, a2_idx, n_idx) = lmu.get_all_pairs_indices(labels, ref_labels)
        a = torch.arange(mat.size(0), device=mat.device)
        # 'semihard' on one side is defined relative to the OTHER side's mined
        # distances, so that side must be mined first — hence the ordering in
        # the first two branches.
        if ((self.pos_strategy == self.SEMIHARD) and (self.neg_strategy != self.ALL)):
            ((negative_dists, negative_indices), a2n_keep) = self.get_negatives(mat, a2_idx, n_idx)
            ((positive_dists, positive_indices), a1p_keep) = self.get_positives(mat, a1_idx, p_idx, negative_dists)
        elif ((self.neg_strategy == self.SEMIHARD) and (self.pos_strategy != self.ALL)):
            ((positive_dists, positive_indices), a1p_keep) = self.get_positives(mat, a1_idx, p_idx)
            ((negative_dists, negative_indices), a2n_keep) = self.get_negatives(mat, a2_idx, n_idx, positive_dists)
        else:
            # Independent mining; either side may be 'all' (no mining needed).
            if (self.pos_strategy != self.ALL):
                ((positive_dists, positive_indices), a1p_keep) = self.get_positives(mat, a1_idx, p_idx)
            if (self.neg_strategy != self.ALL):
                ((negative_dists, negative_indices), a2n_keep) = self.get_negatives(mat, a2_idx, n_idx)
        if (self.ALL not in [self.pos_strategy, self.neg_strategy]):
            # Keep only anchors with both a valid positive and negative.
            a_keep_idx = torch.where((a1p_keep & a2n_keep))
            self.set_stats(positive_dists[a_keep_idx], negative_dists[a_keep_idx])
            a = a[a_keep_idx]
            p = positive_indices[a_keep_idx]
            n = negative_indices[a_keep_idx]
            return (a, p, a, n)
        elif ((self.pos_strategy == self.ALL) and (self.neg_strategy != self.ALL)):
            # All positive pairs; mined negatives only.
            self.set_stats(mat[(a1_idx, p_idx)], negative_dists[a2n_keep])
            a2 = a[a2n_keep]
            n = negative_indices[a2n_keep]
            return (a1_idx, p_idx, a2, n)
        elif ((self.pos_strategy != self.ALL) and (self.neg_strategy == self.ALL)):
            # Mined positives only; all negative pairs.
            self.set_stats(positive_dists[a1p_keep], mat[(a2_idx, n_idx)])
            a1 = a[a1p_keep]
            p = positive_indices[a1p_keep]
            return (a1, p, a2_idx, n_idx)
        else:
            # Both sides 'all': pass everything through.
            self.set_stats(mat[(a1_idx, p_idx)], mat[(a2_idx, n_idx)])
            return (a1_idx, p_idx, a2_idx, n_idx)

    def get_positives(self, mat, a1_idx, p_idx, negative_dists=None):
        # For positives, the configured strategy maps directly to a min/max
        # row reduction (see get_mine_function).
        pos_func = self.get_mine_function(self.pos_strategy)
        return pos_func(mat, a1_idx, p_idx, self.allowed_pos_range, negative_dists)

    def get_negatives(self, mat, a2_idx, n_idx, positive_dists=None):
        # Deliberate inversion: a HARD negative is the CLOSEST one, which is
        # the 'easy' direction of the distance matrix (and vice versa), so the
        # strategy is flipped before looking up the reduction function.
        neg_func = self.get_mine_function((self.EASY if (self.neg_strategy in [self.HARD, self.SEMIHARD]) else self.HARD))
        return neg_func(mat, a2_idx, n_idx, self.allowed_neg_range, positive_dists)

    def get_mine_function(self, strategy):
        # is_inverted distances (e.g. cosine similarity) flip which extreme
        # counts as 'hard'.
        if (strategy in [self.HARD, self.SEMIHARD]):
            mine_func = (self.get_min_per_row if self.distance.is_inverted else self.get_max_per_row)
        elif (strategy == self.EASY):
            mine_func = (self.get_max_per_row if self.distance.is_inverted else self.get_min_per_row)
        else:
            raise NotImplementedError
        return mine_func

    def get_max_per_row(self, mat, anchor_idx, other_idx, val_range=None, semihard_thresholds=None):
        """Row-wise max over valid (anchor, other) entries.

        Returns ((values, indices), rows_with_any_valid_entry). Entries are
        invalidated (mask=0) when above the semihard threshold or outside
        val_range.
        """
        mask = torch.zeros_like(mat)
        mask[(anchor_idx, other_idx)] = 1
        if (semihard_thresholds is not None):
            # semihard: only consider entries strictly below the threshold.
            mask[(mat >= semihard_thresholds.unsqueeze(1))] = 0
        if (val_range is not None):
            mask[((mat > val_range[1]) | (mat < val_range[0]))] = 0
        mat_masked = (mat * mask)
        non_zero_rows = torch.any((mask != 0), dim=1)
        return (torch.max(mat_masked, dim=1), non_zero_rows)

    def get_min_per_row(self, mat, anchor_idx, other_idx, val_range=None, semihard_thresholds=None):
        """Row-wise min over valid entries; invalid entries are set to +inf.

        Returns ((values, indices), rows_with_any_valid_entry).
        """
        pos_inf = c_f.pos_inf(mat.dtype)
        mask = (torch.ones_like(mat) * pos_inf)
        mask[(anchor_idx, other_idx)] = 1
        if (semihard_thresholds is not None):
            # semihard: only consider entries strictly above the threshold.
            mask[(mat <= semihard_thresholds.unsqueeze(1))] = pos_inf
        if (val_range is not None):
            mask[((mat > val_range[1]) | (mat < val_range[0]))] = pos_inf
        non_inf_rows = torch.any((mask != pos_inf), dim=1)
        # Clone so the caller's distance matrix is not mutated.
        mat = mat.clone()
        mat[(mask == pos_inf)] = pos_inf
        return (torch.min(mat, dim=1), non_inf_rows)

    def set_stats(self, positive_dists, negative_dists):
        """Record easiest/hardest pair and triplet statistics (stats only)."""
        if self.collect_stats:
            with torch.no_grad():
                len_pd = len(positive_dists)
                len_pn = len(negative_dists)
                # Triplet stats only make sense when pos/neg are paired per
                # anchor, i.e. neither side used 'all'.
                if ((len_pd > 0) and (len_pn > 0) and (self.ALL not in [self.pos_strategy, self.neg_strategy])):
                    easiest_triplet_func = self.get_func_for_stats(False)
                    hardest_triplet_func = self.get_func_for_stats(True)
                    self.easiest_triplet = easiest_triplet_func((positive_dists - negative_dists)).item()
                    self.hardest_triplet = hardest_triplet_func((positive_dists - negative_dists)).item()
                if (len_pd > 0):
                    easy_pos_func = self.get_func_for_stats(False)
                    hard_pos_func = self.get_func_for_stats(True)
                    self.easiest_pos_pair = easy_pos_func(positive_dists).item()
                    self.hardest_pos_pair = hard_pos_func(positive_dists).item()
                if (len_pn > 0):
                    # Note the flip: for negatives, large distance = easy.
                    easy_neg_func = self.get_func_for_stats(True)
                    hard_neg_func = self.get_func_for_stats(False)
                    self.easiest_neg_pair = easy_neg_func(negative_dists).item()
                    self.hardest_neg_pair = hard_neg_func(negative_dists).item()

    def get_func_for_stats(self, min_if_inverted):
        # Pick torch.min/torch.max consistently with the distance's direction.
        if min_if_inverted:
            return (torch.min if self.distance.is_inverted else torch.max)
        return (torch.max if self.distance.is_inverted else torch.min)
class ColorShape(tc.nn.Module):
    """Randomized augmentation module biased toward color- or shape-altering
    transforms.

    Each entry is (probability, name[, lower, upper]); probabilities in each
    table sum to 1. On forward, one transform is drawn by sampling a uniform
    roll against cumulative-probability intervals.
    """

    # Color-biased table: uniform 1/8 weight on every transform.
    ColorBiased = [(0.125, 'color', 0.1, 1.9), (0.125, 'brightness', 0.5, 1.9), (0.125, 'contrast', 0.5, 1.9), (0.125, 'sharpness', 0.1, 1.9), (0.125, 'autocontrast'), (0.125, 'equalize'), (0.125, 'shear', 0.05, 0.15), (0.125, 'rotate', 1, 11)]
    # Shape-biased table: extra weight and wider ranges on shear/rotate.
    ShapeBiased = [(0.08, 'color', 0.1, 1.9), (0.08, 'brightness', 0.5, 1.9), (0.04, 'contrast', 0.5, 1.9), (0.08, 'sharpness', 0.1, 1.9), (0.04, 'autocontrast'), (0.08, 'equalize'), (0.3, 'shear', 0.05, 0.35), (0.3, 'rotate', 1, 31)]

    def __init__(self, version='color'):
        super().__init__()
        assert (version in ['color', 'shape'])
        space = (self.ColorBiased if (version == 'color') else self.ShapeBiased)
        # Map half-open cumulative-probability intervals to transform specs:
        # {(p_lo, p_hi): (name[, lower, upper])}.
        self.space = {}
        p_accu = 0.0
        for trans in space:
            p = trans[0]
            self.space[(p_accu, (p_accu + p))] = trans[1:]
            p_accu += p

    def transform(self, img, trans):
        """Apply the transform described by *trans* (name[, lower, upper])."""
        if (len(trans) == 1):
            # Parameter-free transform (autocontrast / equalize).
            trans = trans[0]
        else:
            (lower, upper) = trans[1:]
            trans = trans[0]
            if (trans == 'rotate'):
                # Rotation strength is an integer number of degrees.
                strength = tc.randint(lower, upper, (1,)).item()
            else:
                # Continuous strength sampled uniformly from [lower, upper).
                strength = ((tc.rand(1) * (upper - lower)) + lower)
        if (trans == 'color'):
            img = F.adjust_saturation(img, strength)
        elif (trans == 'brightness'):
            img = F.adjust_brightness(img, strength)
        elif (trans == 'contrast'):
            img = F.adjust_contrast(img, strength)
        elif (trans == 'sharpness'):
            img = F.adjust_sharpness(img, strength)
        elif (trans == 'shear'):
            # Random sign, then convert to degrees (range was in radians —
            # NOTE(review): confirm the table values are meant as radians).
            if tc.randint(2, (1,)):
                strength *= (- 1)
            strength = math.degrees(strength)
            # Shear along a randomly chosen single axis.
            strength = ([strength, 0.0] if tc.randint(2, (1,)) else [0.0, strength])
            img = F.affine(img, angle=0.0, translate=[0, 0], scale=1.0, shear=strength, interpolation=Interpolation.NEAREST, fill=0)
        elif (trans == 'rotate'):
            # Random sign for the rotation direction.
            if tc.randint(2, (1,)):
                strength *= (- 1)
            img = F.rotate(img, angle=strength, interpolation=Interpolation.NEAREST, fill=0)
        elif (trans == 'autocontrast'):
            img = F.autocontrast(img)
        elif (trans == 'equalize'):
            img = F.equalize(img)
        return img

    def forward(self, img):
        """Draw one transform by roulette-wheel selection and apply it."""
        roll = tc.rand(1)
        for ((lower, upper), trans) in self.space.items():
            if ((roll <= upper) and (roll >= lower)):
                return self.transform(img, trans)
        # Fall through (roll outside all intervals, e.g. float edge cases):
        # return the image unchanged.
        return img
def read_output(meteor_output_path, n_repeats):
    """Parse METEOR segment scores and average them in groups of C(n_repeats, 2).

    Each group of C(n_repeats, 2) consecutive 'Segment' lines corresponds to
    all pairwise combinations of n_repeats samples. The output file is
    deleted after parsing.

    Returns the list of per-group average scores.
    """
    # Exact integer binomial coefficient; the original float factorial
    # arithmetic could lose precision/overflow for larger n_repeats.
    n_combinations = math.comb(n_repeats, 2)
    raw_scores = []
    average_scores = []
    # Context manager closes the handle deterministically (the original
    # relied on garbage collection to close the file).
    with open(meteor_output_path) as meteor_file:
        for line in meteor_file:
            if not line.startswith('Segment '):
                continue
            # Lines look like 'Segment <id>\t<score>'.
            score = float(line.strip().split('\t')[1])
            raw_scores.append(score)
            if len(raw_scores) == n_combinations:
                average_scores.append(sum(raw_scores) / n_combinations)
                raw_scores = []
    os.remove(meteor_output_path)
    return average_scores
def decode_dxt1_rgb(data, width, height):
    """Decode DXT1/BC1-compressed texture data (no alpha) into a packed
    RGB565 buffer for GL_RGB / GL_UNSIGNED_SHORT_5_6_5 upload.

    data: raw DXT1 blocks, 8 bytes per 4x4 texel block (split_8byte yields
          8 single-character strings per block — Python 2 style bytes).
    width, height: image dimensions; assumed multiples of 4 — TODO confirm.
    """
    out = (ctypes.c_uint16 * (width * height))()
    image_offset = 0
    for (c0_lo, c0_hi, c1_lo, c1_hi, b0, b1, b2, b3) in split_8byte.findall(data):
        # Two 16-bit endpoint colors, little-endian.
        color0 = (ord(c0_lo) | (ord(c0_hi) << 8))
        color1 = (ord(c1_lo) | (ord(c1_hi) << 8))
        # 32 bits of 2-bit selector codes, one per texel of the 4x4 block.
        bits = (((ord(b0) | (ord(b1) << 8)) | (ord(b2) << 16)) | (ord(b3) << 24))
        # Endpoint channel fields as stored (5/6/5 bit packing).
        r0 = (color0 & 31)
        g0 = ((color0 & 2016) >> 5)
        # NOTE: b0/b1 below intentionally shadow the raw selector bytes —
        # 'bits' was already assembled above, so they are no longer needed.
        b0 = ((color0 & 63488) >> 11)
        r1 = (color1 & 31)
        g1 = ((color1 & 2016) >> 5)
        b1 = ((color1 & 63488) >> 11)
        i = image_offset
        for y in range(4):
            for x in range(4):
                code = (bits & 3)
                if (code == 0):
                    out[i] = color0
                elif (code == 1):
                    out[i] = color1
                elif ((code == 3) and (color0 <= color1)):
                    # 3-color mode: code 3 is the black/"transparent" slot.
                    out[i] = 0
                else:
                    if ((code == 2) and (color0 > color1)):
                        # 4-color mode: 2/3 color0 + 1/3 color1.
                        r = (((2 * r0) + r1) // 3)
                        g = (((2 * g0) + g1) // 3)
                        b = (((2 * b0) + b1) // 3)
                    elif ((code == 3) and (color0 > color1)):
                        # 4-color mode: 1/3 color0 + 2/3 color1.
                        r = ((r0 + (2 * r1)) // 3)
                        g = ((g0 + (2 * g1)) // 3)
                        b = ((b0 + (2 * b1)) // 3)
                    else:
                        # 3-color mode midpoint.
                        assert ((code == 2) and (color0 <= color1))
                        r = ((r0 + r1) // 2)
                        g = ((g0 + g1) // 2)
                        b = ((b0 + b1) // 2)
                    out[i] = ((r | (g << 5)) | (b << 11))
                # Consume this texel's 2-bit selector.
                bits >>= 2
                i += 1
            # Jump to the same 4-texel column on the next image row.
            i += (width - 4)
        # Advance 4 texels right; when a block row completes, also skip the
        # 3 image rows already filled (bool advance_row used as 0/1).
        advance_row = (((image_offset + 4) % width) == 0)
        image_offset += (((width * 3) * advance_row) + 4)
    return PackedImageData(width, height, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, out)
def _get_new_season_streams(config, db):
    """Yield raw seasonal streams from every enabled service with an
    installed handler."""
    handlers = services.get_service_handlers()
    for service in db.get_services():
        # Skip services whose handler module is not installed.
        if service.key not in handlers:
            warning('Service handler for {} not installed'.format(service.key))
            continue
        if not service.enabled:
            continue
        handler = handlers.get(service.key)
        info('  Checking {} ({})'.format(handler.name, handler.key))
        # Distinct loop variable (the original shadowed the iterable name).
        for stream in handler.get_seasonal_streams(useragent=config.useragent):
            yield stream
def parse_args():
    """Parse command-line arguments for the mmrotate benchmark script."""
    ap = argparse.ArgumentParser(description='mmrotate benchmark a model')
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint', help='checkpoint file')
    ap.add_argument('--repeat-num', type=int, default=1,
                    help='number of repeat times of measurement for averaging the results')
    ap.add_argument('--max-iter', type=int, default=2000, help='num of max iter')
    ap.add_argument('--log-interval', type=int, default=50, help='interval of logging')
    ap.add_argument('--fuse-conv-bn', action='store_true',
                    help='Whether to fuse conv and bn, this will slightly increasethe inference speed')
    ap.add_argument('--use-fp16', action='store_true', help='Whether to use fp16 to inference')
    ap.add_argument('--cfg-options', nargs='+', action=DictAction,
                    help='override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed.')
    ap.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
                    default='none', help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    parsed = ap.parse_args()
    # Make the rank visible to launchers that read it from the environment,
    # without clobbering a value the launcher already set.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main() -> None:
    """Wire up and run the Telegram echo bot via long polling.

    Handlers (defined elsewhere in this file): /start -> start,
    /help -> help_command, any non-command text -> echo.
    """
    # NOTE: 'TOKEN' is a placeholder — replace with a real bot token.
    application = Application.builder().token('TOKEN').build()
    application.add_handler(CommandHandler('start', start))
    application.add_handler(CommandHandler('help', help_command))
    # Echo plain text only; commands are excluded so they reach the
    # command handlers registered above.
    application.add_handler(MessageHandler((filters.TEXT & (~ filters.COMMAND)), echo))
    # Blocks here, processing updates until interrupted (Ctrl-C).
    application.run_polling(allowed_updates=Update.ALL_TYPES)
class CeilingFan():
    """A ceiling fan receiver with four discrete speed settings.

    Speed constants double as the stored state; each setter announces the
    change on stdout.
    """

    HIGH: Final[int] = 3
    MEDIUM: Final[int] = 2
    LOW: Final[int] = 1
    OFF: Final[int] = 0
    location: str = ''
    speed: int = 0

    def __init__(self, location: str):
        self.location = location

    def _update(self, new_speed: int, label: str) -> None:
        # Shared state change + announcement for all setters.
        self.speed = new_speed
        print(f'{self.location} ceiling fan is {label}')

    def high(self) -> None:
        self._update(self.HIGH, 'on high')

    def medium(self) -> None:
        self._update(self.MEDIUM, 'on medium')

    def low(self) -> None:
        self._update(self.LOW, 'on low')

    def off(self) -> None:
        self._update(self.OFF, 'off')

    def getSpeed(self) -> int:
        return self.speed
# NOTE(review): the stray '.slow' below looks like the tail of a decorator
# (probably '@pytest.mark.slow') that lost its prefix in this copy — restore
# from version control; as written this line is a syntax error.
.slow
def test_pinnacle_cli_missing_trial(data):
    """Exporting with a nonexistent trial name should report it and not crash.

    data: pytest fixture providing the test-data root (defined elsewhere).
    """
    output_path = tempfile.mkdtemp()
    for pinn_dir in data.joinpath('Pt1').joinpath('Pinnacle').iterdir():
        # Build: <python> -m pymedphys pinnacle export -o <out> -t nonexistenttrial <dir>
        command = (([str(pmp_test_utils.get_executable_even_when_embedded()), '-m'] + 'pymedphys pinnacle export'.split()) + ['-o', output_path, '-t', 'nonexistenttrial', pinn_dir.as_posix()])
        cli_output = str(subprocess.check_output(command))
        assert ('No Trial: nonexistenttrial found in Plan' in cli_output)
class HarmonicPotential(PotentialBase):
    """Harmonic-oscillator fit V(r) = (k/2)(r - r_0)^2 + m_shift for a
    diatomic molecule's potential-energy surface.

    Fit parameters (k, r_0, m_shift) are populated by :meth:`fit`; derived
    spectroscopic quantities are computed from them and the two atomic masses.
    """

    def __init__(self, molecule: Molecule) -> None:
        # Fit parameters, filled in by fit().
        self.k = 0.0          # force constant
        self.m_shift = 0.0    # vertical energy offset
        self.r_0 = 0.0        # equilibrium bond length
        self.d_e: Optional[float] = None  # well depth estimated from the data
        if (molecule.masses is not None):
            self._m_a = molecule.masses[0]
            self._m_b = molecule.masses[1]
        else:
            raise ValueError('Molecule masses need to be provided')

    # Fix: restored @staticmethod. Without it, self.fit_function(x, k, r_0,
    # m_shift) below would implicitly prepend the instance (5 args for a
    # 4-parameter function) and raise TypeError, and curve_fit would receive
    # a bound method with the wrong signature.
    @staticmethod
    def fit_function(x: float, k: float, r_0: float, m_shift: float) -> float:
        """Harmonic model evaluated at x."""
        return (((k / 2) * ((x - r_0) ** 2)) + m_shift)

    def eval(self, x: float) -> float:
        """Evaluate the fitted potential at x."""
        return self.fit_function(x, self.k, self.r_0, self.m_shift)

    def update_molecule(self, molecule: Molecule) -> Molecule:
        """Update the cached atomic masses from a new molecule.

        NOTE(review): annotated to return Molecule but (like upstream) falls
        through returning None — confirm callers do not use the return value.
        """
        super().update_molecule(molecule)
        if (len(molecule.masses) != 2):
            raise ValueError('Harmonic potential only works for diatomic molecules!')
        self._m_a = molecule.masses[0]
        self._m_b = molecule.masses[1]

    def fit(self, xdata: List[float], ydata: List[float], initial_vals: Optional[List[float]]=None, bounds_list: Optional[Tuple[(List[float], List[float])]]=None) -> None:
        """Least-squares fit of the harmonic model to (xdata, ydata).

        initial_vals / bounds_list override the default starting point and
        parameter bounds for (k, r_0, m_shift).
        """
        h_p0 = (initial_vals if (initial_vals is not None) else np.array([0.2, 0.735, 1.5]))
        h_bounds = (bounds_list if (bounds_list is not None) else ([0, (- 1), (- 2)], [2, 3.0, 2]))
        xdata_fit = xdata
        ydata_fit = ydata
        (fit, _) = curve_fit(self.fit_function, xdata_fit, ydata_fit, p0=h_p0, maxfev=100000, bounds=h_bounds)
        self.k = fit[0]
        self.r_0 = fit[1]
        self.m_shift = fit[2]
        # Empirical well depth: energy span of the sampled curve.
        self.d_e = (max(ydata) - min(ydata))

    def get_equilibrium_geometry(self, scaling: float=1.0) -> float:
        """Return the fitted equilibrium bond length, optionally rescaled."""
        return (self.r_0 * scaling)

    def get_minimal_energy(self, scaling: float=1.0) -> float:
        """Return the fitted energy offset at the minimum, optionally rescaled."""
        return (self.m_shift * scaling)

    def dissociation_energy(self, scaling: float=1.0) -> float:
        """Return D_0 = D_e - E_vib(0), optionally rescaled.

        Prefers the data-derived well depth d_e when available; otherwise
        uses the force constant scaled by 1e20 — NOTE(review): confirm the
        unit conversion implied by that factor.
        """
        k = (self.k * 1e+20)
        if (self.d_e is not None):
            k = self.d_e
        diss_nrg = (k - self.vibrational_energy_level(0))
        return (diss_nrg * scaling)

    def fundamental_frequency(self) -> float:
        """Return the classical oscillation frequency nu_0 = sqrt(k/m_r)/2pi
        (k converted from Hartree/A^2 to J/m^2, m_r the reduced mass)."""
        k = ((self.k * const.HARTREE_TO_J) * 1e+20)
        m_r = ((self._m_a * self._m_b) / (self._m_a + self._m_b))
        omega_0 = (np.sqrt((k / m_r)) / (2 * np.pi))
        return omega_0

    def wave_number(self) -> int:
        """Return the fundamental wavenumber (frequency / c in cm/s).

        NOTE(review): annotation says int but the division yields float,
        matching upstream — confirm before tightening.
        """
        return (self.fundamental_frequency() / const.C_CM_PER_S)

    def vibrational_energy_level(self, n: int) -> float:
        """Return E_n = h*nu_0*(n + 1/2) in Hartree."""
        omega_0 = self.fundamental_frequency()
        e_n = ((const.H_J_S * omega_0) * (n + 0.5))
        return (e_n * const.J_TO_HARTREE)

    # Fix: restored @classmethod — the method takes 'cls' but had no
    # decorator, so instance calls would have bound the instance as 'cls'
    # and shifted every argument.
    @classmethod
    def process_fit_data(cls, xdata: List[float], ydata: List[float]) -> Tuple[(list, list)]:
        """Trim (xdata, ydata) to the points around the curve minimum.

        Keeps every sample at the minimum energy plus the immediate x
        neighbors on each side, sorted by x.
        """
        sort_ind = np.argsort(xdata)
        ydata_s = ydata[sort_ind]
        xdata_s = xdata[sort_ind]
        min_y = min(ydata_s)
        # All indices at the minimum energy (there may be duplicates in x).
        x_min = np.where((ydata_s == min_y))[0]
        all_of_min = np.array([], dtype=int)
        for i in x_min:
            all_of_min = np.concatenate((all_of_min, np.where((xdata_s == xdata_s[i]))[0]))
        # One x-neighbor to the left of the minimum region, if any.
        left_of_min = []
        if (min(all_of_min) > 0):
            left_of_min = np.where((xdata_s == xdata_s[(min(all_of_min) - 1)]))[0]
        # One x-neighbor to the right of the minimum region, if any.
        right_of_min = []
        if (max(all_of_min) < (xdata_s.size - 1)):
            right_of_min = np.where((xdata_s == xdata_s[(max(all_of_min) + 1)]))[0]
        inds = np.concatenate((left_of_min, all_of_min, right_of_min))
        return (xdata_s[inds], ydata_s[inds])
# NOTE(review): this bare tuple (and the identical ones before test_amp,
# test_transformer_amp and test_levenshtein_transformer below) look like
# truncated decorators — almost certainly '@unittest.skipIf(...)' that lost
# its prefix in this copy. Confirm against version control.
((not torch.cuda.is_available()), 'test requires a GPU')

class TestTranslationGPU(unittest.TestCase):
    """GPU integration tests for fairseq translation training
    (fp16, AMP, FSDP sharding, checkpoint resume/generate)."""

    def setUp(self):
        # Silence fairseq's verbose logging for the duration of each test.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fp16_multigpu(self):
        """fp16 training across up to 2 GPUs trains, generates, and logs."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                log = os.path.join(data_dir, 'train.log')
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16', '--log-file', log], world_size=min(torch.cuda.device_count(), 2))
                generate_main(data_dir)
                assert os.path.exists(log)

    # NOTE(review): defined without 'self' yet invoked as self.parse_logs(log)
    # below, which passes the instance as 'logfile' plus an extra argument —
    # this likely lost a @staticmethod decorator (or was module-level
    # upstream). Confirm against version control.
    def parse_logs(logfile):
        """Parse a JSON-lines train log, skipping non-JSON lines."""
        logs = []
        for ln in open(logfile, 'r').readlines():
            try:
                logs.append(json.loads(ln))
            except json.JSONDecodeError:
                continue
        return logs

    def test_resume_training_fsdp(self):
        self._test_resume_training(['--ddp-backend', 'fully_sharded'])

    def test_resume_training_fsdp_sharded_state(self):
        self._test_resume_training(['--ddp-backend', 'fully_sharded', '--use-sharded-state'])

    def test_resume_training_noc10d(self):
        self._test_resume_training([])

    def _test_resume_training(self, extra_clargs, arch='fconv_iwslt_de_en'):
        """Train to update 10, resume from the update-2 checkpoint, and check
        the resumed metrics match the from-scratch run."""
        flags = (['--fp16', '--log-format', 'json', '--max-update', '10', '--save-interval-updates', '2', '--log-interval', '1'] + extra_clargs)
        world_size = min(torch.cuda.device_count(), 2)
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                log = os.path.join(data_dir, 'train.log')
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, arch, (flags + ['--log-file', log]), world_size=world_size)
                log2 = os.path.join(data_dir, 'resume.log')
                # Resume from the checkpoint saved after 2 updates.
                restore_file = os.path.join(data_dir, 'checkpoint_1_2.pt')
                train_translation_model(data_dir, arch, (flags + ['--log-file', log2, '--restore-file', restore_file]), world_size=world_size)
                l1 = self.parse_logs(log)
                l2 = self.parse_logs(log2)
                # The resumed run must start at update 3 ...
                assert (int(l2[0]['num_updates']) == 3), f'''{l1}
{l2}'''
                # ... and end with identical training metrics.
                for k in ['train_loss', 'train_num_updates', 'train_ppl', 'train_gnorm']:
                    (from_scratch, resumed) = (l1[(- 1)][k], l2[(- 1)][k])
                    assert (from_scratch == resumed), f'difference at {k} {from_scratch} != {resumed}'

    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
                generate_main(data_dir)

    def test_transformer_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '64', '--decoder-embed-dim', '64', '--fp16'], run_validation=True)
                generate_main(data_dir)

    # NOTE(review): truncated decorator — see note at top of this block.
    ((not torch.cuda.is_available()), 'test requires a GPU')
    def test_amp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_amp') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--amp'])
                generate_main(data_dir)

    # NOTE(review): truncated decorator — see note at top of this block.
    ((not torch.cuda.is_available()), 'test requires a GPU')
    def test_transformer_amp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', ['--encoder-layers', '2', '--decoder-layers', '2', '--encoder-embed-dim', '64', '--decoder-embed-dim', '64', '--amp'], run_validation=True)
                generate_main(data_dir)

    # NOTE(review): truncated decorator — see note at top of this block.
    ((not torch.cuda.is_available()), 'test requires a GPU')
    def test_levenshtein_transformer(self):
        """Train a Levenshtein transformer and generate with both a single
        checkpoint and an ensembled (duplicated) checkpoint path."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'levenshtein_transformer', ['--apply-bert-init', '--early-exit', '6,6,6', '--criterion', 'nat_loss'], task='translation_lev')
                gen_config = ['--task', 'translation_lev', '--iter-decode-max-iter', '9', '--iter-decode-eos-penalty', '0', '--print-step']
                generate_main(data_dir, gen_config)
                generate_main(data_dir, gen_config, path=os.pathsep.join([os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'checkpoint_last.pt')]))

    def test_fsdp_checkpoint_generate(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fsdp_sharded') as data_dir:
                log = os.path.join(data_dir, 'train.log')
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                world_size = min(torch.cuda.device_count(), 2)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--log-file', log, '--ddp-backend', 'fully_sharded'], world_size=world_size)
                generate_main(data_dir)
                assert os.path.exists(log)

    def test_fsdp_sharded_checkpoint_generate(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fsdp_sharded') as data_dir:
                log = os.path.join(data_dir, 'train.log')
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                world_size = min(torch.cuda.device_count(), 2)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--log-file', log, '--ddp-backend', 'fully_sharded', '--use-sharded-state'], world_size=world_size)
                # Generation must reassemble the per-rank checkpoint shards.
                generate_main(data_dir, ['--checkpoint-shard-count', str(world_size)])
                assert os.path.exists(log)
def main():
    """Run the NOTEARS nonlinear demo end to end.

    Simulates a random DAG and nonlinear SEM data, fits a NOTEARS MLP,
    checks the estimate is a DAG, and writes the true graph, the data,
    and the estimated graph to CSV files in the working directory.
    """
    torch.set_default_dtype(torch.double)
    np.set_printoptions(precision=3)

    import notears.utils as ut
    ut.set_random_seed(123)

    # Problem size: 200 samples, 5 nodes, 9 expected edges, ER graph, MIM SEM.
    n, d, s0 = 200, 5, 9
    graph_type, sem_type = 'ER', 'mim'

    B_true = ut.simulate_dag(d, s0, graph_type)
    np.savetxt('W_true.csv', B_true, delimiter=',')

    X = ut.simulate_nonlinear_sem(B_true, n, sem_type)
    np.savetxt('X.csv', X, delimiter=',')

    model = NotearsMLP(dims=[d, 10, 1], bias=True)
    W_est = notears_nonlinear(model, X, lambda1=0.01, lambda2=0.01)
    assert ut.is_dag(W_est)
    np.savetxt('W_est.csv', W_est, delimiter=',')

    acc = ut.count_accuracy(B_true, (W_est != 0))
    print(acc)
def test_import_visitor():
    """ImportVisitor must build a namespace exposing every import style.

    The source below exercises plain imports, aliased imports, dotted
    module imports, ``from``-imports of submodules and attributes (with
    and without aliases), and multi-name ``from``-imports.
    """
    source = 'import operator\nimport itertools as itools\n\nimport urllib.parse\nimport tests.arbpack.arbmod as z\n\n# from mod import submod\nfrom tests.arbpack import arbmod2\nfrom tests.arbpack import arbmod3 as ab3\nfrom tests.arbpack import arbmod4_avoid\n\n# from mod import attr\nfrom decimal import Decimal\nfrom fractions import Fraction as myfraction\n\nfrom math import ceil, floor\nfrom tests.arbpack.arbmultiattr import arb_attr as x, arb_func as y\n'
    visitor = moduleloader.ImportVisitor()
    ns = visitor.get_namespace(source)
    # 13 bound names: operator, itools, urllib, z, arbmod2, ab3,
    # arbmod4_avoid, Decimal, myfraction, ceil, floor, x, y.
    assert (len(ns) == 13)
    # NOTE(review): the ``urlhost`` line inside the snippet below looks
    # truncated by extraction (the URL argument to urlparse and an
    # ``ab2 =`` assignment appear to be missing) -- confirm against the
    # original test source before relying on this literal.
    exec_me = "preexisting = 'updated'\narb = len('hello')\n\nmodded = operator.mod(6, 4)\nprod = list(itools.product('AB', 'ab'))\nurlhost = urllib.parse.urlparse(' = arbmod2.arb_func_in_arbmod2('ab2 value')\nab3_out = ab3.arb_func_in_arbmod3(123)\nab4 = arbmod4_avoid.arb_func_in_arbmod4(True)\ndec = int(Decimal(4).sqrt())\nfrac = myfraction(1, 3).denominator\n\nceiling = ceil(6.1)\nthefloor = floor(6.1)\narb_attr_out = x\nfunc_res = y('test me')\n "
    # Execute the snippet with the visitor-built namespace as globals;
    # ``locals`` captures the assignments the snippet makes.
    locals = {'preexisting': 'initial value'}
    exec(exec_me, ns, locals)
    assert (locals['preexisting'] == 'updated')
    assert (locals['arb'] == 5)
    assert (locals['modded'] == 2)
    assert (locals['prod'] == [('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
    assert (locals['urlhost'] == 'arbhost')
    assert (locals['ab2'] == 'ab2 value')
    assert (locals['ab4'] is True)
    assert (locals['ab3_out'] == 123)
    assert (locals['dec'] == 2)
    assert (locals['frac'] == 3)
    assert (locals['ceiling'] == 7)
    assert (locals['thefloor'] == 6)
    assert (locals['arb_attr_out'] == 123.456)
    assert (locals['func_res'] == 'test me')
class LoginForm(Form):
    """Login form with email and password inputs wired to the session's log_in event."""

    def __init__(self, view, login_session):
        super().__init__(view, 'login')
        self.use_layout(FormLayout())
        # Surface a failed login attempt (domain exception) above the inputs.
        if self.exception:
            self.layout.add_alert_for_domain_exception(self.exception)
        self.layout.add_input(TextInput(self, login_session.fields.email_address))
        self.layout.add_input(PasswordInput(self, login_session.fields.password))
        # Register the event handler before the Button that references the event.
        self.define_event_handler(login_session.events.log_in)
        self.add_child(Button(self, login_session.events.log_in, style='primary'))
class APISession(requests.Session):
    """requests.Session that resolves relative paths against base_url and
    raises on HTTP error statuses."""

    # NOTE(review): the URL literal below appears truncated by extraction
    # (unterminated string) -- restore the original base URL from upstream.
    base_url = '

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Ask for JSON responses and identify this script in the User-Agent.
        self.headers.update({'Accept': 'application/json', 'User-Agent': f'pythondotorg/create_initial_data ({requests.utils.default_user_agent()})'})

    def request(self, method, url, **kwargs):
        """Resolve *url* against ``base_url``, send, and fail fast on HTTP errors."""
        url = urljoin(self.base_url, url)
        response = super().request(method, url, **kwargs)
        response.raise_for_status()
        return response
def _dataclass_from_dict(klass, in_val):
    """Recursively build an instance of dataclass *klass* from a plain dict.

    Non-dataclass targets return *in_val* unchanged.  Field types that
    expose a ``from_dict`` hook are delegated to; all other fields recurse.
    A key in *in_val* with no matching field raises KeyError, as before.
    """
    if not is_dataclass(klass):
        # Leaf value: nothing to convert.
        return in_val
    field_types = {f.name: f.type for f in fields(klass)}
    kwargs = {}
    for key in in_val:
        target = field_types.get(key)
        if target is not None and hasattr(target, 'from_dict'):
            kwargs[key] = target.from_dict(in_val[key])
        else:
            # Deliberate strict lookup: unknown keys raise KeyError here,
            # matching the original behavior.
            kwargs[key] = _dataclass_from_dict(field_types[key], in_val[key])
    return klass(**kwargs)
def find_dynamicsymbols(expression, exclude=None):
    """Return the dynamicsymbols (and their derivatives) in *expression*.

    A candidate qualifies when its only free symbol is the dynamicsymbols
    time variable.  Anything listed in *exclude* is removed from the result.

    Raises
    ------
    TypeError
        If *exclude* is truthy but not iterable.
    """
    time_set = {dynamicsymbols._t}
    exclude_set = set()
    if exclude:
        if not iterable(exclude):
            raise TypeError('exclude kwarg must be iterable')
        exclude_set = set(exclude)
    candidates = expression.atoms(AppliedUndef, sm.Derivative)
    return {sym for sym in candidates if sym.free_symbols == time_set} - exclude_set
def json_content():
    """Return a canned GitLab events payload: one 'marked' MergeRequest event."""
    project = {
        'id': 2,
        'name': 'Gitlab Ce',
        'name_with_namespace': 'Gitlab Org / Gitlab Ce',
        'path': 'gitlab-ce',
        'path_with_namespace': 'gitlab-org/gitlab-ce',
    }
    # author and assignee are intentionally separate dict objects so callers
    # mutating one do not affect the other.
    author = {'name': 'Administrator', 'username': 'root', 'id': 1}
    assignee = {'name': 'Administrator', 'username': 'root', 'id': 1}
    target = {'id': 34, 'iid': 7, 'project_id': 2, 'assignee': assignee}
    event = {
        'id': 102,
        'project': project,
        'author': author,
        'action_name': 'marked',
        'target_type': 'MergeRequest',
        'target': target,
    }
    return [event]
class PythonCause(IncompatibilityCause):
    """Incompatibility cause: a package requires a Python version that the
    project's root Python constraint does not allow."""

    def __init__(self, python_version: str, root_python_version: str) -> None:
        self._python_version = python_version
        self._root_python_version = root_python_version

    # NOTE(review): these two accessors read like stripped ``@property``
    # definitions -- confirm against upstream whether callers expect
    # attribute access or method calls.
    def python_version(self) -> str:
        return self._python_version

    def root_python_version(self) -> str:
        return self._root_python_version
class CalcAddCargoCommand(wx.Command):
    """Undoable command that adds cargo to a fit, merging with an existing stack."""

    def __init__(self, fitID, cargoInfo):
        wx.Command.__init__(self, True, 'Add Cargo')
        # ID of the fit being modified and the item/amount to add.
        self.fitID = fitID
        self.cargoInfo = cargoInfo

    def Do(self):
        pyfalog.debug('Doing addition of cargo {} to fit {}'.format(self.cargoInfo, self.fitID))
        fit = Fit.getInstance().getFit(self.fitID)
        # If this item already sits in cargo, just bump its amount.
        cargo = next((c for c in fit.cargo if (c.itemID == self.cargoInfo.itemID)), None)
        if (cargo is not None):
            cargo.amount += self.cargoInfo.amount
        else:
            cargo = self.cargoInfo.toCargo()
            fit.cargo.append(cargo)
        # Sanity check that the cargo really ended up in the list.
        if (cargo not in fit.cargo):
            pyfalog.warning('Failed to append to list')
            return False
        return True

    def Undo(self):
        pyfalog.debug('Undoing addition of cargo {} to fit {}'.format(self.cargoInfo, self.fitID))
        # Undo by delegating to the inverse (remove) command.
        from .remove import CalcRemoveCargoCommand
        cmd = CalcRemoveCargoCommand(fitID=self.fitID, cargoInfo=self.cargoInfo)
        return cmd.Do()
class ShardedTensorIOPreparerTest(unittest.TestCase):
    """Tests for ShardedTensorIOPreparer.subdivide_shard."""

    def _verify_subdivided_shards(self, subdivided: List[Tuple[(torch.Tensor, List[int], List[int])]], dim: int, expected_num_sub_shards: int, expected_combined: torch.Tensor, expected_offsets: List[int], expected_sizes: List[int]) -> None:
        """Check that the sub-shards tile the original shard contiguously along *dim*.

        Walks the (view, offsets, sizes) triples in order, verifying that
        consecutive sub-shards are adjacent along *dim* and identical in
        every other dimension, then re-concatenates the views and compares
        the result and the accumulated offsets/sizes against expectations.
        """
        (_, offsets, sizes) = copy.deepcopy(subdivided[0])
        for (_, sub_offsets, sub_sizes) in subdivided[1:]:
            self.assertEqual(len(offsets), len(sub_offsets))
            self.assertEqual(len(sizes), len(sub_sizes))
            for i in range(len(offsets)):
                if (i != dim):
                    # Dimensions other than the split one must be untouched.
                    self.assertEqual(sub_offsets[i], offsets[i])
                    self.assertEqual(sub_sizes[i], sizes[i])
            # Each sub-shard must begin exactly where the accumulated span ends.
            self.assertEqual(sub_offsets[dim], (offsets[dim] + sizes[dim]))
            sizes[dim] += sub_sizes[dim]
        sub_views = [sub_view for (sub_view, _, _) in subdivided]
        combined = torch.concat(sub_views, dim)
        self.assertEqual(len(subdivided), expected_num_sub_shards)
        self.assertTrue(torch.allclose(combined, expected_combined))
        self.assertEqual(offsets, expected_offsets)
        self.assertEqual(sizes, expected_sizes)

    def test_subdivide_shard(self) -> None:
        tensor = torch.randn(256, 256)
        # Budget below one row (256 floats, presumably 1024 bytes at float32):
        # expect one row per sub-shard.
        subdivided = ShardedTensorIOPreparer.subdivide_shard(shard=tensor, offsets=[512, 0], sizes=[256, 256], dim=0, max_shard_sz_bytes=77)
        self._verify_subdivided_shards(subdivided=subdivided, dim=0, expected_num_sub_shards=256, expected_combined=tensor, expected_offsets=[512, 0], expected_sizes=[256, 256])
        # Still under two rows: again 256 single-row sub-shards.
        subdivided = ShardedTensorIOPreparer.subdivide_shard(shard=tensor, offsets=[512, 0], sizes=[256, 256], dim=0, max_shard_sz_bytes=1999)
        self._verify_subdivided_shards(subdivided=subdivided, dim=0, expected_num_sub_shards=256, expected_combined=tensor, expected_offsets=[512, 0], expected_sizes=[256, 256])
        # Three rows fit per sub-shard: ceil(256 / 3) == 86 sub-shards.
        subdivided = ShardedTensorIOPreparer.subdivide_shard(shard=tensor, offsets=[512, 0], sizes=[256, 256], dim=0, max_shard_sz_bytes=4001)
        self._verify_subdivided_shards(subdivided=subdivided, dim=0, expected_num_sub_shards=86, expected_combined=tensor, expected_offsets=[512, 0], expected_sizes=[256, 256])
        # Same byte budget but splitting along dim 1 instead.
        subdivided = ShardedTensorIOPreparer.subdivide_shard(shard=tensor, offsets=[0, 512], sizes=[256, 256], dim=1, max_shard_sz_bytes=4001)
        self._verify_subdivided_shards(subdivided=subdivided, dim=1, expected_num_sub_shards=86, expected_combined=tensor, expected_offsets=[0, 512], expected_sizes=[256, 256])
        # Budget larger than the whole shard: no subdivision at all.
        subdivided = ShardedTensorIOPreparer.subdivide_shard(shard=tensor, offsets=[512, 0], sizes=[256, 256], dim=0, max_shard_sz_bytes=300000)
        self._verify_subdivided_shards(subdivided=subdivided, dim=0, expected_num_sub_shards=1, expected_combined=tensor, expected_offsets=[512, 0], expected_sizes=[256, 256])
def create_new_database(game_enum: RandovaniaGame, output_path: Path) -> GameDescription:
    """Create a minimal single-region game database for *game_enum* and write it out.

    Builds a resource database (weapon, victory key, health item acting as
    energy tank), a dock weakness database with one usable 'Door' type and a
    placeholder 'Other' type, and a single region/area containing only a
    spawn node.  The description is serialized as split JSON files plus a
    human-readable dump under *output_path*, and returned.
    """
    items = [ItemResourceInfo(0, 'Powerful Weapon', 'Weapon', 1), ItemResourceInfo(1, 'Victory Key', 'VictoryKey', 1), ItemResourceInfo(2, 'Health', 'Health', 500)]
    # items[-1] (Health) doubles as the energy tank item.
    resource_database = ResourceDatabase(game_enum=game_enum, item=items, event=[], trick=[], damage=[], version=[], misc=[], requirement_template={}, damage_reductions={}, energy_tank_item=items[(- 1)], base_damage_reduction=default_base_damage_reduction)
    dock_types = [DockType('Door', 'Door', frozendict()), DockType('Other', 'Other', frozendict())]
    # Placeholder weakness for docks whose behavior has not been filled in.
    impossible_weak = DockWeakness(0, 'Not Determined', frozendict(), Requirement.impossible(), None)
    dock_weakness_database = DockWeaknessDatabase(dock_types, weaknesses={dock_types[0]: {'Normal': DockWeakness(0, 'Normal', frozendict(), Requirement.trivial(), None)}, dock_types[1]: {'Not Determined': impossible_weak}}, dock_rando_params={}, default_weakness=(dock_types[1], impossible_weak), dock_rando_config=DockRandoConfig(force_change_two_way=False, resolver_attempts=100, to_shuffle_proportion=1.0))
    # Lone spawn node; also serves as the starting location anchor.
    intro_node = GenericNode(identifier=NodeIdentifier.create('Main', 'First Area', 'Spawn Point'), node_index=0, heal=False, location=None, description='', layers=('default',), extra={}, valid_starting_location=True)
    # Victory condition: simply holding the Victory Key.
    game_db = GameDescription(game=game_enum, dock_weakness_database=dock_weakness_database, resource_database=resource_database, layers=('default',), victory_condition=ResourceRequirement.simple(items[1]), starting_location=intro_node.identifier, initial_states={}, minimal_logic=None, region_list=RegionList([Region(name='Main', areas=[Area(name='First Area', nodes=[intro_node], connections={intro_node: {}}, extra={})], extra={})]))
    data = data_writer.write_game_description(game_db)
    data_writer.write_as_split_files(data, output_path)
    pretty_print.write_human_readable_game(game_db, output_path)
    return game_db
class Effect6701(BaseEffect):
    """Passive effect: scales the drawback of Projectile Weapon rigs by the
    source's skill level times its rigDrawbackBonus attribute."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        skill_level = src.level
        drawback_bonus = src.getModifiedItemAttr('rigDrawbackBonus') * skill_level
        fit.modules.filteredItemBoost(
            (lambda mod: mod.item.group.name == 'Rig Projectile Weapon'),
            'drawback',
            drawback_bonus,
            **kwargs)
def test_refund_transfer_with_reroute():
    """After a refund, the initiator cancels the first route and retries on a
    different route with a fresh secret and fee-adjusted amounts."""
    transfer_amount = TokenAmount(1000)
    block_number = BlockNumber(10)
    our_address = factories.ADDR
    (refund_pkey, refund_address) = factories.make_privkey_address()
    prng = random.Random()
    transfer_description = create(TransferDescriptionProperties(secret=UNIT_SECRET, amount=transfer_amount))
    channels = channels_setup(TokenAmount((transfer_amount * 2)), our_address, refund_address)
    # Distinct per-route fees so the chosen route is observable in amounts.
    fee_1 = 20
    fee_3 = 40
    routes = channels.get_routes()
    routes[0].estimated_fee = fee_1
    routes[1].estimated_fee = 30
    routes[2].estimated_fee = fee_3
    init = ActionInitInitiator(transfer=transfer_description, routes=routes)
    initial_state = None
    iteration = initiator_manager.state_transition(payment_state=initial_state, state_change=init, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=prng, block_number=block_number)
    current_state = iteration.new_state
    initiator_state = get_transfer_at_index(current_state, 0)
    original_transfer = initiator_state.transfer
    # The initial locked amount must include the first route's fee (+ margin).
    amount_with_fee_and_margin1 = calculate_safe_amount_with_fee(transfer_amount, fee_1)
    assert (original_transfer.balance_proof.locked_amount == amount_with_fee_and_margin1)
    assert (original_transfer.lock.amount == amount_with_fee_and_margin1)
    # The partner sends the transfer back as a refund.
    refund_transfer = factories.create(factories.LockedTransferSignedStateProperties(amount=original_transfer.balance_proof.locked_amount, initiator=our_address, target=original_transfer.target, expiration=original_transfer.lock.expiration, payment_identifier=original_transfer.payment_identifier, canonical_identifier=channels[0].canonical_identifier, sender=refund_address, pkey=refund_pkey))
    assert (channels[0].partner_state.address == refund_address)
    state_change = ReceiveTransferCancelRoute(transfer=refund_transfer, balance_proof=refund_transfer.balance_proof, sender=refund_transfer.balance_proof.sender)
    iteration = initiator_manager.state_transition(payment_state=current_state, state_change=state_change, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=prng, block_number=block_number)
    assert (iteration.new_state is not None)
    route_cancelled = search_for_item(iteration.events, EventUnlockFailed, {})
    route_failed = search_for_item(iteration.events, EventRouteFailed, {})
    assert route_cancelled, 'The previous transfer must be cancelled'
    assert route_failed, 'Must emit event that the first route failed'
    # Reroute with a new secret: a fresh locked transfer should be emitted.
    state_change = ActionTransferReroute(transfer=refund_transfer, balance_proof=refund_transfer.balance_proof, sender=refund_transfer.balance_proof.sender, secret=random_secret())
    iteration = initiator_manager.state_transition(payment_state=current_state, state_change=state_change, channelidentifiers_to_channels=channels.channel_map, addresses_to_channel=channels.addresses_to_channel(), pseudo_random_generator=prng, block_number=block_number)
    new_transfer = search_for_item(iteration.events, SendLockedTransfer, {})
    assert new_transfer, 'No mediated transfer event emitted, should have tried a new route'
    msg = 'the new transfer must use a new secret / secrethash'
    assert (new_transfer.transfer.lock.secrethash != refund_transfer.lock.secrethash), msg
    # NOTE(review): the assertions below assume the retry picks the fee_3
    # route (index 2) -- confirm route-selection order if this flakes.
    amount_with_fee_and_margin3 = calculate_safe_amount_with_fee(transfer_amount, fee_3)
    assert (new_transfer.transfer.balance_proof.locked_amount == amount_with_fee_and_margin3)
    assert (new_transfer.transfer.lock.amount == amount_with_fee_and_margin3)
    initiator_state = get_transfer_at_index(iteration.new_state, 0)
    assert (initiator_state is not None)
def measure_with_final_permutation(circuit: cirq.Circuit, qubits: List[cirq.Qid], *, mutate=False) -> Tuple[(cirq.Circuit, List[cirq.Qid])]:
    """Strip a trailing qubit-permutation moment and append a terminal measurement.

    Returns the measured circuit together with *qubits* re-ordered per the
    removed permutation, so callers can map measurement results back to
    logical qubits.

    Args:
        circuit: Circuit without measurements; may end in a permutation moment.
        qubits: Qubits to measure, in logical order.
        mutate: If True, modify *circuit* in place instead of copying it.

    Raises:
        ValueError: If the circuit already contains measurements.
    """
    if mutate:
        c2 = circuit
    else:
        c2 = circuit.copy()
    (mom_classes, stats) = validate_well_structured(c2, allow_terminal_permutations=True)
    if stats.has_measurement:
        raise ValueError('Circuit already has measurements')
    mapping = {}
    if stats.has_permutation:
        # Fold the final permutation moment into a qubit mapping, then drop
        # that moment from the circuit.
        for op in c2.moments[(- 1)].operations:
            if isinstance(op.gate, cirq.QubitPermutationGate):
                permuted_qs = op.qubits
                gate = op.gate
                for (i, q) in enumerate(permuted_qs):
                    mapping[q] = permuted_qs[gate.permutation[i]]
        c2.moments.pop((- 1))
    final_qubits = [mapping.get(q, q) for q in qubits]
    # Measure in the original qubit order; *final_qubits* carries the
    # permutation needed to interpret the results.
    c2.append(cirq.measure(*qubits, key='z'))
    return (c2, final_qubits)
def test_CheckParameter():
    """The logp graph must guard its parameters: evaluating a normal RV's
    logp with a non-positive sigma raises ParameterValueError."""
    mu = pt.constant(0)
    sigma = pt.scalar('sigma')
    x_rv = pt.random.normal(mu, sigma, name='x')
    x_vv = pt.constant(0)
    x_logp = logp(x_rv, x_vv)
    x_logp_fn = function([sigma], x_logp)
    # sigma = -1 violates the sigma > 0 parameter check.
    with pytest.raises(ParameterValueError, match='sigma > 0'):
        x_logp_fn((- 1))
def test_tree_set():
    """Setting two leaves must make range sums reflect exactly those leaves."""
    tree = SumSegmentTree(4)
    tree[2] = 1.0
    tree[3] = 3.0
    # (start, end) -> expected range sum; (None, None) means the full sum.
    expectations = {
        (None, None): 4.0,
        (0, 2): 0.0,
        (0, 3): 1.0,
        (2, 3): 1.0,
        (2, -1): 1.0,
        (2, 4): 4.0,
    }
    for (start, end), expected in expectations.items():
        if start is None:
            actual = tree.sum()
        else:
            actual = tree.sum(start, end)
        assert np.isclose(actual, expected)
def get_dataset(dataset: str, split: str) -> Dataset:
    """Return the dataset named *dataset*, restricted to *split*.

    Args:
        dataset: One of 'imagenet', 'imagenet32', 'cifar10', 'cifar10_vit'.
        split: Split name forwarded to the dataset constructor (e.g. 'train').

    Raises:
        ValueError: If *dataset* is not a recognized name.  (Previously an
            unknown name silently fell through and returned None.)
    """
    if dataset == 'imagenet':
        return _imagenet(split)
    if dataset == 'imagenet32':
        return _imagenet32(split)
    if dataset == 'cifar10':
        return _cifar10(split)
    if dataset == 'cifar10_vit':
        return _cifar10vit(split)
    raise ValueError(f'unknown dataset: {dataset!r}')
class FloorplanGenerator():
    """Registers the floorplan property group on the Blender scene and builds
    floorplans from explicit or randomized property dictionaries."""

    # Cached dict snapshot of the scene's floorplan properties.
    _props = None

    def __init__(self):
        self.context = bpy.context
        self.scene = bpy.context.scene
        self._register()

    def __del__(self):
        self._unregister()

    def _unregister(self):
        # BUG FIX: this method was missing ``self``, so the call from
        # __del__ raised TypeError and the property was never removed.
        del bpy.types.Scene.prop_floorplan

    def _register(self):
        try:
            bpy.utils.register_class(FloorplanProperty)
        except ValueError:
            # Already registered (e.g. repeated instantiation) -- fine.
            pass
        bpy.types.Scene.prop_floorplan = bpy.props.PointerProperty(type=FloorplanProperty)
        self._props = btools.utils.dict_from_prop(self.scene.prop_floorplan)

    def build_from_props(self, pdict):
        """Apply *pdict* on top of the cached defaults and build the floorplan."""
        self._props.update(pdict)
        btools.utils.prop_from_dict(self.scene.prop_floorplan, pdict)
        return floorplan_builder(self.context, self.scene.prop_floorplan)

    def build_random(self):
        """Build a floorplan from randomized properties."""
        # BUG FIX: this read the undefined attribute ``self._prop_class``;
        # snapshot the current scene property group instead (mirrors _register).
        properties = btools.utils.dict_from_prop(self.scene.prop_floorplan)
        properties['type'] = random.choices(['RECTANGULAR', 'H-SHAPED', 'RANDOM', 'COMPOSITE'], weights=[0.8, 0.5, 0.8, 0.7], k=1)[(- 1)]
        if (properties['type'] in ['RECTANGULAR', 'H-SHAPED', 'RANDOM', 'COMPOSITE']):
            properties['width'] = random.choice(range(2, 5))
            properties['length'] = random.choice(range(2, 5))
        else:
            # NOTE(review): unreachable with the candidate list above --
            # presumably a 'CIRCULAR' type was dropped; confirm upstream.
            properties['radius'] = random.choice(range(2, 5))
        if (properties['type'] == 'RANDOM'):
            properties['seed'] = random.randint(0, 1000)
            properties['extension_amount'] = random.randint(1, 3)
        if (properties['type'] == 'COMPOSITE'):
            for ke in ['tl1', 'tl2', 'tl3', 'tl4']:
                properties[ke] = random.choice(range(0, 5))
        if (properties['type'] == 'H-SHAPED'):
            for ke in ['tl1', 'tl2', 'tl3', 'tl4']:
                properties[ke] = random.choice(range(0, 5))
            for ke in ['tw1', 'tw2', 'tw3', 'tw4']:
                properties[ke] = btools.utils.clamp(((random.random() * max([properties['width'], properties['length']])) / 2), 1.0, 1000)
        return self.build_from_props(properties)
# NOTE(review): the bare name below looks like extraction residue (possibly
# the tail of a removed decorator or import) -- it raises NameError at import
# time; confirm against the original source.
_traceback
def pq_compute_single_core(proc_id, annotation_set, gt_folder, pred_folder, categories, ow_eval, simi_matrix_path):
    """Accumulate panoptic-quality statistics for one worker's image subset.

    For every (ground truth, prediction) annotation pair, decodes the
    panoptic PNGs, matches segments by IoU > 0.5, and updates per-category
    TP/FP/FN/IoU counters.  With *ow_eval* (open-world) enabled, mismatched
    categories of the same thing/stuff kind earn partial credit weighted by
    a similarity matrix.

    Returns:
        The accumulated PQStat for this worker.
    """
    pq_stat = PQStat()
    idx = 0
    if ow_eval:
        simiAccess = SIMIaccess(simi_matrix_path)
    # Leftover debugging snippet kept as an inert string expression.
    '\n\n file = os.path.join("/home/xp4/open-metrics/fc-clip/output/", "per_image_pq-sq-rq.txt")\n fn = open(file, \'a\')\n '
    for (gt_ann, pred_ann) in annotation_set:
        if ((idx % 100) == 0):
            print('Core: {}, {} from {} images processed'.format(proc_id, idx, len(annotation_set)))
        idx += 1
        # Decode both panoptic PNGs into per-pixel segment ids.
        pan_gt = np.array(Image.open(os.path.join(gt_folder, gt_ann['file_name'])), dtype=np.uint32)
        pan_gt = rgb2id(pan_gt)
        pan_pred = np.array(Image.open(os.path.join(pred_folder, pred_ann['file_name'])), dtype=np.uint32)
        pan_pred = rgb2id(pan_pred)
        gt_segms = {el['id']: el for el in gt_ann['segments_info']}
        pred_segms = {el['id']: el for el in pred_ann['segments_info']}
        # Cross-check the prediction PNG against its JSON segment list.
        pred_labels_set = set((el['id'] for el in pred_ann['segments_info']))
        (labels, labels_cnt) = np.unique(pan_pred, return_counts=True)
        for (label, label_cnt) in zip(labels, labels_cnt):
            if (label not in pred_segms):
                if (label == VOID):
                    continue
                raise KeyError('In the image with ID {} segment with ID {} is presented in PNG and not presented in JSON.'.format(gt_ann['image_id'], label))
            pred_segms[label]['area'] = label_cnt
            pred_labels_set.remove(label)
            if (pred_segms[label]['category_id'] not in categories):
                raise KeyError('In the image with ID {} segment with ID {} has unknown category_id {}.'.format(gt_ann['image_id'], label, pred_segms[label]['category_id']))
        if (len(pred_labels_set) != 0):
            raise KeyError('In the image with ID {} the following segment IDs {} are presented in JSON and not presented in PNG.'.format(gt_ann['image_id'], list(pred_labels_set)))
        # Encode (gt_id, pred_id) pairs into one uint64 per pixel, then count
        # pairwise intersections in a single pass.
        pan_gt_pred = ((pan_gt.astype(np.uint64) * OFFSET) + pan_pred.astype(np.uint64))
        gt_pred_map = {}
        (labels, labels_cnt) = np.unique(pan_gt_pred, return_counts=True)
        for (label, intersection) in zip(labels, labels_cnt):
            gt_id = (label // OFFSET)
            pred_id = (label % OFFSET)
            gt_pred_map[(gt_id, pred_id)] = intersection
        gt_matched = set()
        pred_matched = set()
        if ow_eval:
            # Open-world partial matches: label -> similarity weight.
            gt_ov_matched = dict()
            pred_ov_matched = dict()
        for (label_tuple, intersection) in gt_pred_map.items():
            (gt_label, pred_label) = label_tuple
            if (gt_label not in gt_segms):
                continue
            if (pred_label not in pred_segms):
                continue
            if (gt_segms[gt_label]['iscrowd'] == 1):
                continue
            # IoU with VOID pixels excluded from the union.
            union = (((pred_segms[pred_label]['area'] + gt_segms[gt_label]['area']) - intersection) - gt_pred_map.get((VOID, pred_label), 0))
            iou = (intersection / union)
            if ((iou > 0.5) and (gt_segms[gt_label]['category_id'] == pred_segms[pred_label]['category_id'])):
                # Exact-category match: full true positive.
                pq_stat[gt_segms[gt_label]['category_id']].iou += iou
                pq_stat[gt_segms[gt_label]['category_id']].tp += 1
                gt_matched.add(gt_label)
                pred_matched.add(pred_label)
            if ((iou > 0.5) and (gt_segms[gt_label]['category_id'] != pred_segms[pred_label]['category_id']) and ow_eval):
                # Open-world: category mismatch within the same thing/stuff
                # kind earns similarity-weighted partial credit.
                if (categories[gt_segms[gt_label]['category_id']]['isthing'] != categories[pred_segms[pred_label]['category_id']]['isthing']):
                    continue
                similarity = simiAccess.findSimiElement(gt_segms[gt_label]['category_id'], pred_segms[pred_label]['category_id'])
                pq_stat[gt_segms[gt_label]['category_id']].iou += (iou * similarity)
                pq_stat[gt_segms[gt_label]['category_id']].tp += (1 * similarity)
                gt_ov_matched.update({gt_label: similarity})
                pred_ov_matched.update({pred_label: similarity})
        # Unmatched ground-truth segments count as false negatives; crowd
        # segments are collected for the FP exemption below instead.
        crowd_labels_dict = {}
        for (gt_label, gt_info) in gt_segms.items():
            if (gt_label in gt_matched):
                continue
            if (gt_info['iscrowd'] == 1):
                crowd_labels_dict[gt_info['category_id']] = gt_label
                continue
            if (ow_eval and (gt_ov_matched.get(gt_label) is not None)):
                # Partially matched: only the unmatched fraction is an FN.
                pq_stat[gt_info['category_id']].fn += (1 - gt_ov_matched.get(gt_label))
            else:
                pq_stat[gt_info['category_id']].fn += 1
        # Unmatched predictions count as false positives unless mostly
        # covered by VOID or a same-category crowd region.
        for (pred_label, pred_info) in pred_segms.items():
            if (pred_label in pred_matched):
                continue
            intersection = gt_pred_map.get((VOID, pred_label), 0)
            if (pred_info['category_id'] in crowd_labels_dict):
                intersection += gt_pred_map.get((crowd_labels_dict[pred_info['category_id']], pred_label), 0)
            if ((intersection / pred_info['area']) > 0.5):
                continue
            if (ow_eval and (pred_ov_matched.get(pred_label) is not None)):
                pq_stat[pred_info['category_id']].fp += (1 - pred_ov_matched.get(pred_label))
            else:
                pq_stat[pred_info['category_id']].fp += 1
    print('Core: {}, all {} images processed'.format(proc_id, len(annotation_set)))
    return pq_stat
class PhoneDecoder():
    """Greedy frame-wise phone decoder over per-frame unit logits."""

    def __init__(self, model_path, inference_config):
        self.model_path = Path(model_path)
        self.config = inference_config
        self.inventory = Inventory(model_path, inference_config)
        self.unit = self.inventory.unit

    def compute(self, logits, lang_id=None, topk=1, emit=1.0, timestamp=False):
        """Decode *logits* (frames x units) into a phone string.

        Args:
            logits: Per-frame unit scores; index 0 is special-cased as the
                non-emitting unit.
            lang_id: Optional language whose inventory mask restricts units.
            topk: Number of candidate phones to report per emission.
            emit: Divisor applied to unit 0's logit; values > 1 encourage
                emissions.
            timestamp: Prefix each emission with "start duration" (seconds).

        Returns:
            Decoded phones: newline-separated when timestamped, otherwise
            space-separated (topk == 1) or " | "-separated groups.
        """
        mask = self.inventory.get_mask(lang_id, approximation=self.config.approximate)
        logits = mask.mask_logits(logits)
        # Collect frames where the argmax switches to a new non-zero unit.
        emit_frame_idx = []
        cur_max_arg = (- 1)
        for i in range(len(logits)):
            logit = logits[i]
            logit[0] /= emit  # NOTE: mutates the masked logits in place
            arg_max = np.argmax(logit)
            if ((arg_max != cur_max_arg) and (arg_max != 0)):
                emit_frame_idx.append(i)
            cur_max_arg = arg_max
        decoded_seq = []
        for idx in emit_frame_idx:
            logit = logits[idx]
            # Numerically-stabilized softmax, used only for reporting probs.
            exp_prob = np.exp((logit - np.max(logit)))
            probs = (exp_prob / exp_prob.sum())
            top_phones = logit.argsort()[(- topk):][::(- 1)]
            top_probs = sorted(probs)[(- topk):][::(- 1)]
            stamp = f'{(self.config.window_shift * idx):.3f} {self.config.window_size:.3f} '
            if (topk == 1):
                phones_str = ' '.join(mask.get_units(top_phones))
                if timestamp:
                    phones_str = (stamp + phones_str)
                decoded_seq.append(phones_str)
            else:
                phone_prob_lst = [f'{phone} ({prob:.3f})' for (phone, prob) in zip(mask.get_units(top_phones), top_probs)]
                phones_str = ' '.join(phone_prob_lst)
                if timestamp:
                    phones_str = (stamp + phones_str)
                decoded_seq.append(phones_str)
        if timestamp:
            phones = '\n'.join(decoded_seq)
        elif (topk == 1):
            phones = ' '.join(decoded_seq)
        else:
            phones = ' | '.join(decoded_seq)
        return phones
def tokenize(sentence, regex=SENTENCE_SPLIT_REGEX, keep=["'s"], remove=[',', '?']):
    """Lower-case and split *sentence* into tokens.

    Substrings in *keep* get a space inserted before them (so they become
    their own token); substrings in *remove* are deleted outright.  The
    result drops empty and whitespace-only fragments.
    """
    text = sentence.lower()
    for kept in keep:
        text = text.replace(kept, ' ' + kept)
    for dropped in remove:
        text = text.replace(dropped, '')
    stripped = (piece.strip() for piece in regex.split(text))
    return [piece for piece in stripped if piece]
def get_data_loaders(dataset, data_root=None, augment=False, batch_size=64, num_workers=8, shuffle=True, load_in_mem=False, hdf5=False, pin_memory=True, drop_last=True, start_itr=0, num_epochs=500, use_multiepoch_sampler=False, **kwargs):
    """Build and return the list of training DataLoaders for *dataset*.

    Args:
        dataset: Dataset key into root_dict/dset_dict/imsize_dict (e.g. 'C10').
        data_root: Base directory that holds the dataset folders.
        augment: Apply random crop/flip training augmentation.
        batch_size, num_workers, shuffle, pin_memory, drop_last: DataLoader options.
        load_in_mem: Load the entire dataset into memory up front.
        hdf5: Unused here; HDF5 handling keys off the dataset *name* instead.
        start_itr: Iteration to resume from (consumed by MultiEpochSampler).
        num_epochs: Total epochs for MultiEpochSampler.
        use_multiepoch_sampler: Use MultiEpochSampler for exact resume behavior.

    Returns:
        A one-element list containing the training DataLoader.
    """
    data_root += ('/%s' % root_dict[dataset])
    print(('Using dataset root location %s' % data_root))
    which_dataset = dset_dict[dataset]
    norm_mean = [0.5, 0.5, 0.5]
    norm_std = [0.5, 0.5, 0.5]
    image_size = imsize_dict[dataset]
    dataset_kwargs = {'index_filename': ('%s_imgs.npz' % dataset)}
    if ('hdf5' in dataset):
        # HDF5 datasets are stored pre-processed; no torchvision transform.
        train_transform = None
    else:
        if augment:
            print('Data will be augmented...')
            if (dataset in ['C10', 'C100']):
                train_transform = [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]
            else:
                train_transform = [RandomCropLongEdge(), transforms.Resize(image_size), transforms.RandomHorizontalFlip()]
        else:
            print('Data will not be augmented...')
            if (dataset in ['C10', 'C100']):
                train_transform = []
            else:
                train_transform = [CenterCropLongEdge(), transforms.Resize(image_size)]
        train_transform = transforms.Compose((train_transform + [transforms.ToTensor(), transforms.Normalize(norm_mean, norm_std)]))
    train_set = which_dataset(root=data_root, transform=train_transform, load_in_mem=load_in_mem, **dataset_kwargs)
    loaders = []
    # BUG FIX: start_itr was unconditionally reset to 0 here, silently
    # discarding the caller's resume iteration and making MultiEpochSampler
    # restart its data schedule from scratch on every resume.
    if use_multiepoch_sampler:
        print(('Using multiepoch sampler from start_itr %d...' % start_itr))
        loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory}
        sampler = MultiEpochSampler(train_set, num_epochs, start_itr, batch_size)
        train_loader = DataLoader(train_set, batch_size=batch_size, sampler=sampler, **loader_kwargs)
    else:
        loader_kwargs = {'num_workers': num_workers, 'pin_memory': pin_memory, 'drop_last': drop_last}
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=shuffle, **loader_kwargs)
    loaders.append(train_loader)
    return loaders
class FitSpawner(gui.multiSwitch.TabSpawner):
    """Opens fits in multiSwitch tabs when a fit is selected or dragged in."""

    def __init__(self, multiSwitch):
        self.multiSwitch = multiSwitch
        self.mainFrame = mainFrame = gui.mainFrame.MainFrame.getInstance()
        mainFrame.Bind(EVT_FIT_SELECTED, self.fitSelected)
        # Route tab-container drags through this spawner.
        self.multiSwitch.tabs_container.handleDrag = self.handleDrag

    def fitSelected(self, event):
        # If the fit is already open in some tab, just focus that tab.
        count = (- 1)
        for (index, page) in enumerate(self.multiSwitch._pages):
            if (not isinstance(page, gui.builtinViews.emptyView.BlankPage)):
                try:
                    if (page.activeFitID == event.fitID):
                        count += 1
                        self.multiSwitch.SetSelection(index)
                        wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(event.fitID,)))
                        break
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception as e:
                    pyfalog.critical('Caught exception in fitSelected')
                    pyfalog.critical(e)
        if (count < 0):
            # Fit not open yet: decide whether to reuse the current tab or
            # open a new one, honoring the openFitInNew option and the Ctrl
            # modifier (which inverts it).
            startup = getattr(event, 'startup', False)
            from_import = getattr(event, 'from_import', False)
            sFit = Fit.getInstance()
            openFitInNew = sFit.serviceFittingOptions['openFitInNew']
            mstate = wx.GetMouseState()
            modifierKey = (mstate.GetModifiers() == wx.MOD_CONTROL)
            if (from_import or ((not openFitInNew) and modifierKey) or startup or (openFitInNew and (not modifierKey))):
                self.multiSwitch.AddPage()
            view = self.multiSwitch.GetSelectedPage()
            if (not isinstance(view, FittingView)):
                view = FittingView(self.multiSwitch)
                pyfalog.debug((' Created new view:' + repr(view)))
                self.multiSwitch.ReplaceActivePage(view)
            view.fitSelected(event)

    def handleDrag(self, type, fitID):
        if (type == 'fit'):
            # Prefer an existing tab for this fit, then a blank tab, then a
            # brand new tab.
            for page in self.multiSwitch._pages:
                if (isinstance(page, FittingView) and (page.activeFitID == fitID)):
                    index = self.multiSwitch.GetPageIndex(page)
                    self.multiSwitch.SetSelection(index)
                    wx.PostEvent(self.mainFrame, GE.FitChanged(fitIDs=(fitID,)))
                    return
                elif isinstance(page, gui.builtinViews.emptyView.BlankPage):
                    view = FittingView(self.multiSwitch)
                    self.multiSwitch.ReplaceActivePage(view)
                    view.handleDrag(type, fitID)
                    return
            view = FittingView(self.multiSwitch)
            self.multiSwitch.AddPage(view)
            view.handleDrag(type, fitID)
# NOTE(review): the bare ``(scope='module')`` below is invalid syntax -- it
# looks like the remains of a stripped ``@pytest.fixture(scope='module')``
# decorator; restore it from the original source.
(scope='module')
def test_image_rgba_merc(test_area_merc):
    # Build a fake RGBA DataArray on the mercator test area and return the
    # enhanced image for it.
    arr = xr.DataArray(_get_fake_da((- 80), 40, (test_area_merc.shape + (4,))), dims=('y', 'x', 'bands'), coords={'bands': ['R', 'G', 'B', 'A']}, attrs={'name': 'test-rgba', 'start_time': datetime.datetime(2013, 2, 22, 12, 0), 'area': test_area_merc, 'mode': 'RGBA'})
    return get_enhanced_image(arr)
def print_platform_version_info():
    """Print version info for visualqc and its key dependencies, plus platform details."""
    import scipy
    import platform
    import matplotlib
    from visualqc import __version__
    print('version info: visualqc {}'.format(__version__))
    print('numpy {} / scipy {} / matplotlib {}\npython {}'.format(np.__version__, scipy.__version__, matplotlib.__version__, sys.version))
    print('platform {}\n{}\n\n'.format(platform.platform(), platform.version()))
    # Best-effort: freedesktop_os_release() exists only on Python 3.10+
    # (AttributeError below that) and raises OSError when /etc/os-release
    # is absent (non-Linux systems).
    try:
        print('\tLinux distribution: {}'.format(platform.freedesktop_os_release()))
    except (AttributeError, OSError):
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        pass
class ResNet(nn.Module):
    """Wide-ResNet (depth = 6n+4, width multiplier k) feature extractor.

    forward() returns a pooled feature vector; there is no classifier head.
    """

    def __init__(self, depth=28, widen_factor=10, dropout_rate=0):
        super(ResNet, self).__init__()
        self.in_planes = 16
        assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'
        num_blocks = (depth - 4) // 6
        k = widen_factor
        print('Wide-Resnet %dx%d' % (depth, k))
        widths = [16, 16 * k, 32 * k, 64 * k]
        self.conv1 = conv3x3(3, widths[0])
        self.layer1 = self._wide_layer(wide_basic, widths[1], num_blocks, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, widths[2], num_blocks, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, widths[3], num_blocks, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(widths[3], momentum=0.9)

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack *num_blocks* blocks; only the first block uses *stride*."""
        layers = []
        for block_stride in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, dropout_rate, block_stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        for layer in (self.layer1, self.layer2, self.layer3):
            out = layer(out)
        out = F.relu(self.bn1(out))
        # Global average pool to a (N, C) feature vector.
        return F.adaptive_avg_pool2d(out, 1).squeeze(-1).squeeze(-1)
def objective(x_train, y_train, W1, b1, z1, a1, W2, b2, z2, a2, W3, b3, z3, u, v1, v2, rho):
    """Augmented-Lagrangian (ADMM) objective for a 3-layer network.

    Combines the cross-entropy loss at the output layer with the dual term
    <u, z3 - W3 a2 - b3>, quadratic penalties on each layer's linear
    residual z_l - W_l x_l - b_l, and penalties tying the activations to
    relu of their pre-activations.
    """
    def _sq_norm(t):
        # Sum of squares, written elementwise to match the original form.
        return torch.sum(t * t)

    res1 = (z1 - torch.matmul(W1, x_train)) - b1
    res2 = (z2 - torch.matmul(W2, a1)) - b2
    res3 = (z3 - torch.matmul(W3, a2)) - b3
    loss = common.cross_entropy_with_softmax(y_train, z3)
    # Additions kept in the original left-to-right order so floating-point
    # results are bit-identical.
    obj = loss + torch.trace(torch.matmul(res3, torch.transpose(u, 0, 1)))
    obj = obj + (rho / 2) * _sq_norm(res1)
    obj = obj + (rho / 2) * _sq_norm(res2)
    obj = obj + (rho / 2) * _sq_norm(res3)
    obj = obj + (rho / 2) * _sq_norm((a1 - common.relu(z1)) + v1)
    obj = obj + (rho / 2) * _sq_norm((a2 - common.relu(z2)) + v2)
    return obj
def initialize(forced_gui: (GUIType | None)=None):
    """Select and import the GUI backend module for the current platform.

    Resolution order: the explicit `forced_gui` argument, then a KDE session
    heuristic, then the PYWEBVIEW_GUI environment variable (only recognized
    values). Tries platform-appropriate backends in order, stores the forced
    choice in the module-global `forced_gui_`, calls the backend's
    `setup_app()` and returns the imported module.

    Raises WebViewException when no backend can be imported or the platform
    is unsupported.
    """

    def import_gtk():
        global guilib
        try:
            import webview.platforms.gtk as guilib
            logger.debug('Using GTK')
            return True
        except (ImportError, ValueError):
            # ValueError can occur from GTK version negotiation, not just ImportError.
            logger.exception('GTK cannot be loaded')
            return False

    def import_qt():
        global guilib
        try:
            import webview.platforms.qt as guilib
            return True
        except ImportError:
            logger.exception('QT cannot be loaded')
            return False

    def import_cocoa():
        global guilib
        try:
            import webview.platforms.cocoa as guilib
            return True
        except ImportError:
            logger.exception('PyObjC cannot be loaded')
            return False

    def import_winforms():
        global guilib
        try:
            import webview.platforms.winforms as guilib
            return True
        except ImportError:
            logger.exception('pythonnet cannot be loaded')
            return False

    def try_import(guis: list[Callable[([], Any)]]) -> bool:
        # Try each importer in order; stop at the first one that succeeds.
        while guis:
            import_func = guis.pop(0)
            if import_func():
                return True
        return False

    global forced_gui_
    if (not forced_gui):
        # Prefer Qt when running inside a KDE session.
        forced_gui = ('qt' if ('KDE_FULL_SESSION' in os.environ) else None)
    # PYWEBVIEW_GUI overrides everything, but only for recognized backend names.
    forced_gui = cast(GUIType, (os.environ['PYWEBVIEW_GUI'].lower() if (('PYWEBVIEW_GUI' in os.environ) and (os.environ['PYWEBVIEW_GUI'].lower() in ['qt', 'gtk', 'cef', 'mshtml', 'edgechromium'])) else forced_gui))
    forced_gui_ = forced_gui
    if (platform.system() == 'Darwin'):
        if (forced_gui == 'qt'):
            guis = [import_qt, import_cocoa]
        else:
            guis = [import_cocoa, import_qt]
        if (not try_import(guis)):
            raise WebViewException('You must have either PyObjC (for Cocoa support) or Qt with Python bindings installed in order to use pywebview.')
    elif ((platform.system() == 'Linux') or (platform.system() == 'OpenBSD')):
        if (forced_gui == 'qt'):
            guis = [import_qt, import_gtk]
        else:
            guis = [import_gtk, import_qt]
        if (not try_import(guis)):
            raise WebViewException('You must have either QT or GTK with Python extensions installed in order to use pywebview.')
    elif (platform.system() == 'Windows'):
        if (forced_gui == 'qt'):
            guis = [import_qt]
        else:
            guis = [import_winforms]
        if (not try_import(guis)):
            raise WebViewException('You must have pythonnet installed in order to use pywebview.')
    else:
        raise WebViewException('Unsupported platform. Only Windows, Linux, OS X, OpenBSD are supported.')
    guilib.setup_app()
    return guilib
class WID2Section(Section):
    """A GSE/IMS waveform section: a WID2 header, optional STA2 / repeated
    EID2 / optional BEA2 blocks, the DAT2 sample payload and a CHK2 checksum.
    """

    wid2 = WID2.T()
    sta2 = STA2.T(optional=True)
    eid2s = List.T(EID2.T())
    bea2 = BEA2.T(optional=True)
    dat2 = DAT2.T()
    chk2 = CHK2.T()

    @classmethod
    def read(cls, reader):
        """Parse one waveform section from `reader` and return a new instance.

        BUG FIX: restored the @classmethod decorator — the method is written
        against `cls` (it constructs the result via `cls(**blocks)`).
        """
        blocks = dict(eid2s=[])
        expect = [(b'WID2 ', WID2, 1)]
        # STA2 is optional only in the GSE2.0 dialect; required otherwise.
        if (reader.version_dialect[0] == 'GSE2.0'):
            expect.append((b'STA2 ', STA2, 0))
        else:
            expect.append((b'STA2 ', STA2, 1))
        expect.extend([(b'EID2 ', EID2, 0), (b'BEA2 ', BEA2, 0), (b'DAT2', DAT2, 1), (b'CHK2 ', CHK2, 1)])
        for (k, handler, required) in expect:
            line = reader.readline()
            reader.pushback()  # peek only; the block handler re-reads the line
            if (line is None):
                raise DeserializeError('incomplete waveform section')
            if line.upper().startswith(k):
                block = handler.read(reader)
                if (k == b'EID2 '):
                    blocks['eid2s'].append(block)  # EID2 may repeat
                else:
                    blocks[str(k.lower().rstrip().decode('ascii'))] = block
            elif required:
                raise DeserializeError(('expected %s block' % k))
            else:
                continue
        return cls(**blocks)

    def write(self, writer):
        """Serialize the section in canonical block order."""
        self.wid2.write(writer)
        if self.sta2:
            self.sta2.write(writer)
        for eid2 in self.eid2s:
            eid2.write(writer)
        if self.bea2:
            self.bea2.write(writer)
        self.dat2.write(writer)
        self.chk2.write(writer)

    def pyrocko_trace(self, checksum_error='raise'):
        """Decode this section into a pyrocko Trace.

        checksum_error: one of 'raise', 'warn', 'ignore' — what to do when
        the CM6-decoded samples do not match the stored CHK2 checksum.
        """
        from pyrocko import ims_ext, trace
        assert (checksum_error in ('raise', 'warn', 'ignore'))
        raw_data = self.dat2.raw_data
        nsamples = self.wid2.nsamples
        deltat = (1.0 / self.wid2.sample_rate)
        tmin = self.wid2.time
        if self.sta2:
            net = self.sta2.network
        else:
            net = ''
        sta = self.wid2.station
        loc = self.wid2.location
        cha = self.wid2.channel
        if raw_data:
            ydata = ims_ext.decode_cm6(b''.join(raw_data), nsamples)
            if (checksum_error != 'ignore'):
                if (ims_ext.checksum(ydata) != self.chk2.checksum):
                    mess = 'computed checksum value differs from stored value'
                    if (checksum_error == 'raise'):
                        raise DeserializeError(mess)
                    elif (checksum_error == 'warn'):
                        logger.warning(mess)
            tmax = None  # let Trace derive tmax from the sample data
        else:
            # No payload: build an empty trace spanning the declared window.
            tmax = (tmin + ((nsamples - 1) * deltat))
            ydata = None
        return trace.Trace(net, sta, loc, cha, tmin=tmin, tmax=tmax, deltat=deltat, ydata=ydata)

    @classmethod
    def from_pyrocko_trace(cls, tr, lat=None, lon=None, elevation=None, depth=None):
        """Build a WID2Section from a pyrocko trace (CM6-encoded payload,
        split into 80-character DAT2 lines).

        BUG FIX: restored the @classmethod decorator — the method is written
        against `cls`.
        """
        from pyrocko import ims_ext
        ydata = tr.get_ydata()
        raw_data = ims_ext.encode_cm6(ydata)
        return cls(
            wid2=WID2(nsamples=tr.data_len(), sample_rate=(1.0 / tr.deltat), time=tr.tmin,
                      station=tr.station, location=tr.location, channel=tr.channel),
            sta2=STA2(network=tr.network, lat=lat, lon=lon, elevation=elevation, depth=depth),
            dat2=DAT2(raw_data=[raw_data[(i * 80):((i + 1) * 80)]
                                for i in range((((len(raw_data) - 1) // 80) + 1))]),
            chk2=CHK2(checksum=ims_ext.checksum(ydata)))
# NOTE(review): `_REGISTRY.register()` looks like a truncated decorator
# (presumably `@..._REGISTRY.register()`); confirm against the original file.
_REGISTRY.register()
class DANN(TrainerXU):
    """Domain-Adversarial Neural Network trainer.

    Trains the task classifier on labeled source batches while a critic
    (domain discriminator) is fooled via a gradient-reversal layer, pushing
    the backbone toward domain-invariant features.
    """

    def __init__(self, cfg):
        super().__init__(cfg)
        self.build_critic()
        self.ce = nn.CrossEntropyLoss()    # source classification loss
        self.bce = nn.BCEWithLogitsLoss()  # domain discrimination loss

    def build_critic(self):
        """Create the domain critic MLP and its optimizer/scheduler."""
        cfg = self.cfg
        print('Building critic network')
        fdim = self.model.fdim
        critic_body = build_head('mlp', verbose=cfg.VERBOSE, in_features=fdim, hidden_layers=[fdim, fdim], activation='leaky_relu')
        # Single-logit head: source (label 1) vs. target (label 0).
        self.critic = nn.Sequential(critic_body, nn.Linear(fdim, 1))
        print('# params: {:,}'.format(count_num_param(self.critic)))
        self.critic.to(self.device)
        self.optim_c = build_optimizer(self.critic, cfg.OPTIM)
        self.sched_c = build_lr_scheduler(self.optim_c, cfg.OPTIM)
        self.register_model('critic', self.critic, self.optim_c, self.sched_c)
        self.revgrad = ReverseGrad()

    def forward_backward(self, batch_x, batch_u):
        """One optimization step on a labeled (x) and an unlabeled (u) batch."""
        (input_x, label_x, input_u) = self.parse_batch_train(batch_x, batch_u)
        # Domain labels: 1 for the labeled (source) batch, 0 for the unlabeled one.
        domain_x = torch.ones(input_x.shape[0], 1).to(self.device)
        domain_u = torch.zeros(input_u.shape[0], 1).to(self.device)
        global_step = (self.batch_idx + (self.epoch * self.num_batches))
        progress = (global_step / (self.max_epoch * self.num_batches))
        # Gradient-reversal coefficient ramped from 0 to 1 over training.
        lmda = ((2 / (1 + np.exp(((- 10) * progress)))) - 1)
        (logit_x, feat_x) = self.model(input_x, return_feature=True)
        (_, feat_u) = self.model(input_u, return_feature=True)
        loss_x = self.ce(logit_x, label_x)
        # Reverse gradients so the backbone maximizes domain confusion.
        feat_x = self.revgrad(feat_x, grad_scaling=lmda)
        feat_u = self.revgrad(feat_u, grad_scaling=lmda)
        output_xd = self.critic(feat_x)
        output_ud = self.critic(feat_u)
        loss_d = (self.bce(output_xd, domain_x) + self.bce(output_ud, domain_u))
        loss = (loss_x + loss_d)
        self.model_backward_and_update(loss)
        loss_summary = {'loss_x': loss_x.item(), 'acc_x': compute_accuracy(logit_x, label_x)[0].item(), 'loss_d': loss_d.item()}
        if ((self.batch_idx + 1) == self.num_batches):
            self.update_lr()  # step schedulers at the end of each epoch
        return loss_summary
class PreActBlock(nn.Module):
    """Pre-activation residual block with LeakyReLU(0.2) nonlinearities.

    A 1x1 strided shortcut convolution is created only when the spatial
    resolution or channel count changes.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        nn.init.kaiming_normal_(self.conv1.weight, mode='fan_out')
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        nn.init.kaiming_normal_(self.conv2.weight, mode='fan_out')
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        pre = F.leaky_relu(x, 0.2)
        # Shortcut sees the pre-activated input when projection is needed;
        # otherwise the raw input passes through untouched.
        if hasattr(self, 'shortcut'):
            identity = self.shortcut(pre)
        else:
            identity = x
        out = self.conv2(F.leaky_relu(self.conv1(pre), 0.2))
        out += identity
        return out
def test_replace_alphabet_2() -> None:
    """replace_alphabet maps 'z' to the wildcard class and drops ~'z' entirely,
    leaving every state with a single wildcard transition to the sink state."""
    z = Charclass('z')
    not_z = ~Charclass('z')
    source = Fsm(
        alphabet={z, not_z},
        states={0, 1, 2},
        initial=0,
        finals={1},
        map={
            0: {z: 2, not_z: 1},
            1: {z: 2, not_z: 1},
            2: {z: 2, not_z: 2},
        },
    )
    replaced = source.replace_alphabet({z: [~Charclass()], not_z: []})
    wildcard = ~Charclass()
    assert replaced.map == {0: {wildcard: 2}, 1: {wildcard: 2}, 2: {wildcard: 2}}
class BatteryIcon(base._TextBox):
    """Battery life indicator widget rendered as a themed icon
    (falls back to text when no theme_path is configured)."""

    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [
        ('battery', 0, 'Which battery should be monitored'),
        ('update_interval', 60, 'Seconds between status updates'),
        ('theme_path', default_icon_path(), 'Path of the icons'),
    ]

    icon_names = (
        'battery-missing',
        'battery-caution',
        'battery-low',
        'battery-good',
        'battery-full',
        'battery-caution-charging',
        'battery-low-charging',
        'battery-good-charging',
        'battery-full-charging',
        'battery-full-charged',
    )

    def __init__(self, **config) -> None:
        if ('update_delay' in config):
            warnings.warn('Change from using update_delay to update_interval for battery widget, removed in 0.15', DeprecationWarning)
            config['update_interval'] = config.pop('update_delay')
        base._TextBox.__init__(self, 'BAT', bar.CALCULATED, **config)
        self.add_defaults(self.defaults)
        if self.theme_path:
            self.length_type = bar.STATIC
            self.length = 0
        self.surfaces = {}
        self.current_icon = 'battery-missing'
        self._battery = self._load_battery(**config)

    @staticmethod
    def _load_battery(**config):
        # BUG FIX: was defined without `self` and without @staticmethod, so
        # `self._load_battery(**config)` raised TypeError. Kept as a static
        # factory hook so tests can override battery loading.
        return load_battery(**config)

    def timer_setup(self) -> None:
        self.update()
        self.timeout_add(self.update_interval, self.timer_setup)

    def _configure(self, qtile, bar) -> None:
        base._TextBox._configure(self, qtile, bar)
        self.setup_images()

    def setup_images(self) -> None:
        """Load and scale all themed icons to fit the bar height."""
        d_imgs = images.Loader(self.theme_path)(*self.icon_names)
        new_height = (self.bar.height - self.actual_padding)
        for (key, img) in d_imgs.items():
            img.resize(height=new_height)
            # Widen the widget to fit the largest icon plus padding.
            if (img.width > self.length):
                self.length = int((img.width + (self.actual_padding * 2)))
            self.surfaces[key] = img.pattern

    def update(self) -> None:
        """Poll the battery and redraw only when the icon changes."""
        status = self._battery.update_status()
        icon = self._get_icon_key(status)
        if (icon != self.current_icon):
            self.current_icon = icon
            self.draw()

    def draw(self) -> None:
        if self.theme_path:
            self.drawer.clear((self.background or self.bar.background))
            self.drawer.ctx.set_source(self.surfaces[self.current_icon])
            self.drawer.ctx.paint()
            self.drawer.draw(offsetx=self.offset, width=self.length)
        else:
            # Text fallback: strip the 'battery-' prefix from the icon key.
            self.text = self.current_icon[8:]
            base._TextBox.draw(self)

    @staticmethod
    def _get_icon_key(status: BatteryStatus) -> str:
        # BUG FIX: was defined without `self` and without @staticmethod, so
        # `self._get_icon_key(status)` raised TypeError (self bound to the
        # `status` parameter plus the real argument).
        key = 'battery'
        percent = status.percent
        if (percent < 0.2):
            key += '-caution'
        elif (percent < 0.4):
            key += '-low'
        elif (percent < 0.8):
            key += '-good'
        else:
            key += '-full'
        state = status.state
        if (state == BatteryState.CHARGING):
            key += '-charging'
        elif (state == BatteryState.FULL):
            key += '-charged'
        return key
def traj_segment_generator(pi, env, horizon, nenvs, stochastic, dropoutpi_keep_prob, dropoutvf_keep_prob, isbnpitrainmode, isbnvftrainmode):
    """Generator yielding fixed-length rollout segments from a vectorized env.

    Every `horizon` policy steps it yields a dict with observations, rewards,
    value predictions, episode-start flags, actions, previous actions, the
    bootstrap value `nextvpred` (zeroed where an episode just ended) and
    per-env lists of finished episode returns/lengths. When the dropout /
    batch-norm arguments are not None, rollouts are collected with those
    regularizers forced into inference mode.
    """
    t = 0
    ac = ([env.action_space.sample()] * nenvs)  # placeholder action before the first step
    new = ([True] * nenvs)  # marks the first step of an episode per env
    rew = ([0.0] * nenvs)
    ob = env.reset()
    cur_ep_ret = []  # running return of the current episode, per env
    cur_ep_len = []  # running length of the current episode, per env
    ep_rets = []     # completed episode returns collected this segment, per env
    ep_lens = []     # completed episode lengths collected this segment, per env
    for _ in range(nenvs):
        ep_rets.append([])
        ep_lens.append([])
        cur_ep_ret.append(0)
        cur_ep_len.append(0)
    # Pre-allocated rollout buffers of length `horizon`.
    obs = np.array([ob for _ in range(horizon)])
    rews = np.zeros([horizon, nenvs], 'float32')
    vpreds = np.zeros([horizon, nenvs], 'float32')
    news = np.zeros([horizon, nenvs], 'int32')
    acs = np.array([ac for _ in range(horizon)])
    prevacs = acs.copy()
    while True:
        prevac = ac
        stepdict = {'stochastic': stochastic}
        # Disable stochastic regularizers while collecting rollouts.
        if (dropoutpi_keep_prob is not None):
            stepdict.update({'dropoutpi_keep_prob': 1.0})
        if (dropoutvf_keep_prob is not None):
            stepdict.update({'dropoutvf_keep_prob': 1.0})
        if (isbnpitrainmode is not None):
            stepdict.update({'isbnpitrainmode': False})
        if (isbnvftrainmode is not None):
            stepdict.update({'isbnvftrainmode': False})
        (ac, vpred, _, _) = pi.step(ob, **stepdict)
        if ((t > 0) and ((t % horizon) == 0)):
            # Segment complete: yield it. `nextvpred` bootstraps only
            # unfinished episodes (zero where `new` is True).
            (yield {'ob': obs, 'rew': rews, 'vpred': vpreds, 'new': news, 'ac': acs, 'prevac': prevacs, 'nextvpred': (vpred * (1 - new)), 'ep_rets': ep_rets, 'ep_lens': ep_lens})
            # Re-evaluate the value after the consumer may have updated the policy.
            (_, vpred, _, _) = pi.step(ob, **stepdict)
            ep_rets = []
            ep_lens = []
            for _ in range(nenvs):
                ep_rets.append([])
                ep_lens.append([])
        i = (t % horizon)
        obs[i] = ob
        vpreds[i] = vpred
        news[i] = new
        acs[i] = ac
        prevacs[i] = prevac
        (ob, rew, new, _) = env.step(ac)
        rews[i] = rew
        for j in range(nenvs):
            cur_ep_len[j] += 1
            # A scalar reward means the env is not vectorized; broadcast it.
            if isinstance(rew, float):
                cur_ep_ret[j] += rew
                newj = new
            else:
                cur_ep_ret[j] += rew[j]
                newj = new[j]
            if newj:
                # Episode finished in env j: record and reset its accumulators.
                ep_rets[j].append(cur_ep_ret[j])
                ep_lens[j].append(cur_ep_len[j])
                cur_ep_ret[j] = 0
                cur_ep_len[j] = 0
                if isinstance(rew, float):
                    # Non-vectorized envs need an explicit reset.
                    ob = env.reset()
        t += 1
class RotationLogarithmicModel(RotationCostModel):
    """Rotation cost model whose T-count grows logarithmically with the
    inverse error budget: T = ceil(-slope * log2(error_budget) + overhead).
    """

    # Multiplier on -log2(error_budget).
    slope: float
    # Additive constant T-count per rotation.
    overhead: float
    gateset: Optional[str] = None
    approximation_protocol: Optional[str] = None
    reference: Optional[str] = None

    def rotation_cost(self, error_budget: float) -> AlgorithmSummary:
        # Smaller error budgets cost more T gates (log2 of the budget is negative).
        return AlgorithmSummary(t_gates=math.ceil((((- self.slope) * math.log2(error_budget)) + self.overhead)))

    def prepartion_overhead(self, error_budget) -> AlgorithmSummary:
        # This model has no state-preparation overhead.
        # NOTE(review): method name is misspelled ('prepartion'); kept as-is
        # since the base class / callers may rely on it — confirm before renaming.
        return AlgorithmSummary()
# NOTE(review): the decorator prefix appears truncated in this chunk —
# presumably `@pytest.mark` belongs directly before `.parametrize`;
# confirm against the original file.
.parametrize(('local_config', 'fresh'), [({}, True), ({'dependencies': [uuid.uuid4().hex]}, True), ({'dependencies': [uuid.uuid4().hex], 'dev-dependencies': [uuid.uuid4().hex]}, True), ({'dependencies': [uuid.uuid4().hex], 'dev-dependencies': None}, True), ({'dependencies': [uuid.uuid4().hex], 'groups': [uuid.uuid4().hex]}, False)])
def test_content_hash_with_legacy_is_compatible(local_config: dict[(str, list[str])], fresh: bool, locker: Locker) -> None:
    """The content hash must match the legacy hash (computed over the legacy
    key subset only) for configs expressible with legacy keys; configs using
    newer keys (`fresh` is False... wait — parametrize marks such cases) are
    allowed to diverge, which the `or fresh` disjunct encodes.
    """
    # Legacy hash covers only the keys the old lockfile format knew about.
    relevant_content = {}
    for key in locker._legacy_keys:
        relevant_content[key] = local_config.get(key)
    locker = locker.__class__(lock=locker.lock, local_config=local_config)
    old_content_hash = sha256(json.dumps(relevant_content, sort_keys=True).encode()).hexdigest()
    content_hash = locker._get_content_hash()
    # Either the hashes agree, or the config is flagged as non-legacy ("fresh"
    # here marks parametrized cases where agreement is required).
    assert ((content_hash == old_content_hash) or fresh)
def test_music_settings(skip_qtbot: pytestqt.qtbot.QtBot) -> None:
    """Exactly the radio button matching the patches' music mode is checked."""
    patches = SuperMetroidCosmeticPatches()
    dialog = SuperCosmeticPatchesDialog(None, patches)
    skip_qtbot.addWidget(dialog)
    for mode, button in dialog.radio_buttons.items():
        expected = mode == dialog.cosmetic_patches.music
        assert expected == button.isChecked()
class DuckFactory(AbstractDuckFactory):
    """Concrete factory producing plain (undecorated) ducks.

    Each method returns a fresh Quackable instance of the corresponding kind.
    """

    def createMallardDuck(self) -> Quackable:
        return MallardDuck()

    def createRedheadDuck(self) -> Quackable:
        return RedheadDuck()

    def createDuckCall(self) -> Quackable:
        return DuckCall()

    def createRubberDuck(self) -> Quackable:
        return RubberDuck()
class GRU_encoder(nn.Module):
    """Single-layer GRU encoder over 500-step sequences of 342-dim frames.

    Returns the final hidden state directly in 'supervised' mode; otherwise
    maps it to `hidden_states` dims and batch-normalizes.
    """

    def __init__(self, hidden_states=256):
        super(GRU_encoder, self).__init__()
        self.encoder = nn.GRU(342, 64, num_layers=1)
        self.mapping = nn.Linear(64, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        # Reshape to (batch, 342, 500), then (seq=500, batch, feat=342) for the GRU.
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        _, hidden = self.encoder(seq)
        features = hidden[-1]  # last layer's final hidden state: (batch, 64)
        if flag == 'supervised':
            return features
        return self.bn(self.mapping(features))
def visualize_detection_results(result_dict, tag, global_step, categories, summary_dir='', export_dir='', agnostic_mode=False, show_groundtruth=False, min_score_thresh=0.2, max_num_predictions=20):
    """Draw detections (and optionally groundtruth) on an image and export it.

    Writes the annotated image as a TF image summary under `tag` and, when
    `export_dir` is set, also saves it as a PNG file.

    Args:
      result_dict: dict with 'original_image' (squeezed along axis 0 here, so
        presumably batch size 1 — confirm with callers), 'detection_boxes',
        'detection_scores', 'detection_classes', and optionally
        'detection_keypoints', 'detection_masks', 'groundtruth_boxes',
        'groundtruth_keypoints'.
      tag: name used for the image summary and the exported file.
      global_step: step recorded with the summary.
      categories: category dicts used to build the label index.
      summary_dir: directory for the TF summary writer.
      export_dir: if non-empty, directory for the exported PNG.
      agnostic_mode: draw boxes without class information.
      show_groundtruth: also draw groundtruth boxes.
      min_score_thresh: minimum score for a detection to be drawn.
      max_num_predictions: cap on the number of drawn detections.

    Raises:
      ValueError: when required keys are missing from `result_dict`.
    """
    if (not set(['original_image', 'detection_boxes', 'detection_scores', 'detection_classes']).issubset(set(result_dict.keys()))):
        raise ValueError('result_dict does not contain all expected keys.')
    if (show_groundtruth and ('groundtruth_boxes' not in result_dict)):
        raise ValueError('If show_groundtruth is enabled, result_dict must contain groundtruth_boxes.')
    logging.info('Creating detection visualizations.')
    category_index = label_map_util.create_category_index(categories)
    image = np.squeeze(result_dict['original_image'], axis=0)
    detection_boxes = result_dict['detection_boxes']
    detection_scores = result_dict['detection_scores']
    detection_classes = np.int32(result_dict['detection_classes'])
    detection_keypoints = result_dict.get('detection_keypoints', None)
    detection_masks = result_dict.get('detection_masks', None)
    if show_groundtruth:
        groundtruth_boxes = result_dict['groundtruth_boxes']
        groundtruth_keypoints = result_dict.get('groundtruth_keypoints', None)
        # Groundtruth is drawn first so detections are painted on top of it.
        vis_utils.visualize_boxes_and_labels_on_image_array(image, groundtruth_boxes, None, None, category_index, keypoints=groundtruth_keypoints, use_normalized_coordinates=False, max_boxes_to_draw=None)
    vis_utils.visualize_boxes_and_labels_on_image_array(image, detection_boxes, detection_classes, detection_scores, category_index, instance_masks=detection_masks, keypoints=detection_keypoints, use_normalized_coordinates=False, max_boxes_to_draw=max_num_predictions, min_score_thresh=min_score_thresh, agnostic_mode=agnostic_mode)
    if export_dir:
        export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
        vis_utils.save_image_array_as_png(image, export_path)
    # Encode the annotated image into a TF image summary and flush it.
    summary = tf.Summary(value=[tf.Summary.Value(tag=tag, image=tf.Summary.Image(encoded_image_string=vis_utils.encode_image_array_as_png_str(image)))])
    summary_writer = tf.summary.FileWriter(summary_dir)
    summary_writer.add_summary(summary, global_step)
    summary_writer.close()
    logging.info('Detection visualizations written to summary with tag %s.', tag)
class Bottleneck(nn.Module):
    """ResNeXt-style bottleneck block (1x1 -> grouped 3x3 -> 1x1), optionally
    using IBN normalization on the first stage."""
    expansion = 4

    def __init__(self, inplanes, planes, bn_norm, with_ibn, baseWidth, cardinality, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Inner width per the ResNeXt formula: floor(planes * baseWidth/64) * cardinality.
        width = int(math.floor(planes * (baseWidth / 64)))
        groups = cardinality
        inner = width * groups
        self.conv1 = nn.Conv2d(inplanes, inner, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = IBN(inner, bn_norm) if with_ibn else get_norm(bn_norm, inner)
        self.conv2 = nn.Conv2d(inner, inner, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
        self.bn2 = get_norm(bn_norm, inner)
        self.conv3 = nn.Conv2d(inner, planes * 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = get_norm(bn_norm, planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
def _get_callee_type(call: CallExpr) -> (CallableType | None):
    """Resolve the callable type of a call expression's target, if statically known.

    Unwraps references and decorators, then returns the matching overload
    item for overloaded targets, the callable type directly, or None when
    the callee's type cannot be determined.
    """
    target: (Node | None) = call.callee
    if isinstance(target, RefExpr):
        target = target.node
    if isinstance(target, Decorator):
        target = target.func
    if not (isinstance(target, (Var, SYMBOL_FUNCBASE_TYPES)) and target.type):
        return None
    resolved = get_proper_type(target.type)
    if isinstance(resolved, Overloaded):
        # Pick the overload item this call shallowly matches.
        return find_shallow_matching_overload_item(resolved, call)
    if isinstance(resolved, CallableType):
        return resolved
    return None
def act_quantization(b, grid, power=True):
    """Build an activation quantizer with a learnable clipping scale `alpha`.

    The returned callable `f(input, alpha)` clips input/alpha to [0, 1],
    quantizes it either to the additive-powers-of-two `grid` (power=True)
    or uniformly to 2**b - 1 levels, and rescales by alpha. Gradients use a
    straight-through estimator: clipped values route their gradient to
    alpha, unclipped values pass through (plus a quantization-error term
    for alpha).
    """

    def uniform_quant(x, b=3):
        # Uniform quantization of x in [0, 1] onto 2**b - 1 levels.
        xdiv = x.mul(((2 ** b) - 1))
        xhard = xdiv.round().div(((2 ** b) - 1))
        return xhard

    def power_quant(x, grid):
        # Snap each value of x to the nearest entry of `grid`.
        shape = x.shape
        xhard = x.view((- 1))
        value_s = grid.type_as(x)
        idxs = (xhard.unsqueeze(0) - value_s.unsqueeze(1)).abs().min(dim=0)[1]
        xhard = value_s[idxs].view(shape)
        return xhard

    class _uq(torch.autograd.Function):
        # BUG FIX: forward/backward must be @staticmethod for torch's
        # autograd.Function API; the legacy non-static form raises a
        # RuntimeError when applied on modern PyTorch.
        @staticmethod
        def forward(ctx, input, alpha):
            input = input.div(alpha)
            input_c = input.clamp(min=0, max=1)  # clip to [0, alpha] in original scale
            if power:
                input_q = power_quant(input_c, grid)
            else:
                input_q = uniform_quant(input_c, b)
            ctx.save_for_backward(input, input_q)
            input_q = input_q.mul(alpha)
            return input_q

        @staticmethod
        def backward(ctx, grad_output):
            grad_input = grad_output.clone()
            (input, input_q) = ctx.saved_tensors
            i = (input > 1.0).float()  # mask of values clipped at the top
            # STE: clipped values contribute 1 to d(out)/d(alpha); unclipped
            # values contribute their quantization error (q - x).
            grad_alpha = (grad_output * (i + ((input_q - input) * (1 - i)))).sum()
            grad_input = (grad_input * (1 - i))
            return (grad_input, grad_alpha)

    return _uq().apply
def from_ieee_block(block: Union[(bytes, bytearray)], datatype: BINARY_DATATYPES='f', is_big_endian: bool=False, container: Callable[([Iterable[Union[(int, float)]]], Sequence[Union[(int, float)]])]=list) -> Sequence[Union[(int, float)]]:
    """Decode an IEEE-488.2 binary block into a sequence of values.

    Parses the block header, validates that the stated payload length is
    actually present, and delegates decoding to `from_binary_block`.
    A header length of -1 (indefinite form) means the payload runs to the
    end of the block.
    """
    offset, data_length = parse_ieee_block_header(block)
    if data_length == -1:
        # Indefinite-length block: everything after the header is payload.
        data_length = len(block) - offset
    available = len(block) - offset
    if len(block) < offset + data_length:
        raise ValueError(('Binary data is incomplete. The header states %d data bytes, but %d where received.' % (data_length, available)))
    return from_binary_block(block, offset, data_length, datatype, is_big_endian, container)
# NOTE(review): the decorator prefix appears truncated here — a marker object
# presumably belongs before `.requires_user_action`; confirm against the
# original file.
.requires_user_action
class CaretColorInitTestCase(InteractiveTestCase, _DRYHelperMixin):
    """Interactive checks that the caret is drawn with the color passed at
    window construction (requires a human to confirm the displayed color)."""

    def test_caret_color_init_rgb(self):
        # Opaque RGB caret color.
        color = (255, 0, 0)
        self.build_window(color)
        app.run()
        self.ask_color(color)

    def test_caret_color_init_rgba(self):
        # RGBA caret color with partial transparency (alpha = 80).
        color = (255, 0, 0, 80)
        self.build_window(color)
        app.run()
        self.ask_color(color)
def get_checkpoint_id(key):
    """Resolve a method key to (method, setting, checkpoint_path).

    Accepts a bare method name (default 'hr_to_lr' setting), a method
    suffixed with '-inst'/'-instruction' ('hr_to_lr_inst_all' setting), or
    an explicit 'method/setting' pair. Returns None for unrecognized keys.
    """
    inst_keys = [m + '-inst' for m in all_methods] + [m + '-instruction' for m in all_methods]
    qualified = set(('%s/%s' % (m, s)) for m in all_methods for s in all_settings)
    if key in all_methods:
        method, setting = key, 'hr_to_lr'
    elif key in inst_keys:
        # Strip the trailing '-inst' / '-instruction' suffix.
        method = '-'.join(key.split('-')[:(-1)])
        setting = 'hr_to_lr_inst_all'
    elif key in qualified:
        method, setting = key.split('/')
    else:
        return None
    return (method, setting, os.path.join(checkpoint_dir, method, setting, 'model.pt'))
(name='help-analysis') _readme_flag def help_analysis(readme): get_wrapper(readme)('\nThe overall process is:\n\n1) Fragment structures in a SMILES file, to produce fragments.\n\n2) Index the fragments to produces matched molecular pairs.\n(you might include property information at this point)\n\n3) Load property information.\n\n4) Find transforms for a given structure; and/or\n\n5) Predict a property for a structure given the known\n property for another structure; and/or\n\n6) Apply 1-cut rules to generate new structures from a given\n structure.\n\nSome terminology:\n\nA fragmentation cuts 1, 2, or 3 non-ring bonds to\nconvert a structure into a "constant" part and a "variable" part. The\nsubstructure in the variable part is a single fragment, and often\nconsidered the R-groups, while the constant part contains one\nfragment for each cut, and it often considered as containing the\ncore.\n\nThe matched molecular pair indexing process finds all pairs which have\nthe same constant part, in order to define a transformation from one\nvariable part to another variable part. A "rule" stores information\nabout a transformation, including a list of all the pairs for that\nrule.\n\nThe "rule environment" extends the transformation to include\ninformation about the local environment of the attachment points on\nthe constant part. The environment fingerprint is based on the RDKit\ncircular fingerprints for the attachment points, expressed as a\ncanonical SMARTS pattern, and alternatively, as a "pseudo"-SMILES\nstring, which is a bit less precise but easier to understand and\nvisualize.\n\nThe fingerprint SMARTS pattern describes the Morgan circular\nfingerprint invariants around the attachment points. 
Here\'s a 2-cut\nexample split across three lines:\n\n\x08\n```\n[#0;X1;H0;+0;!R:1]-[#6;X4;H1;+0;R](-[#6;X4;H2;+0;R])-[#6;X4;H2;+0;R].\n[#0;X1;H0;+0;!R:2]-[#7;X3;H0;+0;R](-[#6;X4;H2;+0;R])-[#6;X4;H2;+0;R].\n[#0;X1;H0;+0;!R:3]-[#6;X3;H0;+0;R](:[#6;X3;H1;+0;R]):[#6;X3;H1;+0;R]\n```\n\nThe SMARTS modifiers, like "H0" to require no hydrogens, are needed to\nmatch the Morgan invariants but are quite the eye-full. The\npsuedosmiles alternative is:\n\n\x08\n```\n[*:1]-[CH](-[CH2](~*))-[CH2](~*).\n[*:2]-[N](-[CH2](~*))-[CH2](~*).\n[*:3]-[c](:[cH](~*)):[cH](~*)\n```\n\nThis can be processed by RDKit, if sanitization is disabled, and\nturned into an image.\n\nCAUTION! The "`(~*)`" terms are used to represent the SMARTS\nconnectivity terms "X<digit>", but they do not necessarily all\nrepresent distinct atoms!\n\nThere is one rule environment for each available radius. Larger radii\ncorrespond to more specific environments. The "rule environment\nstatistics" table stores information about the distribution of\nproperty changes for all of the pairs which contain the given rule and\nenvironment, with one table for each property.\n\n### 1) Fragment structures\n\nUse "`smifrag`" to see how a given SMILES is fragmented. Use "`fragment`"\nto fragment all of the compounds in a SMILES file.\n\n"`mmpdb smifrag`" is a diagnostic tool to help understand how a given\nSMILES will be fragmented and to experiment with the different\nfragmentation options. 
For example:\n\n\x08\n```shell\n% mmpdb smifrag \'c1ccccc1OC\'\n | variable | | constant \n#cuts | enum.label | #heavies | symm.class | smiles | order | #heavies | symm.class | smiles | with-H \n------+++++-------++++\n 1 | N | 2 | 1 | [*]OC | 0 | 6 | 1 | [*]c1ccccc1 | c1ccccc1 \n 1 | N | 6 | 1 | [*]c1ccccc1 | 0 | 2 | 1 | [*]OC | CO \n 2 | N | 1 | 11 | [*]O[*] | 01 | 7 | 12 | [*]C.[*]c1ccccc1 | - \n 1 | N | 1 | 1 | [*]C | 0 | 7 | 1 | [*]Oc1ccccc1 | Oc1ccccc1\n 1 | N | 7 | 1 | [*]Oc1ccccc1 | 0 | 1 | 1 | [*]C | C \n```\n\nUse "`mmpdb fragment`" to fragment a SMILES file and produce a fragment\nfile for the MMP analysis. Start with the test data file named\n"test_data.smi" containing the following structures:\n\n\x08\n```text\nOc1ccccc1 phenol \nOc1ccccc1O catechol \nOc1ccccc1N 2-aminophenol \nOc1ccccc1Cl 2-chlorophenol \nNc1ccccc1N o-phenylenediamine \nNc1cc(O)ccc1N amidol \nOc1cc(O)ccc1O hydroxyquinol \nNc1ccccc1 phenylamine \nC1CCCC1N cyclopentanol \n```\n\nthen run the following command generate a fragment database.\n\n\x08\n```shell\n% mmpdb fragment test_data.smi -o test_data.fragdb\n```\n\nFragmentation can take a while. You can save time by asking the code\nto reuse fragmentations from a previous run. If you do that then the\nfragment command will reuse the old fragmentation parameters. (You\ncannot override them with command-line options.). Here is an example:\n\n\x08\n```shell\n% mmpdb fragment data_file.smi -o new_data_file.fragdb \\\n --cache old_data_file.fragdb\n```\n\nThe "`--cache`" option will greatly improve the fragment performance when\nthere are only a few changes from the previous run.\n\nThe fragmentation algorithm is configured to ignore structures which\nare too big or have too many rotatable bonds. There are also options\nwhich change where to make cuts and the number of cuts to make. 
Use\nthe "`--help`" option on each command for details.\n\nUse "`mmpdb help-smiles-format`" for details about to parse different\nvariants of the SMILES file format.\n\n\n### 2) Index the MMPA fragments to create a database\n\n\nThe "`mmpa index`" command indexes the output fragments from "`mmpa\nfragment`" by their variable fragments, that is, it finds\nfragmentations with the same R-groups and puts them together. Here\'s\nan example:\n\n\x08\n```shell\n% mmpdb index test_data.fragdb -o test_data.mmpdb\n```\n\nThe output from this is an SQLite database.\n\nIf you have activity/property data and you do not want the database to\ninclude structures where there is no data, then you can specify\nthe properties file as well:\n\n\x08\n```shell\n% mmpdb index test_data.fragdb -o test_data.mmpdb --properties test_data.csv\n```\n\nUse "`mmpdb help-property-format`" for more details about the property\nfile format.\n\nFor more help use "`mmpdb index --help`".\n\n### 3) Add properties to a database\n\nUse "`mmpdb loadprops`" to add or modify activity/property data in the\ndatabase. 
Here\'s an example property file named \'test_data.csv\' with\nmolecular weight and melting point properties:\n\n\x08\n```text\nID MW MP \nphenol 94.1 41 \ncatechol 110.1 105 \n2-aminophenol 109.1 174 \n2-chlorophenol 128.6 8 \no-phenylenediamine 108.1 102 \namidol 124.1 * \nhydroxyquinol 126.1 140 \nphenylamine 93.1 -6 \ncyclopentanol 86.1 -19 \n```\n\nThe following loads the property data to the MMPDB database file\ncreated in the previous section:\n\n\x08\n```shell\n% mmpdb loadprops -p test_data.csv test_data.mmpdb\nUsing dataset: MMPs from \'test_data.fragdb\'\nReading properties from \'tests/test_data.csv\'\nRead 2 properties for 9 compounds from \'tests/test_data.csv\'\nImported 9 \'MW\' records (9 new, 0 updated).\nImported 8 \'MP\' records (8 new, 0 updated).\nNumber of rule statistics added: 533 updated: 0 deleted: 0\nLoaded all properties and re-computed all rule statistics.\n```\n\nUse "`mmpdb help-property-format`" for more details about the property\nfile format.\n\nFor more help use "`mmpdb loadprops --help`". Use "`mmpdb list`" to see\nwhat properties are already loaded.\n\n### 4) Identify possible transforms\n\n\nUse "`mmpdb transform`" to transform an input structure using the rules\nin a database. For each transformation, it can estimate the effect on\nany properties. 
The following looks at possible ways to transform\n2-pyridone using the test dataset created in the previous section, and\npredict the effect on the "MW" property (the output is reformatted for\nclarity):\n\n\x08\n```shell\n% mmpdb transform --smiles \'c1cccnc1O\' test_data.mmpdb --property MW\nID SMILES MW_from_smiles MW_to_smiles MW_radius\n1 Clc1ccccn1 [*:1]O [*:1]Cl 1\n2 Nc1ccccn1 [*:1]O [*:1]N 1\n3 c1ccncc1 [*:1]O [*:1][H] 1\n\x08\n MW_smarts MW_pseudosmiles MW_rule_environment_id \n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 299\n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 276\n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 268\n\x08\nMW_count MW_avg MW_std MW_kurtosis MW_skewness\n 1 18.5\n 3 -1 0 0\n 4 -16 0 0\n\x08\nMW_min MW_q1 MW_median MW_q3 MW_max MW_paired_t MW_p_value\n 18.5 18.5 18.5 18.5 18.5\n -1 -1 -1 -1 -1 1e+08 \n-16 -16 -16 -16 -16 1e+08\t\n```\n\nThis says that "c1cccnc1O" can be transformed to "Clc1ccccn1" using\nthe transformation \\[\\*:1\\]O>>\\[\\*:1\\]Cl (that is, replace the\noxygen with a chlorine). The best transformation match has a radius\nof 1, which includes the aromatic carbon at the attachment point but\nnot the aromatic nitrogen which is one atom away.\n\nThere is only one pair for this transformation, and it predicts a shift\nin molecular weight of 18.5. This makes sense as the [OH] is replaced\nwith a [Cl].\n\nOn the other hand, there are three pairs which transform it to\npyridine. The standard deviation of course is 0 because it\'s a simple\nmolecular weight calculation. The 1e+08.0 is the mmpdb way of\nwriting "positive infinity".\n\nMelting point is more complicated. 
The following shows that in the\ntransformation of 2-pyridone to pyridine there are still 3 matched\npairs and in this case the average shift is -93C with a standard\ndeviation of 76.727C:\n\n\x08\n```shell\n% mmpdb transform --smiles \'c1cccnc1O\' test_data.mmpdb --property MP\nID SMILES MP_from_smiles MP_to_smiles MP_radius \n1 Clc1ccccn1 [*:1]O [*:1]Cl 1\n2 Nc1ccccn1 [*:1]O [*:1]N 1\n3 c1ccncc1 [*:1]O [*:1][H] 1\n\x08\nMP_smarts MP_pseudosmiles MP_rule_environment_id\n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 299\n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 276\n[#0;X1;H0;+0;!R:1]-[#6;X3;H0;+0;R] [*:1]-[#6](~*)(~*) 268\n\x08\nMP_count MP_avg MP_std MP_kurtosis MP_skewness \n 1 -97 \n 3 -16.667 75.235 -1.5 -0.33764 \n 3 -93 76.727 -1.5 -0.32397 \n\x08\nMP_min MP_q1 MP_median MP_q3 MP_max MP_paired_t MP_p_value\n -97 -97 -97 -97 -97 \n -72 -65.75 -47 40 69 0.3837 0.73815\n-180 -151 -64 -42.25 -35 -2.0994 0.17062\n```\n\nYou might try enabling the "`--explain`" option to see why the algorithm\nselected a given tranformation.\n\nFor more help use "`mmpdb transform --help`".\n\n\n### 5) Use MMP to make a prediction\n\nUse "`mmpdb predict`" to predict the property change in a transformation\nfrom a given reference structure to a given query structure. Use this\nwhen you want to limit the transform results when you know the\nstarting and ending structures. 
The following predicts the effect on\nmolecular weight in transforming 2-pyridone to pyridone:\n\n\x08\n```shell\n% mmpdb predict --smiles \'c1cccnc1\' --reference \'c1cccnc1O\' \\\n test_data.mmpdb --property MP\npredicted delta: -93 +/- 76.7268\n```\n\nThis is the same MP_value and MP_std from the previous section using\n\'`transform`\'.\n\nThe reference value may also be included in the calulation, to give a\npredicted value.\n\n\x08\n```shell\n% mmpdb predict --smiles \'c1cccnc1\' --reference \'c1cccnc1O\' \\\n test_data.mmpdb --property MP --value -41.6\npredicted delta: -93 predicted value: -134.6 +/- 76.7268\n```\n\nI\'ll redo the calculation with the molecular weight property, and have\nmmpdb do the trival calculation of adding the known weight to the\npredicted delta:\n\n\x08\n```shell\n% mmpdb predict --smiles \'c1cccnc1\' --reference \'c1cccnc1O\' \\\n test_data.mmpdb --property MW --value 95.1\npredicted delta: -16 predicted value: 79.1 +/- 0\n```\n\nYou might try enabling the "`--explain`" option to see why the algorithm\nselected a given transformation, or use "`--save-details`" to save the \nlist of possible rules to the file `pred_detail_rules.txt` and to save \nthe list of rule pairs to `pred_detail_pairs.txt`.\n\n### 6) Use MMP to generate new structures\n\nThe rules in a MMP database give a sort of "playbook" about the\ntransformations which might be explored in medicinal chemistry. These\nrule can be applied to a given structure to generate new related\nstructures, following a method related to the transform command but\nignoring any property information. Here\'s an example using the default\nradius of 0, which means the environment fingerprint is ignored. 
(The\ncolumns have been re-formatted for the documentation.)\n\n\x08\n```shell\n% mmpdb generate --smiles \'c1ccccc1C(O)C\' test_data.mmpdb\nstart constant from_smiles to_smiles r pseudosmiles final\nCC(O)c1ccccc1 *C(C)c1ccccc1 [*:1]O [*:1][H] 0 [*:1](~*) CCc1ccccc1\nCC(O)c1ccccc1 *C(C)c1ccccc1 [*:1]O [*:1]N 0 [*:1](~*) CC(N)c1ccccc1\nCC(O)c1ccccc1 *C(C)c1ccccc1 [*:1]O [*:1]Cl 0 [*:1](~*) CC(Cl)c1ccccc1\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccccc1O 0 [*:1](~*) CC(O)c1ccccc1O\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccccc1N 0 [*:1](~*) CC(O)c1ccccc1N\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1cc(O)ccc1N 0 [*:1](~*) CC(O)c1cc(O)ccc1N\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccc(O)cc1N 0 [*:1](~*) CC(O)c1ccc(O)cc1N\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]C1CCCC1 0 [*:1](~*) CC(O)C1CCCC1\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccccc1Cl 0 [*:1](~*) CC(O)c1ccccc1Cl\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccc(N)c(N)c1 0 [*:1](~*) CC(O)c1ccc(N)c(N)c1\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1cc(O)ccc1O 0 [*:1](~*) CC(O)c1cc(O)ccc1O\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccc(O)c(O)c1 0 [*:1](~*) CC(O)c1ccc(O)c(O)c1\nCC(O)c1ccccc1 *C(C)O [*:1]c1ccccc1 [*:1]c1ccc(O)cc1O 0 [*:1](~*) CC(O)c1ccc(O)cc1O\n\n\x08\n#pairs pair_from_id pair_from_smiles pair_to_id pair_to_smiles\n4 2-aminophenol Nc1ccccc1O phenylamine Nc1ccccc1\n3 phenol Oc1ccccc1 phenylamine Nc1ccccc1\n1 catechol Oc1ccccc1O 2-chlorophenol Oc1ccccc1Cl\n2 phenylamine Nc1ccccc1 2-aminophenol Nc1ccccc1O\n2 phenylamine Nc1ccccc1 o-phenylenediamine Nc1ccccc1N\n1 phenylamine Nc1ccccc1 amidol Nc1ccc(O)cc1N\n1 phenylamine Nc1ccccc1 amidol Nc1ccc(O)cc1N\n1 phenylamine Nc1ccccc1 cyclopentanol NC1CCCC1\n1 phenol Oc1ccccc1 2-chlorophenol Oc1ccccc1Cl\n1 phenol Oc1ccccc1 amidol Nc1ccc(O)cc1N\n1 phenol Oc1ccccc1 hydroxyquinol Oc1ccc(O)c(O)c1\n1 phenol Oc1ccccc1 hydroxyquinol Oc1ccc(O)c(O)c1\n1 phenol Oc1ccccc1 hydroxyquinol Oc1ccc(O)c(O)c1\n```\n\nThe second half of the output shows the number of known pairs for 
the\ngiven rule environment (use `--min-pairs N` to require at least N\npairs), and gives a representative pair from the dataset.\n\nIn the above example, all of the fragmentations in the specified\n`--smiles` are used. Alternatively, you may specify `--smiles` and one\nof `--constant` or `--query` to use that specific fragmentation, or\nuse `--constant` and `--query` (without `--smiles`) to specify the\nexact pair.\n\nThere is also an option to generate `--subqueries`. This generates all\nof the unique 1-cut fragmentations of the query, and uses them as\nadditional queries. I\'ll use the `--constant` to specify the phenyl\ngroup, leaving the aminomethanol available as the query. I\'ll use\n`--subqueries` to include fragments of the query. I\'ll limit the\noutput `--columns` to the start and final SMILES structures, and the\nnumber of pairs. I\'ll use `--explain` to display debug information,\nand finally, I\'ll use `--no-header` to make the output a bit less\ncomplicated:\n\n\x08\n```shell\n% mmpdb generate --smiles \'c1ccccc1C(O)N\' --constant \'*c1ccccc1\' test_data.mmpdb \\\n --subqueries --columns start,final,#pairs --explain --no-header\nNumber of subqueries: 4\nSubqueries are: [\'*CN\', \'*CO\', \'*N\', \'*O\']\nUsing constant SMILES *c1ccccc1 with radius 0.\nEnvironment SMARTS: [#0;X1;H0;+0;!R:1] pseudoSMILES: [*:1](~*)\nNumber of matching environment rules: 42\nQuery SMILES [*:1]C(N)O is not a rule_smiles in the database.\nQuery SMILES [*:1]CN is not a rule_smiles in the database.\nQuery SMILES [*:1]CO is not a rule_smiles in the database.\nNc1ccccc1\tOc1ccccc1\t3\nNc1ccccc1\tc1ccccc1\t2\nNc1ccccc1\tClc1ccccc1\t1\nNumber of rules for [*:1]N: 3\nOc1ccccc1\tc1ccccc1\t4\nOc1ccccc1\tNc1ccccc1\t3\nOc1ccccc1\tClc1ccccc1\t1\nNumber of rules for [*:1]O: 3\n```\n\n')
class TInternetRadio(TestCase):
    """Tests for the InternetRadio browser."""

    def setUp(self):
        # Fresh config for every test; torn down again in tearDown().
        quodlibet.config.init()
        self.bar = InternetRadio(SongLibrary())

    def test_can_filter(self):
        # The browser supports both tag filtering and free-text filtering.
        self.assertTrue(self.bar.can_filter('foo'))
        self.assertTrue(self.bar.can_filter_text())

    def test_status_bar_text(self):
        # Singular vs. plural station count in the status bar text.
        self.assertEqual(self.bar.status_text(1), '1 station')
        self.assertEqual(self.bar.status_text(101, 123), '101 stations')

    # NOTE(review): the next line looks like the remnant of a stripped
    # decorator (presumably a network-skip marker taking
    # (is_windows() or is_osx(), reason)); as written it is not valid
    # Python — confirm against the original file.
    .network ((is_windows() or is_osx()), "Don't need to test downloads all the time")
    def test_click_add_station(self):
        # Clicking the update button triggers a station-list download
        # (network I/O); the list is populated once the GTK loop runs.
        self.bar._update_button.emit('clicked')
        assert (not self.bar.has_stations)
        run_gtk_loop()
        assert self.bar.has_stations

    def tearDown(self):
        self.bar.destroy()
        quodlibet.config.quit()
class CallContext:
    """Holds the arguments of a call site.

    ``args`` are the positional argument nodes, ``keywords`` the keyword
    arguments normalised to ``(name, value)`` pairs, and ``callee`` the
    inferred object being called (when known).
    """

    __slots__ = ('args', 'keywords', 'callee')

    def __init__(self, args: list[NodeNG], keywords: (list[Keyword] | None)=None, callee: (InferenceResult | None)=None):
        # Positional arguments, kept as-is.
        self.args = args
        # Normalise keyword nodes to (name, value) pairs; no keywords -> [].
        pairs = []
        if keywords:
            pairs = [(kw.arg, kw.value) for kw in keywords]
        self.keywords = pairs
        self.callee = callee
def _validate_setting(setting, value):
    """Validate an HTTP/2 SETTINGS value per RFC 7540.

    Returns 0 when ``value`` is acceptable for ``setting``, otherwise the
    ``ErrorCodes`` member the peer should be sent.  Unknown settings are
    accepted (return 0), as the spec requires them to be ignored.
    """
    if (setting == SettingCodes.ENABLE_PUSH):
        # Boolean flag: only 0 or 1 are legal (RFC 7540 s6.5.2).
        if (value not in (0, 1)):
            return ErrorCodes.PROTOCOL_ERROR
    elif (setting == SettingCodes.INITIAL_WINDOW_SIZE):
        # Window size is a 31-bit value: 0 .. 2**31 - 1 (RFC 7540 s6.5.2).
        if (not (0 <= value <= 2147483647)):
            return ErrorCodes.FLOW_CONTROL_ERROR
    elif (setting == SettingCodes.MAX_FRAME_SIZE):
        # Frame size must lie between 2**14 and 2**24 - 1 (RFC 7540 s6.5.2).
        if (not (16384 <= value <= 16777215)):
            return ErrorCodes.PROTOCOL_ERROR
    elif (setting == SettingCodes.MAX_HEADER_LIST_SIZE):
        # Any non-negative advisory size is allowed.
        if (value < 0):
            return ErrorCodes.PROTOCOL_ERROR
    elif (setting == SettingCodes.ENABLE_CONNECT_PROTOCOL):
        # Boolean flag per RFC 8441.
        if (value not in (0, 1)):
            return ErrorCodes.PROTOCOL_ERROR
    return 0
class _BaseMedium(TelegramObject):
    """Base class for media objects identified by a Telegram file id.

    Args:
        file_id: Identifier for the file; coerced to :obj:`str`.
        file_unique_id: Unique identifier for the file; coerced to
            :obj:`str` and used as the sole equality attribute.
        file_size: Optional. File size, when known.
    """

    __slots__ = ('file_id', 'file_size', 'file_unique_id')

    def __init__(self, file_id: str, file_unique_id: str, file_size: Optional[int]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # Coerce both identifiers to str so str-convertible inputs work too.
        self.file_id: str = str(file_id)
        self.file_unique_id: str = str(file_unique_id)
        self.file_size: Optional[int] = file_size
        # Equality/hash of media objects rest solely on the unique id.
        self._id_attrs = (self.file_unique_id,)

    async def get_file(self, *, read_timeout: ODVInput[float]=DEFAULT_NONE, write_timeout: ODVInput[float]=DEFAULT_NONE, connect_timeout: ODVInput[float]=DEFAULT_NONE, pool_timeout: ODVInput[float]=DEFAULT_NONE, api_kwargs: Optional[JSONDict]=None) -> 'File':
        """Shortcut for fetching this medium's file via the associated bot."""
        bot = self.get_bot()
        return (await bot.get_file(file_id=self.file_id, read_timeout=read_timeout, write_timeout=write_timeout, connect_timeout=connect_timeout, pool_timeout=pool_timeout, api_kwargs=api_kwargs))
class M4CDecodingBCEWithMaskLoss(nn.Module):
    """Masked binary cross-entropy (with logits) for M4C decoding.

    The per-element BCE is zeroed wherever ``loss_mask`` is 0 and the sum is
    divided by the number of unmasked positions (clamped to at least 1 so an
    all-zero mask cannot divide by zero).
    """

    def __init__(self):
        super().__init__()
        # Constant 1.0 used as the lower bound of the normaliser.
        self.one = torch.Tensor([1.0])

    def forward(self, scores, targets, loss_mask):
        # scores: (batch, seq, vocab); loss_mask: (batch, seq).
        assert ((scores.dim() == 3) and (loss_mask.dim() == 2))
        per_elem = F.binary_cross_entropy_with_logits(scores, targets, reduction='none')
        # Broadcast the mask over the last (vocab) dimension.
        masked = per_elem * loss_mask.unsqueeze((- 1))
        # Normalise by the unmasked count, never less than 1.
        denom = torch.max(torch.sum(loss_mask), self.one.to(masked.device))
        return (torch.sum(masked) / denom)
class KeyMapper(QtCore.QObject):
    """Binds menu actions to the shortcuts stored in pyzo's config."""

    # Emitted when the shortcut configuration changes.
    keyMappingChanged = QtCore.Signal()

    def setShortcut(self, action):
        """Apply the configured shortcuts (if any) to *action*.

        Shortcuts are stored comma-separated under the action's menu path;
        a human-readable form is stashed on the action for display.
        """
        if (action.menuPath in pyzo.config.shortcuts2):
            shortcuts = pyzo.config.shortcuts2[action.menuPath]
            action.setShortcuts(shortcuts.split(','))
            pyzo.main.addAction(action)
            # Pretty-print for display: "A,B" -> "A, B", then collapse any
            # double spaces this may create when a space already followed
            # the comma.  (Original code had the no-op replace(' ', ' ').)
            shortcuts = shortcuts.replace(',', ', ').replace('  ', ' ')
            action._shortcutsText = shortcuts.rstrip(', ')
def main():
    """Convert the replays of one match-up into spatial feature tensors.

    Reads the replay list named by ``FLAGS.hq_replay_set``, prepares one
    output directory and per-race normalisation stats for each race in the
    match-up, then parses every replay in a worker pool.
    """
    # Sorted replay list for this match-up (e.g. "Terran_vs_Zerg").
    with open(FLAGS.hq_replay_set) as f:
        replay_list = sorted(json.load(f))
    race_vs_race = os.path.basename(FLAGS.hq_replay_set).split('.')[0]
    out_root = os.path.join(FLAGS.parsed_replay_path, 'SpatialFeatureTensor', race_vs_race)
    races = set(race_vs_race.split('_vs_'))
    # Per race: ensure the output directory exists and load the per-race
    # stats ("max" vector for normalisation plus the action-id mapping).
    stats = {}
    for race in races:
        race_dir = os.path.join(out_root, race)
        if (not os.path.isdir(race_dir)):
            os.makedirs(race_dir)
        stat = load_stat(os.path.join(FLAGS.parsed_replay_path, 'Stat', '{}.json'.format(race)))
        stats[race] = {'max': np.asarray([stat[('max_' + key)] for key in max_keys]), 'action_id': stat['action_id']}
    # Fan the replays out to FLAGS.n_workers processes; tick the progress
    # bar as each one finishes.
    progress = tqdm(total=len(replay_list), desc='#Replay')
    with Pool(FLAGS.n_workers) as pool:
        for _ in pool.imap(Parser(race_vs_race, races, stats), replay_list):
            progress.update()
class CollectiveUtilsTest(unittest.TestCase):
    """Multi-process tests for the collective helpers (is_leader,
    invoke_on_rank_and_broadcast_result and the run-on-leader decorator).
    Each test spawns ``WORLD_SIZE`` processes that join a gloo process group.
    """

    # NOTE(review): "_and_log" looks like the remnant of a stripped
    # decorator (possibly "@seed_and_log") — confirm against the original.
    _and_log
    def setUp(self) -> None:
        # Rendezvous settings for torch.distributed; a free port avoids
        # collisions between test runs.
        os.environ['MASTER_ADDR'] = str(MASTER_ADDR)
        os.environ['MASTER_PORT'] = str(get_free_port())
        os.environ['GLOO_DEVICE_TRANSPORT'] = 'TCP'
        os.environ['NCCL_SOCKET_IFNAME'] = 'lo'
        self.WORLD_SIZE = 2

    def tearDown(self) -> None:
        del os.environ['GLOO_DEVICE_TRANSPORT']
        del os.environ['NCCL_SOCKET_IFNAME']
        super().tearDown()

    def _run_multi_process_test(self, world_size: int, backend: str, callable: Callable[([], None)]) -> None:
        # Spawn one process per rank, then require every process to exit 0.
        processes = []
        ctx = multiprocessing.get_context('spawn')
        for rank in range(world_size):
            p = ctx.Process(target=callable, args=(rank, world_size, backend))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
            self.assertEqual(0, p.exitcode)

    # NOTE(review): takes "cls" but no @classmethod decorator is visible —
    # it was probably stripped from this copy; confirm against the original.
    def _test_is_leader(cls, rank: int, world_size: int, backend: str) -> None:
        # Each rank should see itself (and only itself) as leader of its rank.
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        pg = dist.new_group(ranks=[0, 1], backend=backend)
        if (pg.rank() == 0):
            assert (is_leader(pg, 0) is True)
            assert (is_leader(pg, 1) is False)
        else:
            assert (is_leader(pg, 1) is True)
            assert (is_leader(pg, 0) is False)
        dist.destroy_process_group()

    def test_is_leader(self) -> None:
        self._run_multi_process_test(world_size=self.WORLD_SIZE, backend='gloo', callable=self._test_is_leader)

    # NOTE(review): takes "cls" but no @classmethod decorator is visible —
    # probably stripped; confirm against the original.
    def _test_invoke_on_rank_and_broadcast_result(cls, rank: int, world_size: int, backend: str) -> None:
        # The function must run only on the designated rank, yet its result
        # (that rank's pg.rank()) must be broadcast to every rank.
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        pg = dist.new_group(ranks=[0, 1], backend=backend)
        func = mock.MagicMock()
        func.return_value = pg.rank()
        res = invoke_on_rank_and_broadcast_result(pg=pg, rank=0, func=func)
        assert (res == 0), f'Expect res to be 0 (got {res})'
        if (pg.rank() == 0):
            func.assert_called_once()
        else:
            func.assert_not_called()
        func.reset_mock()
        res = invoke_on_rank_and_broadcast_result(pg=pg, rank=1, func=func)
        assert (res == 1), f'Expect res to be 1 (got {res})'
        if (pg.rank() == 0):
            func.assert_not_called()
        else:
            func.assert_called_once()
        dist.destroy_process_group()

    def test_invoke_on_rank_and_broadcast_result(self) -> None:
        self._run_multi_process_test(world_size=self.WORLD_SIZE, backend='gloo', callable=self._test_invoke_on_rank_and_broadcast_result)

    # NOTE(review): takes "cls" but no @classmethod decorator is visible —
    # probably stripped; confirm against the original.
    def _test_run_on_leader_decorator(cls, rank: int, world_size: int, backend: str) -> None:
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        pg = dist.new_group(ranks=[0, 1], backend=backend)
        # NOTE(review): "_on_leader(pg, N)" looks like the remnant of a
        # stripped decorator (likely "@run_on_leader(pg, N)", given the test
        # name below and the broadcast assertions) — confirm against the
        # original file.
        _on_leader(pg, 0)
        def _test_run_on_0(rank: int) -> int:
            return rank
        res = _test_run_on_0(pg.rank())
        assert (res == 0)
        _on_leader(pg, 1)
        def _test_run_on_1(rank: int) -> int:
            return rank
        res = _test_run_on_1(pg.rank())
        assert (res == 1)
        dist.destroy_process_group()

    def test_run_on_leader_decorator(self) -> None:
        self._run_multi_process_test(world_size=self.WORLD_SIZE, backend='gloo', callable=self._test_run_on_leader_decorator)
# NOTE(review): "_fixtures(...)" looks like the remnant of a stripped
# decorator (likely "@with_fixtures(WebFixture, DisclosedInputFixture)") —
# confirm against the original file.
_fixtures(WebFixture, DisclosedInputFixture)
def test_input_values_retained_upon_domain_exception(web_fixture, disclosed_input_fixture):
    """After a domain exception on submit, the form is re-rendered with the
    previously entered input — including the disclosure trigger state and
    the disclosed Email value — retained.
    """
    fixture = disclosed_input_fixture
    # Force the submit handler to raise, and start with the trigger
    # unchecked so the Email input is initially absent from the page.
    fixture.raise_domain_exception_on_submit = True
    fixture.default_trigger_field_value = False
    wsgi_app = web_fixture.new_wsgi_app(enable_js=True, child_factory=fixture.MyForm.factory())
    web_fixture.reahl_server.set_app(wsgi_app)
    browser = web_fixture.driver_browser
    browser.open('/')
    # Email only appears once the trigger is clicked.
    assert (not browser.is_element_present(XPath.input_labelled('Email')))
    browser.click(XPath.input_labelled('Trigger field'))
    # NOTE(review): the value typed (and asserted below) is the empty
    # string; it may originally have been an email address that was
    # stripped from this copy — verify against the original file.
    browser.type(XPath.input_labelled('Email'), '')
    browser.click(XPath.button_labelled('click me'))
    # The exception message is shown and both inputs keep their values.
    assert browser.is_element_present(XPath.paragraph().including_text('Exception raised'))
    assert browser.is_selected(XPath.input_labelled('Trigger field'))
    assert (browser.get_value(XPath.input_labelled('Email')) == '')
def get_func_target(builder: IRBuilder, fdef: FuncDef) -> AssignmentTarget:
    """Return the assignment target to store *fdef*'s function object in.

    A redefinition reuses the target of the original definition; functions
    in generators or in scopes that keep nested functions in the environment
    are looked up there; anything else gets a plain local register.
    """
    original = fdef.original_def
    if original:
        # Get the target associated with the previously defined FuncDef.
        return builder.lookup(original)
    fn_info = builder.fn_info
    if (fn_info.is_generator or fn_info.add_nested_funcs_to_env):
        # Get the target associated with the current environment.
        return builder.lookup(fdef)
    return builder.add_local_reg(fdef, object_rprimitive)
def do_parse(file_path):
    """Parse a (possibly commented) JSON manifest and validate its schema.

    Returns a ``(message, parsed)`` tuple: ``message`` is '' on success,
    otherwise an error description; ``parsed`` is the decoded JSON object
    or None when it could not be read/decoded at all.
    """
    # Read raw bytes first so the character encoding can be sniffed.
    try:
        with io.open(file_path, 'rb') as fp:
            json_data = fp.read()
    except Exception as e:
        return (str(e), None)
    # Fallback encoding label, used in the error message below if
    # chardet itself fails.
    h = {}
    h['encoding'] = 'unknown'
    try:
        h = chardet.detect(json_data)
        try:
            # Re-read as text using the detected encoding.
            with io.open(file_path, 'r', encoding=h['encoding']) as fp:
                json_data = fp.read()
        except Exception as e:
            return (str(e), None)
    except Exception:
        # chardet failed: fall back to the platform default encoding.
        try:
            with io.open(file_path, 'r') as fp:
                json_data = fp.read()
        except Exception as e:
            return (str(e), None)
    # JsonComment tolerates //-style comments that plain json rejects.
    parser = jsoncomment.JsonComment(json)
    try:
        j = parser.loads(json_data)
        rv = ''
        try:
            jsonschema.validate(j, scoop_schema_data)
            return ('', j)
        except Exception as e:
            # Boil the (verbose) jsonschema error down to its
            # "Failed validating ..." line when present.
            err = str(e)
            err = parse_validation_error(err)
            m = re.search('(Failed validating.*)', err)
            if (m is not None):
                err = m.group(1)
            else:
                err = ('Failed schema validation against %s' % scoop_schema_name)
            # additionalProperties violations are deliberately tolerated.
            if re.search('additionalProperties', err):
                return (rv, j)
            rv = err
        return (rv, j)
    except Exception as e:
        # Decoding failed: retry with //-comment lines blanked out so the
        # caller still gets a best-effort parse alongside the error.
        lines = json_data.splitlines()
        s = ''
        for line in lines:
            line = re.sub('^\\s*//.*$', '', line)
            s += (line + '\n')
        try:
            j = parser.loads(s)
        except Exception:
            j = None
        rv = ('%s (%s)' % (str(e), h['encoding']))
        return (rv, j)
class TestConnTrackCollector(CollectorTestCase):
    """Tests for ConnTrackCollector using fixture conntrack data.

    The bare tuples that preceded some methods in this copy (e.g.
    ``(Collector, 'publish')``) were no-op remnants of stripped
    ``@patch``/``@patch.object`` decorators; they are restored here —
    each decorated method receives the corresponding ``publish_mock``.
    """

    def setUp(self):
        config = get_collector_config('ConnTrackCollector', {'interval': 10, 'bin': 'true', 'dir': self.getFixtureDirPath()})
        self.collector = ConnTrackCollector(config, None)

    def test_import(self):
        self.assertTrue(ConnTrackCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_synthetic_data(self, publish_mock):
        self.collector.collect()
        metrics = {'ip_conntrack_count': 33.0, 'ip_conntrack_max': 36.0}
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)

    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_fail_gracefully(self, publish_mock):
        # Simulate sysctl reporting that the conntrack proc files are absent.
        patch_communicate = patch('subprocess.Popen.communicate', Mock(return_value=((('sysctl: cannot stat /proc/sys/net/net' + 'filter/nf_conntrack_count: ') + 'No such file or directory'), '')))
        patch_communicate.start()
        self.collector.collect()
        patch_communicate.stop()
        self.assertPublishedMany(publish_mock, {})

    @patch('os.access', Mock(return_value=False))
    @patch.object(Collector, 'publish')
    def test_should_fail_gracefully_2(self, publish_mock):
        # Files not accessible at all: nothing should be published.
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})