code
stringlengths
101
5.91M
# Normalise arbitrary tabular input into a (rows, headers) pair.
# Accepts: a mapping of column-name -> column values (dict-like, detected via
# callable .values), a pandas-DataFrame-like object (detected via .index, where
# .values is an array and the index is prepended as the first column), a numpy
# structured array (headers from dtype.names), a list of namedtuples (headers
# from _fields), or a plain iterable of rows.
# `headers` may be 'keys' (derive column names / positional indices) or
# 'firstrow' (pop the first data row and use it as the header).
# If fewer headers than columns are supplied, the header list is left-padded
# with empty strings so lengths match.
# NOTE(review): relies on module-level `zip_longest` and `_text_type`
# (py2/py3 text type) -- this looks like vendored `tabulate` internals; confirm.
def _normalize_tabular_data(tabular_data, headers): if (hasattr(tabular_data, 'keys') and hasattr(tabular_data, 'values')): if hasattr(tabular_data.values, '__call__'): keys = list(tabular_data.keys()) rows = list(zip_longest(*list(tabular_data.values()))) elif hasattr(tabular_data, 'index'): keys = list(tabular_data.keys()) vals = tabular_data.values names = tabular_data.index rows = [([v] + list(row)) for (v, row) in zip(names, vals)] else: raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if (headers == 'keys'): headers = list(map(_text_type, keys)) else: rows = list(tabular_data) if ((headers == 'keys') and hasattr(tabular_data, 'dtype') and getattr(tabular_data.dtype, 'names')): headers = tabular_data.dtype.names elif ((headers == 'keys') and (len(rows) > 0) and isinstance(rows[0], tuple) and hasattr(rows[0], '_fields')): headers = list(map(_text_type, rows[0]._fields)) elif ((headers == 'keys') and (len(rows) > 0)): headers = list(map(_text_type, list(range(len(rows[0]))))) if ((headers == 'firstrow') and (len(rows) > 0)): headers = list(map(_text_type, rows[0])) rows = rows[1:] headers = list(headers) rows = list(map(list, rows)) if (headers and (len(rows) > 0)): nhs = len(headers) ncols = len(rows[0]) if (nhs < ncols): headers = (([''] * (ncols - nhs)) + headers) return (rows, headers)
# Run a single MOABB/SpeechBrain training + evaluation experiment.
# Computes inverse-frequency class weights from the training tensors (so the
# rarest class gets the largest weight), wires up a Checkpointer and a file
# train logger under hparams['exp_dir'], logs dataset statistics, trains a
# MOABBBrain on train/valid, then evaluates on the test split and -- with
# model averaging reduced to 1 -- on the validation split.
# NOTE(review): mutates `hparams` in place (adds 'class_weights' and
# 'train_logger') -- callers appear to rely on this; confirm.
# Assumes each dataset wraps a TensorDataset whose tensors are (X, y).
def run_experiment(hparams, run_opts, datasets): idx_examples = np.arange(datasets['train'].dataset.tensors[0].shape[0]) n_examples_perclass = [idx_examples[np.where((datasets['train'].dataset.tensors[1] == c))[0]].shape[0] for c in range(hparams['n_classes'])] n_examples_perclass = np.array(n_examples_perclass) class_weights = (n_examples_perclass.max() / n_examples_perclass) hparams['class_weights'] = class_weights checkpointer = sb.utils.checkpoints.Checkpointer(checkpoints_dir=os.path.join(hparams['exp_dir'], 'save'), recoverables={'model': hparams['model'], 'counter': hparams['epoch_counter']}) hparams['train_logger'] = sb.utils.train_logger.FileTrainLogger(save_file=os.path.join(hparams['exp_dir'], 'train_log.txt')) logger = logging.getLogger(__name__) logger.info('Experiment directory: {0}'.format(hparams['exp_dir'])) logger.info('Input shape: {0}'.format(datasets['train'].dataset.tensors[0].shape[1:])) logger.info('Training set avg value: {0}'.format(datasets['train'].dataset.tensors[0].mean())) datasets_summary = 'Number of examples: {0} (training), {1} (validation), {2} (test)'.format(datasets['train'].dataset.tensors[0].shape[0], datasets['valid'].dataset.tensors[0].shape[0], datasets['test'].dataset.tensors[0].shape[0]) logger.info(datasets_summary) brain = MOABBBrain(modules={'model': hparams['model']}, opt_class=hparams['optimizer'], hparams=hparams, run_opts=run_opts, checkpointer=checkpointer) brain.fit(epoch_counter=hparams['epoch_counter'], train_set=datasets['train'], valid_set=datasets['valid'], progressbar=False) perform_evaluation(brain, hparams, datasets, dataset_key='test') brain.hparams.avg_models = 1 perform_evaluation(brain, hparams, datasets, dataset_key='valid')
def label_payload_parser(accessor, label):
    """Build a payload parser mapping *accessor*'s value to a 'label' field.

    Thin convenience wrapper around ``dict_payload_parser`` with a
    single-key payload template.
    """
    payload_template = {'label': label}
    return dict_payload_parser(accessor, payload_template)
# pyparsing token that matches a quoted string.
# __init__ validates the quote characters, then builds a single regex that
# matches: opening quote, any run of characters that is not the end quote /
# newline (unless multiline), or an escaped quote (escQuote), or an
# escape-char + any char (escChar), followed by the closing quote.  For
# multi-character end quotes, alternatives are added so partial end-quote
# prefixes inside the body do not terminate the match.
# parseImpl strips the quotes when unquoteResults is set, optionally converts
# literal whitespace escapes (\t, \n, \f, \r) to real characters, removes the
# escape character, and restores escaped quotes.
# NOTE(review): py2-era code -- `basestring`/`_ustr` come from the enclosing
# pyparsing module; warning text says "Regex" although this is QuotedString.
class QuotedString(Token): def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): super(QuotedString, self).__init__() quoteChar = quoteChar.strip() if (not quoteChar): warnings.warn('quoteChar cannot be the empty string', SyntaxWarning, stacklevel=2) raise SyntaxError() if (endQuoteChar is None): endQuoteChar = quoteChar else: endQuoteChar = endQuoteChar.strip() if (not endQuoteChar): warnings.warn('endQuoteChar cannot be the empty string', SyntaxWarning, stacklevel=2) raise SyntaxError() self.quoteChar = quoteChar self.quoteCharLen = len(quoteChar) self.firstQuoteChar = quoteChar[0] self.endQuoteChar = endQuoteChar self.endQuoteCharLen = len(endQuoteChar) self.escChar = escChar self.escQuote = escQuote self.unquoteResults = unquoteResults self.convertWhitespaceEscapes = convertWhitespaceEscapes if multiline: self.flags = (re.MULTILINE | re.DOTALL) self.pattern = ('%s(?:[^%s%s]' % (re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (((escChar is not None) and _escapeRegexRangeChars(escChar)) or ''))) else: self.flags = 0 self.pattern = ('%s(?:[^%s\\n\\r%s]' % (re.escape(self.quoteChar), _escapeRegexRangeChars(self.endQuoteChar[0]), (((escChar is not None) and _escapeRegexRangeChars(escChar)) or ''))) if (len(self.endQuoteChar) > 1): self.pattern += (('|(?:' + ')|(?:'.join((('%s[^%s]' % (re.escape(self.endQuoteChar[:i]), _escapeRegexRangeChars(self.endQuoteChar[i]))) for i in range((len(self.endQuoteChar) - 1), 0, (- 1))))) + ')') if escQuote: self.pattern += ('|(?:%s)' % re.escape(escQuote)) if escChar: self.pattern += ('|(?:%s.)' % re.escape(escChar)) self.escCharReplacePattern = (re.escape(self.escChar) + '(.)') self.pattern += (')*%s' % re.escape(self.endQuoteChar)) try: self.re = re.compile(self.pattern, self.flags) self.reString = self.pattern except sre_constants.error: warnings.warn(('invalid pattern (%s) passed to Regex' % self.pattern), 
SyntaxWarning, stacklevel=2) raise self.name = _ustr(self) self.errmsg = ('Expected ' + self.name) self.mayIndexError = False self.mayReturnEmpty = True def parseImpl(self, instring, loc, doActions=True): result = (((instring[loc] == self.firstQuoteChar) and self.re.match(instring, loc)) or None) if (not result): raise ParseException(instring, loc, self.errmsg, self) loc = result.end() ret = result.group() if self.unquoteResults: ret = ret[self.quoteCharLen:(- self.endQuoteCharLen)] if isinstance(ret, basestring): if (('\\' in ret) and self.convertWhitespaceEscapes): ws_map = {'\\t': '\t', '\\n': '\n', '\\f': '\x0c', '\\r': '\r'} for (wslit, wschar) in ws_map.items(): ret = ret.replace(wslit, wschar) if self.escChar: ret = re.sub(self.escCharReplacePattern, '\\g<1>', ret) if self.escQuote: ret = ret.replace(self.escQuote, self.endQuoteChar) return (loc, ret) def __str__(self): try: return super(QuotedString, self).__str__() except Exception: pass if (self.strRepr is None): self.strRepr = ('quoted string, starting with %s ending with %s' % (self.quoteChar, self.endQuoteChar)) return self.strRepr
def mkdir_p(path):
    """Create *path* and all missing parent directories, like ``mkdir -p``.

    A pre-existing directory is not an error; a pre-existing
    non-directory entry (or any other OS failure) still raises.
    """
    # os.makedirs(exist_ok=True) performs the EEXIST-and-isdir check this
    # function previously did by hand (available since Python 3.2): it only
    # suppresses the error when the final path component is a directory.
    os.makedirs(path, exist_ok=True)
# pytest test: for every supported backend name (case-insensitive for
# 'PyTorch'), setting the pyhf backend must expose non-None slotted
# attributes on the resulting tensorlib.
# NOTE(review): the leading `.parametrize(...)` is a truncated
# `@pytest.mark.parametrize` decorator -- the `@pytest.mark` prefix was lost
# in extraction; restore it before running.
.parametrize('backend_name', ['numpy', 'tensorflow', 'pytorch', 'PyTorch']) def test_backend_slotted_attributes(backend_name): pyhf.set_backend(backend_name) for attr in ['name', 'precision', 'dtypemap', 'default_do_grad']: assert (getattr(pyhf.tensorlib, attr) is not None)
# pip's `freeze` subcommand: outputs installed packages in requirements
# format.  add_options registers the CLI flags (-r/-f/-l/--user/--path/--all/
# --exclude-editable); run builds the keyword arguments for the `freeze`
# generator and writes one requirement line per package to stdout.
# Packages in DEV_PKGS are skipped unless --all is given; stdlib packages are
# always skipped.
# NOTE(review): pip-internal types (Command, WheelCache, FormatControl,
# cmdoptions, freeze, SUCCESS) come from the surrounding package.
class FreezeCommand(Command): usage = '\n %prog [options]' log_streams = ('ext://sys.stderr', 'ext://sys.stderr') def add_options(self): self.cmd_opts.add_option('-r', '--requirement', dest='requirements', action='append', default=[], metavar='file', help='Use the order in the given requirements file and its comments when generating output. This option can be used multiple times.') self.cmd_opts.add_option('-f', '--find-links', dest='find_links', action='append', default=[], metavar='URL', help='URL for finding packages, which will be added to the output.') self.cmd_opts.add_option('-l', '--local', dest='local', action='store_true', default=False, help='If in a virtualenv that has global access, do not output globally-installed packages.') self.cmd_opts.add_option('--user', dest='user', action='store_true', default=False, help='Only output packages installed in user-site.') self.cmd_opts.add_option(cmdoptions.list_path()) self.cmd_opts.add_option('--all', dest='freeze_all', action='store_true', help='Do not skip these packages in the output: {}'.format(', '.join(DEV_PKGS))) self.cmd_opts.add_option('--exclude-editable', dest='exclude_editable', action='store_true', help='Exclude editable package from output.') self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): format_control = FormatControl(set(), set()) wheel_cache = WheelCache(options.cache_dir, format_control) skip = set(stdlib_pkgs) if (not options.freeze_all): skip.update(DEV_PKGS) cmdoptions.check_list_path_option(options) freeze_kwargs = dict(requirement=options.requirements, find_links=options.find_links, local_only=options.local, user_only=options.user, paths=options.path, isolated=options.isolated_mode, wheel_cache=wheel_cache, skip=skip, exclude_editable=options.exclude_editable) for line in freeze(**freeze_kwargs): sys.stdout.write((line + '\n')) return SUCCESS
def test_clean_inplace(df_broken_email: pd.DataFrame) -> None:
    """clean_email with inplace=True should yield only the cleaned column."""
    cleaned = clean_email(df_broken_email, 'messy_email', inplace=True)
    expected_values = ['', '', None, '', None, None, None, None]
    expected = pd.DataFrame({'messy_email_clean': expected_values})
    assert expected.equals(cleaned)
def load_data(file):
    """Load a pickled pandas DataFrame and drop unused columns.

    Parameters
    ----------
    file : str or path-like
        Path to a pickle file containing a DataFrame with (at least)
        'Meth', 'Eth' and 'Time' columns.

    Returns
    -------
    pandas.DataFrame
        The loaded frame without the 'Meth', 'Eth' and 'Time' columns.
    """
    data = pd.read_pickle(file)
    # Drop all unused columns in one pass instead of three separate
    # in-place drops; the result is identical for callers.
    return data.drop(columns=['Meth', 'Eth', 'Time'])
def search_network(nnp, name):
    """Return the network named *name* from an NNP protobuf, or None.

    Scans ``nnp.protobuf.network`` and yields the first entry whose
    ``name`` attribute matches exactly.
    """
    matches = (net for net in nnp.protobuf.network if net.name == name)
    return next(matches, None)
# Dispatch one preprocessed Lean instruction to its soundness-proof generator.
# Looks up the current instruction via ctx.lean_desc_num and routes it by type:
# Nop -> nothing; AddAp/Const/Assert(+Compound)/Reference/TempVar (allocating
# vs plain)/FuncCall -> the corresponding mk_auto_soundness_* helper.
# Raises for any instruction kind not handled yet.
# NOTE(review): isinstance order matters -- the alloc-tempvar case must be
# tested before the plain TempVar case, as it is here.
def mk_auto_soundness_step_instr(ctx: LeanGenContext): instr = ctx.func.lean_desc[ctx.lean_desc_num] if isinstance(instr, LeanPreprocessedNop): return if isinstance(instr, LeanPreprocessedAddAp): mk_auto_soundness_add_ap(ctx, instr) elif isinstance(instr, LeanPreprocessedConst): mk_auto_soundness_const(ctx, instr) elif (isinstance(instr, LeanPreprocessedAssertEq) or isinstance(instr, LeanPreprocessedCompoundAssertEq)): mk_auto_soundness_assert(ctx, instr) elif isinstance(instr, LeanPreprocessedReference): mk_auto_soundness_ref(ctx, instr) elif (isinstance(instr, LeanPreprocessedTempVar) and is_alloc_tempvar(instr)): mk_auto_soundness_alloc_tempvar(ctx, instr) elif isinstance(instr, LeanPreprocessedTempVar): mk_auto_soundness_tempvar(ctx, instr) elif isinstance(instr, LeanPreprocessedFuncCall): mk_auto_soundness_call(ctx, instr) else: raise Exception('mk_auto_soundness_step_instr: not handled yet')
def buildDataFeatures(usedFeatures):
    """Assemble the ``dataFeatures`` CTE from the given feature sub-queries.

    Each entry of *usedFeatures* is a SQL fragment; ``formatUnions``
    supplies the separator to place before each fragment (tracking how
    many members have been emitted so far).
    """
    query = '\ndataFeatures as (\nSELECT distinct ip, p, server FROM (\n'
    union_counter = 0
    for feature_sql in usedFeatures:
        union_counter, separator = formatUnions(union_counter)
        query += separator + feature_sql
    query += '\n)),'
    return query
class DeepEnsembleTrajectorySampler(TrajectorySampler[DeepEnsembleModel]):
    """Trajectory sampler for :class:`DeepEnsembleModel` models.

    Draws trajectories from a deep ensemble; ``diversify`` toggles the
    diversified sampling mode, and ``seed`` fixes the randomness (a random
    seed in [0, 10000) is drawn when none is given).
    """

    def __init__(self, model: DeepEnsembleModel, diversify: bool = False, seed: Optional[int] = None):
        if not isinstance(model, DeepEnsembleModel):
            raise NotImplementedError(f'EnsembleTrajectorySampler only works with DeepEnsembleModel models, that support ensemble_size and ensemble_distributions methods; received {model.__repr__()}')
        super().__init__(model)
        self._model = model
        self._diversify = diversify
        # Draw a seed when none is supplied so each sampler is reproducible.
        self._seed = (seed or int(tf.random.uniform(shape=(), maxval=10000, dtype=tf.int32)))

    def __repr__(self) -> str:
        # Bug fix: the closing parenthesis was missing from the repr string.
        return f'{self.__class__.__name__}({self._model!r})'

    def get_trajectory(self) -> TrajectoryFunction:
        """Create a fresh trajectory function for the current model state."""
        return deep_ensemble_trajectory(self._model, self._diversify, self._seed)

    def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
        """Refresh *trajectory* in place after a model update and return it."""
        tf.debugging.Assert(isinstance(trajectory, deep_ensemble_trajectory), [tf.constant([])])
        trajectory.resample()
        return trajectory

    def resample_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
        """Resample *trajectory* in place and return it."""
        tf.debugging.Assert(isinstance(trajectory, deep_ensemble_trajectory), [tf.constant([])])
        trajectory.resample()
        return trajectory
# Cython compiler AST node for the '/' and '//' binary operators.
# Handles the interplay of Python true division vs C division ('cdivision'
# directive), compile-time constant folding (find_compile_time_binary_operator
# maps '/' on two ints to '//' when truedivision is undecided), zero-division
# and signed-overflow runtime checks (generate_div_warning_code emits the C
# guards, acquiring the GIL first when in a nogil context), and the final C
# expression (calculate_result_code: complex/C++ delegate to the base class,
# float '//' becomes floor(a / b), C division uses a __Pyx_div_* helper).
# NOTE(review): statement order in the code-generation methods mirrors the
# emitted C exactly -- do not reorder.  Types/utilities (NumBinopNode,
# PyrexTypes, UtilityCode, Naming) come from the surrounding Cython package.
class DivNode(NumBinopNode): cdivision = None truedivision = None ctruedivision = False cdivision_warnings = False zerodivision_check = None def find_compile_time_binary_operator(self, op1, op2): func = compile_time_binary_operators[self.operator] if ((self.operator == '/') and (self.truedivision is None)): if (isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types)): func = compile_time_binary_operators['//'] return func def calculate_constant_result(self): op1 = self.operand1.constant_result op2 = self.operand2.constant_result func = self.find_compile_time_binary_operator(op1, op2) self.constant_result = func(self.operand1.constant_result, self.operand2.constant_result) def compile_time_value(self, denv): operand1 = self.operand1.compile_time_value(denv) operand2 = self.operand2.compile_time_value(denv) try: func = self.find_compile_time_binary_operator(operand1, operand2) return func(operand1, operand2) except Exception as e: self.compile_time_value_error(e) def _check_truedivision(self, env): if (self.cdivision or env.directives['cdivision']): self.ctruedivision = False else: self.ctruedivision = self.truedivision def infer_type(self, env): self._check_truedivision(env) return self.result_type(self.operand1.infer_type(env), self.operand2.infer_type(env), env) def analyse_operation(self, env): self._check_truedivision(env) NumBinopNode.analyse_operation(self, env) if self.is_cpp_operation(): self.cdivision = True if (not self.type.is_pyobject): self.zerodivision_check = ((self.cdivision is None) and (not env.directives['cdivision']) and ((not self.operand2.has_constant_result()) or (self.operand2.constant_result == 0))) if (self.zerodivision_check or env.directives['cdivision_warnings']): self.operand1 = self.operand1.coerce_to_simple(env) self.operand2 = self.operand2.coerce_to_simple(env) def compute_c_result_type(self, type1, type2): if ((self.operator == '/') and self.ctruedivision and (not type1.is_cpp_class) and (not type2.is_cpp_class)): if 
((not type1.is_float) and (not type2.is_float)): widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type) widest_type = PyrexTypes.widest_numeric_type(type2, widest_type) return widest_type return NumBinopNode.compute_c_result_type(self, type1, type2) def zero_division_message(self): if self.type.is_int: return 'integer division or modulo by zero' else: return 'float division' def generate_evaluation_code(self, code): if ((not self.type.is_pyobject) and (not self.type.is_complex)): if (self.cdivision is None): self.cdivision = (code.globalstate.directives['cdivision'] or self.type.is_float or ((self.type.is_numeric or self.type.is_enum) and (not self.type.signed))) if (not self.cdivision): code.globalstate.use_utility_code(UtilityCode.load_cached('DivInt', 'CMath.c').specialize(self.type)) NumBinopNode.generate_evaluation_code(self, code) self.generate_div_warning_code(code) def generate_div_warning_code(self, code): in_nogil = self.in_nogil_context if (not self.type.is_pyobject): if self.zerodivision_check: if (not self.infix): zero_test = ('%s(%s)' % (self.type.unary_op('zero'), self.operand2.result())) else: zero_test = ('%s == 0' % self.operand2.result()) code.putln(('if (unlikely(%s)) {' % zero_test)) if in_nogil: code.put_ensure_gil() code.putln(('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())) if in_nogil: code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln('}') if (self.type.is_int and self.type.signed and (self.operator != '%')): code.globalstate.use_utility_code(UtilityCode.load_cached('UnaryNegOverflows', 'Overflow.c')) if (self.operand2.type.signed == 2): minus1_check = ('unlikely(%s == -1)' % self.operand2.result()) else: type_of_op2 = self.operand2.type.empty_declaration_code() minus1_check = ('(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (type_of_op2, self.operand2.result(), type_of_op2)) code.putln(('else if (sizeof(%s) == sizeof(long) && %s && 
unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {' % (self.type.empty_declaration_code(), minus1_check, self.operand1.result()))) if in_nogil: code.put_ensure_gil() code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");') if in_nogil: code.put_release_ensured_gil() code.putln(code.error_goto(self.pos)) code.putln('}') if (code.globalstate.directives['cdivision_warnings'] and (self.operator != '/')): code.globalstate.use_utility_code(UtilityCode.load_cached('CDivisionWarning', 'CMath.c')) code.putln(('if (unlikely((%s < 0) ^ (%s < 0))) {' % (self.operand1.result(), self.operand2.result()))) warning_code = ('__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)' % {'FILENAME': Naming.filename_cname, 'LINENO': Naming.lineno_cname}) if in_nogil: result_code = 'result' code.putln(('int %s;' % result_code)) code.put_ensure_gil() code.putln(code.set_error_info(self.pos, used=True)) code.putln(('%s = %s;' % (result_code, warning_code))) code.put_release_ensured_gil() else: result_code = warning_code code.putln(code.set_error_info(self.pos, used=True)) code.put(('if (unlikely(%s)) ' % result_code)) code.put_goto(code.error_label) code.putln('}') def calculate_result_code(self): if (self.type.is_complex or self.is_cpp_operation()): return NumBinopNode.calculate_result_code(self) elif (self.type.is_float and (self.operator == '//')): return ('floor(%s / %s)' % (self.operand1.result(), self.operand2.result())) elif (self.truedivision or self.cdivision): op1 = self.operand1.result() op2 = self.operand2.result() if self.truedivision: if (self.type != self.operand1.type): op1 = self.type.cast_code(op1) if (self.type != self.operand2.type): op2 = self.type.cast_code(op2) return ('(%s / %s)' % (op1, op2)) else: return ('__Pyx_div_%s(%s, %s)' % (self.type.specialization_name(), self.operand1.result(), self.operand2.result()))
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Pybindgen error handler that suppresses wrapper-generation failures."""

    def handle_error(self, dummy_wrapper, dummy_exception, dummy_traceback_):
        # Returning True tells pybindgen the error was handled, so the
        # offending wrapper is skipped instead of aborting generation.
        return True
def csv_rel2abs_path_convertor(csv_filenames: list, delimiter: str=' ', encoding: str='utf8') -> None:
    """Rewrite relative image paths in label CSVs to absolute paths.

    For each input CSV (rows of ``relative_path<delimiter>label``) a
    sibling file ``<name>_abs<ext>`` is written next to the original,
    with every path made absolute relative to the CSV's own directory.

    :param csv_filenames: list of CSV file paths to convert
        (annotation fixed -- the original ``str`` hint was wrong: the
        function iterates over the argument)
    :param delimiter: CSV field delimiter
    :param encoding: text encoding used for reading and writing
    """
    for filename in tqdm(csv_filenames):
        (absolute_path, basename) = os.path.split(os.path.abspath(filename))
        relative_paths = list()
        labels = list()
        with open(filename, 'r', encoding=encoding) as f:
            csvreader = csv.reader(f, delimiter=delimiter)
            for row in csvreader:
                relative_paths.append(row[0])
                labels.append(row[1])
        # e.g. "labels.csv" -> "labels_abs.csv", in the same directory.
        export_filename = os.path.join(absolute_path, '{}_abs{}'.format(*os.path.splitext(basename)))
        with open(export_filename, 'w', encoding=encoding) as f:
            csvwriter = csv.writer(f, delimiter=delimiter)
            for i in trange(0, len(relative_paths)):
                csvwriter.writerow([os.path.abspath(os.path.join(absolute_path, relative_paths[i])), labels[i]])
# Integration tests for the Redis-backed helpers (Cookies, Urls, IdNames).
# flush_db wipes the database once per test class so tests start clean;
# the remaining tests exercise cookie store/fetch/delete, crawl-URL storage
# and the name<->id mapping round trip.
# NOTE(review): the leading `(scope='class', autouse=True)` is a truncated
# `@pytest.fixture(...)` decorator lost in extraction -- restore it (and a
# `yield` if teardown is intended) before running.
class TestRedis(): (scope='class', autouse=True) def flush_db(self): urls_con.flushall() def test_store_and_fetch_cookies(self): assert (Cookies.fetch_cookies() is None) Cookies.store_cookies(FAKE_STR, FAKE_STR) assert (Cookies.fetch_cookies() is not None) def test_del_cookies(self): Cookies.delete_cookies(FAKE_STR) assert (Cookies.fetch_cookies() is None) def test_store_urls(self): Urls.store_crawl_url(FAKE_STR, 1) assert (urls_con.get(FAKE_STR) is not None) def test_store_and_fetch_name_id(self): IdNames.store_id_name(FAKE_STR, FAKE_ID) rs = IdNames.fetch_uid_by_name(FAKE_STR) assert (rs == FAKE_ID)
def add_boolean_modifier(mesh_object: bpy.types.Object, another_mesh_object: bpy.types.Object, operation: str='DIFFERENCE') -> None:
    """Attach a Boolean modifier to *mesh_object* using *another_mesh_object* as operand.

    :param mesh_object: object that receives the modifier
    :param another_mesh_object: object used as the boolean operand
    :param operation: Blender boolean mode, e.g. 'DIFFERENCE', 'UNION',
        'INTERSECT' -- passed straight through to the modifier
    """
    # Annotation fixed: modifiers.new(type='BOOLEAN') yields a
    # BooleanModifier, not a SubsurfModifier.
    modifier: bpy.types.BooleanModifier = mesh_object.modifiers.new(name='Boolean', type='BOOLEAN')
    modifier.object = another_mesh_object
    modifier.operation = operation
# Unconditional DDPM sampling pipeline (early diffusers API).
# __call__ starts from Gaussian noise of the UNet's sample size, runs the full
# 1000-step reverse diffusion loop, rescales the result from [-1, 1] to
# [0, 1] and returns PIL images (or a raw numpy array / tuple).
# The deprecated `torch_device` kwarg is still honoured with a warning.
# NOTE(review): the bare `_grad()` before __call__ is a truncated
# `@torch.no_grad()` decorator lost in extraction -- restore it, otherwise
# sampling builds a gradient graph.
class DDPMPipeline(DiffusionPipeline): def __init__(self, unet, scheduler): super().__init__() scheduler = scheduler.set_format('pt') self.register_modules(unet=unet, scheduler=scheduler) _grad() def __call__(self, batch_size: int=1, generator: Optional[torch.Generator]=None, output_type: Optional[str]='pil', return_dict: bool=True, **kwargs) -> Union[(ImagePipelineOutput, Tuple)]: if ('torch_device' in kwargs): device = kwargs.pop('torch_device') warnings.warn('`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0. Consider using `pipe.to(torch_device)` instead.') if (device is None): device = ('cuda' if torch.cuda.is_available() else 'cpu') self.to(device) image = torch.randn((batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size), generator=generator) image = image.to(self.device) self.scheduler.set_timesteps(1000) for t in self.progress_bar(self.scheduler.timesteps): model_output = self.unet(image, t).sample image = self.scheduler.step(model_output, t, image, generator=generator).prev_sample image = ((image / 2) + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if (output_type == 'pil'): image = self.numpy_to_pil(image) if (not return_dict): return (image,) return ImagePipelineOutput(images=image)
# Checkpointable epoch counter: iterating yields 1..limit, logging each epoch;
# _save/_recover persist the current epoch as plain text.  On recovery that is
# NOT at an epoch boundary, one is subtracted so the interrupted epoch is
# re-run from the start.
# NOTE(review): `_checkpoint_hooks`, `_as_saver` and `_as_loader` are
# truncated SpeechBrain decorators (`@sb.utils.checkpoints.register_checkpoint_hooks`,
# `@mark_as_saver`, `@mark_as_loader`) lost in extraction -- restore them.
_checkpoint_hooks class EpochCounter(): def __init__(self, limit): self.current = 0 self.limit = int(limit) def __iter__(self): return self def __next__(self): if (self.current < self.limit): self.current += 1 logger.info(f'Going into epoch {self.current}') return self.current raise StopIteration _as_saver def _save(self, path): with open(path, 'w') as fo: fo.write(str(self.current)) _as_loader def _recover(self, path, end_of_epoch=True, device=None): del device with open(path) as fi: saved_value = int(fi.read()) if end_of_epoch: self.current = saved_value else: self.current = (saved_value - 1)
class PathTableaux(UniqueRepresentation, Parent):
    """Parent class for collections of path tableaux (SageMath).

    Registered in the ``Sets()`` category; element construction is
    delegated to the class's ``element_class``.
    """

    def __init__(self):
        Parent.__init__(self, category=Sets())

    def _element_constructor_(self, *args, **kwds):
        # Standard Sage hook: build an element of this parent from raw data.
        return self.element_class(self, *args, **kwds)
# ctypes bitfield layout for the 'LAR' atomic instruction register.
# `_fields_` describes the 128-bit packed encoding (name, storage type, bit
# width); the bare annotations below mirror each field for type checkers.
# NOTE(review): field order and bit widths define the hardware encoding --
# do not reorder; widths sum to the 128-bit `length`.
class LAR_reg(atomic_reg): OP_NAME = 'LAR' _fields_ = [('opd0_w_str', ctypes.c_uint64, 1), ('opd1_w_str', ctypes.c_uint64, 1), ('opd2_const', ctypes.c_uint64, 1), ('res0_prec', ctypes.c_uint64, 3), ('opd0_prec', ctypes.c_uint64, 3), ('opd1_prec', ctypes.c_uint64, 3), ('opd2_n_str', ctypes.c_uint64, 3), ('opd0_sign', ctypes.c_uint64, 1), ('opd1_sign', ctypes.c_uint64, 1), ('start_tid', ctypes.c_uint64, 6), ('res0_c', ctypes.c_uint64, 8), ('res0_w', ctypes.c_uint64, 8), ('rsvd0', ctypes.c_uint64, 2), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5), ('rsvd1', ctypes.c_uint64, 1), ('tsk_opd_num', ctypes.c_uint64, 2), ('rsvd2', ctypes.c_uint64, 6), ('pwr_step', ctypes.c_uint64, 4), ('rsvd3', ctypes.c_uint64, 1), ('res0_addr', ctypes.c_uint64, 16), ('opd0_addr', ctypes.c_uint64, 16), ('opd1_addr', ctypes.c_uint64, 16), ('opd2_addr', ctypes.c_uint64, 16)] opd0_w_str: int opd1_w_str: int opd2_const: int res0_prec: int opd0_prec: int opd1_prec: int opd2_n_str: int opd0_sign: int opd1_sign: int start_tid: int res0_c: int res0_w: int rsvd0: int tsk_typ: int tsk_eu_typ: int rsvd1: int tsk_opd_num: int rsvd2: int pwr_step: int rsvd3: int res0_addr: int opd0_addr: int opd1_addr: int opd2_addr: int length: int = 128
# Run the trained network over every pixel batch and write two colour maps:
# `<path>.png` (predicted classes) and `<path>_gt.png` (ground truth).
# Inputs are permuted NHWC->NCHW before inference; background pixels
# (label 0) are remapped so they render as the "unlabelled" colour, and all
# remaining labels are shifted down by one to be 0-based.
# NOTE(review): mutates `gt_hsi` indirectly via the flattened view `gt`
# (gt[i] = 17 writes through) -- confirm callers do not reuse gt_hsi after.
def generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices, path): pred_test = [] for (X, y) in all_iter: X = X.permute(0, 3, 1, 2) X = X.to(device) net.eval() pred_test.extend(net(X).cpu().argmax(axis=1).detach().numpy()) gt = gt_hsi.flatten() x_label = np.zeros(gt.shape) for i in range(len(gt)): if (gt[i] == 0): gt[i] = 17 x_label[i] = 16 gt = (gt[:] - 1) x_label[total_indices] = pred_test x = np.ravel(x_label) y_list = list_to_colormap(x) y_gt = list_to_colormap(gt) y_re = np.reshape(y_list, (gt_hsi.shape[0], gt_hsi.shape[1], 3)) gt_re = np.reshape(y_gt, (gt_hsi.shape[0], gt_hsi.shape[1], 3)) classification_map(y_re, gt_hsi, 300, (path + '.png')) classification_map(gt_re, gt_hsi, 300, (path + '_gt.png')) print('------Get classification maps successful-------')
# pytest test: building the hierarchy digraph from a 1-D label set must
# produce exactly the four isolated nodes 'a'..'d' -- isomorphic to the
# reference graph, with identical node and edge lists (no edges).
# NOTE(review): `graph_1d` is a fixture defined elsewhere in the test module.
def test_create_digraph_1d(graph_1d): ground_truth = nx.DiGraph() ground_truth.add_nodes_from(np.array(['a', 'b', 'c', 'd'])) graph_1d._create_digraph() assert nx.is_isomorphic(ground_truth, graph_1d.hierarchy_) assert (list(ground_truth.nodes) == list(graph_1d.hierarchy_.nodes)) assert (list(ground_truth.edges) == list(graph_1d.hierarchy_.edges))
# Simulate N bouncing-ball trajectories of T steps each.
# Initial position/velocity are drawn from the Gaussians in `init_state`;
# each trajectory is produced by `sim_ball_traj` with the given bounce
# factors and air-drag coefficients.  Returns (times, positions, velocities)
# as stacked arrays of shape (N, T, ...) plus per-trajectory bounce flags
# and bounce times as lists.
# NOTE(review): mutable dict/ndarray default arguments are shared across
# calls -- harmless here only because they are never mutated; confirm.
def sim_data(N=100, T=120, init_state={'pos': {'mean': np.array([0.0, 0.0, 0.3]), 'cov': np.diag((np.array([0.5, 1, 0.01]) ** 2))}, 'vel': {'mean': np.array([(- 1.4), 4.5, 2.3]), 'cov': np.eye(3)}}, deltaT=0.005, max_bounces=None, bounce_fac=np.array([0.9, 0.9, 0.8]), lin_air_drag=np.array([0, 0, 0]), quad_air_drag=0.15): X = [] Xd = [] times = [] is_bounce = [] bounce_times = [] s = (lambda x: np.random.multivariate_normal(x['mean'], x['cov'])) for i in range(N): (time, x, xd, is_bounce_n, bounce_time_n) = sim_ball_traj(init_pos=s(init_state['pos']), init_vel=s(init_state['vel']), lin_air_drag=lin_air_drag, quad_air_drag=quad_air_drag, bounce_fac=bounce_fac, deltaT=deltaT, T=T, max_bounces=max_bounces) X.append(x) Xd.append(xd) times.append(time) is_bounce.append(is_bounce_n) bounce_times.append(bounce_time_n) return (np.array(times), np.array(X), np.array(Xd), is_bounce, bounce_times)
# Exponential normalizing-flow layer: forward is y = exp(x) with
# log|det J| = x (since d exp(x)/dx = exp(x), log of which is x, summed
# element-wise by the caller); inverse is x = log(y + eps) with
# log|det J| = -y, where eps guards against log(0).
# NOTE(review): the bare `.export` before `inverse` is a truncated decorator
# (presumably `@torch.jit.export`) lost in extraction -- restore it.
class Exp(Flow): def __init__(self): super().__init__() self.epsilon = 1e-08 def forward(self, x): y = x.exp() log_det_jac = x return (y, log_det_jac) .export def inverse(self, y): x = (y + self.epsilon).log() inv_log_det_jac = (- y) return (x, inv_log_det_jac)
# Return the (mu, sigma) activation statistics used by FID for `path`.
# If `path` is a precomputed .npz file, load 'mu'/'sigma' directly; otherwise
# treat it as an image directory, gather all .JPEG/.png/.jpg files
# recursively and compute the statistics with the given Inception `model`.
# NOTE(review): extension matching is case-sensitive (e.g. '.JPG' files are
# silently skipped) -- confirm that is intentional.
def compute_statistics_of_path(path, model, batch_size, dims, device, num_workers=1): if path.endswith('.npz'): with np.load(path) as f: (m, s) = (f['mu'][:], f['sigma'][:]) else: files = ((list(glob.glob((path + '/**/*.JPEG'), recursive=True)) + list(glob.glob((path + '/**/*.png'), recursive=True))) + list(glob.glob((path + '/**/*.jpg'), recursive=True))) files = [pathlib.Path(f) for f in files] (m, s) = calculate_activation_statistics(files, model, batch_size, dims, device, num_workers) return (m, s)
def main():
    """Command-line entry point: StarDist3D instance segmentation on tiff stacks.

    Reads one or more 3D (or 4D with channel) tiff images, normalizes them
    with the given percentiles, predicts instance labels with a StarDist3D
    model (local folder or registered pretrained name) and writes the label
    images to the output directory.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='\nPrediction script for a 3D stardist model, usage: stardist-predict -i input.tif -m model_folder_or_pretrained_name -o output_folder\n\n')
    parser.add_argument('-i', '--input', type=str, nargs='+', required=True, help='input file (tiff)')
    parser.add_argument('-o', '--outdir', type=str, default='.', help='output directory')
    parser.add_argument('--outname', type=str, nargs='+', default='{img}.stardist.tif', help='output file name (tiff)')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-m', '--model', type=str, default=None, help='model folder / pretrained model to use')
    parser.add_argument('--axes', type=str, default=None, help="axes to use for the input, e.g. 'XYC'")
    parser.add_argument('--n_tiles', type=int, nargs=3, default=None, help='number of tiles to use for prediction')
    parser.add_argument('--pnorm', type=float, nargs=2, default=[1, 99.8], help='pmin/pmax to use for normalization')
    parser.add_argument('--prob_thresh', type=float, default=None, help='prob_thresh for model (if not given use model default)')
    parser.add_argument('--nms_thresh', type=float, default=None, help='nms_thresh for model (if not given use model default)')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()

    # Deferred imports keep `--help` fast and avoid importing TensorFlow
    # when argument parsing already fails.
    from csbdeep.utils import normalize
    from csbdeep.models.base_model import get_registered_models
    from stardist.models import StarDist3D
    from tifffile import imwrite, imread

    get_registered_models(StarDist3D, verbose=True)
    if pathlib.Path(args.model).is_dir():
        model = StarDist3D(None, name=args.model)
    else:
        model = StarDist3D.from_pretrained(args.model)
    if model is None:
        # Bug fix: the message previously referenced the undefined name
        # StarDist2D, which raised a NameError instead of this ValueError.
        raise ValueError(f'''unknown model: {args.model}
available models: {get_registered_models(StarDist3D, verbose=True)}''')

    # Bug fix: --outname is declared with nargs='+', so a user-supplied value
    # arrives as a list while the default is a plain string; normalize to a
    # single template so .format() below works in both cases.
    outname_template = args.outname[0] if isinstance(args.outname, list) else args.outname

    for fname in args.input:
        if args.verbose:
            print(f'reading image {fname}')
        if pathlib.Path(fname).suffix.lower() not in ('.tif', '.tiff'):
            raise ValueError('only tiff files supported in 3D for now')
        img = imread(fname)
        if img.ndim not in (3, 4):
            raise ValueError(f'currently only 3d (or 4D with channel) images are supported by the prediction script')
        if args.axes is None:
            # Default axis order by rank: plain volume or volume + channel.
            args.axes = {3: 'ZYX', 4: 'ZYXC'}[img.ndim]
        if len(args.axes) != img.ndim:
            raise ValueError(f'dimension of input ({img.ndim}) not the same as length of given axes ({len(args.axes)})')
        if args.verbose:
            print(f'loaded image of size {img.shape}')
            print(f'normalizing...')
        img = normalize(img, *args.pnorm)
        (labels, _) = model.predict_instances(img, n_tiles=args.n_tiles, prob_thresh=args.prob_thresh, nms_thresh=args.nms_thresh)
        out = pathlib.Path(args.outdir)
        out.mkdir(parents=True, exist_ok=True)
        imwrite((out / outname_template.format(img=pathlib.Path(fname).with_suffix('').name)), labels)
def graphVisIntersection(node_neighbor, region, filename, size=2048):
    """Render graph intersection nodes (degree != 2) as dots in a PNG.

    Parameters
    ----------
    node_neighbor : dict
        Maps a node's (coord0, coord1) location to its neighbour list.
    region : sequence
        Bounding box (min0, min1, max0, max1) used to map coordinates
        onto the `size` x `size` pixel canvas.
    filename : str
        Output image path passed to ``cv2.imwrite``.
    size : int
        Side length of the square output image in pixels.
    """
    img = np.zeros((size, size), dtype=np.uint8)
    # Bug fix: dict.iteritems() was removed in Python 3 -- use items().
    for (node, nei) in node_neighbor.items():
        loc0 = node
        # Degree-2 nodes are plain path points; everything else is an
        # intersection (or endpoint) and gets drawn.
        if len(nei) != 2:
            x0 = int(((loc0[1] - region[1]) / (region[3] - region[1])) * size)
            y0 = int(((region[2] - loc0[0]) / (region[2] - region[0])) * size)
            cv2.circle(img, (x0, y0), 5, 255, (- 1))
    cv2.imwrite(filename, img)
# Combined tokenizer that routes text to either the SignWriting tokenizer
# (pure-ASCII input) or the HamNoSys tokenizer (anything else).  The two
# vocabularies are merged into shared i2s/s2i maps, with SignWriting indices
# offset past the HamNoSys range so they never collide.
# NOTE(review): routing by `isascii()` assumes HamNoSys strings always
# contain non-ASCII characters -- confirm for all inputs.
class SignLanguageTokenizer(BaseTokenizer): def __init__(self, **kwargs) -> None: self.hamnosys_tokenizer = HamNoSysTokenizer(**kwargs) self.signwriting_tokenizer = SignWritingTokenizer(**kwargs, starting_index=len(self.hamnosys_tokenizer)) super().__init__([]) self.i2s = {**self.hamnosys_tokenizer.i2s, **self.signwriting_tokenizer.i2s} self.s2i = {**self.hamnosys_tokenizer.s2i, **self.signwriting_tokenizer.s2i} def tokenize(self, text: str, bos=False, eos=False) -> List[str]: if text.isascii(): return self.signwriting_tokenizer.tokenize(text, bos=bos, eos=eos) return self.hamnosys_tokenizer.tokenize(text, bos=bos, eos=eos) def text_to_tokens(self, text: str) -> List[str]: if text.isascii(): return self.signwriting_tokenizer.text_to_tokens(text) return self.hamnosys_tokenizer.text_to_tokens(text) def tokens_to_text(self, tokens: List[str]) -> str: if all((t.isascii() for t in tokens)): return self.signwriting_tokenizer.tokens_to_text(tokens) return self.hamnosys_tokenizer.tokens_to_text(tokens) def post_process(self, tokens: List[str], generate_unk: bool=True): return ' '.join(tokens)
# pytest collection hook: record tests during which the FPU control mode
# changed.  The first collected item establishes the baseline mode; any later
# item whose mode differs from the previous one is stored in
# `_collect_results` as (old_mode, new_mode) and the baseline is updated.
# NOTE(review): the leading `()` is a truncated decorator lost in extraction
# (likely `@pytest.hookimpl(...)`); `_old_fpu_mode`, `_collect_results` and
# `get_fpu_mode` are module-level names defined elsewhere.
() def pytest_itemcollected(item): global _old_fpu_mode mode = get_fpu_mode() if (_old_fpu_mode is None): _old_fpu_mode = mode elif (mode != _old_fpu_mode): _collect_results[item] = (_old_fpu_mode, mode) _old_fpu_mode = mode
def get_logger_model(name, log_level=logging.DEBUG):
    """Fetch the model logger *name* and wire the 'EarlyStopping' logger to it.

    The 'EarlyStopping' logger gets a TimeFilter and reuses the model
    logger's first handler, so both emit to the same destination.  Both
    loggers are set to *log_level*; the model logger is returned.
    """
    registry = logging.root.manager.loggerDict
    model_logger = registry[name]
    es_logger = registry['EarlyStopping']
    es_logger.addFilter(TimeFilter())
    # Share the model logger's output handler so messages interleave.
    es_logger.addHandler(model_logger.handlers[0])
    es_logger.setLevel(log_level)
    model_logger.setLevel(log_level)
    return model_logger
# Plot up to `k` sliding windows of a random-walk series in a subplot grid.
# The first two windows (offsets 0 and 1) are drawn explicitly; subsequent
# windows are spaced `shift`=75 samples apart.  All panels share y-limits
# [-1, 1] and every panel but the first hides its y tick labels.
# NOTE(review): X is indexed as X[j] per window, so X is presumably already a
# 2-D array of windows (window index first) rather than the raw series --
# confirm against the caller; `window`/`lim` only affect the x ranges.
def plot_rws(X, window=100, k=5, lim=1000): shift = 75 X = X[window:] t = range(len(X)) colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] num_figs = (int(np.ceil((k / 5))) + 1) fig = plt.figure(figsize=(15, (num_figs * 2))) j = 0 ax = fig.add_subplot(num_figs, 5, (j + 1)) idx = t[j:(window + j)] ax.plot(idx, X[j], lw=2, color=colors[j]) plt.title(('window %d' % j), size=16) plt.ylim([(- 1), 1]) j = 1 ax = fig.add_subplot(num_figs, 5, (j + 1)) idx = t[j:(window + j)] ax.plot(idx, X[j], lw=2, color=colors[j]) ax.set_yticklabels([]) plt.title(('window %d' % j), size=16) plt.ylim([(- 1), 1]) for i in range(2, k): j = (i * shift) idx = t[j:(window + j)] ax = fig.add_subplot(num_figs, 5, (i + 1)) ax.plot(idx, X[j], lw=2, color=colors[(i + 1)]) ax.set_yticklabels([]) plt.title(('window %d' % j), size=16) plt.ylim([(- 1), 1]) plt.tight_layout() plt.show()
def worker_urls(urls):
    """Split a shard URL list across PyTorch DataLoader workers.

    Outside a worker process the full list is returned; inside, each
    worker takes every ``num_workers``-th URL starting from its own id.
    Worker 0 warns when there are fewer shards than workers.
    """
    assert isinstance(urls, list)
    assert isinstance(urls[0], str)
    worker_info = torch.utils.data.get_worker_info()
    if worker_info is None:
        # Single-process loading: no sharding needed.
        return urls
    wid = worker_info.id
    num_workers = worker_info.num_workers
    if wid == 0 and len(urls) < num_workers:
        warnings.warn(f'num_workers {num_workers} > num_shards {len(urls)}')
    # Strided split: worker w gets urls[w], urls[w + n], urls[w + 2n], ...
    return urls[wid::num_workers]
# pytest test: an Awkward Array exposed to C++ via the cling generator must be
# iterable from compiled code.  The generated C++ walks list -> record ->
# record.y() items and accumulates their sum, which must equal
# ak.sum(array['y']) computed in Python.
# NOTE(review): exercises awkward's private _connect.cling/_lookup APIs plus
# cppyy JIT compilation -- requires both packages at runtime.
def test_array_as_generated_dataset(): array = ak.Array([[{'x': 1, 'y': [1.1]}, {'x': 2, 'y': [2.2, 0.2]}], [], [{'x': 3, 'y': [3.0, 0.3, 3.3]}]]) generator = ak._connect.cling.togenerator(array.layout.form, flatlist_as_rvec=False) lookup = ak._lookup.Lookup(array.layout) source_code = f''' double go_fast(ssize_t length, ssize_t* ptrs) {{ auto awkward_array = {generator.dataset()}; double out = 0.0; for (auto list : awkward_array) {{ for (auto record : list) {{ for (auto item : record.y()) {{ out += item; }} }} }} return out; }} ''' generator.generate(cppyy.cppdef) cppyy.cppdef(source_code) out = cppyy.gbl.go_fast(len(array), lookup.arrayptrs) assert (out == ak.sum(array['y']))
def dna_transformation(prev_image, dna_input):
    """Apply a per-pixel DNA (Dynamic Neural Advection) kernel to `prev_image`.

    NOTE(review): uses TF1-style APIs (`tf.concat(axis, values)`, `keep_dims`);
    confirm the intended TensorFlow version before modernizing.
    """
    # Pad H and W by 2 so every kernel shift stays in bounds
    # (assumes DNA_KERN_SIZE == 5 given the padding of 2 — TODO confirm).
    prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
    image_height = int(prev_image.get_shape()[1])
    image_width = int(prev_image.get_shape()[2])
    inputs = []
    # Collect every spatial shift of the padded image as a separate slice.
    for xkern in range(DNA_KERN_SIZE):
        for ykern in range(DNA_KERN_SIZE):
            inputs.append(tf.expand_dims(tf.slice(prev_image_pad, [0, xkern, ykern, 0], [(- 1), image_height, image_width, (- 1)]), [3]))
    inputs = tf.concat(3, inputs)
    # Shifted ReLU keeps kernel weights strictly positive before normalizing.
    kernel = (tf.nn.relu((dna_input - RELU_SHIFT)) + RELU_SHIFT)
    kernel = tf.expand_dims((kernel / tf.reduce_sum(kernel, [3], keep_dims=True)), [4])
    # Normalized-weight sum over shifts == learned per-pixel convolution.
    return tf.reduce_sum((kernel * inputs), [3], keep_dims=False)
class HomsetWithBase(Homset):
    """Homset whose base ring defaults to the base ring of its domain."""

    def __init__(self, X, Y, category=None, check=True, base=None):
        if (base is None):
            # Inherit the base ring from the domain when none is supplied.
            base = X.base_ring()
        Homset.__init__(self, X, Y, check=check, category=category, base=base)
class BackgroundConsumer(Thread):
    """Thread that drains an iterable into a queue, then enqueues a sentinel.

    Any exception raised by the source is forwarded through the queue so the
    reader on the other end can detect and re-raise it.
    """

    def __init__(self, queue, source, max_len):
        Thread.__init__(self)
        self._queue = queue
        self._source = source
        self._max_len = max_len
        self.count = 0

    def run(self):
        try:
            for element in self._source:
                self._queue.put(element)
                self.count += 1
                # Stop early once the requested number of items was produced.
                if self._max_len is not None and self.count >= self._max_len:
                    break
            # Signal normal end-of-stream to the consumer.
            self._queue.put(_sentinel)
        except Exception as exc:
            # Hand the failure to the consumer instead of dying silently.
            self._queue.put(exc)
def write_sentences_to_conllu(filename, sents):
    """Write sentences (lists of CoNLL-U lines) to `filename`, blank-line separated."""
    with open(filename, 'w', encoding='utf-8') as outfile:
        for sent_lines in sents:
            # Ensure every sentence carries at least fake dependency annotation.
            for line in maybe_add_fake_dependencies(sent_lines):
                outfile.write(f'{line}\n')
            # Blank line terminates each sentence in CoNLL-U.
            outfile.write('\n')
class TFOpenAIGPTLMHeadModel():
    """Dummy stand-in used when TensorFlow is not installed.

    Any use raises an informative error via `requires_tf`.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy objects usually declare this as a
        # classmethod — confirm before calling it on the class itself.
        requires_tf(self)
def html_per_unit(task, layer, unit, alignment, num_align):
    """Render one HTML table row summarizing a unit's top-aligned concepts.

    Each of the first `num_align` (concept, doa) pairs in `alignment[unit]`
    becomes a colored span with its degree-of-alignment score.
    """
    html = ('\n <tr>\n <td align="left">[%s / layer %02d / Unit %04d]<br>\n ' % (task, layer, unit))
    for i in range(num_align):
        (concept, doa) = alignment[unit][i]
        # Display the MORPH_ prefix with the shorthand marker "[#]".
        concept = concept.replace('MORPH_', '[#]')
        html += ('<span style="background-color: %s" >%s</span> (%.2lf) / ' % (colors[i], concept, doa))
    html += '</tr>'
    # BUG FIX: `html` is already a str; the previous `html.decode(...)` is a
    # Python 2 leftover and raises AttributeError under Python 3.
    return html
def _reindent_code(codestr):
    # Reindent a Python source string via the `reindent` tool's run_reindent,
    # returning the transformed source as a string.
    codestr = io.StringIO(codestr)
    ret = io.StringIO()
    run_reindent(codestr, ret, config={'dry-run': False, 'help': False, 'to': 4, 'from': (- 1), 'tabs': True, 'encoding': 'utf-8', 'is-tabs': False, 'tabsize': 4, 'all-tabs': False})
    return ret.getvalue()
def test_transpose():
    """transpose_test must match numpy.transpose within a small relative error."""
    src = np.random.rand(M, N).astype(np.float32)
    # NOTE(review): output buffer is (M, N); transpose of (M, N) is (N, M),
    # so this relies on M == N — confirm against the module constants.
    dst = np.zeros([M, N], dtype=np.float32)
    transpose_test(src, dst)
    expected = np.transpose(src)
    rel_error = np.linalg.norm(dst - expected) / np.linalg.norm(expected)
    print('Relative_error:', rel_error)
    assert rel_error <= 1e-05
def get_array_prepare(*args):
    """Return the ``__array_prepare__`` of the highest-priority argument.

    Arguments lacking ``__array_prepare__`` are ignored; returns None when
    no argument provides one. Priority ties favor the earliest argument.
    """
    candidates = [
        (getattr(a, '__array_priority__', 0), -pos, a.__array_prepare__)
        for pos, a in enumerate(args)
        if hasattr(a, '__array_prepare__')
    ]
    if not candidates:
        return None
    # max over (priority, -position) == last element of the sorted list;
    # -position makes earlier arguments win on equal priority.
    return max(candidates)[-1]
def weights_init_kaiming(m):
    """Kaiming-initialize Linear/Conv weights and zero their biases.

    BatchNorm layers get unit weight and zero bias when affine. Intended for
    use with ``module.apply(weights_init_kaiming)``.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUG FIX: `if m.bias:` on a multi-element tensor raises
        # "Boolean value of Tensor ... is ambiguous"; test against None.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
_model  # NOTE(review): appears to be the tail of a truncated `@register_model` decorator — restore against the original file.
def mobilenetv3_large_075(pretrained=False, **kwargs):
    """MobileNet-V3 Large with a 0.75 channel multiplier."""
    model = _gen_mobilenet_v3('mobilenetv3_large_075', 0.75, pretrained=pretrained, **kwargs)
    return model
class ContextNLU():
    """Context-aware NLU wrapper around a pretrained SDEN slot/intent model."""

    def __init__(self):
        # Vocabulary and label mappings serialized alongside the model weights.
        self.word2index = pickle.load(open((THIS_PATH + '/vocab.pkl'), 'rb'))
        slot2index = pickle.load(open((THIS_PATH + '/slot.pkl'), 'rb'))
        intent2index = pickle.load(open((THIS_PATH + '/intent.pkl'), 'rb'))
        self.index2intent = {v: k for (k, v) in intent2index.items()}
        self.index2slot = {v: k for (k, v) in slot2index.items()}
        self.model = SDEN(len(self.word2index), 100, 64, len(slot2index), len(intent2index))
        self.model.load_state_dict(torch.load((THIS_PATH + '/sden.pkl')))
        self.model.eval()
        # Dialogue history starts with a single token of index 2
        # (presumably a BOS/blank marker — TODO confirm against vocab.pkl).
        self.history = [Variable(torch.LongTensor([2])).view(1, (- 1))]

    def reset(self):
        # Drop all accumulated dialogue context.
        self.history = [Variable(torch.LongTensor([2])).view(1, (- 1))]

    def predict(self, current):
        # Tokenize the utterance, encode it, and run it with the history.
        current = tagger.morphs(current)
        current = prepare_sequence(current, self.word2index).view(1, (- 1))
        history = pad_to_history(self.history, self.word2index)
        (s, i) = self.model(history, current)
        slot_p = s.max(1)[1]
        intent_p = i.max(1)[1]
        slot = [self.index2slot[s] for s in slot_p.data.tolist()]
        intent = self.index2intent[intent_p.data[0]]
        # NOTE(review): comparing an int (`len`) to a list is always False, so
        # this branch is dead code — likely meant to cap the history length.
        if (len(self.history) == [Variable(torch.LongTensor([2])).view(1, (- 1))]):
            self.history.pop()
        self.history.append(current)
        return (slot, intent)
# NOTE(review): the tuple below looks like the argument list of a truncated
# `@unittest.skipIf(...)` decorator — restore against the original file.
((not workspace.has_gpu_support), 'No gpu support.')
class BrewGPUTest(unittest.TestCase):
    """cuDNN smoke tests for brew activation helpers."""

    def test_relu(self):
        # Inputs centered at +0.5 and -0.5: relu passes one, zeroes the other.
        Xpos = (np.ones((5, 5)).astype(np.float32) - 0.5)
        Xneg = (np.ones((5, 5)).astype(np.float32) - 1.5)
        workspace.FeedBlob('xpos', Xpos)
        workspace.FeedBlob('xneg', Xneg)
        model = ModelHelper(name='test_model')
        brew.relu(model, 'xpos', 'out_xpos', use_cudnn=True)
        brew.relu(model, 'xneg', 'out_xneg', use_cudnn=True)
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        pos = workspace.FetchBlob('out_xpos')
        self.assertAlmostEqual(pos.mean(), 0.5)
        neg = workspace.FetchBlob('out_xneg')
        self.assertAlmostEqual(neg.mean(), 0)

    def test_tanh(self):
        X = (np.ones((5, 5)).astype(np.float32) - 0.5)
        workspace.FeedBlob('x', X)
        model = ModelHelper(name='test_model')
        brew.tanh(model, 'x', 'out_tanh', use_cudnn=True)
        model.Validate()
        workspace.RunNetOnce(model.param_init_net)
        workspace.RunNetOnce(model.net)
        out = workspace.FetchBlob('out_tanh')
        self.assertAlmostEqual(out.mean(), np.tanh(0.5), places=5)
# NOTE(review): the `.parametrize(...)` lines below are the tails of truncated
# `@pytest.mark.parametrize` decorators — restore the `@pytest.mark` prefixes
# when merging back into the original file.
.parametrize('seed', [313])
.parametrize('axis', [0, 1, 2, (- 1)])
.parametrize('decay_rate', [0.9])
.parametrize('eps', [1e-05])
.parametrize('output_stat, batch_stat', [[False, False], [False, True], [True, True]])
.parametrize('ctx, func_name', ctxs)
.parametrize('no_scale, no_bias', [[False, False], [True, True]])
.parametrize('no_mean', [True, False])
.parametrize('no_variance', [True, False])
def test_batch_normalization_forward_backward(seed, axis, decay_rate, eps, output_stat, batch_stat, ctx, func_name, no_scale, no_bias, no_mean, no_variance):
    # Forward/backward check of batch_normalization against the reference
    # implementation, plus running-stats update behavior.
    from nbla_test_utils import function_tester
    rng = np.random.RandomState(seed)
    inputs = list(create_inputs(rng, axis))
    axes = [axis]
    if ((not batch_stat) and (no_mean or no_variance)):
        # Global-stats mode without mean/variance inputs must raise.
        vinputs = []
        for i in inputs:
            vinputs.append(nn.Variable(i.shape, True))
        vinputs = mask_vinputs(vinputs, no_scale, no_bias, no_mean, no_variance)
        with pytest.raises(ValueError):
            F.batch_normalization(*vinputs, axes=axes, decay_rate=decay_rate, eps=eps, batch_stat=batch_stat, output_stat=output_stat)
        return
    else:
        inputs = mask_inputs(inputs, no_scale, no_bias, no_mean, no_variance)
    insert_identity = []
    if batch_stat:
        insert_identity = [True, True, True, False, False]
    function_tester(rng, F.batch_normalization, ref_batch_normalization, inputs, func_args=[axes, decay_rate, eps, batch_stat, output_stat], backward=[True, True, True, False, False], ctx=ctx, func_name=func_name, dstep=0.01, atol_b=0.01, insert_identity=insert_identity)
    if (no_mean and no_variance):
        return
    vinputs = []
    for i in inputs:
        vinputs.append(nn.Variable(i.shape, True))
        vinputs[(- 1)].d = i
    vinputs = mask_vinputs(vinputs, no_scale, no_bias, no_mean, no_variance)
    # Check the running mean/variance update over several forward passes.
    for i in range(5):
        inputs[0] = rng.randn(*inputs[0].shape)
        vinputs[0].d[...] = inputs[0]
        ref_y = ref_batch_normalization(*(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        with nn.context_scope(ctx), nn.auto_forward():
            y = F.batch_normalization(*(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
        if (not no_mean):
            assert_allclose(vinputs[3].d, inputs[3], atol=1e-07)
        if (not no_variance):
            assert_allclose(vinputs[4].d, inputs[4])
    # Finally compare outputs in global-stats (inference) mode.
    batch_stat = False
    if (no_mean or no_variance):
        return
    if output_stat:
        return
    ref_y = ref_batch_normalization(*(inputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    with nn.context_scope(ctx), nn.auto_forward():
        y = F.batch_normalization(*(vinputs + [axes, decay_rate, eps, batch_stat, output_stat]))
    assert_allclose(ref_y, y.d, atol=1e-06)
def main(args):
    """Merge several ranking files, re-rank per query by score, write the result.

    Input lines are tab-separated (qid, pid, rank, score); output keeps at
    most `args.depth` rows per query when depth > 0.
    """
    per_query = defaultdict(list)
    for path in args.input:
        print_message(f'#> Loading the rankings in {path} ..')
        with open(path) as f:
            for line in file_tqdm(f):
                qid, pid, rank, score = line.strip().split('\t')
                per_query[int(qid)].append((float(score), int(rank), int(pid)))
    with open(args.output, 'w') as f:
        print_message(f'#> Writing the output rankings to {args.output} ..')
        for qid in tqdm.tqdm(per_query):
            # Highest score first; ties fall back to original rank, then pid.
            merged = sorted(per_query[qid], reverse=True)
            for new_rank, (score, _orig_rank, pid) in enumerate(merged, start=1):
                if args.depth > 0 and new_rank > args.depth:
                    break
                f.write('\t'.join(map(str, [qid, pid, new_rank, score])) + '\n')
def convert(data_dir: str, out_data_dir: str):
    """Convert a NeRF-style json dataset into images/ + pose/ + intrinsics.txt.

    Reads every *.json in `data_dir`, copies referenced images into
    `out_data_dir/images` with split-prefixed names (0_=train, 1_=val,
    2_=test), writes one 4x4 camera-to-world matrix per image into
    `out_data_dir/pose`, and derives `intrinsics.txt` from the camera angles.
    """
    images_dir_name = os.path.join(out_data_dir, 'images')
    pose_dir_name = os.path.join(out_data_dir, 'pose')
    os.makedirs(images_dir_name, exist_ok=True)
    os.makedirs(pose_dir_name, exist_ok=True)

    def get_subdir(name):
        # Split sub-directory in which per-split images may live.
        if name.endswith('_train.json'):
            return 'train'
        elif name.endswith('_val.json'):
            return 'val'
        elif name.endswith('_test.json'):
            return 'test'
        return ''

    def get_out_prefix(name):
        # Numeric prefix encoding the split in the output file name.
        if name.endswith('_train.json'):
            return '0_'
        elif name.endswith('_val.json'):
            return '1_'
        elif name.endswith('_test.json'):
            return '2_'
        return ''

    jsons = {x: (get_subdir(x), get_out_prefix(x)) for x in glob(os.path.join(data_dir, '*.json'))}
    # Coordinate change: flip camera y/z axes and permute the world axes.
    cam_trans = np.diag(np.array([1.0, -1.0, -1.0, 1.0]))
    world_trans = np.array([[0.0, -1.0, 0.0, 0.0], [0.0, 0.0, -1.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    assert len(jsons) > 0, f"No jsons found in {data_dir}, can't convert"
    cnt = 0
    example_fpath = None
    tj = {}
    for tj_path, (tj_subdir, tj_out_prefix) in jsons.items():
        with open(tj_path, 'r') as f:
            tj = json.load(f)
        if 'frames' not in tj:
            print(f'No frames in json {tj_path}, skipping')
            continue
        for frame in tj['frames']:
            fpath = os.path.join(data_dir, frame['file_path'])
            if not os.path.isfile(fpath):
                # Fall back to <split>/<basename>.png when the json path is stale.
                fpath = os.path.join(data_dir, tj_subdir, os.path.basename(frame['file_path']) + '.png')
            example_fpath = fpath
            if not os.path.isfile(fpath):
                print('Could not find image:', frame['file_path'], '(this may be ok)')
                continue
            ext = os.path.splitext(fpath)[1]
            c2w = np.array(frame['transform_matrix'])
            # BUG FIX: the matrix products were missing their operators
            # ("(world_trans c2w) cam_trans" is a SyntaxError); the intended
            # operation is the chained 4x4 matrix multiplication.
            c2w = (world_trans @ c2w) @ cam_trans
            image_fname = tj_out_prefix + f'{cnt:05d}'
            pose_path = os.path.join(pose_dir_name, image_fname + '.txt')
            np.savetxt(pose_path, c2w)
            new_fpath = os.path.join(images_dir_name, image_fname + ext)
            shutil.copyfile(fpath, new_fpath)
            cnt += 1
    assert len(tj) > 0, f"No valid jsons found in {data_dir}, can't convert"
    w = tj.get('w')
    h = tj.get('h')
    if w is None or h is None:
        # Fall back to probing an actual image for its size.
        assert example_fpath is not None
        w, h = Image.open(example_fpath).size
    # Focal lengths from the horizontal (and optionally vertical) FOV.
    fx = float((0.5 * w) / np.tan(0.5 * tj['camera_angle_x']))
    if 'camera_angle_y' in tj:
        fy = float((0.5 * h) / np.tan(0.5 * tj['camera_angle_y']))
    else:
        fy = fx
    cx = tj.get('cx', w * 0.5)
    cy = tj.get('cy', h * 0.5)
    intrin_mtx = np.array([[fx, 0.0, cx, 0.0], [0.0, fy, cy, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])
    np.savetxt(os.path.join(out_data_dir, 'intrinsics.txt'), intrin_mtx)
def compute_A_inv_b(A: TensorType, b: TensorType) -> tf.Tensor:
    """Solve A x = b for symmetric positive-definite A via its Cholesky factor.

    Equivalent to A^{-1} b but numerically stabler: forward solve L y = b,
    then backward solve L^T x = y.
    """
    chol = tf.linalg.cholesky(A)
    y = tf.linalg.triangular_solve(chol, b)
    return tf.linalg.triangular_solve(chol, y, adjoint=True)
def register_Ns3Ipv4EndPoint_methods(root_module, cls):
    # PyBindGen-style generated bindings for ns3::Ipv4EndPoint; keep the
    # registered signatures in sync with the C++ header.
    cls.add_constructor([param('ns3::Ipv4EndPoint const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address', 'address'), param('uint16_t', 'port')])
    cls.add_method('BindToNetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'netdevice')])
    cls.add_method('ForwardIcmp', 'void', [param('ns3::Ipv4Address', 'icmpSource'), param('uint8_t', 'icmpTtl'), param('uint8_t', 'icmpType'), param('uint8_t', 'icmpCode'), param('uint32_t', 'icmpInfo')])
    cls.add_method('ForwardUp', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('uint16_t', 'sport'), param('ns3::Ptr< ns3::Ipv4Interface >', 'incomingInterface')])
    cls.add_method('GetBoundNetDevice', 'ns3::Ptr< ns3::NetDevice >', [])
    cls.add_method('GetLocalAddress', 'ns3::Ipv4Address', [])
    cls.add_method('GetLocalPort', 'uint16_t', [])
    cls.add_method('GetPeerAddress', 'ns3::Ipv4Address', [])
    cls.add_method('GetPeerPort', 'uint16_t', [])
    cls.add_method('IsRxEnabled', 'bool', [])
    cls.add_method('SetDestroyCallback', 'void', [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('SetIcmpCallback', 'void', [param('ns3::Callback< void, ns3::Ipv4Address, unsigned char, unsigned char, unsigned char, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('SetLocalAddress', 'void', [param('ns3::Ipv4Address', 'address')])
    cls.add_method('SetPeer', 'void', [param('ns3::Ipv4Address', 'address'), param('uint16_t', 'port')])
    cls.add_method('SetRxCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::Ipv4Header, unsigned short, ns3::Ptr< ns3::Ipv4Interface >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    cls.add_method('SetRxEnabled', 'void', [param('bool', 'enabled')])
    return
def test_before_add_examples(testdir, simple_openapi):
    # Generates a test module exercising the `before_add_examples` hook:
    # test_a registers a hook adding a {"foo": "bar"} example; test_b applies
    # a second hook and expects both injected examples in order.
    # NOTE(review): the `\()\(` spans inside the fixture string look like
    # garbled schemathesis hook/`@schema.parametrize` decorators — the string
    # is preserved verbatim here; restore it against the original file.
    testdir.make_test('\\ndef before_add_examples(context, examples):\n new = schemathesis.models.Case(\n operation=context.operation,\n query={"foo": "bar"}\n )\n examples.append(new)\n\()\(phases=[Phase.explicit])\ndef test_a(case):\n assert case.query == {"foo": "bar"}\n\n\ndef another_hook(context, examples):\n new = schemathesis.models.Case(\n operation=context.operation,\n query={"spam": "baz"}\n )\n examples.append(new)\n\nIDX = 0\n\()\.apply(another_hook, name="before_add_examples")\(phases=[Phase.explicit])\ndef test_b(case):\n global IDX\n if IDX == 0:\n assert case.query == {"spam": "baz"}\n if IDX == 1:\n assert case.query == {"foo": "bar"}\n IDX += 1\n ', schema=simple_openapi)
    result = testdir.runpytest()
    result.assert_outcomes(passed=2)
class LimeImage(ExplainerBase):
    """LIME explainer for image classifiers, producing per-label masks."""
    # Marker attributes consumed by the explainer registry.
    explanation_type = 'local'
    alias = ['lime']

    def __init__(self, predict_function: Callable, mode: str='classification', **kwargs):
        super().__init__()
        assert (mode == 'classification'), 'Only supports classification tasks for image data.'
        self.mode = mode
        # Adapt the project's predict function to lime's numpy-batch interface.
        self.predict_fn = (lambda x: predict_function(Image(x, batched=True)))

    def explain(self, X, **kwargs) -> MaskExplanation:
        if ('top_labels' not in kwargs):
            kwargs['top_labels'] = 2
        explanations = MaskExplanation()
        # Downscale large inputs to keep LIME's perturbation sampling affordable.
        if (max(X.shape[1], X.shape[2]) > 256):
            X = Resize(256).transform(X)
        for i in range(X.shape[0]):
            explanation = lime_image.LimeImageExplainer().explain_instance(image=X[i].to_numpy()[0], classifier_fn=self.predict_fn, hide_color=kwargs.get('hide_color', None), top_labels=kwargs.get('top_labels', 2), num_features=kwargs.get('num_features', 100000), num_samples=kwargs.get('num_samples', 1000), batch_size=kwargs.get('batch_size', 10), segmentation_fn=kwargs.get('segmentation_fn', None), random_seed=kwargs.get('random_seed', None))
            # Collect one (image, mask) pair per explained label.
            (images, masks) = ([], [])
            for label in explanation.top_labels:
                (image, mask) = explanation.get_image_and_mask(label=label, positive_only=kwargs.get('positive_only', False), num_features=kwargs.get('num_features', 5), hide_rest=kwargs.get('hide_rest', False))
                images.append(image)
                masks.append(mask)
            explanations.add(explanation.top_labels, images, masks)
        return explanations
class SEATestsUniform(unittest.TestCase):
    """Superposed-epoch analysis on constant data: statistics must equal the constant."""

    def setUp(self):
        super(SEATestsUniform, self).setUp()
        self.testval = 5
        self.unidata = ([self.testval] * 200)
        time = list(range(200))
        self.epochs = [20, 40, 60, 80, 100, 120, 140, 160, 180]
        # seapy may warn about adjusting the window size; keep tests quiet
        # but deterministic by always enabling that specific warning.
        with warnings.catch_warnings():
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            self.obj = seapy.Sea(self.unidata, time, self.epochs, verbose=False)
        self.obj.sea()

    def testMeanUniform(self):
        # Constant input -> the epoch mean is the constant across the window.
        ntest.assert_array_equal(self.obj.semean, ([self.testval] * ((int(self.obj.window) * 2) + 1)))

    def testMedianUniform(self):
        ntest.assert_array_equal(self.obj.semedian, ([self.testval] * ((int(self.obj.window) * 2) + 1)))

    def testMeanMedEquality(self):
        ntest.assert_array_equal(self.obj.semedian, self.obj.semean)

    def testDatetimeEquiv(self):
        # Datetime-based epochs/window must reproduce the integer-based result.
        sttime = dt.datetime(2010, 1, 1)
        time = [(sttime + dt.timedelta(minutes=x)) for x in range(200)]
        epochs = [(sttime + dt.timedelta(minutes=x)) for x in self.epochs]
        window = dt.timedelta(minutes=3)
        delta = dt.timedelta(minutes=1)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            compobj = seapy.Sea(self.unidata, time, epochs, window=window, delta=delta, verbose=False)
        compobj.sea()
        ntest.assert_array_equal(self.obj.semedian, compobj.semedian)
        ntest.assert_array_equal(self.obj.semean, compobj.semean)

    def testSeaLen(self):
        self.assertEqual(len(self.obj), len(self.epochs))

    def testRandomEpochsNoArgs(self):
        # random() defaults to the same number of epochs as the original.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            newsea = self.obj.random()
        self.assertEqual(len(newsea), len(self.epochs))

    def testRandomEpochsArgs(self):
        n_req = 27
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            newsea = self.obj.random(n_req)
        self.assertEqual(len(newsea), n_req)

    def testRandomType(self):
        newsea = self.obj.random()
        self.assertEqual(type(newsea), type(self.obj))

    def testRandomBeforeSea(self):
        # random() must work on an object whose sea() was never called.
        n_req = 27
        sttime = dt.datetime(2010, 1, 1)
        time = [(sttime + dt.timedelta(minutes=x)) for x in range(200)]
        epochs = [(sttime + dt.timedelta(minutes=x)) for x in self.epochs]
        window = dt.timedelta(minutes=3)
        delta = dt.timedelta(minutes=1)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            compobj = seapy.Sea(self.unidata, time, epochs, window=window, delta=delta, verbose=False)
            newsea = compobj.random(n_req)
        self.assertEqual(len(newsea), n_req)

    def testRandomBoundType(self):
        # bound_type reflects the kind of bounds requested in sea().
        newsea = self.obj.random()
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            newsea.sea(mad=True, quartiles=False)
        self.assertEqual(newsea.bound_type, 'mad')
        newsea = self.obj.random()
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            newsea.sea(ci=True, ci_quan='mean')
        self.assertEqual(newsea.bound_type, 'ci')

    def testSeaCIFunc(self):
        # Passing np.mean vs the string 'mean' must give identical CI bounds.
        time = list(range(200))
        window = dt.timedelta(minutes=3)
        delta = dt.timedelta(minutes=1)
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', 'Window size changed .*', UserWarning, '^spacepy\\.seapy$')
            compobj1 = seapy.Sea(self.unidata, time, self.epochs, window=window, delta=delta, verbose=False)
            compobj1.sea(ci=95, ci_quan=np.mean)
            compobj2 = seapy.Sea(self.unidata, time, self.epochs, window=window, delta=delta, verbose=False)
            compobj2.sea(ci=95, ci_quan='mean')
        ntest.assert_allclose(compobj1.bound_low, compobj2.bound_low)

    def testSeaDict(self):
        namelist = ['O1', 'O2']
        sd = seapy.seadict([self.obj, self.obj], namelist)
        for key in sd.keys():
            self.assertTrue((key in namelist))

    def testSeaDictFail(self):
        # Mismatched object/name counts must raise ValueError.
        namelist = ['O1']
        namelist2 = ['O2']
        with self.assertRaises(ValueError):
            sd = seapy.seadict([self.obj, self.obj], namelist)
        with self.assertRaises(ValueError):
            sd = seapy.seadict({'O1': self.obj, 'O2': self.obj}, namelist2)

    def testSeaPlotShowFalse(self):
        # plot(show=False) returns the axes without opening a window.
        ax = self.obj.plot(show=False)
        self.assertTrue(isinstance(ax, matplotlib.axes.SubplotBase))
class _ProfileReBenchDB(_ReBenchDB):
    """ReBenchDB reporter variant that uploads profiling data instead of measurements."""

    def _send_data(self, cache):
        self.ui.debug_output_info('ReBenchDB: Prepare data for sending\n')
        num_profiles = 0
        all_data = []
        # One payload entry per run id, carrying all its profile data points.
        for (run_id, data_points) in cache.items():
            profile_data = [dp.as_dict() for dp in data_points]
            num_profiles += len(profile_data)
            all_data.append({'runId': run_id.as_dict(), 'p': profile_data})
        self.ui.debug_output_info('ReBenchDB: Sending {num_m} profiles. startTime: {st}\n', num_m=num_profiles, st=self._start_time)
        # The profile count is reported alongside the payload for bookkeeping.
        return self._rebench_db.send_results({'data': all_data, 'env': determine_environment(), 'startTime': self._start_time, 'source': determine_source_details(self._configurator)}, num_profiles)
def compute_fitness(chromesome, code_2, codebert_tgt, tokenizer_tgt, orig_prob, orig_label, true_label, code_1, names_positions_dict, args):
    """Fitness of a renaming chromosome for an adversarial attack on a code model.

    Applies the chromosome's identifier renamings to `code_1`, re-scores the
    pair with the target model, and returns (confidence drop on the original
    label, new prediction). Larger drop == fitter candidate.
    """
    mutated_code = map_chromesome(chromesome, code_1, 'java')
    feature = convert_code_to_features(mutated_code, code_2, tokenizer_tgt, true_label, args)
    dataset = CodePairDataset([feature], args)
    logits, preds = codebert_tgt.get_results(dataset, args.eval_batch_size)
    fitness_value = orig_prob - logits[0][orig_label]
    return fitness_value, preds[0]
def save_model(model, optimizer, opt, epoch, save_file):
    """Persist model/optimizer state plus run options and epoch to `save_file`."""
    print('==> Saving...')
    checkpoint = {
        'opt': opt,
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'epoch': epoch,
    }
    torch.save(checkpoint, save_file)
    # Drop the reference promptly; state dicts can be large.
    del checkpoint
def test_native_torch_tensor_sub():
    # Element-wise subtraction through RETURNN's native (C) frontend module:
    # same-shape, broadcast-over-batch, and scalar right-hand sides, all
    # required to run without falling back to Python (_CheckNoPythonCalls).
    batch_dim = Dim(2, name='batch_dim')
    feature_dim = Dim(3, name='feature_dim')
    tensor_bf = Tensor('tensor', dims=[batch_dim, feature_dim], dtype='float32', raw_tensor=torch.ones(2, 3))
    tensor_f = Tensor('tensor', dims=[feature_dim], dtype='float32', raw_tensor=torch.arange((- 1), 2, dtype=torch.float32))
    from returnn.frontend import _native
    mod = _native.get_module()
    with _CheckNoPythonCalls():
        res1 = mod.tensor_sub(tensor_bf, tensor_bf)
        res2 = mod.tensor_sub(tensor_bf, tensor_f)
        res3 = mod.tensor_sub(tensor_bf, 3.0)
    # ones - ones == zeros, dims preserved.
    assert (isinstance(res1, Tensor) and isinstance(res1.raw_tensor, torch.Tensor))
    assert (res1.dims == (batch_dim, feature_dim))
    assert (res1.raw_tensor.detach().numpy().tolist() == [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
    # ones - [-1, 0, 1] broadcast over the batch dim.
    assert (isinstance(res2, Tensor) and isinstance(res2.raw_tensor, torch.Tensor))
    assert (res2.dims == (batch_dim, feature_dim))
    assert (res2.raw_tensor.detach().numpy().tolist() == [[2.0, 1.0, 0.0], [2.0, 1.0, 0.0]])
    # ones - 3.0 scalar broadcast.
    assert (isinstance(res3, Tensor) and isinstance(res3.raw_tensor, torch.Tensor))
    assert (res3.dims == (batch_dim, feature_dim))
    assert (res3.raw_tensor.detach().numpy().tolist() == [[(- 2.0), (- 2.0), (- 2.0)], [(- 2.0), (- 2.0), (- 2.0)]])
_method  # NOTE(review): looks like the tail of a truncated decorator (e.g. `@cached_method`) — restore against the original file.
class Constant():
    """Symbolic mathematical constant with per-system conversions (Pynac-backed)."""

    def __init__(self, name, conversions=None, latex=None, mathml='', domain='complex'):
        self._conversions = (conversions if (conversions is not None) else {})
        self._latex = (latex if (latex is not None) else name)
        self._mathml = mathml
        self._name = name
        self._domain = domain
        # Expose one `_<system>_` interface method per registered conversion.
        for (system, value) in self._conversions.items():
            setattr(self, ('_%s_' % system), partial(self._generic_interface, value))
            setattr(self, ('_%s_init_' % system), partial(self._generic_interface_init, value))
        from .expression import PynacConstant
        self._pynac = PynacConstant(self._name, self._latex, self._domain)
        self._serial = self._pynac.serial()
        # Register globally so expressions can resolve the constant by serial/name.
        constants_table[self._serial] = self
        constants_name_table[self._name] = self
        register_symbol(self.expression(), self._conversions)

    def __richcmp__(self, other, op):
        # Constants of the same class and name compare equal (==, >=, <=).
        if ((self.__class__ == other.__class__) and (self._name == other._name)):
            return (op in [op_EQ, op_GE, op_LE])
        else:
            return NotImplemented

    def __reduce__(self):
        # Pickle by name/metadata; reconstructed through unpickle_Constant.
        return (unpickle_Constant, (self.__class__.__name__, self._name, self._conversions, self._latex, self._mathml, self._domain))

    def domain(self):
        return self._domain

    def expression(self):
        return self._pynac.expression()

    def _symbolic_(self, SR):
        # Coerce into the given symbolic ring.
        return SR(self.expression())

    def name(self):
        return self._name

    def __repr__(self):
        return self._name

    def _latex_(self):
        return self._latex

    def _mathml_(self):
        return self._mathml

    def _generic_interface(self, value, I):
        return I(value)

    def _generic_interface_init(self, value):
        return value

    def _interface_(self, I):
        # Prefer an explicit conversion; fall back to a `_<name>_` method.
        try:
            s = self._conversions[I.name()]
            return I(s)
        except KeyError:
            pass
        try:
            return getattr(self, ('_%s_' % I.name()))(I)
        except AttributeError:
            pass
        raise NotImplementedError

    def _gap_(self, gap):
        # GAP/Singular have no symbolic constants; pass the name as a string.
        return gap(('"%s"' % self))

    def _singular_(self, singular):
        return singular(('"%s"' % self))
def get_data(split, repeats, batch_size, images_per_class, shuffle_buffer):
    """Build a tf.data pipeline of {'image', 'label'} batches for one split.

    Training images are resized to 160x160, randomly cropped to 128x128 and
    randomly flipped; eval images are resized straight to 128x128. Pixels are
    scaled to [-1, 1] and labels one-hot encoded over 10 classes.
    """
    data = data_builder.as_dataset(split=split)
    if split == 'train':
        # Materialize the whole train split once so it can be re-zipped
        # as in-memory tensor slices.
        data = data.batch(50000)
        data = data.as_numpy_iterator().next()
        data = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(data['image']), tf.data.Dataset.from_tensor_slices(data['label'])))
        data = data.map(lambda x, y: {'image': x, 'label': y})
    else:
        data = data.map(lambda d: {'image': d['image'], 'label': d['label']})

    def _pp(data):
        # Per-example preprocessing / augmentation.
        im = data['image']
        if split == 'train':
            im = tf.image.resize(im, [160, 160])
            im = tf.image.random_crop(im, [128, 128, 3])
            # BUG FIX: tf.image.flip_left_right deterministically mirrored
            # EVERY training image; augmentation needs the random variant.
            im = tf.image.random_flip_left_right(im)
        else:
            im = tf.image.resize(im, [128, 128])
        # Scale uint8 pixel range [0, 255] to [-1, 1].
        im = (im - 127.5) / 127.5
        data['image'] = im
        data['label'] = tf.one_hot(data['label'], 10)
        return {'image': data['image'], 'label': data['label']}

    data = data.repeat(repeats)
    data = data.shuffle(shuffle_buffer)
    data = data.map(_pp)
    return data.batch(batch_size)
class ResidualBlock(nn.Module):
    """Two Linear->BatchNorm->ReLU->Dropout stages with an identity skip.

    Input and output are both of width `linear_size`.
    """

    def __init__(self, linear_size, p_dropout=0.5):
        super(ResidualBlock, self).__init__()
        self.l_size = linear_size
        # Shared activation/dropout modules reused by both stages.
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout(p_dropout)
        self.w1 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm1 = nn.BatchNorm1d(self.l_size)
        self.w2 = nn.Linear(self.l_size, self.l_size)
        self.batch_norm2 = nn.BatchNorm1d(self.l_size)

    def forward(self, x):
        # Stage 1: linear -> BN -> relu -> dropout.
        h = self.dropout(self.relu(self.batch_norm1(self.w1(x))))
        # Stage 2: same pipeline on the intermediate representation.
        h = self.dropout(self.relu(self.batch_norm2(self.w2(h))))
        # Residual connection.
        return x + h
def _register_cleanup(processes):
    """Arrange for every process in `processes` to be terminated at interpreter exit."""

    def _terminate_all():
        print('Cleaning up process...')
        # Give children a moment to settle before being terminated.
        time.sleep(0.5)
        for proc in processes:
            proc.terminate()

    atexit.register(_terminate_all)
class SecStructFeature(EdgeFeature):
    """One-hot secondary-structure features for the endpoints of an edge.

    Emits `sec_struct_from_<X>` / `sec_struct_to_<X>` indicator features
    depending on which endpoints are enabled.
    """

    def __init__(self, include_from=False, include_to=True):
        self.include_from = include_from
        self.include_to = include_to
        # At least one endpoint must contribute a feature.
        assert (include_from or include_to)

    def get_values(self, seq, from_index, to_index):
        values = {}
        if self.include_from:
            values[f"sec_struct_from_{seq['secondary'][from_index]}"] = 1
        if self.include_to:
            values[f"sec_struct_to_{seq['secondary'][to_index]}"] = 1
        return values
def main():
    # Interactive Mandelbrot zoom: each frame re-renders with a larger zoom
    # factor (i * 0.03) and pushes the pixel buffer to the taichi GUI.
    gui = ti.GUI('Mandelbrot set zoom', res=(width, height))
    for i in range(100000):
        render((i * 0.03))
        gui.set_image(pixels)
        gui.show()
def mnasnet_075(pretrained=False, **kwargs):
    """MnasNet-B1 with a 0.75 depth multiplier."""
    model = _gen_mnasnet_b1('mnasnet_075', 0.75, pretrained=pretrained, **kwargs)
    return model
def force_out_of_place(model: torch.nn.Module):
    """Generator that temporarily disables ``inplace`` on all submodules.

    Yields once with every module's boolean ``inplace`` flag forced to
    False, then restores the original flags — suitable for driving via
    ``contextlib.contextmanager`` or manual ``next()`` calls.
    """
    saved = {}
    for module in model.modules():
        # Only touch modules that actually expose a boolean `inplace` flag.
        if hasattr(module, 'inplace') and isinstance(module.inplace, bool):
            saved[module] = module.inplace
            module.inplace = False
    try:
        yield
    finally:
        # BUG FIX: restore even when the caller's body raises, so the model
        # is never left permanently in out-of-place mode.
        for module, was_inplace in saved.items():
            module.inplace = was_inplace
def package_path():
    """Return the package directory, resolving and caching it on first call.

    Resolution order: cached value, then the environment override
    (_EnvPkgPath, which must point at an existing directory), then the
    default path (created if missing).
    """
    global _pkg_path
    if _pkg_path:
        return _pkg_path
    if (_EnvPkgPath in os.environ):
        path = os.environ[_EnvPkgPath]
        # An explicit override is trusted but must already exist.
        assert os.path.isdir(path), ('import pkg path via env %s: is not a dir: %r' % (_EnvPkgPath, path))
    else:
        path = _DefaultPkgPath
        os.makedirs(path, exist_ok=True)
    _pkg_path = path
    return path
class ToTensor(object):
    """Convert a PIL Image, accimage Image, or numpy HxWxC array to a CxHxW tensor.

    Integer pixel data is scaled by 1/norm_value (default 255).
    """

    def __init__(self, norm_value=255):
        self.norm_value = norm_value

    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # numpy path: HWC -> CHW, then scale to floats.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img.float().div(self.norm_value)
        if ((accimage is not None) and isinstance(pic, accimage.Image)):
            # accimage already yields float CHW data; no scaling applied.
            nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
            pic.copyto(nppic)
            return torch.from_numpy(nppic)
        # PIL path: choose tensor dtype by image mode.
        if (pic.mode == 'I'):
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif (pic.mode == 'I;16'):
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
        # Channel count from the mode (YCbCr reports len 5, so special-cased).
        if (pic.mode == 'YCbCr'):
            nchannel = 3
        elif (pic.mode == 'I;16'):
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)
        # HWC -> CHW; contiguous() materializes the permuted layout.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float().div(self.norm_value)
        else:
            return img

    def randomize_parameters(self):
        # No randomness in this transform; present for interface compatibility.
        pass
def mk_pat_db_internal(inputFilePath, outputFilePath):
    """Embed a pattern file as a C string-literal array definition.

    Each input line becomes one quoted "...\n" literal inside
    `static char const g_pattern_database[]`.
    """
    with open(inputFilePath, 'r') as fin, open(outputFilePath, 'w') as fout:
        fout.write('static char const g_pattern_database[] =\n')
        for raw_line in fin:
            # Strip only the trailing newline; the C literal re-adds "\n".
            fout.write('"%s\\n"\n' % raw_line.strip('\n'))
        fout.write(';\n')
def densepose_inference(densepose_predictor_output: Any, detections: List[Instances]):
    """Slice batched DensePose predictor output back onto per-image Instances.

    Tensor fields are sliced by each image's detection count; non-tensor
    fields are shared as-is. Each Instances gets a `pred_densepose` attribute.
    """
    offset = 0
    for instances in detections:
        if (densepose_predictor_output is None):
            # Predictor produced nothing for this batch; leave Instances untouched.
            continue
        count = len(instances)
        output_cls = type(densepose_predictor_output)
        sliced = {}
        for f in fields(densepose_predictor_output):
            value = getattr(densepose_predictor_output, f.name)
            if isinstance(value, torch.Tensor):
                sliced[f.name] = value[offset:(offset + count)]
            else:
                sliced[f.name] = value
        instances.pred_densepose = output_cls(**sliced)
        offset += count
def test_singling_out_queries():
    # Both rows share c1 == 1, so that query matches two rows and must be
    # rejected as a singling-out query.
    df = pd.DataFrame({'c1': [1, 1], 'c2': [2, 3]})
    queries = UniqueSinglingOutQueries()
    queries.check_and_append('c1 == 1', df=df)
    assert (len(queries) == 0)
    # Adding c2 == 3 narrows the match to exactly one row, so it is kept.
    queries.check_and_append('c1 == 1 and c2 == 3', df=df)
    assert (len(queries) == 1)
def get_inference_engine(cfg):
    """Instantiate the BaseInferenceEngine subclass named by cfg.INFERENCE.ENGINE.

    Raises ValueError with the requested name when no subclass matches.
    """
    engines = all_subclasses(BaseInferenceEngine)
    names = [cls.__name__ for cls in engines]
    try:
        class_index = names.index(cfg.INFERENCE.ENGINE)
    # BUG FIX: the bare `except:` swallowed everything (KeyboardInterrupt,
    # attribute errors on cfg, ...); only a missing name is this error.
    except ValueError:
        raise ValueError('Inference engine {} not found.'.format(cfg.INFERENCE.ENGINE))
    # Note: both iterations of `engines` happen in the same call, so the
    # index computed from the names list is valid for list(engines).
    engine = list(engines)[class_index]
    return engine(cfg)
class Flickr8k(data.Dataset):
    """Flickr8k captioning dataset: images paired with their annotations."""

    def __init__(self, root, ann_file, transform=None, target_transform=None):
        self.root = os.path.expanduser(root)
        self.ann_file = os.path.expanduser(ann_file)
        self.transform = transform
        self.target_transform = target_transform
        # Annotations are parsed from the annotation file by an HTML-style parser.
        parser = Flickr8kParser(self.root)
        with open(self.ann_file) as fh:
            parser.feed(fh.read())
        self.annotations = parser.annotations
        # Sorted keys give a deterministic index -> image mapping.
        self.ids = list(sorted(self.annotations.keys()))

    def __getitem__(self, index):
        img_id = self.ids[index]
        img = Image.open(img_id).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        target = self.annotations[img_id]
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        return len(self.ids)
def test_point_precision(expected, observed):
    """point_precision over these fixtures must be exactly 1/5."""
    result = point_precision(expected, observed)
    assert result == float(1 / 5)
def main():
    """Report the Greedy-attack success rate over attack_gi.csv.

    Only rows with Index <= 42 are counted; rows whose 'Is Success' is '-4'
    are excluded from the denominator entirely.
    """
    total = 0
    greedy_hits = 0
    with open('./attack_gi.csv') as rf:
        for row in csv.DictReader(rf):
            if int(row['Index']) > 42:
                continue
            if row['Is Success'] == '-4':
                continue
            total += 1
            if row['Is Success'] == '1' and row['Attack Type'] == 'Greedy':
                greedy_hits += 1
    print(greedy_hits)
    print(total)
    print(float(greedy_hits) / total)
class SubprocVecEnv(VecEnv):
    # Vectorized environment that runs each sub-environment in its own
    # worker process, communicating over multiprocessing Pipes with
    # (command, payload) message tuples.

    def __init__(self, env_fns):
        """Spawn one worker process per env factory in `env_fns`.

        Queries the first worker for (state_space, observation_space,
        action_space) before delegating to VecEnv.__init__.
        """
        self.waiting = False  # True while step_async results are pending
        self.closed = False
        nenvs = len(env_fns)
        # One pipe per env: `remotes` stay in the parent, `work_remotes`
        # are handed to the workers.
        (self.remotes, self.work_remotes) = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn))) for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            # NOTE(review): daemon=False means workers can outlive the
            # parent unless close() is called — confirm this is intended.
            p.daemon = False
            p.start()
        # Parent closes its copies of the worker-side pipe ends.
        for remote in self.work_remotes:
            remote.close()
        self.remotes[0].send(('get_spaces', None))
        (self.state_space, observation_space, action_space) = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def get_visit_counts(self):
        """Sum the per-worker visit counts across all environments."""
        for remote in self.remotes:
            remote.send(('get_visit_counts', None))
        return sum((remote.recv() for remote in self.remotes))

    def step_async(self, actions, envs):
        """Dispatch one action to each of the workers indexed by `envs`."""
        for (action, env_i) in zip(actions, envs):
            self.remotes[env_i].send(('step', action))
        self.waiting = True

    def step_wait(self, envs):
        """Collect step results from the workers indexed by `envs`.

        Returns (stacked_state, stacked_obs, rewards, dones, infos).
        """
        results = [self.remotes[i].recv() for i in envs]
        self.waiting = False
        (state, obs, rews, dones, infos) = zip(*results)
        return (self._stack(state), self._stack(obs), np.array(rews), np.array(dones), infos)

    def _stack(self, items, *args):
        """Recursively stack per-env results into np arrays.

        `args` is a path of indices into the nested structure; when the
        element at that path is itself a tuple/list, recursion descends one
        level, otherwise the elements at that path across all envs are
        np.stack-ed together.
        """
        if (len(args) == 0):
            return self._stack(items, 0)
        # Walk down to the element addressed by the reversed index path.
        sub_items = items
        for dim in args[::(- 1)]:
            sub_items = sub_items[dim]
        if (type(sub_items) in (tuple, list)):
            return [self._stack(items, i, *args) for i in range(len(sub_items))]
        else:
            will_stack = []
            for i in range(len(items)):
                sub_items = items[i]
                for dim in args[:(- 1)][::(- 1)]:
                    sub_items = sub_items[dim]
                will_stack.append(sub_items)
            return np.stack(will_stack)

    def step(self, actions, env_mask=None):
        """Synchronous step over the envs selected by `env_mask` (all by default)."""
        if (env_mask is None):
            env_mask = np.ones(len(self.remotes))
        envs = np.where(env_mask)[0]
        self.step_async(actions, envs)
        return self.step_wait(envs)

    def reset(self, need_reset=None):
        """Reset selected envs; others just report their current state/obs."""
        if (need_reset is None):
            need_reset = [True for _ in range(len(self.remotes))]
        for (remote, nr) in zip(self.remotes, need_reset):
            if nr:
                remote.send(('reset', None))
            else:
                remote.send(('get_st_obs', None))
        results = [remote.recv() for remote in self.remotes]
        (state, obs) = zip(*results)
        return (self._stack(state), self._stack(obs))

    def close(self, force=False):
        """Shut down the workers.

        force=True terminates the processes and additionally kills any
        'vizdoom'/'python' processes in this process group (other than the
        parent itself); force=False drains pending results, sends 'close',
        and joins the workers.
        """
        if self.closed:
            return
        if force:
            for p in self.ps:
                p.terminate()
            main_pgid = os.getpgid(os.getpid())
            for proc in psutil.process_iter():
                if ((proc.name() in ('vizdoom', 'python')) and (os.getpgid(proc.pid) == main_pgid) and (proc.pid != os.getpid())):
                    proc.kill()
        else:
            if self.waiting:
                # Drain any outstanding step results before closing.
                for remote in self.remotes:
                    remote.recv()
            for remote in self.remotes:
                remote.send(('close', None))
            for p in self.ps:
                p.join()
        self.closed = True
class SumPricesRegression(BaseDataset):
    # Regression dataset: predict the summed item prices (y) from a
    # like-count feature (x). Data comes either from JSONL files or from
    # pre-built pickles (optionally downloaded).

    def __init__(self, root: str=C.ROOT, load_jsonl: bool=False, download: bool=False):
        """Load the dataset from `root`, from raw JSONL or from pickles."""
        self.download = download
        if load_jsonl:
            self.__load_jsonl(root=root)
        else:
            root = os.path.join(root, C.Tasks.SUM_PRICES_REGRESSION)
            self.__load_pickle(root=root)

    def __load_jsonl(self, root: str):
        """Read all *.jsonl files under `root` and build x/y arrays.

        y is the per-record sum of item prices; x is the `like_num` column.
        Raises RuntimeError when no JSONL file is found.
        """
        self.jsonls: list = sorted(glob.glob(os.path.join(root, f'*.{C.JSONL}')))
        if (len(self.jsonls) == 0):
            raise RuntimeError(M.DATASET_NOT_FOUND)
        self.df: pd.DataFrame = pd.read_json(self.jsonls[0], orient=C.RECORDS, lines=True)
        for jsonl in self.jsonls[1:]:
            df_: pd.DataFrame = pd.read_json(jsonl, orient=C.RECORDS, lines=True)
            self.df = pd.concat([self.df, df_])
        y = []
        items_list = self.df[C.Keys.ITEMS]
        for items in items_list:
            # Target = total price over a record's item list.
            sum_prices = 0.0
            for item in items:
                sum_prices += float(item[C.Keys.PRICE])
            y.append(sum_prices)
        self.y: np.ndarray = np.array(y)
        self.x: np.ndarray = np.array(self.df.like_num)

    def __load_pickle(self, root: str):
        """Load pre-built (x, y) pickles; optionally download them first.

        Raises RuntimeError when nothing is found and download is disabled.
        """
        self.pickles: list = sorted(glob.glob(os.path.join(root, f'*.{C.PICKLE}')))
        if (len(self.pickles) == 0):
            if self.download:
                os.makedirs(root, exist_ok=True)
                for f in C.FILES_SUM_PRICES_REGRESSION:
                    url = os.path.join(*[C.BASE_URL, C.Tasks.SUM_PRICES_REGRESSION, f])
                    save_path = os.path.join(root, f)
                    print('{}: {}'.format(C.DOWNLOAD, url))
                    urllib.request.urlretrieve(url, save_path)
                self.pickles: list = sorted(glob.glob(os.path.join(root, f'*.{C.PICKLE}')))
            else:
                raise RuntimeError(M.DATASET_NOT_FOUND)
        with open(self.pickles[0], 'rb') as f:
            (self.x, self.y) = pickle.load(f)
        # Append any remaining pickle shards.
        for p in self.pickles[1:]:
            with open(p, 'rb') as f:
                (x_, y_) = pickle.load(f)
            self.x = np.vstack([self.x, x_])
            self.y = np.hstack([self.y, y_])

    def load_dataset(self, train_size: int=10000, test_size: int=10000, covariate_shift: bool=False, target_shift: bool=False, train_mu: float=50, train_sigma: float=10, test_mu: float=80, test_sigma: float=10, random_seed: int=128, max_iter: int=100):
        """Split into train/test, optionally inducing distribution shift.

        With no shift flags: a plain random train/test split. With
        target_shift (resp. covariate_shift): data is sorted by y (resp. x)
        and indices are drawn from a Gaussian in *rank space* centered where
        the requested mu falls in the sorted order, so train and test come
        from different regions of the y (resp. x) distribution. The two
        shift flags are mutually exclusive.

        Returns ((x_train, y_train), (x_test, y_test)).
        Raises RuntimeError when both shift flags are set.

        NOTE(review): `N` and `max_iter` are currently unused.
        """
        if (target_shift and covariate_shift):
            raise RuntimeError(M.INVALID_SHIFT_ARGUMENTS)
        N = len(self.x)
        if ((not target_shift) and (not covariate_shift)):
            (x_train, x_pool, y_train, y_pool) = train_test_split(self.x, self.y, train_size=train_size, random_state=random_seed)
            (_, x_test, _, y_test) = train_test_split(x_pool, y_pool, test_size=test_size, random_state=random_seed)
        elif target_shift:
            np.random.seed(random_seed)
            # Sort by target so rank positions track y values.
            ind = np.argsort(self.y).astype(np.uint32)
            x = self.x[ind]
            y = self.y[ind]
            # Gaussian over ranks: center at the rank of test_mu, width =
            # ranks spanned by one sigma below it.
            test_ind_mu = np.searchsorted(y, test_mu)
            test_ind_sigma = (test_ind_mu - np.searchsorted(y, (test_mu - test_sigma)))
            test_ind = np.random.normal(test_ind_mu, test_ind_sigma, test_size).astype(np.uint32)
            # Drop sampled ranks that fall outside the array.
            test_ind = np.delete(test_ind, np.where((test_ind >= len(y)))[0], axis=0)
            x_test = x[test_ind]
            y_test = y[test_ind]
            # Remove test rows before sampling the training set.
            x = np.delete(x, test_ind, axis=0)
            y = np.delete(y, test_ind, axis=0)
            train_ind_mu = np.searchsorted(y, train_mu)
            train_ind_sigma = (train_ind_mu - np.searchsorted(y, (train_mu - train_sigma)))
            train_ind = np.random.normal(train_ind_mu, train_ind_sigma, train_size).astype(np.uint32)
            train_ind = np.delete(train_ind, np.where((train_ind >= len(y)))[0], axis=0)
            x_train = x[train_ind]
            y_train = y[train_ind]
        elif covariate_shift:
            np.random.seed(random_seed)
            # Same scheme as target_shift, but sorting/sampling on x.
            ind = np.argsort(self.x).astype(np.uint32)
            x = self.x[ind]
            y = self.y[ind]
            test_ind_mu = np.searchsorted(x, test_mu)
            test_ind_sigma = (test_ind_mu - np.searchsorted(x, (test_mu - test_sigma)))
            test_ind = np.random.normal(test_ind_mu, test_ind_sigma, test_size).astype(np.uint32)
            test_ind = np.delete(test_ind, np.where((test_ind >= len(y)))[0], axis=0)
            x_test = x[test_ind]
            y_test = y[test_ind]
            x = np.delete(x, test_ind, axis=0)
            y = np.delete(y, test_ind, axis=0)
            train_ind_mu = np.searchsorted(x, train_mu)
            train_ind_sigma = (train_ind_mu - np.searchsorted(x, (train_mu - train_sigma)))
            train_ind = np.random.normal(train_ind_mu, train_ind_sigma, train_size).astype(np.uint32)
            train_ind = np.delete(train_ind, np.where((train_ind >= len(x)))[0], axis=0)
            x_train = x[train_ind]
            y_train = y[train_ind]
        return ((x_train, y_train), (x_test, y_test))
def _ensure_spacing(coord, spacing, p_norm, max_out): tree = cKDTree(coord) indices = tree.query_ball_point(coord, r=spacing, p=p_norm) rejected_peaks_indices = set() naccepted = 0 for (idx, candidates) in enumerate(indices): if (idx not in rejected_peaks_indices): candidates.remove(idx) dist = distance.cdist([coord[idx]], coord[candidates], distance.minkowski, p=p_norm).reshape((- 1)) candidates = [c for (c, d) in zip(candidates, dist) if (d < spacing)] rejected_peaks_indices.update(candidates) naccepted += 1 if ((max_out is not None) and (naccepted >= max_out)): break output = np.delete(coord, tuple(rejected_peaks_indices), axis=0) if (max_out is not None): output = output[:max_out] return output
class djbfft_info(system_info):
    # Locator for the djbfft FFT library, in numpy.distutils system_info style.
    section = 'djbfft'
    dir_env_var = 'DJBFFT'
    notfounderror = DJBFFTNotFoundError

    def get_paths(self, section, key):
        """Extend the configured search paths with any 'djbfft' subdirectories,
        keeping only paths that exist on disk."""
        pre_dirs = system_info.get_paths(self, section, key)
        dirs = []
        for d in pre_dirs:
            dirs.extend((self.combine_paths(d, ['djbfft']) + [d]))
        return [d for d in dirs if os.path.isdir(d)]

    def calc_info(self):
        """Probe library dirs for djbfft, then require both headers before
        publishing the build info via set_info.

        Prefers the raw `djbfft.a` archive (linked as an extra object);
        falls back to lib{djbfft}.a / shared lib. Publishes nothing unless
        both fftc8.h and fftfreq.h are found in one include dir.
        """
        lib_dirs = self.get_lib_dirs()
        incl_dirs = self.get_include_dirs()
        info = None
        for d in lib_dirs:
            p = self.combine_paths(d, ['djbfft.a'])
            if p:
                info = {'extra_objects': p}
                break
            p = self.combine_paths(d, ['libdjbfft.a', ('libdjbfft' + so_ext)])
            if p:
                info = {'libraries': ['djbfft'], 'library_dirs': [d]}
                break
        if (info is None):
            # No library found anywhere: leave info unset.
            return
        for d in incl_dirs:
            # Both headers must be present in the same directory.
            if (len(self.combine_paths(d, ['fftc8.h', 'fftfreq.h'])) == 2):
                dict_append(info, include_dirs=[d], define_macros=[('SCIPY_DJBFFT_H', None)])
                self.set_info(**info)
                return
        return
class DatasetFile(DatasetRaw):
    # Dataset that loads either a directory of image files or a packed .npy
    # blob, remaps raw labels through a class2id table (negative ids drop the
    # example), and forwards everything to DatasetRaw.
    # NOTE(review): `args` is used both attribute-style (args.data_dir) and
    # dict-style (args['data'] = ...); presumably an EasyDict-like object —
    # confirm against the caller.

    def __init__(self, args, split='train'):
        data_dir = osp.join(args.data_dir, split)
        # Default mapping is identity over stringified class indices.
        if (not ('class2id' in args.keys())):
            class2id = dict()
            for i in range(args.num_classes):
                class2id[str(i)] = i
        else:
            class2id = args.get('class2id')
        # A directory means individual image files; otherwise fall back to
        # a single '<data_dir>.npy' archive.
        if osp.isdir(data_dir):
            npy_style = False
        elif osp.exists((data_dir + '.npy')):
            data_dir += '.npy'
            npy_style = True
        else:
            raise IOError(('data directory not exists: %s' % data_dir))
        data = None
        targets = None
        if npy_style:
            tmp = np.load(data_dir, allow_pickle=True).item()
            ori_data = tmp['data']
            ori_targets = tmp['targets']
            data = []
            targets = []
            for i in range(len(ori_data)):
                # Unknown labels map to 0; negative mapped ids are filtered out.
                id_ = class2id.get(str(int(ori_targets[i])), 0)
                if (id_ >= 0):
                    data.append(ori_data[i])
                    targets.append(id_)
            data = np.array(data)
            targets = np.array(targets)
            assert (len(data) == len(targets))
        else:
            img_list = get_all_files(data_dir, ['jpg', 'jpeg', 'png'])
            (data, targets) = self._gen_metas(img_list, class2id)
        args['data'] = data
        args['targets'] = targets
        # File-backed data must be loaded lazily; npy data is already in memory.
        args['load_data_from_file'] = (not npy_style)
        args['split'] = split
        super().__init__(**args)

    def _gen_metas(self, img_list, class2id):
        """Map each image path to a class id via its parent directory name;
        paths whose mapped id is negative are skipped."""
        (data, targets) = ([], [])
        for i in img_list:
            cls_id = class2id.get(i.split('/')[(- 2)], 0)
            if (cls_id < 0):
                continue
            data.append(i)
            targets.append(cls_id)
        targets = np.array(targets)
        return (data, targets)
def _worker_start():
    """Message loop for a demo/rollout worker.

    Drains the (enclosing-scope) `queue` each iteration, keeping only the
    newest message of each kind, then reacts:
      * 'stop'   -- exit the loop,
      * 'update' -- install a fresh (env, policy) pair,
      * 'demo'   -- load parameter values and play one animated rollout,
      * otherwise, if a demo was received before, keep replaying it.
    A KeyboardInterrupt terminates the loop silently.
    """
    env = None
    policy = None
    max_length = None
    try:
        while True:
            # Collect every pending message; later messages of the same
            # kind overwrite earlier ones.
            pending = {}
            while True:
                try:
                    msg = queue.get_nowait()
                except Empty:
                    break
                pending[msg[0]] = msg[1:]
            if 'stop' in pending:
                break
            if 'update' in pending:
                (env, policy) = pending['update']
            elif 'demo' in pending:
                (param_values, max_length) = pending['demo']
                policy.set_param_values(param_values)
                rollout(env, policy, max_path_length=max_length,
                        animated=True, speedup=5)
            elif max_length:
                # No new message: replay with the last demo settings.
                rollout(env, policy, max_path_length=max_length,
                        animated=True, speedup=5)
    except KeyboardInterrupt:
        pass
def broadcast_all(*values):
    """Promote a mix of numbers and tensors to a common broadcast shape.

    When at least one tensor is present, every tensor is expanded to the
    common broadcast shape and every scalar becomes a filled tensor of that
    shape (matching the first tensor's type). With only scalars, each one
    becomes a 0-dim float tensor.

    Raises:
        ValueError: if any argument is neither a numbers.Number nor a Tensor.
    """
    values = list(values)
    scalar_idxs = [i for i, v in enumerate(values) if isinstance(v, Number)]
    tensor_idxs = [i for i, v in enumerate(values) if v.__class__.__name__ == 'Tensor']
    if len(scalar_idxs) + len(tensor_idxs) != len(values):
        raise ValueError('Input arguments must all be instances of numbers.Number or torch.tensor.')
    if not tensor_idxs:
        # Pure-scalar call: 0-dim float tensors, no broadcasting needed.
        for idx in scalar_idxs:
            values[idx] = torch.tensor(float(values[idx]))
        return values
    broadcast_shape = _broadcast_shape([values[i].size() for i in tensor_idxs])
    for idx in tensor_idxs:
        values[idx] = values[idx].expand(broadcast_shape)
    # Use the first tensor as a template so scalars match its type/device.
    template = values[tensor_idxs[0]]
    for idx in scalar_idxs:
        values[idx] = template.new(template.size()).fill_(values[idx])
    return values
class TestOldSerialization(TestCase, SerializationMixin):
    # Exercises torch.save/torch.load with the legacy (non-zip) serialization
    # format; the run() override at the bottom forces use_zip=False for every
    # test in this class.

    def _test_serialization_container(self, unique_key, filecontext_lambda):
        """Save a Net from network1.py, then re-import the same module name
        from network2.py and check that loading raises exactly one
        SourceChangeWarning (and none while the source is unchanged)."""
        tmpmodule_name = 'tmpmodule{}'.format(unique_key)

        def import_module(name, filename):
            # Import `filename` under module name `name`, registering it in
            # sys.modules so torch's source retrieval can find it.
            import importlib.util
            spec = importlib.util.spec_from_file_location(name, filename)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            sys.modules[module.__name__] = module
            return module

        with filecontext_lambda() as checkpoint:
            fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing', '_internal', 'data', 'network1.py')
            module = import_module(tmpmodule_name, fname)
            torch.save(module.Net(), checkpoint)
            checkpoint.seek(0)
            with warnings.catch_warnings(record=True) as w:
                loaded = torch.load(checkpoint)
            self.assertTrue(isinstance(loaded, module.Net))
            if can_retrieve_source:
                # Source unchanged: no SourceChangeWarning expected.
                self.assertEquals(len(w), 0)
            # Re-import the same module name but with different source.
            fname = get_file_path_2(os.path.dirname(os.path.dirname(torch.__file__)), 'torch', 'testing', '_internal', 'data', 'network2.py')
            module = import_module(tmpmodule_name, fname)
            checkpoint.seek(0)
            with warnings.catch_warnings(record=True) as w:
                loaded = torch.load(checkpoint)
            self.assertTrue(isinstance(loaded, module.Net))
            if can_retrieve_source:
                # Changed source: exactly one SourceChangeWarning.
                self.assertEquals(len(w), 1)
                self.assertTrue(w[0].category, 'SourceChangeWarning')

    def test_serialization_container(self):
        self._test_serialization_container('file', tempfile.NamedTemporaryFile)

    def test_serialization_container_filelike(self):
        self._test_serialization_container('filelike', BytesIOContext)

    def test_serialization_offset(self):
        """Interleave pickle.dump and torch.save in one real file (>2 GiB, to
        cross 32-bit offset boundaries) and read everything back in order."""
        a = torch.randn(5, 5)
        b = torch.randn(1024, 1024, 512, dtype=torch.float32)
        m = torch.nn.Conv2d(1, 1, (1, 3))
        (i, j) = (41, 43)
        with tempfile.NamedTemporaryFile() as f:
            pickle.dump(i, f)
            torch.save(a, f)
            pickle.dump(j, f)
            torch.save(b, f)
            torch.save(m, f)
            # The big tensor must have pushed the file past 2 GiB.
            self.assertTrue((f.tell() > (((2 * 1024) * 1024) * 1024)))
            f.seek(0)
            i_loaded = pickle.load(f)
            a_loaded = torch.load(f)
            j_loaded = pickle.load(f)
            b_loaded = torch.load(f)
            m_loaded = torch.load(f)
        self.assertTrue(torch.equal(a, a_loaded))
        self.assertTrue(torch.equal(b, b_loaded))
        self.assertTrue((m.kernel_size == m_loaded.kernel_size))
        self.assertEqual(i, i_loaded)
        self.assertEqual(j, j_loaded)

    def test_serialization_offset_filelike(self):
        """Same interleaved round-trip as above, but on an in-memory
        file-like object instead of a real file."""
        a = torch.randn(5, 5)
        b = torch.randn(1024, 1024, 512, dtype=torch.float32)
        (i, j) = (41, 43)
        with BytesIOContext() as f:
            pickle.dump(i, f)
            torch.save(a, f)
            pickle.dump(j, f)
            torch.save(b, f)
            self.assertTrue((f.tell() > (((2 * 1024) * 1024) * 1024)))
            f.seek(0)
            i_loaded = pickle.load(f)
            a_loaded = torch.load(f)
            j_loaded = pickle.load(f)
            b_loaded = torch.load(f)
        self.assertTrue(torch.equal(a, a_loaded))
        self.assertTrue(torch.equal(b, b_loaded))
        self.assertEqual(i, i_loaded)
        self.assertEqual(j, j_loaded)

    def run(self, *args, **kwargs):
        # Force the legacy serialization path for every test in this class.
        with serialization_method(use_zip=False):
            return super(TestOldSerialization, self).run(*args, **kwargs)
def nested_ner_performance(pred_start, pred_end, pred_span, gold_start, gold_end, gold_span, ner_cate, label_lst, threshold=0.5, dims=2):
    """Compute nested-NER span metrics.

    dims == 1: treat inputs as one example; return its (pred, gold) span
    triples built from the start/end/span scores and the category label.
    dims == 2: treat inputs as batches, recurse per example, and return
    (average start/end accuracy, span precision, span recall, span f1).

    Raises:
        ValueError: when `dims` is neither 1 nor 2.
    """
    cate_idx2label = dict(enumerate(label_lst))
    if dims == 1:
        label = cate_idx2label[ner_cate]
        pred_triple = nested_transform_span_triple(pred_start, pred_end, pred_span, label, threshold=threshold)
        gold_triple = nested_transform_span_triple(gold_start, gold_end, gold_span, label, threshold=threshold)
        return (pred_triple, gold_triple)
    if dims == 2:
        pred_triples = []
        gold_triples = []
        accs = []
        batch = zip(pred_start, pred_end, pred_span, gold_start, gold_end, gold_span, ner_cate)
        for (p_start, p_end, p_span, g_start, g_end, g_span, cate) in batch:
            (pred_triple, gold_triple) = nested_ner_performance(p_start, p_end, p_span, g_start, g_end, g_span, cate, label_lst, dims=1)
            pred_triples.append(pred_triple)
            gold_triples.append(gold_triple)
            # Per-example accuracy is averaged over the start and end heads.
            accs.append((compute_acc(p_start, g_start) + compute_acc(p_end, g_end)) / 2.0)
        (span_precision, span_recall, span_f1) = nest_span_f1.nested_calculate_f1(pred_triples, gold_triples, dims=2)
        average_acc = sum(accs) / (len(accs) * 1.0)
        return (average_acc, span_precision, span_recall, span_f1)
    raise ValueError('Please notice that dims can only be 1 or 2 !')
class MVTecDataset(AnomalibDataset):
    """MVTec AD dataset for a single category.

    Args:
        task: task type this dataset serves.
        transform: albumentations pipeline applied to each sample.
        root: root directory of the MVTec dataset.
        category: name of the category subfolder under `root`.
        split: optional split selector (train/test/etc.).
    """

    def __init__(self, task: TaskType, transform: A.Compose, root: (Path | str), category: str, split: ((str | Split) | None)=None) -> None:
        super().__init__(task=task, transform=transform)
        self.split = split
        self.root_category = Path(root) / Path(category)

    def _setup(self) -> None:
        # Build the samples dataframe from the category folder on demand.
        self.samples = make_mvtec_dataset(self.root_category, split=self.split, extensions=IMG_EXTENSIONS)
class MemDevSim(NICSim):
    """Simulated memory device exposed through the NIC-sim socket interface."""

    def __init__(self) -> None:
        super().__init__()
        self.mem_latency = 500  # NOTE(review): unit not stated here — confirm
        # Base address and size are both set to 1 GiB; verify that `addr`
        # really is meant to equal `size`.
        self.addr = self.size = 1024 * 1024 * 1024
        self.as_id = 0

    def full_name(self) -> str:
        """Qualified simulator name, prefixed with 'mem.'."""
        return 'mem.' + self.name

    def sockets_cleanup(self, env: ExpEnv) -> tp.List[str]:
        """Socket/shm paths to remove when tearing the device down."""
        return [env.dev_mem_path(self), env.dev_shm_path(self)]

    def sockets_wait(self, env: ExpEnv) -> tp.List[str]:
        """Socket paths that must exist before the device counts as up."""
        return [env.dev_mem_path(self)]
# NOTE(review): the decorator marker was mangled in the original source
# ("_utils.test(debug=True)" immediately before the def, with no "@"),
# which is a syntax error; restored as a decorator. Confirm the intended
# decorator object name against the original file.
@_utils.test(debug=True)
def test_vector_swizzle_taichi():
    """Exercise vec3/vec4 construction and swizzle accessors (xyz/rgb/stp)."""
    def foo():
        v = ti.math.vec3(0)
        v = ti.math.vec3(0, 0, 0)
        v = ti.math.vec3([0, 0], 0)
        v = ti.math.vec3(0, v.xx)
        v = ti.math.vec3(0, v.xy)
        # Component-group writes: rgb aliases xyz.
        v.rgb += 1
        assert all(v.xyz == (1, 1, 1))
        v.zyx += ti.math.vec3(1)
        assert all(v.stp == ti.math.vec3(2, 2, 2))
        # x, r and s are three aliases of component 0.
        assert v.x == 2
        assert v.r == 2
        assert v.s == 2
        w = ti.floor(v).yxz
        assert all(w == v)
        z = ti.math.vec4(w.xyz, 2)
        assert all(z == w.xxxx)
    foo()
def assert_list(x, msg='not a list: {}'):
    """Check that ``x`` is a list.

    Args:
        x: value to check.
        msg: failure message template; formatted with ``type(x)``.

    Returns:
        ``(True, None)`` when ``x`` is a list, else ``(False, message)``.
    """
    if not isinstance(x, list):
        return (False, msg.format(type(x)))
    return (True, None)
class NLabelsPerPatientLabeler(Labeler):
    """Wrap a labeler and keep at most `num_labels` labels per patient.

    Labels are subsampled by a deterministic per-(seed, patient, index) hash,
    so the selection is reproducible for a fixed seed. ``num_labels == -1``
    keeps every label.
    """

    def __init__(self, labeler: Labeler, num_labels: int=1, seed: int=1):
        self.labeler: Labeler = labeler
        self.num_labels: int = num_labels
        self.seed: int = seed

    def label(self, patient: Patient) -> List[Label]:
        labels: List[Label] = self.labeler.label(patient)
        # Nothing to trim, or trimming disabled entirely.
        if len(labels) <= self.num_labels or self.num_labels == -1:
            return labels
        # Pair each label with a deterministic pseudo-random key, keep the
        # num_labels smallest keys, then restore the original ordering.
        keyed: List[Tuple[(int, int, Label)]] = [
            (pos, compute_random_num(self.seed, patient.patient_id, pos), labels[pos])
            for pos in range(len(labels))
        ]
        keyed.sort(key=lambda entry: entry[1])
        chosen: List[Tuple[(int, int, Label)]] = keyed[:self.num_labels]
        chosen.sort(key=lambda entry: entry[0])
        return [entry[2] for entry in chosen]

    def get_labeler_type(self) -> LabelType:
        return self.labeler.get_labeler_type()
def encode(buf, width, height):
    """Encode raw RGB bytes as a minimal PNG file.

    Args:
        buf: raw pixel bytes of length ``width * height * 3``; rows are
             emitted last-to-first, so the input is written bottom-up.
        width: image width in pixels.
        height: image height in pixels.

    Returns:
        The complete PNG file as bytes (signature + IHDR + IDAT + IEND).
    """
    assert (((width * height) * 3) == len(buf))
    bpp = 3  # bytes per pixel (RGB)

    def raw_data():
        # Emit scanlines bottom-up, each prefixed with filter type 0 (None).
        row_bytes = (width * bpp)
        for row_start in range((((height - 1) * width) * bpp), (- 1), (- row_bytes)):
            (yield b'\x00')
            (yield buf[row_start:(row_start + row_bytes)])

    def chunk(tag, data):
        # PNG chunk layout: length, tag, data, CRC-32 over tag+data.
        # fix: the 0xFFFFFFFF mask constant was garbled away in the original
        # source ("( & zlib.crc32(...))"), leaving a syntax error; restored
        # per the PNG spec's unsigned 32-bit CRC.
        return [struct.pack('!I', len(data)), tag, data, struct.pack('!I', (0xFFFFFFFF & zlib.crc32(data, zlib.crc32(tag))))]

    SIGNATURE = b'\x89PNG\r\n\x1a\n'
    COLOR_TYPE_RGB = 2
    COLOR_TYPE_RGBA = 6  # unused here; kept for reference
    bit_depth = 8
    return b''.join(((([SIGNATURE] + chunk(b'IHDR', struct.pack('!2I5B', width, height, bit_depth, COLOR_TYPE_RGB, 0, 0, 0))) + chunk(b'IDAT', zlib.compress(b''.join(raw_data()), 9))) + chunk(b'IEND', b'')))
class RealTopologicalStructure(Singleton):
    """Structure descriptor for real topological manifolds.

    Bundles the chart class, scalar-field algebra, and homset associated
    with the purely topological structure.
    """
    chart = RealChart
    name = 'topological'
    scalar_field_algebra = ScalarFieldAlgebra
    homset = TopologicalManifoldHomset

    def subcategory(self, cat):
        # Topological structure imposes no refinement on the category.
        return cat
def get_all_images(path: Union[(str, List[str])]) -> List[str]:
    """Collect image file paths from `path`.

    Args:
        path: either a directory (all image files directly inside it are
              returned) or the path of a single image file.

    Returns:
        List of image file paths.

    Raises:
        Exception: when `path` is neither an image file nor a directory.
    """
    if os.path.isdir(path):
        # fix: the original ran `print(path, len(os.listdir(path)))` before
        # this isdir check, which crashed with NotADirectoryError whenever a
        # single image file path was passed; the debug print is removed.
        entries = os.listdir(path)
        return [os.path.join(path, item) for item in entries if is_image_file(item)]
    elif is_image_file(path):
        return [path]
    else:
        raise Exception(f'{path} is neither a path to a valid image file nor a path to folder containing images')
def capture_image():
    """Grab one 640x480 JPEG frame from the Pi camera as a PIL Image."""
    buffer = io.BytesIO()
    with PiCamera() as camera:
        camera.resolution = (640, 480)
        camera.capture(buffer, format='jpeg')
    # Rewind so PIL reads the JPEG from the start of the buffer.
    buffer.seek(0)
    return Image.open(buffer)
def add_track_to_constraint(camera_object: bpy.types.Object, track_to_target_object: bpy.types.Object) -> None:
    """Make the camera continuously aim at the target object.

    Adds a TRACK_TO constraint so the camera's -Z axis points at the target
    while +Y stays up (Blender's camera orientation convention).
    """
    track = camera_object.constraints.new(type='TRACK_TO')
    track.target = track_to_target_object
    track.track_axis = 'TRACK_NEGATIVE_Z'
    track.up_axis = 'UP_Y'
def predict(model, data, batch_size):
    """Run `model` over `data` in batches and return predicted class ids.

    Returns:
        A 1-D tensor of argmax indices over the model logits, one per example.
    """
    predictions = []
    for (batch, size, start, end) in Batcher(data, batch_size):
        inputs = prepare(batch)
        model.eval()  # ensure inference mode before every forward pass
        logits = model(inputs).cpu()
        predictions.extend(torch.max(logits, 1)[1])
    return torch.stack(predictions)
def get_backend_from_tensors(*args):
    """Return the raw backend of the first Tensor argument, falling back to
    the global backend when no Tensor is present."""
    first_tensor = next((a for a in args if isinstance(a, Tensor)), None)
    if first_tensor is not None:
        return first_tensor._raw_backend
    return _global_rf
def pipe_and_output(input, output=None, num_threads=1, processor=None, name=None, capacity=None, group=None, num_runtime_threads=1, final_outputs=None):
    """Build a pipe step and optionally surface the task's final outputs.

    Returns:
        ``(result, outputs)`` where ``outputs`` is ``task.outputs()``
        (unwrapped to its first element unless ``final_outputs`` is a
        list/tuple), or ``None`` when no final outputs were requested.
    """
    assert num_threads > 0
    (result, task) = _pipe_step(input, output, num_threads, processor, name,
                                capacity, group, num_runtime_threads,
                                final_outputs)
    if final_outputs is None:
        return (result, None)
    outputs = task.outputs()
    # A scalar final_outputs spec means a single unwrapped output.
    if type(final_outputs) not in (list, tuple):
        outputs = outputs[0]
    return (result, outputs)
class SSLCherryPyServer(ServerAdapter):
    """Bottle server adapter that serves the WSGI app over HTTPS via CherryPy."""

    def run(self, handler):
        # Wrap the handler in a CherryPy WSGI server using our TLS adapter
        # (certificate/key paths from the module-level SSL_CERT/SSL_PRIVKEY).
        server = WSGIServer((self.host, self.port), handler)
        server.ssl_adapter = SecuredSSLServer(SSL_CERT, SSL_PRIVKEY)
        try:
            server.start()
        finally:
            # Always release the listening socket, even on error/interrupt.
            server.stop()