code
stringlengths
101
5.91M
def test_dbscan_feature(): eps = 0.8 min_samples = 10 metric = 'euclidean' (core_samples, labels) = dbscan(X, metric=metric, eps=eps, min_samples=min_samples) n_clusters_1 = (len(set(labels)) - int(((- 1) in labels))) assert (n_clusters_1 == n_clusters) db = DBSCAN(metric=metric, eps=eps, mi...
_cache(maxsize=1000) def measure_state_with_cache_fock_density(state: Tuple[Tuple[complex]], povms: Tuple[Tuple[Tuple[complex]]]) -> Tuple[(List[array], List[float])]: state = array(state) povms = [array(povm) for povm in povms] prob_list = [trace((state povm)).real for povm in povms] state_list = [] ...
class ParameterNetTest(BasePytorchTest):
    """Feature test that plugs a bare ParameterNet into the BasePytorchTest harness."""

    def __init__(self, unit_test):
        super().__init__(unit_test)

    def create_feature_network(self, input_shape):
        # input_shape is ignored — ParameterNet presumably fixes its own shapes; confirm.
        return ParameterNet()
class PegasusForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder for the real PegasusForConditionalGeneration.

    Instantiation calls requires_backends, which presumably raises an
    informative error when the 'torch' backend is unavailable — confirm
    against the requires_backends implementation.
    """
    # Backends that must be installed before the real class can be used.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class GoogleSearchGetSearchTrends(VirtualFunctionTool): name = 'GoogleSearchGetSearchTrends' summary = 'Access the search trends on Google.' parameters: List[ArgParameter] = [{'name': 'time_range', 'type': 'string', 'description': "The time range to get the search trends, in the format of 'YYYY-MM-DD..YYYY-...
def CalculateRadius(mol):
    """Return log10 of the graph radius of *mol*.

    The radius is the minimum over atoms of the maximum topological distance
    (eccentricity) taken from RDKit's distance matrix.  A radius of 0 (e.g. a
    single-atom molecule) is replaced by MINVALUE so log10 stays finite.
    """
    dist_matrix = Chem.GetDistanceMatrix(mol)
    # Eccentricity of each atom, then the smallest eccentricity = radius.
    radius = dist_matrix.max(axis=0).min()
    if radius == 0:
        radius = MINVALUE
    return np.log10(radius)
def test_call_while():
    """call_while(A) must reproduce the reference Fibonacci-style fill of A."""
    A = np.random.randint(1, 10, size=(10,), dtype=np.int32)
    ref = np.copy(A)
    ref[0] = 0
    fib, i = 1, 1
    # Reference loop: write the running value, double it, advance — mirrors
    # what call_while is expected to do in place on A.
    while fib < 50 and i < 10:
        ref[i] = fib
        fib += ref[i]
        i += 1
    call_while(A)
    assert np.array_equal(A, ref)
class TiuHead(HeadDef): _fields_ = [('cmd_short', ctypes.c_uint64, 1), ('cmd_id', ctypes.c_uint64, 20), ('cmd_id_dep', ctypes.c_uint64, 20), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 5)] cmd_short: int cmd_id: int cmd_id_dep: int tsk_typ: int tsk_eu_typ: int def op_typ...
class OpenAILLM(object): def __init__(self, model_name: str): self.model_name = model_name self.model_max_length = 4097 self.usages = [] self.completions = [] self.responses = [] def generate(self, prompt: str, messages: list=[], **gen_kwargs) -> str: if (self.mod...
def get_score(submission_path='../env/submission.csv'):
    """Read a ';'-separated submission file and compare its two header names.

    NOTE(review): this compares the first two COLUMN NAMES lexicographically,
    not any row values — presumably intentional for this scoring environment;
    confirm against the caller.
    """
    frame = pd.read_csv(submission_path, delimiter=';')
    first_col, second_col = frame.columns[0], frame.columns[1]
    return first_col > second_col
class PyNNDescent(BaseANN): def __init__(self, metric, index_param_dict, n_search_trees=1): if ('n_neighbors' in index_param_dict): self._n_neighbors = int(index_param_dict['n_neighbors']) else: self._n_neighbors = 30 if ('pruning_degree_multiplier' in index_param_dic...
class ImageNetDataset(data.Dataset): def __init__(self, root, anno_file, loader=default_loader, transform=None, target_transform=None): self.read_file(anno_file) self.root = root self.loader = loader self.transform = transform self.target_transform = target_transform def ...
def visualize_img(*no_title_images, cols=1, show=True, **images): n = (len(images) + len(no_title_images)) rows = math.ceil((n / cols)) plt.figure(figsize=((5 * cols), (5 * rows))) cols = np.ceil((n / rows)) for (i, image) in enumerate(no_title_images): plt.subplot(rows, cols, (i + 1)) ...
class Graphics(WithEqualityById, SageObject): def __init__(self): self._axes_color = (0, 0, 0) self._axes_label_color = (0, 0, 0) self._axes_width = 0.8 self._bbox_extra_artists = [] self._extra_kwds = {} self._fontsize = 10 self._axes_labels_size = 1.6 ...
def test_generate_delta_assert_model_in_tags(tensor_key, named_tensor): tensor_codec = TensorCodec(NoCompressionPipeline()) (tensor_name, origin, round_number, report, tags) = tensor_key tensor_key = TensorKey(tensor_name, origin, round_number, report, ('model',)) metadata = [{'int_to_float': proto.int_...
class PointMagneticField(BaseRx):
    """Point receiver for magnetic-field ('h') measurements.

    Thin specialization of BaseRx that fixes the projected field to 'h'.
    """

    def __init__(self, locations, orientation='x', component='real', **kwargs):
        # Set before calling the base constructor — BaseRx may read projField
        # during initialization (confirm against BaseRx).
        self.projField = 'h'
        super().__init__(locations, orientation, component, **kwargs)
def test_range_indices(): subset1 = Indices.from_string('1') subset2 = Range.from_string('0:2:1') assert subset2.covers_precise(subset1) assert (subset1.covers_precise(subset2) is False) subset1 = Indices.from_string('3') subset2 = Range.from_string('0:4:2') assert (subset2.covers_precise(su...
_func def sample0(qf: ti.types.ndarray(ndim=2), u: int, v: int) -> float: return sample_impl(qf, u, v)
def is_tensor(x): if is_torch_fx_proxy(x): return True if is_torch_available(): import torch if isinstance(x, torch.Tensor): return True if is_tf_available(): import tensorflow as tf if isinstance(x, tf.Tensor): return True if is_flax_avail...
class TinyNetworkSETN(nn.Module): def __init__(self, C, N, max_nodes, num_classes, search_space, affine, track_running_stats): super(TinyNetworkSETN, self).__init__() self._C = C self._layerN = N self.max_nodes = max_nodes self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size...
def modern_uninstall(spkg_name, sage_local, files, verbose=False): spkg_scripts = pth.join(sage_local, 'var', 'lib', 'sage', 'scripts') spkg_scripts = os.environ.get('SAGE_SPKG_SCRIPTS', spkg_scripts) spkg_scripts = pth.join(spkg_scripts, spkg_name) files.sort(key=(lambda f: ((- f.count(os.sep)), f))) ...
class ContextualLexer(): def __init__(self): self.lexers = LEXERS self.set_parser_state(None) def set_parser_state(self, state): self.parser_state = state def lex(self, stream): newline_types = NEWLINE_TYPES ignore_types = IGNORE_TYPES lexers = LEXERS ...
class _LocalOptimizer(object): global_lock = Lock() def __init__(self, optim_cls, local_params_rref, *args, **kwargs): self._local_params = [rref.local_value() for rref in local_params_rref] self.optim = optim_cls(self._local_params, *args, **kwargs) def step(self, autograd_ctx_id): ...
def get_pretrained_cfg(model: str, tag: str):
    """Look up the pretrained config dict for (model, tag).

    Returns {} when the model is unknown or the (cleaned) tag has no entry.
    """
    try:
        entries = _PRETRAINED[model]
    except KeyError:
        return {}
    return entries.get(_clean_tag(tag), {})
def _tokenize_ogb_arxiv_datasets(d): def merge_by_ids(meta_data, node_ids, categories): meta_data.columns = ['ID', 'Title', 'Abstract'] meta_data['ID'] = meta_data['ID'].astype(np.int64) meta_data.columns = ['mag_id', 'title', 'abstract'] data = pd.merge(node_ids, meta_data, how='lef...
class OpenImagesParserCfg(): categories_filename: str img_info_filename: str bbox_filename: str img_label_filename: str = '' masks_filename: str = '' img_filename: str = '%s.jpg' task: str = 'obj' prefix_levels: int = 1 add_background: bool = True has_labels: bool = True bbox...
class MspOE(OodMethod): def __init__(self, dataset: Dataset): super().__init__(WideResNet(depth=40, num_classes=dataset.get_num_classes(), widen_factor=2, dropRate=0.3), MspScorer(), dataset) def get_transform(self): mean = [(x / 255) for x in [125.3, 123.0, 113.9]] std = [(x / 255) for ...
def sin_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes):
    """Backward pass for sin: d/dx sin(x) = cos(x), so dx = dy * cos(x).

    Only the incoming gradient and the forward input are used; the shape and
    output arguments are part of the registered backward signature.
    """
    dy = grad_inputs[0]
    x = inputs[0]
    return dy * F.cos(x)
def load_img(path): image = Image.open(path).convert('RGB') (w, h) = image.size print(f'loaded input image of size ({w}, {h}) from {path}') (w, h) = map((lambda x: (x - (x % 32))), (w, h)) image = image.resize((w, h), resample=PIL.Image.LANCZOS) image = (np.array(image).astype(np.float32) / 255....
def blobs_from_pkl(path, num_classes=400): with path.open(mode='rb') as f: pkl = pickle.load(f, encoding='latin1') blobs = pkl['blobs'] assert ((('last_out_L' + str(num_classes)) + '_w') in blobs), 'Number of --classes argument doesnt matche the last linear layer in pkl' assert ((('l...
class Pruner(): def __init__(self, float_graph: Graph, fw_info: FrameworkInfo, fw_impl: PruningFrameworkImplementation, target_kpi: KPI, representative_data_gen: Callable, pruning_config: PruningConfig, target_platform_capabilities: TargetPlatformCapabilities): self.float_graph = float_graph self.fw...
class GraphLabels(object):
    """Container for a graph's title and axis labels.

    Fix: the original used TextWithFontSize(...) instances as default argument
    values.  Python evaluates defaults once at definition time, so every
    GraphLabels built with defaults shared the SAME (presumably mutable) label
    objects — mutating one graph's title would silently change all of them.
    None sentinels keep the signature backward-compatible while giving each
    instance fresh defaults.
    """

    def __init__(self, title=None, x_label=None, y1_label=None, y2_label=None):
        # Each default is created per-instance instead of per-definition.
        self.title = title if title is not None else TextWithFontSize(fontsize=12)
        self.x_label = x_label if x_label is not None else TextWithFontSize()
        self.y1_label = y1_label if y1_label is not None else TextWithFontSize()
        self.y2_label = y2_label if y2_label is not None else TextWithFontSize()
def _clean_address_parts(address_dict: Dict[(str, str)]) -> Dict[(str, str)]: if (not address_dict): return {} result_dict: Dict[(str, str)] = {} cleaning_funcs = {'house_number': _clean_house_number, 'street_prefix': _clean_prefix, 'street_name': _clean_street, 'street_suffix': _clean_suffix, 'stat...
def _show(state: State) -> None: BLACK_CHAR = '' WHITE_CHAR = 'O' POINT_CHAR = '+' print('') for xy in range((state._x.size * state._x.size)): if (state._x.chain_id_board[xy] > 0): print((' ' + BLACK_CHAR), end='') elif (state._x.chain_id_board[xy] < 0): print...
.parametrize('inspecs', inspecs_params()) .parametrize('activation', ['identity', 'sigmoid', 'tanh', 'relu', 'elu', 'crelu']) def test_activation(inspecs, activation, nnabla_opts): func = getattr(F, activation) fb = FunctionBenchmark(func, inspecs, [], {}, nnabla_opts.ext, nnabla_opts.ext_kwargs) fb.benchma...
def get_model_zoo(config_path):
    """Build a model from a model-zoo config path.

    Falls back to CPU when CUDA is unavailable so the model can still be
    constructed on GPU-less machines.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_path))
    if not torch.cuda.is_available():
        cfg.MODEL.DEVICE = 'cpu'
    return build_model(cfg)
class SpectralNormStateDictHook(): def __init__(self, fn) -> None: self.fn = fn def __call__(self, module, state_dict, prefix, local_metadata) -> None: if ('spectral_norm' not in local_metadata): local_metadata['spectral_norm'] = {} key = (self.fn.name + '.version') i...
def set_default_rng_seed(seed):
    """Deterministically reseed the module-level RNGs.

    Seeds Python's stdlib random module from `seed`, then derives the
    module-global numpy RandomState from it, and finally a Theano-style
    shared RandomStreams from the numpy RNG — so all three follow `seed`.
    """
    global default_rng, default_srng
    random.seed(seed)
    # numpy seed drawn from the freshly seeded stdlib RNG.
    default_rng = np.random.RandomState(random.randint(0, 9999))
    # Symbolic RNG seeded from the numpy RNG just created above.
    default_srng = T.shared_randomstreams.RandomStreams(default_rng.randint(9999))
def traverse(x, N, stack, F, X, R, FP): stack.append(x) d = len(stack) N[x] = d F[x] = FP(x) rel = R(x) for y in rel: if (N[y] == 0): traverse(y, N, stack, F, X, R, FP) N[x] = min(N[x], N[y]) for a in F.get(y, []): if (a not in F[x]): ...
class CNXNLILoader(Loader): def __init__(self): super(CNXNLILoader, self).__init__() def _load(self, path: str=None): ds_all = DataSet() with open(path, 'r', encoding='utf-8') as f: head_name_list = f.readline().strip().split('\t') sentence1_index = head_name_list...
class estimateMissingValuesStar(estimateMissingValues): def setUp(self): self.star = ex.genExampleStar() def test_magVEstimatedWhenTrue(self): del self.star.params['magV'] self.assertAlmostEqual(self.star.magV, 9.14, 3) self.assertTrue(('Estimated magV' in self.star.flags.flags))...
class WrongTileError(Exception): def __init__(self, tile_lat, tile_lon, req_lat, req_lon): Exception.__init__() self.tile_lat = tile_lat self.tile_lon = tile_lon self.req_lat = req_lat self.req_lon = req_lon def __str__(self): return ('SRTM tile for %d, %d does no...
class VGG(nn.Module): def __init__(self, quant, num_classes=10, depth=16, batch_norm=False, writer=None): self.linear = nn.Linear self.conv = nn.Conv2d super(VGG, self).__init__() self.features = make_layers(cfg[depth], quant, batch_norm, self.conv) self.classifier = nn.Seque...
class ThreeNN(Function): def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[(torch.Tensor, torch.Tensor)]: assert unknown.is_contiguous() assert known.is_contiguous() (N, _) = unknown.size() m = known.size(0) dist2 = torch.cuda.FloatTensor(N, 3) idx...
def categorical_sample(probs, use_cuda=False):
    """Draw one categorical sample per row of `probs`.

    Returns (int_acs, acs): the sampled column indices (shape [batch, 1]) and
    the corresponding one-hot float tensor (same shape as `probs`).
    """
    int_acs = torch.multinomial(probs, 1)
    tensor_type = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
    one_hot = Variable(tensor_type(*probs.shape).fill_(0)).scatter_(1, int_acs, 1)
    return (int_acs, one_hot)
def test_hyponatremia(tmp_path: pathlib.Path): outcome_codes = {'child_1', 'child_1_1', 'child_2', 'LOINC/LG11363-5', 'LOINC/2951-2', 'LOINC/2947-0'} labeler = _create_specific_labvalue_labeler(HyponatremiaLabValueLabeler, 'severe', outcome_codes) _assert_value_to_label_correct(labeler, 124.9, 129.9, 134.99...
class CNNSelfAttention(nn.Module): def __init__(self, input_dim, hidden_dim, kernel_size, padding, pooling, dropout, output_class_num, **kwargs): super(CNNSelfAttention, self).__init__() self.model_seq = nn.Sequential(nn.AvgPool1d(kernel_size, pooling, padding), nn.Dropout(p=dropout), nn.Conv1d(inpu...
def attention_with_relations(query, key, value, relation_k, relation_v, mask=None, dropout=None): d_k = query.size((- 1)) scores = relative_attention_logits(query, key, relation_k) if (mask is not None): scores = scores.masked_fill((mask == 0), (- .0)) p_attn = F.softmax(scores, dim=(- 1)) i...
def path2tensor(filepath):
    """Load an audio file as a mono 16 kHz normalized 1-D tensor."""
    effects = [['channels', '1'], ['rate', '16000'], ['norm']]
    tensor, _ = apply_effects_file(str(filepath), effects)
    # Drop the channel dimension (mono after the 'channels 1' effect).
    return tensor.squeeze(0)
def template_simulation(spec, scene, sim_props, delete_on_clean=False, caching=False, save_maya_scene=False): print('\nGarment load') garment = mymaya.MayaGarment(spec) try: garment.load(shader_group=scene.cloth_SG(), obstacles=[scene.body], config=sim_props['config']) except mymaya.PatternLoadi...
def main(unused_argv):
    """Convert both the 'train' and 'val' dataset splits."""
    for dataset_split in ['train', 'val']:
        _convert_dataset(dataset_split)
def under_attention(flat_param_key):
    """Return True if any key mentions attention.

    A key matches when its lowercased form contains 'attention' or 'attn'.
    Empty input yields False.
    """
    # any() + generator replaces the manual loop-and-return-early pattern.
    return any(
        'attention' in key.lower() or 'attn' in key.lower()
        for key in flat_param_key
    )
def test_register_processor_variant():
    """A pipeline using the registered 'lol' tokenize variant must produce the expected tokens."""
    nlp = stanza.Pipeline(dir=TEST_MODELS_DIR, lang='en',
                          processors={'tokenize': 'lol'}, package=None)
    doc = nlp(EN_DOC)
    joined = '\n\n'.join(sent.tokens_string() for sent in doc.sentences)
    assert EN_DOC_LOL_TOKENS == joined
class UNETR(nn.Module): def __init__(self, img_shape=(128, 128, 128), input_dim=4, output_dim=3, embed_dim=768, patch_size=16, num_heads=12, dropout=0.1): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.embed_dim = embed_dim self.img_shape = im...
def make_estimator_distributed_runconfig(FLAGS, estimator, is_distributed, save_checkpoints_steps=100): if is_distributed: (cluster, task_type, task_index) = make_distributed_info_without_evaluator(FLAGS) dump_into_tf_config(cluster, task_type, task_index) device_filters = None if (e...
class EventStorage(): def __init__(self, start_iter=0): self._history = defaultdict(HistoryBuffer) self._smoothing_hints = {} self._latest_scalars = {} self._iter = start_iter self._current_prefix = '' def put_scalar(self, name, value, smoothing_hint=True): name =...
def preprocess_for_inception(images): assert (images.shape[3].value == 3) with tf.control_dependencies([tf.assert_greater_equal(images, 0.0), tf.assert_less_equal(images, 255.0)]): images = tf.identity(images) preprocessed_images = tf.map_fn(fn=tf.contrib.gan.eval.preprocess_image, elems=images, bac...
def bench(args):
    """Benchmark on the cudnn extension: one warm-up run, then report timings."""
    ctx = get_extension_context('cudnn', device_id=args.device_id)
    nn.set_default_context(ctx)
    _bench(args)  # warm-up; timings discarded
    et_mono, et_comp = _bench(args)
    print(f'Elapsed time (monolithic) [s] = {et_mono}')
    print(f'Elapsed time (composite) [s] = {et_comp}')
class Seq2SeqModelOutput(ModelOutput): last_hidden_state: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Option...
class Base(pl.LightningModule): def __init__(self, hparams, **kwargs) -> None: super(Base, self).__init__() self.hparams = hparams def add_model_specific_args(parent_parser): parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument('--lr', typ...
def gen_song(song_name, t, epoch): [sp_min, sp_max, ap_min, ap_max] = np.load('data/timbre_model/min_max_record.npy') condi = get_condition(song_name) (sp, raw_sp) = generate_timbre(0, sp_max, sp_min, condi, None) plt.imshow(np.log(np.transpose(sp)), aspect='auto', origin='bottom', interpolation='none')...
class ModelInfo(): def __init__(self, modelId: Optional[str]=None, author: Optional[str]=None, downloads: Optional[int]=None, tags: List[str]=[], pipeline_tag: Optional[str]=None, siblings: Optional[List[Dict]]=None, **kwargs): self.modelId = modelId self.author = author self.downloads = dow...
def load_block8(state_dict, name_pth, name_tf): load_conv2d(state_dict, (name_pth + '.branch0'), (name_tf + '/Branch_0/Conv2d_1x1')) load_conv2d(state_dict, (name_pth + '.branch1.0'), (name_tf + '/Branch_1/Conv2d_0a_1x1')) load_conv2d(state_dict, (name_pth + '.branch1.1'), (name_tf + '/Branch_1/Conv2d_0b_1x...
def Hasse_bounds(q, genus=1):
    """Return the Hasse–Weil interval (q+1-r, q+1+r).

    r is the integer square root of 4*q for genus 1, and of 4*genus^2*q
    otherwise.  `q` is expected to carry an `.isqrt()` method (a Sage
    Integer — confirm).
    """
    if genus == 1:
        rq = (4 * q).isqrt()
    else:
        rq = (4 * genus ** 2 * q).isqrt()
    center = q + 1
    return (center - rq, center + rq)
def load_weights(mdl, name): if (name == 'vggface2'): features_path = ' logits_path = ' elif (name == 'casia-webface'): features_path = ' logits_path = ' else: raise ValueError('Pretrained models only exist for "vggface2" and "casia-webface"') model_dir = os.path....
def clean_padding(tensor, length, len_dim=1, mask_value=0.0):
    """Out-of-place wrapper around clean_padding_.

    Clones `tensor`, applies the in-place padding cleanup to the clone, and
    returns it; the input is left untouched.
    """
    out = tensor.clone()
    clean_padding_(out, length, len_dim, mask_value)
    return out
class PageRank(BaseRanking): def __init__(self, damping_factor: float=0.85, solver: str='piteration', n_iter: int=10, tol: float=1e-06): super(PageRank, self).__init__() check_damping_factor(damping_factor) self.damping_factor = damping_factor self.solver = solver self.n_iter...
def test_branch_test_case_is_minimizing_function(executor_mock):
    """Branch-distance fitness must be a minimizing (not maximizing) function."""
    fitness = ff.BranchDistanceTestCaseFitnessFunction(executor_mock, 0)
    assert not fitness.is_maximisation_function()
def main(): print('[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse buttons to attract/repel. Press R to reset.') res = (512, 512) window = ti.ui.Window('Taichi MLS-MPM-128', res=res, vsync=True) canvas = window.get_canvas() radius = 0.003 reset() gravity[None] = [0, (- 1)...
class LazyUtilityCode(UtilityCodeBase):
    """Utility code produced on demand by a callback.

    The callback receives the global state's root writer and must return the
    concrete utility code, which is then registered for emission.
    """
    __name__ = '<lazy>'
    requires = None

    def __init__(self, callback):
        self.callback = callback

    def put_code(self, globalstate):
        generated = self.callback(globalstate.rootwriter)
        globalstate.use_utility_code(generated)
def output_stream(filename=None):
    """Yield a writable text stream: stdout when no filename, else the opened file.

    Generator (presumably wrapped with @contextmanager at the call site —
    confirm); the file branch closes the file when the generator finishes.
    """
    if filename is not None:
        with open(filename, 'w', encoding='utf-8') as fout:
            yield fout
        return
    yield sys.stdout
def test_seq_translation(): translate_op = SequenceTranslation(max_shift=16) base_x = torch.tensor([(- 1), 0, 1, 2, (- 1)]) shift = 0 rot_x = translate_op(base_x, shift) assert torch.all((rot_x == base_x)), f'{shift}: {base_x} --> {out_x} != {rot_x}' shift = 3 rot_x = translate_op(base_x, sh...
def test_check_input2():
    """Passing a None model must raise a TypeError with the guidance message."""
    expected = ('Please check you are using the right model object,'
                + ' or the right order of the attributes!')
    with pytest.raises(TypeError, match=expected):
        trainer = Trainer(dataHandler, None, losses, validation_metrics,
                          save_to_path, yaml_path)
        trainer.train()
class WeberClassPolynomialDatabase(ClassPolynomialDatabase):
    """Database of Weber class polynomials."""

    def __repr__(self):
        return 'Weber class polynomial database'
class MergeIdLists(ModelLayer): def __init__(self, model, input_record, name='merged'): super(MergeIdLists, self).__init__(model, name, input_record) assert all((schema.equal_schemas(x, IdList) for x in input_record)), 'Inputs to MergeIdLists should all be IdLists.' assert all(((record.items...
def parse_categories(categories):
    """Build the category-name list and the id→index mapping.

    Parameters
    ----------
    categories : sequence of dict
        COCO-style records, each with 'name' and 'id' keys.

    Returns
    -------
    (list, dict)
        category_list[i] is the i-th category's name; id_to_index maps each
        record's 'id' to its position i (on duplicate ids the last wins, as
        in the original loop).
    """
    # enumerate + comprehensions replace the manual range(len(...)) loop.
    category_list = [cat['name'] for cat in categories]
    id_to_index = {cat['id']: i for i, cat in enumerate(categories)}
    return (category_list, id_to_index)
def test_md5(mocker):
    """validate.md5 must return the MD5 hex digest of the (mocked) file contents."""
    audio_bytes = b'audio1234'
    expected = '6dc00d1bac757abe4ea83308dde68aab'
    # Patch open() so any path yields the fixed byte payload.
    mocker.patch('builtins.open', new=mocker.mock_open(read_data=audio_bytes))
    assert validate.md5('test_file_path') == expected
def get_symbol(latex, subscript=None, **kwargs):
    """Create a sympy symbol named `latex`, optionally with a LaTeX subscript.

    With a truthy subscript the name becomes 'latex_{subscript}'; extra
    keyword arguments are forwarded to symbols().
    """
    if subscript:
        name = f'{latex}_{{{subscript}}}'
    else:
        name = f'{latex}'
    return symbols(name, **kwargs)
def load_tf_weights_in_qdqbert(*args, **kwargs):
    """Placeholder: delegates to requires_backends, which presumably raises an
    informative error unless 'pytorch_quantization' and 'torch' are installed
    — confirm against the requires_backends implementation."""
    requires_backends(load_tf_weights_in_qdqbert, ['pytorch_quantization', 'torch'])
def _impl(array, highlevel, behavior, attrs): from awkward._connect.pyarrow import import_pyarrow_compute pc = import_pyarrow_compute('e') with HighLevelContext(behavior=behavior, attrs=attrs) as ctx: layout = ctx.unwrap(array) out = ak._do.recursively_apply(layout, ak.operations.str._get_ufunc_...
.openapi_version('3.0') .operations('success', 'text') def test_conditional(testdir, cli, schema_url, snapshot_cli): if (sys.version_info < (3, 9)): dec1 = '\nauth = schemathesis.auth()\_to(method="GET", path="/text")' dec2 = '\nauth = schemathesis.auth()\_to(method="GET", path="/success")' else...
class QRDQN(DQN): def __init__(self, c: int, h: int, w: int, action_shape: Sequence[int], num_quantiles: int=200, device: Union[(str, int, torch.device)]='cpu') -> None: self.action_num = np.prod(action_shape) super().__init__(c, h, w, [(self.action_num * num_quantiles)], device) self.num_qu...
def main() -> None: args = get_arg_parser().parse_args() processed_folder = Path(args.processed_wikidata) benchmark_folder = Path(args.benchmark_folder) aliases_fpath = (benchmark_folder / 'aliases.jsonl') print(f'Loading aliases from {aliases_fpath}.') qid2alias = defaultdict(list) for item...
def load_data(data_dir: str='./tmp', num_imgs: int=25, random: bool=True) -> tuple: if (not os.path.isdir(data_dir)): os.mkdir(data_dir) trns_norm = trans.ToTensor() mnist_test = dset.MNIST(data_dir, train=False, download=True, transform=trns_norm) if random: loader_test = DataLoader(mni...
def load_conf(path): with open(path) as fin: conf_txt = fin.read() conf = yaml.load(conf_txt, Loader=Loader) assert ('raw_yaml' not in conf) conf['raw_yaml'] = conf_txt if (conf['task'] not in ['identification_classification', 'classification']): raise ValueError("task must be either...
def main(): parser = argparse.ArgumentParser() parser.add_argument('--model_type', default=None, type=str, required=True, help=('Model type selected in the list: ' + ', '.join(MODEL_CLASSES.keys()))) parser.add_argument('--model_name_or_path', default=None, type=str, required=True, help=('Path to pre-traine...
def check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file): seen_in_from = set() seen_src_in_from = set() seen_tgt_in_from = set() from_count = 0 with open(from_src_file, encoding='utf-8') as fsrc, open(from_tgt_file, encoding='utf-8') as ftgt: for (s, t) in zip(fsrc, ftgt): ...
class AmotTestDataset(): def __init__(self, dataset_path=config['dataset_path'], max_input_frame=config['frame_max_input_num'], frame_scale=config['frame_sample_scale'], sequence_list=config['test']['sequence_list'], dataset_name=config['dataset_name']): self.dataset_name = dataset_name self.frame_s...
def precision_batch(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Batch precision = TP / (predicted positives + epsilon), via backend ops.

    Predictions and labels are rounded to {0, 1} before counting; K.epsilon()
    guards against division by zero when nothing is predicted positive.
    """
    true_positives = K.sum(K.round(y_true * y_pred))
    predicted_positives = K.sum(K.round(y_pred))
    return true_positives / (predicted_positives + K.epsilon())
class RadialBasis(MLPFunctional): def __init__(self, vars, rb_vars, radii): vars = to_list(vars) if (not all([isinstance(x, Variable) for x in vars])): raise TypeError rb_vars = to_list(rb_vars) if (not all([isinstance(x, RadialBasisBase) for x in rb_vars])): ...
def copy_image_f32_to_rgba8_np(src: ti.types.ndarray(), dst: ti.types.ndarray(), num_components: ti.template(), gray_scale: ti.template()): for I in ti.grouped(src): (i, j) = (I[0], I[1]) px = ti.Vector([0, 0, 0, 255], dt=u32) if ti.static(gray_scale): c = 0.0 c = src...
class TripletNet_Finetune(nn.Module): def __init__(self, model): super(TripletNet_Finetune, self).__init__() if (model == 'resnet18'): model = models.resnet18(pretrained=False) model.fc = torch.nn.Sequential() self.model = model print(self.model) ...
class TestLinting(unittest.TestCase):
    """Run pylint over every Python file in the project and require a clean exit."""

    def test_linting(self):
        args = ['--output-format', 'parseable', '--rcfile', str(RCFILEPATH),
                *all_python_files()]
        run = Run(args, exit=False)
        self.assertEqual(0, run.linter.msg_status)
_module() class P3Embed(nn.Module): def __init__(self, sample_ratio=0.0625, scale=4, group_size=32, in_channels=3, layers=4, embed_dim=256, subsample='fps', group='ballquery', normalize_dp=False, radius=0.1, feature_type='dp_df', relative_xyz=True, norm_args={'norm': 'bn1d'}, act_args={'act': 'relu'}, conv_args={'o...
def readme(c): test_path = Path('tests/readme_test') if (test_path.exists() and test_path.is_dir()): shutil.rmtree(test_path) cwd = os.getcwd() os.makedirs(test_path, exist_ok=True) shutil.copy('README.md', (test_path / 'README.md')) os.chdir(test_path) c.run('rundoc run --single-ses...
def create_table(conn, create_table_sql):
    """Execute a CREATE TABLE statement on `conn`.

    Database errors are printed rather than raised (best-effort setup helper).
    `Error` is presumably the sqlite3 (or DB-API) exception class imported at
    module level — confirm.
    """
    try:
        cursor = conn.cursor()
        cursor.execute(create_table_sql)
    except Error as e:
        print(e)
.parametrize('observation_shape', [(100,)]) .parametrize('action_size', [2]) def test_mean_q_function_factory(observation_shape: Sequence[int], action_size: int) -> None: factory = MeanQFunctionFactory() assert (factory.get_type() == 'mean') encoder_with_action = _create_encoder_with_action(observation_shap...
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], exclude=[vk_on_mac], debug=True) def test_print_matrix_string_format(): x = ti.Matrix.field(2, 3, dtype=ti.f32, shape=()) y = ti.Vector.field(3, dtype=ti.f32, shape=3) def func(k: ti.f32): x[None][(0, 0)] = (- 1.0) y[2] += 1.0 print('hel...
class Map2D():
    """Thin wrapper around a 2-D array with width/height bookkeeping."""

    def __init__(self, data):
        # w = number of columns, h = number of rows (taken from the raw input).
        self.data = np.asarray(data)
        self.h = len(data)
        self.w = len(data[0])

    def __getitem__(self, item):
        return self.data[item]

    def replace(self, x, y, a):
        # NOTE(review): indexes data[x, y], i.e. x addresses the ROW axis —
        # confirm callers use (row, col) order here.
        self.data[x, y] = a
def _kaplan_meier_reference(times, censored): dtype = [('time', float), ('censored', int)] data = np.array([(t, d) for (t, d) in zip(times, censored)], dtype=dtype) data = np.sort(data, order=('time', 'censored')) times = data['time'] died = np.logical_not(data['censored']) m = times.size n ...