code
stringlengths
101
5.91M
def test_single_objective_max_loss_not_set():
    """get_descent_vector must raise TypeError when max_loss was never provided."""
    with pytest.raises(TypeError):
        cdv = SingleObjectiveCDV(normalized=True)
        cdv.get_descent_vector([loss_1])
# NOTE(review): the decorator head was lost in extraction — presumably
# @app.route('/_check_chat_valid/', methods=['GET']); confirm against the
# original module.
('/_check_chat_valid/', methods=['GET'])
def is_chat_valid():
    """Report whether the current user's chat session is valid.

    Returns JSON ``{valid: true}``, or ``{valid: false, message: ...}``
    with a user-facing message fetched from the backend.
    """
    backend = get_backend()
    if backend.is_chat_valid(userid()):
        return jsonify(valid=True)
    else:
        return jsonify(valid=False, message=backend.get_user_message(userid()))
class KvretEvaluator(GenericEvaluator): def __init__(self, reader): super().__init__(reader) (self.entities_flat, self.entitiy_to_slot_dict) = self.get_entities(self.reader.ontology_path) self.informable_slots = self.reader.otlg.informable_slots self.requestable_slots = self.reader.o...
class timeout(): def __init__(self, seconds=1, error_message='Timeout'): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) def __enter__(self): signal.signal(signal.SIGALRM, self.handle_ti...
def get_excess_err(beta_hat, compute_err_func, data):
    """Return how much ``beta_hat``'s error exceeds the optimal error.

    ``data`` is an ``(X, Y, opt_err)`` triple; ``compute_err_func(X, Y, beta)``
    evaluates the error of a candidate ``beta``.
    """
    features, targets, optimal_err = data
    candidate_err = compute_err_func(features, targets, beta_hat)
    return candidate_err - optimal_err
class MosesDetokenizer(TokenizerI): IsAlnum = text_type(''.join(perluniprops.chars('IsAlnum'))) IsAlpha = text_type(''.join(perluniprops.chars('IsAlpha'))) IsSc = text_type(''.join(perluniprops.chars('IsSc'))) AGGRESSIVE_HYPHEN_SPLIT = (' \\\\-\\ ', '-') ONE_SPACE = (re.compile(' {2,}'), ' ') UN...
_module() class MultiScaleFlipAug(): def __init__(self, transforms, img_scale=None, scale_factor=None, flip=False, flip_direction='horizontal'): self.transforms = Compose(transforms) assert ((img_scale is None) ^ (scale_factor is None)), 'Must have but only one variable can be set' if (img_s...
class TFNextSentencePredictorOutput(ModelOutput):
    """Output container for a TF next-sentence-prediction model."""

    # Loss tensor; None when it was not computed (presumably when no labels
    # were supplied — confirm against the model's call site).
    loss: Optional[tf.Tensor] = None
    # Prediction scores for the next-sentence task.
    logits: tf.Tensor = None
    # Per-layer hidden states; populated only when requested.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Per-layer attention weights; populated only when requested.
    attentions: Optional[Tuple[tf.Tensor]] = None
def main(): parser = argparse.ArgumentParser() parser.add_argument('--input', default='', type=str, required=True, help='File path to a text file to be read') parser.add_argument('--output', default='', type=str, required=True, help='File path to a JSON file to be output') parser.add_argument('--lang', ...
def get_env_params():
    """Assemble the CFSanityEnv class + parameter spec used by the launcher."""
    env_params = d(
        dt=DT,
        step_dt=STEP_DT,
        ros_prefix='cf/0/',
        lag=LAG,
        use_random_goal=True,
        num_latent=NUM_LATENT_CLASSES,
        obs_hist_len=OBS_HISTORY_LENGTH,
        act_hist_len=ACT_HISTORY_LENGTH,
        horizon=HORIZON,
    )
    return d(cls=CFSanityEnv, params=env_params)
def check_condition(state, condition): for (k, v) in condition.items(): if isinstance(v, (str, int)): if (not (state[k] == v)): return False elif isinstance(v, list): if (not (state[k] in v)): return False else: raise TypeEr...
def set_style():
    """Configure seaborn/matplotlib for paper-style serif figures."""
    sns.set_context('paper')
    sns.set(font='serif')
    sns.set_style('white', {'font.family': 'serif',
                            'font.serif': ['Times', 'Palatino', 'serif']})
    import matplotlib as mpl
    # Fonttype 42 embeds TrueType fonts so PDF/PS text stays editable.
    mpl.rcParams.update({'pdf.fonttype': 42, 'ps.fonttype': 42})
.parametrize('pack', MapieDefaultEstimators()) def test_none_estimator(pack: Tuple[(BaseEstimator, BaseEstimator)]) -> None: (MapieEstimator, DefaultEstimator) = pack mapie_estimator = MapieEstimator(estimator=None) mapie_estimator.fit(X_toy, y_toy) if isinstance(mapie_estimator, MapieClassifier): ...
def de_vectorize_ptr(vec_cpu, rev_vocab, memory, post_process, return_tokens=False): tokens = [] for j in range(len(vec_cpu)): token_id = int(vec_cpu[j]) if ((j == 0) and (token_id == rev_vocab.start_id)): continue if ((token_id == rev_vocab.eos_id) or (token_id == rev_vocab....
def compute_fid(opts, max_real, num_gen): detector_url = ' detector_kwargs = dict(return_features=True) (mu_real, sigma_real) = metric_utils.compute_feature_stats_for_dataset(opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs, rel_lo=0, rel_hi=0, capture_mean_cov=True, max_items=max_real)...
def get_imagenet_train_test_ds(DATA_DIR=IMAGENET_ROOT_DIR): (train_transform, test_transform) = imagenet_transformations() traindir = os.path.join(DATA_DIR, 'train') valdir = os.path.join(DATA_DIR, 'val') ds_train = ImageFolder(traindir, transform=train_transform) ds_test = ImageFolder(valdir, trans...
.torch def test_train_sasrec_with_invalid_loss_type(item_user_sequential_dataset, train_sasrec_loader): with pytest.raises(ValueError): trainer = L.Trainer(max_epochs=1) model = SasRec(tensor_schema=item_user_sequential_dataset._tensor_schema, max_seq_len=5, hidden_size=64) model._loss_type ...
def test_toarrow_NumpyArray_1():
    """A flat float64 NumpyArray converts to an Arrow array with equal values."""
    values = [0.0, 1.1, 2.2, 3.3, 4.4, 5.5]
    layout = ak.contents.NumpyArray(np.array(values))
    converted = layout.to_arrow()
    assert isinstance(converted, pyarrow.lib.Array)
    assert converted.to_pylist() == values
def register_Ns3LteRrcSapLogicalChannelConfig_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::LteRrcSap::LogicalChannelConfig const &', 'arg0')]) cls.add_instance_attribute('bucketSizeDurationMs', 'uint16_t', is_const=False) cls.add_instance_attribute('logicalChannelG...
class TestKPIObject(unittest.TestCase): def test_default(self): self.assertTrue(default_kpi.weights_memory, np.inf) self.assertTrue(default_kpi.activation_memory, np.inf) self.assertTrue(default_kpi.total_memory, np.inf) self.assertTrue(default_kpi.bops, np.inf) self.assertTr...
class ResnetUtilsTest(tf.test.TestCase): def testSubsampleThreeByThree(self): x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1]) x = resnet_utils.subsample(x, 2) expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1]) with self.test_session(): self.assertAllClo...
def register_Ns3BaseStationNetDevice_methods(root_module, cls): cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) cls.add_constructor([]) cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::WimaxPhy >', 'phy')]) cls.add_constructor([param('ns3::Ptr< ns3::Node ...
def pytest_addoption(parser): parser.addoption('--skip-cpu-blas', action='store_true', help='Skip tests that are slow wthout cpu blas.') parser.addoption('--gpu', action='store_true', help='Run tests using gpu.') parser.addoption('--gpu-only', action='store_true', help='Run tests using gpu, and skip CPU tes...
def get_group_gn(dim, dim_per_gp, num_groups): assert ((dim_per_gp == (- 1)) or (num_groups == (- 1))), 'GroupNorm: can only specify G or C/G.' if (dim_per_gp > 0): assert ((dim % dim_per_gp) == 0), 'dim: {}, dim_per_gp: {}'.format(dim, dim_per_gp) group_gn = (dim // dim_per_gp) else: ...
.parametrize('ctx, solver_name', ctxs) .parametrize('decay', [0.0001]) .parametrize('lr', [0.1, 0.001]) .parametrize('momentum', [0.9, 0.5]) .parametrize('seed', [313]) def test_nesterov(seed, lr, momentum, decay, ctx, solver_name): rng = np.random.RandomState(seed) solver_tester(rng, S.Nesterov, RefNesterov, [...
def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray: nb_samples = len(samples_idx) samples_to_remove = (nb_samples % batch_size) if (samples_to_remove != 0): samples_idx = samples_idx[:(- samples_to_remove)] sections_split = (nb_samples // batch_size) batch_id...
class DataTrainingArguments(): lang: str = field(default=None, metadata={'help': 'Language id for multilingual model.'}) data_dir: str = field(default=None, metadata={'help': 'The directory for saving the NaturalInstructions train/dev/test splits.'}) task_dir: str = field(default=None, metadata={'help': 'Th...
class _GridSample3dForward(torch.autograd.Function): def forward(ctx, input, grid, padding_mode=0, align_corners=True): assert (input.ndim == 5) assert (grid.ndim == 5) assert (input.shape[0] == grid.shape[0]) assert (grid.shape[4] == 3) output = torch.nn.functional.grid_samp...
class BertSelfattLayer(nn.Module): def __init__(self, config): super(BertSelfattLayer, self).__init__() self.self = BertAttention(config) self.output = BertAttOutput(config) def forward(self, input_tensor, attention_mask): self_output = self.self(input_tensor, input_tensor, atten...
def fpr_evaluate(pred, answers): max_res = (0, 0, 0) for ans in answers: hit = len((pred & ans)) if (hit == 0): res = (0, 0, 0) else: p = (hit / len(pred)) r = (hit / len(ans)) f = ((2 * (p * r)) / (p + r)) res = (f, p, r) ...
def main(): args = parse_args() random.seed(args.seed) infos = [] filenames = list(args.root_audio.rglob('*.wav')) for filename in filenames: frame_dir = ((args.root_frame / filename.parent.name) / filename.stem) n_frames = len(list(frame_dir.rglob('*.jpg'))) label = filename...
def main(): parser = argparse.ArgumentParser(description='text2vec cli') parser.add_argument('--input_file', type=str, help='input file path, text file, required', required=True) parser.add_argument('--output_file', type=str, default='text_embs.csv', help='output file path, output csv file') parser.add_...
class MoCo_augment(object): def __init__(self, opt): self.opt = opt augmentation = [transforms.RandomResizedCrop((opt.imgH, opt.imgW), scale=(0.2, 1.0), interpolation=PIL.Image.BICUBIC), transforms.RandomGrayscale(p=0.2), transforms.ColorJitter(0.4, 0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(),...
def test_top_level_public_api(): assert (dir(pyhf) == ['Model', 'PatchSet', 'Workspace', '__version__', 'compat', 'default_backend', 'exceptions', 'get_backend', 'infer', 'interpolators', 'modifiers', 'optimizer', 'parameters', 'patchset', 'pdf', 'probability', 'schema', 'set_backend', 'simplemodels', 'tensor', 'te...
def format_z3val(val): if isinstance(val, z3.BitVecNumRef): w = val.size() u = val.as_long() s = val.as_signed_long() if (u == s): return '0x{1:0{0}x} ({1})'.format(((w + 3) / 4), u) return '0x{1:0{0}x} ({1}, {2})'.format(((w + 3) / 4), u, s) if isinstance(val...
class T5ForConditionalGeneration(metaclass=DummyObject):
    """Import-time placeholder used when the 'torch' backend is unavailable.

    Instantiating it calls ``requires_backends``, which presumably raises an
    informative error pointing at the missing dependency — confirm against
    the utils module that defines it.
    """

    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def bilinear(input1, input2, weight, bias=None):
    """Apply a bilinear transform: out = input1^T @ weight @ input2 (+ bias).

    Thin wrapper over ``torch.bilinear``; ``weight`` has shape
    (out_features, in1_features, in2_features).
    """
    result = torch.bilinear(input1, input2, weight, bias)
    return result
def add_valid_summary(name, value):
    """Register a streaming-mean scalar summary in the 'valid_summary' collection.

    Returns the op that updates the running mean; callers run it each
    validation step.
    """
    mean_value, update_op = tf.metrics.mean(value)
    tf.summary.scalar(name, mean_value, collections=['valid_summary'])
    return update_op
_function_dispatch(_center_dispatcher) def center(a, width, fillchar=' '): a_arr = numpy.asarray(a) width_arr = numpy.asarray(width) size = long(numpy.max(width_arr.flat)) if numpy.issubdtype(a_arr.dtype, numpy.string_): fillchar = asbytes(fillchar) return _vec_string(a_arr, (a_arr.dtype.typ...
class HDF5Data(HDF5BaseData): def write(self, fd, group, name, cache=None): dgroup = fd.create_group(group, name) fd.create_array(dgroup, 'type', nm.array(self.get_type())) self.write_data(fd, dgroup, cache) return dgroup def write_data(self, fd, group): raise Exception('...
def token(token_id: int, token_type_id: TokenTypeIds) -> Token:
    """Build a Token from its id and type id (thin factory helper)."""
    new_token = Token(token_id, token_type_id)
    return new_token
class VGGEncoder(ImageEncoder):
    """Image encoder whose backbone is a VGG-style feature stack."""

    def __init__(self, config: ImageEncoderConfig):
        super().__init__(config)

    def _forward_backbone(self, x: torch.Tensor):
        # Run every VGG feature layer except the last one.
        trunk = self.backbone.features[:-1]
        return trunk(x)
_function_dispatch(_nanmin_dispatcher) def nanmin(a, axis=None, out=None, keepdims=np._NoValue): kwargs = {} if (keepdims is not np._NoValue): kwargs['keepdims'] = keepdims if ((type(a) is np.ndarray) and (a.dtype != np.object_)): res = np.fmin.reduce(a, axis=axis, out=out, **kwargs) ...
_spec_function('boolq') def get_boolq_spec(only_contrast=False) -> RunSpec: scenario_spec = ScenarioSpec(class_name='helm.benchmark.scenarios.boolq_scenario.BoolQScenario', args={'only_contrast': only_contrast}) adapter_spec = get_generation_adapter_spec(input_noun='Passage', output_noun='Answer') return Ru...
class Fp32LayerNorm(nn.LayerNorm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, input): output = F.layer_norm(input.float(), self.normalized_shape, (self.weight.float() if (self.weight is not None) else None), (self.bias.float() if (self.bias is not No...
def tril(A, k=0, format=None): coo_sparse = (coo_array if isinstance(A, sparray) else coo_matrix) A = coo_sparse(A, copy=False) mask = ((A.row + k) >= A.col) row = A.row[mask] col = A.col[mask] data = A.data[mask] new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype) ret...
def _convert_dataset(dataset_split, dataset_dir, dataset_label_dir): img_names = tf.gfile.Glob(os.path.join(dataset_dir, '*.jpg')) random.shuffle(img_names) seg_names = [] for f in img_names: basename = os.path.basename(f).split('.')[0] seg = os.path.join(dataset_label_dir, (basename + '...
class Log(): def __init__(self, dirName, id=0): self.dirName = dirName self.id = id self.dataList = [] self.batch_size = 100 os.makedirs(dirName, exist_ok=True) def save(self, tactileColorL, tactileColorR, tactileDepthL, tactileDepthR, visionColor, visionDepth, gripForce,...
class DatasetMapper_copypaste(DatasetMapper_d2): def __init__(self, is_train: bool, *, augmentations: List[Union[(T.Augmentation, T.Transform)]], augmentations_d2: List[Union[(T.Augmentation, T.Transform)]], augmentations_aa: List[Union[(T.Augmentation, T.Transform)]], augmentations_lsj: List[Union[(T.Augmentation,...
class RotatedPaddleSetBBreakoutWorld(RandomRotatedPaddleBreakoutWorld):
    """Deprecated 'set B' variant of the rotated-paddle Breakout world."""

    # NOTE: this warning fires once, at class-definition (import) time,
    # not when the environment is instantiated.
    warnings.warn('This env. parameter was dropped and should no longer be used.', DeprecationWarning)
    # Rotation sampled from [-25, 25] (presumably degrees — units not shown here).
    rotation_range_start = (- 25)
    rotation_range_end = 25
class MAML(BaseLearner): def __init__(self, model, lr, first_order=False, allow_unused=None, allow_nograd=False): super(MAML, self).__init__() self.module = model self.lr = lr self.first_order = first_order self.allow_nograd = allow_nograd if (allow_unused is None): ...
_utils.test(arch=[ti.opengl, ti.vulkan]) def test_aot_bind_id(): density = ti.field(dtype=ti.f32, shape=(8, 8)) density1 = ti.ndarray(dtype=ti.math.ivec2, shape=(8, 8)) def init(x: ti.f32, density1: ti.types.ndarray(ndim=2)): for (i, j) in density1: density[(i, j)] = x densit...
def get_cache_base(suffix=None): if (suffix is None): suffix = '.distlib' if ((os.name == 'nt') and ('LOCALAPPDATA' in os.environ)): result = os.path.expandvars('$localappdata') else: result = os.path.expanduser('~') if os.path.isdir(result): usable = os.access(result, os...
class RegNetEmbeddings(nn.Module): def __init__(self, config: RegNetConfig): super().__init__() self.embedder = RegNetConvLayer(config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act) self.num_channels = config.num_channels def forward(self, pix...
class Net(nn.Module): def __init__(self, opt): super().__init__() self.sub_mean = ops.MeanShift(255) self.add_mean = ops.MeanShift(255, sign=1) head = [ops.DownBlock(opt.scale), nn.Conv2d((3 * (opt.scale ** 2)), opt.num_channels, 3, 1, 1)] body = list() for _ in range...
class bunch(dict): def __init__(self, **kw): for (name, value) in kw.items(): setattr(self, name, value) def __setattr__(self, name, value): self[name] = value def __getattr__(self, name): try: return self[name] except KeyError: raise Attri...
def conv(x, channels, kernel=4, stride=2, pad=0, pad_type='zero', use_bias=True, sn=False, scope='conv_0'): with tf.variable_scope(scope): if (pad > 0): if (((kernel - stride) % 2) == 0): pad_top = pad pad_bottom = pad pad_left = pad ...
def get_batches(directory, lang, prefix='all_avg_pool'): print(f'Finding in {directory}/{prefix}.{lang}*') files = glob.glob(f'{directory}/{prefix}.{lang}*') emb_files = [] txt_files = [] for emb_fi in files: emb_files.append(emb_fi) txt_fi = emb_fi.replace(prefix, 'sentences') ...
def preprocess_image(image, output_height, output_width, is_training=False):
    """Dispatch to train- or eval-time preprocessing for a single image."""
    preprocess_fn = preprocess_for_train if is_training else preprocess_for_eval
    return preprocess_fn(image, output_height, output_width)
class A000312(SloaneSequence): def __init__(self): SloaneSequence.__init__(self, offset=0) def _repr_(self): return 'Number of labeled mappings from n points to themselves (endofunctions): n^n.' def _eval(self, n): if (n == 0): return ZZ.one() else: re...
class GroupedOption(click.Option):
    """A click.Option that records the ParameterGroup it belongs to.

    Presumably the group back-reference lets help rendering or parsing treat
    options from the same group together — confirm against ParameterGroup.
    """

    def __init__(self, *args: Any, group: ParameterGroup, **kwargs: Any):
        super().__init__(*args, **kwargs)
        # Keep a back-reference to the owning group.
        self.group = group
def test_deleted_outputs():
    """Forward/backward must still work after an auxiliary output is deleted."""
    rng = np.random.RandomState(313)
    x = nn.Variable((2, 3, 4, 5))
    out, mean, var = PF.batch_normalization(x, output_stat=True)
    # Drop one of the stat outputs; the graph must tolerate the deletion.
    del mean
    x.d = rng.randn(*x.shape).astype(np.float32)
    out.forward()
    out.backward()
class SymforceGenCodegenTest(TestCase): def generate_cam_example_function(self, output_dir: Path) -> None: def pixel_to_ray_and_back(pixel: sf.Vector2, cam: sf.LinearCameraCal, epsilon: sf.Scalar=0) -> sf.Vector2: (camera_ray, _) = cam.camera_ray_from_pixel(pixel, epsilon) (reproject...
def create_optimizers(nets, args): (net_encoder, net_decoder, crit) = nets optimizer_encoder = torch.optim.SGD(group_weight(net_encoder), lr=args.lr_encoder, momentum=args.beta1, weight_decay=args.weight_decay) optimizer_decoder = torch.optim.SGD(group_weight(net_decoder), lr=args.lr_decoder, momentum=args....
def envs():
    """Build a fixed suite of 4x4 grid worlds (S=start, F=frozen, H=hole, G=goal)."""
    grid_maps = (
        ['SFFF', 'FHFH', 'FFFH', 'HFFG'],
        ['SFFF', 'FFFH', 'FHFH', 'HFFG'],
        ['SFFF', 'FFFH', 'FHFH', 'FFFG'],
        ['SFFF', 'FFFF', 'FFFF', 'FFFF'],
        ['SHFF', 'HHFF', 'FFFF', 'FFFF'],
    )
    return [GarageEnv(GridWorldEnv(desc=grid)) for grid in grid_maps]
class BiasLayer(nn.Module): def __init__(self, device): super(BiasLayer, self).__init__() self.beta = nn.Parameter(ones(1, requires_grad=True, device=device)) self.gamma = nn.Parameter(zeros(1, requires_grad=True, device=device)) def forward(self, x): return ((self.beta * x) + se...
class NFSPPolicies(OSPolicy): def __init__(self, game, nfsp_policies: List[TabularPolicy]): policies = {} player_ids = list(range(game.num_players())) for policy in nfsp_policies: policies.update(dict.fromkeys(policy.player_ids, policy)) super(NFSPPolicies, self).__init__...
def calculate_val_far(threshold, dist, actual_issame): predict_issame = np.less(dist, threshold) true_accept = np.sum(np.logical_and(predict_issame, actual_issame)) false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame))) n_same = np.sum(actual_issame) n_diff = np.sum(np....
def get_sample_level_sharded_derangements(*args, num_shards=10, **kwargs): (all_features, true_ids, dataset_size, subset_size, nclasses, class_matches) = _get_sample_level_derangements(*args, **kwargs) res = {'unsharded': (all_features, true_ids, dataset_size, subset_size, nclasses, class_matches)} if (num_...
def write_xml(xml_path, tags, image_shape): (h, w, c) = image_shape root = ET.Element('annotation') tree = ET.ElementTree(root) size = ET.Element('size') ET.SubElement(size, 'width').text = str(w) ET.SubElement(size, 'height').text = str(h) ET.SubElement(size, 'depth').text = str(c) root...
def calc_blob_blob_forces_pycuda(r_vectors, *args, **kwargs): number_of_blobs = np.int32(len(r_vectors)) (threads_per_block, num_blocks) = set_number_of_threads_and_blocks(number_of_blobs) L = kwargs.get('periodic_length') eps = kwargs.get('repulsion_strength') b = kwargs.get('debye_length') blo...
class TilingStrategyFullGrid(TilingStrategy): def __init__(self, window_size: Tuple[(int, int)], image_shape: Tuple[(int, int)], overlap: Optional[Tuple[(int, int)]]=(0, 0), **kwargs): if isinstance(overlap, str): overlap = overlap.replace('$', '') overlap = eval(overlap) sel...
def main(): args = get_args() cfg = gorilla.Config.fromfile(args.config) if args.work_dir: cfg.work_dir = args.work_dir else: cfg.work_dir = osp.join('./exps', osp.splitext(osp.basename(args.config))[0]) os.makedirs(osp.abspath(cfg.work_dir), exist_ok=True) timestamp = time.strft...
def _input_to_dataloader(X, y=None, offset=None, max_examples=None, *args, **kwargs): if (offset is None): offset = 0 if (max_examples is not None): max_examples = len(X) X_slice = X[offset:max_examples] y_slice = (y[offset:max_examples] if (y is not None) else None) if isinstance(X,...
class Config(object): def __init__(self, config_dict): self.config = config_dict def __getattr__(self, key): if (key in self.config): return self.config[key] else: raise AttributeError(key) def __getitem__(self, key): return self.config[key] def __...
.torch .parametrize('negative_sampling_strategy, negatives_sharing', [('global_uniform', False), ('global_uniform', True), ('inbatch', False), ('inbatch', True)]) def test_different_sampling_strategies(item_user_sequential_dataset, train_loader, val_loader, negative_sampling_strategy, negatives_sharing): trainer = ...
def test_ClusterNodeGenerator_init(): G = create_stellargraph() with pytest.raises(ValueError): generator = ClusterNodeGenerator(G, clusters=0) with pytest.raises(TypeError): generator = ClusterNodeGenerator(G, clusters=0.5) with pytest.raises(ValueError): generator = ClusterNode...
def get_latest_path(path: str) -> str: if (not os.path.exists(path)): return None files = os.listdir(path) dirs = [f for f in files if os.path.isdir(os.path.join(path, f))] targets = [] for d in dirs: if is_date_string(d): targets.append(d) if (not targets): r...
def TreeRep_no_recursion(d): Main.d = d (Main.G, Main.dist) = Main.eval('TreeRep.metric_to_structure_no_recursion(d)') edges = Main.eval('collect(edges(G))') W = np.zeros_like(Main.dist) for edge in edges: src = (edge.src - 1) dst = (edge.dst - 1) W[(src, dst)] = Main.dist[(s...
class vgg(nn.Module): def __init__(self, vgg_model): super(vgg, self).__init__() self.vgg_layers = vgg_model.features self.layer_name_mapping = {'1': 'relu1_1', '3': 'relu1_2', '6': 'relu2_1', '8': 'relu2_2'} def forward(self, x): output = [] for (name, module) in self.vg...
class EditDistanceOp(NativeOpGenBase): in_info = ({'name': 'a', 'ndim': 2, 'shape': (None, None), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'a_len', 'ndim': 1, 'shape': (None,), 'dtype': 'int32', 'need_contiguous': True, 'gradient': 'disconnected'}, {'name': 'b', 'ndim': 2, 's...
def register_ddp_comm_hook(comm_hook_type: DDPCommHookType, model: DistributedDataParallel, state=None):
    """Install the communication hook described by ``comm_hook_type`` on ``model``.

    ``comm_hook_type.value`` is expected to be a callable that performs the
    registration; ``state`` is forwarded to it unchanged.
    """
    hook_installer = comm_hook_type.value
    hook_installer(model=model, state=state)
class TensorflowExporter(): def __init__(self, nnp, batch_size, ir_version, model_format='TF_PB'): self._nnp = nnp self._batch_size = batch_size self._ir_version = ir_version self._model_format = model_format self.check_nnp_variable_name() def check_nnp_variable_name(self...
_utils.test(arch=get_host_arch_list()) def test_unpack_from_tuple(): a = ti.field(ti.f32, ()) b = ti.field(ti.f32, ()) c = ti.field(ti.f32, ()) list = [2, 3, 4] def func(): (a[None], b[None], c[None]) = list func() assert (a[None] == 2) assert (b[None] == 3) assert (c[None] =...
class TestEQPDirectFactorization(TestCase): def test_nocedal_example(self): H = csc_matrix([[6, 2, 1], [2, 5, 2], [1, 2, 4]]) A = csc_matrix([[1, 0, 1], [0, 1, 1]]) c = np.array([(- 8), (- 3), (- 3)]) b = (- np.array([3, 0])) (x, lagrange_multipliers) = eqp_kktfact(H, c, A, b...
def require_backend(backends):
    """Decorator factory: skip the test unless the active BACKEND is listed.

    Returns a pass-through decorator when BACKEND matches, otherwise a
    ``unittest.skip`` decorator explaining the requirement.
    """
    if BACKEND in backends:
        return lambda func: func  # matching backend: no-op decorator
    return unittest.skip('Test requires backend to be one of %s' % backends)
class PadOrTruncate(): def __init__(self, max_length, fill=0): self.max_length = max_length self.fill = fill def __call__(self, doc): current = len(doc) trimmed = doc[:self.max_length] padding = ([self.fill] * (self.max_length - current)) return np.concatenate([tr...
class Inceptionv4Model(model.Model): def __init__(self): super(Inceptionv4Model, self).__init__('inception4', 299, 32, 0.005) def add_inference(self, cnn): def inception_v4_a(cnn): cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 96, 1, 1)], [('conv', 96, 1, 1)], [('conv', 64, 1, 1),...
def main(): args = parse_args() config = Config.fromfile(args.cfg) random_seed_setting(config) if (args.cfg_options is not None): config.merge_from_dict(args.cfg_options) (logger, train_log_dir) = create_logger(config, args.cfg, 'train') writer_dict = {'writer': SummaryWriter(train_log_d...
class ASRv2(nn.Module): supports_beam_search = True def set_defaults(self): self.defaults = {'feat_dim': 43, 'emb_dim': 300, 'enc_dim': 320, 'enc_layers': '1_1_2_2_1_1', 'dec_dim': 320, 'proj_dim': 300, 'proj_activ': 'tanh', 'dec_type': 'gru', 'dec_init': 'mean_ctx', 'dec_init_size': None, 'dec_init_act...
def get_random_start(video_row, sample_length):
    """Pick a uniform random start within [Start, End - sample_length].

    ``video_row`` must carry 'Start'/'End' values parseable by
    ``pd.to_timedelta``; the result is passed through ``format_timedelta``.
    """
    seg_start = pd.to_timedelta(video_row['Start'])
    seg_end = pd.to_timedelta(video_row['End'])
    latest_start = seg_end - sample_length
    frac = random.uniform(0, 1)
    chosen = seg_start + frac * (latest_start - seg_start)
    return format_timedelta(chosen)
def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor:
    """Permute only the last ``len(inds)`` dimensions of ``tensor``.

    ``inds`` indexes into the trailing dimensions (e.g. ``[1, 0]`` swaps the
    last two axes); all leading (batch) dimensions stay in place.
    """
    n_moved = len(inds)
    n_kept = tensor.dim() - n_moved
    leading = list(range(n_kept))
    trailing = [n_kept + i for i in inds]
    return tensor.permute(leading + trailing)
def unet_decoder(x, keep_prob, phase, img_channels, truth_channels, layers=3, conv_times=3, features_root=16, filter_size=3, pool_size=2, summaries=True): logging.info('Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}'.format(layers=layers, features=f...
class S3Interface(ObjectStoreInterface): def __init__(self, bucket_name: str): self.auth = compute.AWSAuthentication() self.requester_pays = False self.bucket_name = bucket_name self._cached_s3_clients = {} def provider(self): return 'aws' def path(self): retu...
def chmod(path, mode):
    """Best-effort chmod: try to change ``path``'s mode, logging failures.

    Errors are logged at debug level and swallowed on purpose, so callers
    can treat permission changes as optional (e.g. on filesystems that
    reject them).
    """
    log.debug('changing mode of %s to %o', path, mode)
    try:
        _chmod(path, mode)
    except OSError as e:  # os.error is only a legacy alias of OSError
        log.debug('chmod failed: %s', e)
def add_all_arguments(parser, train): parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models') parser.add_argument('--seed', type=int, default=43, help='random seed') parser.add_argument('--gpu_ids', type=str, default='0', hel...
def find_module(block, keywords):
    """Return the first submodule of ``block`` whose name contains any keyword.

    Raises:
        ValueError: if no submodule name matches; the message lists every
        candidate name to aid debugging.
    """
    submodule_names = []
    for name, module in block.named_modules():
        submodule_names.append(name)
        if any(keyword in name for keyword in keywords):
            return module
    raise ValueError(f'Could not find keywords {keywords} in: {submodule_names}')
def test_eq_statements_4(default_test_case): statements = [MagicMock(st.Statement, ret_val=MagicMock()), MagicMock(st.Statement, ret_val=MagicMock())] default_test_case._statements = statements other = dtc.DefaultTestCase(ModuleTestCluster(0)) other._statements = statements assert default_test_case....
# NOTE(review): the decorator prefix was lost in extraction — presumably
# something like @numba.extending.overload_method(BitMaskedType,
# 'extend_valid'); confirm against the original module.
.overload_method(BitMaskedType, 'extend_valid')
def BitMasked_extend_valid(builder, size):
    """Typing-time overload for ``BitMaskedType.extend_valid``.

    Returns the implementation to compile when the receiver type really is
    a BitMaskedType; otherwise returns None so other overloads may apply.
    """
    if isinstance(builder, BitMaskedType):

        def extend_valid(builder, size):
            # Append ``size`` valid entries, then expose the content buffer.
            for _ in range(size):
                builder.append_valid()
            return builder._content

        return extend_valid
class Hdf5Reader(Reader): def __init__(self, file_path: str, category=defs.KEY_IMAGES) -> None: super().__init__(file_path) self.h5 = None self.category = category def get_subject_entries(self) -> list: nb_subjects = len(self.get_subjects()) return [defs.subject_index_to_...