code
stringlengths
101
5.91M
class Visualizer(): def __init__(self, netG, device, out, num_samples=10, num_columns=None, batch_size=100, range=((- 1), 1)): self.netG = netG self.device = device self.out = out self.num_samples = num_samples if (num_columns is None): self.num_columns = self.net...
class LocalInference(): def __init__(self, domain, backend='numpy', structural_zeros={}, metric='L2', log=False, iters=1000, warm_start=False, marginal_oracle='convex', inner_iters=1): self.domain = domain self.backend = backend self.metric = metric self.log = log self.iters ...
class Shortcut1d(nn.Module):
    """1-D residual shortcut: projects `inp` with a 1x1 conv + batch norm and
    adds the result to `out`, then applies ReLU.

    Args:
        ni: number of input channels of the shortcut branch.
        nf: number of output channels (must match `out`'s channels).
    """

    def __init__(self, ni, nf):
        super().__init__()
        self.act_fn = nn.ReLU(True)
        # `conv` is a project-level helper — presumably a 1x1 Conv1d factory; confirm.
        self.conv = conv(ni, nf, 1)
        self.bn = nn.BatchNorm1d(nf)

    def forward(self, inp, out):
        shortcut = self.bn(self.conv(inp))
        return self.act_fn(out + shortcut)
class DataLoader(): def __init__(self, doc, batch_size, args, pretrain=None, vocab=None, evaluation=False, preprocess_tags=True, bert_tokenizer=None, scheme=None): self.batch_size = batch_size self.args = args self.eval = evaluation self.shuffled = (not self.eval) self.doc = ...
class RteProcessor(DataProcessor): def get_example_from_tensor_dict(self, tensor_dict): return InputExample(tensor_dict['idx'].numpy(), tensor_dict['sentence1'].numpy().decode('utf-8'), tensor_dict['sentence2'].numpy().decode('utf-8'), str(tensor_dict['label'].numpy())) def get_train_examples(self, data...
def make(name, frame_stack, action_repeat, seed): ml1 = metaworld.ML1(name) env = ml1.train_classes[name]() env.seed(seed) task = random.choice(ml1.train_tasks) env.set_task(task) env = RGBArrayAsObservationWrapper(env, ml1, max_path_length=MAX_PATH_LENGTH[name], camera_name=CAMERA[name]) en...
def fft_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, signal_ndim, normalized=False):
    """Backward pass for an FFT op: the input gradient is the inverse FFT of the
    upstream gradient.

    When the forward FFT was unnormalized, `ifft` divides by the signal size, so
    the gradient is rescaled by the product of the transformed dimensions to
    undo that division.

    Fix: dropped the unused local `x0 = inputs[0]` present in the original.

    Args:
        grad_inputs: sequence whose first element is the upstream gradient dy.
        inputs/input_shapes/outputs/output_shapes: unused here; kept for the
            backward-hook signature expected by the framework.
        signal_ndim: number of transformed (signal) dimensions.
        normalized: whether the forward transform was normalized.

    Returns:
        Gradient with respect to the FFT input.
    """
    dy = grad_inputs[0]
    dx = F.ifft(dy, signal_ndim, normalized)
    if not normalized:
        # Product of the signal dimensions (excluding the trailing complex axis).
        n = np.prod(dy.shape[-signal_ndim - 1:-1])
        dx = dx * n
    return dx
def test_download_non_default(): with tempfile.TemporaryDirectory(dir=TEST_WORKING_DIR) as test_dir: stanza.download('en', model_dir=test_dir, processors='ner', package='ontonotes_charlm', verbose=False) assert (sorted(os.listdir(test_dir)) == ['en', 'resources.json']) en_dir = os.path.join(...
@_dispatch
def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None):
    """Multimethod stub for the 2-D inverse Hermitian FFT.

    Only declares which argument is dispatchable for the uarray backend
    machinery; registered backends provide the actual implementation.

    NOTE(review): SOURCE showed the decorator as a bare `_dispatch` token — the
    `@` was evidently lost in extraction; restored here. Confirm against the
    upstream scipy.fft module.
    """
    return (Dispatchable(x, np.ndarray),)
def test_circle_square_class_distribution():
    """The generated labels should contain exactly the requested count per class."""
    size0, size1 = setup_class_sizes()
    _, labels = make_circle_square(size_classes=[size0, size1])
    assert np.sum(labels == 0) == size0
    assert np.sum(labels == 1) == size1
def extract_codebook(model, data_loader, device, config): model.train() data_loader.sampler.set_epoch(0) sample_per_npz = 1000 codebook_feats = [] indexs = [] for (i, (image, caption, index)) in enumerate(data_loader): image = image.to(device, non_blocking=True) alpha = (config['...
def __getattr__(name):
    """Module-level __getattr__ (PEP 562): routes attribute access through the
    scipy deprecation shim so imports from the private `_stats_py` submodule
    keep working (with a deprecation warning) via the public `stats` namespace."""
    return _sub_module_deprecation(sub_package='stats', module='stats', private_modules=['_stats_py'], all=__all__, attribute=name)
def ranges_cmp(a, b):
    """Three-way comparison of two range-like objects by their `start` attribute.

    Returns -1 if a starts first, 1 if b starts first, 0 on equal starts.
    """
    if a.start < b.start:
        return -1
    if a.start > b.start:
        return 1
    return 0
class MultiClassesTpFpTest(tf.test.TestCase): def test_tp_fp(self): num_groundtruth_classes = 3 matching_iou_threshold = 0.5 nms_iou_threshold = 1.0 nms_max_output_boxes = 10000 eval1 = per_image_evaluation.PerImageEvaluation(num_groundtruth_classes, matching_iou_threshold, n...
def add_to_map(gway, g, map_f, style_func_args, popup_features=[]): styles = [] for k in ['weight', 'color', 'opacity', 'fillColor', 'fillOpacity', 'radius']: if (k in style_func_args): if callable(style_func_args[k]): styles += [style_func_args[k](g)] else: ...
class BlipIntermediateOutput(ModelOutput): image_embeds: torch.FloatTensor = None text_embeds: Optional[torch.FloatTensor] = None image_embeds_m: Optional[torch.FloatTensor] = None text_embeds_m: Optional[torch.FloatTensor] = None encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions...
def subsample(dataset, fraction):
    """Return a random Subset with round(len(dataset) * fraction) elements.

    Uses the global numpy RNG (seed externally for reproducibility).
    """
    n_keep = int(np.round(float(len(dataset)) * fraction))
    order = np.arange(len(dataset))
    np.random.shuffle(order)
    return Subset(dataset, order[:n_keep])
def test_invalid_or_composition_inside_two_or(): r = Secret(10) (g1, g2, g3, g4) = make_generators(4) st11 = DLRep((r.value * g1), (r * g1)) st12 = DLRep((2 * g2), (r * g2)) st12.set_simulated() st1 = (st11 | st12) st21 = DLRep((7 * g3), (r * g3)) st21.simluation = True st22 = DLRep(...
class PyObjectSaver(SaverElement): def __init__(self, obj): self._obj = obj def load(self, state): def _load(target, state): if hasattr(target, 'load_state_dict'): target.load_state_dict(state) elif isinstance(target, dict): for (k, v) in s...
def gen_model_input(train_set, user_profile, seq_max_len): train_uid = np.array([line[0] for line in train_set]) train_iid = np.array([line[1] for line in train_set]) train_label = np.array([line[2] for line in train_set]) train_seq = [line[3] for line in train_set] train_hist_len = np.array([line[4...
class SensorAttention(tf.keras.layers.Layer): def __init__(self, n_filters, kernel_size, dilation_rate): super(SensorAttention, self).__init__() self.conv_1 = tf.keras.layers.Conv2D(n_filters, kernel_size=kernel_size, dilation_rate=dilation_rate, padding='same', activation='relu') self.conv_...
def compute_metrics(y_gold, y_pred, average='binary'):
    """Compute accuracy, precision, recall and F1 for a pair of label sequences.

    Args:
        y_gold: ground-truth labels.
        y_pred: predicted labels.
        average: averaging mode forwarded to the sklearn scorers.

    Returns:
        dict with keys 'accuracy', 'precision', 'recall', 'f1'.
    """
    metrics = {'accuracy': accuracy_score(y_gold, y_pred)}
    for name, scorer in (('precision', precision_score),
                         ('recall', recall_score),
                         ('f1', f1_score)):
        metrics[name] = scorer(y_gold, y_pred, average=average)
    return metrics
def find_single_best_rule_success(rule_mapping): best_single_rule_success = 0 for (k, v) in rule_mapping.items(): if (len(v) > best_single_rule_success): best_rule_parts = k best_single_rule_success = len(v) best_rule = ((((best_rule_parts[0] + '_') + best_rule_parts[1]) + '_...
def DataPipeBehindQueues(source_datapipe, protocol, full_stop=False, blocking_request_get=False): if (not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolServer)): raise Exception('Expecting IterDataPipeQueueProtocolServer, got', protocol) source_datapipe = EnsureNonBlockingDataPipe...
class RuleTemplateFactory(RuleFactory): def __init__(self, rules, context): self.rules = rules self.context = context def get_rules(self, map): for rulefactory in self.rules: for rule in rulefactory.get_rules(map): new_defaults = subdomain = None ...
class ConfigParser(): def __init__(self, config, resume=None, modification=None, run_id=None, index=0, overwrite=False): self._config = _update_config(config, modification) self.resume = resume save_dir = Path(self.config['save_dir']) exper_name = self.config['name'] if (run_...
def create_instructions_files(memory_management_agent: Agent, num_files: int, task_id: str, base_filename: str='instructions_') -> None: for i in range(1, (num_files + 1)): content = generate_content(i, task_id, base_filename, num_files) file_name = f'{base_filename}{i}.txt' file_path = get_...
def timeout(seconds=10):
    """Generator-based timeout guard: arms SIGALRM for `seconds`, yields once,
    and always disarms the alarm afterwards.

    NOTE(review): presumably wrapped with @contextlib.contextmanager at the
    call site (the dump may have dropped the decorator) — confirm usage.
    Unix-only: relies on signal.alarm.
    """
    signal.alarm(seconds)
    try:
        yield
    finally:
        # Cancel any pending alarm even if the body raised.
        signal.alarm(0)
def run(keyword, softwares=None): keyword = keyword.lower() if (softwares is None): softwares = ['obj'] output_dir = f'{grabcad_path}/{keyword}' make_dir(output_dir) keyword_id = get_keyword_id(keyword) (model_names, model_images) = get_models(keyword, softwares=softwares) for (model...
def test_enabled_enforce_detection_for_non_facial_input(): black_img = np.zeros([224, 224, 3]) with pytest.raises(ValueError, match='Face could not be detected.'): DeepFace.represent(img_path=black_img) with pytest.raises(ValueError, match='Face could not be detected.'): DeepFace.verify(img1...
def gmres(A, b, verbose=False, convergence='resid', **kwargs): if (convergence == 'resid'): if (scipy_version > 1.0): gmres_func = direct_gmres else: raise Exception("Your version of scipy does not support GMRES with residual convergence. Set convergence='presid', or upgrade...
class ArrayToLoop(NodeTransformer): def __init__(self, ast): self.count = 0 ParentScopeAssigner().visit(ast) self.scope_vars = ScopeVarsDeclarations() self.scope_vars.visit(ast) def visit_Execution_Part_Node(self, node: ast_internal_classes.Execution_Part_Node): newbody =...
def run_aggregator(model_interface, fl_experiment):
    """Run a federated-learning experiment from the aggregator side.

    Blocks in `start_experiment` until the experiment finishes; logs start/stop.
    """
    logger.info('Aggregator has been started.')
    fl_experiment.start_experiment(model_interface)
    logger.info('Aggregator has been stopped.')
def _is_checked_function(item): if (not inspect.isfunction(item)): return False if item.__name__.startswith('_'): return False mod = item.__module__ if ((not mod.startswith('sklearn.')) or mod.endswith('estimator_checks')): return False return True
class GetBlasLapackFuncs(Benchmark): param_names = ['dtype1', 'dtype2', 'dtype1_ord', 'dtype2_ord', 'size'] params = [['b', 'G', 'd'], ['d', 'F', '?'], ['C', 'F'], ['C', 'F'], [10, 100, 1000]] def setup(self, dtype1, dtype2, dtype1_ord, dtype2_ord, size): self.arr1 = np.empty(size, dtype=dtype1, ord...
def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): logger = get_root_logger(cfg.log_level) dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset]) loader_cfg = {**dict(seed=cfg.get('seed'), drop_last=False, dist=distributed, num_gpus=len(c...
def import_yuv(seq_path, h, w, tot_frm, yuv_type='420p', start_frm=0, only_y=True): if (yuv_type == '420p'): (hh, ww) = ((h // 2), (w // 2)) elif (yuv_type == '444p'): (hh, ww) = (h, w) else: raise Exception('yuv_type not supported.') (y_size, u_size, v_size) = ((h * w), (hh * ww...
def get_doesnt_existing_features(data_dict): users = data_dict.get('users', None) items = data_dict.get('items', None) features = [FeatureInfo(column=data_dict['user_col'], feature_hint=FeatureHint.QUERY_ID, feature_type=FeatureType.CATEGORICAL), FeatureInfo(column=data_dict['item_col'], feature_hint=Featur...
class CNNCifar(nn.Module): def __init__(self, args): super(CNNCifar, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(((16 * 5) * 5), 120) self.fc2 = nn.Linear(120, 84) self.f...
def convert_coco_blobs_to_cityscape_blobs(model_dict): for (k, v) in model_dict['blobs'].items(): if ((v.shape[0] == NUM_COCO_CLS) or (v.shape[0] == (4 * NUM_COCO_CLS))): coco_blob = model_dict['blobs'][k] print('Converting COCO blob {} with shape {}'.format(k, coco_blob.shape)) ...
def all_reduce(tensor, op=dist.ReduceOp.SUM, async_op=False):
    """All-reduce `tensor` in place across the process group and return it.

    No-op when running single-process (world size 1).
    """
    if get_world_size() == 1:
        return tensor
    dist.all_reduce(tensor, op=op, async_op=async_op)
    return tensor
def run(dataset_dir): dataset_type = 'png' training_filename = _get_output_filename(dataset_dir, '_train') if (tf.gfile.Exists(training_filename) and tf.gfile.Exists(training_filename)): print('Dataset files already exist. Exiting without re-creating them.') return with tf.device('/cpu:0...
class VGG(nn.Module): def __init__(self, features): super(VGG, self).__init__() self.features = features self.classifier = nn.Sequential(nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Dropout(), nn.Linear(512, 512), nn.ReLU(True), nn.Linear(512, 10)) for m in self.modules(): ...
def plot_similarity_change_vs_original_bias(bias_before, distance_before, distance_after): K = 50 most_similar_idx_before = distance_before.argsort(axis=0)[:K].T most_similar_idx_after = distance_after.argsort(axis=0)[:K].T data = [] for (i, (bias, dis_before, dis_after)) in enumerate(tqdm.tqdm_note...
def log_params(params, logger=logger):
    """Log every (key, value) pair of `params` at INFO level, ordered by key."""
    for key, value in sorted(params.items()):
        logger.info('{}: {}'.format(key, value))
def assert_array_less(x, y, err_msg='', verbose=True):
    """Raise an AssertionError unless x < y holds elementwise.

    Thin wrapper delegating to assert_array_compare with operator.__lt__
    (numpy.testing-style comparison helper).
    """
    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg, verbose=verbose, header='Arrays are not less-ordered')
class SymforceCudaCodegenTest(TestCase): def test_codegen(self) -> None: output_dir = self.make_output_dir('symforce_cuda_codegen_test_') def cuda_func(a: sf.Scalar, b: sf.V1, c: sf.V3, d: sf.M22, e: sf.V5, f: sf.M66, g: sf.DataBuffer) -> T.Tuple[(sf.Scalar, sf.V1, sf.V3, sf.M22, sf.V5, sf.M66)]: ...
def RunOperatorsOnce(operators):
    """Run each operator once, in order; stop at the first failure.

    Returns True only if every operator succeeded (short-circuits like the
    original explicit loop).
    """
    return all(RunOperatorOnce(op) for op in operators)
_grad() class PCAMaskEncoding(nn.Module): def __init__(self, cfg): super().__init__() self.cfg = cfg self.agnostic = cfg.MODEL.MEInst.AGNOSTIC self.whiten = cfg.MODEL.MEInst.WHITEN self.sigmoid = cfg.MODEL.MEInst.SIGMOID self.dim_mask = cfg.MODEL.MEInst.DIM_MASK ...
class TestGLMMEncoder(TestCase): def test_continuous(self): cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant', 321, 'categorical', 'na_categorical', 'categorical_int'] enc = encoders.GLMMEncoder(cols=cols, binomial_target=False) def test_binary(self): cols = ['unique_str',...
def shear_x(image, level):
    """Shear `image` horizontally by a magnitude derived from `level`.

    The magnitude is scaled to [0, 0.3] and randomly negated, then applied as a
    PIL affine transform with bicubic resampling.
    """
    magnitude = _randomly_negate_tensor((level / MAX_LEVEL) * 0.3)
    pil_img = Image.fromarray(image)
    sheared = pil_img.transform(pil_img.size, Image.AFFINE, (1, magnitude, 0, 0, 1, 0), Image.BICUBIC)
    return np.asarray(sheared)
def freeze_by_patterns(module, patterns): frozen_params = [] frozen_modules = [] for pattern in patterns: if pattern.startswith('module:'): frozen_modules.append(pattern[7:]) else: frozen_params.append(pattern) freeze_params(module, frozen_params) freeze_modul...
def _indicators(A, b, c, c0, x, y, z, tau, kappa): (x0, y0, z0, tau0, kappa0) = _get_blind_start(A.shape) def r_p(x, tau): return ((b * tau) - A.dot(x)) def r_d(y, z, tau): return (((c * tau) - A.T.dot(y)) - z) def r_g(x, y, kappa): return ((kappa + c.dot(x)) - b.dot(y)) def ...
def _get_axis_wo_b(axis_wb, batch_dim_axis, batch_ndim=None): if (axis_wb < 0): assert (batch_ndim is not None) assert ((axis_wb + batch_ndim) >= 0) axis_wb += batch_ndim assert (0 <= axis_wb < batch_ndim) if (batch_dim_axis is None): return axis_wb if (axis_wb == bat...
def get_class_loss(logits_out, label, num_classes, ld_focal=2.0): loss = tf.reduce_mean((((1 - tf.reduce_sum((tf.nn.softmax(logits_out) * tf.squeeze(tf.one_hot(label, num_classes, on_value=1.0, off_value=0.0, dtype=tf.float32))), axis=1)) ** ld_focal) * tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_o...
def visualize_ner_doc(doc, language, select=None, colors=None): (model, documents, visualization_colors) = (spacy.blank('en'), [], copy.deepcopy(colors)) (sentences, rtl, RTL_OVERRIDE) = (doc.sentences, is_right_to_left(language), '\u202e') if rtl: sentences = reversed(doc.sentences) if colo...
def test_mask_head_loss(): self = FCNMaskHead(num_convs=1, roi_feat_size=6, in_channels=8, conv_out_channels=8, num_classes=8) proposal_list = [torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]])] gt_bboxes = [torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])] gt_labels = [torch.LongTensor([2])]...
class RibbonTableau_class(RibbonTableau):
    """Legacy unpickling shim: old pickles referencing RibbonTableau_class are
    rehydrated as present-day RibbonTableau objects."""

    def __setstate__(self, state):
        # Swap the instance's class, then re-run __init__ with the stored list
        # so the unpickled object is a fully initialized RibbonTableau.
        self.__class__ = RibbonTableau
        self.__init__(RibbonTableaux(), state['_list'])
def make_batch_data_sampler(cfg, sampler, batch_size, drop_last, max_iter, is_train): batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, batch_size, drop_last) if (max_iter != (- 1)): batch_sampler = samplers.IterationBasedBatchSampler(batch_sampler, max_iter) if (cfg.train.sampler == 'i...
def test_state_to_key(): rng = jax.random.PRNGKey(0) state = init(rng) state = state.replace(_hand=jnp.arange(52, dtype=jnp.int32)) key = _state_to_key(state) assert jnp.all((key == jnp.array([0, , , ], dtype=jnp.int32))) state = state.replace(_hand=jnp.arange(52, dtype=jnp.int32)[::(- 1)]) ...
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None): end_points = {} def add_and_check_final(name, net): end_points[name] = net return (name == final_endpoint) with tf.variable_scope(scope, 'InceptionV4', [inputs]): with slim.arg_scope([slim.conv2d, slim.max_pool2d, ...
class FileSequenceMetaData(Closeable): def __init__(self, file_seq): meta_path = (file_seq.path + '.meta') file_existed = os.path.isfile(meta_path) self._d = FileMapping(meta_path) if (not file_existed): self.length = len(file_seq) def close(self): self._d.clo...
def test_resnext():
    """Smoke test: a forward pass through ResNeXt29_2x64d on one CIFAR-sized input."""
    model = ResNeXt29_2x64d()
    batch = torch.randn(1, 3, 32, 32)
    out = model(Variable(batch))
    print(out.size())
class GeneralizedRCNN(nn.Module): def __init__(self, cfg): super(GeneralizedRCNN, self).__init__() self.backbone = build_backbone(cfg) self.rpn = build_rpn(cfg, self.backbone.out_channels) self.roi_heads = build_roi_heads(cfg, self.backbone.out_channels) self.fix_rpn = cfg.MO...
def run(args): tt_dataset = LrHrSet(args.dset.test, args.experiment.lr_sr, args.experiment.hr_sr, stride=None, segment=None, with_path=True, upsample=args.experiment.upsample) tt_loader = distrib.loader(tt_dataset, batch_size=1, shuffle=False, num_workers=args.num_workers) model = _load_model(args) mode...
class TemplateTableIndex(): def __init__(self, index: TableIndex) -> None: for attr in dir(index): if (not attr.startswith('__')): setattr(self, attr, getattr(index, attr)) self.index = index def get_index(self): return self.index def get_key(self): ...
def build_pretraining_dataset(args):
    """Build an ImageFolder dataset over args.data_path using the blur
    pretraining augmentation, echoing the augmentation for the run log."""
    aug = DataAugmentationForBlur(args)
    print('Data Aug = %s' % str(aug))
    return ImageFolder(args.data_path, transform=aug)
class mkl_info(system_info): section = 'mkl' dir_env_var = 'MKLROOT' _lib_mkl = ['mkl_rt'] def get_mkl_rootdir(self): mklroot = os.environ.get('MKLROOT', None) if (mklroot is not None): return mklroot paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep) ...
class PQE(nn.Module): quasipartition_aggregator: QuasipartitionAggregatorBase measure: MeasureBase shape: ShapeBase def __init__(self, num_quasipartition_mixtures: int, num_poisson_processes_per_quasipartition: int=4, measure: str='lebesgue', shape: str='halfline', discounted: bool=False): super...
def load_training_data_per_model(data_loading_funcs: dict, output_path: str, no_batches: int, train_mode: bool=True, load_valid_data: bool=True, no_workers: int=8) -> Tuple[(DataLoader, DataLoader)]: load_data_t = time() if (data_loading_funcs['name'] == 'woi'): (X_tok_train, X_type_train) = data_loadin...
def test_load(skip_remote, dataset): if (dataset is None): pytest.skip() all_data = dataset.load_clips() assert isinstance(all_data, dict) clip_ids = dataset.clip_ids assert (set(clip_ids) == set(all_data.keys())) for clip_id in tqdm.tqdm(clip_ids): clip = all_data[clip_id] ...
def is_torch_tf32_available(): if (not is_torch_available()): return False import torch if ((not torch.cuda.is_available()) or (torch.version.cuda is None)): return False if (torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8): return False if (int(torch....
def print_rank_0(message):
    """Print `message` (flushed) only on distributed rank 0.

    When torch.distributed is not initialized, always print.
    """
    if torch.distributed.is_initialized() and torch.distributed.get_rank() != 0:
        return
    print(message, flush=True)
def make_params_from_flags():
    """Build a Params object from the current FLAGS values for every parameter
    name declared in _DEFAULT_PARAMS."""
    values = {}
    for name in _DEFAULT_PARAMS:
        values[name] = getattr(FLAGS, name)
    return Params(**values)
def Const(net, value, dtype=None, name=None):
    """Create a constant blob on `net` holding `value` and return the blob ref.

    The value is materialized as a numpy array, registered as an external
    input, and fed into the workspace.
    """
    assert isinstance(net, core.Net), 'net must be a core.Net instance.'
    arr = np.array(value, dtype=dtype)
    blob = net.AddExternalInput(net.NextName(prefix=name))
    workspace.FeedBlob(str(blob), arr)
    return blob
.parametrize('method', methods) def test_add_reverse_edges_large_graph(method): n = 100000 indices = np.arange(1, n) indptr = np.array((list(range(n)) + [(n - 1)])) data = np.ones((n - 1), dtype=np.int32) graph = csr_matrix((data, indices, indptr), shape=(n, n)) res = maximum_flow(graph, 0, (n -...
class WSGIResponse(BaseResponse, JSONMixin):
    """Response class combining BaseResponse with JSON parsing support."""

    # The request this response answers; type per the project's PreparedRequest.
    request: PreparedRequest

    def on_json_loading_failed(self, e: JSONDecodeError) -> NoReturn:
        # Propagate JSON decode errors instead of swallowing them (overrides
        # the mixin's default behavior).
        raise e
def test_fortran_frontend_sum2loop_1d_without_offset(): test_string = '\n PROGRAM index_offset_test\n implicit none\n double precision, dimension(7) :: d\n double precision, dimension(3) :: res\n CALL index_test_function(...
class GammaRegressor(_GeneralizedLinearRegressor): _parameter_constraints: dict = {**_GeneralizedLinearRegressor._parameter_constraints} def __init__(self, *, alpha=1.0, fit_intercept=True, solver='lbfgs', max_iter=100, tol=0.0001, warm_start=False, verbose=0): super().__init__(alpha=alpha, fit_intercep...
class FunctionalModel(torch.nn.Module): def __init__(self): super(FunctionalModel, self).__init__() self.w1 = torch.nn.Parameter(torch.randn(_MODEL_DIM, _MODEL_DIM)) self.w2 = torch.nn.Parameter(torch.randn(_MODEL_DIM, _MODEL_DIM)) self.w3 = torch.nn.Parameter(torch.randn(_MODEL_DIM,...
class Instance(object): def __init__(self, words, relation, head, tail, headpos, tailpos, headtype, tailtype, ner=None, is_noise=None): self.words = words self.relation = relation self.head = head self.tail = tail self.headpos = headpos self.tailpos = tailpos ...
# NOTE(review): the line below appears to be a decorator garbled by extraction
# (bare call, no '@') — presumably `@lru_cache(maxsize=None)` or similar
# memoization; confirm against the upstream source before relying on it.
_cache(maxsize=None)
def colbertv2_post_request_v2_wrapped(*args, **kwargs):
    """Passthrough wrapper for colbertv2_post_request_v2 (intended as the
    cached entry point, per the decorator residue above)."""
    return colbertv2_post_request_v2(*args, **kwargs)
def create_accumulators(params): accums = [] for p in params: if is_subtensor_op(p): (origin, _) = get_subtensor_op_inputs(p) acc = theano.shared(np.zeros_like(origin.get_value(borrow=True), dtype=theano.config.floatX)) else: acc = theano.shared(np.zeros_like(...
class DataLoader(object): def __init__(self, filename): self.data = pickle.load(open(filename, 'rb')) self.dg_node_type_universe = len(atom_index) self.lg_node_type_universe = len(lg_node_type) self.dg_node_ref = np.zeros((len(self.data) + 1), dtype=np.int64) for i in range(l...
def pdist(feature):
    """Pairwise squared Euclidean distances between the column vectors of a
    batched feature tensor.

    Args:
        feature: tensor of shape (B, C, N) — assumed batch x channels x points;
            TODO confirm against callers.

    Returns:
        (B, N, N) tensor D with D[b, i, j] = ||f_i - f_j||^2, via the identity
        ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.
    """
    norms = (feature ** 2).sum(dim=1, keepdim=True)
    gram = norms + norms.transpose(1, 2)
    # beta=1 keeps the norm sums; alpha=-2 subtracts twice the inner products.
    return torch.baddbmm(gram, feature.transpose(1, 2), feature, alpha=-2.0)
def gather_features(image_features, text_features, local_loss=False, gather_with_grad=False, rank=0, world_size=1, use_horovod=False): assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.' if use_horovod: assert (hvd is not None), 'Please instal...
def test_getSubscription1(): url = (brokerIp + '/ngsi10/subscribeContext') headers = {'Content-Type': 'application/json'} r = requests.post(url, data=json.dumps(data_ngsi10.subdata1), headers=headers) resp_content = r.content resInJson = resp_content.decode('utf8').replace("'", '"') resp = json....
def save_checkpoint(model, optimizer, scheduler, tokenizer, args): checkpoint_prefix = 'checkpoint' output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step)) os.makedirs(output_dir, exist_ok=True) model_to_save = (model.module if hasattr(model, 'module') else model) ...
def register_Ns3OnoeWifiManager_methods(root_module, cls):
    """Register ns3::OnoeWifiManager constructors and methods on the binding
    wrapper `cls` (presumably PyBindGen-generated code — confirm).

    NOTE(review): this snippet may be truncated in the dump; verify no further
    add_method calls follow in the original file.
    """
    cls.add_constructor([param('ns3::OnoeWifiManager const &', 'arg0')])  # copy constructor
    cls.add_constructor([])  # default constructor
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('SetHeSupported', 'void', [param('bool', 'enable')], is_virtual=True)
def get_model(point_cloud, is_training, num_class, bn_decay=None): batch_size = point_cloud.get_shape()[0].value num_point = point_cloud.get_shape()[1].value end_points = {} l0_xyz = point_cloud l0_points = None end_points['l0_xyz'] = l0_xyz (l1_xyz, l1_points, l1_indices) = pointnet_sa_modu...
class TestOutputPlainText(OutputPlainText):
    """Test double for OutputPlainText whose printed lines are tagged with the
    class name so test output is distinguishable from the real class."""

    def __init__(self, *args, **kwds):
        # Direct construction is deliberately forbidden; instances are expected
        # to be created some other way (e.g. by reclassing) — confirm in callers.
        raise AssertionError('cannot override constructor')

    def print_to_stdout(self):
        # Same text as the base output, suffixed with "[ClassName]".
        print('{0} [{1}]'.format(self.text.get_str(), self.__class__.__name__))
def test_einsum(backend):
    """einsum through the active tensorlib matches numpy semantics for a
    transpose ('ij->ji') and an outer product ('i,j->ij')."""
    lib = pyhf.tensorlib
    x = np.arange(20).reshape(5, 4).tolist()
    transposed = lib.tolist(lib.einsum('ij->ji', lib.astensor(x)))
    assert np.all(transposed == np.asarray(x).T.tolist())
    outer = lib.tolist(lib.einsum('i,j->ij', lib.astensor([1, 1, 1]), lib.astensor([1, 2, 3])))
    assert outer == [[1, 2, 3]] * 3
_utils.test() def test_indexing_in_struct_field(): s = ti.Struct.field({'v': ti.types.vector(3, ti.f32), 'm': ti.types.matrix(3, 3, ti.f32)}, shape=()) def foo(): print(s[None].v[(0, 0)]) with pytest.raises(TaichiCompilationError, match='Expected 1 indices, got 2'): foo() def bar(): ...
class SequenceToSequenceClassificationTask(Task): def __init__(self, key_metric: str, deserialization_func: Callable[([bytes], Dict[(str, tf.Tensor)])], n_classes: int, label_name: str, input_name: str='encoder_output', output_name: str='sequence_logits', mask_name: str='sequence_mask'): super().__init__(ke...
def filter_oldstyle_options(**options):
    """Map old-style option names to new ones by stripping the legacy
    'use_', 'opt_allow_' and 'opt_' markers from each key.

    NOTE: uses str.replace like the original, so the markers are removed
    anywhere in the key, not only as a prefix.
    """
    def modernize(key):
        for marker in ('use_', 'opt_allow_', 'opt_'):
            key = key.replace(marker, '')
        return key

    return {modernize(key): value for key, value in options.items()}
def loadSessions(filename): (sessions, queries) = ({}, set()) with open(filename, 'r') as f: for l in f: l = l.split('\t') sessionID = l[0] query = l[1] queries.add(query) if (sessionID not in sessions): sessions[sessionID] = []...
class GCN(nn.Module): def __init__(self, num_state, num_node): super(GCN, self).__init__() self.num_state = num_state self.num_node = num_node self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1) self.relu = nn.ReLU(inplace=True) self.conv2 = nn.Conv1d(num_state,...
class RK4(FixedGridODESolver):
    """Fourth-order Runge-Kutta fixed-grid ODE solver."""

    def step_func(self, func, t, dt, y, u):
        # Delegates one integration step to the shared helper (an alternative
        # RK4 tableau, per the helper's name — confirm which variant).
        return rk4_alt_step_func(func, t, dt, y, u=u)

    def order(self):
        # Convergence order of the method, used by the base solver.
        return 4
def wavefunction1d_discrete(mode: Optional[str] = None) -> Dict[str, Any]:
    """Plot options for a discrete 1-d wavefunction.

    Args:
        mode: optional amplitude-mode key; when given, the y-label is wrapped
            by the corresponding formatter from constants.MODE_STR_DICT.

    Returns:
        dict with 'xlabel' and 'ylabel' entries.
    """
    base = '$\\psi_j(n)$'
    ylabel = constants.MODE_STR_DICT[mode](base) if mode else base
    return {'xlabel': 'n', 'ylabel': ylabel}
class Cmd(): executable = None def __init__(self, executable): self.executable = executable def _call(self, command, args, kw, repository=None, call=False): cmd = ([self.executable, command] + list(args)) cwd = None if (repository is not None): cwd = os.getcwd() ...