Column "code": string values, with lengths ranging from 101 to 5.91M characters.
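The rows below are truncated samples drawn from this column. As a minimal sketch of how the stated length range could be verified, assuming the Hugging Face datasets library and a hypothetical dataset id (the actual repository name is not given here):

from datasets import load_dataset

# 'org/python-code-corpus' is a hypothetical id; substitute the real dataset name.
ds = load_dataset('org/python-code-corpus', split='train')
lengths = [len(row['code']) for row in ds]
print(min(lengths), max(lengths))  # should span roughly 101 to 5.91M characters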
def run(train_loader: torch.utils.data.DataLoader, valid_loader: torch.utils.data.DataLoader, test_loader: torch.utils.data.DataLoader, args: Any) -> Tuple[float, float]: device = ('cuda' if torch.cuda.is_available() else 'cpu') count = train_loader.dataset.category_count print(f'category size: {train_loa...
def gaussian_mixture_log_prob(w: torch.Tensor, pi: float, sigma_1: float, sigma_2: float) -> torch.Tensor: sigma_1 = torch.FloatTensor([sigma_1]).to(w.device) sigma_2 = torch.FloatTensor([sigma_2]).to(w.device) prob_1 = gaussian_log_prob(w, 0.0, sigma_1).exp() prob_2 = gaussian_log_prob(w, 0.0, sigma_2)...
def simGetConvexHullShape(pathAndFilename): (vertices, indices, _) = simImportMesh(0, pathAndFilename, 0, False, 1.0) outVertices = ffi.new('float **') outVerticesCount = ffi.new('int *') outIndices = ffi.new('int **') outIndicesCount = ffi.new('int *') ret = lib.simGetQHull(vertices[0], len(ver...
def test_no_collect_warnings(testdir): testdir.make_test('\nfrom schemathesis.models import *\n ') result = testdir.runpytest() assert ('cannot collect test class' not in result.stdout.str())
@make_properties class Tasklet(CodeNode): code = CodeProperty(desc='Tasklet code', default=CodeBlock('')) state_fields = ListProperty(element_type=str, desc='Fields that are added to the global state') code_global = CodeProperty(desc='Global scope code needed for tasklet execution', default=CodeBlock('', dtypes....
class RetriBertPreTrainedModel(PreTrainedModel): config_class = RetriBertConfig load_tf_weights = None base_model_prefix = 'retribert' def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_r...
def test_create_base_config(config: Config): config.plugins_allowlist = ['a', 'b'] config.plugins_denylist = ['c', 'd'] os.remove(config.plugins_config_file) plugins_config = config.load_plugins_config() assert (len(plugins_config.plugins) == 4) assert plugins_config.get('a').enabled assert ...
@registry.register_builder('flickr30k_caption') class Flickr30kCapBuilder(BaseDatasetBuilder): train_dataset_cls = COCOCapDataset eval_dataset_cls = COCOCapEvalDataset DATASET_CONFIG_DICT = {'default': 'configs/datasets/flickr30k/defaults_cap.yaml'}
def get_perdomain_datasets(preprocessed_dir, domain_weights_dict, cache_dir=None, split=None, seed=DEFAULT_SEED, domain_weights=None, domain_names=None, num_skip_examples=0, shuffle=False, shard_reversal=False, keep_in_memory=False): domain_name_to_skip_num = determine_skip_per_domain(num_skip_examples, seed, domai...
def test_notify_end(): global _quiet qsave = _quiet (saveerr, sys.stderr) = (sys.stderr, StringIO()) try: _quiet = False notify('hello, world', end='FOO') finally: _quiet = qsave (saveerr, sys.stderr) = (sys.stderr, saveerr) print(type(saveerr)) assert ('hello...
class TestBoxSphereBoundariesIntersections(TestCase): def test_2d_box_constraints(self): (ta, tb, intersect) = box_sphere_intersections([1, 1], [(- 2), 2], [(- 1), (- 2)], [1, 2], 2, entire_line=False) assert_array_almost_equal([ta, tb], [0, 0.5]) assert_equal(intersect, True) (ta, t...
def _format_url(url: Any, col: str, remove_auth: Union[bool, List[str]], split: bool, errors: str) -> Any: status = _check_url(url, True) if (status == 'null'): return ((np.nan, np.nan, np.nan, np.nan, 0, 0) if split else (np.nan, 0, 0)) if (status == 'unknown'): if (errors == 'raise'): ...
def test_aug_assign_tasklet_rhs(): def sdfg_aug_assign_tasklet_rhs(A: dace.float64[32], B: dace.float64[32]): for i in range(32): with dace.tasklet: (a << A[i]) (k << B[i]) (b >> A[i]) b = (k + a) sdfg = sdfg_aug_assign_tasklet_...
def validate(val_loader, model, criterion): batch_time = AverageMeter() loss = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() model.eval() end = time.time() for (step, (input, target)) in enumerate(val_loader): target = target.cuda() output = model(input) ...
def check(source_dir): pyproject = pjoin(source_dir, 'pyproject.toml') if isfile(pyproject): log.info('Found pyproject.toml') else: log.error('Missing pyproject.toml') return False try: with open(pyproject) as f: pyproject_data = toml_load(f) buildsys ...
def eval_checkpoint(device, input_s, input_ids, segment_ids, input_mask, model_object): result = [] input_ids = torch.tensor([input_ids], dtype=torch.long).to(device) input_mask = torch.tensor([input_mask], dtype=torch.short).to(device) segment_ids = torch.tensor([segment_ids], dtype=torch.long).to(devi...
def test_index32(): py_array = ak.index.Index.zeros(10, nplike=numpy.instance(), dtype=np.int32) assert (len(py_array) == 10) assert ('i32' == py_array.form)
@pytest.fixture def open_api_3_schema_with_yaml_payload(empty_open_api_3_schema): empty_open_api_3_schema['paths'] = {'/yaml': {'post': {'requestBody': {'required': True, 'content': {'text/yaml': {'schema': {'type': 'array', 'items': {'enum': [42]}, 'minItems': 1, 'maxItems': 1}}}}, 'responses': {'200': {'description': 'OK'}}}}} ...
def build_batch(batch, network): network.train(False) with torch.no_grad(): (code_state, code_funcs, file_names, urls) = cal_code_features(network, batch) return (code_state, code_funcs, file_names, urls)
def clone_openwhisk_chart() -> None: try: run_check_process('git clone git@github.com:apache/openwhisk-deploy-kube.git /tmp/openwhisk-deploy-kube') except (subprocess.CalledProcessError, FileNotFoundError) as e: logging.error('Cannot clone openwhisk chart, reason: {}'.format(e))
class FiniteFreeResolution_free_module(FiniteFreeResolution): @lazy_attribute def _maps(self): if isinstance(self._module, Ideal_generic): from sage.matrix.constructor import matrix return [matrix([[self._module.gen()]])] return [self._m()]
class DataBatcher(object): def __init__(self, data_dir, file_list, batch_size, num_epoch, shuffle=False): self.data_dir = data_dir self.file_list = file_list self.batch_size = batch_size self.num_epoch = num_epoch self.shuffle = shuffle self.cur_epoch = 0 self...
def is_RElement(x): from sage.misc.superseded import deprecation deprecation(34804, 'the function is_RElement is deprecated; use isinstance(x, sage.interfaces.abc.RElement) instead') return isinstance(x, RElement)
class MetricGanBrain(sb.Brain): def load_history(self): if os.path.isfile(self.hparams.historical_file): with open(self.hparams.historical_file, 'rb') as fp: self.historical_set = pickle.load(fp) def compute_feats(self, wavs): feats = self.hparams.compute_STFT(wavs) ...
def accuracy_tagging(eval_examples, predict_result_tagging, gold_result_tagging, input_mask_tagging, output_name): output_file = open(output_name, 'w') example_id = (- 1) assert (len(predict_result_tagging) == len(gold_result_tagging)) assert (len(predict_result_tagging) == len(input_mask_tagging)) ...
class Discriminator(keras.Model): def __init__(self, data, learning_rate=0.001, l_w=0, l_b=0, l_gan=0, num_users=100, num_items=100, name='CFGAN-DIS', **kwargs): super().__init__(name=name, **kwargs) tf.random.set_seed(42) self._learning_rate = learning_rate self._l_w = l_w s...
@compare_numpy_output(check_dtype=True) def test_ufunc_left_shift_cc(A: dace.complex64[10], B: dace.complex64[10]): return np.left_shift(A, B)
@register_model('model_parallel_transformer_lm') class ModelParallelTransformerLanguageModel(TransformerLanguageModel): @staticmethod def add_args(parser): TransformerLanguageModel.add_args(parser) @classmethod def build_model(cls, args, task): if (not has_megatron_submodule): raise ImportError('\n\nPlease install the ...
class AestheticsScorer(): MODEL_URL_TEMPLATE: str = ' def load_model(clip_model='vit_l_14'): cache_folder: str = os.path.join(get_cached_models_path(), 'emb_reader') ensure_directory_exists(cache_folder) model_path: str = os.path.join(cache_folder, f'sa_0_4_{clip_model}_linear.pth') ...
def get_outcome_feature_vector(dates, index_date): for date in dates: if ((date > index_date) and ((date - index_date).days <= 730)): return 1 return 0
def symbol2onehot96(symbol): order = 0 char_list = list(symbol) if (('o' in char_list) or ('' in char_list)): order = 4 elif (('#' in char_list) and ('5' in char_list)): order = 3 elif (('s' in char_list) and ('u' in char_list)): order = 5 elif (('7' in char_list) or ('9'...
class CountMeter(UnivariateStatistic): def __init__(self): self.count = 0 def update(self, num): self.count += 1 return self def remove(self, num=None): self.count -= 1 return self def get(self): return self.count
class ForecastNNVectorizerParams(Config): feature_type: str = None label_type: str = None sep_token: str = '[SEP]' max_token_len: int = None min_token_count: int = None embedding_dim: int = None output_dir: str = '' vectorizer_metadata_filepath: str = '' vectorizer_model_dirpath: str...
class NVML(): def __enter__(self): self.start() return self def __exit__(self, exc_type, exc_value, traceback): self.stop() def start(self): pynvml.nvmlInit() def stop(self): pynvml.nvmlShutdown() def get_memory_capacity(self): handle = pynvml.nvmlDevi...
class Distance(): def __init__(self, cosmo=[0.3, 0.7, 0.7]): self.OMEGA_M = cosmo[0] self.OMEGA_L = cosmo[1] self.h = cosmo[2] self.w = (- 1.0) self.wpars = None self.Dc = self.comoving_distance self.Dt = self.comoving_transverse_distance self.Dm = sel...
class DefROIAlign(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio, trans_std, aligned=True): super(DefROIAlign, self).__init__() self.output_size = output_size self.spatial_scale = spatial_scale self.sampling_ratio = sampling_ratio self.trans_std = t...
def test_hko(args): assert ((cfg.MODEL.FRAME_STACK == 1) and (cfg.MODEL.FRAME_SKIP == 1)) assert (len(cfg.MODEL.LOAD_DIR) > 0) base_dir = args.save_dir logging_config(folder=base_dir, name='testing') save_cfg(dir_path=base_dir, source=cfg.MODEL) hko_nowcasting_online = HKONowcastingFactory(batch...
class E_attr(nn.Module): def __init__(self, input_dim_a, input_dim_b, output_nc=8): super(E_attr, self).__init__() dim = 64 self.model_a = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_dim_a, dim, 7, 1), nn.ReLU(inplace=True), nn.ReflectionPad2d(1), nn.Conv2d(dim, (dim * 2), 4, 2), nn...
class BaseDataClass(object): def __init__(self, config): self.config = (config or dict()) self.max_src_len = self.config['max_src_len'] self.max_tgt_len = self.config['max_tgt_len'] self.num_mr_attr = len(MR_FIELDS) self.vocab = None self.uni_mr = {'train': None, 'dev...
class Encoder(nn.Module): def __init__(self): super(Encoder, self).__init__() self.enc_skip2_conv = nn.Conv2d(256, 24, kernel_size=1, stride=1) self.enc_skip2_conv_bn = nn.BatchNorm2d(24) self.enc_skip1_conv = nn.Conv2d(512, 24, kernel_size=1, stride=1) self.enc_skip1_conv_bn...
def main(args): os.makedirs(args.output_dir, exist_ok=True) data_dict = defaultdict(dict) scene_prefix = os.path.join(args.input_dir, 'init_and_final_graphs') scenes = os.listdir(scene_prefix) for scene in scenes: print(f''' Start {scene}...''') env_id = int(scene[len('TrimmedTestSce...
def compile_standard(input_data: Dict, base_path: str=None, allow_paths: List=None, output_dir: str=None, overwrite: bool=False, solc_binary: Union[str, Path]=None, solc_version: Version=None, allow_empty: bool=False) -> Dict: if ((not input_data.get('sources')) and (not allow_empty)): raise ContractsNotF...
@register_model def efficientnet_b3_pruned(pretrained=False, **kwargs): kwargs['bn_eps'] = BN_EPS_TF_DEFAULT kwargs['pad_type'] = 'same' model = _gen_efficientnet('efficientnet_b3_pruned', channel_multiplier=1.2, depth_multiplier=1.4, pretrained=pretrained, **kwargs) return model
class DataLoader(): def __init__(self, input_src, batch_size, args, pretrain, vocab=None, evaluation=False, sort_during_eval=False): self.batch_size = batch_size self.args = args self.eval = evaluation self.shuffled = (not self.eval) self.sort_during_eval = sort_during_eval ...
def rosenbrock(tensor, alpha=1.0, beta=100): (x, y) = tensor.squeeze() return (((alpha - x) ** 2) + (beta * ((y - (x ** 2)) ** 2)))
class GatedSpatialConv2d(_ConvNd): def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, groups=1, bias=False): kernel_size = _pair(kernel_size) stride = _pair(stride) padding = _pair(padding) dilation = _pair(dilation) super(GatedSpati...
def format_simulated_data(args): simulation_pipeline = args.pipeline args.pipeline = 'generate_and_correct' chatbot = Chatbot(args) dlg_history = [] dlg_claims = set() make_parent_directories(args.output_file) content_list = [] metadata_list = [] example_id = 0 turn_num = 0 d...
def download(vid_file): with open(vid_file) as fp: vid_list = [line.strip() for line in fp] url_list = ' url_key = ('&username=%s&accesskey=%s&release=latest&src=stanford' % (args.user, args.key)) testfile = urllib.URLopener() for i in range(len(vid_list)): wnid = vid_list[i] ...
class GME_NP_rew(GridMap.GridMapEnv): def __init__(self, name='', gridMap=None, workingDir='./'): super(GME_NP_rew, self).__init__(name, gridMap, workingDir) gm2d = GridMap.GridMap2D(50, 50, outOfBoundValue=0) gm2d.valueStartingBlock = 0 gm2d.valueEndingBlock = 1000 gm2d.valu...
def get_optimizer(args, net): if (args.backbone_lr > 0.0): base_params = [] resnet_params = [] resnet_name = [] resnet_name.append('layer0') resnet_name.append('layer1') len_resnet = len(resnet_name) else: param_groups = net.parameters() if (args.backb...
class GenericActivation(tf.keras.layers.Layer): def __init__(self, forward_fn, activation_type, name, **kwargs): super(GenericActivation, self).__init__(name=((name + '-') + activation_type), **kwargs) self._forward_fn = forward_fn self._activation_type = activation_type def call(self, t...
class TestCategoricalLSTMModel(TfGraphTestCase): def setup_method(self): super().setup_method() self.batch_size = 1 self.time_step = 1 self.feature_shape = 2 self.output_dim = 1 self.obs_inputs = np.full((self.batch_size, self.time_step, self.feature_shape), 1.0) ...
@HEADS.register_module() class LinearClsHead(ClsHead): def __init__(self, num_classes, in_channels, init_cfg=dict(type='Normal', layer='Linear', std=0.01), *args, **kwargs): super(LinearClsHead, self).__init__(*args, init_cfg=init_cfg, **kwargs) self.in_channels = in_channels self.num_classes = num_classes...
@hpopt_mode('orion') class OrionHyperparameterOptimizationReporter(HyperparameterOptimizationReporter): def __init__(self, objective_key): super().__init__(objective_key=objective_key) self.orion_client = None self._trial_id = None self._check_client() def _check_client(self): ...
@RunIf(sh=True) @pytest.mark.slow def test_hydra_sweep(tmp_path): command = ([startfile, '-m', ('hydra.sweep.dir=' + str(tmp_path)), 'model.optimizer.lr=0.005,0.01', '++trainer.fast_dev_run=true'] + overrides) run_sh_command(command)
def ref_grad_pow2_quantize(x, dy, sign, with_zero, n, m, quantize, ste_fine_grained, **kw): if (not ste_fine_grained): return dy.flatten() n_ = ((n - 1) if sign else n) n_ = ((n_ - 1) if with_zero else n_) ref_p_max = (2 ** m) ref_p_min = (2 ** (m - ((1 << n_) - 1))) ref_pruning_threshol...
def subtokenizer(identifier): if (identifier == 'MONKEYS_AT'): return [identifier] splitter_regex = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)') identifiers = re.split('[._\\-]', identifier) subtoken_list = [] for identifier in identifiers: matches = splitter_r...
def generate_model(opt): assert (opt.model in ['resnet', 'resnet_AE', 'resnet_mask', 'resnet_comp', 'unet', 'icnet', 'icnet_res', 'icnet_res_2D', 'icnet_res_2Dt', 'icnet_DBI', 'icnet_deep', 'icnet_deep_gate', 'icnet_deep_gate_2step']) if (opt.model == 'resnet'): assert (opt.model_depth in [10, 18, 34, 5...
class dts_Target(dts_ConvAI2): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def get_vocab(self): counter = collections.Counter() dialogs = self.get_dialogs() for dialog in dialogs: for uttr in dialog: counter.update(simp_token...
class EventCollection(): def __init__(self, path: str): self.path = path if (not os.path.exists(path)): os.mkdir(self.path) def sharded_readers(self) -> Sequence[Callable[[], ContextManager[Iterable[Tuple[int, RawEvent]]]]]: return [functools.partial(_create_event_reader,...
def get_name_in_open_scopes(name: ScopedName, open_scopes: List[ScopedName], prefix: str='') -> str: if (len(name) == 0): return '' for scope in sorted(open_scopes, key=len, reverse=True): if name.startswith(scope): return str(get_name_in_scope(name, scope, prefix)) return str(le...
class ImageGPTForImageClassification(metaclass=DummyObject): _backends = ['torch'] def __init__(self, *args, **kwargs): requires_backends(self, ['torch'])
@register_model def convformer_b36_384(pretrained=False, **kwargs): model = MetaFormer(depths=[3, 12, 18, 3], dims=[128, 256, 512, 768], token_mixers=SepConv, head_fn=MlpHead, **kwargs) model.default_cfg = default_cfgs['convformer_b36_384'] if pretrained: state_dict = torch.hub.load_state_dict_from_url(url=m...
def test_model(): for dataset_name in ['mnist']: args = load_cfg(dataset_name) (args.train_classes, args.open_set_classes) = get_class_splits(args.dataset, args.split_idx) datasets = get_datasets(args.dataset, transform=args.transform, train_classes=args.train_classes, open_set_classes=args....
class MetaBilinear(nn.Bilinear, MetaModule): __doc__ = nn.Bilinear.__doc__ def forward(self, input1, input2, params=None): if (params is None): params = OrderedDict(self.named_parameters()) bias = params.get('bias', None) return F.bilinear(input1, input2, params['weight'], bi...
class StreamingEpochBatchIterator(EpochBatchIterating): def __init__(self, dataset, epoch=1, num_shards=1, shard_id=0): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.epoch = max(epoch, 1) self._current_epoch_iterator = None self.num_...
class TestInt8QuantSchemeBlobFillOperator(hu.HypothesisTestCase): @given(**hu.gcs_cpu_only) def test_int8_quant_scheme_blob_fill_op(self, gc, dc): gen_quant_scheme_net = core.Net('gen_quant_scheme') gen_quant_scheme_op = core.CreateOperator('Int8QuantSchemeBlobFill', [], ['quant_scheme'], quantization...
def test_dataset() -> None: dataset_names = ['titanic', 'iris'] for dataset in dataset_names: df = load_dataset(dataset) create_report(df) ddf = _load_dataset_as_dask(dataset) create_report(ddf)
def register_Ns3AmpduTag_methods(root_module, cls): cls.add_constructor([param('ns3::AmpduTag const &', 'arg0')]) cls.add_constructor([]) cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True) cls.add_method('GetAmpdu', 'bool', [], is_const=True) cls.add_method('GetIn...
def convert_into_sentences(lines): stack = [] sent_L = [] n_sent = 0 for chunk in lines: if (not chunk.strip()): if stack: sents = text_to_sentences(' '.join(stack).strip().replace('\n', ' ')).split('\n') sent_L.extend(sents) n_sent += ...
class TextOutputHandler(OutputHandler): name = 'text' def __init__(self, path, args): super(TextOutputHandler, self).__init__() self.path = path def write_hypos(self, all_hypos, sen_indices=None): if (self.f is not None): for hypos in all_hypos: self.f.wri...
def main(args): dataset = args.dataset formula_dir_name = args.formula_dir_name data_folder = Path(f'data/spec_datasets/{dataset}/subformulae/{formula_dir_name}/') min_inten = args.min_inten max_peaks = args.max_peaks binned_pred_file = Path(args.binned_pred_file) outfile = args.outfile ...
def get_ax3(transform_dict): origin = create_xyz(transform_dict['origin']) x_axis = create_xyz(transform_dict['x_axis']) y_axis = create_xyz(transform_dict['y_axis']) z_axis = create_xyz(transform_dict['z_axis']) axis3 = gp_Ax3(gp_Pnt(origin), gp_Dir(z_axis), gp_Dir(x_axis)) return axis3
def get_params(opt, size): (w, h) = size new_h = h new_w = w if (opt.resize_or_crop == 'resize_and_crop'): new_h = new_w = opt.loadSize elif (opt.resize_or_crop == 'scale_width_and_crop'): new_w = opt.loadSize new_h = ((opt.loadSize * h) // w) x = random.randint(0, np.max...
def lr_schedule(epoch): if (epoch < (int((EPOCHS * 0.25)) - 1)): return START_LR elif (epoch < (int((EPOCHS * 0.5)) - 1)): return float((START_LR * 0.1)) elif (epoch < (int((EPOCHS * 0.75)) - 1)): return float((START_LR * 0.01)) else: return float((START_LR * 0.001))
def inference(sp): dataset = data_loader.load_processed_data(args) split = ('test' if args.test else 'dev') if (args.dataset_name == 'wikisql'): engine_path = os.path.join(args.data_dir, '{}.db'.format(split)) engine = DBEngine(engine_path) else: engine = None def evaluate(ex...
def first_flag(iterator: Iterable[T]) -> Iterator[Tuple[bool, T]]: items = list(iterator) for (i, item) in enumerate(items): yield ((i == 0), item)
def write_string(s, output_path): with open(output_path, 'w') as output_file: output_file.write(s)
class OneClassSVM(OutlierMixin, BaseLibSVM): _impl = 'one_class' _parameter_constraints: dict = {**BaseLibSVM._parameter_constraints} for unused_param in ['C', 'class_weight', 'epsilon', 'probability', 'random_state']: _parameter_constraints.pop(unused_param) def __init__(self, *, kernel='rbf', ...
class ESPNet(nn.Module): def __init__(self, classes=20, p=2, q=3, encoderFile=None): super().__init__() self.encoder = ESPNet_Encoder(classes, p, q) if (encoderFile != None): self.encoder.load_state_dict(torch.load(encoderFile)) print('Encoder loaded!') self.m...
class _MSDataLoaderIter(_DataLoaderIter): def __init__(self, loader): self.dataset = loader.dataset self.noise_g = loader.noise_g self.collate_fn = loader.collate_fn self.batch_sampler = loader.batch_sampler self.num_workers = loader.num_workers self.pin_memory = (loa...
def tree_to_variable_index(root_node, index_to_code): if (((len(root_node.children) == 0) or (root_node.type == 'string')) and (root_node.type != 'comment')): index = (root_node.start_point, root_node.end_point) (_, code) = index_to_code[index] if (root_node.type != code): return...
@pytest.mark.filterwarnings('ignore::DeprecationWarning') def test_from_path_loader_ignore_network_parameters(openapi2_base_url): all_events = list(prepare(openapi2_base_url, loader=schemathesis.openapi.from_path, auth=('user', 'password'), headers={'X-Foo': 'Bar'}, auth_type='basic')) assert (len(all_events) == 1) ass...
class MyDataParallel(Module): def __init__(self, module, device_ids=None, output_device=None, dim=0, gather=True): super(MyDataParallel, self).__init__() if (not torch.cuda.is_available()): self.module = module self.device_ids = [] return if (device_ids is...
def read_vec_flt(file_or_fd): fd = open_or_fd(file_or_fd) binary = fd.read(2).decode() if (binary == '\x00B'): header = fd.read(3).decode() if (header == 'FV '): sample_size = 4 elif (header == 'DV '): sample_size = 8 else: raise UnknownVec...
def check_answer(questions_answers_docs, tokenizer, match_type) -> List[bool]: (answers, (doc_ids, doc_scores)) = questions_answers_docs global dpr_all_documents hits = [] for (i, doc_id) in enumerate(doc_ids): doc = dpr_all_documents[doc_id] text = doc[0] answer_found = False ...
def train_live_update(loggers, loaders, model, optimizer, scheduler, datasets, **kwargs): for dataset in datasets: if (not hasattr(dataset[0], 'keep_ratio')): precompute_edge_degree_info(dataset) if (cfg.dataset.premade_datasets == 'fresh_save_cache'): if (not os.path.exists(f'{cfg.d...
def ZpER(p, prec=None, halt=None, secure=False, *args, **kwds): return Zp(p, (prec, halt, secure), 'relaxed', *args, **kwds)
def extract_YYYYMMDD(filename): m = re.search('(19[5-9]|20[0-4])\\d(0\\d|1[0-2])([0-2]\\d|3[01])', os.path.basename(filename)) if (not m): return None else: return datetime.datetime.strptime(m.group(), '%Y%m%d')
def wordoftuples_to_tupleofwords(wordoftuples): if (not equal((len(t) for t in wordoftuples))): raise ValueError('Not all entries of input have the same length.') def remove_empty_letters(word): return [letter for letter in word if (letter is not None)] return tuple((remove_empty_letters(wor...
class PipProvider(AbstractProvider): def __init__(self, factory, constraints, ignore_dependencies, upgrade_strategy, user_requested): self._factory = factory self._constraints = constraints self._ignore_dependencies = ignore_dependencies self._upgrade_strategy = upgrade_strategy ...
def evaluate_all(dataset, all_boxes, all_segms, all_keyps, all_bodys, output_dir, use_matlab=False): all_results = evaluate_boxes(dataset, all_boxes, output_dir, use_matlab=use_matlab) logger.info('Evaluating bounding boxes is done!') if cfg.MODEL.MASK_ON: results = evaluate_masks(dataset, all_boxes...
def postprocess_fsl10k_annotations(annotations: Dict[str, Any], keys_to_drop=('save_for_later', 'well_cut', 'discard', 'comments', 'username', 'num_ratings', 'num_downloads', 'license', 'avg_rating', 'preview_url', 'type', 'pack', 'image')) -> Dict[str, Any]: annotations = {k: v for (k, v) in annotations.items(...
@pytest.fixture(scope='module') def gels(): from sfepy.discrete.fem.geometry_element import GeometryElement gels = {} for key in ['2_3', '2_4', '3_4', '3_8']: gel = GeometryElement(key) gels[key] = gel return gels
@pytest.mark.parametrize('mode', ['early', 'fail', 'full']) def test_bayesian_optimizer_continue_optimization(mode: str) -> None: class _CountingObserver(): call_count = 0 def __call__(self, x: tf.Tensor) -> Dataset: self.call_count += 1 if ((self.call_count == 2) and (mode == 'fail')): ...
def split_different_speakers(speaker_dict, test_spk_id): data_split = {k: [] for k in ['train', 'valid', 'test']} data_split['test'].extend(speaker_dict[str(test_spk_id)]) if ((test_spk_id % 2) == 0): valid_spk_num = (test_spk_id - 1) else: valid_spk_num = (test_spk_id + 1) data_spli...
def load_h5_data_label_normal(h5_filename): f = h5py.File(h5_filename, 'r') data = f['data'][:] label = f['label'][:] normal = f['normal'][:] return (data, label, normal)
class SpatialCorrelationSamplerFunction(Function): @staticmethod def forward(ctx, input1, input2, kernel_size=1, patch_size=1, stride=1, padding=0, dilation_patch=1): ctx.save_for_backward(input1, input2) (kD, kH, kW) = ctx.kernel_size = _triple(kernel_size) (patchD, patchH, patchW) = ctx.patch_size = _tr...
def heatmap_FAIR_keypoint_ref(maps, rois): return [keypoint_utils.heatmaps_to_keypoints(maps, rois)]
class Frame(): label_encoder = {'No significant difference': 0, 'Significantly increased': 1, 'Significantly decreased': 2, 'no significant difference': 0, 'significantly increased': 1, 'significantly decreased': 2} encoder_label = {v: k for (k, v) in label_encoder.items()} def __init__(self, i, c, o, ev, l...