code
stringlengths
101
5.91M
def _run_symbolic_function(*args, **kwargs):
    """Forward all arguments to ``torch.onnx.utils._run_symbolic_function``.

    The import happens at call time so ``torch.onnx`` is only loaded when
    symbolic tracing is actually requested.
    """
    from torch.onnx import utils as _onnx_utils
    return _onnx_utils._run_symbolic_function(*args, **kwargs)
class DCGANCycEncDecRandomVP(object): name = 'gan_cyc_encdec_randomvp' def __init__(self, g_net, e_net, d_net, x_sampler, z_sampler, val_sampler, prefix, config, verbose=1): self.g_net = g_net self.e_net = e_net self.d_net = d_net self.x_sampler = x_sampler self.z_sampler...
def load_image(image_path, model=None): if model: camera = model.camera else: camera = re.search('(stereo|mono_(left|right|rear))', image_path).group(0) if (camera == 'stereo'): pattern = BAYER_STEREO else: pattern = BAYER_MONO img = Image.open(image_path) img = d...
class Mention(object): def __init__(self, doc_id, sent_id, tokens_numbers, tokens, mention_str, head_text, head_lemma, is_singleton, is_continuous, coref_chain): self.doc_id = doc_id self.sent_id = sent_id self.start_offset = tokens_numbers[0] self.end_offset = tokens_numbers[(- 1)] ...
class STSBenchmarkEval(STSEval): def __init__(self, task_path, seed=1111): logging.debug('\n\n***** Transfer task : STSBenchmark*****\n\n') self.seed = seed self.samples = [] train = self.loadFile(os.path.join(task_path, 'sts-train.csv')) dev = self.loadFile(os.path.join(task...
class PartitionsGreatestLE(UniqueRepresentation, IntegerListsLex): def __init__(self, n, k): IntegerListsLex.__init__(self, n, max_slope=0, min_part=1, max_part=k) self.n = n self.k = k def _repr_(self): return ('Partitions of %s having parts less than or equal to %s' % (self.n, ...
def prep_txt(trsfile, tmpbase, dictfile): words = [] with open(trsfile, 'r') as fid: for line in fid: line = line.strip() for pun in [',', '.', ':', ';', '!', '?', '"', '(', ')', '--', '---']: line = line.replace(pun, ' ') for wrd in line.split(): ...
def count_parameters(model: nn.Module) -> float:
    """Return the number of trainable parameters of *model*, in millions."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable) / 1e6
# Concrete word-path class combining an iterator-backed word datatype with
# north-east path geometry; all behavior comes from the three bases.
class FiniteWordPath_north_east_iter(WordDatatype_iter, FiniteWordPath_north_east, FiniteWord_class):
    pass
class Wikitext103Scenario(Scenario): name = 'wikitext_103' description = 'The WikiText language modeling dataset containing over 103 million words' tags = ['language_modeling'] def get_instances(self, output_path: str) -> List[Instance]: data_path = os.path.join(output_path, 'data') ensu...
def recursively_load_weights(fairseq_model, hf_model, is_finetuned): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.unispeech.feature_extractor for (name, value) in fairseq_dict.items(): is_used = False if ('conv_layers' in name): l...
class Net(nn.Module):
    """Tiny module holding a 10->20 linear layer and a ReLU activation.

    No ``forward`` is defined here; sub-modules are only registered.
    """

    def __init__(self):
        super().__init__()
        # Register sub-modules in the same order as before.
        linear_layer = nn.Linear(10, 20)
        self.linear = linear_layer
        self.relu = nn.ReLU()
def test_control_cg_restart_periodic(ocp): ocp.config.set('AlgoCG', 'cg_method', 'DY') ocp.config.set('AlgoCG', 'cg_periodic_restart', 'True') ocp.config.set('AlgoCG', 'cg_periodic_its', '5') ocp.solve(algorithm='ncg', rtol=0.01, atol=0.0, max_iter=10) assert (ocp.solver.relative_norm <= ocp.solver....
def main(args, override_args=None): utils.import_user_module(args) use_fp16 = args.fp16 use_cuda = (torch.cuda.is_available() and (not args.cpu)) if (override_args is not None): overrides = vars(override_args) overrides.update(eval(getattr(override_args, 'model_overrides', '{}'))) el...
def load_psicov_data(path, batch_size): all_feat_paths = [f'{path}/deepcov/features/', f'{path}/psicov/features/', f'{path}/cameo/features/'] all_dist_paths = [f'{path}/deepcov/distance/', f'{path}/psicov/distance/', f'{path}/cameo/distance/'] deepcov_list = load_list(f'{path}/deepcov.lst', (- 1)) lengt...
class ConsumingSplitter(BaseCrossValidator, GroupsConsumerMixin): def __init__(self, registry=None): self.registry = registry def split(self, X, y=None, groups='default', metadata='default'): if (self.registry is not None): self.registry.append(self) record_metadata_not_defau...
def main(): args = parse_args() num_gpus = torch.cuda.device_count() cfg = load_cfg_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() assert (cfg.TEST.BATCH_SIZE == 1) isCPU = args.cpu output_dir = cfg.OUTPUT_DIR if output_dir: config_path = osp.splitext...
def sysconfig_get_config_vars(*args): real_vars = old_get_config_vars(*args) if (sys.platform == 'win32'): lib_dir = os.path.join(sys.real_prefix, 'libs') if (isinstance(real_vars, dict) and ('LIBDIR' not in real_vars)): real_vars['LIBDIR'] = lib_dir elif (isinstance(real_var...
_tf _keras_nlp class GPTTokenizationTest(unittest.TestCase): def setUp(self): super().setUp() self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CH...
class Dict(object): def __init__(self, data=None, lower=False): self.idxToLabel = {} self.labelToIdx = {} self.frequencies = {} self.lower = lower self.special = [] if (data is not None): if (type(data) == str): self.loadFile(data) ...
class Alt(RE): def __init__(self, *re_list): self.re_list = re_list nullable = 0 match_nl = 0 nullable_res = [] non_nullable_res = [] i = 1 for re in re_list: self.check_re(i, re) if re.nullable: nullable_res.append(re) ...
def fusionv3s(**kwargs):
    """Build a FusionV3 network on top of a pretrained SqueezeNet 1.1 backbone.

    NOTE(review): **kwargs is accepted but ignored — presumably for
    registry-style factory compatibility; confirm with callers.
    """
    backbone = squeezenet1_1(pretrained=True)
    return CreateNetFusionV3(backbone, stack=True)
def train_batch_cached(args, batch, clusterings):
    """Move *batch* to the configured device and score it with ``_train_batch``.

    Returns whatever distance value ``_train_batch`` produces.
    """
    features = to_device(batch, args.computation.device)
    return _train_batch(args, features, clusterings)
class SimCLR(BaseSelfSupervisedModel): def __init__(self, backbone: nn.Module, params: Namespace): super().__init__(backbone, params) self.head = ProjectionHead(backbone.final_feat_dim, out_dim=params.model_simclr_projection_dim) self.ssl_loss_fn = NTXentLoss(temperature=params.model_simclr_...
_interact(A=(lambda : slider((- 7), 7, 1, 1)), B=(lambda : slider((- 7), 7, 1, 1)), C=(lambda : slider((- 7), 7, 1, (- 2)))) def quadratic_equation(A, B, C): x = SR.var('x') f = symbolic_expression((((A * (x ** 2)) + (B * x)) + C)).function(x) html('<h2>The Solutions of the Quadratic Equation</h2>') htm...
class XLNetTokenizer(PreTrainedTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES padding_side = 'left' def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_ac...
class SelfAttention(nn.Module): def __init__(self, in_dim, activation, with_attn=False): super(SelfAttention, self).__init__() self.chanel_in = in_dim self.activation = activation self.with_attn = with_attn self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=(in_dim ...
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training): features = [] if (is_training and (not example.is_impossible)): start_position = example.start_position end_position = example.end_position actual_text = ' '.join(...
class NumericalObsField(NumericalDataFrameField):
    """Numerical data-frame field whose field type is fixed to ``'obs'``."""

    def __init__(self, *args, **kwargs):
        # Force field_type='obs'; every other argument passes through unchanged.
        super().__init__(*args, field_type='obs', **kwargs)
def reload_session(model_name):
    """Clear the current Keras backend session, then reload and return the
    model stored at *model_name*."""
    keras.backend.clear_session()
    return keras.models.load_model(model_name)
# Import-guard placeholder (transformers dummy-object pattern): constructing
# this class raises unless the 'torch' backend is available.
class MegaForTokenClassification(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_convection(H_hat, U_hat, K, VFSp, VCp, FSTp, FCTp, work, u_dealias, curl_dealias, curl_hat, mat, la, **context):
    """Evaluate the convection term selected by ``params.convection``.

    Looks up the convection implementation via ``getConvection`` and applies
    it, returning the updated H_hat.
    """
    convection = getConvection(params.convection)
    return convection(H_hat, U_hat, K, VFSp, VCp, FSTp, FCTp, work,
                      u_dealias, curl_dealias, curl_hat, mat, la)
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False): p_numerators = Counter() p_denominators = Counter() (hyp_lengths, ref_lengths) = (0, 0) assert (len(list_of_references) == len(hypotheses)), 'The number of hypotheses and their...
class PartialHypothesis(object): def __init__(self, initial_states=None, use_stats=True): self.predictor_states = initial_states self.trgt_sentence = [] (self.score, self.base_score) = (0.0, 0.0) self.score_breakdown = [] self.word_to_consume = None self.statistics = ...
.parametrize('module', MODULES) def test_networkpass_on_generate_function(module): (_, inputs) = module verbose = 1 callback = nnp_graph.NnpNetworkPass(verbose) _generate_function_by_name('Convolution') def change_convolution_param(f): print('{}'.format(f.proto.convolution_param.pad.dim[:]))...
class AdvContrastiveSummarizer(nn.Module): def __init__(self, args): super(AdvContrastiveSummarizer, self).__init__() self.tau = args.tau self.neg_eps = args.neg_eps self.pos_eps = args.pos_eps self.t5_model = T5ForConditionalGeneration.from_pretrained(args.t5_model) ...
def format_ops(ops, sort_outputs=True): if (hasattr(ops, '__iter__') and (not isinstance(ops, str))): l = [(op.name if hasattr(op, 'name') else str(op)) for op in ops] if sort_outputs: return sorted(l) return l else: return (ops.name if hasattr(ops, 'name') else str(o...
def fanin_init(tensor):
    """Initialize *tensor* in place with the fan-in uniform scheme.

    Samples uniformly from [-1/sqrt(fan_in), 1/sqrt(fan_in)], where fan_in
    is the first dimension for 2-D tensors (rlkit convention) and the
    product of the trailing dimensions for higher-rank tensors.

    Args:
        tensor: a torch tensor with at least two dimensions.

    Returns:
        The underlying data tensor, updated in place.

    Raises:
        Exception: if the tensor has fewer than two dimensions.
    """
    size = tensor.size()
    if len(size) == 2:
        fan_in = size[0]
    elif len(size) > 2:
        fan_in = np.prod(size[1:])
    else:
        # Fixed the grammar of the original message ("must be have").
        raise Exception('Shape must have dimension at least 2.')
    bound = 1.0 / np.sqrt(fan_in)
    return tensor.data.uniform_(-bound, bound)
def create_semi_supervised_annotation(path, anno_num=342996): with open(path, 'r') as f: data = json.load(f) coco_api = COCO(path) imgs = coco_api.imgs img_ids = list(imgs.keys()) random.shuffle(img_ids) num = 0 image_infos = [] for img_id in img_ids: image_info = imgs[im...
class SR(object): def __init__(self, K): self.K = K self.name = '{}'.format(K) def apply(self, suggestions, targets): def _sr_at_k(ranks, k): found_queries = 0 for rank in ranks: found_queries += sum([1.0 for (r, p) in rank if (p <= k)]) ...
class FindStatCompoundMap(Element, FindStatCombinatorialMap): def __init__(self, id, domain=None, codomain=None, check=True): if isinstance(id, (int, Integer)): id = (FINDSTAT_MAP_PADDED_IDENTIFIER % id) elif isinstance(id, FindStatCombinatorialMap): id = id.id_str() ...
# Lightweight output container for TF semantic-segmentation models.
class TFSemanticSegmenterOutput(ModelOutput):
    # Optional training loss.
    loss: Optional[tf.Tensor] = None
    # Per-pixel classification logits.
    logits: tf.Tensor = None
    # Hidden states of all layers, when requested.
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    # Attention weights of all layers, when requested.
    attentions: Optional[Tuple[tf.Tensor]] = None
def D_logistic_r2(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0): _ = (opt, training_set) latents = tf.random.normal(([minibatch_size] + G.input_shapes[0][1:])) fake_images_out = G.get_output_for(latents, labels, is_training=True) real_scores_out = D.get_output_for(reals, labels, is...
class Function(object): def __init__(self, type_name, inputs, params): self.type_name = type_name self.inputs = inputs self.params = params self.ntop = self.params.get('ntop', 1) if ('ntop' in self.params): del self.params['ntop'] self.in_place = self.para...
class AugmenterInfoAssessment(AugAssessmentBase): def __call__(self, X_tar_tr: np.ndarray, Y_tar_tr: np.ndarray, X_tar_te: np.ndarray, Y_tar_te: np.ndarray, augmenter_output: tuple, epoch: Optional[int]=None): (_X, _, _, acceptance_ratio) = augmenter_output return {'Augmented data size': len(_X), 'A...
def build_ddp(model, device='cuda', *args, **kwargs): assert (device in ['cuda', 'mlu']), 'Only available for cuda or mlu devices.' if (device == 'cuda'): model = model.cuda() elif (device == 'mlu'): from mmcv.device.mlu import MLUDistributedDataParallel ddp_factory['mlu'] = MLUDistr...
def main(args): jobs_to_complete = set() (jobs, arrival_times) = utils.parse_trace(args.trace_file) if ((args.window_start is not None) and (args.window_end is not None)): for i in range(args.window_start, args.window_end): jobs_to_complete.add(JobIdPair(i, None)) else: for i...
class CallbackHandler(TrainerCallback): def __init__(self, callbacks, model, optimizer, lr_scheduler): self.callbacks = [] for cb in callbacks: self.add_callback(cb) self.model = model self.optimizer = optimizer self.lr_scheduler = lr_scheduler self.train_...
def parameters_union(left: (JSONMapping | None), right: (JSONMapping | None), *, exclude: (Collection[tuple[(str, JSONSerializable)]] | None)=None) -> (JSONMapping | None): has_exclusions = ((exclude is not None) and (len(exclude) > 0)) items = [] if (left is not None): items.append(left.items()) ...
class EmbeddingTyingHead(BaseHead): def __init__(self, item_embedder: BertEmbedding, n_items: int): super().__init__() self._item_embedder = item_embedder self.out_bias = torch.nn.Parameter(torch.Tensor(n_items)) self.out_bias.data.normal_(0, 0.01) def get_item_embeddings(self) -...
def create_conda_lock_file(build_metadata): build_name = build_metadata['build_name'] folder_path = Path(build_metadata['folder']) environment_path = (folder_path / f'{build_name}_environment.yml') platform = build_metadata['platform'] lock_file_basename = build_name if (not lock_file_basename.e...
def main(): parser = argparse.ArgumentParser() parser.add_argument('src_root', default='src', help='Root directory with all source files. Expected structure is root dir -> language dirs -> package dirs -> text files to process') parser.add_argument('tgt_root', default='tgt', help='Root directory with all t...
def create_experiment_directory(experiment_directory, hyperparams_to_save=None, overrides={}, log_config=DEFAULT_LOG_CONFIG, save_env_desc=True): try: if sb.utils.distributed.if_main_process(): if (not os.path.isdir(experiment_directory)): os.makedirs(experiment_directory) ...
class ResNet(nn.Module): def __init__(self, block, layers, in_chans=3, num_classes=1000, fully_conv=False, remove_avg_pool_layer=False, output_stride=32): self.output_stride = output_stride self.current_stride = 4 self.current_dilation = 1 self.remove_avg_pool_layer = remove_avg_pool...
class Score():
    """Mixin providing aggregate-score and (natural) gradient helpers.

    Subclasses are expected to supply ``score``, ``d_score`` and ``metric``.
    """

    def total_score(self, Y, sample_weight=None):
        """Weighted average of the per-sample scores for *Y*."""
        return np.average(self.score(Y), weights=sample_weight)

    def grad(self, Y, natural=True):
        """Score gradient; premultiplied by the inverse metric when *natural*."""
        g = self.d_score(Y)
        if not natural:
            return g
        return np.linalg.solve(self.metric(), g)
def parse_safeloras_embeds(safeloras) -> Dict[(str, torch.Tensor)]: embeds = {} metadata = safeloras.metadata() for key in safeloras.keys(): meta = metadata.get(key) if ((not meta) or (meta != EMBED_FLAG)): continue embeds[key] = safeloras.get_tensor(key) return embed...
def max_limited_min(weights, lengths, max_ratio, min_inst_ratio): n = weights.shape[0] glob_sort = global_argsort(weights) rev_glob_sort = reversed([(glob_sort[0][i], glob_sort[1][i]) for i in range(len(glob_sort[0]))]) remaining = [math.ceil((l * min_inst_ratio)) for l in lengths] max_tokens = math...
def test_2d_access():
    """Regression test: a 2-D indexed conditional write must match NumPy
    semantics when the SDFG is built without simplification."""
    print('Running without simplification...')
    A = np.random.rand(4, 2)
    expected = A.copy()
    expected[0, 0] = 100.0 if expected[1, 1] < 0.5 else -100.0
    sdfg = arr2dtest.to_sdfg(simplify=False)
    sdfg(A=A)
    assert np.allclose(A, expected)
def findmax(cp, len2): size = 2 sample_count = _sample_count(cp, size) if ((len(cp) % 2) != 0): raise error('Strings should be even-sized') if ((len2 < 0) or (sample_count < len2)): raise error('Input sample should be longer') if (sample_count == 0): return 0 result = _su...
def write_analogy_from_pairs(analogy_pair, description, file): line = '{}\n{}\n{}\n{}\n\n' analogies = pairs_from_array(analogy_pair) num_ops = len(analogies) with open(file, 'a') as f: f.write((((('\n\n# ' + description) + ': ') + str(num_ops)) + '\n\n')) for i in range(num_ops): ...
def K(dom, v):
    """Return a Z3 constant array over sort *dom* mapping every index to *v*."""
    if z3_debug():
        _z3_assert(is_sort(dom), 'Z3 sort expected')
    ctx = dom.ctx
    # Coerce Python values into Z3 expressions before building the array.
    value = v if is_expr(v) else _py2expr(v, ctx)
    const_array = Z3_mk_const_array(ctx.ref(), dom.ast, value.as_ast())
    return ArrayRef(const_array, ctx)
class GCNBlock(BasicBlock):
    """Basic block wrapping a single GCN convolution layer."""

    def __init__(self, in_channels, out_channels, norm='layer'):
        # NOTE(review): the base class is passed (norm, in_channels) in this
        # order — confirm it matches BasicBlock's constructor signature.
        super(GCNBlock, self).__init__(norm, in_channels)
        self.gcn = GCNConv(in_channels, out_channels)
def one_of_k_encoding(x, allowable_set):
    """One-hot encode *x* against the ordered collection *allowable_set*.

    Args:
        x: the value to encode.
        allowable_set: the ordered collection of permitted values.

    Returns:
        A list of booleans, True exactly at the position(s) equal to *x*.

    Raises:
        ValueError: if *x* is not a member of *allowable_set*.
    """
    if x not in allowable_set:
        # ValueError (a subclass of Exception) is more precise yet still
        # backward-compatible; the original message's stray colon and
        # missing space ("set{1}:") are also fixed.
        raise ValueError('input {0} not in allowable set {1}'.format(x, allowable_set))
    return [x == s for s in allowable_set]
class TFCvtStage(tf.keras.layers.Layer): def __init__(self, config: CvtConfig, stage: int, **kwargs): super().__init__(**kwargs) self.config = config self.stage = stage if self.config.cls_token[self.stage]: self.cls_token = self.add_weight(shape=(1, 1, self.config.embed_d...
def profile_analyst(func: Optional[object]=None, class_hooks: Optional[Iterable[str]]=None): profiler_kwargs = {'schedule': schedule(), 'on_trace_ready': None, 'record_shapes': True, 'profile_memory': True, 'with_stack': True, 'with_flops': True, 'with_modules': True, 'class_hooks': class_hooks} wrapped_func = ...
class ThroughputNormalizedByCostSumWithPerfSLOs(Policy): def __init__(self, solver, num_threads=None): self._num_threads = num_threads Policy.__init__(self, solver) self._name = 'ThroughputNormalizedByCostSum_PerfSLOs' def get_allocation(self, unflattened_throughputs, scale_factors, clus...
def singleton_analysis(data): gold_singletons = 0 pred_singletons = 0 non_singleton_evaluator = CorefEvaluator() gold_cluster_lens = [] pred_cluster_lens = [] overlap_sing = 0 total_sing = 0 pred_sing = 0 for instance in data: gold_clusters = set([tuple(cluster[0]) for cluste...
class CityscapesLabelTool(QtGui.QMainWindow): def __init__(self): super(CityscapesLabelTool, self).__init__() configDir = os.path.dirname(__file__) self.configFile = os.path.join(configDir, 'cityscapesLabelTool.conf') self.config = configuration() self.config.load(self.config...
class FastLinearCombinationClamp(EpilogueFunctorBase): tag = 'cutlass::epilogue::thread::FastLinearCombinationClamp' def __init__(self, element_output, epilogue_vector_length, *args) -> None: super().__init__() self.template_arguments = [DataTypeTag[element_output], str(epilogue_vector_length)] ...
class LinearAddModel(nn.Module): def __init__(self): super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = torch.add(x, 5) x = self.fc2(x) ...
def build_c3d(pretrained=True):
    """Build a C3D feature extractor with its last six child modules dropped."""
    backbone = C3D(pretrained=pretrained, model_dir='/PRETRAINED_MODEL_DIR')
    layers = list(backbone.children())[:-6]
    return nn.Sequential(*layers)
class MlpGoalEncoder(nn.Module): def __init__(self, goal_vocab_size, k, nembed, nhid, init_range): super(MlpGoalEncoder, self).__init__() self.cnt_enc = nn.Embedding(goal_vocab_size, nembed) self.val_enc = nn.Embedding(goal_vocab_size, nembed) self.encoder = nn.Sequential(nn.Tanh(), ...
def get_git_hash_suffix() -> str:
    """Return ``'-<hash>'`` for the current git hash, or ``'-nogit'`` when
    no hash is available."""
    git_hash: Optional[str] = get_git_hash()
    if git_hash is None:
        return '-nogit'
    return f'-{git_hash}'
class Resnet3dEmbeddingMultiDecoder(Resnet3d): def __init__(self, tw=8, sample_size=112, e_dim=7, decoders=None): super(Resnet3dEmbeddingMultiDecoder, self).__init__(tw=tw, sample_size=sample_size) resnet = resnet50_no_ts(sample_size=sample_size, sample_duration=tw) self.encoder = Encoder3d(...
def add_edge_dummy(G, parent_prefix, parent_node, stmt, ad_hoc_count): stmt_check = '' if (len(stmt_check) > 0): if (stmt_check in stmt): print('Found stmt', stmt) parent_node_ = (parent_prefix + parent_node) assert (parent_node_ in list(G.nodes())), ((('Node not added to graph:\n' +...
class pretrain_dataset(Dataset): def __init__(self, ann_file, laion_path, transform, img_mixup=False): self.img_root = '/CC3M/images' self.ann_pretrain = [] for f in ann_file: print(('loading ' + f)) ann = json.load(open(f, 'r')) self.ann_pretrain += ann ...
def test_keras_predictor_repr_includes_class_name() -> None:
    """The predictor's repr must mention its own class name."""
    predictor = _DummyKerasPredictor()
    class_name = type(predictor).__name__
    assert class_name in repr(predictor)
class Net(nn.Module): def __init__(self, res_blocks=18): super(Net, self).__init__() self.conv_input = ConvLayer(3, 16, kernel_size=11, stride=1) self.dense0 = nn.Sequential(ResidualBlock(16), ResidualBlock(16), ResidualBlock(16)) self.conv2x = ConvLayer(16, 32, kernel_size=3, stride...
# Import-guard placeholder (transformers dummy-object pattern): constructing
# this class raises unless the 'torch' backend is available.
class PerceiverPreTrainedModel(metaclass=DummyObject):
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
_model_architecture('masked_lm', 'xlm_base') def xlm_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', True) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_...
def avg_pool3d(inputs, kernel_size, scope, stride=[2, 2, 2], padding='VALID'): with tf.variable_scope(scope) as sc: (kernel_d, kernel_h, kernel_w) = kernel_size (stride_d, stride_h, stride_w) = stride outputs = tf.nn.avg_pool3d(inputs, ksize=[1, kernel_d, kernel_h, kernel_w, 1], strides=[1, ...
def GetNodeEcc(tspec, *args): if (type(tspec) == PUNGraph): return GetNodeEcc_PUNGraph(tspec, *args) if (type(tspec) == PUndirNet): return GetNodeEcc_PUndirNet(tspec, *args) if (type(tspec) == PDirNet): return GetNodeEcc_PDirNet(tspec, *args) if (type(tspec) == PNGraph): ...
def log_string(out_str):
    """Append *out_str* to the module-level LOG_FOUT log file (flushing
    immediately) and echo it to stdout."""
    global LOG_FOUT
    line = out_str + '\n'
    LOG_FOUT.write(line)
    LOG_FOUT.flush()
    print(out_str)
def check_files(name):
    """Print the first missing expected file under directory *name*.

    'Monolithic' runs check only the first three entries of the
    module-level ``files`` list; stops at the first missing file.
    """
    trial_files = files[:3] if 'Monolithic' in name else files
    for file in trial_files:
        if not os.path.exists(f'{name}/{file}'):
            print(file)
            print(f'Missing File: {name}')
            break
def broadcast_parameters(model): if (cfg.NUM_GPUS == 1): return def _do_broadcast(all_blobs): assert ((len(all_blobs) % cfg.NUM_GPUS) == 0), 'Unexpected value for NUM_GPUS. Make sure you are not running single-GPU inference with NUM_GPUS > 1.' blobs_per_gpu = int((len(all_blobs) / cfg.NU...
.parametrize('shuffle', [False, True]) def test_simple_data_source(test_data_csv_png_20, shuffle): src_data = [] with open(test_data_csv_png_20) as f: for l in f.readlines(): values = [x.strip() for x in l.split(',')] img_file_name = os.path.join(os.path.dirname(test_data_csv_png...
_model def gluon_resnext50_32x4d(pretrained=False, num_classes=1000, in_chans=3, **kwargs): default_cfg = default_cfgs['gluon_resnext50_32x4d'] model = ResNet(Bottleneck, [3, 4, 6, 3], cardinality=32, base_width=4, num_classes=num_classes, in_chans=in_chans, **kwargs) model.default_cfg = default_cfg if ...
def getLabelID(self, instID):
    """Map an instance ID to its label ID.

    IDs below 1000 are plain label IDs; larger instance IDs encode the
    label in the thousands (label*1000 + index).
    """
    if instID < 1000:
        return instID
    return int(instID / 1000)
class Timer(): def __enter__(self): self.start = timeit.default_timer() return self def __exit__(self, *args): self.stop = timeit.default_timer() self.time = (self.stop - self.start) def __str__(self): return f'Duration: {self.time}, Start: {self.start}, Stop: {self.s...
class KleinFourGroup(PermutationGroup_unique):
    """The Klein four-group as a permutation group on {1, 2, 3, 4}."""

    def __init__(self):
        # Generated by the two disjoint transpositions (1 2) and (3 4).
        generators = [(1, 2), (3, 4)]
        PermutationGroup_generic.__init__(self, generators)

    def _repr_(self):
        return 'The Klein 4 group of order 4, as a permutation group'
class FairseqMultiModel(BaseFairseqModel): def __init__(self, encoders, decoders): super().__init__() assert (encoders.keys() == decoders.keys()) self.keys = list(encoders.keys()) for key in self.keys: assert isinstance(encoders[key], FairseqEncoder) assert is...
class DetailBranch(BaseModule): def __init__(self, detail_channels=(64, 64, 128), in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'), init_cfg=None): super(DetailBranch, self).__init__(init_cfg=init_cfg) detail_branch = [] for i in range(len(detail_channels)): ...
class UnetGenerator(nn.Module): def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, gpu_ids=[]): super(UnetGenerator, self).__init__() self.gpu_ids = gpu_ids unet_block = UnetSkipConnectionBlock((ngf * 8), (ngf * 8), input_nc=None, submodu...
def test_larger_than_dependency_config(config_sop, F, bcs, J, y, p, geometry): config_sop.set('ShapeGradient', 'dist_max', '0.5') with pytest.raises(ConfigError) as e_info: cashocs.ShapeOptimizationProblem(F, bcs, J, y, p, geometry.boundaries, config=config_sop) assert ('The value of key dist_max in...
class TabularNLPAutoML(TabularAutoML): _default_config_path = 'text_config.yml' _time_scores = {'lgb': 1, 'lgb_tuned': 3, 'linear_l2': 0.7, 'cb': 2, 'cb_tuned': 6, 'nn': 1} def __init__(self, task: Task, timeout: int=3600, memory_limit: int=16, cpu_limit: int=4, gpu_ids: Optional[str]='all', timing_params: ...
def register_Ns3SpectrumChannelHelper_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::SpectrumChannelHelper const &', 'arg0')]) cls.add_method('AddPropagationLoss', 'void', [param('std::string', 'name'), param('std::string', 'n0', default_value='""'), param('ns3::Attribut...
def FunctionSoftsplat(tenInput, tenFlow):
    """Apply the softmax-splatting autograd function to *tenInput* warped
    along *tenFlow* and return its output."""
    return _FunctionSoftsplat.apply(tenInput, tenFlow)
class PositionwiseFeedForward(nn.Module): def __init__(self, d_in, d_hid, dropout=0.1): super().__init__() self.w_1 = nn.Linear(d_in, d_hid) self.w_2 = nn.Linear(d_hid, d_in) self.layer_norm = nn.LayerNorm(d_in) self.dropout = nn.Dropout(dropout) def forward(self, x): ...
def discriminative_loss_single(prediction, correct_label, feature_dim, label_shape, delta_v, delta_d, param_var, param_dist, param_reg): correct_label = tf.reshape(correct_label, [(label_shape[1] * label_shape[0])]) reshaped_pred = tf.reshape(prediction, [(label_shape[1] * label_shape[0]), feature_dim]) (un...
class InPlaceABN(autograd.Function): def forward(ctx, x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation=ACT_LEAKY_RELU, slope=0.01): ctx.training = training ctx.momentum = momentum ctx.eps = eps ctx.activation = activation ctx.slop...