code
stringlengths
101
5.91M
def op0_part_lean(reg: Register, off: int):
    """Render the Lean fragment assigning op0, with a trailing ', ' separator."""
    fragment = "op0:='op0" + reg_offset_lean(reg, off)
    return fragment + ', '
class PersLandscape(ABC): def __init__(self, dgms: list=[], hom_deg: int=0) -> None: if (not isinstance(hom_deg, int)): raise TypeError('hom_deg must be an integer') if (hom_deg < 0): raise ValueError('hom_deg must be positive') if (not isinstance(dgms, (list, tuple, ...
def backward_version(done_fwds, done_bwds, se) -> int:
    """Version used for the backward pass: own version plus expected staleness."""
    base = my_version(done_bwds, se)
    staleness = expected_staleness(done_fwds, done_bwds, se)
    return base + staleness
class RoomGrid(MiniGridEnv): def __init__(self, room_size=7, num_rows=3, num_cols=3, max_steps=100, seed=0): assert (room_size > 0) assert (room_size >= 3) assert (num_rows > 0) assert (num_cols > 0) self.room_size = room_size self.num_rows = num_rows self.num...
class TestBackends(JitTestCase): def __init__(self, name): super().__init__(name) self.basic_module_test = BasicModuleTest(name) self.nested_module_test = NestedModuleTest(name) def setUp(self): super().setUp() if (not TEST_WITH_ROCM): self.basic_module_test.s...
def test_broadcast_and_apply_levels(): arrays = [ak.highlevel.Array([[[0.0, 1.1, 2.2], []], [[3.3, 4.4]], [[5.5], [6.6, 7.7, 8.8, 9.9]]]).layout, ak.highlevel.Array([[[10, 20], [30]], [[40]], [[50, 60, 70], [80, 90]]]).layout] assert (ak.operations.concatenate(arrays, axis=0).to_list() == [[[0.0, 1.1, 2.2], []]...
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data): plt.figure() plot_results(all_batch_sizes, all_errors['pca'], label='PCA') plot_results(all_batch_sizes, all_errors['ipca'], label='IncrementalPCA') plt.legend(loc='lower left') plt.suptitle(('Algorithm error vs. batch_size for n_...
def load_config(config_name='config.yaml'):
    """Load and parse a YAML config file located under BASE_DIR.

    Args:
        config_name: file name relative to BASE_DIR.

    Returns:
        The parsed configuration (typically a dict).
    """
    # Fixes two defects in the original: the file handle was never closed
    # (use a context manager), and yaml.load() without an explicit Loader is
    # deprecated and unsafe on untrusted input — safe_load constructs only
    # plain Python objects. 'r' replaces the unnecessary 'r+' mode.
    with open(os.path.join(BASE_DIR, config_name), 'r') as cfg_f:
        return yaml.safe_load(cfg_f)
def get_data_tensors(df): padded_shapes = {'len_book_idxs': [], 'book_idxs': [None], 'book_idx': [], 'label': []} dataset = tf.data.Dataset.from_generator(get_data_points_generator(df), {k: tf.int64 for k in padded_shapes}).shuffle(123).repeat(None).padded_batch(batch_size=256, padded_shapes=padded_shapes) ...
def parse_args(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('-p', '--path', default='../data/caches/output_contrastive_temp_contrastive_inferred_cache_186629', type=str) parser.add_argument('-k', '--k', default=1000, type=int) parser.add_argument('-w', '--num-workers', de...
def merge_roidb(roidbs):
    """Concatenate every roidb into the first one (in place) and return it."""
    merged = roidbs[0]
    for extra in roidbs[1:]:
        merged.extend(extra)
    return merged
class GatherPoints(Function): def forward(ctx, features: torch.Tensor, indices: torch.Tensor) -> torch.Tensor: assert features.is_contiguous() assert indices.is_contiguous() (B, npoint) = indices.size() (_, C, N) = features.size() output = torch.cuda.FloatTensor(B, C, npoint)...
class TestPartitionScheme(unittest.TestCase): def setUp(self): self.ps1 = PartitionScheme(order=[pe.BATP, pe.OUTP, pe.OFMP, pe.INPP], pdims=[(2, 3), (3, 1), (1, 5), (5, 2)]) self.ps2 = PartitionScheme(order=list(range(pe.NUM)), pdims=[(2, 2), (5, 5), (3, 3), (1, 1)]) self.nr1 = NodeRegion(or...
class netcdf_file(): def __init__(self, filename, mode='r', mmap=None, version=1, maskandscale=False): if (mode not in 'rwa'): raise ValueError("Mode must be either 'r', 'w' or 'a'.") if hasattr(filename, 'seek'): self.fp = filename self.filename = 'None' ...
def register_data_path(collaborator_name, data_path=None, silent=False): from click import prompt from os.path import isfile if (data_path and is_directory_traversal(data_path)): echo('Data path is out of the openfl workspace scope.') sys.exit(1) default_data_path = f'data/{collaborator_...
def conv4x4t(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Build a 4x4 transposed convolution without bias; padding tracks dilation."""
    kwargs = dict(kernel_size=4, stride=stride, padding=dilation,
                  groups=groups, bias=False, dilation=dilation)
    return nn.ConvTranspose2d(in_planes, out_planes, **kwargs)
def sess_interest_extractor(tr_input, sess_max_count, TR):
    """Run transformer TR on each session (self-attention: query == key),
    then concatenate the per-session features along axis 1."""
    tr_out = [TR([tr_input[i], tr_input[i]]) for i in range(sess_max_count)]
    return concat_func(tr_out, axis=1)
def _get_trainer_configuration(benchmark_name, mode): tf.keras.backend.clear_session() benchmark = benchmarks.Benchmark(benchmark_name, mode=mode) if (mode == 'posterior'): amortizer = AmortizedPosterior(InvertibleNetwork(**NETWORK_SETTINGS[benchmark_name][mode])) elif (mode == 'likelihood'): ...
def tf32_mode(cudnn=None, matmul=None): cudnn_old = torch.backends.cudnn.allow_tf32 matmul_old = torch.backends.cuda.matmul.allow_tf32 try: if (cudnn is not None): torch.backends.cudnn.allow_tf32 = cudnn if (matmul is not None): torch.backends.cuda.matmul.allow_tf32 =...
class ConfigGlobalGenerator(nn.Module): def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=3, norm_layer=nn.BatchNorm2d, padding_type='reflect', conv_kind='default', deconv_kind='convtranspose', activation=nn.ReLU(True), up_norm_layer=nn.BatchNorm2d, affine=None, up_activation=nn.ReLU(True),...
def register_Ns3PyVizRxPacketSample_methods(root_module, cls):
    """Register constructors and attributes for ns3::PyViz::RxPacketSample."""
    # Default constructor and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PyViz::RxPacketSample const &', 'arg0')])
    # Mutable sender-address attribute.
    cls.add_instance_attribute('from', 'ns3::Mac48Address', is_const=False)
def colonize(msg, sep=': '):
    """Append `sep` to `msg`; a falsy message yields the empty string."""
    return msg + sep if msg else ''
def _split_tensors_from_obj(obj: Any, tensors: List[torch.Tensor]) -> Any: if torch.is_tensor(obj): placeholder = _TensorPlaceholder(index=len(tensors)) tensors.append(obj) return placeholder elif isinstance(obj, dict): return {k: _split_tensors_from_obj(v, tensors) for (k, v) in...
class LOLTrainImageNet(LOLTrain): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.transforms = tf.Compose([RandomCrop(self.crop_size), RandomHorizontalFlip(), XToImageNetYToLDMTensor()]) def __getitem__(self, idx): (x, t) = (Image.open(self.image_paths[idx]), ...
class TestNet(nn.Module): def __init__(self): super(TestNet, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3) self.bn1 = nn.BatchNorm2d(num_features=64) self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3) self.bn2 = nn...
class MultiGraphAttentionCNN(Layer): def __init__(self, output_dim, num_filters=None, num_attention_heads=1, attention_combine='concat', attention_dropout=0.5, activation=None, use_bias=False, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_reg...
def load_tf_weights_in_electra(*args, **kwargs):
    # NOTE(review): dummy-object stub in the transformers style — presumably
    # requires_backends raises when the 'torch' backend is missing; confirm
    # against transformers.utils. Arguments are intentionally ignored.
    requires_backends(load_tf_weights_in_electra, ['torch'])
class Logger(): def __init__(self, path): self.full_path = ('%s/log.txt' % path) self.log_file = open(self.full_path, 'w+') self.log_file.close() self.map = {} def add_value(self, tag, value): self.map[tag] = value def log(self, iter): self.log_file = open(sel...
class TransformerDecoderLayerThin(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', normalize_before=False): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) self.multihead_attn = nn.MultiheadAttenti...
def _update_config(config, options, args): for opt in options: value = getattr(args, _get_opt_name(opt.flags)) if (value is not None): _set_by_path(config, opt.target, value) if ('target2' in opt._fields): _set_by_path(config, opt.target2, value) i...
class TestLSTMModel(TfGraphTestCase): def setup_method(self): super().setup_method() self.batch_size = 1 self.time_step = 1 self.feature_shape = 2 self.output_dim = 1 self.obs_inputs = np.full((self.batch_size, self.time_step, self.feature_shape), 1.0) self.ob...
class TorchMD_GN_optimized(pt.nn.Module): def __init__(self, model): if (model.rbf_type != 'gauss'): raise ValueError('Only rbf_type="gauss" is supproted') if model.trainable_rbf: raise ValueError('trainalbe_rbf=True is not supported') if (model.activation != 'ssp'): ...
def register_Ns3SixLowPanHelper_methods(root_module, cls): cls.add_constructor([param('ns3::SixLowPanHelper const &', 'arg0')]) cls.add_constructor([]) cls.add_method('AssignStreams', 'int64_t', [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')]) cls.add_method('Install', 'ns3::NetDevic...
def load_data(data_dir, task, k, seed, split): data_dir = os.path.join(data_dir, 'k-shot', task, '{}-{}'.format(k, seed)) data = [] if os.path.exists(os.path.join(data_dir, '{}.tsv'.format(split))): with open(os.path.join(data_dir, '{}.tsv'.format(split)), 'r') as f: for line in f: ...
class PolynomialLineSearch(line_search.LineSearch): def __init__(self, db: database.Database, optimization_problem: _typing.OptimizationProblem) -> None: super().__init__(db, optimization_problem) self.armijo_stepsize_initial = self.stepsize self.decrease_measure_w_o_step = 1.0 self....
def post_register_types(root_module): enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',') if ('EmuFdNetDevice' not in enabled_features): if ('ns3::EmuFdNetDeviceHelper' in root_module): root_module.classes.remove(root_module['ns3::EmuFdNetDeviceHelper']) if ('TapFdNetDevice'...
class RoundRobinZipDatasets(FairseqDataset): def __init__(self, datasets, eval_key=None): super().__init__() assert isinstance(datasets, OrderedDict) self.datasets = datasets self.eval_key = eval_key self.longest_dataset = None self.longest_dataset_key = None ...
def menu_func_import(self, context):
    """Append the importer operators to the import menu."""
    layout = self.layout
    layout.operator(LoadObjAsBase.bl_idname, text='Obj As Base Frame')
    layout.operator(LoadRigidAsAnimation.bl_idname, text='Json as Animation Frame')
def test_sanitize_serialized_check(serialized_check):
    """Sanitization must mask the X-Token header in both example and history."""
    sanitize_serialized_check(serialized_check)
    sanitized = serialized_check
    assert sanitized.example.extra_headers['X-Token'] == DEFAULT_REPLACEMENT
    assert sanitized.history[0].case.extra_headers['X-Token'] == DEFAULT_REPLACEMENT
def check_submodules(): from transformers.utils import direct_transformers_import transformers = direct_transformers_import(PATH_TO_TRANSFORMERS) import_structure_keys = set(transformers._import_structure.keys()) with open(os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), 'r') as f: init_conten...
class CP949Prober(MultiByteCharSetProber): def __init__(self): super(CP949Prober, self).__init__() self.coding_sm = CodingStateMachine(CP949_SM_MODEL) self.distribution_analyzer = EUCKRDistributionAnalysis() self.reset() def charset_name(self): return 'CP949' def lang...
class SetIntersectionPrompter(prompter.Prompter): intersection_prompt = '<Instruction> Find the intersection of two sets of numbers. Output only the set of numbers that are present in both sets, no additional text. </Instruction>\n\n<Examples>\nInput Set 1: [13, 16, 30, 6, 21, 7, 31, 15, 11, 1, 24, 10, 9, 3, 20, 8]...
# Negation-cue pattern, compiled once at import time instead of on every call.
_NEGATION_RGX = re.compile(
    '(no|not|never|cannot|negative for|negative|neg|absent|ruled out|'
    'without|absence of|den(y|ied|ies))', re.I)


def LF_short_sentence(span):
    """Labeling function: a short sentence (< 5 words) that contains no
    negation cue votes NON_NEGATED; otherwise abstain.

    Args:
        span: candidate span whose `.sentence` exposes `words` and `text`.

    Returns:
        NON_NEGATED or ABSTAIN.
    """
    is_short = len(span.sentence.words) < 5
    if is_short and not _NEGATION_RGX.search(span.sentence.text):
        return NON_NEGATED
    return ABSTAIN
def video_processing_spatial(dist): video_name = dist video_name_dis = video_name video_capture = cv2.VideoCapture() video_capture.open(video_name) cap = cv2.VideoCapture(video_name) video_channel = 3 video_height_crop = 448 video_width_crop = 448 video_length = int(cap.get(cv2.CAP_P...
class StandardScaler(TransformerMixin, BaseEstimator):
    # Stripped-down stand-in mirroring sklearn's StandardScaler constructor
    # signature; it stores the flags but performs no actual scaling here.
    def __init__(self, copy=True, with_mean=True, with_std=True):
        # Flags mirror the sklearn API; none are consulted in this version.
        self.with_mean = with_mean
        self.with_std = with_std
        self.copy = copy

    def transform(self, X, copy=None):
        # NOTE(review): returns `self`, not a transformed X — this looks like
        # a placeholder/no-op stub; confirm callers do not expect scaled data.
        return self
class NonSaturatingWithR1(BaseAdversarialLoss): def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False, mask_scale_mode='nearest', extra_mask_weight_for_gen=0, use_unmasked_for_gen=True, use_unmasked_for_discr=True): self.gp_coef = gp_coef self.weight = weight ...
def main(): args = parser.parse_args() if (args.gpu is not None): print('Use GPU: {} for training'.format(args.gpu)) print("=> creating model '{}'".format(args.arch)) if (args.arch == 'resnet50'): model = MoPro(resnet50, args, width=1) elif (args.arch == 'resnet50x2'): model ...
def get_session():
    """Create a TensorFlow session whose GPU memory grows on demand."""
    config = tf.ConfigProto()
    # Avoid grabbing all GPU memory up front.
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    return session
def main(unused_argv):
    """Dump checkpoint tensors, or print usage and exit when no file is given."""
    if FLAGS.file_name:
        print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name,
                                         FLAGS.all_tensors)
    else:
        print('Usage: inspect_checkpoint --file_name=checkpoint_file_name '
              '[--tensor_name=tensor_to_print]')
        sys.exit(1)
def test_lang_to_langcode():
    """Every casing of 'Hindi', plus the code itself, maps to 'hi'."""
    for name in ('Hindi', 'HINDI', 'hindi', 'HI', 'hi'):
        assert 'hi' == lang_to_langcode(name)
# NOTE(review): the decorator line appears truncated in this extract —
# presumably '@torch.jit.script'; confirm against the original source.
.script
def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
    """Per-row entropy of the softmax distribution over dim 1."""
    return (- (x.softmax(1) * x.log_softmax(1)).sum(1))
class HubertConfig(PretrainedConfig): model_type = 'hubert' def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_layer_norm=True, feat_proj_dropout=...
class WordDatatype_callable_with_caching(WordDatatype_callable): def __init__(self, parent, callable, length=None): super().__init__(parent, callable, length) self._letter_cache = {} def __iter__(self): if (self._len is Infinity): domain = itertools.count() else: ...
def cifar100_transform(normalize: bool=True, channels_last: bool=True, random_crop: Optional[int]=None): transform_list = [] if random_crop: transform_list.append(transforms.RandomCrop(random_crop)) transform_list.append(transforms.ToTensor()) if normalize: mean = [0.4914, 0.4822, 0.4465...
class DummyActorPolicy(BaseActorPolicy):
    """Test double that replays whatever action was last stored."""

    def __init__(self):
        super(DummyActorPolicy, self).__init__(identifier='dummy_policy')
        # Most recently stored action; replayed by act().
        self.action = None

    def act(self, obs):
        """Ignore the observation and return the stored action."""
        return self.action

    def add_action(self, action):
        """Store `action` for subsequent act() calls."""
        self.action = action
def test_scvi_with_minified_adata_save_then_load(save_path): (model, adata, _, _) = prep_model() scvi.settings.seed = 1 (qzm, qzv) = model.get_latent_representation(give_mean=False, return_dist=True) model.adata.obsm['X_latent_qzm'] = qzm model.adata.obsm['X_latent_qzv'] = qzv scvi.settings.seed...
class GradMultiply(torch.autograd.Function):
    """Identity in the forward pass; multiplies gradients by `scale` in backward.

    Fix: forward/backward on a torch.autograd.Function must be declared
    @staticmethod — modern PyTorch raises a RuntimeError from .apply() when
    they are plain instance methods.
    """

    @staticmethod
    def forward(ctx, x, scale):
        ctx.scale = scale
        # Copy-construct so the output is a fresh tensor detached from x.
        res = x.new(x)
        return res

    @staticmethod
    def backward(ctx, grad):
        # No gradient with respect to the `scale` argument.
        return grad * ctx.scale, None
def _kernel_valid(k): if isinstance(k, (list, tuple)): for ki in k: return _kernel_valid(ki) assert ((k >= 3) and (k % 2))
def get_completion_script(prog_name, complete_var, shell): cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH) return ((script % {'complete_func': '_{}_completion'.format(cf_name), 'script_names': prog_name, 'autocomplete_var'...
def test_main():
    """Smoke-test the CLI entry point end to end."""
    from topaz import main
    try:
        main.main()
    except SystemExit:
        # main() may exit via sys.exit (e.g. when run without arguments);
        # that is acceptable for this smoke test.
        pass
.parametrize('statement', [MagicMock(gao.GenericConstructor), MagicMock(gao.GenericMethod), MagicMock(gao.GenericFunction), MagicMock(gao.GenericField)]) def test_append_generic_statement(test_case_mock, statement): called = False def mock_method(t, s, position=0, allow_none=True, recursion_depth=11): n...
# NOTE(review): the decorator line appears truncated in this extract —
# presumably '@register_kl(Uniform, Normal)'; confirm against the source.
_kl(Uniform, Normal)
def _kl_uniform_normal(p, q):
    # Closed-form KL(U(low, high) || N(loc, scale)).
    common_term = (p.high - p.low)
    # log( sqrt(2*pi) * sigma / (high - low) )
    t1 = ((math.sqrt((math.pi * 2)) * q.scale) / common_term).log()
    # Variance of the uniform: (high - low)^2 / 12.
    t2 = (common_term.pow(2) / 12)
    # Squared distance between the uniform's mean and the normal's mean.
    t3 = (((p.high + p.low) - (2 * q.loc)) / 2).pow(2)
    return (t1 + ((0.5 * (t2 + t3)) / q.scale.pow(2)))
def classifier(x, n_classes=10): x = BatchNormalization()(x) x = ReLU()(x) x = AveragePooling2D(pool_size=8)(x) x = GlobalAvgPool2D()(x) h = Dense(n_classes, kernel_initializer='he_normal')(x) g = Dense(1, kernel_regularizer=l2(WEIGHT_DECAY))(x) g = BatchNormalization()(g) g = Activation...
class TwitterManagerReadTweet(VirtualFunctionTool): name = 'TwitterManagerReadTweet' summary = 'Read the content of a specific tweet by its ID.' parameters: List[ArgParameter] = [{'name': 'tweet_id', 'type': 'string', 'description': 'The unique identifier of the tweet to read.', 'required': True}] retur...
def make_nonuniform_grid(SimBorders: List[int], dx_default: List[int], Boxes: List[dict], grad_Mesh=0.05, step=1.0) -> (np.array, np.array, np.array): NX = int(((np.ceil(SimBorders[1]) - np.floor(SimBorders[0])) / step)) NY = int(((np.ceil(SimBorders[3]) - np.floor(SimBorders[2])) / step)) NZ = int(((np.cei...
class Partition0(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/StatelessEmbedding[embed_tokens]', 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[0]/T5LayerSelfAttention[0]/T5LayerNorm[layer_norm]', 'T5ForConditionalGene...
class WeakModularForms(FormsSpace_abstract, Module, UniqueRepresentation): def __classcall__(cls, group=HeckeTriangleGroup(3), base_ring=ZZ, k=QQ(0), ep=None, n=None): (group, base_ring, k, ep, n) = canonical_parameters(group, base_ring, k, ep, n) return super().__classcall__(cls, group=group, base_...
def filter_ops(ops, positive_filter):
    """Normalize `ops` to a list and keep those accepted by `positive_filter`.

    Passing `positive_filter is True` keeps everything.
    """
    op_list = util.make_list_of_op(ops)
    if positive_filter is True:
        return op_list
    return [op for op in op_list if positive_filter(op)]
def score_lm(args): using_nbest = (args.nbest_list is not None) (pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir) = rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, args.shard_id, arg...
def affect_init(real_weight, imag_weight, init_func, criterion):
    """Initialize complex weight halves in place from init_func's numpy pair."""
    rows, cols = real_weight.size(0), real_weight.size(1)
    real_np, imag_np = init_func(rows, cols, None, criterion)
    # Convert to tensors and match each target's dtype/device before assigning.
    real_weight.data = torch.from_numpy(real_np).type_as(real_weight.data)
    imag_weight.data = torch.from_numpy(imag_np).type_as(imag_weight.data)
class SYSID_reg(atomic_reg): OP_NAME = 'SYSID' _fields_ = [('imm0', ctypes.c_uint64, 32), ('reg_idx0', ctypes.c_uint64, 8), ('intr_en', ctypes.c_uint64, 1), ('tsk_typ', ctypes.c_uint64, 4), ('tsk_eu_typ', ctypes.c_uint64, 3), ('reg_idx1', ctypes.c_uint64, 8), ('reg_idx2', ctypes.c_uint64, 8), ('imm1', ctypes.c_...
def _assert_strides_are_log2_contiguous(strides): for (i, stride) in enumerate(strides[1:], 1): assert (stride == (2 * strides[(i - 1)])), 'Strides {} {} are not log2 contiguous'.format(stride, strides[(i - 1)])
def test_graphviz_toy(): clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2, criterion='gini', random_state=2) clf.fit(X, y) contents1 = export_graphviz(clf, out_file=None) contents2 = 'digraph Tree {\nnode [shape=box, fontname="helvetica"] ;\nedge [fontname="helvetica"] ;\n0 [label="x[0] <= ...
def p_target(s, terminator): pos = s.position() expr = p_starred_expr(s) if (s.sy == ','): s.next() exprs = [expr] while (s.sy != terminator): exprs.append(p_starred_expr(s)) if (s.sy != ','): break s.next() return ExprNodes...
def generate(num=1000): _deck = deck.copy() res = [] np.random.shuffle(_deck) landlord = _deck[:17] landlord.sort() other = _deck[17:] for _ in range(num): np.random.shuffle(other) card_play_data = {'landlord': (landlord + other[:3]), 'landlord_up': other[3:20], 'landlord_dow...
def read_ycbcr420(src_path, tar_paths, wdt, hgt, print_dir): ycbcr420_nfrms = read_planar(src_path, fmt=(((hgt, wdt), ((hgt // 2), (wdt // 2)), ((hgt // 2), (wdt // 2))) * nfrms)) for (idx, tar_path) in enumerate(tar_paths): ycrcb = np.empty((hgt, wdt, 3), np.uint8) ycrcb[(..., 0)] = ycbcr420_nf...
class CoupledPlanningPair(AgentPair): def __init__(self, agent): super().__init__(agent, agent, allow_duplicate_agents=True) def joint_action(self, state): joint_action_plan = self.a0.mlp.get_low_level_action_plan(state, self.a0.heuristic, delivery_horizon=self.a0.delivery_horizon, goal_info=Tru...
class ShannonEntropy(BaseMetric): def __init__(self, recommendations, config, params, eval_objects): super().__init__(recommendations, config, params, eval_objects) self._cutoff = self._evaluation_objects.cutoff self._num_items = self._evaluation_objects.num_items self._item_count = ...
class Vectors(object): def __init__(self, itos: List[str], vectors: torch.FloatTensor, unk_init=None): if (len(itos) != vectors.size(0)): raise ValueError(f'Vocaburaly size {len(itos)} does not match vector size {vectors.size(0)}') self.itos = itos self.vectors = vectors ...
class ToyPLC1(PLC): def pre_loop(self, sleep=0.1): print('DEBUG: toy plc1 enters pre_loop') self.send(SENSOR3_1, 2, PLC1_ADDR) time.sleep(sleep) def main_loop(self, sleep=0.5): print('DEBUG: toy plc1 enters main_loop') count = 0 END = 6000000.0 while True:...
def parse_args(args: list) -> Namespace: parser = argparse.ArgumentParser(description='Compute metrics') parser.add_argument('--predictions', type=str, required=True, help='Input TSV file with predictions') parser.add_argument('--ground-truth', type=str, required=True, help='Input TSV file with ground truth...
class ObjectSummarizer(nn.Module): def __init__(self, model_cfg: DictConfig): super().__init__() this_cfg = model_cfg.object_summarizer self.value_dim = model_cfg.value_dim self.embed_dim = this_cfg.embed_dim self.num_summaries = this_cfg.num_summaries self.add_pe = t...
def process_result(shared): if shared.had_exception.value: shared.result.result_str = 'error' elif (shared.had_timeout.value == 1): shared.result.result_str = 'timeout' elif shared.result.found_confirmed_counterexample.value: shared.result.result_str = 'unsafe' elif shared.result...
def SoftThreshold(t, threshold_ratio, name=None): assert (threshold_ratio >= 0) with tf.name_scope(values=[t, threshold_ratio], name=name, default_name='soft_thresholding') as name: saved_shape = tf.shape(t) t2 = tf.reshape(t, tf.concat(axis=0, values=[tf.slice(saved_shape, [0], [1]), (- 1)])) ...
def toms748_scan(data, model, bounds_low, bounds_up, level=0.05, atol=2e-12, rtol=0.0001, from_upper_limit_fn=False, **hypotest_kwargs): cache = {} def f_cached(poi): if (poi not in cache): cache[poi] = hypotest(poi, data, model, return_expected_set=True, **hypotest_kwargs) return ca...
def show_yaml(base: Path):
    """Render the run's hydra config as a markdown code block in Streamlit.

    Args:
        base: run directory containing 'hydra/config.yaml'.
    """
    yaml_path = base / 'hydra/config.yaml'
    cfg = OmegaConf.load(str(yaml_path))
    # Fix: DictConfig.pretty() was deprecated and removed in OmegaConf 2.x;
    # OmegaConf.to_yaml(cfg) is the supported replacement.
    st.markdown(f'```{OmegaConf.to_yaml(cfg)}```')
class MLP(nn.Module): def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super(MLP, self).__init__() self.num_layers = num_layers h = ([hidden_dim] * (num_layers - 1)) self.layers = nn.ModuleList((nn.Linear(n, k) for (n, k) in zip(([input_dim] + h), (h + [output_dim])...
# NOTE(review): the decorator line appears truncated in this extract —
# presumably '@pytest.mark.parametrize'; confirm against the original source.
.parametrize('precision_level', [b'32b', b'64b'])
def test_set_precision_by_bytestring(precision_level):
    """A bytestring precision should round-trip to its utf-8 text form."""
    pyhf.set_backend(pyhf.tensorlib.name, precision=precision_level)
    assert (pyhf.tensorlib.precision == precision_level.decode('utf-8'))
class SawyerGripperEnv(gym.Env): reward_per_step = (- 0.01) def __init__(self, config_path=_get_default_config_path()): px.init(mode=p.GUI) self.cfg = OmegaConf.load(config_path) self.robot = SawyerGripper(**self.cfg.sawyer_gripper) self.obj = px.Body(**self.cfg.object) s...
class _Writer(): def __init__(self) -> None: self.writer = None def write(self, write_dict: dict) -> None: if (self.writer is None): raise Exception('[ERR-CFG] Writer is None!') for key in write_dict.keys(): if write_dict[key]['plot']: self.writer....
def masked_softmax(attn_odds, masks):
    """Softmax over the last dim after masking positions (in place) to -inf.

    Note: `attn_odds` is mutated by the masked_fill_.
    """
    attn_odds.masked_fill_(masks, float('-inf'))
    return nn.Softmax(dim=-1)(attn_odds)
def restore_training(log_dir, exp_name, args, env_saved=True, env=None): tabular_log_file = os.path.join(log_dir, 'progress_restored.{}.{}.csv'.format(str(time.time())[:10], socket.gethostname())) text_log_file = os.path.join(log_dir, 'debug_restored.{}.{}.log'.format(str(time.time())[:10], socket.gethostname()...
def submit_pai_task(pai_cmd, datasource): (user, passwd, address, project) = MaxComputeConnection.get_uri_parts(datasource) cmd = ['odpscmd', '--instance-priority', '9', '-u', user, '-p', passwd, '--project', project, '--endpoint', address, '-e', pai_cmd] exitcode = run_command_and_log(cmd) if (exitcode...
class ScalarNode(Node):
    """YAML scalar node: a tag plus a scalar value, with source marks/style."""

    id = 'scalar'

    def __init__(self, tag, value, start_mark=None, end_mark=None, style=None):
        self.tag, self.value = tag, value
        # Source positions for error reporting; None when synthesized.
        self.start_mark, self.end_mark = start_mark, end_mark
        # Presentation style hint used when emitting the scalar.
        self.style = style
def _accuracy(expected, observed, data, start, end, cm): (tn, fp, fn, tp) = cm(expected, observed, data, start, end) if (tn is None): raise ValueError('Cannot obtain accuracy score for overlap segment method.') return ((tp + tn) / (((tn + fp) + fn) + tp))
class Translator(nn.Module): def __init__(self, num_tok, dim, dim_out, mult=2, depth=5): super().__init__() self.blocks = nn.ModuleList([translator_base(num_tok, dim, dim, mult=2) for d in range(depth)]) self.gelu = nn.GELU() self.tail = translator_base(num_tok, dim, dim_out, mult=2)...
def _toqclass_helper(im): data = None colortable = None if hasattr(im, 'toUtf8'): im = str(im.toUtf8(), 'utf-8') if isPath(im): im = Image.open(im) if (im.mode == '1'): format = QImage.Format_Mono elif (im.mode == 'L'): format = QImage.Format_Indexed8 colo...
class TemporaryDirectory():
    """Context manager yielding a fresh temporary directory, removed on exit."""

    def __enter__(self):
        self.tmpdir = tempfile.mkdtemp()
        return self.tmpdir

    def __exit__(self, type, value, traceback):
        # Delete the directory and everything beneath it, even on error.
        shutil.rmtree(self.tmpdir)
def default_hp_space_sigopt(trial): return [{'bounds': {'min': 1e-06, 'max': 0.0001}, 'name': 'learning_rate', 'type': 'double', 'transformamtion': 'log'}, {'bounds': {'min': 1, 'max': 6}, 'name': 'num_train_epochs', 'type': 'int'}, {'bounds': {'min': 1, 'max': 40}, 'name': 'seed', 'type': 'int'}, {'categorical_val...
def compute_mask(shape, sample_lens, mask_prob, mask_length): (bs, padded_sample_len) = shape min_sample_len = min(sample_lens) num_mask = int(((((mask_prob * min_sample_len) / float(mask_length)) + random.random()) + 1)) mask_idcs = [] for i in range(bs): sample_len = sample_lens[i] ...