class TestExpit(object):
    def check_expit_out(self, dtype, expected):
        a = np.linspace(-4, 4, 10)
        a = np.array(a, dtype=dtype)
        actual = expit(a)
        assert_almost_equal(actual, expected)
        assert_equal(actual.dtype, np.dtype(dtype))

    def test_float32(self):
        expected = n...
@pytest.mark.parametrize('ctx, func_name', ctxs)
@pytest.mark.parametrize('inshape, outsize, scale, sdim_only', [
    ((3,), (6,), None, True),
    ((3,), (8,), None, True),
    ((3,), (2,), None, True),
    ((3,), (1,), None, True),
    ((3,), None, (2.5,), True),
    ((3,), None, (0.5,), True),
    ((2, 3, 4), (10,), None, False),
    ((2, 3, 5), None, (1.3,), False),
    ((3,...
def unsupported_if_adata_minified(fn: Callable) -> Callable:
    @wraps(fn)
    def wrapper(self, *args, **kwargs):
        if getattr(self, 'minified_data_type', None) is not None:
            raise ValueError(f'The {fn.__qualname__} function currently does not support minified data.')
        return fn(self, *args, **kwa...
def prepare_logger(name, path, log_level=logging.DEBUG):
    logger = logging.getLogger(name)
    logger.addFilter(TimeFilter())
    logger.setLevel(log_level)
    logfilepath = f"{path}/{name}-{datetime.datetime.now().strftime('%b-%d-%Y_%H-%M-%S')}.log"
    fh = logging.FileHandler(logfilepath)
    sh = logging.Stream...
class ResidualBlock(torch.nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(...
class IAAAffine2(DualIAATransform):
    def __init__(self, scale=(0.7, 1.3), translate_percent=None, translate_px=None,
                 rotate=0.0, shear=(-0.1, 0.1), order=1, cval=0, mode='reflect',
                 always_apply=False, p=0.5):
        super(IAAAffine2, self).__init__(always_apply, p)
        self.scale = dict(x=scale, y=scale)
        ...
def test_BitMaskedArray_RecordArray_NumpyArray():
    v2a = ak.contents.bitmaskedarray.BitMaskedArray(
        ak.index.Index(np.packbits(np.array([
            True, True, True, True, False, False, False, False,
            True, False, True, False, True]))),
        ak.contents.recordarray.RecordArray([
            ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2....
def test_make_system_bad_shape():
    assert_raises(ValueError, utils.make_system, np.zeros((5, 3)), None, np.zeros(4), np.zeros(4))
@pytest.mark.torch
def test_sasrec_training_dataset_getitem(sequential_dataset):
    batch = SasRecTrainingDataset(sequential_dataset, 8, label_feature_name='item_id', padding_value=-1)[0]
    assert batch.query_id.item() == 0
    assert all(batch.padding_mask == torch.tensor([0, 0, 0, 0, 0, 0, 0, 1], dtype=torch.bool))
    ...
class LayerNorm3D(nn.Module):
    def __init__(self, normalized_shape, reshape, eps=1e-06):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(normalized_shape).reshape(reshape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape).reshape(reshape))
        self.eps = eps
        self.di...
class CvtEmbeddings(nn.Module):
    def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
        super().__init__()
        self.convolution_embeddings = CvtConvEmbeddings(
            patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim,
            stride=stride, padding=padding)
        se...
def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x / warmup
    return 1.0 - x
def _gen_unsupported_methods_properties():
    tensor_attrs = set(filter(lambda x: x[0] != '_', dir(torch.Tensor)))
    tensor = torch.tensor([2])
    funcs_template = dedent('\n    def func(x):\n        return x.{op}()\n    ')
    deprecated_apis = set(['volatile', 'resize', 'reinforce', 'new', 'name', 'map2_', 'h...
class FinitelyGeneratedMatrixGroup_gap(MatrixGroup_gap):
    def __reduce__(self):
        return (MatrixGroup,
                tuple(g.matrix() for g in self.gens()) + ({'check': False},))

    def as_permutation_group(self, algorithm=None, seed=None):
        from sage.groups.perm_gps.permgroup import PermutationGroup
        if ...
def _get_param_mapping(module: _torch.nn.Module,
                       seen: _typing.List[_torch.Tensor],
                       mapping: _typing.List[int]) -> _typing.List[int]:
    for param in module._parameters.values():
        if param is None:
            continue
        found = _find_param_in_list(param, seen)
        if found is None:
            ma...
class Network(object):
    def __init__(self, inputs, trainable=True, is_training=False, num_classes=21):
        self.inputs = inputs
        self.terminals = []
        self.layers = dict(inputs)
        self.trainable = trainable
        self.use_dropout = tf.placeholder_with_default(tf.constant(1.0), shape=[], name...
def main():
    parser = argparse.ArgumentParser(description='Draw the fisheye mask from Kalibr calibration output')
    parser.add_argument('yaml_path', type=str, help='Kalibr yaml file path for the fisheye mask')
    parser.add_argument('--output_folder', default='./', type=str, help='folder to output the fisheye mas...
def to_default_event_representation(music: 'Music', dtype=int) -> ndarray:
    seq = to_default_event_sequence(music)
    return np.array(seq, dtype=dtype)
class TestHsm(hu.HypothesisTestCase):
    def test_hsm_search(self):
        samples = 10
        dim_in = 5
        X = np.random.rand(samples, dim_in).astype(np.float32) - 0.5
        w = np.random.rand(hierarchy_proto.size, dim_in).astype(np.float32) - 0.5
        b = (np.random.rand(hierarchy_proto.size).astype...
class BasicBlock(nn.Module):
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, ker...
def query_type4py_api(module_name: str, source_code: str) -> Type4pyData | None:
    try:
        LOGGER.info('Retrieving Type4Py data for %s', module_name)
        return requests.post(
            config.configuration.type_inference.type4py_uri + 'api/predict?tc=0&fp=0',
            source_code.encode('utf-8'),
            timeout=config.configurati...
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path,
                                compare_with_pt_model=False, use_cached_models=True):
    if model_type not in MODEL_CLASSES:
        raise ValueError('Unrecognized model type, should be one of {}.'.format(list(MODEL_CLASSES.keys())))
    (config_class, ...
@goos.polymorphic_model()
class ElectricField(SimOutput):
    type = goos.ModelNameType('output.electric_field')
    wavelength = goos.types.FloatType()
def _eval_fvd(opts, i3d, G, dataset, device, num_videos=512):
    num_videos_per_gpu = (num_videos - 1) // opts.num_gpus + 1
    grid_z = torch.randn([num_videos_per_gpu, G.z_dim], device=device).split(1)
    if G.c_dim > 0:
        grid_c = [dataset.get_label(np.random.randint(len(dataset))) for _i in range(num_...
class MRQAExample(object):
    def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None,
                 start_position=None, end_position=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self....
def add_mask_rcnn_losses(model, blob_mask):
    loss_mask = model.net.SigmoidCrossEntropyLoss(
        [blob_mask, 'masks_int32'], 'loss_mask',
        scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK)
    loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
    model.AddLosses('loss_mask')
    return loss_gradie...
def extract_tgen_plot_data(args):
    json_path = f'{args.prefix}/tgen.analysis.json'
    if not os.path.exists(json_path):
        json_path += '.xz'
        if not os.path.exists(json_path):
            logging.warning(f'Unable to find tgen analysis data at {json_path}.')
            return
    data = load_json_data(json_pat...
@pytest.mark.parametrize('verbose', ['dummy', -1, 1.5, [1, 2]])
def test_invalid_verbose(verbose: Any) -> None:
    with pytest.raises(ValueError, match='.*Invalid verbose argument.*'):
        check_verbose(verbose)
def get_result_prefix(dirname, name, suffix):
    name = os.path.join(dirname, name)
    counter = 0
    while glob.glob('{}_{:02d}*.{}'.format(name, counter, suffix)):
        counter += 1
    return '{}_{:02d}.{}'.format(name, counter, suffix)
def compute_total_bops(graph: Graph, fw_info: FrameworkInfo, fw_impl: FrameworkImplementation) -> np.ndarray:
    bops = []
    for n in graph.get_topo_sorted_nodes():
        if n.has_weights_to_quantize(fw_info):
            incoming_edges = graph.incoming_edges(n, sort_by_attr=EDGE_SINK_INDEX)
            assert (le...
class BaseScore():
    def __init__(self, label_name='labels', logits_name='logits'):
        super().__init__()
        self.label_name = label_name
        self.logits_name = logits_name

    def __call__(self, **kwargs):
        raise NotImplementedError()
def _find_modules_with_constants(project_path: str | os.PathLike) -> OrderedSet[str]:
    modules: OrderedSet[str] = OrderedSet()
    for package in find_packages(project_path,
                                  exclude=['*.tests', '*.tests.*', 'tests.*', 'tests',
                                           'test', 'test.*', '*.test.*', '*.test']):
        package_name = package.replace('.', '/...
def get_intervention_types():
    return ['man_direct', 'man_indirect', 'woman_direct', 'woman_indirect']
class _CZ_entangle_operation(_two_qubit_operation):
    def get_circuit(self, var_param_assignment=None):
        QC = QuantumCircuit(self.num_qubits)
        QC = self.apply_param_vectors(QC, CZGate, var_param_assignment)
        return QC
def test_case41():
    url = brokerIp + '/ngsi-ld/v1/subscriptions/'
    headers = {'Content-Type': 'application/json',
               'Link': '<{{link}}>; rel=" type="application/ld+json"'}
    r = requests.post(url, data=json.dumps(ld_data.subdata29), headers=headers)
    print(r.content)
    print(r.status_code)
    url = (broke...
def is_end():
    global START_TIME
    diff = 300
    current_time = time.time()
    elapsed = current_time - START_TIME
    timeout_seconds = utils.time_to_seconds(ARGS.timeout)
    return elapsed >= (timeout_seconds + diff)
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        self.casting = casting
        self.from_ = from_
        self.to = to
class ComplexExtension(Extension):
    name = 'c'
    cls = complex

    def encode(self, s, v):
        return (v.real, v.imag)

    def decode(self, s, v):
        return complex(v[0], v[1])
@register_bpe('bytes')
class Bytes(object):
    def __init__(self, args):
        pass

    @staticmethod
    def add_args(parser):
        pass

    @staticmethod
    def encode(x: str) -> str:
        encoded = byte_encode(x)
        escaped = encoded.replace(SPACE, SPACE_ESCAPE)
        return SPACE.join(list(escaped))

    @staticmethod
    def decode(x: str) -> str:
        unes...
def test_atomic_reprepare(kurucz_atomic_data):
    kurucz_atomic_data.prepare_atom_data([14, 20])
    lines = kurucz_atomic_data.lines.reset_index()
    assert lines['atomic_number'].isin([14, 20]).all()
    assert len(lines.loc[lines['atomic_number'] == 14]) > 0
    assert (len(lines.loc[(lines['atomic_number'] ==...
class NetDefNode():
    def __init__(self, name, optype, p=None, op=None):
        self.name = name
        self.optype = optype
        self.ops = {}
        self.prev = {}
        self.insertInput(p)
        self.visited = False
        self.op = op

    def insertInput(self, p):
        if isinstance(p, list):
            ...
def test_assemble_matrix(data):
    from sfepy.discrete.common.extmods.assemble import assemble_matrix

    mtx = sps.csr_matrix(nm.ones((data.num, data.num), dtype=nm.float64))
    mtx.data[:] = 0.0
    assemble_matrix(mtx.data, mtx.indptr, mtx.indices, data.mtx_in_els,
                    data.iels, 1, data.conn, data.conn)
    aux = nm....
def add_arguments(parser):
    parser.register('type', 'bool', lambda v: v.lower() == 'true')
    parser.add_argument('--num_units', type=int, default=32, help='Network size.')
    parser.add_argument('--num_layers', type=int, default=2, help='Network depth.')
    parser.add_argument('--num_encoder_layers', type=in...
class BDEUType(Enum):
    EU_MUL = 0
    EU_MAC = 1
    EU_ADD = 2
    EU_SUB = 3
    EU_MAX = 4
    EU_MIN = 5
    EU_SHIFT = 6
    EU_AND = 7
    EU_OR = 8
    EU_XOR = 9
    EU_SELECT_GT = 10
    EU_SELECT_EQ = 11
    EU_DIVIDE = 12
    EU_TAYLOR = 13
    EU_FP32_TO_INT = 14
    EU_INT_NORMALIZE = 15
    EU_FP32_NOR...
def require_tf2onnx(test_case):
    if not is_tf2onnx_available():
        return unittest.skip('test requires tf2onnx')(test_case)
    else:
        return test_case
def test_iterable(value):
    try:
        iter(value)
    except TypeError:
        return False
    return True
def register_Ns3CallbackImpl__Void_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns...
def _remove_pretrained_embedding_params(params: Params):
    keys = params.keys()
    if 'pretrained_file' in keys:
        del params['pretrained_file']
    for value in params.values():
        if isinstance(value, Params):
            _remove_pretrained_embedding_params(value)
def test():
    device = torch.device('cuda:1')
    gpu = 1
    print('GPU : ', gpu)
    model = EDSR(synchronize_norm=False, gpu=gpu, scale_factor=2).cuda(gpu)
    ckpt = torch.load(os.path.join('/mnt/nas/workspace/sr1', 'EDSR_x2.pt'))
    print('CKPT Loaded')
    model.load_state_dict(ckpt, strict=True)
    t = trans...
_cache()
def multishuffle(shuffle_widths, forward=True):
    import numpy as np
    from . import jit

    bit_width = np.sum(shuffle_widths, dtype=np.int64)
    if bit_width not in [8, 16, 32, 64]:
        raise ValueError(f'Total bit width must be one of [8, 16, 32, 64], not {bit_width}')
    dtype = np.dtype(f'u{(b...
def computeSER(greedy, inputs, tasks):
    assert len(tasks) == 1
    dataset_class = getattr(dialogues, tasks[0].dataset_name)
    dataset = dataset_class()
    act_values = []
    for input in inputs:
        act_values.append(QUOTED_MATCH_REGEX.findall(input))
    return dataset.compute_ser(greedy, act_values)
def _impl(array, axis, highlevel, behavior, attrs):
    with HighLevelContext(behavior=behavior, attrs=attrs) as ctx:
        layout = ctx.unwrap(array, allow_record=False)
    axis = regularize_axis(axis)
    if axis is None:
        def action(layout, continuation, **kwargs):
            if layout.is_regular:
                ...
def createSegments(assigns, lenSegment):
    assignment = []
    correctAssignment = []
    for (cluster, othercluster) in assigns:
        correctAssignment += np.full(lenSegment, cluster, dtype=int).tolist()
        assignment.append(((cluster, othercluster), lenSegment))
    return (assignment, correctAssignment)
class discriminator_wrapper(nn.Module):
    def __init__(self, discriminator):
        super().__init__()
        self.dis = discriminator
        self.trans = dataset_iters.AugmentWrapper_DIS()

    def forward(self, x, y=None, aug=False):
        if aug:
            x = self.trans(x, self.training)
        logits = se...
class DiscretePolicy(nn.Module):
    def __init__(self, obs_shape, *args, **kwargs):
        super(DiscretePolicy, self).__init__()
        if type(obs_shape[0]) is int:
            if len(obs_shape) == 1:
                self.network = MLPNet(obs_shape[0], *args, **kwargs)
            else:
                raise N...
class SubModule(torch.nn.Module):
    def __init__(self):
        super(SubModule, self).__init__()
        self.mod1 = nn.Identity()
        self.mod2 = nn.ReLU()

    def forward(self, x):
        x = self.mod1(x)
        x = self.mod2(x)
        return x
class Conv1x1Linear(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, bn=True):
        super(Conv1x1Linear, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=stride, padding=0, bias=False)
        self.bn = None
        if bn:
            self.bn = nn.BatchNorm2d...
class CheckpointEveryNSteps(Callback):
    def __init__(self, save_step_frequency, prefix='N-Step-Checkpoint', use_modelcheckpoint_filename=False):
        self.save_step_frequency = save_step_frequency
        self.prefix = prefix
        self.use_modelcheckpoint_filename = use_modelcheckpoint_filename

    def on_batc...
@dataclasses.dataclass
class LeanStructDefs:
    identifiers: IdentifierManager = dataclasses.field(default_factory=IdentifierManager)
    names: Dict[ScopedName, StructDefinition] = dataclasses.field(default_factory=dict)
    unique_tuple_names: Dict[str, List[ScopedName]] = dataclasses.field(default_factory=(lam...
class Clusterer(base_clusterer.BaseClusterer):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_labels(self, x, y):
        return torch.randint(low=0, high=self.k, size=y.shape).long().cuda()
def test_train_save_weights(train_data_fx, model_fx):
    name_prefix = 'weights'
    default_path = os.path.join(os.curdir, name_prefix)
    h = model_fx.train(train_data_fx[0], train_data_fx[1], epochs=10,
                      save_weights={'path': default_path, 'freq': 10, 'best': True})
    files = list(filter((lambda f: f.startswith((...
def main(args):
    misc.init_distributed_mode(args)
    global_rank = misc.get_rank()
    if args.log_dir is None:
        args.log_dir = args.output_dir
    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print('{}'.format(args).replace(', ', ',\n'))
    device = torch.device(args.devic...
def get_label(post):
    # Positive label iff at least two of the three annotators answered 'Yes'.
    label = 1 if ((post.ans1 == 'Yes' and post.ans2 == 'Yes')
                  or (post.ans2 == 'Yes' and post.ans3 == 'Yes')
                  or (post.ans1 == 'Yes' and post.ans3 == 'Yes')) else 0
    return label
@pytest.mark.parametrize('batch_size', [1, 2, 3, len(data)])
def test_FixedBatchSizeBatchSampler(batch_size):
    dataset = data
    iter1 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=False)))
    iter2 = list(iter(FixedBatchSizeBatchSampler(dataset, batch_size, shuffle=True)))
    indices1 = sorted(_merge_b...
def GetPageRank_PNGraph(Graph, PRankH, C=0.85, Eps=0.0001, MaxIter=100):
    return _snap.GetPageRank_PNGraph(Graph, PRankH, C, Eps, MaxIter)
class SQSQueue(object):
    def __init__(self, queue_name: str, visibility_timeout: int = 3600,
                 wait_if_empty: int = 100, fetch_wait_time_seconds: int = 20,
                 retry_times: int = 30):
        credentials = aws_credentials()
        self.client = boto3.client('sqs', region_name=credentials['AWS_DEFAULT_REGION'],
                                   aws_secret_access_...
def assert_x_equal(x_true, x_train):
    tm.assert_index_equal(x_true.columns, x_train.columns, exact=True)
    tm.assert_index_equal(x_true.index, x_train.index, exact=True)
    tm.assert_frame_equal(x_true, x_train, check_index_type=False, check_column_type=True, check_names=False)
class UnMaskedWeightedNLLLoss(nn.Module):
    def __init__(self, weight=None):
        super(UnMaskedWeightedNLLLoss, self).__init__()
        self.weight = weight
        self.loss = nn.NLLLoss(weight=weight, reduction='sum')

    def forward(self, pred, target):
        if self.weight is None:
            ...
class OnnxHandler():
    def __init__(self):
        self._func_set = cvt.func_set_onnx_support()
        self._nonsupport_set = set()
        if FuncInfo.params.files:
            self._nnp_set = set()
            for f in FuncInfo.params.files:
                nnp = cvt.nnabla.NnpImporter(f, expand_network=(not FuncI...
def get_args():
    parser = argparse.ArgumentParser('MUST training and evaluation script', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--save_ckpt_freq', default=10, type=int)
    parser.add_argument('--eval_freq', default=1, type=int)
    parser.add_argument(...
class GeneralData(NiceRepr):
    def __init__(self, meta_info=None, data=None):
        self._meta_info_fields = set()
        self._data_fields = set()
        if meta_info is not None:
            self.set_meta_info(meta_info=meta_info)
        if data is not None:
            self.set_data(data)

    def set_meta...
class LSUNClass(data.Dataset):
    def __init__(self, root, transform=None, target_transform=None):
        import lmdb
        self.root = os.path.expanduser(root)
        self.transform = transform
        self.target_transform = target_transform
        self.env = lmdb.open(root, max_readers=1, readonly=True, lock=F...
def read_span_predictor_examples(question):
    def _process_sent(sent):
        if type(sent) != str:
            return [_process_sent(s) for s in sent]
        # Substitute '&amp;' before '&' so the entity is not left half-replaced.
        return sent.replace('', '-').replace('&amp;', 'and').replace('&', 'and')

    def is_whitespace(c):
        if (c == ' ') or (c == '\t') or (c == '\r') or ...
class CVAE_Linear(nn.Module):
    def __init__(self, size, latent_size=512, num_samples=10, alpha=1, cvae_num_stack=1):
        super(CVAE_Linear, self).__init__()
        self.size = size
        self.latent_size = latent_size
        self.cvae_num_stack = cvae_num_stack
        self.inp_post = nn.Linear(((16 * 2) + (...
@pytest.mark.parametrize(
    'class_decorator, pre_parametrize_decorator, post_parametrize_decorator',
    (('()', '', ''),
     ('()', '', ''),
     ('', f'({AUTH_CLASS_NAME})', ''),
     ('', '', f'({AUTH_CLASS_NAME})')),
    ids=('global', 'schema', 'test-pre-parametrize', 'test-post-parametrize'))
def test_different_scopes(testdir, schema_definition, cl...
def export_mesh(vertices, triangles, filename, mesh_name='mcubes_mesh'):
    import collada

    mesh = collada.Collada()
    vert_src = collada.source.FloatSource('verts-array', vertices, ('X', 'Y', 'Z'))
    geom = collada.geometry.Geometry(mesh, 'geometry0', mesh_name, [vert_src])
    input_list = collada.source.Inpu...
class DummyToolWithMessage(BaseTool):
    name = 'dummy_tool_with_message'
    description = 'Called when tool is not actually run.'

    def _get_message(self, msg: str) -> str:
        return msg

    def _run(self, msg: str) -> str:
        return self._get_message(msg)

    async def _arun(self, msg: str) -> str:
        ...
def parse_requirements(fname='requirements.txt', with_version=True):
    import re
    import sys
    from os.path import exists

    require_fpath = fname

    def parse_line(line):
        if line.startswith('-r '):
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                ...
def write_lines(lines, path):
    with io.open(path, 'w', encoding='utf-8') as f:
        print('Currently writing lines to file ...')
        f.writelines(lines)
    print('Lines successfully written to file!')
class COCOPanopticEvaluator(DatasetEvaluator):
    def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
        self._metadata = MetadataCatalog.get(dataset_name)
        self._thing_contiguous_id_to_dataset_id = {
            v: k for (k, v) in self._metadata.thing_dataset_id_to_contiguous_id.items()}
        sel...
@pytest.mark.parametrize('kernel', ['triangular', 'gaussian', 'epanechnikov', 'cosine'])
def test_continuous_ope_performance(kernel):
    ipw = KernelizedInverseProbabilityWeighting(kernel=kernel, bandwidth=0.1)
    snipw = KernelizedSelfNormalizedInverseProbabilityWeighting(kernel=kernel, bandwidth=0.1)
    dim_context = 2
    re...
@dataclass
class LazySchema:
    fixture_name: str
    base_url: str | None | NotSet = NOT_SET
    method: Filter | None = NOT_SET
    endpoint: Filter | None = NOT_SET
    tag: Filter | None = NOT_SET
    operation_id: Filter | None = NOT_SET
    app: Any = NOT_SET
    hooks: HookDispatcher = field(default_factory=...
def get_coords(device: JaxDevice) -> HardwareMesh:
    if hasattr(device, 'coords'):
        return (*device.coords, device.core_on_chip)
    return (device.process_index, device.id % jax.local_device_count())
class OneCycleLR(Callback):
    def __init__(self, max_lr, end_percentage=0.1, scale_percentage=None,
                 maximum_momentum=0.95, minimum_momentum=0.85, verbose=True):
        super(OneCycleLR, self).__init__()
        if (end_percentage < 0.0) or (end_percentage > 1.0):
            raise ValueError('`end_percentage` must...
def get_docker_volumes(args):
    opts = getattr(args, 'volume') or []
    return [easy_volume(opt).split(':') for opt in opts]
def parse_args():
    parser = argparse.ArgumentParser(description='Fusing multiple scores')
    parser.add_argument('--scores', nargs='+', help='list of scores',
                        default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl'])
    parser.add_argument('--coefficients', nargs='+', type=float, help='coefficients of each score file',...
def _run_doctests(tests, full_name, verbose, doctest_warnings):
    flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
    runner = DTRunner(full_name, checker=Checker(), optionflags=flags, verbose=verbose)
    output = io.StringIO(newline='')
    success = True
    tmp_stderr = (sys.stdout if doctes...
class HeckeAlgebra_full(HeckeAlgebra_base):
    def _repr_(self):
        return 'Full Hecke algebra acting on %s' % self.module()

    def __richcmp__(self, other, op):
        if not isinstance(other, HeckeAlgebra_full):
            return NotImplemented
        return richcmp(self.module(), other.module(), op)
    ...
class TestModel(nn.Module):
    @nn.compact
    def __call__(self, x):
        x = nn.Conv(features=8, kernel_size=(3, 3))(x)
        x = activation(x)
        x = nn.Conv(features=16, kernel_size=(3, 3))(x)
        x = activation(x)
        x = nn.Conv(features=32, kernel_size=(3, 3))(x)
        x = activation(x)
        x = jnp.mean...
@register_model
def mixnet_xxl(pretrained=False, **kwargs):
    model = _gen_mixnet_m('mixnet_xxl', channel_multiplier=2.4, depth_multiplier=1.3,
                          pretrained=pretrained, **kwargs)
    return model
def option_to_map(option: str):
    kv_strs = option.split(';')
    options = {}
    for kv_str in kv_strs:
        kv_str = kv_str.strip()
        idx = kv_str.find('=')
        if idx < 0:
            continue
        key = kv_str[0:idx]
        val = kv_str[idx + 1:]
        options[key] = val
    return options
def main():
    global args
    parser = arg_parser()
    args = parser.parse_args()
    cfg = setup_cfg(args)
    train_split = cfg.DATASET.TRAIN_SPLIT
    val_split = cfg.DATASET.VAL_SPLIT
    test_split = cfg.DATASET.TEST_SPLIT
    train_dataset = build_dataset(cfg, train_split)
    val_dataset = build_dataset(cfg, ...
def test_horizon_0_1000000_days(tmp_path: pathlib.Path):
    time_horizon = TimeHorizon(datetime.timedelta(days=0), datetime.timedelta(days=1000000))
    labeler = DummyLabeler([2], time_horizon)
    events_with_labels: EventsWithLabels = [
        (event((2000, 1, 3), 2, None), True),
        (event((2001, 10, 5), 1, None), True),
        (ev...
class FEVERSentenceTextFormatter(FeverFormatter):
    def __init__(self, idx, db, ls):
        super().__init__(idx, ls)
        self.db = db

    def get_doc_line(self, doc, line):
        lines = self.db.get_doc_lines(doc)
        return lines.split('\n')[line].split('\t')[1]

    def format_line(self, line):
        a...
def test_empty_image():
    image = np.zeros((6, 6), dtype=bool)
    with expected_warnings(['entirely zero']):
        assert_array_equal(convex_hull_image(image), image)
class BindingSiteFeature(EdgeFeature):
    def get_values(self, item, from_index, to_index, dense=False):
        if item['site_indic'][to_index] == 1:
            return {'binding_site_to': 1}
        elif dense:
            return {'binding_site_to': 0}
        else:
            return {}
def test_transfer_fields_default_batch():
    adata1 = synthetic_iid()
    protein_adata1 = synthetic_iid()
    mdata1 = mudata.MuData({'rna': adata1, 'protein': protein_adata1})
    adata2 = synthetic_iid()
    del adata2.obs['batch']
    adata2.X = adata1.X
    protein_adata2 = synthetic_iid()
    mdata2 = mudata.MuD...
class SFractionalIdealClass(FractionalIdealClass):
    def _repr_(self):
        if self.is_trivial():
            return 'Trivial S-ideal class'
        return 'Fractional S-ideal class %s' % self._value._repr_short()
@HEADS.register_module
class ClsHead(nn.Module):
    def __init__(self, with_avg_pool=False, in_channels=2048, num_classes=1000):
        super(ClsHead, self).__init__()
        self.with_avg_pool = with_avg_pool
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.criterion = nn.CrossEntropyLoss...
def generator_train_with_corruption(args):
    skip_rate = args.train_size / float(len(train_data))
    (positive, negative) = (list(), list())
    random.shuffle(train_data)
    for i in range(len(train_data)):
        (h, r, t) = train_data[i]
        if args.is_balanced_tr:
            if (random.random() > trfreq...