code
stringlengths
101
5.91M
def test_treebank(): assert ('hi_hdtb' == treebank_to_short_name('UD_Hindi-HDTB')) assert ('hi_fire2013' == treebank_to_short_name('Hindi-fire2013')) assert ('hi_fire2013' == treebank_to_short_name('Hindi-Fire2013')) assert ('hi_fire2013' == treebank_to_short_name('Hindi-FIRE2013')) assert ('hi_fire...
def max_memory_allocated(device: Union[Device, int] = None) -> int:
    """Return the peak GPU memory, in bytes, allocated by tensors on *device*.

    Delegates to :func:`memory_stats` and extracts the all-pool peak counter.

    Args:
        device: target device or its index; ``None`` selects the current device.

    Returns:
        Peak allocated bytes since peak stats were last reset.
    """
    stats = memory_stats(device=device)
    return stats['allocated_bytes.all.peak']
def create_var_with_large_initial_value(initial_value: np.ndarray, *args, **kwargs):
    """Create a ``tf.Variable`` sized like *initial_value* without baking the
    (potentially huge) value into the graph.

    The variable is initialized with zeros of matching shape/dtype; the caller
    is expected to assign the real values afterwards.

    Args:
        initial_value: array whose shape and dtype the variable should take.
        *args, **kwargs: forwarded to ``tf.Variable``.

    Returns:
        A ``tf.Variable`` filled with zeros.

    Raises:
        TypeError: if *initial_value* is not a ``np.ndarray``. (The original
            used ``assert``, which is silently stripped under ``python -O``.)
    """
    if not isinstance(initial_value, np.ndarray):
        raise TypeError(
            'initial_value must be np.ndarray, got %s' % type(initial_value).__name__)
    zeros = tf.zeros(initial_value.shape, initial_value.dtype)
    var = tf.Variable(zeros, *args, **kwargs)
    return var
class InferenceModel(): def __init__(self, ptr): self.ptr = ptr def __del__(self): if self.ptr: check(lib.tract_inference_model_destroy(byref(self.ptr))) def _valid(self): if (self.ptr == None): raise TractError('invalid inference model (maybe already consumed...
def reset_max_memory_cached(device: Union[Device, int] = None) -> None:
    """Deprecated alias for :func:`reset_peak_memory_stats`.

    Emits a ``FutureWarning`` and resets *all* peak memory statistics for
    *device* (``None`` means the current device).
    """
    warnings.warn(
        'torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, which resets /all/ peak memory stats.',
        FutureWarning)
    return reset_peak_memory_stats(device=device)
def resnet34(num_classes=1000):
    """Build a ResNet-34: ``BasicBlock`` with a [3, 4, 6, 3] stage layout."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes)
class CraftingTable(Space): def __init__(self, max_items=1, *args, **kwargs): super().__init__(*args, **kwargs) self.max_items = max_items self.unique_info_dim = len(MINECRAFT_ITEMS) self._reward_this_step = 0 def reset(self, agent_infos): super().reset(agent_infos) ...
def test_Mult_Div(): SD = FunctionSpace(N, 'C', bc=(0, 0), dtype='D') SN = FunctionSpace(N, 'C', basis='ShenNeumann', dtype='D') Cm = inner_product((SN, 0), (SD, 1)) Bm = inner_product((SN, 0), (SD, 0)) uk = (np.random.randn(N) + (np.random.randn(N) * 1j)) vk = (np.random.randn(N) + (np.random.r...
def basiclemma(self, M):
    """Return an element coprime to *M*, built from ``basiclemmavec``.

    The gcd assertion is a sanity check on the construction, not input
    validation.
    """
    candidate = self(self.basiclemmavec(M))
    assert gcd(candidate, M) == 1
    return candidate
class TranslateShape(Shape): node_type = 'goos.shape.translate' def __init__(self, shape: Shape, offset: np.ndarray) -> None: super().__init__(shape) self._offset = offset def eval_const_flags(self, inputs: List[ShapeFlow.ConstFlags]) -> ShapeFlow.ConstFlags: return inputs[0] def...
def _as_tuple(inp, arg_name, fn_name): is_inp_tuple = True if (not isinstance(inp, tuple)): inp = (inp,) is_inp_tuple = False for (i, el) in enumerate(inp): if (not isinstance(el, torch.Tensor)): if is_inp_tuple: raise TypeError('The {} given to {} must be...
def assertDictAlmostEqual(d1: Dict, d2: Dict) -> None:
    """Assert two dicts share the same keys and have numerically-close values."""
    assert set(d1) == set(d2)
    for key, value in d1.items():
        assert_allclose(value, d2[key])
def test_same_num_polygons_load():
    """Loading a GDS whose boundaries disagree on polygon count must raise."""
    path = os.path.join(TESTDATA, 'tlc_test_gds2.gds')
    with pytest.raises(ValueError, match='same number'):
        with open(path, 'rb') as fp:
            gds.GDSImport(fp)
def coarsening(model, graph, edge_weight_function: EdgeWeightFunction, node_weight_function: NodeWeightFunction, L, P, basic_blocks, special_blocks, depth) -> List[Tuple[(Graph, List[List[Node]], Graph, UnionFind)]]: print(f'-I- Coarsening: got graph with {graph.num_nodes} nodes') mgr = CoarseningMgr(model, gra...
def convert_sst2roots(paths, dataset_name, *args):
    """Convert SST data in 'binaryroot' mode; extra positional args are ignored."""
    convert_sst_general(paths, dataset_name, 'binaryroot')
.operations('multipart') def test_internal_exceptions(any_app_schema, mocker): mocker.patch('schemathesis.Case.call', side_effect=ValueError) mocker.patch('schemathesis.Case.call_wsgi', side_effect=ValueError) (_, *others, finished) = from_schema(any_app_schema, hypothesis_settings=hypothesis.settings(max_e...
def build_emb_model(n_features, n_outputs, hidden_nodes, emb_size, max_id, compile=False, optimizer='adam', lr=0.01, loss=crps_cost_function, activation='relu', reg=None): if (type(hidden_nodes) is not list): hidden_nodes = [hidden_nodes] features_in = Input(shape=(n_features,)) id_in = Input(shape=...
def log_grads(model, tb_writer, tb_index): def weights_grads(model): grad = {} weights = {} for (name, param) in model.named_parameters(): if (param.grad is not None): grad[name] = param.grad weights[name] = param.data return (grad, weights...
def test_lagrangian_particles(): N = (20, 20) F0 = FunctionSpace(N[0], 'F', dtype='D', domain=(0.0, 1.0)) F1 = FunctionSpace(N[1], 'F', dtype='d', domain=(0.0, 1.0)) T = TensorProductSpace(comm, (F0, F1)) TV = VectorSpace(T) (x, y) = sp.symbols('x,y') psi = (((1.0 / np.pi) * (sp.sin((np.pi *...
@dataclass(frozen=True)
class NudityCheckRequest():
    """Immutable request carrying image locations to scan.

    NOTE(review): the extracted source showed only ``(frozen=True)`` where the
    decorator line was; the ``field(default_factory=list)`` usage implies
    ``@dataclass(frozen=True)`` — confirm against the original file.
    """

    # Paths/URIs of images to check; defaults to an empty list per instance.
    image_locations: List[str] = field(default_factory=list)
_memoize_get_funcs def get_blas_funcs(names, arrays=(), dtype=None, ilp64=False): if isinstance(ilp64, str): if (ilp64 == 'preferred'): ilp64 = HAS_ILP64 else: raise ValueError("Invalid value for 'ilp64'") if (not ilp64): return _get_funcs(names, arrays, dtype, 'B...
def log_react_trial(agents, trial_n): (correct, incorrect, halted) = summarize_react_trial(agents) log = f''' BEGIN TRIAL {trial_n} Trial summary: Correct: {len(correct)}, Incorrect: {len(incorrect)}, Halted: {len(halted)} ''' log += ' BEGIN CORRECT AGENTS \n\n' for agent in correct: log += (rem...
def read_32t(fobj, start_length, size):
    """Read an icon image whose payload is prefixed by a 4-byte zero header.

    Verifies the leading signature, then hands the remaining bytes to
    ``read_32`` with the offset and length adjusted past the header.
    """
    start, length = start_length
    fobj.seek(start)
    if fobj.read(4) != b'\x00\x00\x00\x00':
        raise SyntaxError('Unknown signature, expecting 0x')
    return read_32(fobj, (start + 4, length - 4), size)
class ModelArguments(): model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[s...
def run_on_one_sequence(sess, clstm_model, batch_img, batch_flow, two_stream=True): with sess.as_default(): if two_stream: prob = sess.run(clstm_model.preds, feed_dict={images: batch_img, flows: batch_flow, K.learning_phase(): 0}) print(prob) (gb_grad_value, target_conv_l...
def get_cube(n_cells, **kwargs):
    """Return ``kwargs['cube']`` when supplied, else a zeroed (n, n, n) array.

    Fix: the original passed ``np.zeros((n_cells,)*3)`` as the ``kwargs.get``
    default, so the full n**3 array was allocated even when a cube was
    provided.  The allocation is now lazy.  An explicitly passed value —
    including ``None`` — is returned verbatim, matching the original
    ``dict.get`` semantics.

    Args:
        n_cells: edge length of the cube to allocate.
        **kwargs: may contain ``cube``, the pre-built array to reuse.

    Returns:
        The supplied cube, or a fresh ``np.zeros((n_cells, n_cells, n_cells))``.
    """
    if 'cube' in kwargs:
        return kwargs['cube']
    return np.zeros((n_cells, n_cells, n_cells))
.core .parametrize('borders', [None, {'rank': [5, 9]}]) def test_correct_borders(borders): model = ALSWrap() res = model._prepare_param_borders(borders) assert (res.keys() == model._search_space.keys()) assert ('rank' in res) assert isinstance(res['rank'], dict) assert (res['rank'].keys() == mod...
def combinatorial_map_trivial(f=None, order=None, name=None):
    """Identity decorator.

    Supports both ``@combinatorial_map_trivial`` (returns *f* unchanged) and
    ``@combinatorial_map_trivial(order=..., name=...)`` (returns an identity
    decorator); the keyword arguments are accepted but unused here.
    """
    if f is not None:
        return f
    return lambda func: func
def find_span(context, tokens, answer): offset = 0 spans = [] scanning = None process = [] for (i, token) in enumerate(tokens): while (context[offset:(offset + len(token))] != token): offset += 1 if (offset >= len(context)): break if (scanning ...
(0.5) _service.route('/query_order', methods=['POST']) def query_order(): try: entities = request.json['entities'] oid = int(entities['oid']) query_res = simple_db.query_order(oid) if (query_res != 'None'): return json_resp(True, msg=query_res) else: r...
def merge_dicts(dict_a, dict_b): from ast import literal_eval for (key, value) in dict_a.items(): if (key not in dict_b): raise KeyError('Invalid key in config file: {}'.format(key)) if (type(value) is dict): dict_a[key] = value = AttrDict(value) if isinstance(val...
def binary_cross_entropy(input, target, weight=None, size_average=None, reduce=None, reduction='elementwise_mean'): if ((size_average is not None) or (reduce is not None)): reduction = _Reduction.legacy_get_enum(size_average, reduce) else: reduction = _Reduction.get_enum(reduction) if (not (...
def val_cihp(net_, testloader, testloader_flip, test_graph, epoch, writer, criterion, classes=20): (adj1_test, adj2_test, adj3_test) = test_graph num_img_ts = len(testloader) net_.eval() pred_list = [] label_list = [] running_loss_ts = 0.0 miou = 0 for (ii, sample_batched) in enumerate(z...
class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): super(BasicBlock, self).__init__() padding = (2 - stride) if (dilation > 1): padding = dilation dd = dilation pad = padding if ((dow...
def filter_df(df): remove_indices = [] for (i, image_path) in enumerate(df['vv_image_path'].tolist()): image = cv2.imread(image_path, 0) image_values = list(np.unique(image)) binary_value_check = ((image_values == [0, 255]) or (image_values == [0]) or (image_values == [255])) if ...
def test_nested_default_arg(): class MyClass(): def __call__(self, arr: dace.float64[20], qmin: float=0.0): self.nested(arr, qmin) def nested(self, arr: dace.float64[20], qmin: float): arr[:] = qmin a = MyClass() def tester(arr: dace.float64[20], qmin2: float): ...
class TextDFDatasetForDisc(Dataset): def __init__(self, df, in_memory: bool=False, split: str=None, train_ratio: float=1, omitted_labels=None, reduced_labels=None, reduced_labels_keep_num=None): if (omitted_labels is not None): df = df.loc[(~ df['truth'].isin(omitted_labels))] if (reduce...
def move_satisifies_memory_constraint(v: SimpleNode, dst: int, state: PartitionState) -> bool:
    """Check whether moving node *v* into stage *dst* keeps that stage's
    parameter load strictly below the budget ``state.L_max``.

    NOTE(review): the "satisifies" typo in the name is kept — callers use it.
    """
    projected = state.params_per_stage[dst] + state.params_per_node[v]
    return projected < state.L_max
def add_backward_pass(sdfg: SDFG, state: SDFGState, outputs: typing.List[typing.Union[(nd.AccessNode, str)]], inputs: typing.List[typing.Union[(nd.AccessNode, str)]]): sdfg.validate() backward_state = sdfg.add_state_after(state) gen = BackwardPassGenerator(sdfg=sdfg, state=state, given_gradients=outputs, re...
def test_load_gds(): with open(os.path.join(TESTDATA, 'rect.gds'), 'rb') as fp: gds_file = gds.GDSImport(fp) polygons = gds_file.get_polygons((100, 0)) assert (len(polygons) == 1) np.testing.assert_almost_equal(polygons[0], [[(- 1), 0.7], [(- 5), 0.7], [(- 5), 0.2], [(- 1), 0.2]]) boxes = gd...
def block_inception_a(inputs, scope=None, reuse=None): with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d], stride=1, padding='SAME'): with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse): with tf.variable_scope('Branch_0'): branch_0 = slim.conv...
_module() class PyGPointNextDecoder(nn.Module): def __init__(self, block, decoder_blocks=[1, 1, 1, 1], decoder_layers=2, in_channels=6, width=32, strides=[1, 4, 4, 4, 4], nsample=[8, 16, 16, 16, 16], radius=0.1, radius_scaling=2, nsample_scaling=1, aggr_args={'feature_type': 'dp_fj', 'reduction': 'max'}, group_args...
def union(**members):
    """Dynamically build a ``UnionType`` subclass from the given member names.

    Each member name becomes a class attribute initialized to ``None``; the
    raw mapping is stored on ``_members``.
    """
    namespace = {'_members': members}
    namespace.update((key, None) for key in members)
    return type('UnionInstance', (UnionType,), namespace)
def test():
    """Smoke-test PReLU_GoogLeNet on a single random CIFAR-sized input."""
    net = PReLU_GoogLeNet()
    inp = torch.randn(1, 3, 32, 32)
    out = net(inp)
    print(out.size())
def findChildren(node, path): result = [] if (len(path) == 1): for i in node.childNodes: if (i.nodeType == Node.ELEMENT_NODE): if (i.tagName == path[0]): result.append(i) else: for i in node.childNodes: if (i.nodeType == Node.ELEMEN...
def _make_callback(args, save_models): m_name = (str(args['output_name']) + '_{epoch:03d}.h5') filepath = os.path.join(save_models, m_name) early_stopping_monitor = EarlyStopping(monitor=args['monitor'], patience=args['patience']) checkpoint = ModelCheckpoint(filepath=filepath, monitor=args['monitor'], ...
def _compute_threshold_by_top_percentage(attributions, percentage=60.0): if ((percentage < 0) or (percentage > 100)): raise ValueError('percentage must be in [0, 100]') if (percentage == 100): return np.min(attributions) flat_attributions = attributions.flatten() attribution_sum = np.sum...
class SpatialGraphConv(nn.Module): def __init__(self, in_channels, out_channels, max_graph_distance): super(SpatialGraphConv, self).__init__() self.s_kernel_size = (max_graph_distance + 1) self.gcn = nn.Conv2d(in_channels, (out_channels * self.s_kernel_size), 1) def forward(self, x, A): ...
def main(): parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model') parser.add_argument('--model_name', type=str, default='transfo-xl-wt103', help='pretrained model name') parser.add_argument('--split', type=str, default='test', choices=['all', 'valid', 'test'], help='which split ...
class TestExample(unittest.TestCase): def test_explain(self): base_folder = os.path.dirname(os.path.abspath(__file__)) task = TabularClassification(base_folder).train_adult() predict_function = (lambda z: task.model.predict_proba(task.transform.transform(z))) class_names = task.trans...
class NetworkXLabelCooccurenceClustererTests(ClassifierBaseTest): def test_actually_works_on_proper_params(self): for (X, y) in self.get_multilabel_data_for_tests('sparse'): assert sp.issparse(y) for clusterer in get_networkx_clusterers(): partition = clusterer.fit_pr...
def test_solve_generalized_discrete_are(): mat = _load_data('gendare__data.npz') cases = [(np.array([[0.276923, 0.8234578, 0.950222], [0., 0.6948286, 0.], [0., 0.3170995, 0.4387444]]), np.array([[0.3815585, 0.1868726], [0.7655168, 0.4897644], [0.7951999, 0.4455862]]), np.eye(3), np.eye(2), np.array([[0.646313, ...
class Classifier(torch.nn.Module): def __init__(self, input_size, device='cpu', lin_blocks=0, lin_neurons=256, out_neurons=1211): super().__init__() self.blocks = nn.ModuleList() for block_index in range(lin_blocks): self.blocks.extend([_BatchNorm1d(input_size=input_size), Linear...
def run_cn(_trainMode, _dataType, _oRate, _var, _kmix, _GPU_ID): (_n, _oRange, _hdims, _actv, _maxEpoch, _PLOT_EVERY, _SAVE_NET, _SAVE_FIG) = get_common_config() (x, y, t) = data4reg(_type=_dataType, _n=_n, _oRange=_oRange, _oRate=_oRate, measVar=_var) xtest = np.linspace(start=(- 3), stop=3, num=1000).resh...
.parametrize('ctx, func_name', ctxs) .parametrize('axis', [0, 1, 2, (- 1), (- 2), (- 3)]) .parametrize('seed', [313]) .parametrize('different_size', [False, True]) .parametrize('num_inputs', [2, 3]) def test_concatenate_forward_backward(seed, axis, different_size, num_inputs, ctx, func_name): from nbla_test_utils i...
def _compute_transformation_angles(sim): Gtot_vec = sim.angular_momentum() Gtot_vec = np.array(Gtot_vec) Gtot = np.sqrt((Gtot_vec Gtot_vec)) Ghat = (Gtot_vec / Gtot) Ghat_z = Ghat[(- 1)] Ghat_perp = np.sqrt((1 - (Ghat_z ** 2))) theta1 = ((np.pi / 2) - np.arctan2(Ghat[1], Ghat[0])) theta...
def setup(app):
    """Sphinx extension entry point: add stylesheets and a builder hook."""
    def adds(pth):
        # Log which stylesheet is being attached during the build.
        print('Adding stylesheet: %s' % pth)
        app.add_css_file(pth)

    adds('fields.css')
    app.connect('builder-inited', _make_estimator_overview)
def test_normalize():
    """The normalize command must expose a working argument parser."""
    from topaz.commands import normalize
    parser = normalize.add_arguments()
def ref_binary_weight_affine(x, w, wb, alpha, b, base_axis, quantize_zero_to): shape = list(x.shape[:base_axis]) shape += [(- 1)] out_shape = w.shape[1:] binw = binarize(w.reshape(w.shape[0], (- 1)), quantize_zero_to) y = np.dot(x.reshape(*shape), binw) if (b is not None): y += b.reshape...
def set_device(cuda, local_rank):
    """Select the torch device for this process.

    Args:
        cuda: truthy to use the GPU identified by *local_rank*.
        local_rank: CUDA device index bound when *cuda* is truthy.

    Returns:
        ``torch.device('cuda')`` or ``torch.device('cpu')``.
    """
    if not cuda:
        return torch.device('cpu')
    torch.cuda.set_device(local_rank)
    return torch.device('cuda')
def main(path_to_debug_info=None, gdb_argv=None, no_import=False): parser = optparse.OptionParser(usage=usage) parser.add_option('--gdb-executable', dest='gdb', default='gdb', help='gdb executable to use [default: gdb]') parser.add_option('--verbose', '-v', dest='verbosity', action='count', default=0, help=...
def mixup_data(x, y, l):
    """Mixup augmentation: blend each sample with a randomly permuted partner.

    Args:
        x: input batch (first dimension is the batch).
        y: labels aligned with *x*.
        l: interpolation coefficient (the mixup lambda).

    Returns:
        Tuple of (mixed inputs, original labels, permuted labels).
    """
    perm = torch.randperm(x.shape[0]).to(x.device)
    mixed = l * x + (1 - l) * x[perm]
    return mixed, y, y[perm]
class VCondConfig(ModelConfig): arch: str = 'v-cond' identifier: str = MISSING data_modality: str = 'state+language' mask_ratio: float = 0.75 language_model: str = 'distilbert-base-uncased' hf_cache: str = to_absolute_path('data/hf-cache') language_dim: int = 768 vocab_size: int = 30522 ...
class NodeIndexer(): _node_map: Dict[(Node, int)] _index_map: List[Node] def _add_node(self, node: Node): next_id = len(self._index_map) self._node_map[node] = next_id self._index_map.append(node) def __init__(self, prog: Node): self._node_map = dict() self._index...
class ResBlock(nn.Module): def __init__(self, in_channel, channel): super().__init__() self.conv = nn.Sequential(nn.ReLU(), nn.Conv2d(in_channel, channel, 3, padding=1), nn.ReLU(inplace=True), nn.Conv2d(channel, in_channel, 1)) def forward(self, input): out = self.conv(input) out...
class OverlapPatchEmbed(BaseModule): def __init__(self, patch_size=7, stride=4, in_chans=3, embed_dim=768, norm_cfg=dict(type='BN', requires_grad=True)): super().__init__() patch_size = (patch_size, patch_size) self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,...
class Conv2dBenchmark(op_bench.TorchBenchmarkBase): def init(self, IC, OC, kernel, stride, N, H, W, G, pad, device): self.input = torch.rand(N, IC, H, W, device=device) self.conv2d = nn.Conv2d(IC, OC, kernel, stride=stride, groups=G, padding=pad).to(device=device) self.set_module_name('Conv2...
def test_benchmarkset_rbv2_glmnet():
    """Exercise the rbv2_glmnet benchmark set through the shared harness."""
    fidelity_config = {'trainsize': 0.5, 'repl': 9}
    b = test_benchmarkset_abstract('rbv2_glmnet', '15', fidelity_config)
class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
    """Auto class resolving configs to zero-shot object-detection models."""

    _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
class CTRLModelLanguageGenerationTest(unittest.TestCase): def test_lm_generate_ctrl(self): model = CTRLLMHeadModel.from_pretrained('ctrl') input_ids = torch.Tensor([[11859, 586, 20984, 8]]).long() expected_output_ids = [11859, 586, 20984, 8, 13391, 3, 980, 8258, 72, 327, 148, 2, 53, 29, 226,...
def normalize(text: str) -> str:
    """Apply every substitution in ``normalization_map`` to *text*.

    Replacements run in the map's iteration order, so later rules see the
    output of earlier ones.
    """
    for src, dst in normalization_map.items():
        text = text.replace(src, dst)
    return text
.expansion class ExpandDotCuBLAS(ExpandTransformation): environments = [environments.cublas.cuBLAS] def expansion(node, parent_state, parent_sdfg, n=None, **kwargs): ((desc_x, stride_x), (desc_y, stride_y), desc_res, sz) = node.validate(parent_sdfg, parent_state) dtype = desc_x.dtype.base_type ...
def get_outermost_dist_attr(dist, attr):
    """Walk down ``base_dist`` wrappers until *attr* is found.

    Returns the attribute from the outermost distribution in the chain that
    defines it, or ``None`` when none does.
    """
    current = dist
    while not hasattr(current, attr) and hasattr(current, 'base_dist'):
        current = current.base_dist
    return getattr(current, attr, None)
.parametrize('observation_shape', [(100,), (4, 84, 84), ((100,), (200,))]) .parametrize('q_func_factory', [MeanQFunctionFactory(), QRQFunctionFactory()]) .parametrize('scalers', [None, 'min_max']) def test_bcq(observation_shape: Shape, q_func_factory: QFunctionFactory, scalers: Optional[str]) -> None: (observation_...
def parse_args(): parser = argparse.ArgumentParser(description='Script that converts vi quad to silver standard trees') selftrain.common_args(parser) selftrain.add_length_args(parser) parser.add_argument('--input_file', default='extern_data/vietnamese/ViQuAD/train_ViQuAD.json', help='Path to the ViQuAD ...
# NOTE(review): the extracted source showed a bare decorator argument list
# ``((not have_sympy), 'SymPy not installed')`` — the decorator name (a skip
# marker) was lost in extraction; restore it from the original file.
def test_loggamma():
    """loggamma must round-trip through SymPy conversion in both directions."""
    x = Symbol('x')
    e1 = sympy.loggamma(sympy.Symbol('x'))
    e2 = loggamma(x)
    assert sympify(e1) == e2
    assert e2._sympy_() == e1
def conv_bn_relu_layer(input_layer, filter_shape, stride): out_channel = filter_shape[(- 1)] filter = create_variables(name='conv_bn_relu', shape=filter_shape) conv_layer = tf.nn.conv2d(input_layer, filter, strides=[1, stride, stride, 1], padding='SAME') bn_layer = batch_normalization_layer(conv_layer, ...
def main(): parser = argparse.ArgumentParser() parser.add_argument('output_pt', default=None, help='Where to write the converted PT file') parser.add_argument('input_vec', default=None, help='Unconverted vectors file') parser.add_argument('max_vocab', type=int, default=(- 1), nargs='?', help='How many v...
class SawyerCoffeePushEnv(SawyerXYZEnv): def __init__(self): hand_low = ((- 0.5), 0.4, 0.05) hand_high = (0.5, 1, 0.5) obj_low = ((- 0.1), 0.6, 0.0) obj_high = (0.1, 0.7, 0.0) goal_low = ((- 0.1), 0.8, (- 0.001)) goal_high = (0.1, 0.9, 0.0) super().__init__(se...
def create_bn_node(source_node: BaseNode, bn_node_weights: Dict[(Any, Any)]): bn_node = BaseNode(name=(source_node.name + '_reconstructed'), framework_attr={NUM_FEATURES: source_node.framework_attr[OUT_CHANNELS], EPSILON: EPSILON_VAL, MOMENTUM: MOMENTUM_VAL}, input_shape=source_node.output_shape, output_shape=sourc...
def haar_like_feature(int_image, r, c, width, height, feature_type=None, feature_coord=None): if (feature_coord is None): feature_type_ = _validate_feature_type(feature_type) return np.hstack(list(chain.from_iterable((haar_like_feature_wrapper(int_image, r, c, width, height, feat_t, feature_coord) f...
def concat_tensor_list_subsample(tensor_list, f):
    """Randomly keep a fraction *f* of each tensor's rows, then concatenate.

    Each tensor contributes ``ceil(len(t) * f)`` rows, sampled without
    replacement, and the survivors are stacked along axis 0.
    """
    sampled = []
    for t in tensor_list:
        keep = int(np.ceil(len(t) * f))
        idx = np.random.choice(len(t), keep, replace=False)
        sampled.append(t[idx])
    return np.concatenate(sampled, axis=0)
def beam_search(model, orig_item, preproc_item, beam_size, max_steps): (inference_state, next_choices) = model.begin_inference(orig_item, preproc_item) beam = [Hypothesis(inference_state, next_choices)] finished = [] for step in range(max_steps): if (len(finished) == beam_size): brea...
def test_all_points_mem_vec_diff_clusters(): warnings.filterwarnings('ignore', category=UserWarning) n_clusters_fit = None clusterer = HDBSCAN_flat(X, n_clusters=n_clusters_fit) n_clusters_fitted = n_clusters_from_labels(clusterer.labels_) n_clusters_predict = (n_clusters_fitted + 3) memberships...
# NOTE(review): extraction dropped the decorator names; the argument lists
# ``(name='run')`` and ``('-p', '--ca-path', required=True, ...)`` indicate a
# CLI command decorator plus an option decorator (click-style, given
# ``ClickPath``) — restore them from the original file.
def run_(ca_path):
    """CLI shim: forward the validated --ca-path value to :func:`run`."""
    run(ca_path)
def Diff(**kwargs):
    """Keras Lambda layer computing d(x[0])/d(x[1]) via ``tf.gradients``.

    Unconnected gradients are reported as zeros rather than ``None``.
    """
    def _grad(x):
        return tf.gradients(x[0], x[1], unconnected_gradients='zero')
    return L.Lambda(_grad, **kwargs)
class TestOptimizer(unittest.TestCase): (torch.backends.xnnpack.enabled, ' XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.') def test_optimize_for_mobile(self): batch_size = 2 input_channels_per_group = 6 height = 16 width = 16 output_channels_pe...
class config(object): _config = {} __metaclass__ = MetaConfig def set(key, val): config._config[key] = val def init_config(file='config.py'): if (len(config._config) > 0): return logging.info('use configuration: %s', file) data = {} execfile(file, data...
def context_fusion_layers(rep_tensor, rep_mask, method, activation_function, scope=None, wd=0.0, is_train=None, keep_prob=1.0, hn=None, **kwargs): method_name_list = ['lstm', 'gru', 'sru', 'sru_normal', 'cnn', 'multi_head', 'multi_head_git', 'disa', 'block'] (bs, sl, vec) = (tf.shape(rep_tensor)[0], tf.shape(re...
def checkpoint_wrapper(m, offload_to_cpu=False): original_forward = m.forward def _checkpointed_forward(*args, **kwargs): (kwarg_keys, flat_args) = pack_kwargs(*args, **kwargs) parent_ctx_dict = {'offload': offload_to_cpu} output = CheckpointFunction.apply(original_forward, parent_ctx_di...
def generic_activation_jit(op_name):
    """Build an activation-counting hook attributed to *op_name*.

    Returns:
        A callable ``f(inputs, outputs) -> Counter`` that maps *op_name* to
        the number of elements in ``outputs[0]``.
    """
    def _count(inputs, outputs):
        elements = prod(get_shape(outputs[0]))
        return Counter({op_name: elements})
    return _count
# NOTE(review): extraction left only ``_sz(1)`` of the decorator line —
# restore the full decorator name from the original file.
def box(x):
    """Box (nearest-neighbour) interpolation kernel: 1 on [-1, 1], else 0.

    The half-open interval [-1, 0) and the closed interval [0, 1] are summed
    so the kernel is exactly 1 across [-1, 1] with no double count at 0.
    """
    fw, to_dtype, eps = set_framework_dependencies(x)
    left = to_dtype((-1 <= x) & (x < 0))
    right = to_dtype((0 <= x) & (x <= 1))
    return left + right
def getModel(model_arch, output_channels=2, parallel_flag=False, gpu_flag=True, base_filter=32): print(' model arch specified by user is ', model_arch) if (model_arch.lower() == 'vnet'): print(('loading VNet, output will have %d channels' % output_channels)) model = VNet() elif (model_arch.l...
class AutomorphismFieldGroup(UniqueRepresentation, Parent): Element = AutomorphismField def __init__(self, vector_field_module): if (not isinstance(vector_field_module, VectorFieldModule)): raise TypeError('{} is not a module of vector fields'.format(vector_field_module)) Parent.__in...
def get_results(base_result_dir, context_location, exclude_codex=True): context_result_dir = os.path.join(base_result_dir, context_location) result_files = next(os.walk(context_result_dir), (None, None, []))[2] if (result_files and exclude_codex and ('codex_4072.json' in result_files)): result_files...
def build_parser(): parser = optparse.OptionParser(add_help_option=False) option_factories = (SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ) for option_factory in option_factories: option = option_factory() parser.add_option(option) def parser_exit(self, msg): raise OptionParsingErro...
def _tokenize(tokenizer, tokens, clusters, speakers): (new_tokens, token_to_new_token_map, new_token_to_token_map) = (tokens, list(range(len(tokens))), list(range(len(tokens)))) if speakers: (new_tokens, token_to_new_token_map, new_token_to_token_map) = add_speaker_information(tokens, speakers) ...
def test_issymmetric_ishermitian_invalid_input(): A = np.array([1, 2, 3]) raises(ValueError, issymmetric, A) raises(ValueError, ishermitian, A) A = np.array([[[1, 2, 3], [4, 5, 6]]]) raises(ValueError, issymmetric, A) raises(ValueError, ishermitian, A) A = np.array([[1, 2, 3], [4, 5, 6]]) ...
class Diffusion(Simulation): def __init__(self, graph, model='SIS', runs=10, steps=5000, b=0.00208, d=0.01, c=1, **kwargs): super().__init__(graph, runs, steps, **kwargs) self.prm.update({'model': model, 'b': b, 'd': d, 'c': c, 'diffusion': None, 'method': None, 'k': None}) self.prm.update(k...
class FunctionFieldHigherDerivation_global(FunctionFieldHigherDerivation): def __init__(self, field): from sage.matrix.constructor import matrix FunctionFieldHigherDerivation.__init__(self, field) self._p = field.characteristic() self._separating_element = field(field.base_field().ge...