code
stringlengths
101
5.91M
class ResNeXtBottleneck(nn.Module): expansion = 2 def __init__(self, inplanes, planes, cardinality, stride=1, downsample=None): super(ResNeXtBottleneck, self).__init__() mid_planes = (cardinality * int((planes / 32))) self.conv1 = nn.Conv3d(inplanes, mid_planes, kernel_size=1, bias=False...
class Tracer(TracerBase): def __init__(self): super().__init__() def create_arg(self, a: Any) -> Argument: if isinstance(a, torch.nn.Parameter): for (n, p) in self.root.named_parameters(): if (a is p): return self.create_node('get_attr', n, (), {})...
def train(task: str, cache_dir: Path, data_dir: Path, log_dir: Path, model_dir: Path, proj_dim_ratio: Optional[float], seed: int): makedirs(cache_dir, exist_ok=True) makedirs(log_dir, exist_ok=True) makedirs(model_dir, exist_ok=True) manual_seed(seed) tensorboard_sm = SummaryWriter((log_dir / task))...
def extract_chain(path, chain='A'): parser = PDB.PDBParser() writer = PDB.PDBIO() path = Path(path).expanduser() struct = parser.get_structure(path.stem, path) writer.set_structure(struct) out_path = (path.parent / f'{path.stem}_{chain}.pdb') writer.save(str(out_path), select=SelectChains([c...
_model
def efficientnet_b2a(pretrained=False, **kwargs):
    """Build the EfficientNet-B2a variant (width x1.1, depth x1.2).

    NOTE(review): the bare `_model` above looks like a mangled
    `@register_model` decorator — confirm against the original source.
    """
    return _gen_efficientnet('efficientnet_b2a', channel_multiplier=1.1,
                             depth_multiplier=1.2, pretrained=pretrained, **kwargs)
class RandomAffine(object): def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0): if isinstance(degrees, numbers.Number): if (degrees < 0): raise ValueError('If degrees is a single number, it must be positive.') self.degrees...
((testing.get_driver() in ['mysql', 'hive']), 'skip non MySQL and Hive tests') class TestFeatureDerivationWithMockedFeatures(unittest.TestCase): def check_json_dump(self, features): dump_json = json.dumps(features, cls=fc.JSONEncoderWithFeatureColumn) new_features = json.loads(dump_json, cls=fc.JSON...
class LinearSchedule(): def __init__(self, start, end=None, steps=None): if (end is None): end = start steps = 1 self.inc = ((end - start) / float(steps)) self.current = start self.end = end if (end > start): self.bound = min else: ...
# NOTE(review): the decorators are mangled in this snippet — `_utils.test(...)` and
# `_func` were presumably `@test_utils.test(...)` and a real-function decorator, and
# `foo` may have lost a `@ti.kernel` decorator. Confirm against the original file.
_utils.test(arch=[ti.cpu, ti.cuda])
def test_real_func_struct_ret():
    """A real function returning a struct must preserve both fields exactly."""
    s = ti.types.struct(a=ti.i16, b=ti.f64)

    _func
    def bar() -> s:
        # b is near the f64 upper range to exercise full-precision struct returns
        return s(a=123, b=ti.f64(1.2345e+300))

    def foo() -> ti.f64:
        a = bar()
        return (a.a * a.b)

    assert (foo() == pytest.approx((123 * 1.2345e+300)))
class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, atrous=1, downsample=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = SynchronizedBatchNorm2d(planes) self.conv2 = nn....
def _get_outputs_to_save(batch, outputs): targets = batch['targets'].cpu().numpy() outputs = outputs.cpu().numpy() protein_length = batch['protein_length'].sum(1).cpu().numpy() reshaped_output = [] for (target, output, plength) in zip(targets, outputs, protein_length): output_slices = tuple(...
def test_countvectorizer_custom_vocabulary_repeated_indices():
    """A custom vocabulary mapping two terms to the same index must be rejected."""
    duplicated_index_vocab = {'pizza': 0, 'beer': 0}
    expected_msg = 'Vocabulary contains repeated indices'
    with pytest.raises(ValueError, match=expected_msg):
        vectorizer = CountVectorizer(vocabulary=duplicated_index_vocab)
        vectorizer.fit(['pasta_siziliana'])
class State(core.State): current_player: Array = jnp.int32((- 1)) observation: Array = jnp.zeros(478, dtype=jnp.bool_) rewards: Array = jnp.float32([0, 0, 0, 0]) terminated: Array = FALSE truncated: Array = FALSE legal_action_mask: Array = jnp.ones(38, dtype=jnp.bool_) _step_count: Array = j...
def find_step_similarities_for_segments_using_frame(args, logger, step_des_feats, segment_video_embeddings, segment_video_lookup_table): start = time.time() for segment_id in tqdm(range(len(segment_video_embeddings))): (v, cidx) = segment_video_lookup_table[segment_id] save_path = os.path.join(a...
def get_dsc_coef(pair, capsule_path, region_path): class_coefs = [] capsules = [] regions = [] for i in range(3): capsules.append(cv2.imread(os.path.join(capsule_path, pair[i]))) regions.append(cv2.imread(os.path.join(region_path, pair[i]))) for i in range(1, (NUM_CLASSES + 1)): ...
class Decoder(nn.Module): def __init__(self, res_dims, drop=0): super(Decoder, self).__init__() self.embedding = EmbeddingBlock(res_dims[(- 1)], res_dims[(- 1)], drop=drop) self.up3 = UpBlock((2 * res_dims[(- 1)]), res_dims[(- 2)], act='relu', drop=drop) self.up2 = UpBlock((2 * res_d...
class CyclicLR(_LRScheduler): def __init__(self, optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=(- 1), verbose=False): if (not isinstance(optimizer, Opti...
def __flip(img, flip):
    """Mirror *img* left-right when *flip* is truthy; otherwise return it unchanged."""
    return img.transpose(Image.FLIP_LEFT_RIGHT) if flip else img
class MultiIOController(MultiInputController): def __init__(self, num_output_blocks=2, with_output_blocks=True, output_block_unique_connection=True, output_block_diversity_weight=None, **kwargs): self.with_output_blocks = with_output_blocks self.num_output_blocks = num_output_blocks skip_wei...
(config_path='../../conf/inference', config_name='config_inference') def test_policy(input_cfg: DictConfig) -> None: format_sftp_path(input_cfg) train_cfg_path = (Path(input_cfg.train_folder) / '.hydra/config.yaml') train_cfg = OmegaConf.load(train_cfg_path) cfg = OmegaConf.merge(train_cfg, input_cfg) ...
def _dump_arg_defaults(kwargs, app=None): if (app is None): app = current_app if app: bp = (app.blueprints.get(request.blueprint) if request else None) kwargs.setdefault('cls', (bp.json_encoder if (bp and bp.json_encoder) else app.json_encoder)) if (not app.config['JSON_AS_ASCII'...
def reduce_bins(lrtoks):
    """Collapse binary-operator triples in *lrtoks* in place.

    Scans left to right; whenever lrtoks[i] is a binary operator (membership in
    the module-level `bin_list`), the triple (left, op, right) is folded via
    `eval_bin_op` into the left slot and the two consumed tokens are deleted.

    NOTE(review): assumes every operator token has an operand on both sides;
    an operator at index 0 or len-1 would index out of range — confirm the
    tokenizer guarantees this.
    """
    i = 0
    while (i < len(lrtoks)):
        if (lrtoks[i] in bin_list):
            # fold [left, op, right] into a single value stored in the left slot
            args = [lrtoks[(i - 1)], lrtoks[i], lrtoks[(i + 1)]]
            lrtoks[(i - 1)] = eval_bin_op(args)
            # remove the operator and the right operand (both now at index i)
            del lrtoks[i]
            del lrtoks[i]
            # re-run reduction on the shortened list; terminates because the
            # list strictly shrinks on every fold
            reduce_bins(lrtoks)
        i += 1
def actor_info_callback(msg): global target_finish, start_time, score, count_flag, left_actors, actors_pos, find_time, topic_arrive_time actor_id = actor_id_dict[msg.cls] for i in actor_id: if (i not in left_actors): continue topic_arrive_interval = (rospy.get_time() - topic_arri...
class CDF(BaseModel): enable: bool = True sample_size: int = 100 height: Union[(int, None)] = None width: Union[(int, None)] = None def how_to_guide(self, height: int, width: int) -> List[Tuple[(str, str)]]: vals = [self.sample_size, height, width] names = ['pdf.sample_size', 'height...
class Sigmoid(Model): def __init__(self, *, input_shape=None, name=None, bin_dtype=bb.DType.FP32, real_dtype=bb.DType.FP32, core_model=None): if (core_model is None): core_creator = search_core_model('Sigmoid', [bin_dtype, real_dtype]).create core_model = core_creator() super...
def get_version():
    """Execute the version file and return the appropriate version string.

    Returns ``short_version`` when building an sdist/bdist_wheel, otherwise
    ``__version__``. Reads the module-level ``version_file`` path.
    """
    # Exec into an explicit namespace dict: relying on exec() mutating the
    # dict returned by locals() is CPython-specific and breaks under PEP 667
    # (Python 3.13+), where locals() in a function is a fresh snapshot.
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    import sys
    if ('sdist' in sys.argv) or ('bdist_wheel' in sys.argv):
        return namespace['short_version']
    else:
        return namespace['__version__']
def compute_similarity_transform_with_vis_mask(vis_mask, S1, S2): transposed = False if ((S1.shape[0] != 3) and (S1.shape[0] != 2)): S1_copy = (S1 * vis_mask) S2_copy = (S2 * vis_mask) S1_copy = S1_copy.T S2_copy = S2_copy.T transposed = True assert (S2_copy.shape[1] ...
def convert_weights_to_fp16(model: nn.Module): def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if (l.bias is not None): l.bias.data = l.bias.data.half() if isinstance(l, (nn.MultiheadAt...
class ControllerTrainEnvironment(): def __init__(self, controller, manager, max_episode=100, max_step_per_ep=2, logger=None, resume_prev_run=False, should_plot=True, initial_buffering_queue=15, working_dir='.', entropy_converge_epsilon=0.01, squeezed_action=True, with_input_blocks=False, with_skip_connection=True, ...
def get_scenario_cache_path(benchmark_output_path: str, scenario_name: str):
    """Return (and create if needed) the cache directory for *scenario_name*."""
    cache_dir: str = os.path.join(benchmark_output_path, 'scenarios', scenario_name)
    ensure_directory_exists(cache_dir)
    return cache_dir
def convert_checkpoint_helper(config, orig_state_dict): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if (('pooler' in key) or ('sen_class' in key) or ('conv.bias' in key)): continue else: orig_state_dict[rename_key(key)] = val orig_stat...
def lowercase_range(code1, code2):
    """Map the part of [code1, code2) overlapping 'A'..'Z' to lowercase codes.

    Returns a (start, stop) pair of lowercase code points, or None when the
    input range does not intersect the uppercase ASCII letters.
    """
    lo = max(code1, ord('A'))
    hi = min(code2, ord('Z') + 1)
    if lo >= hi:
        return None
    offset = ord('a') - ord('A')
    return (lo + offset, hi + offset)
def multiset_eq(l1: List, l2: List) -> bool:
    """Return True iff *l1* and *l2* contain the same elements with the same
    multiplicities (order-insensitive equality).

    Elements must be hashable. Equal-length check first for a cheap early exit.
    """
    from collections import Counter
    if len(l1) != len(l2):
        return False
    # Counter comparison is the stdlib idiom for multiset equality
    return Counter(l1) == Counter(l2)
def save_wav(wav, path):
    """Peak-normalize *wav* to int16 range and write it to *path*.

    The 0.01 floor avoids division blow-up on (near-)silent input.
    """
    # Scale a copy instead of `wav *= ...`, which mutated the caller's array in place.
    scaled = wav * (32767 / max(0.01, np.max(np.abs(wav))))
    librosa.output.write_wav(path, scaled.astype(np.int16), hparams.sample_rate)
class English(MLQALanguage):
    """English text handling for MLQA: article stripping plus whitespace tokenization."""

    def __init__(self):
        article_pattern = re.compile('\\b(a|an|the)\\b')
        super().__init__(article_pattern)

    def tokenize(self, text: str):
        """Split *text* into tokens on whitespace."""
        return whitespace_tokenize(text)
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3QueueDiscItem__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls): cls.add_constructor([]) cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::QueueDiscItem const >, ns3::empty, ns3::empty...
class EqualsSample(SimpleSample):
    """SimpleSample variant with value equality and hashing over (_foo, _bar)."""

    def __eq__(self, other) -> bool:
        if not isinstance(other, EqualsSample):
            return False
        return (self._foo, self._bar) == (other._foo, other._bar)

    def __hash__(self):
        # Must stay consistent with __eq__: same fields, same tuple order.
        return hash((self._foo, self._bar))
def train(setup, output_path, seed=0, gpu=0): device = ('cuda:%d' % (gpu,)) (net_func, trainset, valset, _) = setup() n = len(trainset) np.random.seed(seed) idx_val = np.random.permutation(n)[:nval] idx_train = np.setdiff1d(np.arange(n), idx_val) ntr = idx_train.size torch.manual_seed(se...
class VerticalOccupancyGridList(genpy.Message): _md5sum = '7ef85cc95b82747f51eb01a16bd7c795' _type = 'multi_map_server/VerticalOccupancyGridList' _has_header = False _full_text = 'float32 x\nfloat32 y\nint32[] upper\nint32[] lower\nint32[] mass\n\n\n' __slots__ = ['x', 'y', 'upper', 'lower', 'mass']...
def _get_mro(cls):
    """Return the method resolution order of *cls*.

    Jython needs the manual (cls,) + bases fallback; everywhere else use inspect.
    """
    if platform.python_implementation() != 'Jython':
        return inspect.getmro(cls)
    return (cls,) + cls.__bases__
def _get_nvml_function(name): global nvml_lib if (name in _nvml_function_cache): return _nvml_function_cache[name] lib_load_lock.acquire() try: if (nvml_lib is None): raise NVMLError(NVML_ERROR_UNINITIALIZED) _nvml_function_cache[name] = getattr(nvml_lib, name) ...
def mumu(N):
    """Multiplicative helper over the factorization of N.

    Returns 0 if any prime divides N more than twice; otherwise the product of
    -2 for each prime dividing N exactly once (primes with exponent 2
    contribute a factor of 1). Raises ValueError for N < 1.
    """
    if N < 1:
        raise ValueError('N must be at least 1')
    result = 1
    for _, exponent in factor(N):
        if exponent > 2:
            return ZZ.zero()
        if exponent == 1:
            result *= -2
    return ZZ(result)
def func_for_test_clear_buffer_in_auto_forward(x0, x1, ctx=None):
    """Thin wrapper: apply FuncForTestClearBuffersInAutoForward(ctx) to (x0, x1)."""
    func = FuncForTestClearBuffersInAutoForward(ctx)
    return func(x0, x1)
class Lseries_complex(Lseries): def __call__(self, s, prec=53): abelian_variety = self.abelian_variety() if (abelian_variety.dimension() == 0): return CC(1) try: factors = self.__factors[prec] return prod((L(s) for L in factors)) except AttributeEr...
def test_multiple_examples_different_locations(testdir): testdir.make_test('\nfrom hypothesis import Phase\n\()\(max_examples=1, phases=[Phase.explicit])\ndef test(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert case.body == {"name": "John"}\n assert case.query == {"age": 35}\n', schema={'o...
def move_arm(position, quaternion, ignore_collisions=False):
    """Plan a path to the target pose and step the simulation until it completes."""
    path = arm.get_path(position, quaternion=quaternion,
                        ignore_collisions=ignore_collisions)
    path.visualize()
    finished = False
    while not finished:
        finished = path.step()
        # The sim is stepped once after every path step, including the final one.
        pr.step()
    path.clear_visualization()
.skipif((not has_pytorch()), reason='Pytorch not installed.') _utils.test() def test_io_struct(): n = 16 x1 = ti.Struct.field({'a': ti.i32, 'b': ti.f32}, shape=(n,)) t1 = {'a': torch.tensor((2 * np.ones(n, dtype=np.int32))), 'b': torch.tensor((3 * np.ones(n, dtype=np.float32)))} x1.from_torch(t1) fo...
class ELMoCharacterMapper(): max_word_length = 50 beginning_of_sentence_character = 256 end_of_sentence_character = 257 beginning_of_word_character = 258 end_of_word_character = 259 padding_character = 260 beginning_of_sentence_characters = _make_bos_eos(beginning_of_sentence_character, padd...
class Model(nn.Module): def __init__(self, num_classes=1000, input_size=224): super(Model, self).__init__() self.block_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1]] if (FLAGS.dataset == 'cifar10'): self.block...
def _download_dataset(dataset_dir): for filename in [_TRAIN_DATA_FILENAME, _TRAIN_LABELS_FILENAME, _TEST_DATA_FILENAME, _TEST_LABELS_FILENAME]: filepath = os.path.join(dataset_dir, filename) if (not os.path.exists(filepath)): print(('Downloading file %s...' % filename)) def _...
def createDataset(correctFileName, eps, outputFilename): delta = DELTA groundTruth = [] with open(correctFileName, 'r') as instream: lines = instream.readlines() groundTruth = [int(val.strip()) for val in lines] assigns = [] lenMacro = (len(CLUSTER_SEQUENCE) + NUM_GARBAGE) for i ...
def _do_build(hooks, env, dist, dest): get_requires_name = 'get_requires_for_build_{dist}'.format(**locals()) get_requires = getattr(hooks, get_requires_name) reqs = get_requires({}) log.info('Got build requires: %s', reqs) env.pip_install(reqs) log.info('Installed dynamic build dependencies') ...
def main(): parser = argparse.ArgumentParser() add_encoder_params(parser) add_training_params(parser) add_tokenizer_params(parser) add_reader_preprocessing_params(parser) parser.add_argument('--max_n_answers', default=10, type=int, help='Max amount of answer spans to marginalize per singe passag...
def get_rank_to_device_map(args): if (args.nnodes == 1): local_ranks = list(range(args.world_size)) else: ngpus_per_node = get_ngpus_per_node(args) local_ranks = list() for n in ngpus_per_node: local_ranks.extend(range(n)) return {rank: get_device_for_rank(args, r...
def compute_and_visualize_log_probs(test_data, tokenizer, log_prob_fn, html_dir: str, max_docs=128): def compute_and_viz_log_probs(step: StepInfo): model = step.model os.makedirs(html_dir, exist_ok=True) path = os.path.join(html_dir, f'step_{step}.html') viz_probs(path, model, tokeni...
def get_text_prompt(text_prompt: str='', fallback_prompt: str='', file_path: str='', ext_types=['.mp4'], use_caption=False): try: if use_caption: if (len(text_prompt) > 1): return text_prompt caption_file = '' for ext in ext_types: maybe_fi...
def test_union_option(): one = ak.highlevel.Array([1, 2, [], [3, 4]]).layout two = ak.highlevel.Array([100, None, 300]).layout three = ak.highlevel.Array([{'x': 1}, {'x': 2}, 5, 6, 7]).layout assert (to_list(one._mergemany([two, three])) == [1, 2, [], [3, 4], 100, None, 300, {'x': 1}, {'x': 2}, 5, 6, 7]...
def _read_config_file(filename, opts, expand=None): _LOG.debug(('Reading config %s' % filename)) config_defaults = [] cparser = configparser.ConfigParser() cparser.optionxform = str cparser.read_file(filename) if cparser.has_section('commandline'): for (k, v) in cparser.items('commandlin...
def Huber_loss(x, y):
    """Mean Huber loss with delta=1: quadratic for |x-y| <= 1, linear beyond."""
    abs_err = tf.abs(x - y)
    quadratic = tf.square(x - y) / 2
    linear = abs_err - (1 / 2)
    return tf.reduce_mean(tf.where(tf.less_equal(abs_err, 1.0), quadratic, linear))
class Partition14(nn.Module): LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[k]', 'T5ForConditionalGeneration/T5Stack[decoder]/ModuleList[block]/T5Block[18]/ModuleList[layer]/T5LayerSelfAttention[...
def test_sample():
    """End-to-end: run the sharded-sample command, then validate its outputs."""
    (cmd, meta_path, out_path) = prepare('samples/shard-{000000..000009}.tar')
    print('running')
    # check=True: a failing subcommand must fail the test instead of being
    # silently ignored (the return value was previously discarded).
    subprocess.run(cmd.split(), check=True)
    print('testing')
    check(meta_path, out_path)
def test_function_sampler_reject_sparse():
    """With accept_sparse=False the sampler must refuse a CSR matrix."""
    sparse_X = sparse.csr_matrix(X)
    sampler = FunctionSampler(accept_sparse=False)
    expected = 'A sparse matrix was passed, but dense data is required'
    with pytest.raises(TypeError, match=expected):
        sampler.fit_resample(sparse_X, y)
class BidirectionalGRU(nn.Module): def __init__(self, rnn_dim, hidden_size, dropout, batch_first): super(BidirectionalGRU, self).__init__() self.BiGRU = nn.GRU(input_size=rnn_dim, hidden_size=hidden_size, num_layers=1, batch_first=batch_first, bidirectional=True) self.layer_norm = nn.LayerNo...
class TFTapasModel(metaclass=DummyObject):
    """Import-time placeholder: raises an informative error unless TensorFlow is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def march_squares() -> int: edges.deactivate() x_range = tm.ceil(((grid_size * W) / H), int) y_range = grid_size for (i, j) in ti.ndrange(((- x_range), (x_range + 1)), ((- y_range), (y_range + 1))): case_id = 0 values = tm.vec4(isofunc(tm.vec2(i, j)), isofunc(tm.vec2((i + 1), j)), isofun...
def main(): args = parse_args() assert (args.out or args.eval or args.format_only or args.show or args.show_dir), 'Please specify at least one operation (save/eval/format/show the results / save the results) with the argument "--out", "--eval", "--format-only", "--show" or "--show-dir"' if (args.eval and ar...
def measure_mixed_traffic(vel, pos, ID, v_star, s_star, type): pos_cav = np.argwhere((np.ravel(ID) == 1)) if (type == 1): y = np.transpose([(vel - v_star)]) elif (type == 2): spacing = (pos[0:(- 1)] - pos[1:]) y = np.vstack((np.transpose([(vel - v_star)]), np.transpose([(spacing - s_...
class Discriminator(torch.nn.Module): def __init__(self, kernel_size): super().__init__() self.Layers = torch.nn.ModuleList() self.Norms = torch.nn.ModuleList() Channels = [2, 16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024, 1] for i in range((len(Channels) - 1)): ...
def block_group(inputs, filters, block_fn, blocks, strides, is_training, name, data_format='channels_last', dropblock_keep_prob=None, dropblock_size=None): inputs = block_fn(inputs, filters, is_training, strides, use_projection=True, data_format=data_format, dropblock_keep_prob=dropblock_keep_prob, dropblock_size=d...
def test_zero_sign():
    """sinpi/cospi must honor IEEE signed-zero conventions at exact zeros."""
    neg_zero = sinpi(-0.0)
    assert neg_zero == 0.0
    assert np.signbit(neg_zero)

    pos_zero = sinpi(0.0)
    assert pos_zero == 0.0
    assert not np.signbit(pos_zero)

    half = cospi(0.5)
    assert half == 0.0
    assert not np.signbit(half)
def vgg16_bn(output_dim, k_lipschitz=None, p_drop=0.5):
    """VGG-16 with batch norm; an optional global Lipschitz constraint is
    distributed geometrically over the 16 weight layers (16th root each)."""
    if k_lipschitz is not None:
        k_lipschitz = k_lipschitz ** (1.0 / 16.0)
    features = make_layers(cfg['D'], batch_norm=True, k_lipschitz=k_lipschitz)
    return VGG(features, output_dim=output_dim,
               k_lipschitz=k_lipschitz, p_drop=p_drop)
def _concat(dps, axis):
    """Concatenate corresponding leaves of the pytrees in *dps* along *axis*."""
    def join(*leaves):
        return np.concatenate(leaves, axis)
    return jax.tree_util.tree_map(join, *dps)
def test_basic_table_parse():
    """The CODATA key for c must resolve to the speed-of-light constant."""
    key = 'speed of light in vacuum'
    assert_equal(value(key), c)
    assert_equal(value(key), speed_of_light)
def mass__by_Siegel_densities(self, odd_algorithm='Pall', even_algorithm='Watson'): from sage.symbolic.constants import pi n = self.dim() s = ((n - 1) // 2) if ((n % 2) != 0): char_d = squarefree_part((2 * self.det())) else: char_d = squarefree_part(self.det()) generic_prod = (ZZ...
def extract_archive(from_path, to_path=None, overwrite=False): if (to_path is None): to_path = os.path.dirname(from_path) if from_path.endswith(('.tar.gz', '.tgz')): logging.info('Opening tar file {} to {}.'.format(from_path, to_path)) with tarfile.open(from_path, 'r') as tar: ...
class Workspace(TypedDict):
    """Typed mapping bundling a model's measurements, channels, and observations.

    NOTE(review): the shape resembles a HistFactory/pyhf-style workspace —
    confirm against where this type is consumed.
    """

    measurements: Sequence[Measurement]  # measurement/fit configurations
    channels: Sequence[Channel]  # per-region channel specifications
    observations: Sequence[Observation]  # observed data, presumably one per channel — TODO confirm
def mask_and_keypoint_rcnn(model):
    """Deprecated alias: build a generalized R-CNN with masks and keypoints enabled."""
    # Logger.warn is a deprecated alias of Logger.warning.
    logger.warning('Deprecated: use `MODEL.TYPE: generalized_rcnn` with `MODEL.MASK_ON: True and ``MODEL.KEYPOINTS_ON: True`')
    return generalized_rcnn(model)
class TFXLMRobertaPreTrainedModel(metaclass=DummyObject):
    """Import-time placeholder: raises an informative error unless TensorFlow is installed."""

    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def projective_plane(n, check=True, existence=False): from sage.combinat.designs.bibd import BruckRyserChowla_check if (n <= 1): if existence: return False raise EmptySetError('There is no projective plane of order <= 1') if (n == 10): if existence: return Fal...
def extract_frames(name: str, path: str, artifact_path: str, preprocess_resolution: int, n_val_videos: int, dry_run: bool=False) -> Tuple[(Path, Path, Path, Path)]: overwatch.info(f'Phase 1 Preprocessing :: Extracting Frames for Dataset `{name}`') (t_dir, v_dir) = (((Path(artifact_path) / name) / 'train'), ((Pa...
class SoupState(ObjectState): def __init__(self, position, ingredients=[], cooking_tick=(- 1), cook_time=None, **kwargs): super(SoupState, self).__init__('soup', position) self._ingredients = ingredients self._cooking_tick = cooking_tick self._recipe = None self._cook_time = ...
# NOTE(review): decorators appear stripped in this snippet — `_utils.test(...)` was
# presumably `@test_utils.test(...)`, and C / f / k likely carried Taichi decorators
# (e.g. @ti.dataclass, @ti.func, @ti.kernel); `C(i=2)` only works if C is a dataclass.
_utils.test(debug=True)
def test_func_struct_arg():
    """A struct argument's field must round-trip through a function call."""
    class C():
        i: int

    def f(c: C):
        return c.i

    def k():
        c = C(i=2)
        assert (f(c) == 2)

    k()
.parametrize('evaluation_policy_pscore, evaluation_policy_pscore_item_position, evaluation_policy_pscore_cascade, evaluation_policy_action_dist, q_hat, description_1', valid_input_of_create_estimator_inputs) .parametrize('alpha, n_bootstrap_samples, random_state, description_2', valid_input_of_estimate_intervals) def t...
def get_sampler(name, **kwargs):
    """Instantiate the sampler registered under *name*, forwarding **kwargs.

    Raises KeyError when *name* is not in the module-level registry.
    """
    # `name not in __factory` — membership on the dict itself, no .keys() needed
    if name not in __factory:
        raise KeyError('Unknown sampler: {}'.format(name))
    return __factory[name](**kwargs)
def clean_rfv_dict(rfv_dictionary):
    """Remove, in place, every entry whose value contains 1.

    Keys are collected first so the dict is not mutated while iterating.
    """
    flagged = [key for key, val in rfv_dictionary.items() if 1 in val]
    for key in flagged:
        rfv_dictionary.pop(key)
def set_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) random.seed(seed) torch.backends.cudnn.enabled = False torch.backends.cudnn.benchmark = False os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' os.environ['P...
def open_(config: Config, epochs: int, data_split: str): base_filename = f'{config.section}_{data_split}_e{epochs}' conll_dir = config.conll_log_dir kwargs = {'mode': 'w', 'encoding': 'utf8'} os.makedirs(conll_dir, exist_ok=True) with open(os.path.join(conll_dir, f'{base_filename}.gold.conll'), **kw...
class Conv2d(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False, bn=False): super(Conv2d, self).__init__() padding = (int(((kernel_size - 1) / 2)) if same_padding else 0) self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,...
def verify_numericalized_example(field, test_example_data, test_example_numericalized, test_example_lengths=None, batch_first=False, train=True): if isinstance(test_example_numericalized, tuple): (test_example_numericalized, lengths) = test_example_numericalized assert (test_example_lengths == lengt...
def get_parameter_or_create(name, shape=None, initializer=None, need_grad=True, as_need_grad=None): names = name.split('/') if (len(names) > 1): with parameter_scope(names[0]): return get_parameter_or_create('/'.join(names[1:]), shape, initializer, need_grad, as_need_grad) if (as_need_gr...
def get_rcs_class_probs(data_root, temperature): with open(osp.join(data_root, 'sample_class_stats.json'), 'r') as of: sample_class_stats = json.load(of) overall_class_stats = {} for s in sample_class_stats: s.pop('file') for (c, n) in s.items(): c = int(c) if...
def get_output_dir(exp_id):
    """Return a timestamped run directory 'output/<exp_id>/<YYYY-mm-dd-HH-MM-SS>'."""
    import datetime
    stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    return os.path.join('output/' + exp_id, stamp)
def test_get_dataloader():
    """Smoke test: constructing the FashionMNIST-pad OOD training loader must not raise."""
    cfg = {
        'dataset': 'FashionMNISTpad_OOD',
        'path': 'datasets',
        'shuffle': True,
        'n_workers': 0,
        'batch_size': 1,
        'split': 'training',
    }
    dl = get_dataloader(cfg)
def validate_ad_nrt(df: Union[(str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame)], column: str='') -> Union[(bool, pd.Series, pd.DataFrame)]: if isinstance(df, (pd.Series, dd.Series)): return df.apply(nrt.is_valid) elif isinstance(df, (pd.DataFrame, dd.DataFrame)): if (column != ''): ...
def test_anchor_positive_triplet_mask(): num_data = 64 num_classes = 10 labels = np.random.randint(0, num_classes, size=num_data).astype(np.float32) mask_np = np.zeros((num_data, num_data)) for i in range(num_data): for j in range(num_data): distinct = (i != j) valid ...
# NOTE(review): the decorator is mangled — `(scope='function')` was presumably
# `@pytest.fixture(scope='function')`; confirm against the original test file.
(scope='function')
def cfg_train(cfg_train_global, tmp_path) -> DictConfig:
    """Function-scoped copy of the global training config with outputs under tmp_path.

    Yields the patched config; on teardown clears Hydra's global state so
    subsequent tests start from a clean slate.
    """
    cfg = cfg_train_global.copy()
    with open_dict(cfg):
        cfg.paths.output_dir = str(tmp_path)
        cfg.paths.log_dir = str(tmp_path)
    (yield cfg)
    GlobalHydra.instance().clear()
.environment class IntelMKLScaLAPACKOpenMPI(): cmake_minimum_version = None cmake_packages = ['MPI'] cmake_packages = [] cmake_variables = {} cmake_compile_flags = [] cmake_libraries = [] cmake_files = [] headers = ['mkl.h', 'mkl_scalapack.h', 'mkl_blacs.h', 'mkl_pblas.h'] state_fiel...
class SPADENLayerDiscriminator(BaseNetwork): def modify_commandline_options(parser, is_train): return parser def __init__(self, opt): super().__init__() self.opt = opt kw = 4 padw = int(np.ceil(((kw - 1.0) / 2))) nf = opt.ndf input_nc = self.compute_D_inpu...
def rand_unif(shape: Sequence[int], a: float, b: float, requires_grad: bool=True):
    """Tensor of *shape* with entries uniform between a and b.

    Returned tensor is a detached leaf with grad tracking set per *requires_grad*.
    """
    span = a - b
    sample = span * torch.rand(tuple(shape)) + b
    return sample.clone().detach().requires_grad_(requires_grad)
def compute_single_category_polarity(model, input, label, tokenizer, args): break_tokens = tokenizer.encode(tokenizer._eos_token.content) MAX_LEN = args.block_size batch_pred = [] batch_ground = [] for (inp, ground) in zip(input, label): inp_text = tokenizer.decode(inp).split('<|category|>')...