code stringlengths 101 5.91M |
|---|
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(wavs, wav_lens) = batch.sig
(wavs, wav_lens) = (wavs.to(self.device), wav_lens.to(self.device))
if (stage == sb.Stage.TRAIN):
if hasattr(self.hparams, 'augmentation'):
... |
_utils.test(require=ti.extension.sparse)
def test_sparse_grid():
grid = ti.sparse.grid({'pos': ti.math.vec2, 'mass': ti.f32, 'grid2particles': ti.types.vector(20, ti.i32)}, shape=(10, 10))
grid[(0, 0)].pos = ti.math.vec2(1, 2)
grid[(0, 0)].mass = 1.0
grid[(0, 0)].grid2particles[2] = 123
assert (ti.s... |
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
if could_use_op(input):
return conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation).apply(input, ... |
def _get_graph(expression: List) -> nx.MultiGraph:
if isinstance(expression, str):
G = nx.MultiDiGraph()
if (get_symbol_type(expression) == 1):
G.add_node(1, id=expression, type='entity')
elif (get_symbol_type(expression) == 2):
G.add_node(1, id=expression, type='lite... |
def rec(actual, predicted, k):
    """Compute recall@k: the fraction of relevant items recovered in the top-k predictions.

    Args:
        actual: iterable of relevant item ids.
        predicted: ranked sequence of predicted item ids.
        k: number of top predictions to consider.

    Returns:
        float in [0, 1]; 0.0 when ``actual`` is empty (the original raised
        ZeroDivisionError for queries with no relevant items).
    """
    act_set = set(actual)
    if not act_set:
        # No relevant items: nothing can be recovered, so recall is 0.
        return 0.0
    pred_set = set(predicted[:k])
    return len(act_set & pred_set) / len(act_set)
class TestRelationNetworksPipeline():
def test_prototypical_networks_returns_expected_output_for_example_images(example_few_shot_classification_task):
(support_images, support_labels, query_images) = example_few_shot_classification_task
torch.manual_seed(1)
torch.set_num_threads(1)
m... |
def get_supported_dtypes(op, sample_inputs_fn, device_type):
assert (device_type in ['cpu', 'cuda'])
if ((not TEST_CUDA) and (device_type == 'cuda')):
warnings.warn('WARNING: CUDA is not available, empty_dtypes dispatch will be returned!')
return _dynamic_dispatch_dtypes(())
supported_dtypes... |
def verify_file(basename: str, func_select: Optional[List[str]], soundness_select: Optional[List[str]]=None) -> List[str]:
file_names = LeanFileNames(basename=basename)
codes = get_codes([file_names.cairo_filename])
cairo_path = list(filter(None, os.getenv(LIBS_DIR_ENVVAR, '').split(':')))
main_scope = ... |
def layer_test(layer_cls, kwargs={}, input_shape=None, input_dtype=None, input_data=None, expected_output=None, expected_output_dtype=None, fixed_batch_size=False):
if (input_data is None):
assert input_shape
if (not input_dtype):
input_dtype = K.floatx()
input_data_shape = list(... |
def cuda_cast(func):
(func)
def wrapper(*args, **kwargs):
new_args = []
for x in args:
if hasattr(x, 'cuda'):
x = x.cuda()
new_args.append(x)
new_kwargs = {}
for (k, v) in kwargs.items():
if hasattr(v, 'cuda'):
v... |
def splrep(x, y, w=None, xb=None, xe=None, k=3, task=0, s=None, t=None, full_output=0, per=0, quiet=1):
    """Find the B-spline representation of a 1-D curve.

    Thin wrapper that forwards every argument, positionally and in order,
    to the backend implementation ``_impl.splrep`` and returns its result
    unchanged.
    """
    return _impl.splrep(x, y, w, xb, xe, k, task, s, t, full_output, per, quiet)
class ItemCategoryLoader():
def __init__(self, config, *args, **kwargs):
self.logger = logging.get_logger(self.__class__.__name__)
self.args = args
self.kwargs = kwargs
self.config = config
self.column_names = ['userId', 'itemId', 'rating', 'timestamp']
if config.conf... |
def get_morgan_fp(mol: Chem.Mol, nbits: int=2048, radius=3) -> np.ndarray:
if (mol is None):
return None
curr_fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nbits)
fingerprint = np.zeros((0,), dtype=np.uint8)
DataStructs.ConvertToNumpyArray(curr_fp, fingerprint)
return fingerp... |
def generate_list_config_field(base_cls: Type[TDynamicConfig]) -> Callable[([], Sequence[TDynamicConfig])]:
assert (base_cls in CONFIG_STORAGE)
config_metadata = CONFIG_STORAGE[base_cls]
def _encoder(orig_config: Sequence[TDynamicConfig]) -> Sequence[Dict[(str, Any)]]:
return [config_metadata.encode... |
def ray_aabb_intersection(camloc, raydir, min=None, max=None, ctx=None):
    """Intersect rays with an axis-aligned bounding box.

    Args:
        camloc: ray origins (camera locations).
        raydir: ray directions.
        min: lower corner of the AABB; defaults to [-1, -1, -1].
        max: upper corner of the AABB; defaults to [1, 1, 1].
        ctx: context object forwarded to ``RayAABBIntersection``.

    Returns:
        Whatever the ``RayAABBIntersection`` functor returns for the rays.
    """
    # Avoid mutable (list) default arguments: a shared default list could be
    # mutated downstream and silently change the defaults of later calls.
    # NOTE: the parameter names ``min``/``max`` shadow builtins but are kept
    # for backward compatibility with existing keyword callers.
    if min is None:
        min = [-1.0, -1.0, -1.0]
    if max is None:
        max = [1.0, 1.0, 1.0]
    func = RayAABBIntersection(ctx, min, max)
    return func(camloc, raydir)
def numpy_include():
    """Return the directory that contains the NumPy C header files.

    Falls back to the legacy ``get_numpy_include`` accessor when the modern
    ``get_include`` attribute is missing (very old NumPy releases).
    """
    try:
        include_dir = np.get_include()
    except AttributeError:
        # Ancient NumPy versions exposed the header path under this name.
        include_dir = np.get_numpy_include()
    return include_dir
def do_dry_run(name: str, path: str, n_train_videos: int, n_val_videos: int, train_ids: List[str], val_ids: List[str], pre_transform: ComposeMix, n_samples: int=1000) -> None:
dry_run_metrics = {'n_frames': [], 'jpg_sizes': [], 'n_samples': n_samples, 'time_per_example': [], 'blank': str((Path(path) / 'blank.jpg'))... |
def downscale_label_ratio(gt, scale_factor, min_ratio, n_classes, ignore_index=255):
assert (scale_factor > 1)
(bs, orig_c, orig_h, orig_w) = gt.shape
assert (orig_c == 1)
(trg_h, trg_w) = ((orig_h // scale_factor), (orig_w // scale_factor))
ignore_substitute = n_classes
out = gt.clone()
out... |
class GaussianMLPPolicy(StochasticPolicy):
def __init__(self, env_spec, name='GaussianMLPPolicy', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_ini... |
def init_opt(args, model, logger):
num_training_steps = (sum(args.train_iterations) // args.gradient_accumulation_steps)
if (args.optimizer == 'adam'):
if (args.lr_schedule == 'transformer'):
opt = torch.optim.Adam(model.params, lr=args.lr_multiply, betas=(0.9, 0.98), eps=1e-09, weight_decay... |
class CNN(nn.Module):
def __init__(self, input_size=50, hidden_size=256, dropout=0, kernel_size=3, padding=1, activation_function=F.relu):
super().__init__()
self.conv = nn.Conv1d(input_size, hidden_size, kernel_size, padding=padding)
self.act = activation_function
self.dropout = nn.... |
_model
def poolformer_m36(pretrained=False, **kwargs):
layers = [6, 6, 18, 6]
embed_dims = [96, 192, 384, 768]
mlp_ratios = [4, 4, 4, 4]
downsamples = [True, True, True, True]
model = PoolFormer(layers, embed_dims=embed_dims, mlp_ratios=mlp_ratios, downsamples=downsamples, layer_scale_init_value=1e-... |
class TFMT5ForConditionalGeneration():
    """Placeholder for the TensorFlow MT5 conditional-generation model.

    Dummy object used when TensorFlow is not installed: every entry point
    simply calls ``requires_tf``, which is expected to raise an informative
    error about the missing dependency.
    """
    def __init__(self, *args, **kwargs):
        # All arguments are ignored; this only triggers the TF-availability check.
        requires_tf(self)
    def from_pretrained(self, *args, **kwargs):
        # NOTE(review): upstream dummy objects usually declare this as a
        # classmethod; here it is an instance method — confirm intended usage.
        requires_tf(self)
def _biorthogonal_window_loopy(analysis_window, shift):
fft_size = len(analysis_window)
assert (np.mod(fft_size, shift) == 0)
number_of_shifts = (len(analysis_window) // shift)
sum_of_squares = np.zeros(shift)
for synthesis_index in range(0, shift):
for sample_index in range(0, (number_of_sh... |
def cfg_from_list(cfg_list):
from ast import literal_eval
assert ((len(cfg_list) % 2) == 0)
for (k, v) in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:(- 1)]:
assert d.has_key(subkey)
d = d[subkey]
subkey... |
.parametrize('n_attacks, n_success, n_baseline', [(100, 100, 0), (100, 23, 11), (111, 84, 42), (100, 0, 100)])
def test_evaluation_results_simple(n_attacks, n_success, n_baseline):
results = EvaluationResults(n_attacks=n_attacks, n_success=n_success, n_baseline=n_baseline, n_control=None, confidence_level=0)
ri... |
def keys_to_transforms(keys: list, size=224):
    """Instantiate the transform registered under each key.

    Args:
        keys: names of transforms registered in the ``_transforms`` table.
        size: target size forwarded to every transform factory.

    Returns:
        List of instantiated transforms, in the same order as ``keys``.
    """
    return [_transforms[name](size=size) for name in keys]
class Sampler():
def __init__(self, dataset, t0, t1, dt, obs_func, s_func=None, device=None):
self.sampler = BasicSampler(dataset, t0, t1, dt, obs_func)
self._cache = {}
self.s_func = s_func
self.device = device
def get_observation(self, t):
t = float(t)
key = rou... |
_checkable
class SupportsGetInducingVariables(ProbabilisticModel, Protocol):
    """Structural protocol for probabilistic models that expose inducing variables.

    Implementers must provide :meth:`get_inducing_variables`; this base only
    declares the contract.
    """
    def get_inducing_variables(self) -> tuple[(TensorType, TensorType, TensorType, bool)]:
        """Return the model's inducing variables as a 4-tuple.

        The tuple presumably packs (inputs, mean, variance-like term, flag) —
        TODO confirm the exact element semantics against the implementing models.
        """
        raise NotImplementedError
def build_vocab(sequences, min_token_count=1, delim=' ', punct_to_keep=None, punct_to_remove=None, add_special=None):
token_to_count = {}
tokenize_kwargs = {'delim': delim, 'punct_to_keep': punct_to_keep, 'punct_to_remove': punct_to_remove}
for seq in sequences:
seq_tokens = tokenize(seq, **tokenize... |
def count_matches(pred_texts, gt_texts):
match_res = {'gt_char_num': 0, 'pred_char_num': 0, 'true_positive_char_num': 0, 'gt_word_num': 0, 'match_word_num': 0, 'match_word_ignore_case': 0, 'match_word_ignore_case_symbol': 0}
comp = re.compile('[^A-Z^a-z^0-9^-]')
norm_ed_sum = 0.0
for (pred_text, gt_text... |
class SpinnerInterface(object):
    """Abstract interface for progress spinners.

    Concrete spinners must override both methods; the base class only
    defines the contract and raises ``NotImplementedError``.
    """

    def spin(self):
        """Advance the spinner by one frame."""
        raise NotImplementedError()

    def finish(self, final_status):
        """Stop the spinner, reporting ``final_status`` as the outcome."""
        raise NotImplementedError()
def requires_package(name):
    """Build a pytest skip marker for tests needing the package ``name``.

    The test is skipped whenever ``has_package(name)`` reports the package
    as unavailable.
    """
    package_missing = not has_package(name)
    return pytest.mark.skipif(package_missing, reason=('%s is required' % name))
def obtain_evaluation_samples(policy, env, max_path_length=1000, num_trajs=100):
    """Collect deterministic rollouts of ``policy`` in ``env`` for evaluation.

    Args:
        policy: policy to evaluate.
        env: environment to roll out in.
        max_path_length: cap on the number of steps per trajectory.
        num_trajs: number of trajectories to collect.

    Returns:
        A ``TrajectoryBatch`` built from the collected rollouts via
        ``TrajectoryBatch.from_trajectory_list``.
    """
    # deterministic=True makes evaluation reproducible across runs;
    # a comprehension replaces the original append loop.
    paths = [
        rollout(env, policy, max_path_length=max_path_length, deterministic=True)
        for _ in range(num_trajs)
    ]
    return TrajectoryBatch.from_trajectory_list(env.spec, paths)
def get_step_message(log, start, end, title, message, details):
if (end not in log):
return ''
res = ((('\n' + f'''### {title}
''') + message) + '\n\n')
if details:
res += (('<details>\n\n```\n' + log[((log.find(start) + len(start)) + 1):(log.find(end) - 1)]) + '\n```\n\n</details>\n\n')
... |
def test_ufunc_isinf_f():
_numpy_output(check_dtype=True)
def ufunc_isinf_f(A: dace.float32[10]):
A[0] = np.inf
A[1] = np.NaN
return np.isinf(A)
args = dace.Config.get('compiler', 'cpu', 'args')
print(args)
if (args.find('-ffast-math') >= 0):
new_args = args.replace('... |
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
radius = tf.cast((kernel_size / 2), dtype=tf.int32)
kernel_size = ((radius * 2) + 1)
x = tf.cast(tf.range((- radius), (radius + 1)), dtype=tf.float32)
blur_filter = tf.exp(((- tf.pow(x, 2.0)) / (2.0 * tf.pow(tf.cast(sigma, dtype=tf.float32), ... |
def main(parsed_args):
assert (parsed_args.path is not None), '--path required for evaluation!'
utils.import_user_module(parsed_args)
print(parsed_args)
use_cuda = (torch.cuda.is_available() and (not parsed_args.cpu))
task = tasks.setup_task(parsed_args)
print('| loading model(s) from {}'.format... |
_start_docstrings('\n MobileNetV2 model with a semantic segmentation head on top, e.g. for Pascal VOC.\n ', MOBILENET_V2_START_DOCSTRING)
class MobileNetV2ForSemanticSegmentation(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config) -> None:
super().__init__(config)
self.n... |
class ScaleRenderer():
def __init__(self):
self.__top = 0
return
def set_bounds(self, lo, hi):
self.__lo = lo
self.__hi = hi
def get_position(self, x):
real_x = (((x - self.__lo) * self.__width) / (self.__hi - self.__lo))
return real_x
def set_top(self):
... |
def adagrad(opfunc, x, config, state=None):
if ((config is None) and (state is None)):
raise ValueError('adagrad requires a dictionary to retain state between iterations')
state = (state if (state is not None) else config)
lr = config.get('learningRate', 0.001)
lrd = config.get('learningRateDeca... |
class Block(nn.Module):
def __init__(self, dim, mlp_ratio=4, dpr=0.0, init_value=0.01):
super().__init__()
self.norm1 = nn.BatchNorm2d(dim)
self.attn = Attention(dim)
self.drop_path = (DropPath(dpr) if (dpr > 0.0) else nn.Identity())
self.norm2 = nn.BatchNorm2d(dim)
s... |
def non_pronominal_string_match(anaphor, antecedent):
if (anaphor.attributes['type'] in ['PRO', 'DEM', 'VRB']):
return False
elif (antecedent.attributes['type'] in ['PRO', 'DEM', 'VRB']):
return False
else:
return (' '.join(util.clean_via_pos(anaphor.attributes['tokens'], anaphor.att... |
def sobel_gradient_loss(guess, truth):
    """L1 loss between the Sobel edge maps of two images.

    Args:
        guess: predicted image tensor.
        truth: reference image tensor.

    Returns:
        Scalar tensor: mean of |sobel(guess) - sobel(truth)|.
    """
    g1 = sobel_edges(guess)
    g2 = sobel_edges(truth)
    # The original wrapped the difference in tf.pow(x, 1), which is a no-op;
    # the loss is simply the mean absolute deviation of the edge maps.
    return tf.reduce_mean(tf.abs(g1 - g2))
class GGCL_D(Module):
def __init__(self, in_features, out_features, dropout):
super(GGCL_D, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.dropout = dropout
self.weight_miu = Parameter(torch.FloatTensor(in_features, out_features))
... |
class Policy(nn.Module):
def __init__(self, obs_shape, action_space, num_agents, base=None, base_kwargs=None):
super(Policy, self).__init__()
if (base_kwargs is None):
base_kwargs = {}
if (base is None):
if (len(obs_shape) == 3):
base = CNNBase
... |
class SpeechToTextJointDatasetItem(NamedTuple):
    """Single example for joint speech-to-text training: an indexed source
    tensor plus optional target and auxiliary annotations."""
    # Position of this example within the dataset.
    index: int
    # Source feature tensor (presumably audio features — confirm with the dataset code).
    source: torch.Tensor
    # Target token tensor; None when no target is available.
    target: Optional[torch.Tensor] = None
    # Tokenized source-side transcript; None when not provided.
    src_txt_tokens: Optional[torch.Tensor] = None
    # Target-language tag token id; None when untagged.
    tgt_lang_tag: Optional[int] = None
class TimeSeries(np.ndarray):
def __new__(cls, input_array, *args, **kwargs):
import copy
dtype = kwargs.pop('dtype', None)
order = kwargs.pop('order', 'C')
if (order == 'F'):
raise ValueError(f"Requested array order '{order}' is not supported; it must be 'C'.")
i... |
_representation(onnx.defs.OpSchema, inputs=(lambda proto: list(map(convert_onnx_proto, get_proto_attr(proto, 'inputs')))), outputs=(lambda proto: list(map(convert_onnx_proto, get_proto_attr(proto, 'outputs')))), attributes=(lambda proto: {str(k): convert_onnx_proto(v) for (k, v) in get_proto_attr(proto, 'attributes').i... |
class StochasticActor(nn.Module):
def __init__(self, body: nn.Module, action_dim: int, max_action: float, log_std_bounds=((- 20), 2)):
super().__init__()
self.body = body
self.fc = layer_init(nn.Linear(self.body.feature_dim, (action_dim * 2)), w_scale=0.1)
self.max_action = max_actio... |
class DynamicalSystem_projective_finite_field(DynamicalSystem_projective_field, SchemeMorphism_polynomial_projective_space_finite_field):
def is_postcritically_finite(self, **kwds):
return True
def _is_preperiodic(self, P, **kwds):
return_period = kwds.pop('return_period', False)
if retu... |
.parametrize('has_colscale', [True, False])
.parametrize('has_residual', [True, False])
.parametrize('dropout_p', [0.37, 0.0])
.parametrize('weight_dtype', [torch.float32, torch.float16])
.parametrize('input_dtype,residual_dtype', ([(torch.float16, torch.float16), (torch.float16, torch.float32), (torch.float32, torch.f... |
def simClearStringSignal(signalName):
    """Clear the named string signal through the native library binding.

    Args:
        signalName: name of the signal; ASCII-encoded before the call.

    Returns:
        The library return code (after ``_check_return`` validates it).
    """
    encoded_name = signalName.encode('ascii')
    ret = lib.simClearStringSignal(encoded_name)
    _check_return(ret)
    return ret
class TrainingArgs():
model_name_or_path: str
output_dir: str
overwrite_output_dir: bool = False
learning_rate: float = 1e-05
head_learning_rate: float = 0.0003
dropout_prob: float = 0.3
weight_decay: float = 0.01
adam_beta1: float = 0.9
adam_beta2: float = 0.98
adam_epsilon: flo... |
.parametrize('cond_shape', ['2d', '3d'])
.parametrize('summary_loss', ['MMD', None])
.parametrize('soft', [True, False])
def test_amortized_posterior(cond_shape, summary_loss, soft):
batch_size = np.random.randint(low=1, high=32)
inp_dim = np.random.randint(low=2, high=32)
cond_dim = np.random.randint(low=2... |
class ConcreteQuantizer(nn.Module):
def __init__(self, config, num_embeddings, embedding_dim, split):
super().__init__()
self.K = num_embeddings
self.D = embedding_dim
self.M = split
self.concrete = ConcreteRelaxation(hard=(config.concrete.hard == 1), tau_mode=config.concrete... |
def register_Ns3CallbackImpl__Void_Unsigned_short_Ns3Ptr__lt__ns3SpectrumValue__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::CallbackImpl< void, unsigned short, ns3::Ptr< ns3::SpectrumValue >, ns3::empty, ... |
_utils.test(debug=True)
def test_assign_unpack():
    """Verify that tuple unpacking assigns each target its matching value."""
    def check_unpack():
        first, second = 1, 2
        assert first == 1
        assert second == 2
    check_unpack()
class Instances():
def __init__(self, image_size: Tuple[(int, int)], **kwargs: Any):
self._image_size = image_size
self._fields: Dict[(str, Any)] = {}
for (k, v) in kwargs.items():
self.set(k, v)
def image_size(self) -> Tuple[(int, int)]:
return self._image_size
d... |
class ResBlock(nn.Module):
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups=1, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1, dilation=1):
super(ResBlock, self).__init__()
self._inplace_relu = inplace_relu
self._eps = eps
se... |
def test_classify_outputs():
    """ClassifyOutput exposes creator and picks, and rejects unknown attributes."""
    result = seisbench.util.ClassifyOutput('model', picks=[])
    assert result.creator == 'model'
    assert len(result.picks) == 0
    # Accessing an attribute that was never set must raise, not return None.
    with pytest.raises(AttributeError):
        result.missing_key
def test_WatchYourStep_embeddings(barbell):
generator = AdjacencyPowerGenerator(barbell, num_powers=5)
wys = WatchYourStep(generator, embeddings_initializer='ones')
(x_in, x_out) = wys.in_out_tensors()
model = Model(inputs=x_in, outputs=x_out)
model.compile(optimizer='adam', loss=graph_log_likelihoo... |
def isLower(layout):
    """Return True when ``layout`` is one of the lower-packed matrix layouts."""
    lower_layouts = (
        NumericTableIface.lowerPackedSymmetricMatrix,
        NumericTableIface.lowerPackedTriangularMatrix,
    )
    return layout in lower_layouts
def prepare_connections():
global ns, ds, ed, tpy_devices, tpy_tc, tpy_node
ns = rc.ChirpstackNS(NUT_API_URL)
ns.auth(NUT_API_USER, NUT_API_PASS)
ds = rc.DeviceService(ns)
ed = rc.RemoteEndDevice(DUT_HOST, DUT_PORT, cb_event=dut_cb_event, cb_class=dut_cb_class, cb_rx=dut_handle_rxinfo, cb_tx=dut_han... |
def evaluate_network_sparse(model, device, data_loader, epoch):
model.eval()
epoch_test_loss = 0
epoch_test_ROC = 0
with torch.no_grad():
list_scores = []
list_labels = []
for (iter, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e)) in enumerate(data_loader):
... |
def choose_charlm(language, dataset, charlm, language_charlms, dataset_charlms):
default_charlm = language_charlms.get(language, None)
specific_charlm = dataset_charlms.get(language, {}).get(dataset, None)
if (charlm is None):
return None
elif (charlm != 'default'):
return charlm
eli... |
def matched_files_iter(root_path: str, includes: Iterable=('*',), ignores: Iterable=(), extensions: Iterable=(), out_of_place_only: bool=False, is_pytorch_extension: bool=False) -> Iterator[str]:
def _fnmatch(filepath, patterns):
return any((fnmatch.fnmatch(filepath, pattern) for pattern in patterns))
e... |
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--split', dest='split', default='train', type=str, help='split to generate scan-subscan mapping on')
parser.add_argument('--mode', dest='mode', default='orig', type=str, help='the data mode to generate scan-subscan mapping with')
... |
class BasePytorchFeatureNetworkTest(BaseFeatureNetworkTest):
def __init__(self, unit_test, num_calibration_iter=1, val_batch_size=1, num_of_inputs=1, input_shape=(3, 8, 8)):
super().__init__(unit_test=unit_test, val_batch_size=val_batch_size, num_calibration_iter=num_calibration_iter, num_of_inputs=num_of_i... |
def make_subsampled_mnist(root, download, train, transform, num_data, seed=0):
if train:
train = MNIST(root, train=True, transform=transform, download=download)
sss = StratifiedShuffleSplit(n_splits=1, train_size=num_data, random_state=seed)
for (train_index, test_index) in sss.split(train.d... |
def get_tokenizer(model_str):
    """Instantiate the tokenizer matching a model identifier.

    Longformer checkpoints take the fast tokenizer class with
    ``add_prefix_space=True``; every other identifier goes through
    ``AutoTokenizer``.
    """
    if 'longformer' in model_str:
        return LongformerTokenizerFast.from_pretrained(model_str, add_prefix_space=True)
    return AutoTokenizer.from_pretrained(model_str)
class CAM(nn.Module):
def __init__(self):
super(CAM, self).__init__()
self.gamma = Scale(0)
def forward(self, x):
(batch_size, channels, height, width) = x.size()
proj_query = x.view(batch_size, channels, (- 1))
proj_key = x.view(batch_size, channels, (- 1)).permute(0, 2,... |
def batch_wise_training(model, optims, dset, config, args):
logging.info('Batch-wise training on %d %s', config['n_docs'], 'docs')
loss_iters = torch.zeros(1, 2).to(dtype=model.dtype, device=model.device)
(loss, kld) = model.compute_total_loss_batch_wise(dset, args.nb, use_params='all')
logging.info('In... |
def skipCUDAIfNoMagma(fn):
    """Decorate ``fn`` so it is skipped on CUDA when MAGMA is not detected.

    Also forces the test onto the default CUDA stream via
    ``skipCUDANonDefaultStreamIf(True)``.
    """
    magma_skip = skipCUDAIf('no_magma', 'no MAGMA library detected')
    default_stream_only = skipCUDANonDefaultStreamIf(True)
    return magma_skip(default_stream_only(fn))
class EarlyStopping():
def __init__(self, patience=30):
self.best_fitness = 0.0
self.best_epoch = 0
self.patience = (patience or float('inf'))
self.possible_stop = False
def __call__(self, epoch, fitness):
if (fitness >= self.best_fitness):
self.best_epoch = e... |
class K_kSchur(CombinatorialFreeModule):
def __init__(self, kBoundedRing):
CombinatorialFreeModule.__init__(self, kBoundedRing.base_ring(), kBoundedRing.indices(), category=KBoundedSubspaceBases(kBoundedRing, kBoundedRing.base_ring().one()), prefix=('Kks%d' % kBoundedRing.k))
self._kBoundedRing = kB... |
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AllocationRetentionPriority_methods(root_module, root_module['ns3::AllocationRetentionPriority'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstru... |
def normal_init(module: nn.Module, mean: float=0, std: float=1, bias: float=0) -> None:
    """Initialize a module in place: normally distributed weights, constant bias.

    Args:
        module: module whose ``weight``/``bias`` (when present and not None)
            are filled in place.
        mean: mean of the weight distribution.
        std: standard deviation of the weight distribution.
        bias: constant value written into the bias.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.normal_(weight, mean, std)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def main(hparams):
log_dir = hparams.LOG_DIR
device = ('cuda' if torch.cuda.is_available() else 'cpu')
set_seed(hparams.SEED_VALUE)
logger.add(os.path.join(log_dir, 'train.log'), level='INFO', colorize=False)
copy_code(output_folder=log_dir, curr_folder=os.path.dirname(os.path.abspath(__file__)))
... |
def register_Ns3MmWaveMac_methods(root_module, cls):
cls.add_constructor([param('ns3::MmWaveMac const &', 'arg0')])
cls.add_constructor([])
cls.add_method('GetConfigurationParameters', 'ns3::Ptr< ns3::MmWavePhyMacCommon >', [], is_const=True)
cls.add_method('GetPacketBurstFromMacQueue', 'ns3::Ptr< ns3::... |
_tokenizer('nltk', dataclass=FairseqDataclass)
class NLTKTokenizer(object):
def __init__(self, *unused):
try:
from nltk.tokenize import word_tokenize
self.word_tokenize = word_tokenize
except ImportError:
raise ImportError('Please install nltk with: pip install nl... |
def test_box():
def f2(x):
return x
growablebuffer = GrowableBuffer(np.int32, initial=10)
out1 = f2(growablebuffer)
assert (len(out1._panels) == len(growablebuffer._panels))
assert (out1._panels[0] is growablebuffer._panels[0])
assert (out1._length == growablebuffer._length)
assert (... |
def get_feature_detector(pth, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
assert (0 <= rank < num_gpus)
key = (pth, device)
if (key not in _feature_detector_cache):
is_leader = (rank == 0)
if ((not is_leader) and (num_gpus > 1)):
torch.distributed.barrier()
... |
def make_testdata_mult(slideID_list, label):
test_dir = f'../Lymphoma/patches/test_bags'
bag_list = []
for slideID in slideID_list:
for bag in os.listdir(f'{test_dir}/x5/{slideID}'):
bag_x10 = []
bag_x20 = []
for patch in os.listdir(f'{test_dir}/x10/{slideID}/{bag... |
_module()
class MVModel(nn.Module):
def __init__(self, task='cls', backbone='resnet18', channels=16, num_classes=15, resolution=128, use_img_transform=False, **kwargs):
super().__init__()
assert (task == 'cls')
self.task = task
self.num_classes = num_classes
self.dropout = kw... |
def test_pipeline_sampler_none_classifier():
(X, y) = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1, n_samples=5000, random_state=0)
clf = LogisticRegression(solver='lbfgs', random_state=0)
rus = RandomUnderS... |
def find_bracket_group(input_string, start):
    """Locate the curly-brace group in ``input_string`` that begins at ``start``.

    Delegates to ``find_closure_group`` with ``{``/``}`` as the delimiters.
    """
    curly_braces = ['{', '}']
    return find_closure_group(input_string, start, group=curly_braces)
class DeiTImageProcessor(BaseImageProcessor):
model_input_names = ['pixel_values']
def __init__(self, do_resize: bool=True, size: Dict[(str, int)]=None, resample: PILImageResampling=PIL.Image.BICUBIC, do_center_crop: bool=True, crop_size: Dict[(str, int)]=None, rescale_factor: Union[(int, float)]=(1 / 255), do_... |
def wide_and_deep_model_fn(features, labels, mode, params):
with tf.variable_scope('wide_part', reuse=tf.AUTO_REUSE):
wide_input = fc.input_layer(features, params['wide_part_feature_columns'])
wide_logit = tf.layers.dense(wide_input, 1, name='wide_part_variables')
with tf.variable_scope('deep_pa... |
def get_gt_samples(dataset, nimgs=50000):
if (dataset != 'cifar'):
transform = get_transform(sizes[dataset])
all_images = get_images(paths[dataset], nimgs)
images = []
for file_path in tqdm(all_images[:nimgs]):
images.append(transform(Image.open(file_path).convert('RGB'))... |
def test_binding():
ti.init()
taichi_lang = ti._lib.core
print(taichi_lang.BinaryOpType.mul)
one = taichi_lang.make_const_expr_int(ti.i32, 1)
two = taichi_lang.make_const_expr_int(ti.i32, 2)
expr = taichi_lang.make_binary_op_expr(taichi_lang.BinaryOpType.add, one, two)
print(taichi_lang.make... |
def test_check_async_function_timeout():
    """A timed-out async function should return None and trigger os._exit."""
    os._exit = mock.MagicMock()
    # NOTE(review): the next line is a scrape-mangled decorator (likely
    # something of the form ``@<name>(timeout=0.1)``); as written it is a
    # syntax error and cannot apply to the coroutine — restore the original.
    (timeout=0.1)
    async def some_async_fn(value):
        # Sleeps longer than the 0.1s timeout, so the wrapper should cancel it.
        (await asyncio.sleep(0.2))
        return value
    assert (asyncio.run(some_async_fn('')) is None)
    assert os._exit.called
class CLS(torch.nn.Module):
def __init__(self, d_in, d_out):
super(CLS, self).__init__()
self.conv = GCNConv(d_in, d_out, cached=True)
def reset_parameters(self):
self.conv.reset_parameters()
def forward(self, x, edge_index, mask=None):
x = self.conv(x, edge_index)
x ... |
class PlasmaStore():
def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int=GB100):
self.server = self.start(path, nbytes)
def __del__(self):
self.server.kill()
def start(path=DEFAULT_PLASMA_PATH, nbytes: int=GB100) -> subprocess.Popen:
if (not PYARROW_AVAILABLE):
raise... |
.parametrize('ratio, user_answer, item_answer', [(0.5, [[1, 1, 2, 2, 3, 3], [1, 3, 3]], [[1, 2, 1, 2, 1, 5], [5, 1, 2]])])
.parametrize('dataset_type', [pytest.param('spark_dataframe_test', marks=pytest.mark.spark), pytest.param('pandas_dataframe_test', marks=pytest.mark.core)])
def test_ratio_splitter_drop_items(ratio... |
def _unravel_index(flat_index, shape):
flat_index = operator.index(flat_index)
res = []
if (shape == torch.Size([])):
return 0
for size in shape[::(- 1)]:
res.append((flat_index % size))
flat_index = (flat_index // size)
if (len(res) == 1):
return res[0]
return tu... |
def unfreeze_params(module, frozen_params):
for (name, params) in module.named_parameters():
print(name)
for pattern in frozen_params:
assert isinstance(pattern, str)
if re.search(pattern, name):
params.requires_grad = True
print(('Params %s is... |
class L2DataMisfit(BaseDataMisfit):
def __call__(self, m, f=None):
R = (self.W * self.residual(m, f=f))
return (0.5 * np.vdot(R, R))
def deriv(self, m, f=None):
if (f is None):
f = self.simulation.fields(m)
return self.simulation.Jtvec(m, (self.W.T * (self.W * self.re... |
def get_class(kls, name):
    """Resolve a dotted path such as ``'pkg.mod.Class'`` to the object it names.

    Args:
        kls: fully dotted path of the target object.
        name: unused; retained for interface compatibility with callers.

    Returns:
        The object reached by importing the path's package and then walking
        attributes along the remaining components.
    """
    parts = kls.split('.')
    package = '.'.join(parts[:-1])
    # __import__ returns the top-level package, so every component after the
    # first must be resolved through getattr.
    obj = __import__(package)
    for attr in parts[1:]:
        obj = getattr(obj, attr)
    return obj
def CRF(image, unary, maxiter=10, scale_factor=1.0, color_factor=13):
assert (image.shape[:2] == unary.shape[:2])
(H, W) = image.shape[:2]
nlables = unary.shape[2]
crf = DenseCRF(W, H, nlables)
crf.set_unary_energy((- unary.ravel().astype('float32')))
crf.add_pairwise_energy(10, (80 / scale_fact... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.