code stringlengths 101 5.91M |
|---|
class ShanghaiTech(AnomalibVideoDataModule):
def __init__(self, root: (Path | str), scene: int, clip_length_in_frames: int=1, frames_between_clips: int=1, task: TaskType=TaskType.SEGMENTATION, image_size: ((int | tuple[(int, int)]) | None)=None, center_crop: ((int | tuple[(int, int)]) | None)=None, normalization: (... |
def download_url(url, model_dir='~/.torch/', overwrite=False):
target_dir = url.split('/')[(- 1)]
model_dir = os.path.expanduser(model_dir)
if (not os.path.exists(model_dir)):
os.makedirs(model_dir)
model_dir = os.path.join(model_dir, target_dir)
cached_file = model_dir
if ((not os.path.... |
def weights_init_xavier(m):
    """Xavier-normal initialize the weights of Conv* and Linear modules.

    Intended for use with ``model.apply(weights_init_xavier)``: modules whose
    class name contains neither 'Conv' nor 'Linear' are left untouched.

    Args:
        m: a torch module; only its ``weight`` tensor is modified (gain=0.02).
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1 or classname.find('Linear') != -1:
        # ``init.xavier_normal`` is deprecated; the in-place spelling
        # ``xavier_normal_`` is the supported API and behaves identically.
        # The two original branches did exactly the same thing, so they are merged.
        init.xavier_normal_(m.weight.data, gain=0.02)
def naivepr(y_true, y_score):
desc_score_indices = np.argsort(y_score, kind='mergesort')[::(- 1)]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
N = len(y_score)
(tp, fp) = (0, 0)
(condition_positive, condition_negative) = (np.sum(y_true), (N - np.sum(y_true)))
(pr... |
class ChannelwiseAttention(nn.Module):
def __init__(self, in_channels):
super(ChannelwiseAttention, self).__init__()
self.in_channels = in_channels
self.linear_1 = nn.Linear(self.in_channels, (self.in_channels // 4))
self.linear_2 = nn.Linear((self.in_channels // 4), self.in_channels... |
def tile_wcrs(graph_or_subgraph: GraphViewType, validate_all: bool, prefer_partial_parallelism: bool=None) -> None:
from dace.codegen.targets import cpp
from dace.frontend import operations
from dace.transformation import dataflow, helpers as xfh
graph = graph_or_subgraph
if isinstance(graph_or_subg... |
def _create_serialized_example(predecessor, current, successor, vocab):
example = tf.train.Example(features=tf.train.Features(feature={'decode_pre': _int64_feature(_sentence_to_ids(predecessor, vocab)), 'encode': _int64_feature(_sentence_to_ids(current, vocab)), 'decode_post': _int64_feature(_sentence_to_ids(succes... |
class TopLevelNode(ConfigNode):
def __init__(self, node_name, config_tree_data, smoke):
super(TopLevelNode, self).__init__(None, node_name)
self.config_tree_data = config_tree_data
self.props['smoke'] = smoke
def get_children(self):
return [OSConfigNode(self, x, c, p) for (x, (c,... |
def bbox_yolo_to_center(bbox, width, height):
    """Scale the first four normalized YOLO coords of *bbox* to pixel units, in place.

    Args:
        bbox: mutable sequence whose slots 0..3 hold normalized (cx, cy, w, h).
        width: image width in pixels (scales slots 0 and 2).
        height: image height in pixels (scales slots 1 and 3).

    Returns:
        The same *bbox* object, mutated.
    """
    scales = (width, height, width, height)
    for idx, factor in enumerate(scales):
        bbox[idx] = bbox[idx] * factor
    return bbox
class FindStatStatisticQuery(FindStatStatistic):
def __init__(self, data=None, values_of=None, distribution_of=None, domain=None, known_terms=None, function=None, depth=FINDSTAT_DEFAULT_DEPTH, debug=False):
self._first_terms = data
if ((data is not None) and (known_terms is None)):
self.... |
def to_tensor(in_dict, mean=(0.486, 0.459, 0.408), std=(0.229, 0.224, 0.225)):
    """Convert a sample dict's image to a normalized tensor and its mask to a long tensor.

    Mutates ``in_dict`` in place and returns None (callers keep using the dict).

    Args:
        in_dict: sample with 'img' (image accepted by ``F.to_tensor``) and 'mask'
            (array-like) entries — assumed keys; TODO confirm against callers.
        mean: per-channel normalization means. Changed from a list literal to a
            tuple: mutable default arguments are a shared-state pitfall.
        std: per-channel normalization stds (same tuple fix).
    """
    in_dict['img'] = F.to_tensor(in_dict['img'])
    in_dict['img'] = F.normalize(in_dict['img'], mean, std)
    # Masks become integer class indices, hence .long().
    in_dict['mask'] = torch.from_numpy(np.array(in_dict['mask'])).long()
class YOLOv7CSPVoVNet(BaseYOLOBackbone):
cfg = {'n': [0.33, 0.25], 't': [0.33, 0.375], 's': [0.33, 0.5], 'm': [0.6, 0.75], 'l': [1.0, 1.0], 'x': [1.33, 1.25]}
def __init__(self, subtype='cspdark_s', in_channels=3, out_channels=[64, 128, 256, 512, 1024], num_blocks=[6, 12, 18, 6], spp_ksizes=5, depthwise=False, ... |
def LF_header(span, negex):
rgx = re.compile('(family history[:]*|family hx)\\b', re.I)
left = get_left_span(span, span.sentence, window=6)
trigger = match_regex(rgx, left)
if trigger:
neg = match_regex(negex.rgxs['definite']['left'], get_left_span(trigger, window=2))
return (ABSTAIN if ... |
class NormalDistributionLinear(NormalDistribution):
def __init__(self, input_size, output_size, nonlinearity=None):
super(NormalDistributionLinear, self).__init__(nonlinearity=nonlinearity)
self.input_size = input_size
self.output_size = output_size
self.mean_fn = nn.Linear(input_siz... |
def check_color(c, greyscale, which):
if (c is None):
return c
if greyscale:
try:
len(c)
except TypeError:
c = (c,)
if (len(c) != 1):
raise ProtocolError(('%s for greyscale must be 1-tuple' % which))
if (not is_natural(c[0])):
... |
def NodesGTEDegree(tspec, *args):
if (type(tspec) == PUNGraph):
return NodesGTEDegree_PUNGraph(tspec, *args)
if (type(tspec) == PUndirNet):
return NodesGTEDegree_PUndirNet(tspec, *args)
if (type(tspec) == PDirNet):
return NodesGTEDegree_PDirNet(tspec, *args)
if (type(tspec) == PN... |
class MultiTrainer(TorchTrainer):
def __init__(self, trainers, trainer_steps, trainer_names=None):
super().__init__()
assert (len(trainers) == len(trainer_steps)), 'Must specify number of steps for each trainer'
self.trainers = trainers
self.trainer_steps = trainer_steps
if (... |
def build_jasper(version: str, num_classes: int, device: torch.device) -> nn.DataParallel:
    """Construct a Jasper model of the requested size, wrapped in DataParallel.

    Args:
        version: model size, one of '10x5' or '5x3' (case-insensitive).
        num_classes: output vocabulary size.
        device: device the model should target.

    Raises:
        AssertionError: if *version* is not a supported size.
    """
    supported = ['10x5', '5x3']
    assert version.lower() in supported, 'Unsupported Version: {}'.format(version)
    model = Jasper(num_classes=num_classes, version=version, device=device)
    return nn.DataParallel(model)
def initialize(NI, NJ, NK, NL, NM, datatype=np.float64):
A = np.fromfunction((lambda i, j: ((((i * j) + 1) % NI) / (5 * NI))), (NI, NK), dtype=datatype)
B = np.fromfunction((lambda i, j: ((((i * (j + 1)) + 2) % NJ) / (5 * NJ))), (NK, NJ), dtype=datatype)
C = np.fromfunction((lambda i, j: (((i * (j + 3)) % N... |
def test_digamma():
    """Spot-check digamma: its polygamma definition and two known special values."""
    sym = Symbol('x')
    # digamma is by definition the zeroth polygamma function.
    assert digamma(sym) == polygamma(0, sym)
    # Pole at zero; psi(1) = -EulerGamma.
    assert digamma(0) == zoo
    assert digamma(1) == -EulerGamma
class TrainDUTS(torch.utils.data.Dataset):
def __init__(self, root, clip_n):
self.root = root
img_dir = os.path.join(root, 'JPEGImages')
mask_dir = os.path.join(root, 'Annotations')
self.img_list = sorted(glob(os.path.join(img_dir, '*.jpg')))
self.mask_list = sorted(glob(os.p... |
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum((1 for k in qid_to_has_ans if (not qid_to_has_ans[k])))
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=(lambda k: na_probs[k]))
for (_, qid) in enumerate(qid_list):
... |
class CheckpointCallback(tfk.callbacks.Callback):
def __init__(self, save_path, model, state=None, on_batch_end=None, on_epoch_end=None, relod_on_nan=False, verbose=1):
self.model = model
self.state = (state or {})
self.save_path = save_path
self.on_batch_end_fn = on_batch_end
... |
class TFMobileViTModel(metaclass=DummyObject):
    """Import-time placeholder for TFMobileViTModel.

    Auto-generated dummy: instantiating it calls ``requires_backends``, which
    presumably raises an informative error when the TensorFlow backend is not
    installed (NOTE(review): exact error type depends on ``requires_backends``).
    """

    # Backends that must be importable for the real implementation.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        # Fails fast if the 'tf' backend is unavailable; never constructs a model.
        requires_backends(self, ['tf'])
class DifferentialsSpaceInclusion(Morphism):
def _repr_(self):
s = 'Inclusion morphism:'
s += '\n From: {}'.format(self.domain())
s += '\n To: {}'.format(self.codomain())
return s
def is_injective(self):
return True
def is_surjective(self):
K = self.domain... |
.parametrize('csr_container', CSR_CONTAINERS)
.parametrize('eigen_solver', ('arpack', 'lobpcg'))
.parametrize('assign_labels', ('kmeans', 'discretize', 'cluster_qr'))
def test_spectral_clustering(eigen_solver, assign_labels, csr_container):
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.2, 0.0... |
def CalculatePolarityNumber(mol):
    """Return log10 of the polarity number of *mol*.

    The polarity number counts unordered atom pairs at topological distance 3
    (each pair appears twice in the symmetric distance matrix, hence the 0.5
    factor). Zero counts are clamped to MINVALUE before taking the log.

    Args:
        mol: an RDKit molecule.
    """
    dist = Chem.GetDistanceMatrix(mol)
    pair_count = 0.5 * np.sum(dist == 3)
    if pair_count == 0:
        # Avoid log10(0); MINVALUE is the module-wide floor constant.
        pair_count = MINVALUE
    return np.log10(pair_count)
def load_triples_with_label(data_path, r, entity_index_path, relation_index_path, seen_entities=None, verbose=False):
(entity2id, _) = load_index(entity_index_path)
(relation2id, _) = load_index(relation_index_path)
def triple2ids(e1, e2, r):
return (entity2id[e1], entity2id[e2], relation2id[r])
... |
class network(nn.Module):
def __init__(self, backbone, num_classes=None, pretrained=None, init_momentum=None, aux_layer=None):
super().__init__()
self.num_classes = num_classes
self.init_momentum = init_momentum
self.encoder = getattr(encoder, backbone)(pretrained=pretrained, aux_lay... |
class MulNode(NumBinopNode):
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or (type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, ... |
def _init_weights(module, init_linear='normal', std=0.01, bias=0.0):
assert (init_linear in ['normal', 'kaiming']), 'Undefined init_linear: {}'.format(init_linear)
for m in module.modules():
if isinstance(m, nn.Linear):
if (init_linear == 'normal'):
normal_init(m, std=std, bi... |
class ArgsParser(Tap):
output: Path
target: str
fuzzers: List[Fuzzer]
queue_dir: Path
crash_case_dir: Path
binary: str
binary_crash: str
args: str
live: bool
sleep: int
timeout: str
mode: str
input: str
input_only: bool
def configure(self):
self.add_ar... |
def main(args):
config_file = args.config_file
config = imp.load_source('config', config_file)
if args.name:
config.name = args.name
trainset = Dataset(config.train_dataset_path, prefix=config.data_prefix)
network = WarpGAN()
network.initialize(config, trainset.num_classes)
if config... |
def match_baseline_cfg(cfg_dict, cfg_dict_baseline, verbose=True):
from yacs.config import CfgNode as CN
stats_baseline = dict_to_stats(cfg_dict_baseline)
set_cfg(cfg)
cfg_new = CN(cfg_dict)
cfg.merge_from_other_cfg(cfg_new)
stats = match_computation(stats_baseline, key=['gnn', 'dim_inner'])
... |
def test_line_for_search():
line_for_search = optimize._optimize._line_for_search
lower_bound = np.array([(- 5.3), (- 1), (- 1.5), (- 3)])
upper_bound = np.array([1.9, 1, 2.8, 3])
x0 = np.array([0.0, 0, 0, 0])
x1 = np.array([0.0, 2, (- 3), 0])
all_tests = ((x0, np.array([1.0, 0, 0, 0]), (- 5.3),... |
def test_searchsorted_output_dtype(device):
B = 100
A = 50
V = 12
a = torch.sort(torch.rand(B, V, device=device), dim=1)[0]
v = torch.rand(B, A, device=device)
out = searchsorted(a, v)
out_np = numpy_searchsorted(a.cpu().numpy(), v.cpu().numpy())
assert (out.dtype == torch.long)
np.t... |
_utils.test(require=ti.extension.adstack)
def test_ib_global_load():
N = 10
a = ti.field(ti.f32, shape=N, needs_grad=True)
b = ti.field(ti.i32, shape=N)
p = ti.field(ti.f32, shape=N, needs_grad=True)
def compute():
for i in range(N):
val = a[i]
for j in range(b[i]):
... |
class Partition14(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Linear[v]', 'T5ForConditionalGeneration/T5Stack[encoder]/T5Block[17]/T5LayerSelfAttention[0]/T5Attention[SelfAttention]/Dropout[dropout]', 'T5ForConditionalGenera... |
def ref_selu(x, scale, alpha):
    """Reference SELU activation: scale*x for x>0, else scale*alpha*(exp(x)-1).

    Args:
        x: numpy array (or scalar) of pre-activations.
        scale: SELU lambda parameter.
        alpha: SELU alpha parameter.

    Returns:
        Array of the same shape as *x* with SELU applied elementwise.
    """
    positive_branch = scale * x
    negative_branch = (scale * alpha) * (np.exp(x) - 1)
    return np.where(x > 0, positive_branch, negative_branch)
_grad()
def final_test(data_loader, model, device, file):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
model.eval()
final_result = []
for batch in metric_logger.log_every(data_loader, 10, header):
videos = batch[0]
ta... |
_grad()
def detailed_predictions_on_dataset(model, data_loader, args, device, FOR_VISUALIZATION=True, tokenizer=None):
model.eval()
res = dict()
res['guessed_correctly'] = list()
res['confidences_probs'] = list()
res['contrasted_objects'] = list()
res['target_pos'] = list()
res['context_size... |
.parametrize('gshape, mask_shape', [((2, 3, 2), (2, 3)), ((3, 4), (3, 4))])
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
def test_bool_scatter_forward_backward(seed, ctx, func_name, gshape, mask_shape):
from nbla_test_utils import cap_ignore_region, function_tester
rng = np.random.RandomStat... |
class TestExpm1(object):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), (ncu.exp(0.2) - 1))
assert_almost_equal(ncu.expm1(1e-06), (ncu.exp(1e-06) - 1))
def test_special(self):
assert_equal(ncu.expm1(np.inf), np.inf)
assert_equal(ncu.expm1(0.0), 0.0)
assert_equa... |
class HardsigmoidBenchmark(op_bench.TorchBenchmarkBase):
    """op_bench micro-benchmark timing a hardsigmoid-style op on a 4-D tensor.

    The op_bench framework calls ``init`` with the configured sizes, then times
    repeated calls to ``forward``.
    """

    def init(self, N, C, H, W, device, op_func):
        # Random NCHW input allocated once, on the target device.
        self.input_one = torch.rand(N, C, H, W, device=device)
        # op_func is a module class (e.g. nn.Hardsigmoid); instantiate it here
        # so construction cost is excluded from the timed region.
        self.op_func = op_func()

    def forward(self):
        return self.op_func(self.input_one)
def is_assertable(obj: Any, recursion_depth: int=0) -> bool:
if (recursion_depth > 4):
return False
if isinstance(obj, float):
return False
tp_ = type(obj)
if (is_enum(tp_) or is_primitive_type(tp_) or is_none_type(tp_)):
return True
if (is_set(tp_) or is_list(tp_) or is_tupl... |
def group_by_key_prefix_and_remove_prefix(prefix, d):
    """Split *d* by key prefix, stripping the prefix from matching keys.

    Args:
        prefix: string prefix to test each key against.
        d: input dict.

    Returns:
        Tuple ``(trimmed, rest)`` where *trimmed* holds the prefixed entries
        with the prefix removed from their keys, and *rest* holds the others.
    """
    matched, rest = group_dict_by_key(lambda key: key.startswith(prefix), d)
    trimmed = {key[len(prefix):]: value for key, value in matched.items()}
    return trimmed, rest
def test_default_sigma():
    """gaussian() with no sigma argument must behave exactly like sigma=1."""
    # Unit impulse in the middle of a 3x3 image.
    img = np.zeros((3, 3))
    img[1, 1] = 1.0
    explicit = gaussian(img, preserve_range=True, sigma=1)
    implicit = gaussian(img, preserve_range=True)
    assert_array_equal(implicit, explicit)
class LSTMStateTuple(_LSTMStateTuple):
    """Named (c, h) pair of LSTM state tensors.

    Both members must share one dtype; ``dtype`` enforces that invariant.
    """

    # Keep the namedtuple lightweight: no per-instance __dict__.
    __slots__ = ()

    def dtype(self):
        """Return the common dtype of the (c, h) pair, or raise on mismatch."""
        c, h = self
        if c.dtype != h.dtype:
            raise TypeError('Inconsistent internal state: %s vs %s' % (str(c.dtype), str(h.dtype)))
        return c.dtype
def check_version(version):
def check(version):
try:
url_pattern = '
req = requests.get(url_pattern)
latest_version = parse('0')
version = parse(version)
if (req.status_code == requests.codes.ok):
j = json.loads(req.text.encode('utf... |
class RandomRequestApp(RequestApp):
def __init__(self, node: QuantumRouter, others: List[str], seed: int, min_dur: int, max_dur: int, min_size: int, max_size: int, min_fidelity: float, max_fidelity: float):
super().__init__(node)
assert (0 < min_dur <= max_dur)
assert (0 < min_size <= max_si... |
class BatchBootlegEntityDisambiguator(AbstractEntityDisambiguator):
def __init__(self, args):
super().__init__(args)
logger.info('Initializing Bootleg class')
self.model_dir = f'{self.args.database_dir}/{self.args.bootleg_model}'
self.config_path = f'{self.model_dir}/bootleg_config.y... |
class MVTecDataset(Dataset):
def __init__(self, root_path='../data', class_name='bottle', is_train=True, resize=128, cropsize=128):
assert (class_name in classes), 'class_name: {}, should be in {}'.format(class_name, classes)
self.root_path = root_path
self.class_name = class_name
se... |
.parametrize('ty', [ti.u8, ti.u16, ti.u32, ti.u64])
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], debug=True)
def test_sub_overflow_u(capfd, ty):
if (not supports_overflow(ti.lang.impl.current_cfg().arch)):
return
capfd.readouterr()
def foo(x: ty, y: ty) -> ty:
a = ty(x)
b = ty(y)
... |
def resnet50w2(pretrained=True, **kwargs):
model = _resnet50w2(**kwargs)
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(url=' map_location='cpu')
state_dict = {k.replace('module.', ''): v for (k, v) in state_dict.items()}
model.load_state_dict(state_dict, strict=False)
... |
def bgl_preprocessed_logrecord():
    """Load the preprocessed 11k-line BGL anomaly-detection fixture from test data."""
    csv_path = os.path.join(TEST_DATA_PATH, 'BGL_AD', 'BGL_11k_preprocessed_logrecord.csv')
    return LogRecordObject.load_from_csv(csv_path)
def all_models(n):
    """Enumerate every satisfying model of the size-*n* instance, printing stats.

    Repeatedly solves, blocks the found model, and re-solves until UNSAT,
    then prints solver statistics, elapsed seconds, and the model count.
    """
    model_count = 0
    solver, xij = ais(n)
    started = time.time()
    while solver.check() == sat:
        # Exclude the model just found so the next check yields a new one.
        blocking = process_model(solver, xij, n)
        solver.add(Not(And(blocking)))
        model_count += 1
    print(solver.statistics())
    print(time.time() - started)
    print(model_count)
def load_csv(file, shape=None, normalize=False):
value_list = []
for row in csv.reader([l.decode('utf-8') for l in file.readlines()]):
if len(row):
value_list.append(list(map(float, row)))
try:
if (shape is None):
return numpy.array(value_list)
else:
... |
class TestJSONFloatEncoder():
def test_encodes_nan_as_string(self):
result = json.dumps(float('NaN'), cls=JSONFloatEncoder)
assert_equals('NaN', result)
def test_encodes_infinity_as_string(self):
result = json.dumps(float('inf'), cls=JSONFloatEncoder)
assert_equals('Infinity', re... |
def test_tensor_to_list(backend):
    """tolist() must round-trip flat and nested tensors back to Python lists."""
    tensorlib = pyhf.tensorlib
    flat = tensorlib.astensor([1, 2, 3, 4])
    assert tensorlib.tolist(flat) == [1, 2, 3, 4]
    nested = tensorlib.astensor([[1], [2], [3], [4]])
    assert tensorlib.tolist(nested) == [[1], [2], [3], [4]]
def match_patterns(sdfg: SDFG, patterns: Union[(Type[xf.PatternTransformation], List[Type[xf.PatternTransformation]])], node_match: Callable[([Any, Any], bool)]=type_match, edge_match: Optional[Callable[([Any, Any], bool)]]=None, permissive: bool=False, metadata: Optional[PatternMetadataType]=None, states: Optional[Lis... |
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
self.b1 = nn.Sequential(nn.Conv2d(in_planes, n1x1, kernel_size=1), nn.BatchNorm2d(n1x1), nn.CELU(True))
self.b2 = nn.Sequential(nn.Conv2d(in_planes, n... |
_args('v', 'v', 'v', 'i', 'i', 'i', 'v', 'i')
def embedding_bag(g, embedding_matrix, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset):
if (scale_grad_by_freq and sym_help._training_mode):
return sym_help._onnx_unsupported('embedding_bag with scale_grad_by_freq for... |
class GNN_node(torch.nn.Module):
def __init__(self, num_layer, emb_dim, node_encoder, drop_ratio=0.5, JK='last', residual=False, gnn_type='gin'):
super(GNN_node, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
self.residual = residual
... |
def test_synthetic_continuous_calc_policy_value():
n_rounds = 10
dim_context = 3
dataset = SyntheticContinuousBanditDataset(dim_context=dim_context, min_action_value=1, max_action_value=10)
bandit_feedback = dataset.obtain_batch_bandit_feedback(n_rounds=n_rounds)
policy_value = dataset.calc_ground_t... |
('/end', methods=['GET', 'POST'])
def end_app():
    """HTTP endpoint: shut down the werkzeug dev server when the client is ready.

    Expects a JSON body with a 'ready_to_end' field; only the exact value
    'ready' triggers shutdown. Always reports success to the caller.
    """
    payload = request.get_json()
    if payload['ready_to_end'] == 'ready':
        # The shutdown hook only exists when running under werkzeug's dev server.
        shutdown = request.environ.get('werkzeug.server.shutdown')
        if shutdown is not None:
            shutdown()
    return {'status': 'success'}
class TestNetWithLoss(nn.Module):
    """Wraps testnet1.TestNet and reduces its output to a scalar sum (loss-like)."""

    def __init__(self):
        super().__init__()
        self.testnet = testnet1.TestNet()

    def forward(self, input):
        output = self.testnet(input)
        return output.sum()
def mutate_pop(pop, mutation_p, noise_stdev, elite_pop):
    """Return a mutated copy of *pop*: masked, high-pass-filtered Gaussian noise.

    Args:
        pop: population array; noise matches its shape.
        mutation_p: per-entry probability that the noise is applied.
        noise_stdev: standard deviation of the Gaussian perturbation.
        elite_pop: only its second dimension is used to size the mutation mask
            (assumed broadcast-compatible with *pop* — TODO confirm shapes).
    """
    # Draw noise first, mask second: preserves the original RNG call order.
    perturbation = np.random.randn(*pop.shape) * noise_stdev
    perturbation = highpass_filter(perturbation)
    keep = np.random.rand(pop.shape[0], elite_pop.shape[1]) < mutation_p
    return pop + perturbation * keep
class SummedPaulis(ObservableBase):
def __init__(self, num_qubits: int, op_str: Union[(str, tuple[str])]='Z', full_sum: bool=True, include_identity: bool=True) -> None:
super().__init__(num_qubits)
self.op_str = op_str
self.full_sum = full_sum
self.include_identity = include_identity... |
def evaluate(data_source, batch_size=10):
model.eval()
if (args.model == 'QRNN'):
model.reset()
total_loss = 0
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(batch_size)
for i in range(0, (data_source.size(0) - 1), args.bptt):
(data, targets) = get_batch(data_source,... |
def get_shape(t):
if torch.is_tensor(t):
return tuple(t.shape)
elif isinstance(t, dict):
return {n: get_shape(q) for (n, q) in t.items()}
elif isinstance(t, (list, tuple)):
return [get_shape(q) for q in t]
elif isinstance(t, numbers.Number):
return type(t)
else:
... |
def save_result(result, result_dir, filename, remove_duplicate='', client=None):
result_file = os.path.join(result_dir, ('%s_rank%d.json' % (filename, utils.get_rank())))
final_result_file = os.path.join(result_dir, ('%s.json' % filename))
if (client is not None):
client.put(os.path.join('s3://Bucke... |
def max_len_seq(nbits, state=None, length=None, taps=None):
taps_dtype = (np.int32 if (np.intp().itemsize == 4) else np.int64)
if (taps is None):
if (nbits not in _mls_taps):
known_taps = np.array(list(_mls_taps.keys()))
raise ValueError(f'nbits must be between {known_taps.min()}... |
class get_model(nn.Module):
def __init__(self, num_class=40, normal_channel=False, k_neighbors=20, calc_scores='softmax', num_matrices=[8, 8, 8, 8], dropout=0.5):
super(get_model, self).__init__()
self.k = k_neighbors
self.calc_scores = calc_scores
(self.m1, self.m2, self.m3, self.m4... |
class Khinchin(Constant):
def __init__(self, name='khinchin'):
conversions = dict(maxima='khinchin', mathematica='Khinchin', pynac='Khinchin')
Constant.__init__(self, name, conversions=conversions, domain='positive')
def _mpfr_(self, R):
import sage.libs.mpmath.all as a
return a.... |
def config_reader():
config = ConfigObj(os.path.join(os.path.dirname(__file__), 'config'))
param = config['param']
model_id = param['modelID']
param['scale_search'] = list(map(float, param['scale_search']))
param['thre1'] = float(param['thre1'])
param['thre2'] = float(param['thre2'])
param['... |
def backup_code(save_path, save_parent=False, ignored_in_current_folder=None, marked_in_parent_folder=None):
if (ignored_in_current_folder is None):
ignored_in_current_folder = ['tmp', 'log', 'data', '__pycache__', 'output', 'deca']
if (marked_in_parent_folder is None):
marked_in_parent_folder =... |
.parametrize('test_stat', ['q0', 'q', 'qtilde'])
def test_hypotest_return_expected(tmp_path, hypotest_args, test_stat):
tb = pyhf.tensorlib
kwargs = {'return_tail_probs': True, 'return_expected': True, 'test_stat': test_stat}
result = pyhf.infer.hypotest(*hypotest_args, **kwargs)
assert (len(list(result... |
def test_ufunc_add_simple2():
    """Vector + scalar through the ufunc must match NumPy's broadcast add."""
    vec = np.random.randint(10, size=(10,), dtype=np.int32)
    scalar = np.random.randint(10, dtype=np.int32)
    result = ufunc_add_simple2(vec, scalar)
    assert np.array_equal(vec + scalar, result)
_utils.test()
def test_unordered():
val = ti.field(ti.i32)
n = 3
m = 7
p = 11
blk1 = ti.root.dense(ti.k, n)
blk2 = blk1.dense(ti.i, m)
blk3 = blk2.dense(ti.j, p)
blk3.place(val)
assert (val.dtype == ti.i32)
assert (val.shape == (m, p, n))
assert (val.snode.parent(0) == val.sn... |
def get_class_weights(y, smooth_factor=0):
    """Compute per-class weights inversely proportional to class frequency.

    The most frequent class gets weight 1.0; rarer classes get proportionally
    larger weights.

    Args:
        y: iterable of class labels.
        smooth_factor: if > 0, every count is padded by
            ``max_count * smooth_factor`` before weights are computed,
            softening the ratio between frequent and rare classes.

    Returns:
        Dict mapping each class label to its float weight.
    """
    counts = Counter(y)
    if smooth_factor > 0:
        pad = max(counts.values()) * smooth_factor
        for label in counts:
            counts[label] += pad
    # Majority is taken after smoothing so weights stay >= 1.0.
    top = max(counts.values())
    return {label: float(top / n) for label, n in counts.items()}
class Linear(torch.nn.Linear):
def forward(self, x: torch.Tensor) -> torch.Tensor:
if ((x.numel() == 0) and obsolete_torch_version(TORCH_VERSION, (1, 5))):
out_shape = [x.shape[0], self.out_features]
empty = NewEmptyTensorOp.apply(x, out_shape)
if self.training:
... |
class DenseLayer(nn.Module):
    """DenseNet-style 3-D layer: conv + ReLU, concatenated with the input.

    Output has ``in_channels + out_channels`` channels; spatial dims unchanged.
    """

    def __init__(self, in_channels, out_channels):
        super(DenseLayer, self).__init__()
        # 3x3x3 conv, stride 1, padding 1 -> same spatial size.
        self.conv = nn.Conv3d(in_channels, out_channels, 3, 1, 1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        features = self.relu(self.conv(x))
        # Dense connectivity: new features appended after the input channels.
        return torch.cat([x, features], 1)
def make_graph(dists, scheme='default'):
scheme = get_scheme(scheme)
graph = DependencyGraph()
provided = {}
for dist in dists:
graph.add_distribution(dist)
for p in dist.provides:
(name, version) = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, ... |
def position(x1, y1, x2, y2, epsilon=0.1):
    """Build a predicate matching bbox dicts whose corners lie within *epsilon*.

    Args:
        x1, y1, x2, y2: target corner coordinates.
        epsilon: per-coordinate absolute tolerance (strict ``<`` comparison).

    Returns:
        A callable taking a dict with 'x1'/'y1'/'x2'/'y2' keys and returning
        True iff every coordinate is within *epsilon* of its target.
    """
    targets = (('x1', x1), ('y1', y1), ('x2', x2), ('y2', y2))

    def matches(bbox):
        return all(abs(bbox[key] - want) < epsilon for key, want in targets)

    return matches
class TestPathHelpers(TestCase):
def test_get_path(self):
d = {'a': '42', 'b': [1, 2, 3, [['42']]], 'f': {'m': ['lgs', [1, 2, 3, '42']]}, 'c': {'m': ['lgs', [1, 2, 3]]}, 'e': {'m': ['lgs', '42']}}
self.assertEqual(get_with_path(d, ['f', 'm', 0]), 'lgs')
def test_set_path(self):
d = {'a':... |
def create_new_datasets(data_directory, normalizer, back_and_forth=False):
logging.info('Creating new datasets')
test_fraction = 0.15
validation_fraction = 0.15
transform = get_transforms(normalizer)
classes = [dI for dI in os.listdir(data_directory) if os.path.isdir(os.path.join(data_directory, dI)... |
def _matmul_broadcast_shape(shape_a, shape_b, error_msg=None):
(m, n, p) = (shape_a[(- 2)], shape_a[(- 1)], shape_b[(- 1)])
if (len(shape_b) == 1):
if (n != p):
if (error_msg is None):
raise RuntimeError(f'Incompatible dimensions for matmul: {shape_a} and {shape_b}')
... |
def to_inductive(data):
mask = (data.train_mask | data.val_mask)
data.x = data.x[mask]
data.y = data.y[mask]
data.train_mask = data.train_mask[mask]
data.val_mask = data.val_mask[mask]
data.test_mask = None
(data.edge_index, _) = subgraph(mask, data.edge_index, None, relabel_nodes=True, num_... |
def _crop(image, offset_height, offset_width, crop_height, crop_width):
original_shape = tf.shape(image)
rank_assertion = tf.Assert(tf.equal(tf.rank(image), 3), ['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, o... |
def num_gpus_to_mem(num_gpus: int, mem_per_gpu: int = 64) -> str:
    """Total memory string for a GPU allocation, e.g. 2 GPUs x 64G -> '128G'.

    Args:
        num_gpus: number of GPUs requested.
        mem_per_gpu: memory per GPU in gigabytes. The original declared this as
            ``mem_per_gpu: 64`` — a bare-literal annotation with no default
            value; fixed to a proper ``int = 64`` annotation-plus-default.

    Returns:
        Scheduler-style memory string such as '128G'.
    """
    return f'{num_gpus * mem_per_gpu}G'
.parametrize('csr_container', (CSR_CONTAINERS + [None]))
def test_fit_predict(csr_container):
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
if (csr_container is not None):
X[(X < 0.8)] = 0
X = csr_container(X)
bisect_means = BisectingKMeans(n_clusters=3, random_state=0)
bisect_m... |
class ErrorExtractor():
def __init__(self, reference_corpus, recall_spanning_tree_algorithm, precision_spanning_tree_algorithm):
self.reference_corpus = reference_corpus
self.recall_spanning_tree_algorithm = recall_spanning_tree_algorithm
self.precision_spanning_tree_algorithm = precision_sp... |
def qufpn_config(min_level, max_level, weight_method=None):
p = OmegaConf.create()
weight_method = (weight_method or 'fastattn')
quad_method = 'fastattn'
num_levels = ((max_level - min_level) + 1)
node_ids = {(min_level + i): [i] for i in range(num_levels)}
level_last_id = (lambda level: node_id... |
def test_impala_paper_count():
    """The IMPALA paper must contribute exactly 291 entries to the database."""
    title = 'IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner Architectures'
    impala_entries = rldb.find_all({'source-title': title})
    # Three tables of 57 entries plus four tables of 30 entries = 291,
    # same total as the original's chained sum.
    assert len(impala_entries) == 3 * 57 + 4 * 30
def generate_csv_csv(filename, num_of_data, data_size):
with create_temp_with_dir(filename) as csvfilename:
datadir = os.path.dirname(csvfilename)
with open(csvfilename, 'w') as f:
f.write('x:data,y\n')
for n in range(0, num_of_data):
x = (numpy.ones(data_size... |
def CalculateHydrogenNumber(mol):
    """Count hydrogen atoms in *mol* after making implicit hydrogens explicit.

    Args:
        mol: an RDKit molecule.

    Returns:
        Integer number of atoms with atomic number 1.
    """
    mol_with_h = Chem.AddHs(mol)
    return sum(1 for atom in mol_with_h.GetAtoms() if atom.GetAtomicNum() == 1)
def register_Ns3MinstrelHtWifiManager_methods(root_module, cls):
cls.add_constructor([param('ns3::MinstrelHtWifiManager const &', 'arg0')])
cls.add_constructor([])
cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
... |
def FetchInt8Blob(name):
    """Fetch a workspace blob and wrap it as an Int8Tensor.

    Args:
        name: blob identifier (stringified before lookup).

    Raises:
        AssertionError: if the fetched blob is not the tuple form an Int8 blob
            uses — plain blobs must go through FetchBlob instead.
    """
    raw = C.fetch_blob(StringifyBlobName(name))
    assert isinstance(raw, tuple), 'You are not fetching an Int8Blob {}. Please use FetchBlob'.format(StringifyBlobName(name))
    return Int8Tensor(*raw)
class Trainer(object):
def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False):
self.use_cuda = use_cuda
if (model_file is not None):
self.load(model_file, use_cuda)
else:
self.args = args
self.model = (None if args['dic... |
class ContentRange(object):
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), 'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.