code stringlengths 101 5.91M |
|---|
class ImageNet(data.Dataset):
def __init__(self, mode, maxSkip=0, joint_transform=None, sliding_crop=None, transform=None, dump_images=False, cv_split=None, eval_mode=False, eval_scales=None, eval_flip=False, image_in=False, extract_feature=False):
self.mode = mode
self.maxSkip = maxSkip
sel... |
def add_download_parser(subparsers, formatter_class):
subparser = subparsers.add_parser('download', formatter_class=formatter_class, help='Download the language resources required by the snips-nlu library')
subparser.add_argument('resource_name', type=str, help="Name of the language resources to download. Can b... |
def bitname(obj):
    """Return a (base_name, bits, char_code) triple describing *obj*'s dtype."""
    bits = _bits_of(obj)
    dt = dtype(obj)
    base = _kind_name(dt)
    # object dtypes carry no fixed bit width
    if base == 'object':
        bits = 0
    # append the byte count to the one-letter kind code when the type is sized
    char = dt.kind if bits == 0 else '%s%d' % (dt.kind, bits // 8)
    return (base, bits, char)
def find_logdirs(rootdir: os.PathLike) -> list[Path]:
    """Collect directories under *rootdir* that hold a ``config_tree.log``
    and satisfy ``check_if_logdir``."""
    hits = []
    for marker in Path(rootdir).rglob('config_tree.log'):
        candidate = marker.parent
        if check_if_logdir(candidate):
            hits.append(candidate)
    return hits
def export_template_args(args):
code_gen = 'public:\n'
for arg_tuple in args:
code_gen += indentation
arg_type = arg_tuple[0]
arg_name = arg_tuple[1]
internal_arg_name = (arg_name + '_')
typename = ''
if (arg_type is int):
typename = 'static int const'... |
.parametrize('dtype', [np.float32, np.float64])
def test_dot(dtype):
dot = _dot_memview[_numpy_to_cython(dtype)]
rng = np.random.RandomState(0)
x = rng.random_sample(10).astype(dtype, copy=False)
y = rng.random_sample(10).astype(dtype, copy=False)
expected = x.dot(y)
actual = dot(x, y)
asser... |
def bf16_to_fp32(d_bf16):
    """Widen a bfloat16 array (stored as raw uint16 bit patterns) to float32.

    bfloat16 is exactly the upper half of an IEEE-754 float32, so each value
    is widened by writing its 16 bits into the high half-word of a float32
    and zeroing the low half-word.

    NOTE: the ``[1::2]`` placement assumes a little-endian host, where the
    high half-word of a float32 is the second uint16 in memory.

    Args:
        d_bf16: np.ndarray of dtype uint16 holding bfloat16 bit patterns.

    Returns:
        np.ndarray of dtype float32 with the same shape.
    """
    assert (d_bf16.dtype == np.uint16)
    s = d_bf16.shape
    d_bf16 = d_bf16.ravel()
    # zeros, not empty: the low 16 bits of every float32 must be 0, otherwise
    # leftover heap garbage perturbs the mantissa and the result is both
    # wrong and non-deterministic.
    d_fp32 = np.zeros_like(d_bf16, dtype=np.float32)
    v_ui16 = d_fp32.view(np.uint16)
    v_ui16[1::2] = d_bf16
    return d_fp32.reshape(s)
class ActivatedAffine(ABN):
def __init__(self, num_features, eps=1e-05, momentum=0.1, affine=True, activation='leaky_relu', activation_param=0.01):
super(ActivatedAffine, self).__init__(num_features, eps, momentum, affine, activation, activation_param)
def _broadcast_shape(x):
out_size = []
... |
def run_experiment(argv):
now = datetime.datetime.now(dateutil.tz.tzlocal())
rand_id = str(uuid.uuid4())[:5]
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S_%f_%Z')
default_exp_name = ('experiment_%s_%s' % (timestamp, rand_id))
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', typ... |
def run(args):
dataset = VOCSemanticSegmentationDataset(split=args.chainer_eval_set, data_dir=args.voc12_root)
labels = [dataset.get_example_by_keys(i, (1,))[0] for i in range(len(dataset))]
preds = []
for id in dataset.ids:
cam_dict = np.load(os.path.join(args.cam_out_aug_dir, (id + '.npy')), a... |
def main():
tmp_dir = tempfile.mkdtemp()
os.symlink(('%s/returnn' % _base_dir), ('%s/returnn' % tmp_dir))
config_fn = ('%s/returnn.config' % tmp_dir)
with open(config_fn, 'w') as f:
f.write('#!rnn.py\n')
f.write('use_tensorflow = True\n')
f.write('num_inputs, num_outputs = 3, 5\n... |
def to_nbytes(arg, default=None):
if (not arg):
return None
if (arg is True):
return default
if isinstance(arg, Number):
return arg
match = mem_re.match(arg)
if (match is None):
raise ValueError('Memory size could not be parsed (is your capitalisation correct?): {}'.f... |
class Merger(object):
def getName(self) -> str:
raise NotImplementedError('getName not implemented.')
def getTargetType(self) -> str:
raise NotImplementedError('getTargetType not implemented.')
def doMerge(self, objectA: Mergeable, objectB: Mergeable) -> Mergeable:
raise NotImplement... |
def train():
df = spark.read.parquet(dataloc)
filteredDF = timestampRangeDF(df, begin_date, end_date)
preprocDF = run_spark_preproc_pipeline(filteredDF, STOPWORDS)
nlpPipelineDF = run_nlp_pipeline(preprocDF).persist()
article_count = nlpPipelineDF.count()
(mlModel, ldaModel) = run_ml_pipeline(nl... |
class MovieSpec(DomainSpec):
name = 'movie'
greet = 'Want to know about movies?'
nlg_spec = {'genre': {'inform': ['I like %s movies.', '%s.', 'I love %s ones.', '%s movies.'], 'request': ['What genre do you like?', 'Which type of movie?']}, 'years': {'inform': ['Movies in %s', 'In %s.'], 'request': ["What's... |
class IdentityTransformerActionSampler(TransformerActionSampler):
    """Sampler that passes the transformer output through unchanged."""

    def __call__(self, transformer_output: NDArray) -> Union[(NDArray, int)]:
        # identity: no sampling, argmax, or post-processing applied
        return transformer_output
def test_cli_video_scale():
with patch_sys_argv_helper(['ti', 'video_scale', '-i', 'video.mp4', '-w', '1.2']) as custom_argv:
cli = TaichiMain(test_mode=True)
args = cli()
assert (args.input_file == 'video.mp4')
assert (args.ratio_width == 1.2)
assert (args.ratio_height == 1.... |
def create_misuse(misuse_id: str, meta: Dict[(str, Any)]=None, project: Project=None, version: ProjectVersion=None, correct_usages: List[CorrectUsage]=None):
if (not project):
project = create_project('-project-')
if (not version):
version = create_version('-version-', misuses=[])
misuse = M... |
def get_mnist_common_config():
(rho_ref_train, tau_inv, pi1_bias, logSigmaZval) = (0.95, 0.0001, 0.0, (- 2))
(logsumexp_coef, kl_reg_coef, l2_reg_coef) = (0.01, 0.0001, 1e-05)
(USE_INPUT_BN, USE_RESNET, USE_GAP, USE_KENDALL_LOSS) = (False, True, False, False)
maxEpoch = 40
return (rho_ref_train, tau... |
class dummy_ctype(object):
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
return self
def __call__(self, *other):
return self._cls(other)
def __eq__(self, other):
return (self._cls == other._cls)
def __ne__(self, other):
return (self._cls !... |
_grad()
def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
if (config_path is not None):
config = CLIPConfig.from_pretrained(config_path)
else:
config = CLIPConfig(projection_dim=512, text_config={}, vision_config={})
hf_model = CLIPModel(config).eval()... |
_REGISTRY.register()
class PartialREID(ImageDataset):
dataset_name = 'partialreid'
def __init__(self, root='datasets'):
self.root = root
self.query_dir = osp.join(self.root, 'Partial_REID/partial_body_images')
self.gallery_dir = osp.join(self.root, 'Partial_REID/whole_body_images')
... |
class TomlNumpyEncoder(TomlEncoder):
def __init__(self, _dict=dict, preserve=False):
import numpy as np
super(TomlNumpyEncoder, self).__init__(_dict, preserve)
self.dump_funcs[np.float16] = _dump_float
self.dump_funcs[np.float32] = _dump_float
self.dump_funcs[np.float64] = _d... |
_utils.test()
def test_loop_var_struct():
    """A struct-for over a Taichi field must reject reusing an already-bound
    variable as its loop variable and raise TaichiCompilationError."""
    x = ti.field(ti.f32)
    ti.root.dense(ti.i, 1).place(x)
    def func():
        i = 0
        # `i` is already bound before the struct-for; Taichi should refuse
        # to reuse it as the loop variable.
        for i in x:
            pass
    with pytest.raises(ti.TaichiCompilationError):
        func()
class LightGBM():
def __init__(self, params=None):
if (params is None):
self.params = {'lambda_l1': 0., 'lambda_l2': 3.e-07, 'num_leaves': 220, 'feature_fraction': 0., 'bagging_fraction': 0., 'bagging_freq': 2, 'min_child_samples': 92, 'max_depth': 10}
else:
self.params = par... |
def get_scheduler(optimizer, opt):
if (opt.lr_policy == 'linear'):
def lambda_rule(epoch):
lr_l = (1.0 - (max(0, ((epoch + 1) - opt.nepochs)) / float((opt.nepochs_decay + 1))))
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif (opt.lr_po... |
def prepro(args):
if (not os.path.exists(args.target_dir)):
os.makedirs(args.target_dir)
if (args.mode == 'full'):
prepro_each(args, 'dev', out_name='test')
elif (args.mode == 'all'):
create_all(args)
prepro_each(args, 'dev', 0.0, 0.0, out_name='dev')
prepro_each(args... |
def transcriber(audio):
    """Pretend to transcribe *audio*; the input must mention 'sound'."""
    if 'sound' in audio:
        return f'This is the transcribed text from {audio}.'
    raise ValueError(f'`audio` ({audio}) is not a sound.')
def create_signed_cert_for_collaborator(col, data_path):
print(f'Certifying collaborator {col} with data path {data_path}...')
check_call(['fx', 'collaborator', 'create', '-d', data_path, '-n', col, '--silent'])
check_call(['fx', 'collaborator', 'generate-cert-request', '-n', col, '--silent'])
check_cal... |
class GPTNeoXModel(metaclass=DummyObject):
    """Import-time placeholder used when torch is not installed."""

    # backends this dummy object stands in for
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # fails fast with an informative error if torch is unavailable
        requires_backends(self, ['torch'])
def test_feature_agglomeration():
n_clusters = 1
X = np.array([0, 0, 1]).reshape(1, 3)
agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean)
agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median)
agglo_mean.fit(X)
agglo_median.fit(X)
asser... |
def AllTest():
    """Run every WNet unit test in order, then report success."""
    for run_test in (EncoderTest, DecoderTest, WNetTest, TrainTest):
        run_test()
    print('WNet Passed All Tests!')
def mlir_tasklet_double_return_generic(A: dace.int32[3], B: dace.int32[2], C: dace.int32[1]):
    # DaCe program containing a tasklet that reads one element from each of
    # A and B and writes one element of C.
    # NOTE(review): the bare ('MLIR') string below looks like a mangled
    # tasklet-language decorator/argument from the original source — confirm.
    ('MLIR')
    def add():
        # DaCe tasklet connector syntax: `<<` reads into a local, `>>` writes out.
        (a << A[0])
        (b << B[0])
        (c >> C[0])
class tiu_cmd_reg(atomic_reg):
_fields_ = [('cmd_en', ctypes.c_uint64, 1), ('cmd_end', ctypes.c_uint64, 1), ('cmd_id_en', ctypes.c_uint64, 1), ('cmd_id_tpu', ctypes.c_uint64, 16), ('cmd_id_gdma', ctypes.c_uint64, 16), ('cmd_keep', ctypes.c_uint64, 1), ('cmd_intr_en', ctypes.c_uint64, 1), ('tsk_typ', ctypes.c_uint64... |
def register_Ns3BSSchedulerRtps_methods(root_module, cls):
cls.add_constructor([param('ns3::BSSchedulerRtps const &', 'arg0')])
cls.add_constructor([])
cls.add_constructor([param('ns3::Ptr< ns3::BaseStationNetDevice >', 'bs')])
cls.add_method('AddDownlinkBurst', 'void', [param('ns3::Ptr< ns3::WimaxConne... |
class HDDM_A(BaseDriftDetector):
def __init__(self, drift_confidence=0.001, warning_confidence=0.005, two_side_option=True):
super().__init__()
super().reset()
self.n_min = 0
self.c_min = 0
self.total_n = 0
self.total_c = 0
self.n_max = 0
self.c_max = ... |
class NaiveFinitePointEnumerator():
def __init__(self, fan, ring):
assert ring.is_finite()
self.ring = ring
self.fan = fan
_method
def rays(self):
return (self.fan.rays() + self.fan.virtual_rays())
_method
def units(self):
return tuple((x for x in self.ring if... |
def watershed_ift(input, markers, structure=None, output=None):
input = numpy.asarray(input)
if (input.dtype.type not in [numpy.uint8, numpy.uint16]):
raise TypeError('only 8 and 16 unsigned inputs are supported')
if (structure is None):
structure = _morphology.generate_binary_structure(inpu... |
def _is_pandas_df(X):
if (hasattr(X, 'columns') and hasattr(X, 'iloc')):
try:
pd = sys.modules['pandas']
except KeyError:
return False
return isinstance(X, pd.DataFrame)
return False |
class FairseqWav2Vec2(nn.Module):
def __init__(self, pretrained_path, save_path, input_norm=None, output_norm=False, freeze=False, freeze_feature_extractor=False, pretrain=True, dropout=None, layer_drop=None):
super().__init__()
download_file(pretrained_path, save_path)
overrides = {}
... |
class augmentations(object):
    """Default hyper-parameters for time-series augmentation."""

    def __init__(self):
        # jitter/scale strengths and the max number of segments to permute
        self.jitter_scale_ratio, self.jitter_ratio = 0.8, 0.2
        self.max_seg = 8
def overwrite_call_docstring(model_class, docstring):
    # Replace model_class.__call__'s docstring with *docstring*:
    # 1) copy the function so the shared original is not mutated,
    # 2) clear the stale docstring,
    # 3) re-wrap with the decorator that installs the new forward docs.
    # NOTE(review): step order matters — the decorator reads/extends __doc__.
    model_class.__call__ = copy_func(model_class.__call__)
    model_class.__call__.__doc__ = None
    model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__)
def get_batcnnorm(bn, nr_features=None, nr_dims=1):
if isinstance(bn, nn.Module):
return bn
assert (1 <= nr_dims <= 3)
if (bn in (True, 'async')):
clz_name = 'BatchNorm{}d'.format(nr_dims)
return getattr(nn, clz_name)(nr_features)
else:
raise ValueError('Unknown type of b... |
def srwl_wfr_from_intens(_ar_int, _mesh, _part_beam, _Rx, _Ry, _xc=0, _yc=0):
lenInt = len(_ar_int)
nTot = ((_mesh.ne * _mesh.nx) * _mesh.ny)
if (lenInt != nTot):
raise Exception('Mesh parameters are not consistent with the length of intensity array')
aux_const = (3141592. / 1.)
constRx = (a... |
class Scheme(Parent):
def __init__(self, X=None, category=None):
from sage.schemes.generic.morphism import is_SchemeMorphism
from sage.categories.map import Map
from sage.categories.rings import Rings
if (X is None):
self._base_ring = ZZ
elif is_Scheme(X):
... |
.filterwarnings('ignore:The default value of `n_init` will change')
def test_fit_resample_half():
sampling_strategy = {0: 3, 1: 6}
cc = ClusterCentroids(sampling_strategy=sampling_strategy, random_state=RND_SEED)
(X_resampled, y_resampled) = cc.fit_resample(X, Y)
assert (X_resampled.shape == (9, 2))
... |
def retrieve_tigge_data():
date1 = [(str(i) + '-01-01') for i in xrange(2007, 2017)]
date2 = [(str(i) + '-12-31') for i in xrange(2007, 2017)]
dates = date1
for j in range(0, 10):
dates[j] = ((date1[j] + '/to/') + date2[j])
data_dir = '/media/sebastian/Elements/Postproc_NN/data/forecasts/aux... |
def count_params(model_or_params: Union[(torch.nn.Module, torch.nn.Parameter, List[torch.nn.Parameter])], return_trainable=True, verbose=True):
if isinstance(model_or_params, torch.nn.Module):
model_or_params = list(model_or_params.parameters())
elif isinstance(model_or_params, torch.nn.Parameter):
... |
class RowBroadcastNode(NameNode):
def __init__(self, element_accumulator, element_fragment, node) -> None:
super().__init__(node)
self.tag = ('RowBroadcast:' + self.tag)
self.type = 'tensor'
self.element_accumulator = element_accumulator
self.element_fragment = element_fragme... |
def test():
assert (ak.operations.is_none(ak.Array([1, 2, 3, None, 5])).to_list() == [False, False, False, True, False])
assert (ak.operations.is_none(ak.Array([[1, 2, 3], [], [None, 5]])).to_list() == [False, False, False])
assert (ak.operations.is_none(ak.Array([[1, 2, 3], [], [None, 5]]), axis=1).to_list... |
def pad_code(total_code):
    """Right-pad *total_code* with zeros up to MAX_CODE and build a mask
    that is True exactly on the padded positions."""
    n = len(total_code)
    keys = np.ones(n)
    padding = np.zeros(MAX_CODE - n).astype(int)
    padded = np.concatenate([total_code, padding], axis=0)
    seq_mask = (1 - np.concatenate([keys, padding])) == 1
    return (padded, seq_mask)
def RegularArray_toListOffsetArray(self):
    """Convert a RegularArray view into an equivalent ListOffsetArray."""
    # offsets of a regular layout are just multiples of the fixed inner size
    nextoffsets = [i * self.size for i in range(len(self) + 1)]
    return ListOffsetArray(nextoffsets, self.content)
def _ml1_env_names():
    """Return the 50 ML1 task names: test-split names followed by train-split names."""
    key_train = _env_dict.HARD_MODE_ARGS_KWARGS['train']
    key_test = _env_dict.HARD_MODE_ARGS_KWARGS['test']
    # equivalent to the original sum([...], start): start + elements,
    # i.e. test names first, then train names
    tasks = list(key_test) + list(key_train)
    assert (len(tasks) == 50)
    return tasks
def test_combine_add_number_to_tensor():
a_raw = torch.tensor([2.0, 2.0, 2.0])
b_raw = torch.tensor(3.0)
feature_dim = Dim(3)
a = Tensor(name='a', raw_tensor=a_raw, dims=[feature_dim], dtype='float32')
b = Tensor(name='b', raw_tensor=b_raw, dims=[], dtype='float32')
result = (a + b)
result_a... |
def set_seed(seed):
    """Seed the stdlib RNG, NumPy, and torch (CPU plus all CUDA devices)."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def p_arg_list(p):
    # PLY production handler for an argument list.
    # NOTE(review): PLY normally reads the grammar rule from this function's
    # docstring; it appears to have been stripped in this extract, so no
    # docstring is (re)added here to avoid changing parser behavior.
    if (len(p) == 2):
        # base case: a single argument becomes a one-element list
        p[0] = [p[1]]
    else:
        # recursive case: extend the accumulated list with the new argument
        p[0] = (p[1] + [p[3]])
class InvertibleConv2d(nn.Module):
def __init__(self, dim):
super(InvertibleConv2d, self).__init__()
self.dim = dim
self.weight = nn.Parameter(torch.eye(dim)[torch.randperm(dim)])
def forward(self, x, logpx=None):
y = F.conv2d(x, self.weight.view(self.dim, self.dim, 1, 1))
... |
def linear_reward_funcion_continuous(context: np.ndarray, action: np.ndarray, random_state: Optional[int]=None) -> np.ndarray:
check_array(array=context, name='context', expected_dim=2)
check_array(array=action, name='action', expected_dim=1)
if (context.shape[0] != action.shape[0]):
raise ValueErro... |
class Disparity(torch.nn.Module):
def __init__(self):
super().__init__()
self.netImage = torch.nn.Conv2d(in_channels=3, out_channels=32, kernel_size=7, stride=2, padding=3)
self.netSemantics = torch.nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, padding=1)
for ... |
def has_finite_length(obj):
    """Report whether *obj* has a (finite) length.

    Returns True when ``len(obj)`` succeeds, True when it overflows (a
    length exists but exceeds the platform limit), and False for any other
    failure — e.g. the object has no length at all.
    """
    try:
        len(obj)
    except OverflowError:
        # a length exists; it is merely too large to represent
        return True
    except Exception:
        return False
    return True
class MemExperiment(Experiment):
def __init__(self, name, cfg, benchmark_dir, output_dir):
super().__init__(name, cfg, benchmark_dir, output_dir)
self._reps = cfg['config']['benchmark']['repetitions']
def process(self):
for (idx, instance) in enumerate(self._cfg['instances']):
... |
class GSVCitiesDataModule(pl.LightningDataModule):
def __init__(self, batch_size=32, img_per_place=4, min_img_per_place=4, shuffle_all=False, image_size=(480, 640), num_workers=4, show_data_stats=True, cities=TRAIN_CITIES, mean_std=IMAGENET_MEAN_STD, batch_sampler=None, random_sample_from_each_place=True, val_set_n... |
def find_single_person_bbox(predictions):
max_confidence = 0.5
bounding_box = None
for prediction in predictions:
confidence = prediction[1]
if ((prediction[0] == b'person') and (confidence > max_confidence)):
max_confidence = confidence
bounding_box = list(prediction... |
class TUpliftMetric(metaclass=abc.ABCMeta):
    """Interface for uplift metrics: callables that score predicted uplift
    against ground truth and treatment assignment."""

    def __call__(self, y_true: np.ndarray, uplift_pred: np.ndarray, treatment: np.ndarray) -> float:
        # NOTE(review): declared under ABCMeta but not decorated with
        # @abc.abstractmethod, so subclasses are not forced to override.
        pass
def dice_coef(y_true, y_pred):
    """Soft Dice coefficient between two tensors (flattened before scoring);
    K.epsilon() guards against division by zero."""
    yt = K.flatten(y_true)
    yp = K.flatten(y_pred)
    overlap = K.sum(yt * yp)
    numerator = (2.0 * overlap) + K.epsilon()
    denominator = (K.sum(yt) + K.sum(yp)) + K.epsilon()
    return numerator / denominator
class critic(nn.Module):
def __init__(self, env_params):
super(critic, self).__init__()
self.max_action = env_params['action_max']
self.fc1 = nn.Linear(((env_params['obs'] + env_params['goal']) + env_params['action']), 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(... |
.parametrize('device', ['cpu', 'cuda'])
.parametrize('M', [0, 1, 7, 8])
.parametrize('out_format', [0, 1, 2, 3])
def test_compatibility(device, M, out_format, L=32, B=2):
lpc2lsp = diffsptk.LinearPredictiveCoefficientsToLineSpectralPairs(M, log_gain=True, sample_rate=8000, out_format=out_format)
U.check_compati... |
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_public_network = IPv4Network('100.64.0.0/10')
_private_networks = [IPv4Network('0.0.0.0/8'), IPv4Network('10.0.0.0/8'), IPv... |
def get_avg_e_per_ts(edgelist_df):
sum_num_e_per_ts = 0
unique_ts = np.unique(np.array(edgelist_df['ts'].tolist()))
for ts in unique_ts:
num_e_at_this_ts = len(edgelist_df.loc[(edgelist_df['ts'] == ts)])
sum_num_e_per_ts += num_e_at_this_ts
avg_num_e_per_ts = ((sum_num_e_per_ts * 1.0) / ... |
def reverse_step(self, model_output, timestep: int, sample):
if (self.num_inference_steps is None):
raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
prev_timestep = (timestep - (self.config.num_train_timesteps / self.num_inference_step... |
class ASM1688NameBreakpoint(Breakpoint):
    """Breakpoint that triggers on 1688-target assembly opcode names."""

    type = '1688-asm'
    # matches the leading word of the breakpoint text
    pattern = re.compile('^\\w+')

    # NOTE(review): takes `cls` but is not decorated with @classmethod —
    # confirm how the breakpoint registry invokes this before changing it.
    def match_break(cls, text, tdb: TdbCmdBackend) -> bool:
        from ..target_1688.regdef import op_class_dic
        # break only on names that are known 1688 opcodes
        if (text in op_class_dic):
            return True
        return False
def test_two_sentences(tmp_path):
    """Two blank-line-separated bios should produce 2 records of lengths 5 and 12."""
    raw_text = '\n\n'.join([BIO_1, BIO_2])
    run_test(tmp_path, raw_text, 2, [5, 12])
def create_pipeline_configuration(DEBUG=False, batch_size=4):
config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (Linear, Dropout, T5Block, CrossEntropyLoss, T5LayerNorm, StatelessEmbedding), 'model_inputs': {'attention_mask': {'shape': torch.Size([4, 1, 1, 320]), 'dtype': torch.float32, 'is_batched': True, ... |
def generate_product_node(node, root_node, floating_data_type, depth):
result_calculation_lines = []
for child in node.children:
result_calculation_lines += [f'if ({generate_scope_check(child.scope)}) {{nodeIntermediateResult[{node.id}] *= nodeIntermediateResult[{child.id}];}}']
value_dictionary = {... |
def run(database, input_dir, output_dir=None, config_file=None, fuzzer=None):
if (database not in DBMS):
print(f'Unsupported database. The supported ones are {DBMS}')
return
if (not output_dir):
output_dir = '/tmp/fuzz'
if (not config_file):
config_file = get_config_path(data... |
class ResBlock(nn.Module):
def __init__(self, dim_in, dim_out, temp_kernel_size, stride, trans_func, dim_inner, num_groups=1, stride_1x1=False, inplace_relu=True, eps=1e-05, bn_mmt=0.1):
super(ResBlock, self).__init__()
self._inplace_relu = inplace_relu
self._eps = eps
self._bn_mmt =... |
.skip(reason='this transformation will need to be rewritten: dace now supports accessing as acessnodes')
.pure
def test_input_to_constant(sdfg_name):
net = TestModule()
dace_net = DaceModule(net, sdfg_name=sdfg_name)
inp = torch.rand((10, 5))
def ApplyInputToConst(dace_module):
sdfg = dace_modul... |
def spectral_clustering(adj_matrix: np.ndarray, k: int) -> list:
L = laplacian_matrix(adj_matrix)
V = eigenvector_matrix(L, k)
communities = init_communities(len(adj_matrix), k)
while True:
C = calc_centroids(V, communities)
updated_communities = update_assignments(V, C, deepcopy(communi... |
def get_config_files(file_list, exclude_folders):
cfg_root_path = utils.get_config_root_path()
if (file_list is not None):
files = [os.path.join(cfg_root_path, x) for x in file_list]
else:
files = glob.glob(os.path.join(cfg_root_path, './**/*.yaml'), recursive=True)
def _contains(path, e... |
class truncexpon_gen(rv_continuous):
def _shape_info(self):
return [_ShapeInfo('b', False, (0, np.inf), (False, False))]
def _get_support(self, b):
return (self.a, b)
def _pdf(self, x, b):
return (np.exp((- x)) / (- sc.expm1((- b))))
def _logpdf(self, x, b):
return ((- x)... |
def register_Ns3MgtProbeResponseHeader_methods(root_module, cls):
cls.add_constructor([param('ns3::MgtProbeResponseHeader const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('GetBeaconIntervalUs'... |
class I2b2_2010_Processor(BlueBERTProcessor):
    """Processor for the i2b2-2010 relation-classification task."""

    def get_labels(self):
        # eight relation types plus 'false' for the no-relation class
        return ['PIP', 'TeCP', 'TeRP', 'TrAP', 'TrCP', 'TrIP', 'TrNAP', 'TrWP', 'false']
def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max):
dtype = X.dtype
(X, permute_axis_list) = _permute_to_axis_zero(X.to(torch.float32), axis)
Xq = torch.zeros_like(X)
for i in range(X.size()[0]):
Xq[i] = torch.round(... |
class Polyhedron_RDF(Polyhedron_base):
    """Polyhedron over RDF: sign predicates use a 1e-6 floating tolerance."""

    def _is_zero(self, x):
        # |x| within tolerance counts as zero
        return (abs(x) <= 1e-06)

    def _is_nonneg(self, x):
        return (x >= (- 1e-06))

    def _is_positive(self, x):
        # NOTE(review): identical to _is_nonneg — a strictly-positive test
        # would normally require x >= +1e-06; confirm this laxness is intended.
        return (x >= (- 1e-06))

    # base ring used for all coordinate arithmetic
    _base_ring = RDF
def get_velocity(Ur, Ur_hat, **context):
    """Fill the first two components of *Ur* from their spectral counterparts
    (via each component's inverse transform) and return them."""
    for axis in range(2):
        Ur[axis] = Ur_hat[axis].backward(Ur[axis])
    return Ur[:2]
def _set_jit_overload_cache(key, compiled_fns):
    # Cache only the qualified names (not the function objects) so the cache
    # stays lightweight and does not pin the compiled functions alive.
    _jit_function_overload_caching[key] = [fn.qualified_name for fn in compiled_fns]
class VGen50BaseConfig(VGenConfig):
    """Base v-gen configuration: 0.5 generation ratio with a 12x768x12
    encoder and an 8x512x16 decoder."""

    identifier = 'v-gen-base'
    # fraction of tokens routed to the generative objective
    gen_ratio = 0.5
    # encoder geometry: layers x embedding dim x attention heads
    encoder_depth = 12
    encoder_embed_dim = 768
    encoder_n_heads = 12
    # decoder geometry: shallower and narrower than the encoder
    decoder_depth = 8
    decoder_embed_dim = 512
    decoder_n_heads = 16
    # per-device and native batch sizes
    device_bsz = 32
    native_bsz = 32
class CartanType(CartanType_standard_untwisted_affine):
def __init__(self, n):
assert (n >= 1)
CartanType_standard_untwisted_affine.__init__(self, 'C', n)
def dynkin_diagram(self):
n = self.n
if (n == 1):
from . import cartan_type
res = cartan_type.CartanT... |
class RobustMultitaskClassifier(TensorGraph):
def __init__(self, n_tasks, n_features, layer_sizes=[1000], weight_init_stddevs=0.02, bias_init_consts=1.0, weight_decay_penalty=0.0, weight_decay_penalty_type='l2', dropouts=0.5, activation_fns=tf.nn.relu, n_classes=2, bypass_layer_sizes=[100], bypass_weight_init_stdde... |
def load_for_dataset(dataset_name):
    """Load the DensePose transform data registered for *dataset_name*."""
    src = MetadataCatalog.get(dataset_name).densepose_transform_src
    local_path = PathManager.get_local_path(src)
    return DensePoseTransformData.load(local_path)
class WikiLink(object):
    """Lightweight record for a wiki anchor: target title, surface text,
    and the probability that the text is a link."""

    # no per-instance __dict__: keeps millions of link records memory-cheap
    __slots__ = ('title', 'text', 'link_prob')

    def __init__(self, title, text, link_prob):
        self.title, self.text, self.link_prob = title, text, link_prob
def x():
    # Benchmark loop: run the TVM-compiled function and its backward pass
    # 100 times, then block until the device queue drains so timings are real.
    # NOTE(review): tvm_fn, inp_all, grad_outs and ctx come from an enclosing
    # scope not visible here.
    for i in range(100):
        res_tvm = tvm_fn(*inp_all)
        grads_tvm = torch.autograd.grad(res_tvm, inp_all, grad_outs)
    ctx.sync()
class tx_rx_hier_functionality_check(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, 'Tx Rx Hier Functionality Check', catch_exceptions=True)
self.bw = bw = 125000
self.sync_word = sync_word = 18
self.soft_decoding = soft_decoding = False
self.sf = sf = 7
... |
def check_cios(control_inputs=False, control_outputs=None, control_ios=None):
if (control_ios is not None):
if (not isinstance(control_ios, util.ControlOutputs)):
raise TypeError('Expected a util.ControlOutputs, got: {}'.format(type(control_ios)))
if (control_outputs is not None):
... |
def preprocess_fluorescence(input_image, bInvert=True, magnification_downsample_factor=1.0):
    """Map a fluorescence image into the intensity range expected by
    `preprocess`, optionally undoing the inversion, and return the result."""
    # invert and halve intensities
    img = (255 - input_image) / 2
    if not bInvert:
        img = 255 - img
    return preprocess(img, magnification_downsample_factor=magnification_downsample_factor)
def make_agent(obs_spec, action_spec, cfg):
    """Instantiate the configured agent, filling in env-derived config fields.

    Args:
        obs_spec: mapping from observation type to a spec exposing `.shape`.
        action_spec: action spec; its `.shape` is copied in when present.
        cfg: hydra config; `cfg.agent` is instantiated and returned.

    Returns:
        The instantiated agent object.
    """
    cfg.agent.obs_shape = obs_spec[cfg.obs_type].shape
    # These fields are optional for some agent configs, so keep best-effort
    # behavior — but catch only Exception (not bare `except:`) so that
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        cfg.agent.action_shape = action_spec.shape
    except Exception:
        pass
    try:
        cfg.agent.env_name = cfg.suite.task_name
    except Exception:
        pass
    return hydra.utils.instantiate(cfg.agent)
def test_binary_policy_positive_examples(digraph, features_1d, labels):
    """BinaryPolicy must leave positive_examples unimplemented (abstract)."""
    policy = BinaryPolicy(digraph, features_1d, labels)
    with pytest.raises(NotImplementedError):
        policy.positive_examples('1')
def update_query_type(query, qmap):
    """Classify *query* and increment the matching counter in *qmap* in place.

    Yes/no questions go to 'yesno'; otherwise the first word is used as the
    bucket when it is a known key, else 'other'.
    """
    assert (len(query) > 0)
    query = query.lower()
    head = query.split()[0]
    if whether_ynq(query):
        bucket = 'yesno'
    elif head in qmap:
        bucket = head
    else:
        bucket = 'other'
    qmap[bucket] += 1
def test_nonlinear_constraint():
n = 3
m = 5
rng = np.random.RandomState(0)
x0 = rng.rand(n)
(fun, jac, hess) = create_quadratic_function(n, m, rng)
f = fun(x0)
J = jac(x0)
lb = [(- 10), 3, (- np.inf), (- np.inf), (- 5)]
ub = [10, 3, np.inf, 3, np.inf]
user_constraint = Nonlinear... |
def test_benchmark_hash(benchmark_test_case):
    # Repeated clones of the same benchmark case must hash (and compare)
    # equal, so the set collapses to a single element.
    assert (len({benchmark_test_case.clone() for _ in range(BENCHMARK_REPETITIONS)}) == 1)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.