class LayoutXLMProcessor(ProcessorMixin):
feature_extractor_class = 'LayoutLMv2FeatureExtractor'
tokenizer_class = ('LayoutXLMTokenizer', 'LayoutXLMTokenizerFast')
def __call__(self, images, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, text_pair: Optional[Union... |
def chamfer(pcd1, pcd2):
(dist1, _, dist2, _) = tf_nndistance.nn_distance(pcd1, pcd2)
dist1 = tf.reduce_mean(tf.sqrt(dist1))
dist2 = tf.reduce_mean(tf.sqrt(dist2))
return ((dist1 + dist2) / 2) |
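The snippet above depends on the compiled `tf_nndistance` custom op. As a point of reference, here is a minimal NumPy sketch of the same symmetric Chamfer distance using brute-force nearest neighbours (the name `chamfer_np` is ours, not from the source):
import numpy as np

def chamfer_np(pcd1, pcd2):
    # Pairwise squared distances between the two clouds, shape (N, M).
    d = np.sum((pcd1[:, None, :] - pcd2[None, :, :]) ** 2, axis=-1)
    # Mean nearest-neighbour distance in each direction, then averaged.
    dist1 = np.mean(np.sqrt(d.min(axis=1)))
    dist2 = np.mean(np.sqrt(d.min(axis=0)))
    return (dist1 + dist2) / 2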
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
if (isinstance(marker, list) and (len(marker) == 1) and isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) ... |
class _ClassNamespace(types.ModuleType):
def __init__(self, name):
super(_ClassNamespace, self).__init__(('torch.classes' + name))
self.name = name
def __getattr__(self, attr):
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
if (proxy is None):
rais... |
def gen_line_dict_file(out_path, imgid2imgname, imgid2anno, img_size=False):
lines = []
for (key, value) in imgid2imgname.items():
if (key in imgid2anno):
anno = imgid2anno[key]
line_dict = {}
line_dict['file_name'] = value['file_name']
line_dict['text'] =... |
class RationalField(Singleton, number_field_base.NumberField):
def __new__(cls):
try:
return QQ
except BaseException:
from sage.rings.number_field.number_field_base import NumberField
return NumberField.__new__(cls)
def __init__(self):
from sage.catego... |
class MultiLabelPrecision(torchmetrics.Metric):
def __init__(self, num_classes, threshold):
super().__init__()
self.num_classes = num_classes
self.threshold = threshold
self.add_state('true_positives', torch.tensor(0.0))
self.add_state('false_positives', torch.tensor(0.0))
... |
class DetectorMixin():
    @staticmethod
    def _check_nan(df, **kwargs):
assert (not bool(df.isnull().values.any())), 'The input dataframe contains NaNs.'
    @staticmethod
    def _check_column_names(df, **kwargs):
for col in df.columns:
assert isinstance(col, str), f'The column name must be a string instead of {type(col)}.... |
def logits_to_scalar(logits: Array, num_bins: int) -> Array:
chex.assert_equal(num_bins, logits.shape[(- 1)])
max_val = ((num_bins - 1) // 2)
x = jnp.sum(((jnp.arange(num_bins) - max_val) * jax.nn.softmax(logits)), axis=(- 1))
return x |
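`logits_to_scalar` computes the softmax-weighted expectation over the bin centres `-max_val, ..., max_val`. A quick sanity check with our own example values (assumes `jax` and the function above are in scope):
import jax.numpy as jnp

logits = jnp.array([0.0, 0.0, 10.0, 0.0, 0.0])  # num_bins = 5 -> centres -2..2
print(logits_to_scalar(logits, num_bins=5))     # close to 0.0: mass sits on the centre bin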
class NCISMetric(Metric):
def __init__(self, prev_policy_weights: DataFrameLike, threshold: float=10.0, activation: Optional[str]=None, use_scala_udf: bool=False):
self._use_scala_udf = use_scala_udf
self.prev_policy_weights = convert2spark(prev_policy_weights).withColumnRenamed('relevance', 'prev_r... |
def find_min(vect):
    # Walk back from the end to the last non-zero entry; checking (i > 0)
    # first avoids indexing vect[-1] on empty or all-zero input.
    i = len(vect)
    while ((i > 0) and (vect[(i - 1)] == 0)):
        i = (i - 1)
    result = ([0] * len(vect))  # renamed from `min` to avoid shadowing the builtin
    if (i > 0):
        result[(i - 1)] = 1
    return result |
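In effect the function returns an indicator vector marking the last non-zero entry, or all zeros if there is none; a couple of illustrative calls:
print(find_min([2, 0, 3, 0, 0]))  # [0, 0, 1, 0, 0]
print(find_min([0, 0, 0]))        # [0, 0, 0]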
def loss_calc(pred, label, gpu):
label = Variable(label.long()).cuda(gpu)
criterion = CrossEntropy2d().cuda(gpu)
return criterion(pred, label) |
class MLP(nn.Module):
def __init__(self, dim, embed_dim):
super().__init__()
self.proj = nn.Linear(dim, embed_dim)
def forward(self, x: Tensor) -> Tensor:
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x |
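`flatten(2)` turns a `(B, C, H, W)` feature map into `(B, C, H*W)`, the transpose gives `(B, H*W, C)` tokens, and the linear layer projects each token from `dim` to `embed_dim`. A shape check with our own example sizes (assumes `torch`, `nn`, and `Tensor` are imported as in the snippet):
import torch

mlp = MLP(dim=64, embed_dim=256)
x = torch.randn(2, 64, 4, 4)  # (B, C, H, W)
print(mlp(x).shape)           # torch.Size([2, 16, 256]) -- 16 = 4 * 4 tokens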
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--num-workers', default=1, type=int, metavar='N', help='how many subprocesses to use for data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='... |
def fixmatch_augment_pool():
augs = [(AutoContrast, None, None), (Brightness, 0.9, 0.05), (Color, 0.9, 0.05), (Contrast, 0.9, 0.05), (Equalize, None, None), (Identity, None, None), (Posterize, 4, 4), (Rotate, 30, 0), (Sharpness, 0.9, 0.05), (ShearX, 0.3, 0), (ShearY, 0.3, 0), (Solarize, 256, 0), (TranslateX, 0.3, 0... |
def process_request(request):
password = request.GET['password']
if (password == 'password'):
return redirect('/login')
else:
return HttpResponse('ERROR') |
@pytest.mark.xfail(_IS_WASM, reason='cannot start subprocess')
def test_import_raises_warning():
code = '\n import pytest\n with pytest.warns(UserWarning, match="it is not needed to import"):\n from sklearn.experimental import enable_hist_gradient_boosting # noqa\n '
assert_run_python_script(textwrap.deden... |
class Price02(Benchmark):
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip(([(- 10.0)] * self.N), ([10.0] * self.N)))
self.global_optimum = [[0.0, 0.0]]
self.fglob = 0.9
def fun(self, x, *args):
self.nfev += 1
return (... |
class FPUModeCheckPlugin(Plugin):
def prepareTestCase(self, test):
from numpy.core._multiarray_tests import get_fpu_mode
def run(result):
old_mode = get_fpu_mode()
test.test(result)
new_mode = get_fpu_mode()
if (old_mode != new_mode):
t... |
@pytest.mark.core
def test_label_encoder_properties(full_pandas_dataset):
encoder = DatasetLabelEncoder()
dataset = create_dataset(full_pandas_dataset)
encoder.fit(dataset)
assert isinstance(encoder.query_id_encoder, LabelEncoder)
assert isinstance(encoder.item_id_encoder, LabelEncoder)
assert isinstance(en... |
def train_val(config, model, train_loaders, val_loaders, criterion):
if (config.train.optimizer.mode == 'adam'):
optimizer = optim.Adam(model.parameters(), lr=float(config.train.optimizer.adam.lr))
elif (config.train.optimizer.mode == 'adamw'):
optimizer = optim.AdamW(model.parameters(), lr=floa... |
def ensure_pandas(df: DataFrameLike, allow_collect_to_master: bool=False) -> PandasDataFrame:
if isinstance(df, PandasDataFrame):
return df
return spark_to_pandas(df, allow_collect_to_master) |
class MPNetPreTrainedModel(metaclass=DummyObject):
_backends = ['torch']
def __init__(self, *args, **kwargs):
requires_backends(self, ['torch']) |
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
if split_mlp_wi:
wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
wi = (wi_0, wi_1)
else:
wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
wo = params[f'{pre... |
def dump_all_thread_tracebacks(exclude_thread_ids=None, exclude_self=False):
if (exclude_thread_ids is None):
exclude_thread_ids = set()
from returnn.util.better_exchook import print_tb
import threading
if exclude_self:
exclude_thread_ids = set((list(exclude_thread_ids) + [threading.curr... |
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))
if knn:
(_, idx) = knn_point(nsample, xyz, new_xyz)
else:
(idx, pts_cnt) = query_ball_point(radius, nsample, xyz, new_xyz)
grouped_xyz = gr... |
def handle_ddp_subprocess():
def main_decorator(main_func):
        @functools.wraps(main_func)
def new_main(*args, **kwargs):
parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
has_parent = (parent_cwd is not None)
has_rank = get_has_ddp_rank()
assert (has_parent ... |
class DataLoader():
def __init__(self, doc, batch_size, args, vocab=None, evaluation=False, conll_only=False, skip=None):
self.batch_size = batch_size
self.args = args
self.eval = evaluation
self.shuffled = (not self.eval)
self.doc = doc
data = self.load_doc(self.doc)... |
def test_signature_vararg():
mod = ast.parse('\ndef func(x, *y, z=None):\n ...\n')
node = mod.body[0]
assert (dosig(node) == 'x, *y, z=None') |
def import_user_module(user_dir: str):
from mmf.common.registry import registry
from mmf.utils.general import get_absolute_path
logger = logging.getLogger(__name__)
if user_dir:
if registry.get('__mmf_user_dir_imported__', no_warning=True):
logger.info(f'User dir {user_dir} already i... |
def test_fortran_frontend_sign1():
test_string = '\n PROGRAM sign1_test\n implicit none\n double precision d(3,4,5)\n CALL sign1_test_function(d)\n end\n\n SUBROUTINE sign1_test_function(d)\n ... |
class SparseFFT_Teacher():
def __init__(self, N, noise_var):
self.t = np.linspace(((- 2) * np.pi), (2 * np.pi), N, endpoint=False)
self.channel = GaussianChannel(var=noise_var)
def sample(self, seed=None):
        if (seed is not None):  # 'if seed:' would silently skip seeding when seed == 0
np.random.seed(seed)
x = (np.cos(self.t) + np.sin((2 ... |
def block_reduction_a(input):
if (K.image_dim_ordering() == 'th'):
channel_axis = 1
else:
channel_axis = (- 1)
branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2, 2), border_mode='valid')
branch_1 = conv2d_bn(input, 192, 1, 1)
branch_1 = conv2d_bn(branch_1, 224, 3, 3)
branch_1 =... |
@pytest.mark.parametrize('flatlist_as_rvec', [False, True])
def test_NumpyArray(flatlist_as_rvec):
array = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3]), parameters={'some': 'stuff', 'other': [1, 2, 'three']})
layout = array
generator = ak._connect.cling.togenerator(layout.form, flatlist_as_rvec=flatlist_as_rve... |
def create(prefix, template):
from os.path import isfile
from subprocess import check_call
from sys import executable
from openfl.interface.cli_helper import print_tree
from openfl.interface.cli_helper import OPENFL_USERDIR
if (not OPENFL_USERDIR.exists()):
OPENFL_USERDIR.mkdir()
pre... |
class hmr_head(nn.Module):
def __init__(self, num_input_features, smpl_mean_params=SMPL_MEAN_PARAMS):
super(hmr_head, self).__init__()
npose = (24 * 6)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.fc1 = nn.Linear(((num_input_features + npose) + 13), 1024)
self.drop1 = nn.Dro... |
def KneeSurgery_dataset(args=None):
dataset = Dataset(name='KneeSurgery', path='preprocess/MIMIC_Datasets/KneeSurgery/vec_knee_surgery.p', max_length=20000, args=args)
set_balanced_pos_weight(dataset)
dataset.test_data = dataset.test_data.mock(n=5000)
return dataset |
class ExponentialGrowthGroupFunctor(AbstractGrowthGroupFunctor):
_functor_name = 'ExponentialGrowthGroup'
def __init__(self, var):
from sage.categories.monoids import Monoids
super().__init__(var, Monoids())
def _apply_functor(self, base):
return ExponentialGrowthGroup(base, self.var... |
def calc_rotation_diff(q, q00):
rotation_dot = np.dot(quaternion.as_float_array(q00), quaternion.as_float_array(q))
rotation_dot_abs = np.abs(rotation_dot)
try:
error_rotation_rad = (2 * np.arccos(rotation_dot_abs))
except:
return 0.0
error_rotation_rad = (2 * np.arccos(rotation_dot_... |
class TestModelNumerics(QuantizationTestCase):
def test_float_quant_compare_per_tensor(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
torch.manual_seed(42)
my_model = ModelMultipleOps().to(torch.float32)
my_model... |
def test_keepdims_mask3():
content = ak.contents.NumpyArray(np.arange(((2 * 3) * 5), dtype=np.int64))
regular = ak.contents.RegularArray(content, 5, zeros_length=0)
listoffset = regular.to_ListOffsetArray64(False)
regular_regular = ak.contents.RegularArray(regular, 3, zeros_length=0)
listoffset_regu... |
class DynamicCUB(data.Dataset):
def __init__(self, **kwargs):
super(DynamicCUB, self).__init__()
self.crpModeChoices = ['train', 'test', 'clean', 'mixed']
self.tsfrmModeChoices = ['train', 'eval']
self.severityChoices = [1, 2, 3, 4, 5]
self.crpMode = kwargs['crpMode']
... |
def repr_errors(res, estimator=None, method: Optional[str]=None) -> str:
if (method is None):
if hasattr(estimator, '__init__'):
method = '__init__'
elif (estimator is None):
raise ValueError('At least one of estimator, method should be provided')
else:
ra... |
class GaussianDropout(Layer):
_gaussiandropout_support
def __init__(self, rate, **kwargs):
super(GaussianDropout, self).__init__(**kwargs)
self.supports_masking = True
self.rate = rate
def call(self, inputs, training=None):
if (0 < self.rate < 1):
def noised():
... |
def instantiate_sampler(args, device):
return sampling.Sampler(al_type=args.al_type, p_samples=args.p_samples, p_init_samples=args.p_init_samples, device=device, task=args.task) |
def train_collate_gcn_mask_head(batch):
(imgs, masks, pids, _, pathes, img_heads, img_legs) = zip(*batch)
pids = torch.tensor(pids, dtype=torch.int64)
return (torch.stack(imgs, dim=0), pids, pathes, torch.cat(masks, dim=0), torch.stack(img_heads, dim=0), torch.stack(img_legs, dim=0)) |
def test_load_metadata():
default_clipid = 'airport-barcelona-0-0-a'
dataset = tau2020uas_mobile.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
assert (clip.split == 'development.train')
assert (clip.identifier == 'barcelona-0')
assert (clip.city == 'barcelona')
assert (clip.sou... |
def reload_config(FLAGS):
    if (FLAGS.reload_model != ''):  # 'is not' tests identity; use != for string comparison
with open(('%s/%s' % (os.path.dirname(FLAGS.reload_model), 'config.json'))) as data_file:
config_dict = json.load(data_file)
for (key, value) in config_dict.items():
attr_remove = ['gpu', 'run_name', 'log_dir', 'n_s... |
class OnnxNode(BaseNode):
def __init__(self, node):
info = dict()
info['name'] = node.output[0]
info['op_type'] = node.op_type
info['attrs'] = [(attr.name, translate_onnx(attr.name, convert_onnx_attribute_proto(attr))) for attr in node.attribute]
info['inputs'] = node.input
... |
class Parser(utils.Parser):
dataset: str = 'halfcheetah-medium-expert-v2'
config: str = 'config.offline' |
def test_get_dataset_name_non_assin():
assert (loader.get_dataset_name('rerelem', 'english') == 'ruanchaves/rerelem_por_Latn_to_eng_Latn') |
def NMSE_cuda(x, x_hat):
x = x.contiguous().view(len(x), (- 1))
x_hat = x_hat.contiguous().view(len(x_hat), (- 1))
power = torch.sum((abs(x) ** 2), dim=1)
mse = (torch.sum((abs((x - x_hat)) ** 2), dim=1) / power)
return mse |
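`NMSE_cuda` returns one normalised MSE per sample rather than a scalar, and `abs(...) ** 2` makes it valid for complex inputs as well. A usage sketch with our own shapes:
import torch

x = torch.randn(4, 2, 16, 16)
x_hat = x + 0.01 * torch.randn_like(x)
print(NMSE_cuda(x, x_hat).shape)  # torch.Size([4]), one NMSE per sample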
def cvar_func(tau, risk_kwargs):
alpha = risk_kwargs['alpha']
if (tau < alpha):
return (tau / alpha)
else:
return 1.0 |
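This is the CVaR distortion weight `g(tau) = min(tau / alpha, 1)`; a few worked values with `alpha = 0.25`:
risk_kwargs = {'alpha': 0.25}
print(cvar_func(0.1, risk_kwargs))   # 0.4
print(cvar_func(0.25, risk_kwargs))  # 1.0
print(cvar_func(0.9, risk_kwargs))   # 1.0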
def register_functions(root_module):
module = root_module
module.add_function('GetWildcardMatches', 'std::string', [param('std::string const &', 'configPath'), param('std::string const &', 'matchedPath'), param('std::string const &', 'wildcardSeparator', default_value='" "')])
module.add_function('isNaN', '... |
def xkcd(n=''):
import contextlib
import json
from sage.misc.html import html
from ssl import create_default_context as default_context
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
data = None
if (not n):
url = '
else:
url = '
tr... |
def test_ArrayBuilder_append_numba5():
def f1(builder, x):
builder.append(x)
def f2(builder, i):
if ((i % 2) == 0):
return 3
else:
return None
def f3(builder, i):
builder.append(f2(builder, i))
builder = ak.highlevel.ArrayBuilder()
f1(builder, ... |
class TestConstants(object):
    def test_pi(self):
        assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
    def test_e(self):
        assert_allclose(ncu.e, 2.718281828459045, 1e-15)
    def test_euler_gamma(self):
        assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15) |
class LossWarmup(nn.Module):
def __init__(self):
super(LossWarmup, self).__init__()
self.loss_cb = CharbonnierLoss(1e-08)
self.loss_cs = nn.CosineSimilarity()
def forward(self, inp, gt, warmup1, warmup2):
loss = (self.loss_cb(warmup2, inp) + (self.loss_cb(warmup1, gt) + (1 - self... |
def takespread(sequence, num):
length = float(len(sequence))
for i in range(num):
(yield sequence[int(math.ceil(((i * length) / num)))]) |
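`takespread` yields `num` roughly evenly spaced elements, always starting from the first. For example:
import math  # required by takespread

print(list(takespread(list(range(10)), 4)))  # [0, 3, 5, 8]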
def get_position_from_periods(iteration, cumulative_periods):
for (i, period) in enumerate(cumulative_periods):
if (iteration < period):
return i
raise ValueError(f'Current iteration {iteration} exceeds cumulative_periods {cumulative_periods}') |
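With `cumulative_periods = [100, 300, 600]` the function maps an iteration to the index of the first period boundary it has not yet reached:
cumulative = [100, 300, 600]
print(get_position_from_periods(0, cumulative))    # 0
print(get_position_from_periods(150, cumulative))  # 1
print(get_position_from_periods(599, cumulative))  # 2
# get_position_from_periods(600, cumulative) raises ValueError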
class InteractiveLPProblem(SageObject):
def __init__(self, A, b, c, x='x', constraint_type='<=', variable_type='', problem_type='max', base_ring=None, is_primal=True, objective_constant_term=0):
super().__init__()
A = matrix(A)
b = vector(b)
c = vector(c)
if (base_ring is Non... |
def collate_fn_labels(sample_list):
tensor_list = [s['imp'] for s in sample_list]
batched_imp = torch.nn.utils.rnn.pad_sequence(tensor_list, batch_first=True, padding_value=PAD_IDX)
label_list = [s['label'] for s in sample_list]
batched_label = torch.stack(label_list, dim=0)
len_list = [s['len'] for... |
def digest(obj, algorithm='sha256'):
try:
stringified = json.dumps(obj, sort_keys=True, ensure_ascii=False).encode('utf8')
except TypeError:
raise ValueError('The supplied object is not JSON-serializable for calculating a hash.')
try:
hash_alg = getattr(hashlib, algorithm)
except... |
class SquadExample(object):
def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self... |
def remove_mlruns() -> None:
if os.path.isdir(mlruns_path):
shutil.rmtree(mlruns_path)
prev = '/'.join(mlruns_path.split('/')[:(- 2)])
prev = os.path.join(prev, 'mlruns')
if os.path.isdir(prev):
shutil.rmtree(prev)
os.makedirs(os.path.join(mlruns_path, '.trash'), exist_ok=True) |
class TensorflowRedundantArray(pm.SingleStateTransformation):
in_array = pm.PatternNode(nodes.AccessNode)
out_array = pm.PatternNode(nodes.AccessNode)
    @classmethod
    def expressions(cls):
return [sdutil.node_path_graph(cls.in_array, cls.out_array)]
def can_be_applied(self, graph, expr_index, sdfg, permissive=F... |
class GNN(torch.nn.Module):
def __init__(self, num_tasks, num_layer=5, emb_dim=300, gnn_type='gin', virtual_node=True, residual=False, drop_ratio=0.5, JK='last', graph_pooling='mean'):
super(GNN, self).__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
... |
('change_backend')
def set_backend(backend: ((str | bytes) | TensorBackend), custom_optimizer: (((str | bytes) | Optimizer) | None)=None, precision: ((str | bytes) | None)=None, default: bool=False) -> None:
_supported_precisions = ['32b', '64b']
backend_kwargs = {}
if precision:
if isinstance(preci... |
class GaussianMLPEncoder(StochasticEncoder, StochasticModule):
def __init__(self, embedding_spec, name='GaussianMLPEncoder', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.tanh, hidden_w_init=tf.initializers.glorot_uniform(seed=deterministic.get_tf_seed_stream()), hidden_b_init=tf.zeros_initializer(), output_nonl... |
class PublishMetadataTaskConfiguration(TaskConfiguration):
    @staticmethod
    def mode() -> str:
return 'publish metadata'
def tasks(self, config) -> List:
collect_misuses = CollectMisusesTask()
publish = PublishMetadataTask(config.checkouts_path, config.review_site_url, config.review_site_user, config.rev... |
class ChnSentiCorpLoader(Loader):
def __init__(self):
super().__init__()
def _load(self, path: str):
ds = DataSet()
with open(path, 'r', encoding='utf-8') as f:
f.readline()
for line in f:
line = line.strip()
tab_index = line.index(... |
class TrainingModule(torch.nn.Module):
    def __init__(self, embedder, generator, discriminator, criterion_list, metric_list, running_averages=None):
        super().__init__()
        running_averages = running_averages if (running_averages is not None) else {}  # avoid a shared mutable default
self.embedder = embedder
self.generator = generator
self.discriminator = discriminator
self.criterion_list = nn.... |
class bernoulli_gen(binom_gen):
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return ((p >= 0) & (p <= 1))
def _get_support(self, p):
return (self.a, self.b)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
... |
@registry.make_registry
class TargetCodeGenerator(object):
def get_generated_codeobjects(self) -> List[CodeObject]:
return []
    @staticmethod
    def cmake_options() -> List[str]:
return []
def preprocess(self, sdfg: SDFG) -> None:
pass
def has_initializer(self) -> bool:
return False
def has_finalizer... |
def compute_intrinsics(cameras, indices):
intrinsics = []
for i in indices:
j = (0 if (len(cameras.focal_length_xs) == 1) else i)
fx = cameras.focal_length_xs[j]
fy = cameras.focal_length_ys[j]
cx = cameras.principal_point_xs[j]
cy = cameras.principal_point_ys[j]
... |
class TensorProductsCategory(CovariantConstructionCategory):
_functor_category = 'TensorProducts'
def TensorProducts(self):
return self
def base(self):
return self.base_category().base() |
class SkewPartition(CombinatorialElement):
def __classcall_private__(cls, skp):
skp = [_Partitions(p) for p in skp]
if (skp not in SkewPartitions()):
raise ValueError(('invalid skew partition: %s' % skp))
return SkewPartitions()(skp)
def __init__(self, parent, skp):
C... |
def test_fillna_listarray_array():
content = ak.contents.NumpyArray(np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
starts = ak.index.Index64(np.array([0, 3, 4, 5, 8]))
stops = ak.index.Index64(np.array([3, 3, 6, 8, 9]))
listarray = ak.contents.ListArray(starts, stops, content)
value =... |
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument('experiments', metavar='EXP', type=str, nargs='+', help='Python modules to load the experiments from')
parser.add_argument('--list', action='store_const', const=True, default=False, help='List available experiment... |
def ref_clip_by_value(x, min_, max_):
if np.isscalar(min_):
min_ = (min_ * np.ones(x.shape))
min_idx = np.where((x < min_))
x[min_idx] = min_[min_idx]
if np.isscalar(max_):
max_ = (max_ * np.ones(x.shape))
max_idx = np.where((x > max_))
x[max_idx] = max_[max_idx]
return x |
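Note that `ref_clip_by_value` mutates its argument in place; for scalar bounds it should agree with `np.clip`. A quick check with our own values:
import numpy as np

x = np.array([[-2.0, 0.5], [3.0, 7.0]])
out = ref_clip_by_value(x.copy(), 0.0, 4.0)  # copy, since the input is modified in place
assert np.array_equal(out, np.clip(x, 0.0, 4.0))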
class IndirectReference(collections.namedtuple('IndirectReferenceTuple', ['object_id', 'generation'])):
def __str__(self):
return ('%s %s R' % self)
def __bytes__(self):
return self.__str__().encode('us-ascii')
def __eq__(self, other):
return ((other.__class__ is self.__class__) and ... |
def process_ijc(paths, short_name):
base_input_path = os.path.join(paths['NERBASE'], short_name)
base_output_path = paths['NER_DATA_DIR']
test_files = [os.path.join(base_input_path, 'test-data-hindi.txt')]
test_csv_file = os.path.join(base_output_path, (short_name + '.test.csv'))
print(('Converting ... |
@test_utils.test(arch=[ti.vulkan])
def test_vulkan_cgraph_short():
a = ti.ndarray(ti.u8, shape=16)
c = 2
    @ti.kernel
    def test(a: ti.types.ndarray(), c: ti.u8):
for i in a:
a[i] = (i + c)
sym_a = ti.graph.Arg(ti.graph.ArgKind.NDARRAY, 'a', ti.u8, ndim=1)
sym_c = ti.graph.Arg(ti.graph.ArgKind.SCALA... |
def distribute_bn(model, world_size, reduce=False):
for (bn_name, bn_buf) in unwrap_model(model).named_buffers(recurse=True):
if (('running_mean' in bn_name) or ('running_var' in bn_name)):
if reduce:
torch.distributed.all_reduce(bn_buf, op=dist.ReduceOp.SUM)
bn_b... |
def check_url(url):
try:
response = requests.head(url)
if ((response.status_code == 404) or (response.status_code > 499)):
return False
else:
return True
except requests.ConnectionError:
return False |
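`requests.head` is called here without a timeout, so a stalled server can hang the check indefinitely. A hedged variant (our addition, not in the source) bounds the request and catches all request errors:
import requests

def check_url_with_timeout(url, timeout=5.0):
    # Same policy as check_url: 404 and all 5xx responses count as dead links.
    try:
        response = requests.head(url, timeout=timeout)
        return not (response.status_code == 404 or response.status_code > 499)
    except requests.RequestException:
        return False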
def calculate_objective_parallel(objs: List[OptimizationFunction], param: Parametrization) -> List[float]:
def calculate_objective(obj: OptimizationFunction) -> float:
return obj.calculate_objective_function(param)
if (not objs):
return []
elif (len(objs) == 1):
return [calculate_obj... |
def test(img_dir, split_test, split_name, model, batch_size, img_size, crop_size):
since = time.time()
normalizer = [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]
data_transforms = {split_name: transforms.Compose([transforms.Resize(img_size), transforms.CenterCrop(crop_size), transforms.ToTensor(), transfor... |
def getTimeUnits(id_extra):
    # Values appear to be conversion factors to picoseconds: ps = 1, ns = 1e3, ms = 1e9, s = 1e12.
    return dcc.Dropdown(id=('time_units_' + id_extra), options=[{'label': 's', 'value': 1000000000000.0}, {'label': 'ms', 'value': 1000000000.0}, {'label': 'ns', 'value': 1000.0}, {'label': 'ps', 'value': 1}], value=1) |
@DROPOUT_LAYERS.register_module()
class Dropout(nn.Dropout):
def __init__(self, drop_prob: float=0.5, inplace: bool=False):
super().__init__(p=drop_prob, inplace=inplace) |
@compare_numpy_output(check_dtype=True)
def test_ufunc_modf_f(A: dace.float32[10]):
    (Q, R) = np.modf(A)  # np.modfd is not a NumPy function
    return (Q, R) |
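`np.modf` splits each element into its fractional and integral parts, both keeping the sign of the input:
import numpy as np

Q, R = np.modf(np.array([1.5, -2.25, 3.0], dtype=np.float32))
print(Q)  # [ 0.5  -0.25  0.  ]
print(R)  # [ 1.   -2.    3.  ]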
class TanhLRScheduler(Scheduler):
def __init__(self, optimizer: torch.optim.Optimizer, t_initial: int, lb: float=(- 6.0), ub: float=4.0, t_mul: float=1.0, lr_min: float=0.0, decay_rate: float=1.0, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, cycle_limit=0, t_in_epochs=True, noise_range_t=None, noise_pct=0.67,... |
def static_parameters_union(sp_1: dict[(str, Any)], sp_2: dict[(str, Any)]) -> list[dict[(str, Any)]]:
full_static_parameters = (_static_parameters_union(sp_1, sp_2), _static_parameters_union(sp_2, sp_1))
return [static_parameter for static_parameter in full_static_parameters if static_parameter] |
def sort_action_list(action_list):
    return sorted(action_list, key=(lambda elem: int(elem[1:]))) |
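The key parses the integer after the one-character prefix, so the ordering is numeric rather than lexicographic:
print(sort_action_list(['a10', 'a2', 'a1']))  # ['a1', 'a2', 'a10'], not ['a1', 'a10', 'a2']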
class DatasetFolderJustY(DatasetFolder):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
def __getitem__(self, index):
(_, target) = self.samples[index]
if (self.target_transform is not None):
target = self.target_transform(target)
return target |
def cons_batch_graph(graphs, word_vocab):
num_nodes = max([len(g['nodes']) for g in graphs])
num_edges = max([len(g['edges']) for g in graphs])
batch_edges = []
batch_node2edge = []
batch_edge2node = []
batch_node_num = []
batch_node_index = []
for (example_id, g) in enumerate(graphs):
... |
class SimpleImputer(BaseEstimator):
def __init__(self, missing_values=np.nan, strategy='mean', fill_value=None, verbose=0, copy=True):
self.missing_values = missing_values
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy |
def test_run_context_pairs(line_graph):
batch_size = 4
sampler = UnsupervisedSampler(G=line_graph, length=2, number_of_walks=2)
batches = sampler.run(batch_size)
grouped_by_target = defaultdict(list)
for (ids, labels) in batches:
for ((target, context), label) in zip(ids, labels):
... |
def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
fan = _calculate_correct_fan(tensor, mode)
gain = calculate_gain(nonlinearity, a)
std = (gain / math.sqrt(fan))
with torch.no_grad():
return tensor.normal_(0, std) |
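This mirrors `torch.nn.init.kaiming_normal_`: with `fan_in = 128` and the leaky-ReLU gain `sqrt(2)`, the sampled std is about `sqrt(2 / 128) ≈ 0.125`. A usage sketch (assumes the private helpers `_calculate_correct_fan` and `calculate_gain` from `torch.nn.init` are in scope, as in the snippet):
import torch
import torch.nn as nn

layer = nn.Linear(128, 64)
kaiming_normal_(layer.weight, mode='fan_in', nonlinearity='leaky_relu')
print(layer.weight.std())  # roughly 0.125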
def monitor_largest_singular_values(dis, dst):
    @chainer.training.make_extension()
def evaluation(trainer=None):
def _l2normalize(v, eps=1e-12):
return (v / (((v ** 2).sum() ** 0.5) + eps))
xp = dis.xp
links = [[name, link] for (name, link) in sorted(dis.namedlinks())]
sigmas = []
... |
class H_Sigmoid(nn.Module):
def forward(self, x):
out = (F.relu6((x + 3), inplace=True) / 6)
return out |
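The hard sigmoid `relu6(x + 3) / 6` is 0 below -3, 1 above 3, and linear in between (assumes `torch.nn.functional as F` is imported as in the snippet):
import torch

h_sig = H_Sigmoid()
print(h_sig(torch.tensor([-4.0, -3.0, 0.0, 3.0, 4.0])))
# tensor([0.0000, 0.0000, 0.5000, 1.0000, 1.0000])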