code stringlengths 101 5.91M |
|---|
def make_dataset(dir, class_to_idx, extensions):
images = []
dir = os.path.expanduser(dir)
for target in sorted(class_to_idx.keys()):
d = os.path.join(dir, target)
if (not os.path.isdir(d)):
continue
for (root, _, fnames) in sorted(os.walk(d)):
for fname in so... |
def do_structure(cfg):
    """Build the model described by ``cfg`` and log its layer structure."""
    model = build_model(cfg)
    summary = 'Model Structure:\n' + str(model)
    logger.info(summary)
class NaiveAttention(nn.Module):
def __init__(self, dim, activation_fn='relu'):
super().__init__()
self.attention = nn.Sequential(nn.Linear(dim, dim), _get_activation_fn(activation_fn)(), nn.Linear(dim, 1))
def forward(self, inputs):
scores = self.attention(inputs)
output = torch... |
def conv2d(features: int, kernel_size: int, stride: Optional[int]=None, padding: Union[(str, Tuple[(int, int)])]=0, dilation: Optional[int]=None, groups: int=1, bias: bool=False, dtype: Dtype=jnp.float32, precision: Any=None, name: Optional[str]=None, kernel_init: Callable[([PRNGKey, Shape, Dtype], Array)]=default_kern... |
('basic')
class BasicTextFieldEmbedder(TextFieldEmbedder):
def __init__(self, token_embedders: Dict[(str, TokenEmbedder)], embedder_to_indexer_map: Dict[(str, List[str])]=None, allow_unmatched_keys: bool=False) -> None:
super(BasicTextFieldEmbedder, self).__init__()
self._token_embedders = token_emb... |
def ref_average_pooling_3d(x, kernel, stride, ignore_border, pad, including_pad):
y = []
for xx in x.reshape((((- 1),) + x.shape[(- 4):])):
if (xx.ndim == 3):
xx = xx[np.newaxis]
y += [refs.pooling_3d(xx, 'average', kernel, stride, pad, ignore_border, including_pad)[np.newaxis]]
... |
def parse_monitor_bd(raw_data, archlib):
    """Decode monitor records from ``raw_data``.

    Delegates to ``parse_fixed_length_items`` using the fixed-length record
    layout declared by ``archlib.BDProfileFormat``.
    """
    record_format = archlib.BDProfileFormat
    return parse_fixed_length_items(raw_data, record_format)
class BlockBootstrap(BaseCrossValidator):
def __init__(self, n_resamplings: int=30, length: Optional[int]=None, n_blocks: Optional[int]=None, overlapping: bool=False, random_state: Optional[Union[(int, RandomState)]]=None) -> None:
self.n_resamplings = n_resamplings
self.length = length
self... |
def _dropout(raw, input, p=0.5, training=False, inplace=False):
x = raw(input, p, training, inplace)
bottom_blobs = [log.blobs(input)]
layer_name = log.add_layer(name='dropout')
top_blobs = log.add_blobs([x], name=bottom_blobs[0], with_num=False)
layer = caffe_net.Layer_param(name=layer_name, type='... |
def resume_checkpoint(model, checkpoint_path):
other_state = {}
resume_epoch = None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
if (isinstance(checkpoint, dict) and ('state_dict' in checkpoint)):
new_state_dict = OrderedDict()
... |
def main():
parser = ArgumentParser(description=__doc__, formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--version', action='version', version=('%(prog)s ' + sfepy.__version__))
parser.add_argument('-a', '--app', action='store', dest='app', choices=['bvp', 'homogen', 'bvp-mM', 'evp', 'phon... |
def test_change_timeline():
tl1 = Timeline()
tl2 = Timeline()
ENTITY_NAME = 'foo'
foo = Foo(ENTITY_NAME, tl1)
assert (foo.timeline == tl1)
assert (tl1.get_entity_by_name(ENTITY_NAME) == foo)
assert (tl2.get_entity_by_name(ENTITY_NAME) is None)
foo.change_timeline(tl2)
assert (foo.tim... |
def _worker_init_fn(worker_id, num_workers, rank, seed):
worker_seed = (((num_workers * rank) + worker_id) + seed)
np.random.seed(worker_seed)
random.seed(worker_seed) |
class LossBinaryCrossEntropy(LossFunction):
    """Binary cross-entropy loss wrapping a core object obtained via ``bb``."""

    def __init__(self, dtype=bb.DType.FP32):
        # Look up and instantiate the native core implementation for ``dtype``.
        core = bb.search_core_object('LossBinaryCrossEntropy', [dtype]).create()
        super().__init__(core_loss=core)
def assign_cwes_to_cves(df_cve: pd.DataFrame):
df_cwes = extract_cwe()
cf.logger.info('Adding CWE category to CVE records...')
df_cwes_class = df_cve[['cve_id', 'problemtype_json']].copy()
df_cwes_class['cwe_id'] = add_cwe_class(df_cwes_class['problemtype_json'].tolist())
df_cwes_class = df_cwes_cla... |
class LMTrainer():
def __init__(self, cf):
self.cf = cf
from transformers import logging as trfm_logging
self.logger = cf.logger
self.log = cf.logger.log
trfm_logging.set_verbosity_error()
_logger
def train(self):
self.d = d = Sequence((cf := self.cf)).init()
... |
def make_nested_sdfg_fpga(unique_names):
n = dace.symbol('n')
m = dace.symbol('m')
sdfg = dace.SDFG('two_vecAdd')
state = sdfg.add_state('state')
sdfg_name = 'vecAdd'
to_nest = make_vecAdd_sdfg(sdfg_name)
sdfg.add_array('x', [n], dace.float32)
sdfg.add_array('y', [n], dace.float32)
s... |
class Partition5(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[20]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[21]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[22]']
TENSORS = []
def __init__(self, ... |
def show_boxes_from_python_data(img, dets, classes, output_img_path, scale=1.0):
plt.cla()
plt.axis('off')
plt.imshow(img)
for (cls_idx, cls_name) in enumerate(classes):
cls_dets = dets[cls_idx]
for det in cls_dets:
bbox = (det[:4] * scale)
color = (rand(), rand()... |
def exceptions_as_analysis_errors(project_root):
try:
(yield)
except _SuspendExecution:
pass
except AnalysisError:
raise
except Exception as ex:
logger.debug("An error occured during analysis (could be a problem with the user's code):", exc_info=ex)
if isinstance(... |
_utils.test()
def test_augassign():
def foo(x: ti.i32, y: ti.i32, a: ti.template(), b: ti.template()):
for i in a:
a[i] = x
a[0] += y
a[1] -= y
a[2] *= y
a[3] //= y
a[4] %= y
a[5] **= y
a[6] <<= y
a[7] >>= y
a[8] |= y
... |
class GPT2Config(PretrainedConfig):
pretrained_config_archive_map = GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = 'gpt2'
def __init__(self, vocab_size=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-05, initiali... |
def get_concat_2levelmel_model():
mel = AugmentMelSTFT(n_mels=128, sr=32000, win_length=800, hopsize=320, n_fft=1024, freqm=48, timem=192, htk=False, fmin=0.0, fmax=None, norm=1, fmin_aug_range=10, fmax_aug_range=2000)
net = get_model_passt(arch='passt_s_swa_p16_128_ap476')
model = PasstBasicWrapper(mel=mel... |
def test_conditionally_nested_class(simple_module, tracer_mock):
adapter = BranchCoverageInstrumentation(tracer_mock)
transformer = InstrumentationTransformer(tracer_mock, [adapter])
simple_module.conditionally_nested_class.__code__ = transformer.instrument_module(simple_module.conditionally_nested_class.__... |
def pose_check_valid(kp_array):
    """Check the hip and shoulder keypoints of ``kp_array``.

    Delegates to ``check_keypoints_present`` for the four named joints.
    """
    named_kp = give_name_to_keypoints(kp_array)
    required = ['Rhip', 'Lhip', 'Lsho', 'Rsho']
    return check_keypoints_present(named_kp, required)
def main():
tracker_dir = os.path.join(args.tracker_path, args.dataset)
trackers = glob(os.path.join(args.tracker_path, args.dataset, (args.tracker_prefix + '*')))
trackers = [x.split('/')[(- 1)] for x in trackers]
assert (len(trackers) > 0)
args.num = min(args.num, len(trackers))
root = os.path... |
def evaluate(model, device, loader, args):
model.eval()
correct = 0
eval_pred_seg = []
eval_true_seg = []
eval_label_seg = []
for data in loader:
data = data.to(device)
if (args.class_choice is not None):
labels = (data.y - data.y.min())
else:
labe... |
def inheritdocstrings(cls):
for (name, func) in vars(cls).items():
if (isinstance(func, types.FunctionType) and (not func.__doc__)):
for parent in cls.__bases__:
parfunc = getattr(parent, name, None)
if (parfunc and getattr(parfunc, '__doc__', None)):
... |
class DayOfYear(TimeFeature):
    """Encode day-of-year as a value centred around zero (roughly [-0.5, 0.5])."""

    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        # dayofyear is 1-based; rescale by 365 then shift by -0.5.
        scaled = (index.dayofyear - 1) / 365.0
        return scaled - 0.5
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
if (ignore is None):
ignore = []
elif (not isinstance(ignore, list)):
ignore = [ignore]
name_width = 0
for (name, mod) in model.named_modules():
if (not hasattr(mod, 'weight')):
continue
... |
class FieldPropagationBatchTest(tf.test.TestCase):
def test(self):
for units in DIMS:
batch = random_gaussian_batch(batch_size=units, units=units)
rdlayer = RMNumpy(units=units)
fields = rdlayer.propagate(batch)
inverse_fields = rdlayer.inverse_propagate(rdlay... |
def add_single_scale_rpn_losses(model):
model.net.SpatialNarrowAs(['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32')
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs([(('rpn_bbox_' + key) + '_wide'), 'rpn_bbox_pred'], ('rpn_bbox_' + key))
loss_rpn_... |
def accuracy(logits, y):
    """Mean 0/1 accuracy of argmax predictions from ``logits`` against labels ``y``."""
    predictions = torch.max(logits, 1)[1]
    correct = (predictions == y).float()
    return correct.mean()
def render_row(pr):
    """Print one changelog bullet for the pull-request dict ``pr`` in reST form."""
    title = pr['title'].replace('`', '``')
    pr_ref = f"(:pr:`{pr['number']}`)"
    user_ref = f":user:`{pr['user']['login']}`"
    print('*', title, pr_ref, user_ref)
def kaldiFeatLoader(kaldi_mfcc_file_index_list, trans_dict):
for list_path in kaldi_mfcc_file_index_list:
print(list_path)
with open(list_path) as f:
mfcc_lines = []
raw_mfcc_id = list_path.split('/')[(- 1)]
for (i, line) in enumerate(f):
line = li... |
def mae_ast_local(ckpt, *args, **kwargs):
    """Create an upstream expert from the local checkpoint path ``ckpt``.

    Extra positional/keyword arguments are forwarded to ``_UpstreamExpert``.
    """
    # Fail early on a missing path (NOTE: stripped under ``python -O``).
    checkpoint_exists = os.path.isfile(ckpt)
    assert checkpoint_exists
    return _UpstreamExpert(ckpt, *args, **kwargs)
class ModelSelector(Mean):
def __init__(self, metric: Union[(str, TSADMetric, ForecastMetric)], abs_score=False):
super().__init__(abs_score=abs_score)
if isinstance(metric, str):
(metric_cls, name) = metric.split('.', maxsplit=1)
metric_cls = {c.__name__: c for c in [Forecas... |
def conv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.size()[0]
(output_height, output_width) = output.size()[2:]
(kernel_height, kernel_width) = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
group... |
def rotateImgElement(data, key, deg):
try:
img = rotateImg(data[key], deg)
if (img is None):
raise Exception()
data[key] = img
except KeyError:
logger.error(('No image data (key: %s)' % key))
except:
logger.error(('Invalid image data (key: %s)' % key))
... |
def drop_audio_features(elem: Dict[str, Any]) -> Dict[str, Any]:
    """Remove the audio payload keys from ``elem`` in place and return it."""
    # ``pop`` with a default is a no-op when the key is absent.
    elem.pop('audio', None)
    elem.pop('audio_sample_rate', None)
    return elem
def slice_list(in_list, lens):
if isinstance(lens, int):
assert ((len(in_list) % lens) == 0)
lens = ([lens] * int((len(in_list) / lens)))
if (not isinstance(lens, list)):
raise TypeError('"indices" must be an integer or a list of integers')
elif (sum(lens) != len(in_list)):
r... |
def _maybe_filter_and_map_categories(dataset_name: str, dataset_dicts: List[Instance]) -> List[Instance]:
meta = MetadataCatalog.get(dataset_name)
category_id_map = meta.thing_dataset_id_to_contiguous_id
filtered_dataset_dicts = []
for dataset_dict in dataset_dicts:
anns = []
for ann in ... |
_dispatch
def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, orthogonalize=None):
    """Dispatch stub for ``idct``: marks ``x`` as dispatchable as an ndarray.

    Only ``x`` participates in dispatch; the remaining parameters mirror the
    real signature and are ignored here.
    """
    return (Dispatchable(x, np.ndarray),)
class infix_operator():
operators = {'add': {'left': '__add__', 'right': '__radd__'}, 'multiply': {'left': '__mul__', 'right': '__rmul__'}, 'or': {'left': '__or__', 'right': '__ror__'}}
def __init__(self, precedence):
self.precedence = precedence
def __call__(self, func):
left_meth = self.op... |
class Generator(nn.Module):
def __init__(self, img_size=64, nz=128, ngf=32, kg=4, nc=3, last_layer_large=True, last_layer_upsample=False, last_layer_stacked=True):
super(Generator, self).__init__()
self.img_size = img_size
self.nz = nz
self.ngf = ngf
self.kg = kg
self... |
.parametrize('a00', [float(i) for i in range(10)])
_utils.test(default_fp=ti.f32, fast_math=False)
def test_solve_2x2_f32(a00):
    """Exercise the 2x2 f32 solver with the parametrized matrix entry ``a00``."""
    _test_solve_2x2(ti.f32, a00)
class RunningAverageMeter(object):
def __init__(self, alpha=0.98):
self.reset()
self.alpha = alpha
def reset(self):
self.avg = 0.0
def update(self, val):
if (self.avg == 0.0):
self.avg = val
else:
self.avg = ((self.avg * self.alpha) + ((1 - sel... |
class SGC(nn.Module):
def __init__(self, n_features: int, n_classes: int, K: int=2, bias: bool=True, dropout: float=0, with_batch_norm: bool=False, cached: bool=False, add_self_loops: bool=True, do_cache_adj_prep: bool=True, do_normalize_adj_once: bool=True, do_use_sparse_tensor: bool=True, do_checkpoint: bool=Fals... |
def test_cache_return_values():
    """Distinct calls must not share a cached return value."""

    def square(value):
        return value * value

    first = square(5)
    second = square(6)
    assert (first == 25) and (second == 36)
def test_invalid_distributions():
iterator_mock = mock.MagicMock()
model = BoostLGBM(default_params={'num_trees': 1, 'random_state': 42}, freeze_defaults=True, optimization_search_space={'feature_fraction': Uniform(low=0.5, high=1.0), 'min_sum_hessian_in_leaf': Normal(low=1, high=2)})
params_tuner = OptunaT... |
def get_fid(fcd, batch_size, images1, images2, inception_images, real_activation, fake_activation, activations):
start_time = time.time()
act1 = get_inception_activations(batch_size, images1, inception_images, activations)
act2 = get_inception_activations(batch_size, images2, inception_images, activations)
... |
def diffxy(f, x, xtimes, y, ytimes):
    """Differentiate ``f``: ``xtimes`` times w.r.t. ``x``, then ``ytimes`` w.r.t. ``y``.

    ``f`` must expose a ``derivative(var)`` method returning a new object.
    """
    result = f
    for _ in range(xtimes):
        result = result.derivative(x)
    for _ in range(ytimes):
        result = result.derivative(y)
    return result
def test_arrow_union_dense():
a = pyarrow.UnionArray.from_dense(pyarrow.array([0, 1, 0, 0, 0, 1, 1], type=pyarrow.int8()), pyarrow.array([0, 0, 1, 2, 3, 1, 2], type=pyarrow.int32()), [pyarrow.array([0.0, 1.1, 2.2, 3.3]), pyarrow.array([True, True, False])])
assert (to_list(ak._connect.pyarrow.handle_arrow(a)) =... |
def validate_runtime_configs(configs: Dict[(str, Any)]):
assert ('fragment_length' in configs)
assert ('max_step' in configs)
assert ('num_eval_episodes' in configs)
assert ('num_threads' in configs)
assert ('num_env_per_thread' in configs)
assert ('num_eval_threads' in configs)
assert ('use... |
def get_entailment_id(config):
    """Return the label index whose name starts with 'entail', or -1 if none."""
    for label, idx in config.label2id.items():
        if label.lower().startswith('entail'):
            return idx
    # No entailment-like label found: warn and fall back to the sentinel -1.
    logger.warning('Could not identify entailment dimension from teacher config label2id. Setting to -1.')
    return -1
def train():
net.train()
epoch = 0
print('Loading Dataset...')
dataset = KITTITrainDataset(args.kitti_image_root, args.kitti_detection_root, SSJAugmentation(sst_dim, means))
epoch_size = (len(dataset) // args.batch_size)
print('Training SSJ on', dataset.dataset_name)
step_index = 0
batch... |
def test_fit_no_bootstrap(make_whas500):
whas500 = make_whas500(to_numeric=True)
forest = RandomSurvivalForest(n_estimators=10, bootstrap=False, random_state=2)
forest.fit(whas500.x, whas500.y)
pred = forest.predict(whas500.x)
expected_c = (0., 70030, 5119, 0, 14)
assert_cindex_almost_equal(whas... |
class CaseGenerator(BasePromptExecutor):
_input_keys = ['prim_toolkits', 'aux_toolkits', 'example_cases', 'risks']
def __init__(self, llm: BaseLanguageModel, stop_at: str=None, redteam: bool=True, num_gen_per_prompt: int=1, num_sample_risks: int=1, use_simple_tool_desc: bool=False):
super().__init__(llm... |
def loss_hinge_dis_elr_bcr(netD, netG, netC, x_l, z_rand, label, x_u):
with torch.no_grad():
x_fake = netG(z_rand, label).detach()
logits_c = netC(x_u).detach()
(_, l_fake) = torch.max(logits_c, 1)
d_real = netD(x=x_l, y=label, aug=True)
d_real_2 = netD(x=x_l, y=label, aug=True)
... |
def test_write_to_file(test_file_path: Path, agent: Agent):
    """Writing via ``file_ops.write_to_file`` must persist exactly the given text."""
    payload = 'This is new content.\n'
    file_ops.write_to_file(str(test_file_path), payload, agent=agent)
    with open(test_file_path, 'r', encoding='utf-8') as handle:
        written = handle.read()
    assert written == payload
def test_noisy_abstract_model():
nam = baselines.models.NoisyAbstractModel(landscape=FakeLandscape(name='FakeLandscape'))
assert (len(nam.cache) == 0)
fitness = nam.get_fitness(['ATC'])
assert (len(nam.cache) == 1)
assert (nam.get_fitness(['ATC']) == fitness)
nam = baselines.models.NoisyAbstract... |
def main():
(num_antenna_bs, num_elements_irs, num_user) = (4, 20, 4)
params_system = (num_antenna_bs, num_elements_irs, num_user)
Rician_factor = 10
location_user = None
num_samples = 1000
noise_power_db = (- 100)
Pt = 15
file_path = (((('./channel_data/channel' + str(params_system)) + ... |
def pytest_runtest_setup(item):
mark = _get_mark(item, 'xslow')
if (mark is not None):
try:
v = int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
v = False
if (not v):
pytest.skip('very slow test; set environment variable SCIPY_XSLOW=1 to run ... |
_module()
class SingleStageDetector(MMDET_SingleStageDetector):
def __init__(self, backbone, neck=None, bbox_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None):
super(MMDET_SingleStageDetector, self).__init__(init_cfg=init_cfg)
if pretrained:
warnings.warn('Depreca... |
class AdamWithWarmup(Adam):
def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup_updates=10000, warmup_init_lr=1e-07):
super().__init__(params, lr=warmup_init_lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.warmup_updates = warmup_updates
self.... |
def infer_types(code, symbols=None):
symbols = (symbols or {})
inferred_symbols = {}
if isinstance(code, str):
_dispatch(ast.parse(code), symbols, inferred_symbols)
elif isinstance(code, ast.AST):
_dispatch(code, symbols, inferred_symbols)
elif (isinstance(code, sympy.Basic) or isins... |
class MobileNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1.0):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
last_channel = 1280
interverted_residual_setting = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6,... |
def score_generation(anno_file, result_file):
    """Evaluate generated captions in ``result_file`` against ``anno_file``.

    Returns a deep copy of the evaluator's per-metric dict so callers cannot
    mutate the evaluator's internal state.
    """
    ground_truth = COCO(anno_file)
    predictions = ground_truth.loadRes(result_file)
    evaluator = COCOEvalCap(ground_truth, predictions)
    # Restrict evaluation to the images actually present in the results.
    evaluator.params['image_id'] = predictions.getImgIds()
    evaluator.evaluate()
    return copy.deepcopy(evaluator.eval)
class SwapGenerationFromArrangedResultEvaluator(BaseEvaluator):
def modify_commandline_options(parser, is_train):
return parser
def image_save_dir(self, nsteps):
return os.path.join(self.output_dir(), ('%s_%s' % (self.target_phase, nsteps)), 'images')
def create_webpage(self, nsteps):
... |
def work_on_fn(pass_cls):
    """Wrap a module-level pass class so it can also be applied to a bare function.

    Returns ``apply_pass``, which accepts either a ``tvm.IRModule`` (the pass
    is applied directly) or a ``tvm.relay.Function`` (wrapped into a temporary
    module, transformed, and the resulting 'main' function returned).

    Raises:
        NotImplementedError: if the input is neither an IRModule nor a Function.
    """

    def apply_pass(fn_or_mod):
        if isinstance(fn_or_mod, tvm.IRModule):
            return pass_cls()(fn_or_mod)
        if isinstance(fn_or_mod, tvm.relay.Function):
            return pass_cls()(tvm.IRModule({'main': fn_or_mod}))['main']
        # BUG FIX: the original raised ``NotImplemented`` (a constant, not an
        # exception class), which itself raises TypeError at runtime; raise the
        # intended exception type instead (typo "unsupporded" also fixed).
        raise NotImplementedError(f'unsupported type {type(fn_or_mod)}')

    return apply_pass
class AgentExecutorWithState(AgentExecutor):
def _call(self, inputs: Dict[(str, str)], run_manager: Optional[CallbackManagerForChainRun]=None) -> Dict[(str, Any)]:
name_to_tool_map = {tool.name: tool for tool in self.tools}
color_mapping = get_color_mapping([tool.name for tool in self.tools], exclud... |
def test_in_order_compound():
    """Tree reproduction must round-trip under the IN_ORDER_COMPOUND scheme."""
    check_reproduce_tree(transition_scheme=TransitionScheme.IN_ORDER_COMPOUND)
def load_pickle(filename):
    """Unpickle and return the object stored at ``filename``.

    The ``.pickle`` suffix is enforced via ``fix_filetype`` before opening.
    """
    path = fix_filetype(filename, '.pickle')
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def process_chunks(chunk):
    """Process every document id in ``chunk`` from the configured collection."""
    client = utils.init_client(MONGO_ARGS)
    collection = client[DB_NAME][READ_COL]
    for doc_id in chunk:
        document = collection.find_one({'_id': doc_id})
        process_mongo_doc(client, collection, document)
def record_cmdline(output_file):
cmdline = ' '.join(map(shlex.quote, sys.argv[1:]))
python_output_file = (output_file + '.py')
cmdline = ((((('"""' + 'AutoGenerated with:\n') + 'python -m autopipe.partition ') + cmdline) + '\n') + '"""')
if (sys.platform == 'win32'):
cmdline = ('r' + cmdline)
... |
class CopulaGANSynthesizer(CTGANSynthesizer):
_gaussian_normalizer_hyper_transformer = None
def __init__(self, metadata, enforce_min_max_values=True, enforce_rounding=True, locales=None, embedding_dim=128, generator_dim=(256, 256), discriminator_dim=(256, 256), generator_lr=0.0002, generator_decay=1e-06, discri... |
def register_hooks(var):
fn_dict = {}
def hook_cb(fn):
def register_grad(grad_input, grad_output):
fn_dict[fn] = grad_input
fn.register_hook(register_grad)
iter_graph(var.grad_fn, hook_cb)
def is_bad_grad(grad_output):
if (grad_output is None):
return Fals... |
def alexnet(pretrained=False, **kwargs):
    """Build an AlexNet model, optionally loading published weights.

    Args:
        pretrained: when True, download and load the weights from ``model_urls``.
        **kwargs: forwarded to the ``AlexNet`` constructor.
    """
    net = AlexNet(**kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['alexnet'])
        net.load_state_dict(state)
    return net
def load_alias_file(fname):
    """Return the first space-delimited row of the alias file ``fname``.

    The original implementation looped over the reader only to ``break`` on
    the first row and referenced the loop variable afterwards, which raised an
    opaque ``UnboundLocalError`` when the file was empty; an explicit
    ``ValueError`` is raised instead.

    Raises:
        ValueError: if the file contains no rows.
    """
    with open(fname, 'r') as f:
        reader = csv.reader(f, delimiter=' ')
        for row in reader:
            # Only the first row is needed.
            return row
    raise ValueError(f'alias file is empty: {fname}')
def gmof(x, sigma):
    """Robust penalty ``sigma^2 * x^2 / (sigma^2 + x^2)`` (Geman-McClure-style).

    Approaches ``x^2`` for small residuals and saturates at ``sigma^2`` for
    large ones.
    """
    x_sq = x ** 2
    sigma_sq = sigma ** 2
    return sigma_sq * x_sq / (sigma_sq + x_sq)
def load_obj(name, save_dir):
    """Load and return the pickled object stored at ``<save_dir>/<name>.pkl``."""
    # Strip any trailing separators before joining the path manually.
    base = save_dir.rstrip('\\/')
    obj_path = base + '/' + name + '.pkl'
    with open(obj_path, 'rb') as handle:
        return pk.load(handle)
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, fhidden=None, is_bias=True):
super().__init__()
self.is_bias = is_bias
self.learned_shortcut = (fin != fout)
self.fin = fin
self.fout = fout
if (fhidden is None):
self.fhidden = min(fin, fout)
... |
.parametrize('container', (CSR_CONTAINERS + [np.array]))
def test_perceptron_accuracy(container):
    """A deterministic Perceptron fit on ``X``/``y`` must score above 0.7."""
    wrapped = container(X)
    model = Perceptron(max_iter=100, tol=None, shuffle=False)
    model.fit(wrapped, y)
    score = model.score(wrapped, y)
    assert score > 0.7
class TransformerAlgoImplBase(ImplBase):
def predict(self, inpt: TorchTransformerInput) -> torch.Tensor:
...
_api
def update(self, batch: TorchTrajectoryMiniBatch, grad_step: int) -> Dict[(str, float)]:
return self.inner_update(batch, grad_step)
def inner_update(self, batch: TorchTraject... |
class LayoutLMv3ForTokenClassification(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable."""

    # Backends this class requires to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Delegates the backend availability check to ``requires_backends``.
        requires_backends(self, ['torch'])
def generate_embeddings_for_pooling(sequence_tensor, span_starts, span_ends):
span_starts = span_starts.unsqueeze((- 1))
span_ends = (span_ends - 1).unsqueeze((- 1))
span_widths = (span_ends - span_starts)
max_batch_span_width = (span_widths.max().item() + 1)
max_span_range_indices = util.get_range_... |
.parametrize('ty,num', sub_table)
_utils.test(arch=[ti.cpu, ti.cuda, ti.vulkan], debug=True)
def test_sub_no_overflow_i(capfd, ty, num):
if (not supports_overflow(ti.lang.impl.current_cfg().arch)):
return
capfd.readouterr()
def foo(num: ty) -> ty:
a = ty(num)
b = ty(((- num) + 1))
... |
()
('conf', type=click.Path(exists=True))
('output-dir', type=click.Path(exists=False))
('--local_rank', type=int, default=(- 1), help='This is automatically set by torch.distributed.launch.')
('--shared-filesystem', type=int, default=(- 1))
def main(conf, output_dir, local_rank, shared_filesystem):
conf: dict = lo... |
def generate_pyx_code_stage_factory(options, result):
    """Create the pipeline stage that generates .pyx code for a module.

    NOTE(review): the inner function's name is kept verbatim because pipeline
    machinery may introspect stage names — confirm before renaming.
    """
    def generate_pyx_code_stage(module_node):
        # Run code generation with the captured options, writing into result.
        module_node.process_implementation(options, result)
        # Surface the node's compilation source on the shared result object.
        result.compilation_source = module_node.compilation_source
        return result
    return generate_pyx_code_stage
def coordinate_ADAM(losses, indice, grad, hess, batch_size, mt_arr, vt_arr, real_modifier, lr, adam_epoch, beta1, beta2, proj):
for i in range(batch_size):
grad[i] = ((losses[((i * 2) + 1)] - losses[((i * 2) + 2)]) / 0.0002)
mt = mt_arr[indice]
mt = ((beta1 * mt) + ((1 - beta1) * grad))
mt_arr[i... |
def subregionify(item: NiftiImage, do_resolve=False, *args, **kwargs):
arr = item.obj.numpy()
wt_arr = arr.copy()
wt_arr[(wt_arr == 1.0)] = 1.0
wt_arr[(wt_arr == 2.0)] = 1.0
wt_arr[(wt_arr == 4.0)] = 1.0
tc_arr = arr.copy()
tc_arr[(tc_arr == 1.0)] = 1.0
tc_arr[(tc_arr == 2.0)] = 0.0
... |
def _nbool_correspond_all(u, v, w=None):
if ((u.dtype == v.dtype == bool) and (w is None)):
not_u = (~ u)
not_v = (~ v)
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
dtype = np.find_common_type([int],... |
def main() -> None:
args = docopt(__doc__)
repo = Repo('.')
assert (repo.bare == False)
hash = args['<hash>']
(this_commits, handle) = commits_since_previous(repo.commit(hash))
version = VERSION_REG.match(handle.message).group()
previous_commits: List[Commit] = []
while (handle is not No... |
def heatmap(data, row_labels, col_labels, ax=None, show_colorbar=True, slant_labels=True, xticksize=10, yticksize=10, cbar_kw={}, cbarlabel='', **kwargs):
if (not ax):
ax = plt.gca()
im = ax.imshow(data, **kwargs)
if show_colorbar:
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar... |
def ep_rule_condition1(memory_info: 'MemoryInfo', manager: 'MemoryManager', args: Arguments):
memory_indices = args['memory_indices']
reservation = args['reservation']
if ((memory_info.index in memory_indices) and (memory_info.state == 'ENTANGLED') and (memory_info.fidelity < reservation.fidelity)):
... |
class NullstrToNoneAction(argparse.Action):
    """argparse action that stores ``None`` when the value is blank/whitespace."""

    def __call__(self, parser, namespace, values, option_string=None):
        cleaned = values.strip()
        # An empty (or all-whitespace) string is treated as "not provided";
        # otherwise the ORIGINAL, unstripped value is stored.
        setattr(namespace, self.dest, None if cleaned == '' else values)
class FastGradientMethod(EvasionAttack):
attack_params = (EvasionAttack.attack_params + ['norm', 'eps', 'eps_step', 'targeted', 'num_random_init', 'batch_size', 'minimal', 'distribution'])
def __init__(self, classifier, norm=np.inf, eps=0.3, eps_step=0.1, targeted=False, num_random_init=0, batch_size=1, minimal... |
class TestMLS(object):
def test_mls_inputs(self):
assert_raises(ValueError, max_len_seq, 10, state=np.zeros(10))
assert_raises(ValueError, max_len_seq, 10, state=np.ones(3))
assert_raises(ValueError, max_len_seq, 10, length=(- 1))
assert_array_equal(max_len_seq(10, length=0)[0], [])
... |
class FactoredInference():
def __init__(self, domain, backend='numpy', structural_zeros={}, metric='L2', log=False, iters=1000, warm_start=False, elim_order=None):
self.domain = domain
self.backend = backend
self.metric = metric
self.log = log
self.iters = iters
self.... |
.parametrize('action_size', [3])
.parametrize('observation_shape', [(100,)])
.parametrize('mean', [0.0])
.parametrize('std', [0.1])
def test_normal_noise(action_size: int, observation_shape: Sequence[int], mean: float, std: float) -> None:
explorer = NormalNoise(mean, std)
ref_x = np.random.random((1, *observat... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.