code stringlengths 101 5.91M |
|---|
class SawyerPlateSlideBackV2Policy(Policy):
_fully_parsed
def _parse_obs(obs):
return {'hand_pos': obs[:3], 'puck_pos': obs[3:6], 'unused_info': obs[6:]}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({'delta_pos': np.arange(3), 'grab_effort': 3})
actio... |
class TestPPOContinuousBaseline(TfGraphTestCase):
.huge
def test_ppo_pendulum_continuous_baseline(self):
with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
env = GarageEnv(normalize(gym.make('InvertedDoublePendulum-v2')))
policy = GaussianMLPPolicy(env_spec=env.spec, ... |
class SwapAlign2Nat(nn.Module):
def __init__(self, lambda_val, pad_val=(- 6.0)):
super(SwapAlign2Nat, self).__init__()
self.lambda_val = lambda_val
self.pad_val = pad_val
def forward(self, X):
return swap_align2nat(X, self.lambda_val, self.pad_val)
def __repr__(self):
... |
class TestFmin(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::(- 1)]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(... |
def main(opt):
dataloader = create_dataloader(opt)
device = (torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu'))
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
inception_model = InceptionV3([block_idx])
inception_model.to(device)
inception_model.eval()
t... |
def create_float_feature(values):
    """Wrap an iterable of floats in a ``tf.train.Feature`` protobuf.

    Args:
        values: iterable of numbers; materialized into a ``FloatList``.

    Returns:
        A ``tf.train.Feature`` carrying the values as its ``float_list``.
    """
    float_list = tf.train.FloatList(value=list(values))
    return tf.train.Feature(float_list=float_list)
class RiemannianGradient(torch.autograd.Function):
c = 1
def forward(ctx, x):
ctx.save_for_backward(x)
return x
def backward(ctx, grad_output):
(x,) = ctx.saved_tensors
scale = ((1 - (RiemannianGradient.c * x.pow(2).sum((- 1), keepdim=True))).pow(2) / 4)
return (grad_... |
class MyScriptModule(torch.jit.ScriptModule):
    """TorchScript module exposing two scripted accessors for a ones tensor.

    NOTE(review): the source had bare ``.script_method`` lines where the
    decorators belong; restored here as ``@torch.jit.script_method``, the
    standard decorator for ``ScriptModule`` methods — confirm against the
    original repository.
    """

    def __init__(self, rank):
        super().__init__()
        # Plain tensor attribute (not a Parameter) of length ``rank``.
        self.a = torch.ones(rank)

    @torch.jit.script_method
    def forward(self) -> Tensor:
        return self.a

    @torch.jit.script_method
    def custom_func(self) -> Tensor:
        return self.a
class ConvBn3d(_ConvBnNd, nn.Conv3d):
_FLOAT_MODULE = nni.ConvBn3d
_FLOAT_CONV_MODULE = nn.Conv3d
_FLOAT_BN_MODULE = nn.BatchNorm3d
_FLOAT_RELU_MODULE = None
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=None, padding_mode='zeros', eps=1e-... |
def train_dataset(data_tr, batch_size):
data_tr = data_tr.astype(np.float32)
data_tr_coo = data_tr.tocoo()
n_items = data_tr_coo.shape[1]
indices = np.mat([data_tr_coo.row, data_tr_coo.col]).transpose()
sparse_data = tf.SparseTensor(indices, data_tr_coo.data, data_tr_coo.shape)
samples_tr = tf.d... |
def libc_ver():
    """Report the detected C library as a ``(lib, version)`` pair.

    Returns ``('glibc', version)`` when ``glibc_version_string`` finds a
    version, otherwise ``('', '')``.
    """
    version = glibc_version_string()
    if version is None:
        return ('', '')
    return ('glibc', version)
_utils.test(debug=True)
def test_adjoint_checkbit_needs_grad():
    """Taping with validation=True should attach an adjoint check-bit snode.

    NOTE(review): the line preceding this function looks like a mangled
    ``@test_utils.test(debug=True)`` decorator — confirm upstream.
    """
    # 0-d scalar field participating in autodiff.
    x = ti.field(float, shape=(), needs_grad=True)
    def test():
        x[None] = 1
    # validation=True turns on gradient-validation bookkeeping for the tape.
    with ti.ad.Tape(loss=x, validation=True):
        test()
    # The field's snode must now carry the adjoint check-bit.
    assert x.snode.ptr.has_adjoint_checkbit()
class KerasAgent(Agent):
def __init__(self, observation_space, action_space, filename):
self.observation_space = observation_space
self.action_space = action_space
self.filename = filename
def train(self, env, nb_steps):
try:
print('[train] Loading weights from {}'.fo... |
def loss(logits, labels):
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
concated = tf.concat([indices, labels], 1)
onehot_labels = tf.sparse_to_dense(concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
cross_entropy = tf.nn.... |
_PROCESSING.register_module(name=['SGFilter', 'savgol'])
class SGFilter():
def __init__(self, window_size=11, polyorder=2):
super(SGFilter, self).__init__()
self.window_size = window_size
self.polyorder = polyorder
def __call__(self, x=None):
if ((self.window_size % 2) == 0):
... |
_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
    """WMT En-De 'big' preset: Vaswani big config with 0.1 attention dropout.

    Fills ``attention_dropout`` (default 0.1) if absent, then delegates the
    rest of the configuration to ``transformer_vaswani_wmt_en_de_big``.
    """
    attn_dropout = getattr(args, 'attention_dropout', 0.1)
    args.attention_dropout = attn_dropout
    transformer_vaswani_wmt_en_de_big(args)
class GraphTokenVocab(TokenVocab):
_depth = (- 1)
def __init__(self, *args, **kwargs):
kwargs['placeholder_shape'] = [None, None, None]
super(GraphTokenVocab, self).__init__(*args, **kwargs)
return
def get_bilinear_discriminator(self, layer, token_weights, variable_scope=None, reuse=... |
def is_algebraic_value(a):
    """True iff *a* is a real arithmetic expression whose AST is algebraic."""
    if not is_arith(a):
        return False
    if not a.is_real():
        return False
    return _is_algebraic(a.ctx, a.as_ast())
def get_type_name(x: EntryBase):
ty = type(x)
if (ty in [BuiltInType]):
return x.type_name
elif (ty in [Alias, Handle, Enumeration, Structure, Union, Callback]):
return x.name.upper_camel_case
elif (ty in [BitField]):
return x.name.extend('flags').upper_camel_case
else:
... |
class PointFlow(nn.Module):
def __init__(self, args):
super(PointFlow, self).__init__()
self.input_dim = args.input_dim
self.zdim = args.zdim
self.use_latent_flow = args.use_latent_flow
self.use_deterministic_encoder = args.use_deterministic_encoder
self.prior_weight ... |
def _is_finite(t: TensorType) -> TensorType:
    """Element-wise finiteness mask for *t*.

    ``tf.math.is_finite`` already returns False for both NaN and +/-inf, so
    the original's extra ``logical_and(..., logical_not(is_nan(t)))`` was
    redundant; dropping it leaves behavior unchanged.
    """
    return tf.math.is_finite(t)
def test_ignore_warning():
def _warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
def _multiple_warning_function():
warnings.warn('deprecation warning', DeprecationWarning)
warnings.warn('deprecation warning')
assert_no_warnings(ignore_warnings(_warning_fun... |
class DialogFlowCXParser(Parser, ABC):
def __init__(self, config):
super().__init__(config)
self.flow_to_training_utts = None
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = config['cx_credential']
self.google_cloud_agent_path = f"projects/{config['project_id']}/locations/{config['loc... |
def register_Ns3U32TlvValue_methods(root_module, cls):
cls.add_constructor([param('ns3::U32TlvValue const &', 'arg0')])
cls.add_constructor([param('uint32_t', 'value')])
cls.add_constructor([])
cls.add_method('Copy', 'ns3::U32TlvValue *', [], is_const=True, is_virtual=True)
cls.add_method('Deseriali... |
def register_Ns3Ipv6Header_methods(root_module, cls):
cls.add_constructor([param('ns3::Ipv6Header const &', 'arg0')])
cls.add_constructor([])
cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
cls.add_method('DscpTypeToString', 'std::string', [param('ns... |
def test_download_archive_force(tmp_path, requests_mock, tarfile_path):
archive_url = '
requests_mock.get(archive_url, content=open(tarfile_path, 'rb').read(), status_code=200)
with pytest.raises(InvalidArchiveHost):
download(archive_url, tmp_path.joinpath('likelihoods'), force=False)
download(a... |
def save_checkpoint(args, trainer, epoch_itr, val_loss):
from fairseq import distributed_utils, meters
prev_best = getattr(save_checkpoint, 'best', val_loss)
if (val_loss is not None):
best_function = (max if args.maximize_best_checkpoint_metric else min)
save_checkpoint.best = best_function... |
def gscluster_conv(in_channels, out_channels, kernel_size, stride=1, bias=True):
    # Conv-style factory, presumably kept for API symmetry with other builders.
    # NOTE(review): every parameter is ignored — GSCConv() is constructed with
    # its own defaults; confirm this is intentional in the original repository.
    return GSCConv()
def point_unpool(input_tensor, index_tensor, output_shape, elem_count=8):
    """Scatter *input_tensor* through each of *elem_count* index columns, then sum.

    Column ``i`` of *index_tensor* supplies the scatter targets for one
    ``tf.scatter_nd`` pass; the per-column results are accumulated with
    ``tf.add_n`` into a tensor of *output_shape*.
    """
    scattered = [
        tf.scatter_nd(
            tf.slice(index_tensor, [0, i], [-1, 1]),
            input_tensor,
            output_shape,
        )
        for i in range(elem_count)
    ]
    return tf.add_n(scattered)
def validate_and_save(cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool) -> Tuple[(List[Optional[float]], bool)]:
num_updates = trainer.get_num_updates()
max_update = (cfg.optimization.max_update or math.inf)
should_stop = False
if (num_u... |
def _seg_44():
return [(64315, 'M', u''), (64316, 'M', u''), (64317, 'X'), (64318, 'M', u''), (64319, 'X'), (64320, 'M', u''), (64321, 'M', u''), (64322, 'X'), (64323, 'M', u''), (64324, 'M', u''), (64325, 'X'), (64326, 'M', u''), (64327, 'M', u''), (64328, 'M', u''), (64329, 'M', u''), (64330, 'M', u''), (64331, '... |
def download_datasets(root, url):
    # Fetch the archive at *url* into *root* and unpack it.
    # NOTE(review): extract_root uses the module-level ``storage_dir`` global
    # rather than *root* — confirm this asymmetry is intentional.
    download_and_extract_archive(url=url, download_root=root, extract_root=storage_dir.parent)
def re_evaluate(local_dict=None):
numexpr = _import_numexpr()
try:
compiled_ex = numexpr.necompiler._numexpr_last['ex']
except KeyError as err:
raise RuntimeError('not a previous evaluate() execution found') from err
names = numexpr.necompiler._numexpr_last['argnames']
arguments = ge... |
def test_deselecting(testdir):
testdir.make_test('\()\(max_examples=1)\ndef test_a(request, case):\n request.config.HYPOTHESIS_CASES += 1\n\(endpoint="pets")\(max_examples=1)\ndef test_b(request, case):\n request.config.HYPOTHESIS_CASES += 1\n ', paths={'/pets': {'post': {'responses': {'200': {'description... |
class TestClickToken(ActionTokenTester):
def test_execute(self, env, fields, dom, dom_elem):
button = env.elements[0]
click = ClickToken(MockReturnElementSet(button))
result = click.execute(env)
assert isinstance(result, click.return_type)
assert isinstance(result, MiniWoBEle... |
def test_chunk_selection(en_core_web_sm):
doc = en_core_web_sm('Natural language processing is fun')
candidates = chunk_selection(doc)
assert (candidates[0].lexical_form == ['natural', 'language', 'processing'])
assert (candidates[0].sentence_ids == [0])
assert (candidates[0].surface_forms[0].start ... |
def normalized_coords_transform(x0, y0, w, h):
    """Build a mapper from pixel coordinates to [-1, 1]-normalized coordinates.

    The returned function takes a point ``p = (px, py)`` and linearly maps the
    box ``[x0, x0+w] x [y0, y0+h]`` onto ``[-1, 1] x [-1, 1]``.
    """
    def to_normalized(point):
        px, py = point[0], point[1]
        nx = 2 * (px - x0) / w - 1
        ny = 2 * (py - y0) / h - 1
        return (nx, ny)
    return to_normalized
def seed_everything(seed=seed):
    """Seed the python, numpy and torch RNGs and force deterministic cuDNN.

    The default comes from the module-level ``seed`` constant.
    """
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
    # Trade cuDNN autotuning for reproducible kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def SDM(user_feature_columns, item_feature_columns, history_feature_list, units=64, rnn_layers=2, dropout_rate=0.2, rnn_num_res=1, num_head=4, l2_reg_embedding=1e-06, dnn_activation='tanh', temperature=0.05, sampler_config=None, seed=1024):
if (len(item_feature_columns) > 1):
raise ValueError('Now SDM only ... |
def calculate_dropouts(model):
res = 0
for (i, layer) in enumerate(list(model.children())):
module_name = list(model._modules.items())[i][0]
layer_name = layer._get_name()
if (layer_name == 'Dropout'):
res += 1
else:
res += calculate_dropouts(model=layer)
... |
class SpiderUnparser():
ast_wrapper = attr.ib()
schema = attr.ib()
UNIT_TYPES_B = {'Minus': '-', 'Plus': '+', 'Times': '*', 'Divide': '/'}
COND_TYPES_B = {'Between': 'BETWEEN', 'Eq': '=', 'Gt': '>', 'Lt': '<', 'Ge': '>=', 'Le': '<=', 'Ne': '!=', 'In': 'IN', 'Like': 'LIKE'}
def conjoin_conds(cls, con... |
def _make_bridge_dwg(dwg, state: BridgeBiddingState, config):
NUM_CARD_TYPE = 13
TO_CARD = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
SUITS = ['', '', '', '']
DENOMINATIONS = ['', '', '', '', 'N']
ACT = ['P', 'X', 'XX']
BID_OFFSET_NUM = 3
color_set = config['COLOR_SET... |
def _find_param_in_list(param: _torch.Tensor, l: _typing.Iterable[_torch.Tensor]) -> _typing.Optional[int]:
for (i, p) in enumerate(l):
if (p is param):
return i
else:
return None |
def main(rank: int, world_size: int, args):
device = rank
if args.distributed:
device = args.device_ids[rank]
torch.cuda.set_device(args.device_ids[rank])
args.lr *= world_size
if ((not args.distributed) or (rank == 0)):
wandb.init(project='data-efficient-contrastive-learning... |
class TestShowPickle(unittest.TestCase):
(IS_WINDOWS, "Can't re-open temp file on Windows")
def test_scripted_model(self):
class MyCoolModule(torch.nn.Module):
def __init__(self, weight):
super().__init__()
self.weight = weight
def forward(self, x)... |
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while (i < len(lines)):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = (int(lines[(i + 2)]) + 3)
i += skip_... |
def register_Ns3RxPacketTraceParams_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::RxPacketTraceParams const &', 'arg0')])
cls.add_instance_attribute('m_cellId', 'uint64_t', is_const=False)
cls.add_instance_attribute('m_corrupt', 'bool', is_const=False)
cls.add_i... |
_torch
class DistilBertModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = ((DistilBertModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DistilBertForSequenceClassification) if is_torch_available() else None)
test_pruning = True
test_torchscript = True
test_resize_embeddings ... |
def cross_entropy_soft(input, targets, reduction='mean'):
targets_prob = F.softmax(targets, dim=1)
xent = ((- targets_prob) * F.log_softmax(input, dim=1)).sum(1)
if (reduction == 'sum'):
return xent.sum()
elif (reduction == 'mean'):
return xent.mean()
elif (reduction == 'none'):
... |
class TestIterators(unittest.TestCase):
def test_counting_iterator_index(self, ref=None, itr=None):
if (ref is None):
assert (itr is None)
ref = list(range(10))
itr = iterators.CountingIterator(ref)
else:
assert (len(ref) == 10)
assert (itr... |
class LogitsProcessor(metaclass=DummyObject):
    """Import-time placeholder that requires the torch backend.

    Instantiating it runs ``requires_backends``, which raises a helpful
    error when torch is not installed.
    """

    _backends = ["torch"]

    def __init__(self, *unused_args, **unused_kwargs):
        requires_backends(self, ["torch"])
_model
def vovnet57a(pretrained=False, **kwargs):
    """Construct the VoVNet-57a variant, optionally loading pretrained weights."""
    model_name = 'vovnet57a'
    return _vovnet(model_name, pretrained=pretrained, **kwargs)
def test_load_zero_based_auto():
data1 = b'-1 1:1 2:2 3:3\n'
data2 = b'-1 0:0 1:1\n'
f1 = BytesIO(data1)
(X, y) = load_svmlight_file(f1, zero_based='auto')
assert (X.shape == (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
(X1, y1, X2, y2) = load_svmlight_files([f1, f2], zero_based='auto... |
def clean_up_response(results, column_names):
final_res = []
for res in results:
temp = dict(((column_name, result) for (column_name, result) in zip(column_names, res) if if_usable_restaurants(column_name)))
for i in temp:
if isinstance(temp[i], Decimal):
temp[i] = fl... |
def save_checkpoint(state, is_best=False, filename='checkpoint.pyth'):
    """Serialize *state* to *filename*; mirror it to ``model_best.pyth`` when best.

    Args:
        state: any torch-serializable object (typically a state dict).
        is_best: when True, also copy the checkpoint to the best-model path.
        filename: destination path for the checkpoint itself.
    """
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pyth')
def split_paths(paths):
rewards = [path['rewards'].reshape((- 1), 1) for path in paths]
terminals = [path['terminals'].reshape((- 1), 1) for path in paths]
actions = [path['actions'] for path in paths]
obs = [path['observations'] for path in paths]
next_obs = [path['next_observations'] for path in p... |
class StateDataset(PretrainDataset):
def __init__(self, epoch: int, index_path: Path, img_transform: Compose, stream: bool=False, prefix: Optional[Path]=None, is_val: bool=False, do_retry: bool=True, n_retries: int=3) -> None:
super().__init__()
(self.index_path, self.stream, self.is_val, self.val_l... |
def append_to_low_level_steps(trace, name, args, observation):
    """Record one low-level step (action + observation, stamped now) on *trace*."""
    action = Action(name, args)
    step = Step(action=action, observation=observation, timestamp=time.time())
    trace.low_level_steps.append(step)
def drn_d_105(pretrained=False, **kwargs):
    """Build a DRN-D-105 model; download and load pretrained weights on request."""
    layer_counts = [1, 1, 3, 4, 23, 3, 1, 1]
    model = DRN(Bottleneck, layer_counts, arch='D', **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['drn-d-105'])
        model.load_state_dict(state_dict)
    return model
('/')
def index():
    """Import ``urllib<suffix>`` where the suffix comes from the ``module`` query arg.

    SECURITY FIX: the original built ``exec('import urllib%s as urllib' % module)``
    from the raw query parameter — arbitrary remote code execution. The suffix
    is now validated and resolved via ``importlib.import_module``, which imports
    a module but never evaluates attacker-supplied statements.

    Returns:
        The literal string 'Module imported' on success.
    """
    import importlib
    import re

    module = request.args.get('module') or ''
    # Only identifier characters and dots may extend the 'urllib' prefix
    # (covers the legitimate uses: '', '.request', '.parse', ...).
    if not re.fullmatch(r'[A-Za-z0-9_.]*', module):
        raise ValueError('invalid module name suffix')
    importlib.import_module('urllib' + module)
    return 'Module imported'
.parametrize('fname, ctx, func_name', list_ctx_and_func_name(['mul2']))
def test_large_transform_binary(fname, ctx, func_name):
if (not func_name.endswith('Cuda')):
pytest.skip('Grid-strided loop is tested only for CUDA backend')
with nn.context_scope(ctx), nn.auto_forward(True):
a = nn.Variable... |
def register_Ns3DefaultDeleter__Ns3QosBlockedDestinations_methods(root_module, cls):
cls.add_constructor([])
cls.add_constructor([param('ns3::DefaultDeleter< ns3::QosBlockedDestinations > const &', 'arg0')])
cls.add_method('Delete', 'void', [param('ns3::QosBlockedDestinations *', 'object')], is_static=True)... |
def test_olsq_swap_normal():
    """OLSQ with the 'swap' objective in 'normal' mode should report 1 swap."""
    solver = OLSQ_qiskit('swap', 'normal')
    solver.setdevice(device_tmp)
    solver.setprogram(circuit_qiskit)
    result = solver.solve()
    assert result[2] == 1
def median_freq_balancing(dataloader, num_classes):
class_count = 0
total = 0
for (_, label) in dataloader:
label = label.cpu().numpy()
flat_label = label.flatten()
bincount = np.bincount(flat_label, minlength=num_classes)
mask = (bincount > 0)
total += (mask * flat_l... |
class EstimatorNoSetOutputWithTransformNoFeatureNamesOut(_SetOutputMixin):
    """Transformer stub: identity transform, no ``get_feature_names_out``."""

    def transform(self, X, y=None):
        # *y* is accepted for API compatibility but unused.
        return X
.parametrize('classifier', classifiers)
def test_fit_bert(classifier):
    """Fitting with bert=True on text inputs should reproduce the labels."""
    base = ConstantClassifier()
    model = classifier(local_classifier=base, bert=True)
    texts = ['Text 1', 'Text 2']
    labels = ['a', 'a']
    model.fit(texts, labels)
    check_is_fitted(model)
    assert_array_equal(labels, model.predict(texts))
class TNPClassifier(PreTrainedModel):
def __init__(self, model, n_labels, loss_func, dropout=0.0, seed=0, cla_bias=True):
super().__init__(model.config)
(self.encoder, self.loss_func) = (model, loss_func)
self.dropout = nn.Dropout(dropout)
hidden_dim = model.config.hidden_size
... |
def load_parents(dirpath):
    """Read ``STree.txt`` under *dirpath* into a list of parent strings.

    Each line has its '|' separators replaced by single spaces and is
    stripped of surrounding whitespace.
    """
    tree_path = os.path.join(dirpath, 'STree.txt')
    with open(tree_path) as handle:
        return [' '.join(raw.split('|')).strip() for raw in handle]
class RegNet(AnyNet):
def __init__(self, last_stride, bn_norm):
(b_ws, num_s, _, _) = generate_regnet(regnet_cfg.REGNET.WA, regnet_cfg.REGNET.W0, regnet_cfg.REGNET.WM, regnet_cfg.REGNET.DEPTH)
(ws, ds) = get_stages_from_blocks(b_ws, b_ws)
gws = [regnet_cfg.REGNET.GROUP_W for _ in range(num_s... |
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super(Joiner, self).__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for (_, x) in xs.items()... |
class ScorepCProfile(scorep._bindings.CInstrumenter, ScorepInstrumenter):
    """Score-P instrumenter backed by the C 'Profile' interface.

    Combines the low-level C instrumenter binding with the project's
    enable/disable wrapper.
    """

    def __init__(self, enable_instrumenter):
        # Both bases are initialized explicitly (no cooperative super()):
        # the C binding needs its interface name, the wrapper its enable flag.
        scorep._bindings.CInstrumenter.__init__(self, interface='Profile')
        ScorepInstrumenter.__init__(self, enable_instrumenter)
def train_policy(num_of_envs, log_relative_path, maximum_episode_length, skip_frame, seed_num, ppo_config, total_time_steps, validate_every_timesteps, task_name):
def _make_env(rank):
def _init():
task = generate_task(task_generator_id=task_name)
env = CausalWorld(task=task, skip_fra... |
def f_classif(target_column, features, df):
groupby = df.replace(float('nan'), None).groupBy(target_column)
avg = groupby.agg({feature: 'mean' for feature in features}).toPandas().set_index(target_column).rename((lambda colname: colname[4:(- 1)]), axis=1)
var = groupby.agg({feature: 'var_pop' for feature in... |
class ContrastiveDataSplitter(DataSplitter):
def __init__(self, adata_manager: AnnDataManager, background_indices: list[int], target_indices: list[int], train_size: float=0.9, validation_size: Optional[float]=None, shuffle_set_split: bool=True, load_sparse_tensor: bool=False, pin_memory: bool=False, **kwargs) -> No... |
class ByteMaskedType(LayoutBuilderType):
def __init__(self, content, valid_when, parameters):
super().__init__(name=f'ak.lb.ByteMasked({content.numbatype()}, valid_when={valid_when}, parameters={parameters!r})')
self._content = content
self._valid_when = valid_when
self._init(paramet... |
class SimplifiedToTraditionalPerturbation(TextPerturbation):
name: str = 'simplified_to_traditional'
should_perturb_references: bool = True
def description(self) -> PerturbationDescription:
return PerturbationDescription(name=self.name, fairness=True)
def __init__(self):
try:
... |
class RASampler(torch.utils.data.Sampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
if (num_replicas is None):
if (not dist.is_available()):
raise RuntimeError('Requires distributed package to be available')
num_replicas = dist.get_worl... |
_module
class WeightedSmoothL1Loss(nn.Module):
def __init__(self, sigma=3.0, reduction='mean', code_weights=None, codewise=True, loss_weight=1.0):
super(WeightedSmoothL1Loss, self).__init__()
self._sigma = sigma
self._code_weights = None
self._codewise = codewise
self._reduct... |
def nested_truncate(tensors, limit):
if isinstance(tensors, (list, tuple)):
return type(tensors)((nested_truncate(t, limit) for t in tensors))
return tensors[:limit] |
def getDmaFunctionName(cmd_type, cmd_special_function, direction):
dmaFunctionNameDict = {(0, 0): 'DMA_tensor', (0, 1): 'NC trans', (0, 2): 'collect', (0, 3): 'broadcast', (0, 4): 'distribute', (0, 5): 'lmem 4 bank copy', (0, 6): 'lmem 4 bank broadcast', (1, 0): 'DMA_matrix', (1, 1): 'matrix transpose', (2, 0): 'DM... |
class Vocabulary(object):
def __init__(self, tokens, tokenizer=tokenizer):
self._tokens = tokens
self._ids = {i: token for (token, i) in tokens.items()}
self._ntokens = len(tokens)
self._tokenizer = tokenizer
def word2id(self, word):
return self._tokens[word]
def id2w... |
def divide_no_nan(a, b):
    """Element-wise ``a / b`` that yields 0.0 instead of NaN or infinity.

    Fixes over the original:
      * no longer mutates the caller's ``b`` (it used to write 1.0 into the
        zero positions of ``b`` and never restored them);
      * zeroes -inf as well as +inf (the original only masked ``np.inf``).

    Parameters
    ----------
    a, b : array-like numeric inputs (broadcastable against each other).

    Returns
    -------
    np.ndarray of floats with every non-finite quotient replaced by 0.0.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    zero_div = b == 0.0
    # Divide by a patched copy so the caller's array is untouched.
    safe_b = np.where(zero_div, 1.0, b)
    result = a / safe_b
    result[zero_div] = 0.0
    # Any remaining NaN/inf (from non-finite inputs) also becomes 0.
    result[~np.isfinite(result)] = 0.0
    return result
def _track_split(df_target, msd_path, types='ecals'):
track_split = {}
if (types == 'ecals'):
df_target = df_target[df_target['tag'].apply((lambda x: (len(x) != 0)))]
for i in set(df_target['splits']):
track_list = list(df_target[(df_target['splits'] == i)].index)
if (i == 'TRAIN'):
... |
_connect.numpy.implements('broadcast_arrays')
def _nep_18_impl(*args, subok=UNSUPPORTED):
    """NEP-18 ``__array_function__`` implementation for ``broadcast_arrays``.

    The ``subok`` keyword mirrors numpy's signature but is never read here;
    its UNSUPPORTED sentinel default presumably triggers rejection upstream
    when a caller passes it explicitly — confirm against the dispatcher.
    """
    return broadcast_arrays(*args)
def make_loaders(collate_fn, train_path='', valid_path='', test_path='', batch_size=32, num_workers=4):
train_loader = None
if (train_path and os.path.exists(train_path)):
train_loader = DataLoader(CscDataset(train_path), batch_size=batch_size, shuffle=False, num_workers=num_workers, collate_fn=collate_... |
def afss_active_learn_ensemble(x, y, ensemble, opts):
data_2D = (x.shape[1] == 2)
plot = (opts.plot and data_2D)
y_labeled = (np.ones(x.shape[0], dtype=int) * (- 1))
scores = ensemble.get_scores(x)
xx = yy = None
afss = get_afss_model(opts, n_output=ensemble.m)
afss.init_network(x, prime_net... |
class BottleneckLIP(nn.Module):
def __init__(self, channels):
super(BottleneckLIP, self).__init__()
rp = BOTTLENECK_WIDTH
self.logit = nn.Sequential(OrderedDict((('conv1', conv1x1(channels, rp)), ('bn1', nn.InstanceNorm2d(rp, affine=True)), ('relu1', nn.ReLU(inplace=True)), ('conv2', conv3x3... |
class AbsModel(BaseModel):
def __init__(self, args):
super(AbsModel, self).__init__(args)
def kl_loss(self, latent_stats, exemplars_embedding, dataset, cache, x_indices):
(z_q, z_q_mean, z_q_logvar) = latent_stats
if ((exemplars_embedding is None) and (self.args.prior == 'exemplar_prior'... |
def tokenize(utterance, lowercase=True):
    """Tokenize *utterance* with ``word_tokenize``, lowercasing first by default.

    Args:
        utterance: input text.
        lowercase: when True (default), the text is lowercased before tokenizing.

    Returns:
        List of token strings.
    """
    text = utterance.lower() if lowercase else utterance
    return word_tokenize(text)
def _python_to_cpp_to_python_from_threads(num_threads, parallel=False):
threads = []
for _ in range(num_threads):
thread = threading.Thread(target=_python_to_cpp_to_python)
thread.daemon = True
thread.start()
if parallel:
threads.append(thread)
else:
... |
class Vox():
def __init__(self, dims=[0, 0, 0], res=0.0, grid2world=None, sdf=None, pdf=None, noc=None, bbox=None):
self.dims = dims
self.res = res
self.grid2world = grid2world
self.sdf = sdf
self.pdf = pdf
self.noc = noc
self.bbox = bbox
def make_torch(se... |
class TestCRFOp(hu.HypothesisTestCase):
(num_tags=st.integers(2, 4), num_words=st.integers(2, 15))
(deadline=1000)
def test_crf_with_loss_op(self, num_tags, num_words):
model = ModelHelper(name='external')
embeddings_dim = 200
embeddings = np.random.randn(num_words, embeddings_dim).a... |
class HavingGenerator(AbstractSqlGenerator):
def __init__(self, database: SingleDatabase, seed=2023):
super().__init__(database, seed)
def sql_generate(self, table_name: str) -> dict[(str, list)]:
self.empty_sql_generated()
(df, cat_cols, num_cols) = self._sample_cat_num_cols(table_name)... |
def test_notebooks():
num_errors = 0
num_passed = 0
for nb_path in notebook_paths:
abs_nb_path = os.path.join(SGDIR, nb_path)
cmd_line = f'treon . --threads=2'
print(f'[1;33;40m Running {abs_nb_path}[0m')
environ = dict(os.environ, PYTHONPATH=abs_nb_path)
procout = ... |
.parametrize('clients_method,model_type', [('get_best_model', 'BEST_MODEL'), ('get_last_model', 'LAST_MODEL')])
('openfl.transport.grpc.director_client.deconstruct_model_proto')
def test_get_best_model(deconstruct_model_proto, director_client, clients_method, model_type):
deconstruct_model_proto.return_value = ({},... |
class RequestMethods(object):
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = (headers or {})
def urlopen(self, method, url, body=None, headers=None, encode_multipart=True, multipart_boundary=None, **kw):
raise NotImplementedEr... |
def protect_pip_from_modification_on_windows(modifying_pip):
pip_names = ['pip.exe', 'pip{}.exe'.format(sys.version_info[0]), 'pip{}.{}.exe'.format(*sys.version_info[:2])]
should_show_use_python_msg = (modifying_pip and WINDOWS and (os.path.basename(sys.argv[0]) in pip_names))
if should_show_use_python_msg:... |
def test_dataset_evaluation():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_dummy_coco_json(fake_json_file)
coco_dataset = CocoDataset(ann_file=fake_json_file, classes=('car',), pipeline=[])
fake_results = _create_dummy_results()
eval_... |
def descriptor_sequence(tensor, batch_sizes):
descriptors = TensorDescriptorArray(len(batch_sizes))
_type = _typemap[tensor.type()]
_ndim = 5
dim_pad = ((1,) * (5 - tensor.dim()))
_size = int_array((tensor.size() + dim_pad))
_stride = int_array((tensor.stride() + dim_pad))
for (i, batch_size... |
def modelGenerator(conv_kernel_c7Ak, input, output, use_resize_convolution, name=None):
input_img = Input(input)
x = ReflectionPadding2D((3, 3))(input_img)
x = c7Ak(x, 32, conv_kernel_c7Ak)
x = dk(x, 64)
x = dk(x, 128)
for _ in range(4, 13):
x = Rk(x)
x = uk(x, 64, use_resize_convolu... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.