code stringlengths 101 5.91M |
|---|
class SoftmaxTransformerActionSampler(TransformerActionSampler):
_temperature: float
def __init__(self, temperature: float=1.0):
self._temperature = temperature
def __call__(self, transformer_output: NDArray) -> Union[(NDArray, int)]:
assert (transformer_output.ndim == 1)
logits = (t... |
def gauss_newton_product(cost, p, v, s):
if (not isinstance(s, (list, tuple))):
s = [s]
sum_Gv = None
for si in s:
Jv = T.Rop(si, p, v)
HJv = T.grad(T.sum((T.grad(cost, si, disconnected_inputs='ignore') * Jv)), si, consider_constant=[Jv], disconnected_inputs='ignore')
Gv = T.... |
def integrate_vortex():
    # Advance every vortex position by one explicit-Euler step of length `dt`.
    # NOTE(review): relies on module globals n_vortex, pos, new_pos, dt and
    # compute_u_single; uses ti.Vector, so this is presumably a Taichi kernel
    # whose @ti.kernel decorator sits outside this view -- confirm.
    for i in range(n_vortex):
        # Velocity induced at vortex i by all other vortices (self term j == i
        # is excluded).
        v = ti.Vector([0.0, 0.0])
        for j in range(n_vortex):
            if (i != j):
                v += compute_u_single(pos[i], j)
        new_pos[i] = (pos[i] + (dt * v))
    # Copy back only after every new position is computed, so the update is
    # effectively simultaneous (no vortex sees a partially-updated field).
    for i in range(n_vortex):
        pos[i] = new_pos[i]
class _InstanceNorm(_NormBase):
def __init__(self, num_features: int, eps: float=1e-05, momentum: float=0.1, affine: bool=False, track_running_stats: bool=False) -> None:
super(_InstanceNorm, self).__init__(num_features, eps, momentum, affine, track_running_stats)
def _check_input_dim(self, input):
... |
class Order(db.Entity):
    # Declarative ORM entity (Pony-style API) describing one product order.
    user = Required(User)  # ordering user (relationship / foreign key)
    oid = PrimaryKey(int)  # order id, used as the primary key
    delivery_address = Required(str)
    product = Required(str)
    quantity = Required(int)
    order_status = Required(str)  # free-form status string
def resnet_v1(inputs, blocks, num_classes=None, is_training=True, global_pool=True, output_stride=None, include_root_block=True, reuse=None, scope=None):
with variable_scope.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = (sc.original_name_scope + '_end_points')
... |
class Encoder(nn.Module, metaclass=ABCMeta):
    """Abstract base class for observation encoders.

    Subclasses override ``forward`` to map a TorchObservation to a feature
    tensor; the ``forward`` here is an empty placeholder.
    """
    def forward(self, x: TorchObservation) -> torch.Tensor:
        # Intentionally unimplemented; concrete encoders provide the mapping.
        pass
    def __call__(self, x: TorchObservation) -> torch.Tensor:
        # Re-declared only to narrow the call signature for type checkers;
        # behavior defers entirely to nn.Module.__call__ (hooks etc.).
        return super().__call__(x)
class DistanceRepresentation():
def distance(self, p1s: tf.Tensor, p2s: tf.Tensor) -> tf.Tensor:
diff = (p1s - p2s)
square = tf.square(diff)
sum_squares = tf.reduce_sum(square, axis=(- 1))
return tf.sqrt(sum_squares)
def __call__(self, p1s: tf.Tensor, p2s: tf.Tensor) -> tf.Tensor... |
class MavenCommand(BuildCommand):
def name() -> str:
return 'mvn'
def _prepare_args(self, args: List[str]) -> List[str]:
return (['dependency:build-classpath', '-DincludeScope=compile'] + args)
def _get_errors(self, output: str, error: str) -> str:
lines = output.splitlines()
... |
class WandbCallback(TrainerCallback):
def __init__(self):
assert _has_wandb, 'WandbCallback requires wandb to be installed. Run `pip install wandb`.'
self._initialized = False
def setup(self, args, state, model, reinit, **kwargs):
self._initialized = True
if state.is_world_proces... |
class TestSQLFlowMagic(unittest.TestCase):
train_statement = 'SELECT * FROM iris.train\nTO TRAIN ElasticDLKerasClassifier\nWITH\n model.num_classes = 10,\n train.shuffle = 120,\n train.epoch = 2,\n train.grads_to_wait = 2,\n train.tensorboard_log_dir = "",\n train.checkpoint_steps = 0,\n train.... |
def get_hypernyms(word, pos):
hypers_lst = []
try:
s = wordnet.synsets(word, pos)[0]
except:
try:
s = wordnet.synsets(word)[0]
except:
return hypers_lst
if (s.name() == 'restrain.v.01'):
print('RESTRAIN ENCOUNTERED (hypers)')
return hypers_... |
def ExamineGraph(adj, graph):
    """Print every edge recorded in the adjacency structure.

    Args:
        adj: structure indexable by an ``(row, col)`` tuple; a value of 1 at
            ``(r, c)`` means node ``r`` is connected to node ``c``.
        graph: graph object exposing a networkx-style ``nodes`` view (both
            callable and iterable); its node order defines the adjacency
            indices.

    Side effects:
        Prints one "Connected: " line per edge; returns None.
    """
    # Materialize the node list once. The original rebuilt list(graph.nodes)
    # twice per matching cell, making every printed edge O(n).
    nodes = list(graph.nodes())
    n = len(nodes)
    for r in range(n):
        for c in range(n):
            if adj[(r, c)] == 1:
                print('Connected: ', nodes[r], nodes[c])
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = binary_type()
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if (not data):
return data
... |
class EDGE_ENHANCE(BuiltinFilter):
    # 3x3 edge-enhancement convolution filter (PIL BuiltinFilter layout).
    name = 'Edge-enhance'
    # filterargs = (kernel size, scale, offset, kernel weights). The kernel
    # subtracts the 8 neighbours and boosts the centre (weight 10); the
    # result is divided by scale 2 (weights sum to 10 - 8 = 2).
    filterargs = ((3, 3), 2, 0, ((- 1), (- 1), (- 1), (- 1), 10, (- 1), (- 1), (- 1), (- 1)))
def markup_join(seq):
    """Concatenate an iterable of strings, switching to markup-aware joining
    as soon as any item turns out to be HTML-aware.

    Each item is coerced with soft_unicode. If an item exposes ``__html__``
    (i.e. it is markup-safe), the whole sequence is re-joined through
    ``Markup('').join`` so the plain-string items get escaped properly.
    ``buf`` holds the items consumed so far and ``iterator`` lazily yields
    the rest, so nothing is iterated twice.
    """
    buf = []
    iterator = imap(soft_unicode, seq)
    for arg in iterator:
        buf.append(arg)
        if hasattr(arg, '__html__'):
            # Escapes the already-buffered strings and chains in the
            # not-yet-consumed tail of the iterator.
            return Markup(u'').join(chain(buf, iterator))
    return concat(buf)
def to_local_command(params, python_command='python', script='garage.experiment.experiment_wrapper'):
command = ((python_command + ' -m ') + script)
garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
for (k, v) in garage_env.items():
command = ('{}={} '.format(k, v) + command)
pre_commands = ... |
def _parse_signature(func):
if hasattr(func, 'im_func'):
func = func.im_func
parse = _signature_cache.get(func)
if (parse is not None):
return parse
if hasattr(inspect, 'getfullargspec'):
tup = inspect.getfullargspec(func)
else:
tup = inspect.getargspec(func)
(pos... |
def gen():
np.random.seed(123)
params = {'model_name_or_path': ['bert-large-uncased-whole-word-masking'], 'train_file': ['./data/train.json'], 'dev_file': ['./data/dev.json'], 'config_name': [None], 'tokenizer_name': [None], 'cache_dir': [None], 'max_seq_length': [512], 'max_query_length': [256], 'do_lower_case... |
def is_value(token):
    """Return True if *token* looks like a literal value.

    A token counts as a value when it parses as a float (ints included), or
    when it starts or ends with a single or double quote.

    Args:
        token: the string to classify.
    """
    try:
        float(token)
        is_number = True
    except ValueError:
        is_number = False
    # startswith/endswith accept a tuple of affixes -- two calls replace the
    # original chain of four `or`-ed tests.
    quotes = ('"', "'")
    is_string = token.startswith(quotes) or token.endswith(quotes)
    return is_number or is_string
def mobilecrnn_v2(inputdim=64, outputdim=527, pretrained=True):
model = MobileCRNN(inputdim, outputdim, **{'filters': [64, 64, 128, 128, 256, 256, 512, 512], 'kernels': [5, 3, 3, 3, 3, 3, 3, 3], 'padding': [2, 1, 1, 1, 1, 1, 1, 1], 'strides': [2, 1, 1, 1, 1, 1, 1, 1], 'pooling': [[2], [1, 2], [1, 1], [1, 2], [1], [... |
def interpolate_alpha_range(alphas, down_hist, nom_hist, up_hist):
at_alphas = []
for alpha in alphas:
interpolated_hist_at_alpha = [(nominal + interpolate_deltas(down, nominal, up, alpha)) for (down, nominal, up) in zip(down_hist, nom_hist, up_hist)]
at_alphas.append(interpolated_hist_at_alpha)... |
def compute_next_turn(dlgHistory: List[DialogueTurn], user_utterance: str, engine='text-davinci-003', sys_type='sql_textfcns_v0801'):
print(sys_type)
assert (sys_type in ['sql_textfcns_v0801', 'semantic_index_w_textfncs', 'baseline_linearization'])
first_classification_time = 0
semantic_parser_time = 0
... |
def to_camel_case(snake_str):
    """Convert a snake_case string to camelCase.

    The first component keeps its original casing; each following component
    is title-cased and appended.
    """
    head, *rest = snake_str.split('_')
    return head + ''.join(part.title() for part in rest)
def vector_grid_conversion(_hf, _npoints, _nslices, _grid_size, _wv, _lambda_un):
vac_imp = const.codata.value('characteristic impedance of vacuum')
eev = (1000000.0 * const.codata.value('electron mass energy equivalent in MeV'))
h5f = _hf
npt = _npoints
nsl = _nslices
mesh_size = (_grid_size / ... |
def add_nets_in_order(step, net_list):
proto = step.Proto()
for substep in step.Substeps():
add_nets_in_order(substep, net_list)
for net in proto.network:
if (net not in net_list):
net_list.append(net)
if (proto.report_net and (proto.report_net not in net_list)):
net_... |
class ProductRegressor():
def __init__(self, regressors):
self.regressors = regressors
self.output_dims = [x.output_dim for x in regressors]
def _split_ys(self, ys):
ys = np.asarray(ys)
split_ids = np.cumsum(self.output_dims)[:(- 1)]
return np.split(ys, split_ids, axis=1)... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('val', [0.5, 1, 2])
.parametrize('inplace', [False, True])
def test_pow_scalar_double_backward(seed, val, ctx, func_name, inplace):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [(... |
def compute_fitness(chromesome, codebert_tgt, tokenizer_tgt, orig_prob, orig_label, true_label, code, names_positions_dict, args):
temp_code = map_chromesome(chromesome, code, 'python')
new_feature = convert_code_to_features(temp_code, tokenizer_tgt, true_label, args)
new_dataset = GraphCodeDataset([new_fea... |
class TestLVID(torch.utils.data.Dataset):
def __init__(self, root):
self.root = root
self.video_list = sorted(os.listdir(os.path.join(root, 'JPEGImages')))
self.to_tensor = tv.transforms.ToTensor()
self.to_mask = LabelToLongTensor()
def __len__(self):
return len(self.vide... |
.parametrize('dtype, storage_format', [(ti.f32, 'col_major'), (ti.f32, 'row_major'), (ti.f64, 'col_major'), (ti.f64, 'row_major')])
_utils.test(arch=ti.cpu)
def test_sparse_matrix_builder_deprecated_anno(dtype, storage_format):
n = 8
Abuilder = ti.linalg.SparseMatrixBuilder(n, n, max_num_triplets=100, dtype=dty... |
class XLMRobertaForTokenClassification(metaclass=DummyObject):
    # Import-time placeholder used when torch is not installed: attempting to
    # instantiate it routes through requires_backends, which raises a helpful
    # "this class requires torch" error instead of a bare ImportError.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class MyImageFolder(MyDatasetFolder):
def __init__(self, root, transform=None, target_transform=None, loader=default_loader):
super(MyImageFolder, self).__init__(root, loader, IMG_EXTENSIONS, transform=transform, target_transform=target_transform)
self.imgs = self.samples
def __getitem__(self, i... |
def define_D(input_nc, ndf, netD, norm='batch', nl='lrelu', init_type='xavier', init_gain=0.02, num_Ds=1, gpu_ids=[]):
net = None
norm_layer = get_norm_layer(norm_type=norm)
nl = 'lrelu'
nl_layer = get_non_linearity(layer_type=nl)
if (netD == 'basic_128'):
net = D_NLayers(input_nc, ndf, n_la... |
def plot_all_sensitivities_per_alg_gradients(**kwargs):
global color_counter, COUNTER
for exp in kwargs['exps']:
exp_attrs = EXP_ATTRS[exp](exp)
for auc_or_final in kwargs['auc_or_final']:
for sp in kwargs['sp_list']:
for alg in kwargs['algs']:
col... |
def load_for_host(hostname: str=DEFAULT_HOSTNAME, hosts_file: PathLike=DEFAULT_HOSTS_PATH) -> dict[(str, Any)]:
    """Return the configuration section for *hostname* from *hosts_file*.

    ``load`` (defined elsewhere) parses the hosts file into a mapping keyed
    by hostname; an empty dict is returned when the host has no entry.
    """
    return load(hosts_file).get(hostname, {})
class QSystem(CombinatorialFreeModule):
def __classcall__(cls, base_ring, cartan_type, level=None, twisted=False):
cartan_type = CartanType(cartan_type)
if (not is_tamely_laced(cartan_type)):
raise ValueError('the Cartan type is not tamely-laced')
if (twisted and (not cartan_type... |
def append_dims(x, target_dims):
    """Return *x* with trailing singleton axes added until it has *target_dims* dims.

    Raises:
        ValueError: if *x* already has more dims than *target_dims*.
    """
    extra = target_dims - x.ndim
    if extra < 0:
        raise ValueError(f'input has {x.ndim} dims but target_dims is {target_dims}, which is less')
    expanded_index = (Ellipsis,) + (None,) * extra
    return x[expanded_index]
class SeqLabeling(BaseModel):
def __init__(self, embed, hidden_size, num_classes):
super(SeqLabeling, self).__init__()
self.embedding = get_embeddings(embed)
self.rnn = encoder.LSTM(self.embedding.embedding_dim, hidden_size)
self.fc = nn.Linear(hidden_size, num_classes)
self.... |
def build_tools(model):
optimizer = torch.optim.SGD(model.parameters(), lr=cfg.WARMUP_LR, weight_decay=cfg.WEIGHT_DECAY, momentum=cfg.MOMENTUM)
schedule_helper = CosineLRScheduler(lr_warmup_init=cfg.WARMUP_LR, base_lr=cfg.BASE_LR, lr_warmup_step=cfg.STEPS_PER_EPOCH, total_steps=cfg.TOTAL_STEPS)
scheduler = ... |
class LshANN(BaseANN):
def __init__(self, metric, hash_bits_per_dim):
self.index = None
self._metric = metric
self.hash_bits_per_dim = hash_bits_per_dim
def __str__(self):
return f'Lsh(m={self.hash_bits_per_dim})'
def fit(self, X):
if (X.dtype != numpy.float32):
... |
def get_script_files(scripts):
    """Return only the entries of *scripts* that are strings (per is_string)."""
    return [entry for entry in scripts if is_string(entry)]
class KetState(State):
def __init__(self, amplitudes: List[complex], keys: List[int], truncation: int=1):
super().__init__()
self.truncation = truncation
dim = (self.truncation + 1)
assert all([(abs(a) <= 1.01) for a in amplitudes]), 'Illegal value with abs > 1 in ket vector'
... |
def log_pattern():
    """Load and return the pickled 'healthapp_log_pattern' fixture from TEST_DATA_PATH."""
    pattern_path = os.path.join(TEST_DATA_PATH, 'healthapp_log_pattern')
    return pd.read_pickle(pattern_path)
def test(model, test_loader, class_weights, class_encoding, step):
print('\nTesting...\n')
num_classes = len(class_encoding)
criterion = nn.CrossEntropyLoss(weight=class_weights)
if use_cuda:
criterion = criterion.cuda()
if args.ignore_unlabeled:
ignore_index = list(class_encoding).i... |
def write_unigrams(unigrams_dict, output_path):
f = io.open(output_path, 'w', encoding='utf-8')
print('Currently writing unigrams to file ...')
for (key, value) in unigrams_dict.items():
f.write((((((key + '\t') + str(value[0])) + '\t') + str(value[1])) + '\n'))
f.close()
print('Unigrams suc... |
def construct_grids(batch):
xmin = (batch.x_left_lower_corner + batch.grid_size)
xmax = (xmin + (batch.Nx * batch.grid_size))
ymin = (batch.y_left_lower_corner + batch.grid_size)
ymax = (ymin + (batch.Ny * batch.grid_size))
xgrid = np.arange(xmin, xmax, batch.grid_size)
ygrid = np.arange(ymin, y... |
def assp_branch(in_channels, out_channles, kernel_size, dilation):
    """Build one ASPP branch: dilated conv -> batch norm -> ReLU.

    A 1x1 kernel needs no padding; otherwise padding equals the dilation so
    the spatial size is preserved.
    """
    if kernel_size == 1:
        padding = 0
    else:
        padding = dilation
    conv = nn.Conv2d(in_channels, out_channles, kernel_size,
                     padding=padding, dilation=dilation, bias=False)
    return nn.Sequential(conv, BatchNorm2d(out_channles), nn.ReLU(inplace=True))
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, required=True)
parser.add_argument('--save_root', type=str, required=True)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--n_sv', type=int, required=True)
parse... |
def obj_pre(obj, pre):
    """Return the objects related to *obj* through predicate *pre*, or None.

    Looks up *obj*'s node in the module-level ``graph`` and, when that node
    has an 'obj' index containing *pre*, returns its targets as a list.
    """
    # Resolve the node once -- the original called get_id(obj) and indexed
    # `graph` up to three times per call for the same lookup.
    node = graph[get_id(obj)]
    if 'obj' in node and pre in node['obj']:
        return list(node['obj'][pre])
    return None
class MaximaAbstract(ExtraTabCompletion, Interface):
def __init__(self, name='maxima_abstract'):
Interface.__init__(self, name)
def chdir(self, dir):
self.lisp(('(ext::cd "%s")' % dir))
def _command_runner(self, command, s, redirect=True):
cmd = '{} --very-quiet --batch-string="{}({}... |
def tobytes(array):
    """Return the raw bytes of *array*, preferring the modern tobytes() API
    and falling back to the legacy tostring() name on old array types."""
    converter = getattr(array, 'tobytes', None)
    if converter is None:
        return array.tostring()
    return converter()
def generate_timbre(m_type, mx, mn, condition, cat_input=None):
model_path = 'snapshots/harmonic'
if (m_type == 1):
model_path = 'snapshots/aperiodic'
model = load_latest_model_from(m_type, model_path)
raw_gen = model.generate(condition, cat_input)
sample = (((raw_gen.transpose(0, 1).cpu().n... |
class AlignBrain(sb.Brain):
def compute_forward(self, batch, stage):
batch = batch.to(self.device)
(wavs, lens) = batch.sig
feats = self.hparams.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
x = self.modules.model(feats)
x = self.modules.lin(x... |
(device=True)
def line_search_cuda(nu, nu_insert, number_of_lines):
imin = 0
imax = (number_of_lines - 1)
if (nu_insert > nu[imin]):
result = imin
elif (nu_insert < nu[imax]):
result = (imax + 1)
else:
result = reverse_binary_search_cuda(nu, nu_insert, imin, imax)
res... |
.parametrize('sym', [False, True])
def test_error_mother_class_initialization(sym: bool) -> None:
    # Instantiating the ConformityScore base class directly must fail with
    # TypeError (abstract class), regardless of the parametrized `sym` flag.
    with pytest.raises(TypeError):
        ConformityScore(sym)
def leaky_integrate_neuron(U, time_step=0.001, I=0, R=.0, C=1e-10):
    """Advance a leaky-integrator membrane potential by one explicit-Euler step.

    Integrates dU/dt = (I*R - U) / tau with tau = R*C.

    Args:
        U: current membrane potential.
        time_step: integration step size.
        I: input current.
        R: membrane resistance.
        C: membrane capacitance.

    NOTE(review): with the default R=0.0 the time constant tau is zero and
    this raises ZeroDivisionError -- confirm callers always pass R > 0.
    """
    tau = R * C
    dU = (time_step / tau) * (I * R - U)
    return U + dU
def incremental_pre(I, prot, kwds):
def sort_key(p):
p = Polynomial(p)
return (p.navigation().value(), (- p.deg()))
I = sorted(I, key=sort_key)
inc_sys = []
kwds = copy(kwds)
kwds['incremental'] = False
for p in I[:(- 1)]:
inc_sys.append(p)
inc_sys = groebner_basi... |
class NodeClassification(MethodBase):
def __init__(self, num_classes: int, epochs: Annotated[(int, ArgInfo(help='number of epochs for training'))]=100, optimizer: Annotated[(str, ArgInfo(help='optimization algorithm', choices=['sgd', 'adam']))]='adam', learning_rate: Annotated[(float, ArgInfo(help='learning rate', ... |
class GridSearchCV(skGSCV):
def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=None, iid='warn', refit=True, cv='warn', verbose=0, pre_dispatch='2*n_jobs', error_score='raise-deprecating', return_train_score='warn'):
super(GridSearchCV, self).__init__(estimator, param_grid, scor... |
def normalize_image(x):
    """Min-max normalize a tensor towards [0, 1].

    When the tensor is constant (max == min) a large divisor (1e5) is used
    instead of zero, mapping the result to ~0 rather than dividing by zero.
    """
    peak = float(x.max().cpu().data)
    low = float(x.min().cpu().data)
    spread = (peak - low) if peak != low else 100000.0
    return (x - low) / spread
class ShardedDataIterator(object):
def __init__(self, data: list, shard_id: int=0, num_shards: int=1, batch_size: int=1, shuffle=True, shuffle_seed: int=0, offset: int=0, strict_batch_size: bool=False):
self.data = data
total_size = len(data)
self.shards_num = max(num_shards, 1)
self... |
def load_checkpoint_test(opt):
    """Load and return the checkpoint file named by ``opt.checkpoint``.

    Args:
        opt: options object with a ``checkpoint`` attribute (path string).

    Returns:
        Whatever ``torch.load`` deserializes from the file.

    Raises:
        FileNotFoundError: if ``opt.checkpoint`` is not an existing file.
            (Subclass of the generic Exception raised previously, so callers
            using ``except Exception`` keep working.)
    """
    # Guard clause replaces the inverted if/else; specific exception type
    # replaces the bare `raise Exception(...)`.
    if not os.path.isfile(opt.checkpoint):
        raise FileNotFoundError('=> no checkpoint found at ' + opt.checkpoint)
    print('=> loading checkpoint ' + opt.checkpoint)
    return torch.load(opt.checkpoint)
class FiniteWordPath_square_grid_str(WordDatatype_str, FiniteWordPath_square_grid, FiniteWord_class):
    # Concrete word-path class combining the str storage backend with
    # square-grid path semantics; all behavior comes from the three bases,
    # so the body is intentionally empty.
    pass
class TestCEM(unittest.TestCase):
def setUp(self) -> None:
self.device = ('cuda' if torch.cuda.is_available() else 'cpu')
train_data = torchvision.datasets.MNIST(root='../../data/tmp', train=True, download=True)
test_data = torchvision.datasets.MNIST(root='../../data/tmp', train=False, downl... |
class TNPG(NPO):
def __init__(self, env_spec, policy, baseline, scope=None, max_path_length=500, discount=0.99, gae_lambda=0.98, center_adv=True, positive_adv=False, fixed_horizon=False, lr_clip_range=0.01, max_kl_step=0.01, optimizer=None, optimizer_args=None, policy_ent_coeff=0.0, use_softplus_entropy=False, use_... |
_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=(- 1), norm=None):
    """Compute the one-dimensional discrete Fourier transform of *a*.

    Args:
        a: array-like input, converted with asarray().
        n: transform length; defaults to the size of *a* along *axis*.
        axis: axis over which to transform (default: last).
        norm: normalization mode; when _unitary(norm) is true, inv_norm is
            sqrt(n) so the helper scales the output by 1/sqrt(n).

    Returns:
        The array produced by the _raw_fft helper (the False/True positional
        flags select the transform variant -- see _raw_fft's signature).
    """
    a = asarray(a)
    if (n is None):
        n = a.shape[axis]
    inv_norm = 1
    if ((norm is not None) and _unitary(norm)):
        inv_norm = sqrt(n)
    output = _raw_fft(a, n, axis, False, True, inv_norm)
    return output
.parametrize('observation_shape', [(100,), ((100,), (200,))])
.parametrize('action_size', [2])
.parametrize('batch_size', [32])
.parametrize('beta', [0.5])
def test_compute_discrete_imitation_loss(observation_shape: Shape, action_size: int, batch_size: int, beta: float) -> None:
encoder = DummyEncoder(observation_s... |
class CODEC():
def __init__(self, img_size, num_channels, compress_mode=1, clip_value=0.5, resize=None, use_tanh=True):
self.compress_mode = compress_mode
working_img_size = img_size
encoder_model = Sequential()
if resize:
encoder_model.add(Lambda((lambda image: tf.image.... |
class TestToken(object):
def test_assign_attr(self):
tok = Token('-5.44', chunking='B-NP')
assert hasattr(tok, 'chunking')
assert (tok.chunking == 'B-NP')
.parametrize('raw_text, lowered_text, expected_en_pattern, expected_en_pattern_sum', [('Of', 'of', 'Aa', 'Aa'), ('THE', 'the', 'AAA',... |
class InOutBlock():
def __init__(self, out_size, in_size, output='out', input='in', in_start_index=0, out_start_index=0, out_reverse=False, in_reverse=False):
self.output = Block(var_name=output, start_index=out_start_index, size=out_size, reverse=out_reverse)
self.input = Block(var_name=input, star... |
class SupercommutativeAlgebras(CategoryWithAxiom_over_base_ring):
_base_category_class_and_axiom = (SuperAlgebras, 'Supercommutative')
class SignedTensorProducts(SignedTensorProductsCategory):
_method
def extra_super_categories(self):
return [self.base_category()]
class WithBasis... |
(4, 1, FOptsDir.DOWNLINK, fOptsDownlink)
class DutyCycleReq(FOpt):
_MASK_MAXDCYCLE = 15
def __init__(self, maxDCycle=0, **kwargs):
super().__init__(**kwargs)
self.maxDCycle = maxDCycle
def maxDCycle(self):
return getWithMask(self._raw[0], self._MASK_MAXDCYCLE)
def maxDCycle(self,... |
_PARSING_OUTPUTS.register('parsing_output')
class Parsing_output(nn.Module):
def __init__(self, dim_in):
super(Parsing_output, self).__init__()
num_parsing = cfg.PRCNN.NUM_PARSING
assert ((cfg.PRCNN.RESOLUTION[0] // cfg.PRCNN.ROI_XFORM_RESOLUTION[0]) == (cfg.PRCNN.RESOLUTION[1] // cfg.PRCNN.... |
class StochasticResNet(ResNet):
def __init__(self, Block, layers, filters, num_classes=10, inplanes=None, min_survival_rate=1.0, decay='linear'):
super().__init__(Block, layers, filters, num_classes=num_classes, inplanes=inplanes)
L = sum(layers)
curr = 1
for section_index in range(s... |
class NL2CodeEncoderState():
    """Encoder output bundle: final state, memory, and the source words.

    Attributes are attrs fields; the @attr.s decorator is presumably applied
    to this class outside this view -- confirm.
    """
    state = attr.ib()
    memory = attr.ib()
    words = attr.ib()

    def find_word_occurrences(self, word):
        """Return every index in self.words whose token equals *word*."""
        return [idx for idx, token in enumerate(self.words) if token == word]
def _get_pascalvocpart_metadata(categories):
if (len(categories) == 0):
return {}
id_to_name = {x['id']: x['name'] for x in categories}
thing_dataset_id_to_contiguous_id = {i: i for i in range(len(categories))}
thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
return {'thing_datase... |
def main(args, config, client):
utils.init_distributed_mode(args)
device = torch.device(args.device)
seed = (args.seed + utils.get_rank())
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
config['pretrained'] = args.pretrained
config['w_sp_attn'] ... |
def generate_lapack_pxd(all_sigs):
    """Render the LAPACK .pxd source: preamble plus one declaration per signature."""
    declarations = '\n'.join(pxd_decl(*sig) for sig in all_sigs)
    return lapack_pxd_preamble + declarations
def _convert_train(in_path: str, out_path: str):
with open(in_path) as f, open(out_path, 'w') as f_o:
for s in f:
ss = s.strip()
if ss.startswith('<'):
continue
f_o.write((ss.strip() + '\n')) |
def set_with_path(d, path, value):
    """Assign *value* at the nested location *path* inside mapping *d*.

    Walks every key except the last to reach the parent container, then
    stores *value* under the final key. Intermediate keys must already exist.
    """
    node = d
    for key in path[:-1]:
        node = node[key]
    node[path[-1]] = value
def preprocess_image(image, output_height, output_width, is_training=False, add_image_summaries=True):
if is_training:
return preprocess_for_train(image, output_height, output_width, add_image_summaries=add_image_summaries)
else:
return preprocess_for_eval(image, output_height, output_width, add... |
class BatchIterator(Iterator):
__metaclass__ = ABCMeta
def __init__(self, default_batch_size=20):
self._default_batch_size = default_batch_size
def next_batch(self, k):
pass
def next(self):
try:
return next(self._latest_batch)
except (AttributeError, StopItera... |
_module()
class NightDrivingDataset(CityscapesDataset):
    """Cityscapes-style dataset preset for the Night Driving data: fixes the
    image and annotation filename suffixes and forwards all other options to
    CityscapesDataset."""
    def __init__(self, **kwargs):
        super().__init__(img_suffix='_leftImg8bit.png', seg_map_suffix='_gtCoarse_labelTrainIds.png', **kwargs)
class FairseqCriterion(_Loss):
def __init__(self, args, task):
super().__init__()
self.args = args
self.task = task
self.padding_idx = (task.target_dictionary.pad() if (task.target_dictionary is not None) else (- 100))
def add_args(parser):
pass
def build_criterion(cl... |
def test_halving_random_search_list_of_dicts():
(X, y) = make_classification(n_samples=150, n_features=4, random_state=42)
params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}]
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_ke... |
def bottleneck_block(inputs, filters, is_training, strides, use_projection=False, data_format='channels_last', dropblock_keep_prob=None, dropblock_size=None):
shortcut = inputs
if use_projection:
filters_out = (4 * filters)
if (FLAGS.sk_ratio > 0):
if (strides > 1):
s... |
def resort_batch(samples, batch_size):
    """Reorder *samples* with a transpose-style index shuffle.

    Item i moves to position ``i // batch_size + (i % batch_size) * batch_size``,
    interleaving consecutive batches.

    Args:
        samples: sequence to reorder.
        batch_size: batch width used for the interleaving.

    Returns:
        A new list with the reordered samples.
    """
    # Integer `//` replaces floor(i / batch_size): identical for non-negative
    # ints, exact for arbitrarily large i where float division loses
    # precision, and it drops the math.floor dependency.
    sorted_index = [(i // batch_size) + (i % batch_size) * batch_size
                    for i in range(len(samples))]
    return [samples[i] for i in sorted_index]
def test_extract_nodes_nested():
class OuterModel(optplan.ProblemGraphNode):
type = types.StringType(default='Model')
value = optplan.ReferenceType(optplan.ProblemGraphNode)
class InnerModel(optplan.ProblemGraphNode):
type = types.StringType(default='Model2')
value = optplan.Refe... |
class HTTPProxyAuth(HTTPBasicAuth):
    """Basic-auth variant that authenticates against a proxy rather than the
    origin server: it sets Proxy-Authorization instead of Authorization."""

    def __call__(self, r):
        """Attach the proxy credentials to the outgoing request and return it."""
        credentials = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = credentials
        return r
class TestMRecordsImport(object):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array([b'one', b'two', b'three'], mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], d... |
class h_swish(nn.Module):
    """Hard-swish activation: x * h_sigmoid(x) (MobileNetV3-style gate)."""

    def __init__(self, inplace=True):
        super().__init__()
        # h_sigmoid is defined elsewhere in this file.
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        gate = self.sigmoid(x)
        return x * gate
def max_backward(grad_inputs, inputs, input_shapes, outputs, output_shapes, axes=None, keep_dims=False, with_index=False, only_index=False):
dy = grad_inputs[0]
x0 = inputs[0]
y0 = outputs[0]
if keep_dims:
y0 = F.broadcast(y0, x0.shape)
dy = F.broadcast(dy, x0.shape)
else:
ax... |
def png(x, filename, density=150, debug=False, do_in_background=False, tiny=False, engine=None):
import sage.plot.all
if sage.plot.graphics.is_Graphics(x):
x.save(filename)
return
s = _latex_file_([x], math_left='$\\displaystyle', math_right='$', title='', debug=debug, tiny=tiny, extra_pream... |
def constant_pad_nd(g, input, padding, value):
mode = 'constant'
try:
value = sym_help._get_const(value, 'f', 'value')
except Exception:
return sym_help._onnx_opset_unsupported_detailed('Pad', 9, 11, 'The value for the padding must be constant')
padding = _convert_padding_node(padding)
... |
def _uncorrelated_location_entropy_individual(traj, normalize=True):
n = len(traj)
probs = [((1.0 * len(group)) / n) for group in traj.groupby(by=constants.UID).groups.values()]
entropy = stats.entropy(probs)
if normalize:
n_unique_users = len(traj[constants.UID].unique())
if (n_unique_u... |
def safety_scores(method='Salesforce/safety-flan-t5-base', data=[], global_knowledge='', batch_size=8, use_cuda=False):
meta_data = {}
scores = []
data = example_format_checker(data)
if ('Salesforce/safety-flan-t5' in method):
from .classifier import safety_generation
(scores, prediction... |
_model
def pretrain_videomae_teacher_huge_patch16_224(pretrained=False, **kwargs):
model = PretrainVideoTransformerTeacher(patch_size=16, encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_num_classes=0, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-06), **kwargs)
mode... |
def coverage(gen_mapping, gt_mapping, type_ids=None):
coverages = []
if type_ids:
type_ids = set([str(int(id.split('.')[0])) for id in type_ids])
for (id, (gen_before, gen_after)) in gen_mapping.items():
gen_before = (gen_before / gen_before.sum())
gen_after = (gen_after / gen_after.... |
def test_BitMaskedArray_NumpyArray():
a = ak.contents.bitmaskedarray.BitMaskedArray(ak.index.Index(np.packbits(np.array([1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1], dtype=np.uint8))), ak.contents.numpyarray.NumpyArray(np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6])), valid_when=True, le... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.