code stringlengths 101 5.91M |
|---|
def prepare_emovdb(data_folder, save_json, seed=12):
random.seed(seed)
if skip(save_json):
logger.info('Preparation completed in previous run, skipping.')
return
logger.info('Converting format from double to int16 ...')
all_paths = Path(data_folder).rglob('*.wav')
for repo in repos:
... |
(Output('data-stats-table', 'children'), Output('data-state', 'data'), Output('data-table', 'children'), Output('data-plots', 'children'), Output('data-exception-modal', 'is_open'), Output('data-exception-modal-content', 'children'), [Input('data-btn', 'n_clicks'), Input('data-exception-modal-close', 'n_clicks')], [Sta... |
def load_checkpoint(cfg: CheckpointConfig, trainer, **passthrough_args):
reset_optimizer = cfg.reset_optimizer
reset_lr_scheduler = cfg.reset_lr_scheduler
optimizer_overrides = ast.literal_eval(cfg.optimizer_overrides)
reset_meters = cfg.reset_meters
reset_dataloader = cfg.reset_dataloader
if ((... |
def test_sr_folder_multiple_gt_dataset():
root_path = ((Path(__file__).parent.parent.parent / 'data') / 'test_multiple_gt')
test_dataset = SRFolderMultipleGTDataset(lq_folder=root_path, gt_folder=root_path, pipeline=[], scale=4, test_mode=True)
assert (test_dataset.data_infos == [dict(lq_path=str(root_path)... |
def pick_random_block():
    """Choose a random person from PERSONS and return one of their activity blocks."""
    person = random.choice(PERSONS)
    return pick_random_activity_block(person)
def _imsave_before(img, channel_first, auto_scale):
if (not isinstance(img, np.ndarray)):
raise ValueError('the input img for imsave must be numpy.ndarray.')
if (len(img.shape) not in [2, 3]):
raise ValueError('Invalid dimension size of input image. (dims: {})'.format(len(img.shape)))
if (im... |
def read(rel_path):
    """Return the text of *rel_path*, resolved relative to this file's directory.

    Uses an explicit UTF-8 encoding so the result is independent of the
    platform locale — ``codecs.open`` without an ``encoding`` argument
    degrades to plain ``open`` and reads in the locale's default encoding.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, rel_path), 'r', encoding='utf-8') as fp:
        return fp.read()
def debias_by_specific_directions(directions: List[np.ndarray], input_dim: int):
rowspace_projections = []
for v in directions:
P_v = get_rowspace_projection(np.array([v]))
rowspace_projections.append(P_v)
P = get_projection_to_intersection_of_nullspaces(rowspace_projections, input_dim)
... |
def target_class_sampler(dataset, target_class):
try:
targets = dataset.data.targets
except:
targets = dataset.labels
weights = [(True if (target == target_class) else False) for target in targets]
num_samples = sum(weights)
weights = torch.DoubleTensor(weights)
sampler = torch.u... |
def options(opt):
    """Register the ``--controller`` command-line option on *opt*.

    NOTE(review): presumably a waf ``options`` hook — *opt* only needs an
    ``add_option`` method compatible with optparse.
    """
    controller_kwargs = dict(type='string', help='path to hexapod_controller', dest='controller')
    opt.add_option('--controller', **controller_kwargs)
def otsu(image, footprint, out=None, mask=None, shift_x=False, shift_y=False, shift_z=False):
np_image = np.asanyarray(image)
if (np_image.ndim == 2):
return _apply_scalar_per_pixel(generic_cy._otsu, image, footprint, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y)
elif (np_image.ndim == 3):
... |
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('inshape', [(1, 2, 3), (2, 4, 6), (2, 2, 4, 6), (2, 2, 2, 4, 6)])
.parametrize('kernel', [(1, 1), (2, 3), (2, 1, 2)])
.parametrize('channel_last', [False, True])
def test_unpooling_forward_backward(seed, inshape, channel_last, kernel, ctx, fu... |
def _make_go_dwg(dwg, state: GoState, config):
GRID_SIZE = config['GRID_SIZE']
BOARD_SIZE = config['BOARD_WIDTH']
color_set = config['COLOR_SET']
dwg.add(dwg.rect((0, 0), ((BOARD_SIZE * GRID_SIZE), (BOARD_SIZE * GRID_SIZE)), fill=color_set.background_color))
board_g = dwg.g()
hlines = board_g.ad... |
def get_args():
    """Parse the bucket-name / local-source command-line arguments."""
    import argparse
    parser = argparse.ArgumentParser()
    # (short flag, long flag) pairs with their defaults, registered in order.
    for flags, default in (
        (('-b', '--bucket-name'), 'sky-play-bucket'),
        (('-l', '--local-source'), '/home/romilb/tmp'),
    ):
        parser.add_argument(*flags, default=default)
    return parser.parse_args()
class ConvDataGenerator(ConvGenerator):
    """Convolutional generator with 3 output channels squashed into (0, 1)."""

    def __init__(self, latent_size=128):
        # Set before delegating upward — presumably read by the parent's
        # __init__ when it builds the layers; TODO confirm in ConvGenerator.
        self.out_channels = 3
        super().__init__(latent_size=latent_size)
        # Map raw outputs into (0, 1) with a sigmoid.
        self.transform = (lambda t: torch.sigmoid(t))
def fallback_cmd_gcp_cp(src_path: str, dest_path: str, recursive: bool) -> str:
    """Build the gsutil copy command used as a fallback for GCP transfers.

    Adds ``-r`` when *recursive* is requested.
    """
    if recursive:
        return f'gsutil -m cp -r {src_path} {dest_path}'
    return f'gsutil -m cp {src_path} {dest_path}'
class _InternalRPCPickler():
def __init__(self):
self._dispatch_table = copyreg.dispatch_table.copy()
self._dispatch_table[torch.Tensor] = self._tensor_reducer
def _tensor_receiver(cls, tensor_index):
global _thread_local_tensor_tables
return _thread_local_tensor_tables.recv_tabl... |
def weight_init(module):
    """Initialize Dense-layer parameters in place (applied per module).

    NOTE(review): this block mixes framework idioms — verify it actually
    initializes anything before relying on it.
    """
    if isinstance(module, keras.layers.Dense):
        # NOTE(review): keras.initializers.glorot_uniform(...) is a factory that
        # returns an initializer object; passing module.weight here does not
        # obviously write into the weights — confirm this call has any effect.
        keras.initializers.glorot_uniform(module.weight)
        # NOTE(review): .data.zero_() is a PyTorch tensor API, not Keras — this
        # only works if module.bias is in fact a torch tensor; verify upstream.
        module.bias.data.zero_()
class ShenNeumann(CompositeBase):
def __init__(self, N, quad='LG', bc=(0, 0), domain=((- 1), 1), padding_factor=1, dealias_direct=False, dtype=float, coordinates=None, **kw):
if isinstance(bc, (tuple, list)):
bc = BoundaryConditions({'left': {'N': bc[0]}, 'right': {'N': bc[1]}}, domain=domain)
... |
def run_return_code(command):
    """Run *command* through the shell and return its exit status.

    NOTE(review): shell=True executes the string via the system shell — only
    safe for trusted command strings.
    """
    import subprocess
    completed = subprocess.run(command, shell=True)
    return completed.returncode
def parse(exit_code, log, output):
(findings, infos) = ([], set())
(errors, fails) = sb.parse_utils.errors_fails(exit_code, log)
if any((('Invalid solc compilation' in line) for line in log)):
errors.add('solc error')
try:
with io.BytesIO(output) as o, tarfile.open(fileobj=o) as tar:
... |
def main(args):
    """Run the selected machine's test pass over the HCOCO validation split."""
    # Only a validation loader is built; the train slot of the pair stays None.
    val_loader = torch.utils.data.DataLoader(
        datasets.HCOCO('val', args),
        batch_size=args.test_batch,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=False,
    )
    loaders = (None, val_loader)
    # Look the machine class up by name and instantiate it with the loaders.
    machine = machines.__dict__[args.machine](datasets=loaders, args=args)
    machine.test()
class StatefulTransformerWrapper(Generic[(TTransformerImpl, TTransformerConfig)]):
_algo: 'TransformerAlgoBase[TTransformerImpl, TTransformerConfig]'
_target_return: float
_action_sampler: TransformerActionSampler
_return_rest: float
_observations: Deque[Observation]
_actions: Deque[Union[(NDArr... |
class ClasTrainer(DefaultTrainer):
idx2class = None
def build_train_loader(cls, cfg):
logger = logging.getLogger('fastreid.clas_dataset')
logger.info('Prepare training set')
train_items = list()
for d in cfg.DATASETS.NAMES:
data = DATASET_REGISTRY.get(d)(root=_root)
... |
class TFMPNetMainLayer(metaclass=DummyObject):
    """Import-time placeholder for the TensorFlow MPNet main layer.

    NOTE(review): instantiating this raises a missing-backend error via
    requires_backends when TensorFlow is absent; the DummyObject metaclass
    behavior is defined elsewhere — confirm there.
    """
    # Backend(s) this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
class BridgeTest(tf.test.TestCase):
def setUp(self):
super(BridgeTest, self).setUp()
self.batch_size = 4
self.encoder_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
self.decoder_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(16... |
def get_attribute(node, attr_name, default_value=None):
    """Return the decoded value of attribute *attr_name* on *node*.

    Falls back to *default_value* when the node carries no such attribute.
    Decoding goes through ``helper.get_attribute_value`` (presumably the ONNX
    helper — confirm against the file's imports).
    """
    # Stop at the first match instead of materializing a list of all matches.
    match = next((attr for attr in node.attribute if attr.name == attr_name), None)
    if match is not None:
        return helper.get_attribute_value(match)
    return default_value
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT image models."""

    # Minimum torch version required for this export path.
    torch_onnx_minimum_version = version.parse('1.11')

    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
        """Describe the dynamic axes of the single ``pixel_values`` input."""
        dynamic_axes = {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}
        return OrderedDict(pixel_values=dynamic_axes)

    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
def train_wrapper_mnist(_paramsList, _GPU_ID, _DO_PRINT=True):
for (pIdx, params) in enumerate(_paramsList):
(method, useMixup, outlierRatio, errType) = (params[0], params[1], params[2], params[3])
(xdim, ydim, hdims, filterSizes, max_pools, feat_dim) = ([28, 28, 1], 10, [64, 64], [3, 3], [2, 2], 25... |
def test_shared_grads(with_shapes, create_model, conv_blob, last_out_blob, data_blob='gpu_0/data', label_blob='gpu_0/label', num_labels=1000):
model = cnn.CNNModelHelper(order='NCHW', name='test', cudnn_exhaustive_search=True)
with core.NameScope('gpu_0'):
data = model.net.AddExternalInput(data_blob)
... |
def test_function():
with tf.Graph().as_default():
x = tf.compat.v1.placeholder(tf.int32, (), name='x')
y = tf.compat.v1.placeholder(tf.int32, (), name='y')
z = ((3 * x) + (2 * y))
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize... |
def _is_cuda_file(path: str) -> bool:
    """Return True when *path* has a CUDA source extension (plus .hip under HIP)."""
    extensions = {'.cu', '.cuh'}
    if IS_HIP_EXTENSION:
        extensions.add('.hip')
    return os.path.splitext(path)[1] in extensions
class TestBlackman():
def test_basic(self):
assert_allclose(windows.blackman(6, sym=False), [0, 0.13, 0.63, 1.0, 0.63, 0.13], atol=1e-14)
assert_allclose(windows.blackman(7, sym=False), [0, 0., 0., 0., 0., 0., 0.], atol=1e-08)
assert_allclose(windows.blackman(6), [0, 0., 0., 0., 0., 0], atol... |
.skipif((platform.system() == 'Darwin'), reason='Prone to error when run with numpy/f2py/tests on mac os, but not when run in isolation')
class TestCReturnReal(TestReturnReal):
suffix = '.pyf'
module_name = 'c_ext_return_real'
code = "\npython module c_ext_return_real\nusercode '''\nfloat t4(float value) { ... |
class RandomOffsetPlayerSpaceInvadersWorld(SpaceInvadersWorld):
offset_range_start = 25
offset_range_end = 125
def initial_shield_configuration(self):
return [{'health': 20, 'position': ((self._width // 4), 200)}, {'health': 20, 'position': (((2 * self._width) // 4), 200)}, {'health': 20, 'position'... |
def conv_flops_counter_hook(conv_module, input, output):
input = input[0]
batch_size = input.shape[0]
(output_height, output_width) = output.shape[2:]
(kernel_height, kernel_width) = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
conv_pe... |
def test_clip():
default_clipid = '[b827ebf3744c][2020-08-19T22-46-04Z][manual][---][4edbade2d41d5f80e324ee4f10d401c0][]-135'
dataset = singapura.Dataset(TEST_DATA_HOME)
clip = dataset.clip(default_clipid)
expected_attributes = {'audio_path': os.path.join(TEST_DATA_HOME, 'labelled/', '2020-08-19/', '[b8... |
def gaussian_likelihood(x, mu, log_std):
    """Log-likelihood of *x* under a diagonal Gaussian N(mu, exp(log_std)^2).

    Summed over axis 1; EPS guards against a zero standard deviation.
    """
    std = tf.exp(log_std) + EPS
    z = (x - mu) / std
    per_dim = (-0.5) * ((z ** 2) + (2 * log_std) + np.log(2 * np.pi))
    return tf.reduce_sum(input_tensor=per_dim, axis=1)
def get_images_info(data, image_dir, record_file):
with tqdm(total=len(data)) as pbar:
with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
chunk_size = min(50000, (args.max_workers * 500))
for i in range(0, len(data), chunk_size):
fut... |
class ConcurrentBatchIterator(IBatchIterator):
def __init__(self, batch_iter, max_queue_size=10, num_threads=5, log_queue=20, name=None):
super(ConcurrentBatchIterator, self).__init__()
self.max_queue_size = max_queue_size
self.num_threads = num_threads
self.q = queue.Queue(maxsize=m... |
class QGPC(GaussianProcessClassifier):
def __init__(self, quantum_kernel: KernelMatrixBase, **kwargs) -> None:
self._quantum_kernel = quantum_kernel
quantum_kernel_update_params = (self.quantum_kernel.get_params().keys() & kwargs.keys())
if quantum_kernel_update_params:
self.quan... |
def is_valid(column_names, data, increment, exclusion_column):
    """Row is valid when its first column is a multiple of *increment*,
    or when its exclusion column is positive.

    Uses ``|`` so it also works element-wise on pandas/NumPy columns.
    """
    primary = column_names[0]
    divisible = (data[primary] % increment) == 0
    excluded = data[exclusion_column] > 0
    return divisible | excluded
def cifar100(cuda=True, model_root=None):
    """Build a CIFAR-100 model from the checkpoint under *model_root*.

    Returns ``(model, dataset.get100, False)``; the trailing flag is passed
    through to callers unchanged (its meaning is not evident from this block).
    """
    print('Building and initializing cifar100 parameters')
    from cifar import model, dataset
    checkpoint = os.path.join(model_root, 'cifar100.pth')
    net = model.cifar100(128, pretrained=checkpoint)
    if cuda:
        net = net.cuda()
    return (net, dataset.get100, False)
class Tactic():
def __init__(self, tactic, ctx=None):
self.ctx = _get_ctx(ctx)
self.tactic = None
if isinstance(tactic, TacticObj):
self.tactic = tactic
else:
if z3_debug():
_z3_assert(isinstance(tactic, str), 'tactic name expected')
... |
(config_path='configs/', config_name='convert.yaml')
def convert(config: DictConfig):
assert (config.get('convert_to') in ['pytorch', 'torchscript', 'onnx', 'tensorrt']), 'Please Choose one of [pytorch, torchscript, onnx, tensorrt]'
log.info(f'Instantiating model <{config.model._target_}>')
model: Lightning... |
class SkyplaneClient():
def __init__(self, aws_config: Optional['AWSConfig']=None, azure_config: Optional['AzureConfig']=None, gcp_config: Optional['GCPConfig']=None, ibmcloud_config: Optional['IBMCloudConfig']=None, transfer_config: Optional[TransferConfig]=None, log_dir: Optional[str]=None):
self.clientid... |
class WFRadiationMeshHvy(RadiationField):
glossary_name = 'params/Mesh/hvy'
def __init__(self, wf):
super(WFRadiationMeshHvy, self).__init__(wf)
self.attributes.update({'units': '-', 'limits': '[2:LONG_MAX]', 'alias': ''})
def value(self):
return self._wf._srwl_wf.mesh.hvy
def va... |
def test_distinct_generator(mock_database):
generator = DistinctGenerator(mock_database)
table_name = 'example_table'
with patch.object(generator, '_sample_cat_num_cols', return_value=([], ['col1', 'col2'], [])):
generated_sql = generator.sql_generate(table_name)
assert ('DISTINCT-SINGLE' in gen... |
_quantizer(quantization_target=QuantizationTarget.Weights, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC])
class IdentityWeightsQuantizer(BaseKerasTrainableQuantizer):
def __init__(self, quantization_config: TrainableQuantizerWeightsConfig):
super().__init__(quantization... |
class RPNModule(torch.nn.Module):
def __init__(self):
super(RPNModule, self).__init__()
anchor_generator = make_anchor_generator()
in_channels = cfg.MODEL.BACKBONE.OUT_CHANNELS
rpn_head = registry.RPN_HEADS[cfg.MODEL.RPN.RPN_HEAD]
head = rpn_head(in_channels, anchor_generator... |
def crosscorr_freq(u, v, model, freq=None, dft_sub=None, **kwargs):
time = model.grid.time_dim
dt = time.spacing
(tsave, factor) = sub_time(time, dft_sub)
expr = 0
fdim = as_tuple(u)[0][0].dimensions[0]
(f, nfreq) = frequencies(freq, fdim=fdim)
omega_t = (((((2 * np.pi) * f) * tsave) * facto... |
def main():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--en2fr', required=True, help='path to en2fr model')
parser.add_argument('--fr2en', required=True, help='path to fr2en mixture of experts model')
parser.add_argument('--user-dir', help='path to fairseq examples/translation... |
def train(cluster_pairs, model, optimizer, loss_function, device, topic_docs, epoch, topics_counter, topics_num, config_dict, is_event, other_clusters):
batch_size = config_dict['batch_size']
mode = ('Event' if is_event else 'Entity')
retain_graph = False
epochs = config_dict['regressor_epochs']
ran... |
class TimeIt():
print_output = True
last_parent = None
level = (- 1)
def __init__(self, s):
self.s = s
self.t0 = None
self.t1 = None
self.outputs = []
self.parent = None
def __enter__(self):
self.t0 = time.time()
self.parent = TimeIt.last_paren... |
class DatasetReader(Registrable):
def read(self, file_path: str) -> Dataset:
raise NotImplementedError
def text_to_instance(self, *inputs) -> Instance:
raise NotImplementedError
def from_params(cls, params: Params) -> 'DatasetReader':
choice = params.pop_choice('type', cls.list_avail... |
def _format_template_tags(raw_text: str) -> str:
return re.sub('{{([^{}]+)}}', '${\\1}', raw_text) |
class Partition3(nn.Module):
LAYER_SCOPES = ['T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[12]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[13]', 'T5ForConditionalGeneration/T5Stack[encoder]/ModuleList[block]/T5Block[14]', 'T5ForConditionalGeneration/T5Stack[enco... |
class network_29layers(nn.Module):
def __init__(self, block, layers, num_classes=79077):
super(network_29layers, self).__init__()
self.conv1 = mfm(1, 48, 5, 1, 2)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
self.block1 = self._make_layer(block, layers[0], 48, 4... |
def _narrow_to_fpn_roi_levels(blobs, spatial_scales):
    """Keep only the trailing FPN levels that the RoI heads consume.

    Requires the RPN level range to cover the RoI range, then slices the last
    ``ROI_MAX_LEVEL - ROI_MIN_LEVEL + 1`` entries of both inputs.
    """
    assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
    assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
    roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1
    return (blobs[-roi_levels:], spatial_scales[-roi_levels:])
.parametrize('observation_shape', [(100,), (4, 84, 84)])
.parametrize('action_size', [2])
.parametrize('episode_length', [10])
.parametrize('n_trials', [10])
def test_evaluate_on_environment(observation_shape: Sequence[int], action_size: int, episode_length: int, n_trials: int) -> None:
shape = (n_trials, (episode_... |
class SafeEval(object):
def __init__(self):
warnings.warn('SafeEval is deprecated in 1.10 and will be removed.', DeprecationWarning, stacklevel=2)
def visit(self, node):
cls = node.__class__
meth = getattr(self, ('visit' + cls.__name__), self.default)
return meth(node)
def de... |
def save_coverage():
global MAP
ret = {}
ret['coverage'] = {}
ret['unique_bugs'] = {}
ret['unique_bugs_ip'] = {}
ret['unique_bugs_trace'] = {}
ret['unique_bugs_trace3'] = {}
for fuzzer in get_all_names():
ret['coverage'][fuzzer] = int(FUZZER_BITMAP[fuzzer].count())
ret['u... |
def main(params):
backbone = get_backbone_class(params.backbone)()
model = get_model_class(params.model)(backbone, params)
output_dir = get_output_directory(params)
(labeled_source_loader, unlabeled_source_loader, unlabeled_target_loader) = _get_dataloaders(params)
params_path = get_pretrain_params_... |
def test_autoencoder(model, true_dag_adj, train_loader, test_loader, result_path, seed_dataset):
model.eval()
if (model.pd_initial_adj is None):
prob_mask = model.probabilistic_dag.get_prob_mask()
else:
prob_mask = model.pd_initial_adj
metrics = {'undirected_edge_auroc': edge_auroc(pred_... |
def test_many_path_parameters_allow_partial_negation(testdir):
testdir.make_test('\nschema = schemathesis.from_dict(\n raw_schema,\n method="GET",\n endpoint="/pets/{key}/{value}/",\n data_generation_methods=DataGenerationMethod.negative\n)\n\()\(max_examples=1)\ndef test_(request, case):\n request.c... |
def build_optimizer(model, optim='adam', lr=0.0003, weight_decay=0.0005, momentum=0.9, sgd_dampening=0, sgd_nesterov=False, rmsprop_alpha=0.99, adam_beta1=0.9, adam_beta2=0.99, staged_lr=False, new_layers='', base_lr_mult=0.1):
if (optim not in AVAI_OPTIMS):
raise ValueError('Unsupported optim: {}. Must be ... |
def mp_loss_batch(model: nn.Module, xb: Tensor, yb: Tensor, loss_func: OptLossFunc=None, opt: OptOptimizer=None, cb_handler: Optional[CallbackHandler]=None) -> Tuple[Union[(Tensor, int, float, str)]]:
cb_handler = ifnone(cb_handler, CallbackHandler())
if (not is_listy(xb)):
xb = [xb]
if (not is_list... |
def train_gmm(opt, train_loader, model, board):
model.cuda()
model.train()
criterionL1 = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=(lambda step: (1.0 - (max(0, (step - opt.keep_step))... |
('This function has been renamed computeJointJacobian and will be removed in future releases of Pinocchio. Please change for new computeJointJacobian.')
def jointJacobian(model, data, q, jointId):
    """Deprecated alias kept for backward compatibility.

    Delegates directly to ``pin.computeJointJacobian`` (see the deprecation
    notice attached above).
    """
    jacobian = pin.computeJointJacobian(model, data, q, jointId)
    return jacobian
def get_dataset(args):
trans = (lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise]))
if (args.data == 'mnist'):
im_dim = 1
im_size = (28 if (args.imagesize is None) else args.imagesize)
train_set = dset.MNIST(root='./data', train=True, transform=trans(i... |
class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
def from_config(cls, config):
warnings.warn('The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models and `... |
def val_loc_one_epoch(val_loader, model, device, criterion, writer, cfg, update_val_step):
losses = AverageMeter()
cls_top1 = []
cls_top5 = []
loc_top1 = []
loc_top5 = []
loc_gt_known = []
top1_loc_right = []
top1_loc_cls = []
top1_loc_mins = []
top1_loc_part = []
top1_loc_mo... |
.parametrize('step', [0, 1, 42])
def test_set_get_step_number(step: int) -> None:
    """Round-trip: a set step number reads back, then the global is reset to 0."""
    set_step_number(step)
    observed = get_step_number()
    assert observed == step
    # Restore the global default so other tests are unaffected.
    set_step_number(0)
    assert get_step_number() == 0
def run_forward(self, input, num_forwards=10):
    """Return the mean wall-clock time (seconds) of one forward pass.

    Runs ``self.forward(*input)`` *num_forwards* times inside a Timer and
    synchronizes CUDA so the measurement covers the GPU work. Returns 0.0
    when *num_forwards* is not positive.  (``input`` shadows the builtin but
    is kept to preserve the public signature.)
    """
    if num_forwards <= 0:
        return 0.0
    with Timer() as t:
        for _ in range(num_forwards):
            self.forward(*input)
        torch.cuda.synchronize()
    # BUG FIX: the original returned int((t.time * .0) / num_forwards), which
    # is always 0 — `* .0` zeroes the measurement and int() truncates.
    # Return the average elapsed time per forward as a float instead.
    return t.time / num_forwards
def rampdown(epoch, rampdown_length, total_epoch):
    """Gaussian ramp-down factor for the final *rampdown_length* epochs.

    Returns 1.0 before the ramp starts, then exp(-ep^2 / rampdown_length)
    with ep growing by 0.5 per epoch past the ramp start.
    """
    ramp_start = total_epoch - rampdown_length
    if epoch < ramp_start:
        return 1.0
    ep = 0.5 * (epoch - ramp_start)
    return math.exp(-(ep * ep) / rampdown_length)
def chunker(iterable, chunksize):
    """Yield consecutive slices of *iterable* of length *chunksize*.

    The final chunk may be shorter; *iterable* must support len() and slicing.
    """
    start = 0
    total = len(iterable)
    while start < total:
        yield iterable[start:start + chunksize]
        start += chunksize
def main():
(args, cfg) = parse_config()
if (args.launcher == 'none'):
dist_train = False
total_gpus = 1
else:
(total_gpus, cfg.LOCAL_RANK) = getattr(common_utils, ('init_dist_%s' % args.launcher))(args.tcp_port, args.local_rank, backend='nccl')
dist_train = False
if (arg... |
class RolloutWorkerManager(Manager):
def __init__(self, experiment_tag: str, stopping_conditions: Dict[(str, Any)], num_worker: int, agent_mapping_func: Callable, rollout_config: Dict[(str, Any)], env_desc: Dict[(str, Any)], log_dir: str, resource_config: Dict[(str, Any)]=None, verbose: bool=True):
super().... |
def parse_nm(nm_output):
data = DATA_RE.findall(nm_output)
func = FUNC_RE.findall(nm_output)
flist = []
for sym in data:
if ((sym in func) and ((sym[:2] == 'Py') or (sym[:3] == '_Py') or (sym[:4] == 'init'))):
flist.append(sym)
dlist = []
for sym in data:
if ((sym not... |
def test_fit_classifier():
    """The base class leaves _fit_classifier abstract; calling it must raise."""
    expected_error = NotImplementedError
    with pytest.raises(expected_error):
        HierarchicalClassifier._fit_classifier(None, None)
class Warmup(lr_scheduler._LRScheduler):
def __init__(self, optimizer, model_dim, factor=1, warmup=16000):
self.optimizer = optimizer
self.model_dim = model_dim
self.factor = factor
self.warmup = warmup
self.iteration = 0
super().__init__(optimizer, (- 1))
def get... |
def find_mincost_depth(cost_volume, depth_hypos):
    """Pick, per position, the depth hypothesis at the best-scoring volume entry.

    NOTE(review): despite the name, this takes the ARGMAX along dim=1, so it
    assumes *cost_volume* is really a similarity/probability volume — confirm
    the convention at the call site.
    """
    best_idx = torch.argmax(cost_volume, dim=1, keepdim=True)
    selected_depth = torch.gather(depth_hypos, dim=1, index=best_idx)
    return selected_depth
class CBR(chainer.Chain):
def __init__(self, ch0, ch11, ksize=3, pad=1, norm='instance', sample='down', activation='relu', dropout=False, equalised=False, separable=False, senet=False):
super(CBR, self).__init__()
self.activation = activation_func[activation]
self.dropout = dropout
s... |
def performance_metrics(df, sample_target=0):
n_targets = len(np.unique(df['target']))
accuracy = metrics.balanced_accuracy_score(df['target'].astype(int), df['predicted_target'])
accuracy = round((accuracy * 100), 2)
if (n_targets == 2):
auc = round(metrics.roc_auc_score(df['target'], df['class... |
class StructuredGraphBuilder(graph_builder.GreedyParser):
def __init__(self, *args, **kwargs):
self._beam_size = kwargs.pop('beam_size', 10)
self._max_steps = kwargs.pop('max_steps', 25)
super(StructuredGraphBuilder, self).__init__(*args, **kwargs)
def _AddBeamReader(self, task_context, ... |
.node
class Axpy(dace.sdfg.nodes.LibraryNode):
implementations = {'pure': ExpandAxpyVectorized, 'fpga': ExpandAxpyFpga}
default_implementation = None
a = dace.properties.SymbolicProperty(allow_none=False, default=dace.symbolic.symbol('a'))
n = dace.properties.SymbolicProperty(allow_none=False, default=d... |
_LAYERS.register_module('ConvWS')
class ConvWS2d(nn.Conv2d):
def __init__(self, in_channels: int, out_channels: int, kernel_size: Union[(int, Tuple[(int, int)])], stride: Union[(int, Tuple[(int, int)])]=1, padding: Union[(int, Tuple[(int, int)])]=0, dilation: Union[(int, Tuple[(int, int)])]=1, groups: int=1, bias: ... |
_utils.test()
def test_python_access():
    """Values written into a field from Python must read back unchanged."""
    n = 128
    field = ti.field(ti.i32, shape=n)
    writes = {3: 123, 4: 456}
    for idx, val in writes.items():
        field[idx] = val
    for idx, val in writes.items():
        assert field[idx] == val
def _internal_eval(model, global_step, sess, iterator, iterator_feed_dict, summary_writer, label):
sess.run(iterator.initializer, feed_dict=iterator_feed_dict)
ppl = model_helper.compute_perplexity(model, sess, label)
if summary_writer:
utils.add_summary(summary_writer, global_step, ('%s_ppl' % labe... |
class Service(ABC):
def get_general_info(self) -> GeneralInfo:
pass
def get_window_service_info(self, model_name: str) -> WindowServiceInfo:
pass
def expand_query(self, query: Query) -> QueryResult:
pass
def make_request(self, auth: Authentication, request: Request) -> RequestRes... |
class CMTFNet(nn.Module):
def __init__(self, encode_channels=[256, 512, 1024, 2048], decode_channels=512, dropout=0.1, num_classes=6, backbone=ResNet50):
super().__init__()
self.backbone = backbone()
self.decoder = Decoder(encode_channels, decode_channels, dropout=dropout, num_classes=num_cl... |
class TestImag(object):
def test_real(self):
y = np.random.rand(10)
assert_array_equal(0, np.imag(y))
y = np.array(1)
out = np.imag(y)
assert_array_equal(0, out)
assert_(isinstance(out, np.ndarray))
y = 1
out = np.imag(y)
assert_equal(0, out)
... |
def build_from_cfg(cfg, registry, default_args=None):
    """Construct an object from *cfg* via MMCV's build_func.

    A ``None`` config short-circuits to ``None``.
    """
    return None if cfg is None else MMCV_MODELS.build_func(cfg, registry, default_args)
class SecondOrderDigitalFilter(nn.Module):
def __init__(self, sample_rate, pole_frequency=None, pole_bandwidth=None, zero_frequency=None, zero_bandwidth=None, **kwargs):
super(SecondOrderDigitalFilter, self).__init__()
def get_filter_coefficients(frequency, bandwidth, sample_rate):
asser... |
def _scope_dict_to_ids(state: 'dace.sdfg.SDFGState', scope_dict: ScopeDictType):
    """Translate a scope dictionary of nodes into one of node ids.

    ``None`` keys/values (the top-level scope) map to -1; everything else is
    resolved through ``state.node_id``.
    """
    def to_id(node):
        return -1 if node is None else state.node_id(node)

    result = {}
    for parent, children in scope_dict.items():
        result[to_id(parent)] = [to_id(child) for child in children]
    return result
class TestArmsAndConfigurationPaths(TestCore):
def setUp(self):
self.pyrep = PyRep()
self.pyrep.launch(path.join(ASSET_DIR, 'test_scene_robots.ttt'), headless=True)
self.pyrep.step()
self.pyrep.start()
def test_get_arm(self):
for (arm_name, arm_type) in ARMS:
... |
def build_eva_model_and_transforms(model_name: str, pretrained: str='', precision: str='fp32', device: torch.device=torch.device('cpu'), force_quick_gelu: bool=False, image_mean: Optional[Tuple[(float, ...)]]=None, image_std: Optional[Tuple[(float, ...)]]=None):
model = create_model(model_name, pretrained, precisio... |
def ser_exc_info(exception=None) -> ExceptionInfo:
if (exception is None):
(exc_type, exc_value, exc_traceback) = sys.exc_info()
tb = tblib.Traceback(exc_traceback)
return ExceptionInfo(exc_value, tb)
else:
tb = exception.__traceback__
tb = tblib.Traceback(tb)
ret... |
def add_joint_connections_to_image(img_demo, joints, joint_pairs, joint_names, flag_only_draw_sure=False):
for joint_pair in joint_pairs:
ind_1 = joint_names.index(joint_pair[0])
ind_2 = joint_names.index(joint_pair[1])
if (flag_color_sticks is True):
color = find_color_scalar(jo... |
def model_fn_decorator():
ModelReturn = namedtuple('ModelReturn', ['loss', 'tb_dict', 'disp_dict'])
def model_func(model, batch_dict):
load_data_to_gpu(batch_dict)
(ret_dict, tb_dict, disp_dict) = model(batch_dict)
loss = ret_dict['loss'].mean()
if hasattr(model, 'update_global_s... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.