code stringlengths 101 5.91M |
|---|
class PPAS(PAS):
    """PAS optimizer variant that projects each updated weight back onto a
    ball of radius ``b`` (initialised from ``c``) centred at the initial
    weights."""
    def __init__(self, trainable_weights_shapes, lr=0.01, c=1.0, **kwargs):
        super(PPAS, self).__init__(trainable_weights_shapes, lr, c, **kwargs)
        # NOTE(review): dumps every local (including kwargs and self) onto
        # the instance dict -- intentional in the original, but noisy.
        self.__dict__.update(locals())
        self.b = K.variable(c)  # projection radius, stored as a backend variable
    # NOTE(review): bare name below looks like a decorator whose '@' and
    # module prefix were lost in extraction -- confirm against the source.
    _get_updates_support
    def get_updates(self, loss, params, learning_rate_multipliers):
        """Build the update ops: a loss-scaled gradient step followed by a
        projection onto the sphere of radius ``b`` around the initial
        weights returned by ``get_weights()``."""
        grads = self.get_gradients(loss, params)
        lr = self.lr
        weights_init = self.get_weights()
        l = self.loss_value
        b = self.b
        for (wk, g, lmul, wt) in zip(params, grads, learning_rate_multipliers, weights_init):
            # Gradient step scaled by the per-parameter multiplier and loss value.
            new_wk = (wk - (((lr * lmul) * l) * g))
            # Normalise the displacement from the initial weight wt and
            # rescale it to length b (epsilon guards the zero displacement).
            p_new_wk = ((((new_wk - wt) / (K.epsilon() + K.sqrt(K.sum(K.square((new_wk - wt)))))) * b) + wt)
            # NOTE(review): ``constraints`` is not defined in this scope --
            # presumably a module/global mapping param -> constraint fn; confirm.
            if (wk in constraints):
                c = constraints[wk]
                p_new_wk = c(p_new_wk)
            self.updates.append(K.update(wk, p_new_wk))
        return self.updates
    def get_config(self):
        """Serialise the learning rate and projection radius with the base config."""
        config = {'lr': float(K.get_value(self.lr)), 'B': float(K.get_value(self.b))}
        base_config = super(PPAS, self).get_config()
        return dict((list(base_config.items()) + list(config.items())))
def get_configuration_file(configuration_files: List[str]) -> str:
    """Pick the configuration file appropriate for the installed version.

    Scans ``configuration_files`` for names matching the version pattern
    and returns the file whose version tag is the highest one not exceeding
    the current ``__version__``; falls back to the default ``CONFIG_NAME``
    when nothing qualifies.
    """
    # Map version tag -> file name for every name that matches the pattern.
    version_to_file = {}
    for file_name in configuration_files:
        match = _re_configuration_file.search(file_name)
        if match is not None:
            version_to_file[match.groups()[0]] = file_name

    current_version = version.parse(__version__)
    chosen = CONFIG_NAME
    for tag in sorted(version_to_file):
        if version.parse(tag) > current_version:
            break
        chosen = version_to_file[tag]
    return chosen
def get_alpha_and_beta(t, scheduler):
    """Return ``(alpha_cumprod, 1 - alpha_cumprod)`` for timestep ``t``.

    Negative timesteps use the scheduler's final value; whole-number
    timesteps index ``alphas_cumprod`` directly; fractional timesteps mix
    the floor and ceil entries (with the original's weighting).
    """
    if t < 0:
        final_alpha = scheduler.final_alpha_cumprod.item()
        return final_alpha, 1 - final_alpha
    # Whole-number timestep: direct table lookup.
    if t.dtype == torch.long or t == t.long():
        alpha = scheduler.alphas_cumprod[t.long()].item()
        return alpha, 1 - alpha
    # Fractional timestep: blend the two neighbouring table entries.
    lo = t.floor().long()
    hi = t.ceil().long()
    frac = t - lo
    alpha = scheduler.alphas_cumprod[lo] * frac + scheduler.alphas_cumprod[hi] * (1 - frac)
    return alpha.item(), (1 - alpha).item()
def save_checkpoint(sess, checkpoint_dir, saver_op, step):
    """Write a checkpoint with prefix ``step`` under ``checkpoint_dir``.

    Delegates to ``saver_op.save`` with the given global step.
    """
    prefix = os.path.join(checkpoint_dir, 'step')
    saver_op.save(sess, prefix, global_step=step)
class HTTPDLinuxRPO(HTTPD):
    # HTTPD variant pointing at an mtcp application tree.
    def __init__(self) -> None:
        super().__init__()
        # NOTE(review): the line below is corrupted in this copy -- the
        # attribute name is missing and the path string is truncated
        # (it appears to set an app path under '/root/mtcp/apps/...').
        # Restore from the original source before use; as written it is a
        # syntax error.
        self. = '/root/mtcp/apps/lig
def _t2n(x):
if (not isinstance(x, torch.Tensor)):
return x
return x.detach().cpu().numpy() |
def cod(true, pred, pv=None):
    """Coefficient of determination (R^2) of ``pred`` against ``true``.

    ``pv`` is accepted for interface compatibility but unused.
    """
    residual_ss = np.sum((true - pred) ** 2)
    total_ss = np.sum((true - np.mean(true)) ** 2)
    return 1.0 - residual_ss / total_ss
# NOTE(review): the two lines below look like decorators whose leading '@'
# was lost in extraction (a numpy-dispatch registration and a
# high-level-function wrapper) -- confirm against the original source.
_connect.numpy.implements('concatenate')
_level_function()
def concatenate(arrays, axis=0, *, mergebool=True, highlevel=True, behavior=None, attrs=None):
    # Generator-style dispatch body: yields the argument(s) for the
    # decorator machinery to inspect, then computes the real result.
    if (backend_of_obj(arrays, default=None) is not None):
        # Single array-like with a recognised backend: yield as a 1-tuple.
        (yield (arrays,))
    else:
        (yield arrays)
    # NOTE(review): ``return <value>`` inside a generator only sets
    # StopIteration.value -- this relies on the (stripped) decorator to
    # retrieve the result; confirm against the original source.
    return _impl(arrays, axis, mergebool, highlevel, behavior, attrs)
def my_build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
    """Build an optimizer for ``model`` from ``cfg``.

    When ``cfg.SOLVER.OPTIMIZER_CFG`` is non-empty it is evaluated as a
    Python dict, its ``type`` registered, and the custom builder used;
    otherwise the detectron2 default builder is used.
    """
    raw_cfg = cfg.SOLVER.OPTIMIZER_CFG
    if raw_cfg == '':
        return build_optimizer_d2(cfg, model)
    # NOTE: eval() on config-provided text -- only safe for trusted configs.
    optim_cfg = eval(raw_cfg)
    register_optimizer(optim_cfg['type'])
    return build_optimizer(model, optim_cfg)
class Block(nn.Module):
    """Bottleneck residual block built from slimmable (US) conv/norm layers.

    Body: 1x1 reduce -> 3x3 (stride) -> 1x1 expand.  Uses an identity
    shortcut when stride == 1 and inp == outp, otherwise a 1x1 projection
    shortcut, followed by a final ReLU.
    """

    def __init__(self, inp, outp, stride, tmp_ratio=1.0):
        super(Block, self).__init__()
        assert stride in [1, 2]
        midp = make_divisible(outp // 4)
        expand_ratio = 0.25
        body_layers = [
            USConv2d(inp, midp, 1, 1, 0, bias=False, ratio=[tmp_ratio, expand_ratio]),
            USBatchNorm2d(midp, ratio=expand_ratio),
            nn.ReLU(inplace=True),
            USConv2d(midp, midp, 3, stride, 1, bias=False, ratio=[expand_ratio, expand_ratio]),
            USBatchNorm2d(midp, ratio=expand_ratio),
            nn.ReLU(inplace=True),
            USConv2d(midp, outp, 1, 1, 0, bias=False, ratio=[expand_ratio, 1]),
            USBatchNorm2d(outp),
        ]
        self.body = nn.Sequential(*body_layers)
        self.residual_connection = stride == 1 and inp == outp
        if not self.residual_connection:
            # Shapes differ: project the input with a 1x1 conv + norm.
            self.shortcut = nn.Sequential(
                USConv2d(inp, outp, 1, stride=stride, bias=False, ratio=[tmp_ratio, 1]),
                USBatchNorm2d(outp),
            )
        self.post_relu = nn.ReLU(inplace=True)

    def forward(self, x):
        res = self.body(x)
        # Add either the identity or the projection shortcut.
        res += x if self.residual_connection else self.shortcut(x)
        return self.post_relu(res)
class TFRobertaForMaskedLM():
    """Placeholder class used when TensorFlow is not installed.

    Every entry point simply invokes ``requires_tf`` so users get a clear
    error instead of an obscure ImportError.
    """

    def __init__(self, *args, **kwargs):
        requires_tf(self)

    def from_pretrained(self, *args, **kwargs):
        requires_tf(self)
class ChineseCLIPTextConfig(PretrainedConfig):
    """Configuration for the text tower of Chinese-CLIP (BERT-style defaults)."""
    model_type = 'chinese_clip_text_model'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, initializer_factor=1.0, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', use_cache=True, **kwargs):
        # All values are stored verbatim on the config object.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
    # NOTE(review): first parameter is ``cls`` -- a ``@classmethod``
    # decorator was probably stripped in extraction; confirm against source.
    def from_pretrained(cls, pretrained_model_name_or_path: Union[(str, os.PathLike)], **kwargs) -> 'PretrainedConfig':
        """Load this config, unwrapping the ``text_config`` sub-dict when
        the stored config is a full Chinese-CLIP config."""
        (config_dict, kwargs) = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if (config_dict.get('model_type') == 'chinese_clip'):
            config_dict = config_dict['text_config']
        # Warn (but proceed) on a model_type mismatch.
        if (('model_type' in config_dict) and hasattr(cls, 'model_type') and (config_dict['model_type'] != cls.model_type)):
            logger.warning(f"You are using a model of type {config_dict['model_type']} to instantiate a model of type {cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
# NOTE(review): the two ``.parametrize`` lines below lost their
# ``@pytest.mark`` prefix in extraction -- restore before running.
.parametrize('minsize', [None, 200, 20000, 40000, 80000])
.parametrize('dtype', [np.uint8, np.float32])
def test_two_image_peaks(minsize, dtype):
    """Grayscale reconstruction of two seeded peaks; when ``minsize`` is
    given, the fixture is tiled up to at least that many pixels."""
    image = np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 2, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 3, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=dtype)
    mask = np.array([[4, 4, 4, 1, 1, 1, 1, 1, 1], [4, 4, 4, 1, 1, 1, 1, 1, 1], [4, 4, 4, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 4, 4, 4, 1], [1, 1, 1, 1, 1, 4, 4, 4, 1], [1, 1, 1, 1, 1, 4, 4, 4, 1]], dtype=dtype)
    expected = np.array([[2, 2, 2, 1, 1, 1, 1, 1, 1], [2, 2, 2, 1, 1, 1, 1, 1, 1], [2, 2, 2, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 3, 3, 3, 1], [1, 1, 1, 1, 1, 3, 3, 3, 1], [1, 1, 1, 1, 1, 3, 3, 3, 1]], dtype=dtype)
    if (minsize is not None):
        # Tile the fixture so the image has at least ``minsize`` pixels.
        nrow = math.ceil(math.sqrt((minsize / image.size)))
        ncol = math.ceil((minsize / (image.size * nrow)))
        image = np.tile(image, (nrow, ncol))
        mask = np.tile(mask, (nrow, ncol))
        expected = np.tile(expected, (nrow, ncol))
    out = reconstruction(image, mask)
    assert (out.dtype == _supported_float_type(mask.dtype))
    assert_array_almost_equal(out, expected)
def list(github, force_reload=False):
    """Return the public callables exposed by a hub repo's hubconf module.

    Note: this intentionally shadows the ``list`` builtin (public hub API).
    """
    repo_dir = _get_cache_or_reload(github, force_reload, True)
    # Temporarily put the repo on sys.path so hubconf can be imported.
    sys.path.insert(0, repo_dir)
    hub_module = import_module(MODULE_HUBCONF, repo_dir + '/' + MODULE_HUBCONF)
    sys.path.remove(repo_dir)

    def _is_public_callable(name):
        return callable(getattr(hub_module, name)) and not name.startswith('_')

    return [name for name in dir(hub_module) if _is_public_callable(name)]
class SRResNet(nn.Module):
    """SRResNet super-resolution generator.

    Pipeline: large-kernel input conv -> ``n_blocks`` residual blocks ->
    small-kernel conv with a global skip connection -> log2(scaling_factor)
    sub-pixel (x2) upsampling blocks -> large-kernel Tanh output conv.
    """

    def __init__(self, large_kernel_size=9, small_kernel_size=3, n_channels=64, n_blocks=16, scaling_factor=4):
        super(SRResNet, self).__init__()
        scaling_factor = int(scaling_factor)
        assert scaling_factor in {2, 4, 8}, 'The scaling factor must be 2, 4, or 8!'
        # Input stage: wide receptive field, no batch norm.
        self.conv_block1 = ConvolutionalBlock(in_channels=3, out_channels=n_channels, kernel_size=large_kernel_size, batch_norm=False, activation='PReLu')
        # Trunk of identical residual blocks.
        self.residual_blocks = nn.Sequential(*(ResidualBlock(kernel_size=small_kernel_size, n_channels=n_channels) for _ in range(n_blocks)))
        # Post-trunk conv; its output is added to the trunk input (global skip).
        self.conv_block2 = ConvolutionalBlock(in_channels=n_channels, out_channels=n_channels, kernel_size=small_kernel_size, batch_norm=True, activation=None)
        # Each sub-pixel block upsamples by 2, so stack log2(scaling_factor).
        n_upsample = int(math.log2(scaling_factor))
        self.subpixel_convolutional_blocks = nn.Sequential(*(SubPixelConvolutionalBlock(kernel_size=small_kernel_size, n_channels=n_channels, scaling_factor=2) for _ in range(n_upsample)))
        self.conv_block3 = ConvolutionalBlock(in_channels=n_channels, out_channels=3, kernel_size=large_kernel_size, batch_norm=False, activation='Tanh')

    def forward(self, lr_imgs):
        """Map low-resolution images to super-resolved images in [-1, 1]."""
        feats = self.conv_block1(lr_imgs)
        skip = feats
        feats = self.conv_block2(self.residual_blocks(feats))
        feats = self.subpixel_convolutional_blocks(feats + skip)
        return self.conv_block3(feats)
def get_random_field_order_tag(args):
    """Return the filename tag ``'rfo.'`` when random field order is enabled,
    else the empty string."""
    return 'rfo.' if args.random_field_order else ''
# NOTE(review): the line below looks like a ``@register_task('translation')``
# decorator whose '@' and prefix were lost in extraction -- confirm.
_task('translation')
class TranslationTask(FairseqTask):
    """Fairseq task for bilingual translation.

    Handles CLI argument registration, task setup (dictionary loading and
    language-pair inference) and language-pair dataset loading.
    """
    # NOTE(review): no ``self``/``cls`` parameter -- probably decorated
    # ``@staticmethod`` in the original source.
    def add_args(parser):
        """Register task-specific command-line arguments."""
        parser.add_argument('data', help='colon separated path to data directories list, will be iterated upon during epochs in round-robin manner')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
        parser.add_argument('--lazy-load', action='store_true', help='load the dataset lazily')
        parser.add_argument('--raw-text', action='store_true', help='load raw text dataset')
        parser.add_argument('--load-alignments', action='store_true', help='load the binarized alignments')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL', help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL', help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N', help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N', help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int, help='amount to upsample primary dataset')
    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
    # NOTE(review): takes ``cls`` -- probably ``@classmethod`` originally.
    def setup_task(cls, args, **kwargs):
        """Normalise args, infer the language pair if needed, load both
        dictionaries and construct the task."""
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)
        # Map deprecated flags onto the dataset_impl setting.
        if getattr(args, 'raw_text', False):
            utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
            args.dataset_impl = 'raw'
        elif getattr(args, 'lazy_load', False):
            utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
            args.dataset_impl = 'lazy'
        paths = args.data.split(':')
        assert (len(paths) > 0)
        # Infer source/target languages from filenames when not given.
        if ((args.source_lang is None) or (args.target_lang is None)):
            (args.source_lang, args.target_lang) = data_utils.infer_language_pair(paths[0])
        if ((args.source_lang is None) or (args.target_lang is None)):
            raise Exception('Could not infer language pair, please provide it explicitly')
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
        # Both dictionaries must share the special symbol ids.
        assert (src_dict.pad() == tgt_dict.pad())
        assert (src_dict.eos() == tgt_dict.eos())
        assert (src_dict.unk() == tgt_dict.unk())
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
        return cls(args, src_dict, tgt_dict)
    def load_dataset(self, split, epoch=0, combine=False, **kwargs):
        """Load a language-pair dataset split, rotating through the data
        directories round-robin by epoch."""
        paths = self.args.data.split(':')
        assert (len(paths) > 0)
        data_path = paths[(epoch % len(paths))]
        (src, tgt) = (self.args.source_lang, self.args.target_lang)
        self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.args.dataset_impl, upsample_primary=self.args.upsample_primary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, max_source_positions=self.args.max_source_positions, max_target_positions=self.args.max_target_positions, load_alignments=self.args.load_alignments)
    def build_dataset_for_inference(self, src_tokens, src_lengths):
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
    def max_positions(self):
        """Max (source, target) sequence lengths allowed by this task."""
        return (self.args.max_source_positions, self.args.max_target_positions)
    # NOTE(review): the two accessors below are probably ``@property`` in
    # the original fairseq source -- confirm before calling.
    def source_dictionary(self):
        return self.src_dict
    def target_dictionary(self):
        return self.tgt_dict
def ModularFormsSubSpace(*args, **kwargs):
    """Build the subspace of modular forms spanned by the given generators.

    Generators may be passed individually or inside lists/tuples; pass
    ``reduce=True`` to fully reduce each generator first.  Returns a
    ``SubSpaceForms`` over the common ambient space, or the ambient space
    itself when coordinate vectors are not implemented for it.
    """
    generators = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            generators += arg
        else:
            generators.append(arg)
    if (('reduce' in kwargs) and kwargs['reduce']):
        generators = [gen.full_reduce() for gen in generators]
    if (len(generators) == 0):
        raise ValueError('No generators specified')
    # Sum the generators to obtain an element of the common ambient parent.
    # NOTE(review): ``el = False`` is a "no element yet" sentinel; a falsy
    # *element* (e.g. the zero form) would restart the sum rather than be
    # accumulated -- confirm that cannot occur here.
    el = False
    for gen in generators:
        if el:
            el += gen
        else:
            el = gen
    ambient_space = el.parent()
    try:
        # Probe that coordinate vectors are implemented before building the subspace.
        ambient_space.coordinate_vector(el)
        generators = [ambient_space(gen) for gen in generators]
        return SubSpaceForms(ambient_space, generators)
    except (NotImplementedError, AttributeError):
        return ambient_space
def copy_model_params_from_to(source, target):
    """Overwrite every parameter of ``target`` in place with ``source``'s values.

    Parameters are paired positionally, so both modules must have the same
    parameter layout.
    """
    for dst, src in zip(target.parameters(), source.parameters()):
        dst.data.copy_(src.data)
class Entropy(_Loss):
    """Entropy of a categorical distribution given its log-probabilities."""

    def __init__(self):
        super(Entropy, self).__init__()

    def forward(self, log_qy, batch_size=None, unit_average=False):
        """Compute H(q) = -sum(q * log q) along dim 1.

        Returns the mean over rows when ``unit_average`` is set, otherwise
        the total divided by ``batch_size``.
        """
        if log_qy.dim() > 2:
            log_qy = log_qy.squeeze()
        row_entropy = -th.sum(th.exp(log_qy) * log_qy, dim=1)
        if unit_average:
            return th.mean(row_entropy)
        return th.sum(row_entropy) / batch_size
def vendor_exist(vuln_scan, vendor_name):
    """Return True when scanning everything ('all') or when any requested
    vendor string occurs (case-insensitively) within ``vendor_name``."""
    if 'all' in vuln_scan:
        return True
    name = vendor_name.lower()
    return any(requested.lower() in name for requested in vuln_scan)
class Args():
    """Simple hyper-parameter bag with defaults, overridable via kwargs."""

    def __init__(self, **kwargs):
        defaults = dict(
            bs=32,              # batch size
            epochs=500,
            lr=0.001,
            hid_units='128_64_32',  # hidden layer sizes, underscore-separated
            bins=200,
            train_num=10000,
        )
        self.__dict__.update(defaults)
        # Caller-supplied values win over the defaults.
        self.__dict__.update(kwargs)
class AlignVisionModelTester():
    """Test helper that builds configs/inputs for AlignVisionModel and
    checks output shapes against the parent test case."""
    # NOTE(review): the list defaults below are mutable default arguments;
    # they are only read here, but shared across calls -- confirm harmless.
    def __init__(self, parent, batch_size=12, image_size=32, num_channels=3, kernel_sizes=[3, 3, 5], in_channels=[32, 16, 24], out_channels=[16, 24, 30], hidden_dim=64, strides=[1, 1, 2], num_block_repeats=[1, 1, 2], expand_ratios=[1, 6, 6], is_training=True, hidden_act='gelu'):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_dim = hidden_dim
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.is_training = is_training
        self.hidden_act = hidden_act
    def prepare_config_and_inputs(self):
        """Random pixel values plus a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return (config, pixel_values)
    def get_config(self):
        return AlignVisionConfig(num_channels=self.num_channels, kernel_sizes=self.kernel_sizes, in_channels=self.in_channels, out_channels=self.out_channels, hidden_dim=self.hidden_dim, strides=self.strides, num_block_repeats=self.num_block_repeats, expand_ratios=self.expand_ratios, hidden_act=self.hidden_act)
    def create_and_check_model(self, config, pixel_values):
        """Run the model in eval mode and assert output tensor shapes."""
        model = AlignVisionModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # Spatial size shrinks 4x overall with the strides configured above.
        patch_size = (self.image_size // 4)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, config.hidden_dim, patch_size, patch_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, config.hidden_dim))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return (config, inputs_dict)
class DiscreteCQL(QLearningAlgoBase[(DiscreteCQLImpl, DiscreteCQLConfig)]):
    """Discrete-action Conservative Q-Learning algorithm wrapper."""
    def inner_create_impl(self, observation_shape: Shape, action_size: int) -> None:
        """Build online and target Q-function ensembles (identical settings),
        an optimizer over the online ensemble, and the CQL impl object."""
        (q_funcs, q_func_forwarder) = create_discrete_q_function(observation_shape, action_size, self._config.encoder_factory, self._config.q_func_factory, n_ensembles=self._config.n_critics, device=self._device)
        (targ_q_funcs, targ_q_func_forwarder) = create_discrete_q_function(observation_shape, action_size, self._config.encoder_factory, self._config.q_func_factory, n_ensembles=self._config.n_critics, device=self._device)
        # Only the online Q-functions are optimized; targets are synced separately.
        optim = self._config.optim_factory.create(q_funcs.named_modules(), lr=self._config.learning_rate)
        modules = DQNModules(q_funcs=q_funcs, targ_q_funcs=targ_q_funcs, optim=optim)
        self._impl = DiscreteCQLImpl(observation_shape=observation_shape, action_size=action_size, modules=modules, q_func_forwarder=q_func_forwarder, targ_q_func_forwarder=targ_q_func_forwarder, target_update_interval=self._config.target_update_interval, gamma=self._config.gamma, alpha=self._config.alpha, device=self._device)
    def get_action_type(self) -> ActionSpace:
        return ActionSpace.DISCRETE
def getEntitySegClass(tweet, annot, lower=False, getIndices=True):
    """Extract entity segments from a BIO-annotated token sequence.

    Returns a list of entity strings; with ``getIndices`` each entry is
    paired with position/label info.  NOTE(review): the index payloads are
    inconsistent -- segments closed by a new 'B-' tag (or end of tweet)
    yield ``(start, end, label)`` while segments closed by 'O' yield
    ``((start, end), label)``; confirm callers expect both shapes.
    """
    start = None
    result = []
    for i in range(len(tweet)):
        if ('B-' in annot[i]):
            # A new entity begins; flush any currently open segment first.
            if (start != None):
                if getIndices:
                    # NOTE(review): both branches append the identical value,
                    # so the ``start != len(tweet)`` guard is redundant as written.
                    if (start != len(tweet)):
                        result.append((' '.join(tweet[start:i]), (start, i, annot[start])))
                    else:
                        result.append((' '.join(tweet[start:i]), (start, i, annot[start])))
                else:
                    result.append(' '.join(tweet[start:i]))
            start = i
        elif ((annot[i] == 'O') and (start != None)):
            # Entity ended by an 'O' tag.
            if getIndices:
                result.append((' '.join(tweet[start:i]), ((start, i), annot[start])))
            else:
                result.append(' '.join(tweet[start:i]))
            start = None
    # Flush a segment that runs to the end of the tweet.
    if (start != None):
        if getIndices:
            result.append((' '.join(tweet[start:(i + 1)]), (start, (i + 1), annot[start])))
        else:
            result.append(' '.join(tweet[start:(i + 1)]))
    if lower:
        if getIndices:
            result = [(x[0].lower(), x[1]) for x in result]
        else:
            result = [x.lower() for x in result]
    return result
class SwitchNorm1d(nn.Module):
    """Switchable Normalization for 2D inputs (N, C).

    Blends layer-norm and batch-norm statistics with learned softmax
    weights; batch statistics use a running average at inference time.
    """
    def __init__(self, num_features, eps=1e-05, momentum=0.997, using_moving_average=True):
        super(SwitchNorm1d, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.using_moving_average = using_moving_average
        # Per-feature affine parameters.
        self.weight = nn.Parameter(torch.ones(1, num_features))
        self.bias = nn.Parameter(torch.zeros(1, num_features))
        # Mixing logits over the two statistics sources (LN, BN).
        self.mean_weight = nn.Parameter(torch.ones(2))
        self.var_weight = nn.Parameter(torch.ones(2))
        self.register_buffer('running_mean', torch.zeros(1, num_features))
        self.register_buffer('running_var', torch.zeros(1, num_features))
        self.reset_parameters()
    def reset_parameters(self):
        self.running_mean.zero_()
        self.running_var.zero_()
        self.weight.data.fill_(1)
        self.bias.data.zero_()
    def _check_input_dim(self, input):
        if (input.dim() != 2):
            raise ValueError('expected 2D input (got {}D input)'.format(input.dim()))
    def forward(self, x):
        self._check_input_dim(x)
        # Layer-norm statistics: per-sample, across features.
        mean_ln = x.mean(1, keepdim=True)
        var_ln = x.var(1, keepdim=True)
        if self.training:
            # Batch-norm statistics: per-feature, across the batch.
            mean_bn = x.mean(0, keepdim=True)
            var_bn = x.var(0, keepdim=True)
            if self.using_moving_average:
                # Exponential moving average of the batch statistics.
                self.running_mean.mul_(self.momentum)
                self.running_mean.add_(((1 - self.momentum) * mean_bn.data))
                self.running_var.mul_(self.momentum)
                self.running_var.add_(((1 - self.momentum) * var_bn.data))
            else:
                # Plain accumulation (uses E[x^2]-style variance bookkeeping).
                self.running_mean.add_(mean_bn.data)
                self.running_var.add_(((mean_bn.data ** 2) + var_bn.data))
        else:
            mean_bn = torch.autograd.Variable(self.running_mean)
            var_bn = torch.autograd.Variable(self.running_var)
        # Softmax-normalised mixing weights over (LN, BN).
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        mean = ((mean_weight[0] * mean_ln) + (mean_weight[1] * mean_bn))
        var = ((var_weight[0] * var_ln) + (var_weight[1] * var_bn))
        x = ((x - mean) / (var + self.eps).sqrt())
        return ((x * self.weight) + self.bias)
def changearm(old_label):
    """Remap arm (classes 11, 13) and noise (class 7) regions of a label map
    to class 4.

    Fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` preserves
    the old semantics (``np.int`` was an alias for it).

    NOTE(review): the masks are built from the module-level ``data['label']``
    tensor rather than from ``old_label`` -- preserved as-is here since
    callers may rely on it; confirm this is intended.
    """
    label = old_label
    arm1 = torch.FloatTensor((data['label'].cpu().numpy() == 11).astype(int))
    arm2 = torch.FloatTensor((data['label'].cpu().numpy() == 13).astype(int))
    noise = torch.FloatTensor((data['label'].cpu().numpy() == 7).astype(int))
    # Wherever a mask is 1, replace the label value with 4.
    label = label * (1 - arm1) + arm1 * 4
    label = label * (1 - arm2) + arm2 * 4
    label = label * (1 - noise) + noise * 4
    return label
def save(f, ob, extensions=None, **options):
    """Serialize ``ob`` in BSDF format to a filename or writable file object."""
    serializer = BsdfSerializer(extensions, **options)
    if not isinstance(f, string_types):
        # Already a file-like object: write directly.
        return serializer.save(f, ob)
    with open(f, 'wb') as fh:
        return serializer.save(fh, ob)
class FixupWideResNet(nn.Module):
    """Wide ResNet using Fixup initialisation (no batch norm; per-layer
    scaled init plus bias layers)."""
    def __init__(self, depth, widen_factor, num_classes=10, dropRate=0.0):
        super(FixupWideResNet, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        # WRN depth must satisfy depth = 6n + 4.
        assert (((depth - 4) % 6) == 0)
        n = ((depth - 4) // 6)
        block = FixupBasicBlock
        self.num_layers = (n * 3)
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bias1 = Bias()
        # Three stages; stages 2 and 3 downsample by stride 2.
        self.block1 = FixupNetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = FixupNetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = FixupNetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bias2 = Bias()
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Fixup initialisation: scale first conv of each block by L^{-1/2},
        # zero the second conv, He-init shortcuts, zero the classifier.
        for m in self.modules():
            if isinstance(m, FixupBasicBlock):
                conv = m.conv1
                k = (conv.weight.shape[0] * np.prod(conv.weight.shape[2:]))
                nn.init.normal_(conv.weight, mean=0, std=(np.sqrt((2.0 / k)) * (self.num_layers ** (- 0.5))))
                nn.init.constant_(m.conv2.weight, 0)
                if (m.convShortcut is not None):
                    cs = m.convShortcut
                    k = (cs.weight.shape[0] * np.prod(cs.weight.shape[2:]))
                    nn.init.normal_(cs.weight, mean=0, std=np.sqrt((2.0 / k)))
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.weight, 0)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        out = self.bias1(self.conv1(x))
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(out)
        # Global 8x8 average pool, flatten, then the (bias-shifted) classifier.
        out = F.avg_pool2d(out, 8)
        out = out.view((- 1), self.nChannels)
        return self.fc(self.bias2(out))
class NetProxy(SimProxy):
    # Proxy wrapping network simulators (NICs and net-to-net connectors).
    def __init__(self) -> None:
        super().__init__()
        # (NIC simulator, sync flag) pairs handled by this proxy.
        self.nics: tp.List[tp.Tuple[(NICSim, bool)]] = []
        # ((simulator A, simulator B), sync flag) pairs for net-to-net links.
        self.n2ns: tp.List[tp.Tuple[(tp.Tuple[(Simulator, Simulator)], bool)]] = []
        self.shm_size = 2048  # shared-memory size -- units not shown here; confirm
    def start_delay(self) -> int:
        # Fixed startup delay before the proxy is considered ready
        # (units presumably seconds -- TODO confirm against SimProxy).
        return 10
def isunsigned_chararray(var):
    """True for array vars of integer/logical typespec declared with kind '-1'."""
    return (
        isarray(var)
        and var.get('typespec') in ['integer', 'logical']
        and get_kind(var) == '-1'
    )
class DoxygenType(GeneratedsSuper):
    """Generated XML binding for the Doxygen index root element: a
    ``version`` attribute plus a list of ``compound`` children."""
    subclass = None
    superclass = None
    def __init__(self, version=None, compound=None):
        self.version = version
        # Avoid sharing one list across instances when no compounds given.
        if (compound is None):
            self.compound = []
        else:
            self.compound = compound
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is set.
        if DoxygenType.subclass:
            return DoxygenType.subclass(*args_, **kwargs_)
        else:
            return DoxygenType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Plain accessors for the generated-binding API.
    def get_compound(self):
        return self.compound
    def set_compound(self, compound):
        self.compound = compound
    def add_compound(self, value):
        self.compound.append(value)
    def insert_compound(self, index, value):
        self.compound[index] = value
    def get_version(self):
        return self.version
    def set_version(self, version):
        self.version = version
    def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''):
        """Write this element (and children) as XML to ``outfile``."""
        showIndent(outfile, level)
        outfile.write(('<%s%s %s' % (namespace_, name_, namespacedef_)))
        self.exportAttributes(outfile, level, namespace_, name_='DoxygenType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, (level + 1), namespace_, name_)
            showIndent(outfile, level)
            outfile.write(('</%s%s>\n' % (namespace_, name_)))
        else:
            # Self-closing tag when there are no children.
            outfile.write(' />\n')
    def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'):
        outfile.write((' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'),)))
    def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'):
        for compound_ in self.compound:
            compound_.export(outfile, level, namespace_, name_='compound')
    def hasContent_(self):
        # NOTE(review): an empty list is still "not None", so this reports
        # True even with zero compounds -- behaviour of the generator.
        if (self.compound is not None):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DoxygenType'):
        """Write this element as Python-literal construction code."""
        level += 1
        self.exportLiteralAttributes(outfile, level, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, name_):
        if (self.version is not None):
            showIndent(outfile, level)
            outfile.write(('version = %s,\n' % (self.version,)))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('compound=[\n')
        level += 1
        for compound in self.compound:
            showIndent(outfile, level)
            outfile.write('model_.compound(\n')
            compound.exportLiteral(outfile, level, name_='compound')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node_):
        """Populate this object from a DOM node."""
        attrs = node_.attributes
        self.buildAttributes(attrs)
        for child_ in node_.childNodes:
            # Strip any XML namespace prefix from the tag name.
            nodeName_ = child_.nodeName.split(':')[(- 1)]
            self.buildChildren(child_, nodeName_)
    def buildAttributes(self, attrs):
        if attrs.get('version'):
            self.version = attrs.get('version').value
    def buildChildren(self, child_, nodeName_):
        if ((child_.nodeType == Node.ELEMENT_NODE) and (nodeName_ == 'compound')):
            obj_ = CompoundType.factory()
            obj_.build(child_)
            self.compound.append(obj_)
def parallel_iter(f, inputs):
    """Yield ``((args, kwds), f(*args, **kwds))`` for each input.

    Inputs are materialised and shuffled first, so results come back in a
    random order.
    """
    pending = list(inputs)
    shuffle(pending)
    for args, kwds in pending:
        yield (args, kwds), f(*args, **kwds)
def obtain_score_dict(input_file, output_file):
    """Rank questions from ``input_file`` by PPL score and dump them as
    pretty-printed JSON to ``output_file``."""
    questions = load_all_questions(input_file)
    ranked = rank_PPL_score(questions)
    with open(output_file, 'w') as out:
        out.write(json.dumps(ranked, indent=2))
# NOTE(review): the two ``.parametrize`` lines below lost their
# ``@pytest.mark`` prefix in extraction -- restore before running.
.parametrize('dtype', [np.float32, np.float64])
.parametrize('order', [RowMajor, ColMajor], ids=['RowMajor', 'ColMajor'])
def test_ger(dtype, order):
    """BLAS ger (rank-1 update): checks A += alpha * outer(x, y) in place."""
    ger = _ger_memview[_numpy_to_cython(dtype)]
    rng = np.random.RandomState(0)
    x = rng.random_sample(10).astype(dtype, copy=False)
    y = rng.random_sample(20).astype(dtype, copy=False)
    A = np.asarray(rng.random_sample((10, 20)).astype(dtype, copy=False), order=ORDER[order])
    alpha = 2.5
    expected = ((alpha * np.outer(x, y)) + A)
    # ger updates A in place.
    ger(alpha, x, y, A)
    assert_allclose(A, expected, rtol=RTOL[dtype])
def gpus_for_rank(world_size):
    """Evenly partition the visible CUDA device ids across ``world_size`` ranks.

    Each rank receives ``device_count // world_size`` consecutive ids;
    leftover devices are left unassigned.
    """
    device_ids = list(range(torch.cuda.device_count()))
    per_rank = len(device_ids) // world_size
    return [
        device_ids[rank * per_rank:(rank + 1) * per_rank]
        for rank in range(world_size)
    ]
def generate_synthetic_efficiency_instances(tokens: Dict[(str, List[TokenizationToken])], text_chunks: Dict[(str, List[str])], tokenizer: Tokenizer, num_instances: int, num_prompt_tokens: int, tokenizer_name: str, output_path: str='synthetic_efficiency_instances', base_path: str='prod_env'):
    """Generate prompts of exactly ``num_prompt_tokens`` tokens per book and
    write each prompt to a file under ``output_path``.

    For each (book, instance) pair a span of pre-tokenised text is decoded
    and re-counted; the span is padded/truncated until the count matches,
    retrying with a shifted span up to MAX_ITERS times.
    """
    tokenizer_organization: str = tokenizer_name.split('/')[0]
    # ai21 tokenizers work on raw text chunks; others on token values.
    ai21_tokenizer: bool = (tokenizer_organization == 'ai21')
    books = list(tokens.keys())
    prompts = []
    for i in range((num_instances // len(books))):
        for j in range(len(books)):
            finished = False
            attempt_num = 0
            orig_i = i
            while (not finished):
                # Shift the span index on each retry attempt.
                i = (orig_i + attempt_num)
                prompt: str = ''
                if ai21_tokenizer:
                    per_instance_tokens = text_chunks[books[j]][(i * num_prompt_tokens):((i + 1) * num_prompt_tokens)]
                else:
                    per_instance_tokens = [token.value for token in tokens[books[j]][(i * num_prompt_tokens):((i + 1) * num_prompt_tokens)]]
                num_iters = 0
                while (num_iters < MAX_ITERS):
                    # Decode the candidate tokens back into a prompt string.
                    if ai21_tokenizer:
                        prompt = ''.join(per_instance_tokens)
                    else:
                        decode_request: DecodeRequest = DecodeRequest(tokens=per_instance_tokens)
                        decode_result: DecodeRequestResult = tokenizer.decode(decode_request)
                        prompt = decode_result.text
                    if (prompt == ''):
                        num_generated_tokens = 0
                    else:
                        num_generated_tokens = _count_prompt_tokens(tokenizer, prompt, tokenizer_name)
                    if (num_generated_tokens != num_prompt_tokens):
                        # Pad (by repeating the last token) or truncate the
                        # span until the re-tokenised count matches.
                        temp_num_tokens = num_generated_tokens
                        while (temp_num_tokens < num_prompt_tokens):
                            if (len(per_instance_tokens) == 0):
                                if (num_prompt_tokens != 1):
                                    print(f'WARNING: got 0 per_instance_tokens with num_prompt_tokens={num_prompt_tokens}')
                                    break
                                # Re-seed from the start of the book.
                                if ai21_tokenizer:
                                    per_instance_tokens = text_chunks[books[j]][:2]
                                else:
                                    per_instance_tokens = [token.value for token in tokens[books[j]][:2]]
                            else:
                                per_instance_tokens.append(per_instance_tokens[(- 1)])
                            temp_num_tokens += 1
                        while (temp_num_tokens > num_prompt_tokens):
                            per_instance_tokens = per_instance_tokens[:(- 1)]
                            temp_num_tokens -= 1
                    else:
                        finished = True
                        break
                    num_iters += 1
                if (not finished):
                    # Exhausted MAX_ITERS on this span: move to the next one.
                    print(f'Requested {num_prompt_tokens}, got {num_generated_tokens} for book {books[j]}, instance #{orig_i}, tokenizer={tokenizer_name}, trying again with a new span of text...')
                    attempt_num += 1
                    continue
                prompts.append(prompt)
    for (i, prompt) in enumerate(prompts):
        # Normalise the tokenizer name used in output filenames.
        for (k, v) in TOKENIZER_REPLACEMENTS.items():
            tokenizer_name = tokenizer_name.replace(k, v)
        name = f"num_prompt_tokens={num_prompt_tokens},tokenizer={tokenizer_name.replace('/', '_')},id={i}.txt"
        write(os.path.join(output_path, name), prompt)
def tweak(fun_or_val, identifier=None):
    """Register a tweak: callables go to ``tweakfun``, plain values to
    ``tweakval``.

    Fix: ``isinstance(x, collections.Callable)`` relied on the ABC alias
    removed in Python 3.10; the builtin ``callable()`` is the equivalent
    check.
    """
    if callable(fun_or_val):
        return tweakfun(fun_or_val, identifier)
    return tweakval(fun_or_val, identifier)
def test_min_pos():
    """min_pos must agree across float32/float64 inputs and be non-negative."""
    samples = np.random.RandomState(0).randn(100)
    result64 = min_pos(samples)
    result32 = min_pos(samples.astype(np.float32))
    assert_allclose(result64, result32)
    assert result64 >= 0
class VocCfg():
    # Dataset-configuration record for PASCAL-VOC-style parsing.
    # NOTE(review): these are bare annotated class attributes; a
    # ``@dataclass`` decorator was probably stripped in extraction -- confirm.
    variant: str = None  # dataset variant name (None = default)
    parser: str = 'voc'  # annotation parser key
    num_classes: int = 80  # number of object classes
    img_filename: str = '%s.jpg'  # image filename template
    splits: Dict[(str, dict)] = None  # split name -> split options
class DiscreteSACImpl(DiscreteQFunctionMixin, QLearningAlgoImplBase):
    """Discrete-action Soft Actor-Critic implementation: critic, actor and
    (optional) temperature updates plus periodic target-network syncing.

    NOTE(review): the final method of this class (``inner_sample_action``)
    is truncated in this copy of the file.
    """
    _modules: DiscreteSACModules
    _q_func_forwarder: DiscreteEnsembleQFunctionForwarder
    _targ_q_func_forwarder: DiscreteEnsembleQFunctionForwarder
    _target_update_interval: int
    def __init__(self, observation_shape: Shape, action_size: int, modules: DiscreteSACModules, q_func_forwarder: DiscreteEnsembleQFunctionForwarder, targ_q_func_forwarder: DiscreteEnsembleQFunctionForwarder, target_update_interval: int, gamma: float, device: str):
        super().__init__(observation_shape=observation_shape, action_size=action_size, modules=modules, device=device)
        self._gamma = gamma
        self._q_func_forwarder = q_func_forwarder
        self._targ_q_func_forwarder = targ_q_func_forwarder
        self._target_update_interval = target_update_interval
        # Start with target networks identical to the online networks.
        hard_sync(modules.targ_q_funcs, modules.q_funcs)
    def update_critic(self, batch: TorchMiniBatch) -> Dict[(str, float)]:
        """One gradient step on the critic; returns the loss as a metric."""
        self._modules.critic_optim.zero_grad()
        q_tpn = self.compute_target(batch)
        loss = self.compute_critic_loss(batch, q_tpn)
        loss.backward()
        self._modules.critic_optim.step()
        return {'critic_loss': float(loss.cpu().detach().numpy())}
    def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
        """Soft state value of the next observation under the target critic:
        E_pi[ Q_targ - temp * log pi ]."""
        with torch.no_grad():
            dist = self._modules.policy(batch.next_observations)
            log_probs = dist.logits
            probs = dist.probs
            # Temperature of zero when no learned temperature is configured.
            if (self._modules.log_temp is None):
                temp = torch.zeros_like(log_probs)
            else:
                temp = self._modules.log_temp().exp()
            entropy = (temp * log_probs)
            target = self._targ_q_func_forwarder.compute_target(batch.next_observations)
            keepdims = True
            # Ensemble targets carry an extra trailing dim; broadcast over it.
            if (target.dim() == 3):
                entropy = entropy.unsqueeze((- 1))
                probs = probs.unsqueeze((- 1))
                keepdims = False
            return (probs * (target - entropy)).sum(dim=1, keepdim=keepdims)
    def compute_critic_loss(self, batch: TorchMiniBatch, q_tpn: torch.Tensor) -> torch.Tensor:
        return self._q_func_forwarder.compute_error(observations=batch.observations, actions=batch.actions.long(), rewards=batch.rewards, target=q_tpn, terminals=batch.terminals, gamma=(self._gamma ** batch.intervals))
    def update_actor(self, batch: TorchMiniBatch) -> Dict[(str, float)]:
        """One gradient step on the actor (critic frozen in eval mode)."""
        self._modules.q_funcs.eval()
        self._modules.actor_optim.zero_grad()
        loss = self.compute_actor_loss(batch)
        loss.backward()
        self._modules.actor_optim.step()
        return {'actor_loss': float(loss.cpu().detach().numpy())}
    def compute_actor_loss(self, batch: TorchMiniBatch) -> torch.Tensor:
        """E_pi[ temp * log pi - Q ] with Q detached (min over ensemble)."""
        with torch.no_grad():
            q_t = self._q_func_forwarder.compute_expected_q(batch.observations, reduction='min')
        dist = self._modules.policy(batch.observations)
        log_probs = dist.logits
        probs = dist.probs
        if (self._modules.log_temp is None):
            temp = torch.zeros_like(log_probs)
        else:
            temp = self._modules.log_temp().exp()
        entropy = (temp * log_probs)
        return (probs * (entropy - q_t)).sum(dim=1).mean()
    def update_temp(self, batch: TorchMiniBatch) -> Dict[(str, float)]:
        """One gradient step on the learned temperature toward the target
        entropy 0.98 * log(action_size)."""
        assert self._modules.temp_optim
        assert (self._modules.log_temp is not None)
        self._modules.temp_optim.zero_grad()
        with torch.no_grad():
            dist = self._modules.policy(batch.observations)
            log_probs = F.log_softmax(dist.logits, dim=1)
            probs = dist.probs
            expct_log_probs = (probs * log_probs).sum(dim=1, keepdim=True)
            entropy_target = (0.98 * (- math.log((1 / self.action_size))))
            targ_temp = (expct_log_probs + entropy_target)
        loss = (- (self._modules.log_temp().exp() * targ_temp).mean())
        loss.backward()
        self._modules.temp_optim.step()
        cur_temp = self._modules.log_temp().exp().cpu().detach().numpy()[0][0]
        return {'temp_loss': float(loss.cpu().detach().numpy()), 'temp': float(cur_temp)}
    def inner_update(self, batch: TorchMiniBatch, grad_step: int) -> Dict[(str, float)]:
        """One full SAC update: temperature (if enabled), critic, actor, and
        a target sync every ``_target_update_interval`` steps."""
        metrics = {}
        if self._modules.temp_optim:
            metrics.update(self.update_temp(batch))
        metrics.update(self.update_critic(batch))
        metrics.update(self.update_actor(batch))
        if ((grad_step % self._target_update_interval) == 0):
            self.update_target()
        return metrics
    def inner_predict_best_action(self, x: TorchObservation) -> torch.Tensor:
        # Greedy action = argmax of the policy's categorical probabilities.
        dist = self._modules.policy(x)
        return dist.probs.argmax(dim=1)
def inner_sample_action(self, x: TorchObservation) -> torch.Tensor:
dist = self._modules.policy(x)
return dist.sample()
def update_target(self) -> None:
hard_sync(self._modules.targ_q_funcs, self._modules.q_funcs)
def policy(self) -> Policy:
return self._modules.policy
def policy_optim(self) -> Optimizer:
return self._modules.actor_optim
def q_function(self) -> nn.ModuleList:
return self._modules.q_funcs
def q_function_optim(self) -> Optimizer:
return self._modules.critic_optim |
def ConvertESubGraph_PUNGraph_PNEANet(InGraph, EIdV, RenumberNodes=False):
    """Thin SWIG wrapper: delegate edge-subgraph conversion to the native _snap module.

    Builds a PNEANet from the edges of `InGraph` (a PUNGraph) listed in `EIdV`,
    optionally renumbering node IDs when `RenumberNodes` is True.
    """
    return _snap.ConvertESubGraph_PUNGraph_PNEANet(InGraph, EIdV, RenumberNodes)
def minmax_data(xdata, ydata, dict=False):
    """Return plot bounds for x/y data, falling back to (-1, 1) for empty input.

    When `dict` is truthy the bounds come back as a mapping with keys
    'xmin'/'xmax'/'ymin'/'ymax', otherwise as the tuple
    (xmin, xmax, ymin, ymax). (The `dict` parameter name shadows the builtin
    but is kept for backward compatibility with existing callers.)
    """
    def _bounds(seq):
        # Empty sequences get the symmetric default window (-1, 1).
        if len(seq):
            return min(seq), max(seq)
        return -1, 1

    xmin, xmax = _bounds(xdata)
    ymin, ymax = _bounds(ydata)
    if dict:
        return {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax}
    return (xmin, xmax, ymin, ymax)
def create_bn_node(source_node: BaseNode, bn_node_weights: Dict[(Any, Any)]):
    """Build a BatchNormalization node reconstructed from `source_node`.

    The new node is named '<source>_reconstructed', uses the source node's
    output shape for both its input and output shapes, carries the supplied
    weights, and takes its epsilon/momentum framework attributes from the
    module-level EPSILON_VAL / MOMENTUM_VAL constants.
    """
    bn_node = BaseNode(name=(source_node.name + '_reconstructed'), framework_attr={EPSILON: EPSILON_VAL, MOMENTUM: MOMENTUM_VAL}, input_shape=source_node.output_shape, output_shape=source_node.output_shape, weights=bn_node_weights, layer_class=BatchNormalization)
    return bn_node
def handleEntity(ctxObj, publish):
    """Predict the best crop from a context entity and publish the prediction.

    Reads air moisture, air temperature, soil pH and rainfall from `ctxObj`,
    runs the module-level random-forest classifier `loaded_rf`, and publishes
    a 'CropPrediction' entity via `publish`. Falls back to 'rice' when the
    classifier fires no crop class.
    """
    print('Implement logic')
    print(ctxObj)
    sys.stdout.flush()
    print(ctxObj['type'])
    sys.stdout.flush()
    print(ctxObj['airmoisture']['value'])
    sys.stdout.flush()
    atemp = ctxObj['airTemp']['value']
    shum = ctxObj['soilmoisture']['value']
    pH = ctxObj['soilpH']['value']
    rain = ctxObj['rainfall']['value']
    ah = ctxObj['airmoisture']['value']
    # Feature order must match the order the model was trained with.
    predictcrop = [[ah, atemp, pH, rain]]
    predictions = loaded_rf.predict(predictcrop)
    # FIX: the original referenced an undefined name `cr` on the no-match path
    # (NameError) and left `c` possibly unbound; default to 'rice' instead.
    result = 'rice'
    for i in range(30):
        if predictions[0][i] == 1:
            result = crops[i]
            break
    print('The predicted crop is %s' % result)
    sys.stdout.flush()
    updateEntity = {'id': (ctxObj['id'] + '.prediction'), 'type': 'CropPrediction', 'soilmoisture': {'type': 'Property', 'value': shum}, 'soilph': {'type': 'Property', 'value': pH}, 'rainfall': {'type': 'Property', 'value': rain}, 'airmoisture': {'type': 'Property', 'value': ah}, 'cropprediction': {'type': 'Property', 'value': str(result)}}
    print('Update Entity : ')
    print(json.dumps(updateEntity))
    sys.stdout.flush()
    publish(updateEntity)
    sys.stdout.flush()
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [314])
def test_batch_det_double_backward(seed, ctx, func_name):
    """Gradient-of-gradient check for F.batch_det on a small random 3x3 batch.

    Inputs are clipped to (-0.9, 0.9) to stay away from near-singular
    matrices; the accumulation tolerance is loose because determinant
    gradients are numerically sensitive.
    """
    from nbla_test_utils import backward_function_tester
    rng = np.random.RandomState(seed)
    inputs = [np.clip(rng.randn(2, 3, 3).astype(np.float32), (- 0.9), 0.9)]
    backward_function_tester(rng, F.batch_det, inputs, ctx=ctx, atol_accum=0.1, skip_backward_check=True)
class Shape:
    """A 2D polygon annotation: a 2xN point array plus an optional text label.

    Serialization format (one polygon per line):
        "<n>,<x1>,...,<xn>,<y1>,...,<yn>,<text>,\n"
    where the 2n floats are the flattened 2xN point matrix (row-major:
    all x coordinates, then all y coordinates).
    """

    def __init__(self, pts=np.zeros((2, 0)), max_sides=4, text=''):
        # NOTE: the shared default array is preserved from the original
        # interface; callers that mutate `pts` in place share it.
        self.pts = pts
        self.max_sides = max_sides
        self.text = text

    def isValid(self):
        """A polygon is valid once it has more than two vertices."""
        return self.pts.shape[1] > 2

    def write(self, fp):
        """Serialize this shape to `fp` as one comma-terminated line."""
        fields = ['%d' % self.pts.shape[1]]
        fields.extend('%f' % coord for coord in self.pts.flatten())
        fields.append('%s' % self.text)
        fp.write(','.join(fields) + ',\n')

    def read(self, line):
        """Parse one serialized line into this shape's points and text."""
        tokens = line.strip().split(',')
        n_pts = int(tokens[0])
        coords = [float(tok) for tok in tokens[1:((n_pts * 2) + 1)]]
        self.pts = np.array(coords).reshape((2, n_pts))
        # The text field is optional in older files.
        self.text = tokens[(n_pts * 2) + 1] if len(tokens) >= (n_pts * 2) + 2 else ''
class DeQuantize(torch.nn.Module):
    """Stateless module that converts a quantized tensor back to float.

    Mirrors torch.ao.nn.quantized.DeQuantize, where `from_float` is a
    @staticmethod.
    """
    def __init__(self):
        super(DeQuantize, self).__init__()

    def forward(self, Xq):
        """Return the float representation of the quantized tensor `Xq`."""
        return Xq.dequantize()

    @staticmethod
    def from_float(mod):
        """Build a DeQuantize from a float module.

        FIX: declared @staticmethod — the original plain function received
        the instance as `mod` when called on an instance, matching the
        upstream PyTorch API. `mod` is unused because dequantization is
        stateless.
        """
        return DeQuantize()
class LOLValidationHSV(LOLValidation):
    """LOLValidation variant that yields image pairs converted to HSV color space.

    The original defined an `__init__` that only forwarded *args/**kwargs to
    super(); it is omitted here because Python inherits the parent constructor
    automatically — behavior is unchanged.
    """
    def __getitem__(self, idx):
        """Load the input/target pair at `idx` as HSV and apply the paired transforms.

        Returns a dict with keys 'x' (input) and 't' (target).
        """
        x = Image.open(self.image_paths[idx]).convert('HSV')
        t = Image.open(self.target_paths[idx]).convert('HSV')
        # Transforms are applied jointly so random augmentations stay aligned.
        (x, t) = self.transforms((x, t))
        return {'x': x, 't': t}
class Rolling(OptTask):
    """Optimization task: tune a 2x2 gain matrix K for a tactile rolling simulator.

    The objective averages the simulated cost over a fixed grid of 2D goal
    positions; parameters are bounded in [-2, 2].
    """
    def __init__(self, n_parameters=4, visualize=True):
        super(Rolling, self).__init__(f=self._f, fprime=self._g, name='Rolling', n_parameters=n_parameters, n_objectives=1, order=1, bounds=rutils.bounds(max=([2] * n_parameters), min=([(- 2)] * n_parameters)), task={'minimize'}, labels_param=None, labels_obj=None, vectorized=False, info=None, opt_obj=0, opt_parameters=np.matrix([([0] * n_parameters)]))
        # Visualization toggles both the Tacto and PyBullet front-ends together.
        self.env = RollingEnv.RollingEnv(visTacto=visualize, visPyBullet=visualize)
    def _f(self, xs):
        """Objective: for each parameter vector, reshape to a 2x2 gain (scaled by
        1/1000) and average the simulated cost over all goals.

        Returns an (len(xs), 1) np.matrix of costs.
        """
        costs = []
        goals = [[0.3, 0.3], [0.3, 0.5], [0.3, 0.7], [0.5, 0.3], [0.5, 0.7], [0.7, 0.3], [0.7, 0.5], [0.7, 0.7]]
        print('xs', xs)
        for i in range(len(xs)):
            # Gains are searched in [-2, 2] but applied at millimeter scale.
            K = (xs[i].reshape([2, 2]) / 1000)
            c = 0
            for goal in goals:
                c += self.env.simulate(goal, K)
            costs.append([(c / len(goals))])
        costs = np.matrix(costs)
        print('costs', costs)
        return costs
    def _g(self, x):
        # NOTE(review): this is the gradient of ||x||^2, not of the simulated
        # cost — presumably a surrogate/placeholder; confirm it matches what
        # the optimizer expects from `fprime`.
        return np.matrix((2 * x))
class MoNet(torch.nn.Module):
    """Two-layer MoNet (GMMConv) node classifier.

    Hyperparameters (hidden size, kernel size, dropout) come from the
    module-level `args` namespace; input/output dimensions come from
    `dataset`. Returns per-node log-probabilities.
    """
    def __init__(self, dataset):
        super(MoNet, self).__init__()
        self.conv1 = GMMConv(dataset.num_features, args.hidden, dim=2, kernel_size=args.kernel_size)
        self.conv2 = GMMConv(args.hidden, dataset.num_classes, dim=2, kernel_size=args.kernel_size)

    def reset_parameters(self):
        """Re-initialize both convolution layers."""
        for conv in (self.conv1, self.conv2):
            conv.reset_parameters()

    def forward(self, data):
        """dropout -> GMMConv -> ELU -> dropout -> GMMConv -> log_softmax."""
        x = data.x
        edge_index = data.edge_index
        edge_attr = data.edge_attr
        h = F.dropout(x, p=args.dropout, training=self.training)
        h = F.elu(self.conv1(h, edge_index, edge_attr))
        h = F.dropout(h, p=args.dropout, training=self.training)
        h = self.conv2(h, edge_index, edge_attr)
        return F.log_softmax(h, dim=1)
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase methods on the PyBindGen class wrapper.

    Generated binding code: adds constructors, the pure-virtual
    GetTypeid/IsEqual interface, the protected Demangle helper, and one
    GetCppTypeid<T> instantiation per callback argument type used by this
    module.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    # One GetCppTypeid instantiation per template parameter type.
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'bool'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Time'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::QueueDiscItem const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'char const*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
    return
def load_search_config(searchspace_path):
    """Load a search-space configuration from a YAML file.

    FIX: uses yaml.safe_load — yaml.load without an explicit Loader is
    deprecated (PyYAML >= 5.1 warns, >= 6 requires a Loader) and can execute
    arbitrary Python objects from an untrusted file. Plain config files only
    need the safe subset.
    """
    with open(searchspace_path, 'r') as f:
        return yaml.safe_load(f)
def test_all_nonzero():
    """ak.all over datetime64 arrays: every non-empty datetime is truthy, so
    the reduction is True along both the innermost (-1) and next (-2) axes."""
    array = ak.highlevel.Array([[[np.datetime64('2022'), np.datetime64('2023'), np.datetime64('2025')], [], [np.datetime64('2027'), np.datetime64('2011')], [np.datetime64('2013')]], [], [[np.datetime64('2017'), np.datetime64('2019')], [np.datetime64('2023')]]], check_valid=True)
    assert (to_list(ak.operations.all(array, axis=(- 1))) == [[True, True, True, True], [], [True, True]])
    assert (to_list(ak.operations.all(array, axis=(- 2))) == [[True, True, True], [], [True, True]])
class SEAE(Model):
    """Speech-enhancement autoencoder trained on (clean, noisy) wav pairs.

    TF1-era code: builds a per-GPU tower graph fed by a TFRecord input queue,
    averages gradients across towers, and trains with an L1 reconstruction
    loss between the generated and clean waveforms.
    """
    def __init__(self, sess, args, devices, infer=False):
        self.args = args
        self.sess = sess
        self.keep_prob = 1.0
        if infer:
            # Inference: no dropout.
            self.keep_prob_var = tf.Variable(self.keep_prob, trainable=False)
        else:
            # Training: 50% dropout.
            self.keep_prob = 0.5
            self.keep_prob_var = tf.Variable(self.keep_prob, trainable=False)
        self.batch_size = args.batch_size
        self.epoch = args.epoch
        self.devices = devices
        self.save_path = args.save_path
        self.canvas_size = args.canvas_size
        # Encoder channel depths per downsampling stage.
        self.g_enc_depths = [16, 32, 32, 64, 64, 128, 128, 256, 256, 512, 1024]
        self.e2e_dataset = args.e2e_dataset
        self.generator = AEGenerator(self)
        self.build_model(args)
    def build_model(self, config):
        """Build one tower per device, then average gradients into one Adam step."""
        all_g_grads = []
        g_opt = tf.train.AdamOptimizer(config.g_learning_rate, config.beta_1)
        for (idx, device) in enumerate(self.devices):
            with tf.device(('/%s' % device)):
                with tf.name_scope(('device_%s' % idx)):
                    with variables_on_gpu0():
                        self.build_model_single_gpu(idx)
                        g_grads = g_opt.compute_gradients(self.g_losses[(- 1)], var_list=self.g_vars)
                        all_g_grads.append(g_grads)
                        # Share variables across towers after the first build.
                        tf.get_variable_scope().reuse_variables()
        avg_g_grads = average_gradients(all_g_grads)
        self.g_opt = g_opt.apply_gradients(avg_g_grads)
    def build_model_single_gpu(self, gpu_idx):
        """Build the input pipeline (first tower only) and this tower's graph."""
        if (gpu_idx == 0):
            filename_queue = tf.train.string_input_producer([self.e2e_dataset])
            (self.get_wav, self.get_noisy) = read_and_decode(filename_queue, (2 ** 14))
        (wavbatch, noisybatch) = tf.train.shuffle_batch([self.get_wav, self.get_noisy], batch_size=self.batch_size, num_threads=2, capacity=(1000 + (3 * self.batch_size)), min_after_dequeue=1000, name='wav_and_noisy')
        if (gpu_idx == 0):
            self.Gs = []
            self.zs = []
            self.gtruth_wavs = []
            self.gtruth_noisy = []
        self.gtruth_wavs.append(wavbatch)
        self.gtruth_noisy.append(noisybatch)
        # Add a channel dimension for the 1-D convolutions.
        wavbatch = tf.expand_dims(wavbatch, (- 1))
        noisybatch = tf.expand_dims(noisybatch, (- 1))
        if (gpu_idx == 0):
            # Reference pass that sets up the generator's internal variables.
            self.reference_G = self.generator(noisybatch, is_ref=True, spk=None, z_on=False)
        G = self.generator(noisybatch, is_ref=False, spk=None, z_on=False)
        print('GAE shape: ', G.get_shape())
        self.Gs.append(G)
        self.rl_audio_summ = audio_summary('real_audio', wavbatch)
        self.real_w_summ = histogram_summary('real_wav', wavbatch)
        self.noisy_audio_summ = audio_summary('noisy_audio', noisybatch)
        self.noisy_w_summ = histogram_summary('noisy_wav', noisybatch)
        self.gen_audio_summ = audio_summary('G_audio', G)
        self.gen_summ = histogram_summary('G_wav', G)
        if (gpu_idx == 0):
            self.g_losses = []
        # L1 reconstruction loss. NOTE(review): tf.sub is the pre-1.0
        # TensorFlow API (tf.subtract in >= 1.0) — this file targets old TF.
        g_loss = tf.reduce_mean(tf.abs(tf.sub(G, wavbatch)))
        self.g_losses.append(g_loss)
        self.g_loss_sum = scalar_summary('g_loss', g_loss)
        if (gpu_idx == 0):
            self.get_vars()
    def get_vars(self):
        """Collect trainable variables; all of them must belong to the generator ('g_')."""
        t_vars = tf.trainable_variables()
        self.g_vars = [var for var in t_vars if var.name.startswith('g_')]
        for x in t_vars:
            assert (x in self.g_vars), x.name
        self.all_vars = t_vars
    def train(self, config, devices):
        """Run the training loop: init, queue runners, periodic save/summaries/samples."""
        print('Initializing optimizer...')
        g_opt = self.g_opt
        num_devices = len(devices)
        try:
            init = tf.global_variables_initializer()
        except AttributeError:
            # Fallback for very old TF releases.
            init = tf.initialize_all_variables()
        print('Initializing variables...')
        self.sess.run(init)
        self.saver = tf.train.Saver()
        self.g_sum = tf.summary.merge([self.g_loss_sum, self.gen_summ, self.rl_audio_summ, self.real_w_summ, self.gen_audio_summ])
        if (not os.path.exists(os.path.join(config.save_path, 'train'))):
            os.makedirs(os.path.join(config.save_path, 'train'))
        self.writer = tf.summary.FileWriter(os.path.join(config.save_path, 'train'), self.sess.graph)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        print('Sampling some wavs to store sample references...')
        # A fixed (noisy, clean) batch reused for qualitative samples.
        (sample_noisy, sample_wav) = self.sess.run([self.gtruth_noisy[0], self.gtruth_wavs[0]])
        print('sample noisy shape: ', sample_noisy.shape)
        print('sample wav shape: ', sample_wav.shape)
        save_path = config.save_path
        counter = 0
        # Count records once to derive batches-per-epoch.
        num_examples = 0
        for record in tf.python_io.tf_record_iterator(self.e2e_dataset):
            num_examples += 1
        print('total examples in TFRecords {}: {}'.format(self.e2e_dataset, num_examples))
        num_batches = (num_examples / self.batch_size)
        print('Batches per epoch: ', num_batches)
        if self.load(self.save_path):
            print('[*] Load SUCCESS')
        else:
            print('[!] Load failed')
        batch_idx = 0
        curr_epoch = 0
        batch_timings = []
        g_losses = []
        try:
            while (not coord.should_stop()):
                start = timeit.default_timer()
                if ((counter % config.save_freq) == 0):
                    # Also fetch the merged summary on save-frequency steps.
                    (_g_opt, _g_sum, g_loss) = self.sess.run([g_opt, self.g_sum, self.g_losses[0]])
                else:
                    (_g_opt, g_loss) = self.sess.run([g_opt, self.g_losses[0]])
                end = timeit.default_timer()
                batch_timings.append((end - start))
                g_losses.append(g_loss)
                print('{}/{} (epoch {}), g_loss = {:.5f}, time/batch = {:.5f}, mtime/batch = {:.5f}'.format(counter, (config.epoch * num_batches), curr_epoch, g_loss, (end - start), np.mean(batch_timings)))
                batch_idx += num_devices
                counter += num_devices
                # NOTE(review): `counter` is incremented above, so the `_g_sum`
                # written below was fetched on an earlier step (stale when
                # num_devices > 0 offsets the two modulo checks) — confirm
                # whether that is intended.
                if (((counter / num_devices) % config.save_freq) == 0):
                    self.save(config.save_path, counter)
                    self.writer.add_summary(_g_sum, counter)
                    fdict = {self.gtruth_noisy[0]: sample_noisy}
                    canvas_w = self.sess.run(self.Gs[0], feed_dict=fdict)
                    swaves = sample_wav
                    sample_dif = (sample_wav - sample_noisy)
                    for m in range(min(20, canvas_w.shape[0])):
                        print('w{} max: {} min: {}'.format(m, np.max(canvas_w[m]), np.min(canvas_w[m])))
                        wavfile.write(os.path.join(save_path, 'sample_{}-{}.wav'.format(counter, m)), 16000.0, canvas_w[m])
                        # Ground-truth references are written once only.
                        if (not os.path.exists(os.path.join(save_path, 'gtruth_{}.wav'.format(m)))):
                            wavfile.write(os.path.join(save_path, 'gtruth_{}.wav'.format(m)), 16000.0, swaves[m])
                            wavfile.write(os.path.join(save_path, 'noisy_{}.wav'.format(m)), 16000.0, sample_noisy[m])
                            wavfile.write(os.path.join(save_path, 'dif_{}.wav'.format(m)), 16000.0, sample_dif[m])
                    np.savetxt(os.path.join(save_path, 'g_losses.txt'), g_losses)
                if (batch_idx >= num_batches):
                    curr_epoch += 1
                    batch_idx = 0
                    if (curr_epoch >= config.epoch):
                        print('Done training; epoch limit {} reached.'.format(self.epoch))
                        print('Saving last model at iteration {}'.format(counter))
                        self.save(config.save_path, counter)
                        self.writer.add_summary(_g_sum, counter)
                        break
        except tf.errors.OutOfRangeError:
            print('[!] Reached queues limits in training loop')
        finally:
            coord.request_stop()
            coord.join(threads)
def list_join(L, x):
    """Concatenate (tokens, types) pairs from L, inserting separator `x` between them.

    A plain-string separator is promoted to (x, RESERVED_TOKEN). A falsy `x`
    means no separator is inserted. The first pair is deep-copied so the
    input lists are never mutated. Returns the joined (tokens, types) pair,
    or ([], []) for empty input.
    """
    if isinstance(x, string_types):
        x = (x, RESERVED_TOKEN)
    if not L:
        return ([], [])
    # Deep-copy the first pair so appends below never touch the caller's lists.
    out = copy.deepcopy(L[0][0])
    out_types = copy.deepcopy(L[0][1])
    for tokens, types in L[1:]:
        if x:
            out.append(x[0])
            out_types.append(x[1])
        out.extend(tokens)
        out_types.extend(types)
    return (out, out_types)
def test_indexed():
    """ak.is_tuple must see through an IndexedArray: True when the projected
    content is the module-level `tuple` layout, False for the `record` layout."""
    array = ak.Array(ak.contents.IndexedArray(ak.index.Index64(np.array([0, 1, 3], dtype=np.int64)), tuple))
    assert ak.is_tuple(array)
    array = ak.Array(ak.contents.IndexedArray(ak.index.Index64(np.array([0, 1, 3], dtype=np.int64)), record))
    assert (not ak.is_tuple(array))
def masses_from_heliocentric(mu, M):
    """Recover the stellar mass and planet masses from heliocentric parameters.

    For each body i >= 1 (index 0 of both inputs is skipped), the pair
    (mu[i], M[i]) satisfies mstar + m_i = M[i] and mstar * m_i = M[i] * mu[i],
    so mstar and m_i are the roots of a quadratic with discriminant
    M**2 - 4*M*mu. All pairs must agree on the same stellar mass.

    Returns [mstar, m_1, m_2, ...].
    """
    mu_arr = np.array(mu)[1:]
    M_arr = np.array(M)[1:]
    # Quadratic discriminant; the larger root is the star, the smaller the planet.
    X = np.sqrt((M_arr ** 2) - ((4 * M_arr) * mu_arr))
    mstar_arr = 0.5 * (M_arr + X)
    m_arr = 0.5 * (M_arr - X)
    # FIX: np.alltrue was deprecated and removed in NumPy 2.0; np.all is the
    # supported equivalent.
    assert np.all(np.isclose(mstar_arr, mstar_arr[0], rtol=1e-10))
    mstar = np.mean(mstar_arr)
    return [mstar] + m_arr.tolist()
def test_isotonic_make_unique_tolerance():
    """Duplicate x-values within float tolerance (1 vs 1 + 1e-16) must be
    merged into one threshold with their y-values averaged before fitting."""
    X = np.array([0, 1, (1 + 1e-16), 2], dtype=np.float64)
    y = np.array([0, 1, 2, 3], dtype=np.float64)
    ireg = IsotonicRegression().fit(X, y)
    y_pred = ireg.predict([0, 0.5, 1, 1.5, 2])
    # Interpolation between the three merged thresholds.
    assert_array_equal(y_pred, np.array([0, 0.75, 1.5, 2.25, 3]))
    assert_array_equal(ireg.X_thresholds_, np.array([0.0, 1.0, 2.0]))
    # The two y-values at x ~= 1 were averaged to 1.5.
    assert_array_equal(ireg.y_thresholds_, np.array([0.0, 1.5, 3.0]))
.ort
.parametrize('break_opchecker', [True, False])
.parametrize('simplify', [True, False])
def test_squeeze(gpu, simplify, break_opchecker, sdfg_name):
    """Round-trip a length-1 array through ONNXSqueeze -> scalar -> ONNXUnsqueeze
    inside a DaCe SDFG and check the value survives unchanged."""
    with (BreakOpChecker() if break_opchecker else suppress()):
        sdfg = dace.SDFG(sdfg_name)
        sdfg.add_array('X_arr', [1], dace.float32)
        # Transient scalar holds the squeezed intermediate value.
        sdfg.add_scalar('scalar', dace.float32, transient=True)
        sdfg.add_array('__return', [1], dace.float32)
        state = sdfg.add_state()
        access_X = state.add_access('X_arr')
        access_scalar = state.add_access('scalar')
        access_result = state.add_access('__return')
        op_node = donnx.ONNXSqueeze('Squeeze')
        state.add_node(op_node)
        state.add_edge(access_X, None, op_node, 'data', sdfg.make_array_memlet('X_arr'))
        state.add_edge(op_node, 'squeezed', access_scalar, None, sdfg.make_array_memlet('scalar'))
        unsqueeze_op = donnx.ONNXUnsqueeze('Unsqueeze', axes=[0])
        state.add_node(unsqueeze_op)
        state.add_edge(access_scalar, None, unsqueeze_op, 'data', sdfg.make_array_memlet('scalar'))
        state.add_edge(unsqueeze_op, 'expanded', access_result, None, sdfg.make_array_memlet('__return'))
        X = np.random.rand(1).astype(np.float32)
        if gpu:
            sdfg.apply_gpu_transformations()
        if simplify:
            # Library nodes must be expanded before graph simplification.
            sdfg.expand_library_nodes()
            sdfg.simplify()
        result = sdfg(X_arr=X)
        assert (result.shape == (1,))
        assert (result[0] == X)
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register ns3::CallbackImplBase methods on the PyBindGen class wrapper
    (LTE-module variant).

    Generated binding code: adds constructors, the pure-virtual
    GetTypeid/IsEqual interface, the protected Demangle helper, and one
    GetCppTypeid<T> instantiation per callback argument type used by this
    module (including LTE trace-source parameter types).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    # One GetCppTypeid instantiation per template parameter type.
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'void'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::MobilityModel const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::SpectrumPhy const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'double'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::SpectrumSignalParameters> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::PacketBurst const> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::PhyReceptionStatParameters'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned char'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::SpectrumValue> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::PhyTransmissionStatParameters'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::LteUePhy::State'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'bool'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Socket> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::EpcUeNas::State'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::SpectrumValue const&'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'unsigned long long'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::UeManager::State'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::LteRrcSap::MeasurementReport'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::DlSchedulingCallbackInfo'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::LteUeRrc::State'])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::LteUeRrc> '])
    cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, visibility='protected', template_parameters=[u'std::__cxx11::list<ns3::LteRrcSap::SCellToAddMod', u' std::allocator<ns3::LteRrcSap::SCellToAddMod> > '])
    return
class ScorerLM(Scorer):
    """Language-model document scorer with Jelinek-Mercer or Dirichlet smoothing.

    Scores a document as the sum of log smoothed term probabilities of the
    query terms over a single field (default 'catchall').
    """
    JM = 'jm'
    DIRICHLET = 'dirichlet'

    def __init__(self, elastic, query, params):
        """Configure field, smoothing method and smoothing parameter from `params`."""
        super(ScorerLM, self).__init__(elastic, query, params)
        self._field = params.get('fields', 'catchall')
        self._smoothing_method = params.get('smoothing_method', self.DIRICHLET).lower()
        if (self._smoothing_method == self.DIRICHLET):
            self._smoothing_param = params.get('smoothing_param', 50)
        elif (self._smoothing_method == ScorerLM.JM):
            self._smoothing_param = params.get('smoothing_param', 0.1)
        else:
            # NOTE(review): exits with status 0 on an invalid smoothing method;
            # raising ValueError would be clearer — kept for compatibility.
            sys.exit(0)
        # Per-document, per-field term-frequency cache.
        self._tf = {}

    @staticmethod
    def get_jm_prob(tf_t_d, len_d, tf_t_C, len_C, lambd):
        """Jelinek-Mercer smoothed probability:
        p(t|theta_d) = (1 - lambda) * p(t|d) + lambda * p(t|C).

        FIX: declared @staticmethod — as a plain function it received `self`
        as an extra positional argument when called via self.get_jm_prob(...)
        (6 arguments to a 5-parameter function -> TypeError).
        """
        p_t_d = ((tf_t_d / len_d) if (len_d > 0) else 0)
        p_t_C = ((tf_t_C / len_C) if (len_C > 0) else 0)
        if Scorer.SCORER_DEBUG:
            print('\t\t\tp(t|d) = {}\tp(t|C) = {}'.format(p_t_d, p_t_C))
        return (((1 - lambd) * p_t_d) + (lambd * p_t_C))

    @staticmethod
    def get_dirichlet_prob(tf_t_d, len_d, tf_t_C, len_C, mu):
        """Dirichlet smoothed probability:
        p(t|theta_d) = (tf(t,d) + mu * p(t|C)) / (|d| + mu).

        FIX: declared @staticmethod for the same reason as get_jm_prob.
        """
        if (mu == 0):
            return 0
        else:
            p_t_C = ((tf_t_C / len_C) if (len_C > 0) else 0)
            return ((tf_t_d + (mu * p_t_C)) / (len_d + mu))

    def __get_term_freq(self, doc_id, field, term):
        """Cached lookup of a term's frequency in a document field (0 if absent)."""
        if (doc_id not in self._tf):
            self._tf[doc_id] = {}
        if (field not in self._tf[doc_id]):
            self._tf[doc_id][field] = self._elastic.term_freqs(doc_id, field)
        return self._tf[doc_id][field].get(term, 0)

    def get_lm_term_prob(self, doc_id, field, t, tf_t_d_f=None, tf_t_C_f=None):
        """Smoothed p(t|theta_d,f) for term `t`; frequencies may be passed in to
        avoid extra index lookups."""
        len_d_f = self._elastic.doc_length(doc_id, field)
        len_C_f = self._elastic.coll_length(field)
        tf_t_C_f = (self._elastic.coll_term_freq(t, field) if (tf_t_C_f is None) else tf_t_C_f)
        tf_t_d_f = (self.__get_term_freq(doc_id, field, t) if (tf_t_d_f is None) else tf_t_d_f)
        if self.SCORER_DEBUG:
            print('\t\tt = {}\t f = {}'.format(t, field))
            print('\t\t\tDoc: tf(t,f) = {}\t|f| = {}'.format(tf_t_d_f, len_d_f))
            # FIX: the original format string was missing the second
            # placeholder, silently dropping the collection length.
            print('\t\t\tColl: tf(t,f) = {}\t|f| = {}'.format(tf_t_C_f, len_C_f))
        p_t_d_f = 0
        if (self._smoothing_method == self.JM):
            lambd = self._smoothing_param
            p_t_d_f = self.get_jm_prob(tf_t_d_f, len_d_f, tf_t_C_f, len_C_f, lambd)
            if self.SCORER_DEBUG:
                print('\t\t\tJM smoothing:')
                print('\t\t\tDoc: p(t|theta_d_f)= ', p_t_d_f)
        elif (self._smoothing_method == self.DIRICHLET):
            # 'avg_len' requests the field's average length as mu.
            mu = (self._smoothing_param if (self._smoothing_param != 'avg_len') else self._elastic.avg_len(field))
            p_t_d_f = self.get_dirichlet_prob(tf_t_d_f, len_d_f, tf_t_C_f, len_C_f, mu)
            if self.SCORER_DEBUG:
                print('\t\t\tDirichlet smoothing:')
                print('\t\t\tmu: ', mu)
                print('\t\t\tDoc: p(t|theta_d_f)= ', p_t_d_f)
        return p_t_d_f

    def get_lm_term_probs(self, doc_id, field):
        """Smoothed term probability for each distinct query term."""
        p_t_theta_d_f = {}
        for t in set(self._query_terms):
            p_t_theta_d_f[t] = self.get_lm_term_prob(doc_id, field, t)
        return p_t_theta_d_f

    def score_doc(self, doc_id):
        """Log-likelihood of the query given the document LM; None when every
        query term has zero probability."""
        if self.SCORER_DEBUG:
            print(('Scoring doc ID=' + doc_id))
        p_t_theta_d = self.get_lm_term_probs(doc_id, self._field)
        if (sum(p_t_theta_d.values()) == 0):
            if self.SCORER_DEBUG:
                print('\t\tP(q|{}) = None'.format(self._field))
            return None
        p_q_theta_d = 0
        for t in self._query_terms:
            # Zero-probability terms are skipped to avoid log(0).
            if (p_t_theta_d[t] == 0):
                continue
            if self.SCORER_DEBUG:
                print('\t\tP({}|{}) = {}'.format(t, self._field, p_t_theta_d[t]))
            p_q_theta_d += math.log(p_t_theta_d[t])
        if self.SCORER_DEBUG:
            print('P(d|q) = {}'.format(p_q_theta_d))
        return p_q_theta_d
def bar_plots_with_protocol_table(output_path, data, protocol_settings, task):
    """Render one grouped bar plot per metric with a protocol-settings table below.

    `data` is (protocol_labels, experiment_labels, metric_labels, scores) where
    scores maps metric -> experiment -> (means, errors). One PDF is written per
    metric into `output_path`. `task` is currently unused here.
    """
    protocol_labels = data[0]
    # Column headers P0, P1, ... for the settings table.
    protocol_ids = ['P{}'.format(i) for i in range(len(protocol_labels))]
    experiment_labels = data[1]
    metric_labels = data[2]
    x = np.arange(len(protocol_labels))
    mpl.rc('text', usetex=True)
    tex_fonts = {'text.usetex': True, 'font.family': 'serif', 'font.serif': ['computer modern'], 'axes.labelsize': 10, 'font.size': 10, 'axes.titlesize': 10, 'legend.fontsize': 8, 'xtick.labelsize': 8, 'ytick.labelsize': 8}
    mpl.rcParams.update(tex_fonts)
    # NOTE(review): the first `colors` assignment is dead — immediately
    # overwritten by the regrouped palette on the next line.
    colors = ['#3182bd', '#de2d26', '#31a354', '#9ecae1', '#fc9272', '#a1d99b', '#deebf7', '#fee0d2', '#e5f5e0']
    colors = ['#3182bd', '#9ecae1', '#deebf7', '#de2d26', '#fc9272', '#fee0d2', '#31a354', '#a1d99b', '#e5f5e0']
    for metric_label in data[3]:
        metric_scores = data[3][metric_label]
        num_groups = len(metric_scores)
        # Sized for a two-column paper layout (inches).
        fig_width = 5.5
        fig_height = (fig_width / 3)
        (fig, ax) = plt.subplots(figsize=(fig_width, fig_height))
        fig.set_size_inches(fig_width, fig_height)
        width = (0.7 / num_groups)
        spare_width = 0.5
        ax.set_xlim((- spare_width), (len(protocol_labels) - spare_width))
        # Table row labels come from the first protocol's settings keys.
        row_labels = list(protocol_settings[list(protocol_settings.keys())[0]].keys())
        for (index, experiment_label) in enumerate(metric_scores):
            (experiment_scores_mean, experiment_scores_err) = metric_scores[experiment_label]
            # Clip the upper error bar so it never exceeds a score of 1.0.
            experiment_scores_std_list_upper = [min(std, (1.0 - mean)) for (mean, std) in zip(experiment_scores_mean, experiment_scores_err)]
            error_kw = dict(lw=5, capsize=5, capthick=3)
            plt.bar(((x - (((num_groups - 1) * width) / 2)) + (width * index)), experiment_scores_mean, width, yerr=(experiment_scores_err, experiment_scores_std_list_upper), error_kw=dict(lw=1, capsize=1, capthick=1), label=experiment_labels[index], color=colors[index])
            # NOTE(review): cell_text is rebuilt from scratch on every
            # iteration of this loop (only the last pass matters) and iterates
            # protocol_settings keys under the name `experiment_label` —
            # confirm the intended keying.
            cell_text = list()
            for row_label in row_labels:
                cell_text.append(['{}'.format(protocol_settings[experiment_label][row_label]) for experiment_label in list(protocol_settings.keys())])
        ax.set_ylabel('fractional success', fontsize=8)
        plt.legend(ncol=3, loc='upper right', prop={'size': 6})
        # Headroom above 1.0 so the legend does not overlap the bars.
        ax.set_ylim((0, 1.2))
        plt.yticks(fontsize=8)
        # The table below the axes replaces the x tick labels.
        ax.get_xaxis().set_visible(False)
        table = plt.table(cellText=cell_text, rowLabels=row_labels, colLabels=protocol_ids, loc='bottom')
        table.auto_set_font_size(False)
        table.set_fontsize(8)
        cellDict = table.get_celld()
        # Column -1 holds the row labels; row 0 holds the column headers.
        for i in range((- 1), len(protocol_ids)):
            if (i != (- 1)):
                cellDict[(0, i)].set_height(0.11)
            for j in range(1, (len(row_labels) + 1)):
                if (j == 2):
                    cellDict[(j, i)].set_height(0.15)
                else:
                    cellDict[(j, i)].set_height(0.11)
                cellDict[(j, i)].set_fontsize(6)
        fig.subplots_adjust(bottom=0.33, left=0.11, right=0.99, top=0.98)
        plt.savefig(os.path.join(output_path, 'bar_plots_protocol_table_{}.pdf'.format(metric_label)), dpi=300)
def stable_resize_token_embeddings(model: transformers.PreTrainedModel, target_size: int, jitter_new_embeddings=False):
    """Resize the model's token embeddings to `target_size`, initializing any
    newly added rows to the mean of the pre-existing rows (optionally jittered
    by the per-dimension std of the old rows).
    """
    num_new_tokens = (target_size - model.get_input_embeddings().weight.size(0))
    model.resize_token_embeddings(target_size)
    if (num_new_tokens > 0):
        # NOTE(review): `_mode()` looks like the residue of a stripped
        # decorator on `stable_init` (likely `@torch.inference_mode()` or
        # `@torch.no_grad()`) — confirm against the original source.
        _mode()
        def stable_init(embedding):
            # Set each new row to the mean of all old rows.
            embedding_data = embedding.weight.data
            embedding_avg = embedding_data[:(- num_new_tokens)].mean(dim=0, keepdim=True)
            embedding_data[(- num_new_tokens):] = embedding_avg
            if jitter_new_embeddings:
                # Add noise matching the old rows' per-dimension spread.
                embedding_std = embedding_data[:(- num_new_tokens)].std(dim=0, keepdim=True)
                embedding_data[(- num_new_tokens):] += (torch.randn_like(embedding_data[(- num_new_tokens):]) * embedding_std)
        input_embeddings = model.get_input_embeddings()
        output_embeddings = model.get_output_embeddings()
        # Both the input and (tied or untied) output embeddings are updated.
        for embeddings in (input_embeddings, output_embeddings):
            stable_init(embeddings)
def op_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    """Preset configuration for the stateless T5-3B pipeline run (tied LM heads).

    Returns a plain dict of model options; `explicitly_set_dict` carries the
    flags that must be forced on the HF config object.
    """
    explicitly_set = {
        'return_dict': False,
        'use_cache': False,
        'output_only': True,
        'output_attentions': False,
        'precompute_masks': False,
        'output_hidden_states': False,
    }
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': True,
        'explicitly_set_dict': explicitly_set,
        'do_resize_token_embedding': True,
    }
class ChargingBar(Bar):
    """Progress-bar variant styled like a battery 'charging' indicator."""
    # Percentage shown after the bar.
    suffix = '%(percent)d%%'
    # Plain spaces instead of bracket delimiters around the bar.
    bar_prefix = ' '
    bar_suffix = ' '
    # NOTE(review): empty_fill/fill are empty strings here; the upstream
    # `progress` library uses unicode glyphs for these — they may have been
    # stripped by an encoding pass. Confirm against the original source.
    empty_fill = ''
    fill = ''
_pipeline_test
_torch
_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
_pytesseract
_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a document-QA pipeline plus three example inputs: a pre-loaded
        image, a URL, and a URL with precomputed OCR word boxes."""
        dqa_pipeline = pipeline('document-question-answering', model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        # OCR the invoice once so one example can bypass the pipeline's own
        # tesseract pass.
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
        question = 'What is the placebo?'
        examples = [{'image': load_image(image), 'question': question}, {'image': image, 'question': question}, {'image': image, 'question': question, 'word_boxes': word_boxes}]
        return (dqa_pipeline, examples)
def run_pipeline_test(self, dqa_pipeline, examples):
outputs = dqa_pipeline(examples, top_k=2)
self.assertEqual(outputs, ([[{'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)}, {'score': ANY(float), 'answer': ANY(str), 'start': ANY(int), 'end': ANY(int)}]] * 3))
_torch
_detectron2
_pytesseract
def test_small_model_pt(self):
dqa_pipeline = pipeline('document-question-answering', model='hf-internal-testing/tiny-random-layoutlmv2')
image = INVOICE_URL
question = 'How many cats are there?'
expected_output = [{'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39}, {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40}]
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
image = './tests/fixtures/tests_samples/COCO/.png'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(outputs, [])
image = './tests/fixtures/tests_samples/COCO/.png'
words = []
boxes = []
outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
self.assertEqual(outputs, [])
_torch
_detectron2
_pytesseract
def test_large_model_pt(self):
dqa_pipeline = pipeline('document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165')
image = INVOICE_URL
question = 'What is the invoice number?'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16}])
outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16}])
outputs = dqa_pipeline([{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), ([[{'score': 0.9944, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0009, 'answer': 'us-001', 'start': 16, 'end': 16}]] * 2))
_torch
_detectron2
_pytesseract
def test_large_model_pt_chunk(self):
dqa_pipeline = pipeline('document-question-answering', model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa', revision='9977165', max_seq_len=50)
image = INVOICE_URL
question = 'What is the invoice number?'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9974, 'answer': '', 'start': 23, 'end': 23}, {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16}])
outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9974, 'answer': '', 'start': 23, 'end': 23}, {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16}])
outputs = dqa_pipeline([{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), ([[{'score': 0.9974, 'answer': '', 'start': 23, 'end': 23}, {'score': 0.9948, 'answer': 'us-001', 'start': 16, 'end': 16}]] * 2))
_torch
_pytesseract
_vision
def test_large_model_pt_layoutlm(self):
tokenizer = AutoTokenizer.from_pretrained('impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
dqa_pipeline = pipeline('document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3')
image = INVOICE_URL
question = 'What is the invoice number?'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0819, 'answer': '', 'start': 23, 'end': 23}])
outputs = dqa_pipeline({'image': image, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0819, 'answer': '', 'start': 23, 'end': 23}])
outputs = dqa_pipeline([{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), ([[{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0819, 'answer': '', 'start': 23, 'end': 23}]] * 2))
word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.4251, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.0819, 'answer': '', 'start': 23, 'end': 23}])
_torch
_pytesseract
_vision
def test_large_model_pt_layoutlm_chunk(self):
tokenizer = AutoTokenizer.from_pretrained('impira/layoutlm-document-qa', revision='3dc6de3', add_prefix_space=True)
dqa_pipeline = pipeline('document-question-answering', model='impira/layoutlm-document-qa', tokenizer=tokenizer, revision='3dc6de3', max_seq_len=50)
image = INVOICE_URL
question = 'What is the invoice number?'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16}])
outputs = dqa_pipeline([{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), ([[{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16}]] * 2))
word_boxes = list(zip(*apply_tesseract(load_image(image), None, '')))
outputs = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question}, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'score': 0.9999, 'answer': 'us-001', 'start': 16, 'end': 16}, {'score': 0.9998, 'answer': 'us-001', 'start': 16, 'end': 16}])
_torch
def test_large_model_pt_donut(self):
dqa_pipeline = pipeline('document-question-answering', model='naver-clova-ix/donut-base-finetuned-docvqa', tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa'), feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa')
image = INVOICE_URL
question = 'What is the invoice number?'
outputs = dqa_pipeline(image=image, question=question, top_k=2)
self.assertEqual(nested_simplify(outputs, decimals=4), [{'answer': 'us-001'}])
_tf
('Document question answering not implemented in TF')
def test_small_model_tf(self):
pass |
class FixAudioLength(object):
    """Pad or truncate a 1-D audio signal to a fixed duration.

    The target length in samples is `time * SAMPLE_RATE` (module-level
    constant); longer signals are cut, shorter ones are zero-padded.
    """

    def __init__(self, time=1):
        # Target clip duration in seconds.
        self.time = time

    def __call__(self, data):
        target_len = int(self.time * SAMPLE_RATE)
        current_len = len(data)
        if current_len > target_len:
            return data[:target_len]
        if current_len < target_len:
            # Zero-pad at the end up to the target length.
            return np.pad(data, (0, target_len - current_len), 'constant')
        return data
def _unwrap_layers(module: nn.Module):
    """Recursively replace every `Wrapper` child of `module` with the layer it
    wraps, calling `on_unwrap()` on each wrapper first (in place)."""
    for child_name, child in module.named_children():
        if not isinstance(child, Wrapper):
            # Not a wrapper: descend into its children.
            _unwrap_layers(child)
            continue
        child.on_unwrap()
        # Re-register the inner layer under the same attribute name.
        module.add_module(child_name, child.layer)
class TestSegmentationMask(unittest.TestCase):
    """Consistency tests between the polygon ('poly') and bitmap ('mask')
    representations of `SegmentationMask` under conversion and transforms."""
    def __init__(self, method_name='runTest'):
        super(TestSegmentationMask, self).__init__(method_name)
        # Two polygons: an irregular outline plus an axis-aligned square.
        poly = [[[423.0, 306.5, 406.5, 277.0, 400.0, 271.5, 389.5, 277.0, 387.5, 292.0, 384.5, 295.0, 374.5, 220.0, 378.5, 210.0, 391.0, 200.5, 404.0, 199.5, 414.0, 203.5, 425.5, 221.0, 438.5, 297.0, 423.0, 306.5], [100, 100, 200, 100, 200, 200, 100, 200]]]
        width = 640
        height = 480
        size = (width, height)
        # Same geometry in both representations, used as fixtures throughout.
        self.P = SegmentationMask(poly, size, 'poly')
        self.M = SegmentationMask(poly, size, 'poly').convert('mask')
    def L1(self, A, B):
        """Sum of absolute pixel differences between two masks."""
        diff = (A.get_mask_tensor() - B.get_mask_tensor())
        diff = torch.sum(torch.abs(diff.float())).item()
        return diff
    def test_convert(self):
        # Round-trip conversions must stay within an empirical error budget.
        M_hat = self.M.convert('poly').convert('mask')
        P_hat = self.P.convert('mask').convert('poly')
        diff_mask = self.L1(self.M, M_hat)
        diff_poly = self.L1(self.P, P_hat)
        self.assertTrue((diff_mask == diff_poly))
        self.assertTrue((diff_mask <= 8169.0))
        self.assertTrue((diff_poly <= 8169.0))
    def test_crop(self):
        box = [400, 250, 500, 300]
        diff = self.L1(self.M.crop(box), self.P.crop(box))
        self.assertTrue((diff <= 1.0))
    def test_resize(self):
        new_size = (50, 25)
        M_hat = self.M.resize(new_size)
        P_hat = self.P.resize(new_size)
        diff = self.L1(M_hat, P_hat)
        self.assertTrue((self.M.size == self.P.size))
        self.assertTrue((M_hat.size == P_hat.size))
        self.assertTrue((self.M.size != M_hat.size))
        self.assertTrue((diff <= 255.0))
    def test_transpose(self):
        # Flip codes follow the PIL convention.
        FLIP_LEFT_RIGHT = 0
        FLIP_TOP_BOTTOM = 1
        diff_hor = self.L1(self.M.transpose(FLIP_LEFT_RIGHT), self.P.transpose(FLIP_LEFT_RIGHT))
        diff_ver = self.L1(self.M.transpose(FLIP_TOP_BOTTOM), self.P.transpose(FLIP_TOP_BOTTOM))
        self.assertTrue((diff_hor <= 53250.0))
        self.assertTrue((diff_ver <= 42494.0))
class ThreadPoolRunner(BaseRunner):
    """Runner that executes schema operations across a pool of worker threads,
    feeding tasks one-at-a-time from a generator and relaying worker events to
    the caller."""
    workers_num: int = 2
    request_tls_verify: (bool | str) = True
    request_proxy: (str | None) = None
    request_cert: (RequestCert | None) = None
    def _execute(self, results: TestResultSet, stop_event: threading.Event) -> Generator[(events.ExecutionEvent, None, None)]:
        tasks_generator = iter(self.schema.get_all_operations())
        generator_done = threading.Event()
        tasks_queue: Queue = Queue()
        # Prime the queue with one task per worker; mark the generator done
        # early if it has fewer tasks than workers.
        for _ in range(self.workers_num):
            try:
                tasks_queue.queue.append(next(tasks_generator))
            except StopIteration:
                generator_done.set()
                break
        events_queue: Queue = Queue()
        workers = self._init_workers(tasks_queue, events_queue, results, generator_done)
        def stop_workers() -> None:
            # Forcefully stop each worker thread, then join it.
            for worker in workers:
                ident = cast(int, worker.ident)
                stop_worker(ident)
                worker.join()
        is_finished = False
        try:
            while (not is_finished):
                # Brief sleep keeps this polling loop from burning CPU.
                time.sleep(0.001)
                is_finished = all(((not worker.is_alive()) for worker in workers))
                # Drain events even after workers finish, so none are lost.
                while (not events_queue.empty()):
                    event = events_queue.get()
                    if (stop_event.is_set() or isinstance(event, events.Interrupted) or self._should_stop(event)):
                        stop_workers()
                        is_finished = True
                        if stop_event.is_set():
                            break
                    (yield event)
                    # Refill the task queue lazily: one new task per started
                    # execution, until the generator is exhausted.
                    if (isinstance(event, events.BeforeExecution) and (not generator_done.is_set())):
                        try:
                            tasks_queue.put(next(tasks_generator))
                        except StopIteration:
                            generator_done.set()
        except KeyboardInterrupt:
            stop_workers()
            (yield events.Interrupted())
    def _init_workers(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet, generator_done: threading.Event) -> list[threading.Thread]:
        """Create and start one thread per worker slot."""
        workers = [threading.Thread(target=self._get_task(), kwargs=self._get_worker_kwargs(tasks_queue, events_queue, results, generator_done), name=f'schemathesis_{num}') for num in range(self.workers_num)]
        for worker in workers:
            worker.start()
        return workers
    def _get_task(self) -> Callable:
        return thread_task
    def _get_worker_kwargs(self, tasks_queue: Queue, events_queue: Queue, results: TestResultSet, generator_done: threading.Event) -> dict[(str, Any)]:
        """Bundle all per-worker configuration into the kwargs dict passed to
        `thread_task`."""
        return {'tasks_queue': tasks_queue, 'events_queue': events_queue, 'generator_done': generator_done, 'checks': self.checks, 'targets': self.targets, 'settings': self.hypothesis_settings, 'generation_config': self.generation_config, 'auth': self.auth, 'auth_type': self.auth_type, 'headers': self.headers, 'seed': self.seed, 'results': results, 'stateful': self.stateful, 'stateful_recursion_limit': self.stateful_recursion_limit, 'data_generation_methods': self.schema.data_generation_methods, 'kwargs': {'request_timeout': self.request_timeout, 'request_tls_verify': self.request_tls_verify, 'request_proxy': self.request_proxy, 'request_cert': self.request_cert, 'store_interactions': self.store_interactions, 'max_response_time': self.max_response_time, 'dry_run': self.dry_run}}
class GraphConvolution(nn.Module):
    """Single graph-convolution layer: act(adj @ (input @ W) + b) with
    optional BatchNorm, residual connection and dropout.

    The residual connection is silently disabled when input and output
    feature sizes differ.
    """

    def __init__(self, in_features, out_features, residual=False, batch_norm=False, activation=F.relu, dropout=0, bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        # Residual only makes sense when shapes match.
        self.residual = residual and (self.in_features == self.out_features)
        self.batchnorm = nn.BatchNorm1d(out_features) if batch_norm else None
        self.activation = activation
        self.dropout = nn.Dropout(dropout)
        self._norm = False

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)]."""
        bound = 1.0 / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        # Feature transform, then neighborhood aggregation.
        out = adj.matmul(input.matmul(self.weight))
        if self.bias is not None:
            out = out + self.bias
        if self.batchnorm is not None:
            # BatchNorm1d wants (N, C, L): move features to dim 1 and back.
            out = self.batchnorm(out.transpose(1, 2)).transpose(1, 2)
        out = self.activation(out)
        if self.residual:
            out = input + out
        return self.dropout(out)

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.in_features} -> {self.out_features})'
class UpstreamExpert(UpstreamBase):
    """Feature-extraction upstream configured from a YAML file; dispatches to
    either a Kaldi-style extracter or an on-the-fly preprocessor."""
    def __init__(self, model_config, **kwargs):
        super().__init__(**kwargs)
        with open(model_config, 'r') as file:
            self.config = yaml.load(file, Loader=yaml.FullLoader)
        if ('kaldi' in self.config):
            # Kaldi extracter path: frame shift (ms) determines downsampling.
            (self.extracter, self.output_dim, frame_shift) = get_extracter(self.config)
            self.downsample_rate = round(((frame_shift * SAMPLE_RATE) / 1000))
        else:
            # Preprocessor path: hop size (ms, default 10) determines it.
            (self.extracter, self.output_dim, _) = get_preprocessor(self.config, process_input_only=True)
            self.downsample_rate = round(((self.config.get('hop_ms', 10) * SAMPLE_RATE) / 1000))
    def _extractor_forward(self, wavs):
        """Run the Kaldi extracter on each waveform independently."""
        feats = []
        for wav in wavs:
            feats.append(self.extracter(wav))
        return feats
    def get_downsample_rates(self, key: str) -> int:
        # Same rate regardless of the requested feature key.
        return self.downsample_rate
    def _preprocessor_forward(self, wavs):
        """Batch waveforms, run the preprocessor, then trim each output back
        to a length proportional to its original waveform length."""
        wav_lengths = [len(wav) for wav in wavs]
        feats = pad_sequence(wavs, batch_first=True)
        feats = feats.unsqueeze(1)
        feats = self.extracter(feats)[0]
        # Frames-per-sample ratio estimated from the first (longest) item.
        ratio = (len(feats[0]) / wav_lengths[0])
        feat_lengths = [round((l * ratio)) for l in wav_lengths]
        feats = [f[:l] for (f, l) in zip(feats, feat_lengths)]
        return feats
    def forward(self, wavs):
        if ('kaldi' in self.config):
            feats = self._extractor_forward(wavs)
        else:
            feats = self._preprocessor_forward(wavs)
        padded_feats = pad_sequence(feats, batch_first=True)
        return {'last_hidden_state': padded_feats, 'hidden_states': [padded_feats]}
class TaggerPipelineServer(Distributed):
    """Distributes a tagging pipeline over document batches via the inherited
    `Distributed` client."""
    # NOTE(review): `worker` takes no `self` and is invoked unbound as
    # `TaggerPipelineServer.worker` below — it behaves like a @staticmethod
    # whose decorator may have been lost; confirm upstream.
    def worker(pipeline, corpus, ngrams=5):
        # Apply every tagger in the pipeline to every document, in place.
        for doc in corpus:
            for name in pipeline:
                pipeline[name].tag(doc, ngrams=ngrams)
        return corpus
    def apply(self, pipeline, documents, block_size=None):
        """Tag `documents` (a list of document lists) in parallel and return
        the results regrouped to the original nesting."""
        items = itertools.chain.from_iterable(documents)
        if (block_size is None):
            # Aim for one block per worker when no block size is given.
            num_items = np.sum([len(x) for x in documents])
            block_size = int(np.ceil((num_items / self.num_workers)))
            print(f'auto block size={block_size}')
        blocks = (list(partition_all(block_size, items)) if block_size else documents)
        print(f'Partitioned into {len(blocks)} blocks, {np.unique([len(x) for x in blocks])} sizes')
        do = delayed(partial(TaggerPipelineServer.worker, pipeline))
        jobs = (do(batch) for batch in blocks)
        results = list(itertools.chain.from_iterable(self.client(jobs)))
        # Re-split the flat result list back into the caller's sublists.
        i = 0
        items = []
        for n in [len(x) for x in documents]:
            items.append(results[i:(i + n)].copy())
            i += n
        return items
def cast_flat_shape(shape: Shape) -> Sequence[int]:
    """Type-narrow `shape` to a flat integer sequence; asserts that it is not
    a (nested) tuple shape."""
    assert (not is_tuple_shape(shape))
    return shape
def vgg11(output_dim, k_lipschitz=None, p_drop=0.5):
    """Build an 11-layer VGG (configuration 'A').

    :param output_dim: number of output classes.
    :param k_lipschitz: optional global Lipschitz constant; distributed evenly
        across the 11 weight layers via an 11th root.
    :param p_drop: dropout probability for the classifier head.
    """
    if k_lipschitz is not None:
        k_lipschitz = k_lipschitz ** (1.0 / 11.0)
    features = make_layers(cfg['A'], k_lipschitz=k_lipschitz)
    return VGG(features, output_dim=output_dim, k_lipschitz=k_lipschitz, p_drop=p_drop)
def is_valid_date_range(start_date: str, end_date: str, lower_bound: str) -> bool:
    """Return True when the range is valid: `end_date` is no later than
    tomorrow and `start_date` is on/after `lower_bound`.

    Dates are strings parsed by the module-level `convert_date` helper.
    """
    # Fixes: return the boolean expression directly instead of an
    # if/True/else/False chain, and correct the 'tommorrow' typo (local name).
    tomorrow = datetime.today() + timedelta(days=1)
    return tomorrow >= convert_date(end_date) and convert_date(start_date) >= convert_date(lower_bound)
class DotAttention(nn.Module):
    """Parameter-free dot-product attention pooling: scores each timestep of
    `values` against `query` and returns the weighted sum over time."""

    def __init__(self):
        super().__init__()

    def forward(self, values, query):
        weights = self._get_weights(values, query)
        # (B, D, T) bmm (B, T, 1) -> (B, D, 1) -> (B, D)
        pooled = torch.bmm(values.transpose(1, 2), weights.unsqueeze(2))
        return pooled.squeeze(2)

    def _get_weights(self, values, query):
        """Softmax-normalized dot products between each value and the query."""
        hidden = query.squeeze(0)
        # (B, T, D) bmm (B, D, 1) -> (B, T)
        scores = torch.bmm(values, hidden.unsqueeze(2)).squeeze(2)
        return F.softmax(scores, 1)
def get_datasampler(dataset, mode):
    """Build a `DistributedSampler` over `dataset` using the process group's
    `world_size()`/`rank()` helpers; shuffling is enabled only in 'train' mode."""
    return torch.utils.data.distributed.DistributedSampler(dataset, shuffle=(mode == 'train'), num_replicas=world_size(), rank=rank())
class Synthetic2DType(enum.Enum):
    """Kinds of synthetic 2-D toy datasets.

    Values are assigned by `enum.auto()`, so member order is significant.
    """
    MOONS = enum.auto()
    CHECKERBOARD = enum.auto()
    CONCENTRIC_RINGS = enum.auto()
    CONCENTRIC_SQUARES = enum.auto()
    OLYMPIC_RINGS = enum.auto()
    OLYMPIC_SQUARES = enum.auto()
def pesq_mos(clean: str, enhanced: str):
    """Compute the PESQ MOS between a clean and an enhanced wav file.

    Both files must share the same sample rate; narrowband mode is used below
    16 kHz, wideband otherwise.
    """
    clean_sr, clean_wav = wavfile.read(clean)
    enhanced_sr, enhanced_wav = wavfile.read(enhanced)
    assert clean_sr == enhanced_sr
    if clean_sr < 16000:
        mode = 'nb'
    else:
        mode = 'wb'
    return pesq(clean_sr, clean_wav, enhanced_wav, mode)
def load_test_data(path):
    """Read up to 11 tab-separated (sentence1, sentence2, label) triples from
    `path`, skipping malformed lines; labels are parsed as ints.

    Returns a (sents1, sents2, labels) tuple of parallel lists.
    """
    sents1, sents2, labels = [], [], []
    with open(path, 'r', encoding='utf8') as f:
        for raw in f:
            fields = raw.strip().split('\t')
            if len(fields) != 3:
                # Skip lines without exactly three tab-separated fields.
                continue
            first, second, label = fields
            sents1.append(first)
            sents2.append(second)
            labels.append(int(label))
            if len(sents1) > 10:
                # Deliberate truncation: keep at most 11 examples.
                break
    return (sents1, sents2, labels)
class Conv_3d(nn.Module):
    """3-D convolution followed by an SEGating block, with optional
    BatchNorm3d appended at the end."""

    def __init__(self, in_ch, out_ch, kernel_size, stride=1, padding=0, bias=True, batchnorm=False):
        super().__init__()
        layers = [
            nn.Conv3d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias),
            SEGating(out_ch),
        ]
        if batchnorm:
            layers.append(nn.BatchNorm3d(out_ch))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class InvertibleLinear(nn.Module):
    """Invertible linear map for normalizing flows, initialized to a random
    permutation of the identity (orthogonal, det = +/-1).

    `forward`/`inverse` optionally propagate log-densities via the
    change-of-variables formula using log|det W|.
    """

    def __init__(self, dim):
        super(InvertibleLinear, self).__init__()
        self.dim = dim
        # Random row permutation of the identity matrix.
        self.weight = nn.Parameter(torch.eye(dim)[torch.randperm(dim)])

    def forward(self, x, logpx=None):
        y = F.linear(x, self.weight)
        if logpx is None:
            return y
        # Change of variables: log p(y) = log p(x) - log|det dW/dx|.
        return y, logpx - self._logdetgrad

    def inverse(self, y, logpy=None):
        x = F.linear(y, self.weight.inverse())
        if logpy is None:
            return x
        return x, logpy + self._logdetgrad

    @property
    def _logdetgrad(self):
        # BUG FIX: forward/inverse access `self._logdetgrad` as an attribute,
        # so this must be a property; as a plain method, `logpx - self._logdetgrad`
        # subtracted a bound method from a tensor and raised a TypeError.
        return torch.log(torch.abs(torch.det(self.weight)))

    def extra_repr(self):
        return 'dim={}'.format(self.dim)
def __tar_xz_parallel(args, dirname, excludes=[]):
    """Create `<dirname>.tar.xz` under `args.prefix` using a
    tar | xz | dd pipeline with `args.nprocesses` xz threads.

    Returns True when all three stages exit cleanly, False if the directory
    does not exist or any stage fails. `excludes` entries become tar
    --exclude patterns (read-only; the mutable default is never modified).
    """
    dirpath = f'{args.prefix}/{dirname}'
    if (not os.path.exists(dirpath)):
        return False
    flags = [f"--exclude='{e}'" for e in excludes]
    flag_str = ' '.join(flags)
    # Stage 1: tar the directory to stdout.
    tar_cmd = cmdsplit(f'tar cf - {flag_str} {dirname}')
    tarproc = subprocess.Popen(tar_cmd, cwd=args.prefix, stdout=subprocess.PIPE)
    # Stage 2: compress with multi-threaded xz at maximum level.
    xz_cmd = cmdsplit(f'xz -9 --threads={args.nprocesses} -')
    xzproc = subprocess.Popen(xz_cmd, cwd=args.prefix, stdin=tarproc.stdout, stdout=subprocess.PIPE)
    # Stage 3: dd writes the compressed stream to the output file.
    dd_cmd = cmdsplit(f'dd of={dirname}.tar.xz')
    ddproc = subprocess.Popen(dd_cmd, cwd=args.prefix, stdin=xzproc.stdout, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    tar_rc = tarproc.wait()
    xz_rc = xzproc.wait()
    dd_rc = ddproc.wait()
    if ((tar_rc == 0) and (xz_rc == 0) and (dd_rc == 0)):
        return True
    else:
        return False
class EncoderBlock(nn.Module):
    """Downsampling encoder stage: strided 5x5 conv -> BatchNorm -> ReLU.

    With `out=True`, also returns the pre-BatchNorm conv activations (used as
    intermediate features, e.g. for reconstruction losses).
    """

    def __init__(self, channel_in, channel_out):
        super(EncoderBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels=channel_in, out_channels=channel_out, kernel_size=5, padding=2, stride=2, bias=False)
        self.bn = nn.BatchNorm2d(num_features=channel_out, momentum=0.9)

    def forward(self, ten, out=False, t=False):
        conv_out = self.conv(ten)
        if out:
            # Non-inplace ReLU here so `conv_out` stays untouched.
            activated = F.relu(self.bn(conv_out), False)
            return activated, conv_out
        return F.relu(self.bn(conv_out), True)
class ShuffleNet(nn.Module):
    """ShuffleNet backbone for re-identification; channel widths come from the
    module-level `cfg[num_groups]` table.

    In eval mode, `forward` returns the pooled feature vector; in training it
    returns logits ('softmax' loss) or (logits, features) ('triplet' loss).
    """
    def __init__(self, num_classes, loss='softmax', num_groups=3, **kwargs):
        super(ShuffleNet, self).__init__()
        self.loss = loss
        self.conv1 = nn.Sequential(nn.Conv2d(3, 24, 3, stride=2, padding=1, bias=False), nn.BatchNorm2d(24), nn.ReLU(), nn.MaxPool2d(3, stride=2, padding=1))
        # Stages 2-4: first block downsamples (stride 2), the rest keep size.
        self.stage2 = nn.Sequential(Bottleneck(24, cfg[num_groups][0], 2, num_groups, group_conv1x1=False), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups), Bottleneck(cfg[num_groups][0], cfg[num_groups][0], 1, num_groups))
        self.stage3 = nn.Sequential(Bottleneck(cfg[num_groups][0], cfg[num_groups][1], 2, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups), Bottleneck(cfg[num_groups][1], cfg[num_groups][1], 1, num_groups))
        self.stage4 = nn.Sequential(Bottleneck(cfg[num_groups][1], cfg[num_groups][2], 2, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups), Bottleneck(cfg[num_groups][2], cfg[num_groups][2], 1, num_groups))
        self.classifier = nn.Linear(cfg[num_groups][2], num_classes)
        self.feat_dim = cfg[num_groups][2]
    def forward(self, x):
        x = self.conv1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        # Global average pool to a flat feature vector.
        x = F.avg_pool2d(x, x.size()[2:]).view(x.size(0), (- 1))
        if (not self.training):
            return x
        y = self.classifier(x)
        if (self.loss == 'softmax'):
            return y
        elif (self.loss == 'triplet'):
            return (y, x)
        else:
            raise KeyError('Unsupported loss: {}'.format(self.loss))
class SummaryWriter():
    """No-op drop-in replacement for `torch.utils.tensorboard.SummaryWriter`,
    used when TensorBoard logging is unavailable or disabled. Every method
    accepts the standard arguments and silently discards them."""
    def __init__(self, log_dir='', **kwargs):
        pass
    def add_scalar(self, tag, value, global_step=None, walltime=None):
        pass
    def add_scalars(self, tag, tag_scalar_dict, global_step=None, walltime=None):
        pass
    def add_histogram(self, tag, values, global_step=None, bins='auto', walltime=None, max_bins=None):
        pass
    def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'):
        pass
    def add_images(self, tag, img_tensor, global_step=None, walltime=None, dataformats='NCHW'):
        pass
    def add_figure(self, tag, figure, global_step=None, close=True, walltime=None):
        pass
    def add_text(self, tag, text_string, global_step=None, walltime=None):
        pass
    def flush(self):
        pass
class FNCSimpleLabelSchema(LabelSchema):
    """Three-way FNC stance label schema: agree / disagree / not enough info."""

    def __init__(self):
        stance_labels = ['agree', 'disagree', 'not enough info']
        super().__init__(stance_labels)
class PosTaggingSpacy(PosTagging):
    """POS tagging backed by a spaCy pipeline.

    NOTE(review): when `nlp` is not supplied, this branch only prints loading
    messages and never assigns `self.nlp`, so `pos_tag_raw_text` would raise
    AttributeError — the actual `spacy.load(...)` call appears to have been
    lost in extraction; confirm against upstream.
    """
    def __init__(self, nlp=None, separator='|', lang='en'):
        if (not nlp):
            print('Loading Spacy model')
            print(('Spacy model loaded ' + lang))
        else:
            self.nlp = nlp
        # Joins token and tag in the string output format.
        self.separator = separator
    def pos_tag_raw_text(self, text, as_tuple_list=True):
        """Tag `text`; returns per-sentence (token, tag) tuples, or a single
        string with '[ENDSENT]' between sentences when as_tuple_list=False."""
        # Collapse runs of spaces before tagging.
        text = re.sub('[ ]+', ' ', text).strip()
        doc = self.nlp(text)
        if as_tuple_list:
            return [[(token.text, token.tag_) for token in sent] for sent in doc.sents]
        return '[ENDSENT]'.join((' '.join((self.separator.join([token.text, token.tag_]) for token in sent)) for sent in doc.sents))
def _linprog_highs_ipm_doc(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None, bounds=None, method='highs-ipm', callback=None, maxiter=None, disp=False, presolve=True, time_limit=None, dual_feasibility_tolerance=None, primal_feasibility_tolerance=None, ipm_optimality_tolerance=None, **unknown_options):
    # Documentation-only stub mirroring the SciPy pattern: this function is
    # never called; it exists solely to carry the docstring for
    # linprog(method='highs-ipm'). NOTE(review): the original docstring
    # appears to have been stripped in extraction.
    pass
class LSTM_VIDEO(nn.Module):
    """LSTM tail over per-frame video features, configured from
    `cfg.DYNAMIC_FILTER.LSTM_VIDEO`, with dataset-specific input-size
    overrides for C3D features."""

    def __init__(self, cfg):
        super(LSTM_VIDEO, self).__init__()
        self.input_size = cfg.DYNAMIC_FILTER.LSTM_VIDEO.INPUT_SIZE
        self.num_layers = cfg.DYNAMIC_FILTER.LSTM_VIDEO.NUM_LAYERS
        self.hidden_size = cfg.DYNAMIC_FILTER.LSTM_VIDEO.HIDDEN_SIZE
        self.bias = cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIAS
        self.dropout = cfg.DYNAMIC_FILTER.LSTM_VIDEO.DROPOUT
        self.bidirectional = cfg.DYNAMIC_FILTER.LSTM_VIDEO.BIDIRECTIONAL
        self.batch_first = cfg.DYNAMIC_FILTER.LSTM_VIDEO.BATCH_FIRST
        # C3D feature dimensionality differs per dataset; override here.
        if (cfg.DATASETS.TRAIN == 'anet_cap_train') and (cfg.FEATURE_TYPE == 'c3d'):
            self.input_size = 500
        if ((cfg.DATASETS.TRAIN == 'tacos_train') or (cfg.DATASETS.TRAIN == 'charades_sta_train')) and (cfg.FEATURE_TYPE == 'c3d'):
            self.input_size = 4096
        self.lstm = nn.LSTM(input_size=self.input_size, hidden_size=self.hidden_size, num_layers=self.num_layers, bias=self.bias, dropout=self.dropout, bidirectional=self.bidirectional, batch_first=self.batch_first)

    def forward(self, sequences, lengths, masks=None):
        """Run the LSTM over `sequences`; `lengths` is required for packing.

        :raises ValueError: if `lengths` is None.
        """
        if lengths is None:
            # BUG FIX: the original `raise '...'` raised a plain string, which
            # is itself a TypeError in Python 3; raise a real exception.
            raise ValueError('ERROR in this tail you need lengths of sequences.')
        return feed_forward_rnn(self.lstm, sequences, lengths=lengths)
# NOTE(review): `_model` below looks like a mangled decorator (likely
# `@register_model` from timm) lost in extraction — confirm upstream.
_model
def resnest50d_4s2x40d(pretrained=False, num_classes=1000, in_chans=3, **kwargs):
    """ResNeSt-50d 4s2x40d: radix 4, cardinality 2, base width 40, deep stem,
    with average-pool downsampling. Optionally loads pretrained weights."""
    default_cfg = default_cfgs['resnest50d_4s2x40d']
    model = ResNet(ResNestBottleneck, [3, 4, 6, 3], num_classes=num_classes, in_chans=in_chans, stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, block_args=dict(radix=4, avd=True, avd_first=True), **kwargs)
    model.default_cfg = default_cfg
    if pretrained:
        load_pretrained(model, default_cfg, num_classes, in_chans)
    return model
class ForwardModuleRF(_RFModuleAsPTModule):
    """Wraps an RF module plus a user `forward_step` callback as a PyTorch
    module operating on raw tensor dicts."""
    def __init__(self, rf_module: rf.Module, forward_step: Callable, extern_data: TensorDict):
        super().__init__(rf_module)
        # User callback invoked with (model=..., extern_data=...) per call.
        self.forward_step_func = forward_step
        self.extern_data = extern_data
    def __call__(self, data: Dict[(str, torch.Tensor)]) -> Dict[(str, torch.Tensor)]:
        """Map raw tensors into extern_data, run the forward step, and return
        the run context's outputs as a raw tensor dict."""
        extern_data = self.extern_data.copy_template()
        extern_data.assign_from_raw_tensor_dict_(data, with_scalar_dyn_sizes=False, duplicate_dims_are_excluded=True)
        self.forward_step_func(model=self.rf_module, extern_data=extern_data)
        # NOTE(review): bare `_check_matching_outputs()` call — possibly a
        # mangled module-level helper name; confirm upstream.
        _check_matching_outputs()
        return rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
class ExtendedPopREO(BaseMetric):
    """Extended popularity-based Ranking Equal Opportunity (PopREO) metric.

    Splits the catalog into short-head (popular) and long-tail groups at the
    configured `pop_ratio` and returns the coefficient of variation
    (std/mean) of the per-group relevant-recommendation ratios at the cutoff.
    """

    def __init__(self, recommendations, config, params, eval_objects, additional_data):
        super().__init__(recommendations, config, params, eval_objects, additional_data)
        self._cutoff = self._evaluation_objects.cutoff
        self._relevance = self._evaluation_objects.relevance.binary_relevance
        # Fraction of interactions defining the "short head" (default 0.8).
        self._pop_ratio = self._additional_data.get('pop_ratio', 0.8)
        self._pop_obj = self._evaluation_objects.pop.get_custom_pop_obj(self._pop_ratio)
        self._short_head = set(self._pop_obj.get_short_head())
        self._long_tail = set(self._pop_obj.get_long_tail())
        self._train = self._evaluation_objects.data.train_dict
        # Per-user [head, tail] hit counts and opportunity counts.
        self._num = []
        self._den = []

    @staticmethod
    def name():
        # BUG FIX: was `def name():` without `self` and without @staticmethod,
        # so `instance.name()` raised a TypeError. @staticmethod keeps both
        # `instance.name()` and `ExtendedPopREO.name()` working.
        return 'ExtendedPopREO'

    def __user_pop_reo(self, user_recommendations, cutoff, long_tail, short_head, u_train, user_relevant_items):
        """Per-user relevant-hit and opportunity counts for head and tail."""
        recommended_items = set([i for (i, _) in user_recommendations[:cutoff] if (i in user_relevant_items)])
        num_h = len((recommended_items & short_head))
        num_t = len((recommended_items & long_tail))
        # Opportunities exclude items already seen in training.
        den_h = len(((short_head & user_relevant_items) - u_train))
        den_t = len(((long_tail & user_relevant_items) - u_train))
        return (num_h, num_t, den_h, den_t)

    def eval(self):
        """Aggregate counts over all users and return std/mean of the two
        group ratios (lower means more equal opportunity)."""
        for (u, u_r) in self._recommendations.items():
            if len(self._relevance.get_user_rel(u)):
                (num_h, num_t, den_h, den_t) = self.__user_pop_reo(u_r, self._cutoff, self._long_tail, self._short_head, set(self._train[u].keys()), set(self._relevance.get_user_rel(u)))
                self._num.append([num_h, num_t])
                self._den.append([den_h, den_t])
        self._num = np.sum(np.array(self._num), axis=0)
        self._den = np.sum(np.array(self._den), axis=0)
        pr = (self._num / self._den)
        return (np.std(pr) / np.mean(pr))
def hook_batchnormNd(m, x, y):
    """FLOP-counting hook for BatchNorm1d/2d/3d.

    Counts 2 ops per output element for normalization (subtract mean, divide
    by std) plus 2 more per element when the affine scale/shift is enabled.
    """
    elements = y.numel()
    total = 2 * elements
    if m.affine:
        total += 2 * elements
    return int(total)
class LWNN(Estimator):
    """Lightweight neural-network cardinality estimator: encodes a query,
    runs the model, and decodes the prediction back to a row count."""
    def __init__(self, model, model_name, pg_est, table):
        super(LWNN, self).__init__(table=table, model=model_name)
        self.model = model.to(DEVICE)
        # Inference-only estimator.
        self.model.eval()
        # Postgres estimator used as a feature source during query encoding.
        self.pg_est = pg_est
    def query(self, query):
        """Estimate cardinality for a `Query` (encoded first) or a raw vector."""
        if isinstance(query, Query):
            query = encode_query(self.table, query, self.pg_est)
        return self.query_vector(query)
    def query_vector(self, vec):
        """Run the model on an encoded vector; returns (estimate, latency_ms).
        The estimate is decoded, rounded, and clamped to be non-negative."""
        start_stmp = time.time()
        with torch.no_grad():
            pred = self.model(torch.FloatTensor(vec).to(DEVICE)).cpu().item()
        dur_ms = ((time.time() - start_stmp) * 1000.0)
        return (np.maximum(np.round(decode_label(pred)), 0.0), dur_ms)
class Hex(Type):
    """Config value type for hexadecimal integers with a mandatory 0x/0X prefix."""

    def from_str(self, s):
        # Reject values without an explicit hex prefix.
        assert s.startswith(('0x', '0X'))
        return int(s, 16)
class MegatronParser(Parser):
    """Argument parser for partitioning Megatron language models via fairseq;
    merges fairseq's own parsed arguments into the partitioning namespace."""
    def __init__(self) -> None:
        if (not has_fairseq):
            raise ImportError('\n\nPlease install fairseq_for_pipeline:')
        super().__init__()
    def _auto_file_name(self, args) -> str:
        """Derive the output file name from arch, partition count and bandwidth."""
        bw_str = str(args.bw).replace('.', '_')
        model_str = str(args.arch)
        output_file = f'{args.output_file}{model_str}_{args.n_partitions}p_bw{bw_str}'
        if args.async_pipeline:
            output_file += '_async'
        return output_file
    def _add_data_args(self, group):
        group.add_argument('--dict_path', default='../misc/megatron_11b', help="path to the folder containing megatron's dict.txt")
    def _add_model_args(self, group):
        group.add_argument('--arch', choices=['transformer_lm_megatron', 'transformer_lm_megatron_11b'])
    def _post_parse(self, args, argv):
        """Set up a single-process distributed env, parse the remaining argv
        through fairseq, and copy fairseq's namespace into `args`."""
        # Fake single-node distributed settings so fairseq initializes.
        env = os.environ
        env['MASTER_ADDR'] = '127.0.0.1'
        env['MASTER_PORT'] = '6767'
        env['WORLD_SIZE'] = '1'
        env['RANK'] = '0'
        tmp = argparse.ArgumentParser()
        fairseq_defaults = dict(cpu=True, distributed_world_size=1, model_parallel_size=1, task='language_modeling', share_decoder_input_output_embed=True, checkpoint_suffix='', distributed_backend='gloo', device_id=0, distributed_init_method=None, arch=args.arch)
        tmp.set_defaults(**fairseq_defaults)
        # fairseq expects the data/dict path as the first positional argument.
        argv = ([args.dict_path] + argv)
        fairseq_args = options.parse_args_and_arch(tmp, input_args=argv)
        for (k, v) in vars(fairseq_args).items():
            setattr(args, k, v)
        return args
    def _default_values(self) -> Dict:
        partitioning_defaults = dict(save_memory_mode=True, partitioning_batch_size=1, analysis_batch_size=1, n_partitions=16, basic_blocks=['ModelParallelMultiheadAttention'])
        return partitioning_defaults
def random_orientation(G):
    """Return a random orientation of the (Sage) graph `G` as a DiGraph.

    Each edge is directed independently with probability 1/2 per direction,
    using one batch of random bits. Vertex positions, embedding, weights and
    multi-edge/loop settings are carried over from `G`.

    :raises ValueError: if `G` is not a Sage `Graph`.
    """
    from sage.graphs.graph import Graph
    if (not isinstance(G, Graph)):
        raise ValueError('the input parameter must be a Graph')
    D = DiGraph(data=[G.vertices(sort=False), []], format='vertices_and_edges', multiedges=G.allows_multiple_edges(), loops=G.allows_loops(), weighted=G.weighted(), pos=G.get_pos(), name='Random orientation of {}'.format(G.name()))
    if hasattr(G, '_embedding'):
        D._embedding = copy(G._embedding)
    from sage.misc.prandom import getrandbits
    # One random bit per edge, consumed from the low end.
    rbits = getrandbits(G.size())
    for (u, v, l) in G.edge_iterator():
        if (rbits % 2):
            D.add_edge(u, v, l)
        else:
            D.add_edge(v, u, l)
        rbits >>= 1
    return D
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.