code stringlengths 101 5.91M |
|---|
def distort_color(image, color_ordering=0, fast_mode=True, scope=None, lower=0.75, upper=1.25, hue_max_delta=0.1, brightness_max_delta=(16.0 / 255.0)):
    """Randomly distort the colors of an image tensor (values in [0, 1]).

    Each `color_ordering` applies the same four (two in fast mode) random
    ops in a different sequence, so data augmentation is not biased toward
    one fixed op order. The result is clipped back to [0, 1].

    Raises:
        ValueError: if color_ordering is not in [0, 3].
    """
    def _brightness(img):
        return tf.image.random_brightness(img, max_delta=brightness_max_delta)

    def _saturation(img):
        return tf.image.random_saturation(img, lower=lower, upper=upper)

    def _hue(img):
        return tf.image.random_hue(img, max_delta=hue_max_delta)

    def _contrast(img):
        return tf.image.random_contrast(img, lower=lower, upper=upper)

    with tf.name_scope(scope, 'distort_color', [image]):
        if fast_mode:
            # fast mode: only brightness and saturation, in either order
            ops = [_brightness, _saturation] if color_ordering == 0 else [_saturation, _brightness]
        elif color_ordering == 0:
            ops = [_brightness, _saturation, _hue, _contrast]
        elif color_ordering == 1:
            ops = [_saturation, _brightness, _contrast, _hue]
        elif color_ordering == 2:
            ops = [_contrast, _hue, _brightness, _saturation]
        elif color_ordering == 3:
            ops = [_hue, _saturation, _contrast, _brightness]
        else:
            raise ValueError('color_ordering must be in [0, 3]')
        for op in ops:
            image = op(image)
        return tf.clip_by_value(image, 0.0, 1.0)
class Factory(BaseFactory):
    """Hourglass-based decoder factory for 80x80 images.

    Fixes over the original:
      * ``pt_defaults_scope_value`` was defined without ``self`` even though
        it is invoked as ``self.pt_defaults_scope_value()`` in feature2image().
      * ``__init__`` tested ``'image_channels' in options`` which raises
        TypeError when ``options`` is left at its default of ``None``.
    """

    # default dimensionality of per-patch features
    default_patch_feature_dim = 8

    def pt_defaults_scope_value(self):
        """Default prettytensor scope settings used by feature2image()."""
        return {
            'activation_fn': default_activation.current_value,
            'batch_normalize': True,
            'learned_moments_update_rate': 0.0003,
            'variance_epsilon': 0.001,
            'scale_after_normalization': True,
        }

    def __init__(self, recon_dist_param_num=1, options=None):
        super().__init__(recon_dist_param_num, options)
        # guard against options=None (the default); fall back to RGB
        if options and ('image_channels' in options):
            self.image_channels = options['image_channels']
        else:
            self.image_channels = 3

    def image_size(self):
        """Output image resolution (height, width)."""
        return (80, 80)

    def input_feature_dim(self):
        """Dimensionality of the latent feature expected by feature2image()."""
        return 64

    def feature2image(self, feature_tensor):
        """Decode a feature tensor into an image via an hourglass network.

        The final layer emits ``3 * recon_dist_param_num`` channels, i.e. one
        parameter set per RGB channel of the reconstruction distribution.
        """
        output_channels = (3 * self.recon_dist_param_num)
        # hourglass definition: conv depths on the encoder side, with skip
        # connections and max-pool downsampling between stages
        hgd = [
            {'type': 'conv2d', 'depth': 64, 'decoder_depth': output_channels, 'decoder_activation_fn': None},
            {'type': 'conv2d', 'depth': 64, 'decoder_depth': 32},
            {'type': 'skip', 'layer_num': 2},
            {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2},
            {'type': 'conv2d', 'depth': 128, 'decoder_depth': 64},
            {'type': 'skip', 'layer_num': 2},
            {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2},
            {'type': 'conv2d', 'depth': 256},
            {'type': 'skip', 'layer_num': 2},
            {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2},
            {'type': 'conv2d', 'depth': 512},
            {'type': 'skip', 'layer_num': 2},
            {'type': 'pool', 'pool': 'max', 'kernel': 2, 'stride': 2},
            {'type': 'conv2d', 'depth': 512},
        ]
        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            output_tensor = hourglass(
                feature_tensor, hgd,
                net_type=(self.options['hourglass_type'] if ('hourglass_type' in self.options) else None),
                extra_highlevel_feature=None)
        return output_tensor

    rotate_dominating_features_if_necessary = GenericDecoderFactory.rotate_dominating_features_if_necessary
def tokenize_for_cer(text):
    """Split ``text`` into its characters for CER computation.

    Whitespace-only characters are dropped; everything else (including
    punctuation) is kept as an individual token.
    """
    # a character survives iff stripping it leaves something, i.e. it is
    # not pure whitespace
    return [ch for ch in text if ch.strip()]
def get_padding(kernel_size: int, stride: int=1, dilation: int=1, **_) -> int:
    """Return the symmetric padding that keeps 'same'-style output size
    for a convolution with the given kernel, stride and dilation.

    Extra keyword arguments are accepted and ignored so callers can pass
    a full conv-config dict.
    """
    effective_span = dilation * (kernel_size - 1)
    return (stride - 1 + effective_span) // 2
def softmax_sample(visit_counts, actions, t):
    """Sample an action with probability softmax(visit_counts / t).

    Bug fixed: the original computed ``np.exp(visit_counts) * (1 / t)`` —
    a constant factor that cancels during normalization, so the temperature
    ``t`` had no effect at all. The intended distribution is a softmax over
    temperature-scaled counts. The max is subtracted before exponentiation
    for numerical stability (this does not change the probabilities).

    Args:
        visit_counts: 1-D array-like of scores/counts, one per action.
        actions: sequence of candidate actions (same length).
        t: temperature; smaller values concentrate mass on the argmax.
    """
    logits = np.asarray(visit_counts, dtype=np.float64) / t
    logits -= logits.max()  # stability: softmax is shift-invariant
    counts_exp = np.exp(logits)
    probs = counts_exp / counts_exp.sum()
    action_idx = np.random.choice(len(actions), p=probs)
    return actions[action_idx]
def test_ext(args, device_id, pt, step):
    """Evaluate an extractive summarizer checkpoint on the test split.

    Loads the checkpoint named by ``pt`` (or ``args.test_from`` when ``pt``
    is empty), restores model-relevant options into ``args``, then runs the
    trainer's test loop over the test dataloader.
    """
    device = 'cpu' if args.visible_gpus == '-1' else 'cuda'
    test_from = pt if pt != '' else args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    # overwrite args with the model-affecting options stored in the checkpoint
    saved_opt = vars(checkpoint['opt'])
    for key, value in saved_opt.items():
        if key in model_flags:
            setattr(args, key, value)
    print(args)
    model = ExtSummarizer(args, device, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(
        args, load_dataset(args, 'test', shuffle=False),
        args.test_batch_size, device, shuffle=False, is_test=True)
    trainer = build_trainer(args, device_id, model, None)
    trainer.test(test_iter, step)
def eval(G, dataset, batch_size, training=True, latents=None, labels=None, ratio=1.0, drange_net=[(- 1), 1], vis_types=None, num=100, grid=None, grid_size=None, step=None, keep_samples=True, num_heads=1, components_num=16, section_size=100):
    """Generate samples from generator G and save visualizations under eval/.

    NOTE(review): shadows the ``eval`` builtin, and ``drange_net`` is a
    mutable default argument — kept as-is to preserve the interface.
    `num_heads` and `components_num` are accepted but unused here —
    presumably consumed by other vis types; confirm upstream.
    """
    def prefix(step):
        # zero-padded step prefix for filenames ('' when step is unknown)
        return ('' if (step is None) else '{:06d}_'.format(step))

    def pattern_of(dir, step, suffix):
        # printf-style filename pattern, e.g. 'eval/images/000100_%06d.png'
        return 'eval/{}/{}%06d.{}'.format(dir, prefix(step), suffix)
    vis = vis_types
    if training:
        # during training only image dumps are produced and all provided
        # latents are consumed in a single section
        vis = {'imgs'}
        section_size = num = len(latents)
    elif (vis is None):
        vis = {'imgs'}
    if (grid is None):
        grid = training
    save_images = misc.save_images_builder(drange_net, ratio, grid_size, grid, verbose=False)
    dirs = []
    if ('imgs' in vis):
        dirs += ['images']
    if (not keep_samples):
        shutil.rmtree('eval')
    for dir in dirs:
        misc.mkdir(dnnlib.make_run_dir_path('eval/{}'.format(dir)))
    # process `num` samples in sections of `section_size`
    for idx in range(0, num, section_size):
        curr_size = curr_batch_size(num, idx, section_size)
        if (latents is None):
            # NOTE(review): once sampled, `latents` is no longer None, so later
            # sections reuse the first section's latents — confirm intended
            # (during training there is only one section, so it never triggers).
            latents = np.random.randn(curr_size, *G.input_shape[1:])
        if (labels is None):
            labels = dataset.get_minibatch_np(curr_size)[1]
        ret = G.run(latents, labels, randomize_noise=False, minibatch_size=batch_size, return_dlatents=True)
        images = ret[0]
        if ('imgs' in vis):
            save_images(images, pattern_of('images', step, 'png'), idx)
def interpolate_like(input: ty.T, /, other: ty.T, mode: str='nearest', align_corners: bool=False) -> ty.T:
    """Resize ``input`` so its trailing two (spatial) dims match ``other``'s.

    ``align_corners`` must be None for 'nearest' interpolation, so it is
    dropped in that case.
    """
    corners = None if mode == 'nearest' else align_corners
    return F.interpolate(input, size=other.shape[-2:], mode=mode, align_corners=corners)
class GrooveJoint(Constraint):
    """ctypes wrapper for Chipmunk's cpGrooveJoint.

    Body ``a`` carries the groove (groove_a -> groove_b); body ``b`` is pinned
    to it at ``anchr2``. The groove endpoints are read-only after creation;
    the anchor is read/write.
    """

    def __init__(self, a, b, groove_a, groove_b, anchr2):
        self._constraint = cp.cpGrooveJointNew(a._body, b._body, groove_a, groove_b, anchr2)
        self._ccontents = self._constraint.contents
        # same struct viewed as the concrete joint type, exposing the
        # groove/anchor fields
        self._pjc = cp.cast(self._constraint, ct.POINTER(cp.cpGrooveJoint)).contents
        self._set_bodies(a, b)

    def _get_anchr2(self):
        return self._pjc.anchr2

    def _set_anchr2(self, anchr):
        self._pjc.anchr2 = anchr
    # anchor point on body b (read/write)
    anchr2 = property(_get_anchr2, _set_anchr2)

    def _get_groove_a(self):
        return self._pjc.grv_a
    # first groove endpoint on body a (read-only)
    groove_a = property(_get_groove_a)

    def _get_groove_b(self):
        return self._pjc.grv_b
    # second groove endpoint on body a (read-only)
    groove_b = property(_get_groove_b)
def get_tree_starting_at(module, edges):
    """Return the breadth-first dependency tree rooted at ``module``.

    The result is ``[module, level1_edges, level2_edges, ...]`` where each
    level is the list of edges whose source was reached in the previous
    level and whose target has not been visited yet. Self-loops on the root
    are ignored.
    """
    visited = {module}
    frontier = [edge for edge in edges if edge[0] == module and edge[1] != module]
    tree = [module]
    while frontier:
        tree.append(frontier)
        # targets discovered at this level become the next sources
        next_sources = list({edge[1] for edge in frontier})
        visited.update(next_sources)
        frontier = [edge for edge in edges
                    if edge[0] in next_sources and edge[1] not in visited]
    return tree
class PseudoLabel(Algorithm):
    """Test-time adaptation by confidence-thresholded pseudo-labeling.

    Wraps a trained ``algorithm`` in an adapted copy whose parameters are
    updated at inference time: predictions with softmax confidence above
    ``beta`` are used as pseudo-labels for a cross-entropy update.
    Hyperparameters: alpha (LR multiplier), beta (confidence threshold),
    gamma (number of adaptation steps per forward).
    """

    def __init__(self, input_shape, num_classes, num_domains, hparams, algorithm):
        super().__init__(input_shape, num_classes, num_domains, hparams)
        # adapted deep-copy of `algorithm` plus an Adam optimizer over it
        (self.model, self.optimizer) = self.configure_model_optimizer(algorithm, alpha=hparams['alpha'])
        self.beta = hparams['beta']    # confidence threshold for pseudo-labels
        self.steps = hparams['gamma']  # adaptation steps per forward pass
        assert (self.steps > 0), 'tent requires >= 1 step(s) to forward and update'
        self.episodic = False
        # snapshot of initial weights/optimizer for reset()
        (self.model_state, self.optimizer_state) = copy_model_and_optimizer(self.model, self.optimizer)

    def forward(self, x, adapt=False):
        """Predict on x; when adapt=True, also update the model on x first."""
        if adapt:
            if self.episodic:
                self.reset()
            for _ in range(self.steps):
                if self.hparams['cached_loader']:
                    # features are precomputed: adapt only the classifier head
                    outputs = self.forward_and_adapt(x, self.model.classifier, self.optimizer)
                else:
                    # freeze featurizer statistics during adaptation
                    self.model.featurizer.eval()
                    outputs = self.forward_and_adapt(x, self.model, self.optimizer)
                    self.model.featurizer.train()
        elif self.hparams['cached_loader']:
            outputs = self.model.classifier(x)
        else:
            outputs = self.model(x)
        return outputs
    # NOTE(review): '_grad()' below appears to be a corrupted decorator
    # (plausibly '@torch.enable_grad()'); as written it calls an undefined
    # name at class-definition time -- confirm against the original source.
    _grad()
    def forward_and_adapt(self, x, model, optimizer):
        """One gradient step on confident pseudo-labels; returns the logits."""
        optimizer.zero_grad()
        outputs = model(x)
        # per-sample max softmax probability and its class index
        (py, y_prime) = F.softmax(outputs, dim=(- 1)).max(1)
        flag = (py > self.beta)  # keep only confident predictions
        loss = F.cross_entropy(outputs[flag], y_prime[flag])
        loss.backward()
        optimizer.step()
        return outputs

    def configure_model_optimizer(self, algorithm, alpha):
        """Deep-copy `algorithm` and build Adam with lr scaled by `alpha`."""
        adapted_algorithm = copy.deepcopy(algorithm)
        optimizer = torch.optim.Adam(adapted_algorithm.parameters(), lr=(algorithm.hparams['lr'] * alpha), weight_decay=algorithm.hparams['weight_decay'])
        return (adapted_algorithm, optimizer)

    def predict(self, x, adapt=False):
        return self(x, adapt)

    def reset(self):
        """Restore the pre-adaptation model/optimizer snapshot."""
        if ((self.model_state is None) or (self.optimizer_state is None)):
            raise Exception('cannot reset without saved model/optimizer state')
        load_model_and_optimizer(self.model, self.optimizer, self.model_state, self.optimizer_state)
_request
def before_request():
    """Populate flask.g with identity claims from the 'auth' JWT cookie.

    Any failure (missing cookie, bad signature, expired token) results in
    anonymous defaults rather than an error.
    """
    token = request.cookies.get('auth')
    try:
        claims = jwt.decode(token, jwtKey, algorithms=['HS256'])
    except Exception:
        # anonymous defaults on any decode failure
        g.user = None
        g.email = None
        g.admin = False
        g.loggedIn = False
        g.inactive = True
    else:
        g.user = claims.get('sub', None)
        g.email = claims.get('email', None)
        g.admin = claims.get('admin', False)
        g.inactive = claims.get('inactive', True)
        g.loggedIn = True
_grad()
def distributed_sinkhorn(out):
    """Sinkhorn-Knopp normalization of assignment scores (SwAV-style).

    Alternately normalizes rows and columns of exp(out / epsilon) so the
    result approaches a doubly-"stochastic" assignment; returns a
    (batch, prototypes) matrix whose rows are scaled by the batch size.
    Operates in-place on the exponentiated tensor.
    """
    Q = torch.exp(out / config_model.epsilon).t()
    K, B = Q.shape  # prototypes x batch samples
    Q /= torch.sum(Q)
    for _ in range(config_model.sinkhorn_iterations):
        # normalize rows (prototype marginals), then columns (sample marginals)
        Q /= torch.sum(Q, dim=1, keepdim=True)
        Q /= K
        Q /= torch.sum(Q, dim=0, keepdim=True)
        Q /= B
    Q *= B  # each column sums to 1 after rescaling
    return Q.t()
def train_one_epoch(config, model, criterion, data_loader, optimizer, epoch, mixup_fn, lr_scheduler, loss_scaler):
    """Run one training epoch with AMP, gradient accumulation and logging.

    The loss scaler performs backward (and the clipped optimizer step on
    accumulation boundaries); the LR scheduler is stepped per effective
    update. Progress is logged every PRINT_FREQ batches.
    """
    model.train()
    optimizer.zero_grad()
    num_steps = len(data_loader)
    batch_time = AverageMeter()
    loss_meter = AverageMeter()
    norm_meter = AverageMeter()
    scaler_meter = AverageMeter()
    start = time.time()
    end = time.time()
    for (idx, (samples, targets)) in enumerate(data_loader):
        samples = samples.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        if (mixup_fn is not None):
            (samples, targets) = mixup_fn(samples, targets)
        with torch.cuda.amp.autocast(enabled=config.AMP_ENABLE):
            outputs = model(samples)
        loss = criterion(outputs, targets)
        # scale so accumulated gradients average over ACCUMULATION_STEPS
        loss = (loss / config.TRAIN.ACCUMULATION_STEPS)
        is_second_order = (hasattr(optimizer, 'is_second_order') and optimizer.is_second_order)
        # backward + (clipped) step; only steps when update_grad is True,
        # i.e. on accumulation boundaries
        grad_norm = loss_scaler(loss, optimizer, clip_grad=config.TRAIN.CLIP_GRAD, parameters=model.parameters(), create_graph=is_second_order, update_grad=(((idx + 1) % config.TRAIN.ACCUMULATION_STEPS) == 0))
        if (((idx + 1) % config.TRAIN.ACCUMULATION_STEPS) == 0):
            optimizer.zero_grad()
            lr_scheduler.step_update((((epoch * num_steps) + idx) // config.TRAIN.ACCUMULATION_STEPS))
        loss_scale_value = loss_scaler.state_dict()['scale']
        torch.cuda.synchronize()
        loss_meter.update(loss.item(), targets.size(0))
        if (grad_norm is not None):
            norm_meter.update(grad_norm)
        scaler_meter.update(loss_scale_value)
        batch_time.update((time.time() - end))
        end = time.time()
        if ((idx % config.PRINT_FREQ) == 0):
            lr = optimizer.param_groups[0]['lr']
            wd = optimizer.param_groups[0]['weight_decay']
            memory_used = (torch.cuda.max_memory_allocated() / (1024.0 * 1024.0))
            etas = (batch_time.avg * (num_steps - idx))
            logger.info(f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}] eta {datetime.timedelta(seconds=int(etas))} lr {lr:.6f} wd {wd:.4f} time {batch_time.val:.4f} ({batch_time.avg:.4f}) loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) grad_norm {norm_meter.val:.4f} ({norm_meter.avg:.4f}) loss_scale {scaler_meter.val:.4f} ({scaler_meter.avg:.4f}) mem {memory_used:.0f}MB')
    epoch_time = (time.time() - start)
    logger.info(f'EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}')
def starcoder_tokenize(ctx: c_void_p, prompt: bytes, bos: bool=False) -> List[int]:
    """Tokenize ``prompt`` via the native tokenize_api and return token ids.

    The C-allocated token buffer is copied into a Python list and freed
    before returning.
    """
    token_count = c_int(0)
    token_buf = _lib.tokenize_api(ctx, prompt, bos, pointer(token_count))
    ids = [token_buf[i] for i in range(token_count.value)]
    c_free(token_buf)
    return ids
class DenseNetDiscrimator(nn.Module):
    """Discriminator backed by a (spectral-normalized) DenseNet-121.

    The n_layers / ndf / norm_layer arguments are accepted for interface
    compatibility but are not used by the DenseNet backbone.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_spectral_norm=True):
        super(DenseNetDiscrimator, self).__init__()
        self.model = densenet121(pretrained=True, use_spectral_norm=use_spectral_norm)
        self.use_sigmoid = use_sigmoid
        if self.use_sigmoid:
            # squash logits to (0, 1) when requested
            self.sigmoid = nn.Sigmoid()

    def forward(self, input):
        logits = self.model(input)
        return self.sigmoid(logits) if self.use_sigmoid else logits
def reset_keras(per_process_gpu_memory_fraction=1.0):
    """Tear down the current Keras/TF session and install a fresh one.

    The new session is pinned to GPU 0 and limited to the given fraction
    of its memory; garbage collection is forced in between to release
    graph/session objects.
    """
    old_session = K.get_session()
    K.clear_session()
    old_session.close()
    gc.collect()
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
    tf_config.gpu_options.visible_device_list = '0'
    K.set_session(tf.Session(config=tf_config))
def conv_l1(x, nb_filters, kernel, stride=(1, 1)):
    """2-D convolution with 'same' padding and L1 weight regularization.

    Bug fixed: the original passed ``strides=(stride, stride)`` while the
    default ``stride`` was already the tuple ``(1, 1)``, producing the
    invalid nested value ``((1, 1), (1, 1))``. ``stride`` may now be either
    an int or an (h, w) tuple; ints are expanded, tuples passed through.
    """
    strides = stride if isinstance(stride, (tuple, list)) else (stride, stride)
    return Convolution2D(nb_filters, kernel, padding='same', kernel_initializer='he_uniform', kernel_regularizer=l1(0.01), strides=strides)(x)
_module()
class DNLHead(FCNHead):
    """FCN segmentation head with a Disentangled Non-Local (DNL) block
    inserted between its two convolutions."""

    def __init__(self, reduction=2, use_scale=True, mode='embedded_gaussian', temperature=0.05, **kwargs):
        super(DNLHead, self).__init__(num_convs=2, **kwargs)
        self.reduction = reduction      # channel reduction inside the DNL block
        self.use_scale = use_scale      # presumably scales attention logits -- see DisentangledNonLocal2d
        self.mode = mode                # pairwise-affinity mode
        self.temperature = temperature
        self.dnl_block = DisentangledNonLocal2d(in_channels=self.channels, reduction=self.reduction, use_scale=self.use_scale, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, mode=self.mode, temperature=self.temperature)

    def forward(self, inputs):
        """conv -> DNL -> conv (-> optional concat with input) -> classifier."""
        x = self._transform_inputs(inputs)
        output = self.convs[0](x)
        output = self.dnl_block(output)
        output = self.convs[1](output)
        if self.concat_input:
            # fuse head output with its input before classification
            output = self.conv_cat(torch.cat([x, output], dim=1))
        output = self.cls_seg(output)
        return output
def _set_material(world, pos, player, tunnels, simplex):
    """Choose the terrain material for one world cell (procedural worldgen).

    Combines distance-to-spawn with several simplex-noise fields to pick
    among grass/path/ore/lava/stone/sand/water/tree. Cells carved as
    tunnels are also recorded in ``tunnels``.
    """
    (x, y) = pos
    simplex = functools.partial(_simplex, simplex)
    uniform = world.random.uniform
    # 'start' ~ closeness to the player's spawn, squashed to (0, 1)
    start = (4 - np.sqrt((((x - player.pos[0]) ** 2) + ((y - player.pos[1]) ** 2))))
    start += (2 * simplex(x, y, 8, 3))
    start = (1 / (1 + np.exp((- start))))
    # water / mountain noise fields, suppressed near the spawn area
    water = (simplex(x, y, 3, {15: 1, 5: 0.15}, False) + 0.1)
    water -= (2 * start)
    mountain = simplex(x, y, 0, {15: 1, 5: 0.3})
    mountain -= ((4 * start) + (0.3 * water))
    if (start > 0.5):
        # guaranteed clear ground around the spawn point
        world[(x, y)] = 'grass'
    elif (mountain > 0.15):
        if ((simplex(x, y, 6, 7) > 0.15) and (mountain > 0.3)):
            world[(x, y)] = 'path'
        elif (simplex((2 * x), (y / 5), 7, 3) > 0.4):
            # horizontally stretched noise: horizontal tunnels
            world[(x, y)] = 'path'
            tunnels[(x, y)] = True
        elif (simplex((x / 5), (2 * y), 7, 3) > 0.4):
            # vertically stretched noise: vertical tunnels
            world[(x, y)] = 'path'
            tunnels[(x, y)] = True
        elif ((simplex(x, y, 1, 8) > 0) and (uniform() > 0.85)):
            world[(x, y)] = 'coal'
        elif ((simplex(x, y, 2, 6) > 0.4) and (uniform() > 0.75)):
            world[(x, y)] = 'iron'
        elif ((mountain > 0.18) and (uniform() > 0.994)):
            # diamonds are deliberately very rare
            world[(x, y)] = 'diamond'
        elif ((mountain > 0.3) and (simplex(x, y, 6, 5) > 0.35)):
            world[(x, y)] = 'lava'
        else:
            world[(x, y)] = 'stone'
    elif ((0.25 < water <= 0.35) and (simplex(x, y, 4, 9) > (- 0.2))):
        # narrow band at the waterline becomes beach
        world[(x, y)] = 'sand'
    elif (0.3 < water):
        world[(x, y)] = 'water'
    elif ((simplex(x, y, 5, 7) > 0) and (uniform() > 0.8)):
        world[(x, y)] = 'tree'
    else:
        world[(x, y)] = 'grass'
class BoldMin():
    """Table-cell value that marks the smallest entries of a column.

    ``apply`` replaces each BoldMin cell in ``cols`` with its display
    string, wrapping the smallest value in ``\\textbf{}`` and the second
    smallest in ``\\textit{}``.

    Fixes over the original:
      * ``isinstance(disp, collections.Callable)`` fails on Python 3.10+
        (``collections.Callable`` was removed); the builtin ``callable``
        is used instead.
      * ``apply`` took ``cls`` as its first parameter but had no
        ``@classmethod`` decorator, so ``BoldMin.apply(cols)`` bound the
        column list to ``cls`` and crashed.
    """

    def __init__(self, v, disp):
        self.v = float(v)
        # `disp` may be a formatting callable; resolve it to a string now
        if callable(disp):
            disp = disp(v)
        assert isinstance(disp, str)
        self.disp = disp

    @classmethod
    def apply(cls, cols):
        """Replace BoldMin cells in `cols` in place, highlighting the two minima."""
        vals = []
        for (idx, v) in enumerate(cols):
            if isinstance(v, cls):
                vals.append((v.v, idx, v.disp))
                cols[idx] = v.disp
        vals.sort()
        # smallest -> bold, second smallest -> italic
        for (m, sty) in zip(vals[:2], ['bf', 'it']):
            cols[m[1]] = ('\\text%s{%s}' % (sty, m[2]))
class RandomApply(RandomTransforms):
    """Apply a whole list of transforms with probability ``p`` (all or none)."""

    def __init__(self, transforms, p=0.5):
        super(RandomApply, self).__init__(transforms)
        self.p = p

    def __call__(self, img):
        # a single coin flip decides whether the entire pipeline runs
        if self.p < random.random():
            return img
        for transform in self.transforms:
            img = transform(img)
        return img

    def __repr__(self):
        parts = [self.__class__.__name__ + '(']
        parts.append('    p={}'.format(self.p))
        for transform in self.transforms:
            parts.append('    {}'.format(transform))
        return '\n'.join(parts) + '\n)'
class MobileBertModel():
    """Import-time placeholder used when PyTorch is not installed.

    Any attempt to construct or load the model raises a helpful
    'requires PyTorch' error via requires_pytorch().
    """

    def __init__(self, *args, **kwargs):
        requires_pytorch(self)

    def from_pretrained(self, *args, **kwargs):
        requires_pytorch(self)
class PolyOneOverXRect(PolyGenerator):
    """Polynomial approximation of rect(x)/x, built as the product of a
    1/x approximant and a rectangle-function approximant."""

    def help(self):
        return 'Region of validity is from 1/kappa to 1, and from -1/kappa to -1. Error is epsilon'

    def generate(self, degree=6, delta=2, kappa=3, epsilon=0.1, ensure_bounded=True, return_scale=False):
        """Return coefficients of the product polynomial.

        When return_scale is True, also return the combined scale factor
        (the product of the two component scales).
        """
        # 1/x approximant, valid away from the origin (|x| >= 1/kappa)
        (coefs_invert, scale1) = PolyOneOverX().generate((2 * kappa), epsilon, ensure_bounded, return_scale=True)
        # rectangle approximant masking out the region near the origin
        (coefs_rect, scale2) = PolyRect().generate(degree, delta, kappa, ensure_bounded, return_scale=True)
        poly_invert = np.polynomial.Polynomial(coefs_invert)
        poly_rect = np.polynomial.Polynomial(coefs_rect)
        # multiply the polynomials; scales multiply correspondingly
        pcoefs = (poly_invert * poly_rect).coef
        if return_scale:
            return (pcoefs, (scale1 * scale2))
        else:
            return pcoefs
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """Construct a MobileNetV2 model, optionally loading pretrained weights.

    Args:
        pretrained: load ImageNet weights from `model_urls` when True.
        progress: show a download progress bar.
        **kwargs: forwarded to the MobileNetV2 constructor.
    """
    model = MobileNetV2(**kwargs)
    if not pretrained:
        return model
    weights = load_state_dict_from_url(model_urls['mobilenet_v2'], progress=progress)
    model.load_state_dict(weights)
    return model
_start_docstrings('The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.', CAMEMBERT_START_DOCSTRING)
class TFCamembertModel(TFRobertaModel):
    """Bare CamemBERT model (TF): identical to TFRobertaModel except for
    its config class, since CamemBERT shares RoBERTa's architecture."""
    config_class = CamembertConfig
def concat_images_with_tiled_vector_layer(images, vector, image_shape=None, vector_shape=None):
    """Keras-layer graph: tile a (batch, d) vector over an image's spatial
    dims and concatenate it channel-wise with the image tensor(s).

    Shapes are inferred from the Keras tensors when not given explicitly.
    Assumes channels-last (batch, H, W, C) layout -- confirm with callers.
    """
    with K.name_scope('concat_images_with_tiled_vector_layer'):
        if (not isinstance(images, list)):
            images = [images]
        if (vector_shape is None):
            # static shape excluding the batch dimension
            vector_shape = K.int_shape(vector)[1:]
        if (image_shape is None):
            image_shape = K.int_shape(images[0])[1:]
        # (batch, d) -> (batch, 1, 1, d)
        vector = Reshape([1, 1, vector_shape[(- 1)]])(vector)
        # replicate across H and W, keeping batch and channels
        tile_shape = (int(1), int(image_shape[0]), int(image_shape[1]), int(1))
        tiled_vector = Lambda((lambda x: K.tile(x, tile_shape)))(vector)
        # channel-wise concatenation of all images plus the tiled vector
        x = Concatenate(axis=(- 1))((([] + images) + [tiled_vector]))
        return x
def inputs(eval_data):
    """Build the CIFAR-10 input pipeline (images, labels).

    Raises:
        ValueError: if FLAGS.data_dir is unset.
    """
    if not FLAGS.data_dir:
        raise ValueError('Please supply a data_dir')
    batches_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
    images, labels = cifar10_input.inputs(eval_data=eval_data, data_dir=batches_dir, batch_size=FLAGS.batch_size)
    if not FLAGS.use_fp16:
        return (images, labels)
    # optionally halve precision for fp16 training
    return (tf.cast(images, tf.float16), tf.cast(labels, tf.float16))
class SkipConnectionBlock(nn.Module):
    """U-Net-style residual block: x + up(submodule(down(x))), optionally
    followed by a 'flat' (resolution-preserving) conv block.

    NOTE(review): `use_dropout` is accepted but never used here --
    presumably consumed by the block constructors; confirm upstream.
    """

    def __init__(self, ngf, sub_ngf, down_block=None, submodule=None, up_block=None, flat_block=None, flat_layers=1, padding_type='reflect', norm_layer=nn.BatchNorm2d, act_layer=nn.ReLU(inplace=True), use_dropout=False):
        super(SkipConnectionBlock, self).__init__()
        self.submodule = submodule
        if (submodule is not None):
            # down/up adapters bracket the nested submodule (the literal 3 is
            # presumably a kernel size -- confirm in the block constructors)
            assert ((down_block is not None) and (up_block is not None))
            self.down_block = down_block(ngf, sub_ngf, 3, padding_type, norm_layer, act_layer)
            self.up_block = up_block(sub_ngf, ngf, 3, padding_type, norm_layer, act_layer)
        if (flat_block is not None):
            self.flat_block = flat_block(ngf, 3, flat_layers, padding_type, norm_layer, act_layer)
        else:
            self.flat_block = None

    def forward(self, x):
        # residual skip around the nested down/sub/up path, when present
        if (self.submodule is not None):
            x = (x + self.up_block(self.submodule(self.down_block(x))))
        if (self.flat_block is not None):
            return self.flat_block(x)
        return x
def deep_speaker_loss(y_true, y_pred):
    """Triplet cosine-similarity loss over a batch laid out as
    [anchors | positives | negatives], each of size c.BATCH_SIZE.

    Hinge form: penalize triplets where the negative similarity is within
    `alpha` of the positive similarity. `y_true` is unused (Keras API).
    """
    n = c.BATCH_SIZE
    anchor = y_pred[:n]
    positive_ex = y_pred[n:2 * n]
    negative_ex = y_pred[2 * n:]
    pos_sim = batch_cosine_similarity(anchor, positive_ex)
    neg_sim = batch_cosine_similarity(anchor, negative_ex)
    return K.sum(K.maximum(neg_sim - pos_sim + alpha, 0.0))
class components(Mask):
    """Face mask built from convex hulls over groups of the 68-point
    facial landmarks (jaw, cheeks, nose, eyes)."""

    def build_mask(self):
        """Return a single-channel float mask, same H/W as self.face."""
        mask = np.zeros((self.face.shape[0:2] + (1,)), dtype=np.float32)
        # each part is a tuple of landmark slices; their union is hulled
        # and filled (indices follow the standard 68-landmark layout)
        r_jaw = (self.landmarks[0:9], self.landmarks[17:18])
        l_jaw = (self.landmarks[8:17], self.landmarks[26:27])
        r_cheek = (self.landmarks[17:20], self.landmarks[8:9])
        l_cheek = (self.landmarks[24:27], self.landmarks[8:9])
        nose_ridge = (self.landmarks[19:25], self.landmarks[8:9])
        r_eye = (self.landmarks[17:22], self.landmarks[27:28], self.landmarks[31:36], self.landmarks[8:9])
        l_eye = (self.landmarks[22:27], self.landmarks[27:28], self.landmarks[31:36], self.landmarks[8:9])
        nose = (self.landmarks[27:31], self.landmarks[31:36])
        parts = [r_jaw, l_jaw, r_cheek, l_cheek, nose_ridge, r_eye, l_eye, nose]
        for item in parts:
            merged = np.concatenate(item)
            # fill the convex hull of each landmark group
            cv2.fillConvexPoly(mask, cv2.convexHull(merged), 255.0)
        return mask
def optimizer(cfg: ConfigDict) -> optax.OptState:
    """Build the optax optimizer/LR schedule described by ``cfg``.

    Supported cfg.schedule values: constant, linear_decay, cosine_decay,
    warmup_cosine_decay, scale_on_plateau. Returns optax.adam (or adamw
    when l2_regularization is set) over the chosen schedule.

    Raises:
        ValueError: for an unknown cfg.schedule.
    """
    # -1 when no epoch size is configured; total_steps is then negative,
    # which presumably only matters for the decaying schedules -- confirm
    epoch_size = (cfg.epoch_size if hasattr(cfg, 'epoch_size') else (- 1))
    batch_size = minimum_batch_size(cfg)
    total_steps = (cfg.epochs * (epoch_size // batch_size))
    warmup_steps = cfg.get('warmup_steps', 0)
    if (cfg.schedule == 'constant'):
        schedule = cfg.learning_rate
    elif (cfg.schedule == 'linear_decay'):
        # polynomial of power 1 == linear decay to zero
        schedule = optax.polynomial_schedule(cfg.learning_rate, 0.0, 1, total_steps)
    elif (cfg.schedule == 'cosine_decay'):
        schedule = optax.cosine_decay_schedule(cfg.learning_rate, total_steps)
    elif (cfg.schedule == 'warmup_cosine_decay'):
        schedule = optax.warmup_cosine_decay_schedule(1e-07, cfg.learning_rate, warmup_steps, total_steps)
    elif (cfg.schedule == 'scale_on_plateau'):
        # Adam whose LR is scaled by 0.8 when the eval metric plateaus
        max_plateau_steps = (cfg.max_lr_plateau_epochs // cfg.epochs_per_eval)
        return optax.chain(optax.scale_by_adam(), scale_lr_on_plateau((- cfg.learning_rate), max_plateau_steps, 0.8))
    else:
        raise ValueError(f'Unknown learning rate schedule, "{cfg.schedule}".')
    if ((not hasattr(cfg, 'l2_regularization')) or (cfg.l2_regularization == 0.0)):
        return optax.adam(schedule)
    # decoupled weight decay when l2_regularization is configured
    return optax.adamw(schedule, weight_decay=cfg.l2_regularization)
class Bottleneck3d(nn.Module):
    """3-D bottleneck residual block with decomposed (2+1)D-style convs.

    conv1 is temporal-only (3,1,1), conv2 spatial-only (1,3,3) carrying the
    spatial stride, conv3 a pointwise expansion to planes * 4. The final
    ReLU can be disabled via use_final_relu.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_final_relu=True):
        super(Bottleneck3d, self).__init__()
        self.use_final_relu = use_final_relu
        # temporal conv
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=(3, 1, 1), padding=(1, 0, 0), bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # spatial conv (strided)
        self.conv2 = nn.Conv3d(planes, planes, kernel_size=(1, 3, 3), stride=(1, stride, stride), padding=(0, 1, 1), bias=False)
        self.bn2 = nn.BatchNorm3d(planes)
        # pointwise expansion
        self.conv3 = nn.Conv3d(planes, (planes * 4), kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d((planes * 4))
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        if self.use_final_relu:
            out = self.relu(out)
        return out
def image_decode(video_path):
    """Load a single image file as a one-element list of RGB numpy arrays.

    Raises:
        RuntimeError: wrapping any underlying error, with the path included.
    """
    try:
        with Image.open(video_path) as img:
            rgb = np.array(img.convert('RGB'))
    except BaseException as e:
        raise RuntimeError('Caught "{}" when loading {}'.format(str(e), video_path))
    return [rgb]
_optimizer('adadelta')
class Adadelta(LegacyFairseqOptimizer):
    """Fairseq wrapper around torch.optim.Adadelta.

    Fixes over the original (decorators were stripped):
      * ``optimizer_config`` must be a property -- __init__ consumes it as
        ``**self.optimizer_config``, which would try to unpack a bound
        method otherwise.
      * ``add_args(parser)`` takes no ``self``, so it must be static.
      * ``supports_flat_params`` is restored as a property to match the
        fairseq optimizer convention -- confirm against the base class.
    """

    def __init__(self, args, params):
        super().__init__(args)
        self._optimizer = torch.optim.Adadelta(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the argument parser."""
        parser.add_argument('--adadelta-rho', type=float, default=0.9, metavar='RHO', help='coefficient used for computing a running average of squared gradients')
        parser.add_argument('--adadelta-eps', type=float, default=1e-06, metavar='EPS', help='term added to the denominator to improve numerical stability')
        parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay')
        parser.add_argument('--anneal-eps', action='store_true', help='flag to anneal eps')

    @property
    def optimizer_config(self):
        """Keyword arguments for torch.optim.Adadelta, taken from parsed args."""
        return {'lr': self.args.lr[0], 'rho': self.args.adadelta_rho, 'eps': self.args.adadelta_eps, 'weight_decay': self.args.weight_decay}

    @property
    def supports_flat_params(self):
        return True
def _get_kins_instances_meta():
    """Build Detectron2-style instance metadata for the KINS categories.

    Returns a dict with the id->contiguous-index mapping plus class names
    and colors, asserting the expected 7 categories.
    """
    ids = [cat['id'] for cat in KINS_CATEGORIES]
    colors = [cat['color'] for cat in KINS_CATEGORIES]
    assert len(ids) == 7, len(ids)
    names = [cat['name'] for cat in KINS_CATEGORIES]
    return {
        'thing_dataset_id_to_contiguous_id': {cat_id: idx for idx, cat_id in enumerate(ids)},
        'thing_classes': names,
        'thing_colors': colors,
    }
def windows(*args, **kwargs):
    """Plot the APOGEE spectral windows for one element.

    args: (wavelengths, spectrum, element_name, ...extra args forwarded to
    waveregions). A trailing '1'/'2' on the element selects the first or
    second half of its windows. NOTE(review): nesting reconstructed from a
    whitespace-stripped source -- confirm against the upstream apogee
    plotting module.
    """
    pad = kwargs.pop('pad', 3)
    try:
        (si, ei) = apwindow.waveregions(args[2], pad=pad, asIndex=True)
    except IOError:
        try:
            # retry without the trailing half-designation digit
            (si, ei) = apwindow.waveregions(args[2][:(- 1)], pad=pad, asIndex=True)
        except IOError:
            raise IOError(('Windows for element %s could not be loaded, please specify an existing APOGEE element' % args[2].lower().capitalize()))
        # '1' selects the first half of the windows, anything else the second
        if (args[2][(- 1)] == '1'):
            si = si[:(len(si) // 2)]
            ei = ei[:(len(ei) // 2)]
        else:
            si = si[(len(si) // 2):]
            ei = ei[(len(ei) // 2):]
        # strip the half-designation from the element argument, keep the rest
        newargs = (args[0], args[1], args[2][:(- 1)])
        for ii in range((len(args) - 3)):
            newargs = (newargs + (args[(ii + 3)],))
        args = newargs
    dlam = apwindow.total_dlambda(args[2], pad=pad)
    numw = apwindow.num(args[2])
    # tune gap-skipping heuristics to the number of windows
    if (numw > 20):
        kwargs['skipdx'] = 0.003
        kwargs['_noskipdiags'] = True
    elif (numw > 15):
        kwargs['skipdx'] = 0.01
        kwargs['_startendskip'] = 0
    if ((not kwargs.get('overplot', False)) and (not ('fig_width' in kwargs))):
        # wide figure for broad wavelength coverage, narrow otherwise
        if (dlam > 150.0):
            kwargs['fig_width'] = 8.4
        else:
            kwargs['fig_width'] = 4.2
    kwargs['_noxticks'] = True
    kwargs['_labelwav'] = True
    kwargs['labelLines'] = kwargs.get('labelLines', False)
    if kwargs.pop('plot_weights', False):
        # overlay the window weight function; widen the y-range to fit it
        kwargs['_plotw'] = apwindow.read(args[2], apStarWavegrid=True)
        if kwargs.get('apStar', False):
            kwargs['yrange'] = kwargs.get('yrange', [0.0, (1.1 * numpy.nanmax(args[1]))])
        else:
            kwargs['yrange'] = kwargs.get('yrange', [0.0, 1.2])
    markLines = kwargs.get('markLines', (not ('overplot' in kwargs)))
    if (markLines and (not ('_markwav' in kwargs))):
        kwargs['_markwav'] = apwindow.lines(args[2])
    waveregions(args[0], args[1], *args[3:], startindxs=si, endindxs=ei, **kwargs)
    # element label in the top-left corner
    bovy_plot.bovy_text(('$\\mathrm{%s}$' % args[2].lower().capitalize()), top_left=True, fontsize=10, backgroundcolor='w')
    return None
class __DisplMixin():
    """Debug helper mixin: pair a processed dataset sample with its raw
    annotation for display. Relies on the host class providing
    ``__getitem__`` and ``self.annotation``."""

    def displ_item(self, index):
        (sample, ann) = (self.__getitem__(index), self.annotation[index])
        return OrderedDict({'file': os.path.basename(ann['image']), 'sentence': ann['sentence'], 'label': ann['label'], 'image': sample['image']})
def _test():
    """Smoke-test the FCN-8s(d) segmentation model constructors.

    Builds each model variant, prints its parameter count, runs a forward
    and backward pass, and checks the output shape matches the input
    spatially with the expected number of classes.

    NOTE(review): the original per-model weight-count assertions were
    corrupted in this copy (empty right-hand sides, e.g. ``weight_count == )``,
    which does not parse). They have been removed; restore the expected
    counts from the upstream model zoo if exact parameter counts must be
    pinned.
    """
    import torch
    in_size = (480, 480)
    aux = True
    pretrained = False
    models = [
        (fcn8sd_resnetd50b_voc, 21),
        (fcn8sd_resnetd101b_voc, 21),
        (fcn8sd_resnetd50b_coco, 21),
        (fcn8sd_resnetd101b_coco, 21),
        (fcn8sd_resnetd50b_ade20k, 150),
        (fcn8sd_resnetd101b_ade20k, 150),
        (fcn8sd_resnetd50b_cityscapes, 19),
        (fcn8sd_resnetd101b_cityscapes, 19),
    ]
    for (model, num_classes) in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        net.eval()
        weight_count = _calc_width(net)
        print('m={}, {}'.format(model.__name__, weight_count))
        # forward/backward sanity check
        x = torch.randn(1, 3, in_size[0], in_size[1])
        ys = net(x)
        y = (ys[0] if aux else ys)
        y.sum().backward()
        assert ((y.size(0) == x.size(0)) and (y.size(1) == num_classes) and (y.size(2) == x.size(2)) and (y.size(3) == x.size(3)))
def parse_code(net_code: str):
    """Decode a compact network-code string like '2gm1f'.

    Layout: <#gnn-layers> 'g' <'m'|other pooling> ... <#dense-layers> 'f'.
    Returns (nb_gnn_layers, nb_dense_layers, is_max_pool).
    """
    assert net_code[1] == 'g'
    assert net_code[-1] == 'f'
    gnn_layers = int(net_code[0])
    dense_layers = int(net_code[-2])
    uses_max_pool = net_code[2] == 'm'
    return (gnn_layers, dense_layers, uses_max_pool)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project.

    A residual connection is used only when stride is 1 and the input and
    output channel counts match.
    """

    def __init__(self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[(..., nn.Module)]]=None) -> None:
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert (stride in [1, 2])
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        blocks: List[nn.Module] = []
        if expand_ratio != 1:
            # pointwise expansion
            blocks.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
        # depthwise conv, then linear pointwise projection (no activation)
        blocks.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer))
        blocks.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        blocks.append(norm_layer(oup))
        self.conv = nn.Sequential(*blocks)
        self.out_channels = oup
        self._is_cn = (stride > 1)

    def forward(self, x: Tensor) -> Tensor:
        out = self.conv(x)
        return (x + out) if self.use_res_connect else out
def get_file_list(path, extension=None):
    """List files in ``path``, optionally filtered by extension, sorted
    alphanumerically.

    Bug fixed: the original concatenated ``path + f``, which produces broken
    paths when ``path`` lacks a trailing separator; ``join`` handles both
    cases (output is identical for callers that passed a trailing slash).

    Args:
        path: directory to list.
        extension: when given, keep only files whose ``splitext`` suffix
            matches exactly (e.g. '.png').
    """
    if extension is None:
        file_list = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
    else:
        file_list = [join(path, f) for f in listdir(path)
                     if isfile(join(path, f)) and splitext(f)[1] == extension]
    return sorted_alphanum(file_list)
def set_seed(seed: int):
    """Seed the python, numpy and torch RNGs (including all CUDA devices)
    for reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def test_logistic_regression():
    """Check our LogisticRegression matches scikit-learn on breast-cancer
    data and that its local/global explanations can be visualized."""
    cancer = load_breast_cancer()
    X, y = cancer.data, cancer.target
    feature_names = cancer.feature_names
    sk_model = SKLogistic(tol=0.01, random_state=1)
    our_model = LogisticRegression(tol=0.01, feature_names=feature_names, random_state=1)
    sk_model.fit(X, y)
    our_model.fit(X, y)
    # prediction parity with scikit-learn
    for method in ('predict_proba', 'predict'):
        assert np.allclose(getattr(sk_model, method)(X), getattr(our_model, method)(X))
    # local explanations, with and without labels
    for expl_args in ((X, y), (X,)):
        local_expl = our_model.explain_local(*expl_args)
        assert local_expl.visualize(0) is not None
    # global explanation
    assert our_model.explain_global().visualize() is not None
def SubsampleAndTraverse(length, num_walks, hyperedges, vertexMemberships, alpha=1.0, beta=0):
    """Generate hyperedge-aware random walks (subsample-and-traverse).

    For each hyperedge, ``num_walks`` walks of ``length`` steps are
    concatenated into one token list. At each step, with probability
    ``alpha / |members| + beta`` the walk jumps to a random hyperedge
    containing the current vertex; the vertex is then resampled from the
    current hyperedge's members.

    Fix over the original: the dead local ``initial = True`` (assigned and
    never read) was removed; the RNG call sequence is unchanged.

    Returns:
        list of walks, one per hyperedge, each a list of vertex id strings.
    """
    walksSAT = []
    for hyperedge_index in hyperedges:
        hyperedge = hyperedges[hyperedge_index]
        walk_vertex = []
        curr_vertex = random.choice(hyperedge['members'])
        for _ in range(num_walks):
            hyperedge_num = hyperedge_index
            curr_hyperedge = hyperedge
            for i in range(length):
                # traversal probability shrinks with hyperedge size
                proba = ((float(alpha) / len(curr_hyperedge['members'])) + beta)
                if (random.random() < proba):
                    adjacent_hyperedges = vertexMemberships[curr_vertex]
                    hyperedge_num = random.choice(adjacent_hyperedges)
                    curr_hyperedge = hyperedges[hyperedge_num]
                walk_vertex.append(str(curr_vertex))
                curr_vertex = random.choice(curr_hyperedge['members'])
        walksSAT.append(walk_vertex)
    return walksSAT
def simxGetJointForce(clientID, jointHandle, operationMode):
    """Remote-API wrapper: read the force/torque on a joint.

    Returns (return_code, force) where force is filled in by the native
    call through an output parameter.
    """
    force = ct.c_float()
    ret_code = c_GetJointForce(clientID, jointHandle, ct.byref(force), operationMode)
    return (ret_code, force.value)
class HeartEpisodicDataLoader(DataLoader):
    """DataLoader over HeartEpisodicDataset with episodic re-splitting.

    NOTE(review): ``collate_fn`` is accepted but never forwarded to the
    underlying DataLoader -- confirm whether that is intentional.
    """

    def __init__(self, batch_size, data_dir='data/', split='train', shuffle=True, collate_fn=None, num_workers=1, data_name=None, signal_type=None, num_mesh=None, seq_len=None, k_shot=None):
        self.dataset = HeartEpisodicDataset(data_dir, data_name, signal_type, num_mesh, seq_len, split, shuffle, k_shot=k_shot)
        # kwargs are kept so next() can build fresh loaders with the same config
        self.init_kwargs = {'batch_size': batch_size, 'shuffle': shuffle, 'drop_last': True, 'num_workers': num_workers}
        super().__init__(dataset=self.dataset, **self.init_kwargs)

    def next(self):
        """Re-partition the episodic dataset and return a fresh loader over it."""
        self.dataset.split()
        return DataLoader(dataset=self.dataset, **self.init_kwargs)
def main(args):
    """Run evaluation (config files named test*) or visualization (vis*).

    Test mode optionally evaluates every checkpoint in the save directory,
    always evaluates the configured checkpoint, and reports the best result
    according to evaluator.metric.best_metric. Vis mode runs the configured
    predictor over the test dataloader.
    """
    config = load_config(args)
    if isinstance(config, omegaconf.dictconfig.DictConfig):
        print(OmegaConf.to_yaml(config))
    else:
        pp = pprint.PrettyPrinter(indent=4)
        # BUG FIX: PrettyPrinter has no `print` method; the correct API is `pprint`.
        pp.pprint(config)
    mmtask = Task.config_task(config)
    mmtask.build_model()
    test_dataloader = get_dataloader(config)
    checkpoint_search_path = os.path.dirname(config.eval.save_path)
    results = []
    prefix = os.path.basename(args.taskconfig)
    if prefix.startswith('test'):
        if ('best' not in config.fairseq.common_eval.path):
            # No "best" checkpoint chosen yet: score every saved epoch.
            print('eval each epoch.')
            for checkpoint in glob.glob((checkpoint_search_path + '/checkpoint*')):
                model = mmtask.load_checkpoint(checkpoint)
                ckpt = os.path.basename(checkpoint)
                evaluator = Evaluator(config)
                output = evaluator.evaluate(model, test_dataloader, (ckpt + '_merged'))
                results.append((checkpoint, output))
        # Always evaluate the explicitly configured checkpoint as well.
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        evaluator = Evaluator(config)
        output = evaluator.evaluate(model, test_dataloader)
        results.append((config.fairseq.common_eval.path, output))
        # Select the checkpoint with the highest headline metric.
        best_result = None
        best_metric = 0.0
        for (checkpoint, result) in results:
            print(checkpoint)
            evaluator.metric.print_computed_metrics(result)
            best_score = evaluator.metric.best_metric(result)
            if (best_score > best_metric):
                best_result = (checkpoint, result)
                best_metric = best_score
        print('best results:')
        print(best_result[0])
        evaluator.metric.print_computed_metrics(best_result[1])
    elif prefix.startswith('vis'):
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        predictor_cls = getattr(predictor_path, config.predictor)
        predictor = predictor_cls(config)
        predictor.predict_loop(model, test_dataloader, mmtask, None)
    else:
        raise ValueError('unknown prefix of the config file', args.taskconfig)
class UNetMotionModel(metaclass=DummyObject):
    """Import-time stand-in used when torch is unavailable; every entry point
    raises a helpful error via requires_backends instead of an ImportError."""
    # Backends the real class needs.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])

    # BUG FIX: the alternate constructors must be classmethods so that
    # UNetMotionModel.from_config(...) / from_pretrained(...) receive the
    # class, mirroring the real model API (standard dummy-object pattern).
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch'])
def get_help_docs(dic):
    """Build help strings for a dict of name -> callable.

    Each entry becomes "'key': callable_name first-docstring-line", or just the
    callable name when it has no docstring.
    """
    docs = []
    # BUG FIX: dict.iteritems() is Python 2 only; use .items() under Python 3.
    for (k, v) in dic.items():
        doc = inspect.getdoc(v)
        comp_doc = (('%s %s' % (v.__name__, doc.rsplit('\n')[0])) if doc else v.__name__)
        docs.append(("'%s': %s" % (k, comp_doc)))
    return docs
def get_optimizer_scheduler(net, cfg):
    """Build the AdamW optimizer (head-only or with a scaled backbone LR) and
    the LR scheduler configured under cfg.TRAIN."""
    head_only = getattr(cfg.TRAIN, 'TRAIN_CLS', False)
    if head_only:
        print('Only training classification head. Learnable parameters are shown below.')
        # Optimize only the classification head.
        param_dicts = [{'params': [p for n, p in net.named_parameters() if ('cls' in n) and p.requires_grad]}]
        # Freeze everything else; echo the parameters that stay trainable.
        for name, param in net.named_parameters():
            if 'cls' in name:
                print(name)
            else:
                param.requires_grad = False
    else:
        # Two groups: backbone parameters learn at a scaled-down rate.
        non_backbone = [p for n, p in net.named_parameters() if ('backbone' not in n) and p.requires_grad]
        backbone = [p for n, p in net.named_parameters() if ('backbone' in n) and p.requires_grad]
        param_dicts = [
            {'params': non_backbone},
            {'params': backbone, 'lr': cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER},
        ]
        if is_main_process():
            print('Learnable parameters are shown below.')
            for name, param in net.named_parameters():
                if param.requires_grad:
                    print(name)
    if cfg.TRAIN.OPTIMIZER == 'ADAMW':
        optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
    else:
        raise ValueError('Unsupported Optimizer')
    scheduler_type = cfg.TRAIN.SCHEDULER.TYPE
    if scheduler_type == 'step':
        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH)
    elif scheduler_type == 'Mstep':
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.TRAIN.SCHEDULER.MILESTONES, gamma=cfg.TRAIN.SCHEDULER.GAMMA)
    else:
        raise ValueError('Unsupported scheduler')
    return (optimizer, lr_scheduler)
class TSDFEncoder(nn.Module):
    """Sparse-to-dense TSDF encoder.

    A chain of sparse PreEncoderLayers processes the input, then a small dense
    3D-conv hourglass (two stride-2 encoders, a 1x1x1 bottleneck, two
    transposed-conv decoders with optional skip connections) refines it.
    Two 1x1x1 heads predict per-voxel occupancy and SDF.
    """
    def __init__(self, nf_in, nf_per_level, nf_out, use_skip_sparse=True, use_skip_dense=True):
        # nf_in: input channel count; nf_per_level: output channels of each
        # sparse pre-encoder level; nf_out: channels of the final feature map.
        nn.Module.__init__(self)
        assert (type(nf_per_level) is list)
        data_dim = 3  # (unused) spatial dimensionality of the volumes
        self.use_skip_sparse = use_skip_sparse
        self.use_skip_dense = use_skip_dense
        self.use_aspp = False
        self.use_bias = False
        # Sparse pre-encoder: each level consumes the previous level's channels.
        modules = []
        for level in range(len(nf_per_level)):
            nf_in = (nf_in if (level == 0) else nf_per_level[(level - 1)])
            modules.append(PreEncoderLayer(nf_in, nf_per_level[level]))
        self.process_sparse = nn.Sequential(*modules)
        nf = nf_per_level[(- 1)]
        # Dense encoder: two stride-2 convs, each halving the resolution.
        nf0 = ((nf * 3) // 2)
        self.encode_dense0 = nn.Sequential(nn.Conv3d(nf, nf0, kernel_size=4, stride=2, padding=1, bias=self.use_bias), nn.BatchNorm3d(nf0), nn.ReLU(True))
        nf1 = (nf * 2)
        if self.use_aspp:
            # ASPP expands enc0 to nf1 channels before the second encoder.
            self.aspp = ASPP(nf0, nf1)
            self.encode_dense1 = nn.Sequential(nn.Conv3d(nf1, nf1, kernel_size=4, stride=2, padding=1, bias=self.use_bias), nn.BatchNorm3d(nf1), nn.ReLU(True))
        else:
            self.encode_dense1 = nn.Sequential(nn.Conv3d(nf0, nf1, kernel_size=4, stride=2, padding=1, bias=self.use_bias), nn.BatchNorm3d(nf1), nn.ReLU(True))
        nf2 = nf1
        self.bottleneck_dense2 = nn.Sequential(nn.Conv3d(nf1, nf2, kernel_size=1, bias=self.use_bias), nn.BatchNorm3d(nf2), nn.ReLU(True))
        # First decoder input grows by nf1 when the bottleneck is concatenated
        # with the enc1 skip (see forward()).
        nf3 = (nf2 if (not self.use_skip_dense) else (nf1 + nf2))
        nf4 = (nf3 // 2)
        self.decode_dense3 = nn.Sequential(nn.ConvTranspose3d(nf3, nf4, kernel_size=4, stride=2, padding=1, bias=self.use_bias), nn.BatchNorm3d(nf4), nn.ReLU(True))
        if self.use_skip_dense:
            # Second decoder also receives the enc0 skip (nf0 extra channels).
            nf4 += nf0
        nf5 = (nf4 // 2)
        self.decode_dense4 = nn.Sequential(nn.ConvTranspose3d(nf4, nf5, kernel_size=4, stride=2, padding=1, bias=self.use_bias), nn.BatchNorm3d(nf5), nn.ReLU(True))
        self.final = nn.Sequential(nn.Conv3d(nf5, nf_out, kernel_size=1, bias=self.use_bias), nn.BatchNorm3d(nf_out), nn.ReLU(True))
        # Prediction heads: single-channel occupancy and SDF.
        self.occpred = nn.Sequential(nn.Conv3d(nf_out, 1, kernel_size=1, bias=self.use_bias))
        self.sdfpred = nn.Sequential(nn.Conv3d(nf_out, 1, kernel_size=1, bias=self.use_bias))
    def forward(self, x):
        """Return (final features, cat(occupancy, sdf) predictions, sparse skip features)."""
        feats_sparse = []
        for k in range(len(self.process_sparse)):
            (x, ft) = self.process_sparse[k](x)
            if self.use_skip_sparse:
                # Collect intermediate sparse features (and the level output)
                # as skip connections for a downstream decoder.
                feats_sparse.extend(ft)
                feats_sparse.append(x)
        enc0 = self.encode_dense0(x)
        if self.use_aspp:
            enc_aspp = self.aspp(enc0)
            enc1 = self.encode_dense1(enc_aspp)
        else:
            enc1 = self.encode_dense1(enc0)
        bottleneck = self.bottleneck_dense2(enc1)
        if self.use_skip_dense:
            dec0 = self.decode_dense3(torch.cat([bottleneck, enc1], 1))
        else:
            dec0 = self.decode_dense3(bottleneck)
        if self.use_skip_dense:
            x = self.decode_dense4(torch.cat([dec0, enc0], 1))
        else:
            x = self.decode_dense4(dec0)
        x = self.final(x)
        occ = self.occpred(x)
        sdf = self.sdfpred(x)
        out = torch.cat([occ, sdf], 1)
        return (x, out, feats_sparse)
def save_results(results_dict, exp_dir, log=True):
    """Persist results_dict to <exp_dir>/results.json and optionally log it as a table."""
    save_dict(results_dict, os.path.join(exp_dir, 'results.json'))
    if not log:
        return
    logger = get_logger(log_dir=exp_dir)
    results_table_str = dict_to_tabular_str(results_dict)
    # Keep the exact original message layout.
    logger.info(((((('\n' + '\n') + ' Results \n') + '') + ('%s' % results_table_str)) + '\n'))
class VanDropPath(nn.Module):
    """Per-sample stochastic depth ("drop path") for residual branches."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the functional implementation; only active in training mode.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return f'p={self.drop_prob}'
class QA_Metric():
    """SummaQA-style metric: answers questions against a candidate summary with
    a BERT QA model and reports average answer probability and average F1
    against the reference answers."""
    def __init__(self, model=None, batch_size=8, max_seq_len=384, use_gpu=True):
        # Fall back to the bundled BERT QA model when none is supplied.
        if (model is None):
            model = QA_Bert()
        self.model = model
        # Move the underlying HF model to GPU only when available and requested.
        if (torch.cuda.is_available() and use_gpu):
            self.gpu = True
            self.model.model.to('cuda')
        else:
            self.gpu = False
        self.batch_size = batch_size
        self.max_seq_len = max_seq_len
    def compute(self, questions, true_asws, evaluated_text):
        """Answer each question against evaluated_text.

        Returns a dict with 'summaqa_avg_prob' (mean answer probability) and
        'summaqa_avg_fscore' (mean token F1 vs. true_asws), both averaged over
        len(questions)."""
        if (not questions):
            # No questions: both scores are defined as zero.
            return {'summaqa_avg_prob': 0, 'summaqa_avg_fscore': 0}
        (score_prob, score_f) = (0, 0)
        probs = []
        asws = []
        slines = []  # encoded examples accumulated for the current batch
        for (count, (question, true_asw)) in enumerate(zip(questions, true_asws)):
            # Flush a full batch BEFORE encoding the next example.
            if (((count % self.batch_size) == 0) and (count != 0)):
                input_ids = torch.tensor([ex['input_ids'] for ex in slines])
                token_type_ids = torch.tensor([ex['token_type_ids'] for ex in slines])
                attention_mask = torch.tensor([ex['attention_mask'] for ex in slines])
                if self.gpu:
                    input_ids = input_ids.to('cuda')
                    token_type_ids = token_type_ids.to('cuda')
                    attention_mask = attention_mask.to('cuda')
                (asw_pred, prob) = self.model.predict(input_ids, token_type_ids, attention_mask)
                asws.extend(asw_pred)
                probs.extend(prob)
                slines = []
            # Each example pairs one question with the full evaluated text.
            cur_dict = self.model.tokenizer.encode_plus(question, evaluated_text, max_length=self.max_seq_len, pad_to_max_length=True, return_token_type_ids=True)
            slines.append(cur_dict)
        # Flush the final (possibly partial) batch.
        if (slines != []):
            input_ids = torch.tensor([ex['input_ids'] for ex in slines])
            token_type_ids = torch.tensor([ex['token_type_ids'] for ex in slines])
            attention_mask = torch.tensor([ex['attention_mask'] for ex in slines])
            if self.gpu:
                input_ids = input_ids.to('cuda')
                token_type_ids = token_type_ids.to('cuda')
                attention_mask = attention_mask.to('cuda')
            (asw_pred, prob) = self.model.predict(input_ids, token_type_ids, attention_mask)
            asws.extend(asw_pred)
            probs.extend(prob)
        # Token-level F1 of each predicted answer against its reference.
        for (asw, true_asw) in zip(asws, true_asws):
            score_f += f1_score(asw, true_asw)
        score_prob = sum(probs)
        return {'summaqa_avg_prob': (score_prob / len(questions)), 'summaqa_avg_fscore': (score_f / len(questions))}
def get_article_ids_past_seven_days():
    """Return article_ids stamped within the last week, ascending."""
    sql = 'SELECT article_id FROM articles \n WHERE datestamp > date_sub(now(), INTERVAL 1 WEEK)\n ORDER BY article_id ASC'
    with closing(getDb().cursor()) as cur:
        cur.execute(sql)
        rows = cur.fetchall()
    # Each row is a 1-tuple; unwrap to a flat list of ids.
    return [row[0] for row in rows]
def wget(src, filename):
    """Download `src` to `filename` via the system wget binary; raise on failure."""
    result = run(['wget', src, '-O', filename])
    if result.returncode != 0:
        raise ValueError('Failed to download', src, 'to', filename)
class RandomHorizontalFlip(object):
    """Flip image and label left-right together with probability 0.5."""

    def __call__(self, sample):
        image = sample['image']
        label = sample['label']
        # Image and label must be flipped together to keep them aligned.
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            label = label.transpose(Image.FLIP_LEFT_RIGHT)
        return {'image': image, 'label': label}
class AsyncSumTree(SumTree):
    """SumTree variant usable from multiple processes: the tree array lives in
    shared memory and the insertion cursor `t` is mirrored in a shared value."""
    # Marks this implementation as the asynchronous (multi-process) one.
    async_ = True
    def __init__(self, *args, **kwargs):
        # Shared signed-long cursor; created before super().__init__ since the
        # base constructor may call reset(), which touches it.
        self.async_t = mp.RawValue('l', 0)
        super().__init__(*args, **kwargs)
    def _allocate_tree(self):
        # Full binary tree with 2**tree_levels - 1 nodes, in shared memory.
        self.tree = np_mp_array(((2 ** self.tree_levels) - 1), np.float64)
        self.tree.fill(0)
    def reset(self):
        super().reset()
        self.async_t.value = 0
    def advance(self, *args, **kwargs):
        # Pull the shared cursor into the local one, advance, publish it back.
        # NOTE(review): this read-modify-write is not atomic across processes;
        # presumably callers serialize advance() externally — confirm.
        self.t = self.async_t.value
        super().advance(*args, **kwargs)
        self.async_t.value = self.t
def blockdiag_butterfly_project_einsum_simple(M, nblocks1, nblocks2):
    """Project matrix M onto a rank-1 block-diagonal butterfly factorization.

    Permutes M into nblocks1 x nblocks2 blocks, takes a rank-1 SVD of each
    block, and returns the two butterfly factors (w1_bfly, w2_bfly).
    """
    # NOTE(review): assumes M's row count is divisible by nblocks2 and its
    # column count by nblocks1 — `rearrange` will raise otherwise.
    M_permuted_batched = rearrange(M, '(l j) (k i) -> k j l i', k=nblocks1, j=nblocks2)
    (U, Vt) = low_rank_project(M_permuted_batched, rank=1)
    # Squeeze out the rank-1 axis of each factor.
    w1_bfly = rearrange(Vt, 'k j 1 i -> k j i')
    w2_bfly = rearrange(U, 'k j l 1 -> j l k')
    return (w1_bfly, w2_bfly)
def test_digits_cosine_lazy_sparse():
    """Lazy optimizer on sparse precomputed-cosine digits must reproduce the reference ranking/gains."""
    selector = SumRedundancySelection(100, 'precomputed', optimizer='lazy')
    selector.fit(X_digits_cosine_sparse)
    assert_array_equal(selector.ranking, digits_cosine_ranking)
    assert_array_almost_equal(selector.gains, digits_cosine_gains, 4)
def build_fake_yaml():
    """Write a minimal quantization config to ./fake_yaml.yaml.

    The string is round-tripped through yaml so the emitted file is normalized.
    """
    fake_yaml = '\n model:\n name: fake_yaml\n framework: tensorflow\n device: cpu\n quantization:\n model_wise:\n weight:\n granularity: per_tensor\n scheme: sym\n dtype: int8\n algorithm: minmax\n evaluation:\n accuracy:\n metric:\n topk: 1\n tuning:\n strategy:\n name: basic\n accuracy_criterion:\n relative: 0.1\n workspace:\n path: saved\n '
    y = yaml.load(fake_yaml, Loader=yaml.SafeLoader)
    # FIX: the `with` block already closes the file; the explicit close() was redundant.
    with open('fake_yaml.yaml', 'w', encoding='utf-8') as f:
        yaml.dump(y, f)
class WeightedLoss(torch.nn.Module):
    """Combine several loss callables into a single weighted sum."""

    def __init__(self, loss_fns: List[Union[(Callable, torch.nn.Module)]], weights: Optional[List[float]]=None):
        super().__init__()
        self.loss_fns = loss_fns
        if weights is None:
            # Unweighted combination: every loss contributes equally.
            weights = [1.0 for _ in loss_fns]
        else:
            assert len(loss_fns) == len(weights), 'Number of losses and weights must match.'
        self.weights = weights

    def forward(self, *args, **kwargs):
        # All losses receive the same inputs; results are summed with weights.
        return sum(weight * loss_fn(*args, **kwargs)
                   for loss_fn, weight in zip(self.loss_fns, self.weights))
class Timer(object):
    """Accumulates wall-clock timings over repeated tic()/toc() calls."""

    def __init__(self):
        # All statistics start at zero; clear() defines the canonical state.
        self.clear()

    def tic(self):
        """Mark the start of a timed section."""
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing and return the running average (or the last interval)."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        self.duration = self.average_time if average else self.diff
        return self.duration

    def clear(self):
        """Reset all accumulated statistics."""
        self.total_time = 0.0
        self.calls = 0
        self.start_time = 0.0
        self.diff = 0.0
        self.average_time = 0.0
        self.duration = 0.0
def load_pretrain(model, pretrained_dict):
    """Load pretrained weights into `model` (non-strict) after a key sanity check.

    Returns the same model instance for chaining.
    """
    # FIX: removed unused `device = torch.cuda.current_device()`, which also
    # made this function fail on CPU-only hosts for no reason.
    check_keys(model, pretrained_dict)
    model.load_state_dict(pretrained_dict, strict=False)
    return model
def train(policy, rollout_worker, evaluator, n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval, save_policies, **kwargs):
    """DDPG+HER training loop: alternate rollout collection, policy updates and
    evaluation; rank 0 aggregates logs and saves best/latest/periodic policies."""
    rank = MPI.COMM_WORLD.Get_rank()
    latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')
    best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')
    # Template path; '{}' is filled with the epoch number.
    periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')
    logger.info('Training...')
    best_success_rate = (- 1)
    for epoch in range(n_epochs):
        # Collect experience, then run n_batches optimization steps per cycle.
        rollout_worker.clear_history()
        for _ in range(n_cycles):
            episode = rollout_worker.generate_rollouts()
            policy.store_episode(episode)
            for _ in range(n_batches):
                policy.train()
            policy.update_target_net()
        # Evaluate the current policy on fresh test rollouts.
        evaluator.clear_history()
        for _ in range(n_test_rollouts):
            evaluator.generate_rollouts()
        # Aggregate scalar logs across MPI workers; only rank 0 dumps them.
        logger.record_tabular('epoch', epoch)
        for (key, val) in evaluator.logs('test'):
            logger.record_tabular(key, mpi_average(val))
        for (key, val) in rollout_worker.logs('train'):
            logger.record_tabular(key, mpi_average(val))
        for (key, val) in policy.logs():
            logger.record_tabular(key, mpi_average(val))
        if (rank == 0):
            logger.dump_tabular()
        # Persist the best-so-far and latest policies (rank 0 only).
        success_rate = mpi_average(evaluator.current_success_rate())
        if ((rank == 0) and (success_rate >= best_success_rate) and save_policies):
            best_success_rate = success_rate
            logger.info('New best success rate: {}. Saving policy to {} ...'.format(best_success_rate, best_policy_path))
            evaluator.save_policy(best_policy_path)
            evaluator.save_policy(latest_policy_path)
        if ((rank == 0) and (policy_save_interval > 0) and ((epoch % policy_save_interval) == 0) and save_policies):
            policy_path = periodic_policy_path.format(epoch)
            logger.info('Saving periodic policy to {} ...'.format(policy_path))
            evaluator.save_policy(policy_path)
        # Sanity check: each rank must draw different random numbers, i.e.
        # non-root ranks must disagree with the value broadcast from rank 0.
        local_uniform = np.random.uniform(size=(1,))
        root_uniform = local_uniform.copy()
        MPI.COMM_WORLD.Bcast(root_uniform, root=0)
        if (rank != 0):
            assert (local_uniform[0] != root_uniform[0])
def split_last(x, shape):
    """Reshape the last dimension of x into `shape`; at most one entry may be -1."""
    dims = list(shape)
    assert dims.count(-1) <= 1
    if -1 in dims:
        # Infer the free dimension; np.prod(dims) is negative because of the -1,
        # so negating it yields the product of the known dimensions.
        dims[dims.index(-1)] = int(x.size(-1) / -np.prod(dims))
    return x.view(*x.size()[:-1], *dims)
def main(args):
    """SEED distillation entry point.

    Builds the student/teacher pair, optionally wraps the model in DDP,
    restores teacher/resume checkpoints, then trains and checkpoints each epoch.
    """
    os.makedirs(args.output, exist_ok=True)
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        cudnn.benchmark = True
        logger = setup_logger(output=args.output, distributed_rank=dist.get_rank(), color=False, name='SEED')
        if (dist.get_rank() == 0):
            # Rank 0 persists the run configuration for reproducibility.
            path = os.path.join(args.output, 'config.json')
            with open(path, 'w') as f:
                json.dump(vars(args), f, indent=2)
            logger.info('Full config saved to {}'.format(path))
            logger.info('world size: {}'.format(dist.get_world_size()))
            logger.info('local_rank: {}'.format(args.local_rank))
            logger.info('dist.get_rank(): {}'.format(dist.get_rank()))
    else:
        logger = setup_logger(output=args.output, color=False, name='SEED')
        logger.info('Single GPU mode for debugging.')
    logger.info("=> creating student encoder '{}'".format(args.student_arch))
    logger.info("=> creating teacher encoder '{}'".format(args.teacher_arch))
    assert (args.teacher_arch in models.__dict__)
    # MLP head width of the SwAV teacher depends on the backbone width.
    # NOTE(review): swav_mlp stays unbound for any other teacher_arch, so the
    # SEED(...) call below would raise NameError — confirm the supported set.
    if (args.teacher_arch == 'swav_resnet50'):
        swav_mlp = 2048
    elif (args.teacher_arch == 'swav_resnet50w2'):
        swav_mlp = 8192
    elif (args.teacher_arch == 'swav_resnet50w4'):
        swav_mlp = 8192
    elif (args.teacher_arch == 'swav_resnet50w5'):
        swav_mlp = 10240
    model = seed.small_patch_builder.SEED(models.__dict__[args.student_arch], models.__dict__[args.teacher_arch], args.dim, args.queue, args.temp, mlp=args.student_mlp, temp=args.distill_t, dist=args.distributed, swav_mlp=swav_mlp)
    logger.info(model)
    if args.distributed:
        logger.info('Entering distributed mode.')
        model = torch.nn.parallel.DistributedDataParallel(model.cuda(), device_ids=[args.local_rank], broadcast_buffers=False, find_unused_parameters=True)
        logger.info('Model now distributed.')
        # Linear LR scaling rule: base LR is defined for batch size 256.
        args.lr_mult = (args.batch_size / 256)
        args.warmup_epochs = 5
        optimizer = torch.optim.SGD(model.parameters(), (args.lr_mult * args.lr), momentum=args.momentum, weight_decay=args.weight_decay)
        if (dist.get_rank() == 0):
            summary_writer = SummaryWriter(log_dir=args.output)
        else:
            summary_writer = None
    else:
        args.lr_mult = 1
        args.warmup_epochs = 5
        model = model.cuda()
        optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        summary_writer = SummaryWriter(log_dir=args.output)
    # Load the (frozen) teacher weights used for distillation.
    if args.distill:
        if os.path.isfile(args.distill):
            model = load_swav_teacher_encoder(args, model, logger, distributed=args.distributed)
            logger.info("=> Teacher checkpoint successfully loaded from '{}'".format(args.distill))
        else:
            logger.info('wrong distillation checkpoint.')
    # Optionally resume student training state.
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info("=> loading checkpoint '{}'".format(args.resume))
            model = resume_training(args, model, optimizer, logger)
        else:
            logger.info("=> no checkpoint found at '{}'".format(args.resume))
    torch.cuda.empty_cache()
    train_dataset = Small_Patch_TSVDataset(os.path.join(args.data, 'train.tsv'), swav_aug, swav_small_aug, num_patches=6)
    logger.info('TSV Dataset done.')
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
        assert ((args.batch_size // dist.get_world_size()) == (args.batch_size / dist.get_world_size())), 'Batch size is not divisible by num of gpus.'
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=(args.batch_size // dist.get_world_size()), shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True, drop_last=True)
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-shuffle the distributed sampler deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        loss = train(train_loader, model, soft_cross_entropy, optimizer, epoch, args, logger)
        if (summary_writer is not None):
            summary_writer.add_scalar('train_loss', loss, epoch)
            summary_writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
        # NOTE(review): dist.get_rank() is also called in single-GPU mode here;
        # presumably the process group exists in all supported runs — confirm.
        if (dist.get_rank() == 0):
            file_str = 'Teacher_{}_T-Epoch_{}_Student_{}_distill-Epoch_{}-checkpoint_{:04d}.pth.tar'.format(args.teacher_ssl, args.epochs, args.student_arch, args.teacher_arch, epoch)
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.student_arch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()}, is_best=False, filename=os.path.join(args.output, file_str))
            logger.info('> checkpoint saved to {}'.format(os.path.join(args.output, file_str)))
class PrecisionRecallCurve(Metric):
    """Orca metric wrapper that materializes the PyTorch PrecisionRecallCurve."""

    def get_pytorch_metric(self) -> 'PyTorchPrecisionRecallCurve':
        # Imported lazily so torch is only required when the metric is used.
        from bigdl.orca.learn.pytorch import pytorch_metrics
        return pytorch_metrics.PrecisionRecallCurve()

    def get_name(self) -> str:
        return 'PrecisionRecallCurve'
def test_get_dynamic_voxelnet():
    """Forward two random point clouds through a dynamic-voxelization VoxelNet."""
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    cfg = _get_model_cfg('dynamic_voxelization/dv_second_secfpn_6x8_80e_kitti-3d-car.py')
    self = build_detector(cfg).cuda()
    # Two clouds of different sizes exercise the dynamic voxelizer's batching.
    clouds = [torch.rand([2010, 4], device='cuda'), torch.rand([2020, 4], device='cuda')]
    feats = self.extract_feat(clouds, None)
    assert feats[0].shape == torch.Size([2, 512, 200, 176])
def train_model(args):
    """Train a multi-domain BiLSTM tagger with per-domain classifiers.

    Loads CoNLL data (single or multi-domain), builds vocabularies, a shared
    encoder plus one classifier per domain (and low-rank mixing matrices Us),
    then trains for args.epochs epochs, tracking the best dev/test scores.
    """
    HIDDEN_SIZE = args.HIDDEN_SIZE
    MLP_SIZE = args.MLP_SIZE
    num_train_files = 0
    Us = []
    batch_trains = []
    if args.train:
        # NOTE(review): overwritten below whenever args.multi_train is set.
        train = file_conll(args.train).tupled_data
    if args.multi_train:
        train = []
        for file_name in args.multi_train:
            # Ontonotes files are capped at 2500 samples, others at train_samples.
            if ('ontonotes' in file_name):
                print(len(file_conll(file_name).tupled_data))
                train += file_conll(file_name).tupled_data[:2500]
            else:
                train += file_conll(file_name).tupled_data[:args.train_samples]
            num_train_files += 1
            # One low-rank mixing matrix per training domain.
            U = torch.FloatTensor((2 * args.HIDDEN_SIZE), args.m_rank)
            nn.init.xavier_uniform(U)
            Us.append(U)
        # Rebuild per-domain lists so batches can interleave domains.
        train_combined = []
        train = []
        for file_name in args.multi_train:
            train += file_conll(file_name).tupled_data[:args.train_samples]
            if ('ontonotes' in file_name):
                train_combined.append(file_conll(file_name).tupled_data[:2500])
            else:
                train_combined.append(file_conll(file_name).tupled_data[:args.train_samples])
        args.train_samples = max_samples = max(2500, args.train_samples)
        # batch_trains[j] holds one sample per domain (cycling short domains).
        for j in range(max_samples):
            current_batch = []
            for i in range(num_train_files):
                current_batch.append(train_combined[i][(j % len(train_combined[i]))])
            batch_trains.append(current_batch)
    if args.dev:
        dev = file_conll(args.dev).tupled_data
    if args.test:
        test = file_conll(args.test).tupled_data
    args.num_train_files = num_train_files
    # Build word/tag/char vocabularies over all splits.
    words = []
    tags = []
    chars = set()
    wc = Counter()
    for sent in ((train + dev) + test):
        for (w, p) in sent:
            words.append(w)
            tags.append(p)
            wc[w] += 1
            chars.update(w)
    words.append('_UNK_')
    chars.add('_UNK_')
    chars.add('<*>')
    vw = Vocab.from_corpus([words])
    vt = Vocab.from_corpus([tags])
    vc = Vocab.from_corpus([chars])
    UNK = vw.w2i['_UNK_']
    CUNK = vc.w2i['_UNK_']
    pad_char = vc.w2i['<*>']
    nwords = vw.size()
    ntags = vt.size()
    nchars = vc.size()
    print(('nwords=%r, ntags=%r, nchars=%r' % (nwords, ntags, nchars)))
    args.ntags = ntags
    args.nwords = nwords
    args.nchars = nchars
    # Shared encoder plus a generic classifier and one classifier per domain.
    encoder_class = get_model_class('tagger')
    encoder_class.add_config(parser)
    encoder = encoder_class(args, vw, vc, vt, wc, UNK, CUNK, pad_char)
    classifier = Classifier((2 * HIDDEN_SIZE), MLP_SIZE, ntags)
    classifiers = []
    for ind in range(num_train_files):
        classifiers.append(Classifier((2 * HIDDEN_SIZE), MLP_SIZE, ntags))
    requires_grad = (lambda x: x.requires_grad)
    if args.CUDA:
        # BUG FIX: `map(lambda m: m.cuda(), ...)` is lazy in Python 3 and was
        # never consumed, so the modules were never moved to the GPU.
        for module in (([encoder] + [classifier]) + classifiers):
            module.cuda()
        Us = [Variable(U.cuda(), requires_grad=True) for U in Us]
    else:
        Us = [Variable(U, requires_grad=True) for U in Us]
    optimizer_encoder = optim.Adam(encoder.parameters(), lr=0.001, weight_decay=0.0001)
    # All task-specific parameters (classifiers + Us) share one optimizer.
    task_params = list(classifier.parameters())
    for x in classifiers:
        task_params += list(x.parameters())
    task_params += Us
    optimizer_classifier = optim.Adam(filter(requires_grad, task_params), lr=0.001, weight_decay=0.0001)
    print(('startup time: %r' % (time.time() - start)))
    start_time = time.time()
    i = 0
    best_test = 0
    best_dev = 0
    for ITER in range(args.epochs):
        (encoder, classifier, optimizer_encoder, optimizer_classifier) = train_epoch(encoder, classifier, classifiers, batch_trains, dev, test, optimizer_encoder, optimizer_classifier, start_time, i, Us)
        print(('epoch %r finished' % ITER))
        domain_encs = domain_encoding(batch_trains, args, encoder)
        curr_dev = evaluate(encoder, args, batch_trains, classifier, classifiers, dev, domain_encs, Us)
        curr_test = evaluate(encoder, args, batch_trains, classifier, classifiers, test, domain_encs, Us)
        # Track test score at the best dev epoch (model selection on dev).
        if (curr_dev > best_dev):
            best_dev = curr_dev
            best_test = curr_test
        print(best_dev, best_test)
def build_backbone(args):
    """Assemble the backbone + positional-encoding joiner described by args."""
    position_embedding = build_position_encoding(args)
    train_backbone = args.lr_backbone > 0
    # Intermediate layers are needed for masks or multi-scale features.
    return_interm_layers = args.masks or (args.num_feature_levels > 1)
    if 'resnet' in args.backbone:
        core = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
    else:
        core = TransformerBackbone(args.backbone, train_backbone, return_interm_layers, args)
    if args.upsample_backbone_output:
        # Optionally upsample backbone feature maps before joining.
        core = UpSampleWrapper(core, args.upsample_stride)
    return Joiner(core, position_embedding)
def get_shufflenetv2b(width_scale, shuffle_group_first=True, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
    """Construct a ShuffleNetV2(b) with channels scaled by width_scale and
    optionally load pretrained weights from the model store."""
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]
    # Repeat each stage's channel count once per unit in that stage.
    channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)
    net = ShuffleNetV2b(channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, shuffle_group_first=shuffle_group_first, **kwargs)
    if pretrained:
        # Covers both None and the empty string.
        if not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
def compute_lucrativity(setup_horner, setup_naive, eval_horner, eval_naive):
    """Rounded ratio of Horner's extra setup cost to its per-evaluation saving,
    i.e. how many evaluations are needed before the setup pays off."""
    saving_per_eval = eval_naive - eval_horner
    extra_setup = setup_horner - setup_naive
    return round(extra_setup / saving_per_eval)
def maybe_download(url, dest):
    """Download `url` to `dest` unless the destination file already exists."""
    if os.path.exists(dest):
        return
    logger.info('Downloading %s to %s', url, dest)
    download(url, dest)
def wavread(fn):
    """Read a 16-bit PCM wav file; return (samples scaled into [-1, 1), sample_rate)."""
    sample_rate, raw = wavfile.read(fn)
    # Scale int16 full-scale (2**15) down to unit range as float32.
    samples = raw.astype(np.float32) / (2 ** 15)
    return (samples, sample_rate)
class ModelOutput(OrderedDict):
    """Dict/tuple hybrid base class for dataclass model outputs.

    Fields with non-None values are exposed both as attributes and as ordered
    dict entries; integer indexing returns the tuple view of the stored values.
    Subclasses are expected to be dataclasses whose fields after the first all
    default to None.
    """
    def __post_init__(self):
        """Populate the dict view from the dataclass fields after init."""
        class_fields = fields(self)
        assert len(class_fields), f'{self.__class__.__name__} has no fields.'
        assert all(((field.default is None) for field in class_fields[1:])), f'{self.__class__.__name__} should not have more than one required field.'
        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(((getattr(self, field.name) is None) for field in class_fields[1:]))
        if (other_fields_are_none and (not is_tensor(first_field))):
            # Only the first field is set and it is not a tensor: it may be an
            # iterable of (key, value) pairs used to populate this output.
            try:
                iterator = iter(first_field)
                first_field_iterator = True
            except TypeError:
                first_field_iterator = False
            if first_field_iterator:
                for element in iterator:
                    # Stop at the first element that is not a (str, value) pair.
                    if ((not isinstance(element, (list, tuple))) or (not (len(element) == 2)) or (not isinstance(element[0], str))):
                        break
                    setattr(self, element[0], element[1])
                    if (element[1] is not None):
                        self[element[0]] = element[1]
            elif (first_field is not None):
                self[class_fields[0].name] = first_field
        else:
            # Standard case: store every non-None field under its own name.
            for field in class_fields:
                v = getattr(self, field.name)
                if (v is not None):
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        # Mutation of the dict view is forbidden to keep it in sync with attrs.
        raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.')
    def setdefault(self, *args, **kwargs):
        raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.')
    def pop(self, *args, **kwargs):
        raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.')
    def update(self, *args, **kwargs):
        raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.')
    def __getitem__(self, k):
        # String keys behave like a dict; integer/slice keys index the tuple view.
        if isinstance(k, str):
            inner_dict = {k: v for (k, v) in self.items()}
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__(self, name, value):
        # Keep the dict entry in sync when an existing key's attribute is set.
        if ((name in self.keys()) and (value is not None)):
            super().__setitem__(name, value)
        super().__setattr__(name, value)
    def __setitem__(self, key, value):
        # Setting an item also sets the matching attribute.
        super().__setitem__(key, value)
        super().__setattr__(key, value)
    def to_tuple(self) -> Tuple[Any]:
        """Return the stored (non-None) values as a plain tuple, in order."""
        return tuple((self[k] for k in self.keys()))
# BUG FIX: this was a bare call, so the architecture was never registered and
# the function below was left undecorated; the registration decorator's `@`
# was evidently lost.
@_model_architecture('transformer_lm', 'transformer_lm_gpt3_13')
def transformer_lm_gpt3_13(args):
    """GPT-3 13B decoder preset: 40 layers, 5120 embed dim, 40 heads."""
    args.decoder_layers = safe_getattr(args, 'decoder_layers', 40)
    args.decoder_embed_dim = safe_getattr(args, 'decoder_embed_dim', 5120)
    args.decoder_attention_heads = safe_getattr(args, 'decoder_attention_heads', 40)
    base_gpt3_architecture(args)
def collect_node_state(h, except_last=False):
    """Concatenate the per-node states in `h` along dim 0, optionally omitting
    the state of the last node."""
    keys = list(h.keys())
    states = [h[k] for k in keys[:-1]]
    if not except_last:
        states.append(h[keys[-1]])
    return torch.cat(states, 0)
def NN_loss(x, y, dim=0):
    """Mean nearest-neighbour distance: minimum of pairwise_dist(x, y) over `dim`, averaged."""
    distances = pairwise_dist(x, y)
    nearest, _ = distances.min(dim=dim)
    return nearest.mean()
def read_only_json_in_dir(dname, check_inv=False):
    """Load the single .json file in `dname`; optionally assert it reports no inversions."""
    matches = glob.glob(f'{dname}/*.json')
    assert len(matches) == 1, f'json files in {dname}: {matches}'
    path = matches[0]
    with open(path) as fin:
        payload = json.load(fin)
    if check_inv:
        assert payload['nr_inverted'] == 0, f'inverted in {path}'
    return payload
def generate_random_ring_element(size, ring_size=(2 ** 64), device='cpu', **kwargs):
    """Sample a long tensor uniformly over the signed ring [-ring_size//2, (ring_size-1)//2]."""
    gen = kwargs.get('generator')
    rand_element = torch.empty(size=size, dtype=torch.long, device=device).random_(
        -(ring_size // 2), to=((ring_size - 1) // 2), generator=gen
    )
    # CUDA tensors are wrapped in the project's CUDALongTensor shim.
    if rand_element.is_cuda:
        return CUDALongTensor(rand_element)
    return rand_element
def _destination_position(pdf, destination):
    """Return a sortable (page, column, -top, left) tuple for a PDF outline destination.

    `column` is 0 for the left half of the page and 1 for the right half.
    Raises IncompleteCoordinatesError when the destination lacks coordinates.
    """
    # Page width taken from the crop box of the destination's own page.
    pagewidth = pdf.getPage(pdf.getDestinationPageNumber(destination)).cropBox.lowerRight[0]
    # NOTE(review): these falsy checks also reject legitimate zero coordinates
    # (left == 0 or top == 0); confirm whether `is None` was intended.
    if ((not destination.left) or (not destination.top)):
        raise IncompleteCoordinatesError(destination)
    column = ((2 * destination.left) // pagewidth)
    # Negated top so ascending sorts go top-of-page first.
    return (pdf.getDestinationPageNumber(destination), column, (- destination.top), destination.left)
def load_schema(name):
    """Load tests/schemas/<name>.json (relative to the cwd) as parsed JSON."""
    schema_path = Path('tests') / 'schemas' / f'{name}.json'
    with open(schema_path, 'r') as f:
        return json.load(f)
class PhysicsOracle(Baseline):
    """Oracle baseline: extrapolates with several constant-kinematics models and
    returns whichever path lands closest to the ground-truth future."""

    def __call__(self, token) -> Prediction:
        instance, sample = token.split('_')
        kinematics = _kinematics_from_tokens(self.helper, instance, sample)
        ground_truth = self.helper.get_future_for_agent(instance, sample, self.sec_from_now, in_agent_frame=False)
        assert ground_truth.shape[0] == int(self.sec_from_now * self.sampled_at), f'Ground truth does not correspond to {self.sec_from_now} seconds.'
        extrapolators = [_constant_acceleration_and_heading, _constant_magnitude_accel_and_yaw_rate, _constant_speed_and_yaw_rate, _constant_velocity_heading_from_kinematics]
        candidate_paths = [fn(kinematics, self.sec_from_now, self.sampled_at) for fn in extrapolators]
        # Pick the extrapolation with the smallest Frobenius-norm error.
        best_path = min(candidate_paths, key=lambda path: np.linalg.norm(np.array(path) - ground_truth, ord='fro'))
        return Prediction(instance, sample, np.expand_dims(best_path, 0), np.array([1]))
def rule(id, r, p):
    """Render rule `r` (deduplicated against _rules up to isomorphism) to its
    L/K/R PDF files and register them as a LaTeX macro keyed by `id`."""
    _checkSettings()
    # Reuse a previously seen isomorphic rule; otherwise remember this one.
    # (for/else: the else branch runs only when no isomorphic match was found.)
    for a in _rules:
        if (a.isomorphism(r, 1, labelSettings=_ls) == 1):
            r = a
            break
    else:
        _rules.append(r)
    f = r.print(p, p)
    # r.print returns a pair; the first entry is the output file-name stem.
    f = f[0]
    fL = outputFile((f + '_L.pdf'))
    fK = outputFile((f + '_K.pdf'))
    fR = outputFile((f + '_R.pdf'))
    # Expose the three rendered figures to LaTeX as \rule-<id>{L}{K}{R}.
    texDefine(('rule-' + str(id)), ('{%s}{%s}{%s}' % (fL, fK, fR)))
def resize_multiple(img, sizes=(8, 16, 32, 64, 128, 256, 512, 1024), quality=100):
    """Resize `img` to every size in `sizes`, returning the converted results in order."""
    return [resize_and_convert(img, size, quality) for size in sizes]
def load_checkpoint(model, checkpoint_path, model_key='model|module|state_dict', strict=True):
    """Load weights from `checkpoint_path` into `model`; print and return any key mismatches."""
    weights = load_state_dict(checkpoint_path, model_key=model_key)
    incompatible_keys = model.load_state_dict(weights, strict=strict)
    # Surface missing/unexpected keys so silent partial loads are visible.
    print(incompatible_keys)
    return incompatible_keys
def train(train_generator, train_size, input_num, dims_num):
    """Build, train and save a 1-D CNN binary classifier.

    Architecture: Conv1D(64)x2 -> MaxPool/Dropout -> Conv1D(128)x2 ->
    MaxPool/Dropout -> Flatten -> Dense(128) -> Dropout -> Dense(2, softmax).

    Args:
        train_generator: generator yielding (inputs, labels) batches.
        train_size: total number of training samples; used to derive
            steps_per_epoch.
        input_num: sequence length of each sample.
        dims_num: feature dimension per timestep.

    NOTE(review): relies on module-level globals ``batch_size``,
    ``epochs_num``, ``log_dir`` and ``model_dir`` — confirm they are set
    before calling. ``fit_generator`` is deprecated in modern Keras
    (plain ``model.fit`` accepts generators); left unchanged here.
    """
    print('Start Train Job! ')
    start = time.time()
    inputs = InputLayer(input_shape=(input_num, dims_num), batch_size=batch_size)
    layer1 = Conv1D(64, 3, activation='relu')
    layer2 = Conv1D(64, 3, activation='relu')
    layer3 = Conv1D(128, 3, activation='relu')
    layer4 = Conv1D(128, 3, activation='relu')
    layer5 = Dense(128, activation='relu')
    output = Dense(2, activation='softmax', name='Output')
    optimizer = Adam()
    model = Sequential()
    model.add(inputs)
    model.add(layer1)
    model.add(layer2)
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.5))
    model.add(layer3)
    model.add(layer4)
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(layer5)
    model.add(Dropout(0.5))
    model.add(output)
    # TensorBoard logging plus early stopping on validation loss.
    call = TensorBoard(log_dir=log_dir, write_grads=True, histogram_freq=1)
    early_stop = EarlyStopping(monitor='val_loss', mode='min', min_delta=0, patience=3, verbose=1, restore_best_weights=True)
    model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit_generator(train_generator, steps_per_epoch=(train_size // batch_size), epochs=epochs_num, callbacks=[call, early_stop])
    model.save(model_dir)
    end = time.time()
    print(('Over train job in %f s' % (end - start)))
class SEWForCTC(metaclass=DummyObject):
    """Import-time placeholder for the real ``SEWForCTC`` model
    (standard dummy-object pattern for optional backends)."""
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        # Delegates the availability check to requires_backends, which
        # raises with installation guidance when torch is missing.
        requires_backends(self, ['torch'])
def data_generator(data, dataloader, entity2idx):
    """Yield QA training samples as (head_tensor, tokenized_q, attn_mask, answer, raw_q).

    Args:
        data: iterable of samples ``(head_entity, question, answer)`` where
            ``answer`` is a single entity string or an iterable of entity
            strings.
        dataloader: object exposing ``tokenize_question(question)`` returning
            ``(question_tokenized, attention_mask)``.
        entity2idx: mapping from (stripped) entity string to integer id.

    Yields:
        Tuples ``(head_id_tensor, question_tokenized, attention_mask, ans,
        question)`` where ``ans`` is an int for a single answer, or a list of
        ids for multiple answers (unknown entities are silently skipped,
        matching the original behavior).
    """
    for data_sample in data:  # direct iteration instead of range(len(...))
        head = entity2idx[data_sample[0].strip()]
        question = data_sample[1]
        question_tokenized, attention_mask = dataloader.tokenize_question(question)
        answer_field = data_sample[2]
        if isinstance(answer_field, str):  # isinstance over `type(...) is str`
            ans = entity2idx[answer_field]
        else:
            # Keep only answers present in the vocabulary.
            ans = [entity2idx[entity.strip()] for entity in answer_field
                   if entity.strip() in entity2idx]
        yield (torch.tensor(head, dtype=torch.long), question_tokenized, attention_mask, ans, data_sample[1])
class GCN(nn.Module):
    """Stack of GraphConv layers over a fixed graph ``g``; outputs per-node
    log-probabilities over ``n_classes``."""

    def __init__(self, g, in_feats, n_hidden, n_classes, n_layers):
        super(GCN, self).__init__()
        self.g = g
        assert (n_layers >= 2)
        convs = nn.ModuleList()
        # Input projection, (n_layers - 2) hidden layers, output projection.
        convs.append(GraphConv(in_feats, n_hidden, allow_zero_in_degree=True))
        for _ in range(n_layers - 2):
            convs.append(GraphConv(n_hidden, n_hidden, allow_zero_in_degree=True))
        convs.append(GraphConv(n_hidden, n_classes, allow_zero_in_degree=True))
        self.layers = convs

    def forward(self, features):
        """Apply every GraphConv in sequence, then log-softmax over classes."""
        h = features
        for conv in self.layers:
            h = conv(self.g, h)
        return F.log_softmax(h, dim=1)
def test_get_model_from_default_config():
    """Smoke-test that every supported model type builds from its default config."""
    ion_pos = jnp.array([[1.0, 2.0, 3.0], [-2.0, 3.0, -4.0], [-0.5, 0.0, 0.0]])
    ion_charges = jnp.array([1.0, 3.0, 2.0])
    nelec = jnp.array([4, 3])

    def _build(model_type, use_det_resnet=True, determinant_fn_mode=None,
               explicit_antisym_subtype=None, use_products_covariance=False):
        # Only verifies construction succeeds; no forward evaluation.
        config = get_default_config_with_chosen_model(
            model_type,
            use_det_resnet=use_det_resnet,
            determinant_fn_mode=determinant_fn_mode,
            explicit_antisym_subtype=explicit_antisym_subtype,
            use_products_covariance=use_products_covariance,
        ).model
        models.construct.get_model_from_config(config, nelec, ion_pos, ion_charges)

    for subtype in ('factorized', 'generic'):
        _build('explicit_antisym', explicit_antisym_subtype=subtype)
    for model_type in ('ferminet', 'embedded_particle_ferminet', 'extended_orbital_matrix_ferminet'):
        _build(model_type, use_det_resnet=False)
        for mode in ('sign_covariance', 'parallel_even', 'pairwise_even'):
            _build(model_type, use_det_resnet=True, determinant_fn_mode=mode)
    for model_type in ('orbital_cofactor_net', 'per_particle_dets_net'):
        for covariance_flag in (False, True):
            _build(model_type, use_products_covariance=covariance_flag)
def test_hourglass_backbone():
    """HourglassNet: invalid configs must assert; valid 1- and 2-stack nets
    must emit one (1, 64, 64, 64) feature map per stack."""
    with pytest.raises(AssertionError):
        HourglassNet(num_stacks=0)
    with pytest.raises(AssertionError):
        HourglassNet(stage_channels=[256, 256, 384, 384, 384], stage_blocks=[2, 2, 2, 2, 2, 4])
    with pytest.raises(AssertionError):
        HourglassNet(downsample_times=5, stage_channels=[256, 256, 384, 384, 384], stage_blocks=[2, 2, 2, 2, 2])
    # Same forward-shape check for 1-stack and 2-stack variants.
    for stacks in (1, 2):
        net = HourglassNet(num_stacks=stacks, stage_channels=(64, 64, 96, 96, 96, 128), feat_channel=64)
        net.train()
        outputs = net(torch.randn(1, 3, 256, 256))
        assert len(outputs) == stacks
        for out in outputs:
            assert out.shape == torch.Size([1, 64, 64, 64])
_module()
class CSRNetDecoder(nn.Module):
    """CSRNet-style decoder head producing density maps with an MSE counting loss.

    Args:
        load_weights: skip weight initialization when True (weights loaded
            externally).
        ratio: downsample ratio metadata (stored, not used in forward).
        in_channels: channel count of the input feature map.
        num_cls: number of output density-map channels.
        using_bn: insert BatchNorm in the dilated backend.
        loss_weight: scalar multiplier on the MSE loss.
        size_average: kept for backward compatibility; True averages the
            squared error, False sums it.
    """

    def __init__(self, load_weights=False, ratio=4, in_channels=256, num_cls=4, using_bn=False, loss_weight=1.0, size_average=False):
        super(CSRNetDecoder, self).__init__()
        self.seen = 0
        # Dilated backend: 512 -> 256 -> 128 -> 64 channels.
        self.backend_feat = [512, 256, 128, 64]
        self.backend = make_layers(self.backend_feat, in_channels=in_channels, batch_norm=using_bn, dilation=True)
        self.output_layer = nn.Conv2d(64, num_cls, kernel_size=1)
        self.ratio = ratio
        self.interval_feats_dim = 64
        if not load_weights:
            self._initialize_weights()
        # Fix: `size_average` is deprecated in nn.MSELoss; map it to the
        # equivalent `reduction` mode (True -> 'mean', False -> 'sum').
        self.loss_func = nn.MSELoss(reduction='mean' if size_average else 'sum')
        self.loss_weight = loss_weight

    def forward(self, x, density_maps):
        """Return ``{'loss_cnt': ...}``: weighted MSE between predicted and
        target density maps (both flattened over the channel axis)."""
        H, W = density_maps.shape[2:]
        x = self.backend(x)
        x = self.output_layer(x)
        loss = self.loss_weight * self.loss_func(x.reshape(-1, H, W), density_maps.reshape(-1, H, W))
        return dict(loss_cnt=loss)

    def _initialize_weights(self):
        # Conv: small Gaussian init with zero bias; BatchNorm: identity init.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
def get_imagenet_data(size=224):
    """Load the bundled ImageNet validation images with labels.

    Returns:
        Tuple ``(images, image_label_mapping)`` where ``images`` is a list of
        ``(loaded_image, class_or_'--')`` pairs and ``image_label_mapping``
        maps class id to a human-readable name.
    """
    images_dir = os.path.join(os.path.dirname(__file__), 'images')

    def _read_lines(fname):
        # Non-empty lines of a metadata file living next to the images.
        with open(os.path.join(images_dir, fname)) as fh:
            return [line for line in fh.readlines() if len(line.strip()) > 0]

    ground_truth_val2012 = {line.split()[0]: int(line.split()[1]) for line in _read_lines('ground_truth_val2012')}
    synset_to_class = {line.split()[1]: int(line.split()[0]) for line in _read_lines('synset_id_to_class')}
    image_label_mapping = {int(line.split(':')[0]): line.split(':')[1].strip() for line in _read_lines('imagenet_label_mapping')}

    def _label_for(fname):
        # Prefer the explicit ground-truth table, fall back to the synset
        # prefix of the filename, else the '--' placeholder.
        label = ground_truth_val2012.get(fname, None)
        if label is None:
            label = synset_to_class.get(fname.split('_')[0], None)
        return '--' if label is None else label

    images = [
        (load_image(os.path.join(images_dir, f), size), _label_for(f))
        for f in os.listdir(images_dir)
        if f.lower().endswith(('.jpg', '.jpeg'))
    ]
    return (images, image_label_mapping)
def getTrainingTestingData(batch_size):
    """Build (train, test) DataLoaders over the in-memory NYU depth data.

    The train loader shuffles and applies the default train transform; the
    test loader is unshuffled with no augmentation.
    """
    data, nyu2_train = loadZipToMem('nyu_data.zip')
    train_dataset = depthDatasetMemory(data, nyu2_train, transform=getDefaultTrainTransform())
    # NOTE(review): the test split reuses nyu2_train — presumably intentional
    # in this repo; confirm against the caller.
    test_dataset = depthDatasetMemory(data, nyu2_train, transform=getNoTransform())
    train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size, shuffle=False)
    return (train_loader, test_loader)
.parametrize('wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target', [(wide, None, None, None, X_wide, None, None, None, target), (None, tabmlp, None, None, None, X_tab, None, None, target), (None, tabresnet, None, None, None, X_tab, None, None, target), (None, tabtransformer, None, None, None, X_tab, None, None, target), (None, None, basic_rnn, None, None, None, X_text, None, target), (None, None, basic_transformer, None, None, None, X_text, None, target), (None, None, None, deepimage, None, None, None, X_img, target)])
def test_predict_with_individual_component(wide, deeptabular, deeptext, deepimage, X_wide, X_tab, X_text, X_img, target):
    """Fit a WideDeep model built from a single component and check predict()."""
    net = WideDeep(wide=wide, deeptabular=deeptabular, deeptext=deeptext, deepimage=deepimage)
    runner = Trainer(net, objective='binary', verbose=0)
    runner.fit(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img, target=target, batch_size=16)
    predictions = runner.predict(X_wide=X_wide, X_tab=X_tab, X_text=X_text, X_img=X_img)
    assert predictions.shape[0] == 32
    assert 'train_loss' in runner.history
def save_image(image_numpy, image_path):
    """Save an (H, W, C) numpy image to ``image_path``.

    Single-channel arrays (C == 1) are written in PIL's 8-bit grayscale
    mode 'L'; anything else is passed to ``Image.fromarray`` as-is.

    Args:
        image_numpy: uint8 array of shape (H, W, C) — assumed uint8 for the
            'L' mode branch; TODO confirm against callers.
        image_path: destination path (format inferred from the extension).
    """
    # Idiom fix: drop the dead `image_pil = None` pre-initialization; both
    # branches assign it. Slicing off the channel axis replaces np.reshape.
    if image_numpy.shape[2] == 1:
        image_pil = Image.fromarray(image_numpy[:, :, 0], 'L')
    else:
        image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)
def write_version_py():
    """Regenerate the version module from ``mmdet/VERSION`` and the current
    git hash, writing the result to the module-level ``version_file`` path."""
    template = "# GENERATED VERSION FILE\n# TIME: {}\n\n__version__ = '{}'\nshort_version = '{}'\nversion_info = ({})\n"
    git_sha = get_hash()
    with open('mmdet/VERSION', 'r') as fh:
        short_version = fh.read().strip()
    # '1.2.3' -> '1, 2, 3' for the version_info tuple literal.
    version_info = ', '.join(short_version.split('.'))
    full_version = short_version + '+' + git_sha
    with open(version_file, 'w') as fh:
        fh.write(template.format(time.asctime(), full_version, short_version, version_info))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.