code stringlengths 101 5.91M |
|---|
class EndSignal(object):
    """Sentinel message marking the end of a stream of work items.

    Carries the identifier of the producer plus an optional error code and
    message; a nonzero ``errno`` indicates the stream ended with a failure.
    """

    def __init__(self, id, errno=0, errmsg=''):
        self.id, self.errno, self.errmsg = id, errno, errmsg
def flash_save_checkpoint(checkpointer, step, model, optimizer, save_memory_interval, save_storage_interval, checkpoint_dir):
    """Save an FSDP sharded checkpoint to memory and/or persistent storage.

    A memory snapshot is taken every ``save_memory_interval`` steps and a
    disk snapshot every ``save_storage_interval`` steps; when ``step`` hits
    neither interval nothing is saved.  Returns True iff anything was saved.
    """
    hit_memory = (step % save_memory_interval) == 0
    hit_storage = (step % save_storage_interval) == 0
    if not (hit_memory or hit_storage):
        return False
    # Gather the sharded (per-rank) state dicts only when we actually save.
    with FSDP.state_dict_type(model, StateDictType.SHARDED_STATE_DICT):
        state_dict = {
            'model': model.state_dict(),
            'optim': FSDP.optim_state_dict(model, optimizer),
            'step': step,
        }
        ckpt_dir = os.path.join(checkpoint_dir, str(step))
        if hit_memory:
            checkpointer.save_checkpoint(step, state_dict, ckpt_dir, storage_type=StorageType.MEMORY)
        if hit_storage:
            checkpointer.save_checkpoint(step, state_dict, ckpt_dir, storage_type=StorageType.DISK)
    return True
class MixtureTable(Layer):
    """Python wrapper creating the backend MixtureTable layer.

    ``dim`` selects the dimension along which expert outputs are combined
    (defaults to INTMAX, i.e. the last usable dimension per the backend).
    """

    def __init__(self, dim=INTMAX, bigdl_type='float'):
        # First positional argument (jvalue) is None so the bridge constructs
        # a fresh backend object of the matching name.
        super().__init__(None, bigdl_type, dim)
def get_parser():
    """Build and return the argument parser for the BLEU scoring CLI."""
    parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
    arg = parser.add_argument
    arg('-s', '--sys', default='-', help='system output')
    arg('-r', '--ref', required=True, help='references')
    arg('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order')
    arg('--ignore-case', action='store_true', help='case-insensitive scoring')
    arg('--sacrebleu', action='store_true', help='score with sacrebleu')
    arg('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)')
    return parser
def clean_nyt_nursinghomes(data_dir='../../raw/nyt_nursinghomes/', out_dir='.'):
    """Load the NYT nursing-homes table, move Name/City/State to the front,
    write it to ``<out_dir>/nyt_nursinghomes.csv``, and return the DataFrame."""
    df = load_nyt_nursinghomes(data_dir=data_dir)
    lead = ['Name', 'City', 'State']
    trailing = [c for c in df.columns if c not in lead]
    df = df[lead + trailing]
    df.to_csv(oj(out_dir, 'nyt_nursinghomes.csv'), header=True, index=False)
    return df
def split_next_chamber(state: MazeGenerationState) -> MazeGenerationState:
    """Pop the next chamber off the stack and split it along its longer axis.

    Wide chambers (width >= height) are split by a horizontal wall, taller
    ones by a vertical wall; jax.lax.cond keeps the choice traceable.
    """
    remaining, chamber = stack_pop(state.chambers)
    *_, chamber_width, chamber_height = chamber
    return jax.lax.cond(
        chamber_width >= chamber_height,
        split_horizontally,
        split_vertically,
        MazeGenerationState(state.maze, remaining, state.key),
        chamber,
    )
def set_regularization(model, kernel_regularizer=None, bias_regularizer=None):
    """Return a copy of a Keras model with the given regularizers attached.

    Regularizer attributes set on live layers only take effect once the model
    is rebuilt, hence the to_json/model_from_json round trip; weights are then
    copied over so the returned model is numerically identical.
    """
    for layer in model.layers:
        for attr, reg in (('kernel_regularizer', kernel_regularizer),
                          ('bias_regularizer', bias_regularizer)):
            if reg is not None and hasattr(layer, attr):
                setattr(layer, attr, reg)
    rebuilt = tf.keras.models.model_from_json(model.to_json())
    rebuilt.set_weights(model.get_weights())
    return rebuilt
def validate(valloader, model, criterion, epoch, use_cuda, mode):
    """Run one evaluation pass over ``valloader``.

    ``epoch`` is unused here (kept for signature parity with the training
    loop); ``mode`` only labels the progress bar.  Returns
    ``(average_loss, average_top1_accuracy)``.
    """
    batch_time = AverageMeter()  # wall time per batch (forward + bookkeeping)
    data_time = AverageMeter()   # time spent waiting on the dataloader
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # Disable dropout / freeze batch-norm statistics for evaluation.
    model.eval()
    end = time.time()
    bar = Bar(f'{mode}', max=len(valloader))
    with torch.no_grad():
        for (batch_idx, (inputs, targets)) in enumerate(valloader):
            data_time.update((time.time() - end))
            if use_cuda:
                (inputs, targets) = (inputs.cuda(), targets.cuda(non_blocking=True))
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            (prec1, prec5) = accuracy(outputs, targets, topk=(1, 5))
            # Weight the running averages by batch size so a smaller final
            # batch does not skew the epoch averages.
            losses.update(loss.item(), inputs.size(0))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
            batch_time.update((time.time() - end))
            end = time.time()
            bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(batch=(batch_idx + 1), size=len(valloader), data=data_time.avg, bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, top1=top1.avg, top5=top5.avg)
            bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
class DIV2K(srdata.SRData):
    """DIV2K super-resolution dataset: paired HR images and per-scale LR images."""

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        # Repeat the training set so one "epoch" spans args.test_every batches.
        # NOTE(review): assumes n_train >= batch_size, otherwise this divides
        # by zero — confirm against caller configuration.
        self.repeat = (args.test_every // (args.n_train // args.batch_size))

    def _scan(self):
        """Collect HR file paths and, for each scale, the matching LR paths."""
        list_hr = []
        list_lr = [[] for _ in self.scale]
        if self.train:
            idx_begin = 0
            idx_end = self.args.n_train
        else:
            # Validation images follow the training split in the numbering.
            idx_begin = self.args.n_train
            idx_end = (self.args.offset_val + self.args.n_val)
        # File names are 1-based and zero-padded to four digits (e.g. 0001).
        for i in range((idx_begin + 1), (idx_end + 1)):
            filename = '{:0>4}'.format(i)
            list_hr.append(os.path.join(self.dir_hr, (filename + self.ext)))
            for (si, s) in enumerate(self.scale):
                # LR layout: <dir_lr>/X<scale>/<name>x<scale><ext>
                list_lr[si].append(os.path.join(self.dir_lr, 'X{}/{}x{}{}'.format(s, filename, s, self.ext)))
        return (list_hr, list_lr)

    def _set_filesystem(self, dir_data):
        """Resolve dataset directories under <dir_data>/DIV2K/train."""
        self.apath = ((dir_data + '/DIV2K') + '/train')
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(self.apath, ('DIV2K_train_LR_' + self.args.manner_of_downsampling))
        self.ext = '.png'

    def _name_hrbin(self):
        # Path of the cached .npy binary holding all HR images of this split.
        return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))

    def _name_lrbin(self, scale):
        # Path of the cached .npy binary holding LR images at the given scale.
        return os.path.join(self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale))

    def __len__(self):
        # Training length is inflated by `repeat` (see __init__).
        if self.train:
            return (len(self.images_hr) * self.repeat)
        else:
            return len(self.images_hr)

    def _get_index(self, idx):
        # Map an inflated training index back onto a real image index.
        if self.train:
            return (idx % len(self.images_hr))
        else:
            return idx
class ResUNetSP(ME.MinkowskiNetwork):
    """Sparse residual U-Net (Minkowski Engine) producing per-point features.

    Encoder: conv1/block1 at full resolution, then two sum-pool downsamplings
    (strides 2 and 4) each followed by a 1x1 conv and residual block.
    Decoder: two transpose-poolings with skip connections (ME.cat) back to
    full resolution, then a final 1x1 conv to ``out_channels``.  When
    ``normalize_feature`` is set the output features are L2-normalized.
    """

    NORM_TYPE = 'BN'
    BLOCK_NORM_TYPE = 'BN'
    # Index 0 is unused so that CHANNELS[i] is the width of pyramid level i.
    CHANNELS = [None, 32, 64, 128]
    TR_CHANNELS = [None, 32, 64, 64]
    DEPTHS = [None, 1, 1, 1, 1, 1, None]
    REGION_TYPE = ME.RegionType.HYPER_CUBE

    def __init__(self, in_channels=3, out_channels=32, bn_momentum=0.1, conv1_kernel_size=3, normalize_feature=False, D=3):
        ME.MinkowskiNetwork.__init__(self, D)
        NORM_TYPE = self.NORM_TYPE
        BLOCK_NORM_TYPE = self.BLOCK_NORM_TYPE
        CHANNELS = self.CHANNELS
        TR_CHANNELS = self.TR_CHANNELS
        DEPTHS = self.DEPTHS
        REGION_TYPE = self.REGION_TYPE
        self.normalize_feature = normalize_feature
        # --- encoder, level 1 (full resolution) ---
        self.conv1 = conv(in_channels=in_channels, out_channels=CHANNELS[1], kernel_size=conv1_kernel_size, stride=1, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm1 = get_norm(NORM_TYPE, CHANNELS[1], bn_momentum=bn_momentum, dimension=D)
        self.block1 = nn.Sequential(*[get_block(BLOCK_NORM_TYPE, CHANNELS[1], CHANNELS[1], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D) for _ in range(DEPTHS[1])])
        # --- encoder, level 2 (stride 2) ---
        self.pool2 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.conv2 = conv(in_channels=CHANNELS[1], out_channels=CHANNELS[2], kernel_size=1, stride=1, dilation=1, bias=False, dimension=D)
        self.norm2 = get_norm(NORM_TYPE, CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2 = nn.Sequential(*[get_block(BLOCK_NORM_TYPE, CHANNELS[2], CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D) for _ in range(DEPTHS[2])])
        # --- encoder, level 3 (stride 4) ---
        self.pool3 = ME.MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
        self.conv3 = conv(in_channels=CHANNELS[2], out_channels=CHANNELS[3], kernel_size=1, stride=1, dilation=1, bias=False, dimension=D)
        self.norm3 = get_norm(NORM_TYPE, CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3 = nn.Sequential(*[get_block(BLOCK_NORM_TYPE, CHANNELS[3], CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D) for _ in range(DEPTHS[3])])
        # --- decoder, back to stride 2 ---
        self.pool3_tr = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
        self.conv3_tr = conv_tr(in_channels=CHANNELS[3], out_channels=TR_CHANNELS[3], kernel_size=1, stride=1, dilation=1, bias=False, dimension=D)
        self.norm3_tr = get_norm(NORM_TYPE, TR_CHANNELS[3], bn_momentum=bn_momentum, dimension=D)
        self.block3_tr = nn.Sequential(*[get_block(BLOCK_NORM_TYPE, TR_CHANNELS[3], TR_CHANNELS[3], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D) for _ in range(DEPTHS[(- 3)])])
        # --- decoder, back to full resolution ---
        self.pool2_tr = ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D)
        self.conv2_tr = conv_tr(in_channels=(CHANNELS[2] + TR_CHANNELS[3]), out_channels=TR_CHANNELS[2], kernel_size=1, stride=1, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.norm2_tr = get_norm(NORM_TYPE, TR_CHANNELS[2], bn_momentum=bn_momentum, dimension=D)
        self.block2_tr = nn.Sequential(*[get_block(BLOCK_NORM_TYPE, TR_CHANNELS[2], TR_CHANNELS[2], bn_momentum=bn_momentum, region_type=REGION_TYPE, dimension=D) for _ in range(DEPTHS[(- 2)])])
        self.conv1_tr = conv_tr(in_channels=(CHANNELS[1] + TR_CHANNELS[2]), out_channels=TR_CHANNELS[1], kernel_size=1, stride=1, dilation=1, bias=False, region_type=REGION_TYPE, dimension=D)
        self.final = conv(in_channels=TR_CHANNELS[1], out_channels=out_channels, kernel_size=1, stride=1, dilation=1, bias=True, dimension=D)

    def forward(self, x):
        # Encoder level 1 (stride 1).
        out_s1 = self.conv1(x)
        out_s1 = self.norm1(out_s1)
        out_s1 = MEF.relu(out_s1)
        out_s1 = self.block1(out_s1)
        # Encoder level 2 (stride 2).
        out_s2 = self.pool2(out_s1)
        out_s2 = self.conv2(out_s2)
        out_s2 = self.norm2(out_s2)
        out_s2 = MEF.relu(out_s2)
        out_s2 = self.block2(out_s2)
        # Encoder level 3 (stride 4).
        out_s4 = self.pool3(out_s2)
        out_s4 = self.conv3(out_s4)
        out_s4 = self.norm3(out_s4)
        out_s4 = MEF.relu(out_s4)
        out_s4 = self.block3(out_s4)
        # Decoder: stride 4 -> stride 2, concatenate with encoder level 2.
        out_s2t = self.pool3_tr(out_s4)
        out_s2t = self.conv3_tr(out_s2t)
        out_s2t = self.norm3_tr(out_s2t)
        out_s2t = MEF.relu(out_s2t)
        out_s2t = self.block3_tr(out_s2t)
        out = ME.cat(out_s2t, out_s2)
        # Decoder: stride 2 -> stride 1, concatenate with encoder level 1.
        out_s1t = self.conv2_tr(out)
        # FIX: was self.pool3_tr — pool2_tr is the layer built for this level
        # and was previously unused. (Both pools are configured identically
        # and are parameter-free, so numerical behavior is unchanged.)
        out_s1t = self.pool2_tr(out_s1t)
        out_s1t = self.norm2_tr(out_s1t)
        out_s1t = MEF.relu(out_s1t)
        out_s1t = self.block2_tr(out_s1t)
        out = ME.cat(out_s1t, out_s1)
        out = self.conv1_tr(out)
        out = MEF.relu(out)
        out = self.final(out)
        if self.normalize_feature:
            # L2-normalize each point's feature vector (epsilon avoids /0).
            return ME.SparseTensor((out.F / (torch.norm(out.F, p=2, dim=1, keepdim=True) + 1e-08)), coordinate_map_key=out.coordinate_map_key, coordinate_manager=out.coordinate_manager)
        else:
            return out
def _color(img, magnitude):
    """Randomly strengthen or weaken the color saturation of ``img``.

    The enhancement factor is 1 +/- ``magnitude``, with the sign chosen
    uniformly at random.
    """
    sign = random.choice([(- 1), 1])
    return ImageEnhance.Color(img).enhance(1 + (magnitude * sign))
class ZeroBridge(Bridge):
    """Bridge that ignores the encoder output and zero-initializes the
    decoder state."""

    # FIX: default_params was declared without `self` and without
    # @staticmethod, so calling it on an instance (self.default_params())
    # raised TypeError. @staticmethod keeps class-level calls working and
    # makes instance calls valid too — backward compatible.
    @staticmethod
    def default_params():
        # This bridge has no configurable parameters.
        return {}

    def _create(self):
        # Build a [batch_size, size] zero tensor for every component of the
        # (possibly nested) decoder state structure.
        zero_state = nest.map_structure(
            (lambda x: tf.zeros([self.batch_size, x], dtype=tf.float32)),
            self.decoder_state_size)
        return zero_state
class GroupViTTextModel(metaclass=DummyObject):
    """Placeholder emitted when the real implementation is unavailable.

    Any attempt to instantiate it goes through ``requires_backends``, which
    presumably raises an informative error telling the user to install the
    missing backend (torch) — confirm against the utils module.
    """
    # Backends this dummy stands in for.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=(7.0 / 16.0), last_epoch=(- 1)):
    """LambdaLR schedule: linear warmup followed by a cosine decay.

    During the first ``num_warmup_steps`` the LR factor ramps linearly from
    0 to 1; afterwards it follows ``0.5 * (1 + cos(pi * num_cycles * t))``
    where t is the fraction of the remaining training steps completed,
    clamped at 0 from below.
    """
    def _lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear warmup (max(1, .) guards against a zero-length warmup).
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        cosine = math.cos((math.pi * num_cycles) * progress)
        return max(0.0, (cosine + 1) * 0.5)

    return LambdaLR(optimizer, _lr_lambda, last_epoch)
def to_katakana(str):
    """Transliterate a romaji string into katakana via the ROMPAT/ROMKAN tables."""
    # NOTE(review): the parameter shadows the builtin `str`; kept as-is so
    # keyword callers (to_katakana(str=...)) are not broken.
    normalized = normalize_double_n(str.lower())
    return ROMPAT.sub((lambda m: ROMKAN[m.group(0)]), normalized)
def create_data():
    """Generate a random univariate series plus a validation tail.

    Returns ``(data, validation_data)`` where ``data`` has 400 points and
    ``validation_data`` has a random horizon of 2-49 points.  RNG draws are
    made in the same order as always (series, horizon, validation) so the
    output is reproducible under a fixed numpy seed.
    """
    data = np.random.rand(400)
    horizon = np.random.randint(2, 50)
    validation_data = np.random.rand(horizon)
    return (data, validation_data)
# FIX: the decorator had been truncated to a bare `.parametrize(...)`
# (a syntax error); restore the `@pytest.mark` prefix.
@pytest.mark.parametrize('arch, expected_out_shape', [('resnet', 512), ('shufflenet', 1024), ('resnext', 2048), ('wide_resnet', 2048), ('regnet', 912), ('mobilenet', 1280), ('mnasnet', 1280), ('squeezenet', 512), ({'shufflenet': ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1}, 1024), ({'resnext': ResNeXt50_32X4D_Weights.IMAGENET1K_V2}, 2048)])
def test_pretrained_model_setup_defaults(arch, expected_out_shape):
    """A Vision model with a frozen pretrained backbone must map the
    module-level batch X_images (10 images) to (10, expected_out_shape)."""
    model = Vision(pretrained_model_setup=arch, n_trainable=0)
    out = model(X_images)
    assert ((out.size(0) == 10) and (out.size(1) == expected_out_shape))
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 (strided/grouped/dilated)
    -> 1x1 expand, with a residual shortcut (optionally downsampled)."""

    expansion = 4
    num_layers = 3

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Width of the intermediate 3x3 convolution, scaled for wide/grouped
        # variants (wide_resnet, resnext).
        width = int(planes * (base_width / 64.0)) * groups
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
def is_2d_tensor(x_tensor):
    """Return True iff ``x_tensor`` is a torch.Tensor with exactly two dims."""
    if not isinstance(x_tensor, torch.Tensor):
        return False
    return x_tensor.dim() == 2
class DGN(nn.Module):
    """Graph-convolutional Q-network: per-agent encoder, two masked
    attention layers, and a Q-value head."""

    def __init__(self, n_agent, num_inputs, hidden_dim, num_actions):
        super(DGN, self).__init__()
        self.encoder = Encoder(num_inputs, hidden_dim)
        self.att_1 = AttModel(n_agent, hidden_dim, hidden_dim, hidden_dim)
        self.att_2 = AttModel(n_agent, hidden_dim, hidden_dim, hidden_dim)
        self.q_net = Q_Net(hidden_dim, num_actions)

    def forward(self, x, mask):
        # Encode observations, then refine twice with attention restricted
        # by the adjacency mask before scoring actions.
        features = self.encoder(x)
        features = self.att_1(features, mask)
        features = self.att_2(features, mask)
        return self.q_net(features)
def get_net_instance(net_type, net_name, *args, **kwargs):
    """Resolve a network by type/name, failing loudly when the lookup misses.

    Extra positional/keyword arguments are forwarded to the factory.
    Raises AssertionError if no matching net exists.
    """
    instance = try_get_net_instance(net_type, net_name, *args, **kwargs)
    assert instance is not None, 'Cannot find such a net'
    return instance
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.