code stringlengths 17 6.64M |
|---|
class Demo(data.Dataset):
    """Dataset over a directory of demo images (no ground-truth HR targets)."""

    def __init__(self, args, train=False):
        self.args = args
        self.name = 'Demo'
        self.scale = args.scale
        self.idx_scale = 0
        # Demo data is never used for training or benchmarking.
        self.train = False
        self.benchmark = False
        # Collect every .png / .jp(e)g file in the demo directory, sorted.
        self.filelist = sorted(
            os.path.join(args.dir_demo, name)
            for name in os.listdir(args.dir_demo)
            if '.png' in name or '.jp' in name
        )

    def __getitem__(self, idx):
        """Return (lr_tensor, -1, basename); -1 stands in for the missing HR."""
        path = self.filelist[idx]
        name, _ = os.path.splitext(os.path.split(path)[-1])
        image = misc.imread(path)
        image = common.set_channel([image], self.args.n_colors)[0]
        return common.np2Tensor([image], self.args.rgb_range)[0], -1, name

    def __len__(self):
        return len(self.filelist)

    def set_scale(self, idx_scale):
        """Select which entry of args.scale subsequent items are produced for."""
        self.idx_scale = idx_scale
|
class DIV2K(srdata.SRData):
    """DIV2K dataset wrapper around srdata.SRData.

    Training images are repeated so that one logical "epoch" contains roughly
    ``args.test_every`` batches.
    """

    def __init__(self, args, train=True):
        super(DIV2K, self).__init__(args, train)
        # BUGFIX: guard against ZeroDivisionError when n_train < batch_size.
        batches_per_pass = max(args.n_train // args.batch_size, 1)
        self.repeat = args.test_every // batches_per_pass

    def _scan(self):
        """Build the list of HR file paths and the per-scale LR path lists."""
        list_hr = []
        list_lr = [[] for _ in self.scale]
        if self.train:
            idx_begin = 0
            idx_end = self.args.n_train
        else:
            idx_begin = self.args.n_train
            idx_end = self.args.offset_val + self.args.n_val
        # DIV2K files are numbered 0001, 0002, ... (1-based, zero-padded).
        for i in range(idx_begin + 1, idx_end + 1):
            filename = '{:0>4}'.format(i)
            list_hr.append(os.path.join(self.dir_hr, filename + self.ext))
            for si, s in enumerate(self.scale):
                list_lr[si].append(os.path.join(
                    self.dir_lr, 'X{}/{}x{}{}'.format(s, filename, s, self.ext)
                ))
        return list_hr, list_lr

    def _set_filesystem(self, dir_data):
        """Set paths for the HR/LR image directories and the file extension."""
        self.apath = (dir_data + '/DIV2K') + '/train'
        self.dir_hr = os.path.join(self.apath, 'DIV2K_train_HR')
        self.dir_lr = os.path.join(
            self.apath, 'DIV2K_train_LR_' + self.args.manner_of_downsampling)
        self.ext = '.png'

    def _name_hrbin(self):
        """Path of the cached HR numpy binary for this split."""
        return os.path.join(self.apath, 'bin', '{}_bin_HR.npy'.format(self.split))

    def _name_lrbin(self, scale):
        """Path of the cached LR numpy binary for this split and scale."""
        return os.path.join(self.apath, 'bin', '{}_bin_LR_X{}.npy'.format(self.split, scale))

    def __len__(self):
        # Training epochs are artificially lengthened by self.repeat.
        if self.train:
            return len(self.images_hr) * self.repeat
        else:
            return len(self.images_hr)

    def _get_index(self, idx):
        """Map a (possibly repeated) index back to a real image index."""
        if self.train:
            return idx % len(self.images_hr)
        else:
            return idx
|
def _ms_loop(dataset, index_queue, data_queue, collate_fn, scale, seed, init_fn, worker_id):
    """Worker loop for MSDataLoader.

    Same protocol as torch's private _worker_loop, except that when training
    with multiple scales a random scale index is drawn per batch and appended
    to the collated samples.
    """
    global _use_shared_memory
    _use_shared_memory = True
    _set_worker_signal_handlers()
    torch.set_num_threads(1)
    torch.manual_seed(seed)
    while True:
        job = index_queue.get()
        if job is None:
            # Sentinel from the main process: shut this worker down.
            break
        idx, batch_indices = job
        try:
            idx_scale = 0
            if dataset.train and len(scale) > 1:
                idx_scale = random.randrange(0, len(scale))
            dataset.set_scale(idx_scale)
            samples = collate_fn([dataset[i] for i in batch_indices])
            samples.append(idx_scale)
        except Exception:
            # Ship the traceback back to the main process instead of dying.
            data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
        else:
            data_queue.put((idx, samples))
|
class _MSDataLoaderIter(_DataLoaderIter):
    """Iterator for MSDataLoader.

    Mirrors the private torch.utils.data.dataloader._DataLoaderIter of the
    torch version this was written against, except that workers run _ms_loop
    so each collated batch also carries a randomly drawn scale index.

    NOTE(review): the statement order below (queue setup, worker start, pid
    registration, index priming) matches torch's internals and must not be
    rearranged.
    """

    def __init__(self, loader):
        # Copy configuration from the owning loader.
        self.dataset = loader.dataset
        self.scale = loader.scale
        self.collate_fn = loader.collate_fn
        self.batch_sampler = loader.batch_sampler
        self.num_workers = loader.num_workers
        # Only pin memory when CUDA is actually available.
        self.pin_memory = (loader.pin_memory and torch.cuda.is_available())
        self.timeout = loader.timeout
        self.done_event = threading.Event()
        self.sample_iter = iter(self.batch_sampler)
        if (self.num_workers > 0):
            self.worker_init_fn = loader.worker_init_fn
            self.index_queues = [multiprocessing.Queue() for _ in range(self.num_workers)]
            self.worker_queue_idx = 0  # round-robin worker index for _put_indices
            self.worker_result_queue = multiprocessing.SimpleQueue()
            self.batches_outstanding = 0
            self.worker_pids_set = False
            self.shutdown = False
            self.send_idx = 0
            self.rcvd_idx = 0
            self.reorder_dict = {}  # holds batches that arrive out of order
            base_seed = torch.LongTensor(1).random_()[0]
            # Spawn workers running _ms_loop instead of torch's _worker_loop.
            self.workers = [multiprocessing.Process(target=_ms_loop, args=(self.dataset, self.index_queues[i], self.worker_result_queue, self.collate_fn, self.scale, (base_seed + i), self.worker_init_fn, i)) for i in range(self.num_workers)]
            if (self.pin_memory or (self.timeout > 0)):
                # A manager thread moves worker results into a local queue
                # (pinning tensors on the way if requested).
                self.data_queue = queue.Queue()
                if self.pin_memory:
                    maybe_device_id = torch.cuda.current_device()
                else:
                    maybe_device_id = None
                self.worker_manager_thread = threading.Thread(target=_worker_manager_loop, args=(self.worker_result_queue, self.data_queue, self.done_event, self.pin_memory, maybe_device_id))
                self.worker_manager_thread.daemon = True
                self.worker_manager_thread.start()
            else:
                self.data_queue = self.worker_result_queue
            for w in self.workers:
                w.daemon = True  # don't block interpreter exit on workers
                w.start()
            # Register worker pids with torch for SIGCHLD-based error reporting.
            _update_worker_pids(id(self), tuple((w.pid for w in self.workers)))
            _set_SIGCHLD_handler()
            self.worker_pids_set = True
            # Prefetch: keep two batches in flight per worker.
            for _ in range((2 * self.num_workers)):
                self._put_indices()
|
class MSDataLoader(DataLoader):
    """DataLoader whose iterator draws a random scale index per batch."""

    def __init__(self, args, dataset, batch_size=1, shuffle=False,
                 sampler=None, batch_sampler=None, collate_fn=default_collate,
                 pin_memory=False, drop_last=False, timeout=0,
                 worker_init_fn=None):
        super(MSDataLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            batch_sampler=batch_sampler,
            num_workers=args.n_threads,
            collate_fn=collate_fn,
            pin_memory=pin_memory,
            drop_last=drop_last,
            timeout=timeout,
            worker_init_fn=worker_init_fn,
        )
        self.scale = args.scale

    def __iter__(self):
        return _MSDataLoaderIter(self)
|
class Adversarial(nn.Module):
    """Adversarial (GAN) loss that trains its own discriminator.

    Supported gan_type values: 'GAN' (vanilla, BCE-with-logits), 'WGAN'
    (weight clipping) and 'WGAN_GP' (gradient penalty).
    """

    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k  # discriminator updates per generator update
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if gan_type != 'WGAN_GP':
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # Adam settings recommended for WGAN-GP (Gulrajani et al. 2017).
            self.optimizer = optim.Adam(
                self.discriminator.parameters(),
                betas=(0, 0.9), eps=1e-08, lr=1e-05
            )
        self.scheduler = utility.make_scheduler(args, self.optimizer)

    def forward(self, fake, real):
        """Update the discriminator gan_k times, then return the generator loss.

        The mean discriminator loss over the k updates is stored in self.loss.
        """
        fake_detach = fake.detach()
        self.loss = 0
        for _ in range(self.gan_k):
            self.optimizer.zero_grad()
            d_fake = self.discriminator(fake_detach)
            d_real = self.discriminator(real)
            if self.gan_type == 'GAN':
                label_fake = torch.zeros_like(d_fake)
                label_real = torch.ones_like(d_real)
                loss_d = F.binary_cross_entropy_with_logits(d_fake, label_fake) \
                    + F.binary_cross_entropy_with_logits(d_real, label_real)
            elif self.gan_type.find('WGAN') >= 0:
                loss_d = (d_fake - d_real).mean()
                if self.gan_type.find('GP') >= 0:
                    # BUGFIX: the interpolation weight must be one scalar per
                    # sample. torch.rand_like(fake).view(-1, 1, 1, 1) produced
                    # B*C*H*W entries and broke broadcasting against (B,C,H,W).
                    epsilon = torch.rand(
                        fake.size(0), 1, 1, 1,
                        device=fake.device, dtype=fake.dtype
                    )
                    hat = fake_detach.mul(1 - epsilon) + real.mul(epsilon)
                    hat.requires_grad = True
                    d_hat = self.discriminator(hat)
                    gradients = torch.autograd.grad(
                        outputs=d_hat.sum(), inputs=hat,
                        retain_graph=True, create_graph=True, only_inputs=True
                    )[0]
                    gradients = gradients.view(gradients.size(0), -1)
                    gradient_norm = gradients.norm(2, dim=1)
                    # Penalize deviation of the gradient norm from 1.
                    gradient_penalty = 10 * gradient_norm.sub(1).pow(2).mean()
                    loss_d += gradient_penalty
            self.loss += loss_d.item()
            loss_d.backward()
            self.optimizer.step()
            if self.gan_type == 'WGAN':
                # Vanilla WGAN enforces the Lipschitz constraint by clipping.
                for p in self.discriminator.parameters():
                    p.data.clamp_(-1, 1)
        self.loss /= self.gan_k
        d_fake_for_g = self.discriminator(fake)
        if self.gan_type == 'GAN':
            # BUGFIX: build the target here instead of reusing label_real from
            # the loop above, which was unbound whenever gan_k == 0.
            loss_g = F.binary_cross_entropy_with_logits(
                d_fake_for_g, torch.ones_like(d_fake_for_g)
            )
        elif self.gan_type.find('WGAN') >= 0:
            loss_g = -d_fake_for_g.mean()
        return loss_g

    def state_dict(self, *args, **kwargs):
        """Serialize both the discriminator and its optimizer state."""
        state_discriminator = self.discriminator.state_dict(*args, **kwargs)
        state_optimizer = self.optimizer.state_dict()
        return dict(**state_discriminator, **state_optimizer)
|
class Discriminator(nn.Module):
    """Image discriminator: strided conv feature stack followed by a
    two-layer linear classifier producing a single logit per image."""

    def __init__(self, args, gan_type='GAN'):
        super(Discriminator, self).__init__()
        out_channels = 64
        depth = 7
        bn = True
        act = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # First block maps the input colors to 64 features.
        m_features = [common.BasicBlock(args.n_colors, out_channels, 3, bn=bn, act=act)]
        # Alternate between downsampling (stride 2) and widening blocks.
        for i in range(depth):
            in_channels = out_channels
            if i % 2 == 1:
                stride = 1
                out_channels *= 2
            else:
                stride = 2
            m_features.append(
                common.BasicBlock(in_channels, out_channels, 3, stride=stride, bn=bn, act=act)
            )
        self.features = nn.Sequential(*m_features)
        # Spatial size after the (depth + 1) // 2 stride-2 blocks.
        patch_size = args.patch_size // (2 ** ((depth + 1) // 2))
        self.classifier = nn.Sequential(
            nn.Linear(out_channels * patch_size ** 2, 1024),
            act,
            nn.Linear(1024, 1),
        )

    def forward(self, x):
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
|
class VGG(nn.Module):
    """VGG19 perceptual loss: MSE between VGG feature maps of SR and HR.

    conv_index selects the feature layer: '22' uses the first 8 modules
    (relu2_2), '54' the first 35 (relu5_4).
    """

    def __init__(self, conv_index, rgb_range=1):
        super(VGG, self).__init__()
        vgg_features = models.vgg19(pretrained=True).features
        modules = [m for m in vgg_features]
        if conv_index == '22':
            self.vgg = nn.Sequential(*modules[:8])
        elif conv_index == '54':
            self.vgg = nn.Sequential(*modules[:35])
        # ImageNet statistics, rescaled to the data range of the inputs.
        vgg_mean = (0.485, 0.456, 0.406)
        vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
        self.sub_mean = common.MeanShift(rgb_range, vgg_mean, vgg_std)
        # BUGFIX: 'self.vgg.requires_grad = False' only set a plain attribute
        # on the Module and froze nothing; freeze the parameters themselves.
        for p in self.vgg.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        """Return MSE between VGG features of sr and the (detached) hr."""
        def _forward(x):
            x = self.sub_mean(x)
            x = self.vgg(x)
            return x
        vgg_sr = _forward(sr)
        with torch.no_grad():
            vgg_hr = _forward(hr.detach())
        loss = F.mse_loss(vgg_sr, vgg_hr)
        return loss
|
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """2-D convolution with 'same' padding for odd kernel sizes."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)
|
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that removes (sign=-1) or restores (sign=+1) a
    per-channel mean, optionally normalizing by a per-channel std:
    y = (x + sign * rgb_range * mean) / std.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=(- 1)):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity kernel scaled by 1/std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = (sign * rgb_range) * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # BUGFIX: 'self.requires_grad = False' only set a plain attribute on
        # the Module; freeze the actual parameters so optimizers skip them.
        for p in self.parameters():
            p.requires_grad = False
|
class BasicBlock(nn.Sequential):
    """Conv -> (optional BatchNorm) -> (optional activation), with 'same'
    padding for odd kernel sizes."""

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, bias=True, bn=False, act=nn.ReLU(True)):
        layers = [nn.Conv2d(
            in_channels, out_channels, kernel_size,
            padding=kernel_size // 2, stride=stride, bias=bias
        )]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
|
class ResBlock(nn.Module):
    """Residual block: conv-act-conv (optional BN), scaled by res_scale and
    added to the identity shortcut."""

    def __init__(self, conv, n_feat, kernel_size, bias=True, bn=False,
                 act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for i in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if i == 0:
                # Activation only between the two convolutions.
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.body(x).mul(self.res_scale)
|
class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampler supporting scales 2^n and 3."""

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: chain log2(scale) x2 PixelShuffle stages.
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
|
def make_model(args, parent=False):
    """Factory hook used by the framework's generic model loader."""
    return DDBPN(args)
|
def projection_conv(in_channels, out_channels, scale, up=True):
    """Conv (down) or transposed conv (up) whose kernel/stride/padding are
    matched to the given scale factor (2, 4 or 8)."""
    kernel_size, stride, padding = {
        2: (6, 2, 2),
        4: (8, 4, 2),
        8: (12, 8, 2),
    }[scale]
    conv_f = nn.ConvTranspose2d if up else nn.Conv2d
    return conv_f(in_channels, out_channels, kernel_size, stride=stride, padding=padding)
|
class DenseProjection(nn.Module):
    """DBPN up/down projection unit: project, back-project, then refine with
    the residual between the input and its back-projection."""

    def __init__(self, in_channels, nr, scale, up=True, bottleneck=True):
        super(DenseProjection, self).__init__()
        if bottleneck:
            # 1x1 conv reduces the densely concatenated input to nr channels.
            self.bottleneck = nn.Sequential(
                nn.Conv2d(in_channels, nr, 1),
                nn.PReLU(nr),
            )
            inter_channels = nr
        else:
            self.bottleneck = None
            inter_channels = in_channels
        self.conv_1 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr),
        )
        self.conv_2 = nn.Sequential(
            projection_conv(nr, inter_channels, scale, not up),
            nn.PReLU(inter_channels),
        )
        self.conv_3 = nn.Sequential(
            projection_conv(inter_channels, nr, scale, up),
            nn.PReLU(nr),
        )

    def forward(self, x):
        if self.bottleneck is not None:
            x = self.bottleneck(x)
        projected = self.conv_1(x)
        back_projected = self.conv_2(projected)
        residual = back_projected.sub(x)
        return projected.add(self.conv_3(residual))
|
class DDBPN(nn.Module):
    """Dense Deep Back-Projection Network (Haris et al., CVPR 2018).

    Alternates dense up- and down-projection stages; the outputs of all
    up-projections are concatenated for the final reconstruction.
    """

    def __init__(self, args):
        super(DDBPN, self).__init__()
        scale = args.scale[0]
        n0 = 128  # width of the initial feature extraction
        nr = 32   # width of each projection stage
        self.depth = 6
        # DIV2K channel means; std of 1 leaves the range untouched.
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        initial = [nn.Conv2d(args.n_colors, n0, 3, padding=1), nn.PReLU(n0), nn.Conv2d(n0, nr, 1), nn.PReLU(nr)]
        self.initial = nn.Sequential(*initial)
        self.upmodules = nn.ModuleList()
        self.downmodules = nn.ModuleList()
        # Dense connectivity: each stage consumes the concat of all previous
        # outputs, so the input width grows by nr per stage. The first two
        # up-projections (i <= 1) skip the 1x1 bottleneck.
        channels = nr
        for i in range(self.depth):
            self.upmodules.append(DenseProjection(channels, nr, scale, True, (i > 1)))
            if (i != 0):
                channels += nr
        channels = nr
        for i in range((self.depth - 1)):
            self.downmodules.append(DenseProjection(channels, nr, scale, False, (i != 0)))
            channels += nr
        reconstruction = [nn.Conv2d((self.depth * nr), args.n_colors, 3, padding=1)]
        self.reconstruction = nn.Sequential(*reconstruction)
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.initial(x)
        h_list = []  # up-projection outputs (HR space)
        l_list = []  # down-projection outputs (LR space)
        for i in range((self.depth - 1)):
            if (i == 0):
                l = x
            else:
                l = torch.cat(l_list, dim=1)
            h_list.append(self.upmodules[i](l))
            l_list.append(self.downmodules[i](torch.cat(h_list, dim=1)))
        # The final up-projection consumes every LR feature produced so far.
        h_list.append(self.upmodules[(- 1)](torch.cat(l_list, dim=1)))
        out = self.reconstruction(torch.cat(h_list, dim=1))
        out = self.add_mean(out)
        return out
|
def make_model(args, parent=False):
    """Factory hook used by the framework's generic model loader."""
    return EDSR(args)
|
class EDSR(nn.Module):
    """Enhanced Deep Super-Resolution network (Lim et al., CVPR-W 2017)."""

    def __init__(self, args, conv=common.default_conv):
        super(EDSR, self).__init__()
        n_resblock = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        scale = args.scale[0]
        act = nn.ReLU(True)
        # DIV2K channel means; std of 1 means no rescaling.
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        head = [conv(args.n_colors, n_feats, kernel_size)]
        body = [
            common.ResBlock(conv, n_feats, kernel_size, act=act, res_scale=args.res_scale)
            for _ in range(n_resblock)
        ]
        body.append(conv(n_feats, n_feats, kernel_size))
        tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, args.n_colors, kernel_size),
        ]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        # Global residual connection around the whole body.
        res = self.body(x) + x
        x = self.tail(res)
        return self.add_mean(x)

    def load_state_dict(self, state_dict, strict=True):
        """Copy parameters, tolerating mismatches in the 'tail' so that a
        checkpoint trained at one scale can seed a model for another."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name not in own_state:
                if strict and name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'.format(name))
                continue
            if isinstance(param, nn.Parameter):
                param = param.data
            try:
                own_state[name].copy_(param)
            except Exception:
                if name.find('tail') == -1:
                    raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.size()))
|
def make_model(args, parent=False):
    """Factory hook used by the framework's generic model loader."""
    return MDSR(args)
|
class MDSR(nn.Module):
    """Multi-scale DSR: a shared residual trunk with per-scale pre-processing
    and upsampling branches, selected at runtime via set_scale()."""

    def __init__(self, args, conv=common.default_conv):
        super(MDSR, self).__init__()
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        self.scale_idx = 0
        act = nn.ReLU(True)
        rgb_mean = (0.4488, 0.4371, 0.404)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std)
        head = [conv(args.n_colors, n_feats, kernel_size)]
        # One 5x5 residual pre-processing branch per supported scale.
        self.pre_process = nn.ModuleList([
            nn.Sequential(
                common.ResBlock(conv, n_feats, 5, act=act),
                common.ResBlock(conv, n_feats, 5, act=act),
            )
            for _ in args.scale
        ])
        body = [common.ResBlock(conv, n_feats, kernel_size, act=act)
                for _ in range(n_resblocks)]
        body.append(conv(n_feats, n_feats, kernel_size))
        # One sub-pixel upsampler per supported scale.
        self.upsample = nn.ModuleList([
            common.Upsampler(conv, s, n_feats, act=False) for s in args.scale
        ])
        tail = [conv(n_feats, args.n_colors, kernel_size)]
        self.add_mean = common.MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = nn.Sequential(*tail)

    def forward(self, x):
        x = self.sub_mean(x)
        x = self.head(x)
        x = self.pre_process[self.scale_idx](x)
        # Global residual connection around the shared trunk.
        res = self.body(x) + x
        x = self.upsample[self.scale_idx](res)
        x = self.tail(x)
        return self.add_mean(x)

    def set_scale(self, scale_idx):
        """Choose which scale branch forward() will use."""
        self.scale_idx = scale_idx
|
def set_template(args):
    """Overwrite parsed options in-place according to args.template.

    Templates are matched by substring, so a single template string may
    trigger several presets at once.
    """
    if 'jpeg' in args.template:
        args.data_train = 'DIV2K_jpeg'
        args.data_test = 'DIV2K_jpeg'
        args.epochs = 200
        args.lr_decay = 100
    if 'EDSR_paper' in args.template:
        args.model = 'EDSR'
        args.n_resblocks = 32
        args.n_feats = 256
        args.res_scale = 0.1
    if 'MDSR' in args.template:
        args.model = 'MDSR'
        args.patch_size = 48
        args.epochs = 650
    if 'DDBPN' in args.template:
        args.model = 'DDBPN'
        args.patch_size = 128
        args.scale = '4'
        args.data_test = 'Set5'
        args.batch_size = 20
        args.epochs = 1000
        args.lr_decay = 500
        args.gamma = 0.1
        args.weight_decay = 0.0001
        args.loss = '1*MSE'
    if 'GAN' in args.template:
        args.epochs = 200
        args.lr = 5e-05
        args.lr_decay = 150
|
class Trainer():
    """Training/evaluation driver for a model returning a triple
    (sr, sr_refine1, sr_refine2); the loss sums over all three outputs.
    """

    def __init__(self, args, loader, my_model, my_loss, ckp):
        self.args = args
        self.scale = args.scale
        self.ckp = ckp  # checkpoint helper: logging, saving, PSNR log tensor
        self.loader_train = loader.loader_train
        self.loader_test = loader.loader_test
        self.model = my_model
        self.loss = my_loss
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        if (self.args.load != '.'):
            # Resuming: restore optimizer state and fast-forward the scheduler
            # by the number of epochs already logged.
            self.optimizer.load_state_dict(torch.load(os.path.join(ckp.dir, 'optimizer.pt')))
            for _ in range(len(ckp.log)):
                self.scheduler.step()
        self.error_last = 100000000.0  # last epoch's loss; used to skip divergent batches

    def train(self):
        """Run one training epoch."""
        self.scheduler.step()
        self.loss.step()
        epoch = (self.scheduler.last_epoch + 1)
        lr = self.scheduler.get_lr()[0]
        self.ckp.write_log('[Epoch {}]\tLearning rate: {:.2e}'.format(epoch, Decimal(lr)))
        self.loss.start_log()
        self.model.train()
        (timer_data, timer_model) = (utility.timer(), utility.timer())
        # NOTE: 'lr' is rebound from the learning rate to the low-res batch here.
        for (batch, (lr, hr, _, idx_scale)) in enumerate(self.loader_train):
            (lr, hr) = self.prepare([lr, hr])
            timer_data.hold()
            timer_model.tic()
            self.optimizer.zero_grad()
            (sr, sr_refine1, sr_refine2) = self.model(lr, idx_scale)
            # Supervise the main output and both refinement outputs.
            loss = ((self.loss(sr, hr) + self.loss(sr_refine1, hr)) + self.loss(sr_refine2, hr))
            if (loss.item() < (self.args.skip_threshold * self.error_last)):
                loss.backward()
                self.optimizer.step()
            else:
                # Skip batches whose loss exploded relative to the last epoch.
                print('Skip this batch {}! (Loss: {})'.format((batch + 1), loss.item()))
            timer_model.hold()
            if (((batch + 1) % self.args.print_every) == 0):
                self.ckp.write_log('[{}/{}]\t{}\t{:.1f}+{:.1f}s'.format(((batch + 1) * self.args.batch_size), len(self.loader_train.dataset), self.loss.display_loss(batch), timer_model.release(), timer_data.release()))
            timer_data.tic()
        self.loss.end_log(len(self.loader_train))
        self.error_last = self.loss.log[((- 1), (- 1))]

    def test(self):
        """Evaluate PSNR on the test loader for every configured scale."""
        epoch = (self.scheduler.last_epoch + 1)
        self.ckp.write_log('\nEvaluation:')
        self.ckp.add_log(torch.zeros(1, len(self.scale)))
        self.model.eval()
        timer_test = utility.timer()
        with torch.no_grad():
            for (idx_scale, scale) in enumerate(self.scale):
                eval_acc = 0
                eval_acc_refine1 = 0
                eval_acc_refine2 = 0
                self.loader_test.dataset.set_scale(idx_scale)
                tqdm_test = tqdm(self.loader_test, ncols=80)
                for (idx_img, (lr, hr, filename, _)) in enumerate(tqdm_test):
                    filename = filename[0]
                    # A 1-element hr marks "no ground truth" (demo inputs).
                    no_eval = (hr.nelement() == 1)
                    if (not no_eval):
                        (lr, hr) = self.prepare([lr, hr])
                    else:
                        lr = self.prepare([lr])[0]
                    (sr, sr_refine1, sr_refine2) = self.model(lr, idx_scale)
                    sr = utility.quantize(sr, self.args.rgb_range)
                    sr_refine1 = utility.quantize(sr_refine1, self.args.rgb_range)
                    sr_refine2 = utility.quantize(sr_refine2, self.args.rgb_range)
                    save_list = [sr]
                    if (not no_eval):
                        eval_acc += utility.calc_psnr(sr, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        eval_acc_refine1 += utility.calc_psnr(sr_refine1, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        eval_acc_refine2 += utility.calc_psnr(sr_refine2, hr, scale, self.args.rgb_range, benchmark=self.loader_test.dataset.benchmark)
                        save_list.extend([sr_refine1, sr_refine2, lr, hr])
                    if self.args.save_results:
                        self.ckp.save_results(filename, save_list, scale)
                # NOTE(review): averaged over the number of batches; this is a
                # per-image average only when the test batch size is 1.
                self.ckp.log[((- 1), idx_scale)] = (eval_acc / len(self.loader_test))
                best = self.ckp.log.max(0)
                self.ckp.write_log('[{} x{}]\tPSNR: {:.3f}, PSNR of refine1: {:.3f}, PSNR of refine2: {:.3f} (Best: {:.3f} @epoch {})'.format(self.args.data_test, scale, self.ckp.log[((- 1), idx_scale)], (eval_acc_refine1 / len(self.loader_test)), (eval_acc_refine2 / len(self.loader_test)), best[0][idx_scale], (best[1][idx_scale] + 1)))
        self.ckp.write_log('Total time: {:.2f}s\n'.format(timer_test.toc()), refresh=True)
        if (not self.args.test_only):
            self.ckp.save(self, epoch, is_best=((best[1][0] + 1) == epoch))

    def prepare(self, l, volatile=False):
        """Move every tensor in l to the target device (half precision if set).

        'volatile' is unused; kept for backward compatibility with old callers.
        """
        device = torch.device(('cpu' if self.args.cpu else 'cuda'))
        def _prepare(tensor):
            if (self.args.precision == 'half'):
                tensor = tensor.half()
            return tensor.to(device)
        return [_prepare(_l) for _l in l]

    def terminate(self):
        """Return True when training should stop (or after a test-only run)."""
        if self.args.test_only:
            self.test()
            return True
        else:
            epoch = (self.scheduler.last_epoch + 1)
            return (epoch >= self.args.epochs)
|
def angular_error(gt_mesh_name, gen_mesh_name, sample_num):
    """Compute a symmetric normal-consistency score between two meshes.

    Both surfaces are sampled sample_num times; for each sample on one mesh,
    the nearest sample on the other mesh is found with a KD-tree and the
    cosine similarity of the corresponding face normals is averaged. The two
    directional averages are combined as (|gt2gen| + |gen2gt|) / 2.

    Assumes face normals are unit length (trimesh normalizes them) so the
    dot product equals the cosine of the angle — TODO confirm for the meshes
    actually passed in.

    Args:
        gt_mesh_name: path to the ground-truth mesh (loaded with trimesh).
        gen_mesh_name: path to the generated mesh (loaded with trimesh).
        sample_num: number of surface samples drawn per mesh.

    Returns:
        (str_ang, cos_sim): a printable summary line and the symmetric
        cosine-similarity score.
    """
    gt_mesh = trimesh.load_mesh(gt_mesh_name)
    gen_mesh = trimesh.load_mesh(gen_mesh_name)
    (gt_points, gt_face_index) = trimesh.sample.sample_surface(gt_mesh, sample_num)
    (gen_points, gen_face_index) = trimesh.sample.sample_surface(gen_mesh, sample_num)
    # Normal of the face each sample point was drawn from.
    gt_normals = gt_mesh.face_normals[gt_face_index]
    gen_normals = gen_mesh.face_normals[gen_face_index]
    # gt -> gen direction.
    gen_points_kd_tree = KDTree(gen_points)
    (gt2gen_dist, gt2gen_vert_ids) = gen_points_kd_tree.query(gt_points)
    gt2gen_closest_normals_on_gen = gen_normals[gt2gen_vert_ids]
    gt2gen_cos_sim = np.mean(np.einsum('nk,nk->n', gt_normals, gt2gen_closest_normals_on_gen))
    # gen -> gt direction.
    gt_points_kd_tree = KDTree(gt_points)
    (gen2gt_dist, gen2gt_vert_ids) = gt_points_kd_tree.query(gen_points)
    gen2gt_closest_normals_on_gen = gt_normals[gen2gt_vert_ids]
    gen2gt_cos_sim = np.mean(np.einsum('nk,nk->n', gen_normals, gen2gt_closest_normals_on_gen))
    cos_sim = ((np.abs(gt2gen_cos_sim) + np.abs(gen2gt_cos_sim)) / 2)
    str_ang = f'''angle: {gt2gen_cos_sim:.6f} {gen2gt_cos_sim:.6f} {cos_sim:.6f}
'''
    return (str_ang, cos_sim)
|
def print_matching(list_a, list_b):
    """Print every aligned pair from the two lists, then a summary count."""
    counter = 0
    for counter, (a, b) in enumerate(zip(list_a, list_b), start=1):
        print(f'Matched {a} and {b}')
    print(f'Matched {counter} of {len(list_a)} and {len(list_b)}')
|
def res2str(name_a, name_b, res_a2b, res_b2a, ms):
    """Normalize pymeshlab Hausdorff results by bounding-box diagonal and
    format them (plus derived symmetric Hausdorff/Chamfer values) as a string.

    Args:
        name_a, name_b: mesh names, used only in the report text.
        res_a2b, res_b2a: dicts returned by ms.hausdorff_distance for the
            a->b and b->a directions ('diag_mesh_0', 'diag_mesh_1',
            'n_samples', ...).
        ms: the MeshSet on which both hausdorff_distance calls ran with
            savesample=True.

    Returns:
        (str_all, sym_hausdorff, haus_a2b, haus_b2a, sym_chamfer,
         cham_a2b, cham_b2a)
    """
    # NOTE(review): meshes 3 and 5 are assumed to be the per-vertex-quality
    # sample meshes appended by the two savesample=True calls (the caller
    # loads two meshes first) — verify against compare_meshes.
    a2b_error_field = ms.mesh(3).vertex_quality_array()
    b2a_error_field = ms.mesh(5).vertex_quality_array()
    # Normalize each direction by its source mesh's bounding-box diagonal.
    a2b_error_field /= res_a2b['diag_mesh_0']
    b2a_error_field /= res_b2a['diag_mesh_0']
    dist_Haus_a2b = a2b_error_field.max()
    dist_Haus_b2a = b2a_error_field.max()
    dist_symHausd = max(dist_Haus_a2b, dist_Haus_b2a)
    # Chamfer uses mean squared point-to-surface distance.
    dist_Cham_a2b = (a2b_error_field ** 2).mean()
    dist_Cham_b2a = (b2a_error_field ** 2).mean()
    dist_symChamf = ((dist_Cham_a2b + dist_Cham_b2a) / 2)
    str_nma = f'''name_a: {name_a}
'''
    str_nmb = f'''name_b: {name_b}
'''
    str_itm = f'''---- a2b b2a sym
'''
    str_hau = f'''haus: {dist_Haus_a2b:.6e} {dist_Haus_b2a:.6e} {dist_symHausd:.6e}
'''
    str_chm = f'''chamfer: {dist_Cham_a2b:.6e} {dist_Cham_b2a:.6e} {dist_symChamf:.6e}
'''
    str_dg0 = f'''diag a: {res_a2b['diag_mesh_0']:.6e}
'''
    str_dg1 = f'''diag b: {res_a2b['diag_mesh_1']:.6e}
'''
    str_num = f'''n_samples: {res_a2b['n_samples']}
'''
    str_all = (((((((str_nma + str_nmb) + str_itm) + str_hau) + str_chm) + str_dg0) + str_dg1) + str_num)
    return (str_all, dist_symHausd, dist_Haus_a2b, dist_Haus_b2a, dist_symChamf, dist_Cham_a2b, dist_Cham_b2a)
|
def compare_meshes(meshfile_a, meshfile_b, sample_num):
    """Load two meshes and compute bidirectional Hausdorff/Chamfer metrics.

    Returns (report_string, symmetric_hausdorff, symmetric_chamfer).
    """
    ms = pymeshlab.MeshSet()
    ms.load_new_mesh(meshfile_a)
    ms.load_new_mesh(meshfile_b)
    # Face-sampled distances in both directions; savesample=True appends the
    # per-vertex-quality sample meshes that res2str reads back.
    sampling = dict(savesample=True, samplevert=False, sampleedge=False,
                    samplefauxedge=False, sampleface=True, samplenum=sample_num)
    res_a2b = ms.hausdorff_distance(sampledmesh=0, targetmesh=1, **sampling)
    res_b2a = ms.hausdorff_distance(sampledmesh=1, targetmesh=0, **sampling)
    str_res, d_haus, _, _, d_cham, _, _ = res2str(meshfile_a, meshfile_b, res_a2b, res_b2a, ms)
    del ms
    return (str_res, d_haus, d_cham)
|
def broyden(g, x_init, J_inv_init, max_steps=50, cvg_thresh=1e-05, dvg_thresh=1, eps=1e-06):
    """Find roots of the given function g(x) = 0.

    This function is implemented based on https://github.com/locuslab/deq.

    Tensor shape abbreviation:
        N: number of points
        D: space dimension

    Args:
        g (function): the function whose roots are to be determined.
            shape: [N, D, 1] -> [N, D, 1]
        x_init (tensor): initial value of the parameters. shape: [N, D, 1]
        J_inv_init (tensor): initial value of the inverse Jacobians.
            shape: [N, D, D]
        max_steps (int, optional): max number of iterations. Defaults to 50.
        cvg_thresh (float, optional): convergence threshold. Defaults to 1e-5.
        dvg_thresh (float, optional): divergence threshold. Defaults to 1.
        eps (float, optional): small number added to the denominator to
            prevent numerical error. Defaults to 1e-6.

    Returns:
        dict with:
            result (tensor): best root estimate found. shape: [N, D, 1]
            diff (tensor): corresponding residual norm per point. [N]
            valid_ids (tensor): mask of converged points. [N]
    """
    x = x_init.clone().detach()
    J_inv = J_inv_init.clone().detach()
    ids_val = torch.ones(x.shape[0]).bool()
    gx = g(x, mask=ids_val)
    update = (- J_inv.bmm(gx))  # quasi-Newton step: -J_inv @ g(x)
    x_opt = x.clone()
    gx_norm_opt = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
    delta_gx = torch.zeros_like(gx)
    delta_x = torch.zeros_like(x)
    ids_val = torch.ones_like(gx_norm_opt).bool()
    for solvestep in range(max_steps):
        # Apply the step only to the points that are still active; 'update'
        # was computed under the same ids_val mask at the end of the previous
        # iteration (or before the loop on the first pass).
        delta_x[ids_val] = update
        x[ids_val] += delta_x[ids_val]
        delta_gx[ids_val] = (g(x, mask=ids_val) - gx[ids_val])
        gx[ids_val] += delta_gx[ids_val]
        gx_norm = torch.linalg.norm(gx.squeeze((- 1)), dim=(- 1))
        # Track the best iterate seen so far for every point.
        ids_opt = (gx_norm < gx_norm_opt)
        gx_norm_opt[ids_opt] = gx_norm.clone().detach()[ids_opt]
        x_opt[ids_opt] = x.clone().detach()[ids_opt]
        # Active points: not yet converged and not diverging.
        ids_val = ((gx_norm_opt > cvg_thresh) & (gx_norm < dvg_thresh))
        if (ids_val.sum() <= 0):
            break
        # Broyden rank-one update of the inverse Jacobian (Sherman-Morrison
        # form): J_inv += (dx - J_inv dg) vT / (vT dg), with vT = dxT J_inv.
        vT = delta_x[ids_val].transpose((- 1), (- 2)).bmm(J_inv[ids_val])
        a = (delta_x[ids_val] - J_inv[ids_val].bmm(delta_gx[ids_val]))
        b = vT.bmm(delta_gx[ids_val])
        # Keep the denominator away from zero while preserving its sign.
        b[(b >= 0)] += eps
        b[(b < 0)] -= eps
        u = (a / b)
        ubmmvT = u.bmm(vT)
        J_inv[ids_val] += ubmmvT
        update = (- J_inv[ids_val].bmm(gx[ids_val]))
    return {'result': x_opt, 'diff': gx_norm_opt, 'valid_ids': (gx_norm_opt < cvg_thresh)}
|
def calculate_iou(gt, prediction):
    """Intersection-over-union of two boolean occupancy tensors."""
    overlap = torch.logical_and(gt, prediction).sum()
    combined = torch.logical_or(gt, prediction).sum()
    return overlap / combined
|
class VertexJointSelector(nn.Module):
    """Append extra joints gathered from mesh vertices (face keypoints,
    optional feet keypoints and fingertips) to a joint set."""

    def __init__(self, vertex_ids=None, use_hands=True, use_feet_keypoints=True, **kwargs):
        super(VertexJointSelector, self).__init__()
        extra_joints_idxs = []
        # Face keypoints are always included.
        face_keyp_idxs = np.array(
            [vertex_ids['nose'], vertex_ids['reye'], vertex_ids['leye'],
             vertex_ids['rear'], vertex_ids['lear']],
            dtype=np.int64)
        extra_joints_idxs = np.concatenate([extra_joints_idxs, face_keyp_idxs])
        if use_feet_keypoints:
            feet_keyp_idxs = np.array(
                [vertex_ids['LBigToe'], vertex_ids['LSmallToe'], vertex_ids['LHeel'],
                 vertex_ids['RBigToe'], vertex_ids['RSmallToe'], vertex_ids['RHeel']],
                dtype=np.int32)
            extra_joints_idxs = np.concatenate([extra_joints_idxs, feet_keyp_idxs])
        if use_hands:
            self.tip_names = ['thumb', 'index', 'middle', 'ring', 'pinky']
            tips_idxs = [vertex_ids[hand_id + tip_name]
                         for hand_id in ['l', 'r']
                         for tip_name in self.tip_names]
            extra_joints_idxs = np.concatenate([extra_joints_idxs, tips_idxs])
        # Registered as a buffer so it moves with the module across devices.
        self.register_buffer('extra_joints_idxs',
                             to_tensor(extra_joints_idxs, dtype=torch.long))

    def forward(self, vertices, joints):
        """Gather the extra vertices and concatenate them after joints."""
        extra_joints = torch.index_select(vertices, 1, self.extra_joints_idxs)
        return torch.cat([joints, extra_joints], dim=1)
|
def chamfer_loss_separate(output, target, weight=10000.0, phase='train', debug=False):
    """Bidirectional chamfer terms between two point sets.

    In 'train' phase the raw per-point distances and nearest-neighbor index
    tensors are returned; otherwise the per-cloud means scaled by weight.
    """
    from chamferdist.chamferdist import ChamferDistance
    cdist = ChamferDistance()
    model2scan, scan2model, idx1, idx2 = cdist(output, target)
    if phase != 'train':
        return (torch.mean(model2scan, dim=-1) * weight,
                torch.mean(scan2model, dim=-1) * weight)
    return (model2scan, scan2model, idx1, idx2)
|
def normal_loss(output_normals, target_normals, nearest_idx, weight=1.0, phase='train'):
    """L1 discrepancy between predicted normals and the GT normals of their
    chamfer nearest neighbors.

    Note: the input normals are already normalized (length == 1).
    Returns (loss, target_normals_chosen); the loss is a scalar in 'train'
    phase, otherwise reduced over the last two axes only.
    """
    # (B, N) indices -> (B, N, 3) so gather picks whole normal vectors.
    gather_idx = nearest_idx.expand(3, -1, -1).permute([1, 2, 0]).long()
    target_normals_chosen = torch.gather(target_normals, dim=1, index=gather_idx)
    assert output_normals.shape == target_normals_chosen.shape
    if phase == 'train':
        return (F.l1_loss(output_normals, target_normals_chosen, reduction='mean'),
                target_normals_chosen)
    lnormal = F.l1_loss(output_normals, target_normals_chosen, reduction='none')
    return (lnormal.mean(-1).mean(-1), target_normals_chosen)
|
def color_loss(output_colors, target_colors, nearest_idx, weight=1.0, phase='train', excl_holes=False):
    """L1 loss between predicted colors and the GT colors of their chamfer
    nearest neighbors; excl_holes masks out neighbors whose color sums to 0.

    Returns (loss, target_colors_chosen); the loss is a scalar in 'train'
    phase, otherwise reduced over the last two axes only.
    """
    # (B, N) indices -> (B, N, 3) so gather picks whole RGB triples.
    gather_idx = nearest_idx.expand(3, -1, -1).permute([1, 2, 0]).long()
    target_colors_chosen = torch.gather(target_colors, dim=1, index=gather_idx)
    assert output_colors.shape == target_colors_chosen.shape
    if excl_holes:
        # Zero-sum colors mark holes in the scan; drop them from the loss.
        mask = (target_colors_chosen.sum(-1) != 0).float().unsqueeze(-1)
    else:
        mask = 1.0
    lcolor = F.l1_loss(output_colors, target_colors_chosen, reduction='none') * mask
    if phase == 'train':
        return (lcolor.mean(), target_colors_chosen)
    return (lcolor.mean(-1).mean(-1), target_colors_chosen)
|
class GaussianSmoothing(nn.Module):
    """Apply Gaussian smoothing on a 1d, 2d or 3d tensor.

    Filtering is performed separately for each channel in the input using a
    depthwise convolution with reflect padding, so the output keeps the
    spatial size of the input.

    Arguments:
        channels (int): Number of channels of the input tensors. Output will
            have this number of channels as well.
        kernel_size (int, sequence): Size of the gaussian kernel.
        sigma (float, sequence): Standard deviation of the gaussian kernel.
        dim (int, optional): Number of spatial dimensions of the data.
            Default value is 2 (spatial).
    """

    def __init__(self, channels=3, kernel_size=3, sigma=1.0, dim=2):
        super(GaussianSmoothing, self).__init__()
        if isinstance(kernel_size, numbers.Number):
            kernel_size = [kernel_size] * dim
        if isinstance(sigma, numbers.Number):
            sigma = [sigma] * dim
        # Build the separable kernel as an outer product of 1-D Gaussians.
        kernel = 1
        meshgrids = torch.meshgrid([torch.arange(size, dtype=torch.float32) for size in kernel_size])
        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
            mean = (size - 1) / 2
            # BUGFIX: the exponent was ((x - mean) / (2 * std)) ** 2, i.e.
            # exp(-d^2 / (4 sigma^2)), which silently widens sigma by sqrt(2).
            # The correct Gaussian density uses exp(-d^2 / (2 sigma^2)).
            kernel *= (1 / (std * math.sqrt(2 * math.pi))) * torch.exp(-((mgrid - mean) ** 2) / (2 * std ** 2))
        # Normalize so smoothing preserves the input's mean level.
        kernel = kernel / torch.sum(kernel)
        kernel = kernel.view(1, 1, *kernel.size())
        kernel = kernel.repeat(channels, *([1] * (kernel.dim() - 1)))
        # NOTE: padding in forward() assumes a square kernel (uses size[0]).
        self.kernel_size = kernel_size[0]
        self.dim = dim
        self.register_buffer('weight', kernel)
        self.groups = channels  # depthwise: one filter per channel
        if dim == 1:
            self.conv = F.conv1d
        elif dim == 2:
            self.conv = F.conv2d
        elif dim == 3:
            self.conv = F.conv3d
        else:
            raise RuntimeError('Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim))

    def forward(self, input):
        """Apply the Gaussian filter to input.

        Arguments:
            input (torch.Tensor): Input to apply the gaussian filter on.
        Returns:
            filtered (torch.Tensor): Filtered output, same spatial size.
        """
        pad_size = self.kernel_size // 2
        if self.dim == 1:
            pad = F.pad(input, (pad_size, pad_size), mode='reflect')
        elif self.dim == 2:
            pad = F.pad(input, (pad_size, pad_size, pad_size, pad_size), mode='reflect')
        elif self.dim == 3:
            pad = F.pad(input, (pad_size, pad_size, pad_size, pad_size, pad_size, pad_size), mode='reflect')
        return self.conv(pad, weight=self.weight.type_as(input), groups=self.groups)
|
class CBatchNorm2d(nn.Module):
    """ Conditional batch normalization layer class.
    Borrowed from Occupancy Network repo: https://github.com/autonomousvision/occupancy_networks
    Args:
        c_dim (int): dimension of latent conditioned code c
        f_channels (int): number of channels of the feature maps
        norm_method (str): normalization method
    """
    def __init__(self, c_dim, f_channels, norm_method='batch_norm'):
        super().__init__()
        self.c_dim = c_dim
        self.f_channels = f_channels
        self.norm_method = norm_method
        # 1x1 convolutions map the condition code to per-channel scale/shift.
        self.conv_gamma = nn.Conv1d(c_dim, f_channels, 1)
        self.conv_beta = nn.Conv1d(c_dim, f_channels, 1)
        if norm_method == 'batch_norm':
            self.bn = nn.BatchNorm2d(f_channels, affine=False)
        elif norm_method == 'instance_norm':
            self.bn = nn.InstanceNorm2d(f_channels, affine=False)
        elif norm_method == 'group_norm':
            # BUGFIX: nn.GroupNorm2d does not exist, so this branch used to raise
            # AttributeError on construction. NOTE(review): num_groups=1
            # (layer-norm-like) is an assumption -- the original never specified
            # a group count; confirm against the intended config.
            self.bn = nn.GroupNorm(1, f_channels, affine=False)
        else:
            raise ValueError('Invalid normalization method!')
        self.reset_parameters()

    def reset_parameters(self):
        # Zero weights with (gamma bias = 1, beta bias = 0) make the layer start
        # as plain normalization: gamma == 1, beta == 0 for any condition code.
        nn.init.zeros_(self.conv_gamma.weight)
        nn.init.zeros_(self.conv_beta.weight)
        nn.init.ones_(self.conv_gamma.bias)
        nn.init.zeros_(self.conv_beta.bias)

    def forward(self, x, c):
        """Normalize x (B, C, H, W) and modulate it with condition code c (B, c_dim[, 1])."""
        assert x.size(0) == c.size(0)
        assert c.size(1) == self.c_dim
        if len(c.size()) == 2:
            c = c.unsqueeze(2)  # (B, c_dim) -> (B, c_dim, 1) for Conv1d
        gamma = self.conv_gamma(c).unsqueeze(-1)  # (B, f_channels, 1, 1), broadcast over H, W
        beta = self.conv_beta(c).unsqueeze(-1)
        net = self.bn(x)
        out = gamma * net + beta
        return out
|
class Conv2DBlock(nn.Module):
    """Pre-activation downsampling block: (LeakyReLU ->) Conv2d (-> BatchNorm)."""

    def __init__(self, input_nc, output_nc, kernel_size=4, stride=2, padding=1,
                 use_bias=False, use_bn=True, use_relu=True):
        super(Conv2DBlock, self).__init__()
        self.use_bn = use_bn
        self.use_relu = use_relu
        self.conv = nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size,
                              stride=stride, padding=padding, bias=use_bias)
        if use_bn:
            self.bn = nn.BatchNorm2d(output_nc, affine=False)
        self.relu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        # Activation comes *before* the convolution (pre-activation ordering).
        out = self.relu(x) if self.use_relu else x
        out = self.conv(out)
        return self.bn(out) if self.use_bn else out
|
class UpConv2DBlock(nn.Module):
    """Pre-activation upsampling block: ReLU -> upconv/upsample (-> BN) (-> dropout),
    optionally concatenating a skip connection along the channel dim."""

    def __init__(self, input_nc, output_nc, kernel_size=4, stride=2, padding=1,
                 use_bias=False, use_bn=True, up_mode='upconv', use_dropout=False):
        super(UpConv2DBlock, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.use_bn = use_bn
        self.use_dropout = use_dropout
        self.relu = nn.ReLU()
        if up_mode == 'upconv':
            self.up = nn.ConvTranspose2d(input_nc, output_nc, kernel_size=kernel_size,
                                         stride=stride, padding=padding, bias=use_bias)
        else:
            # Bilinear upsample followed by a 3x3 conv (avoids checkerboard artifacts).
            self.up = nn.Sequential(
                nn.Upsample(mode='bilinear', scale_factor=2, align_corners=False),
                nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=1, stride=1),
            )
        if use_bn:
            self.bn = nn.BatchNorm2d(output_nc, affine=False)
        if use_dropout:
            self.drop = nn.Dropout(0.5)

    def forward(self, x, skip_input=None):
        out = self.up(self.relu(x))
        if self.use_bn:
            out = self.bn(out)
        if self.use_dropout:
            out = self.drop(out)
        if skip_input is None:
            return out
        return torch.cat([out, skip_input], 1)
|
class GeomConvLayers(nn.Module):
    """
    A few convolutional layers to smooth the geometric feature tensor.
    Three 5x5 stride-1 convs; optional LeakyReLU between (but not after) them.
    """

    def __init__(self, input_nc=16, hidden_nc=16, output_nc=16, use_relu=False):
        super().__init__()
        self.use_relu = use_relu
        self.conv1 = nn.Conv2d(input_nc, hidden_nc, kernel_size=5, stride=1, padding=2, bias=False)
        self.conv2 = nn.Conv2d(hidden_nc, hidden_nc, kernel_size=5, stride=1, padding=2, bias=False)
        self.conv3 = nn.Conv2d(hidden_nc, output_nc, kernel_size=5, stride=1, padding=2, bias=False)
        if use_relu:
            self.relu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        out = x
        for conv in (self.conv1, self.conv2, self.conv3):
            out = conv(out)
            # No activation after the final conv.
            if self.use_relu and conv is not self.conv3:
                out = self.relu(out)
        return out
|
class GeomConvBottleneckLayers(nn.Module):
    """
    A u-net-like small bottleneck network for smoothing the geometric feature
    tensor: three stride-2 downsampling convs followed by three mirrored
    transposed convs (no skip connections, no activations).
    """

    def __init__(self, input_nc=16, hidden_nc=16, output_nc=16, use_relu=False):
        super().__init__()
        self.use_relu = use_relu  # kept for interface parity; not used in forward
        self.conv1 = nn.Conv2d(input_nc, hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv2 = nn.Conv2d(hidden_nc, hidden_nc * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv3 = nn.Conv2d(hidden_nc * 2, hidden_nc * 4, kernel_size=4, stride=2, padding=1, bias=False)
        self.up1 = nn.ConvTranspose2d(hidden_nc * 4, hidden_nc * 2, kernel_size=4, stride=2, padding=1, bias=False)
        self.up2 = nn.ConvTranspose2d(hidden_nc * 2, hidden_nc, kernel_size=4, stride=2, padding=1, bias=False)
        self.up3 = nn.ConvTranspose2d(hidden_nc, output_nc, kernel_size=4, stride=2, padding=1, bias=False)

    def forward(self, x):
        for layer in (self.conv1, self.conv2, self.conv3, self.up1, self.up2, self.up3):
            x = layer(x)
        return x
|
class GaussianSmoothingLayers(nn.Module):
    """
    use a fixed, not-trainable gaussian smoother layers for smoothing the geometric feature tensor

    Three stacked GaussianSmoothing layers, all sharing the same kernel size
    and standard deviation.
    """
    def __init__(self, channels=16, kernel_size=5, sigma=1.0):
        super().__init__()
        # BUGFIX: `sigma` used to be ignored -- every layer was hard-coded to
        # sigma=1.0, so passing a different value had no effect.
        self.conv1 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)
        self.conv2 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)
        self.conv3 = GaussianSmoothing(channels, kernel_size=kernel_size, sigma=sigma, dim=2)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
|
class UnetNoCond5DS(nn.Module):
    """5-level U-Net without conditioning. Optionally returns a second decoder
    branch (e.g. for normals) that shares the first three up-convolutions."""

    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv',
                 use_dropout=False, return_lowres=False, return_2branches=False):
        super().__init__()
        assert up_mode in ('upconv', 'upsample')
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: 5 stride-2 downsampling convs.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock(1 * nf, 2 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock(2 * nf, 4 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock(4 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=False)
        # Decoder with skip connections (doubled input channels after each concat).
        self.upconv1 = UpConv2DBlock(8 * nf, 8 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(8 * nf * 2, 4 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(4 * nf * 2, 2 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv4 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv5 = UpConv2DBlock(1 * nf * 2, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode=up_mode)
        if return_2branches:
            # Second ("N") decoder head, forked after upconv3.
            self.upconvN4 = UpConv2DBlock(2 * nf * 2, 1 * nf, 4, 2, 1, up_mode=up_mode)
            self.upconvN5 = UpConv2DBlock(1 * nf * 2, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')

    def forward(self, x):
        # Encoder pass; keep every level for the skip connections.
        feats = [self.conv1(x)]
        for enc in (self.conv2, self.conv3, self.conv4, self.conv5):
            feats.append(enc(feats[-1]))
        e1, e2, e3, e4, e5 = feats
        # Shared decoder trunk.
        dec = self.upconv1(e5, e4)
        dec = self.upconv2(dec, e3)
        shared = self.upconv3(dec, e2)
        # Main branch.
        main_out = self.upconv5(self.upconv4(shared, e1))
        if self.return_2branches:
            second_out = self.upconvN5(self.upconvN4(shared, e1))
            return (main_out, second_out)
        return main_out
|
class UnetNoCond6DS(nn.Module):
    """6-level U-Net without conditioning. Optionally forks after upconv4 into a
    "C" head (output_nc channels) and an "N" head (3 channels)."""

    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv',
                 use_dropout=False, return_lowres=False, return_2branches=False):
        super(UnetNoCond6DS, self).__init__()
        assert up_mode in ('upconv', 'upsample')
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: 6 stride-2 downsampling convs.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock(1 * nf, 2 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock(2 * nf, 4 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock(4 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=True)
        self.conv6 = Conv2DBlock(8 * nf, 8 * nf, 4, 2, 1, use_bias=False, use_bn=False)
        # Shared decoder trunk.
        self.upconv1 = UpConv2DBlock(8 * nf, 8 * nf, 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(8 * nf * 2, 8 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(8 * nf * 2, 8 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv4 = UpConv2DBlock(4 * nf * 3, 4 * nf, 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        # "C" head (always bilinear upsampling).
        self.upconvC5 = UpConv2DBlock(2 * nf * 3, 2 * nf, 4, 2, 1, up_mode='upsample')
        self.upconvC6 = UpConv2DBlock(1 * nf * 3, output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upsample')
        if return_2branches:
            # "N" head (always transposed convs).
            self.upconvN5 = UpConv2DBlock(2 * nf * 3, 2 * nf, 4, 2, 1, up_mode='upconv')
            self.upconvN6 = UpConv2DBlock(1 * nf * 3, 3, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')

    def forward(self, x):
        # Encoder pass; keep every level for the skip connections.
        feats = [self.conv1(x)]
        for enc in (self.conv2, self.conv3, self.conv4, self.conv5, self.conv6):
            feats.append(enc(feats[-1]))
        e1, e2, e3, e4, e5, e6 = feats
        dec = self.upconv1(e6, e5)
        dec = self.upconv2(dec, e4)
        dec = self.upconv3(dec, e3)
        shared = self.upconv4(dec, e2)
        out_c = self.upconvC6(self.upconvC5(shared, e1))
        if self.return_2branches:
            out_n = self.upconvN6(self.upconvN5(shared, e1))
            return (out_c, out_n)
        return out_c
|
class UnetNoCond7DS(nn.Module):
    """7-level U-Net without conditioning.

    Encoder halves the spatial resolution 7 times; the decoder mirrors it with
    skip connections. With `return_2branches`, the decoder forks after the
    shared trunk into a "C" head (bilinear upsampling, output_nc channels) and
    an "N" head (transposed convs, 3 channels).
    """
    def __init__(self, input_nc=3, output_nc=3, nf=64, up_mode='upconv', use_dropout=False, return_lowres=False, return_2branches=False):
        super(UnetNoCond7DS, self).__init__()
        assert (up_mode in ('upconv', 'upsample'))
        self.return_lowres = return_lowres
        self.return_2branches = return_2branches
        # Encoder: 7 stride-2 downsampling convs.
        self.conv1 = Conv2DBlock(input_nc, nf, 4, 2, 1, use_bias=False, use_bn=False, use_relu=False)
        self.conv2 = Conv2DBlock((1 * nf), (2 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv3 = Conv2DBlock((2 * nf), (4 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv4 = Conv2DBlock((4 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv5 = Conv2DBlock((8 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv6 = Conv2DBlock((8 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=True)
        self.conv7 = Conv2DBlock((8 * nf), (8 * nf), 4, 2, 1, use_bias=False, use_bn=False)
        # Shared decoder trunk.
        self.upconv1 = UpConv2DBlock((8 * nf), (8 * nf), 4, 2, 1, up_mode=up_mode)
        self.upconv2 = UpConv2DBlock(((8 * nf) * 2), (8 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        self.upconv3 = UpConv2DBlock(((8 * nf) * 2), (8 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        # NOTE(review): upconv4 is constructed here but never called in
        # forward() below -- see the comment at the u4 step.
        self.upconv4 = UpConv2DBlock(((8 * nf) * 2), (4 * nf), 4, 2, 1, up_mode=up_mode, use_dropout=use_dropout)
        # "C" head (bilinear upsampling).
        self.upconvC5 = UpConv2DBlock(((4 * nf) * 3), (2 * nf), 4, 2, 1, up_mode='upsample')
        self.upconvC6 = UpConv2DBlock(((2 * nf) * 2), (1 * nf), 4, 2, 1, up_mode='upsample')
        self.upconvC7 = UpConv2DBlock(((1 * nf) * 2), output_nc, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upsample')
        if return_2branches:
            # "N" head (transposed convs), mirroring the C head.
            self.upconvN5 = UpConv2DBlock(((4 * nf) * 3), (2 * nf), 4, 2, 1, up_mode='upconv')
            self.upconvN6 = UpConv2DBlock(((2 * nf) * 2), (1 * nf), 4, 2, 1, up_mode='upconv')
            self.upconvN7 = UpConv2DBlock(((1 * nf) * 2), 3, 4, 2, 1, use_bn=False, use_bias=True, up_mode='upconv')
    def forward(self, x):
        # Encoder pass; every level is kept for the skip connections.
        d1 = self.conv1(x)
        d2 = self.conv2(d1)
        d3 = self.conv3(d2)
        d4 = self.conv4(d3)
        d5 = self.conv5(d4)
        d6 = self.conv6(d5)
        d7 = self.conv7(d6)
        u1 = self.upconv1(d7, d6)
        u2 = self.upconv2(u1, d5)
        u3 = self.upconv3(u2, d4)
        # NOTE(review): this reuses upconv3 (shared weights with the step above)
        # where, by naming, upconv4 looks intended. However the channel counts
        # only work as written: upconv3 maps 16*nf -> 8*nf, and 8*nf + d3's
        # 4*nf = 12*nf matches upconvC5/upconvN5's expected (4*nf)*3 input,
        # whereas upconv4 would yield 4*nf + 4*nf = 8*nf and break the decoder.
        # Confirm intent before "fixing"; as-is, upconv4 is dead weight.
        u4 = self.upconv3(u3, d3)
        uc5 = self.upconvC5(u4, d2)
        uc6 = self.upconvC6(uc5, d1)
        uc7 = self.upconvC7(uc6)
        if self.return_2branches:
            un5 = self.upconvN5(u4, d2)
            un6 = self.upconvN6(un5, d1)
            un7 = self.upconvN7(un6)
            return (uc7, un7)
        return uc7
|
class ShapeDecoder(nn.Module):
    """
    The "Shape Decoder" in the POP paper Fig. 2. The same as the "shared MLP" in the SCALE paper.
    - with skip connection from the input features to the 4th layer's output features (like DeepSDF)
    - branches out at the second-to-last layer, one branch for position pred, one for normal pred

    Implemented with 1x1 Conv1d layers over (batch, channels, n_points) tensors.
    """

    def __init__(self, in_size, hsize=256, actv_fn='softplus'):
        self.hsize = hsize
        super(ShapeDecoder, self).__init__()
        # Trunk (note conv5 takes the input concatenated back in: DeepSDF-style skip).
        self.conv1 = torch.nn.Conv1d(in_size, self.hsize, 1)
        self.conv2 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv3 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv4 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv5 = torch.nn.Conv1d(self.hsize + in_size, self.hsize, 1)
        # Position branch.
        self.conv6 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv7 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv8 = torch.nn.Conv1d(self.hsize, 3, 1)
        # Normal branch.
        self.conv6N = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv7N = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv8N = torch.nn.Conv1d(self.hsize, 3, 1)
        self.bn1 = torch.nn.BatchNorm1d(self.hsize)
        self.bn2 = torch.nn.BatchNorm1d(self.hsize)
        self.bn3 = torch.nn.BatchNorm1d(self.hsize)
        self.bn4 = torch.nn.BatchNorm1d(self.hsize)
        self.bn5 = torch.nn.BatchNorm1d(self.hsize)
        self.bn6 = torch.nn.BatchNorm1d(self.hsize)
        self.bn7 = torch.nn.BatchNorm1d(self.hsize)
        self.bn6N = torch.nn.BatchNorm1d(self.hsize)
        self.bn7N = torch.nn.BatchNorm1d(self.hsize)
        self.actv_fn = nn.ReLU() if actv_fn == 'relu' else nn.Softplus()

    def forward(self, x):
        """Return (positions, normals), each (B, 3, N), from features x (B, in_size, N)."""
        act = self.actv_fn
        h = act(self.bn1(self.conv1(x)))
        h = act(self.bn2(self.conv2(h)))
        h = act(self.bn3(self.conv3(h)))
        h = act(self.bn4(self.conv4(h)))
        # Re-inject the raw input features (skip connection) before layer 5.
        trunk = act(self.bn5(self.conv5(torch.cat([x, h], dim=1))))
        pos = act(self.bn6(self.conv6(trunk)))
        pos = act(self.bn7(self.conv7(pos)))
        pos = self.conv8(pos)
        nrm = act(self.bn6N(self.conv6N(trunk)))
        nrm = act(self.bn7N(self.conv7N(nrm)))
        nrm = self.conv8N(nrm)
        return (pos, nrm)
|
class PreDeformer(nn.Module):
    """Small point-wise MLP (1x1 Conv1d stack) mapping per-point features of
    size `in_size` to `out_size` values; the last layer has no norm/activation."""

    def __init__(self, in_size, out_size=3, hsize=64, actv_fn='softplus'):
        self.hsize = hsize
        super(PreDeformer, self).__init__()
        self.conv1 = torch.nn.Conv1d(in_size, self.hsize, 1)
        self.conv2 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv3 = torch.nn.Conv1d(self.hsize, self.hsize, 1)
        self.conv4 = torch.nn.Conv1d(self.hsize, out_size, 1)
        self.bn1 = torch.nn.BatchNorm1d(self.hsize)
        self.bn2 = torch.nn.BatchNorm1d(self.hsize)
        self.bn3 = torch.nn.BatchNorm1d(self.hsize)
        self.actv_fn = nn.ReLU() if actv_fn == 'relu' else nn.Softplus()

    def forward(self, x):
        act = self.actv_fn
        h = act(self.bn1(self.conv1(x)))
        h = act(self.bn2(self.conv2(h)))
        h = act(self.bn3(self.conv3(h)))
        return self.conv4(h)
|
def loadShader(shaderType, shaderFile):
    """Compile a GLSL shader of `shaderType` from `shaderFile` (resolved via
    findFileOrThrow). On compile failure, print the info log; the (invalid)
    shader handle is returned either way."""
    strFilename = findFileOrThrow(shaderFile)
    print(f'Found shader filename = {strFilename}')
    with open(strFilename, 'r') as f:
        shaderData = f.read()
    shader = glCreateShader(shaderType)
    glShaderSource(shader, shaderData)
    glCompileShader(shader)
    if glGetShaderiv(shader, GL_COMPILE_STATUS) == GL_FALSE:
        strInfoLog = glGetShaderInfoLog(shader)
        # Human-readable stage name for the error message.
        strShaderType = ''
        if shaderType is GL_VERTEX_SHADER:
            strShaderType = 'vertex'
        elif shaderType is GL_GEOMETRY_SHADER:
            strShaderType = 'geometry'
        elif shaderType is GL_FRAGMENT_SHADER:
            strShaderType = 'fragment'
        print('Compilation failure for ' + strShaderType + ' shader:\n' + str(strInfoLog))
    return shader
|
def createProgram(shaderList):
    """Link the given compiled shaders into a GL program. Shaders are detached
    afterwards; on link failure the info log is printed and the program is
    returned regardless."""
    program = glCreateProgram()
    for sh in shaderList:
        glAttachShader(program, sh)
    glLinkProgram(program)
    if glGetProgramiv(program, GL_LINK_STATUS) == GL_FALSE:
        strInfoLog = glGetProgramInfoLog(program)
        print('Linker failure: \n' + str(strInfoLog))
    for sh in shaderList:
        glDetachShader(program, sh)
    return program
|
def findFileOrThrow(strBasename):
    """Resolve a data file: try the name as-is, then ./data/, then the data/
    directory next to this module. Raise IOError when none exists."""
    local_dir = 'data' + os.sep
    global_dir = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'data' + os.sep
    for candidate in (strBasename, local_dir + strBasename, global_dir + strBasename):
        if os.path.isfile(candidate):
            return candidate
    raise IOError('Could not find target file ' + strBasename)
|
def tensor2numpy(tensor):
    """Convert a torch tensor to a numpy array (detached, moved to CPU).

    Non-tensor inputs (e.g. arrays that are already numpy, or None) are
    returned unchanged. BUGFIX: the function previously fell through and
    returned None for any non-tensor input.
    """
    if isinstance(tensor, torch.Tensor):
        return tensor.detach().cpu().numpy()
    return tensor
|
def vertex_normal_2_vertex_color(vertex_normal):
    """Map vertex normals to RGB colors in [0, 255] (ubyte).

    Normals are unit-normalized first; for numpy inputs this normalization
    happens *in place* on the caller's array (save_result_examples passes the
    same array on to the .ply exporter afterwards).
    """
    import torch
    if torch.is_tensor(vertex_normal):
        vertex_normal = vertex_normal.detach().cpu().numpy()
    lengths = ((vertex_normal ** 2).sum(1) ** 0.5).reshape(-1, 1)
    vertex_normal /= lengths
    # [-1, 1] -> [0.5, 255.5]; the ubyte cast truncates to [0, 255].
    color = (vertex_normal * 255) / 2.0 + 128
    return color.astype(np.ubyte)
|
def export_ply_with_vquality(filename, v_array=None, f_array=None, vq_array=None):
    """Write an ascii .ply file with optional per-vertex quality.

    v_array: (Nv, 3) vertex positions
    f_array: (Nf, 3) triangle vertex indices
    vq_array: (Nv,) per-vertex quality scalars (adds a 'quality' property)
    """
    Nv = 0 if v_array is None else v_array.shape[0]
    Nf = 0 if f_array is None else f_array.shape[0]
    header = ['ply', 'format ascii 1.0', 'comment trisst custom',
              f'element vertex {Nv}',
              'property float x', 'property float y', 'property float z']
    if vq_array is not None:
        header.append('property float quality')
    header += [f'element face {Nf}', 'property list uchar int vertex_indices', 'end_header']
    with open(filename, 'w') as plyfile:
        plyfile.write('\n'.join(header) + '\n')
        # Vertex records keep a trailing space before the newline, matching the
        # original writer's output byte-for-byte.
        for i in range(Nv):
            line = f'{v_array[i][0]} {v_array[i][1]} {v_array[i][2]} '
            if vq_array is not None:
                line += f'{vq_array[i]} '
            plyfile.write(line + '\n')
        for i in range(Nf):
            plyfile.write(f'3 {f_array[i][0]} {f_array[i][1]} {f_array[i][2]}\n')
|
def customized_export_ply(outfile_name, v, f=None, v_n=None, v_c=None, f_c=None, e=None):
    """
    Author: Jinlong Yang, jyang@tue.mpg.de

    Exports a point cloud / mesh to a .ply file
    supports vertex normal and color export
    such that the saved file will be correctly displayed in MeshLab

    # v: Vertex position, N_v x 3 float numpy array
    # f: Face, N_f x 3 int numpy array
    # v_n: Vertex normal, N_v x 3 float numpy array (torch tensors accepted)
    # v_c: Vertex color, N_v x (3 or 4) uchar numpy array
    # f_n: Face normal, N_f x 3 float numpy array
    # f_c: Face color, N_f x (3 or 4) uchar numpy array
    # e: Edge, N_e x 2 int numpy array
    # mode: ascii or binary ply file. Value is {'ascii', 'binary'}
    """
    N_v = v.shape[0]
    assert v.shape[1] == 3

    v_n_flag = v_n is not None
    if v_n_flag:
        assert v_n.shape[0] == N_v
        # BUGFIX: this used to compare type(v_n) against the *string*
        # 'torch.Tensor' (always False), so tensors were never converted.
        if torch.is_tensor(v_n):
            v_n = v_n.detach().cpu().numpy()

    v_c_flag = v_c is not None
    if v_c_flag:
        assert v_c.shape[0] == N_v
        if v_c.shape[1] == 3:
            # Pad RGB to RGBA with a fully opaque alpha channel.
            alpha_channel = np.zeros((N_v, 1), dtype=np.ubyte) + 255
            v_c = np.hstack((v_c, alpha_channel))

    N_f = 0
    if f is not None:
        N_f = f.shape[0]
        assert f.shape[1] == 3

    f_c_flag = f_c is not None
    if f_c_flag:
        assert f_c.shape[0] == f.shape[0]
        if f_c.shape[1] == 3:
            alpha_channel = np.zeros((N_f, 1), dtype=np.ubyte) + 255
            f_c = np.hstack((f_c, alpha_channel))

    N_e = 0 if e is None else e.shape[0]

    with open(outfile_name, 'w') as file:
        # --- header ---
        file.write('ply\n')
        file.write('format ascii 1.0\n')
        file.write('element vertex %d\n' % N_v)
        file.write('property float x\n')
        file.write('property float y\n')
        file.write('property float z\n')
        if v_n_flag:
            file.write('property float nx\n')
            file.write('property float ny\n')
            file.write('property float nz\n')
        if v_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        file.write('element face %d\n' % N_f)
        file.write('property list uchar int vertex_indices\n')
        if f_c_flag:
            file.write('property uchar red\n')
            file.write('property uchar green\n')
            file.write('property uchar blue\n')
            file.write('property uchar alpha\n')
        if N_e != 0:
            file.write('element edge %d\n' % N_e)
            file.write('property int vertex1\n')
            file.write('property int vertex2\n')
        file.write('end_header\n')
        # --- vertex records (layout depends on which attributes are present) ---
        if v_n_flag and v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f %d %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], v_n[i, 0], v_n[i, 1], v_n[i, 2], v_c[i, 0], v_c[i, 1], v_c[i, 2], v_c[i, 3]))
        elif v_n_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], v_n[i, 0], v_n[i, 1], v_n[i, 2]))
        elif v_c_flag:
            for i in range(0, N_v):
                file.write('%f %f %f %d %d %d %d\n' % (v[i, 0], v[i, 1], v[i, 2], v_c[i, 0], v_c[i, 1], v_c[i, 2], v_c[i, 3]))
        else:
            for i in range(0, N_v):
                file.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
        # --- face records ---
        if f_c_flag:
            for i in range(0, N_f):
                file.write('3 %d %d %d %d %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2], f_c[i, 0], f_c[i, 1], f_c[i, 2], f_c[i, 3]))
        else:
            for i in range(0, N_f):
                file.write('3 %d %d %d\n' % (f[i, 0], f[i, 1], f[i, 2]))
        # --- edge records ---
        if N_e != 0:
            for i in range(0, N_e):
                file.write('%d %d\n' % (e[i, 0], e[i, 1]))
|
def save_result_examples(save_dir, model_name, result_name, points, normals=None, patch_color=None, texture=None, coarse_pts=None, gt=None, epoch=None):
    """Dump prediction point clouds (plus optional normals, colors, texture,
    intermediate and ground-truth clouds) as .ply files under save_dir."""
    from os.path import join
    import numpy as np

    if epoch is None:
        fname = '{}_{}_pred.ply'.format(model_name, result_name)
    else:
        fname = '{}_epoch{}_{}_pred.ply'.format(model_name, str(epoch).zfill(4), result_name)
    normal_fn = join(save_dir, fname)

    points = tensor2numpy(points)

    if normals is not None:
        normals = tensor2numpy(normals)
        color_normal = vertex_normal_2_vertex_color(normals)
        customized_export_ply(normal_fn, v=points, v_n=normals, v_c=color_normal)

    def _export_colored(values, suffix):
        # Colors in [0, 1] get scaled to bytes; byte colors pass through.
        values = tensor2numpy(values)
        if values.max() < 1.1:
            values = (values * 255.0).astype(np.ubyte)
        customized_export_ply(normal_fn.replace('pred.ply', suffix), v=points, v_c=values)

    if patch_color is not None:
        _export_colored(patch_color, 'pred_patchcolor.ply')
    if texture is not None:
        _export_colored(texture, 'pred_texture.ply')
    if coarse_pts is not None:
        customized_export_ply(normal_fn.replace('pred.ply', 'interm.ply'), v=tensor2numpy(coarse_pts))
    if gt is not None:
        customized_export_ply(normal_fn.replace('pred.ply', 'gt.ply'), v=tensor2numpy(gt))
|
def adjust_loss_weights(init_weight, current_epoch, mode='decay', start=400, every=20):
    """Schedule a loss weight over training epochs.

    mode='decay' : init_weight until `start`, then x0.85 every `every` epochs.
    mode='rise'  : init_weight*1e-6 until `start`, then x1.05 every `every` epochs.
    mode='binary': 0 until `start`, init_weight afterwards.
    every=0 disables the geometric ramp (weight stays at init_weight).
    """
    if mode != 'binary':
        if current_epoch < start:
            # 'rise' starts from a negligible weight; other modes start at full weight.
            weight = init_weight * 1e-06 if mode == 'rise' else init_weight
        elif every == 0:
            weight = init_weight  # no ramping
        else:
            factor = 1.05 if mode == 'rise' else 0.85
            weight = init_weight * (factor ** ((current_epoch - start) // every))
    else:
        # BUGFIX: 'binary' previously fell through without assigning `weight`,
        # raising UnboundLocalError at the return below.
        weight = 0.0 if current_epoch < start else init_weight
    return weight
|
def generate_previews():
    """Build the EnumProperty items list for the clothing thumbnail gallery,
    loading each image into the shared preview collection."""
    gcoll = avt_preview_collections['thumbnail_previews']
    image_location = gcoll.images_location
    gallery = ['dress01.jpg', 'dress02.jpg', 'dress03.jpg', 'dress04.jpg', 'dress05.jpg', 'dress06.jpg', 'glasses01.jpg', 'glasses02.jpg', 'hat01.jpg', 'hat02.jpg', 'hat03.jpg', 'hat04.jpg', 'jacket01.jpg', 'jacket02.jpg', 'pants01.jpg', 'pants02.jpg', 'pants03.jpg', 'pants04.jpg', 'pants05.jpg', 'pants06.jpg', 'shirt01.jpg', 'shirt02.jpg', 'shirt03.jpg', 'shirt04.jpg', 'shirt05.jpg', 'shirt06.jpg', 'shirt07.jpg', 'shoes01.jpg', 'shoes02.jpg', 'shoes03.jpg', 'shoes04.jpg', 'skirt01.jpg', 'skirt02.jpg', 'suit01.jpg', 'swimming01.jpg', 'swimming02.jpg', 'swimming03.jpg', 'swimming04.jpg']
    enum_items = []
    # Items are (identifier, name, description, icon, number), 1-based number.
    for number, image_file in enumerate(gallery, start=1):
        description = image_file.split('.')[0]
        filepath = image_location + '/' + image_file
        thumb = gcoll.load(filepath, filepath, 'IMAGE')
        enum_items.append((image_file, image_file, description, thumb.icon_id, number))
    return enum_items
|
def update_weights(self, context):
    """Property-update callback for the shape sliders: push the slider values
    into mAvt, refresh the body mesh, and re-drape every dressed cloth."""
    global mAvt
    if mAvt.body is not None:
        obj = mAvt.body
    else:
        reload_avatar()
        # BUGFIX: `obj` was left unbound on this branch, so the calls below
        # crashed with NameError; bind it to the freshly reloaded body.
        obj = mAvt.body
    mAvt.val_breast = self.val_breast
    mAvt.val_torso = self.val_torso
    # Hips and weight values are negated before being applied.
    mAvt.val_hips = -self.val_hips
    mAvt.val_armslegs = self.val_limbs
    mAvt.val_weight = -self.val_weight
    mAvt.val_strength = self.val_strength
    mAvt.refresh_shape(obj)
    mAvt.np_mesh = mAvt.read_verts(obj.data)
    mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
    # Propagate the body deformation to every cloth mesh in the scene.
    for object in bpy.data.objects:
        if (object.type == 'MESH') and (object.name != 'Avatar:Body'):
            mAvt.deform_cloth(cloth_name=str(object.name))
|
def load_model_from_blend_file(filename):
    """Append every object from the given .blend file and link each one into
    the current scene collection."""
    with bpy.data.libraries.load(filename) as (data_from, data_to):
        data_to.objects = list(data_from.objects)
    for obj in data_to.objects:
        bpy.context.scene.collection.objects.link(obj)
|
def reload_avatar():
    """Re-bind mAvt's references to the Avatar scene objects and rebuild the
    cached rest pose, bone matrices and body-vertex KD-tree."""
    global mAvt
    mAvt.load_shape_model()
    mAvt.eyes = bpy.data.objects['Avatar:High-poly']
    mAvt.body = bpy.data.objects['Avatar:Body']
    mAvt.skel = bpy.data.objects['Avatar']
    mAvt.armature = bpy.data.armatures['Avatar']
    mAvt.skel_ref = motion_utils.get_rest_pose(mAvt.skel, mAvt.list_bones)
    mAvt.hips_pos = (mAvt.skel.matrix_world @ Matrix.Translation(mAvt.skel.pose.bones['Hips'].head)).to_translation()
    # Snapshot per-bone matrices (copies, so later pose edits don't alias them).
    mAvt.list_matrices_basis = [bone.matrix_basis.copy() for bone in mAvt.skel.pose.bones]
    mAvt.list_matrices_local = [bone.matrix_local.copy() for bone in mAvt.skel.data.bones]
    # KD-tree over body vertices for nearest-neighbor lookups during cloth deformation.
    vertices = mAvt.body.data.vertices
    mAvt.body_kdtree = mathutils.kdtree.KDTree(len(vertices))
    for index, vert in enumerate(vertices):
        mAvt.body_kdtree.insert(vert.co, index)
    mAvt.body_kdtree.balance()
|
class AVATAR_OT_LoadModel(bpy.types.Operator):
    """Operator: append the parametric body model from the addon's .blend file
    and set it up (caches, collision modifier, skin/eye materials)."""
    bl_idname = 'avt.load_model'
    bl_label = 'Load human model'
    bl_description = 'Loads a parametric naked human model'

    def execute(self, context):
        global mAvt
        global avt_path
        scn = context.scene
        obj = context.active_object
        model_file = '%s/body/models/avatar.blend' % avt_path
        load_model_from_blend_file(model_file)
        # The reference/cache setup below was a verbatim copy of
        # reload_avatar(); call it instead of duplicating ~20 lines.
        reload_avatar()
        bpy.context.view_layer.objects.active = mAvt.body
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.modifier_add(type='COLLISION')
        import material_utils
        importlib.reload(material_utils)
        # Skin material + textures.
        skin_mat = material_utils.create_material_generic('skin', 0, 1)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, 'skin')
        material_utils.assign_textures_generic_mat(mAvt.body, skin_mat, tex_img, tex_norm, tex_spec)
        # Eye material + textures.
        eyes_mat = material_utils.create_material_generic('eyes', 0, 1)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, 'eyes')
        material_utils.assign_textures_generic_mat(mAvt.eyes, eyes_mat, tex_img, tex_norm, tex_spec)
        return {'FINISHED'}
|
class AVATAR_OT_SetBodyShape(bpy.types.Operator):
    """Operator: apply the current shape parameters to the body and re-drape
    every cloth in the scene."""
    bl_idname = 'avt.set_body_shape'
    bl_label = 'Set Body Shape'
    bl_description = 'Set Body Shape'

    def execute(self, context):
        global mAvt
        body = mAvt.body
        # Snapshot the mesh before reshaping so the vertex delta can be computed.
        mAvt.np_mesh_prev = mAvt.read_verts(body.data.copy())
        mAvt.refresh_shape(body)
        mAvt.np_mesh = mAvt.read_verts(body.data)
        mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
        # Propagate the deformation to all non-body meshes (the cloths).
        for ob in bpy.data.objects:
            if ob.type == 'MESH' and ob.name != 'Avatar:Body':
                mAvt.deform_cloth(cloth_name=str(ob.name))
        return {'FINISHED'}
|
class AVATAR_OT_ResetParams(bpy.types.Operator):
    """Operator: zero all body-shape sliders and restore the default body."""
    bl_idname = 'avt.reset_params'
    bl_label = 'Reset Parameters'
    bl_description = 'Reset original parameters of body shape'

    def execute(self, context):
        global mAvt
        body = bpy.data.objects['Avatar:Body']
        # Snapshot the mesh before resetting so the vertex delta can be computed.
        mAvt.np_mesh_prev = mAvt.read_verts(body.data.copy())
        # Same assignment order as the original chained statements; each
        # assignment fires the property's update callback.
        for prop in ('val_breast', 'val_torso', 'val_hips', 'val_limbs', 'val_weight', 'val_strength'):
            setattr(body, prop, 0.0)
        mAvt.refresh_shape(body)
        mAvt.np_mesh = mAvt.read_verts(body.data)
        mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
        for ob in bpy.data.objects:
            if ob.type == 'MESH' and ob.name != 'Avatar:Body':
                mAvt.deform_cloth(cloth_name=str(ob.name))
        return {'FINISHED'}
|
class AVATAR_PT_LoadPanel(bpy.types.Panel):
    """Sidebar panel (View3D > UI > 'Avatar') for loading the human model and
    editing the body-shape sliders."""
    bl_idname = 'AVATAR_PT_LoadPanel'
    bl_label = 'Load model'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'
    # NOTE: these statements run at class-creation time and register the shape
    # sliders on *every* bpy Object as a side effect; each slider change calls
    # update_weights().
    bpy.types.Object.val_breast = FloatProperty(name='Breast Size', description='Breasts Size', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_torso = FloatProperty(name='Shoulders Fat', description='Shoulders Fat', default=0, min=(- 0.3), max=0.3, precision=2, update=update_weights)
    bpy.types.Object.val_limbs = FloatProperty(name='Limbs Fat', description='Limbs Fat', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_hips = FloatProperty(name='Hips Fat', description='Hips Fat', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_weight = FloatProperty(name='Weight', description='Weight', default=0, min=(- 0.5), max=1.5, precision=2, update=update_weights)
    bpy.types.Object.val_strength = FloatProperty(name='Strength', description='Body Strength', default=0, min=0.0, max=0.5, precision=2, update=update_weights)
    def draw(self, context):
        layout = self.layout
        obj = context.object
        scene = context.scene
        row = layout.row()
        row.operator('avt.load_model', text='Load human')
        # Sliders are only shown when a mesh or armature object is active.
        if ((obj is None) or (obj.type not in ['MESH', 'ARMATURE'])):
            return
        layout.separator()
        layout.prop(obj, 'val_breast')
        layout.prop(obj, 'val_torso')
        layout.prop(obj, 'val_limbs')
        layout.prop(obj, 'val_hips')
        layout.prop(obj, 'val_weight')
        layout.prop(obj, 'val_strength')
        layout.separator()
        row = layout.row()
        row.operator('avt.reset_params', text='Reset parameters')
|
class AVATAR_OT_CreateStudio(bpy.types.Operator):
    """Operator: load the lighting-studio setup shipped with the addon."""
    bl_idname = 'avt.create_studio'
    bl_label = 'Create Studio'
    bl_description = 'Set up a lighting studio for high quality renderings'

    def execute(self, context):
        global avt_path
        dressing.load_studio(avt_path)
        return {'FINISHED'}
|
class AVATAR_OT_WearCloth(bpy.types.Operator):
    """Operator: import the cloth selected in the thumbnail gallery, select it
    and assign its material/textures."""
    bl_idname = 'avt.wear_cloth'
    bl_label = 'Wear Cloth'
    bl_description = 'Dress human with selected cloth'

    def execute(self, context):
        global avt_path
        scn = context.scene
        obj = context.active_object
        # The thumbnail identifier is an image file name; strip the extension
        # to get the cloth name.
        iconname = bpy.context.scene.avt_thumbnails.split('.')[0]
        # Deselect everything so only the new cloth ends up selected.
        for o in bpy.context.scene.objects:
            o.select_set(False)
        c_file = '%s/dressing/models/clothes/%s.obj' % (avt_path, iconname)
        dressing.load_cloth(c_file, iconname)
        cloth = bpy.data.objects[iconname]
        cloth.select_set(True)
        import material_utils
        importlib.reload(material_utils)
        cloth_mat = material_utils.create_material_generic(iconname, 0, dressing.get_material_id(iconname))
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, iconname)
        material_utils.assign_textures_generic_mat(cloth, cloth_mat, tex_img, tex_norm, tex_spec)
        return {'FINISHED'}
|
class AVATAR_PT_DressingPanel(bpy.types.Panel):
    """Sidebar panel with the cloth thumbnail browser and studio button."""
    bl_idname = 'AVATAR_PT_DressingPanel'
    bl_label = 'Dress Human'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'

    def draw(self, context):
        layout = self.layout
        obj = context.object
        scn = context.scene
        row = layout.row()
        row.template_icon_view(context.scene, 'avt_thumbnails')
        # Kept from the original: these calls add (empty) layout elements
        # and therefore affect spacing.
        row = layout.row()
        col = row.column()
        cols = col.row()
        layout.row().operator('avt.wear_cloth', text='Load selected cloth')
        layout.separator()
        layout.row().operator('avt.create_studio', text='Create studio')
|
class AVATAR_OT_SetRestPose(bpy.types.Operator):
    """Operator that returns the avatar to its stored rest pose."""
    bl_idname = 'avt.set_rest_pose'
    bl_label = 'Reset Pose'
    bl_options = {'REGISTER'}

    def execute(self, context):
        global mAvt
        # Restore the reference pose and rewind the avatar's frame counter.
        motion_utils.set_rest_pose(mAvt.skel, mAvt.skel_ref, mAvt.list_bones)
        mAvt.frame = 1
        return {'FINISHED'}
|
class AVATAR_OT_LoadBVH(bpy.types.Operator):
    """File-browser operator that retargets a BVH motion onto the model."""
    bl_idname = 'avt.load_bvh'
    bl_label = 'Load BVH'
    bl_description = 'Transfer motion to human model'
    filepath: bpy.props.StringProperty(subtype='FILE_PATH')
    act_x: bpy.props.BoolProperty(name='X')
    act_y: bpy.props.BoolProperty(name='Y')
    act_z: bpy.props.BoolProperty(name='Z')

    def invoke(self, context, event):
        # Open the file browser; execute() runs after a file is chosen.
        bpy.context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}

    def execute(self, context):
        global avt_path
        global mAvt
        scn = context.scene
        obj = context.active_object
        file_path_bvh = self.filepath
        # Rig-specific bone mapping file shipped with the add-on.
        bone_corresp_file = ('%s/motion/rigs/%s.txt' % (avt_path, scn.skel_rig))
        if obj is None:
            print('Please, select a model to transfer the bvh action')
        else:
            retarget.retarget_addon(bone_corresp_file, file_path_bvh, obj, scn.skel_rig)
        return {'FINISHED'}
|
class AVATAR_PT_MotionPanel(bpy.types.Panel):
    """Sidebar panel exposing rest pose, rig selection and BVH loading."""
    bl_idname = 'AVATAR_PT_MotionPanel'
    bl_label = 'Motion'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'
    # NOTE(review): these register per-object properties as a side effect
    # of class creation — kept exactly as in the original.
    bpy.types.Object.bvh_offset = IntProperty(name='Offset', description='Start motion offset', default=0, min=0, max=250)
    bpy.types.Object.bvh_start_origin = BoolProperty(name='Origin', description='Start at origin', default=False)

    def draw(self, context):
        layout = self.layout
        obj = context.object
        wm = context.window_manager
        layout.operator('avt.set_rest_pose', text='Reset pose')
        layout.prop(context.scene, 'skel_rig', text='')
        layout.operator('avt.load_bvh', text='Load BVH')
|
def enum_menu_items():
    """Build EnumProperty items, one per rig mapping file in motion/rigs.

    Returns:
        list[tuple]: (identifier, name, description, number) tuples as
        expected by ``bpy.props.EnumProperty``.
    """
    global avt_path
    rigs_folder = ('%s/motion/rigs' % avt_path)
    rigs_names = [f for f in os.listdir(rigs_folder) if f.endswith('.txt')]
    menu_items = []
    # Enum numbers start at 1, matching the original manual counter.
    # splitext (instead of the original split('.')[0]) keeps the full stem
    # of file names that contain extra dots.
    for i, rig in enumerate(rigs_names, start=1):
        name = os.path.splitext(rig)[0]
        menu_items.append((name, name, '', i))
    return menu_items
|
def register():
    """Register the preview collection, scene properties and add-on classes."""
    gcoll = bpy.utils.previews.new()
    gcoll.images_location = ('%s/dressing/cloth_previews' % avt_path)
    avt_preview_collections['thumbnail_previews'] = gcoll
    # Scene-level enums: cloth thumbnails and rig selection.
    bpy.types.Scene.avt_thumbnails = EnumProperty(items=generate_previews())
    bpy.types.Scene.skel_rig = bpy.props.EnumProperty(items=enum_menu_items())
    for clas in classes:
        bpy.utils.register_class(clas)
|
def unregister():
    """Undo everything register() set up."""
    for clas in classes:
        bpy.utils.unregister_class(clas)
    # Release thumbnail preview collections before removing the properties.
    for gcoll in avt_preview_collections.values():
        bpy.utils.previews.remove(gcoll)
    avt_preview_collections.clear()
    del bpy.types.Scene.avt_thumbnails
    del bpy.types.Scene.skel_rig
|
def read_eigenbody(filename):
    """Read one float per line from *filename* into a 1-D array.

    Args:
        filename (str): Path to a text file with one number per line.

    Returns:
        np.ndarray: 1-D array of the parsed values.
    """
    # Context manager closes the handle even on a parse error (the
    # original left the file open).
    eigenbody = []
    with open(filename, 'r') as f_eigen:
        for line in f_eigen:
            eigenbody.append(float(line))
    return np.array(eigenbody)
|
def compose_vertices_eigenmat(eigenmat):
    """Regroup a flat coordinate vector into (x, -z, y) vertex triples.

    The axis swap presumably converts the eigen-body axis convention to
    Blender's z-up convention — TODO confirm against the model data.
    """
    triples = [
        [eigenmat[i], -eigenmat[i + 2], eigenmat[i + 1]]
        for i in range(0, len(eigenmat), 3)
    ]
    return np.array(triples)
|
def get_material_id(name_cloth):
    """Return the material class id registered for *name_cloth*.

    Raises ValueError (from list.index) when the cloth name is unknown.
    """
    return cloth_class[clthlst.index(name_cloth)]
|
def load_cloth(cloth_file, cloth_name):
    # Import the OBJ; the importer leaves the new object selected, so
    # selected_objects[0] is the cloth just loaded. Rename both the object
    # and its mesh data so they can be looked up by name later.
    bpy.ops.import_scene.obj(filepath=cloth_file)
    bpy.context.selected_objects[0].name = cloth_name
    bpy.context.selected_objects[0].data.name = cloth_name
    b = bpy.data.objects[cloth_name]
    b.select_set(True)
    bpy.context.view_layer.objects.active = b
    bpy.ops.object.mode_set(mode='OBJECT')
    # If the avatar rig exists, parent the cloth to it with automatic
    # weights so the cloth follows the armature's motion.
    if (bpy.data.objects.get('Avatar') is not None):
        a = bpy.data.objects['Avatar']
        b = bpy.data.objects[cloth_name]
        a.select_set(True)
        b.select_set(True)
        # The active object becomes the parent for parent_set().
        bpy.context.view_layer.objects.active = a
        bpy.ops.object.parent_set(type='ARMATURE_AUTO')
    # Leave the scene with nothing selected.
    for obj in bpy.data.objects:
        obj.select_set(False)
|
def read_file_textures(root_path, fold_name):
    """Read a cloth's ``default.txt`` manifest and resolve texture paths.

    The manifest lists 1-3 file names: the color map, then optionally a
    normal map and a specular map.

    Args:
        root_path (str): Add-on root directory.
        fold_name (str): Texture folder name (the cloth name).

    Returns:
        tuple: (color, normal, specular) paths; missing entries are None.
    """
    tex_dir = ('%s/dressing/textures/%s' % (root_path, fold_name))
    # Context manager replaces the original manual open/close pair.
    with open(('%s/default.txt' % tex_dir), 'r') as ftex:
        lines = [line.strip() for line in ftex]
    if not 1 <= len(lines) <= 3:
        print('Error reading default texture file')
        return (None, None, None)
    # Resolve each listed name, then pad to three entries so the result
    # always unpacks as (color, normal, specular).
    paths = [('%s/%s' % (tex_dir, name)) for name in lines]
    paths += [None] * (3 - len(paths))
    return tuple(paths)
|
def load_studio(root_path):
    """Import the studio floor plane and build a camera plus three sun lights."""
    s_file = ('%s/dressing/models/studio_plane.obj' % root_path)
    bpy.ops.import_scene.obj(filepath=s_file)
    bpy.context.selected_objects[0].name = 'studio_plane'
    bpy.context.selected_objects[0].data.name = 'studio_plane'
    # Select every existing camera and light so they get deleted below,
    # leaving a clean stage before the new rig is created.
    for o in bpy.context.scene.objects:
        if (o.type == 'CAMERA'):
            o.select_set(True)
        elif (o.type == 'LIGHT'):
            o.select_set(True)
        else:
            o.select_set(False)
    bpy.ops.object.delete()
    # Camera placed in front of the stage, looking horizontally (+Y).
    cam_data = bpy.data.cameras.new('CameraData')
    cam_object = bpy.data.objects.new(name='Camera', object_data=cam_data)
    bpy.context.collection.objects.link(cam_object)
    cam_object.location = (0, (- 66.2), 9.28)
    cam_object.rotation_euler = (math.radians(90), 0, 0)
    # Three-point lighting: fill, back and key sun lamps at fixed
    # positions/angles tuned for the studio plane.
    fill_data = bpy.data.lights.new(name='FillData', type='SUN')
    fill_data.energy = 1
    fill_object = bpy.data.objects.new(name='fill', object_data=fill_data)
    bpy.context.collection.objects.link(fill_object)
    bpy.context.view_layer.objects.active = fill_object
    fill_object.location = (32.29, (- 25.6), 48.17)
    fill_object.rotation_euler = (math.radians((- 15)), math.radians(30), math.radians((- 14)))
    back_data = bpy.data.lights.new(name='BackData', type='SUN')
    back_data.energy = 1
    back_object = bpy.data.objects.new(name='back', object_data=back_data)
    bpy.context.collection.objects.link(back_object)
    bpy.context.view_layer.objects.active = back_object
    back_object.location = (33.46, 46.93, 41.5)
    back_object.rotation_euler = (math.radians(45), math.radians((- 23)), math.radians(31))
    key_data = bpy.data.lights.new(name='KeyData', type='SUN')
    key_data.energy = 1
    key_object = bpy.data.objects.new(name='key', object_data=key_data)
    bpy.context.collection.objects.link(key_object)
    bpy.context.view_layer.objects.active = key_object
    key_object.location = ((- 36.88), (- 30.55), 49.1)
    key_object.rotation_euler = (math.radians(14), math.radians((- 54)), math.radians(11))
    # Force a dependency-graph update so the new objects are evaluated.
    dg = bpy.context.evaluated_depsgraph_get()
    dg.update()
|
def create_material_generic(matname, index, matid):
    """Create (or fetch) a node material named '<matname>_mat<index>'.

    Wires three image-texture slots (color, normal, specular) into a
    Principled BSDF; *matid* is stored as the material pass index.
    """
    # Remove any material whose name contains 'Default' (importer leftovers).
    for m in bpy.data.materials:
        if ('Default' in m.name):
            bpy.data.materials.remove(m)
    mat_name = ('%s_mat%02d' % (matname, index))
    skinMat = (bpy.data.materials.get(mat_name) or bpy.data.materials.new(mat_name))
    skinMat.pass_index = matid
    skinMat.use_nodes = True
    skinMat.node_tree.nodes.clear()
    # Create the three image slots. Their auto-generated names
    # ('Image Texture', '.001', '.002' in creation order) are relied
    # upon by assign_textures_generic_mat().
    tex_image = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_image.location = (0, 0)
    tex_norm = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_norm.location = (0, (- 600))
    tex_spec = skinMat.node_tree.nodes.new(type='ShaderNodeTexImage')
    tex_spec.location = (0, (- 300))
    norm_map = skinMat.node_tree.nodes.new(type='ShaderNodeNormalMap')
    norm_map.location = (300, (- 600))
    principled = skinMat.node_tree.nodes.new(type='ShaderNodeBsdfPrincipled')
    principled.location = (600, 0)
    output = skinMat.node_tree.nodes.new(type='ShaderNodeOutputMaterial')
    output.location = (1000, 0)
    # Wiring: color -> base color; normal map (via normal-map node) ->
    # normal; specular map -> specular; BSDF -> surface output.
    skinMat.node_tree.links.new(tex_image.outputs['Color'], principled.inputs['Base Color'])
    skinMat.node_tree.links.new(tex_norm.outputs['Color'], norm_map.inputs['Color'])
    skinMat.node_tree.links.new(norm_map.outputs['Normal'], principled.inputs['Normal'])
    skinMat.node_tree.links.new(tex_spec.outputs['Color'], principled.inputs['Specular'])
    skinMat.node_tree.links.new(principled.outputs['BSDF'], output.inputs['Surface'])
    return skinMat
|
def assign_textures_generic_mat(body, cmat, tex_img, tex_norm, tex_spec):
    """Load the given texture files and plug them into cmat's image nodes.

    body: mesh object receiving the material.
    cmat: node-based material created by create_material_generic().
    tex_img / tex_norm / tex_spec: file paths, or None to skip a slot.
    """
    body.select_set(True)
    # Ensure the object has at least one material slot to hold cmat.
    if (len(body.material_slots) == 0):
        bpy.context.view_layer.objects.active = body
        bpy.ops.object.material_slot_add()
    body.material_slots[0].material = cmat
    img_tex_img = img_tex_norm = img_tex_spec = None
    # Reuse an already-loaded image datablock with the same basename if
    # one exists; otherwise load the file from disk.
    if (tex_img is not None):
        img_name = os.path.basename(tex_img)
        img_tex_img = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_img))
    if (tex_norm is not None):
        img_name = os.path.basename(tex_norm)
        img_tex_norm = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_norm))
    if (tex_spec is not None):
        img_name = os.path.basename(tex_spec)
        img_tex_spec = (bpy.data.images.get(img_name) or bpy.data.images.load(tex_spec))
    matnodes = cmat.node_tree.nodes
    # The nodes keep Blender's auto names from create_material_generic():
    # 'Image Texture' = color, '.001' = normal, '.002' = specular.
    for n in matnodes:
        if (n.type == 'NORMAL_MAP'):
            matnodes.active = n
            n.select = True
            # Input 0 is the normal-map strength.
            n.inputs[0].default_value = 1.0
        if (n.type == 'TEX_IMAGE'):
            if (n.name == 'Image Texture'):
                if (img_tex_img is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_img
            if (n.name == 'Image Texture.001'):
                if (img_tex_norm is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_norm
                    # Normal maps store vectors, not colors.
                    n.image.colorspace_settings.name = 'Non-Color'
            if (n.name == 'Image Texture.002'):
                if (img_tex_spec is not None):
                    matnodes.active = n
                    n.select = True
                    n.image = img_tex_spec
                    n.image.colorspace_settings.name = 'Non-Color'
    body.select_set(False)
|
def read_text_lines(filename):
    """Parse a bone-correspondence file into [source, target] pairs.

    Each line holds "src dst" or just "src"; a missing target maps to the
    sentinel string 'none'.

    Args:
        filename (str): Path of the mapping file.

    Returns:
        list[list[str]]: One [source_bone, target_bone] pair per line.
    """
    list_bones = []
    # Context manager closes the handle (the original leaked it); blank
    # lines are skipped instead of raising IndexError.
    with open(filename, 'r') as text_file:
        for line in text_file:
            parts = line.split()
            if not parts:
                continue
            if len(parts) == 2:
                list_bones.append([parts[0], parts[1]])
            else:
                list_bones.append([parts[0], 'none'])
    return list_bones
|
def find_bone_match(list_bones, bone_name):
    """Return the target bone mapped to *bone_name*, or 'none' if absent."""
    for source, target in list_bones:
        if source == bone_name:
            return target
    return 'none'
|
def matrix_scale(scale_vec):
    # Build a 4x4 homogeneous scaling matrix from a 3-component scale vector.
    return Matrix([[scale_vec[0], 0, 0, 0], [0, scale_vec[1], 0, 0], [0, 0, scale_vec[2], 0], [0, 0, 0, 1]])
|
def matrix_for_bone_from_parent(bone, ao):
    # Rest-pose matrix of `bone` expressed in its parent's local space.
    eb1 = ao.data.bones[bone.name]
    E = eb1.matrix_local  # bone rest matrix in armature (object) space
    ebp = ao.data.bones[bone.name].parent
    E_p = ebp.matrix_local  # parent rest matrix in armature space
    # NOTE(review): raises AttributeError for root bones (parent is None).
    return (E_p.inverted() @ E)
|
def matrix_the_hard_way(pose_bone, ao):
    # Recompute the pose bone's armature-space matrix by composing local
    # transforms up the parent chain — presumably to avoid relying on
    # pose_bone.matrix before a depsgraph update (TODO confirm).
    if (pose_bone.rotation_mode == 'QUATERNION'):
        mr = pose_bone.rotation_quaternion.to_matrix().to_4x4()
    else:
        mr = pose_bone.rotation_euler.to_matrix().to_4x4()
    # Local TRS of the pose bone: translation @ rotation @ scale.
    m1 = ((Matrix.Translation(pose_bone.location) @ mr) @ matrix_scale(pose_bone.scale))
    E = ao.data.bones[pose_bone.name].matrix_local
    if (pose_bone.parent is None):
        # Root bone: rest matrix times the local pose transform.
        return (E @ m1)
    else:
        # Recurse: parent pose matrix, relative rest offset, then local TRS.
        m2 = matrix_the_hard_way(pose_bone.parent, ao)
        E_p = ao.data.bones[pose_bone.parent.name].matrix_local
        return (((m2 @ E_p.inverted()) @ E) @ m1)
|
def worldMatrix(ArmatureObject, Bone):
    """Return the world-space matrix of pose bone *Bone*.

    Args:
        ArmatureObject (bpy.types.Object): The armature object.
        Bone (str): Name of the pose bone.

    Returns:
        Matrix: World matrix of the bone.
    """
    _bone = ArmatureObject.pose.bones[Bone]
    # Blender 2.8+ composes matrices with '@'; the original used '*',
    # which no longer performs matrix multiplication and is inconsistent
    # with every other matrix product in this file.
    return (ArmatureObject.matrix_world @ _bone.matrix)
|
def pose_to_match(arm, goal, bc):
    """Pose *arm* so its bones line up with the REST pose of *goal*.

    Args:
        arm: Armature object to pose.
        goal: Armature whose rest pose is the target.
        bc: Bone-correspondence list as returned by read_text_lines().
    """
    # Cache the goal's bone matrices (armature space) for mapped bones.
    # The original compared with "is not 'none'", which tests object
    # identity rather than string equality; fixed to '!='.
    matrix_os = {}
    for bone in arm.data.bones:
        bone_match = find_bone_match(bc, bone.name)
        if bone_match != 'none':
            ebp = goal.pose.bones[bone_match]
            matrix_os[bone_match] = matrix_the_hard_way(ebp, goal)
    print('DEBUG')
    for to_pose in arm.pose.bones:
        bone_match = find_bone_match(bc, to_pose.name)
        if bone_match != 'none':
            goal_bone = bone_match
            if to_pose.parent is None:
                len2 = arm.data.bones[to_pose.name].length
                len1 = goal.data.bones[goal_bone].length
                print(goal_bone)
                # Root bone: bring the goal matrix into this bone's space.
                m1 = ((arm.matrix_world @ matrix_os[goal_bone]) @ to_pose.bone.matrix_local)
                (loc, rot, scale) = m1.decompose()
            else:
                # Non-root: compare against the parent chain's pose matrix.
                mp = (matrix_the_hard_way(to_pose.parent, arm) @ matrix_for_bone_from_parent(to_pose, arm))
                print(mp)
                m2 = (mp.inverted() @ matrix_os[goal_bone])
                (loc, rot, scale) = m2.decompose()
            if ('QUATERNION' == to_pose.rotation_mode):
                to_pose.rotation_quaternion = rot
                key_path = 'rotation_quaternion'
            else:
                to_pose.rotation_euler = rot.to_euler(to_pose.rotation_mode)
                key_path = 'rotation_euler'
            print('last debug')
            print(rot)
            # Keyframe the channel that was actually set (the original
            # always keyed rotation_euler, losing quaternion rotations).
            to_pose.keyframe_insert(key_path, frame=1, group=to_pose.name)
|
def set_rest_pose(skeleton):
    """Zero every pose bone's rotation, switching bones to XYZ Euler mode."""
    for pose_bone in skeleton.pose.bones:
        pose_bone.rotation_mode = 'XYZ'
        pose_bone.rotation_euler = (0, 0, 0)
|
def set_hips_origin(skeleton, hips_name):
    """Move the named hips pose bone back to the armature origin."""
    skeleton.pose.bones[hips_name].location = (0, 0, 0)
|
def find_scale_factor(skel, trg_skel, hips_name_skel, hips_name_target):
    """Return the height ratio target-hips-z / source-hips-z in world space."""
    src_hips = (skel.matrix_world @ Matrix.Translation(skel.pose.bones[hips_name_skel].head)).to_translation()
    trg_hips = (trg_skel.matrix_world @ Matrix.Translation(trg_skel.pose.bones[hips_name_target].head)).to_translation()
    # Debug output kept from the original implementation.
    print(src_hips)
    print(trg_hips)
    return trg_hips[2] / src_hips[2]
|
def read_text_lines(filename):
    """Parse a bone-correspondence file into [source, target] pairs.

    NOTE(review): duplicate of the earlier read_text_lines definition in
    this file; consider removing one copy.

    Args:
        filename (str): Path of the mapping file.

    Returns:
        list[list[str]]: One [source_bone, target_bone] pair per line;
        a missing target maps to 'none'.
    """
    list_bones = []
    # Context manager closes the handle (the original leaked it); blank
    # lines are skipped instead of raising IndexError.
    with open(filename, 'r') as text_file:
        for line in text_file:
            parts = line.split()
            if not parts:
                continue
            if len(parts) == 2:
                list_bones.append([parts[0], parts[1]])
            else:
                list_bones.append([parts[0], 'none'])
    return list_bones
|
def find_bone_match(list_bones, bone_name):
    """Return the target bone mapped to *bone_name*, or 'none' if absent.

    NOTE(review): duplicate of the earlier find_bone_match in this file.
    """
    for source, target in list_bones:
        if source == bone_name:
            return target
    return 'none'
|
def check_installation():
    """Check whether mmcv-full has been installed successfully."""
    np_boxes1 = np.asarray([[1.0, 1.0, 3.0, 4.0, 0.5], [2.0, 2.0, 3.0, 4.0, 0.6], [7.0, 7.0, 8.0, 8.0, 0.4]], dtype=np.float32)
    np_boxes2 = np.asarray([[0.0, 2.0, 2.0, 5.0, 0.3], [2.0, 1.0, 3.0, 3.0, 0.5], [5.0, 5.0, 6.0, 7.0, 0.4]], dtype=np.float32)
    boxes1 = torch.from_numpy(np_boxes1)
    boxes2 = torch.from_numpy(np_boxes2)
    # A successful call proves the compiled CPU extension loads and runs.
    box_iou_rotated(boxes1, boxes2)
    print('CPU ops were compiled successfully.')
    if not torch.cuda.is_available():
        print('No CUDA runtime is found, skipping the checking of CUDA ops.')
        return
    # Repeat on the GPU to exercise the CUDA kernels.
    box_iou_rotated(boxes1.cuda(), boxes2.cuda())
    print('CUDA ops were compiled successfully.')
|
class Model(nn.Module):
    """Small LeNet-style CNN producing 10 logits from 3x32x32 inputs."""

    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, x):
        # Two conv+pool stages, then flatten into the fully-connected head.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

    def train_step(self, data, optimizer):
        """Run one forward pass and return {'loss': ...} (optimizer unused)."""
        images, labels = data
        return {'loss': self.loss_fn(self(images), labels)}
|
def quantize(arr, min_val, max_val, levels, dtype=np.int64):
    """Quantize an array of (-inf, inf) to [0, levels-1].

    Args:
        arr (ndarray): Input array.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the quantized array.

    Returns:
        ndarray: Quantized array.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError(f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})')
    # Shift the clipped values to start at zero, bin them, then cap the
    # top edge (values equal to max_val) into the last bin.
    shifted = np.clip(arr, min_val, max_val) - min_val
    span = max_val - min_val
    bins = np.floor(levels * shifted / span).astype(dtype)
    return np.minimum(bins, levels - 1)
|
def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
    """Dequantize an array.

    Args:
        arr (ndarray): Input array of bin indices.
        min_val (scalar): Minimum value to be clipped.
        max_val (scalar): Maximum value to be clipped.
        levels (int): Quantization levels.
        dtype (np.type): The type of the dequantized array.

    Returns:
        ndarray: Dequantized array.
    """
    if not isinstance(levels, int) or levels <= 1:
        raise ValueError(f'levels must be a positive integer, but got {levels}')
    if min_val >= max_val:
        raise ValueError(f'min_val ({min_val}) must be smaller than max_val ({max_val})')
    # Map each bin index to its bin center, then scale back into range.
    span = max_val - min_val
    return (arr + 0.5).astype(dtype) * span / levels + min_val
|
class AlexNet(nn.Module):
    """AlexNet backbone.

    Args:
        num_classes (int): number of classes for classification. A
            non-positive value omits the classifier head, and forward()
            returns the raw convolutional features.
    """

    def __init__(self, num_classes=-1):
        super(AlexNet, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        if self.num_classes > 0:
            self.classifier = nn.Sequential(
                nn.Dropout(),
                nn.Linear(256 * 6 * 6, 4096),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(4096, 4096),
                nn.ReLU(inplace=True),
                nn.Linear(4096, num_classes),
            )

    def init_weights(self, pretrained=None):
        """Optionally load weights from the checkpoint path *pretrained*."""
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            from ..runner import load_checkpoint
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is not None:
            raise TypeError('pretrained must be a str or None')

    def forward(self, x):
        feats = self.features(x)
        if self.num_classes <= 0:
            return feats
        # Flatten the 256x6x6 feature map for the fully-connected head.
        flat = feats.view(feats.size(0), 256 * 6 * 6)
        return self.classifier(flat)
|
@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    Clamps the feature map values into ``[min, max]``; see
    ``torch.clamp()`` for details.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Default to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Default to 1.
    """

    def __init__(self, min=-1.0, max=1.0):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Return *x* clamped element-wise into ``[self.min, self.max]``."""
        return x.clamp(min=self.min, max=self.max)
|
class GELU(nn.Module):
    'Applies the Gaussian Error Linear Units function:\n\n .. math::\n \\text{GELU}(x) = x * \\Phi(x)\n where :math:`\\Phi(x)` is the Cumulative Distribution Function for\n Gaussian Distribution.\n\n Shape:\n - Input: :math:`(N, *)` where `*` means, any number of additional\n dimensions\n - Output: :math:`(N, *)`, same shape as the input\n\n .. image:: scripts/activation_images/GELU.png\n\n Examples::\n\n >>> m = nn.GELU()\n >>> input = torch.randn(2)\n >>> output = m(input)\n '
    def forward(self, input):
        # Stateless: delegate directly to the functional implementation.
        return F.gelu(input)
|
def build_activation_layer(cfg):
    'Build activation layer.\n\n Args:\n cfg (dict): The activation layer config, which should contain:\n\n - type (str): Layer type.\n - layer args: Args needed to instantiate an activation layer.\n\n Returns:\n nn.Module: Created activation layer.\n '
    # Dispatch through the shared registry so custom activations
    # registered with ACTIVATION_LAYERS are constructible by name.
    return build_from_cfg(cfg, ACTIVATION_LAYERS)
|
def last_zero_init(m):
    """Zero-initialize *m*, or its final layer when it is an nn.Sequential."""
    target = m[-1] if isinstance(m, nn.Sequential) else m
    constant_init(target, val=0)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.