| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def split_coeff(self, coeffs):
"""
Return:
coeffs_dict -- a dict of torch.tensors
Parameters:
            coeffs -- torch.tensor, size (B, 257)
"""
id_coeffs = coeffs[:, :80]
exp_coeffs = coeffs[:, 80: 144]
tex_coeffs = coeffs[:, 144: 224]
angles = coeffs[:, 224: 227]
gammas = coeffs[:, 227: 254]
translations = coeffs[:, 254:]
return {
'id': id_coeffs,
'exp': exp_coeffs,
'tex': tex_coeffs,
'angle': angles,
'gamma': gammas,
'trans': translations
}
|
Return:
coeffs_dict -- a dict of torch.tensors
Parameters:
    coeffs -- torch.tensor, size (B, 257)
|
split_coeff
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
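A quick sanity check on the coefficient layout split_coeff assumes (a minimal sketch, not part of the repo): 80 identity + 64 expression + 80 texture + 3 angle + 27 gamma + 3 translation = 257 dimensions, matching the (B, 257) size quoted by compute_for_render below.

```python
import torch

coeffs = torch.randn(4, 257)  # B = 4 dummy predictions
slices = {'id': (0, 80), 'exp': (80, 144), 'tex': (144, 224),
          'angle': (224, 227), 'gamma': (227, 254), 'trans': (254, 257)}
parts = {k: coeffs[:, a:b] for k, (a, b) in slices.items()}
# the six slices tile the 257-dim vector exactly
assert sum(v.shape[1] for v in parts.values()) == 257
```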
def compute_for_render(self, coeffs):
"""
Return:
face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate
face_color -- torch.tensor, size (B, N, 3), in RGB order
landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
Parameters:
coeffs -- torch.tensor, size (B, 257)
"""
coef_dict = self.split_coeff(coeffs)
face_shape = self.compute_shape(coef_dict['id'], coef_dict['exp'])
rotation = self.compute_rotation(coef_dict['angle'])
face_shape_transformed = self.transform(face_shape, rotation, coef_dict['trans'])
face_vertex = self.to_camera(face_shape_transformed)
face_proj = self.to_image(face_vertex)
landmark = self.get_landmarks(face_proj)
face_texture = self.compute_texture(coef_dict['tex'])
face_norm = self.compute_norm(face_shape)
face_norm_roted = face_norm @ rotation
face_color = self.compute_color(face_texture, face_norm_roted, coef_dict['gamma'])
return face_vertex, face_texture, face_color, landmark
|
Return:
face_vertex -- torch.tensor, size (B, N, 3), in camera coordinate
face_color -- torch.tensor, size (B, N, 3), in RGB order
landmark -- torch.tensor, size (B, 68, 2), y direction is opposite to v direction
Parameters:
coeffs -- torch.tensor, size (B, 257)
|
compute_for_render
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/bfm.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/bfm.py
|
Apache-2.0
|
def modify_commandline_options(parser, is_train=True):
""" Configures options specific for CUT model
"""
# net structure and parameters
parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='network structure')
parser.add_argument('--init_path', type=str, default='checkpoints/init_model/resnet50-0676ba61.pth')
parser.add_argument('--use_last_fc', type=util.str2bool, nargs='?', const=True, default=False, help='zero initialize the last fc')
parser.add_argument('--bfm_folder', type=str, default='BFM')
parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='bfm model')
# renderer parameters
parser.add_argument('--focal', type=float, default=1015.)
parser.add_argument('--center', type=float, default=112.)
parser.add_argument('--camera_d', type=float, default=10.)
parser.add_argument('--z_near', type=float, default=5.)
parser.add_argument('--z_far', type=float, default=15.)
if is_train:
# training parameters
            parser.add_argument('--net_recog', type=str, default='r50', choices=['r18', 'r34', 'r50'], help='face recog network structure')
parser.add_argument('--net_recog_path', type=str, default='checkpoints/recog_model/ms1mv3_arcface_r50_fp16/backbone.pth')
parser.add_argument('--use_crop_face', type=util.str2bool, nargs='?', const=True, default=False, help='use crop mask for photo loss')
parser.add_argument('--use_predef_M', type=util.str2bool, nargs='?', const=True, default=False, help='use predefined M for predicted face')
# augmentation parameters
parser.add_argument('--shift_pixs', type=float, default=10., help='shift pixels')
parser.add_argument('--scale_delta', type=float, default=0.1, help='delta scale factor')
parser.add_argument('--rot_angle', type=float, default=10., help='rot angles, degree')
# loss weights
parser.add_argument('--w_feat', type=float, default=0.2, help='weight for feat loss')
            parser.add_argument('--w_color', type=float, default=1.92, help='weight for color loss')
parser.add_argument('--w_reg', type=float, default=3.0e-4, help='weight for reg loss')
parser.add_argument('--w_id', type=float, default=1.0, help='weight for id_reg loss')
parser.add_argument('--w_exp', type=float, default=0.8, help='weight for exp_reg loss')
parser.add_argument('--w_tex', type=float, default=1.7e-2, help='weight for tex_reg loss')
parser.add_argument('--w_gamma', type=float, default=10.0, help='weight for gamma loss')
parser.add_argument('--w_lm', type=float, default=1.6e-3, help='weight for lm loss')
parser.add_argument('--w_reflc', type=float, default=5.0, help='weight for reflc loss')
opt, _ = parser.parse_known_args()
parser.set_defaults(
focal=1015., center=112., camera_d=10., use_last_fc=False, z_near=5., z_far=15.
)
if is_train:
parser.set_defaults(
use_crop_face=True, use_predef_M=False
)
return parser
|
Configures options specific for the face reconstruction model
|
modify_commandline_options
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/facerecon_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
|
Apache-2.0
|
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
"""
BaseModel.__init__(self, opt) # call the initialization method of BaseModel
self.visual_names = ['output_vis']
self.model_names = ['net_recon']
self.parallel_names = self.model_names + ['renderer']
self.net_recon = networks.define_net_recon(
net_recon=opt.net_recon, use_last_fc=opt.use_last_fc, init_path=opt.init_path
)
self.facemodel = ParametricFaceModel(
bfm_folder=opt.bfm_folder, camera_distance=opt.camera_d, focal=opt.focal, center=opt.center,
is_train=self.isTrain, default_name=opt.bfm_model
)
fov = 2 * np.arctan(opt.center / opt.focal) * 180 / np.pi
self.renderer = MeshRenderer(
rasterize_fov=fov, znear=opt.z_near, zfar=opt.z_far, rasterize_size=int(2 * opt.center)
)
if self.isTrain:
self.loss_names = ['all', 'feat', 'color', 'lm', 'reg', 'gamma', 'reflc']
self.net_recog = networks.define_net_recog(
net_recog=opt.net_recog, pretrained_path=opt.net_recog_path
)
# loss func name: (compute_%s_loss) % loss_name
self.compute_feat_loss = perceptual_loss
            self.compute_color_loss = photo_loss
self.compute_lm_loss = landmark_loss
self.compute_reg_loss = reg_loss
self.compute_reflc_loss = reflectance_loss
self.optimizer = torch.optim.Adam(self.net_recon.parameters(), lr=opt.lr)
self.optimizers = [self.optimizer]
self.parallel_names += ['net_recog']
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
|
Initialize this model class.
Parameters:
opt -- training/test options
A few things can be done here.
- (required) call the initialization function of BaseModel
- define loss function, visualization images, model names, and optimizers
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/facerecon_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
|
Apache-2.0
|
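As a worked check of the field-of-view formula used above (a sketch assuming the default focal=1015 and center=112 from modify_commandline_options):

```python
import numpy as np

focal, center = 1015., 112.
fov = 2 * np.arctan(center / focal) * 180 / np.pi  # same formula as __init__
print(round(fov, 2))  # ~12.59 degrees for the 224x224 (2 * center) render target
```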
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.input_img = input['imgs'].to(self.device)
self.atten_mask = input['msks'].to(self.device) if 'msks' in input else None
self.gt_lm = input['lms'].to(self.device) if 'lms' in input else None
self.trans_m = input['M'].to(self.device) if 'M' in input else None
self.image_paths = input['im_paths'] if 'im_paths' in input else None
|
Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
|
set_input
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/facerecon_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
|
Apache-2.0
|
def compute_losses(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
assert self.net_recog.training == False
trans_m = self.trans_m
if not self.opt.use_predef_M:
trans_m = estimate_norm_torch(self.pred_lm, self.input_img.shape[-2])
pred_feat = self.net_recog(self.pred_face, trans_m)
gt_feat = self.net_recog(self.input_img, self.trans_m)
self.loss_feat = self.opt.w_feat * self.compute_feat_loss(pred_feat, gt_feat)
face_mask = self.pred_mask
if self.opt.use_crop_face:
face_mask, _, _ = self.renderer(self.pred_vertex, self.facemodel.front_face_buf)
face_mask = face_mask.detach()
        self.loss_color = self.opt.w_color * self.compute_color_loss(
self.pred_face, self.input_img, self.atten_mask * face_mask)
loss_reg, loss_gamma = self.compute_reg_loss(self.pred_coeffs_dict, self.opt)
self.loss_reg = self.opt.w_reg * loss_reg
self.loss_gamma = self.opt.w_gamma * loss_gamma
self.loss_lm = self.opt.w_lm * self.compute_lm_loss(self.pred_lm, self.gt_lm)
self.loss_reflc = self.opt.w_reflc * self.compute_reflc_loss(self.pred_tex, self.facemodel.skin_mask)
self.loss_all = self.loss_feat + self.loss_color + self.loss_reg + self.loss_gamma \
+ self.loss_lm + self.loss_reflc
|
Calculate losses; called in every training iteration.
|
compute_losses
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/facerecon_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/facerecon_model.py
|
Apache-2.0
|
    def forward(self, imageA, imageB, M):
"""
1 - cosine distance
Parameters:
        imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
"""
imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size))
imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size))
# freeze bn
self.recog_net.eval()
id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2)
id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2)
cosine_d = torch.sum(id_featureA * id_featureB, dim=-1)
# assert torch.sum((cosine_d > 1).float()) == 0
return torch.sum(1 - cosine_d) / cosine_d.shape[0]
|
1 - cosine distance
Parameters:
    imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/losses.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
|
Apache-2.0
|
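The loss above is the mean of 1 - cosine similarity between L2-normalized identity embeddings. A self-contained sketch of the same computation on dummy embeddings (not the repo's code; the 512-dim size is the usual ArcFace embedding size):

```python
import torch
import torch.nn.functional as F

featA = F.normalize(torch.randn(8, 512), dim=-1, p=2)  # unit-norm embeddings
featB = F.normalize(torch.randn(8, 512), dim=-1, p=2)
cosine = torch.sum(featA * featB, dim=-1)              # per-sample cosine in [-1, 1]
loss = torch.sum(1 - cosine) / cosine.shape[0]         # identical to the return above
```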
def photo_loss(imageA, imageB, mask, eps=1e-6):
"""
    l2 norm (with sqrt; to ensure backward stability, use eps, otherwise NaN may occur)
Parameters:
imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
"""
    loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdim=True)) * mask
loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device))
return loss
|
l2 norm (with sqrt; to ensure backward stability, use eps, otherwise NaN may occur)
Parameters:
imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order
imageB --same as imageA
|
photo_loss
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/losses.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
|
Apache-2.0
|
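Minimal usage sketch for photo_loss (assumes the function above is in scope); the mask restricts the loss to rendered-face pixels and eps keeps sqrt's gradient finite at zero:

```python
import torch

imageA = torch.rand(2, 3, 224, 224)      # rendered faces
imageB = torch.rand(2, 3, 224, 224)      # input photos
mask = torch.ones(2, 1, 224, 224)        # 1 where the face is rendered
loss = photo_loss(imageA, imageB, mask)  # scalar mean per-pixel L2 norm
```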
def landmark_loss(predict_lm, gt_lm, weight=None):
"""
weighted mse loss
Parameters:
predict_lm --torch.tensor (B, 68, 2)
gt_lm --torch.tensor (B, 68, 2)
weight --numpy.array (1, 68)
"""
    if weight is None:
weight = np.ones([68])
weight[28:31] = 20
weight[-8:] = 20
weight = np.expand_dims(weight, 0)
weight = torch.tensor(weight).to(predict_lm.device)
loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight
loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1])
return loss
|
weighted mse loss
Parameters:
predict_lm --torch.tensor (B, 68, 2)
gt_lm --torch.tensor (B, 68, 2)
weight --numpy.array (1, 68)
|
landmark_loss
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/losses.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
|
Apache-2.0
|
def reg_loss(coeffs_dict, opt=None):
"""
    l2 norm without the sqrt, from Yu's implementation (MSE)
tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss
Parameters:
coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
"""
# coefficient regularization to ensure plausible 3d faces
if opt:
w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex
else:
        w_id, w_exp, w_tex = 1, 1, 1
creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \
w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \
w_tex * torch.sum(coeffs_dict['tex'] ** 2)
creg_loss = creg_loss / coeffs_dict['id'].shape[0]
# gamma regularization to ensure a nearly-monochromatic light
gamma = coeffs_dict['gamma'].reshape([-1, 3, 9])
    gamma_mean = torch.mean(gamma, dim=1, keepdim=True)
gamma_loss = torch.mean((gamma - gamma_mean) ** 2)
return creg_loss, gamma_loss
|
l2 norm without the sqrt, from Yu's implementation (MSE)
tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss
Parameters:
coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans
|
reg_loss
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/losses.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
|
Apache-2.0
|
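A worked sketch of the gamma regularization above: the 27 spherical-harmonics lighting coefficients are reshaped into (3 color bands, 9 SH terms), and deviation across bands is penalized, pushing the predicted light toward monochromatic:

```python
import torch

gamma = torch.randn(2, 27).reshape(-1, 3, 9)     # (B, color bands, SH terms)
gamma_mean = gamma.mean(dim=1, keepdim=True)     # per-term mean over the 3 bands
gamma_loss = ((gamma - gamma_mean) ** 2).mean()  # same quantity reg_loss returns
```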
def reflectance_loss(texture, mask):
"""
    minimize texture variance (mse), albedo regularization to ensure a uniform skin albedo
Parameters:
texture --torch.tensor, (B, N, 3)
mask --torch.tensor, (N), 1 or 0
"""
mask = mask.reshape([1, mask.shape[0], 1])
    texture_mean = torch.sum(mask * texture, dim=1, keepdim=True) / torch.sum(mask)
loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask))
return loss
|
minimize texture variance (mse), albedo regularization to ensure a uniform skin albedo
Parameters:
texture --torch.tensor, (B, N, 3)
mask --torch.tensor, (N), 1 or 0
|
reflectance_loss
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/losses.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/losses.py
|
Apache-2.0
|
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
|
ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
|
resnext50_32x4d
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/networks.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
|
Apache-2.0
|
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
|
ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
|
resnext101_32x8d
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/networks.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
|
Apache-2.0
|
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
|
Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
|
wide_resnet50_2
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/networks.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
|
Apache-2.0
|
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
|
Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
|
wide_resnet101_2
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/networks.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/networks.py
|
Apache-2.0
|
def modify_commandline_options(parser, is_train=True):
"""Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
if is_train:
parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
return parser
|
Add new model-specific options and rewrite default values for existing options.
Parameters:
parser -- the option parser
is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
|
modify_commandline_options
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/template_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
|
Apache-2.0
|
def optimize_parameters(self):
"""Update network weights; it will be called in every training iteration."""
self.forward() # first call forward to calculate intermediate results
self.optimizer.zero_grad() # clear network G's existing gradients
self.backward() # calculate gradients for network G
        self.optimizer.step()        # update network G's weights
|
Update network weights; it will be called in every training iteration.
|
optimize_parameters
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/template_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/template_model.py
|
Apache-2.0
|
def find_model_using_name(model_name):
"""Import the module "models/[model_name]_model.py".
    In the file, the class called [ModelName]Model will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
"""
model_filename = "face3d.models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, BaseModel):
model = cls
if model is None:
print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(1)
return model
|
Import the module "models/[model_name]_model.py".
In the file, the class called [ModelName]Model will
be instantiated. It has to be a subclass of BaseModel,
and it is case-insensitive.
|
find_model_using_name
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/__init__.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/__init__.py
|
Apache-2.0
|
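The lookup above is a standard importlib pattern: import "face3d.models.&lt;name&gt;_model" and scan its namespace for a case-insensitive "&lt;name&gt;model" subclass of BaseModel. A generic, self-contained sketch of that pattern (hypothetical names; an isinstance guard is added because issubclass raises on non-classes):

```python
import importlib

def find_class(module_name, target, base):
    """Return the class in module_name whose lowercased name matches target."""
    module = importlib.import_module(module_name)
    for name, obj in vars(module).items():
        if name.lower() == target.lower() and isinstance(obj, type) and issubclass(obj, base):
            return obj
    raise ImportError("no subclass of %s named %s in %s" % (base.__name__, target, module_name))
```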
def create_model(opt):
"""Create a model given the option.
    This function wraps the model class.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
"""
model = find_model_using_name(opt.model)
instance = model(opt)
print("model [%s] was created" % type(instance).__name__)
return instance
|
Create a model given the option.
This function wraps the model class.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from models import create_model
>>> model = create_model(opt)
|
create_model
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/__init__.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/__init__.py
|
Apache-2.0
|
def __init__(self, rank, local_rank, world_size, batch_size, resume,
margin_softmax, num_classes, sample_rate=1.0, embedding_size=512, prefix="./"):
"""
        rank: int
            Unique process (GPU) ID, from 0 to world_size - 1.
        local_rank: int
            Unique process (GPU) ID within the server, from 0 to 7.
        world_size: int
            Number of GPUs.
        batch_size: int
            Batch size on the current rank (GPU).
        resume: bool
            Whether to restore the softmax weights from a checkpoint.
        margin_softmax: callable
            A margin softmax function, e.g. cosface or arcface.
        num_classes: int
            The number of class centers stored on the current rank (CPU/GPU), usually total_classes // world_size;
            required.
        sample_rate: float
            The partial fc sampling rate; when the number of classes grows past ~2 million, sampling
            can greatly speed up training and save a lot of GPU memory. Default is 1.0.
        embedding_size: int
            The feature dimension, default is 512.
        prefix: str
            Path for saving checkpoints, default is './'.
"""
super(PartialFC, self).__init__()
#
self.num_classes: int = num_classes
self.rank: int = rank
self.local_rank: int = local_rank
self.device: torch.device = torch.device("cuda:{}".format(self.local_rank))
self.world_size: int = world_size
self.batch_size: int = batch_size
self.margin_softmax: callable = margin_softmax
self.sample_rate: float = sample_rate
self.embedding_size: int = embedding_size
self.prefix: str = prefix
self.num_local: int = num_classes // world_size + int(rank < num_classes % world_size)
self.class_start: int = num_classes // world_size * rank + min(rank, num_classes % world_size)
self.num_sample: int = int(self.sample_rate * self.num_local)
self.weight_name = os.path.join(self.prefix, "rank_{}_softmax_weight.pt".format(self.rank))
self.weight_mom_name = os.path.join(self.prefix, "rank_{}_softmax_weight_mom.pt".format(self.rank))
if resume:
try:
self.weight: torch.Tensor = torch.load(self.weight_name)
self.weight_mom: torch.Tensor = torch.load(self.weight_mom_name)
if self.weight.shape[0] != self.num_local or self.weight_mom.shape[0] != self.num_local:
raise IndexError
logging.info("softmax weight resume successfully!")
logging.info("softmax weight mom resume successfully!")
except (FileNotFoundError, KeyError, IndexError):
self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
logging.info("softmax weight init!")
logging.info("softmax weight mom init!")
else:
self.weight = torch.normal(0, 0.01, (self.num_local, self.embedding_size), device=self.device)
self.weight_mom: torch.Tensor = torch.zeros_like(self.weight)
logging.info("softmax weight init successfully!")
logging.info("softmax weight mom init successfully!")
self.stream: torch.cuda.Stream = torch.cuda.Stream(local_rank)
self.index = None
if int(self.sample_rate) == 1:
self.update = lambda: 0
self.sub_weight = Parameter(self.weight)
self.sub_weight_mom = self.weight_mom
else:
self.sub_weight = Parameter(torch.empty((0, 0)).cuda(local_rank))
|
rank: int
    Unique process (GPU) ID, from 0 to world_size - 1.
local_rank: int
    Unique process (GPU) ID within the server, from 0 to 7.
world_size: int
    Number of GPUs.
batch_size: int
    Batch size on the current rank (GPU).
resume: bool
    Whether to restore the softmax weights from a checkpoint.
margin_softmax: callable
    A margin softmax function, e.g. cosface or arcface.
num_classes: int
    The number of class centers stored on the current rank (CPU/GPU), usually total_classes // world_size;
    required.
sample_rate: float
    The partial fc sampling rate; when the number of classes grows past ~2 million, sampling
    can greatly speed up training and save a lot of GPU memory. Default is 1.0.
embedding_size: int
    The feature dimension, default is 512.
prefix: str
    Path for saving checkpoints, default is './'.
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/partial_fc.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
|
Apache-2.0
|
def sample(self, total_label):
"""
        Sample all positive class centers on each rank, and randomly select negative class centers to fill a fixed
        `num_sample`.
        total_label: tensor
            Labels after all_gather across all GPUs.
"""
index_positive = (self.class_start <= total_label) & (total_label < self.class_start + self.num_local)
total_label[~index_positive] = -1
total_label[index_positive] -= self.class_start
if int(self.sample_rate) != 1:
positive = torch.unique(total_label[index_positive], sorted=True)
if self.num_sample - positive.size(0) >= 0:
perm = torch.rand(size=[self.num_local], device=self.device)
perm[positive] = 2.0
index = torch.topk(perm, k=self.num_sample)[1]
index = index.sort()[0]
else:
index = positive
self.index = index
total_label[index_positive] = torch.searchsorted(index, total_label[index_positive])
self.sub_weight = Parameter(self.weight[index])
self.sub_weight_mom = self.weight_mom[index]
|
Sample all positive class centers on each rank, and randomly select negative class centers to fill a fixed
`num_sample`.
total_label: tensor
    Labels after all_gather across all GPUs.
|
sample
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/partial_fc.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
|
Apache-2.0
|
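The sampling trick above, on a toy scale (a sketch, not the repo's code): every positive class gets a score of 2.0, so topk over [0, 1) random scores must keep all positives and pads the rest with random negatives:

```python
import torch

num_local, num_sample = 10, 4
positive = torch.tensor([1, 7])          # classes present in this batch
perm = torch.rand(num_local)             # random scores in [0, 1) for negatives
perm[positive] = 2.0                     # positives always survive topk
index = torch.topk(perm, k=num_sample)[1].sort()[0]
assert all(p in index for p in positive.tolist())
```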
def update(self):
""" Set updated weight and weight_mom to memory bank.
"""
self.weight_mom[self.index] = self.sub_weight_mom
self.weight[self.index] = self.sub_weight
|
Set updated weight and weight_mom to memory bank.
|
update
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/partial_fc.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
|
Apache-2.0
|
def prepare(self, label, optimizer):
"""
        Get sampled class centers for calculating softmax.
label: tensor
Label tensor on each rank.
optimizer: opt
            Optimizer for partial fc, which needs access to the weight momentum.
"""
with torch.cuda.stream(self.stream):
total_label = torch.zeros(
size=[self.batch_size * self.world_size], device=self.device, dtype=torch.long)
dist.all_gather(list(total_label.chunk(self.world_size, dim=0)), label)
self.sample(total_label)
optimizer.state.pop(optimizer.param_groups[-1]['params'][0], None)
optimizer.param_groups[-1]['params'][0] = self.sub_weight
optimizer.state[self.sub_weight]['momentum_buffer'] = self.sub_weight_mom
norm_weight = normalize(self.sub_weight)
return total_label, norm_weight
|
Get sampled class centers for calculating softmax.
label: tensor
Label tensor on each rank.
optimizer: opt
    Optimizer for partial fc, which needs access to the weight momentum.
|
prepare
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/partial_fc.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
|
Apache-2.0
|
def forward_backward(self, label, features, optimizer):
"""
Partial fc forward and backward with model parallel
label: tensor
Label tensor on each rank(GPU)
features: tensor
Features tensor on each rank(GPU)
optimizer: optimizer
Optimizer for partial fc
Returns:
--------
x_grad: tensor
The gradient of features.
loss_v: tensor
Loss value for cross entropy.
"""
total_label, norm_weight = self.prepare(label, optimizer)
total_features = torch.zeros(
size=[self.batch_size * self.world_size, self.embedding_size], device=self.device)
dist.all_gather(list(total_features.chunk(self.world_size, dim=0)), features.data)
total_features.requires_grad = True
logits = self.forward(total_features, norm_weight)
logits = self.margin_softmax(logits, total_label)
with torch.no_grad():
max_fc = torch.max(logits, dim=1, keepdim=True)[0]
dist.all_reduce(max_fc, dist.ReduceOp.MAX)
# calculate exp(logits) and all-reduce
logits_exp = torch.exp(logits - max_fc)
            logits_sum_exp = logits_exp.sum(dim=1, keepdim=True)
dist.all_reduce(logits_sum_exp, dist.ReduceOp.SUM)
# calculate prob
logits_exp.div_(logits_sum_exp)
# get one-hot
grad = logits_exp
index = torch.where(total_label != -1)[0]
one_hot = torch.zeros(size=[index.size()[0], grad.size()[1]], device=grad.device)
one_hot.scatter_(1, total_label[index, None], 1)
# calculate loss
loss = torch.zeros(grad.size()[0], 1, device=grad.device)
loss[index] = grad[index].gather(1, total_label[index, None])
dist.all_reduce(loss, dist.ReduceOp.SUM)
loss_v = loss.clamp_min_(1e-30).log_().mean() * (-1)
# calculate grad
grad[index] -= one_hot
grad.div_(self.batch_size * self.world_size)
logits.backward(grad)
if total_features.grad is not None:
total_features.grad.detach_()
x_grad: torch.Tensor = torch.zeros_like(features, requires_grad=True)
# feature gradient all-reduce
dist.reduce_scatter(x_grad, list(total_features.grad.chunk(self.world_size, dim=0)))
x_grad = x_grad * self.world_size
# backward backbone
return x_grad, loss_v
|
Partial fc forward and backward with model parallel
label: tensor
Label tensor on each rank(GPU)
features: tensor
Features tensor on each rank(GPU)
optimizer: optimizer
Optimizer for partial fc
Returns:
--------
x_grad: tensor
The gradient of features.
loss_v: tensor
Loss value for cross entropy.
|
forward_backward
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/partial_fc.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/partial_fc.py
|
Apache-2.0
|
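Numerically this assembles a stable softmax cross-entropy whose max and sum-exp are global across GPUs. A single-process sketch of the same math, where the two all_reduce calls collapse into local reductions; it matches F.cross_entropy up to floating-point error:

```python
import torch

logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
max_fc = logits.max(dim=1, keepdim=True)[0]    # stands in for all_reduce(MAX)
exp = torch.exp(logits - max_fc)
prob = exp / exp.sum(dim=1, keepdim=True)      # stands in for all_reduce(SUM)
loss = -prob.gather(1, labels[:, None]).clamp_min(1e-30).log().mean()
```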
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
self.scale_clip()
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_MultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
elif isinstance(val, Iterable):
iterable = map(apply_scale, val)
if isinstance(val, list) or isinstance(val, tuple):
return type(val)(iterable)
else:
return iterable
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
|
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
|
scale
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/models/arcface_torch/utils/utils_amp.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/models/arcface_torch/utils/utils_amp.py
|
Apache-2.0
|
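This scale() is the entry point of the usual torch.cuda.amp training loop. A hedged sketch with a hypothetical model and optimizer (requires a CUDA device):

```python
import torch

model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler()

for _ in range(2):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(torch.randn(4, 8, device='cuda')).mean()
    scaler.scale(loss).backward()  # the method defined above
    scaler.step(optimizer)         # unscales grads, skips step on inf/nan
    scaler.update()
```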
def __init__(self, cmd_line=None):
"""Reset the class; indicates the class hasn't been initialized"""
self.initialized = False
self.cmd_line = None
if cmd_line is not None:
self.cmd_line = cmd_line.split()
|
Reset the class; indicates the class hasn't been initialized
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/options/base_options.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
|
Apache-2.0
|
def initialize(self, parser):
"""Define the common options that are used in both training and test."""
# basic parameters
parser.add_argument('--name', type=str, default='face_recon', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0; 0,1,2; 0,2. Use -1 for CPU')
parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--vis_batch_nums', type=float, default=1, help='batch nums of images for visualization')
parser.add_argument('--eval_batch_nums', type=float, default=float('inf'), help='batch nums of images for evaluation')
parser.add_argument('--use_ddp', type=util.str2bool, nargs='?', const=True, default=True, help='whether use distributed data parallel')
parser.add_argument('--ddp_port', type=str, default='12355', help='ddp port')
parser.add_argument('--display_per_batch', type=util.str2bool, nargs='?', const=True, default=True, help='whether use batch to show losses')
parser.add_argument('--add_image', type=util.str2bool, nargs='?', const=True, default=True, help='whether add image to tensorboard')
        parser.add_argument('--world_size', type=int, default=1, help='world size (number of GPUs) for distributed training')
# model parameters
parser.add_argument('--model', type=str, default='facerecon', help='chooses which model to use.')
# additional parameters
parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
self.initialized = True
return parser
|
Define the common options that are used in both training and test.
|
initialize
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/options/base_options.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
|
Apache-2.0
|
def gather_options(self):
"""Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
"""
if not self.initialized: # check if it has been initialized
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# get the basic options
if self.cmd_line is None:
opt, _ = parser.parse_known_args()
else:
opt, _ = parser.parse_known_args(self.cmd_line)
# set cuda visible devices
os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_ids
# modify model-related parser options
model_name = opt.model
model_option_setter = models.get_option_setter(model_name)
parser = model_option_setter(parser, self.isTrain)
if self.cmd_line is None:
opt, _ = parser.parse_known_args() # parse again with new defaults
else:
opt, _ = parser.parse_known_args(self.cmd_line) # parse again with new defaults
# modify dataset-related parser options
if opt.dataset_mode:
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, self.isTrain)
# save and return the parser
self.parser = parser
if self.cmd_line is None:
return parser.parse_args()
else:
return parser.parse_args(self.cmd_line)
|
Initialize our parser with basic options(only once).
Add additional model-specific and dataset-specific options.
These options are defined in the <modify_commandline_options> function
in model and dataset classes.
|
gather_options
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/options/base_options.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
|
Apache-2.0
|
def print_options(self, opt):
"""Print and save options
        It will print both current options and default values (if different).
        It will save options into a text file: [checkpoints_dir]/opt.txt
"""
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
# save to the disk
expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
try:
with open(file_name, 'wt') as opt_file:
opt_file.write(message)
opt_file.write('\n')
except PermissionError as error:
print("permission error {}".format(error))
pass
|
Print and save options
It will print both current options and default values (if different).
It will save options into a text file: [checkpoints_dir]/opt.txt
|
print_options
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/options/base_options.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
|
Apache-2.0
|
def parse(self):
"""Parse our options, create checkpoints directory suffix, and set up gpu device."""
opt = self.gather_options()
opt.isTrain = self.isTrain # train or test
# process opt.suffix
if opt.suffix:
suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
opt.name = opt.name + suffix
# set gpu ids
str_ids = opt.gpu_ids.split(',')
gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
gpu_ids.append(id)
opt.world_size = len(gpu_ids)
# if len(opt.gpu_ids) > 0:
# torch.cuda.set_device(gpu_ids[0])
if opt.world_size == 1:
opt.use_ddp = False
if opt.phase != 'test':
# set continue_train automatically
if opt.pretrained_name is None:
model_dir = os.path.join(opt.checkpoints_dir, opt.name)
else:
model_dir = os.path.join(opt.checkpoints_dir, opt.pretrained_name)
if os.path.isdir(model_dir):
model_pths = [i for i in os.listdir(model_dir) if i.endswith('pth')]
if os.path.isdir(model_dir) and len(model_pths) != 0:
opt.continue_train= True
# update the latest epoch count
if opt.continue_train:
if opt.epoch == 'latest':
epoch_counts = [int(i.split('.')[0].split('_')[-1]) for i in model_pths if 'latest' not in i]
if len(epoch_counts) != 0:
opt.epoch_count = max(epoch_counts) + 1
else:
opt.epoch_count = int(opt.epoch) + 1
self.print_options(opt)
self.opt = opt
return self.opt
|
Parse our options, create checkpoints directory suffix, and set up gpu device.
|
parse
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/options/base_options.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/options/base_options.py
|
Apache-2.0
|
def __init__(self, web_dir, title, refresh=0):
"""Initialize the HTML classes
Parameters:
            web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
title (str) -- the webpage name
            refresh (int) -- how often the website refreshes itself; if 0, no refreshing
"""
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
|
Initialize the HTML classes
Parameters:
web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
title (str) -- the webpage name
refresh (int) -- how often the website refreshes itself; if 0, no refreshing
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/html.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
|
Apache-2.0
|
def add_images(self, ims, txts, links, width=400):
"""add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
"""
self.t = table(border=1, style="table-layout: fixed;") # Insert a table
self.doc.add(self.t)
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
|
add images to the HTML file
Parameters:
ims (str list) -- a list of image paths
txts (str list) -- a list of image names shown on the website
links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
|
add_images
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/html.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
|
Apache-2.0
|
def save(self):
"""save the current content to the HTML file"""
        html_file = '%s/index.html' % self.web_dir
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())
|
save the current content to the HTML file
|
save
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/html.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/html.py
|
Apache-2.0
|
def forward(self, vertex, tri, feat=None):
"""
Return:
mask -- torch.tensor, size (B, 1, H, W)
depth -- torch.tensor, size (B, 1, H, W)
features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None
Parameters:
vertex -- torch.tensor, size (B, N, 3)
tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles
feat(optional) -- torch.tensor, size (B, C), features
"""
device = vertex.device
rsize = int(self.rasterize_size)
ndc_proj = self.ndc_proj.to(device)
# trans to homogeneous coordinates of 3d vertices, the direction of y is the same as v
if vertex.shape[-1] == 3:
vertex = torch.cat([vertex, torch.ones([*vertex.shape[:2], 1]).to(device)], dim=-1)
vertex[..., 1] = -vertex[..., 1]
vertex_ndc = vertex @ ndc_proj.t()
if self.glctx is None:
self.glctx = dr.RasterizeGLContext(device=device)
print("create glctx on device cuda:%d"%device.index)
ranges = None
if isinstance(tri, List) or len(tri.shape) == 3:
vum = vertex_ndc.shape[1]
fnum = torch.tensor([f.shape[0] for f in tri]).unsqueeze(1).to(device)
fstartidx = torch.cumsum(fnum, dim=0) - fnum
ranges = torch.cat([fstartidx, fnum], axis=1).type(torch.int32).cpu()
for i in range(tri.shape[0]):
tri[i] = tri[i] + i*vum
vertex_ndc = torch.cat(vertex_ndc, dim=0)
tri = torch.cat(tri, dim=0)
        # for range_mode vertex: [B*N, 4], tri: [B*M, 3]; for instance_mode vertex: [B, N, 4], tri: [M, 3]
tri = tri.type(torch.int32).contiguous()
rast_out, _ = dr.rasterize(self.glctx, vertex_ndc.contiguous(), tri, resolution=[rsize, rsize], ranges=ranges)
depth, _ = dr.interpolate(vertex.reshape([-1,4])[...,2].unsqueeze(1).contiguous(), rast_out, tri)
depth = depth.permute(0, 3, 1, 2)
mask = (rast_out[..., 3] > 0).float().unsqueeze(1)
depth = mask * depth
image = None
if feat is not None:
image, _ = dr.interpolate(feat, rast_out, tri)
image = image.permute(0, 3, 1, 2)
image = mask * image
return mask, depth, image
|
Return:
mask -- torch.tensor, size (B, 1, H, W)
depth -- torch.tensor, size (B, 1, H, W)
features(optional) -- torch.tensor, size (B, C, H, W) if feat is not None
Parameters:
vertex -- torch.tensor, size (B, N, 3)
tri -- torch.tensor, size (B, M, 3) or (M, 3), triangles
feat(optional) -- torch.tensor, size (B, C), features
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/nvdiffrast.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/nvdiffrast.py
|
Apache-2.0
|
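The homogeneous-coordinate step above, isolated as a sketch (an identity matrix stands in for the real NDC projection): append w = 1 to each vertex and flip y so image rows grow downward before projecting:

```python
import torch

B, N = 2, 5
vertex = torch.randn(B, N, 3)
vertex_h = torch.cat([vertex, torch.ones(B, N, 1)], dim=-1)  # (B, N, 4), w = 1
vertex_h[..., 1] = -vertex_h[..., 1]                         # y down in image space
ndc_proj = torch.eye(4)                                      # placeholder projection
vertex_ndc = vertex_h @ ndc_proj.t()
```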
def align_img(img, lm, lm3D, mask=None, target_size=224., rescale_factor=102.):
"""
Return:
        trans_params --numpy.array (raw_W, raw_H, scale, tx, ty)
img_new --PIL.Image (target_size, target_size, 3)
lm_new --numpy.array (68, 2), y direction is opposite to v direction
mask_new --PIL.Image (target_size, target_size)
Parameters:
img --PIL.Image (raw_H, raw_W, 3)
lm --numpy.array (68, 2), y direction is opposite to v direction
lm3D --numpy.array (5, 3)
mask --PIL.Image (raw_H, raw_W, 3)
"""
w0, h0 = img.size
if lm.shape[0] != 5:
lm5p = extract_5p(lm)
else:
lm5p = lm
# calculate translation and scale factors using 5 facial landmarks and standard landmarks of a 3D face
t, s = POS(lm5p.transpose(), lm3D.transpose())
s = rescale_factor/s
# processing the image
img_new, lm_new, mask_new = resize_n_crop_img(img, lm, t, s, target_size=target_size, mask=mask)
trans_params = np.array([w0, h0, s, t[0], t[1]])
return trans_params, img_new, lm_new, mask_new
|
Return:
trans_params --numpy.array (raw_W, raw_H, scale, tx, ty)
img_new --PIL.Image (target_size, target_size, 3)
lm_new --numpy.array (68, 2), y direction is opposite to v direction
mask_new --PIL.Image (target_size, target_size)
Parameters:
img --PIL.Image (raw_H, raw_W, 3)
lm --numpy.array (68, 2), y direction is opposite to v direction
lm3D --numpy.array (5, 3)
mask --PIL.Image (raw_H, raw_W, 3)
|
align_img
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/preprocess.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/preprocess.py
|
Apache-2.0
|
def estimate_norm(lm_68p, H):
# from https://github.com/deepinsight/insightface/blob/c61d3cd208a603dfa4a338bd743b320ce3e94730/recognition/common/face_align.py#L68
"""
Return:
trans_m --numpy.array (2, 3)
Parameters:
lm --numpy.array (68, 2), y direction is opposite to v direction
H --int/float , image height
"""
lm = extract_5p(lm_68p)
lm[:, -1] = H - 1 - lm[:, -1]
tform = trans.SimilarityTransform()
src = np.array(
[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
[41.5493, 92.3655], [70.7299, 92.2041]],
dtype=np.float32)
tform.estimate(lm, src)
M = tform.params
if np.linalg.det(M) == 0:
M = np.eye(3)
return M[0:2, :]
|
Return:
trans_m --numpy.array (2, 3)
Parameters:
lm --numpy.array (68, 2), y direction is opposite to v direction
H --int/float , image height
|
estimate_norm
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/preprocess.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/preprocess.py
|
Apache-2.0
|
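A usage sketch of the 5-point alignment above with scikit-image; src is the canonical ArcFace 112x112 template copied from the code, and the jittered detections are dummy data:

```python
import numpy as np
from skimage import transform as trans

src = np.array([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
                [41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)
lm5 = src + np.random.randn(5, 2).astype(np.float32)  # jittered dummy detections
tform = trans.SimilarityTransform()
tform.estimate(lm5, src)  # similarity transform mapping detections onto the template
M = tform.params[0:2, :]  # the 2x3 matrix estimate_norm returns
```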
def tensor2im(input_image, imtype=np.uint8):
""""Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array, range(0, 1)
imtype (type) -- the desired type of the converted numpy array
"""
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor): # get the data from a variable
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor.clamp(0.0, 1.0).cpu().float().numpy() # convert it into a numpy array
if image_numpy.shape[0] == 1: # grayscale to RGB
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 # post-processing: transpose and scaling
else: # if it is a numpy array, do nothing
image_numpy = input_image
return image_numpy.astype(imtype)
|
"Converts a Tensor array into a numpy image array.
Parameters:
input_image (tensor) -- the input image tensor array, range(0, 1)
imtype (type) -- the desired type of the converted numpy array
|
tensor2im
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
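Usage sketch (assumes tensor2im above is in scope): a (C, H, W) float tensor in [0, 1] becomes an (H, W, C) uint8 array:

```python
import torch

t = torch.rand(3, 64, 64)
arr = tensor2im(t)
assert arr.shape == (64, 64, 3) and str(arr.dtype) == 'uint8'
```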
def diagnose_network(net, name='network'):
"""Calculate and print the mean of average absolute(gradients)
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
"""
mean = 0.0
count = 0
for param in net.parameters():
if param.grad is not None:
mean += torch.mean(torch.abs(param.grad.data))
count += 1
if count > 0:
mean = mean / count
print(name)
print(mean)
|
Calculate and print the mean of the average absolute gradients
Parameters:
net (torch network) -- Torch network
name (str) -- the name of the network
|
diagnose_network
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
def save_image(image_numpy, image_path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio is None:
pass
elif aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
elif aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(image_path)
|
Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
image_path (str) -- the path of the image
|
save_image
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
def print_numpy(x, val=True, shp=False):
"""Print the mean, min, max, median, std, and size of a numpy array
Parameters:
        val (bool) -- whether to print the values of the numpy array
        shp (bool) -- whether to print the shape of the numpy array
"""
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
|
Print the mean, min, max, median, std, and size of a numpy array
Parameters:
val (bool) -- whether to print the values of the numpy array
shp (bool) -- whether to print the shape of the numpy array
|
print_numpy
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
|
create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
|
mkdirs
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
def draw_landmarks(img, landmark, color='r', step=2):
"""
Return:
img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255)
Parameters:
img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255)
landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction
color -- str, 'r' or 'b' (red or blue)
"""
if color =='r':
c = np.array([255., 0, 0])
else:
c = np.array([0, 0, 255.])
_, H, W, _ = img.shape
img, landmark = img.copy(), landmark.copy()
landmark[..., 1] = H - 1 - landmark[..., 1]
landmark = np.round(landmark).astype(np.int32)
for i in range(landmark.shape[1]):
x, y = landmark[:, i, 0], landmark[:, i, 1]
for j in range(-step, step):
for k in range(-step, step):
u = np.clip(x + j, 0, W - 1)
v = np.clip(y + k, 0, H - 1)
for m in range(landmark.shape[0]):
img[m, v[m], u[m]] = c
return img
|
Return:
img -- numpy.array, (B, H, W, 3) img with landmark, RGB order, range (0, 255)
Parameters:
img -- numpy.array, (B, H, W, 3), RGB order, range (0, 255)
landmark -- numpy.array, (B, 68, 2), y direction is opposite to v direction
color -- str, 'r' or 'b' (red or blue)
|
draw_landmarks
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/util.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/util.py
|
Apache-2.0
|
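A small sketch of the coordinate convention draw_landmarks handles: landmark y grows upward while image rows grow downward, so the function maps y to H - 1 - y before painting:

```python
import numpy as np

H = 224
lm = np.array([[[10.0, 200.0]]])   # (B=1, one point, xy) with y up
row = H - 1 - lm[..., 1]           # image row index with y down
print(row)                         # [[23.]]
```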
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im_data in visuals.items():
im = util.tensor2im(im_data)
image_name = '%s/%s.png' % (label, name)
os.makedirs(os.path.join(image_dir, label), exist_ok=True)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
|
Save images to the disk.
Parameters:
webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
image_path (str) -- the string is used to create image paths
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
|
save_images
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/visualizer.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
|
Apache-2.0
|
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: create a tensorboard writer
    Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.use_html = opt.isTrain and not opt.no_html
self.writer = SummaryWriter(os.path.join(opt.checkpoints_dir, 'logs', opt.name))
self.win_size = opt.display_winsize
self.name = opt.name
self.saved = False
if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
# create a logging file to store training losses
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
|
Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: create a tensorboard writer
Step 3: create an HTML object for saving HTML files
Step 4: create a logging file to store training losses
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/visualizer.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
|
Apache-2.0
|
def display_current_results(self, visuals, total_iters, epoch, save_result):
"""Display current results on tensorboad; save current results to an HTML file.
Parameters:
        visuals (OrderedDict) -- dictionary of images to display or save
        total_iters (int) -- total iterations
        epoch (int) -- the current epoch
        save_result (bool) -- whether to save the current results to an HTML file
"""
for label, image in visuals.items():
self.writer.add_image(label, util.tensor2im(image), total_iters, dataformats='HWC')
if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
self.saved = True
# save images to the disk
for label, image in visuals.items():
image_numpy = util.tensor2im(image)
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=0)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
                    image_numpy = util.tensor2im(image_numpy)
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
|
Display current results on tensorboard; save current results to an HTML file.
Parameters:
    visuals (OrderedDict) -- dictionary of images to display or save
    total_iters (int) -- total iterations
    epoch (int) -- the current epoch
    save_result (bool) -- whether to save the current results to an HTML file
|
display_current_results
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/visualizer.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
|
Apache-2.0
|
def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message) # save the message
|
print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
|
print_current_losses
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/visualizer.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
|
Apache-2.0
|
def display_current_results(self, visuals, total_iters, epoch, dataset='train', save_results=False, count=0, name=None,
add_image=True):
"""Display current results on tensorboad; save current results to an HTML file.
Parameters:
visuals (OrderedDict) - - dictionary of images to display or save
total_iters (int) -- total iterations
epoch (int) - - the current epoch
dataset (str) - - 'train' or 'val' or 'test'
"""
# if (not add_image) and (not save_results): return
for label, image in visuals.items():
for i in range(image.shape[0]):
image_numpy = util.tensor2im(image[i])
if add_image:
self.writer.add_image(label + '%s_%02d'%(dataset, i + count),
image_numpy, total_iters, dataformats='HWC')
if save_results:
save_path = os.path.join(self.img_dir, dataset, 'epoch_%s_%06d'%(epoch, total_iters))
if not os.path.isdir(save_path):
os.makedirs(save_path)
if name is not None:
img_path = os.path.join(save_path, '%s.png' % name)
else:
img_path = os.path.join(save_path, '%s_%03d.png' % (label, i + count))
util.save_image(image_numpy, img_path)
|
Display current results on tensorboard; save current results to an HTML file.
Parameters:
    visuals (OrderedDict) -- dictionary of images to display or save
    total_iters (int) -- total iterations
    epoch (int) -- the current epoch
    dataset (str) -- 'train' or 'val' or 'test'
|
display_current_results
|
python
|
OpenTalker/video-retalking
|
third_part/face3d/util/visualizer.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face3d/util/visualizer.py
|
Apache-2.0
|
def transform(point, center, scale, resolution, invert=False):
"""Generate and affine transformation matrix.
Given a set of points, a center, a scale and a targer resolution, the
function generates and affine transformation matrix. If invert is ``True``
it will produce the inverse transformation.
Arguments:
point {torch.tensor} -- the input 2D point
center {torch.tensor or numpy.array} -- the center around which to perform the transformations
scale {float} -- the scale of the face/object
resolution {float} -- the output resolution
Keyword Arguments:
invert {bool} -- define wherever the function should produce the direct or the
inverse transformation matrix (default: {False})
"""
_pt = torch.ones(3)
_pt[0] = point[0]
_pt[1] = point[1]
h = 200.0 * scale
t = torch.eye(3)
t[0, 0] = resolution / h
t[1, 1] = resolution / h
t[0, 2] = resolution * (-center[0] / h + 0.5)
t[1, 2] = resolution * (-center[1] / h + 0.5)
if invert:
t = torch.inverse(t)
new_point = (torch.matmul(t, _pt))[0:2]
return new_point.int()
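# Usage sketch with synthetic values: map the center of a 64x64 heatmap back
# to the original image frame, as done in the landmark decoding code below.
import torch
if __name__ == '__main__':
    center = torch.tensor([128.0, 128.0])   # face center in the original image
    scale = 1.28                            # reference box of 200 * 1.28 = 256 px
    pt_hm = torch.tensor([32.0, 32.0])      # center of a 64x64 heatmap
    pt_img = transform(pt_hm, center, scale, 64, invert=True)
    print(pt_img)                           # tensor([128, 128], dtype=torch.int32)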
|
Generate an affine transformation matrix.
Given a set of points, a center, a scale and a target resolution, the
function generates an affine transformation matrix. If invert is ``True``
it will produce the inverse transformation.
Arguments:
    point {torch.tensor} -- the input 2D point
    center {torch.tensor or numpy.array} -- the center around which to perform the transformations
    scale {float} -- the scale of the face/object
    resolution {float} -- the output resolution
Keyword Arguments:
    invert {bool} -- whether the function should produce the direct or the
        inverse transformation matrix (default: {False})
|
transform
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def crop(image, center, scale, resolution=256.0):
"""Center crops an image or set of heatmaps
Arguments:
image {numpy.array} -- an rgb image
center {numpy.array} -- the center of the object, usually the same as of the bounding box
scale {float} -- scale of the face
Keyword Arguments:
resolution {float} -- the size of the output cropped image (default: {256.0})
Returns:
[type] -- [description]
""" # Crop around the center point
""" Crops the image around the center. Input is expected to be an np.ndarray """
ul = transform([1, 1], center, scale, resolution, True)
br = transform([resolution, resolution], center, scale, resolution, True)
# pad = math.ceil(torch.norm((ul - br).float()) / 2.0 - (br[0] - ul[0]) / 2.0)
if image.ndim > 2:
newDim = np.array([br[1] - ul[1], br[0] - ul[0],
image.shape[2]], dtype=np.int32)
newImg = np.zeros(newDim, dtype=np.uint8)
else:
        newDim = np.array([br[1] - ul[1], br[0] - ul[0]], dtype=np.int32)
newImg = np.zeros(newDim, dtype=np.uint8)
ht = image.shape[0]
wd = image.shape[1]
newX = np.array(
[max(1, -ul[0] + 1), min(br[0], wd) - ul[0]], dtype=np.int32)
newY = np.array(
[max(1, -ul[1] + 1), min(br[1], ht) - ul[1]], dtype=np.int32)
oldX = np.array([max(1, ul[0] + 1), min(br[0], wd)], dtype=np.int32)
oldY = np.array([max(1, ul[1] + 1), min(br[1], ht)], dtype=np.int32)
    newImg[newY[0] - 1:newY[1], newX[0] - 1:newX[1]] = image[oldY[0] - 1:oldY[1], oldX[0] - 1:oldX[1]]
newImg = cv2.resize(newImg, dsize=(int(resolution), int(resolution)),
interpolation=cv2.INTER_LINEAR)
return newImg
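# Usage sketch (synthetic image): crop a 256x256 patch around a face center.
import numpy as np
if __name__ == '__main__':
    image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # H x W x 3, RGB
    center = np.array([320.0, 240.0])                              # (x, y) face center
    scale = 1.0                                                    # reference box of 200 px
    patch = crop(image, center, scale, resolution=256.0)
    assert patch.shape == (256, 256, 3)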
|
Center crops an image or set of heatmaps around the center point.
The input is expected to be an np.ndarray.
Arguments:
    image {numpy.array} -- an RGB image
    center {numpy.array} -- the center of the object, usually the same as of the bounding box
    scale {float} -- scale of the face
Keyword Arguments:
    resolution {float} -- the size of the output cropped image (default: {256.0})
Returns:
    numpy.array -- the cropped image, resized to (resolution, resolution)
|
crop
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def get_preds_fromhm(hm, center=None, scale=None):
"""Obtain (x,y) coordinates given a set of N heatmaps. If the center
and the scale is provided the function will return the points also in
the original coordinate frame.
Arguments:
hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]
Keyword Arguments:
center {torch.tensor} -- the center of the bounding box (default: {None})
scale {float} -- face scale (default: {None})
"""
    _, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
idx += 1
preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)
for i in range(preds.size(0)):
for j in range(preds.size(1)):
hm_ = hm[i, j, :]
pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
if pX > 0 and pX < 63 and pY > 0 and pY < 63:
diff = torch.FloatTensor(
[hm_[pY, pX + 1] - hm_[pY, pX - 1],
hm_[pY + 1, pX] - hm_[pY - 1, pX]])
preds[i, j].add_(diff.sign_().mul_(.25))
preds.add_(-.5)
preds_orig = torch.zeros(preds.size())
if center is not None and scale is not None:
for i in range(hm.size(0)):
for j in range(hm.size(1)):
preds_orig[i, j] = transform(
preds[i, j], center, scale, hm.size(2), True)
return preds, preds_orig
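# Usage sketch with synthetic heatmaps: place one peak per landmark channel
# and recover its sub-pixel heatmap coordinates (relies on `transform` above).
import torch
if __name__ == '__main__':
    B, N, H, W = 1, 68, 64, 64
    hm = torch.zeros(B, N, H, W)
    hm[:, :, 30, 40] = 1.0                      # a peak at (x=40, y=30) in every channel
    center = torch.tensor([128.0, 128.0])
    preds, preds_orig = get_preds_fromhm(hm, center, scale=1.28)
    print(preds[0, 0])                          # approx tensor([40.5, 30.5])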
|
Obtain (x, y) coordinates given a set of N heatmaps. If the center
and the scale are provided, the function will also return the points in
the original coordinate frame.
Arguments:
hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]
Keyword Arguments:
center {torch.tensor} -- the center of the bounding box (default: {None})
scale {float} -- face scale (default: {None})
|
get_preds_fromhm
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def get_preds_fromhm_batch(hm, centers=None, scales=None):
"""Obtain (x,y) coordinates given a set of N heatmaps. If the centers
and the scales is provided the function will return the points also in
the original coordinate frame.
Arguments:
hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]
Keyword Arguments:
centers {torch.tensor} -- the centers of the bounding box (default: {None})
scales {float} -- face scales (default: {None})
"""
    _, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
idx += 1
preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)
for i in range(preds.size(0)):
for j in range(preds.size(1)):
hm_ = hm[i, j, :]
pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
if pX > 0 and pX < 63 and pY > 0 and pY < 63:
diff = torch.FloatTensor(
[hm_[pY, pX + 1] - hm_[pY, pX - 1],
hm_[pY + 1, pX] - hm_[pY - 1, pX]])
preds[i, j].add_(diff.sign_().mul_(.25))
preds.add_(-.5)
preds_orig = torch.zeros(preds.size())
if centers is not None and scales is not None:
for i in range(hm.size(0)):
for j in range(hm.size(1)):
preds_orig[i, j] = transform(
preds[i, j], centers[i], scales[i], hm.size(2), True)
return preds, preds_orig
|
Obtain (x, y) coordinates given a set of N heatmaps. If the centers
and the scales are provided, the function will also return the points in
the original coordinate frame.
Arguments:
hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]
Keyword Arguments:
centers {torch.tensor} -- the centers of the bounding box (default: {None})
scales {float} -- face scales (default: {None})
|
get_preds_fromhm_batch
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def shuffle_lr(parts, pairs=None):
"""Shuffle the points left-right according to the axis of symmetry
of the object.
Arguments:
parts {torch.tensor} -- a 3D or 4D object containing the
heatmaps.
Keyword Arguments:
pairs {list of integers} -- [order of the flipped points] (default: {None})
"""
if pairs is None:
pairs = [16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0,
26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 27, 28, 29, 30, 35,
34, 33, 32, 31, 45, 44, 43, 42, 47, 46, 39, 38, 37, 36, 41,
40, 54, 53, 52, 51, 50, 49, 48, 59, 58, 57, 56, 55, 64, 63,
62, 61, 60, 67, 66, 65]
if parts.ndimension() == 3:
parts = parts[pairs, ...]
else:
parts = parts[:, pairs, ...]
return parts
|
Shuffle the points left-right according to the axis of symmetry
of the object.
Arguments:
parts {torch.tensor} -- a 3D or 4D object containing the
heatmaps.
Keyword Arguments:
pairs {list of integers} -- [order of the flipped points] (default: {None})
|
shuffle_lr
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def flip(tensor, is_label=False):
"""Flip an image or a set of heatmaps left-right
Arguments:
tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]
Keyword Arguments:
        is_label {bool} -- [denote whether the input is an image or a set of heatmaps] (default: {False})
"""
if not torch.is_tensor(tensor):
tensor = torch.from_numpy(tensor)
if is_label:
tensor = shuffle_lr(tensor).flip(tensor.ndimension() - 1)
else:
tensor = tensor.flip(tensor.ndimension() - 1)
return tensor
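# Usage sketch: horizontally flip a toy image tensor; with is_label=True the
# landmark heatmap channels would also be re-ordered via shuffle_lr above.
import torch
if __name__ == '__main__':
    img = torch.arange(12.0).reshape(1, 3, 4)   # C x H x W toy "image"
    flipped = flip(img)                          # last dimension reversed
    print(flipped[0, 0])                         # tensor([3., 2., 1., 0.])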
|
Flip an image or a set of heatmaps left-right
Arguments:
tensor {numpy.array or torch.tensor} -- [the input image or heatmaps]
Keyword Arguments:
is_label {bool} -- [denote whether the input is an image or a set of heatmaps] (default: {False})
|
flip
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def appdata_dir(appname=None, roaming=False):
""" appdata_dir(appname=None, roaming=False)
Get the path to the application directory, where applications are allowed
to write user specific files (e.g. configurations). For non-user specific
data, consider using common_appdata_dir().
If appname is given, a subdir is appended (and created if necessary).
If roaming is True, will prefer a roaming directory (Windows Vista/7).
"""
# Define default user directory
userDir = os.getenv('FACEALIGNMENT_USERDIR', None)
if userDir is None:
userDir = os.path.expanduser('~')
if not os.path.isdir(userDir): # pragma: no cover
userDir = '/var/tmp' # issue #54
# Get system app data dir
path = None
if sys.platform.startswith('win'):
path1, path2 = os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')
path = (path2 or path1) if roaming else (path1 or path2)
elif sys.platform.startswith('darwin'):
path = os.path.join(userDir, 'Library', 'Application Support')
# On Linux and as fallback
if not (path and os.path.isdir(path)):
path = userDir
# Maybe we should store things local to the executable (in case of a
# portable distro or a frozen application that wants to be portable)
prefix = sys.prefix
if getattr(sys, 'frozen', None):
prefix = os.path.abspath(os.path.dirname(sys.executable))
for reldir in ('settings', '../settings'):
localpath = os.path.abspath(os.path.join(prefix, reldir))
if os.path.isdir(localpath): # pragma: no cover
try:
open(os.path.join(localpath, 'test.write'), 'wb').close()
os.remove(os.path.join(localpath, 'test.write'))
except IOError:
pass # We cannot write in this directory
else:
path = localpath
break
# Get path specific for this app
if appname:
if path == userDir:
appname = '.' + appname.lstrip('.') # Make it a hidden directory
path = os.path.join(path, appname)
if not os.path.isdir(path): # pragma: no cover
os.mkdir(path)
# Done
return path
|
appdata_dir(appname=None, roaming=False)
Get the path to the application directory, where applications are allowed
to write user specific files (e.g. configurations). For non-user specific
data, consider using common_appdata_dir().
If appname is given, a subdir is appended (and created if necessary).
If roaming is True, will prefer a roaming directory (Windows Vista/7).
|
appdata_dir
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/utils.py
|
Apache-2.0
|
def detect_from_directory(self, path, extensions=['.jpg', '.png'], recursive=False, show_progress_bar=True):
"""Detects faces from all the images present in a given directory.
Arguments:
path {string} -- a string containing a path that points to the folder containing the images
Keyword Arguments:
        extensions {list} -- list of strings containing the extensions to be
            considered in the following format: ``.extension_name`` (default: {['.jpg', '.png']})
        recursive {bool} -- whether to scan the folder recursively (default: {False})
        show_progress_bar {bool} -- display a progress bar (default: {True})
Example:
>>> directory = 'data'
... detected_faces = detect_from_directory(directory)
{A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}
"""
if self.verbose:
logger = logging.getLogger(__name__)
if len(extensions) == 0:
if self.verbose:
logger.error("Expected at list one extension, but none was received.")
raise ValueError
if self.verbose:
logger.info("Constructing the list of images.")
additional_pattern = '/**/*' if recursive else '/*'
files = []
for extension in extensions:
files.extend(glob.glob(path + additional_pattern + extension, recursive=recursive))
if self.verbose:
logger.info("Finished searching for images. %s images found", len(files))
logger.info("Preparing to run the detection.")
predictions = {}
for image_path in tqdm(files, disable=not show_progress_bar):
if self.verbose:
logger.info("Running the face detector on image: %s", image_path)
predictions[image_path] = self.detect_from_image(image_path)
if self.verbose:
logger.info("The detector was successfully run on all %s images", len(files))
return predictions
|
Detects faces from all the images present in a given directory.
Arguments:
path {string} -- a string containing a path that points to the folder containing the images
Keyword Arguments:
extensions {list} -- list of strings containing the extensions to be
    considered in the following format: ``.extension_name`` (default: {['.jpg', '.png']})
recursive {bool} -- whether to scan the folder recursively (default: {False})
show_progress_bar {bool} -- display a progress bar (default: {True})
Example:
>>> directory = 'data'
... detected_faces = detect_from_directory(directory)
{A dictionary of [lists containing bounding boxes(x1, y1, x2, y2)]}
|
detect_from_directory
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/detection/core.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/core.py
|
Apache-2.0
|
def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):
"""Convert path (represented as a string) or torch.tensor to a numpy.ndarray
Arguments:
tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
"""
if isinstance(tensor_or_path, str):
return cv2.imread(tensor_or_path) if not rgb else cv2.imread(tensor_or_path)[..., ::-1]
elif torch.is_tensor(tensor_or_path):
# Call cpu in case its coming from cuda
return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()
elif isinstance(tensor_or_path, np.ndarray):
return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path
else:
raise TypeError
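# Usage sketch: for ndarray/tensor inputs the `rgb` flag decides whether the
# channel order is reversed (BGR <-> RGB); values here are synthetic.
import numpy as np
if __name__ == '__main__':
    bgr = np.zeros((4, 4, 3), dtype=np.uint8)
    bgr[..., 0] = 255                                 # pure blue in BGR order
    out = tensor_or_path_to_ndarray(bgr, rgb=False)   # channels reversed on the way out
    print(out[0, 0])                                  # [  0   0 255]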
|
Convert path (represented as a string) or torch.tensor to a numpy.ndarray
Arguments:
tensor_or_path {numpy.ndarray, torch.tensor or string} -- path to the image, or the image itself
|
tensor_or_path_to_ndarray
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/detection/core.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/core.py
|
Apache-2.0
|
def encode(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
"""
# dist b/t match center and prior's center
g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, 2:])
# match wh / prior wh
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
# return target for smooth_l1_loss
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
|
Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 4].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded boxes (tensor), Shape: [num_priors, 4]
|
encode
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/detection/sfd/bbox.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/sfd/bbox.py
|
Apache-2.0
|
def decode(loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((
priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
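# Round-trip sketch exercising decode together with encode from the previous
# entry (both live in the same bbox.py module); values are synthetic.
import torch
if __name__ == '__main__':
    priors = torch.tensor([[0.5, 0.5, 0.2, 0.2]])     # (cx, cy, w, h), center-offset form
    gt = torch.tensor([[0.45, 0.45, 0.60, 0.60]])     # (x1, y1, x2, y2), point form
    variances = [0.1, 0.2]
    loc = encode(gt, priors, variances)
    recon = decode(loc, priors, variances)
    print(torch.allclose(recon, gt, atol=1e-6))       # True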
|
Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
|
decode
|
python
|
OpenTalker/video-retalking
|
third_part/face_detection/detection/sfd/bbox.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/face_detection/detection/sfd/bbox.py
|
Apache-2.0
|
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
# change default flag, make sure instance norm behave as the same in both train and eval
# https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/395
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
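# Usage sketch: build a small conv block with the returned constructor
# (torch.nn is the same dependency this module already uses).
import torch.nn as nn
if __name__ == '__main__':
    norm_layer = get_norm_layer('instance')
    block = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), norm_layer(64), nn.ReLU(True))
    print(block)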
|
Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
|
get_norm_layer
|
python
|
OpenTalker/video-retalking
|
third_part/ganimation_replicate/model/model_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/ganimation_replicate/model/model_utils.py
|
Apache-2.0
|
def forward(self,
styles,
conditions,
input_is_latent=False,
noise=None,
randomize_noise=True,
truncation=1,
truncation_latent=None,
inject_index=None,
return_latents=False):
"""Forward function for StyleGAN2GeneratorSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
"""
# style codes -> latents with Style MLP layer
if not input_is_latent:
styles = [self.style_mlp(s) for s in styles]
# noises
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers # for each style conv layer
else: # use the stored noise
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
# style truncation
if truncation < 1:
style_truncation = []
for style in styles:
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_truncation
# get style latents with injection
if len(styles) == 1:
inject_index = self.num_latent
if styles[0].ndim < 3:
# repeat latent code for all the layers
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: # used for encoder with different latent code for each layer
latent = styles[0]
elif len(styles) == 2: # mixing noises
if inject_index is None:
inject_index = random.randint(1, self.num_latent - 1)
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
latent = torch.cat([latent1, latent2], 1)
# main generation
out = self.constant_input(latent.shape[0])
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
noise[2::2], self.to_rgbs):
out = conv1(out, latent[:, i], noise=noise1)
# the conditions may have fewer levels
if i < len(conditions):
# SFT part to combine the conditions
if self.sft_half: # only apply SFT to half of the channels
out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
out_sft = out_sft * conditions[i - 1] + conditions[i]
out = torch.cat([out_same, out_sft], dim=1)
else: # apply SFT to all the channels
out = out * conditions[i - 1] + conditions[i]
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
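# Standalone sketch of the SFT step inside the loop above (sft_half=True):
# conditions carry (scale, shift) pairs, and only the second half of the
# channels is modulated; tensor sizes here are toy values.
import torch
if __name__ == '__main__':
    out = torch.randn(1, 8, 4, 4)                           # feature map inside the loop
    scale, shift = torch.randn(1, 4, 4, 4), torch.randn(1, 4, 4, 4)
    out_same, out_sft = torch.split(out, out.size(1) // 2, dim=1)
    out_sft = out_sft * scale + shift                       # affine (SFT) modulation
    fused = torch.cat([out_same, out_sft], dim=1)
    print(fused.shape)                                      # torch.Size([1, 8, 4, 4])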
|
Forward function for StyleGAN2GeneratorSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
Apache-2.0
|
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
"""Forward function for GFPGANv1.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether to return intermediate RGB images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
"""
conditions = []
unet_skips = []
out_rgbs = []
# encoder
feat = self.conv_body_first(x)
for i in range(self.log_size - 2):
feat = self.conv_body_down[i](feat)
unet_skips.insert(0, feat)
feat = self.final_conv(feat)
# style code
style_code = self.final_linear(feat.view(feat.size(0), -1))
if self.different_w:
style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
# decode
for i in range(self.log_size - 2):
# add unet skip
feat = feat + unet_skips[i]
# ResUpLayer
feat = self.conv_body_up[i](feat)
# generate scale and shift for SFT layers
scale = self.condition_scale[i](feat)
conditions.append(scale.clone())
shift = self.condition_shift[i](feat)
conditions.append(shift.clone())
# generate rgb images
if return_rgb:
out_rgbs.append(self.toRGB[i](feat))
# decoder
image, _ = self.stylegan_decoder([style_code],
conditions,
return_latents=return_latents,
input_is_latent=self.input_is_latent,
randomize_noise=randomize_noise)
return image, out_rgbs
|
Forward function for GFPGANv1.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
return_rgb (bool): Whether to return intermediate RGB images. Default: True.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
Apache-2.0
|
def forward(self, x, return_feats=False):
"""Forward function for FacialComponentDiscriminator.
Args:
x (Tensor): Input images.
return_feats (bool): Whether to return intermediate features. Default: False.
"""
feat = self.conv1(x)
feat = self.conv3(self.conv2(feat))
rlt_feats = []
if return_feats:
rlt_feats.append(feat.clone())
feat = self.conv5(self.conv4(feat))
if return_feats:
rlt_feats.append(feat.clone())
out = self.final_conv(feat)
if return_feats:
return out, rlt_feats
else:
return out, None
|
Forward function for FacialComponentDiscriminator.
Args:
x (Tensor): Input images.
return_feats (bool): Whether to return intermediate features. Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_arch.py
|
Apache-2.0
|
def forward(self,
styles,
conditions,
input_is_latent=False,
noise=None,
randomize_noise=True,
truncation=1,
truncation_latent=None,
inject_index=None,
return_latents=False):
"""Forward function for StyleGAN2GeneratorCSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
"""
# style codes -> latents with Style MLP layer
if not input_is_latent:
styles = [self.style_mlp(s) for s in styles]
# noises
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers # for each style conv layer
else: # use the stored noise
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
# style truncation
if truncation < 1:
style_truncation = []
for style in styles:
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_truncation
# get style latents with injection
if len(styles) == 1:
inject_index = self.num_latent
if styles[0].ndim < 3:
# repeat latent code for all the layers
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: # used for encoder with different latent code for each layer
latent = styles[0]
elif len(styles) == 2: # mixing noises
if inject_index is None:
inject_index = random.randint(1, self.num_latent - 1)
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
latent = torch.cat([latent1, latent2], 1)
# main generation
out = self.constant_input(latent.shape[0])
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
noise[2::2], self.to_rgbs):
out = conv1(out, latent[:, i], noise=noise1)
# the conditions may have fewer levels
if i < len(conditions):
# SFT part to combine the conditions
if self.sft_half: # only apply SFT to half of the channels
out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
out_sft = out_sft * conditions[i - 1] + conditions[i]
out = torch.cat([out_same, out_sft], dim=1)
else: # apply SFT to all the channels
out = out * conditions[i - 1] + conditions[i]
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
|
Forward function for StyleGAN2GeneratorCSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
|
Apache-2.0
|
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
"""Forward function for GFPGANv1Clean.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether to return intermediate RGB images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
"""
conditions = []
unet_skips = []
out_rgbs = []
# encoder
feat = F.leaky_relu_(self.conv_body_first(x), negative_slope=0.2)
for i in range(self.log_size - 2):
feat = self.conv_body_down[i](feat)
unet_skips.insert(0, feat)
feat = F.leaky_relu_(self.final_conv(feat), negative_slope=0.2)
# style code
style_code = self.final_linear(feat.view(feat.size(0), -1))
if self.different_w:
style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
# decode
for i in range(self.log_size - 2):
# add unet skip
feat = feat + unet_skips[i]
# ResUpLayer
feat = self.conv_body_up[i](feat)
# generate scale and shift for SFT layers
scale = self.condition_scale[i](feat)
conditions.append(scale.clone())
shift = self.condition_shift[i](feat)
conditions.append(shift.clone())
# generate rgb images
if return_rgb:
out_rgbs.append(self.toRGB[i](feat))
# decoder
image, _ = self.stylegan_decoder([style_code],
conditions,
return_latents=return_latents,
input_is_latent=self.input_is_latent,
randomize_noise=randomize_noise)
return image, out_rgbs
|
Forward function for GFPGANv1Clean.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
return_rgb (bool): Whether to return intermediate RGB images. Default: True.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpganv1_clean_arch.py
|
Apache-2.0
|
def forward(self,
styles,
conditions,
input_is_latent=False,
noise=None,
randomize_noise=True,
truncation=1,
truncation_latent=None,
inject_index=None,
return_latents=False):
"""Forward function for StyleGAN2GeneratorBilinearSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
"""
# style codes -> latents with Style MLP layer
if not input_is_latent:
styles = [self.style_mlp(s) for s in styles]
# noises
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers # for each style conv layer
else: # use the stored noise
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
# style truncation
if truncation < 1:
style_truncation = []
for style in styles:
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_truncation
# get style latents with injection
if len(styles) == 1:
inject_index = self.num_latent
if styles[0].ndim < 3:
# repeat latent code for all the layers
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: # used for encoder with different latent code for each layer
latent = styles[0]
elif len(styles) == 2: # mixing noises
if inject_index is None:
inject_index = random.randint(1, self.num_latent - 1)
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
latent = torch.cat([latent1, latent2], 1)
# main generation
out = self.constant_input(latent.shape[0])
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
noise[2::2], self.to_rgbs):
out = conv1(out, latent[:, i], noise=noise1)
# the conditions may have fewer levels
if i < len(conditions):
# SFT part to combine the conditions
if self.sft_half: # only apply SFT to half of the channels
out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
out_sft = out_sft * conditions[i - 1] + conditions[i]
out = torch.cat([out_same, out_sft], dim=1)
else: # apply SFT to all the channels
out = out * conditions[i - 1] + conditions[i]
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
|
Forward function for StyleGAN2GeneratorBilinearSFT.
Args:
styles (list[Tensor]): Sample codes of styles.
conditions (list[Tensor]): SFT conditions to generators.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
|
Apache-2.0
|
def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
"""Forward function for GFPGANBilinear.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
        return_rgb (bool): Whether to return intermediate RGB images. Default: True.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
"""
conditions = []
unet_skips = []
out_rgbs = []
# encoder
feat = self.conv_body_first(x)
for i in range(self.log_size - 2):
feat = self.conv_body_down[i](feat)
unet_skips.insert(0, feat)
feat = self.final_conv(feat)
# style code
style_code = self.final_linear(feat.view(feat.size(0), -1))
if self.different_w:
style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
# decode
for i in range(self.log_size - 2):
# add unet skip
feat = feat + unet_skips[i]
# ResUpLayer
feat = self.conv_body_up[i](feat)
# generate scale and shift for SFT layers
scale = self.condition_scale[i](feat)
conditions.append(scale.clone())
shift = self.condition_shift[i](feat)
conditions.append(shift.clone())
# generate rgb images
if return_rgb:
out_rgbs.append(self.toRGB[i](feat))
# decoder
image, _ = self.stylegan_decoder([style_code],
conditions,
return_latents=return_latents,
input_is_latent=self.input_is_latent,
randomize_noise=randomize_noise)
return image, out_rgbs
|
Forward function for GFPGANBilinear.
Args:
x (Tensor): Input images.
return_latents (bool): Whether to return style latents. Default: False.
return_rgb (bool): Whether to return intermediate RGB images. Default: True.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/gfpgan_bilinear_arch.py
|
Apache-2.0
|
def forward(self, x, style):
"""Forward function.
Args:
x (Tensor): Tensor with shape (b, c, h, w).
style (Tensor): Tensor with shape (b, num_style_feat).
Returns:
Tensor: Modulated tensor after convolution.
"""
b, c, h, w = x.shape # c = c_in
# weight modulation
style = self.modulation(style).view(b, 1, c, 1, 1)
# self.weight: (1, c_out, c_in, k, k); style: (b, 1, c, 1, 1)
weight = self.scale * self.weight * style # (b, c_out, c_in, k, k)
if self.demodulate:
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + self.eps)
weight = weight * demod.view(b, self.out_channels, 1, 1, 1)
weight = weight.view(b * self.out_channels, c, self.kernel_size, self.kernel_size)
if self.sample_mode == 'upsample':
x = F.interpolate(x, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners)
elif self.sample_mode == 'downsample':
x = F.interpolate(x, scale_factor=0.5, mode=self.interpolation_mode, align_corners=self.align_corners)
b, c, h, w = x.shape
x = x.view(1, b * c, h, w)
# weight: (b*c_out, c_in, k, k), groups=b
out = F.conv2d(x, weight, padding=self.padding, groups=b)
out = out.view(b, self.out_channels, *out.shape[2:4])
return out
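# Numeric sketch of the demodulation step above: after multiplying by demod,
# each output filter has (approximately) unit L2 norm, as in StyleGAN2.
# Shapes are toy values.
import torch
if __name__ == '__main__':
    b, c_out, c_in, k, eps = 2, 8, 4, 3, 1e-8
    weight = torch.randn(b, c_out, c_in, k, k)              # already style-modulated
    demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + eps)
    weight = weight * demod.view(b, c_out, 1, 1, 1)
    print(weight.pow(2).sum([2, 3, 4]).sqrt())              # all values close to 1.0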
|
Forward function.
Args:
x (Tensor): Tensor with shape (b, c, h, w).
style (Tensor): Tensor with shape (b, num_style_feat).
Returns:
Tensor: Modulated tensor after convolution.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
Apache-2.0
|
def forward(self, x, style, skip=None):
"""Forward function.
Args:
x (Tensor): Feature tensor with shape (b, c, h, w).
style (Tensor): Tensor with shape (b, num_style_feat).
skip (Tensor): Base/skip tensor. Default: None.
Returns:
Tensor: RGB images.
"""
out = self.modulated_conv(x, style)
out = out + self.bias
if skip is not None:
if self.upsample:
skip = F.interpolate(
skip, scale_factor=2, mode=self.interpolation_mode, align_corners=self.align_corners)
out = out + skip
return out
|
Forward function.
Args:
x (Tensor): Feature tensor with shape (b, c, h, w).
style (Tensor): Tensor with shape (b, num_style_feat).
skip (Tensor): Base/skip tensor. Default: None.
Returns:
Tensor: RGB images.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
Apache-2.0
|
def forward(self,
styles,
input_is_latent=False,
noise=None,
randomize_noise=True,
truncation=1,
truncation_latent=None,
inject_index=None,
return_latents=False):
"""Forward function for StyleGAN2Generator.
Args:
styles (list[Tensor]): Sample codes of styles.
input_is_latent (bool): Whether input is latent style.
Default: False.
noise (Tensor | None): Input noise or None. Default: None.
            randomize_noise (bool): Randomize noise, used when 'noise' is
                None. Default: True.
            truncation (float): The truncation ratio. Default: 1.
            truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise.
Default: None.
return_latents (bool): Whether to return style latents.
Default: False.
"""
# style codes -> latents with Style MLP layer
if not input_is_latent:
styles = [self.style_mlp(s) for s in styles]
# noises
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers # for each style conv layer
else: # use the stored noise
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
# style truncation
if truncation < 1:
style_truncation = []
for style in styles:
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_truncation
# get style latent with injection
if len(styles) == 1:
inject_index = self.num_latent
if styles[0].ndim < 3:
# repeat latent code for all the layers
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: # used for encoder with different latent code for each layer
latent = styles[0]
elif len(styles) == 2: # mixing noises
if inject_index is None:
inject_index = random.randint(1, self.num_latent - 1)
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
latent = torch.cat([latent1, latent2], 1)
# main generation
out = self.constant_input(latent.shape[0])
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
noise[2::2], self.to_rgbs):
out = conv1(out, latent[:, i], noise=noise1)
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip)
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
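# Standalone sketch of the style-truncation step above: latents are pulled
# toward a mean latent by a ratio `truncation` < 1 (the "truncation trick");
# the zero mean latent here is a toy stand-in.
import torch
if __name__ == '__main__':
    truncation, truncation_latent = 0.7, torch.zeros(1, 512)
    style = torch.randn(1, 512)
    truncated = truncation_latent + truncation * (style - truncation_latent)
    print(torch.allclose(truncated, 0.7 * style))           # True for a zero mean latent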
|
Forward function for StyleGAN2Generator.
Args:
styles (list[Tensor]): Sample codes of styles.
input_is_latent (bool): Whether input is latent style.
Default: False.
noise (Tensor | None): Input noise or None. Default: None.
randomize_noise (bool): Randomize noise, used when 'noise' is
None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise.
Default: None.
return_latents (bool): Whether to return style latents.
Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_bilinear_arch.py
|
Apache-2.0
|
def forward(self,
styles,
input_is_latent=False,
noise=None,
randomize_noise=True,
truncation=1,
truncation_latent=None,
inject_index=None,
return_latents=False):
"""Forward function for StyleGAN2GeneratorClean.
Args:
styles (list[Tensor]): Sample codes of styles.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
        randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
"""
# style codes -> latents with Style MLP layer
if not input_is_latent:
styles = [self.style_mlp(s) for s in styles]
# noises
if noise is None:
if randomize_noise:
noise = [None] * self.num_layers # for each style conv layer
else: # use the stored noise
noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
# style truncation
if truncation < 1:
style_truncation = []
for style in styles:
style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
styles = style_truncation
# get style latents with injection
if len(styles) == 1:
inject_index = self.num_latent
if styles[0].ndim < 3:
# repeat latent code for all the layers
latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
else: # used for encoder with different latent code for each layer
latent = styles[0]
elif len(styles) == 2: # mixing noises
if inject_index is None:
inject_index = random.randint(1, self.num_latent - 1)
latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
latent = torch.cat([latent1, latent2], 1)
# main generation
out = self.constant_input(latent.shape[0])
out = self.style_conv1(out, latent[:, 0], noise=noise[0])
skip = self.to_rgb1(out, latent[:, 1])
i = 1
for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
noise[2::2], self.to_rgbs):
out = conv1(out, latent[:, i], noise=noise1)
out = conv2(out, latent[:, i + 1], noise=noise2)
skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
i += 2
image = skip
if return_latents:
return image, latent
else:
return image, None
|
Forward function for StyleGAN2GeneratorClean.
Args:
styles (list[Tensor]): Sample codes of styles.
input_is_latent (bool): Whether input is latent style. Default: False.
noise (Tensor | None): Input noise or None. Default: None.
randomize_noise (bool): Randomize noise, used when 'noise' is None. Default: True.
truncation (float): The truncation ratio. Default: 1.
truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
inject_index (int | None): The injection index for mixing noise. Default: None.
return_latents (bool): Whether to return style latents. Default: False.
|
forward
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/archs/stylegan2_clean_arch.py
|
Apache-2.0
|
def color_jitter(img, shift):
"""jitter color: randomly jitter the RGB values, in numpy formats"""
jitter_val = np.random.uniform(-shift, shift, 3).astype(np.float32)
img = img + jitter_val
img = np.clip(img, 0, 1)
return img
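# Usage sketch: img is expected as float RGB in [0, 1] (outputs are clipped
# back into that range); values are synthetic.
import numpy as np
if __name__ == '__main__':
    img = np.random.rand(8, 8, 3).astype(np.float32)
    out = color_jitter(img, shift=0.1)          # per-channel shift in [-0.1, 0.1]
    assert out.min() >= 0 and out.max() <= 1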
|
jitter color: randomly jitter the RGB values, in numpy formats
|
color_jitter
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
Apache-2.0
|
def color_jitter_pt(img, brightness, contrast, saturation, hue):
"""jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats"""
fn_idx = torch.randperm(4)
for fn_id in fn_idx:
if fn_id == 0 and brightness is not None:
brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
img = adjust_brightness(img, brightness_factor)
if fn_id == 1 and contrast is not None:
contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
img = adjust_contrast(img, contrast_factor)
if fn_id == 2 and saturation is not None:
saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
img = adjust_saturation(img, saturation_factor)
if fn_id == 3 and hue is not None:
hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
img = adjust_hue(img, hue_factor)
return img
|
jitter color: randomly jitter the brightness, contrast, saturation, and hue, in torch Tensor formats
|
color_jitter_pt
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
Apache-2.0
|
def get_component_coordinates(self, index, status):
"""Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file"""
components_bbox = self.components_list[f'{index:08d}']
if status[0]: # hflip
# exchange right and left eye
tmp = components_bbox['left_eye']
components_bbox['left_eye'] = components_bbox['right_eye']
components_bbox['right_eye'] = tmp
# modify the width coordinate
components_bbox['left_eye'][0] = self.out_size - components_bbox['left_eye'][0]
components_bbox['right_eye'][0] = self.out_size - components_bbox['right_eye'][0]
components_bbox['mouth'][0] = self.out_size - components_bbox['mouth'][0]
# get coordinates
locations = []
for part in ['left_eye', 'right_eye', 'mouth']:
mean = components_bbox[part][0:2]
half_len = components_bbox[part][2]
if 'eye' in part:
half_len *= self.eye_enlarge_ratio
loc = np.hstack((mean - half_len + 1, mean + half_len))
loc = torch.from_numpy(loc).float()
locations.append(loc)
return locations
|
Get facial component (left_eye, right_eye, mouth) coordinates from a pre-loaded pth file
|
get_component_coordinates
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/data/ffhq_degradation_dataset.py
|
Apache-2.0
|
def construct_img_pyramid(self):
"""Construct image pyramid for intermediate restoration loss"""
pyramid_gt = [self.gt]
down_img = self.gt
for _ in range(0, self.log_size - 3):
down_img = F.interpolate(down_img, scale_factor=0.5, mode='bilinear', align_corners=False)
pyramid_gt.insert(0, down_img)
return pyramid_gt
|
Construct image pyramid for intermediate restoration loss
|
construct_img_pyramid
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/models/gfpgan_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/models/gfpgan_model.py
|
Apache-2.0
|
def _gram_mat(self, x):
"""Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
"""
n, c, h, w = x.size()
features = x.view(n, c, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (c * h * w)
return gram
|
Calculate Gram matrix.
Args:
x (torch.Tensor): Tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Gram matrix.
|
_gram_mat
|
python
|
OpenTalker/video-retalking
|
third_part/GFPGAN/gfpgan/models/gfpgan_model.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GFPGAN/gfpgan/models/gfpgan_model.py
|
Apache-2.0
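Since self is unused in _gram_mat, the computation can be sanity-checked as a free function; a minimal sketch:

import torch

def gram_mat(x):
    # standalone copy of the _gram_mat body above
    n, c, h, w = x.size()
    features = x.view(n, c, h * w)
    return features.bmm(features.transpose(1, 2)) / (c * h * w)

feat = torch.randn(2, 16, 8, 8)  # (n, c, h, w) feature map
print(gram_mat(feat).shape)      # torch.Size([2, 16, 16])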
|
def _umeyama(src, dst, estimate_scale=True, scale=1.0):
"""Estimate N-D similarity transformation with or without scaling.
Parameters
----------
src : (M, N) array
Source coordinates.
dst : (M, N) array
Destination coordinates.
    estimate_scale : bool
        Whether to estimate scaling factor.
    scale : float
        Fixed scale to use when `estimate_scale` is False.
    Returns
    -------
    T : (N + 1, N + 1)
        The homogeneous similarity transformation matrix. The matrix contains
        NaN values only if the problem is not well-conditioned.
    scale : float
        The estimated (or supplied) scale factor.
References
----------
.. [1] "Least-squares estimation of transformation parameters between two
point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
"""
num = src.shape[0]
dim = src.shape[1]
# Compute mean of src and dst.
src_mean = src.mean(axis=0)
dst_mean = dst.mean(axis=0)
# Subtract mean from src and dst.
src_demean = src - src_mean
dst_demean = dst - dst_mean
# Eq. (38).
A = dst_demean.T @ src_demean / num
# Eq. (39).
d = np.ones((dim,), dtype=np.double)
if np.linalg.det(A) < 0:
d[dim - 1] = -1
T = np.eye(dim + 1, dtype=np.double)
U, S, V = np.linalg.svd(A)
# Eq. (40) and (43).
rank = np.linalg.matrix_rank(A)
    if rank == 0:
        # degenerate input: keep the (T, scale) return shape consistent
        return np.nan * T, np.nan
elif rank == dim - 1:
if np.linalg.det(U) * np.linalg.det(V) > 0:
T[:dim, :dim] = U @ V
else:
s = d[dim - 1]
d[dim - 1] = -1
T[:dim, :dim] = U @ np.diag(d) @ V
d[dim - 1] = s
else:
T[:dim, :dim] = U @ np.diag(d) @ V
    if estimate_scale:
        # Eq. (41) and (42).
        scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)
    # otherwise keep the caller-supplied fixed scale
T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)
T[:dim, :dim] *= scale
return T, scale
|
Estimate N-D similarity transformation with or without scaling.
Parameters
----------
src : (M, N) array
Source coordinates.
dst : (M, N) array
Destination coordinates.
estimate_scale : bool
    Whether to estimate scaling factor.
scale : float
    Fixed scale to use when `estimate_scale` is False.
Returns
-------
T : (N + 1, N + 1)
    The homogeneous similarity transformation matrix. The matrix contains
    NaN values only if the problem is not well-conditioned.
scale : float
    The estimated (or supplied) scale factor.
References
----------
.. [1] "Least-squares estimation of transformation parameters between two
point patterns", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`
|
_umeyama
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/align_faces.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/align_faces.py
|
Apache-2.0
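A quick numerical check of _umeyama on noise-free 2-D points related by a known similarity transform (rotation by 30 degrees, scale 2, plus a shift); both the recovered matrix and scale should match:

import numpy as np

src = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
theta = np.deg2rad(30)
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
dst = 2.0 * src @ R.T + np.array([3.0, -1.0])

T, s = _umeyama(src, dst, estimate_scale=True)
src_h = np.hstack([src, np.ones((4, 1))])      # homogeneous coordinates
print(np.allclose((src_h @ T.T)[:, :2], dst))  # True
print(np.isclose(s, 2.0))                      # True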
|
def remove_prefix(self, state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
|
Old style model is stored with all names of parameters sharing common prefix 'module.'
|
remove_prefix
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/retinaface_detection.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/retinaface_detection.py
|
Apache-2.0
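remove_prefix never touches self, so it can be exercised unbound; the class name RetinaFaceDetection below is an assumption based on the file name:

state_dict = {'module.conv1.weight': 0, 'module.fc.bias': 1}
clean = RetinaFaceDetection.remove_prefix(None, state_dict, 'module.')
print(clean)  # {'conv1.weight': 0, 'fc.bias': 1}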
|
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
            elif isinstance(tup, np.ndarray):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
|
Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
|
detection_collate
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/data/wider_face.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/data/wider_face.py
|
Apache-2.0
|
def __init__(self, cfg = None, phase = 'train'):
"""
:param cfg: Network related settings.
:param phase: train or test.
"""
super(RetinaFace,self).__init__()
self.phase = phase
backbone = None
if cfg['name'] == 'mobilenet0.25':
backbone = MobileNetV1()
if cfg['pretrain']:
checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu'))
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
name = k[7:] # remove module.
new_state_dict[name] = v
# load params
backbone.load_state_dict(new_state_dict)
elif cfg['name'] == 'Resnet50':
import torchvision.models as models
backbone = models.resnet50(pretrained=cfg['pretrain'])
self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers'])
in_channels_stage2 = cfg['in_channel']
in_channels_list = [
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = cfg['out_channel']
self.fpn = FPN(in_channels_list,out_channels)
self.ssh1 = SSH(out_channels, out_channels)
self.ssh2 = SSH(out_channels, out_channels)
self.ssh3 = SSH(out_channels, out_channels)
self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
|
:param cfg: Network related settings.
:param phase: train or test.
|
__init__
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/facemodels/retinaface.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/facemodels/retinaface.py
|
Apache-2.0
|
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin
boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax
|
Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
|
point_form
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
|
def center_size(boxes):
    """ Convert prior_boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes
    Return:
        boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
    """
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,  # cx, cy
                      boxes[:, 2:] - boxes[:, :2]), 1)  # w, h
|
Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
boxes: (tensor) Converted (cx, cy, w, h) form of boxes.
|
center_size
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
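point_form and center_size above are inverses of each other; a quick round trip with the corrected center_size:

import torch

priors = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # (cx, cy, w, h)
corners = point_form(priors)
print(corners)               # tensor([[0.4000, 0.3000, 0.6000, 0.7000]])
print(center_size(corners))  # tensor([[0.5000, 0.5000, 0.2000, 0.4000]])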
|
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, 0] * inter[:, :, 1]
|
We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
|
intersect
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
|
def matrix_iou(a, b):
"""
    Return the IoU of boxes a and b; numpy version for data augmentation.
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
return area_i / (area_a[:, np.newaxis] + area_b - area_i)
|
Return the IoU of boxes a and b; numpy version for data augmentation.
|
matrix_iou
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
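A small worked example of matrix_iou in (xmin, ymin, xmax, ymax) form; the overlapping pair shares a 5x5 patch, so IoU = 25 / (100 + 100 - 25):

import numpy as np

a = np.array([[0., 0., 10., 10.]])
b = np.array([[5., 5., 15., 15.],
              [20., 20., 30., 30.]])
print(matrix_iou(a, b))  # [[0.14285714 0.        ]]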
|
def matrix_iof(a, b):
"""
    Return the IoF (intersection over foreground) of boxes a and b; numpy version for data augmentation.
"""
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
return area_i / np.maximum(area_a[:, np.newaxis], 1)
|
Return the IoF (intersection over foreground) of boxes a and b; numpy version for data augmentation.
|
matrix_iof
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
|
def encode_landm(matched, priors, variances):
"""Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 10].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded landm (tensor), Shape: [num_priors, 10]
"""
# dist b/t match center and prior's center
matched = torch.reshape(matched, (matched.size(0), 5, 2))
priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
g_cxcy = matched[:, :, :2] - priors[:, :, :2]
# encode variance
g_cxcy /= (variances[0] * priors[:, :, 2:])
# g_cxcy /= priors[:, :, 2:]
g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
# return target for smooth_l1_loss
return g_cxcy
|
Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 10].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded landm (tensor), Shape: [num_priors, 10]
|
encode_landm
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
|
def decode_landm(pre, priors, variances):
"""Decode landm from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
pre (tensor): landm predictions for loc layers,
Shape: [num_priors,10]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded landm predictions
"""
landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
), dim=1)
return landms
|
Decode landm from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
pre (tensor): landm predictions for loc layers,
Shape: [num_priors,10]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded landm predictions
|
decode_landm
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
|
def log_sum_exp(x):
"""Utility function for computing log_sum_exp while determining
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
"""
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
|
Utility function for computing log_sum_exp in a numerically stable way.
This will be used to determine unaveraged confidence loss across
all examples in a batch.
Args:
x (Variable(tensor)): conf_preds from conf layers
|
log_sum_exp
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
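The max-shift in log_sum_exp cancels exactly, so the result should agree with torch.logsumexp; a quick check:

import torch

conf_preds = torch.randn(8, 4)
ours = log_sum_exp(conf_preds)
ref = torch.logsumexp(conf_preds, dim=1, keepdim=True)
print(torch.allclose(ours, ref, atol=1e-6))  # True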
|
def nms(boxes, scores, overlap=0.5, top_k=200):
"""Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
        The indices of the kept boxes with respect to num_priors, and the number of kept boxes.
"""
keep = torch.Tensor(scores.size(0)).fill_(0).long()
if boxes.numel() == 0:
return keep
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = torch.mul(x2 - x1, y2 - y1)
v, idx = scores.sort(0) # sort in ascending order
# I = I[v >= 0.01]
idx = idx[-top_k:] # indices of the top-k largest vals
xx1 = boxes.new()
yy1 = boxes.new()
xx2 = boxes.new()
yy2 = boxes.new()
w = boxes.new()
h = boxes.new()
# keep = torch.Tensor()
count = 0
while idx.numel() > 0:
i = idx[-1] # index of current largest val
# keep.append(i)
keep[count] = i
count += 1
if idx.size(0) == 1:
break
idx = idx[:-1] # remove kept element from view
# load bboxes of next highest vals
torch.index_select(x1, 0, idx, out=xx1)
torch.index_select(y1, 0, idx, out=yy1)
torch.index_select(x2, 0, idx, out=xx2)
torch.index_select(y2, 0, idx, out=yy2)
# store element-wise max with next highest score
xx1 = torch.clamp(xx1, min=x1[i])
yy1 = torch.clamp(yy1, min=y1[i])
xx2 = torch.clamp(xx2, max=x2[i])
yy2 = torch.clamp(yy2, max=y2[i])
w.resize_as_(xx2)
h.resize_as_(yy2)
w = xx2 - xx1
h = yy2 - yy1
# check sizes of xx1 and xx2.. after each iteration
w = torch.clamp(w, min=0.0)
h = torch.clamp(h, min=0.0)
inter = w*h
# IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
union = (rem_areas - inter) + area[i]
IoU = inter/union # store result in iou
# keep only elements with an IoU <= overlap
idx = idx[IoU.le(overlap)]
return keep, count
|
Apply non-maximum suppression at test time to avoid detecting too many
overlapping bounding boxes for a given object.
Args:
boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
scores: (tensor) The class prediction scores for the img, Shape: [num_priors].
overlap: (float) The overlap thresh for suppressing unnecessary boxes.
top_k: (int) The Maximum number of box preds to consider.
Return:
The indices of the kept boxes with respect to num_priors, and the number of kept boxes.
|
nms
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_detect/utils/box_utils.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_detect/utils/box_utils.py
|
Apache-2.0
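A minimal sketch of nms on three boxes; the second box overlaps the top-scoring one with IoU of roughly 0.68 and is suppressed at the 0.5 threshold:

import torch

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = nms(boxes, scores, overlap=0.5)
print(keep[:count])  # tensor([0, 2])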
|
def positive_cap(num):
""" Cap a number to ensure positivity
:param num: positive or negative number
    :returns: (capped_number, overflow)
"""
if num < 0:
return 0, abs(num)
else:
return num, 0
|
Cap a number to ensure positivity
:param num: positive or negative number
:returns: (capped_number, overflow)
|
positive_cap
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/aligner.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
|
Apache-2.0
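positive_cap returns the capped coordinate first and the overflow second, which the caller below unpacks as (roi, border); for example:

print(positive_cap(7))   # (7, 0) -- already non-negative, no overflow
print(positive_cap(-3))  # (0, 3) -- clamped to 0 with an overflow of 3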
|
def roi_coordinates(rect, size, scale):
""" Align the rectangle into the center and return the top-left coordinates
within the new size. If rect is smaller, we add borders.
:param rect: (x, y, w, h) bounding rectangle of the face
:param size: (width, height) are the desired dimensions
:param scale: scaling factor of the rectangle to be resized
:returns: 4 numbers. Top-left coordinates of the aligned ROI.
      (x, y, border_x, border_y). All values are >= 0.
"""
rectx, recty, rectw, recth = rect
new_height, new_width = size
mid_x = int((rectx + rectw/2) * scale)
mid_y = int((recty + recth/2) * scale)
roi_x = mid_x - int(new_width/2)
roi_y = mid_y - int(new_height/2)
roi_x, border_x = positive_cap(roi_x)
roi_y, border_y = positive_cap(roi_y)
return roi_x, roi_y, border_x, border_y
|
Align the rectangle into the center and return the top-left coordinates
within the new size. If rect is smaller, we add borders.
:param rect: (x, y, w, h) bounding rectangle of the face
:param size: (width, height) are the desired dimensions
:param scale: scaling factor of the rectangle to be resized
:returns: 4 numbers. Top-left coordinates of the aligned ROI.
(x, y, border_x, border_y). All values are >= 0.
|
roi_coordinates
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/aligner.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
|
Apache-2.0
|
def scaling_factor(rect, size):
""" Calculate the scaling factor for the current image to be
resized to the new dimensions
:param rect: (x, y, w, h) bounding rectangle of the face
:param size: (width, height) are the desired dimensions
:returns: floating point scaling factor
"""
new_height, new_width = size
rect_h, rect_w = rect[2:]
height_ratio = rect_h / new_height
width_ratio = rect_w / new_width
scale = 1
if height_ratio > width_ratio:
new_recth = 0.8 * new_height
scale = new_recth / rect_h
else:
new_rectw = 0.8 * new_width
scale = new_rectw / rect_w
return scale
|
Calculate the scaling factor for the current image to be
resized to the new dimensions
:param rect: (x, y, w, h) bounding rectangle of the face
:param size: (width, height) are the desired dimensions
:returns: floating point scaling factor
|
scaling_factor
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/aligner.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
|
Apache-2.0
|
def resize_image(img, scale):
""" Resize image with the provided scaling factor
:param img: image to be resized
:param scale: scaling factor for resizing the image
"""
cur_height, cur_width = img.shape[:2]
new_scaled_height = int(scale * cur_height)
new_scaled_width = int(scale * cur_width)
return cv2.resize(img, (new_scaled_width, new_scaled_height))
|
Resize image with the provided scaling factor
:param img: image to be resized
:param scale: scaling factor for resizing the image
|
resize_image
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/aligner.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
|
Apache-2.0
|
def resize_align(img, points, size):
""" Resize image and associated points, align face to the center
and crop to the desired size
:param img: image to be resized
:param points: *m* x 2 array of points
:param size: (height, width) tuple of new desired size
"""
new_height, new_width = size
# Resize image based on bounding rectangle
rect = cv2.boundingRect(np.array([points], np.int32))
scale = scaling_factor(rect, size)
img = resize_image(img, scale)
# Align bounding rect to center
cur_height, cur_width = img.shape[:2]
roi_x, roi_y, border_x, border_y = roi_coordinates(rect, size, scale)
roi_h = np.min([new_height-border_y, cur_height-roi_y])
roi_w = np.min([new_width-border_x, cur_width-roi_x])
# Crop to supplied size
crop = np.zeros((new_height, new_width, 3), img.dtype)
crop[border_y:border_y+roi_h, border_x:border_x+roi_w] = (
img[roi_y:roi_y+roi_h, roi_x:roi_x+roi_w])
# Scale and align face points to the crop
points[:, 0] = (points[:, 0] * scale) + (border_x - roi_x)
points[:, 1] = (points[:, 1] * scale) + (border_y - roi_y)
return (crop, points)
|
Resize image and associated points, align face to the center
and crop to the desired size
:param img: image to be resized
:param points: *m* x 2 array of points
:param size: (height, width) tuple of new desired size
|
resize_align
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/aligner.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/aligner.py
|
Apache-2.0
|
def mask_from_points(size, points):
""" Create a mask of supplied size from supplied points
:param size: tuple of output mask size
:param points: array of [x, y] points
:returns: mask of values 0 and 255 where
255 indicates the convex hull containing the points
"""
radius = 10 # kernel size
kernel = np.ones((radius, radius), np.uint8)
mask = np.zeros(size, np.uint8)
cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)
mask = cv2.erode(mask, kernel)
return mask
|
Create a mask of supplied size from supplied points
:param size: tuple of output mask size
:param points: array of [x, y] points
:returns: mask of values 0 and 255 where
255 indicates the convex hull containing the points
|
mask_from_points
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/blender.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
|
Apache-2.0
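A small sketch of mask_from_points; note that size is (height, width) and the 10-pixel erosion shrinks the hull slightly:

import numpy as np
import cv2

pts = np.array([[20, 20], [108, 20], [108, 108], [20, 108]], np.int32)
mask = mask_from_points((128, 128), pts)
print(mask.dtype, mask.max())  # uint8 255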
|
def overlay_image(foreground_image, mask, background_image):
""" Overlay foreground image onto the background given a mask
:param foreground_image: foreground image points
:param mask: [0-255] values in mask
:param background_image: background image points
:returns: image with foreground where mask > 0 overlaid on background image
"""
foreground_pixels = mask > 0
background_image[..., :3][foreground_pixels] = foreground_image[..., :3][foreground_pixels]
return background_image
|
Overlay foreground image onto the background given a mask
:param foreground_image: foreground image points
:param mask: [0-255] values in mask
:param background_image: background image points
:returns: image with foreground where mask > 0 overlaid on background image
|
overlay_image
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/blender.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
|
Apache-2.0
|
def apply_mask(img, mask):
""" Apply mask to supplied image
:param img: max 3 channel image
:param mask: [0-255] values in mask
:returns: new image with mask applied
"""
masked_img = np.copy(img)
num_channels = 3
for c in range(num_channels):
masked_img[..., c] = img[..., c] * (mask / 255)
return masked_img
|
Apply mask to supplied image
:param img: max 3 channel image
:param mask: [0-255] values in mask
:returns: new image with mask applied
|
apply_mask
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/blender.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/blender.py
|
Apache-2.0
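A tiny example of apply_mask; pixels where the mask is 0 go to black, pixels where it is 255 are kept:

import numpy as np

img = np.full((4, 4, 3), 200, np.uint8)
mask = np.zeros((4, 4), np.uint8)
mask[1:3, 1:3] = 255
out = apply_mask(img, mask)
print(out[0, 0], out[1, 1])  # [0 0 0] [200 200 200]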
|
def boundary_points(points, width_percent=0.1, height_percent=0.1):
""" Produce additional boundary points
:param points: *m* x 2 array of x,y points
:param width_percent: [-1, 1] percentage of width to taper inwards. Negative for opposite direction
:param height_percent: [-1, 1] percentage of height to taper downwards. Negative for opposite direction
:returns: 2 additional points at the top corners
"""
x, y, w, h = cv2.boundingRect(np.array([points], np.int32))
spacerw = int(w * width_percent)
spacerh = int(h * height_percent)
return [[x+spacerw, y+spacerh],
[x+w-spacerw, y+spacerh]]
|
Produce additional boundary points
:param points: *m* x 2 array of x,y points
:param width_percent: [-1, 1] percentage of width to taper inwards. Negative for opposite direction
:param height_percent: [-1, 1] percentage of height to taper downwards. Negative for opposite direction
:returns: 2 additional points at the top corners
|
boundary_points
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/locator.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
|
Apache-2.0
|
def face_points_dlib(img, add_boundary_points=True):
""" Locates 68 face points using dlib (http://dlib.net)
Requires shape_predictor_68_face_landmarks.dat to be in face_morpher/data
Download at: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
:param img: an image array
:param add_boundary_points: bool to add additional boundary points
:returns: Array of x,y face points. Empty array if no face found
"""
try:
points = []
rgbimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rects = dlib_detector(rgbimg, 1)
if rects and len(rects) > 0:
# We only take the first found face
shapes = dlib_predictor(rgbimg, rects[0])
points = np.array([(shapes.part(i).x, shapes.part(i).y) for i in range(68)], np.int32)
if add_boundary_points:
# Add more points inwards and upwards as dlib only detects up to eyebrows
points = np.vstack([
points,
boundary_points(points, 0.1, -0.03),
boundary_points(points, 0.13, -0.05),
boundary_points(points, 0.15, -0.08),
boundary_points(points, 0.33, -0.12)])
return points
except Exception as e:
print(e)
return []
|
Locates 68 face points using dlib (http://dlib.net)
Requires shape_predictor_68_face_landmarks.dat to be in face_morpher/data
Download at: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
:param img: an image array
:param add_boundary_points: bool to add additional boundary points
:returns: Array of x,y face points. Empty array if no face found
|
face_points_dlib
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/locator.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
|
Apache-2.0
|
def weighted_average_points(start_points, end_points, percent=0.5):
""" Weighted average of two sets of supplied points
:param start_points: *m* x 2 array of start face points.
:param end_points: *m* x 2 array of end face points.
:param percent: [0, 1] percentage weight on start_points
:returns: *m* x 2 array of weighted average points
"""
if percent <= 0:
return end_points
elif percent >= 1:
return start_points
else:
return np.asarray(start_points*percent + end_points*(1-percent), np.int32)
|
Weighted average of two sets of supplied points
:param start_points: *m* x 2 array of start face points.
:param end_points: *m* x 2 array of end face points.
:param percent: [0, 1] percentage weight on start_points
:returns: *m* x 2 array of weighted average points
|
weighted_average_points
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/locator.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/locator.py
|
Apache-2.0
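Note that percent weighs start_points, not end_points; a quick example at the midpoint:

import numpy as np

start = np.array([[0, 0], [10, 10]], np.float32)
end = np.array([[10, 0], [20, 10]], np.float32)
print(weighted_average_points(start, end, percent=0.5))
# [[ 5  0]
#  [15 10]]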
|
def morph(src_img, src_points, dest_img, dest_points,
video, width=500, height=600, num_frames=20, fps=10,
out_frames=None, out_video=None, plot=False, background='black'):
"""
Create a morph sequence from source to destination image
:param src_img: ndarray source image
:param src_points: source image array of x,y face points
:param dest_img: ndarray destination image
:param dest_points: destination image array of x,y face points
:param video: facemorpher.videoer.Video object
"""
size = (height, width)
stall_frames = np.clip(int(fps*0.15), 1, fps) # Show first & last longer
plt = plotter.Plotter(plot, num_images=num_frames, out_folder=out_frames)
num_frames -= (stall_frames * 2) # No need to process src and dest image
plt.plot_one(src_img)
video.write(src_img, 1)
# Produce morph frames!
for percent in np.linspace(1, 0, num=num_frames):
points = locator.weighted_average_points(src_points, dest_points, percent)
src_face = warper.warp_image(src_img, src_points, points, size)
end_face = warper.warp_image(dest_img, dest_points, points, size)
average_face = blender.weighted_average(src_face, end_face, percent)
if background in ('transparent', 'average'):
mask = blender.mask_from_points(average_face.shape[:2], points)
average_face = np.dstack((average_face, mask))
if background == 'average':
average_background = blender.weighted_average(src_img, dest_img, percent)
average_face = blender.overlay_image(average_face, mask, average_background)
plt.plot_one(average_face)
plt.save(average_face)
video.write(average_face)
plt.plot_one(dest_img)
video.write(dest_img, stall_frames)
plt.show()
|
Create a morph sequence from source to destination image
:param src_img: ndarray source image
:param src_points: source image array of x,y face points
:param dest_img: ndarray destination image
:param dest_points: destination image array of x,y face points
:param video: facemorpher.videoer.Video object
|
morph
|
python
|
OpenTalker/video-retalking
|
third_part/GPEN/face_morpher/facemorpher/morpher.py
|
https://github.com/OpenTalker/video-retalking/blob/master/third_part/GPEN/face_morpher/facemorpher/morpher.py
|
Apache-2.0
|