file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
handyinfer/visualization/__init__.py | Python | from .vis_depth_estimation import vis_depth_estimation
from .vis_face_alignment import vis_face_alignment
__all__ = ['vis_face_alignment', 'vis_depth_estimation']
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/visualization/vis_depth_estimation.py | Python | import matplotlib
import matplotlib.cm
import numpy as np
import torch
def vis_depth_estimation(value,
                         vmin=None,
                         vmax=None,
                         cmap='gray_r',
                         invalid_val=-99,
                         invalid_mask=None,
                         background_color=(128, 128, 128, 255),
                         gamma_corrected=False,
                         value_transform=None):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W).
            All singular dimensions are squeezed.
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd
            percentile of the valid values is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th
            percentile of the valid values is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'.
            Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels.
            Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Apply transform function to valid pixels before coloring.
            Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()
    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)
    # normalize using robust percentiles of the valid region
    vmin = np.percentile(value[mask], 2) if vmin is None else vmin
    vmax = np.percentile(value[mask], 85) if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.
    # grey out the invalid values (NaN is rendered with the colormap's 'bad' color)
    value[invalid_mask] = np.nan
    # Fix: matplotlib.cm.get_cmap is deprecated since 3.7 and removed in 3.9;
    # prefer the colormap registry, falling back for old matplotlib versions.
    try:
        cmapper = matplotlib.colormaps[cmap]
    except AttributeError:
        cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
    value = cmapper(value, bytes=True)  # (H, W, 4) uint8
    img = value[...]
    img[invalid_mask] = background_color
    if gamma_corrected:
        # gamma correction (gamma = 2.2)
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
handyinfer/visualization/vis_face_alignment.py | Python | import cv2
import numpy as np
def vis_face_alignment(img, landmarks, save_path=None, to_bgr=False):
    """Draw facial landmarks on a copy of the input image.

    Args:
        img (numpy.ndarray): Input image, shape (H, W, C).
        landmarks (list): A list of per-face landmark arrays; each landmark is an (x, y) pair.
        save_path (str, optional): If given, write the annotated image to this path. Defaults to None.
        to_bgr (bool, optional): Convert the image from RGB to BGR before drawing. Defaults to False.

    Returns:
        numpy.ndarray: The annotated copy of the image (new: previously nothing was returned,
        so the result was lost unless save_path was set).
    """
    img = np.copy(img)
    h, w = img.shape[0:2]
    # Fix: for images smaller than 150 px the old int(max(h, w) / 150) was 0,
    # which is an invalid cv2 thickness; clamp to at least 1.
    circle_size = max(1, int(max(h, w) / 150))
    if to_bgr:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    for landmarks_face in landmarks:
        for lm in landmarks_face:
            cv2.circle(img, (int(lm[0]), int(lm[1])), 1, (0, 150, 0), circle_size)
    # save img
    if save_path is not None:
        cv2.imwrite(save_path, img)
    return img
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
inference/get_data.sh | Shell | # salient object detection
wget https://huggingface.co/Xintao/HandyInfer/resolve/main/data/jump_cat.png -P inference/data
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
inference/inference_depth_estimation.py | Python | import argparse
import cv2
import torch
from handyinfer.depth_estimation import init_depth_estimation_model
from handyinfer.utils import img2tensor, tensor2img_fast
def main(args):
    """Run depth estimation on a single image and optionally save the result.

    Args:
        args: Parsed CLI namespace with img_path, save_path and model_name.
    """
    # Fix: the device was hard-coded to 'cuda', which crashes on CPU-only hosts.
    # NOTE(review): assumes init_depth_estimation_model places the network on the
    # same default device — TODO confirm against its implementation.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    depth_net = init_depth_estimation_model(args.model_name)
    img = cv2.imread(args.img_path)
    img = img2tensor(img) / 255.
    img = img.to(device).unsqueeze(0)
    with torch.no_grad():
        pred = depth_net.infer(img)
    # save img
    if args.save_path is not None:
        pred = tensor2img_fast(pred)
        cv2.imwrite(args.save_path, pred)
if __name__ == '__main__':
    # Command-line entry point for the depth-estimation demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--img_path', type=str, default='inference/data/test_depth_estimation.jpg')
    arg_parser.add_argument('--save_path', type=str, default='result_depth_estimate.png')
    arg_parser.add_argument('--model_name', type=str, default='ZoeD_N')
    main(arg_parser.parse_args())
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
inference/inference_face_alignment.py | Python | import argparse
import cv2
import torch
from handyinfer.face_alignment import init_face_alignment_model, landmark_98_to_68
from handyinfer.visualization import vis_face_alignment
def main(args):
    """Detect facial landmarks on a single image and visualize them."""
    # Build the alignment model and load the input image from disk.
    model = init_face_alignment_model(args.model_name)
    image = cv2.imread(args.img_path)
    # Inference only: disable gradient tracking.
    with torch.no_grad():
        lms = model.get_landmarks(image)
    # Optionally convert the 98-point layout to the classic 68-point one.
    lms = landmark_98_to_68(lms) if args.to68 else lms
    vis_face_alignment(image, [lms], args.save_path)
if __name__ == '__main__':
    # Command-line entry point for the face-alignment demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--img_path', type=str, default='inference/data/test_face_alignment.jpg')
    arg_parser.add_argument('--save_path', type=str, default='result_face_alignment.png')
    arg_parser.add_argument('--model_name', type=str, default='awing_fan')
    arg_parser.add_argument('--half', action='store_true')
    arg_parser.add_argument('--to68', action='store_true')
    main(arg_parser.parse_args())
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
inference/inference_saliency_detection.py | Python | import argparse
import cv2
import torch
import torch.nn.functional as F
from handyinfer.saliency_detection import init_saliency_detection_model
from handyinfer.utils import tensor2img_fast
def main(args):
    """Run salient-object detection on one image and save the predicted mask."""
    # initialize model
    net = init_saliency_detection_model(args.model_name)
    image = cv2.imread(args.img_path)
    with torch.no_grad():
        mask = net(image)
        # resize the prediction back to the input resolution
        mask = F.interpolate(mask, image.shape[0:2], mode='bilinear', align_corners=False)
    mask = tensor2img_fast(mask)
    # save img
    if args.save_path is not None:
        cv2.imwrite(args.save_path, mask)
if __name__ == '__main__':
    # Command-line entry point for the saliency-detection demo.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--img_path', type=str, default='inference/data/jump_cat.png')
    arg_parser.add_argument('--save_path', type=str, default='result_saliency_detection.png')
    arg_parser.add_argument('--model_name', type=str, default='inspyrenet')
    arg_parser.add_argument('--half', action='store_true')
    main(arg_parser.parse_args())
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
setup.py | Python | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import time
# Path of the version module generated by write_version_py() at build time.
version_file = 'handyinfer/version.py'
def readme():
    """Return the project's long description read from README.md (UTF-8)."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
def get_git_hash():
    """Return the full git commit sha of HEAD, or 'unknown' if it cannot be read."""

    def _minimal_ext_cmd(cmd):
        # construct minimal environment so git output is locale-independent
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    # Fix: a failed `git rev-parse` (e.g. not inside a git checkout) exits
    # non-zero with empty stdout instead of raising, which used to yield an
    # empty sha string; normalize that case to 'unknown' as well.
    if not sha:
        sha = 'unknown'
    return sha
def get_hash():
    """Return the short (7-char) git sha, or 'unknown' outside a git checkout."""
    # Only query git when a .git directory is present in the current directory.
    return get_git_hash()[:7] if os.path.exists('.git') else 'unknown'
def write_version_py():
    """Render and write the generated version module from VERSION + git sha."""
    template = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    git_sha = get_hash()
    with open('VERSION', 'r') as fin:
        short_version = fin.read().strip()
    # Numeric components stay bare; anything else (e.g. 'rc1') is quoted.
    parts = [p if p.isdigit() else f'"{p}"' for p in short_version.split('.')]
    rendered = template.format(time.asctime(), short_version, git_sha, ', '.join(parts))
    with open(version_file, 'w') as fout:
        fout.write(rendered)
def get_version():
    """Read __version__ from the generated version file.

    Fix: the original executed the file and fished the result out of
    ``locals()``; ``exec`` is not guaranteed to update a function's locals,
    and this pattern stops working on Python 3.13 (PEP 667). Execute into an
    explicit namespace dict instead.
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def get_requirements(filename='requirements.txt'):
    """Parse a pip requirements file located next to this script.

    Fix: blank lines and ``#`` comment lines are now skipped; previously they
    were kept verbatim and leaked into ``install_requires``.

    Args:
        filename (str): File name relative to this script's directory.

    Returns:
        list[str]: Cleaned requirement specifier strings.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        lines = [line.strip() for line in f]
    return [line for line in lines if line and not line.startswith('#')]
if __name__ == '__main__':
    # Regenerate handyinfer/version.py first so the built distribution records
    # the current VERSION file contents and git sha.
    write_version_py()
    setup(
        name='handyinfer',
        version=get_version(),
        description='Handy inference',
        # long_description=readme(),
        # long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision',
        url='https://github.com/xinntao/HandyInfer',
        include_package_data=True,
        # Exclude working directories that live alongside the package source.
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
| xinntao/HandyInfer | 7 | Python | xinntao | Xintao | Tencent | |
clean_bib.py | Python | import argparse
import bibtexparser
from bibtexparser.bibdatabase import (BibDatabase, BibDataString, BibDataStringExpression)
from bibtexparser.bparser import BibTexParser
from bibtexparser.bwriter import BibTexWriter
# bibtex strings (e.g., #cvpr#) for conferences
CONFERENCE_BIBSTR = {
    'CVPR': '#cvpr#',
    'CVPR Workshops': '#cvprw#',
    'ICCV': '#iccv#',
    'ICCV Workshops': '#iccvw#',
    'ECCV': '#eccv#',
    'ECCV Workshops': '#eccvw#',
    'NeurIPS': '#nips#',
    'ICPR': '#icpr#',
    'BMVC': '#bmvc#',
    'ACM MM': '#acmmm#',
    'ICME': '#icme#',
    'ICASSP': '#icassp#',
    'ICIP': '#icip#',
    'ACCV': '#accv#',
    'ICLR': '#iclr#',
    'IJCAI': '#ijcai#',
    'PR': '#pr#',
    'AAAI': '#aaai#',
    'ICML': '#icml#',
}
# bibtex strings (e.g., #pami#) for journals
# Fix: 'IEEE TCSVT' appeared twice; the second entry ('#csvt#') silently
# overwrote '#tcsvt#'. Both @String aliases expand to the same journal name,
# so the canonical '#tcsvt#' is kept and the duplicate key removed.
JOURNAL_BIBSTR = {
    'IEEE TPAMI': '#pami#',
    'IJCV': '#ijcv#',
    'ACM TOG': '#tog#',
    'IEEE TIP': '#tip#',
    'IEEE TVCG': '#tvcg#',
    'IEEE TCSVT': '#tcsvt#',
    'IEEE TMM': '#tmm#',
}
# shortname dict for conferences
CONFERENCE_SHORTNAMES = {
    'cvpr': 'CVPR',
    'cvprw': 'CVPR Workshops',
    'iccv': 'ICCV',
    'iccvw': 'ICCV Workshops',
    'eccv': 'ECCV',
    'eccvw': 'ECCV Workshops',
    'nips': 'NeurIPS',
    'icpr': 'ICPR',
    'bmvc': 'BMVC',
    'acmmm': 'ACM MM',
    'icme': 'ICME',
    'icassp': 'ICASSP',
    'icip': 'ICIP',
    'accv': 'ACCV',
    'iclr': 'ICLR',
    'ijcai': 'IJCAI',
    'pr': 'PR',
    'aaai': 'AAAI',
    'icml': 'ICML'
}
# shortname dict for journals ('tcsvt' and 'csvt' are both accepted aliases)
JOURNAL_SHORTNAMES = {
    'pami': 'IEEE TPAMI',
    'ijcv': 'IJCV',
    'tog': 'ACM TOG',
    'tip': 'IEEE TIP',
    'tvcg': 'IEEE TVCG',
    'tcsvt': 'IEEE TCSVT',
    'tmm': 'IEEE TMM',
    'csvt': 'IEEE TCSVT'
}
# @String header prepended to the cleaned bib file so the abbreviations resolve
SHORTNAME_BIBSTR = r"""@String(PAMI = {IEEE TPAMI})
@String(IJCV = {IJCV})
@String(CVPR = {CVPR})
@String(CVPRW = {CVPR Workshops})
@String(ICCV = {ICCV})
@String(ICCVW = {ICCV Workshops})
@String(ECCV = {ECCV})
@String(ECCVW = {ECCV Workshops})
@String(NIPS = {NeurIPS})
@String(ICPR = {ICPR})
@String(BMVC = {BMVC})
@String(TOG = {ACM TOG})
@String(TIP = {IEEE TIP})
@String(TVCG = {IEEE TVCG})
@String(TCSVT = {IEEE TCSVT})
@String(TMM = {IEEE TMM})
@String(ACMMM = {ACM MM})
@String(ICME = {ICME})
@String(ICASSP= {ICASSP})
@String(ICIP = {ICIP})
@String(ACCV = {ACCV})
@String(ICLR = {ICLR})
@String(IJCAI = {IJCAI})
@String(PR = {PR})
@String(AAAI = {AAAI})
@String(CSVT = {IEEE TCSVT})
@String(ICML = {ICML})
"""
def main(args):
    """Clean a BibTeX file.

    Normalizes venue names to @String abbreviations, drops noisy fields,
    interactively asks about unknown fields/venues and skips duplicates.

    Args:
        args: Parsed CLI namespace with `input` and `output` bib file paths.
    """
    # read bib file and parse without interpreting the BibDataString, e.g., #cvpr#
    parser = BibTexParser(interpolate_strings=False)
    with open(args.input, encoding='utf-8') as bibtex_file:
        bib_database = bibtexparser.load(bibtex_file, parser)
    new_bib_database = BibDatabase()  # storing the new bib entries
    new_bib_database.entries = []
    # fields to be reserved in each bib entry
    reserved_fields = [
        'ID', 'ENTRYTYPE', 'title', 'author', 'booktitle', 'year', 'journal', 'comment', 'groups', 'timestamp', 'file',
        'howpublished'
    ]
    # fields to be removed in each bib entry
    removed_fields = [
        'pages', 'number', 'volume', 'organization', 'date', 'owner', 'publisher', 'journaltitle', 'eprint',
        'eprintclass', 'eprinttype', 'institution'
    ]
    # for duplicate checking
    all_ids = []
    all_titles = []
    for entry in bib_database.entries:
        entry_type = entry['ENTRYTYPE']
        entry_id = entry['ID']
        new_entry = dict(ID=entry_id, ENTRYTYPE=entry_type)
        # change date to year
        if 'date' in entry and 'year' not in entry:
            new_entry['year'] = entry['date']
        # change journaltitle to booktitle or journal
        if 'journaltitle' in entry:
            if entry_type == 'inproceedings':
                new_entry['booktitle'] = entry['journaltitle']
            elif entry_type == 'article':
                new_entry['journal'] = entry['journaltitle']
        # remove unnecessary keys; ask interactively about unknown ones
        for key in entry.keys():
            if key not in reserved_fields:
                if key in removed_fields:
                    print(f'Remove {key} for {entry_id}')
                else:
                    status = input(f'Unknown key: {key} for {entry_id}\n'
                                   '> Enter R/r for reserving, or leave it for deleting: ')
                    if status.lower() == 'r':
                        new_entry[key] = entry[key]
            else:
                new_entry[key] = entry[key]
        # for inproceedings
        if entry_type == 'inproceedings':
            # check booktitle; .get() fixes a KeyError for entries without one.
            # A BibDataStringExpression (e.g. already '#cvpr#') is passed through.
            if isinstance(new_entry.get('booktitle'), str):
                booktitle_text = new_entry['booktitle'].lower()
                new_booktitle = None
                if 'international conference on computer vision' in booktitle_text or 'iccv' in booktitle_text:
                    new_booktitle = 'iccvw' if 'workshop' in booktitle_text else 'iccv'
                elif 'computer vision and pattern recognition' in booktitle_text or 'cvpr' in booktitle_text:
                    new_booktitle = 'cvprw' if 'workshop' in booktitle_text else 'cvpr'
                elif 'international conference on machine learning' in booktitle_text or 'icml' in booktitle_text:
                    new_booktitle = 'icmlw' if 'workshop' in booktitle_text else 'icml'
                else:
                    new_booktitle = input(f'Unknown conference name: {new_entry["booktitle"]}\n'
                                          f'> Please enter the abbreviation(Leave it for not changing): ')
                if new_booktitle is not None and new_booktitle != '':
                    new_entry['booktitle'] = BibDataStringExpression([BibDataString(new_bib_database, new_booktitle)])
        elif entry_type == 'article':
            # remove the 'arXiv preprint' string
            if 'journal' in new_entry and isinstance(new_entry['journal'],
                                                     str) and new_entry['journal'].startswith('arXiv preprint'):
                new_entry['journal'] = new_entry['journal'].replace('arXiv preprint ', '')
            # check journal
            if 'journal' in new_entry and isinstance(new_entry['journal'],
                                                     str) and not new_entry['journal'].startswith('arXiv:'):
                journal_text = new_entry['journal'].lower()
                new_journal = None
                if 'pattern analysis and machine intelligence' in journal_text or 'pami' in journal_text:
                    # Fix: this branch used to assign 'iccvw' (copy-paste from the
                    # conference code), mapping TPAMI articles to the ICCV-workshop
                    # bibstring; the correct abbreviation is 'pami'.
                    new_journal = 'pami'
                else:
                    new_journal = input(f'Unknown journal name: {new_entry["journal"]}\n'
                                        f'> Please enter the abbreviation(Leave it for not changing): ')
                if new_journal is not None and new_journal != '':
                    new_entry['journal'] = BibDataStringExpression([BibDataString(new_bib_database, new_journal)])
        # check duplication
        if new_entry['ID'].lower() in all_ids or new_entry['title'].lower() in all_titles:
            print(f'Entry has already exists, please check: (ID: {new_entry["ID"].lower() in all_ids}, '
                  f'title: {new_entry["title"].lower() in all_titles})')
            print(new_entry)
            input('Press any to continue (will not add it to the new bib file):')
            continue
        # append to the database
        new_bib_database.entries.append(new_entry)
        all_ids.append(new_entry['ID'].lower())
        all_titles.append(new_entry['title'].lower())
    writer = BibTexWriter()
    writer.indent = '    '  # indent entries with 4 spaces instead of one
    # write to the new bib file
    with open(args.output, 'w', encoding='utf-8') as bibtex_file:
        bibtex_file.write(writer.write(new_bib_database))
    # add shortname bib string to the bib file
    with open(args.output, 'r+', encoding='utf-8') as f:
        content = f.read()
        f.seek(0, 0)
        f.write(SHORTNAME_BIBSTR.rstrip('\r\n') + '\n\n' + content)
if __name__ == '__main__':
    # CLI entry point: clean an input .bib file into an output .bib file.
    cli = argparse.ArgumentParser()
    cli.add_argument('--input', type=str, help='Input path to *.bib file', default='bib.bib')
    cli.add_argument('--output', type=str, help='Output path to *.bib file', default='bib_clean.bib')
    main(cli.parse_args())
| xinntao/HandyLatex | 16 | Collections of Beautiful Latex Snippets | Python | xinntao | Xintao | Tencent |
setup.py | Python | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import sys
import time
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
# Path of the version module generated by write_version_py() at build time.
version_file = 'basicsr/version.py'
def readme():
    """Load README.md (UTF-8) for use as the package long description."""
    with open('README.md', encoding='utf-8') as fh:
        text = fh.read()
    return text
def get_git_hash():
    """Return the git sha of HEAD, or 'unknown' when it cannot be determined."""

    def _minimal_ext_cmd(cmd):
        # construct minimal environment (locale pinned to C for stable output)
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        sha = 'unknown'
    # Fix: when git exits non-zero (not a git checkout) it prints nothing and
    # does not raise, so sha used to end up as ''; treat that as 'unknown' too.
    if not sha:
        sha = 'unknown'
    return sha
def get_hash():
    """Short git sha for the current checkout ('unknown' when not in git)."""
    if not os.path.exists('.git'):
        return 'unknown'
    # Truncate the full sha to the conventional 7 characters.
    return get_git_hash()[:7]
def write_version_py():
    """Write the generated version module from the VERSION file and git sha."""
    header = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    commit = get_hash()
    with open('VERSION', 'r') as version_fh:
        release = version_fh.read().strip()
    # Build the version_info tuple body: digits stay bare, other parts quoted.
    info = ', '.join(x if x.isdigit() else f'"{x}"' for x in release.split('.'))
    with open(version_file, 'w') as out_fh:
        out_fh.write(header.format(time.asctime(), release, commit, info))
def get_version():
    """Read __version__ from the generated version file.

    Fix: executing into ``locals()`` and reading it back is fragile and no
    longer works on Python 3.13 (PEP 667 changed locals() semantics); use an
    explicit namespace dict instead.
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=None):
    """Create a torch C++/CUDA extension spec for setup().

    Builds a CUDAExtension when CUDA is available (or FORCE_CUDA=1 is set),
    otherwise falls back to a CPU-only CppExtension.

    Args:
        name (str): Extension name (last component of the dotted module path).
        module (str): Dotted package path the extension lives under; source
            paths are resolved relative to this package directory.
        sources (list[str]): C++ source files, relative to the module directory.
        sources_cuda (list[str], optional): Extra sources compiled only in the
            CUDA build. Defaults to None (no extra sources).

    Returns:
        setuptools.Extension: The configured extension object.
    """
    if sources_cuda is None:
        sources_cuda = []
    define_macros = []
    extra_compile_args = {'cxx': []}
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        # Fix: `sources += sources_cuda` mutated the caller's list in place;
        # build a new list so the argument is left untouched.
        sources = sources + sources_cuda
    else:
        print(f'Compiling {name} without CUDA')
        extension = CppExtension
    return extension(
        name=f'{module}.{name}',
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def get_requirements(filename='requirements.txt'):
    """Parse a pip requirements file located next to this script.

    Fix: strips whitespace and skips blank/comment lines, which the original
    kept and passed straight into ``install_requires``.

    Args:
        filename (str): File name relative to this script's directory.

    Returns:
        list[str]: Requirement specifier strings.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        stripped = [line.strip() for line in f]
    return [req for req in stripped if req and not req.startswith('#')]
if __name__ == '__main__':
    # Optional flag: `python setup.py ... --cuda_ext` compiles the custom
    # C++/CUDA ops; the flag is consumed here so setuptools never sees it.
    if '--cuda_ext' in sys.argv:
        ext_modules = [
            make_cuda_ext(
                name='deform_conv_ext',
                module='basicsr.ops.dcn',
                sources=['src/deform_conv_ext.cpp'],
                sources_cuda=['src/deform_conv_cuda.cpp', 'src/deform_conv_cuda_kernel.cu']),
            make_cuda_ext(
                name='fused_act_ext',
                module='basicsr.ops.fused_act',
                sources=['src/fused_bias_act.cpp'],
                sources_cuda=['src/fused_bias_act_kernel.cu']),
            make_cuda_ext(
                name='upfirdn2d_ext',
                module='basicsr.ops.upfirdn2d',
                sources=['src/upfirdn2d.cpp'],
                sources_cuda=['src/upfirdn2d_kernel.cu']),
        ]
        sys.argv.remove('--cuda_ext')
    else:
        ext_modules = []
    # Regenerate basicsr/version.py so the build records VERSION + git sha.
    write_version_py()
    setup(
        name='basicsr',
        version=get_version(),
        description='Open Source Image and Video Super-Resolution Toolbox',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision, restoration, super resolution',
        url='https://github.com/xinntao/BasicSR',
        include_package_data=True,
        # Exclude working directories that live alongside the package source.
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        ext_modules=ext_modules,
        # BuildExtension handles the mixed C++/CUDA compilation for torch ops.
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| xinntao/ProjectTemplate-Python | 234 | Python Project Template | Python | xinntao | Xintao | Tencent |
cog_predict.py | Python | # flake8: noqa
# This file is used for deploying replicate models
# running: cog predict -i img=@inputs/00017_gray.png -i version='General - v3' -i scale=2 -i face_enhance=True -i tile=0
# push: cog push r8.im/xinntao/realesrgan
import os
# NOTE(review): installing dependencies via os.system at import time is a
# deployment shortcut for the replicate/cog container image; it runs on every
# import of this module — confirm this is intended outside the cog build.
os.system('pip install gfpgan')
os.system('python setup.py develop')
import cv2
import shutil
import tempfile
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from realesrgan.utils import RealESRGANer
try:
from cog import BasePredictor, Input, Path
from gfpgan import GFPGANer
except Exception:
print('please install cog and realesrgan package')
class Predictor(BasePredictor):
    """Replicate/cog predictor wrapping Real-ESRGAN with optional GFPGAN face enhancement."""

    def setup(self):
        """Prepare the working directory and download model weights if missing."""
        os.makedirs('output', exist_ok=True)
        # download weights
        if not os.path.exists('weights/realesr-general-x4v3.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P ./weights'
            )
        if not os.path.exists('weights/GFPGANv1.4.pth'):
            os.system('wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P ./weights')
        if not os.path.exists('weights/RealESRGAN_x4plus.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./weights'
            )
        if not os.path.exists('weights/RealESRGAN_x4plus_anime_6B.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth -P ./weights'
            )
        if not os.path.exists('weights/realesr-animevideov3.pth'):
            os.system(
                'wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth -P ./weights'
            )

    def choose_model(self, scale, version, tile=0):
        """Instantiate self.upsampler and self.face_enhancer for the given version.

        Note: `version` values outside the four known choices leave
        self.upsampler unset; cog restricts the input to the declared choices.
        """
        half = True if torch.cuda.is_available() else False
        if version == 'General - RealESRGANplus':
            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
            model_path = 'weights/RealESRGAN_x4plus.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'General - v3':
            model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
            model_path = 'weights/realesr-general-x4v3.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'Anime - anime6B':
            model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
            model_path = 'weights/RealESRGAN_x4plus_anime_6B.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        elif version == 'AnimeVideo - v3':
            model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
            model_path = 'weights/realesr-animevideov3.pth'
            self.upsampler = RealESRGANer(
                scale=4, model_path=model_path, model=model, tile=tile, tile_pad=10, pre_pad=0, half=half)
        self.face_enhancer = GFPGANer(
            model_path='weights/GFPGANv1.4.pth',
            upscale=scale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=self.upsampler)

    def predict(
        self,
        img: Path = Input(description='Input'),
        version: str = Input(
            description='RealESRGAN version. Please see [Readme] below for more descriptions',
            choices=['General - RealESRGANplus', 'General - v3', 'Anime - anime6B', 'AnimeVideo - v3'],
            default='General - v3'),
        scale: float = Input(description='Rescaling factor', default=2),
        face_enhance: bool = Input(
            description='Enhance faces with GFPGAN. Note that it does not work for anime images/vidoes', default=False),
        tile: int = Input(
            description=
            'Tile size. Default is 0, that is no tile. When encountering the out-of-GPU-memory issue, please specify it, e.g., 400 or 200',
            default=0)
    ) -> Path:
        """Upscale one image and return the path of the result file."""
        # Fix: previously unbound when an exception occurred before assignment,
        # which turned the final `return out_path` into a NameError.
        out_path = None
        # Fix: check for None first — `tile <= 100 or tile is None` raised a
        # TypeError ('<=' on NoneType) before the None test could run.
        if tile is None or tile <= 100:
            tile = 0
        print(f'img: {img}. version: {version}. scale: {scale}. face_enhance: {face_enhance}. tile: {tile}.')
        try:
            extension = os.path.splitext(os.path.basename(str(img)))[1]
            img = cv2.imread(str(img), cv2.IMREAD_UNCHANGED)
            if len(img.shape) == 3 and img.shape[2] == 4:
                img_mode = 'RGBA'
            elif len(img.shape) == 2:
                img_mode = None
                img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            else:
                img_mode = None
            h, w = img.shape[0:2]
            # Pre-upsample very small inputs so the SR models have more to work with.
            if h < 300:
                img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
            self.choose_model(scale, version, tile)
            try:
                if face_enhance:
                    _, _, output = self.face_enhancer.enhance(
                        img, has_aligned=False, only_center_face=False, paste_back=True)
                else:
                    output, _ = self.upsampler.enhance(img, outscale=scale)
            except RuntimeError as error:
                print('Error', error)
                print('If you encounter CUDA out of memory, try to set "tile" to a smaller size, e.g., 400.')
            if img_mode == 'RGBA':  # RGBA images should be saved in png format
                extension = 'png'
            out_path = Path(tempfile.mkdtemp()) / f'out.{extension}'
            cv2.imwrite(str(out_path), output)
        except Exception as error:
            # Best-effort: log and fall through so the temp output dir is cleaned.
            print('global exception: ', error)
        finally:
            clean_folder('output')
        return out_path
def clean_folder(folder):
    """Best-effort removal of every entry inside *folder* (files, links, subdirs)."""
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            # Files and symlinks are unlinked; real directories removed recursively.
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as err:
            # Deliberately swallow errors: cleanup must never abort prediction.
            print(f'Failed to delete {entry_path}. Reason: {err}')
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
inference_realesrgan.py | Python | import argparse
import cv2
import glob
import os
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
def main():
    """Inference demo for Real-ESRGAN.

    Parses CLI arguments, builds the chosen SR model (downloading weights if
    necessary), optionally wraps it with GFPGAN for face enhancement, and
    upscales every input image into the output folder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='RealESRGAN_x4plus',
        help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
              'realesr-animevideov3 | realesr-general-x4v3'))
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument(
        '-dn',
        '--denoise_strength',
        type=float,
        default=0.5,
        help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
              'Only used for the realesr-general-x4v3 model'))
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument(
        '--model_path', type=str, default=None, help='[Option] Model path. Usually, you do not need to specify it')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    parser.add_argument(
        '-g', '--gpu-id', type=int, default=None, help='gpu device to use (default=None) can be 0,1,2 for multi-gpu')
    args = parser.parse_args()

    # determine models according to model names
    args.model_name = args.model_name.split('.')[0]
    if args.model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif args.model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif args.model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif args.model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif args.model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif args.model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]
    else:
        # Fix: an unsupported name used to fall through silently and crash
        # later with NameError on `model`; fail fast with a clear message.
        raise ValueError(f'Unsupported model name: {args.model_name}')

    # determine model paths
    if args.model_path is not None:
        model_path = args.model_path
    else:
        model_path = os.path.join('weights', args.model_name + '.pth')
        if not os.path.isfile(model_path):
            ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
            for url in file_url:
                # model_path will be updated
                model_path = load_file_from_url(
                    url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)

    # use dni to interpolate between the plain and the denoising model weights
    dni_weight = None
    if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        gpu_id=args.gpu_id)

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)
    os.makedirs(args.output, exist_ok=True)

    if os.path.isfile(args.input):
        paths = [args.input]
    else:
        paths = sorted(glob.glob(os.path.join(args.input, '*')))

    for idx, path in enumerate(paths):
        imgname, extension = os.path.splitext(os.path.basename(path))
        print('Testing', idx, imgname)
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        if img is None:
            # Fix: unreadable/non-image files used to crash on img.shape below.
            print(f'Warning: cannot read image {path}, skip it.')
            continue
        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        else:
            img_mode = None
        try:
            if args.face_enhance:
                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
            else:
                output, _ = upsampler.enhance(img, outscale=args.outscale)
        except RuntimeError as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            if args.ext == 'auto':
                extension = extension[1:]
            else:
                extension = args.ext
            if img_mode == 'RGBA':  # RGBA images should be saved in png format
                extension = 'png'
            if args.suffix == '':
                save_path = os.path.join(args.output, f'{imgname}.{extension}')
            else:
                save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}')
            cv2.imwrite(save_path, output)
# Script entry point: run inference when executed directly.
if __name__ == '__main__':
    main()
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
inference_realesrgan_video.py | Python | import argparse
import cv2
import glob
import mimetypes
import numpy as np
import os
import shutil
import subprocess
import torch
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from os import path as osp
from tqdm import tqdm
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
# Import ffmpeg-python; on failure, attempt a best-effort install at import
# time and retry the import.
# NOTE(review): calling pip.main() from a running interpreter is unsupported by
# pip and may fail on modern versions — consider declaring ffmpeg-python as a
# hard requirement instead.
try:
    import ffmpeg
except ImportError:
    import pip
    pip.main(['install', '--user', 'ffmpeg-python'])
    import ffmpeg
def get_video_meta_info(video_path):
    """Probe *video_path* with ffprobe and return basic stream metadata.

    Args:
        video_path (str): Path to the input video file.

    Returns:
        dict: with keys:
            width (int), height (int): resolution of the first video stream.
            fps (float): average frame rate of the first video stream.
            audio: ffmpeg audio stream handle, or None if the video has no audio.
            nb_frames (int): total number of frames in the video stream.
    """
    ret = {}
    probe = ffmpeg.probe(video_path)
    video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
    has_audio = any(stream['codec_type'] == 'audio' for stream in probe['streams'])
    ret['width'] = video_streams[0]['width']
    ret['height'] = video_streams[0]['height']
    # ffprobe reports avg_frame_rate as a rational string such as '30000/1001'.
    # Parse it explicitly instead of calling eval() on externally supplied data.
    rate = video_streams[0]['avg_frame_rate']
    num, _, den = rate.partition('/')
    ret['fps'] = float(num) / float(den) if den else float(num)
    ret['audio'] = ffmpeg.input(video_path).audio if has_audio else None
    ret['nb_frames'] = int(video_streams[0]['nb_frames'])
    return ret
def get_sub_video(args, num_process, process_idx):
    """Split the input video into `num_process` temporal chunks and return the
    path of the chunk assigned to `process_idx`.

    With a single process the original input path is returned unchanged. Chunk
    boundaries are computed in whole seconds; the last chunk runs to the end of
    the video (no `-to` argument).

    Args:
        args (argparse.Namespace): needs `input`, `output`, `video_name` and
            `ffmpeg_bin` attributes.
        num_process (int): total number of worker processes.
        process_idx (int): 0-based index of the current worker.

    Returns:
        str: path of the sub-video for this worker.
    """
    if num_process == 1:
        return args.input
    meta = get_video_meta_info(args.input)
    # total duration in whole seconds (truncated)
    duration = int(meta['nb_frames'] / meta['fps'])
    part_time = duration // num_process
    print(f'duration: {duration}, part_time: {part_time}')
    os.makedirs(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'), exist_ok=True)
    out_path = osp.join(args.output, f'{args.video_name}_inp_tmp_videos', f'{process_idx:03d}.mp4')
    # NOTE(review): the command is joined into a single string and run with
    # shell=True, so paths containing spaces or shell metacharacters will break
    # or be interpreted by the shell — verify inputs are trusted local files.
    cmd = [
        args.ffmpeg_bin, f'-i {args.input}', '-ss', f'{part_time * process_idx}',
        f'-to {part_time * (process_idx + 1)}' if process_idx != num_process - 1 else '', '-async 1', out_path, '-y'
    ]
    print(' '.join(cmd))
    subprocess.call(' '.join(cmd), shell=True)
    return out_path
class Reader:
    """Uniform frame source over a video file, a single image, or a folder of images.

    For video input, frames are streamed from an ffmpeg subprocess as raw BGR24
    bytes; for image/folder input, frames are read with cv2. When multiple
    workers are used, each Reader only sees its own shard of the input (a
    temporal sub-video, or a slice of the sorted file list).
    """

    def __init__(self, args, total_workers=1, worker_idx=0):
        self.args = args
        # guess from file extension; None (e.g. a directory) falls back to 'folder'
        input_type = mimetypes.guess_type(args.input)[0]
        self.input_type = 'folder' if input_type is None else input_type
        self.paths = []  # for image&folder type
        self.audio = None
        self.input_fps = None
        if self.input_type.startswith('video'):
            # this worker streams only its own temporal chunk of the video
            video_path = get_sub_video(args, total_workers, worker_idx)
            self.stream_reader = (
                ffmpeg.input(video_path).output('pipe:', format='rawvideo', pix_fmt='bgr24',
                                                loglevel='error').run_async(
                                                    pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
            meta = get_video_meta_info(video_path)
            self.width = meta['width']
            self.height = meta['height']
            self.input_fps = meta['fps']
            self.audio = meta['audio']
            self.nb_frames = meta['nb_frames']
        else:
            if self.input_type.startswith('image'):
                self.paths = [args.input]
            else:
                # folder input: split the sorted file list evenly across workers
                paths = sorted(glob.glob(os.path.join(args.input, '*')))
                tot_frames = len(paths)
                num_frame_per_worker = tot_frames // total_workers + (1 if tot_frames % total_workers else 0)
                self.paths = paths[num_frame_per_worker * worker_idx:num_frame_per_worker * (worker_idx + 1)]
            self.nb_frames = len(self.paths)
            assert self.nb_frames > 0, 'empty folder'
            # use the first image to determine the resolution
            # (assumes all images share it — TODO confirm for mixed-size folders)
            from PIL import Image
            tmp_img = Image.open(self.paths[0])
            self.width, self.height = tmp_img.size
        self.idx = 0  # read cursor for image/folder iteration

    def get_resolution(self):
        """Return (height, width) of the input frames."""
        return self.height, self.width

    def get_fps(self):
        """Return output FPS: user override > source-video fps > 24 fallback."""
        if self.args.fps is not None:
            return self.args.fps
        elif self.input_fps is not None:
            return self.input_fps
        return 24

    def get_audio(self):
        """Return the ffmpeg audio stream of the input video, or None."""
        return self.audio

    def __len__(self):
        return self.nb_frames

    def get_frame_from_stream(self):
        """Read one raw BGR24 frame from the ffmpeg pipe; None at end of stream."""
        img_bytes = self.stream_reader.stdout.read(self.width * self.height * 3)  # 3 bytes for one pixel
        if not img_bytes:
            return None
        img = np.frombuffer(img_bytes, np.uint8).reshape([self.height, self.width, 3])
        return img

    def get_frame_from_list(self):
        """Read the next image from the file list; None when exhausted."""
        if self.idx >= self.nb_frames:
            return None
        img = cv2.imread(self.paths[self.idx])
        self.idx += 1
        return img

    def get_frame(self):
        """Return the next frame as an HxWx3 BGR uint8 array, or None when done."""
        if self.input_type.startswith('video'):
            return self.get_frame_from_stream()
        else:
            return self.get_frame_from_list()

    def close(self):
        # only the video path holds an ffmpeg subprocess that must be shut down
        if self.input_type.startswith('video'):
            self.stream_reader.stdin.close()
            self.stream_reader.wait()
class Writer:
    """Write frames to an ffmpeg subprocess that encodes the output video.

    Frames are piped as raw BGR24 bytes and encoded to H.264/yuv420p. When the
    source has an audio stream, it is copied (not re-encoded) into the result.
    """

    def __init__(self, args, audio, height, width, video_save_path, fps):
        # output resolution after upscaling by args.outscale
        out_width, out_height = int(width * args.outscale), int(height * args.outscale)
        if out_height > 2160:
            print('You are generating video that is larger than 4K, which will be very slow due to IO speed.',
                  'We highly recommend to decrease the outscale(aka, -s).')
        if audio is not None:
            # mux the original audio stream alongside the encoded video
            self.stream_writer = (
                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}',
                             framerate=fps).output(
                                 audio,
                                 video_save_path,
                                 pix_fmt='yuv420p',
                                 vcodec='libx264',
                                 loglevel='error',
                                 acodec='copy').overwrite_output().run_async(
                                     pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))
        else:
            self.stream_writer = (
                ffmpeg.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{out_width}x{out_height}',
                             framerate=fps).output(
                                 video_save_path, pix_fmt='yuv420p', vcodec='libx264',
                                 loglevel='error').overwrite_output().run_async(
                                     pipe_stdin=True, pipe_stdout=True, cmd=args.ffmpeg_bin))

    def write_frame(self, frame):
        """Send one HxWx3 BGR frame to the encoder as raw bytes."""
        frame = frame.astype(np.uint8).tobytes()
        self.stream_writer.stdin.write(frame)

    def close(self):
        # closing stdin makes ffmpeg flush and finalize the file; then wait for exit
        self.stream_writer.stdin.close()
        self.stream_writer.wait()
def inference_video(args, video_save_path, device=None, total_workers=1, worker_idx=0):
    """Upscale one video (or one worker's shard of it) and write the result.

    Args:
        args (argparse.Namespace): parsed CLI options (model name, tiling,
            face-enhance flags, ...). `args.model_name` is normalized in place.
        video_save_path (str): path where the restored video is written.
        device (torch.device, optional): device for this worker; None lets the
            restorer pick its default.
        total_workers (int): total number of parallel workers.
        worker_idx (int): 0-based index of this worker (selects its input shard).
    """
    # ---------------------- determine models according to model names ---------------------- #
    # NOTE(review): an unrecognized model name leaves `model`/`netscale`/`file_url`
    # unbound and fails later with UnboundLocalError — consider an explicit error.
    args.model_name = args.model_name.split('.pth')[0]
    if args.model_name == 'RealESRGAN_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth']
    elif args.model_name == 'RealESRNet_x4plus':  # x4 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth']
    elif args.model_name == 'RealESRGAN_x4plus_anime_6B':  # x4 RRDBNet model with 6 blocks
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth']
    elif args.model_name == 'RealESRGAN_x2plus':  # x2 RRDBNet model
        model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
        netscale = 2
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth']
    elif args.model_name == 'realesr-animevideov3':  # x4 VGG-style model (XS size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
        netscale = 4
        file_url = ['https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth']
    elif args.model_name == 'realesr-general-x4v3':  # x4 VGG-style model (S size)
        model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
        netscale = 4
        file_url = [
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-wdn-x4v3.pth',
            'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth'
        ]

    # ---------------------- determine model paths ---------------------- #
    # prefer a local copy under ./weights; otherwise download next to this file
    model_path = os.path.join('weights', args.model_name + '.pth')
    if not os.path.isfile(model_path):
        ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
        for url in file_url:
            # model_path will be updated
            model_path = load_file_from_url(
                url=url, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)

    # use dni to control the denoise strength
    # (Deep Network Interpolation between the normal and the 'wdn' weights)
    dni_weight = None
    if args.model_name == 'realesr-general-x4v3' and args.denoise_strength != 1:
        wdn_model_path = model_path.replace('realesr-general-x4v3', 'realesr-general-wdn-x4v3')
        model_path = [model_path, wdn_model_path]
        dni_weight = [args.denoise_strength, 1 - args.denoise_strength]

    # restorer
    upsampler = RealESRGANer(
        scale=netscale,
        model_path=model_path,
        dni_weight=dni_weight,
        model=model,
        tile=args.tile,
        tile_pad=args.tile_pad,
        pre_pad=args.pre_pad,
        half=not args.fp32,
        device=device,
    )

    if 'anime' in args.model_name and args.face_enhance:
        print('face_enhance is not supported in anime models, we turned this option off for you. '
              'if you insist on turning it on, please manually comment the relevant lines of code.')
        args.face_enhance = False

    if args.face_enhance:  # Use GFPGAN for face enhancement
        from gfpgan import GFPGANer
        face_enhancer = GFPGANer(
            model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
            upscale=args.outscale,
            arch='clean',
            channel_multiplier=2,
            bg_upsampler=upsampler)  # TODO support custom device
    else:
        face_enhancer = None

    reader = Reader(args, total_workers, worker_idx)
    audio = reader.get_audio()
    height, width = reader.get_resolution()
    fps = reader.get_fps()
    writer = Writer(args, audio, height, width, video_save_path, fps)

    # frame-by-frame restore loop; a failed frame is reported and skipped
    pbar = tqdm(total=len(reader), unit='frame', desc='inference')
    while True:
        img = reader.get_frame()
        if img is None:
            break
        try:
            if args.face_enhance:
                _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
            else:
                output, _ = upsampler.enhance(img, outscale=args.outscale)
        except RuntimeError as error:
            print('Error', error)
            print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
        else:
            writer.write_frame(output)
        # NOTE(review): per-frame synchronize — presumably to keep the progress
        # bar accurate on GPU; confirm before removing.
        torch.cuda.synchronize(device)
        pbar.update(1)
    reader.close()
    writer.close()
def run(args):
    """Dispatch video inference over one or more worker processes and merge results.

    Single process: runs ``inference_video`` directly. Multi process: each
    worker handles one temporal chunk of the video; the resulting sub-videos
    are concatenated with ffmpeg's concat demuxer and temporary files removed.

    Args:
        args (argparse.Namespace): parsed CLI options. ``args.video_name`` is
            set here and ``args.input`` may be redirected to a frames folder
            when ``--extract_frame_first`` is given.
    """
    args.video_name = osp.splitext(os.path.basename(args.input))[0]
    video_save_path = osp.join(args.output, f'{args.video_name}_{args.suffix}.mp4')

    if args.extract_frame_first:
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        os.makedirs(tmp_frames_folder, exist_ok=True)
        # NOTE(review): os.system with an f-string is shell-injection prone for
        # unusual paths; inputs are assumed to be trusted local files.
        os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {tmp_frames_folder}/frame%08d.png')
        args.input = tmp_frames_folder

    num_gpus = torch.cuda.device_count()
    num_process = num_gpus * args.num_process_per_gpu
    # Fix: with no visible GPU, num_process is 0 and Pool(0) would raise.
    # Fall back to a single (CPU) process for num_process <= 1.
    if num_process <= 1:
        inference_video(args, video_save_path)
        return

    # 'spawn' start method — presumably needed so CUDA initializes cleanly in
    # each worker process; confirm before changing.
    ctx = torch.multiprocessing.get_context('spawn')
    pool = ctx.Pool(num_process)
    os.makedirs(osp.join(args.output, f'{args.video_name}_out_tmp_videos'), exist_ok=True)
    pbar = tqdm(total=num_process, unit='sub_video', desc='inference')
    for i in range(num_process):
        sub_video_save_path = osp.join(args.output, f'{args.video_name}_out_tmp_videos', f'{i:03d}.mp4')
        pool.apply_async(
            inference_video,
            args=(args, sub_video_save_path, torch.device(i % num_gpus), num_process, i),
            callback=lambda arg: pbar.update(1))
    pool.close()
    pool.join()

    # combine sub videos
    # prepare vidlist.txt
    with open(f'{args.output}/{args.video_name}_vidlist.txt', 'w') as f:
        for i in range(num_process):
            f.write(f'file \'{args.video_name}_out_tmp_videos/{i:03d}.mp4\'\n')

    cmd = [
        args.ffmpeg_bin, '-f', 'concat', '-safe', '0', '-i', f'{args.output}/{args.video_name}_vidlist.txt', '-c',
        'copy', f'{video_save_path}'
    ]
    print(' '.join(cmd))
    subprocess.call(cmd)

    # clean up temporary artifacts
    shutil.rmtree(osp.join(args.output, f'{args.video_name}_out_tmp_videos'))
    if osp.exists(osp.join(args.output, f'{args.video_name}_inp_tmp_videos')):
        shutil.rmtree(osp.join(args.output, f'{args.video_name}_inp_tmp_videos'))
    os.remove(f'{args.output}/{args.video_name}_vidlist.txt')
def main():
    """Inference demo for Real-ESRGAN.

    It is mainly for restoring anime videos. Parses CLI arguments, normalizes
    the input path (remuxing .flv to .mp4), and delegates to :func:`run`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', type=str, default='inputs', help='Input video, image or folder')
    parser.add_argument(
        '-n',
        '--model_name',
        type=str,
        default='realesr-animevideov3',
        help=('Model names: realesr-animevideov3 | RealESRGAN_x4plus_anime_6B | RealESRGAN_x4plus | RealESRNet_x4plus |'
              ' RealESRGAN_x2plus | realesr-general-x4v3. '
              'Default: realesr-animevideov3'))
    parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
    parser.add_argument(
        '-dn',
        '--denoise_strength',
        type=float,
        default=0.5,
        help=('Denoise strength. 0 for weak denoise (keep noise), 1 for strong denoise ability. '
              'Only used for the realesr-general-x4v3 model'))
    parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
    parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
    parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
    parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
    parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
    parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face')
    parser.add_argument(
        '--fp32', action='store_true', help='Use fp32 precision during inference. Default: fp16 (half precision).')
    parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
    parser.add_argument('--ffmpeg_bin', type=str, default='ffmpeg', help='The path to ffmpeg')
    parser.add_argument('--extract_frame_first', action='store_true')
    parser.add_argument('--num_process_per_gpu', type=int, default=1)
    parser.add_argument(
        '--alpha_upsampler',
        type=str,
        default='realesrgan',
        help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
    parser.add_argument(
        '--ext',
        type=str,
        default='auto',
        help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
    args = parser.parse_args()

    # strip trailing path separators so basename/splitext work as expected
    args.input = args.input.rstrip('/').rstrip('\\')
    os.makedirs(args.output, exist_ok=True)

    # Detect whether the input is a video.
    # Fix: call mimetypes.guess_type once instead of twice.
    mime_type = mimetypes.guess_type(args.input)[0]
    is_video = mime_type is not None and mime_type.startswith('video')

    if is_video and args.input.endswith('.flv'):
        # flv is remuxed (stream-copied) to mp4 before processing
        mp4_path = args.input.replace('.flv', '.mp4')
        os.system(f'ffmpeg -i {args.input} -codec copy {mp4_path}')
        args.input = mp4_path

    # frame extraction only makes sense for video input
    if args.extract_frame_first and not is_video:
        args.extract_frame_first = False

    run(args)

    if args.extract_frame_first:
        # remove the temporary frames folder created by run()
        tmp_frames_folder = osp.join(args.output, f'{args.video_name}_inp_tmp_frames')
        shutil.rmtree(tmp_frames_folder)
# Script entry point: run video inference when executed directly.
if __name__ == '__main__':
    main()
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/__init__.py | Python | # flake8: noqa
from .archs import *
from .data import *
from .models import *
from .utils import *
from .version import *
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/archs/__init__.py | Python | import importlib
from basicsr.utils import scandir
from os import path as osp
# automatically scan and import arch modules for registry
# scan all the files that end with '_arch.py' under the archs folder
arch_folder = osp.dirname(osp.abspath(__file__))
arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
# import all the arch modules
# NOTE: importing is done purely for its side effect — each arch module
# registers its classes (via the @ARCH_REGISTRY.register() decorator) on import.
_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames]
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/archs/discriminator_arch.py | Python | from basicsr.utils.registry import ARCH_REGISTRY
from torch import nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
@ARCH_REGISTRY.register()
class UNetDiscriminatorSN(nn.Module):
    """U-Net shaped discriminator with spectral normalization (SN).

    Used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure
    Synthetic Data.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_feat (int): Channel number of base intermediate features. Default: 64.
        skip_connection (bool): Whether to use skip connections between U-Net. Default: True.
    """

    def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
        super(UNetDiscriminatorSN, self).__init__()
        self.skip_connection = skip_connection
        sn = spectral_norm
        # input projection (no SN on the very first conv)
        self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
        # encoder: three stride-2 convs, doubling the channel count each step
        self.conv1 = sn(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
        self.conv2 = sn(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
        self.conv3 = sn(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
        # decoder: stride-1 convs applied after bilinear upsampling
        self.conv4 = sn(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
        self.conv5 = sn(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
        self.conv6 = sn(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
        # two refinement convs plus the final 1-channel prediction head
        self.conv7 = sn(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv8 = sn(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)

    def forward(self, x):

        def act(t):
            # shared LeakyReLU(0.2) used after every conv except the head
            return F.leaky_relu(t, negative_slope=0.2, inplace=True)

        # encoder path
        enc0 = act(self.conv0(x))
        enc1 = act(self.conv1(enc0))
        enc2 = act(self.conv2(enc1))
        bottleneck = act(self.conv3(enc2))
        # decoder path with optional U-Net skip connections
        up = F.interpolate(bottleneck, scale_factor=2, mode='bilinear', align_corners=False)
        dec1 = act(self.conv4(up))
        if self.skip_connection:
            dec1 = dec1 + enc2
        up = F.interpolate(dec1, scale_factor=2, mode='bilinear', align_corners=False)
        dec2 = act(self.conv5(up))
        if self.skip_connection:
            dec2 = dec2 + enc1
        up = F.interpolate(dec2, scale_factor=2, mode='bilinear', align_corners=False)
        dec3 = act(self.conv6(up))
        if self.skip_connection:
            dec3 = dec3 + enc0
        # refinement and prediction head
        feat = act(self.conv7(dec3))
        feat = act(self.conv8(feat))
        return self.conv9(feat)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/archs/srvgg_arch.py | Python | from basicsr.utils.registry import ARCH_REGISTRY
from torch import nn as nn
from torch.nn import functional as F
@ARCH_REGISTRY.register()
class SRVGGNetCompact(nn.Module):
    """A compact VGG-style network structure for super-resolution.

    It is a compact network structure, which performs upsampling in the last layer and no convolution is
    conducted on the HR feature space.

    Args:
        num_in_ch (int): Channel number of inputs. Default: 3.
        num_out_ch (int): Channel number of outputs. Default: 3.
        num_feat (int): Channel number of intermediate features. Default: 64.
        num_conv (int): Number of convolution layers in the body network. Default: 16.
        upscale (int): Upsampling factor. Default: 4.
        act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.

    Raises:
        ValueError: If ``act_type`` is not one of the supported options.
    """

    def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
        super(SRVGGNetCompact, self).__init__()
        self.num_in_ch = num_in_ch
        self.num_out_ch = num_out_ch
        self.num_feat = num_feat
        self.num_conv = num_conv
        self.upscale = upscale
        self.act_type = act_type

        self.body = nn.ModuleList()
        # the first conv + activation
        self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
        self.body.append(self._make_activation())
        # the body structure: num_conv interleaved conv/activation pairs
        for _ in range(num_conv):
            self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
            self.body.append(self._make_activation())
        # the last conv emits upscale**2 sub-pixel channels per output channel
        self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
        # upsample via pixel shuffle (no convolution on the HR feature space)
        self.upsampler = nn.PixelShuffle(upscale)

    def _make_activation(self):
        """Build one activation module according to ``self.act_type``.

        Factored out of __init__ to avoid duplicating the selection logic;
        failing fast here replaces the previous late NameError on an
        unsupported ``act_type``.
        """
        if self.act_type == 'relu':
            return nn.ReLU(inplace=True)
        if self.act_type == 'prelu':
            # PReLU is learnable, so each call must create a fresh module
            return nn.PReLU(num_parameters=self.num_feat)
        if self.act_type == 'leakyrelu':
            return nn.LeakyReLU(negative_slope=0.1, inplace=True)
        raise ValueError(f"Unsupported act_type: {self.act_type!r} (expected 'relu', 'prelu' or 'leakyrelu')")

    def forward(self, x):
        out = x
        for layer in self.body:
            out = layer(out)
        out = self.upsampler(out)
        # add the nearest-upsampled input, so that the network learns the residual
        base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
        out += base
        return out
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/data/__init__.py | Python | import importlib
from basicsr.utils import scandir
from os import path as osp
# automatically scan and import dataset modules for registry
# scan all the files that end with '_dataset.py' under the data folder
data_folder = osp.dirname(osp.abspath(__file__))
dataset_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(data_folder) if v.endswith('_dataset.py')]
# import all the dataset modules
# NOTE: importing is done purely for its side effect — each dataset module
# registers its classes (via the @DATASET_REGISTRY.register() decorator) on import.
_dataset_modules = [importlib.import_module(f'realesrgan.data.{file_name}') for file_name in dataset_filenames]
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/data/realesrgan_dataset.py | Python | import cv2
import math
import numpy as np
import os
import os.path as osp
import random
import time
import torch
from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
from basicsr.data.transforms import augment
from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torch.utils import data as data
@DATASET_REGISTRY.register()
class RealESRGANDataset(data.Dataset):
    """Dataset used for Real-ESRGAN model:
    Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It loads gt (Ground-Truth) images, and augments them.
    It also generates blur kernels and sinc kernels for generating low-quality images.
    Note that the low-quality images are processed in tensors on GPUS for faster processing.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
            Please see more options in the codes.
    """

    def __init__(self, opt):
        super(RealESRGANDataset, self).__init__()
        self.opt = opt
        self.file_client = None  # created lazily in __getitem__ (dataloader-worker safe)
        self.io_backend_opt = opt['io_backend']
        self.gt_folder = opt['dataroot_gt']

        # file client (lmdb io backend)
        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.gt_folder]
            self.io_backend_opt['client_keys'] = ['gt']
            if not self.gt_folder.endswith('.lmdb'):
                raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
            with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
                self.paths = [line.split('.')[0] for line in fin]
        else:
            # disk backend with meta_info
            # Each line in the meta_info describes the relative path to an image
            with open(self.opt['meta_info']) as fin:
                paths = [line.strip().split(' ')[0] for line in fin]
                self.paths = [os.path.join(self.gt_folder, v) for v in paths]

        # blur settings for the first degradation
        self.blur_kernel_size = opt['blur_kernel_size']
        self.kernel_list = opt['kernel_list']
        self.kernel_prob = opt['kernel_prob']  # a list for each kernel probability
        self.blur_sigma = opt['blur_sigma']
        self.betag_range = opt['betag_range']  # betag used in generalized Gaussian blur kernels
        self.betap_range = opt['betap_range']  # betap used in plateau blur kernels
        self.sinc_prob = opt['sinc_prob']  # the probability for sinc filters

        # blur settings for the second degradation
        self.blur_kernel_size2 = opt['blur_kernel_size2']
        self.kernel_list2 = opt['kernel_list2']
        self.kernel_prob2 = opt['kernel_prob2']
        self.blur_sigma2 = opt['blur_sigma2']
        self.betag_range2 = opt['betag_range2']
        self.betap_range2 = opt['betap_range2']
        self.sinc_prob2 = opt['sinc_prob2']

        # a final sinc filter
        self.final_sinc_prob = opt['final_sinc_prob']

        self.kernel_range = [2 * v + 1 for v in range(3, 11)]  # kernel size ranges from 7 to 21
        # TODO: kernel range is now hard-coded, should be in the configure file
        self.pulse_tensor = torch.zeros(21, 21).float()  # convolving with pulse tensor brings no blurry effect
        self.pulse_tensor[10, 10] = 1

    def __getitem__(self, index):
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        # -------------------------------- Load gt images -------------------------------- #
        # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
        gt_path = self.paths[index]
        # avoid errors caused by high latency in reading files
        retry = 3
        img_bytes = None
        while retry > 0:
            try:
                img_bytes = self.file_client.get(gt_path, 'gt')
            except (IOError, OSError) as e:
                logger = get_root_logger()
                # fix: Logger.warn is a deprecated alias of Logger.warning
                logger.warning(f'File client error: {e}, remaining retry times: {retry - 1}')
                # change another file to read
                # fix: randint is inclusive on both ends, so the upper bound
                # must be len - 1 (previously could index one past the end)
                index = random.randint(0, self.__len__() - 1)
                gt_path = self.paths[index]
                time.sleep(1)  # sleep 1s for occasional server congestion
            else:
                break
            finally:
                retry -= 1
        if img_bytes is None:
            # all retries failed; raise explicitly instead of crashing later
            # with UnboundLocalError on img_bytes
            raise IOError(f'Failed to read image after retries, last path: {gt_path}')
        img_gt = imfrombytes(img_bytes, float32=True)

        # -------------------- Do augmentation for training: flip, rotation -------------------- #
        img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])

        # crop or pad to 400
        # TODO: 400 is hard-coded. You may change it accordingly
        h, w = img_gt.shape[0:2]
        crop_pad_size = 400
        # pad
        if h < crop_pad_size or w < crop_pad_size:
            pad_h = max(0, crop_pad_size - h)
            pad_w = max(0, crop_pad_size - w)
            img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
        # crop
        if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
            h, w = img_gt.shape[0:2]
            # randomly choose top and left coordinates
            top = random.randint(0, h - crop_pad_size)
            left = random.randint(0, w - crop_pad_size)
            img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]

        # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob']:
            # this sinc filter setting is for kernels ranging from [7, 21]
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel = random_mixed_kernels(
                self.kernel_list,
                self.kernel_prob,
                kernel_size,
                self.blur_sigma,
                self.blur_sigma, [-math.pi, math.pi],
                self.betag_range,
                self.betap_range,
                noise_range=None)
        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
        kernel_size = random.choice(self.kernel_range)
        if np.random.uniform() < self.opt['sinc_prob2']:
            if kernel_size < 13:
                omega_c = np.random.uniform(np.pi / 3, np.pi)
            else:
                omega_c = np.random.uniform(np.pi / 5, np.pi)
            kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
        else:
            kernel2 = random_mixed_kernels(
                self.kernel_list2,
                self.kernel_prob2,
                kernel_size,
                self.blur_sigma2,
                self.blur_sigma2, [-math.pi, math.pi],
                self.betag_range2,
                self.betap_range2,
                noise_range=None)
        # pad kernel
        pad_size = (21 - kernel_size) // 2
        kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))

        # ------------------------------------- the final sinc kernel ------------------------------------- #
        if np.random.uniform() < self.opt['final_sinc_prob']:
            kernel_size = random.choice(self.kernel_range)
            omega_c = np.random.uniform(np.pi / 3, np.pi)
            sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
            sinc_kernel = torch.FloatTensor(sinc_kernel)
        else:
            sinc_kernel = self.pulse_tensor

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
        kernel = torch.FloatTensor(kernel)
        kernel2 = torch.FloatTensor(kernel2)

        return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
        return return_d

    def __len__(self):
        return len(self.paths)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/data/realesrgan_paired_dataset.py | Python | import os
from basicsr.data.data_util import paired_paths_from_folder, paired_paths_from_lmdb
from basicsr.data.transforms import augment, paired_random_crop
from basicsr.utils import FileClient, imfrombytes, img2tensor
from basicsr.utils.registry import DATASET_REGISTRY
from torch.utils import data as data
from torchvision.transforms.functional import normalize
@DATASET_REGISTRY.register()
class RealESRGANPairedDataset(data.Dataset):
    """Paired image dataset for image restoration.

    Read LQ (Low Quality, e.g. LR (Low Resolution), blurry, noisy, etc) and GT image pairs.

    There are three modes:
    1. 'lmdb': Use lmdb files.
        If opt['io_backend'] == lmdb.
    2. 'meta_info': Use meta information file to generate paths.
        If opt['io_backend'] != lmdb and opt['meta_info'] is not None.
    3. 'folder': Scan folders to generate paths.
        The rest.

    Args:
        opt (dict): Config for train datasets. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            filename_tmpl (str): Template for each filename. Note that the template excludes the file extension.
                Default: '{}'.
            gt_size (int): Cropped patched size for gt patches.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h
                and w for implementation).
            scale (bool): Scale, which will be added automatically.
            phase (str): 'train' or 'val'.
    """

    def __init__(self, opt):
        super(RealESRGANPairedDataset, self).__init__()
        self.opt = opt
        # the file client is created lazily in __getitem__, so that each
        # dataloader worker process opens its own backend handle
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        # mean and std for normalizing the input images
        self.mean = opt['mean'] if 'mean' in opt else None
        self.std = opt['std'] if 'std' in opt else None

        self.gt_folder, self.lq_folder = opt['dataroot_gt'], opt['dataroot_lq']
        self.filename_tmpl = opt['filename_tmpl'] if 'filename_tmpl' in opt else '{}'

        # file client (lmdb io backend)
        if self.io_backend_opt['type'] == 'lmdb':
            self.io_backend_opt['db_paths'] = [self.lq_folder, self.gt_folder]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
            self.paths = paired_paths_from_lmdb([self.lq_folder, self.gt_folder], ['lq', 'gt'])
        elif 'meta_info' in self.opt and self.opt['meta_info'] is not None:
            # disk backend with meta_info
            # Each line in the meta_info describes the relative path to an image
            with open(self.opt['meta_info']) as fin:
                paths = [line.strip() for line in fin]
            self.paths = []
            for path in paths:
                # each line is '<gt relative path>, <lq relative path>'
                gt_path, lq_path = path.split(', ')
                gt_path = os.path.join(self.gt_folder, gt_path)
                lq_path = os.path.join(self.lq_folder, lq_path)
                self.paths.append(dict([('gt_path', gt_path), ('lq_path', lq_path)]))
        else:
            # disk backend
            # it will scan the whole folder to get meta info
            # it will be time-consuming for folders with too many files. It is recommended using an extra meta txt file
            self.paths = paired_paths_from_folder([self.lq_folder, self.gt_folder], ['lq', 'gt'], self.filename_tmpl)

    def __getitem__(self, index):
        """Load the (lq, gt) pair at `index` as float32 RGB CHW tensors in [0, 1]."""
        if self.file_client is None:
            # NOTE: pops 'type' out of io_backend_opt, so this branch must run
            # at most once per dataset instance
            self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)

        scale = self.opt['scale']

        # Load gt and lq images. Dimension order: HWC; channel order: BGR;
        # image range: [0, 1], float32.
        gt_path = self.paths[index]['gt_path']
        img_bytes = self.file_client.get(gt_path, 'gt')
        img_gt = imfrombytes(img_bytes, float32=True)
        lq_path = self.paths[index]['lq_path']
        img_bytes = self.file_client.get(lq_path, 'lq')
        img_lq = imfrombytes(img_bytes, float32=True)

        # augmentation for training
        if self.opt['phase'] == 'train':
            gt_size = self.opt['gt_size']
            # random crop
            img_gt, img_lq = paired_random_crop(img_gt, img_lq, gt_size, scale, gt_path)
            # flip, rotation
            img_gt, img_lq = augment([img_gt, img_lq], self.opt['use_hflip'], self.opt['use_rot'])

        # BGR to RGB, HWC to CHW, numpy to tensor
        img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)
        # normalize
        if self.mean is not None or self.std is not None:
            # NOTE(review): if only one of mean/std is configured, normalize()
            # receives None for the other — confirm configs always set both
            normalize(img_lq, self.mean, self.std, inplace=True)
            normalize(img_gt, self.mean, self.std, inplace=True)

        return {'lq': img_lq, 'gt': img_gt, 'lq_path': lq_path, 'gt_path': gt_path}

    def __len__(self):
        """Return the number of (lq, gt) pairs."""
        return len(self.paths)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/models/__init__.py | Python | import importlib
from basicsr.utils import scandir
from os import path as osp
# automatically scan and import model modules for registry
# scan all the files that end with '_model.py' under the model folder
model_folder = osp.dirname(osp.abspath(__file__))
model_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(model_folder) if v.endswith('_model.py')]
# import all the model modules; importing has the side effect of running each
# module's @MODEL_REGISTRY.register() decorators, which populates the registry
_model_modules = [importlib.import_module(f'realesrgan.models.{file_name}') for file_name in model_filenames]
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/models/realesrgan_model.py | Python | import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.srgan_model import SRGANModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from collections import OrderedDict
from torch.nn import functional as F
@MODEL_REGISTRY.register()
class RealESRGANModel(SRGANModel):
    """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It mainly performs:
    1. randomly synthesize LQ images in GPU tensors
    2. optimize the networks with GAN training.
    """

    def __init__(self, opt):
        super(RealESRGANModel, self).__init__(opt)
        self.jpeger = DiffJPEG(differentiable=False).cuda()  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().cuda()  # do usm sharpening
        # capacity of the training pair pool used by _dequeue_and_enqueue
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """It is the training pair pool for increasing the diversity in a batch.

        Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
        batch could not have different resize scaling factors. Therefore, we employ this training pair pool
        to increase the degradation diversity in a batch.
        """
        # initialize the pool lazily on the first call, sized from the first batch
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # the pool is full
            # do dequeue and enqueue
            # shuffle the whole pool so dequeued samples are random
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get first b samples
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update the queue: overwrite the dequeued slots with the new batch
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()

            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue until the pool fills up
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.

        Args:
            data (dict): During synthetic training it must provide 'gt' and the
                blur kernels 'kernel1', 'kernel2', 'sinc_kernel'; otherwise it
                provides 'lq' (and optionally 'gt').
        """
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            self.gt_usm = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt_usm, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise (Gaussian or Poisson, possibly gray noise)
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur (applied only with probability second_blur_prob)
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round to mimic 8-bit quantization of real images
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size,
                                                                 self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue
            self.gt_usm = self.usm_sharpener(self.gt)
            self.lq = self.lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract
        else:
            # for paired training or validation
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        # do not use the synthetic process during validation
        self.is_train = False
        super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True

    def optimize_parameters(self, current_iter):
        """Run one GAN training step: update net_g, then net_d, then the EMA."""
        # usm sharpening: each loss can independently choose the sharpened or
        # the plain GT as its target
        l1_gt = self.gt_usm
        percep_gt = self.gt_usm
        gan_gt = self.gt_usm
        if self.opt['l1_gt_usm'] is False:
            l1_gt = self.gt
        if self.opt['percep_gt_usm'] is False:
            percep_gt = self.gt
        if self.opt['gan_gt_usm'] is False:
            gan_gt = self.gt

        # optimize net_g (freeze the discriminator while doing so)
        for p in self.net_d.parameters():
            p.requires_grad = False

        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)

        l_g_total = 0
        loss_dict = OrderedDict()
        if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters):
            # pixel loss
            if self.cri_pix:
                l_g_pix = self.cri_pix(self.output, l1_gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
            # perceptual loss
            if self.cri_perceptual:
                l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt)
                if l_g_percep is not None:
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if l_g_style is not None:
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
            # gan loss
            fake_g_pred = self.net_d(self.output)
            l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False)
            l_g_total += l_g_gan
            loss_dict['l_g_gan'] = l_g_gan

            l_g_total.backward()
            self.optimizer_g.step()

        # optimize net_d
        for p in self.net_d.parameters():
            p.requires_grad = True

        self.optimizer_d.zero_grad()
        # real
        real_d_pred = self.net_d(gan_gt)
        l_d_real = self.cri_gan(real_d_pred, True, is_disc=True)
        loss_dict['l_d_real'] = l_d_real
        loss_dict['out_d_real'] = torch.mean(real_d_pred.detach())
        l_d_real.backward()
        # fake
        fake_d_pred = self.net_d(self.output.detach().clone())  # clone for pt1.9
        l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True)
        loss_dict['l_d_fake'] = l_d_fake
        loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach())
        l_d_fake.backward()
        self.optimizer_d.step()

        if self.ema_decay > 0:
            self.model_ema(decay=self.ema_decay)

        self.log_dict = self.reduce_loss_dict(loss_dict)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/models/realesrnet_model.py | Python | import numpy as np
import random
import torch
from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt
from basicsr.data.transforms import paired_random_crop
from basicsr.models.sr_model import SRModel
from basicsr.utils import DiffJPEG, USMSharp
from basicsr.utils.img_process_util import filter2D
from basicsr.utils.registry import MODEL_REGISTRY
from torch.nn import functional as F
@MODEL_REGISTRY.register()
class RealESRNetModel(SRModel):
    """RealESRNet Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.

    It is trained without GAN losses.
    It mainly performs:
    1. randomly synthesize LQ images in GPU tensors
    2. optimize the networks with GAN training.
    """

    def __init__(self, opt):
        super(RealESRNetModel, self).__init__(opt)
        self.jpeger = DiffJPEG(differentiable=False).cuda()  # simulate JPEG compression artifacts
        self.usm_sharpener = USMSharp().cuda()  # do usm sharpening
        # capacity of the training pair pool used by _dequeue_and_enqueue
        self.queue_size = opt.get('queue_size', 180)

    @torch.no_grad()
    def _dequeue_and_enqueue(self):
        """It is the training pair pool for increasing the diversity in a batch.

        Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a
        batch could not have different resize scaling factors. Therefore, we employ this training pair pool
        to increase the degradation diversity in a batch.
        """
        # initialize the pool lazily on the first call, sized from the first batch
        b, c, h, w = self.lq.size()
        if not hasattr(self, 'queue_lr'):
            assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}'
            self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda()
            _, c, h, w = self.gt.size()
            self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda()
            self.queue_ptr = 0
        if self.queue_ptr == self.queue_size:  # the pool is full
            # do dequeue and enqueue
            # shuffle the whole pool so dequeued samples are random
            idx = torch.randperm(self.queue_size)
            self.queue_lr = self.queue_lr[idx]
            self.queue_gt = self.queue_gt[idx]
            # get first b samples
            lq_dequeue = self.queue_lr[0:b, :, :, :].clone()
            gt_dequeue = self.queue_gt[0:b, :, :, :].clone()
            # update the queue: overwrite the dequeued slots with the new batch
            self.queue_lr[0:b, :, :, :] = self.lq.clone()
            self.queue_gt[0:b, :, :, :] = self.gt.clone()

            self.lq = lq_dequeue
            self.gt = gt_dequeue
        else:
            # only do enqueue until the pool fills up
            self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone()
            self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone()
            self.queue_ptr = self.queue_ptr + b

    @torch.no_grad()
    def feed_data(self, data):
        """Accept data from dataloader, and then add two-order degradations to obtain LQ images.

        Args:
            data (dict): During synthetic training it must provide 'gt' and the
                blur kernels 'kernel1', 'kernel2', 'sinc_kernel'; otherwise it
                provides 'lq' (and optionally 'gt').
        """
        if self.is_train and self.opt.get('high_order_degradation', True):
            # training data synthesis
            self.gt = data['gt'].to(self.device)
            # USM sharpen the GT images
            if self.opt['gt_usm'] is True:
                self.gt = self.usm_sharpener(self.gt)

            self.kernel1 = data['kernel1'].to(self.device)
            self.kernel2 = data['kernel2'].to(self.device)
            self.sinc_kernel = data['sinc_kernel'].to(self.device)

            ori_h, ori_w = self.gt.size()[2:4]

            # ----------------------- The first degradation process ----------------------- #
            # blur
            out = filter2D(self.gt, self.kernel1)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(out, scale_factor=scale, mode=mode)
            # add noise (Gaussian or Poisson, possibly gray noise)
            gray_noise_prob = self.opt['gray_noise_prob']
            if np.random.uniform() < self.opt['gaussian_noise_prob']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)
            # JPEG compression
            jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range'])
            out = torch.clamp(out, 0, 1)  # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts
            out = self.jpeger(out, quality=jpeg_p)

            # ----------------------- The second degradation process ----------------------- #
            # blur (applied only with probability second_blur_prob)
            if np.random.uniform() < self.opt['second_blur_prob']:
                out = filter2D(out, self.kernel2)
            # random resize
            updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0]
            if updown_type == 'up':
                scale = np.random.uniform(1, self.opt['resize_range2'][1])
            elif updown_type == 'down':
                scale = np.random.uniform(self.opt['resize_range2'][0], 1)
            else:
                scale = 1
            mode = random.choice(['area', 'bilinear', 'bicubic'])
            out = F.interpolate(
                out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode)
            # add noise
            gray_noise_prob = self.opt['gray_noise_prob2']
            if np.random.uniform() < self.opt['gaussian_noise_prob2']:
                out = random_add_gaussian_noise_pt(
                    out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob)
            else:
                out = random_add_poisson_noise_pt(
                    out,
                    scale_range=self.opt['poisson_scale_range2'],
                    gray_prob=gray_noise_prob,
                    clip=True,
                    rounds=False)

            # JPEG compression + the final sinc filter
            # We also need to resize images to desired sizes. We group [resize back + sinc filter] together
            # as one operation.
            # We consider two orders:
            #   1. [resize back + sinc filter] + JPEG compression
            #   2. JPEG compression + [resize back + sinc filter]
            # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines.
            if np.random.uniform() < 0.5:
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
            else:
                # JPEG compression
                jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2'])
                out = torch.clamp(out, 0, 1)
                out = self.jpeger(out, quality=jpeg_p)
                # resize back + the final sinc filter
                mode = random.choice(['area', 'bilinear', 'bicubic'])
                out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode)
                out = filter2D(out, self.sinc_kernel)

            # clamp and round to mimic 8-bit quantization of real images
            self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255.

            # random crop
            gt_size = self.opt['gt_size']
            self.gt, self.lq = paired_random_crop(self.gt, self.lq, gt_size, self.opt['scale'])

            # training pair pool
            self._dequeue_and_enqueue()
            self.lq = self.lq.contiguous()  # for the warning: grad and param do not obey the gradient layout contract
        else:
            # for paired training or validation
            self.lq = data['lq'].to(self.device)
            if 'gt' in data:
                self.gt = data['gt'].to(self.device)
                self.gt_usm = self.usm_sharpener(self.gt)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        # do not use the synthetic process during validation
        self.is_train = False
        super(RealESRNetModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img)
        self.is_train = True
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/train.py | Python | # flake8: noqa
import os.path as osp
from basicsr.train import train_pipeline
import realesrgan.archs
import realesrgan.data
import realesrgan.models
if __name__ == '__main__':
    # resolve the repository root (two levels above this file) and hand it to
    # BasicSR's standard training pipeline, which parses -opt etc. from argv
    root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir))
    train_pipeline(root_path)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
realesrgan/utils.py | Python | import cv2
import math
import numpy as np
import os
import queue
import threading
import torch
from basicsr.utils.download_util import load_file_from_url
from torch.nn import functional as F
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class RealESRGANer():
    """A helper class for upsampling images with RealESRGAN.

    Args:
        scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4.
        model_path (str | list[str]): The path to the pretrained model. It can be urls (will first download it
            automatically). A list of two paths triggers deep network interpolation (see :meth:`dni`).
        dni_weight (list[float]): Interpolation weights for the two DNI models. Default: None.
        model (nn.Module): The defined network. Default: None.
        tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop
            input images into tiles, and then process each of them. Finally, they will be merged into one image.
            0 denotes for do not use tile. Default: 0.
        tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10.
        pre_pad (int): Pad the input images to avoid border artifacts. Default: 10.
        half (bool): Whether to use half precision during inference. Default: False.
        device (torch.device): Inference device; auto-detected when None. Default: None.
        gpu_id (int): Index of the CUDA device to use. Default: None.
    """

    def __init__(self,
                 scale,
                 model_path,
                 dni_weight=None,
                 model=None,
                 tile=0,
                 tile_pad=10,
                 pre_pad=10,
                 half=False,
                 device=None,
                 gpu_id=None):
        self.scale = scale
        self.tile_size = tile
        self.tile_pad = tile_pad
        self.pre_pad = pre_pad
        self.mod_scale = None
        self.half = half

        # initialize model
        if gpu_id:
            self.device = torch.device(
                f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu') if device is None else device
        else:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device

        if isinstance(model_path, list):
            # dni
            # NOTE(review): only model_path[0] and model_path[1] are used, so
            # lists longer than two entries are silently truncated
            assert len(model_path) == len(dni_weight), 'model_path and dni_weight should have the save length.'
            loadnet = self.dni(model_path[0], model_path[1], dni_weight)
        else:
            # if the model_path starts with https, it will first download models to the folder: weights
            if model_path.startswith('https://'):
                model_path = load_file_from_url(
                    url=model_path, model_dir=os.path.join(ROOT_DIR, 'weights'), progress=True, file_name=None)
            loadnet = torch.load(model_path, map_location=torch.device('cpu'))

        # prefer to use params_ema
        if 'params_ema' in loadnet:
            keyname = 'params_ema'
        else:
            keyname = 'params'
        model.load_state_dict(loadnet[keyname], strict=True)

        model.eval()
        self.model = model.to(self.device)
        if self.half:
            self.model = self.model.half()

    def dni(self, net_a, net_b, dni_weight, key='params', loc='cpu'):
        """Deep network interpolation.

        ``Paper: Deep Network Interpolation for Continuous Imagery Effect Transition``

        Args:
            net_a (str): Path to the first checkpoint.
            net_b (str): Path to the second checkpoint.
            dni_weight (list[float]): Blend weights [w_a, w_b] for the two nets.
            key (str): State-dict key holding the parameters. Default: 'params'.
            loc (str): map_location for torch.load. Default: 'cpu'.

        Returns:
            dict: net_a's checkpoint with parameters linearly interpolated in place.
        """
        net_a = torch.load(net_a, map_location=torch.device(loc))
        net_b = torch.load(net_b, map_location=torch.device(loc))
        for k, v_a in net_a[key].items():
            net_a[key][k] = dni_weight[0] * v_a + dni_weight[1] * net_b[key][k]
        return net_a

    def pre_process(self, img):
        """Pre-process, such as pre-pad and mod pad, so that the images can be divisible.

        Stores the padded tensor in ``self.img`` (NCHW, on ``self.device``).
        """
        img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float()
        self.img = img.unsqueeze(0).to(self.device)
        if self.half:
            self.img = self.img.half()

        # pre_pad
        if self.pre_pad != 0:
            self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect')
        # mod pad for divisible borders
        if self.scale == 2:
            self.mod_scale = 2
        elif self.scale == 1:
            self.mod_scale = 4
        if self.mod_scale is not None:
            self.mod_pad_h, self.mod_pad_w = 0, 0
            _, _, h, w = self.img.size()
            if (h % self.mod_scale != 0):
                self.mod_pad_h = (self.mod_scale - h % self.mod_scale)
            if (w % self.mod_scale != 0):
                self.mod_pad_w = (self.mod_scale - w % self.mod_scale)
            self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect')

    def process(self):
        # model inference on the whole image at once; result in self.output
        self.output = self.model(self.img)

    def tile_process(self):
        """It will first crop input images to tiles, and then process each tile.
        Finally, all the processed tiles are merged into one images.

        Modified from: https://github.com/ata4/esrgan-launcher
        """
        batch, channel, height, width = self.img.shape
        output_height = height * self.scale
        output_width = width * self.scale
        output_shape = (batch, channel, output_height, output_width)

        # start with black image
        self.output = self.img.new_zeros(output_shape)
        tiles_x = math.ceil(width / self.tile_size)
        tiles_y = math.ceil(height / self.tile_size)

        # loop over all tiles
        for y in range(tiles_y):
            for x in range(tiles_x):
                # extract tile from input image
                ofs_x = x * self.tile_size
                ofs_y = y * self.tile_size
                # input tile area on total image
                input_start_x = ofs_x
                input_end_x = min(ofs_x + self.tile_size, width)
                input_start_y = ofs_y
                input_end_y = min(ofs_y + self.tile_size, height)

                # input tile area on total image with padding
                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
                input_end_x_pad = min(input_end_x + self.tile_pad, width)
                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
                input_end_y_pad = min(input_end_y + self.tile_pad, height)

                # input tile dimensions
                input_tile_width = input_end_x - input_start_x
                input_tile_height = input_end_y - input_start_y
                tile_idx = y * tiles_x + x + 1
                input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]

                # upscale tile
                try:
                    with torch.no_grad():
                        output_tile = self.model(input_tile)
                except RuntimeError as error:
                    # NOTE(review): if this except branch is hit on the first
                    # tile, `output_tile` is undefined and the code below
                    # raises NameError — consider `continue` or re-raising
                    print('Error', error)
                print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')

                # output tile area on total image
                output_start_x = input_start_x * self.scale
                output_end_x = input_end_x * self.scale
                output_start_y = input_start_y * self.scale
                output_end_y = input_end_y * self.scale

                # output tile area without padding
                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale

                # put tile into output image
                self.output[:, :, output_start_y:output_end_y,
                            output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
                                                                       output_start_x_tile:output_end_x_tile]

    def post_process(self):
        """Crop away the mod-pad and pre-pad borders (scaled) from self.output."""
        # remove extra pad
        if self.mod_scale is not None:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale]
        # remove prepad
        if self.pre_pad != 0:
            _, _, h, w = self.output.size()
            self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale]
        return self.output

    @torch.no_grad()
    def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'):
        """Upsample a BGR(A) uint8/uint16 numpy image.

        Args:
            img (numpy.ndarray): HxW (gray), HxWx3 (BGR) or HxWx4 (BGRA) image.
            outscale (float): Final rescale factor applied to the network
                output; falls back to the network scale when None.
            alpha_upsampler (str): 'realesrgan' runs the network on the alpha
                channel too; anything else uses bilinear cv2.resize.

        Returns:
            tuple: (output image as numpy array, detected img_mode 'L'/'RGB'/'RGBA').
        """
        h_input, w_input = img.shape[0:2]
        # img: numpy
        img = img.astype(np.float32)
        if np.max(img) > 256:  # 16-bit image
            max_range = 65535
            print('\tInput is a 16-bit image')
        else:
            max_range = 255
        img = img / max_range
        if len(img.shape) == 2:  # gray image
            img_mode = 'L'
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        elif img.shape[2] == 4:  # RGBA image with alpha channel
            img_mode = 'RGBA'
            alpha = img[:, :, 3]
            img = img[:, :, 0:3]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            if alpha_upsampler == 'realesrgan':
                alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB)
        else:
            img_mode = 'RGB'
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        # ------------------- process image (without the alpha channel) ------------------- #
        self.pre_process(img)
        if self.tile_size > 0:
            self.tile_process()
        else:
            self.process()
        output_img = self.post_process()
        output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0))
        if img_mode == 'L':
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY)

        # ------------------- process the alpha channel if necessary ------------------- #
        if img_mode == 'RGBA':
            if alpha_upsampler == 'realesrgan':
                self.pre_process(alpha)
                if self.tile_size > 0:
                    self.tile_process()
                else:
                    self.process()
                output_alpha = self.post_process()
                output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy()
                output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0))
                output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY)
            else:  # use the cv2 resize for alpha channel
                h, w = alpha.shape[0:2]
                output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR)

            # merge the alpha channel
            output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA)
            output_img[:, :, 3] = output_alpha

        # ------------------------------ return ------------------------------ #
        if max_range == 65535:  # 16-bit image
            output = (output_img * 65535.0).round().astype(np.uint16)
        else:
            output = (output_img * 255.0).round().astype(np.uint8)

        if outscale is not None and outscale != float(self.scale):
            output = cv2.resize(
                output, (
                    int(w_input * outscale),
                    int(h_input * outscale),
                ), interpolation=cv2.INTER_LANCZOS4)

        return output, img_mode
class PrefetchReader(threading.Thread):
    """Background thread that pre-decodes images into a bounded queue.

    Iterate over the instance to receive decoded images in order; iteration
    stops once every path has been consumed.

    Args:
        img_list (list[str]): A image list of image paths to be read.
        num_prefetch_queue (int): Number of prefetch queue.
    """

    def __init__(self, img_list, num_prefetch_queue):
        super().__init__()
        self.que = queue.Queue(num_prefetch_queue)
        self.img_list = img_list

    def run(self):
        # decode each image and hand it over; a trailing None marks the end
        for path in self.img_list:
            self.que.put(cv2.imread(path, cv2.IMREAD_UNCHANGED))
        self.que.put(None)

    def __next__(self):
        item = self.que.get()
        if item is None:
            raise StopIteration
        return item

    def __iter__(self):
        return self
class IOConsumer(threading.Thread):
    """Worker thread that writes images pulled from a queue to disk.

    Messages are dicts with 'output' (image array) and 'save_path' (str);
    the string message 'quit' shuts the worker down.

    Args:
        opt: Options object, kept for reference by the caller.
        que (queue.Queue): Queue the worker consumes from.
        qid (int): Worker id, used only in the final log line.
    """

    def __init__(self, opt, que, qid):
        super().__init__()
        self._queue = que
        self.qid = qid
        self.opt = opt

    def run(self):
        while True:
            msg = self._queue.get()
            if isinstance(msg, str) and msg == 'quit':
                break
            cv2.imwrite(msg['save_path'], msg['output'])
        print(f'IO worker {self.qid} is done.')
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
scripts/extract_subimages.py | Python | import argparse
import cv2
import numpy as np
import os
import sys
from basicsr.utils import scandir
from multiprocessing import Pool
from os import path as osp
from tqdm import tqdm
def main(args):
    """A multi-thread tool to crop large images to sub-images for faster IO.

    opt (dict): Configuration dict. It contains:
        n_thread (int): Thread number.
        compression_level (int):  CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size
            and longer compression time. Use 0 for faster CPU decompression. Default: 3, same in cv2.
        input_folder (str): Path to the input folder.
        save_folder (str): Path to save folder.
        crop_size (int): Crop size.
        step (int): Step for overlapped sliding window.
        thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.

    Usage:
        For each folder, run this script.
        Typically, there are GT folder and LQ folder to be processed for DIV2K dataset.
        After process, each sub_folder should have the same number of subimages.
        Remember to modify opt configurations according to your settings.
    """
    # translate the argparse namespace into the plain option dict that
    # extract_subimages expects
    opt = {
        'n_thread': args.n_thread,
        'compression_level': args.compression_level,
        'input_folder': args.input,
        'save_folder': args.output,
        'crop_size': args.crop_size,
        'step': args.step,
        'thresh_size': args.thresh_size,
    }
    extract_subimages(opt)
def extract_subimages(opt):
    """Crop all images in a folder into sub-images using a process pool.

    Args:
        opt (dict): Configuration dict. It contains:
            input_folder (str): Path to the input folder.
            save_folder (str): Path to the save folder.
            n_thread (int): Number of worker processes.
    """
    input_folder = opt['input_folder']
    save_folder = opt['save_folder']
    # Refuse to overwrite an existing output folder.
    if osp.exists(save_folder):
        print(f'Folder {save_folder} already exists. Exit.')
        sys.exit(1)
    os.makedirs(save_folder)
    print(f'mkdir {save_folder} ...')
    # Scan all images, then fan the per-image work out to the pool.
    img_list = list(scandir(input_folder, full_path=True))
    pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
    pool = Pool(opt['n_thread'])
    for path in img_list:
        pool.apply_async(worker, args=(path, opt), callback=lambda _: pbar.update(1))
    pool.close()
    pool.join()
    pbar.close()
    print('All processes done.')
def worker(path, opt):
    """Crop one image into overlapping sub-images and save them as PNG.

    Args:
        path (str): Image path.
        opt (dict): Configuration dict. It contains:
            crop_size (int): Crop size.
            step (int): Step of the overlapped sliding window.
            thresh_size (int): Patches smaller than this threshold are dropped.
            save_folder (str): Path to the save folder.
            compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.

    Returns:
        str: Process information displayed in the progress bar.
    """
    crop_size = opt['crop_size']
    step = opt['step']
    thresh_size = opt['thresh_size']

    img_name, extension = osp.splitext(osp.basename(path))
    # remove the x2, x3, x4 and x8 in the filename for DIV2K
    for scale_tag in ('x2', 'x3', 'x4', 'x8'):
        img_name = img_name.replace(scale_tag, '')

    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    h, w = img.shape[0:2]

    def _start_positions(length):
        # Top-left offsets of the sliding window along one axis; append a final
        # window flush with the border when the leftover strip is large enough.
        positions = np.arange(0, length - crop_size + 1, step)
        if length - (positions[-1] + crop_size) > thresh_size:
            positions = np.append(positions, length - crop_size)
        return positions

    index = 0
    for x in _start_positions(h):
        for y in _start_positions(w):
            index += 1
            patch = np.ascontiguousarray(img[x:x + crop_size, y:y + crop_size, ...])
            cv2.imwrite(
                osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), patch,
                [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
    return f'Processing {img_name} ...'
if __name__ == '__main__':
    # CLI entry: crop HR/LQ images into overlapping sub-images for faster IO
    # during training (see main/extract_subimages above).
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
    parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
    parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window')
    parser.add_argument(
        '--thresh_size',
        type=int,
        default=0,
        help='Threshold size. Patches whose size is lower than thresh_size will be dropped.')
    parser.add_argument('--n_thread', type=int, default=20, help='Thread number.')
    parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
    args = parser.parse_args()
    main(args)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
scripts/generate_meta_info.py | Python | import argparse
import cv2
import glob
import os
def main(args):
    """Write relative paths of (optionally validated) GT images to a meta-info txt.

    Args:
        args: Namespace with ``input`` (list of folders), ``root`` (list of
            roots, same length), ``meta_info`` (output txt path) and ``check``
            (bool; read each image with cv2 to verify it is loadable).
    """
    # `with` guarantees the meta file is flushed and closed (the original
    # left the handle open for the process lifetime).
    with open(args.meta_info, 'w') as txt_file:
        for folder, root in zip(args.input, args.root):
            img_paths = sorted(glob.glob(os.path.join(folder, '*')))
            for img_path in img_paths:
                status = True
                if args.check:
                    # Reset per image: previously a read error left `img`
                    # unbound (NameError) or stale from the prior iteration.
                    img = None
                    try:
                        # read the image once for check, as some images may have errors
                        img = cv2.imread(img_path)
                    except (IOError, OSError) as error:
                        print(f'Read {img_path} error: {error}')
                        status = False
                    if img is None:
                        status = False
                        print(f'Img is None: {img_path}')
                if status:
                    # record the path relative to the configured root
                    img_name = os.path.relpath(img_path, root)
                    print(img_name)
                    txt_file.write(f'{img_name}\n')
if __name__ == '__main__':
    """Generate meta info (txt file) for only Ground-Truth images.
    It can also generate meta info from several folders into one txt file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        nargs='+',
        default=['datasets/DF2K/DF2K_HR', 'datasets/DF2K/DF2K_multiscale'],
        help='Input folder, can be a list')
    parser.add_argument(
        '--root',
        nargs='+',
        default=['datasets/DF2K', 'datasets/DF2K'],
        help='Folder root, should have the length as input folders')
    parser.add_argument(
        '--meta_info',
        type=str,
        default='datasets/DF2K/meta_info/meta_info_DF2Kmultiscale.txt',
        help='txt path for meta info')
    parser.add_argument('--check', action='store_true', help='Read image to check whether it is ok')
    args = parser.parse_args()
    # each input folder must have a matching root to compute relative paths
    assert len(args.input) == len(args.root), ('Input folder and folder root should have the same length, but got '
                                               f'{len(args.input)} and {len(args.root)}.')
    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
    main(args)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
scripts/generate_meta_info_pairdata.py | Python | import argparse
import glob
import os
def main(args):
    """Write 'gt_path, lq_path' lines (relative to their roots) to a meta-info txt.

    Args:
        args: Namespace with ``input`` ([gt_folder, lq_folder]), ``root``
            ([gt_root, lq_root]) and ``meta_info`` (output txt path).
    """
    # `with` guarantees the meta file is flushed and closed (the original
    # left the handle open for the process lifetime).
    with open(args.meta_info, 'w') as txt_file:
        # scan images; sorting keeps gt/lq rows aligned by filename
        img_paths_gt = sorted(glob.glob(os.path.join(args.input[0], '*')))
        img_paths_lq = sorted(glob.glob(os.path.join(args.input[1], '*')))

        assert len(img_paths_gt) == len(img_paths_lq), ('GT folder and LQ folder should have the same length, but got '
                                                        f'{len(img_paths_gt)} and {len(img_paths_lq)}.')

        for img_path_gt, img_path_lq in zip(img_paths_gt, img_paths_lq):
            # get the relative paths
            img_name_gt = os.path.relpath(img_path_gt, args.root[0])
            img_name_lq = os.path.relpath(img_path_lq, args.root[1])
            print(f'{img_name_gt}, {img_name_lq}')
            txt_file.write(f'{img_name_gt}, {img_name_lq}\n')
if __name__ == '__main__':
    """This script is used to generate meta info (txt file) for paired images.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input',
        nargs='+',
        default=['datasets/DF2K/DIV2K_train_HR_sub', 'datasets/DF2K/DIV2K_train_LR_bicubic_X4_sub'],
        help='Input folder, should be [gt_folder, lq_folder]')
    parser.add_argument('--root', nargs='+', default=[None, None], help='Folder root, will use the ')
    parser.add_argument(
        '--meta_info',
        type=str,
        default='datasets/DF2K/meta_info/meta_info_DIV2K_sub_pair.txt',
        help='txt path for meta info')
    args = parser.parse_args()

    assert len(args.input) == 2, 'Input folder should have two elements: gt folder and lq folder'
    assert len(args.root) == 2, 'Root path should have two elements: root for gt folder and lq folder'
    os.makedirs(os.path.dirname(args.meta_info), exist_ok=True)
    for i in range(2):
        # strip one trailing slash so os.path.dirname below yields the parent
        if args.input[i].endswith('/'):
            args.input[i] = args.input[i][:-1]
        # default root: the parent directory of the corresponding input folder
        if args.root[i] is None:
            args.root[i] = os.path.dirname(args.input[i])
    main(args)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
scripts/generate_multiscale_DF2K.py | Python | import argparse
import glob
import os
from PIL import Image
def main(args):
    """Save LANCZOS-downscaled copies of every image in the input folder.

    For each image, three fixed-scale copies (T0..T2) are written, plus one
    copy (T3) whose shortest edge equals 400 pixels.
    """
    # For DF2K we consider three scales, and the smallest image whose
    # shortest edge is 400.
    scale_list = [0.75, 0.5, 1 / 3]
    shortest_edge = 400

    for path in sorted(glob.glob(os.path.join(args.input, '*'))):
        print(path)
        basename = os.path.splitext(os.path.basename(path))[0]
        img = Image.open(path)
        width, height = img.size

        for idx, scale in enumerate(scale_list):
            print(f'\t{scale:.2f}')
            resized = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
            resized.save(os.path.join(args.output, f'{basename}T{idx}.png'))

        # Final copy: rescale so the shortest edge equals `shortest_edge`.
        if width < height:
            ratio = height / width
            width = shortest_edge
            height = int(width * ratio)
        else:
            ratio = width / height
            height = shortest_edge
            width = int(height * ratio)
        resized = img.resize((int(width), int(height)), resample=Image.LANCZOS)
        resized.save(os.path.join(args.output, f'{basename}T{len(scale_list)}.png'))
if __name__ == '__main__':
    """Generate multi-scale versions for GT images with LANCZOS resampling.
    It is now used for DF2K dataset (DIV2K + Flickr 2K)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
    args = parser.parse_args()
    # create the output folder up front; resized copies are written into it
    os.makedirs(args.output, exist_ok=True)
    main(args)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
scripts/pytorch2onnx.py | Python | import argparse
import torch
import torch.onnx
from basicsr.archs.rrdbnet_arch import RRDBNet
def main(args):
    """Export a Real-ESRGAN RRDB generator checkpoint to an ONNX file.

    Loads either the 'params' or 'params_ema' weights (selected by
    ``args.params``) into an x4 RRDBNet and traces it with a dummy input.
    """
    # An instance of the model architecture matching the x4plus checkpoints.
    net = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
    key = 'params' if args.params else 'params_ema'
    net.load_state_dict(torch.load(args.input)[key])
    # Inference only: disable training-mode behavior before tracing.
    net.train(False)
    net.cpu().eval()

    # Dummy input used for tracing.
    dummy = torch.rand(1, 3, 64, 64)
    with torch.no_grad():
        # NOTE(review): torch.onnx._export is a private API; it appears to be
        # used because it returns the traced output (printed below), unlike
        # the public torch.onnx.export — confirm it still exists when
        # upgrading torch.
        traced_out = torch.onnx._export(net, dummy, args.output, opset_version=11, export_params=True)
    print(traced_out.shape)
if __name__ == '__main__':
    """Convert pytorch model to onnx models"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--input', type=str, default='experiments/pretrained_models/RealESRGAN_x4plus.pth', help='Input model path')
    parser.add_argument('--output', type=str, default='realesrgan-x4.onnx', help='Output onnx path')
    # NOTE: action='store_false' makes args.params default to True, so a plain
    # run loads the 'params' key; PASSING --params flips it to False and loads
    # 'params_ema'. The old help text ('Use params instead of params_ema')
    # described the opposite of the flag's effect; the flag itself is kept
    # unchanged for CLI backward compatibility, only the help is corrected.
    parser.add_argument(
        '--params', action='store_false', help='Load the params_ema weights instead of params (default: params)')
    args = parser.parse_args()
    main(args)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
setup.py | Python | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import time
# Path of the auto-generated module rewritten by write_version_py() at build time.
version_file = 'realesrgan/version.py'
def readme():
    """Return the project README as a UTF-8 string (used for long_description)."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
def get_git_hash():
    """Return the current git HEAD sha, or 'unknown' when git cannot be run."""

    def _run_git(cmd):
        # Minimal, locale-pinned environment so git output is stable across systems.
        env = {k: os.environ[k] for k in ('SYSTEMROOT', 'PATH', 'HOME') if k in os.environ}
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        return subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]

    try:
        sha = _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        # git binary not available
        sha = 'unknown'
    return sha
def get_hash():
    """Short (7-char) git sha when run inside a repo checkout, else 'unknown'."""
    if not os.path.exists('.git'):
        return 'unknown'
    return get_git_hash()[:7]
def write_version_py():
    """Generate realesrgan/version.py from the VERSION file and the git sha."""
    template = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    # e.g. '0.3.0' -> "0, 3, 0"; non-numeric parts (rc tags etc.) are quoted.
    VERSION_INFO = ', '.join(x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.'))

    with open(version_file, 'w') as f:
        f.write(template.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO))
def get_version():
    """Return __version__ from the generated version file without importing it.

    The file is exec'd into an explicit namespace dict: the original exec'd
    into the function's locals() and read the result back via locals(), which
    is a CPython quirk the language does not guarantee (exec cannot reliably
    create function locals).
    """
    ns = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), ns)
    return ns['__version__']
def get_requirements(filename='requirements.txt'):
    """Return the dependency list read from *filename* located beside this script."""
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as req_file:
        return req_file.read().splitlines()
if __name__ == '__main__':
    # Regenerate realesrgan/version.py first so get_version() below reads
    # fresh metadata (version string + git sha).
    write_version_py()
    setup(
        name='realesrgan',
        version=get_version(),
        description='Real-ESRGAN aims at developing Practical Algorithms for General Image Restoration',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision, pytorch, image restoration, super-resolution, esrgan, real-esrgan',
        url='https://github.com/xinntao/Real-ESRGAN',
        include_package_data=True,
        # exclude data/experiment folders from the installed package
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='BSD-3-Clause License',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
tests/test_dataset.py | Python | import pytest
import yaml
from realesrgan.data.realesrgan_dataset import RealESRGANDataset
from realesrgan.data.realesrgan_paired_dataset import RealESRGANPairedDataset
def test_realesrgan_dataset():
    """Exercise RealESRGANDataset with disk and lmdb backends.

    Uses fixtures under tests/data (yml config, gt folder, gt.lmdb) and checks
    config parsing, __getitem__ output keys/shapes, and backend validation.
    """
    with open('tests/data/test_realesrgan_dataset.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)
    dataset = RealESRGANDataset(opt)
    assert dataset.io_backend_opt['type'] == 'disk'  # io backend
    assert len(dataset) == 2  # whether to read correct meta info
    assert dataset.kernel_list == [
        'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
    ]  # correct initialization the degradation configurations
    assert dataset.betag_range2 == [0.5, 4]
    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 400, 400)
    assert result['kernel1'].shape == (21, 21)
    assert result['kernel2'].shape == (21, 21)
    assert result['sinc_kernel'].shape == (21, 21)
    assert result['gt_path'] == 'tests/data/gt/baboon.png'
    # ------------------ test lmdb backend -------------------- #
    opt['dataroot_gt'] = 'tests/data/gt.lmdb'
    opt['io_backend']['type'] = 'lmdb'
    dataset = RealESRGANDataset(opt)
    assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
    assert len(dataset.paths) == 2  # whether to read correct meta info
    assert dataset.kernel_list == [
        'iso', 'aniso', 'generalized_iso', 'generalized_aniso', 'plateau_iso', 'plateau_aniso'
    ]  # correct initialization the degradation configurations
    assert dataset.betag_range2 == [0.5, 4]
    # test __getitem__
    result = dataset.__getitem__(1)
    # check returned keys
    expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 400, 400)
    assert result['kernel1'].shape == (21, 21)
    assert result['kernel2'].shape == (21, 21)
    assert result['sinc_kernel'].shape == (21, 21)
    assert result['gt_path'] == 'comic'
    # ------------------ test with sinc_prob = 0 -------------------- #
    opt['dataroot_gt'] = 'tests/data/gt.lmdb'
    opt['io_backend']['type'] = 'lmdb'
    opt['sinc_prob'] = 0
    opt['sinc_prob2'] = 0
    opt['final_sinc_prob'] = 0
    dataset = RealESRGANDataset(opt)
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'kernel1', 'kernel2', 'sinc_kernel', 'gt_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 400, 400)
    assert result['kernel1'].shape == (21, 21)
    assert result['kernel2'].shape == (21, 21)
    assert result['sinc_kernel'].shape == (21, 21)
    assert result['gt_path'] == 'baboon'
    # ------------------ lmdb backend should have paths ends with lmdb -------------------- #
    with pytest.raises(ValueError):
        opt['dataroot_gt'] = 'tests/data/gt'
        opt['io_backend']['type'] = 'lmdb'
        dataset = RealESRGANDataset(opt)
def test_realesrgan_paired_dataset():
    """Exercise RealESRGANPairedDataset: disk/lmdb backends, folder scanning,
    and mean/std normalization; checks gt/lq shapes and returned paths."""
    with open('tests/data/test_realesrgan_paired_dataset.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)
    dataset = RealESRGANPairedDataset(opt)
    assert dataset.io_backend_opt['type'] == 'disk'  # io backend
    assert len(dataset) == 2  # whether to read correct meta info
    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 128, 128)
    assert result['lq'].shape == (3, 32, 32)
    assert result['gt_path'] == 'tests/data/gt/baboon.png'
    assert result['lq_path'] == 'tests/data/lq/baboon.png'
    # ------------------ test lmdb backend -------------------- #
    opt['dataroot_gt'] = 'tests/data/gt.lmdb'
    opt['dataroot_lq'] = 'tests/data/lq.lmdb'
    opt['io_backend']['type'] = 'lmdb'
    dataset = RealESRGANPairedDataset(opt)
    assert dataset.io_backend_opt['type'] == 'lmdb'  # io backend
    assert len(dataset) == 2  # whether to read correct meta info
    # test __getitem__
    result = dataset.__getitem__(1)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 128, 128)
    assert result['lq'].shape == (3, 32, 32)
    assert result['gt_path'] == 'comic'
    assert result['lq_path'] == 'comic'
    # ------------------ test paired_paths_from_folder -------------------- #
    opt['dataroot_gt'] = 'tests/data/gt'
    opt['dataroot_lq'] = 'tests/data/lq'
    opt['io_backend'] = dict(type='disk')
    opt['meta_info'] = None
    dataset = RealESRGANPairedDataset(opt)
    assert dataset.io_backend_opt['type'] == 'disk'  # io backend
    assert len(dataset) == 2  # whether to read correct meta info
    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 128, 128)
    assert result['lq'].shape == (3, 32, 32)
    # ------------------ test normalization -------------------- #
    dataset.mean = [0.5, 0.5, 0.5]
    dataset.std = [0.5, 0.5, 0.5]
    # test __getitem__
    result = dataset.__getitem__(0)
    # check returned keys
    expected_keys = ['gt', 'lq', 'gt_path', 'lq_path']
    assert set(expected_keys).issubset(set(result.keys()))
    # check shape and contents
    assert result['gt'].shape == (3, 128, 128)
    assert result['lq'].shape == (3, 32, 32)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
tests/test_discriminator_arch.py | Python | import torch
from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
def test_unetdiscriminatorsn():
    """Smoke-test UNetDiscriminatorSN: output is a 1-channel map matching input HxW."""
    net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True)
    img = torch.rand((1, 3, 32, 32), dtype=torch.float32)
    # forward on cpu
    assert net(img).shape == (1, 1, 32, 32)
    # forward on gpu, when one is available
    if torch.cuda.is_available():
        net.cuda()
        assert net(img.cuda()).shape == (1, 1, 32, 32)
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
tests/test_model.py | Python | import torch
import yaml
from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.data.paired_image_dataset import PairedImageDataset
from basicsr.losses.losses import GANLoss, L1Loss, PerceptualLoss
from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
from realesrgan.models.realesrgan_model import RealESRGANModel
from realesrgan.models.realesrnet_model import RealESRNetModel
def test_realesrnet_model():
    """Exercise RealESRNetModel: attributes, on-the-fly degradation via
    feed_data (queue dequeue path included), and nondist_validation."""
    with open('tests/data/test_realesrnet_model.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)
    # build model
    model = RealESRNetModel(opt)
    # test attributes
    assert model.__class__.__name__ == 'RealESRNetModel'
    assert isinstance(model.net_g, RRDBNet)
    assert isinstance(model.cri_pix, L1Loss)
    assert isinstance(model.optimizers[0], torch.optim.Adam)
    # prepare data
    gt = torch.rand((1, 3, 32, 32), dtype=torch.float32)
    kernel1 = torch.rand((1, 5, 5), dtype=torch.float32)
    kernel2 = torch.rand((1, 5, 5), dtype=torch.float32)
    sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32)
    data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel)
    model.feed_data(data)
    # check dequeue
    model.feed_data(data)
    # check data shape
    assert model.lq.shape == (1, 3, 8, 8)
    assert model.gt.shape == (1, 3, 32, 32)
    # change probability to test if-else
    model.opt['gaussian_noise_prob'] = 0
    model.opt['gray_noise_prob'] = 0
    model.opt['second_blur_prob'] = 0
    model.opt['gaussian_noise_prob2'] = 0
    model.opt['gray_noise_prob2'] = 0
    model.feed_data(data)
    # check data shape
    assert model.lq.shape == (1, 3, 8, 8)
    assert model.gt.shape == (1, 3, 32, 32)
    # ----------------- test nondist_validation -------------------- #
    # construct dataloader
    dataset_opt = dict(
        name='Demo',
        dataroot_gt='tests/data/gt',
        dataroot_lq='tests/data/lq',
        io_backend=dict(type='disk'),
        scale=4,
        phase='val')
    dataset = PairedImageDataset(dataset_opt)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    assert model.is_train is True
    model.nondist_validation(dataloader, 1, None, False)
    assert model.is_train is True
def test_realesrgan_model():
    """Exercise RealESRGANModel: generator/discriminator attributes, the
    degradation pipeline in feed_data, nondist_validation, and one GAN
    optimization step (checks logged loss keys)."""
    with open('tests/data/test_realesrgan_model.yml', mode='r') as f:
        opt = yaml.load(f, Loader=yaml.FullLoader)
    # build model
    model = RealESRGANModel(opt)
    # test attributes
    assert model.__class__.__name__ == 'RealESRGANModel'
    assert isinstance(model.net_g, RRDBNet)  # generator
    assert isinstance(model.net_d, UNetDiscriminatorSN)  # discriminator
    assert isinstance(model.cri_pix, L1Loss)
    assert isinstance(model.cri_perceptual, PerceptualLoss)
    assert isinstance(model.cri_gan, GANLoss)
    assert isinstance(model.optimizers[0], torch.optim.Adam)
    assert isinstance(model.optimizers[1], torch.optim.Adam)
    # prepare data
    gt = torch.rand((1, 3, 32, 32), dtype=torch.float32)
    kernel1 = torch.rand((1, 5, 5), dtype=torch.float32)
    kernel2 = torch.rand((1, 5, 5), dtype=torch.float32)
    sinc_kernel = torch.rand((1, 5, 5), dtype=torch.float32)
    data = dict(gt=gt, kernel1=kernel1, kernel2=kernel2, sinc_kernel=sinc_kernel)
    model.feed_data(data)
    # check dequeue
    model.feed_data(data)
    # check data shape
    assert model.lq.shape == (1, 3, 8, 8)
    assert model.gt.shape == (1, 3, 32, 32)
    # change probability to test if-else
    model.opt['gaussian_noise_prob'] = 0
    model.opt['gray_noise_prob'] = 0
    model.opt['second_blur_prob'] = 0
    model.opt['gaussian_noise_prob2'] = 0
    model.opt['gray_noise_prob2'] = 0
    model.feed_data(data)
    # check data shape
    assert model.lq.shape == (1, 3, 8, 8)
    assert model.gt.shape == (1, 3, 32, 32)
    # ----------------- test nondist_validation -------------------- #
    # construct dataloader
    dataset_opt = dict(
        name='Demo',
        dataroot_gt='tests/data/gt',
        dataroot_lq='tests/data/lq',
        io_backend=dict(type='disk'),
        scale=4,
        phase='val')
    dataset = PairedImageDataset(dataset_opt)
    dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, shuffle=False, num_workers=0)
    assert model.is_train is True
    model.nondist_validation(dataloader, 1, None, False)
    assert model.is_train is True
    # ----------------- test optimize_parameters -------------------- #
    model.feed_data(data)
    model.optimize_parameters(1)
    assert model.output.shape == (1, 3, 32, 32)
    assert isinstance(model.log_dict, dict)
    # check returned keys
    expected_keys = ['l_g_pix', 'l_g_percep', 'l_g_gan', 'l_d_real', 'out_d_real', 'l_d_fake', 'out_d_fake']
    assert set(expected_keys).issubset(set(model.log_dict.keys()))
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
tests/test_utils.py | Python | import numpy as np
from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan.utils import RealESRGANer
def test_realesrganer():
    """Exercise the RealESRGANer helper end to end: construction with default
    and user-supplied models, pre/post processing, tiled processing, and
    enhance() for RGB / 16-bit / grayscale / RGBA inputs.

    NOTE(review): requires the pretrained .pth files under
    experiments/pretrained_models to be present.
    """
    # initialize with default model
    restorer = RealESRGANer(
        scale=4,
        model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',
        model=None,
        tile=10,
        tile_pad=10,
        pre_pad=2,
        half=False)
    assert isinstance(restorer.model, RRDBNet)
    assert restorer.half is False
    # initialize with user-defined model
    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
    restorer = RealESRGANer(
        scale=4,
        model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth',
        model=model,
        tile=10,
        tile_pad=10,
        pre_pad=2,
        half=True)
    # test attribute
    assert isinstance(restorer.model, RRDBNet)
    assert restorer.half is True
    # ------------------ test pre_process ---------------- #
    img = np.random.random((12, 12, 3)).astype(np.float32)
    restorer.pre_process(img)
    assert restorer.img.shape == (1, 3, 14, 14)
    # with modcrop
    restorer.scale = 1
    restorer.pre_process(img)
    assert restorer.img.shape == (1, 3, 16, 16)
    # ------------------ test process ---------------- #
    restorer.process()
    assert restorer.output.shape == (1, 3, 64, 64)
    # ------------------ test post_process ---------------- #
    restorer.mod_scale = 4
    output = restorer.post_process()
    assert output.shape == (1, 3, 60, 60)
    # ------------------ test tile_process ---------------- #
    restorer.scale = 4
    img = np.random.random((12, 12, 3)).astype(np.float32)
    restorer.pre_process(img)
    restorer.tile_process()
    assert restorer.output.shape == (1, 3, 64, 64)
    # ------------------ test enhance ---------------- #
    img = np.random.random((12, 12, 3)).astype(np.float32)
    result = restorer.enhance(img, outscale=2)
    assert result[0].shape == (24, 24, 3)
    assert result[1] == 'RGB'
    # ------------------ test enhance with 16-bit image---------------- #
    img = np.random.random((4, 4, 3)).astype(np.uint16) + 512
    result = restorer.enhance(img, outscale=2)
    assert result[0].shape == (8, 8, 3)
    assert result[1] == 'RGB'
    # ------------------ test enhance with gray image---------------- #
    img = np.random.random((4, 4)).astype(np.float32)
    result = restorer.enhance(img, outscale=2)
    assert result[0].shape == (8, 8)
    assert result[1] == 'L'
    # ------------------ test enhance with RGBA---------------- #
    img = np.random.random((4, 4, 4)).astype(np.float32)
    result = restorer.enhance(img, outscale=2)
    assert result[0].shape == (8, 8, 4)
    assert result[1] == 'RGBA'
    # ------------------ test enhance with RGBA, alpha_upsampler---------------- #
    restorer.tile_size = 0
    img = np.random.random((4, 4, 4)).astype(np.float32)
    result = restorer.enhance(img, outscale=2, alpha_upsampler=None)
    assert result[0].shape == (8, 8, 4)
    assert result[1] == 'RGBA'
| xinntao/Real-ESRGAN | 34,354 | Real-ESRGAN aims at developing Practical Algorithms for General Image/Video Restoration. | Python | xinntao | Xintao | Tencent |
facexlib/__init__.py | Python | # flake8: noqa
from .alignment import *
from .detection import *
from .recognition import *
from .tracking import *
from .utils import *
from .version import __gitsha__, __version__
from .visualization import *
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/alignment/__init__.py | Python | import torch
from facexlib.utils import load_file_from_url
from .awing_arch import FAN
from .convert_98_to_68_landmarks import landmark_98_to_68
__all__ = ['FAN', 'landmark_98_to_68']
def init_alignment_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Create a face-alignment model with pretrained weights.

    Args:
        model_name (str): Only 'awing_fan' is supported.
        half (bool): Unused in this function; kept for API symmetry with the
            other init_* helpers.
        device (str): Device the model is moved to.
        model_rootpath (str | None): Optional directory for downloaded weights.

    Returns:
        FAN: The model in eval mode on `device`.

    Raises:
        NotImplementedError: If `model_name` is not recognized.
    """
    if model_name != 'awing_fan':
        raise NotImplementedError(f'{model_name} is not implemented.')

    model = FAN(num_modules=4, num_landmarks=98, device=device)
    model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/alignment_WFLW_4HG.pth'
    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    model.load_state_dict(torch.load(model_path)['state_dict'], strict=True)
    model.eval()
    return model.to(device)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/alignment/awing_arch.py | Python | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
def calculate_points(heatmaps):
    """Decode sub-pixel landmark coordinates from heatmaps.

    For each heatmap the argmax pixel is refined by a quarter-pixel shift
    toward the larger of its two neighbours on each axis, plus a half-pixel
    center offset.

    Args:
        heatmaps (np.ndarray): Shape (B, N, H, W) — N heatmaps per batch item.

    Returns:
        np.ndarray: Shape (B, N, 2) float predictions as (x, y).
    """
    B, N, H, W = heatmaps.shape
    HW = H * W
    BN_range = np.arange(B * N)

    heatline = heatmaps.reshape(B, N, HW)
    indexes = np.argmax(heatline, axis=2)
    # np.float was removed in NumPy 1.24; float64 is what the old alias meant.
    preds = np.stack((indexes % W, indexes // W), axis=2).astype(np.float64, copy=False)

    inr = indexes.ravel()
    heatline = heatline.reshape(B * N, HW)
    # Neighbour values around each peak. Indices are clipped per element so a
    # peak on the image border cannot index out of range (the original code
    # wrapped around for x, clamped *all* rows whenever any single y index
    # overflowed, and hard-coded 4096 == 64 * 64 instead of H * W).
    x_up = heatline[BN_range, np.clip(inr + 1, 0, HW - 1)]
    x_down = heatline[BN_range, np.clip(inr - 1, 0, HW - 1)]
    y_up = heatline[BN_range, np.clip(inr + W, 0, HW - 1)]
    y_down = heatline[BN_range, np.clip(inr - W, 0, HW - 1)]

    # Quarter-pixel shift toward the larger neighbour, then the half-pixel
    # pixel-center offset.
    think_diff = np.sign(np.stack((x_up - x_down, y_up - y_down), axis=1))
    think_diff *= .25
    preds += think_diff.reshape(B, N, 2)
    preds += .5
    return preds
class AddCoordsTh(nn.Module):
    """Append normalized coordinate channels to a feature map (CoordConv-style).

    Appends an x channel and a y channel in [-1, 1]; optionally a radius
    channel (`with_r`) and, when `with_boundary` and a heatmap are given,
    two boundary-masked coordinate channels.
    """

    def __init__(self, x_dim=64, y_dim=64, with_r=False, with_boundary=False):
        super(AddCoordsTh, self).__init__()
        self.x_dim = x_dim
        self.y_dim = y_dim
        self.with_r = with_r
        self.with_boundary = with_boundary

    def forward(self, input_tensor, heatmap=None):
        """input_tensor: (batch, c, x_dim, y_dim)"""
        batch = input_tensor.shape[0]
        device = input_tensor.device

        # Build the x coordinate grid via an outer product: ones(y) x arange(x).
        ones_y = torch.ones([1, self.y_dim], dtype=torch.int32, device=device).unsqueeze(-1)
        range_x = torch.arange(self.x_dim, dtype=torch.int32, device=device).unsqueeze(0).unsqueeze(1)
        xx = torch.matmul(ones_y.float(), range_x.float()).unsqueeze(-1)

        # Same for the y grid: arange(y) x ones(x).
        ones_x = torch.ones([1, self.x_dim], dtype=torch.int32, device=device).unsqueeze(1)
        range_y = torch.arange(self.y_dim, dtype=torch.int32, device=device).unsqueeze(0).unsqueeze(-1)
        yy = torch.matmul(range_y.float(), ones_x.float()).unsqueeze(-1)

        # (1, H, W, 1) -> (1, 1, W, H), then rescale to [-1, 1] and tile over batch.
        xx = xx.permute(0, 3, 2, 1)
        yy = yy.permute(0, 3, 2, 1)
        xx = (xx / (self.x_dim - 1)) * 2 - 1
        yy = (yy / (self.y_dim - 1)) * 2 - 1
        xx = xx.repeat(batch, 1, 1, 1)
        yy = yy.repeat(batch, 1, 1, 1)

        if self.with_boundary and heatmap is not None:
            # Coordinates masked to where the (last-channel) boundary heatmap fires.
            boundary = torch.clamp(heatmap[:, -1:, :, :], 0.0, 1.0)
            zeros = torch.zeros_like(xx)
            xx_boundary = torch.where(boundary > 0.05, xx, zeros).to(device)
            yy_boundary = torch.where(boundary > 0.05, yy, zeros).to(device)

        ret = torch.cat([input_tensor, xx, yy], dim=1)
        if self.with_r:
            # Radius channel normalized by its maximum.
            rr = torch.sqrt(torch.pow(xx, 2) + torch.pow(yy, 2))
            ret = torch.cat([ret, rr / torch.max(rr)], dim=1)
        if self.with_boundary and heatmap is not None:
            ret = torch.cat([ret, xx_boundary, yy_boundary], dim=1)
        return ret
class CoordConvTh(nn.Module):
    """CoordConv layer as in the paper: AddCoordsTh followed by a Conv2d."""

    def __init__(self, x_dim, y_dim, with_r, with_boundary, in_channels, first_one=False, *args, **kwargs):
        super(CoordConvTh, self).__init__()
        self.addcoords = AddCoordsTh(x_dim=x_dim, y_dim=y_dim, with_r=with_r, with_boundary=with_boundary)
        # Extra channels appended by AddCoordsTh: xx and yy, plus an optional
        # radius channel, plus two boundary channels after the first stage.
        extra = 2
        if with_r:
            extra += 1
        if with_boundary and not first_one:
            extra += 2
        self.conv = nn.Conv2d(in_channels=in_channels + extra, *args, **kwargs)

    def forward(self, input_tensor, heatmap=None):
        with_coords = self.addcoords(input_tensor, heatmap)
        # The last two channels are forwarded untouched alongside the conv output.
        last_channel = with_coords[:, -2:, :, :]
        return self.conv(with_coords), last_channel
def conv3x3(in_planes, out_planes, strd=1, padding=1, bias=False, dilation=1):
    """Build a 3x3 convolution with padding (thin wrapper over nn.Conv2d)."""
    conv_kwargs = dict(kernel_size=3, stride=strd, padding=padding, bias=bias, dilation=dilation)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """Residual basic block: two 3x3 convs with a ReLU, no batch norm."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut when the caller supplied a downsample module.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        out = out + shortcut
        return self.relu(out)
class ConvBlock(nn.Module):
    """Hourglass conv block: three BN-ReLU-conv stages whose outputs are
    concatenated channel-wise (out/2 + out/4 + out/4) and added to a residual."""

    def __init__(self, in_planes, out_planes):
        super(ConvBlock, self).__init__()
        half = int(out_planes / 2)
        quarter = int(out_planes / 4)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = conv3x3(in_planes, half)
        self.bn2 = nn.BatchNorm2d(half)
        self.conv2 = conv3x3(half, quarter, padding=1, dilation=1)
        self.bn3 = nn.BatchNorm2d(quarter)
        self.conv3 = conv3x3(quarter, quarter, padding=1, dilation=1)

        if in_planes == out_planes:
            self.downsample = None
        else:
            # 1x1 projection so the residual matches the concatenated width.
            self.downsample = nn.Sequential(
                nn.BatchNorm2d(in_planes),
                nn.ReLU(True),
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False),
            )

    def forward(self, x):
        branch1 = self.conv1(F.relu(self.bn1(x), True))
        branch2 = self.conv2(F.relu(self.bn2(branch1), True))
        branch3 = self.conv3(F.relu(self.bn3(branch2), True))
        merged = torch.cat((branch1, branch2, branch3), 1)
        shortcut = x if self.downsample is None else self.downsample(x)
        return merged + shortcut
class HourGlass(nn.Module):
    """Recursive hourglass module with a CoordConv input layer.

    Each level halves the spatial resolution via average pooling, recurses
    one level deeper, then upsamples and adds the full-resolution skip
    branch back. Sub-blocks are registered by name ('b1_<level>', ...).
    """

    def __init__(self, num_modules, depth, num_features, first_one=False):
        super(HourGlass, self).__init__()
        self.num_modules = num_modules
        self.depth = depth
        self.features = num_features
        # CoordConv injects x/y (+radius, +boundary) channels before the hourglass.
        self.coordconv = CoordConvTh(
            x_dim=64,
            y_dim=64,
            with_r=True,
            with_boundary=True,
            in_channels=256,
            first_one=first_one,
            out_channels=256,
            kernel_size=1,
            stride=1,
            padding=0)
        self._generate_network(self.depth)

    def _generate_network(self, level):
        # Register the conv blocks for this level; recurse for deeper levels.
        self.add_module('b1_' + str(level), ConvBlock(256, 256))
        self.add_module('b2_' + str(level), ConvBlock(256, 256))
        if level > 1:
            self._generate_network(level - 1)
        else:
            # Innermost level gets an extra block on the low-resolution path.
            self.add_module('b2_plus_' + str(level), ConvBlock(256, 256))
        self.add_module('b3_' + str(level), ConvBlock(256, 256))

    def _forward(self, level, inp):
        # Upper branch
        up1 = inp
        up1 = self._modules['b1_' + str(level)](up1)
        # Lower branch
        low1 = F.avg_pool2d(inp, 2, stride=2)
        low1 = self._modules['b2_' + str(level)](low1)
        if level > 1:
            low2 = self._forward(level - 1, low1)
        else:
            low2 = low1
            low2 = self._modules['b2_plus_' + str(level)](low2)
        low3 = low2
        low3 = self._modules['b3_' + str(level)](low3)
        # Restore the upper-branch resolution and merge the two paths.
        up2 = F.interpolate(low3, scale_factor=2, mode='nearest')
        return up1 + up2

    def forward(self, x, heatmap):
        # heatmap may be None for the first stack (no boundary channels yet).
        x, last_channel = self.coordconv(x, heatmap)
        return self._forward(self.depth, x), last_channel
class FAN(nn.Module):
    """Face Alignment Network: a stack of CoordConv hourglass modules.

    Each hourglass predicts `num_landmarks + 1` heatmaps (landmarks plus a
    boundary channel); later stacks receive the previous prediction so their
    CoordConv layer can gate coordinates by the boundary heatmap.

    Args:
        num_modules (int): Number of stacked hourglass modules.
        end_relu (bool): Apply ReLU to the predicted heatmaps.
        gray_scale (bool): NOTE(review): both branches below build an
            identical 3-channel stem, so this flag currently has no
            effect — confirm intended behavior.
        num_landmarks (int): Number of landmark heatmaps.
        device (str): Device used by `get_landmarks`.
    """

    def __init__(self, num_modules=1, end_relu=False, gray_scale=False, num_landmarks=68, device='cuda'):
        super(FAN, self).__init__()
        self.device = device
        self.num_modules = num_modules
        self.gray_scale = gray_scale
        self.end_relu = end_relu
        self.num_landmarks = num_landmarks
        # Base part
        if self.gray_scale:
            self.conv1 = CoordConvTh(
                x_dim=256,
                y_dim=256,
                with_r=True,
                with_boundary=False,
                in_channels=3,
                out_channels=64,
                kernel_size=7,
                stride=2,
                padding=3)
        else:
            self.conv1 = CoordConvTh(
                x_dim=256,
                y_dim=256,
                with_r=True,
                with_boundary=False,
                in_channels=3,
                out_channels=64,
                kernel_size=7,
                stride=2,
                padding=3)
        self.bn1 = nn.BatchNorm2d(64)
        self.conv2 = ConvBlock(64, 128)
        self.conv3 = ConvBlock(128, 128)
        self.conv4 = ConvBlock(128, 256)
        # Stacking part
        for hg_module in range(self.num_modules):
            if hg_module == 0:
                first_one = True
            else:
                first_one = False
            self.add_module('m' + str(hg_module), HourGlass(1, 4, 256, first_one))
            self.add_module('top_m_' + str(hg_module), ConvBlock(256, 256))
            self.add_module('conv_last' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
            self.add_module('bn_end' + str(hg_module), nn.BatchNorm2d(256))
            # Heatmap head: num_landmarks channels + 1 boundary channel.
            self.add_module('l' + str(hg_module), nn.Conv2d(256, num_landmarks + 1, kernel_size=1, stride=1, padding=0))
            if hg_module < self.num_modules - 1:
                # Remap features and heatmaps to 256 channels for the next stack.
                self.add_module('bl' + str(hg_module), nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0))
                self.add_module('al' + str(hg_module),
                                nn.Conv2d(num_landmarks + 1, 256, kernel_size=1, stride=1, padding=0))

    def forward(self, x):
        """Return (heatmap predictions per stack, boundary channels per stack)."""
        x, _ = self.conv1(x)
        x = F.relu(self.bn1(x), True)
        # x = F.relu(self.bn1(self.conv1(x)), True)
        x = F.avg_pool2d(self.conv2(x), 2, stride=2)
        x = self.conv3(x)
        x = self.conv4(x)
        previous = x
        outputs = []
        boundary_channels = []
        tmp_out = None
        for i in range(self.num_modules):
            hg, boundary_channel = self._modules['m' + str(i)](previous, tmp_out)
            ll = hg
            ll = self._modules['top_m_' + str(i)](ll)
            ll = F.relu(self._modules['bn_end' + str(i)](self._modules['conv_last' + str(i)](ll)), True)
            # Predict heatmaps
            tmp_out = self._modules['l' + str(i)](ll)
            if self.end_relu:
                tmp_out = F.relu(tmp_out)  # HACK: Added relu
            outputs.append(tmp_out)
            boundary_channels.append(boundary_channel)
            if i < self.num_modules - 1:
                ll = self._modules['bl' + str(i)](ll)
                tmp_out_ = self._modules['al' + str(i)](tmp_out)
                # Intermediate supervision: feed features + heatmaps forward.
                previous = previous + ll + tmp_out_
        return outputs, boundary_channels

    def get_landmarks(self, img):
        """Predict landmark coordinates for one image.

        Args:
            img (numpy.ndarray): Image of shape (H, W, 3); presumably BGR
                (cv2 convention) since channels are reversed below — confirm.

        Returns:
            numpy.ndarray: Landmark (x, y) coordinates, scaled back to the
            original image size.
        """
        H, W, _ = img.shape
        # Scale factors from the 64x64 heatmap grid back to the input size.
        offset = W / 64, H / 64, 0, 0
        img = cv2.resize(img, (256, 256))
        inp = img[..., ::-1]  # reverse channel order
        inp = torch.from_numpy(np.ascontiguousarray(inp.transpose((2, 0, 1)))).float()
        inp = inp.to(self.device)
        inp.div_(255.0).unsqueeze_(0)
        outputs, _ = self.forward(inp)
        out = outputs[-1][:, :-1, :, :]  # drop the boundary channel
        heatmaps = out.detach().cpu().numpy()
        # `calculate_points` is defined elsewhere in the package.
        pred = calculate_points(heatmaps).reshape(-1, 2)
        pred *= offset[:2]
        pred += offset[-2:]
        return pred
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/alignment/convert_98_to_68_landmarks.py | Python | import numpy as np
def load_txt_file(file_path):
    """Load stripped text lines from a file.

    Args:
        file_path (str): Path to the text file.

    Returns:
        tuple[list[str], int]: The stripped lines and the line count.
    """
    # The context manager closes the file; the original's explicit close()
    # inside the `with` block was redundant and has been removed.
    with open(file_path, 'r') as cfile:
        content = cfile.readlines()
    content = [x.strip() for x in content]
    return content, len(content)
def anno_parser(anno_path, num_pts, line_offset=0):
    """Parse a landmark annotation file.

    Args:
        anno_path (str): Path of the annotation file (suffix .txt).
        num_pts (int): Number of landmarks.
        line_offset (int): Line index where the first point starts. Default: 0.

    Returns:
        numpy.ndarray: Landmark coordinates of shape (num_pts, 2) as (x, y).
    """
    data, _ = load_txt_file(anno_path)
    pts = np.zeros((num_pts, 2), dtype='float32')
    # Each annotation line holds one "x,y" coordinate pair.
    for idx in range(num_pts):
        try:
            fields = data[idx + line_offset].split(',')
            pts[idx, 0] = float(fields[0])
            pts[idx, 1] = float(fields[1])
        except ValueError:
            print(f'Error in loading points in {anno_path}')
    return pts
def landmark_98_to_68(landmark_98):
    """Convert 98-point landmark positions to the 68-point layout.

    Args:
        landmark_98 (numpy.ndarray): 98 landmark coordinates, shape (98, 2).

    Returns:
        numpy.ndarray: 68 landmark coordinates, shape (68, 2), float32.
    """
    landmark_68 = np.zeros((68, 2), dtype='float32')
    # jawline: every second point of the 33-point cheek contour
    landmark_68[0:17] = landmark_98[0:33:2]
    # nose: direct copy
    landmark_68[27:36] = landmark_98[51:60]
    # mouth: direct copy
    landmark_68[48:68] = landmark_98[76:96]
    # left eyebrow: endpoint copied, inner points averaged top/bottom
    landmark_68[17] = landmark_98[33]
    for k in range(4):
        landmark_68[18 + k] = (landmark_98[34 + k] + landmark_98[41 - k]) / 2
    # right eyebrow: inner points averaged, endpoint copied
    for k in range(4):
        landmark_68[22 + k] = (landmark_98[42 + k] + landmark_98[50 - k]) / 2
    landmark_68[26] = landmark_98[46]
    # eyes: fixed index lookup tables
    landmark_68[[36, 37, 38, 39, 40, 41]] = landmark_98[[60, 61, 63, 64, 65, 67]]
    landmark_68[[42, 43, 44, 45, 46, 47]] = landmark_98[[68, 69, 71, 72, 73, 75]]
    return landmark_68
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/assessment/__init__.py | Python | import torch
from facexlib.utils import load_file_from_url
from .hyperiqa_net import HyperIQA
def init_assessment_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Initialize a face-quality assessment model with pretrained weights.

    Args:
        model_name (str): Currently only 'hypernet' is supported.
        half (bool): Kept for API consistency; not used by this model.
        device (str): Device to place the model on. Default: 'cuda'.
        model_rootpath (str): Optional root path for the downloaded weights.

    Returns:
        HyperIQA: The model in eval mode on `device`.

    Raises:
        NotImplementedError: If `model_name` is unknown.
    """
    if model_name != 'hypernet':
        raise NotImplementedError(f'{model_name} is not implemented.')
    model = HyperIQA(16, 112, 224, 112, 56, 28, 14, 7)
    model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/assessment_hyperIQA.pth'

    # load the pre-trained hypernet model
    hypernet_model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    state_dict = torch.load(hypernet_model_path, map_location=lambda storage, loc: storage)
    model.hypernet.load_state_dict(state_dict)
    return model.eval().to(device)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/assessment/hyperiqa_net.py | Python | import torch as torch
import torch.nn as nn
from torch.nn import functional as F
class HyperIQA(nn.Module):
    """Wrap the hyper network and its generated target network in one module."""

    def __init__(self, *args):
        super(HyperIQA, self).__init__()
        self.hypernet = HyperNet(*args)

    def forward(self, img):
        # The hypernet predicts the weights/biases of a per-image target net.
        net_params = self.hypernet(img)
        target_net = TargetNet(net_params)
        # The generated parameters are constants for this prediction.
        for weight in target_net.parameters():
            weight.requires_grad = False
        # Predict the face quality from the target input vector.
        return target_net(net_params['target_in_vec'])
class HyperNet(nn.Module):
    """
    Hyper network for learning perceptual rules.

    Args:
        lda_out_channels: local distortion aware module output size.
        hyper_in_channels: input feature channels for hyper network.
        target_in_size: input vector size for target network.
        target_fc(i)_size: fully connection layer size of target network.
        feature_size: input feature map width/height for hyper network.

    Note:
        For size match, input args must satisfy: 'target_fc(i)_size * target_fc(i+1)_size' is divisible by 'feature_size ^ 2'. # noqa E501
    """

    def __init__(self, lda_out_channels, hyper_in_channels, target_in_size, target_fc1_size, target_fc2_size,
                 target_fc3_size, target_fc4_size, feature_size):
        super(HyperNet, self).__init__()
        self.hyperInChn = hyper_in_channels
        self.target_in_size = target_in_size
        self.f1 = target_fc1_size
        self.f2 = target_fc2_size
        self.f3 = target_fc3_size
        self.f4 = target_fc4_size
        self.feature_size = feature_size
        # ResNet-50 backbone; also produces the target-net input vector.
        self.res = resnet50_backbone(lda_out_channels, target_in_size)
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        # Conv layers for resnet output features
        self.conv1 = nn.Sequential(
            nn.Conv2d(2048, 1024, 1, padding=(0, 0)), nn.ReLU(inplace=True), nn.Conv2d(1024, 512, 1, padding=(0, 0)),
            nn.ReLU(inplace=True), nn.Conv2d(512, self.hyperInChn, 1, padding=(0, 0)), nn.ReLU(inplace=True))
        # Hyper network part, conv for generating target fc weights, fc for generating target fc biases.
        # Each fc(i)w_conv emits exactly f(i-1)*f(i)/feature_size^2 channels so the
        # feature_size x feature_size map reshapes into the target layer's weights.
        self.fc1w_conv = nn.Conv2d(
            self.hyperInChn, int(self.target_in_size * self.f1 / feature_size**2), 3, padding=(1, 1))
        self.fc1b_fc = nn.Linear(self.hyperInChn, self.f1)
        self.fc2w_conv = nn.Conv2d(self.hyperInChn, int(self.f1 * self.f2 / feature_size**2), 3, padding=(1, 1))
        self.fc2b_fc = nn.Linear(self.hyperInChn, self.f2)
        self.fc3w_conv = nn.Conv2d(self.hyperInChn, int(self.f2 * self.f3 / feature_size**2), 3, padding=(1, 1))
        self.fc3b_fc = nn.Linear(self.hyperInChn, self.f3)
        self.fc4w_conv = nn.Conv2d(self.hyperInChn, int(self.f3 * self.f4 / feature_size**2), 3, padding=(1, 1))
        self.fc4b_fc = nn.Linear(self.hyperInChn, self.f4)
        # The final scalar layer's weights/bias come from pooled features.
        self.fc5w_fc = nn.Linear(self.hyperInChn, self.f4)
        self.fc5b_fc = nn.Linear(self.hyperInChn, 1)

    def forward(self, img):
        feature_size = self.feature_size
        res_out = self.res(img)
        # input vector for target net
        target_in_vec = res_out['target_in_vec'].view(-1, self.target_in_size, 1, 1)
        # input features for hyper net
        hyper_in_feat = self.conv1(res_out['hyper_in_feat']).view(-1, self.hyperInChn, feature_size, feature_size)
        # generating target net weights & biases
        target_fc1w = self.fc1w_conv(hyper_in_feat).view(-1, self.f1, self.target_in_size, 1, 1)
        target_fc1b = self.fc1b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f1)
        target_fc2w = self.fc2w_conv(hyper_in_feat).view(-1, self.f2, self.f1, 1, 1)
        target_fc2b = self.fc2b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f2)
        target_fc3w = self.fc3w_conv(hyper_in_feat).view(-1, self.f3, self.f2, 1, 1)
        target_fc3b = self.fc3b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f3)
        target_fc4w = self.fc4w_conv(hyper_in_feat).view(-1, self.f4, self.f3, 1, 1)
        target_fc4b = self.fc4b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, self.f4)
        target_fc5w = self.fc5w_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1, self.f4, 1, 1)
        target_fc5b = self.fc5b_fc(self.pool(hyper_in_feat).squeeze()).view(-1, 1)
        # Parameter dict consumed by TargetNet / TargetFC.
        out = {}
        out['target_in_vec'] = target_in_vec
        out['target_fc1w'] = target_fc1w
        out['target_fc1b'] = target_fc1b
        out['target_fc2w'] = target_fc2w
        out['target_fc2b'] = target_fc2b
        out['target_fc3w'] = target_fc3w
        out['target_fc3b'] = target_fc3b
        out['target_fc4w'] = target_fc4w
        out['target_fc4b'] = target_fc4b
        out['target_fc5w'] = target_fc5w
        out['target_fc5b'] = target_fc5b
        return out
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the shortcut when resolution/channels change.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class ResNetBackbone(nn.Module):
    """ResNet backbone with local-distortion-aware (lda) side branches.

    After each residual stage, features are 1x1-projected, 7x7 avg-pooled
    and flattened; the four branch vectors are concatenated into a single
    per-image vector of size `in_chn`.

    Note:
        The fixed pool/fc sizes (16*64, 32*16, 64*4, 2048) appear to assume
        a 224x224 input resolution — verify against callers before reuse.
    """

    def __init__(self, lda_out_channels, in_chn, block, layers, num_classes=1000):
        super(ResNetBackbone, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # local distortion aware module
        self.lda1_pool = nn.Sequential(
            nn.Conv2d(256, 16, kernel_size=1, stride=1, padding=0, bias=False),
            nn.AvgPool2d(7, stride=7),
        )
        self.lda1_fc = nn.Linear(16 * 64, lda_out_channels)
        self.lda2_pool = nn.Sequential(
            nn.Conv2d(512, 32, kernel_size=1, stride=1, padding=0, bias=False),
            nn.AvgPool2d(7, stride=7),
        )
        self.lda2_fc = nn.Linear(32 * 16, lda_out_channels)
        self.lda3_pool = nn.Sequential(
            nn.Conv2d(1024, 64, kernel_size=1, stride=1, padding=0, bias=False),
            nn.AvgPool2d(7, stride=7),
        )
        self.lda3_fc = nn.Linear(64 * 4, lda_out_channels)
        self.lda4_pool = nn.AvgPool2d(7, stride=7)
        # The last branch fills the remaining channels of the output vector.
        self.lda4_fc = nn.Linear(2048, in_chn - lda_out_channels * 3)

    def _make_layer(self, block, planes, blocks, stride=1):
        # Standard ResNet stage builder: a 1x1 projection shortcut is added
        # when the resolution or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        # the same effect as lda operation in the paper, but save much more memory
        lda_1 = self.lda1_fc(self.lda1_pool(x).view(x.size(0), -1))
        x = self.layer2(x)
        lda_2 = self.lda2_fc(self.lda2_pool(x).view(x.size(0), -1))
        x = self.layer3(x)
        lda_3 = self.lda3_fc(self.lda3_pool(x).view(x.size(0), -1))
        x = self.layer4(x)
        lda_4 = self.lda4_fc(self.lda4_pool(x).view(x.size(0), -1))
        # Concatenate the four branch vectors into the target input vector.
        vec = torch.cat((lda_1, lda_2, lda_3, lda_4), 1)
        out = {}
        out['hyper_in_feat'] = x
        out['target_in_vec'] = vec
        return out
def resnet50_backbone(lda_out_channels, in_chn, **kwargs):
    """Construct the ResNet-50 backbone used by the hyper network.

    Args:
        lda_out_channels (int): Output size of each local-distortion-aware branch.
        in_chn (int): Total size of the target-net input vector.

    Returns:
        ResNetBackbone: The backbone model (Bottleneck, layers [3, 4, 6, 3]).
    """
    return ResNetBackbone(lda_out_channels, in_chn, Bottleneck, [3, 4, 6, 3], **kwargs)
class TargetNet(nn.Module):
    """Quality-prediction network whose fc parameters come from the hyper net."""

    def __init__(self, paras):
        super(TargetNet, self).__init__()
        # Four sigmoid-activated FC layers, the last followed by a linear FC.
        self.l1 = nn.Sequential(
            TargetFC(paras['target_fc1w'], paras['target_fc1b']),
            nn.Sigmoid(),
        )
        self.l2 = nn.Sequential(
            TargetFC(paras['target_fc2w'], paras['target_fc2b']),
            nn.Sigmoid(),
        )
        self.l3 = nn.Sequential(
            TargetFC(paras['target_fc3w'], paras['target_fc3b']),
            nn.Sigmoid(),
        )
        self.l4 = nn.Sequential(
            TargetFC(paras['target_fc4w'], paras['target_fc4b']),
            nn.Sigmoid(),
            TargetFC(paras['target_fc5w'], paras['target_fc5b']),
        )

    def forward(self, x):
        q = self.l3(self.l2(self.l1(x)))
        return self.l4(q).squeeze()
class TargetFC(nn.Module):
    """Per-image fully-connected layer implemented as a grouped 1x1 conv.

    Weights & biases differ across images in the batch, so the batch is
    folded into the channel dimension and a grouped convolution applies
    each image's own parameters.
    """

    def __init__(self, weight, bias):
        super(TargetFC, self).__init__()
        self.weight = weight
        self.bias = bias

    def forward(self, input_):
        batch, chn = input_.shape[0], input_.shape[1]
        n_img, n_out = self.weight.shape[0], self.weight.shape[1]
        # Fold the batch into channels: (1, B*C, H, W).
        folded_in = input_.view(-1, batch * chn, input_.shape[2], input_.shape[3])
        folded_w = self.weight.view(n_img * n_out, *self.weight.shape[2:])
        folded_b = self.bias.view(self.bias.shape[0] * self.bias.shape[1])
        out = F.conv2d(input=folded_in, weight=folded_w, bias=folded_b, groups=self.weight.shape[0])
        return out.view(batch, n_out, input_.shape[2], input_.shape[3])
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/__init__.py | Python | import torch
from copy import deepcopy
from facexlib.utils import load_file_from_url
from .retinaface import RetinaFace
def init_detection_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Initialize a RetinaFace detection model with pretrained weights.

    Args:
        model_name (str): 'retinaface_resnet50' or 'retinaface_mobile0.25'.
        half (bool): Use half precision inside RetinaFace.
        device (str): Device to place the model on. Default: 'cuda'.
        model_rootpath (str): Optional root path for the downloaded weights.

    Returns:
        RetinaFace: The model in eval mode on `device`.

    Raises:
        NotImplementedError: If `model_name` is unknown.
    """
    if model_name == 'retinaface_resnet50':
        model = RetinaFace(network_name='resnet50', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth'
    elif model_name == 'retinaface_mobile0.25':
        model = RetinaFace(network_name='mobile0.25', half=half, device=device)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_mobilenet0.25_Final.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)

    # TODO: clean pretrained model
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    # strip the 'module.' prefix left over from DataParallel training
    for key, value in deepcopy(load_net).items():
        if key.startswith('module.'):
            load_net[key[7:]] = value
            load_net.pop(key)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    return model.to(device)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/align_trans.py | Python | import cv2
import numpy as np
from .matlab_cp2tform import get_similarity_transform_for_cv2
# reference facial points, a list of coordinates (x,y)
# 5-point reference layout defined on the default 96x112 crop; presumably
# (left eye, right eye, nose tip, left/right mouth corner) — confirm against
# the upstream alignment convention.
REFERENCE_FACIAL_POINTS = [[30.29459953, 51.69630051], [65.53179932, 51.50139999], [48.02519989, 71.73660278],
                           [33.54930115, 92.3655014], [62.72990036, 92.20410156]]
DEFAULT_CROP_SIZE = (96, 112)
class FaceWarpException(Exception):
    """Raised when face-warp reference points or inputs are inconsistent."""

    def __str__(self):
        # Bug fix: the original called `super.__str__(self)` on the builtin
        # `super` type itself, which bypasses Exception.__str__ and renders
        # the repr instead of the exception message.
        return 'In File {}:{}'.format(__file__, super().__str__())
def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):
    """Get the reference 5 facial landmark points for a given crop setting.

    Steps:
        0. Start from the default crop size (96, 112), or a square (112, 112)
           if `default_square`.
        1. Pad the inner crop by `inner_padding_factor` on each side.
        2. Resize to (output_size - outer_padding * 2) and pad by
           `outer_padding` to reach `output_size`.

    Args:
        output_size (tuple | None): (w, h) of the aligned face image.
        inner_padding_factor (float): Padding factor for the inner region,
            must lie in [0, 1].
        outer_padding (tuple): (w_pad, h_pad) added around the resized crop.
        default_square (bool): Use the square (112, 112) default crop.

    Returns:
        numpy.ndarray: 5x2 reference (x, y) coordinates.

    Raises:
        FaceWarpException: If the requested sizes are inconsistent.
    """
    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)
    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)

    # 0) make the inner region a square
    if default_square:
        size_diff = max(tmp_crop_size) - tmp_crop_size
        tmp_5pts += size_diff / 2
        tmp_crop_size += size_diff

    if (output_size and output_size[0] == tmp_crop_size[0] and output_size[1] == tmp_crop_size[1]):
        return tmp_5pts

    if (inner_padding_factor == 0 and outer_padding == (0, 0)):
        if output_size is None:
            return tmp_5pts
        else:
            raise FaceWarpException('No paddings to do, output_size must be None or {}'.format(tmp_crop_size))

    # check output size
    if not (0 <= inner_padding_factor <= 1.0):
        raise FaceWarpException('Not (0 <= inner_padding_factor <= 1.0)')

    if ((inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0) and output_size is None):
        # Bug fix: the original parenthesization called `.astype` on a plain
        # Python float; scale the crop-size array first, then cast.
        output_size = (tmp_crop_size * (1 + inner_padding_factor * 2)).astype(np.int32)
        output_size += np.array(outer_padding)

    if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):
        raise FaceWarpException('Not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1])')

    # 1) pad the inner region according to inner_padding_factor
    if inner_padding_factor > 0:
        size_diff = tmp_crop_size * inner_padding_factor * 2
        tmp_5pts += size_diff / 2
        tmp_crop_size += np.round(size_diff).astype(np.int32)

    # 2) resize the padded inner region; width/height ratios must match
    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2
    if size_bf_outer_pad[0] * tmp_crop_size[1] != size_bf_outer_pad[1] * tmp_crop_size[0]:
        raise FaceWarpException('Must have (output_size - outer_padding)'
                                '= some_scale * (crop_size * (1.0 + inner_padding_factor)')

    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]
    tmp_5pts = tmp_5pts * scale_factor
    tmp_crop_size = size_bf_outer_pad

    # 3) add outer_padding to make output_size
    reference_5point = tmp_5pts + np.array(outer_padding)
    tmp_crop_size = output_size

    return reference_5point
def get_affine_transform_matrix(src_pts, dst_pts):
    """Estimate a 2x3 affine transform matrix mapping src_pts to dst_pts.

    Args:
        src_pts (numpy.ndarray): Source points, shape (K, 2), (x, y) per row.
        dst_pts (numpy.ndarray): Destination points, shape (K, 2).

    Returns:
        numpy.ndarray: 2x3 float32 affine matrix (cv2.warpAffine layout).
        Falls back to the identity transform when the system is
        rank-deficient below 2.
    """
    tfm = np.float32([[1, 0, 0], [0, 1, 0]])
    n_pts = src_pts.shape[0]
    ones = np.ones((n_pts, 1), src_pts.dtype)
    src_pts_ = np.hstack([src_pts, ones])
    dst_pts_ = np.hstack([dst_pts, ones])

    # Fix: pass rcond=None explicitly — the implicit default raised a numpy
    # FutureWarning and its meaning changed across numpy versions.
    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)

    if rank == 3:
        # Full affine solution (rotation/scale/shear + translation).
        tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])
    elif rank == 2:
        # Degenerate case: linear part only, no translation.
        tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])

    return tfm
def warp_and_crop_face(src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type='similarity'):
    """Warp and crop a face image so its landmarks match the reference points.

    Args:
        src_img (numpy.ndarray): Input image (H, W, C).
        facial_pts: K detected landmarks — a list of (x, y) pairs or a
            Kx2 / 2xK array.
        reference_pts: Reference landmarks in the same layout, or None to
            use the default reference facial points for `crop_size`.
        crop_size (tuple): (w, h) of the output face image.
        align_type (str): 'cv2_affine' (affine from the first 3 points),
            'affine' (affine from all points), or anything else for a
            similarity transform (the default). Fix: the default used to be
            the misspelled 'smilarity'; any unrecognized value still falls
            through to the similarity branch, so behavior is unchanged.

    Returns:
        numpy.ndarray: The cropped face image of size (h, w).

    Raises:
        FaceWarpException: If point shapes are invalid or mismatched.
    """
    if reference_pts is None:
        if crop_size[0] == 96 and crop_size[1] == 112:
            reference_pts = REFERENCE_FACIAL_POINTS
        else:
            default_square = False
            inner_padding_factor = 0
            outer_padding = (0, 0)
            output_size = crop_size

            reference_pts = get_reference_facial_points(output_size, inner_padding_factor, outer_padding,
                                                        default_square)

    ref_pts = np.float32(reference_pts)
    ref_pts_shp = ref_pts.shape
    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:
        raise FaceWarpException('reference_pts.shape must be (K,2) or (2,K) and K>2')

    # Accept both Kx2 and 2xK layouts.
    if ref_pts_shp[0] == 2:
        ref_pts = ref_pts.T

    src_pts = np.float32(facial_pts)
    src_pts_shp = src_pts.shape
    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:
        raise FaceWarpException('facial_pts.shape must be (K,2) or (2,K) and K>2')

    if src_pts_shp[0] == 2:
        src_pts = src_pts.T

    if src_pts.shape != ref_pts.shape:
        raise FaceWarpException('facial_pts and reference_pts must have the same shape')

    if align_type == 'cv2_affine':
        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])
    elif align_type == 'affine':
        tfm = get_affine_transform_matrix(src_pts, ref_pts)
    else:
        tfm = get_similarity_transform_for_cv2(src_pts, ref_pts)

    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]))

    return face_img
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/matlab_cp2tform.py | Python | import numpy as np
from numpy.linalg import inv, lstsq
from numpy.linalg import matrix_rank as rank
from numpy.linalg import norm
class MatlabCp2tormException(Exception):
    """Raised when cp2tform-style transform estimation fails."""

    def __str__(self):
        # Bug fix: the original called `super.__str__(self)` on the builtin
        # `super` type itself, which bypasses Exception.__str__ and renders
        # the repr instead of the exception message.
        return 'In File {}:{}'.format(__file__, super().__str__())
def tformfwd(trans, uv):
    """Apply affine transform `trans` to points `uv`.

    Args:
        trans (numpy.ndarray): 3x3 transform matrix.
        uv (numpy.ndarray): Kx2 points, one (x, y) pair per row.

    Returns:
        numpy.ndarray: Kx2 transformed (x, y) points.
    """
    # Lift to homogeneous coordinates, apply, then drop the last column.
    homogeneous = np.hstack((uv, np.ones((uv.shape[0], 1))))
    projected = np.dot(homogeneous, trans)
    return projected[:, 0:-1]
def tforminv(trans, uv):
    """Apply the inverse of affine transform `trans` to points `uv`.

    Args:
        trans (numpy.ndarray): 3x3 transform matrix.
        uv (numpy.ndarray): Kx2 points, one (x, y) pair per row.

    Returns:
        numpy.ndarray: Kx2 inverse-transformed (x, y) points.
    """
    return tformfwd(inv(trans), uv)
def findNonreflectiveSimilarity(uv, xy, options=None):
    """Solve a non-reflective similarity transform mapping uv to xy.

    Args:
        uv (numpy.ndarray): Kx2 source points.
        xy (numpy.ndarray): Kx2 destination points.
        options (dict | None): Solver options; only 'K' (minimum number of
            unique point pairs, default 2) is used.

    Returns:
        tuple: (T, Tinv) — 3x3 arrays mapping uv->xy and xy->uv respectively
        (applied as row-vector transforms, see `tformfwd`).

    Raises:
        Exception: 'cp2tform:twoUniquePointsReq' if the system is
        rank-deficient (fewer than K unique point pairs).
    """
    # Bug fix: the original unconditionally overwrote `options`, silently
    # ignoring the caller-supplied value; only default it when absent.
    if options is None:
        options = {'K': 2}
    K = options['K']

    M = xy.shape[0]
    x = xy[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    y = xy[:, 1].reshape((-1, 1))  # use reshape to keep a column vector

    # Stack the linear system for the 4 similarity parameters (sc, ss, tx, ty).
    tmp1 = np.hstack((x, y, np.ones((M, 1)), np.zeros((M, 1))))
    tmp2 = np.hstack((y, -x, np.zeros((M, 1)), np.ones((M, 1))))
    X = np.vstack((tmp1, tmp2))

    u = uv[:, 0].reshape((-1, 1))  # use reshape to keep a column vector
    v = uv[:, 1].reshape((-1, 1))  # use reshape to keep a column vector
    U = np.vstack((u, v))

    # We know that X * r = U
    if rank(X) >= 2 * K:
        r, _, _, _ = lstsq(X, U, rcond=-1)
        r = np.squeeze(r)
    else:
        raise Exception('cp2tform:twoUniquePointsReq')

    sc, ss, tx, ty = r[0], r[1], r[2], r[3]

    # Tinv maps xy -> uv; invert it (and clean the last column) for T.
    Tinv = np.array([[sc, -ss, 0], [ss, sc, 0], [tx, ty, 1]])
    T = inv(Tinv)
    T[:, 2] = np.array([0, 0, 1])

    return T, Tinv
def findSimilarity(uv, xy, options=None):
    """Solve a (possibly reflective) similarity transform mapping uv to xy.

    Tries both a non-reflective solution and a reflected one, and returns
    whichever reproduces xy with the smaller error.

    Args:
        uv (numpy.ndarray): Kx2 source points.
        xy (numpy.ndarray): Kx2 destination points.
        options (dict | None): Solver options forwarded to
            `findNonreflectiveSimilarity` (only 'K' is used).

    Returns:
        tuple: (trans, trans_inv) — 3x3 arrays mapping uv->xy and xy->uv.
    """
    # Consistency fix: default `options` only when absent instead of
    # unconditionally overwriting the caller-supplied value.
    if options is None:
        options = {'K': 2}

    # Solve for trans1
    trans1, trans1_inv = findNonreflectiveSimilarity(uv, xy, options)

    # Solve for trans2 by reflecting the xy data across the Y-axis.
    # Bug fix: reflect a copy — the original negated `xy` in place, which
    # corrupted the caller's array and the error comparison below.
    xyR = np.array(xy)
    xyR[:, 0] = -1 * xyR[:, 0]

    trans2r, trans2r_inv = findNonreflectiveSimilarity(uv, xyR, options)

    # manually reflect the tform to undo the reflection done on xyR
    TreflectY = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
    trans2 = np.dot(trans2r, TreflectY)

    # Figure out if trans1 or trans2 is better
    xy1 = tformfwd(trans1, uv)
    norm1 = norm(xy1 - xy)

    xy2 = tformfwd(trans2, uv)
    norm2 = norm(xy2 - xy)

    if norm1 <= norm2:
        return trans1, trans1_inv
    else:
        trans2_inv = inv(trans2)
        return trans2, trans2_inv
def get_similarity_transform(src_pts, dst_pts, reflective=True):
    """Find the similarity transform matrix mapping src_pts to dst_pts.

    The result satisfies [x, y, 1] = [u, v, 1] * trans for (u, v) in
    src_pts and (x, y) in dst_pts.

    Args:
        src_pts (numpy.ndarray): Kx2 source points, (x, y) per row.
        dst_pts (numpy.ndarray): Kx2 destination points, (x, y) per row.
        reflective (bool): Allow a reflective similarity transform.

    Returns:
        tuple: (trans, trans_inv) — 3x3 arrays mapping uv->xy and xy->uv.
    """
    solver = findSimilarity if reflective else findNonreflectiveSimilarity
    return solver(src_pts, dst_pts)
def cvt_tform_mat_for_cv2(trans):
    """Convert a 3x3 transform matrix into the 2x3 form cv2.warpAffine expects.

    Args:
        trans (numpy.ndarray): 3x3 transform matrix mapping uv to xy
            (row-vector convention).

    Returns:
        numpy.ndarray: 2x3 matrix satisfying [x, y].T = cv2_trans * [u, v, 1].T.
    """
    # Drop the homogeneous column and transpose for cv2's layout.
    return trans[:, 0:2].T
def get_similarity_transform_for_cv2(src_pts, dst_pts, reflective=True):
    """Estimate a similarity transform and return it in cv2-ready 2x3 form.

    Combines :func:`get_similarity_transform` with
    :func:`cvt_tform_mat_for_cv2` so the result can be passed straight to
    ``cv2.warpAffine`` (``[x, y].T = cv2_trans * [u, v, 1].T``).

    Args:
        src_pts (np.ndarray): Kx2 source points, one (x, y) pair per row.
        dst_pts (np.ndarray): Kx2 destination points.
        reflective (bool): If True, allow a reflective similarity; otherwise
            use the non-reflective variant only.

    Returns:
        np.ndarray: 2x3 transform matrix from src_pts to dst_pts.
    """
    full_trans, _ = get_similarity_transform(src_pts, dst_pts, reflective)
    return cvt_tform_mat_for_cv2(full_trans)
if __name__ == '__main__':
    # Demo / self-check: estimate the similarity transform between two point
    # sets, then apply it forward and inverse. The expected values (from
    # MATLAB's cp2tform) are recorded in the string below for comparison.
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]
    # In Matlab, run:
    #
    # uv = [u'; v'];
    # xy = [x'; y'];
    # tform_sim=cp2tform(uv,xy,'similarity');
    #
    # trans = tform_sim.tdata.T
    # ans =
    # -0.0764 -1.6190 0
    # 1.6190 -0.0764 0
    # -3.2156 0.0290 1.0000
    # trans_inv = tform_sim.tdata.Tinv
    # ans =
    #
    # -0.0291 0.6163 0
    # -0.6163 -0.0291 0
    # -0.0756 1.9826 1.0000
    # xy_m=tformfwd(tform_sim, u,v)
    #
    # xy_m =
    #
    # -3.2156 0.0290
    # 1.1833 -9.9143
    # 5.0323 2.8853
    # uv_m=tforminv(tform_sim, x,y)
    #
    # uv_m =
    #
    # 0.5698 1.3953
    # 6.0872 2.2733
    # -2.6570 4.3314
    """
    u = [0, 6, -2]
    v = [0, 3, 5]
    x = [-1, 0, 4]
    y = [-1, -10, 4]
    uv = np.array((u, v)).T  # Kx2 source points
    xy = np.array((x, y)).T  # Kx2 destination points
    print('\n--->uv:')
    print(uv)
    print('\n--->xy:')
    print(xy)
    trans, trans_inv = get_similarity_transform(uv, xy)
    print('\n--->trans matrix:')
    print(trans)
    print('\n--->trans_inv matrix:')
    print(trans_inv)
    print('\n---> apply transform to uv')
    print('\nxy_m = uv_augmented * trans')
    # Augment points with a ones column so the 3x3 matrix applies in one dot.
    uv_aug = np.hstack((uv, np.ones((uv.shape[0], 1))))
    xy_m = np.dot(uv_aug, trans)
    print(xy_m)
    print('\nxy_m = tformfwd(trans, uv)')
    xy_m = tformfwd(trans, uv)
    print(xy_m)
    print('\n---> apply inverse transform to xy')
    print('\nuv_m = xy_augmented * trans_inv')
    xy_aug = np.hstack((xy, np.ones((xy.shape[0], 1))))
    uv_m = np.dot(xy_aug, trans_inv)
    print(uv_m)
    print('\nuv_m = tformfwd(trans_inv, xy)')
    uv_m = tformfwd(trans_inv, xy)
    print(uv_m)
    # tforminv(trans, xy) should match applying trans_inv forward.
    uv_m = tforminv(trans, xy)
    print('\nuv_m = tforminv(trans, xy)')
    print(uv_m)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/retinaface.py | Python | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter
from facexlib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
from facexlib.detection.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from facexlib.detection.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
py_cpu_nms)
def generate_config(network_name):
    """Return the RetinaFace configuration dict for a given backbone.

    Args:
        network_name (str): Either ``'mobile0.25'`` or ``'resnet50'``.

    Returns:
        dict: Architecture and training settings for the requested backbone.

    Raises:
        NotImplementedError: If ``network_name`` is not recognized.
    """
    # Settings shared by both backbones.
    common = {
        'min_sizes': [[16, 32], [64, 128], [256, 512]],
        'steps': [8, 16, 32],
        'variance': [0.1, 0.2],
        'clip': False,
        'loc_weight': 2.0,
        'gpu_train': True,
    }
    if network_name == 'mobile0.25':
        specific = {
            'name': 'mobilenet0.25',
            'batch_size': 32,
            'ngpu': 1,
            'epoch': 250,
            'decay1': 190,
            'decay2': 220,
            'image_size': 640,
            'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
            'in_channel': 32,
            'out_channel': 64,
        }
    elif network_name == 'resnet50':
        specific = {
            'name': 'Resnet50',
            'batch_size': 24,
            'ngpu': 4,
            'epoch': 100,
            'decay1': 70,
            'decay2': 90,
            'image_size': 840,
            'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3},
            'in_channel': 256,
            'out_channel': 256,
        }
    else:
        raise NotImplementedError(f'network_name={network_name}')
    return {**common, **specific}
class RetinaFace(nn.Module):
    """RetinaFace face detector (backbone + FPN + SSH + multi-task heads).

    Wraps a MobileNet-0.25 or ResNet-50 backbone and predicts, per anchor:
    a bounding box, a face/background score, and 5 facial landmark points.
    Provides single-image and batched detection helpers plus 5-point face
    alignment against a reference template.
    """

    def __init__(self, network_name='resnet50', half=False, phase='test', device=None):
        # Pick GPU when available unless the caller pins a device explicitly.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
        super(RetinaFace, self).__init__()
        self.half_inference = half
        cfg = generate_config(network_name)
        self.backbone = cfg['name']
        self.model_name = f'retinaface_{network_name}'
        self.cfg = cfg
        self.phase = phase
        # Test-time resize bounds: shorter side scaled towards target_size,
        # longer side capped at max_size (see transform()).
        self.target_size, self.max_size = 1600, 2150
        self.resize, self.scale, self.scale1 = 1., None, None
        # Per-channel BGR mean subtracted from 0-255 inputs before inference.
        self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]], device=self.device)
        self.reference = get_reference_facial_points(default_square=True)
        # Build network.
        backbone = None
        if cfg['name'] == 'mobilenet0.25':
            backbone = MobileNetV1()
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
        elif cfg['name'] == 'Resnet50':
            import torchvision.models as models
            backbone = models.resnet50(pretrained=False)
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])

        in_channels_stage2 = cfg['in_channel']
        # Channel counts of the three backbone feature maps fed to the FPN.
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]

        out_channels = cfg['out_channel']
        self.fpn = FPN(in_channels_list, out_channels)
        # One SSH context module per pyramid level.
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)

        self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])

        self.to(self.device)
        self.eval()
        if self.half_inference:
            self.half()

    def forward(self, inputs):
        """Run backbone + FPN + SSH heads.

        Returns a tuple ``(bbox_regressions, classifications, ldm_regressions)``;
        at test time class logits are converted to softmax probabilities.
        """
        out = self.body(inputs)

        if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
            out = list(out.values())
        # FPN
        fpn = self.fpn(out)

        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]

        # Concatenate per-level head outputs along the anchor dimension.
        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
        tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
        ldm_regressions = (torch.cat(tmp, dim=1))

        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)

        return output

    def __detect_faces(self, inputs):
        # get scale
        height, width = inputs.shape[2:]
        # Scales that map normalized predictions back to pixel coordinates:
        # 4 values for boxes, 10 for the five landmark points.
        self.scale = torch.tensor([width, height, width, height], dtype=torch.float32, device=self.device)
        tmp = [width, height, width, height, width, height, width, height, width, height]
        self.scale1 = torch.tensor(tmp, dtype=torch.float32, device=self.device)

        # forward pass
        inputs = inputs.to(self.device)
        if self.half_inference:
            inputs = inputs.half()
        loc, conf, landmarks = self(inputs)

        # get priorbox (anchors for this exact input resolution)
        priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
        priors = priorbox.forward().to(self.device)

        return loc, conf, landmarks, priors

    # single image detection
    def transform(self, image, use_origin_size):
        """Convert a PIL/BGR image to a float tensor, optionally resizing.

        Returns the (1, C, H, W) tensor and the resize factor applied.
        """
        # convert to opencv format
        if isinstance(image, Image.Image):
            image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
        image = image.astype(np.float32)

        # testing scale
        im_size_min = np.min(image.shape[0:2])
        im_size_max = np.max(image.shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent bigger axis from being more than max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)

        # convert to torch.tensor format
        # image -= (104, 117, 123)
        image = image.transpose(2, 0, 1)
        image = torch.from_numpy(image).unsqueeze(0)

        return image, resize

    def detect_faces(
        self,
        image,
        conf_threshold=0.8,
        nms_threshold=0.4,
        use_origin_size=True,
    ):
        """Detect faces in a single image.

        Args:
            image: PIL.Image or np.ndarray in BGR format.
            conf_threshold (float): minimum face score to keep.
            nms_threshold (float): IoU threshold for NMS.
            use_origin_size (bool): if True, skip test-time resizing.

        Returns:
            np.ndarray of shape [n, 15]: per face, 4 box coords + score
            followed by 10 landmark coordinates (5 points).
        """
        image, self.resize = self.transform(image, use_origin_size)
        image = image.to(self.device)
        if self.half_inference:
            image = image.half()
        image = image - self.mean_tensor

        loc, conf, landmarks, priors = self.__detect_faces(image)

        # Decode normalized predictions into pixel coordinates.
        boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
        boxes = boxes * self.scale / self.resize
        boxes = boxes.cpu().numpy()

        # Column 1 of the softmax output is the face probability.
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]

        landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
        landmarks = landmarks * self.scale1 / self.resize
        landmarks = landmarks.cpu().numpy()

        # ignore low scores
        inds = np.where(scores > conf_threshold)[0]
        boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]

        # sort by descending score
        order = scores.argsort()[::-1]
        boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]

        # do NMS
        bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(bounding_boxes, nms_threshold)
        bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
        # self.t['forward_pass'].toc()
        # print(self.t['forward_pass'].average_time)
        # import sys
        # sys.stdout.flush()

        return np.concatenate((bounding_boxes, landmarks), axis=1)

    def __align_multi(self, image, boxes, landmarks, limit=None):
        # Warp each detected face to the 112x112 reference template using
        # its 5 landmark points.
        if len(boxes) < 1:
            return [], []

        if limit:
            boxes = boxes[:limit]
            landmarks = landmarks[:limit]

        faces = []
        for landmark in landmarks:
            facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]

            warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
            faces.append(warped_face)

        return np.concatenate((boxes, landmarks), axis=1), faces

    def align_multi(self, img, conf_threshold=0.8, limit=None):
        """Detect faces and return (detections, aligned 112x112 face crops)."""

        rlt = self.detect_faces(img, conf_threshold=conf_threshold)
        boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]

        return self.__align_multi(img, boxes, landmarks, limit)

    # batched detection
    def batched_transform(self, frames, use_origin_size):
        """
        Arguments:
            frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
                type=np.float32, BGR format).
            use_origin_size: whether to use origin size.
        """
        from_PIL = True if isinstance(frames[0], Image.Image) else False

        # convert to opencv format
        if from_PIL:
            frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
            frames = np.asarray(frames, dtype=np.float32)

        # testing scale (computed from the first frame; all frames are
        # expected to share the same spatial size)
        im_size_min = np.min(frames[0].shape[0:2])
        im_size_max = np.max(frames[0].shape[0:2])
        resize = float(self.target_size) / float(im_size_min)

        # prevent bigger axis from being more than max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize

        # resize
        if resize != 1:
            if not from_PIL:
                frames = F.interpolate(frames, scale_factor=resize)
            else:
                frames = [
                    cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
                    for frame in frames
                ]

        # convert to torch.tensor format (NHWC -> NCHW)
        if not from_PIL:
            frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
        else:
            frames = frames.transpose((0, 3, 1, 2))
            frames = torch.from_numpy(frames)

        return frames, resize

    def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
        """
        Arguments:
            frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
                type=np.uint8, BGR format).
            conf_threshold: confidence threshold.
            nms_threshold: nms threshold.
            use_origin_size: whether to use origin size.
        Returns:
            final_bounding_boxes: list of np.array ([n_boxes, 5],
                type=np.float32).
            final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
        """
        # self.t['forward_pass'].tic()
        frames, self.resize = self.batched_transform(frames, use_origin_size)
        frames = frames.to(self.device)
        frames = frames - self.mean_tensor

        b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)

        final_bounding_boxes, final_landmarks = [], []

        # decode (priors broadcast across the batch dimension)
        priors = priors.unsqueeze(0)
        b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize
        b_landmarks = batched_decode_landm(b_landmarks, priors, self.cfg['variance']) * self.scale1 / self.resize
        b_conf = b_conf[:, :, 1]

        # index for selection
        b_indice = b_conf > conf_threshold

        # concat boxes with their scores so NMS sees [x1, y1, x2, y2, score]
        b_loc_and_conf = torch.cat((b_loc, b_conf.unsqueeze(-1)), dim=2).float()

        for pred, landm, inds in zip(b_loc_and_conf, b_landmarks, b_indice):

            # ignore low scores
            pred, landm = pred[inds, :], landm[inds, :]
            if pred.shape[0] == 0:
                final_bounding_boxes.append(np.array([], dtype=np.float32))
                final_landmarks.append(np.array([], dtype=np.float32))
                continue

            # sort
            # order = score.argsort(descending=True)
            # box, landm, score = box[order], landm[order], score[order]

            # to CPU
            bounding_boxes, landm = pred.cpu().numpy(), landm.cpu().numpy()

            # NMS
            keep = py_cpu_nms(bounding_boxes, nms_threshold)
            bounding_boxes, landmarks = bounding_boxes[keep, :], landm[keep]

            # append
            final_bounding_boxes.append(bounding_boxes)
            final_landmarks.append(landmarks)
        # self.t['forward_pass'].toc(average=True)
        # self.batch_time += self.t['forward_pass'].diff
        # self.total_frame += len(frames)
        # print(self.batch_time / self.total_frame)

        return final_bounding_boxes, final_landmarks
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/retinaface_net.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
def conv_bn(inp, oup, stride=1, leaky=0):
    """3x3 convolution + batch norm + leaky ReLU."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_bn_no_relu(inp, oup, stride):
    """3x3 convolution + batch norm, with no activation."""
    conv = nn.Conv2d(inp, oup, 3, stride, 1, bias=False)
    bn = nn.BatchNorm2d(oup)
    return nn.Sequential(conv, bn)
def conv_bn1X1(inp, oup, stride, leaky=0):
    """1x1 convolution (channel projection) + batch norm + leaky ReLU."""
    layers = (
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )
    return nn.Sequential(*layers)
def conv_dw(inp, oup, stride, leaky=0.1):
    """Depthwise-separable block: depthwise 3x3 then pointwise 1x1,
    each followed by batch norm + leaky ReLU."""
    depthwise = [
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    pointwise = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*depthwise, *pointwise)
class SSH(nn.Module):
    """Single Stage Headless (SSH) context module.

    Enriches a feature map with multi-scale context: a plain 3x3 branch plus
    stacked-3x3 branches whose receptive fields approximate 5x5 and 7x7
    kernels, concatenated along the channel dimension.
    """

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        assert out_channel % 4 == 0
        # Small channel counts use a leaky slope (matches the conv helpers).
        leaky = 0
        if (out_channel <= 64):
            leaky = 0.1
        # 3x3 branch contributes half the output channels.
        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)

        # Two stacked 3x3 convs emulate a 5x5 receptive field (a quarter).
        self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
        self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

        # One more 3x3 on the 5x5 branch emulates 7x7 (the last quarter).
        self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
        self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

    def forward(self, input):
        # Fuse the three context branches and apply a final ReLU.
        conv3X3 = self.conv3X3(input)

        conv5X5_1 = self.conv5X5_1(input)
        conv5X5 = self.conv5X5_2(conv5X5_1)

        conv7X7_2 = self.conv7X7_2(conv5X5_1)
        conv7X7 = self.conv7x7_3(conv7X7_2)

        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)
        out = F.relu(out)
        return out
class FPN(nn.Module):
    """Three-level Feature Pyramid Network.

    Projects each backbone feature map to a common channel count with 1x1
    convs, then merges top-down via nearest-neighbor upsampling followed by
    3x3 smoothing convs.
    """

    def __init__(self, in_channels_list, out_channels):
        super(FPN, self).__init__()
        # Small channel counts use a leaky slope (matches the conv helpers).
        leaky = 0
        if (out_channels <= 64):
            leaky = 0.1
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)

        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)

    def forward(self, input):
        # `input` is a list of three feature maps, shallowest first.
        # names = list(input.keys())
        # input = list(input.values())

        output1 = self.output1(input[0])
        output2 = self.output2(input[1])
        output3 = self.output3(input[2])

        # Top-down pathway: upsample the deeper level, add, then smooth.
        up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode='nearest')
        output2 = output2 + up3
        output2 = self.merge2(output2)

        up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode='nearest')
        output1 = output1 + up2
        output1 = self.merge1(output1)

        out = [output1, output2, output3]
        return out
class MobileNetV1(nn.Module):
    """Slim MobileNet-V1 backbone (0.25 width) used by RetinaFace.

    The numeric comments after each layer track the growing receptive field.
    Stages 1-3 provide the three feature maps consumed by the FPN; the
    avgpool + fc tail is a classifier head used only for pretraining.
    """

    def __init__(self):
        super(MobileNetV1, self).__init__()
        self.stage1 = nn.Sequential(
            conv_bn(3, 8, 2, leaky=0.1),  # 3
            conv_dw(8, 16, 1),  # 7
            conv_dw(16, 32, 2),  # 11
            conv_dw(32, 32, 1),  # 19
            conv_dw(32, 64, 2),  # 27
            conv_dw(64, 64, 1),  # 43
        )
        self.stage2 = nn.Sequential(
            conv_dw(64, 128, 2),  # 43 + 16 = 59
            conv_dw(128, 128, 1),  # 59 + 32 = 91
            conv_dw(128, 128, 1),  # 91 + 32 = 123
            conv_dw(128, 128, 1),  # 123 + 32 = 155
            conv_dw(128, 128, 1),  # 155 + 32 = 187
            conv_dw(128, 128, 1),  # 187 + 32 = 219
        )
        self.stage3 = nn.Sequential(
            conv_dw(128, 256, 2),  # 219 +3 2 = 241
            conv_dw(256, 256, 1),  # 241 + 64 = 301
        )
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(256, 1000)

    def forward(self, x):
        # Classification path; detection uses the stages via
        # IntermediateLayerGetter instead of calling forward() directly.
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        # x = self.model(x)
        x = x.view(-1, 256)
        x = self.fc(x)
        return x
class ClassHead(nn.Module):
    """Predicts face/background logits for every anchor at each location."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        # Two logits (background, face) per anchor.
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        logits = self.conv1x1(x)
        # (N, C, H, W) -> (N, H, W, C) -> (N, H*W*num_anchors, 2)
        logits = logits.permute(0, 2, 3, 1).contiguous()
        return logits.view(logits.shape[0], -1, 2)
class BboxHead(nn.Module):
    """Predicts 4 bounding-box offsets for every anchor at each location."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        # Four box regression values per anchor.
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        deltas = self.conv1x1(x)
        # (N, C, H, W) -> (N, H, W, C) -> (N, H*W*num_anchors, 4)
        deltas = deltas.permute(0, 2, 3, 1).contiguous()
        return deltas.view(deltas.shape[0], -1, 4)
class LandmarkHead(nn.Module):
    """Predicts 5 landmark points (10 values) per anchor at each location."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        # Five (x, y) landmark offsets per anchor.
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        pts = self.conv1x1(x)
        # (N, C, H, W) -> (N, H, W, C) -> (N, H*W*num_anchors, 10)
        pts = pts.permute(0, 2, 3, 1).contiguous()
        return pts.view(pts.shape[0], -1, 10)
def make_class_head(fpn_num=3, inchannels=64, anchor_num=2):
    """Build one ClassHead per FPN level."""
    return nn.ModuleList(ClassHead(inchannels, anchor_num) for _ in range(fpn_num))
def make_bbox_head(fpn_num=3, inchannels=64, anchor_num=2):
    """Build one BboxHead per FPN level."""
    return nn.ModuleList(BboxHead(inchannels, anchor_num) for _ in range(fpn_num))
def make_landmark_head(fpn_num=3, inchannels=64, anchor_num=2):
    """Build one LandmarkHead per FPN level."""
    return nn.ModuleList(LandmarkHead(inchannels, anchor_num) for _ in range(fpn_num))
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/detection/retinaface_utils.py | Python | import numpy as np
import torch
import torchvision
from itertools import product as product
from math import ceil
class PriorBox(object):
    """Generates anchor (prior) boxes in normalized center-size form.

    For every feature-map level, anchors of the configured ``min_sizes`` are
    placed at each spatial position, with centers spaced by that level's
    stride (``steps``) and all coordinates normalized to [0, 1].
    """

    def __init__(self, cfg, image_size=None, phase='train'):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.clip = cfg['clip']
        self.image_size = image_size  # (height, width)
        # Spatial size of each feature map implied by its stride.
        self.feature_maps = [[ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)] for step in self.steps]
        self.name = 's'

    def forward(self):
        # Returns all anchors as a [num_priors, 4] tensor of (cx, cy, w, h).
        anchors = []
        for k, f in enumerate(self.feature_maps):
            min_sizes = self.min_sizes[k]
            for i, j in product(range(f[0]), range(f[1])):
                for min_size in min_sizes:
                    # Anchor size normalized by image width / height.
                    s_kx = min_size / self.image_size[1]
                    s_ky = min_size / self.image_size[0]
                    # Anchor center at the cell center, normalized to [0, 1].
                    dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
                    dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
                    for cy, cx in product(dense_cy, dense_cx):
                        anchors += [cx, cy, s_kx, s_ky]

        # back to torch land
        output = torch.Tensor(anchors).view(-1, 4)
        if self.clip:
            output.clamp_(max=1, min=0)
        return output
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline.

    Despite the name, this delegates to torchvision's C++ NMS kernel.
    ``dets`` columns are [x1, y1, x2, y2, score]. Returns the kept row
    indices ordered by decreasing score, as a list of 0-dim tensors
    (usable directly for numpy fancy indexing).
    """
    keep = torchvision.ops.nms(
        boxes=torch.Tensor(dets[:, :4]),
        scores=torch.Tensor(dets[:, 4]),
        iou_threshold=thresh,
    )

    return list(keep)
def point_form(boxes):
    """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
    representation for comparison to point form ground truth data.
    Args:
        boxes: (tensor) center-size default boxes from priorbox layers.
    Return:
        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
    """
    half_sizes = boxes[:, 2:] / 2
    corners_min = boxes[:, :2] - half_sizes  # xmin, ymin
    corners_max = boxes[:, :2] + half_sizes  # xmax, ymax
    return torch.cat((corners_min, corners_max), 1)
def center_size(boxes):
    """ Convert point-form boxes to (cx, cy, w, h)
    representation for comparison to center-size form ground truth data.
    Args:
        boxes: (tensor) point_form boxes, Shape: [num_boxes, 4].
    Return:
        boxes: (tensor) Converted cx, cy, w, h form of boxes.
    """
    # Bug fix: torch.cat requires the tensors as ONE sequence argument.
    # Previously the two halves were passed as separate positional args,
    # so the second tensor landed in the `dim` slot and the call raised
    # a TypeError whenever this function was invoked.
    return torch.cat(
        (
            (boxes[:, 2:] + boxes[:, :2]) / 2,  # cx, cy
            boxes[:, 2:] - boxes[:, :2]),  # w, h
        1)
def intersect(box_a, box_b):
    """ We resize both tensors to [A,B,2] without new malloc:
    [A,2] -> [A,1,2] -> [A,B,2]
    [B,2] -> [1,B,2] -> [A,B,2]
    Then we compute the area of intersect between box_a and box_b.
    Args:
      box_a: (tensor) bounding boxes, Shape: [A,4].
      box_b: (tensor) bounding boxes, Shape: [B,4].
    Return:
      (tensor) intersection area, Shape: [A,B].
    """
    n_a, n_b = box_a.size(0), box_b.size(0)
    # Pairwise overlap corners via broadcasting-by-expand.
    upper_right = torch.min(box_a[:, 2:].unsqueeze(1).expand(n_a, n_b, 2),
                            box_b[:, 2:].unsqueeze(0).expand(n_a, n_b, 2))
    lower_left = torch.max(box_a[:, :2].unsqueeze(1).expand(n_a, n_b, 2),
                           box_b[:, :2].unsqueeze(0).expand(n_a, n_b, 2))
    # Negative extents mean no overlap; clamp them to zero area.
    wh = torch.clamp(upper_right - lower_left, min=0)
    return wh[:, :, 0] * wh[:, :, 1]
def jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
    is simply the intersection over union of two boxes. Here we operate on
    ground truth boxes and default boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = intersect(box_a, box_b)
    # Per-box areas, broadcast to the pairwise [A, B] grid.
    raw_area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
    raw_area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])
    area_a = raw_area_a.unsqueeze(1).expand_as(inter)  # [A,B]
    area_b = raw_area_b.unsqueeze(0).expand_as(inter)  # [A,B]
    return inter / (area_a + area_b - inter)  # [A,B]
def matrix_iou(a, b):
    """
    return iou of a and b, numpy version for data augenmentation
    """
    top_left = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    bottom_right = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    # Zero out pairs whose "overlap" corners are inverted (no overlap).
    overlaps = (top_left < bottom_right).all(axis=2)
    inter = np.prod(bottom_right - top_left, axis=2) * overlaps
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    return inter / (area_a[:, np.newaxis] + area_b - inter)
def matrix_iof(a, b):
    """
    return iof of a and b, numpy version for data augenmentation
    """
    top_left = np.maximum(a[:, np.newaxis, :2], b[:, :2])
    bottom_right = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])

    # Zero out pairs whose "overlap" corners are inverted (no overlap).
    overlaps = (top_left < bottom_right).all(axis=2)
    inter = np.prod(bottom_right - top_left, axis=2) * overlaps
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    # Intersection over the *foreground* (first set) area only.
    return inter / np.maximum(area_a[:, np.newaxis], 1)
def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
    """Match each prior box with the ground truth box of the highest jaccard
    overlap, encode the bounding boxes, then return the matched indices
    corresponding to both confidence and location preds.
    Args:
        threshold: (float) The overlap threshold used when matching boxes.
        truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].
        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].
        variances: (tensor) Variances corresponding to each prior coord,
            Shape: [num_priors, 4].
        labels: (tensor) All the class labels for the image, Shape: [num_obj].
        landms: (tensor) Ground truth landms, Shape [num_obj, 10].
        loc_t: (tensor) Tensor to be filled w/ encoded location targets.
        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.
        landm_t: (tensor) Tensor to be filled w/ encoded landm targets.
        idx: (int) current batch index
    Return:
        The matched indices corresponding to 1)location 2)confidence
        3)landm preds.
    """
    # jaccard index
    overlaps = jaccard(truths, point_form(priors))
    # (Bipartite Matching)
    # [1,num_objects] best prior for each ground truth
    best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)

    # ignore hard gt: drop ground truths whose best prior overlaps < 0.2
    valid_gt_idx = best_prior_overlap[:, 0] >= 0.2
    best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]
    if best_prior_idx_filter.shape[0] <= 0:
        # No usable ground truth for this image: mark everything background.
        loc_t[idx] = 0
        conf_t[idx] = 0
        return

    # [1,num_priors] best ground truth for each prior
    best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
    best_truth_idx.squeeze_(0)
    best_truth_overlap.squeeze_(0)
    best_prior_idx.squeeze_(1)
    best_prior_idx_filter.squeeze_(1)
    best_prior_overlap.squeeze_(1)
    # Force-keep each gt's best prior by setting its overlap to 2 (> any IoU).
    best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior
    # TODO refactor: index best_prior_idx with long tensor
    # ensure every gt matches with its prior of max overlap
    for j in range(best_prior_idx.size(0)):  # make each gt's best prior predict that gt
        best_truth_idx[best_prior_idx[j]] = j
    matches = truths[best_truth_idx]  # Shape: [num_priors,4] gt box assigned to each prior
    conf = labels[best_truth_idx]  # Shape: [num_priors] gt label assigned to each prior
    conf[best_truth_overlap < threshold] = 0  # label as background: low-overlap priors become negatives
    loc = encode(matches, priors, variances)

    matches_landm = landms[best_truth_idx]
    landm = encode_landm(matches_landm, priors, variances)
    loc_t[idx] = loc  # [num_priors,4] encoded offsets to learn
    conf_t[idx] = conf  # [num_priors] top class label for each prior
    landm_t[idx] = landm
def encode(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth boxes
    we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 4].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded boxes (tensor), Shape: [num_priors, 4]
    """
    # Center offset, normalized by the prior size and the first variance.
    center_offset = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
    center_offset = center_offset / (variances[0] * priors[:, 2:])
    # Log-scale size ratio, normalized by the second variance.
    size_ratio = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
    size_ratio = torch.log(size_ratio) / variances[1]
    # Targets for smooth_l1_loss: [num_priors, 4].
    return torch.cat([center_offset, size_ratio], 1)
def encode_landm(matched, priors, variances):
    """Encode the variances from the priorbox layers into the ground truth
    landmarks we have matched (based on jaccard overlap) with the prior boxes.
    Args:
        matched: (tensor) Coords of ground truth for each prior in point-form
            Shape: [num_priors, 10].
        priors: (tensor) Prior boxes in center-offset form
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        encoded landm (tensor), Shape: [num_priors, 10]
    """
    num = matched.size(0)
    # View the 10 landmark values as five (x, y) points.
    points = torch.reshape(matched, (num, 5, 2))
    # Broadcast each prior (cx, cy, w, h) across the five landmark slots.
    expanded = torch.cat(
        [priors[:, col].unsqueeze(1).expand(num, 5).unsqueeze(2) for col in range(4)],
        dim=2)
    # Offset from the prior center, normalized by prior size and variance.
    offsets = points[:, :, :2] - expanded[:, :, :2]
    offsets = offsets / (variances[0] * expanded[:, :, 2:])
    # Flatten back to [num_priors, 10] targets for smooth_l1_loss.
    return offsets.reshape(offsets.size(0), -1)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        loc (tensor): location predictions for loc layers,
            Shape: [num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    # Undo the training-time encoding: recover (cx, cy) and (w, h).
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    # Convert center-size to corner (xmin, ymin, xmax, ymax) form.
    mins = centers - sizes / 2
    maxs = mins + sizes
    return torch.cat((mins, maxs), 1)
def decode_landm(pre, priors, variances):
    """Decode landm from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_priors,10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    centers = priors[:, :2]
    scale = variances[0] * priors[:, 2:]
    # Each of the five landmark points is an offset from the prior center.
    points = [centers + pre[:, 2 * k:2 * k + 2] * scale for k in range(5)]
    return torch.cat(points, dim=1)
def batched_decode(b_loc, priors, variances):
    """Decode locations from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        b_loc (tensor): location predictions for loc layers,
            Shape: [num_batches,num_priors,4]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [1,num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded bounding box predictions
    """
    # Batched variant of decode(): priors broadcast over the batch dim.
    centers = priors[:, :, :2] + b_loc[:, :, :2] * variances[0] * priors[:, :, 2:]
    sizes = priors[:, :, 2:] * torch.exp(b_loc[:, :, 2:] * variances[1])
    # Convert center-size to corner (xmin, ymin, xmax, ymax) form.
    mins = centers - sizes / 2
    maxs = mins + sizes
    return torch.cat((mins, maxs), dim=2)
def batched_decode_landm(pre, priors, variances):
    """Decode landm from predictions using priors to undo
    the encoding we did for offset regression at train time.
    Args:
        pre (tensor): landm predictions for loc layers,
            Shape: [num_batches,num_priors,10]
        priors (tensor): Prior boxes in center-offset form.
            Shape: [1,num_priors,4].
        variances: (list[float]) Variances of priorboxes
    Return:
        decoded landm predictions
    """
    centers = priors[:, :, :2]
    scale = variances[0] * priors[:, :, 2:]
    # Each of the five landmark points is an offset from the prior center.
    points = [centers + pre[:, :, 2 * k:2 * k + 2] * scale for k in range(5)]
    return torch.cat(points, dim=2)
def log_sum_exp(x):
    """Utility function for computing log_sum_exp while determining
    This will be used to determine unaveraged confidence loss across
    all examples in a batch.
    Args:
        x (Variable(tensor)): conf_preds from conf layers
    """
    # Subtract the global max before exponentiating for numerical stability.
    peak = x.data.max()
    shifted = torch.exp(x - peak)
    return torch.log(torch.sum(shifted, 1, keepdim=True)) + peak
# Original author: Francisco Massa:
# https://github.com/fmassa/object-detection.torch
# Ported to PyTorch by Max deGroot (02/01/2017)
def nms(boxes, scores, overlap=0.5, top_k=200):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        (keep, count): `keep` holds the indices of the kept boxes with respect
        to num_priors; only the first `count` entries of `keep` are valid.
    """
    # pre-allocate output; unused tail entries stay 0
    keep = torch.Tensor(scores.size(0)).fill_(0).long()
    if boxes.numel() == 0:
        # NOTE(review): the empty case returns only `keep`, not the
        # (keep, count) tuple returned below — confirm callers handle both.
        return keep
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    # scratch buffers for the index_select calls inside the loop
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()
    # keep = torch.Tensor()
    count = 0
    # greedily take the highest-scoring remaining box, then drop all
    # remaining boxes whose IoU with it exceeds `overlap`
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # intersection rectangle: element-wise max/min against the kept box
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        # NOTE(review): these two resizes look dead — `w` and `h` are
        # immediately re-bound to fresh tensors on the next lines.
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w * h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas)
        union = (rem_areas - inter) + area[i]
        IoU = inter / union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
    return keep, count
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/headpose/__init__.py | Python | import torch
from facexlib.utils import load_file_from_url
from .hopenet_arch import HopeNet
def init_headpose_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Initialize a head-pose estimation model with pretrained weights.

    Args:
        model_name (str): Currently only 'hopenet' is supported.
        half (bool): Convert the model to half precision. Default: False.
        device (str): Device to place the model on. Default: 'cuda'.
        model_rootpath (str, optional): Root path to save downloaded weights.

    Returns:
        The model in eval mode, moved to `device`.

    Raises:
        NotImplementedError: If `model_name` is not supported.
    """
    if model_name == 'hopenet':
        model = HopeNet('resnet', [3, 4, 6, 3], 66)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/headpose_hopenet.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)['params']
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    if half:
        # previously the `half` flag was accepted but silently ignored
        model = model.half()
    return model
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/headpose/hopenet_arch.py | Python | import torch
import torch.nn as nn
import torchvision
class HopeNet(nn.Module):
    """HopeNet with 3 output layers for yaw, pitch and roll.

    Predicts Euler angles by binning (a softmax over `num_bins` bins of
    3 degrees each) followed by regression with the expected value.
    """

    def __init__(self, block, layers, num_bins):
        super(HopeNet, self).__init__()
        if block == 'resnet':
            # torchvision ResNet Bottleneck block (expansion = 4)
            block = torchvision.models.resnet.Bottleneck
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        # three independent classification heads, one per Euler angle
        self.fc_yaw = nn.Linear(512 * block.expansion, num_bins)
        self.fc_pitch = nn.Linear(512 * block.expansion, num_bins)
        self.fc_roll = nn.Linear(512 * block.expansion, num_bins)
        # bin indices used to take the expectation over bin probabilities
        self.idx_tensor = torch.arange(66).float()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first one may downsample via `stride`."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    @staticmethod
    def softmax_temperature(tensor, temperature):
        """Softmax over dim 1 with a temperature divisor."""
        result = torch.exp(tensor / temperature)
        result = torch.div(result, torch.sum(result, 1).unsqueeze(1).expand_as(result))
        return result

    def bin2degree(self, predict):
        """Convert per-bin logits to degrees: expectation over 66 bins of 3 degrees, shifted to [-99, 99]."""
        predict = self.softmax_temperature(predict, 1)
        return torch.sum(predict * self.idx_tensor.type_as(predict), 1) * 3 - 99

    def forward(self, x):
        """Return (yaw, pitch, roll) in degrees for a batch of face crops."""
        x = self.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        pre_yaw = self.fc_yaw(x)
        pre_pitch = self.fc_pitch(x)
        pre_roll = self.fc_roll(x)
        yaw = self.bin2degree(pre_yaw)
        pitch = self.bin2degree(pre_pitch)
        roll = self.bin2degree(pre_roll)
        return yaw, pitch, roll
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/matting/__init__.py | Python | import torch
from copy import deepcopy
from facexlib.utils import load_file_from_url
from .modnet import MODNet
def init_matting_model(model_name='modnet', half=False, device='cuda', model_rootpath=None):
    """Initialize a portrait matting model with pretrained weights.

    Args:
        model_name (str): Currently only 'modnet' is supported. Default: 'modnet'.
        half (bool): Convert the model to half precision. Default: False.
        device (str): Device to place the model on. Default: 'cuda'.
        model_rootpath (str, optional): Root path to save downloaded weights.

    Returns:
        The model in eval mode, moved to `device`.

    Raises:
        NotImplementedError: If `model_name` is not supported.
    """
    if model_name == 'modnet':
        model = MODNet(backbone_pretrained=False)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/matting_modnet_portrait.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    # TODO: clean pretrained model
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    # remove unnecessary 'module.' (left over from DataParallel checkpoints)
    for k, v in deepcopy(load_net).items():
        if k.startswith('module.'):
            load_net[k[7:]] = v
            load_net.pop(k)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    if half:
        # previously the `half` flag was accepted but silently ignored
        model = model.half()
    return model
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/matting/backbone.py | Python | import os
import torch
import torch.nn as nn
from .mobilenetv2 import MobileNetV2
class BaseBackbone(nn.Module):
    """Superclass of replaceable backbone models for semantic estimation.

    Subclasses must populate `self.model` and `self.enc_channels`, and
    implement `forward` and `load_pretrained_ckpt`.
    """

    def __init__(self, in_channels):
        super(BaseBackbone, self).__init__()
        self.in_channels = in_channels

        # to be filled in by subclasses
        self.model = None
        self.enc_channels = []

    def forward(self, x):
        raise NotImplementedError

    def load_pretrained_ckpt(self):
        raise NotImplementedError
class MobileNetV2Backbone(BaseBackbone):
    """MobileNetV2 backbone exposing multi-scale encoder features.

    `forward` returns features at 1/2, 1/4, 1/8, 1/16 and 1/32 of the input
    resolution, tapped after fixed indices of `MobileNetV2.features`.
    """

    def __init__(self, in_channels):
        super(MobileNetV2Backbone, self).__init__(in_channels)

        self.model = MobileNetV2(self.in_channels, alpha=1.0, expansion=6, num_classes=None)
        # channel counts of [enc2x, enc4x, enc8x, enc16x, enc32x]
        self.enc_channels = [16, 24, 32, 96, 1280]

    def forward(self, x):
        """Return [enc2x, enc4x, enc8x, enc16x, enc32x] feature maps."""
        # x = reduce(lambda x, n: self.model.features[n](x), list(range(0, 2)), x)
        x = self.model.features[0](x)
        x = self.model.features[1](x)
        enc2x = x

        # x = reduce(lambda x, n: self.model.features[n](x), list(range(2, 4)), x)
        x = self.model.features[2](x)
        x = self.model.features[3](x)
        enc4x = x

        # x = reduce(lambda x, n: self.model.features[n](x), list(range(4, 7)), x)
        x = self.model.features[4](x)
        x = self.model.features[5](x)
        x = self.model.features[6](x)
        enc8x = x

        # x = reduce(lambda x, n: self.model.features[n](x), list(range(7, 14)), x)
        x = self.model.features[7](x)
        x = self.model.features[8](x)
        x = self.model.features[9](x)
        x = self.model.features[10](x)
        x = self.model.features[11](x)
        x = self.model.features[12](x)
        x = self.model.features[13](x)
        enc16x = x

        # x = reduce(lambda x, n: self.model.features[n](x), list(range(14, 19)), x)
        x = self.model.features[14](x)
        x = self.model.features[15](x)
        x = self.model.features[16](x)
        x = self.model.features[17](x)
        x = self.model.features[18](x)
        enc32x = x
        return [enc2x, enc4x, enc8x, enc16x, enc32x]

    def load_pretrained_ckpt(self):
        """Load a human-segmentation pretrained checkpoint from a fixed local path."""
        # the pre-trained model is provided by https://github.com/thuyngch/Human-Segmentation-PyTorch
        ckpt_path = './pretrained/mobilenetv2_human_seg.ckpt'
        if not os.path.exists(ckpt_path):
            print('cannot find the pretrained mobilenetv2 backbone')
            exit()

        ckpt = torch.load(ckpt_path)
        self.model.load_state_dict(ckpt)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/matting/mobilenetv2.py | Python | """ This file is adapted from https://github.com/thuyngch/Human-Segmentation-PyTorch"""
import math
import torch
from torch import nn
# ------------------------------------------------------------------------------
# Useful functions
# ------------------------------------------------------------------------------
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def conv_bn(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> ReLU6 block."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_1x1_bn(inp, oup):
    """1x1 conv (no bias) -> BatchNorm -> ReLU6 block (channel projection)."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
# ------------------------------------------------------------------------------
# Class of Inverted Residual block
# ------------------------------------------------------------------------------
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block (expand -> depthwise -> project).

    A residual connection is added only when stride == 1 and the input and
    output channel counts match.
    """

    def __init__(self, inp, oup, stride, expansion, dilation=1):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = round(inp * expansion)
        self.use_res_connect = self.stride == 1 and inp == oup

        if expansion == 1:
            # no pointwise expansion: depthwise conv straight on the input
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, dilation=dilation, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:
            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, dilation=dilation, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU6(inplace=True),
                # pw-linear (no activation after the projection)
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)
# ------------------------------------------------------------------------------
# Class of MobileNetV2
# ------------------------------------------------------------------------------
class MobileNetV2(nn.Module):
    """MobileNetV2 feature extractor / classifier.

    Args:
        in_channels (int): number of input image channels.
        alpha (float): width multiplier applied to channel counts.
        expansion (int): expansion factor of the inverted residual blocks.
        num_classes (int | None): if None, no classifier head is built and
            `forward` returns the final feature map instead of logits.
    """

    def __init__(self, in_channels, alpha=1.0, expansion=6, num_classes=1000):
        super(MobileNetV2, self).__init__()
        self.in_channels = in_channels
        self.num_classes = num_classes
        input_channel = 32
        last_channel = 1280
        # t: expansion factor, c: output channels, n: repeats, s: stride of first repeat
        interverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [expansion, 24, 2, 2],
            [expansion, 32, 3, 2],
            [expansion, 64, 4, 2],
            [expansion, 96, 3, 1],
            [expansion, 160, 3, 2],
            [expansion, 320, 1, 1],
        ]

        # building first layer
        input_channel = _make_divisible(input_channel * alpha, 8)
        self.last_channel = _make_divisible(last_channel * alpha, 8) if alpha > 1.0 else last_channel
        self.features = [conv_bn(self.in_channels, input_channel, 2)]

        # building inverted residual blocks
        for t, c, n, s in interverted_residual_setting:
            output_channel = _make_divisible(int(c * alpha), 8)
            for i in range(n):
                if i == 0:
                    # only the first block of each group downsamples
                    self.features.append(InvertedResidual(input_channel, output_channel, s, expansion=t))
                else:
                    self.features.append(InvertedResidual(input_channel, output_channel, 1, expansion=t))
                input_channel = output_channel

        # building last several layers
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))

        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)

        # building classifier
        if self.num_classes is not None:
            self.classifier = nn.Sequential(
                nn.Dropout(0.2),
                nn.Linear(self.last_channel, num_classes),
            )

        # Initialize weights
        self._init_weights()

    def forward(self, x):
        # The explicit stage indices below mirror the fixed taps that
        # MobileNetV2Backbone relies on for intermediate features.
        # Stage1
        x = self.features[0](x)
        x = self.features[1](x)
        # Stage2
        x = self.features[2](x)
        x = self.features[3](x)
        # Stage3
        x = self.features[4](x)
        x = self.features[5](x)
        x = self.features[6](x)
        # Stage4
        x = self.features[7](x)
        x = self.features[8](x)
        x = self.features[9](x)
        x = self.features[10](x)
        x = self.features[11](x)
        x = self.features[12](x)
        x = self.features[13](x)
        # Stage5
        x = self.features[14](x)
        x = self.features[15](x)
        x = self.features[16](x)
        x = self.features[17](x)
        x = self.features[18](x)

        # Classification
        if self.num_classes is not None:
            # global average pool then linear head
            x = x.mean(dim=(2, 3))
            x = self.classifier(x)

        # Output
        return x

    def _load_pretrained_model(self, pretrained_file):
        """Load weights from `pretrained_file`, skipping keys absent from this model."""
        pretrain_dict = torch.load(pretrained_file, map_location='cpu')
        model_dict = {}
        state_dict = self.state_dict()
        print('[MobileNetV2] Loading pretrained model...')
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
            else:
                print(k, 'is ignored')
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)

    def _init_weights(self):
        """He-style init for convs, constant init for norms, small normal for linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/matting/modnet.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from .backbone import MobileNetV2Backbone
# ------------------------------------------------------------------------------
# MODNet Basic Modules
# ------------------------------------------------------------------------------
class IBNorm(nn.Module):
    """Combine Instance Norm and Batch Norm into one layer.

    The first half of the channels goes through BatchNorm, the remaining
    channels through InstanceNorm; the two results are concatenated.
    """

    def __init__(self, in_channels):
        super(IBNorm, self).__init__()
        in_channels = in_channels
        self.bnorm_channels = int(in_channels / 2)
        self.inorm_channels = in_channels - self.bnorm_channels

        self.bnorm = nn.BatchNorm2d(self.bnorm_channels, affine=True)
        self.inorm = nn.InstanceNorm2d(self.inorm_channels, affine=False)

    def forward(self, x):
        # contiguous() because channel slicing produces non-contiguous views
        bn_x = self.bnorm(x[:, :self.bnorm_channels, ...].contiguous())
        in_x = self.inorm(x[:, self.bnorm_channels:, ...].contiguous())
        return torch.cat((bn_x, in_x), 1)
class Conv2dIBNormRelu(nn.Module):
    """Convolution + IBNorm + ReLU, with the norm and activation optional.

    Args:
        with_ibn (bool): append an IBNorm layer after the conv.
        with_relu (bool): append a ReLU after the (optional) norm.
        Remaining arguments are forwarded to `nn.Conv2d`.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 with_ibn=True,
                 with_relu=True):
        super(Conv2dIBNormRelu, self).__init__()

        layers = [
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=bias)
        ]

        if with_ibn:
            layers.append(IBNorm(out_channels))

        if with_relu:
            layers.append(nn.ReLU(inplace=True))

        self.layers = nn.Sequential(*layers)

    def forward(self, x):
        return self.layers(x)
class SEBlock(nn.Module):
    """SE Block proposed in https://arxiv.org/pdf/1709.01507.pdf

    Squeeze (global average pool) then excite (two-layer bottleneck MLP with
    sigmoid) to produce per-channel attention weights.
    """

    def __init__(self, in_channels, out_channels, reduction=1):
        super(SEBlock, self).__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, int(in_channels // reduction), bias=False), nn.ReLU(inplace=True),
            nn.Linear(int(in_channels // reduction), out_channels, bias=False), nn.Sigmoid())

    def forward(self, x):
        b, c, _, _ = x.size()
        w = self.pool(x).view(b, c)
        w = self.fc(w).view(b, c, 1, 1)
        # scale each channel of x by its attention weight
        return x * w.expand_as(x)
# ------------------------------------------------------------------------------
# MODNet Branches
# ------------------------------------------------------------------------------
class LRBranch(nn.Module):
    """Low Resolution Branch of MODNet.

    Runs the backbone, refines the deepest feature with an SE block and two
    upsampling convs, and (during training only) predicts a coarse semantic map.
    """

    def __init__(self, backbone):
        super(LRBranch, self).__init__()

        enc_channels = backbone.enc_channels

        self.backbone = backbone
        self.se_block = SEBlock(enc_channels[4], enc_channels[4], reduction=4)
        self.conv_lr16x = Conv2dIBNormRelu(enc_channels[4], enc_channels[3], 5, stride=1, padding=2)
        self.conv_lr8x = Conv2dIBNormRelu(enc_channels[3], enc_channels[2], 5, stride=1, padding=2)
        self.conv_lr = Conv2dIBNormRelu(
            enc_channels[2], 1, kernel_size=3, stride=2, padding=1, with_ibn=False, with_relu=False)

    def forward(self, img, inference):
        """Return (pred_semantic or None, lr8x feature, [enc2x, enc4x]).

        `pred_semantic` is only computed when `inference` is False (training).
        """
        enc_features = self.backbone.forward(img)
        enc2x, enc4x, enc32x = enc_features[0], enc_features[1], enc_features[4]

        enc32x = self.se_block(enc32x)
        # upsample 1/32 -> 1/16 -> 1/8 with a refinement conv at each step
        lr16x = F.interpolate(enc32x, scale_factor=2, mode='bilinear', align_corners=False)
        lr16x = self.conv_lr16x(lr16x)
        lr8x = F.interpolate(lr16x, scale_factor=2, mode='bilinear', align_corners=False)
        lr8x = self.conv_lr8x(lr8x)

        pred_semantic = None
        if not inference:
            lr = self.conv_lr(lr8x)
            pred_semantic = torch.sigmoid(lr)

        return pred_semantic, lr8x, [enc2x, enc4x]
class HRBranch(nn.Module):
    """High Resolution Branch of MODNet.

    Fuses the image (at 1/2 and 1/4 scale) with shallow encoder features and
    the low-resolution branch output to produce a detail feature; during
    training it also predicts a full-resolution detail map.
    """

    def __init__(self, hr_channels, enc_channels):
        super(HRBranch, self).__init__()

        self.tohr_enc2x = Conv2dIBNormRelu(enc_channels[0], hr_channels, 1, stride=1, padding=0)
        self.conv_enc2x = Conv2dIBNormRelu(hr_channels + 3, hr_channels, 3, stride=2, padding=1)

        self.tohr_enc4x = Conv2dIBNormRelu(enc_channels[1], hr_channels, 1, stride=1, padding=0)
        self.conv_enc4x = Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1)

        self.conv_hr4x = nn.Sequential(
            Conv2dIBNormRelu(3 * hr_channels + 3, 2 * hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1),
        )

        self.conv_hr2x = nn.Sequential(
            Conv2dIBNormRelu(2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
        )

        self.conv_hr = nn.Sequential(
            Conv2dIBNormRelu(hr_channels + 3, hr_channels, 3, stride=1, padding=1),
            Conv2dIBNormRelu(hr_channels, 1, kernel_size=1, stride=1, padding=0, with_ibn=False, with_relu=False),
        )

    def forward(self, img, enc2x, enc4x, lr8x, inference):
        """Return (pred_detail or None, hr2x feature at 1/2 resolution)."""
        img2x = F.interpolate(img, scale_factor=1 / 2, mode='bilinear', align_corners=False)
        img4x = F.interpolate(img, scale_factor=1 / 4, mode='bilinear', align_corners=False)

        enc2x = self.tohr_enc2x(enc2x)
        hr4x = self.conv_enc2x(torch.cat((img2x, enc2x), dim=1))

        enc4x = self.tohr_enc4x(enc4x)
        hr4x = self.conv_enc4x(torch.cat((hr4x, enc4x), dim=1))

        # bring in the low-resolution branch output at 1/4 scale
        lr4x = F.interpolate(lr8x, scale_factor=2, mode='bilinear', align_corners=False)
        hr4x = self.conv_hr4x(torch.cat((hr4x, lr4x, img4x), dim=1))

        hr2x = F.interpolate(hr4x, scale_factor=2, mode='bilinear', align_corners=False)
        hr2x = self.conv_hr2x(torch.cat((hr2x, enc2x), dim=1))

        pred_detail = None
        if not inference:
            hr = F.interpolate(hr2x, scale_factor=2, mode='bilinear', align_corners=False)
            hr = self.conv_hr(torch.cat((hr, img), dim=1))
            pred_detail = torch.sigmoid(hr)

        return pred_detail, hr2x
class FusionBranch(nn.Module):
    """Fusion Branch of MODNet.

    Combines the low-resolution semantic feature and the high-resolution
    detail feature (plus the input image) into the final alpha matte.
    """

    def __init__(self, hr_channels, enc_channels):
        super(FusionBranch, self).__init__()
        self.conv_lr4x = Conv2dIBNormRelu(enc_channels[2], hr_channels, 5, stride=1, padding=2)

        self.conv_f2x = Conv2dIBNormRelu(2 * hr_channels, hr_channels, 3, stride=1, padding=1)
        self.conv_f = nn.Sequential(
            Conv2dIBNormRelu(hr_channels + 3, int(hr_channels / 2), 3, stride=1, padding=1),
            Conv2dIBNormRelu(int(hr_channels / 2), 1, 1, stride=1, padding=0, with_ibn=False, with_relu=False),
        )

    def forward(self, img, lr8x, hr2x):
        """Return the predicted matte at full image resolution, values in (0, 1)."""
        # upsample the LR feature 1/8 -> 1/4 -> 1/2 to meet hr2x
        lr4x = F.interpolate(lr8x, scale_factor=2, mode='bilinear', align_corners=False)
        lr4x = self.conv_lr4x(lr4x)
        lr2x = F.interpolate(lr4x, scale_factor=2, mode='bilinear', align_corners=False)

        f2x = self.conv_f2x(torch.cat((lr2x, hr2x), dim=1))
        f = F.interpolate(f2x, scale_factor=2, mode='bilinear', align_corners=False)
        f = self.conv_f(torch.cat((f, img), dim=1))
        pred_matte = torch.sigmoid(f)

        return pred_matte
# ------------------------------------------------------------------------------
# MODNet
# ------------------------------------------------------------------------------
class MODNet(nn.Module):
    """Architecture of MODNet: trimap-free portrait matting.

    Composed of a low-resolution semantic branch, a high-resolution detail
    branch, and a fusion branch producing the final matte.
    """

    def __init__(self, in_channels=3, hr_channels=32, backbone_pretrained=True):
        super(MODNet, self).__init__()

        self.in_channels = in_channels
        self.hr_channels = hr_channels
        self.backbone_pretrained = backbone_pretrained

        self.backbone = MobileNetV2Backbone(self.in_channels)

        self.lr_branch = LRBranch(self.backbone)
        self.hr_branch = HRBranch(self.hr_channels, self.backbone.enc_channels)
        self.f_branch = FusionBranch(self.hr_channels, self.backbone.enc_channels)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                self._init_conv(m)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.InstanceNorm2d):
                self._init_norm(m)

        if self.backbone_pretrained:
            self.backbone.load_pretrained_ckpt()

    def forward(self, img, inference):
        """Return (pred_semantic, pred_detail, pred_matte).

        The first two are None when `inference` is True.
        """
        pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(img, inference)
        pred_detail, hr2x = self.hr_branch(img, enc2x, enc4x, lr8x, inference)
        pred_matte = self.f_branch(img, lr8x, hr2x)

        return pred_semantic, pred_detail, pred_matte

    def freeze_norm(self):
        """Put all BatchNorm/InstanceNorm layers into eval mode."""
        norm_types = [nn.BatchNorm2d, nn.InstanceNorm2d]
        for m in self.modules():
            for n in norm_types:
                if isinstance(m, n):
                    m.eval()
                    # NOTE(review): `continue` only skips to the next norm type;
                    # `break` was likely intended, though the effect is the same
                    # since a module matches at most one of these types.
                    continue

    def _init_conv(self, conv):
        nn.init.kaiming_uniform_(conv.weight, a=0, mode='fan_in', nonlinearity='relu')
        if conv.bias is not None:
            nn.init.constant_(conv.bias, 0)

    def _init_norm(self, norm):
        if norm.weight is not None:
            nn.init.constant_(norm.weight, 1)
            nn.init.constant_(norm.bias, 0)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/parsing/__init__.py | Python | import torch
from facexlib.utils import load_file_from_url
from .bisenet import BiSeNet
from .parsenet import ParseNet
def init_parsing_model(model_name='bisenet', half=False, device='cuda', model_rootpath=None):
    """Initialize a face parsing model with pretrained weights.

    Args:
        model_name (str): 'bisenet' or 'parsenet'. Default: 'bisenet'.
        half (bool): Convert the model to half precision. Default: False.
        device (str): Device to place the model on. Default: 'cuda'.
        model_rootpath (str, optional): Root path to save downloaded weights.

    Returns:
        The model in eval mode, moved to `device`.

    Raises:
        NotImplementedError: If `model_name` is not supported.
    """
    if model_name == 'bisenet':
        model = BiSeNet(num_class=19)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.0/parsing_bisenet.pth'
    elif model_name == 'parsenet':
        model = ParseNet(in_size=512, out_size=512, parsing_ch=19)
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    load_net = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(load_net, strict=True)
    model.eval()
    model = model.to(device)
    if half:
        # previously the `half` flag was accepted but silently ignored
        model = model.half()
    return model
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/parsing/bisenet.py | Python | import torch
import torch.nn as nn
import torch.nn.functional as F
from .resnet import ResNet18
class ConvBNReLU(nn.Module):
    """Conv2d (no bias) -> BatchNorm -> ReLU building block."""

    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_chan, out_chan, kernel_size=ks, stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm2d(out_chan)

    def forward(self, x):
        x = self.conv(x)
        x = F.relu(self.bn(x))
        return x
class BiSeNetOutput(nn.Module):
    """Segmentation head: refine with a 3x3 conv, then project to class logits."""

    def __init__(self, in_chan, mid_chan, num_class):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False)

    def forward(self, x):
        # returns (logits, intermediate feature) so callers can reuse the feature
        feat = self.conv(x)
        out = self.conv_out(feat)
        return out, feat
class AttentionRefinementModule(nn.Module):
    """Refine a feature map with channel attention from global average pooling."""

    def __init__(self, in_chan, out_chan):
        super(AttentionRefinementModule, self).__init__()
        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)
        self.bn_atten = nn.BatchNorm2d(out_chan)
        self.sigmoid_atten = nn.Sigmoid()

    def forward(self, x):
        feat = self.conv(x)
        # squeeze spatial dims to 1x1, then compute per-channel gates
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv_atten(atten)
        atten = self.bn_atten(atten)
        atten = self.sigmoid_atten(atten)
        out = torch.mul(feat, atten)
        return out
class ContextPath(nn.Module):
    """BiSeNet context path: ResNet18 features refined top-down with attention.

    Produces features at 1/8 (twice) and 1/16 of the input resolution.
    """

    def __init__(self):
        super(ContextPath, self).__init__()
        self.resnet = ResNet18()
        self.arm16 = AttentionRefinementModule(256, 128)
        self.arm32 = AttentionRefinementModule(512, 128)
        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)
        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)

    def forward(self, x):
        feat8, feat16, feat32 = self.resnet(x)
        h8, w8 = feat8.size()[2:]
        h16, w16 = feat16.size()[2:]
        h32, w32 = feat32.size()[2:]

        # global context: average-pool the deepest feature to 1x1, project,
        # then broadcast back to the 1/32 resolution
        avg = F.avg_pool2d(feat32, feat32.size()[2:])
        avg = self.conv_avg(avg)
        avg_up = F.interpolate(avg, (h32, w32), mode='nearest')

        feat32_arm = self.arm32(feat32)
        feat32_sum = feat32_arm + avg_up
        feat32_up = F.interpolate(feat32_sum, (h16, w16), mode='nearest')
        feat32_up = self.conv_head32(feat32_up)

        feat16_arm = self.arm16(feat16)
        feat16_sum = feat16_arm + feat32_up
        feat16_up = F.interpolate(feat16_sum, (h8, w8), mode='nearest')
        feat16_up = self.conv_head16(feat16_up)

        return feat8, feat16_up, feat32_up  # x8, x8, x16
class FeatureFusionModule(nn.Module):
    """Fuse spatial and context features with a channel-attention residual."""

    def __init__(self, in_chan, out_chan):
        super(FeatureFusionModule, self).__init__()
        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
        # bottleneck MLP (as 1x1 convs) producing per-channel gates
        self.conv1 = nn.Conv2d(out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, fsp, fcp):
        # concatenate spatial-path and context-path features, then re-weight
        fcat = torch.cat([fsp, fcp], dim=1)
        feat = self.convblk(fcat)
        atten = F.avg_pool2d(feat, feat.size()[2:])
        atten = self.conv1(atten)
        atten = self.relu(atten)
        atten = self.conv2(atten)
        atten = self.sigmoid(atten)
        feat_atten = torch.mul(feat, atten)
        feat_out = feat_atten + feat
        return feat_out
class BiSeNet(nn.Module):
    """BiSeNet for face parsing with `num_class` output classes.

    The spatial path is replaced by the ResNet res3b1 (1/8) feature; two
    auxiliary heads on the context-path features aid training.
    """

    def __init__(self, num_class):
        super(BiSeNet, self).__init__()
        self.cp = ContextPath()
        self.ffm = FeatureFusionModule(256, 256)
        self.conv_out = BiSeNetOutput(256, 256, num_class)
        self.conv_out16 = BiSeNetOutput(128, 64, num_class)
        self.conv_out32 = BiSeNetOutput(128, 64, num_class)

    def forward(self, x, return_feat=False):
        """Return (out, out16, out32) logits at full resolution; with
        `return_feat=True` also return the corresponding upsampled features."""
        h, w = x.size()[2:]
        feat_res8, feat_cp8, feat_cp16 = self.cp(x)  # return res3b1 feature
        feat_sp = feat_res8  # replace spatial path feature with res3b1 feature
        feat_fuse = self.ffm(feat_sp, feat_cp8)

        out, feat = self.conv_out(feat_fuse)
        out16, feat16 = self.conv_out16(feat_cp8)
        out32, feat32 = self.conv_out32(feat_cp16)

        out = F.interpolate(out, (h, w), mode='bilinear', align_corners=True)
        out16 = F.interpolate(out16, (h, w), mode='bilinear', align_corners=True)
        out32 = F.interpolate(out32, (h, w), mode='bilinear', align_corners=True)

        if return_feat:
            feat = F.interpolate(feat, (h, w), mode='bilinear', align_corners=True)
            feat16 = F.interpolate(feat16, (h, w), mode='bilinear', align_corners=True)
            feat32 = F.interpolate(feat32, (h, w), mode='bilinear', align_corners=True)
            return out, out16, out32, feat, feat16, feat32
        else:
            return out, out16, out32
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/parsing/parsenet.py | Python | """Modified from https://github.com/chaofengc/PSFRGAN
"""
import numpy as np
import torch.nn as nn
from torch.nn import functional as F
class NormLayer(nn.Module):
    """Normalization Layers.

    Args:
        channels: input channels, for batch norm and instance norm.
        normalize_shape: input shape without batch size, for layer norm.
        norm_type (str): one of 'bn', 'in', 'gn', 'pixel', 'layer', 'none'.

    Raises:
        ValueError: if `norm_type` is not supported.
    """

    def __init__(self, channels, normalize_shape=None, norm_type='bn'):
        super(NormLayer, self).__init__()
        norm_type = norm_type.lower()
        self.norm_type = norm_type
        if norm_type == 'bn':
            self.norm = nn.BatchNorm2d(channels, affine=True)
        elif norm_type == 'in':
            self.norm = nn.InstanceNorm2d(channels, affine=False)
        elif norm_type == 'gn':
            self.norm = nn.GroupNorm(32, channels, affine=True)
        elif norm_type == 'pixel':
            self.norm = lambda x: F.normalize(x, p=2, dim=1)
        elif norm_type == 'layer':
            self.norm = nn.LayerNorm(normalize_shape)
        elif norm_type == 'none':
            self.norm = lambda x: x * 1.0
        else:
            # previously `assert 1 == 0`, which is silently stripped under `python -O`
            raise ValueError(f'Norm type {norm_type} not support.')

    def forward(self, x, ref=None):
        # the 'spade' branch is kept for forward compatibility; the constructor
        # above does not currently build a spade norm
        if self.norm_type == 'spade':
            return self.norm(x, ref)
        else:
            return self.norm(x)
class ReluLayer(nn.Module):
    """Relu Layer.

    Args:
        channels: input channels (used only by PReLU).
        relu_type (str): type of relu layer, candidates are
            - ReLU
            - LeakyReLU: default relu slope 0.2
            - PRelu
            - SELU
            - none: direct pass

    Raises:
        ValueError: if `relu_type` is not supported.
    """

    def __init__(self, channels, relu_type='relu'):
        super(ReluLayer, self).__init__()
        relu_type = relu_type.lower()
        if relu_type == 'relu':
            self.func = nn.ReLU(True)
        elif relu_type == 'leakyrelu':
            self.func = nn.LeakyReLU(0.2, inplace=True)
        elif relu_type == 'prelu':
            self.func = nn.PReLU(channels)
        elif relu_type == 'selu':
            self.func = nn.SELU(True)
        elif relu_type == 'none':
            self.func = lambda x: x * 1.0
        else:
            # previously `assert 1 == 0`, which is silently stripped under `python -O`
            raise ValueError(f'Relu type {relu_type} not support.')

    def forward(self, x):
        return self.func(x)
class ConvLayer(nn.Module):
    """Conv block with optional up/down scaling, reflection padding, norm and relu.

    Args:
        scale (str): 'none', 'down' (stride-2 conv) or 'up' (2x nearest upsample
            before the conv).
        norm_type (str): passed to NormLayer; bias is disabled for 'bn'.
        relu_type (str): passed to ReluLayer.
        use_pad (bool): apply reflection padding so spatial size is preserved
            (for stride 1).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 scale='none',
                 norm_type='none',
                 relu_type='none',
                 use_pad=True,
                 bias=True):
        super(ConvLayer, self).__init__()
        self.use_pad = use_pad
        self.norm_type = norm_type
        if norm_type in ['bn']:
            # BatchNorm has its own bias term, so the conv bias is redundant
            bias = False

        stride = 2 if scale == 'down' else 1

        self.scale_func = lambda x: x
        if scale == 'up':
            self.scale_func = lambda x: nn.functional.interpolate(x, scale_factor=2, mode='nearest')

        # pad so that a stride-1 conv keeps the spatial size
        self.reflection_pad = nn.ReflectionPad2d(int(np.ceil((kernel_size - 1.) / 2)))
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, bias=bias)

        self.relu = ReluLayer(out_channels, relu_type)
        self.norm = NormLayer(out_channels, norm_type=norm_type)

    def forward(self, x):
        out = self.scale_func(x)
        if self.use_pad:
            out = self.reflection_pad(out)
        out = self.conv2d(out)
        out = self.norm(out)
        out = self.relu(out)
        return out
class ResidualBlock(nn.Module):
    """
    Residual block recommended in: http://torch.ch/blog/2016/02/04/resnets.html

    Supports optional up/down scaling; the shortcut uses a plain ConvLayer
    (no norm/relu) whenever the shape or channel count changes.
    """

    def __init__(self, c_in, c_out, relu_type='prelu', norm_type='bn', scale='none'):
        super(ResidualBlock, self).__init__()

        if scale == 'none' and c_in == c_out:
            self.shortcut_func = lambda x: x
        else:
            self.shortcut_func = ConvLayer(c_in, c_out, 3, scale)

        # place the scaling on the first or second conv depending on direction
        scale_config_dict = {'down': ['none', 'down'], 'up': ['up', 'none'], 'none': ['none', 'none']}
        scale_conf = scale_config_dict[scale]

        self.conv1 = ConvLayer(c_in, c_out, 3, scale_conf[0], norm_type=norm_type, relu_type=relu_type)
        self.conv2 = ConvLayer(c_out, c_out, 3, scale_conf[1], norm_type=norm_type, relu_type='none')

    def forward(self, x):
        identity = self.shortcut_func(x)

        res = self.conv1(x)
        res = self.conv2(res)
        return identity + res
class ParseNet(nn.Module):
    """Encoder-body-decoder face parsing network.

    Returns both a parsing mask with `parsing_ch` classes and a coarse RGB image.

    Args:
        in_size (int): Input image size. Default: 128.
        out_size (int): Output size. Default: 128.
        min_feat_size (int): Smallest feature-map size at the bottleneck. Default: 32.
        base_ch (int): Channel number of the first conv. Default: 64.
        parsing_ch (int): Number of parsing classes. Default: 19.
        res_depth (int): Number of residual blocks in the body. Default: 10.
        relu_type (str): Activation type. Default: 'LeakyReLU'.
        norm_type (str): Normalization type. Default: 'bn'.
        ch_range (list[int]): [min, max] channel clamp range; read-only here. Default: [32, 256].
    """

    def __init__(self,
                 in_size=128,
                 out_size=128,
                 min_feat_size=32,
                 base_ch=64,
                 parsing_ch=19,
                 res_depth=10,
                 relu_type='LeakyReLU',
                 norm_type='bn',
                 ch_range=[32, 256]):
        super().__init__()
        self.res_depth = res_depth
        act_args = {'norm_type': norm_type, 'relu_type': relu_type}
        min_ch, max_ch = ch_range

        ch_clip = lambda x: max(min_ch, min(x, max_ch))  # noqa: E731
        min_feat_size = min(in_size, min_feat_size)

        # Number of stride-2 encoder stages / 2x decoder stages.
        down_steps = int(np.log2(in_size // min_feat_size))
        up_steps = int(np.log2(out_size // min_feat_size))

        # =============== define encoder-body-decoder ====================
        self.encoder = []
        # NOTE(review): the positional 1 lands on ConvLayer's `scale` parameter;
        # harmless since any value other than 'down'/'up' behaves like 'none'.
        self.encoder.append(ConvLayer(3, base_ch, 3, 1))
        head_ch = base_ch
        for i in range(down_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch * 2)
            self.encoder.append(ResidualBlock(cin, cout, scale='down', **act_args))
            head_ch = head_ch * 2

        self.body = []
        for i in range(res_depth):
            self.body.append(ResidualBlock(ch_clip(head_ch), ch_clip(head_ch), **act_args))

        self.decoder = []
        for i in range(up_steps):
            cin, cout = ch_clip(head_ch), ch_clip(head_ch // 2)
            self.decoder.append(ResidualBlock(cin, cout, scale='up', **act_args))
            head_ch = head_ch // 2

        self.encoder = nn.Sequential(*self.encoder)
        self.body = nn.Sequential(*self.body)
        self.decoder = nn.Sequential(*self.decoder)
        self.out_img_conv = ConvLayer(ch_clip(head_ch), 3)
        self.out_mask_conv = ConvLayer(ch_clip(head_ch), parsing_ch)

    def forward(self, x):
        feat = self.encoder(x)
        # Global residual around the body.
        x = feat + self.body(feat)
        x = self.decoder(x)
        out_img = self.out_img_conv(x)
        out_mask = self.out_mask_conv(x)
        return out_mask, out_img
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/parsing/resnet.py | Python | import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
    """Build a bias-free 3x3 convolution whose padding preserves spatial size."""
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False)
    return conv
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convs with BN and a residual connection."""

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        # A 1x1 projection is needed whenever channels or resolution change.
        needs_projection = (in_chan != out_chan) or (stride != 1)
        if needs_projection:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )
        else:
            self.downsample = None

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(identity + out)
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    """Stack `bnum` BasicBlocks; only the first may change channels/stride."""
    blocks = [BasicBlock(in_chan, out_chan, stride=stride)]
    blocks.extend(BasicBlock(out_chan, out_chan, stride=1) for _ in range(bnum - 1))
    return nn.Sequential(*blocks)
class ResNet18(nn.Module):
    """ResNet-18 backbone that returns features at 1/8, 1/16 and 1/32 scale."""

    def __init__(self):
        super(ResNet18, self).__init__()
        # Stem: stride-2 conv + stride-2 maxpool -> 1/4 resolution.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(self.bn1(x))
        x = self.maxpool(x)

        x = self.layer1(x)
        feat8 = self.layer2(x)  # 1/8
        feat16 = self.layer3(feat8)  # 1/16
        feat32 = self.layer4(feat16)  # 1/32
        return feat8, feat16, feat32
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/recognition/__init__.py | Python | import torch
from facexlib.utils import load_file_from_url
from .arcface_arch import Backbone
def init_recognition_model(model_name, half=False, device='cuda', model_rootpath=None):
    """Initialize a face recognition model and load its pretrained weights.

    Args:
        model_name (str): Currently only 'arcface' is supported.
        half (bool): Unused; kept for interface consistency with the other
            `init_*_model` helpers.
        device (str | torch.device): Device the model is moved to. Default: 'cuda'.
        model_rootpath (str | None): Optional root directory for the weights.

    Returns:
        torch.nn.Module: The model in eval mode on `device`.

    Raises:
        NotImplementedError: If `model_name` is unknown.
    """
    if model_name == 'arcface':
        # Build on CPU; previously this was hard-coded `.to('cuda')`, which
        # ignored the `device` argument and crashed on CPU-only machines.
        model = Backbone(num_layers=50, drop_ratio=0.6, mode='ir_se')
        model_url = 'https://github.com/xinntao/facexlib/releases/download/v0.1.0/recognition_arcface_ir_se50.pth'
    else:
        raise NotImplementedError(f'{model_name} is not implemented.')

    model_path = load_file_from_url(
        url=model_url, model_dir='facexlib/weights', progress=True, file_name=None, save_dir=model_rootpath)
    # map_location keeps loading working on machines without the saving device.
    model.load_state_dict(torch.load(model_path, map_location='cpu'), strict=True)
    model.eval()
    model = model.to(device)
    return model
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/recognition/arcface_arch.py | Python | import torch
from collections import namedtuple
from torch.nn import (AdaptiveAvgPool2d, BatchNorm1d, BatchNorm2d, Conv2d, Dropout, Linear, MaxPool2d, Module, PReLU,
ReLU, Sequential, Sigmoid)
# Original Arcface Model
class Flatten(Module):
    """Flatten every non-batch dimension into a single dimension."""

    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, -1)
def l2_norm(input, axis=1):
    """Normalize `input` to unit L2 norm along `axis`."""
    denom = torch.norm(input, 2, axis, True)
    normalized = torch.div(input, denom)
    return normalized
class SEModule(Module):
    """Squeeze-and-Excitation: rescale channels by a learned gating vector."""

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        # Squeeze to per-channel statistics, excite through the bottleneck MLP,
        # then rescale the input channel-wise.
        gate = self.avg_pool(x)
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate
class bottleneck_IR(Module):
    """Improved-residual (IR) unit used by the ArcFace backbone."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        if in_channel == depth:
            # Kernel-1 MaxPool only applies the stride (identity when stride=1).
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # 1x1 projection to match channels/resolution.
            self.shortcut_layer = Sequential(Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
        # BN -> 3x3 conv -> PReLU -> strided 3x3 conv -> BN.
        self.res_layer = Sequential(
            BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class bottleneck_IR_SE(Module):
    """IR unit with a Squeeze-and-Excitation module appended to the residual path."""

    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        if in_channel == depth:
            # Kernel-1 MaxPool only applies the stride (identity when stride=1).
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            # 1x1 projection to match channels/resolution.
            self.shortcut_layer = Sequential(Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
        # Same as bottleneck_IR plus channel reweighting (reduction 16).
        self.res_layer = Sequential(
            BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), SEModule(depth, 16))

    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    '''A named tuple describing a ResNet block: (in_channel, depth, stride).'''
def get_block(in_channel, depth, num_units, stride=2):
    """Describe one stage: a strided unit followed by `num_units - 1` unit-stride units."""
    units = [Bottleneck(in_channel, depth, stride)]
    units += [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]
    return units
def get_blocks(num_layers):
    """Return the per-stage block configuration for an IR-ResNet of depth `num_layers`.

    Args:
        num_layers (int): One of 50, 100 or 152.

    Returns:
        list[list[Bottleneck]]: Four stages of block descriptions.

    Raises:
        ValueError: If `num_layers` is unsupported. (Previously the function
            fell through and crashed with UnboundLocalError on `blocks`.)
    """
    if num_layers == 50:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=4),
            get_block(in_channel=128, depth=256, num_units=14),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 100:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=13),
            get_block(in_channel=128, depth=256, num_units=30),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    elif num_layers == 152:
        blocks = [
            get_block(in_channel=64, depth=64, num_units=3),
            get_block(in_channel=64, depth=128, num_units=8),
            get_block(in_channel=128, depth=256, num_units=36),
            get_block(in_channel=256, depth=512, num_units=3)
        ]
    else:
        raise ValueError(f'num_layers should be 50, 100 or 152, but got {num_layers}.')
    return blocks
class Backbone(Module):
    """ArcFace IR / IR-SE backbone producing L2-normalized 512-d embeddings.

    Args:
        num_layers (int): One of 50, 100 or 152.
        drop_ratio (float): Dropout probability in the output head.
        mode (str): 'ir' for plain IR blocks, 'ir_se' for SE-augmented blocks.
    """

    def __init__(self, num_layers, drop_ratio, mode='ir'):
        super(Backbone, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False), BatchNorm2d(64), PReLU(64))
        # 512 * 7 * 7 is the flattened final feature map; this implies a 7x7
        # spatial output (presumably a 112x112 input — confirm with callers).
        self.output_layer = Sequential(
            BatchNorm2d(512), Dropout(drop_ratio), Flatten(), Linear(512 * 7 * 7, 512), BatchNorm1d(512))
        # Flatten the stage descriptions into a single sequential body.
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel, bottleneck.depth, bottleneck.stride))
        self.body = Sequential(*modules)

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        return l2_norm(x)
# MobileFaceNet
class Conv_block(Module):
    """Conv2d -> BatchNorm2d -> PReLU."""

    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(Conv_block, self).__init__()
        self.conv = Conv2d(
            in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)
        self.bn = BatchNorm2d(out_c)
        self.prelu = PReLU(out_c)

    def forward(self, x):
        # conv -> norm -> activation, in one expression.
        return self.prelu(self.bn(self.conv(x)))
class Linear_block(Module):
    """Conv2d -> BatchNorm2d without any activation ('linear' conv block)."""

    def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
        super(Linear_block, self).__init__()
        self.conv = Conv2d(
            in_c, out_channels=out_c, kernel_size=kernel, groups=groups, stride=stride, padding=padding, bias=False)
        self.bn = BatchNorm2d(out_c)

    def forward(self, x):
        return self.bn(self.conv(x))
class Depth_Wise(Module):
    """Pointwise expand -> depthwise conv -> pointwise (linear) project.

    NOTE(review): `groups` doubles as the hidden channel width of the
    expansion, not just the depthwise group count.

    Args:
        residual (bool): Add the input back after projection (shapes must match).
    """

    def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
        super(Depth_Wise, self).__init__()
        # 1x1 expansion to `groups` channels.
        self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
        # Depthwise conv: one filter per channel (groups == channels).
        self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride)
        # 1x1 projection without activation.
        self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
        self.residual = residual

    def forward(self, x):
        if self.residual:
            short_cut = x
        x = self.conv(x)
        x = self.conv_dw(x)
        x = self.project(x)
        if self.residual:
            output = short_cut + x
        else:
            output = x
        return output
class Residual(Module):
    """A chain of `num_block` shape-preserving residual Depth_Wise blocks."""

    def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
        super(Residual, self).__init__()
        blocks = [
            Depth_Wise(c, c, residual=True, kernel=kernel, padding=padding, stride=stride, groups=groups)
            for _ in range(num_block)
        ]
        self.model = Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
class MobileFaceNet(Module):
    """MobileFaceNet producing an L2-normalized `embedding_size`-d face embedding.

    The final 7x7 depthwise conv (`conv_6_dw`) assumes the feature map is 7x7
    at that point — presumably a 112x112 input; confirm with callers.
    """

    def __init__(self, embedding_size):
        super(MobileFaceNet, self).__init__()
        # Stem: stride-2 conv, then depthwise conv at the same resolution.
        self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
        self.conv2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)
        # Alternating stride-2 Depth_Wise downsamplers and residual stages.
        self.conv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128)
        self.conv_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        self.conv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256)
        self.conv_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        self.conv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512)
        self.conv_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
        # Head: 1x1 expansion, global 7x7 depthwise conv, then linear embedding.
        self.conv_6_sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
        self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0))
        self.conv_6_flatten = Flatten()
        self.linear = Linear(512, embedding_size, bias=False)
        self.bn = BatchNorm1d(embedding_size)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2_dw(out)
        out = self.conv_23(out)
        out = self.conv_3(out)
        out = self.conv_34(out)
        out = self.conv_4(out)
        out = self.conv_45(out)
        out = self.conv_5(out)
        out = self.conv_6_sep(out)
        out = self.conv_6_dw(out)
        out = self.conv_6_flatten(out)
        out = self.linear(out)
        out = self.bn(out)
        return l2_norm(out)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/tracking/data_association.py | Python | """
For each detected item, it computes the intersection over union (IOU) w.r.t.
each tracked object. (IOU matrix)
Then, it applies the Hungarian algorithm (via linear_assignment) to assign each
det. item to the best possible tracked item (i.e. to the one with max IOU)
"""
import numpy as np
from numba import jit
from scipy.optimize import linear_sum_assignment as linear_assignment
@jit
def iou(bb_test, bb_gt):
    """Computes IOU between two bboxes in the form [x1,y1,x2,y2]

    Intersection width/height are clamped at 0 so disjoint boxes yield 0.
    """
    xx1 = np.maximum(bb_test[0], bb_gt[0])
    yy1 = np.maximum(bb_test[1], bb_gt[1])
    xx2 = np.minimum(bb_test[2], bb_gt[2])
    yy2 = np.minimum(bb_test[3], bb_gt[3])
    w = np.maximum(0., xx2 - xx1)
    h = np.maximum(0., yy2 - yy1)
    wh = w * h
    # IOU = intersection / (area_a + area_b - intersection).
    o = wh / ((bb_test[2] - bb_test[0]) * (bb_test[3] - bb_test[1]) + (bb_gt[2] - bb_gt[0]) *
              (bb_gt[3] - bb_gt[1]) - wh)
    return (o)
def associate_detections_to_trackers(detections, trackers, iou_threshold=0.25):
    """Assigns detections to tracked object (both represented as bounding boxes)

    Solves a Hungarian assignment on the negated IOU matrix, then discards
    assignments whose IOU falls below `iou_threshold`.

    Returns:
        3 lists of matches, unmatched_detections and unmatched_trackers.
    """
    if len(trackers) == 0:
        return np.empty((0, 2), dtype=int), np.arange(len(detections)), np.empty((0, 5), dtype=int)
    iou_matrix = np.zeros((len(detections), len(trackers)), dtype=np.float32)

    for d, det in enumerate(detections):
        for t, trk in enumerate(trackers):
            iou_matrix[d, t] = iou(det, trk)

    # The linear assignment module tries to minimize the total assignment cost.
    # In our case we pass -iou_matrix as we want to maximise the total IOU
    # between track predictions and the frame detection.
    row_ind, col_ind = linear_assignment(-iou_matrix)

    # Detections/trackers the solver left out entirely.
    unmatched_detections = []
    for d, det in enumerate(detections):
        if d not in row_ind:
            unmatched_detections.append(d)
    unmatched_trackers = []
    for t, trk in enumerate(trackers):
        if t not in col_ind:
            unmatched_trackers.append(t)

    # filter out matched with low IOU
    matches = []
    for row, col in zip(row_ind, col_ind):
        if iou_matrix[row, col] < iou_threshold:
            # Treat a low-IOU assignment as if it had not been matched at all.
            unmatched_detections.append(row)
            unmatched_trackers.append(col)
        else:
            matches.append(np.array([[row, col]]))
    if len(matches) == 0:
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/tracking/kalman_tracker.py | Python | import numpy as np
from filterpy.kalman import KalmanFilter
def convert_bbox_to_z(bbox):
    """Convert [x1,y1,x2,y2] into the measurement z = [cx,cy,s,r]^T,
    where (cx, cy) is the box centre, s its area and r its aspect ratio.
    """
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    centre_x = bbox[0] + width / 2.
    centre_y = bbox[1] + height / 2.
    area = width * height
    ratio = width / float(height)
    return np.array([centre_x, centre_y, area, ratio]).reshape((4, 1))
def convert_x_to_bbox(x, score=None):
    """Convert a centre-form state [cx,cy,s,r,...] back to [x1,y1,x2,y2]
    (top-left / bottom-right), optionally appending `score` as a 5th value.
    """
    w = np.sqrt(x[2] * x[3])
    h = x[2] / w
    half_w = w / 2.
    half_h = h / 2.
    corners = [x[0] - half_w, x[1] - half_h, x[0] + half_w, x[1] + half_h]
    if score is None:
        return np.array(corners).reshape((1, 4))
    return np.array(corners + [score]).reshape((1, 5))
class KalmanBoxTracker(object):
    """This class represents the internal state of individual tracked objects
    observed as bbox.

    doc: https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html
    """
    # Class-level counter used to hand out unique tracker ids.
    count = 0

    def __init__(self, bbox):
        """Initialize a tracker using initial bounding box.
        """
        # define constant velocity model
        # TODO: x: what is the meanning of x[4:7], v?
        self.kf = KalmanFilter(dim_x=7, dim_z=4)
        # F (dim_x, dim_x): state transition matrix
        self.kf.F = np.array([[1, 0, 0, 0, 1, 0, 0], [0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 0, 0,
                                                                             1], [0, 0, 0, 1, 0, 0, 0],
                              [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 0, 1]])
        # H (dim_z, dim_x): measurement function
        self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
                              [0, 0, 0, 1, 0, 0, 0]])
        # R (dim_z, dim_z): measurement uncertainty/noise
        self.kf.R[2:, 2:] *= 10.
        # P (dim_x, dim_x): covariance matrix
        # give high uncertainty to the unobservable initial velocities
        self.kf.P[4:, 4:] *= 1000.
        self.kf.P *= 10.
        # Q (dim_x, dim_x): Process uncertainty/noise
        self.kf.Q[-1, -1] *= 0.01
        self.kf.Q[4:, 4:] *= 0.01
        # x (dim_x, 1): filter state estimate
        self.kf.x[:4] = convert_bbox_to_z(bbox)

        self.time_since_update = 0
        self.id = KalmanBoxTracker.count
        KalmanBoxTracker.count += 1
        self.history = []
        self.hits = 0
        self.hit_streak = 0
        self.age = 0
        # Guards against prediction drift of existing trackers when no face is
        # detected in the frame.
        self.predict_num = 0  # number of consecutive prediction-only updates

        # additional fields
        self.face_attributes = []

    def update(self, bbox):
        """Updates the state vector with observed bbox.
        """
        self.time_since_update = 0
        self.history = []
        self.hits += 1
        self.hit_streak += 1  # consecutive hits
        if bbox != []:
            self.kf.update(convert_bbox_to_z(bbox))
            self.predict_num = 0
        else:
            # No detection this frame: count another prediction-only step.
            self.predict_num += 1

    def predict(self):
        """Advances the state vector and returns the predicted bounding box
        estimate.
        """
        # Zero the area velocity if it would drive the predicted area negative.
        if (self.kf.x[6] + self.kf.x[2]) <= 0:
            self.kf.x[6] *= 0.0
        self.kf.predict()
        self.age += 1
        if self.time_since_update > 0:
            self.hit_streak = 0
        self.time_since_update += 1
        self.history.append(convert_x_to_bbox(self.kf.x))
        return self.history[-1][0]

    def get_state(self):
        """Returns the current bounding box estimate."""
        return convert_x_to_bbox(self.kf.x)[0]
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/tracking/sort.py | Python | import numpy as np
from facexlib.tracking.data_association import associate_detections_to_trackers
from facexlib.tracking.kalman_tracker import KalmanBoxTracker
class SORT(object):
    """SORT: A Simple, Online and Realtime Tracker.
    Ref: https://github.com/abewley/sort
    """

    def __init__(self, max_age=1, min_hits=3, iou_threshold=0.3):
        # max_age: frames a tracker may survive without a matched detection.
        self.max_age = max_age
        self.min_hits = min_hits  # minimum consecutive hits before a track is reported
        # NOTE(review): stored but never passed to
        # associate_detections_to_trackers, which uses its own default (0.25).
        self.iou_threshold = iou_threshold
        self.trackers = []
        self.frame_count = 0

    def update(self, dets, img_size, additional_attr, detect_interval):
        """This method must be called once for each frame even with
        empty detections.
        NOTE:as in practical realtime MOT, the detector doesn't run on every
        single frame.
        Args:
            dets (Numpy array): detections in the format
                [[x0,y0,x1,y1,score], [x0,y0,x1,y1,score], ...]
        Returns:
            a similar array, where the last column is the object ID.
        """
        self.frame_count += 1
        # get predicted locations from existing trackers
        trks = np.zeros((len(self.trackers), 5))
        to_del = []  # To be deleted
        ret = []
        # predict tracker position using Kalman filter
        for t, trk in enumerate(trks):
            pos = self.trackers[t].predict()  # Kalman predict ,very fast ,<1ms
            trk[:] = [pos[0], pos[1], pos[2], pos[3], 0]
            if np.any(np.isnan(pos)):
                to_del.append(t)
        # Drop rows that went NaN, and the corresponding trackers (reverse
        # order keeps remaining indices valid).
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
        for t in reversed(to_del):
            self.trackers.pop(t)

        # NOTE(review): `dets != []` relies on ndarray-vs-list comparison
        # quirks; `len(dets) > 0` would be the explicit form.
        if dets != []:
            matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(  # noqa: E501
                dets, trks)

            # update matched trackers with assigned detections
            for t, trk in enumerate(self.trackers):
                if t not in unmatched_trks:
                    d = matched[np.where(matched[:, 1] == t)[0], 0]
                    trk.update(dets[d, :][0])
                    trk.face_attributes.append(additional_attr[d[0]])

            # create and initialize new trackers for unmatched detections
            for i in unmatched_dets:
                trk = KalmanBoxTracker(dets[i, :])
                trk.face_attributes.append(additional_attr[i])
                print(f'New tracker: {trk.id + 1}.')
                self.trackers.append(trk)

        i = len(self.trackers)
        for trk in reversed(self.trackers):
            if dets == []:
                # No detections at all: register a prediction-only update.
                trk.update([])
            d = trk.get_state()
            # get return tracklet
            # 1) time_since_update < 1: detected
            # 2) i) hit_streak >= min_hits: enough consecutive hits
            #    ii) frame_count <= min_hits: still within the first few frames
            if (trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits):
                ret.append(np.concatenate((d, [trk.id + 1])).reshape(1, -1))  # +1 as MOT benchmark requires positive
            i -= 1
            # remove dead tracklet
            # 1) time_since_update >= max_age: too long since the last real update
            # 2) predict_num: number of consecutive prediction-only frames
            # 3) out of image size
            if (trk.time_since_update >= self.max_age) or (trk.predict_num >= detect_interval) or (
                    d[2] < 0 or d[3] < 0 or d[0] > img_size[1] or d[1] > img_size[0]):
                print(f'Remove tracker: {trk.id + 1}')
                self.trackers.pop(i)

        if len(ret) > 0:
            return np.concatenate(ret)
        else:
            return np.empty((0, 5))
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/utils/__init__.py | Python | from .face_utils import align_crop_face_landmarks, compute_increased_bbox, get_valid_bboxes, paste_face_back
from .misc import img2tensor, load_file_from_url, scandir
__all__ = [
'align_crop_face_landmarks', 'compute_increased_bbox', 'get_valid_bboxes', 'load_file_from_url', 'paste_face_back',
'img2tensor', 'scandir'
]
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/utils/face_restoration_helper.py | Python | import cv2
import numpy as np
import os
import torch
from torchvision.transforms.functional import normalize
from facexlib.detection import init_detection_model
from facexlib.parsing import init_parsing_model
from facexlib.utils.misc import img2tensor, imwrite
def get_largest_face(det_faces, h, w):
    """Return the detected face with the largest visible area, plus its index.

    Coordinates are clamped to the image bounds before computing areas, so
    partially off-screen boxes only count their in-image portion.
    """

    def _clamp(val, upper):
        # Clip a coordinate into the valid range [0, upper].
        return min(max(val, 0), upper)

    face_areas = []
    for box in det_faces:
        visible_w = _clamp(box[2], w) - _clamp(box[0], w)
        visible_h = _clamp(box[3], h) - _clamp(box[1], h)
        face_areas.append(visible_w * visible_h)
    largest_idx = face_areas.index(max(face_areas))
    return det_faces[largest_idx], largest_idx
def get_center_face(det_faces, h=0, w=0, center=None):
    """Return the detected face whose centre is closest to `center`
    (the image centre (w/2, h/2) when `center` is None), plus its index.
    """
    target = np.array(center) if center is not None else np.array([w / 2, h / 2])
    center_dist = []
    for box in det_faces:
        box_center = np.array([(box[0] + box[2]) / 2, (box[1] + box[3]) / 2])
        center_dist.append(np.linalg.norm(box_center - target))
    center_idx = center_dist.index(min(center_dist))
    return det_faces[center_idx], center_idx
class FaceRestoreHelper(object):
"""Helper for the face restoration pipeline (base class)."""
    def __init__(self,
                 upscale_factor,
                 face_size=512,
                 crop_ratio=(1, 1),
                 det_model='retinaface_resnet50',
                 save_ext='png',
                 template_3points=False,
                 pad_blur=False,
                 use_parse=False,
                 device=None,
                 model_rootpath=None):
        """Set up templates, state lists and the detection/parsing models.

        Args:
            upscale_factor (int | float): Output upscale factor for pasting back.
            face_size (int): Side length of the (square) cropped face. Default: 512.
            crop_ratio (tuple[float]): (h, w) crop ratio relative to the square
                face; both entries must be >= 1. Default: (1, 1).
            det_model (str): Detection model name. Default: 'retinaface_resnet50'.
            save_ext (str): Extension used when saving crops. Default: 'png'.
            template_3points (bool): Use a 3-point instead of 5-point template.
            pad_blur (bool): Prepare blur-padded inputs (disables 3-point template).
            use_parse (bool): Use the parsing model when pasting faces back.
            device (torch.device | None): Defaults to cuda if available, else cpu.
            model_rootpath (str | None): Optional root directory for model weights.
        """
        self.template_3points = template_3points  # improve robustness
        self.upscale_factor = upscale_factor
        # the cropped face ratio based on the square face
        self.crop_ratio = crop_ratio  # (h, w)
        assert (self.crop_ratio[0] >= 1 and self.crop_ratio[1] >= 1), 'crop ration only supports >=1'
        # face_size is stored as (w, h) for cv2.warpAffine.
        self.face_size = (int(face_size * self.crop_ratio[1]), int(face_size * self.crop_ratio[0]))

        if self.template_3points:
            self.face_template = np.array([[192, 240], [319, 240], [257, 371]])
        else:
            # standard 5 landmarks for FFHQ faces with 512 x 512
            self.face_template = np.array([[192.98138, 239.94708], [318.90277, 240.1936], [256.63416, 314.01935],
                                           [201.26117, 371.41043], [313.08905, 371.15118]])
        # Rescale the template from its 512-reference to face_size, then shift
        # it to stay centered inside the enlarged crop.
        self.face_template = self.face_template * (face_size / 512.0)
        if self.crop_ratio[0] > 1:
            self.face_template[:, 1] += face_size * (self.crop_ratio[0] - 1) / 2
        if self.crop_ratio[1] > 1:
            self.face_template[:, 0] += face_size * (self.crop_ratio[1] - 1) / 2
        self.save_ext = save_ext
        self.pad_blur = pad_blur
        if self.pad_blur is True:
            self.template_3points = False

        # Per-image state, filled by the processing methods below.
        self.all_landmarks_5 = []
        self.det_faces = []
        self.affine_matrices = []
        self.inverse_affine_matrices = []
        self.cropped_faces = []
        self.restored_faces = []
        self.pad_input_imgs = []

        if device is None:
            self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        else:
            self.device = device

        # init face detection model
        self.face_det = init_detection_model(det_model, half=False, device=self.device, model_rootpath=model_rootpath)
        # init face parsing model
        self.use_parse = use_parse
        self.face_parse = init_parsing_model(model_name='parsenet', device=self.device, model_rootpath=model_rootpath)
    def set_upscale_factor(self, upscale_factor):
        # Allow changing the output upscale factor after construction.
        self.upscale_factor = upscale_factor
    def read_image(self, img):
        """img can be image path or cv2 loaded image.

        Normalizes the input to a 3-channel BGR array stored in self.input_img.
        """
        # self.input_img is Numpy array, (h, w, c), BGR, uint8, [0, 255]
        if isinstance(img, str):
            img = cv2.imread(img)

        if np.max(img) > 256:  # 16-bit image
            # NOTE(review): the result stays float after this scaling (no cast
            # back to uint8) — presumably tolerated downstream; confirm.
            img = img / 65535 * 255
        if len(img.shape) == 2:  # gray image
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 4:  # RGBA image with alpha channel
            # Drop the alpha channel.
            img = img[:, :, 0:3]

        self.input_img = img
    def get_face_landmarks_5(self,
                             only_keep_largest=False,
                             only_center_face=False,
                             resize=None,
                             blur_ratio=0.01,
                             eye_dist_threshold=None):
        """Detect faces and collect their 5-point (or 3-point) landmarks.

        Optionally keeps only the largest / most central face, and when
        `pad_blur` is enabled, prepares blur-padded copies of the input image
        for later cropping.

        Args:
            only_keep_largest (bool): Keep only the largest detected face.
            only_center_face (bool): Keep only the face closest to the image center.
            resize (int | None): If set, run detection on an image whose short
                side equals `resize` (landmarks are scaled back).
            blur_ratio (float): Blur kernel size relative to the crop size.
            eye_dist_threshold (float | None): Drop faces whose eye distance is
                smaller than this (filters side/tiny faces).

        Returns:
            int: Number of kept faces (0 if none detected).
        """
        if resize is None:
            scale = 1
            input_img = self.input_img
        else:
            h, w = self.input_img.shape[0:2]
            scale = min(h, w) / resize
            h, w = int(h / scale), int(w / scale)
            input_img = cv2.resize(self.input_img, (w, h), interpolation=cv2.INTER_LANCZOS4)

        with torch.no_grad():
            bboxes = self.face_det.detect_faces(input_img, 0.97) * scale
        for bbox in bboxes:
            # remove faces with too small eye distance: side faces or too small faces
            eye_dist = np.linalg.norm([bbox[5] - bbox[7], bbox[6] - bbox[8]])
            if eye_dist_threshold is not None and (eye_dist < eye_dist_threshold):
                continue

            # Landmarks are packed after the box+score: (x, y) pairs from index 5.
            if self.template_3points:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 11, 2)])
            else:
                landmark = np.array([[bbox[i], bbox[i + 1]] for i in range(5, 15, 2)])
            self.all_landmarks_5.append(landmark)
            self.det_faces.append(bbox[0:5])
        if len(self.det_faces) == 0:
            return 0
        if only_keep_largest:
            h, w, _ = self.input_img.shape
            self.det_faces, largest_idx = get_largest_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[largest_idx]]
        elif only_center_face:
            h, w, _ = self.input_img.shape
            self.det_faces, center_idx = get_center_face(self.det_faces, h, w)
            self.all_landmarks_5 = [self.all_landmarks_5[center_idx]]

        # pad blurry images
        if self.pad_blur:
            self.pad_input_imgs = []
            for landmarks in self.all_landmarks_5:
                # get landmarks
                eye_left = landmarks[0, :]
                eye_right = landmarks[1, :]
                eye_avg = (eye_left + eye_right) * 0.5
                mouth_avg = (landmarks[3, :] + landmarks[4, :]) * 0.5
                eye_to_eye = eye_right - eye_left
                eye_to_mouth = mouth_avg - eye_avg

                # Get the oriented crop rectangle
                # x: half width of the oriented crop rectangle
                x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
                #  - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
                # norm with the hypotenuse: get the direction
                x /= np.hypot(*x)  # get the hypotenuse of a right triangle
                rect_scale = 1.5
                x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
                # y: half height of the oriented crop rectangle
                y = np.flipud(x) * [-1, 1]

                # c: center
                c = eye_avg + eye_to_mouth * 0.1
                # quad: (left_top, left_bottom, right_bottom, right_top)
                quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
                # qsize: side length of the square
                qsize = np.hypot(*x) * 2
                border = max(int(np.rint(qsize * 0.1)), 3)

                # get pad
                # pad: (width_left, height_top, width_right, height_bottom)
                pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
                       int(np.ceil(max(quad[:, 1]))))
                pad = [
                    max(-pad[0] + border, 1),
                    max(-pad[1] + border, 1),
                    max(pad[2] - self.input_img.shape[0] + border, 1),
                    max(pad[3] - self.input_img.shape[1] + border, 1)
                ]

                if max(pad) > 1:
                    # pad image
                    pad_img = np.pad(self.input_img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
                    # modify landmark coords
                    landmarks[:, 0] += pad[0]
                    landmarks[:, 1] += pad[1]
                    # blur pad images: mask is 1 in the padded border, 0 inside.
                    h, w, _ = pad_img.shape
                    y, x, _ = np.ogrid[:h, :w, :1]
                    mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
                                                       np.float32(w - 1 - x) / pad[2]),
                                      1.0 - np.minimum(np.float32(y) / pad[1],
                                                       np.float32(h - 1 - y) / pad[3]))
                    blur = int(qsize * blur_ratio)
                    if blur % 2 == 0:
                        # cv2 blur kernels must be odd-sized.
                        blur += 1
                    blur_img = cv2.boxFilter(pad_img, 0, ksize=(blur, blur))
                    # blur_img = cv2.GaussianBlur(pad_img, (blur, blur), 0)

                    # Blend the blurred border in, then fade towards the median color.
                    pad_img = pad_img.astype('float32')
                    pad_img += (blur_img - pad_img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
                    pad_img += (np.median(pad_img, axis=(0, 1)) - pad_img) * np.clip(mask, 0.0, 1.0)
                    pad_img = np.clip(pad_img, 0, 255)  # float32, [0, 255]
                    self.pad_input_imgs.append(pad_img)
                else:
                    self.pad_input_imgs.append(np.copy(self.input_img))

        return len(self.all_landmarks_5)
    def align_warp_face(self, save_cropped_path=None, border_mode='constant'):
        """Align and warp faces with face template.

        Estimates one similarity transform per landmark set, warps the
        (optionally blur-padded) input to `self.face_size`, and stores crops in
        `self.cropped_faces` and matrices in `self.affine_matrices`.

        Args:
            save_cropped_path (str | None): If given, each crop is saved as
                `{path}_{idx:02d}.{save_ext}`.
            border_mode (str): 'constant' | 'reflect101' | 'reflect'.
        """
        if self.pad_blur:
            assert len(self.pad_input_imgs) == len(
                self.all_landmarks_5), f'Mismatched samples: {len(self.pad_input_imgs)} and {len(self.all_landmarks_5)}'
        for idx, landmark in enumerate(self.all_landmarks_5):
            # use 5 landmarks to get affine matrix
            # use cv2.LMEDS method for the equivalence to skimage transform
            # ref: https://blog.csdn.net/yichxi/article/details/115827338
            affine_matrix = cv2.estimateAffinePartial2D(landmark, self.face_template, method=cv2.LMEDS)[0]
            self.affine_matrices.append(affine_matrix)
            # warp and crop faces
            if border_mode == 'constant':
                border_mode = cv2.BORDER_CONSTANT
            elif border_mode == 'reflect101':
                border_mode = cv2.BORDER_REFLECT101
            elif border_mode == 'reflect':
                border_mode = cv2.BORDER_REFLECT
            if self.pad_blur:
                input_img = self.pad_input_imgs[idx]
            else:
                input_img = self.input_img
            cropped_face = cv2.warpAffine(
                input_img, affine_matrix, self.face_size, borderMode=border_mode, borderValue=(135, 133, 132))  # gray
            self.cropped_faces.append(cropped_face)
            # save the cropped face
            if save_cropped_path is not None:
                path = os.path.splitext(save_cropped_path)[0]
                save_path = f'{path}_{idx:02d}.{self.save_ext}'
                imwrite(cropped_face, save_path)
    def get_inverse_affine(self, save_inverse_affine_path=None):
        """Get inverse affine matrix.

        The inverse is scaled by `upscale_factor` so that it maps a restored
        face crop back onto the upscaled input image. Matrices are appended to
        `self.inverse_affine_matrices` and optionally saved as
        `{path}_{idx:02d}.pth`.
        """
        for idx, affine_matrix in enumerate(self.affine_matrices):
            inverse_affine = cv2.invertAffineTransform(affine_matrix)
            inverse_affine *= self.upscale_factor
            self.inverse_affine_matrices.append(inverse_affine)
            # save inverse affine matrices
            if save_inverse_affine_path is not None:
                path, _ = os.path.splitext(save_inverse_affine_path)
                save_path = f'{path}_{idx:02d}.pth'
                torch.save(inverse_affine, save_path)
    def add_restored_face(self, face):
        # Queue a restored face crop for later pasting back into the image.
        self.restored_faces.append(face)
    def paste_faces_to_input_image(self, save_path=None, upsample_img=None):
        """Warp every restored face back and blend it into the background.

        Each face in ``self.restored_faces`` is mapped back with its matching
        inverse affine matrix and blended with a soft mask: either derived from
        a face-parsing network (``self.use_parse``) or a square eroded/blurred
        mask.

        Args:
            save_path (str | None): If given, the result is saved as
                ``<path>.<save_ext>``.
            upsample_img (ndarray | None): Background to paste onto; if None,
                the input image resized by ``self.upscale_factor`` is used.

        Returns:
            ndarray: Blended image (uint8, or uint16 when values exceed 256).
        """
        h, w, _ = self.input_img.shape
        h_up, w_up = int(h * self.upscale_factor), int(w * self.upscale_factor)

        if upsample_img is None:
            # simply resize the background
            upsample_img = cv2.resize(self.input_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)
        else:
            upsample_img = cv2.resize(upsample_img, (w_up, h_up), interpolation=cv2.INTER_LANCZOS4)

        assert len(self.restored_faces) == len(
            self.inverse_affine_matrices), ('length of restored_faces and affine_matrices are different.')
        for restored_face, inverse_affine in zip(self.restored_faces, self.inverse_affine_matrices):
            # Add an offset to inverse affine matrix, for more precise back alignment
            if self.upscale_factor > 1:
                extra_offset = 0.5 * self.upscale_factor
            else:
                extra_offset = 0
            inverse_affine[:, 2] += extra_offset
            inv_restored = cv2.warpAffine(restored_face, inverse_affine, (w_up, h_up))

            if self.use_parse:
                # inference: run the face-parsing net on a 512x512 copy
                face_input = cv2.resize(restored_face, (512, 512), interpolation=cv2.INTER_LINEAR)
                face_input = img2tensor(face_input.astype('float32') / 255., bgr2rgb=True, float32=True)
                normalize(face_input, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
                face_input = torch.unsqueeze(face_input, 0).to(self.device)
                with torch.no_grad():
                    out = self.face_parse(face_input)[0]
                out = out.argmax(dim=1).squeeze().cpu().numpy()

                mask = np.zeros(out.shape)
                # 255 keeps face-region classes; 0 drops background and classes
                # 14/16/17/18 (neck/cloth/hair/hat per the BiSeNet label set)
                MASK_COLORMAP = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 255, 0, 0, 0]
                for idx, color in enumerate(MASK_COLORMAP):
                    mask[out == idx] = color
                # blur the mask (twice, to widen the soft edge)
                mask = cv2.GaussianBlur(mask, (101, 101), 11)
                mask = cv2.GaussianBlur(mask, (101, 101), 11)
                # remove the black borders
                thres = 10
                mask[:thres, :] = 0
                mask[-thres:, :] = 0
                mask[:, :thres] = 0
                mask[:, -thres:] = 0
                mask = mask / 255.
                # NOTE(review): cv2.resize expects dsize as (w, h) while
                # shape[:2] is (h, w) — only equivalent for square faces; confirm.
                mask = cv2.resize(mask, restored_face.shape[:2])
                mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up), flags=3)
                inv_soft_mask = mask[:, :, None]
                pasted_face = inv_restored
            else:  # use square parse maps
                mask = np.ones(self.face_size, dtype=np.float32)
                inv_mask = cv2.warpAffine(mask, inverse_affine, (w_up, h_up))
                # remove the black borders
                inv_mask_erosion = cv2.erode(
                    inv_mask, np.ones((int(2 * self.upscale_factor), int(2 * self.upscale_factor)), np.uint8))
                pasted_face = inv_mask_erosion[:, :, None] * inv_restored
                total_face_area = np.sum(inv_mask_erosion)  # // 3
                # compute the fusion edge based on the area of face
                w_edge = int(total_face_area**0.5) // 20
                erosion_radius = w_edge * 2
                inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
                blur_size = w_edge * 2
                inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
            if len(upsample_img.shape) == 2:  # upsample_img is gray image
                upsample_img = upsample_img[:, :, None]
                inv_soft_mask = inv_soft_mask[:, :, None]
            if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4:  # alpha channel
                # keep the alpha channel untouched and blend only the color planes
                alpha = upsample_img[:, :, 3:]
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
                upsample_img = np.concatenate((upsample_img, alpha), axis=2)
            else:
                upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img

        if np.max(upsample_img) > 256:  # 16-bit image
            upsample_img = upsample_img.astype(np.uint16)
        else:
            upsample_img = upsample_img.astype(np.uint8)
        if save_path is not None:
            path = os.path.splitext(save_path)[0]
            save_path = f'{path}.{self.save_ext}'
            imwrite(upsample_img, save_path)
        return upsample_img
def clean_all(self):
self.all_landmarks_5 = []
self.restored_faces = []
self.affine_matrices = []
self.cropped_faces = []
self.inverse_affine_matrices = []
self.det_faces = []
self.pad_input_imgs = []
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/utils/face_utils.py | Python | import cv2
import numpy as np
import torch
def compute_increased_bbox(bbox, increase_area, preserve_aspect=True):
    """Enlarge a bounding box by a relative margin.

    Args:
        bbox (tuple[int]): (left, top, right, bottom).
        increase_area (float): Fraction of the box size added on each side.
        preserve_aspect (bool): If True, the smaller side gets a larger margin
            so the enlarged box tends towards a square-ish aspect.

    Returns:
        tuple[int]: The enlarged (left, top, right, bottom), truncated to int.
    """
    left, top, right, bot = bbox
    w = right - left
    h = bot - top
    if preserve_aspect:
        # balance margins: the shorter side is expanded more
        inc_w = max(increase_area, ((1 + 2 * increase_area) * h - w) / (2 * w))
        inc_h = max(increase_area, ((1 + 2 * increase_area) * w - h) / (2 * h))
    else:
        inc_w = inc_h = increase_area
    return (int(left - inc_w * w), int(top - inc_h * h), int(right + inc_w * w), int(bot + inc_h * h))
def get_valid_bboxes(bboxes, h, w):
    """Clip a bounding box to the image boundaries.

    Args:
        bboxes (sequence): (left, top, right, bottom).
        h (int): Image height.
        w (int): Image width.

    Returns:
        tuple: The clipped (left, top, right, bottom).
    """
    x0, y0, x1, y1 = bboxes[0], bboxes[1], bboxes[2], bboxes[3]
    return (max(x0, 0), max(y0, 0), min(x1, w), min(y1, h))
def align_crop_face_landmarks(img,
                              landmarks,
                              output_size,
                              transform_size=None,
                              enable_padding=True,
                              return_inverse_affine=False,
                              shrink_ratio=(1, 1)):
    """Align and crop face with landmarks.

    The output_size and transform_size are based on width. The height is
    adjusted based on shrink_ratio_h / shrink_ratio_w.

    Modified from:
    https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py

    Args:
        img (Numpy array): Input image.
        landmarks (Numpy array): 5 or 68 or 98 landmarks.
        output_size (int): Output face size.
        transform_size (int): Transform size. Usually four times output_size.
            Default: None (i.e. output_size * 4).
        enable_padding (bool): Whether to pad with a blurred reflection when
            the crop rectangle leaves the image. Default: True.
        return_inverse_affine (bool): Whether to also compute the inverse
            affine matrix mapping the crop back to the original image.
            Default: False.
        shrink_ratio (float | tuple[float] | list[float]): Shrink the whole
            face for (height, width) (crop larger area). Default: (1, 1).

    Returns:
        (Numpy array): Cropped face.
        (Numpy array | None): Inverse affine matrix, or None when
            return_inverse_affine is False.
    """
    lm_type = 'retinaface_5'  # Options: dlib_5, retinaface_5

    if isinstance(shrink_ratio, (float, int)):
        shrink_ratio = (shrink_ratio, shrink_ratio)
    if transform_size is None:
        transform_size = output_size * 4

    # Parse landmarks: reduce any landmark set to (left eye, right eye, mouth)
    lm = np.array(landmarks)
    if lm.shape[0] == 5 and lm_type == 'retinaface_5':
        eye_left = lm[0]
        eye_right = lm[1]
        mouth_avg = (lm[3] + lm[4]) * 0.5
    elif lm.shape[0] == 5 and lm_type == 'dlib_5':
        lm_eye_left = lm[2:4]
        lm_eye_right = lm[0:2]
        eye_left = np.mean(lm_eye_left, axis=0)
        eye_right = np.mean(lm_eye_right, axis=0)
        mouth_avg = lm[4]
    elif lm.shape[0] == 68:
        lm_eye_left = lm[36:42]
        lm_eye_right = lm[42:48]
        eye_left = np.mean(lm_eye_left, axis=0)
        eye_right = np.mean(lm_eye_right, axis=0)
        mouth_avg = (lm[48] + lm[54]) * 0.5
    elif lm.shape[0] == 98:
        lm_eye_left = lm[60:68]
        lm_eye_right = lm[68:76]
        eye_left = np.mean(lm_eye_left, axis=0)
        eye_right = np.mean(lm_eye_right, axis=0)
        mouth_avg = (lm[76] + lm[82]) * 0.5

    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    eye_to_mouth = mouth_avg - eye_avg

    # Get the oriented crop rectangle
    # x: half width of the oriented crop rectangle
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
    # norm with the hypotenuse: get the direction
    x /= np.hypot(*x)  # get the hypotenuse of a right triangle
    rect_scale = 1  # TODO: you can edit it to get larger rect
    x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
    # y: half height of the oriented crop rectangle
    y = np.flipud(x) * [-1, 1]

    x *= shrink_ratio[1]  # width
    y *= shrink_ratio[0]  # height

    # c: center
    c = eye_avg + eye_to_mouth * 0.1
    # quad: (left_top, left_bottom, right_bottom, right_top)
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    # qsize: side length of the square
    qsize = np.hypot(*x) * 2

    quad_ori = np.copy(quad)
    # Shrink, for large face
    # TODO: do we really need shrink
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        h, w = img.shape[0:2]
        rsize = (int(np.rint(float(w) / shrink)), int(np.rint(float(h) / shrink)))
        img = cv2.resize(img, rsize, interpolation=cv2.INTER_AREA)
        quad /= shrink
        qsize /= shrink

    # Crop: cut the image down to the quad (plus a border) to save work
    h, w = img.shape[0:2]
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, w), min(crop[3] + border, h))
    if crop[2] - crop[0] < w or crop[3] - crop[1] < h:
        img = img[crop[1]:crop[3], crop[0]:crop[2], :]
        quad -= crop[0:2]

    # Pad
    # pad: (width_left, height_top, width_right, height_bottom)
    h, w = img.shape[0:2]
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - w + border, 0), max(pad[3] - h + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w = img.shape[0:2]
        y, x, _ = np.ogrid[:h, :w, :1]
        # soft mask: 0 inside the original image area, ramping to 1 at the
        # padded edges, used to fade the reflected padding into blur/median
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
                                           np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1],
                                           np.float32(h - 1 - y) / pad[3]))
        blur = int(qsize * 0.02)
        if blur % 2 == 0:
            blur += 1  # box filter kernel must be odd
        blur_img = cv2.boxFilter(img, 0, ksize=(blur, blur))

        img = img.astype('float32')
        img += (blur_img - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = np.clip(img, 0, 255)  # float32, [0, 255]
        quad += pad[:2]

    # Transform use cv2
    h_ratio = shrink_ratio[0] / shrink_ratio[1]
    dst_h, dst_w = int(transform_size * h_ratio), transform_size
    template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
    # use cv2.LMEDS method for the equivalence to skimage transform
    # ref: https://blog.csdn.net/yichxi/article/details/115827338
    affine_matrix = cv2.estimateAffinePartial2D(quad, template, method=cv2.LMEDS)[0]
    cropped_face = cv2.warpAffine(
        img, affine_matrix, (dst_w, dst_h), borderMode=cv2.BORDER_CONSTANT, borderValue=(135, 133, 132))  # gray

    if output_size < transform_size:
        cropped_face = cv2.resize(
            cropped_face, (output_size, int(output_size * h_ratio)), interpolation=cv2.INTER_LINEAR)

    if return_inverse_affine:
        dst_h, dst_w = int(output_size * h_ratio), output_size
        template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
        # use cv2.LMEDS method for the equivalence to skimage transform
        # ref: https://blog.csdn.net/yichxi/article/details/115827338
        # Bugfix: the destination points previously hard-coded output_size for
        # the bottom-left corner instead of dst_h (identical when h_ratio == 1
        # but wrong for non-square crops); use the template built above.
        affine_matrix = cv2.estimateAffinePartial2D(quad_ori, template, method=cv2.LMEDS)[0]
        inverse_affine = cv2.invertAffineTransform(affine_matrix)
    else:
        inverse_affine = None
    return cropped_face, inverse_affine
def paste_face_back(img, face, inverse_affine):
    """Warp a face patch back into the full image and blend with a soft mask.

    Args:
        img (ndarray): Full image (H, W, 3).
        face (ndarray): Face patch to paste back.
        inverse_affine (ndarray): 2x3 affine mapping face coords -> image coords.

    Returns:
        ndarray: Blended image, float values in [0, 255].
    """
    img_h, img_w = img.shape[0:2]
    face_h, face_w = face.shape[0:2]
    warped_face = cv2.warpAffine(face, inverse_affine, (img_w, img_h))
    warped_mask = cv2.warpAffine(np.ones((face_h, face_w, 3), dtype=np.float32), inverse_affine, (img_w, img_h))
    # erode 2 px to drop the black seam introduced by warping
    mask_eroded = cv2.erode(warped_mask, np.ones((2, 2), np.uint8))
    face_no_border = mask_eroded * warped_face
    # fusion-edge width scales with the face area
    face_area = np.sum(mask_eroded) // 3
    edge = int(face_area**0.5) // 20
    mask_center = cv2.erode(mask_eroded, np.ones((edge * 2, edge * 2), np.uint8))
    soft_mask = cv2.GaussianBlur(mask_center, (edge * 2 + 1, edge * 2 + 1), 0)
    # float32, [0, 255]
    return soft_mask * face_no_border + (1 - soft_mask) * img
if __name__ == '__main__':
    import os

    from facexlib.detection import init_detection_model
    from facexlib.utils.face_restoration_helper import get_largest_face
    from facexlib.visualization import visualize_detection

    img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
    # Bugfix: splitext lives in os.path; os.splitext raises AttributeError.
    img_name = os.path.splitext(os.path.basename(img_path))[0]

    # initialize model
    det_net = init_detection_model('retinaface_resnet50', half=False)
    img_ori = cv2.imread(img_path)
    h, w = img_ori.shape[0:2]
    # if larger than 800, scale it
    scale = max(h / 800, w / 800)
    if scale > 1:
        img = cv2.resize(img_ori, (int(w / scale), int(h / scale)), interpolation=cv2.INTER_LINEAR)
    else:
        # Bugfix: `img` was undefined when no downscaling was needed
        img = img_ori

    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        if scale > 1:
            bboxes *= scale  # the score is incorrect
        bboxes = get_largest_face(bboxes, h, w)[0]
        visualize_detection(img_ori, [bboxes], f'tmp/{img_name}_det.png')

    # entries 5..14 of a retinaface bbox row are the five (x, y) landmarks
    landmarks = np.array([[bboxes[i], bboxes[i + 1]] for i in range(5, 15, 2)])

    cropped_face, inverse_affine = align_crop_face_landmarks(
        img_ori,
        landmarks,
        output_size=512,
        transform_size=None,
        enable_padding=True,
        return_inverse_affine=True,
        shrink_ratio=(1, 1))

    cv2.imwrite(f'tmp/{img_name}_cropeed_face.png', cropped_face)
    img = paste_face_back(img_ori, cropped_face, inverse_affine)
    cv2.imwrite(f'tmp/{img_name}_back.png', img)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/utils/misc.py | Python | import cv2
import os
import os.path as osp
import torch
from torch.hub import download_url_to_file, get_dir
from urllib.parse import urlparse
# Repository root: three levels up from this file (facexlib/utils/misc.py).
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def imwrite(img, file_path, params=None, auto_mkdir=True):
    """Write an image array to disk, optionally creating parent directories.

    Args:
        img (ndarray): Image array to be written.
        file_path (str): Destination image file path.
        params (None | list): Same as opencv's :func:`imwrite` interface.
        auto_mkdir (bool): Create the parent folder of `file_path` when it
            does not exist yet.

    Returns:
        bool: Whether the write succeeded (as reported by cv2.imwrite).
    """
    if auto_mkdir:
        parent = os.path.abspath(os.path.dirname(file_path))
        os.makedirs(parent, exist_ok=True)
    return cv2.imwrite(file_path, img, params)
def img2tensor(imgs, bgr2rgb=True, float32=True):
    """Convert numpy HWC image(s) to torch CHW tensor(s).

    Args:
        imgs (list[ndarray] | ndarray): Input image or list of images.
        bgr2rgb (bool): Convert BGR channel order to RGB (3-channel only).
        float32 (bool): Cast the resulting tensor(s) to float32.

    Returns:
        list[tensor] | tensor: One tensor per input image; a bare tensor when
        a single image was given.
    """

    def _convert(img):
        # only 3-channel images get the colour-order swap
        if img.shape[2] == 3 and bgr2rgb:
            if img.dtype == 'float64':
                # cv2.cvtColor does not accept float64
                img = img.astype('float32')
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        tensor = torch.from_numpy(img.transpose(2, 0, 1))
        return tensor.float() if float32 else tensor

    if isinstance(imgs, list):
        return [_convert(img) for img in imgs]
    return _convert(imgs)
def load_file_from_url(url, model_dir=None, progress=True, file_name=None, save_dir=None):
    """Download a file from a URL into a local cache dir and return its path.

    Ref:https://github.com/1adrianb/face-alignment/blob/master/face_alignment/utils.py

    Args:
        url (str): Source URL.
        model_dir (str | None): Cache subdirectory; defaults to torch hub's
            ``checkpoints`` directory.
        progress (bool): Show a download progress bar.
        file_name (str | None): Override the filename derived from the URL.
        save_dir (str | None): Full save directory; defaults to
            ``ROOT_DIR/model_dir`` (join returns model_dir unchanged when it
            is already absolute).

    Returns:
        str: Absolute path of the cached file.
    """
    if model_dir is None:
        model_dir = os.path.join(get_dir(), 'checkpoints')
    if save_dir is None:
        save_dir = os.path.join(ROOT_DIR, model_dir)
    os.makedirs(save_dir, exist_ok=True)

    filename = file_name if file_name is not None else os.path.basename(urlparse(url).path)
    cached_file = os.path.abspath(os.path.join(save_dir, filename))
    if not os.path.exists(cached_file):
        print(f'Downloading: "{url}" to {cached_file}\n')
        download_url_to_file(url, cached_file, hash_prefix=None, progress=progress)
    return cached_file
def scandir(dir_path, suffix=None, recursive=False, full_path=False):
    """Scan a directory to find the interested files.

    Hidden files (name starting with '.') are skipped.

    Args:
        dir_path (str): Path of the directory.
        suffix (str | tuple(str), optional): File suffix that we are
            interested in. Default: None.
        recursive (bool, optional): If set to True, recursively scan the
            directory. Default: False.
        full_path (bool, optional): If set to True, include the dir_path.
            Default: False.

    Returns:
        A generator for all the interested files with relative paths.
    """
    if (suffix is not None) and not isinstance(suffix, (str, tuple)):
        raise TypeError('"suffix" must be a string or tuple of strings')

    root = dir_path

    def _scandir(dir_path, suffix, recursive):
        for entry in os.scandir(dir_path):
            if entry.name.startswith('.') and entry.is_file():
                # skip hidden files (previously these fell through to the
                # recursion branch and os.scandir(file) raised
                # NotADirectoryError when recursive=True)
                continue
            if entry.is_file():
                return_path = entry.path if full_path else osp.relpath(entry.path, root)
                if suffix is None or return_path.endswith(suffix):
                    yield return_path
            elif recursive and entry.is_dir():
                # Bugfix: only recurse into directories
                yield from _scandir(entry.path, suffix=suffix, recursive=recursive)

    return _scandir(dir_path, suffix=suffix, recursive=recursive)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/visualization/__init__.py | Python | from .vis_alignment import visualize_alignment
from .vis_detection import visualize_detection
from .vis_headpose import visualize_headpose
__all__ = ['visualize_detection', 'visualize_alignment', 'visualize_headpose']
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/visualization/vis_alignment.py | Python | import cv2
import numpy as np
def visualize_alignment(img, landmarks, save_path=None, to_bgr=False):
    """Draw facial landmarks as green dots on a copy of the image.

    Args:
        img (ndarray): Input image.
        landmarks (list): One landmark array per face, each row an (x, y) pair.
        save_path (str | None): If given, the visualization is written to disk.
        to_bgr (bool): Convert the image from RGB to BGR before drawing.
    """
    canvas = np.copy(img)
    h, w = canvas.shape[0:2]
    # dot thickness scales with the image size
    radius = int(max(h, w) / 150)
    if to_bgr:
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
    for face_landmarks in landmarks:
        for point in face_landmarks:
            cv2.circle(canvas, (int(point[0]), int(point[1])), 1, (0, 150, 0), radius)
    # save img
    if save_path is not None:
        cv2.imwrite(save_path, canvas)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/visualization/vis_detection.py | Python | import cv2
import numpy as np
def visualize_detection(img, bboxes_and_landmarks, save_path=None, to_bgr=False):
    """Visualize detection results.

    Args:
        img (Numpy array): Input image. CHW, BGR, [0, 255], uint8.
        bboxes_and_landmarks (iterable): Rows of
            (x0, y0, x1, y1, score, 5 landmark (x, y) pairs).
        save_path (str | None): If given, the visualization is written to disk.
        to_bgr (bool): Convert the image from RGB to BGR before drawing.
    """
    canvas = np.copy(img)
    if to_bgr:
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
    landmark_colors = ((0, 0, 255), (0, 255, 255), (255, 0, 255), (0, 255, 0), (255, 0, 0))
    for box in bboxes_and_landmarks:
        # confidence score (drawn before the coords are truncated to int)
        cv2.putText(canvas, f'{box[4]:.4f}', (int(box[0]), int(box[1] + 12)), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (255, 255, 255))
        # bounding box
        box = list(map(int, box))
        cv2.rectangle(canvas, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
        # five landmarks (for retinaface), one color each
        for i, color in enumerate(landmark_colors):
            cv2.circle(canvas, (box[5 + 2 * i], box[6 + 2 * i]), 1, color, 4)
    # save img
    if save_path is not None:
        cv2.imwrite(save_path, canvas)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
facexlib/visualization/vis_headpose.py | Python | import cv2
import numpy as np
from math import cos, sin
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size=100):
    """Draw the three head-pose axes (X red, Y green, Z blue) on the image.

    Args:
        img (ndarray): Image to draw on (modified in place).
        yaw, pitch, roll (float): Euler angles in degrees.
        tdx, tdy (float | None): Axis origin; image center when None.
        size (int): Axis length in pixels.

    Returns:
        ndarray: The same image with the axes drawn.
    """
    # degrees -> radians; yaw is negated for the image coordinate convention
    p = pitch * np.pi / 180
    y = -yaw * np.pi / 180
    r = roll * np.pi / 180
    if tdx is None or tdy is None:
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2

    # X axis pointing to right, drawn in red
    x1 = tdx + size * (cos(y) * cos(r))
    y1 = tdy + size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y))
    # Y axis pointing downside, drawn in green
    x2 = tdx + size * (-cos(y) * sin(r))
    y2 = tdy + size * (cos(p) * cos(r) - sin(p) * sin(y) * sin(r))
    # Z axis, out of the screen, drawn in blue
    x3 = tdx + size * (sin(y))
    y3 = tdy + size * (-cos(y) * sin(p))

    origin = (int(tdx), int(tdy))
    cv2.line(img, origin, (int(x1), int(y1)), (0, 0, 255), 3)
    cv2.line(img, origin, (int(x2), int(y2)), (0, 255, 0), 3)
    cv2.line(img, origin, (int(x3), int(y3)), (255, 0, 0), 2)
    return img
def draw_pose_cube(img, yaw, pitch, roll, tdx=None, tdy=None, size=150.):
    """draw head pose cube.

    Where (tdx, tdy) is the translation of the face.
    For pose we have [pitch yaw roll tdx tdy tdz scale_factor]

    Args:
        img (ndarray): Image to draw on (modified in place).
        yaw, pitch, roll (float): Euler angles in degrees.
        tdx, tdy (float | None): Face translation; image center when None.
        size (float): Cube edge length in pixels.

    Returns:
        ndarray: The same image with the cube drawn.
    """
    # degrees -> radians; yaw is negated for the image coordinate convention
    p = pitch * np.pi / 180
    y = -yaw * np.pi / 180
    r = roll * np.pi / 180
    if tdx is not None and tdy is not None:
        # anchor the cube so its center sits at (tdx, tdy)
        face_x = tdx - 0.50 * size
        face_y = tdy - 0.50 * size
    else:
        height, width = img.shape[:2]
        face_x = width / 2 - 0.5 * size
        face_y = height / 2 - 0.5 * size

    # 2D projections of the cube's three edge vectors from the corner (face_x, face_y)
    x1 = size * (cos(y) * cos(r)) + face_x
    y1 = size * (cos(p) * sin(r) + cos(r) * sin(p) * sin(y)) + face_y
    x2 = size * (-cos(y) * sin(r)) + face_x
    y2 = size * (cos(p) * cos(r) - sin(p) * sin(y) * sin(r)) + face_y
    x3 = size * (sin(y)) + face_x
    y3 = size * (-cos(y) * sin(p)) + face_y

    # Draw base in red
    cv2.line(img, (int(face_x), int(face_y)), (int(x1), int(y1)), (0, 0, 255), 3)
    cv2.line(img, (int(face_x), int(face_y)), (int(x2), int(y2)), (0, 0, 255), 3)
    cv2.line(img, (int(x2), int(y2)), (int(x2 + x1 - face_x), int(y2 + y1 - face_y)), (0, 0, 255), 3)
    cv2.line(img, (int(x1), int(y1)), (int(x1 + x2 - face_x), int(y1 + y2 - face_y)), (0, 0, 255), 3)
    # Draw pillars in blue
    cv2.line(img, (int(face_x), int(face_y)), (int(x3), int(y3)), (255, 0, 0), 2)
    cv2.line(img, (int(x1), int(y1)), (int(x1 + x3 - face_x), int(y1 + y3 - face_y)), (255, 0, 0), 2)
    cv2.line(img, (int(x2), int(y2)), (int(x2 + x3 - face_x), int(y2 + y3 - face_y)), (255, 0, 0), 2)
    cv2.line(img, (int(x2 + x1 - face_x), int(y2 + y1 - face_y)),
             (int(x3 + x1 + x2 - 2 * face_x), int(y3 + y2 + y1 - 2 * face_y)), (255, 0, 0), 2)
    # Draw top in green
    cv2.line(img, (int(x3 + x1 - face_x), int(y3 + y1 - face_y)),
             (int(x3 + x1 + x2 - 2 * face_x), int(y3 + y2 + y1 - 2 * face_y)), (0, 255, 0), 2)
    cv2.line(img, (int(x2 + x3 - face_x), int(y2 + y3 - face_y)),
             (int(x3 + x1 + x2 - 2 * face_x), int(y3 + y2 + y1 - 2 * face_y)), (0, 255, 0), 2)
    cv2.line(img, (int(x3), int(y3)), (int(x3 + x1 - face_x), int(y3 + y1 - face_y)), (0, 255, 0), 2)
    cv2.line(img, (int(x3), int(y3)), (int(x3 + x2 - face_x), int(y3 + y2 - face_y)), (0, 255, 0), 2)
    return img
def visualize_headpose(img, yaw, pitch, roll, save_path=None, to_bgr=False):
    """Overlay head-pose angles, a pose cube and pose axes on the image.

    Args:
        img (ndarray): Input image.
        yaw, pitch, roll: Angle arrays/tensors; element 0 is visualized.
        save_path (str | None): If given, the visualization is written to disk.
        to_bgr (bool): Convert the image from RGB to BGR before drawing.
    """
    canvas = np.copy(img)
    if to_bgr:
        canvas = cv2.cvtColor(canvas, cv2.COLOR_RGB2BGR)
    # angle readout in the bottom-left corner
    show_string = f'y {yaw[0].item():.2f}, p {pitch[0].item():.2f}, r {roll[0].item():.2f}'
    cv2.putText(canvas, show_string, (30, canvas.shape[0] - 30), fontFace=1, fontScale=1, color=(0, 0, 255), thickness=2)
    draw_pose_cube(canvas, yaw[0], pitch[0], roll[0], size=100)
    draw_axis(canvas, yaw[0], pitch[0], roll[0], tdx=50, tdy=50, size=100)
    # save img
    if save_path is not None:
        cv2.imwrite(save_path, canvas)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_alignment.py | Python | import argparse
import cv2
import torch
from facexlib.alignment import init_alignment_model, landmark_98_to_68
from facexlib.visualization import visualize_alignment
def main(args):
    """Detect facial landmarks on one image and save a visualization."""
    # initialize model
    align_net = init_alignment_model(args.model_name, device=args.device)

    image = cv2.imread(args.img_path)
    with torch.no_grad():
        landmarks = align_net.get_landmarks(image)
        if args.to68:
            # collapse the 98-point WFLW layout to the 68-point layout
            landmarks = landmark_98_to_68(landmarks)
        visualize_alignment(image, [landmarks], args.save_path)
if __name__ == '__main__':
    # command-line interface for single-image landmark inference
    cli = argparse.ArgumentParser()
    cli.add_argument('--img_path', type=str, default='assets/test2.jpg')
    cli.add_argument('--save_path', type=str, default='test_alignment.png')
    cli.add_argument('--model_name', type=str, default='awing_fan')
    cli.add_argument('--device', type=str, default='cuda')
    cli.add_argument('--to68', action='store_true')
    main(cli.parse_args())
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_crop_standard_faces.py | Python | import cv2
import torch
from facexlib.detection import init_detection_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
# Demo script: crop a standard 512x512 aligned face from one wild image.
input_img = '/home/wxt/datasets/ffhq/ffhq_wild/00028.png'
# initialize face helper
face_helper = FaceRestoreHelper(
    upscale_factor=1, face_size=512, crop_ratio=(1, 1), det_model='retinaface_resnet50', save_ext='png')
face_helper.clean_all()

det_net = init_detection_model('retinaface_resnet50', half=False)
img = cv2.imread(input_img)
with torch.no_grad():
    bboxes = det_net.detect_faces(img, 0.97)
    # x0, y0, x1, y1, confidence_score, five points (x, y)
    print(bboxes.shape)
    # hard-coded: pick the 4th detected face of this specific test image
    bboxes = bboxes[3]
# expand the detected box by 100 px on every side before cropping
bboxes[0] -= 100
bboxes[1] -= 100
bboxes[2] += 100
bboxes[3] += 100
img = img[int(bboxes[1]):int(bboxes[3]), int(bboxes[0]):int(bboxes[2]), :]

face_helper.read_image(img)
# get face landmarks for each face
face_helper.get_face_landmarks_5(only_center_face=True, pad_blur=False)
# align and warp each face
# save_crop_path = os.path.join(save_root, 'cropped_faces', img_name)
save_crop_path = '00028_cvwarp.png'
face_helper.align_warp_face(save_crop_path)

# for i in range(50):
#     img = cv2.imread(f'inputs/ffhq_512/{i:08d}.png')
#     cv2.circle(img, (193, 240), 1, (0, 0, 255), 4)
#     cv2.circle(img, (319, 240), 1, (0, 255, 255), 4)
#     cv2.circle(img, (257, 314), 1, (255, 0, 255), 4)
#     cv2.circle(img, (201, 371), 1, (0, 255, 0), 4)
#     cv2.circle(img, (313, 371), 1, (255, 0, 0), 4)
#     cv2.imwrite(f'ffhq_lm/{i:08d}_lm.png', img)

# [875.5 719.83333333] [1192.5 715.66666667] [1060. 997.]
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_detection.py | Python | import argparse
import cv2
import torch
from facexlib.detection import init_detection_model
from facexlib.visualization import visualize_detection
def main(args):
    """Detect faces on one image, print the raw results and save an overlay."""
    # initialize model
    det_net = init_detection_model(args.model_name, half=args.half)

    image = cv2.imread(args.img_path)
    with torch.no_grad():
        bboxes = det_net.detect_faces(image, 0.97)
        # x0, y0, x1, y1, confidence_score, five points (x, y)
        print(bboxes)
        visualize_detection(image, bboxes, args.save_path)
if __name__ == '__main__':
    # command-line interface for single-image face detection
    cli = argparse.ArgumentParser()
    cli.add_argument('--img_path', type=str, default='assets/test.jpg')
    cli.add_argument('--save_path', type=str, default='test_detection.png')
    cli.add_argument(
        '--model_name', type=str, default='retinaface_resnet50', help='retinaface_resnet50 | retinaface_mobile0.25')
    cli.add_argument('--half', action='store_true')
    main(cli.parse_args())
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_headpose.py | Python | import argparse
import cv2
import numpy as np
import torch
from torchvision.transforms.functional import normalize
from facexlib.detection import init_detection_model
from facexlib.headpose import init_headpose_model
from facexlib.utils.misc import img2tensor
from facexlib.visualization import visualize_headpose
def main(args):
    """Detect a face, crop it with a margin, and estimate/visualize head pose."""
    # initialize models
    det_net = init_detection_model(args.detection_model_name, half=args.half)
    headpose_net = init_headpose_model(args.headpose_model_name, half=args.half)

    img = cv2.imread(args.img_path)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        # x0, y0, x1, y1, confidence_score, five points (x, y)
        bbox = list(map(int, bboxes[0]))
        # crop the face region with a 10-pixel margin, clipped to the image
        margin = 10
        h, w, _ = img.shape
        top, bottom = max(bbox[1] - margin, 0), min(bbox[3] + margin, h)
        left, right = max(bbox[0] - margin, 0), min(bbox[2] + margin, w)
        face = img[top:bottom, left:right, :].astype(np.float32) / 255.
        # resize to the network input size
        face = cv2.resize(face, (224, 224), interpolation=cv2.INTER_LINEAR)
        face = img2tensor(np.copy(face), bgr2rgb=False)
        # ImageNet normalization
        normalize(face, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225], inplace=True)
        face = face.unsqueeze(0).cuda()
        yaw, pitch, roll = headpose_net(face)
        visualize_headpose(img, yaw, pitch, roll, args.save_path)
if __name__ == '__main__':
    # command-line interface for head-pose estimation on one image
    cli = argparse.ArgumentParser(description='Head pose estimation using the Hopenet network.')
    cli.add_argument('--img_path', type=str, default='assets/test.jpg')
    cli.add_argument('--save_path', type=str, default='assets/test_headpose.png')
    cli.add_argument('--detection_model_name', type=str, default='retinaface_resnet50')
    cli.add_argument('--headpose_model_name', type=str, default='hopenet')
    cli.add_argument('--half', action='store_true')
    main(cli.parse_args())
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_hyperiqa.py | Python | import argparse
import cv2
import numpy as np
import os
import torch
import torchvision
from PIL import Image
from facexlib.assessment import init_assessment_model
from facexlib.detection import init_detection_model
def main(args):
    """Scripts about evaluating face quality.

    Two steps:
    1) detect the face region and crop the face
    2) evaluate the face quality by hyperIQA, averaged over 10 random crops
    """
    # initialize model
    det_net = init_detection_model(args.detection_model_name, half=False)
    assess_net = init_assessment_model(args.assess_model_name, half=False)

    # specified face transformation in original hyperIQA
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.Resize((512, 384)),
        torchvision.transforms.RandomCrop(size=224),
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    ])

    img = cv2.imread(args.img_path)
    img_name = os.path.basename(args.img_path)
    basename, _ = os.path.splitext(img_name)
    with torch.no_grad():
        bboxes = det_net.detect_faces(img, 0.97)
        box = list(map(int, bboxes[0]))
        pred_scores = []
        # BGR -> RGB
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # the face crop is constant across iterations; only RandomCrop inside
        # `transforms` differs, so build the PIL image once
        face_region = Image.fromarray(img[box[1]:box[3], box[0]:box[2], :])
        for i in range(10):
            detect_face = transforms(face_region)
            # Fix: `torch.tensor(tensor)` copy-constructs and emits a warning;
            # add the batch dim and move to GPU directly.
            detect_face = detect_face.unsqueeze(0).cuda()
            pred = assess_net(detect_face)
            pred_scores.append(float(pred.item()))
        score = np.mean(pred_scores)
        # quality score ranges from 0-100, a higher score indicates a better quality
        print(f'{basename} {score:.4f}')
if __name__ == '__main__':
    # command-line interface for face quality assessment
    cli = argparse.ArgumentParser()
    cli.add_argument('--img_path', type=str, default='assets/test2.jpg')
    cli.add_argument('--detection_model_name', type=str, default='retinaface_resnet50')
    cli.add_argument('--assess_model_name', type=str, default='hypernet')
    cli.add_argument('--half', action='store_true')
    main(cli.parse_args())
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_matting.py | Python | import argparse
import cv2
import numpy as np
import torch.nn.functional as F
from torchvision.transforms.functional import normalize
from facexlib.matting import init_matting_model
from facexlib.utils import img2tensor
def main(args):
    """Run MODNet matting on one image; save the alpha matte and foreground."""
    modnet = init_matting_model()

    # read image
    img = cv2.imread(args.img_path) / 255.
    # unify image channels to 3
    if len(img.shape) == 2:
        img = img[:, :, None]
    if img.shape[2] == 1:
        img = np.repeat(img, 3, axis=2)
    elif img.shape[2] == 4:
        # drop the alpha channel
        img = img[:, :, 0:3]

    img_t = img2tensor(img, bgr2rgb=True, float32=True)
    normalize(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
    img_t = img_t.unsqueeze(0).cuda()

    # resize image for input: bring the short side near ref_size, then snap
    # both sides down to multiples of 32 (network stride requirement)
    _, _, im_h, im_w = img_t.shape
    ref_size = 512
    if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
        if im_w >= im_h:
            im_rh = ref_size
            im_rw = int(im_w / im_h * ref_size)
        elif im_w < im_h:
            im_rw = ref_size
            im_rh = int(im_h / im_w * ref_size)
    else:
        im_rh = im_h
        im_rw = im_w
    im_rw = im_rw - im_rw % 32
    im_rh = im_rh - im_rh % 32
    img_t = F.interpolate(img_t, size=(im_rh, im_rw), mode='area')

    # inference
    _, _, matte = modnet(img_t, True)

    # resize and save matte (back to the original resolution)
    matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
    matte = matte[0][0].data.cpu().numpy()
    cv2.imwrite(args.save_path, (matte * 255).astype('uint8'))

    # get foreground: composite onto a white background
    matte = matte[:, :, None]
    foreground = img * matte + np.full(img.shape, 1) * (1 - matte)
    cv2.imwrite(args.save_path.replace('.png', '_fg.png'), foreground * 255)
if __name__ == '__main__':
    # command-line interface for single-image matting
    cli = argparse.ArgumentParser()
    cli.add_argument('--img_path', type=str, default='assets/test.jpg')
    cli.add_argument('--save_path', type=str, default='test_matting.png')
    main(cli.parse_args())
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_parsing.py | Python | import argparse
import cv2
import numpy as np
import os
import torch
from torchvision.transforms.functional import normalize
from facexlib.parsing import init_parsing_model
from facexlib.utils.misc import img2tensor
def vis_parsing_maps(img, parsing_anno, stride, save_anno_path=None, save_vis_path=None):
    """Save a face-parsing annotation map and/or a color overlay on the input image.

    Args:
        img (np.ndarray): Input image (BGR uint8), same spatial size as the upscaled annotation.
        parsing_anno (np.ndarray): Per-pixel class indices predicted by the parsing net.
        stride (int): Upscale factor applied to the annotation (nearest-neighbor).
        save_anno_path (str, optional): If given, write the raw class-index map here.
        save_vis_path (str, optional): If given, write the color-blended visualization here.
    """
    # Colors for all 20 parts
    part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 0, 85], [255, 0, 170], [0, 255, 0], [85, 255, 0],
                   [170, 255, 0], [0, 255, 85], [0, 255, 170], [0, 0, 255], [85, 0, 255], [170, 0, 255], [0, 85, 255],
                   [0, 170, 255], [255, 255, 0], [255, 255, 85], [255, 255, 170], [255, 0, 255], [255, 85, 255],
                   [255, 170, 255], [0, 255, 255], [85, 255, 255], [170, 255, 255]]
    # 0: 'background'
    # attributions = [1 'skin', 2 'l_brow', 3 'r_brow', 4 'l_eye', 5 'r_eye',
    # 6 'eye_g', 7 'l_ear', 8 'r_ear', 9 'ear_r', 10 'nose',
    # 11 'mouth', 12 'u_lip', 13 'l_lip', 14 'neck', 15 'neck_l',
    # 16 'cloth', 17 'hair', 18 'hat']
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    if save_anno_path is not None:
        cv2.imwrite(save_anno_path, vis_parsing_anno)
    if save_vis_path is not None:
        # start from a white canvas; background (index 0) keeps the white color
        vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255
        num_of_class = np.max(vis_parsing_anno)
        for pi in range(1, num_of_class + 1):
            index = np.where(vis_parsing_anno == pi)
            vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
        vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
        # blend: 40% original image + 60% color map
        vis_im = cv2.addWeighted(img, 0.4, vis_parsing_anno_color, 0.6, 0)
        cv2.imwrite(save_vis_path, vis_im)
def main(img_path, output):
    """Parse one face image with BiSeNet and save annotation + visualization into `output`."""
    net = init_parsing_model(model_name='bisenet')
    img_name = os.path.basename(img_path)
    img_basename = os.path.splitext(img_name)[0]
    img_input = cv2.imread(img_path)
    # the parsing net operates on 512x512 inputs
    img_input = cv2.resize(img_input, (512, 512), interpolation=cv2.INTER_LINEAR)
    img = img2tensor(img_input.astype('float32') / 255., bgr2rgb=True, float32=True)
    # ImageNet mean/std normalization
    normalize(img, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225), inplace=True)
    img = torch.unsqueeze(img, 0).cuda()
    with torch.no_grad():
        out = net(img)[0]
    # per-pixel argmax over class logits -> (512, 512) label map
    out = out.squeeze(0).cpu().numpy().argmax(0)
    vis_parsing_maps(
        img_input,
        out,
        stride=1,
        save_anno_path=os.path.join(output, f'{img_basename}.png'),
        save_vis_path=os.path.join(output, f'{img_basename}_vis.png'))
if __name__ == '__main__':
    # CLI entry point: face parsing demo (BiSeNet)
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/ffhq/ffhq_512/00000000.png')
    parser.add_argument('--output', type=str, default='results', help='output folder')
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)
    main(args.input, args.output)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_parsing_parsenet.py | Python | import argparse
import cv2
import numpy as np
import os
import torch
from torchvision.transforms.functional import normalize
from facexlib.parsing import init_parsing_model
from facexlib.utils.misc import img2tensor
def vis_parsing_maps(img, parsing_anno, stride, save_anno_path=None, save_vis_path=None):
    """Save a ParseNet annotation map and/or a color overlay on the input image.

    Note: ParseNet uses a different class order than the BiSeNet variant
    (see inference_parsing.py), hence the different color table below.

    Args:
        img (np.ndarray): Input image (BGR uint8), same spatial size as the upscaled annotation.
        parsing_anno (np.ndarray): Per-pixel class indices predicted by ParseNet.
        stride (int): Upscale factor applied to the annotation (nearest-neighbor).
        save_anno_path (str, optional): If given, write the raw class-index map here.
        save_vis_path (str, optional): If given, write the color-blended visualization here.
    """
    # Colors for all parts
    part_colors = [[0, 0, 0], [204, 0, 0], [76, 153, 0], [204, 204, 0], [51, 51, 255], [204, 0, 204], [0, 255, 255],
                   [255, 204, 204], [102, 51, 0], [255, 0, 0], [102, 204, 0], [255, 255, 0], [0, 0, 153], [0, 0, 204],
                   [255, 51, 153], [0, 204, 204], [0, 51, 0], [255, 153, 51], [0, 204, 0]]
    # 0: 'background' 1: 'skin' 2: 'nose'
    # 3: 'eye_g' 4: 'l_eye' 5: 'r_eye'
    # 6: 'l_brow' 7: 'r_brow' 8: 'l_ear'
    # 9: 'r_ear' 10: 'mouth' 11: 'u_lip'
    # 12: 'l_lip' 13: 'hair' 14: 'hat'
    # 15: 'ear_r' 16: 'neck_l' 17: 'neck'
    # 18: 'cloth'
    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
    if save_anno_path is not None:
        cv2.imwrite(save_anno_path, vis_parsing_anno)
    if save_vis_path is not None:
        # start from a white canvas; background (index 0) keeps the white color
        vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255
        num_of_class = np.max(vis_parsing_anno)
        for pi in range(1, num_of_class + 1):
            index = np.where(vis_parsing_anno == pi)
            vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
        vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
        # blend: 40% original image + 60% color map
        vis_im = cv2.addWeighted(img, 0.4, vis_parsing_anno_color, 0.6, 0)
        cv2.imwrite(save_vis_path, vis_im)
def main(img_path, output):
    """Parse one face image with ParseNet and save annotation + visualization into `output`."""
    net = init_parsing_model(model_name='parsenet')
    img_name = os.path.basename(img_path)
    img_basename = os.path.splitext(img_name)[0]
    img_input = cv2.imread(img_path)
    # resize to 512 x 512 for better performance
    img_input = cv2.resize(img_input, (512, 512), interpolation=cv2.INTER_LINEAR)
    img = img2tensor(img_input.astype('float32') / 255., bgr2rgb=True, float32=True)
    # ParseNet expects [-1, 1] inputs (unlike the ImageNet stats used for BiSeNet)
    normalize(img, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
    img = torch.unsqueeze(img, 0).cuda()
    with torch.no_grad():
        out = net(img)[0]
    # per-pixel argmax over class logits -> (512, 512) label map
    out = out.squeeze(0).cpu().numpy().argmax(0)
    vis_parsing_maps(
        img_input,
        out,
        stride=1,
        save_anno_path=os.path.join(output, f'{img_basename}.png'),
        save_vis_path=os.path.join(output, f'{img_basename}_vis.png'))
if __name__ == '__main__':
    # CLI entry point: face parsing demo (ParseNet)
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, default='datasets/ffhq/ffhq_512/00000000.png')
    parser.add_argument('--output', type=str, default='results', help='output folder')
    args = parser.parse_args()
    os.makedirs(args.output, exist_ok=True)
    main(args.input, args.output)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_recognition.py | Python | import argparse
import glob
import math
import numpy as np
import os
import torch
from facexlib.recognition import ResNetArcFace, cosin_metric, load_image
if __name__ == '__main__':
    # Compare two folders of face crops pair-by-pair with ArcFace and report
    # the mean angular distance (degrees) over the non-identical pairs.
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder1', type=str)
    parser.add_argument('--folder2', type=str)
    parser.add_argument('--model_path', type=str, default='facexlib/recognition/weights/arcface_resnet18.pth')
    args = parser.parse_args()

    img_list1 = sorted(glob.glob(os.path.join(args.folder1, '*')))
    img_list2 = sorted(glob.glob(os.path.join(args.folder2, '*')))
    print(img_list1, img_list2)
    model = ResNetArcFace(block='IRBlock', layers=(2, 2, 2, 2), use_se=False)
    model.load_state_dict(torch.load(args.model_path))
    model.to(torch.device('cuda'))
    model.eval()

    dist_list = []
    identical_count = 0
    for idx, (img_path1, img_path2) in enumerate(zip(img_list1, img_list2)):
        basename = os.path.splitext(os.path.basename(img_path1))[0]
        img1 = load_image(img_path1)
        img2 = load_image(img_path2)
        # batch the pair so one forward pass yields both embeddings
        data = torch.stack([img1, img2], dim=0)
        data = data.to(torch.device('cuda'))
        output = model(data)
        print(output.size())
        output = output.data.cpu().numpy()
        # angular distance in degrees between the two embeddings
        dist = cosin_metric(output[0], output[1])
        dist = np.arccos(dist) / math.pi * 180
        print(f'{idx} - {dist} o : {basename}')
        if dist < 1:
            print(f'{basename} is almost identical to original.')
            identical_count += 1
        else:
            dist_list.append(dist)

    # BUGFIX: the original divided unconditionally and raised ZeroDivisionError
    # whenever every pair was near-identical (dist_list empty).
    if dist_list:
        print(f'Result dist: {sum(dist_list) / len(dist_list):.6f}')
    else:
        print('Result dist: n/a (all pairs nearly identical)')
    print(f'identical count: {identical_count}')
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
inference/inference_tracking.py | Python | import argparse
import cv2
import glob
import numpy as np
import os
import torch
from tqdm import tqdm
from facexlib.detection import init_detection_model
from facexlib.tracking.sort import SORT
def main(args):
    """Detect faces per frame with RetinaFace and track them across frames with SORT.

    Reads all '*.jpg' frames from args.input_folder in sorted order, updates the
    tracker per frame, and (when save_frame is on) writes visualized frames into
    args.save_folder.
    """
    detect_interval = args.detect_interval
    margin = args.margin
    face_score_threshold = args.face_score_threshold

    save_frame = True
    if save_frame:
        # one pseudo-random color per track id (modulo 32)
        colors = np.random.rand(32, 3)

    # init detection model and tracker
    det_net = init_detection_model('retinaface_resnet50', half=False)
    tracker = SORT(max_age=1, min_hits=2, iou_threshold=0.2)

    print('Start track...')
    # track over all frames
    frame_paths = sorted(glob.glob(os.path.join(args.input_folder, '*.jpg')))
    pbar = tqdm(total=len(frame_paths), unit='frames', desc='Extract')
    for idx, path in enumerate(frame_paths):
        img_basename = os.path.basename(path)
        frame = cv2.imread(path)
        img_size = frame.shape[0:2]
        # detection face bboxes
        with torch.no_grad():
            bboxes = det_net.detect_faces(frame, 0.97)
        additional_attr = []
        face_list = []
        for idx_bb, bbox in enumerate(bboxes):
            score = bbox[4]
            if score > face_score_threshold:
                bbox = bbox[0:5]
                det = bbox[0:4]
                # face rectangle: expand by `margin`, clamped to the image bounds
                det[0] = np.maximum(det[0] - margin, 0)
                det[1] = np.maximum(det[1] - margin, 0)
                det[2] = np.minimum(det[2] + margin, img_size[1])
                det[3] = np.minimum(det[3] + margin, img_size[0])
                face_list.append(bbox)
                additional_attr.append([score])
        trackers = tracker.update(np.array(face_list), img_size, additional_attr, detect_interval)
        pbar.update(1)
        pbar.set_description(f'{idx}: detect {len(bboxes)} faces in {img_basename}')
        # save frame: box + track id, colored per id; 'DETECT' marks frames
        # where fresh detections were fed to the tracker
        if save_frame:
            for d in trackers:
                d = d.astype(np.int32)
                cv2.rectangle(frame, (d[0], d[1]), (d[2], d[3]), colors[d[4] % 32, :] * 255, 3)
                if len(face_list) != 0:
                    cv2.putText(frame, 'ID : %d DETECT' % (d[4]), (d[0] - 10, d[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,
                                0.75, colors[d[4] % 32, :] * 255, 2)
                    cv2.putText(frame, 'DETECTOR', (5, 45), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (1, 1, 1), 2)
                else:
                    cv2.putText(frame, 'ID : %d' % (d[4]), (d[0] - 10, d[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                                colors[d[4] % 32, :] * 255, 2)
            save_path = os.path.join(args.save_folder, img_basename)
            cv2.imwrite(save_path, frame)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_folder', help='Path to the input folder', type=str)
    parser.add_argument('--save_folder', help='Path to save visualized frames', type=str, default=None)
    parser.add_argument(
        '--detect_interval',
        help=('how many frames to make a detection, trade-off '
              'between performance and fluency'),
        type=int,
        default=1)
    # if the face is big in your video ,you can set it bigger for easy tracking
    parser.add_argument('--margin', help='add margin for face', type=int, default=20)
    parser.add_argument(
        '--face_score_threshold', help='The threshold of the extracted faces,range 0 < x <=1', type=float, default=0.85)
    args = parser.parse_args()

    # BUGFIX: --save_folder defaults to None, so os.makedirs(None) raised a
    # TypeError; main() also always writes into it. Fail fast with a CLI error.
    if args.save_folder is None:
        parser.error('--save_folder is required')
    os.makedirs(args.save_folder, exist_ok=True)
    main(args)

# add verification
# remove last few frames
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
scripts/crop_faces_5landmarks.py | Python | import glob
import os
import facexlib.utils.face_restoration_helper as face_restoration_helper
def crop_one_img(img, save_cropped_path=None):
    """Detect 5 facial landmarks for one image and save the aligned face crop.

    Uses the module-level `FaceRestoreHelper` instance (shared mutable state,
    so `clean_all` resets it between images).

    Args:
        img (str): Path to the input image (the caller passes a file path).
        save_cropped_path (str, optional): Destination for the warped crop.
    """
    FaceRestoreHelper.clean_all()
    FaceRestoreHelper.read_image(img)
    # get face landmarks
    FaceRestoreHelper.get_face_landmarks_5()
    FaceRestoreHelper.align_warp_face(save_cropped_path)
if __name__ == '__main__':
    # initialize face helper
    # NOTE: the module-level name shadows the FaceRestoreHelper class -- it is
    # actually an instance, used directly by crop_one_img above.
    FaceRestoreHelper = face_restoration_helper.FaceRestoreHelper(upscale_factor=1)
    # NOTE(review): hard-coded local input path -- adjust before running
    img_paths = glob.glob('/home/wxt/Projects/test/*')
    save_path = 'test'

    for idx, path in enumerate(img_paths):
        print(idx, path)
        file_name = os.path.basename(path)
        save_cropped_path = os.path.join(save_path, file_name)
        crop_one_img(path, save_cropped_path=save_cropped_path)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
scripts/extract_detection_info_ffhq.py | Python | import cv2
import glob
import numpy as np
import os
import torch
from PIL import Image
from tqdm import tqdm
from facexlib.detection import init_detection_model
def draw_and_save(image, bboxes_and_landmarks, save_path, order_type=1):
    """Visualize results

    For each detection, draws the confidence score, the bounding box and the
    five facial landmarks, then writes the image to `save_path`.

    Args:
        image (PIL.Image.Image | np.ndarray): Input image; PIL inputs are converted to BGR.
        bboxes_and_landmarks: Iterable of detections laid out as
            [x0, y0, x1, y1, score, <10 landmark coordinates>].
        save_path (str): Output image path.
        order_type (int): 0 for the MTCNN landmark layout (all xs, then all ys);
            any other value for the RetinaFace/CenterFace layout (x, y pairs).
    """
    if isinstance(image, Image.Image):
        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    image = image.astype(np.float32)
    for b in bboxes_and_landmarks:
        # confidence
        cv2.putText(image, '{:.4f}'.format(b[4]), (int(b[0]), int(b[1] + 12)), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (255, 255, 255))
        # bounding boxes
        b = list(map(int, b))
        cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
        # landmarks (one fixed color per landmark index)
        if order_type == 0:  # mtcnn
            cv2.circle(image, (b[5], b[10]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[6], b[11]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[7], b[12]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[8], b[13]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[9], b[14]), 1, (255, 0, 0), 4)
        else:  # retinaface, centerface
            cv2.circle(image, (b[5], b[6]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[7], b[8]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[9], b[10]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[11], b[12]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[13], b[14]), 1, (255, 0, 0), 4)
    # save image
    cv2.imwrite(save_path, image)
# Script state: RetinaFace detector for extracting detection info over FFHQ.
det_net = init_detection_model('retinaface_resnet50')
half = False
det_net.cuda().eval()
if half:
    det_net = det_net.half()
# NOTE(review): hard-coded relative dataset path -- adjust before running
img_list = sorted(glob.glob('../../BasicSR-private/datasets/ffhq/ffhq_512/*'))
def get_center_landmark(landmarks, center):
    """Return the detection whose bounding-box midpoint is nearest to ``center``.

    Each entry of ``landmarks`` starts with a bounding box (x0, y0, x1, y1);
    the Euclidean distance between its midpoint and ``center`` is the ranking
    criterion. Ties resolve to the first (lowest-index) detection.
    """
    target = np.asarray(center)
    distances = [
        np.linalg.norm(np.array([(det[0] + det[2]) / 2, (det[1] + det[3]) / 2]) - target) for det in landmarks
    ]
    return landmarks[int(np.argmin(distances))]
# Run detection over all FFHQ images and save the per-image detection info
# (bbox, score, landmarks) as a pickled numpy array.
pbar = tqdm(total=len(img_list), unit='image')
save_np = []
for idx, path in enumerate(img_list):
    img_name = os.path.basename(path)
    pbar.update(1)
    pbar.set_description(path)
    img = Image.open(path)
    with torch.no_grad():
        bboxes, warped_face_list = det_net.align_multi(img, 0.97, half=half)
    # keep only the detection closest to the image center (FFHQ faces are centered)
    if len(bboxes) > 1:
        bboxes = [get_center_landmark(bboxes, (256, 256))]
    save_np.append(bboxes)
    # draw_and_save(img, bboxes, os.path.join('tmp', img_name), 1)
np.save('ffhq_det_info.npy', save_np)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
scripts/get_ffhq_template.py | Python | import cv2
import numpy as np
from PIL import Image
# Average all per-image FFHQ detections (bbox + score + 5 landmarks) produced
# by scripts/extract_detection_info_ffhq.py into a single template detection.
bboxes = np.load('ffhq_det_info.npy', allow_pickle=True)
bboxes = np.array(bboxes).squeeze(1)
bboxes = np.mean(bboxes, axis=0)
print(bboxes)
def draw_and_save(image, bboxes_and_landmarks, save_path, order_type=1):
    """Visualize results

    Draws score, bounding box and five landmarks for each detection, then
    writes the image to `save_path`. Duplicated from
    scripts/extract_detection_info_ffhq.py.

    Args:
        image (PIL.Image.Image | np.ndarray): Input image; PIL inputs are converted to BGR.
        bboxes_and_landmarks: Iterable of detections laid out as
            [x0, y0, x1, y1, score, <10 landmark coordinates>].
        save_path (str): Output image path.
        order_type (int): 0 for MTCNN landmark layout, otherwise RetinaFace/CenterFace layout.
    """
    if isinstance(image, Image.Image):
        image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
    image = image.astype(np.float32)
    for b in bboxes_and_landmarks:
        # confidence
        cv2.putText(image, '{:.4f}'.format(b[4]), (int(b[0]), int(b[1] + 12)), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (255, 255, 255))
        # bounding boxes
        b = list(map(int, b))
        cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
        # landmarks
        if order_type == 0:  # mtcnn
            cv2.circle(image, (b[5], b[10]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[6], b[11]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[7], b[12]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[8], b[13]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[9], b[14]), 1, (255, 0, 0), 4)
        else:  # retinaface, centerface
            cv2.circle(image, (b[5], b[6]), 1, (0, 0, 255), 4)
            cv2.circle(image, (b[7], b[8]), 1, (0, 255, 255), 4)
            cv2.circle(image, (b[9], b[10]), 1, (255, 0, 255), 4)
            cv2.circle(image, (b[11], b[12]), 1, (0, 255, 0), 4)
            cv2.circle(image, (b[13], b[14]), 1, (255, 0, 0), 4)
    # save image
    cv2.imwrite(save_path, image)
# Visualize the averaged template detection on a sample image.
img = Image.open('inputs/00000000.png')
# bboxes = np.array([
#     118.177826 * 2, 92.759514 * 2, 394.95926 * 2, 472.53278 * 2, 0.9995705 * 2,  # noqa: E501
#     686.77227723, 488.62376238, 586.77227723, 493.59405941, 337.91089109,
#     488.38613861, 437.95049505, 493.51485149, 513.58415842, 678.5049505
# ])
# bboxes = bboxes / 2
draw_and_save(img, [bboxes], 'template_detall.png', 1)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
setup.py | Python | #!/usr/bin/env python
from setuptools import find_packages, setup
import os
import subprocess
import time
version_file = 'facexlib/version.py'
def readme():
    """Return the contents of README.md (used as the package long description)."""
    with open('README.md', encoding='utf-8') as f:
        return f.read()
def get_git_hash():
    """Return the full SHA of the current git HEAD, or 'unknown' if git cannot run."""

    def _minimal_ext_cmd(cmd):
        # construct minimal environment so git output is stable regardless of locale
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        # git executable missing or not runnable
        sha = 'unknown'
    return sha
def get_hash():
    """Return the short (7-char) git SHA, or 'unknown' outside a git checkout."""
    return get_git_hash()[:7] if os.path.exists('.git') else 'unknown'
def write_version_py():
    """Generate facexlib/version.py from the VERSION file plus the current git SHA."""
    content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
__gitsha__ = '{}'
version_info = ({})
"""
    sha = get_hash()
    with open('VERSION', 'r') as f:
        SHORT_VERSION = f.read().strip()
    # e.g. '0.2.1' -> "0, 2, 1"; non-numeric components (rc tags) are quoted
    VERSION_INFO = ', '.join([x if x.isdigit() else f'"{x}"' for x in SHORT_VERSION.split('.')])

    version_file_str = content.format(time.asctime(), SHORT_VERSION, sha, VERSION_INFO)
    with open(version_file, 'w') as f:
        f.write(version_file_str)
def get_version():
    """Return ``__version__`` from the generated version file.

    The file is executed into an explicit namespace dict. The previous pattern
    of ``exec``-ing into the function frame and then reading ``locals()`` relies
    on undefined CPython behavior (mutations of ``locals()`` inside a function
    are not guaranteed to be observable) and can silently break.
    """
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def get_requirements(filename='requirements.txt'):
    """Parse a pip-style requirements file (next to this setup.py) into a list.

    Lines are stripped of surrounding whitespace, and blank lines are skipped --
    the previous ``line.replace('\\n', '')`` kept empty entries for blank or
    trailing-newline lines, producing bogus install_requires items.

    Args:
        filename (str): File name relative to this script, or an absolute path.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    with open(os.path.join(here, filename), 'r') as f:
        requires = [line.strip() for line in f if line.strip()]
    return requires
if __name__ == '__main__':
    # regenerate facexlib/version.py before packaging so the wheel carries
    # the VERSION string and git SHA it was built from
    write_version_py()
    setup(
        name='facexlib',
        version=get_version(),
        description='Basic face library',
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='Xintao Wang',
        author_email='xintao.wang@outlook.com',
        keywords='computer vision, face, detection, landmark, alignment',
        url='https://github.com/xinntao/facexlib',
        include_package_data=True,
        packages=find_packages(exclude=('options', 'datasets', 'experiments', 'results', 'tb_logger', 'wandb')),
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
        ],
        license='Apache License 2.0',
        setup_requires=['cython', 'numpy'],
        install_requires=get_requirements(),
        zip_safe=False)
| xinntao/facexlib | 963 | FaceXlib aims at providing ready-to-use face-related functions based on current STOA open-source methods. | Python | xinntao | Xintao | Tencent |
__tests__/parse.test.ts | TypeScript | import { expect, test } from '@jest/globals'
import parse from '../src/parse'
// Snapshot coverage of the command grammar, plus exact-match checks that
// getArg() normalizes keys: case-insensitive, with '-' and '_' stripped.
test('parse', () => {
  expect(parse('')).toMatchSnapshot()
  expect(parse('cmd')).toMatchSnapshot()
  expect(parse('/bot')).toMatchSnapshot()
  expect(parse('/bot cmd')).toMatchSnapshot()
  expect(parse('/bot cmd a b c')).toMatchSnapshot()
  expect(parse('/bot test val=123 val2=val2 val3')).toMatchSnapshot()
  const { getArg } = parse('/bot test some_value_1=1 SomeValue2=2 some-value-3=3 sOmev-al_uE4=4')
  expect(getArg('somevalue1')).toEqual('1')
  expect(getArg('some_value_2')).toEqual('2')
  expect(getArg('SomeValue3')).toEqual('3')
  expect(getArg('some-value-4')).toEqual('4')
})
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
__tests__/process.test.ts | TypeScript | import * as github from '@actions/github'
import { expect, test } from '@jest/globals'
import { config } from 'dotenv'
import processCmd from '../src/process'
// Load GH_TOKEN etc. from a local .env before the test body runs.
config()

// Integration test: needs network access, a valid GH_TOKEN, and the referenced
// on-chain block hash; 100s timeout accommodates RPC round-trips.
test('processCmd', async () => {
  const ctx = {
    owner: 'xlc',
    repo: 'RFCs',
    issue_number: 14
  }
  const octokit = github.getOctokit(process.env.GH_TOKEN!)
  expect(
    await processCmd(
      octokit,
      '/bot merge blockhash=0x39fbc57d047c71f553aa42824599a7686aea5c9aab4111f6b836d35d3d058162',
      ctx
    )
  ).toMatchSnapshot()
}, 100000)
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
jest.config.js | JavaScript | module.exports = {
clearMocks: true,
moduleFileExtensions: ['js', 'ts'],
testMatch: ['**/*.test.ts'],
transform: {
'^.+\\.ts$': 'ts-jest'
},
verbose: true
} | xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
src/api.ts | TypeScript | import { ScProvider } from '@polkadot/rpc-provider/substrate-connect'
import { ApiPromise, WsProvider } from '@polkadot/api'
import * as SC from '@substrate/connect'
import collectivesChainspec from './chainspecs/collectives-polkadot.json'
/**
 * Create an ApiPromise connected to the Polkadot collectives chain.
 *
 * The ENDPOINT env var selects the transport: a WebSocket URL (default:
 * the public collectives RPC), or the literal 'light-client' to run an
 * in-process light client with relay-chain + parachain chainspecs.
 */
export const create = async () => {
  const endpoint = process.env.ENDPOINT || 'wss://polkadot-collectives-rpc.polkadot.io'
  if (endpoint === 'light-client') {
    // Note: the light client protocol doesn't have good support for historical state queries.
    // It simply queries peers at random, hoping they hold the data; if the
    // remote node is not an archival node, the query fails with a
    // `RemoteCouldntAnswer` error.
    // https://github.com/smol-dot/smoldot/issues/1078
    const relaychain = new ScProvider(SC, SC.WellKnownChain.polkadot)
    const parachain = new ScProvider(SC, JSON.stringify(collectivesChainspec), relaychain)
    await parachain.connect()
    return ApiPromise.create({ provider: parachain })
  } else {
    return ApiPromise.create({ provider: new WsProvider(endpoint) })
  }
}
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
src/main.ts | TypeScript | import * as github from '@actions/github'
import processCmd from './process'
/**
 * GitHub Actions entry point: read the triggering comment, run the bot
 * command via processCmd, then apply its side effects (post a comment,
 * approve+merge at a pinned SHA, or close the PR).
 *
 * GH_TOKEN performs the read/comment operations; GH_PAT (falling back to
 * GH_TOKEN) performs the privileged approve/merge/close.
 */
const main = async () => {
  const rawcmd: string = github.context.payload.comment?.body
  if (!rawcmd) {
    console.log('No comment body found')
    return
  }
  const githubToken = process.env.GH_TOKEN
  const PAT = process.env.GH_PAT || githubToken
  if (!githubToken) {
    throw new Error('GH_TOKEN is not set')
  }
  if (!PAT) {
    // PAT defaults to githubToken, which was just validated above
    throw new Error('this is unreachable')
  }
  const octokit = github.getOctokit(githubToken)
  const result = await processCmd(octokit, rawcmd, {
    owner: github.context.repo.owner,
    repo: github.context.repo.repo,
    issue_number: github.context.issue.number
  })
  if (!result) {
    console.log('No result')
    return
  }
  console.log('Result', result)
  // use a PAT to merge the PR
  const patOctokit = github.getOctokit(PAT)
  if (result.createComment) {
    await octokit.rest.issues.createComment({
      ...github.context.repo,
      issue_number: github.context.issue.number,
      body: result.createComment
    })
  }
  if (result.merge) {
    // approve the pr; best-effort -- the merge below still proceeds on failure
    try {
      await patOctokit.rest.pulls.createReview({
        ...github.context.repo,
        pull_number: github.context.issue.number,
        event: 'APPROVE'
      })
    } catch (e) {
      console.log('Unable to approve PR', e)
    }
    // `sha` pins the merge to the reviewed commit; it fails if the branch moved
    await patOctokit.rest.pulls.merge({
      ...github.context.repo,
      pull_number: github.context.issue.number,
      sha: result.merge
    })
  }
  if (result.close) {
    await patOctokit.rest.issues.update({
      ...github.context.repo,
      issue_number: github.context.issue.number,
      state: 'closed'
    })
  }
}

main()
  // eslint-disable-next-line github/no-then
  .catch(console.error)
  .finally(() => process.exit())
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
src/parse.ts | TypeScript | const parse = (body: string) => {
const match = body.match(/\/bot\s+(\w+)(.*)/)
if (!match) {
return {
getArg: () => undefined
}
}
const [, cmd, args] = match
// use csv parser to handle quoted strings
const argsArr: string[] = args.trim().split(/\s+/)
const namedArgs: Record<string, string> = {}
const unnamedArgs: string[] = []
const normalizedNamedArgs: Record<string, string> = {}
const normalize = (key: string) => {
return key.trim().toLowerCase().replaceAll('-', '').replaceAll('_', '')
}
for (const arg of argsArr) {
if (arg.trim().length === 0) {
continue
}
if (arg.includes('=')) {
const [key, value] = arg.split('=')
const trimmedValue = value.trim()
namedArgs[key.trim()] = trimmedValue
normalizedNamedArgs[normalize(key)] = trimmedValue
} else {
unnamedArgs.push(arg)
}
}
return {
cmd,
namedArgs,
unnamedArgs,
rawArgs: args,
getArg(key: string) {
return normalizedNamedArgs[normalize(key)]
}
}
}
export default parse
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
src/process.ts | TypeScript | import * as github from '@actions/github'
import { blake2AsHex } from '@polkadot/util-crypto'
import '@polkadot/api/augment'
import parse from './parse'
import { create } from './api'
// Repository coordinates of the pull request the command was issued on.
type Context = {
  owner: string
  repo: string
  issue_number: number
}
/**
 * Execute a `/bot <cmd>` command against a fellowship RFC pull request.
 *
 * Supported commands: `ping`, `merge`, `close`, `head`. `merge` / `close`
 * require a `blockhash=` argument and verify that a fellowship referendum
 * confirmed the matching RFC_APPROVE / RFC_REJECT remark at that block
 * before instructing the caller to merge or close the PR.
 *
 * Returns instructions for the caller ({ createComment?, merge?, close? }),
 * or undefined if the comment is not a bot command.
 */
const processCmd = async (octokit: ReturnType<typeof github.getOctokit>, rawcmd: string, ctx: Context) => {
  const { cmd, getArg, rawArgs } = parse(rawcmd)
  if (!cmd) {
    console.log('No command found')
    return
  }
  // Build the expected on-chain remark `RFC_APPROVE(nnnn,<hash>)` /
  // `RFC_REJECT(nnnn,<hash>)` from the PR's single proposal document.
  const getRemarkBody = async (action: 'approve' | 'reject') => {
    const files = await octokit.rest.pulls.listFiles({
      owner: ctx.owner,
      repo: ctx.repo,
      pull_number: ctx.issue_number
    })
    // the proposal document is named like `NNNN-some-title.md`
    const file = files.data.find(file => file.filename.match(/\d{4}-.+\.md$/i))
    if (!file) {
      return {
        error: 'Unable to find proposal document'
      }
    }
    if (files.data.length > 1) {
      return {
        error: 'More than one proposal document found'
      }
    }
    const prInfo = await octokit.rest.pulls.get({
      owner: ctx.owner,
      repo: ctx.repo,
      pull_number: ctx.issue_number
    })
    const headSha = prInfo.data.head.sha
    // fetch the raw document at the PR head so the hash matches that exact revision
    const body = await octokit.rest.repos.getContent({
      owner: ctx.owner,
      repo: ctx.repo,
      path: file.filename,
      ref: headSha,
      headers: {
        accept: 'application/vnd.github.v3.raw'
      }
    })
    // blake2-256 of the document contents, hex without the 0x prefix
    const hex = blake2AsHex(body.data.toString(), 256).substring(2)
    const rpc_number = ctx.issue_number.toString().padStart(4, '0')
    let remarkBody
    switch (action) {
      case 'approve':
        remarkBody = `RFC_APPROVE(${rpc_number},${hex})`
        break
      case 'reject':
        remarkBody = `RFC_REJECT(${rpc_number},${hex})`
        break
    }
    return {
      headSha,
      remarkBody
    }
  }
  // Verify the on-chain confirmation at `blockhash`, then instruct the caller
  // to merge (approve) or close (reject) the PR.
  const handleRfc = async (action: 'approve' | 'reject') => {
    const blockHash = getArg('blockhash')
    if (!blockHash) {
      return {
        createComment: 'Missing block hash'
      }
    }
    const { remarkBody, headSha, error } = await getRemarkBody(action)
    if (error) {
      return {
        createComment: error
      }
    }
    if (!remarkBody) {
      return {
        createComment: 'Unable to generate remark body'
      }
    }
    const api = await create()
    // events are read at `blockHash`; the referendum info must be read at the
    // PARENT block, because confirmation removes the ongoing state in this block
    const apiAt = await api.at(blockHash)
    const apiAtPrev = await api.at((await api.rpc.chain.getHeader(blockHash)).parentHash)
    const remarkBodyHash = api.tx.system.remark(remarkBody).method.hash.toHex()
    const events = await apiAt.query.system.events()
    for (const evt of events) {
      if (evt.event.section === 'fellowshipReferenda' && evt.event.method === 'Confirmed') {
        const [referendumIndex] = evt.event.data
        const info = await apiAtPrev.query.fellowshipReferenda.referendumInfoFor(referendumIndex)
        const infoJson = info.toJSON() as any
        const proposalHash = infoJson?.ongoing?.proposal?.lookup?.hash
        // the confirmed referendum must carry exactly the expected remark call
        if (proposalHash === remarkBodyHash) {
          await api.disconnect()
          switch (action) {
            case 'approve':
              return {
                merge: headSha,
                createComment: `RFC ${ctx.issue_number} approved. Merging ${headSha.substring(0, 8)} into master`
              }
            case 'reject':
              return {
                close: true,
                createComment: `RFC ${ctx.issue_number} rejected. Closing PR`
              }
          }
        }
      }
    }
    // NOTE(review): api.disconnect() is not called on this failure path
    return {
      createComment: `Unable to find fellowshipReferenda.confirmed event at \`${blockHash}\` for proposal with preimage \`${remarkBodyHash}\``
    }
  }
  const handlers = {
    async ping() {
      return {
        createComment: `pong ${rawArgs.substring(0, 10)}`
      }
    },
    async merge() {
      return handleRfc('approve')
    },
    async close() {
      return handleRfc('reject')
    },
    // report the latest chain head hash (useful for picking a blockhash)
    async head() {
      const api = await create()
      const head = await new Promise(resolve => {
        api.rpc.chain.subscribeNewHeads(head => {
          resolve(head.hash.toHex())
        })
      })
      await api.disconnect()
      return {
        createComment: `Current head: ${head}`
      }
    }
  }
  if (cmd in handlers) {
    return handlers[cmd as keyof typeof handlers]() as Promise<
      { createComment?: string; merge?: string; close?: boolean } | undefined
    >
  } else {
    return {
      createComment: `Unknown command: ${cmd}`
    }
  }
}

export default processCmd
| xlc/fellowship-process-bot | 0 | TypeScript | xlc | Xiliang Chen | Laminar | |
serde-implicit-proc/src/ast.rs | Rust | use std::collections::HashSet;
use syn::{
DeriveInput, Error, Field, FieldsNamed, FieldsUnnamed, Generics, Ident, punctuated::Punctuated,
token::Comma,
};
/// A named-field enum variant together with the field acting as its tag.
pub struct Variant {
    pub ident: Ident,
    pub tag: Ident,
    pub fields: FieldsNamed,
}

/// A tuple enum variant; `tag_index` points at the implicitly-tagged field.
pub struct TupleVariant {
    pub ident: Ident,
    pub fields: FieldsUnnamed,
    pub tag_index: usize,
    pub has_flatten: bool,
}

pub type Fields = FieldsNamed;

/// Parsed representation of the enum the derive is applied to.
pub struct Enum {
    pub ident: Ident,
    pub generics: Generics,
    pub vars: Style,
}

/// Whether the enum consists of tuple variants or struct variants.
pub enum Style {
    Tuple(Vec<TupleVariant>),
    Struct {
        variants: Vec<Variant>,
        fallthrough: Option<Fallthrough>,
    },
}

/// A fallthrough variant for `serde-implicit`
pub struct Fallthrough {
    pub ident: Ident,
    pub field: Field,
}

// `'static` is implied for references in `const` items
// (clippy::redundant_static_lifetimes).
pub const TAG: &str = "tag";
pub const FLATTEN: &str = "flatten";
/// Parse the derive input into our [`Enum`] AST.
///
/// Dispatches on the shape of the first variant; the per-style parsers reject
/// any later variant of a different shape.
pub fn parse_data(input: DeriveInput) -> syn::Result<Enum> {
    let enum_ = match input.data {
        syn::Data::Enum(data_enum) => data_enum,
        _ => {
            return Err(Error::new_spanned(
                input,
                "`serde_implicit` can only `Deserialize` struct enum variants",
            ));
        }
    };
    let variants = match enum_.variants.first().map(|v| &v.fields) {
        Some(syn::Fields::Named(_)) => parse_struct_variants(enum_.variants)?,
        Some(syn::Fields::Unnamed(_)) => parse_enum_variants(enum_.variants)?,
        // BUGFIX: was `todo!()` — a derive macro should surface a compile
        // error on the offending variant, not panic the compiler process.
        Some(syn::Fields::Unit) => {
            return Err(Error::new_spanned(
                &enum_.variants[0],
                "`serde_implicit` does not handle unit variants",
            ));
        }
        None => Style::Tuple(vec![]),
    };
    Ok(Enum {
        ident: input.ident,
        generics: input.generics,
        vars: variants,
    })
}
/// Result of parsing the final struct variant: either a regular tagged
/// variant or the untagged fallthrough.
enum VarOrFall {
    Var(Variant),
    Fall(Fallthrough),
}
/// Parse struct-style variants; only the LAST variant may be a fallthrough.
///
/// Also rejects duplicate tag identifiers across variants, since the tag is
/// what deserialization dispatches on.
fn parse_struct_variants(mut enum_variants: Punctuated<syn::Variant, Comma>) -> syn::Result<Style> {
    let mut variants = vec![];
    // Pull the last variant aside: it alone is allowed to be a fallthrough.
    let last_var = enum_variants.pop();
    for v in enum_variants {
        let variant = parse_variant(v)?;
        variants.push(variant);
    }
    let mut fallthrough = None;
    if let Some(var) = last_var {
        let var_or_fall = parse_variant_or_fallthrough(&var.into_value(), true)?;
        match var_or_fall {
            VarOrFall::Var(var) => variants.push(var),
            VarOrFall::Fall(fall) => fallthrough = Some(fall),
        }
    }
    let mut unique_tags = HashSet::new();
    for v in &variants {
        if !unique_tags.insert(v.tag.clone()) {
            return Err(Error::new_spanned(v.tag.clone(), "duplicate tags found"));
        }
    }
    Ok(Style::Struct {
        fallthrough,
        variants,
    })
}
/// Whether `field` carries `#[serde_implicit(tag)]`.
///
/// `flatten` is accepted (and ignored) in the same pass so a field carrying
/// either attribute parses; mutual exclusion is validated by the caller.
fn has_tag_attribute(field: &Field) -> syn::Result<bool> {
    let mut has_tag = false;
    for attr in &field.attrs {
        if attr.path().is_ident("serde_implicit") {
            attr.parse_nested_meta(|meta| {
                if meta.path.is_ident(TAG) {
                    has_tag = true;
                    Ok(())
                } else if meta.path.is_ident(FLATTEN) {
                    // Allow flatten in the same pass, will be validated later
                    Ok(())
                } else {
                    Err(Error::new_spanned(
                        attr,
                        "unknown attribute, expected `tag` or `flatten`",
                    ))
                }
            })?;
        }
    }
    Ok(has_tag)
}
/// Whether `field` carries `#[serde_implicit(flatten)]`.
///
/// Mirror of [`has_tag_attribute`]: `tag` is accepted (and ignored) in the
/// same pass; mutual exclusion is validated by the caller.
fn has_flatten_attribute(field: &Field) -> syn::Result<bool> {
    let mut has_flatten = false;
    for attr in &field.attrs {
        if attr.path().is_ident("serde_implicit") {
            attr.parse_nested_meta(|meta| {
                if meta.path.is_ident(FLATTEN) {
                    has_flatten = true;
                    Ok(())
                } else if meta.path.is_ident(TAG) {
                    // Allow tag in the same pass, will be validated later
                    Ok(())
                } else {
                    Err(Error::new_spanned(
                        attr,
                        "unknown attribute, expected `tag` or `flatten`",
                    ))
                }
            })?;
        }
    }
    Ok(has_flatten)
}
/// Parse tuple-style variants; rejects struct and unit variants mixed in.
///
/// `seen_flatten` threads through [`parse_enum_variant`] to enforce that
/// flatten variants come after all non-flatten variants.
fn parse_enum_variants(enum_variants: Punctuated<syn::Variant, Comma>) -> syn::Result<Style> {
    let mut variants = vec![];
    let mut seen_flatten = false;
    for v in enum_variants {
        let variant_ident = v.ident.clone();
        let variant = match v.fields {
            syn::Fields::Named(_) => {
                return Err(Error::new_spanned(
                    v,
                    "`serde_implicit` cannot combine struct and tuple variants",
                ));
            }
            syn::Fields::Unnamed(fields_unnamed) => {
                parse_enum_variant(variant_ident, fields_unnamed, &mut seen_flatten)?
            }
            syn::Fields::Unit => {
                return Err(Error::new_spanned(
                    v,
                    "`serde_implicit` does not handle unit variants",
                ));
            }
        };
        variants.push(variant);
    }
    Ok(Style::Tuple(variants))
}
/// Parse one tuple variant, validating its `tag` / `flatten` attributes.
///
/// Rules enforced here: at most one tagged field and one flattened field per
/// variant; a field cannot be both; a flatten variant has exactly one field;
/// flatten variants must come after all non-flatten variants (tracked via
/// `seen_flatten`). Without an explicit tag, field 0 is the tag.
fn parse_enum_variant(
    variant_ident: Ident,
    fields_unnamed: FieldsUnnamed,
    seen_flatten: &mut bool,
) -> syn::Result<TupleVariant> {
    // Find which field has the tag or flatten attribute
    let mut tag_index = None;
    let mut flatten_index = None;

    for (i, field) in fields_unnamed.unnamed.iter().enumerate() {
        let has_tag = has_tag_attribute(field)?;
        let has_flatten = has_flatten_attribute(field)?;

        // Validate tag and flatten are mutually exclusive
        if has_tag && has_flatten {
            return Err(Error::new_spanned(
                field,
                "field cannot have both `#[serde_implicit(tag)]` and `#[serde_implicit(flatten)]`",
            ));
        }

        if has_tag {
            if tag_index.is_some() {
                return Err(Error::new_spanned(
                    field,
                    "duplicate `#[serde_implicit(tag)]` annotations found, only one field can be tagged",
                ));
            }
            tag_index = Some(i);
        }

        if has_flatten {
            if flatten_index.is_some() {
                return Err(Error::new_spanned(
                    field,
                    "duplicate `#[serde_implicit(flatten)]` annotations found, only one field can be flattened",
                ));
            }
            flatten_index = Some(i);
        }
    }

    let has_flatten = flatten_index.is_some();

    // Validate flatten variants only have exactly 1 field
    if has_flatten && fields_unnamed.unnamed.len() != 1 {
        return Err(Error::new_spanned(
            &variant_ident,
            "flatten variant must have exactly one field",
        ));
    }

    // Validate no non-flatten variants come after flatten variants
    if !has_flatten && *seen_flatten {
        return Err(Error::new_spanned(
            &variant_ident,
            "flatten variants must appear after all non-flatten variants in the enum definition",
        ));
    }

    if has_flatten {
        *seen_flatten = true;
    }

    Ok(TupleVariant {
        ident: variant_ident,
        fields: fields_unnamed,
        tag_index: tag_index.unwrap_or(0), // Default to position 0
        has_flatten,
    })
}
fn parse_variant_or_fallthrough(v: &syn::Variant, can_fallthrough: bool) -> syn::Result<VarOrFall> {
let named = match &v.fields {
syn::Fields::Named(named) => named,
syn::Fields::Unit | syn::Fields::Unnamed(_) => {
return Err(Error::new_spanned(
v,
"`serde_implicit` can only `Deserialize` struct enum variants",
));
}
};
// Find all fields with #[serde_implicit(tag)] attribute
let mut tagged_fields = vec![];
for field in &named.named {
let mut has_tag = false;
field
.attrs
.iter()
.filter(|a| a.path().is_ident("serde_implicit"))
.try_for_each(|attr| {
attr.parse_nested_meta(|meta| {
if meta.path.is_ident(TAG) {
has_tag = true;
Ok(())
} else {
Err(Error::new_spanned(attr, "omg"))
}
})
})?;
if has_tag {
tagged_fields.push(field);
}
}
let tag;
match tagged_fields.len() {
0 => {
if !can_fallthrough {
return Err(Error::new_spanned(
named,
"missing `#[serde_implicit(tag)]`",
));
};
if named.named.len() != 1 {
return Err(Error::new_spanned(
v,
"fallthrough must have exactly one field",
));
}
return Ok(VarOrFall::Fall(Fallthrough {
ident: v.ident.clone(),
field: named.named.last().cloned().unwrap(),
}));
}
1 => {
tag = tagged_fields[0].ident.clone().unwrap();
}
_ => {
return Err(Error::new_spanned(
named,
"duplicate `#[serde_implicit(tag)]` annotations found, only one field can be tagged",
));
}
};
Ok(VarOrFall::Var(Variant {
ident: v.ident.clone(),
tag,
fields: named.clone(),
}))
}
/// Parse a struct-style variant that must carry a tag (no fallthrough allowed).
fn parse_variant(v: syn::Variant) -> syn::Result<Variant> {
    let parsed = parse_variant_or_fallthrough(&v, false)?;
    match parsed {
        VarOrFall::Var(variant) => Ok(variant),
        // With `can_fallthrough == false` the callee never returns a fallthrough.
        _ => unreachable!(),
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit-proc/src/expand.rs | Rust | use annoying::{ImplGenerics, TypeGenerics};
use proc_macro2::{Literal, TokenStream};
use quote::{format_ident, quote};
use syn::{Ident, WhereClause};
use crate::{
ast::{self, Fallthrough, Style},
tuple_enum::expand_tuple_enum,
};
/// Entry point of the derive expansion: parse the input enum and emit the
/// `serde::Deserialize` impl for it.
///
/// Struct-style and tuple-style enums are compiled by different strategies
/// (`expand_struct_enum` / `expand_tuple_enum`); both are spliced into the
/// same `deserialize` body below.
pub fn expand_derive_serialize(input: syn::DeriveInput) -> syn::Result<proc_macro2::TokenStream> {
    let data_enum = ast::parse_data(input)?;
    // if we need the 'de lifetime do same trick as serde
    let (_, _, where_clause) = data_enum.generics.split_for_impl();
    let this_type = &data_enum.ident;
    let enum_variant = enum_variant(&data_enum);
    // Custom printers (see `mod annoying`) that add `serde::Deserialize<'de>`
    // bounds to type parameters and omit the surrounding angle brackets.
    let impl_generics = ImplGenerics(&data_enum.generics);
    let ty_generics = TypeGenerics(&data_enum.generics);
    let body = match data_enum.vars {
        Style::Struct {
            variants,
            fallthrough,
        } => expand_struct_enum(
            &data_enum.ident,
            (impl_generics, ty_generics, where_clause),
            &variants,
            fallthrough.as_ref(),
        )?,
        Style::Tuple(variants) => expand_tuple_enum(&data_enum.ident, &variants)?,
    };
    Ok(quote! {
        #[automatically_derived]
        impl <'de, #impl_generics > serde::Deserialize<'de> for #this_type < #ty_generics > #where_clause {
            fn deserialize<__D>(__deserializer: __D) -> Result<Self, __D::Error>
            where __D: serde::Deserializer<'de>
            {
                #enum_variant
                #body
            }
        }
    })
}
pub fn enum_variant(enum_: &ast::Enum) -> proc_macro2::TokenStream {
match &enum_.vars {
Style::Tuple(_) => {
// Tuple enums don't generate a separate variant enum type
// (though they perhaps could).
// Instead the code is structured as a series of 'trials'
// like `serde(untagged)` which commits as soon as a matching variant is found.
quote! {}
}
Style::Struct {
variants,
fallthrough,
} => generate_variant_enum(&variants, fallthrough.as_ref()),
}
}
/// Build the deserialization body for a struct-style enum: buffer the input
/// while locating the implicit tag, pick the variant from the tag, then replay
/// the buffered content into that variant's deserializer.
pub fn expand_struct_enum(
    ty_name: &Ident,
    generics: (ImplGenerics, TypeGenerics, Option<&WhereClause>),
    variants: &[ast::Variant],
    fallthrough: Option<&Fallthrough>,
) -> syn::Result<proc_macro2::TokenStream> {
    let (impl_generics, ty_generics, where_clause) = generics;
    let this_type_str = Literal::string(&ty_name.to_string());
    let mut variant_arms = vec![];
    for (ix, var) in variants.iter().enumerate() {
        // `block` declares the per-variant `__Field` machinery; `variant`
        // declares and invokes the visitor that builds the variant value.
        let block = deserialize_fields(&var.fields);
        let variant = implement_variant_deserializer(
            &var.ident,
            &var.fields,
            &ty_name,
            &impl_generics,
            &ty_generics,
            &where_clause,
        );
        let cons = format_ident!("__variant{ix}");
        variant_arms.push(quote! {
            __Variant::#cons => {#block #variant }
        });
    }
    if let Some(fall) = &fallthrough {
        let variant = implement_fallthrough_deserializer(&fall, &ty_name);
        variant_arms.push(quote! {
            __Variant::Fallthrough => { #variant }
        });
    }
    // Tells TaggedContentVisitor which variant to fall back to when no tag key
    // is present in the input.
    let fallthrough = if fallthrough.is_some() {
        quote! { Some(__Variant::Fallthrough) }
    } else {
        quote! { None }
    };
    Ok(quote! {
        let (__tag, __content) = serde::Deserializer::deserialize_any(
            __deserializer,
            serde_implicit::__private::TaggedContentVisitor::<__Variant>::new(#this_type_str, #fallthrough))?;
        let __deserializer = serde_implicit::__private::ContentDeserializer::<__D::Error>::new(__content);
        match __tag {
            #(#variant_arms)*
        }
    })
}
/// Emit the private `__Variant` tag enum plus its identifier `Visitor` and
/// `Deserialize` impls for a struct-style enum.
///
/// `visit_str`/`visit_bytes` map the implicit tag key to the matching
/// `__variantN`; unrecognized tags now produce an `unknown_variant` error that
/// lists the valid tags (previously a placeholder `missing_field("omg")`).
pub fn generate_variant_enum(
    variants: &[ast::Variant],
    fallthrough: Option<&Fallthrough>,
) -> TokenStream {
    use proc_macro2::TokenStream;
    use quote::{format_ident, quote};
    use std::str::FromStr;
    let variant_enum_variants = variants.iter().enumerate().map(|(i, _)| {
        let variant = format_ident!("__variant{}", i);
        quote! { #variant }
    });
    let variant_enum_variants = quote! {
        #(#variant_enum_variants,)*
    };
    // Tag names, used both in the match arms and in the `unknown_variant`
    // "expected one of" list.
    let variant_tags: Vec<_> = variants
        .iter()
        .map(|var| Literal::string(&var.tag.to_string()))
        .collect();
    let visit_str_arms = variants.iter().enumerate().map(|(i, var)| {
        let tag_value = Literal::string(&var.tag.to_string());
        let variant = format_ident!("__variant{}", i);
        quote! {
            #tag_value => ::std::result::Result::Ok(__Variant::#variant),
        }
    });
    let visit_bytes_arms = variants.iter().enumerate().map(|(i, var)| {
        let tag_value = &var.tag;
        // Render the tag as a byte-string literal token, e.g. `b"kind"`.
        let byte_string = format!("b\"{}\"", tag_value);
        let byte_tokens = TokenStream::from_str(&byte_string).unwrap_or_else(|_| {
            quote! { #tag_value.as_bytes() }
        });
        let variant = format_ident!("__variant{}", i);
        quote! {
            #byte_tokens => ::std::result::Result::Ok(__Variant::#variant),
        }
    });
    let fallthrough_variant = fallthrough.map(|_| {
        quote! { Fallthrough }
    });
    quote! {
        #[allow(non_camel_case_types)]
        #[doc(hidden)]
        enum __Variant {
            #variant_enum_variants
            #fallthrough_variant
        }
        #[doc(hidden)]
        const __VARIANT_TAGS: &'static [&'static str] = &[#(#variant_tags),*];
        #[doc(hidden)]
        struct __VariantVisitor;
        #[automatically_derived]
        impl<'de> serde::de::Visitor<'de> for __VariantVisitor {
            type Value = __Variant;
            fn expecting(
                &self,
                __formatter: &mut ::std::fmt::Formatter,
            ) -> ::std::fmt::Result {
                ::std::fmt::Formatter::write_str(
                    __formatter,
                    "variant tag identifier",
                )
            }
            fn visit_str<__E>(
                self,
                __value: &str,
            ) -> ::std::result::Result<Self::Value, __E>
            where
                __E: serde::de::Error,
            {
                match __value {
                    #(#visit_str_arms)*
                    _ => ::std::result::Result::Err(__E::unknown_variant(__value, __VARIANT_TAGS)),
                }
            }
            fn visit_bytes<__E>(
                self,
                __value: &[u8],
            ) -> ::std::result::Result<Self::Value, __E>
            where
                __E: serde::de::Error,
            {
                match __value {
                    #(#visit_bytes_arms)*
                    _ => ::std::result::Result::Err(__E::unknown_variant(
                        &::std::string::String::from_utf8_lossy(__value),
                        __VARIANT_TAGS,
                    )),
                }
            }
        }
        #[automatically_derived]
        impl<'de> serde::Deserialize<'de> for __Variant {
            #[inline]
            fn deserialize<__D>(
                __deserializer: __D,
            ) -> ::std::result::Result<Self, __D::Error>
            where
                __D: serde::Deserializer<'de>,
            {
                serde::Deserializer::deserialize_identifier(
                    __deserializer,
                    __VariantVisitor,
                )
            }
        }
    }
}
/// Emit the private `__Field` key enum plus its identifier `Visitor` and
/// `Deserialize` impls for one struct variant's named fields.
///
/// Unknown keys map to `__Field::__ignore`, so extra input keys are skipped.
fn deserialize_fields(fields: &ast::Fields) -> TokenStream {
    let field_variants = (0..fields.named.len()).map(|i| {
        let variant = format_ident!("__field{}", i);
        quote! { #variant }
    });
    // todo: remove `__ignore` if `deny_unknown_fields` is set.
    let field_variants = quote! {
        #(#field_variants,)*
        __ignore,
    };
    let mut visit_str_arms = Vec::new();
    let mut visit_bytes_arms = Vec::new();
    for (i, field) in fields.named.iter().enumerate() {
        let field_ident = field.ident.as_ref().unwrap();
        let field_name = field_ident.to_string();
        let variant = format_ident!("__field{}", i);
        visit_str_arms.push(quote! {
            #field_name => ::std::result::Result::Ok(__Field::#variant),
        });
        // Byte-string literal containing exactly the field name's bytes.
        // (Previously the name was first wrapped in a textual `b"..."`, so the
        // emitted literal's *contents* included the `b"` and `"` characters
        // and byte-keyed formats could never match any field.)
        let byte_tokens = Literal::byte_string(field_name.as_bytes());
        visit_bytes_arms.push(quote! {
            #byte_tokens => ::std::result::Result::Ok(__Field::#variant),
        });
    }
    quote! {
        #[allow(non_camel_case_types)]
        #[doc(hidden)]
        enum __Field {
            #field_variants
        }
        #[doc(hidden)]
        struct __FieldVisitor;
        #[automatically_derived]
        impl<'de> serde::de::Visitor<'de> for __FieldVisitor {
            type Value = __Field;
            fn expecting(
                &self,
                __formatter: &mut ::std::fmt::Formatter,
            ) -> ::std::fmt::Result {
                ::std::fmt::Formatter::write_str(
                    __formatter,
                    "field identifier",
                )
            }
            fn visit_str<__E>(
                self,
                __value: &str,
            ) -> ::std::result::Result<Self::Value, __E>
            where
                __E: serde::de::Error,
            {
                match __value {
                    #(#visit_str_arms)*
                    _ => ::std::result::Result::Ok(__Field::__ignore),
                }
            }
            fn visit_bytes<__E>(
                self,
                __value: &[u8],
            ) -> ::std::result::Result<Self::Value, __E>
            where
                __E: serde::de::Error,
            {
                match __value {
                    #(#visit_bytes_arms)*
                    _ => ::std::result::Result::Ok(__Field::__ignore),
                }
            }
        }
        #[automatically_derived]
        impl<'de> serde::Deserialize<'de> for __Field {
            #[inline]
            fn deserialize<__D>(
                __deserializer: __D,
            ) -> ::std::result::Result<Self, __D::Error>
            where
                __D: serde::Deserializer<'de>,
            {
                serde::Deserializer::deserialize_identifier(
                    __deserializer,
                    __FieldVisitor,
                )
            }
        }
    }
}
fn implement_fallthrough_deserializer(
fallthrough: &Fallthrough,
enum_name: &syn::Ident,
) -> TokenStream {
let variant_name = &fallthrough.ident;
let field_name = &fallthrough.field.ident;
quote! {
serde::Deserialize::deserialize(__deserializer).map(|res| { #enum_name :: #variant_name { #field_name: res } })
}
}
/// Emit (and invoke) a map `Visitor` that deserializes one struct variant,
/// producing `#enum_name::#variant_ident { ... }`.
///
/// The surrounding generated code must already have declared the matching
/// `__Field` enum (see `deserialize_fields`) and bound `__deserializer`.
fn implement_variant_deserializer(
    variant_ident: &Ident,
    fields: &ast::Fields,
    enum_name: &syn::Ident,
    impl_generics: &ImplGenerics,
    ty_generics: &TypeGenerics,
    where_clause: &Option<&WhereClause>,
) -> TokenStream {
    use quote::{format_ident, quote};
    let variant_name = format!("{}::{}", enum_name, variant_ident);
    let expecting_message = format!("struct variant {}", variant_name);
    // Three parallel code fragments per field: an Option slot declaration, a
    // map-key match arm, and the final unwrap-or-missing check.
    let mut field_declarations = Vec::new();
    let mut field_processing = Vec::new();
    let mut final_fields = Vec::new();
    for (i, field) in fields.named.iter().enumerate() {
        let field_ident = field.ident.as_ref().unwrap();
        let field_name = field_ident.to_string();
        let field_type = &field.ty;
        let field_var = format_ident!("__field{}", i);
        let field_enum_variant = format_ident!("__field{}", i);
        field_declarations.push(quote! {
            let mut #field_var: ::std::option::Option<#field_type> = ::std::option::Option::None;
        });
        field_processing.push(quote! {
            __Field::#field_enum_variant => {
                if ::std::option::Option::is_some(&#field_var) {
                    return ::std::result::Result::Err(
                        <__A::Error as serde::de::Error>::duplicate_field(#field_name),
                    );
                }
                #field_var = ::std::option::Option::Some(
                    serde::de::MapAccess::next_value::<#field_type>(&mut __map)?,
                );
            }
        });
        final_fields.push(quote! {
            let #field_var = match #field_var {
                ::std::option::Option::Some(#field_var) => #field_var,
                ::std::option::Option::None => {
                    serde_implicit::__private::missing_field(#field_name)?
                }
            };
        });
    }
    let field_idents = fields.named.iter().map(|f| f.ident.as_ref().unwrap());
    let field_vars = (0..fields.named.len()).map(|i| format_ident!("__field{}", i));
    let struct_init = quote! {
        #enum_name::#variant_ident {
            #(#field_idents: #field_vars),*
        }
    };
    quote! {
        #[doc(hidden)]
        struct __Visitor<'de, #ty_generics> {
            marker: ::std::marker::PhantomData<#enum_name < #ty_generics >>,
            lifetime: ::std::marker::PhantomData<&'de ()>,
        }
        #[automatically_derived]
        impl<'de, #impl_generics> serde::de::Visitor<'de> for __Visitor<'de, #ty_generics> #where_clause {
            type Value = #enum_name < #ty_generics >;
            fn expecting(
                &self,
                __formatter: &mut ::std::fmt::Formatter,
            ) -> ::std::fmt::Result {
                ::std::fmt::Formatter::write_str(
                    __formatter,
                    #expecting_message,
                )
            }
            #[inline]
            fn visit_map<__A>(
                self,
                mut __map: __A,
            ) -> ::std::result::Result<Self::Value, __A::Error>
            where
                __A: serde::de::MapAccess<'de>,
            {
                #(#field_declarations)*
                while let ::std::option::Option::Some(__key) = serde::de::MapAccess::next_key::<
                    __Field,
                >(&mut __map)? {
                    match __key {
                        #(#field_processing)*
                        _ => {
                            let _ = serde::de::MapAccess::next_value::<
                                serde::de::IgnoredAny,
                            >(&mut __map)?;
                        }
                    }
                }
                #(#final_fields)*
                ::std::result::Result::Ok(#struct_init)
            }
        }
        serde::Deserializer::deserialize_map(
            __deserializer,
            __Visitor {
                marker: ::std::marker::PhantomData::<#enum_name < #ty_generics > >,
                lifetime: ::std::marker::PhantomData,
            }
        )
    }
}
// Hand-rolled variants of syn's `ImplGenerics`/`TypeGenerics` printers.
// `ImplGenerics` here additionally splices a `serde::Deserialize<'de>` bound
// onto every type parameter, and both printers omit the surrounding `<`/`>`
// tokens (note the commented-out lt/gt lines) so callers control the angle
// brackets themselves.
mod annoying {
    use proc_macro2::TokenStream;
    use quote::{ToTokens, quote};
    use syn::{GenericParam, Generics, Token};
    // Prints generics for the `impl<...>` position, with extra bounds.
    #[derive(Clone, Copy)]
    pub struct ImplGenerics<'a>(pub(crate) &'a Generics);
    // Prints the wrapped token if present, otherwise the type's default token
    // (e.g. a `:` when the colon was elided in the source).
    pub(crate) struct TokensOrDefault<'a, T: 'a>(pub &'a Option<T>);
    impl<'a, T> ToTokens for TokensOrDefault<'a, T>
    where
        T: ToTokens + Default,
    {
        fn to_tokens(&self, tokens: &mut TokenStream) {
            match self.0 {
                Some(t) => t.to_tokens(tokens),
                None => T::default().to_tokens(tokens),
            }
        }
    }
    impl<'a> ToTokens for ImplGenerics<'a> {
        fn to_tokens(&self, tokens: &mut TokenStream) {
            if self.0.params.is_empty() {
                return;
            }
            // TokensOrDefault(&self.0.lt_token).to_tokens(tokens);
            // Print lifetimes before types and consts, regardless of their
            // order in self.params.
            let mut trailing_or_empty = true;
            for param in self.0.params.pairs() {
                if let GenericParam::Lifetime(_) = **param.value() {
                    param.to_tokens(tokens);
                    trailing_or_empty = param.punct().is_some();
                }
            }
            for param in self.0.params.pairs() {
                if let GenericParam::Lifetime(_) = **param.value() {
                    continue;
                }
                if !trailing_or_empty {
                    <Token![,]>::default().to_tokens(tokens);
                    trailing_or_empty = true;
                }
                match param.value() {
                    GenericParam::Lifetime(_) => unreachable!(),
                    GenericParam::Type(param) => {
                        // Leave off the type parameter defaults
                        param.ident.to_tokens(tokens);
                        // super hack: force every type parameter to also be
                        // `serde::Deserialize<'de>`, appending to any
                        // existing bounds.
                        if !param.bounds.is_empty() {
                            TokensOrDefault(&param.colon_token).to_tokens(tokens);
                            param.bounds.to_tokens(tokens);
                            tokens.extend(quote! { + serde::Deserialize<'de> });
                        } else {
                            tokens.extend(quote! { :serde::Deserialize<'de> });
                        }
                    }
                    GenericParam::Const(param) => {
                        // Leave off the const parameter defaults
                        param.const_token.to_tokens(tokens);
                        param.ident.to_tokens(tokens);
                        param.colon_token.to_tokens(tokens);
                        param.ty.to_tokens(tokens);
                    }
                }
                param.punct().to_tokens(tokens);
            }
            // TokensOrDefault(&self.0.gt_token).to_tokens(tokens);
        }
    }
    // Prints generics for the type-argument position (idents only, no bounds).
    #[derive(Clone, Copy)]
    pub struct TypeGenerics<'a>(pub(crate) &'a Generics);
    impl<'a> ToTokens for TypeGenerics<'a> {
        fn to_tokens(&self, tokens: &mut TokenStream) {
            if self.0.params.is_empty() {
                return;
            }
            // TokensOrDefault(&self.0.lt_token).to_tokens(tokens);
            // Print lifetimes before types and consts, regardless of their
            // order in self.params.
            let mut trailing_or_empty = true;
            for param in self.0.params.pairs() {
                if let GenericParam::Lifetime(def) = *param.value() {
                    // Leave off the lifetime bounds and attributes
                    def.lifetime.to_tokens(tokens);
                    param.punct().to_tokens(tokens);
                    trailing_or_empty = param.punct().is_some();
                }
            }
            for param in self.0.params.pairs() {
                if let GenericParam::Lifetime(_) = **param.value() {
                    continue;
                }
                if !trailing_or_empty {
                    <Token![,]>::default().to_tokens(tokens);
                    trailing_or_empty = true;
                }
                match param.value() {
                    GenericParam::Lifetime(_) => unreachable!(),
                    GenericParam::Type(param) => {
                        param.ident.to_tokens(tokens);
                    }
                    GenericParam::Const(param) => {
                        // Leave off the const parameter defaults
                        param.ident.to_tokens(tokens);
                    }
                }
                param.punct().to_tokens(tokens);
            }
            // TokensOrDefault(&self.0.gt_token).to_tokens(tokens);
        }
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit-proc/src/lib.rs | Rust | use proc_macro::TokenStream as TS1;
use syn::{DeriveInput, parse_macro_input};
mod ast;
mod expand;
mod tuple_enum;
/// Derive macro for implicitly tagged enum deserialization.
///
/// Annotate one field per variant with `#[serde_implicit(tag)]` to mark it as
/// the discriminant. When that key appears in the input, the deserializer
/// commits to the corresponding variant and produces targeted error messages
/// instead of serde's generic "data did not match any variant" error.
///
/// **Tag fields should be non-optional.** During deserialization, keys whose
/// value is `null` are ignored when searching for the implicit tag. If a tag
/// field is `Option<T>` and the input contains `"field": null`, that variant
/// will not be selected.
// todo: shadow serde completely?
#[proc_macro_derive(Deserialize, attributes(serde_implicit))]
pub fn derive_serialize(input: TS1) -> TS1 {
    let parsed = parse_macro_input!(input as DeriveInput);
    // Expansion errors become `compile_error!` invocations so the user sees a
    // spanned diagnostic instead of a macro panic.
    match expand::expand_derive_serialize(parsed) {
        Ok(tokens) => tokens.into(),
        Err(err) => err.into_compile_error().into(),
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit-proc/src/tuple_enum.rs | Rust | use quote::{format_ident, quote};
use syn::Ident;
use crate::ast::{self};
/// Build the deserialization body for a tuple-style enum as a sequence of
/// 'trials': buffer the input once, then probe each variant's tag field in
/// declaration order and commit to the first match. Flatten variants are
/// probed last, only if no tagged variant matched.
pub fn expand_tuple_enum(
    ty_name: &Ident,
    variants: &[ast::TupleVariant],
) -> syn::Result<proc_macro2::TokenStream> {
    // Separate variants into regular and flatten groups
    let (regular_variants, flatten_variants): (Vec<_>, Vec<_>) =
        variants.iter().partition(|v| !v.has_flatten);
    let mut variant_trials = vec![];
    // Generate trials for regular (non-flatten) variants
    for v in regular_variants.iter() {
        let variant_ident = &v.ident;
        let fields = &v.fields;
        let field_count = fields.unnamed.len();
        let tag_index = v.tag_index;
        let tag_field = fields
            .unnamed
            .iter()
            .nth(tag_index)
            .expect("tag index must be smaller than variant's field count");
        let tag_type = &tag_field.ty;
        let trial = if field_count == 1 {
            // Single-field variant: the tag *is* the payload, so a successful
            // tag parse directly yields the variant.
            quote! {
                if let serde_implicit::__private::Content::Seq(ref __seq) = __content {
                    if __seq.len() == 1 {
                        if let ::std::result::Result::Ok(__tag) = <#tag_type as serde::Deserialize>::deserialize(
                            serde_implicit::__private::ContentRefDeserializer::<__D::Error>::new(&__seq[0])
                        ) {
                            return ::std::result::Result::Ok(#ty_name::#variant_ident(__tag));
                        }
                    }
                } else {
                    // Try to deserialize the entire content as the tag
                    if let ::std::result::Result::Ok(__tag) = <#tag_type as serde::Deserialize>::deserialize(
                        serde_implicit::__private::ContentDeserializer::<__D::Error>::new(__content.clone())
                    ) {
                        return ::std::result::Result::Ok(#ty_name::#variant_ident(__tag));
                    }
                }
            }
        } else {
            let variant_deserializer =
                implement_variant_deserializer(variant_ident, fields, ty_name);
            let tag_index_lit = proc_macro2::Literal::usize_unsuffixed(tag_index);
            let field_count_lit = proc_macro2::Literal::usize_unsuffixed(field_count);
            quote! {
                if let serde_implicit::__private::Content::Seq(ref __seq) = __content {
                    // Check length and tag, if both pass, commit to this variant
                    if __seq.len() == #field_count_lit && <#tag_type as serde::Deserialize>::deserialize(
                        serde_implicit::__private::ContentRefDeserializer::<__D::Error>::new(&__seq[#tag_index_lit])
                    ).is_ok() {
                        let __deserializer = serde_implicit::__private::ContentRefDeserializer::<__D::Error>::new(&__content);
                        return #variant_deserializer;
                    }
                }
            }
        };
        variant_trials.push(trial);
    }
    // Generate trials for flatten variants (tried only if no regular variant matched)
    let mut flatten_trials = vec![];
    for v in flatten_variants.iter() {
        let variant_ident = &v.ident;
        let fields = &v.fields;
        // Flatten variants have exactly one field
        let field = fields.unnamed.first().ok_or_else(|| {
            syn::Error::new_spanned(&v.ident, "flatten variant must have exactly one field")
        })?;
        let field_type = &field.ty;
        let trial = quote! {
            if let ::std::result::Result::Ok(__field0) = <#field_type as serde::Deserialize>::deserialize(
                serde_implicit::__private::ContentDeserializer::<__D::Error>::new(__content.clone())
            ) {
                return ::std::result::Result::Ok(#ty_name::#variant_ident(__field0));
            }
        };
        flatten_trials.push(trial);
    }
    let expected_str = proc_macro2::Literal::string(&format!("a valid variant of {}", ty_name));
    Ok(quote! {
        let __content = <serde_implicit::__private::Content as serde::Deserialize>::deserialize(
            __deserializer,
        )?;
        // Try each regular variant in order
        #(#variant_trials)*
        // If no regular variant matched, try flatten variants
        #(#flatten_trials)*
        // No variant matched
        ::std::result::Result::Err(serde::de::Error::custom(format!(
            "data did not match any variant of enum {}",
            #expected_str
        )))
    })
}
/// Emit (and invoke) a seq `Visitor` that deserializes one multi-field tuple
/// variant, producing `#enum_name::#variant_ident(...)`.
///
/// Field errors are wrapped with the `Enum::Variant: ...` prefix so failures
/// point at the committed variant rather than a generic untagged error.
fn implement_variant_deserializer(
    variant_ident: &Ident,
    fields: &syn::FieldsUnnamed,
    enum_name: &syn::Ident,
) -> proc_macro2::TokenStream {
    let variant_name = format!("{}::{}", enum_name, variant_ident);
    let expecting_message = format!("tuple variant {}", variant_name);
    let field_count = fields.unnamed.len();
    // Generate field deserialization: __seq.next_element::<Type>()?.ok_or_else(...)?
    let field_deserializations: Vec<_> = fields
        .unnamed
        .iter()
        .enumerate()
        .map(|(i, field)| {
            let field_type = &field.ty;
            let field_var = format_ident!("__field{}", i);
            let field_index = proc_macro2::Literal::usize_unsuffixed(i);
            let error_context = format!("{}: {{}}", variant_name);
            quote! {
                let #field_var = match serde::de::SeqAccess::next_element::<#field_type>(&mut __seq)
                    .map_err(|__e| serde::de::Error::custom(format!(#error_context, __e)))?
                {
                    ::std::option::Option::Some(__value) => __value,
                    ::std::option::Option::None => {
                        return ::std::result::Result::Err(serde::de::Error::invalid_length(
                            #field_index,
                            &#expecting_message,
                        ));
                    }
                };
            }
        })
        .collect();
    let field_vars: Vec<_> = (0..field_count)
        .map(|i| format_ident!("__field{}", i))
        .collect();
    let tuple_init = quote! {
        #enum_name::#variant_ident(#(#field_vars),*)
    };
    quote! {
        {
            #[doc(hidden)]
            struct __Visitor;
            #[automatically_derived]
            impl<'de> serde::de::Visitor<'de> for __Visitor {
                type Value = #enum_name;
                fn expecting(
                    &self,
                    __formatter: &mut ::std::fmt::Formatter,
                ) -> ::std::fmt::Result {
                    ::std::fmt::Formatter::write_str(
                        __formatter,
                        #expecting_message,
                    )
                }
                #[inline]
                fn visit_seq<__A>(self, mut __seq: __A) -> ::std::result::Result<Self::Value, __A::Error>
                where
                    __A: serde::de::SeqAccess<'de>,
                {
                    #(#field_deserializations)*
                    ::std::result::Result::Ok(#tuple_init)
                }
            }
            serde::Deserializer::deserialize_seq(
                __deserializer,
                __Visitor,
            )
        }
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit/src/content.rs | Rust | // This module is private and nothing here should be used outside of
// generated code.
//
// We will iterate on the implementation for a few releases and only have to
// worry about backward compatibility for the `untagged` and `tag` attributes
// rather than for this entire mechanism.
//
// This issue is tracking making some of this stuff public:
// https://github.com/serde-rs/serde/issues/741
use std::fmt;
use std::marker::PhantomData;
use serde::de::value::{MapDeserializer, SeqDeserializer};
use serde::de::{
self, Deserialize, DeserializeSeed, Deserializer, EnumAccess, Expected, IgnoredAny, MapAccess,
SeqAccess, Unexpected, Visitor,
};
/// Used from generated code to buffer the contents of the Deserializer when
/// deserializing untagged enums and internally tagged enums.
///
/// Not public API. Use serde-value instead.
#[derive(Debug, Clone)]
pub enum Content<'de> {
    // Scalars.
    Bool(bool),
    U8(u8),
    U16(u16),
    U32(u32),
    U64(u64),
    I8(i8),
    I16(i16),
    I32(i32),
    I64(i64),
    F32(f32),
    F64(f64),
    Char(char),
    // Text/bytes: owned when the input was transient, borrowed (`Str`/`Bytes`)
    // when it could be kept zero-copy from the input.
    String(String),
    Str(&'de str),
    ByteBuf(Vec<u8>),
    Bytes(&'de [u8]),
    // Option / unit / newtype wrappers.
    None,
    Some(Box<Content<'de>>),
    Unit,
    Newtype(Box<Content<'de>>),
    // Compound values.
    Seq(Vec<Content<'de>>),
    Map(Vec<(Content<'de>, Content<'de>)>),
}
impl<'de> Content<'de> {
    /// View this content as a `&str` if it is textual, including byte content
    /// that happens to be valid UTF-8; `None` otherwise.
    pub fn as_str(&self) -> Option<&str> {
        match *self {
            Content::Str(x) => Some(x),
            Content::String(ref x) => Some(x),
            Content::Bytes(x) => str::from_utf8(x).ok(),
            Content::ByteBuf(ref x) => str::from_utf8(x).ok(),
            _ => None,
        }
    }
    // Describe this content for serde error reporting. `#[cold]`: only reached
    // on the error path.
    #[cold]
    fn unexpected(&self) -> Unexpected<'_> {
        match *self {
            Content::Bool(b) => Unexpected::Bool(b),
            Content::U8(n) => Unexpected::Unsigned(n as u64),
            Content::U16(n) => Unexpected::Unsigned(n as u64),
            Content::U32(n) => Unexpected::Unsigned(n as u64),
            Content::U64(n) => Unexpected::Unsigned(n),
            Content::I8(n) => Unexpected::Signed(n as i64),
            Content::I16(n) => Unexpected::Signed(n as i64),
            Content::I32(n) => Unexpected::Signed(n as i64),
            Content::I64(n) => Unexpected::Signed(n),
            Content::F32(f) => Unexpected::Float(f as f64),
            Content::F64(f) => Unexpected::Float(f),
            Content::Char(c) => Unexpected::Char(c),
            Content::String(ref s) => Unexpected::Str(s),
            Content::Str(s) => Unexpected::Str(s),
            Content::ByteBuf(ref b) => Unexpected::Bytes(b),
            Content::Bytes(b) => Unexpected::Bytes(b),
            Content::None | Content::Some(_) => Unexpected::Option,
            Content::Unit => Unexpected::Unit,
            Content::Newtype(_) => Unexpected::NewtypeStruct,
            Content::Seq(_) => Unexpected::Seq,
            Content::Map(_) => Unexpected::Map,
        }
    }
}
impl<'de> Deserialize<'de> for Content<'de> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Untagged and internally tagged enums are only supported in
// self-describing formats.
let visitor = ContentVisitor { value: PhantomData };
deserializer.deserialize_any(visitor)
}
}
// Replay owned buffered content back through a `Deserializer`.
impl<'de, E> de::IntoDeserializer<'de, E> for Content<'de>
where
    E: de::Error,
{
    type Deserializer = ContentDeserializer<'de, E>;
    fn into_deserializer(self) -> Self::Deserializer {
        ContentDeserializer::new(self)
    }
}
// Replay borrowed buffered content back through a `Deserializer` without
// consuming the buffer.
impl<'a, 'de, E> de::IntoDeserializer<'de, E> for &'a Content<'de>
where
    E: de::Error,
{
    type Deserializer = ContentRefDeserializer<'a, 'de, E>;
    fn into_deserializer(self) -> Self::Deserializer {
        ContentRefDeserializer::new(self)
    }
}
/// Used to capture data in [`Content`] from other deserializers.
/// Cannot capture externally tagged enums, `i128` and `u128`.
struct ContentVisitor<'de> {
    // Zero-sized; the PhantomData only ties the visitor to the 'de lifetime.
    value: PhantomData<Content<'de>>,
}
impl<'de> ContentVisitor<'de> {
    fn new() -> Self {
        ContentVisitor { value: PhantomData }
    }
}
// Local stand-in for serde's internal `tri!` macro: identical to the `?`
// operator, kept so code vendored from serde needs minimal edits.
macro_rules! tri {
    ($e:expr) => {
        $e?
    };
}
// Generates an integer `deserialize_*` method for map-key deserializers: the
// buffered content may hold the key as a string (as formats like JSON
// require), so first try parsing the string as the requested integer type,
// then fall back to the regular integer path via `deserialize_integer`
// (defined elsewhere in this module). The `owned` arm works on
// `ContentDeserializer` (content by value), the `ref` arm on
// `ContentRefDeserializer` (content by reference).
macro_rules! map_key_integer_method {
    (owned $name:ident, $visit:ident, $ty:ty) => {
        fn $name<V>(self, visitor: V) -> Result<V::Value, Self::Error>
        where
            V: Visitor<'de>,
        {
            match self.content {
                Content::String(ref s) => {
                    if let Ok(v) = s.parse::<$ty>() {
                        return visitor.$visit(v);
                    }
                }
                Content::Str(s) => {
                    if let Ok(v) = s.parse::<$ty>() {
                        return visitor.$visit(v);
                    }
                }
                _ => {}
            }
            ContentDeserializer::new(self.content).deserialize_integer(visitor)
        }
    };
    (ref $name:ident, $visit:ident, $ty:ty) => {
        fn $name<V>(self, visitor: V) -> Result<V::Value, Self::Error>
        where
            V: Visitor<'de>,
        {
            match *self.content {
                Content::String(ref s) => {
                    if let Ok(v) = s.parse::<$ty>() {
                        return visitor.$visit(v);
                    }
                }
                Content::Str(s) => {
                    if let Ok(v) = s.parse::<$ty>() {
                        return visitor.$visit(v);
                    }
                }
                _ => {}
            }
            ContentRefDeserializer::new(self.content).deserialize_integer(visitor)
        }
    };
}
// Buffers arbitrary self-describing input into `Content`. Borrowed input
// (`visit_borrowed_str`/`visit_borrowed_bytes`) is kept zero-copy; transient
// input is copied into the owned `String`/`ByteBuf` variants.
impl<'de> Visitor<'de> for ContentVisitor<'de> {
    type Value = Content<'de>;
    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str("any value")
    }
    fn visit_bool<F>(self, value: bool) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::Bool(value))
    }
    fn visit_i8<F>(self, value: i8) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::I8(value))
    }
    fn visit_i16<F>(self, value: i16) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::I16(value))
    }
    fn visit_i32<F>(self, value: i32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::I32(value))
    }
    fn visit_i64<F>(self, value: i64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::I64(value))
    }
    fn visit_u8<F>(self, value: u8) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::U8(value))
    }
    fn visit_u16<F>(self, value: u16) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::U16(value))
    }
    fn visit_u32<F>(self, value: u32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::U32(value))
    }
    fn visit_u64<F>(self, value: u64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::U64(value))
    }
    fn visit_f32<F>(self, value: f32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::F32(value))
    }
    fn visit_f64<F>(self, value: f64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::F64(value))
    }
    fn visit_char<F>(self, value: char) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::Char(value))
    }
    // Transient `&str`: must be copied into an owned String.
    fn visit_str<F>(self, value: &str) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::String(value.into()))
    }
    // Borrowed from the input: kept zero-copy.
    fn visit_borrowed_str<F>(self, value: &'de str) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::Str(value))
    }
    fn visit_string<F>(self, value: String) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::String(value))
    }
    // Transient `&[u8]`: must be copied into an owned buffer.
    fn visit_bytes<F>(self, value: &[u8]) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::ByteBuf(value.into()))
    }
    // Borrowed from the input: kept zero-copy.
    fn visit_borrowed_bytes<F>(self, value: &'de [u8]) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::Bytes(value))
    }
    fn visit_byte_buf<F>(self, value: Vec<u8>) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::ByteBuf(value))
    }
    fn visit_unit<F>(self) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::Unit)
    }
    fn visit_none<F>(self) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        Ok(Content::None)
    }
    fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v = tri!(Deserialize::deserialize(deserializer));
        Ok(Content::Some(Box::new(v)))
    }
    fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v = tri!(Deserialize::deserialize(deserializer));
        Ok(Content::Newtype(Box::new(v)))
    }
    // Buffer each element recursively into a Vec.
    fn visit_seq<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
    where
        V: SeqAccess<'de>,
    {
        let mut vec = Vec::<Content>::with_capacity(visitor.size_hint().unwrap_or(0));
        while let Some(e) = tri!(visitor.next_element()) {
            vec.push(e);
        }
        Ok(Content::Seq(vec))
    }
    // Buffer key/value pairs in input order (no dedup or sorting).
    fn visit_map<V>(self, mut visitor: V) -> Result<Self::Value, V::Error>
    where
        V: MapAccess<'de>,
    {
        let mut vec = Vec::<(Content, Content)>::with_capacity(visitor.size_hint().unwrap_or(0));
        while let Some(kv) = tri!(visitor.next_entry()) {
            vec.push(kv);
        }
        Ok(Content::Map(vec))
    }
    // Externally tagged enum input cannot be buffered generically.
    fn visit_enum<V>(self, _visitor: V) -> Result<Self::Value, V::Error>
    where
        V: EnumAccess<'de>,
    {
        Err(de::Error::custom(
            "untagged and internally tagged enums do not support enum input",
        ))
    }
}
/// This is the type of the map keys in an internally tagged enum.
///
/// Not public API.
pub enum TagOrContent<'de> {
    // The key matched the tag name.
    Tag,
    // Any other key, buffered for later replay.
    Content(Content<'de>),
}
/// Serves as a seed for deserializing a key of internally tagged enum.
/// Cannot capture externally tagged enums, `i128` and `u128`.
struct TagOrContentVisitor<'de> {
    // The tag key to recognize, e.g. the enum's `tag = "..."` name.
    name: &'static str,
    value: PhantomData<TagOrContent<'de>>,
}
impl<'de> TagOrContentVisitor<'de> {
    fn new(name: &'static str) -> Self {
        TagOrContentVisitor {
            name,
            value: PhantomData,
        }
    }
}
// Lets the visitor be used directly as a seed when reading map keys.
impl<'de> DeserializeSeed<'de> for TagOrContentVisitor<'de> {
    type Value = TagOrContent<'de>;
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Internally tagged enums are only supported in self-describing
        // formats.
        deserializer.deserialize_any(self)
    }
}
// Classifies each input: string- and byte-like values equal to `self.name`
// become `TagOrContent::Tag`; everything else is buffered via
// `ContentVisitor` and wrapped in `TagOrContent::Content`.
impl<'de> Visitor<'de> for TagOrContentVisitor<'de> {
    type Value = TagOrContent<'de>;
    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "a type tag `{}` or any other value", self.name)
    }
    // Non-string scalars are unconditionally content, never the tag.
    fn visit_bool<F>(self, value: bool) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_bool(value)
            .map(TagOrContent::Content)
    }
    fn visit_i8<F>(self, value: i8) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_i8(value)
            .map(TagOrContent::Content)
    }
    fn visit_i16<F>(self, value: i16) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_i16(value)
            .map(TagOrContent::Content)
    }
    fn visit_i32<F>(self, value: i32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_i32(value)
            .map(TagOrContent::Content)
    }
    fn visit_i64<F>(self, value: i64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_i64(value)
            .map(TagOrContent::Content)
    }
    fn visit_u8<F>(self, value: u8) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_u8(value)
            .map(TagOrContent::Content)
    }
    fn visit_u16<F>(self, value: u16) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_u16(value)
            .map(TagOrContent::Content)
    }
    fn visit_u32<F>(self, value: u32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_u32(value)
            .map(TagOrContent::Content)
    }
    fn visit_u64<F>(self, value: u64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_u64(value)
            .map(TagOrContent::Content)
    }
    fn visit_f32<F>(self, value: f32) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_f32(value)
            .map(TagOrContent::Content)
    }
    fn visit_f64<F>(self, value: f64) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_f64(value)
            .map(TagOrContent::Content)
    }
    fn visit_char<F>(self, value: char) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_char(value)
            .map(TagOrContent::Content)
    }
    // String-like inputs equal to the tag name are the tag itself.
    fn visit_str<F>(self, value: &str) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_str(value)
                .map(TagOrContent::Content)
        }
    }
    fn visit_borrowed_str<F>(self, value: &'de str) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_borrowed_str(value)
                .map(TagOrContent::Content)
        }
    }
    fn visit_string<F>(self, value: String) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_string(value)
                .map(TagOrContent::Content)
        }
    }
    // Byte-like inputs are compared against the UTF-8 bytes of the tag name.
    fn visit_bytes<F>(self, value: &[u8]) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name.as_bytes() {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_bytes(value)
                .map(TagOrContent::Content)
        }
    }
    fn visit_borrowed_bytes<F>(self, value: &'de [u8]) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name.as_bytes() {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_borrowed_bytes(value)
                .map(TagOrContent::Content)
        }
    }
    fn visit_byte_buf<F>(self, value: Vec<u8>) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        if value == self.name.as_bytes() {
            Ok(TagOrContent::Tag)
        } else {
            ContentVisitor::new()
                .visit_byte_buf(value)
                .map(TagOrContent::Content)
        }
    }
    fn visit_unit<F>(self) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_unit()
            .map(TagOrContent::Content)
    }
    fn visit_none<F>(self) -> Result<Self::Value, F>
    where
        F: de::Error,
    {
        ContentVisitor::new()
            .visit_none()
            .map(TagOrContent::Content)
    }
    fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        ContentVisitor::new()
            .visit_some(deserializer)
            .map(TagOrContent::Content)
    }
    fn visit_newtype_struct<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        ContentVisitor::new()
            .visit_newtype_struct(deserializer)
            .map(TagOrContent::Content)
    }
    fn visit_seq<V>(self, visitor: V) -> Result<Self::Value, V::Error>
    where
        V: SeqAccess<'de>,
    {
        ContentVisitor::new()
            .visit_seq(visitor)
            .map(TagOrContent::Content)
    }
    fn visit_map<V>(self, visitor: V) -> Result<Self::Value, V::Error>
    where
        V: MapAccess<'de>,
    {
        ContentVisitor::new()
            .visit_map(visitor)
            .map(TagOrContent::Content)
    }
    fn visit_enum<V>(self, visitor: V) -> Result<Self::Value, V::Error>
    where
        V: EnumAccess<'de>,
    {
        ContentVisitor::new()
            .visit_enum(visitor)
            .map(TagOrContent::Content)
    }
}
/// Used by generated code to deserialize an internally tagged enum.
///
/// Captures map or sequence from the original deserializer and searches
/// a tag in it (in case of sequence, tag is the first element of sequence).
///
/// Not public API.
pub struct TaggedContentVisitor<T> {
    // Name of the tag field to look for.
    tag_name: &'static str,
    // Message used in `expecting` for error reporting.
    expecting: &'static str,
    // Marker for the deserialized tag type `T`.
    value: PhantomData<T>,
}
impl<T> TaggedContentVisitor<T> {
/// Visitor for the content of an internally tagged enum with the given
/// tag name.
pub fn new(name: &'static str, expecting: &'static str) -> Self {
TaggedContentVisitor {
tag_name: name,
expecting,
value: PhantomData,
}
}
}
// Extracts the tag and buffers the rest of the input as `Content`.
impl<'de, T> Visitor<'de> for TaggedContentVisitor<T>
where
    T: Deserialize<'de>,
{
    type Value = (T, Content<'de>);
    fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.write_str(self.expecting)
    }
    // For sequence input the tag is the first element; the remaining
    // elements are buffered wholesale.
    fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
    where
        S: SeqAccess<'de>,
    {
        let tag = match tri!(seq.next_element()) {
            Some(tag) => tag,
            None => {
                return Err(de::Error::missing_field(self.tag_name));
            }
        };
        let rest = de::value::SeqAccessDeserializer::new(seq);
        Ok((tag, tri!(Content::deserialize(rest))))
    }
    // For map input the tag may appear at any position; all other entries
    // are buffered in order. A repeated tag key is an error.
    fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        let mut tag = None;
        let mut vec = Vec::<(Content, Content)>::with_capacity(map.size_hint().unwrap_or(0));
        while let Some(k) = tri!(map.next_key_seed(TagOrContentVisitor::new(self.tag_name))) {
            match k {
                TagOrContent::Tag => {
                    if tag.is_some() {
                        return Err(de::Error::duplicate_field(self.tag_name));
                    }
                    tag = Some(tri!(map.next_value()));
                }
                TagOrContent::Content(k) => {
                    let v = tri!(map.next_value());
                    vec.push((k, v));
                }
            }
        }
        match tag {
            None => Err(de::Error::missing_field(self.tag_name)),
            Some(tag) => Ok((tag, Content::Map(vec))),
        }
    }
}
/// Used by generated code to deserialize an adjacently tagged enum.
///
/// Not public API.
pub enum TagOrContentField {
    /// The field is the tag of the adjacently tagged enum.
    Tag,
    /// The field is the content of the adjacently tagged enum.
    Content,
}
/// Not public API.
///
/// Classifies a field key as either the tag field or the content field of
/// an adjacently tagged enum; any other key is an error.
pub struct TagOrContentFieldVisitor {
    /// Name of the tag field of the adjacently tagged enum
    pub tag: &'static str,
    /// Name of the content field of the adjacently tagged enum
    pub content: &'static str,
}
// Allows the visitor to be used as a seed when reading struct field keys.
impl<'de> DeserializeSeed<'de> for TagOrContentFieldVisitor {
    type Value = TagOrContentField;
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_identifier(self)
    }
}
// Accepts field identifiers as index (u64), string, or bytes. Unlike
// `TagContentOtherFieldVisitor` below, unrecognized fields are an error.
impl<'de> Visitor<'de> for TagOrContentFieldVisitor {
    type Value = TagOrContentField;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(formatter, "{:?} or {:?}", self.tag, self.content)
    }
    // Index 0 is the tag field, index 1 the content field.
    fn visit_u64<E>(self, field_index: u64) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        match field_index {
            0 => Ok(TagOrContentField::Tag),
            1 => Ok(TagOrContentField::Content),
            _ => Err(de::Error::invalid_value(
                Unexpected::Unsigned(field_index),
                &self,
            )),
        }
    }
    fn visit_str<E>(self, field: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        if field == self.tag {
            Ok(TagOrContentField::Tag)
        } else if field == self.content {
            Ok(TagOrContentField::Content)
        } else {
            Err(de::Error::invalid_value(Unexpected::Str(field), &self))
        }
    }
    // Byte keys are compared against the UTF-8 bytes of the field names.
    fn visit_bytes<E>(self, field: &[u8]) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        if field == self.tag.as_bytes() {
            Ok(TagOrContentField::Tag)
        } else if field == self.content.as_bytes() {
            Ok(TagOrContentField::Content)
        } else {
            Err(de::Error::invalid_value(Unexpected::Bytes(field), &self))
        }
    }
}
/// Used by generated code to deserialize an adjacently tagged enum when
/// ignoring unrelated fields is allowed.
///
/// Not public API.
pub enum TagContentOtherField {
    /// The field is the enum's tag field.
    Tag,
    /// The field is the enum's content field.
    Content,
    /// Any other field, to be skipped by the caller.
    Other,
}
/// Not public API.
///
/// Like `TagOrContentFieldVisitor` but tolerant: unknown fields classify as
/// `Other` instead of producing an error.
pub struct TagContentOtherFieldVisitor {
    /// Name of the tag field of the adjacently tagged enum
    pub tag: &'static str,
    /// Name of the content field of the adjacently tagged enum
    pub content: &'static str,
}
// Allows the visitor to be used as a seed when reading struct field keys.
impl<'de> DeserializeSeed<'de> for TagContentOtherFieldVisitor {
    type Value = TagContentOtherField;
    fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
    where
        D: Deserializer<'de>,
    {
        deserializer.deserialize_identifier(self)
    }
}
// Classifies a field key as tag, content, or an ignorable extra field.
// Unknown keys and indices never error here: they map to `Other` so the
// caller can skip them.
impl<'de> Visitor<'de> for TagContentOtherFieldVisitor {
    type Value = TagContentOtherField;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(
            formatter,
            "{:?}, {:?}, or other ignored fields",
            self.tag, self.content
        )
    }
    fn visit_u64<E>(self, field_index: u64) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        // Index 0 is the tag field and index 1 the content field.
        Ok(match field_index {
            0 => TagContentOtherField::Tag,
            1 => TagContentOtherField::Content,
            _ => TagContentOtherField::Other,
        })
    }
    fn visit_str<E>(self, field: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        // String keys reuse the byte-slice comparison below.
        self.visit_bytes(field.as_bytes())
    }
    fn visit_bytes<E>(self, field: &[u8]) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        let kind = if field == self.tag.as_bytes() {
            TagContentOtherField::Tag
        } else if field == self.content.as_bytes() {
            TagContentOtherField::Content
        } else {
            TagContentOtherField::Other
        };
        Ok(kind)
    }
}
/// Not public API
///
/// A `Deserializer` that replays a buffered `Content` tree by value,
/// consuming it exactly once.
pub struct ContentDeserializer<'de, E> {
    // The buffered input to replay.
    content: Content<'de>,
    // Marker for the caller's error type.
    err: PhantomData<E>,
}
impl<'de, E> ContentDeserializer<'de, E>
where
    E: de::Error,
{
    // Builds an invalid-type error describing the buffered content.
    // Cold: only reached on type mismatch.
    #[cold]
    fn invalid_type(self, exp: &dyn Expected) -> E {
        de::Error::invalid_type(self.content.unexpected(), exp)
    }
    // Shared dispatcher for all integer widths: replays whichever integer
    // variant was buffered and lets the visitor convert.
    fn deserialize_integer<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Like `deserialize_integer` but also accepts the float variants, since
    // floats may be requested from integer-encoded input.
    fn deserialize_float<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::F32(v) => visitor.visit_f32(v),
            Content::F64(v) => visitor.visit_f64(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
}
// Replays a buffered sequence (by value) through the given visitor,
// checking at the end that every element was consumed.
fn visit_content_seq<'de, V, E>(content: Vec<Content<'de>>, visitor: V) -> Result<V::Value, E>
where
    V: Visitor<'de>,
    E: de::Error,
{
    let mut seq_visitor = SeqDeserializer::new(content.into_iter());
    let value = tri!(visitor.visit_seq(&mut seq_visitor));
    tri!(seq_visitor.end());
    Ok(value)
}
// Replays a buffered map (by value) through the given visitor. Keys are
// wrapped in `MapKeyContent` so they deserialize with map-key semantics.
fn visit_content_map<'de, V, E>(
    content: Vec<(Content<'de>, Content<'de>)>,
    visitor: V,
) -> Result<V::Value, E>
where
    V: Visitor<'de>,
    E: de::Error,
{
    let mut map_visitor =
        MapDeserializer::new(content.into_iter().map(|(k, v)| (MapKeyContent(k), v)));
    let value = tri!(visitor.visit_map(&mut map_visitor));
    tri!(map_visitor.end());
    Ok(value)
}
/// Used when deserializing an internally tagged enum because the content
/// will be used exactly once.
impl<'de, E> Deserializer<'de> for ContentDeserializer<'de, E>
where
    E: de::Error,
{
    type Error = E;
    // Replays whatever was buffered, preserving ownership/borrowed-ness of
    // strings and bytes.
    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Bool(v) => visitor.visit_bool(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            Content::F32(v) => visitor.visit_f32(v),
            Content::F64(v) => visitor.visit_f64(v),
            Content::Char(v) => visitor.visit_char(v),
            Content::String(v) => visitor.visit_string(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(v) => visitor.visit_byte_buf(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::Unit => visitor.visit_unit(),
            Content::None => visitor.visit_none(),
            Content::Some(v) => visitor.visit_some(ContentDeserializer::new(*v)),
            Content::Newtype(v) => visitor.visit_newtype_struct(ContentDeserializer::new(*v)),
            Content::Seq(v) => visit_content_seq(v, visitor),
            Content::Map(v) => visit_content_map(v, visitor),
        }
    }
    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Bool(v) => visitor.visit_bool(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // All integer widths are handled by the shared integer dispatcher.
    fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    // Floats accept both float- and integer-buffered content.
    fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_float(visitor)
    }
    fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_float(visitor)
    }
    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Char(v) => visitor.visit_char(v),
            Content::String(v) => visitor.visit_string(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_string(visitor)
    }
    // Strings accept byte-buffered content as well, leaving UTF-8 concerns
    // to the visitor.
    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::String(v) => visitor.visit_string(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(v) => visitor.visit_byte_buf(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_byte_buf(visitor)
    }
    // Bytes additionally accept a buffered sequence (e.g. a list of u8s).
    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::String(v) => visitor.visit_string(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(v) => visitor.visit_byte_buf(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::Seq(v) => visit_content_seq(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Any non-None/Some/Unit content is treated as a present value, since
    // some formats never emit an explicit Some marker.
    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::None => visitor.visit_none(),
            Content::Some(v) => visitor.visit_some(ContentDeserializer::new(*v)),
            Content::Unit => visitor.visit_unit(),
            _ => visitor.visit_some(self),
        }
    }
    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Unit => visitor.visit_unit(),
            // Allow deserializing newtype variant containing unit.
            //
            // #[derive(Deserialize)]
            // #[serde(tag = "result")]
            // enum Response<T> {
            //     Success(T),
            // }
            //
            // We want {"result":"Success"} to deserialize into Response<()>.
            Content::Map(ref v) if v.is_empty() => visitor.visit_unit(),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_unit_struct<V>(
        self,
        _name: &'static str,
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            // As a special case, allow deserializing untagged newtype
            // variant containing unit struct.
            //
            //     #[derive(Deserialize)]
            //     struct Info;
            //
            //     #[derive(Deserialize)]
            //     #[serde(tag = "topic")]
            //     enum Message {
            //         Info(Info),
            //     }
            //
            // We want {"topic":"Info"} to deserialize even though
            // ordinarily unit structs do not deserialize from empty map/seq.
            Content::Map(ref v) if v.is_empty() => visitor.visit_unit(),
            Content::Seq(ref v) if v.is_empty() => visitor.visit_unit(),
            _ => self.deserialize_any(visitor),
        }
    }
    // Formats may or may not have recorded the newtype wrapper; accept both.
    fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Newtype(v) => visitor.visit_newtype_struct(ContentDeserializer::new(*v)),
            _ => visitor.visit_newtype_struct(self),
        }
    }
    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Seq(v) => visit_content_seq(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_seq(visitor)
    }
    fn deserialize_tuple_struct<V>(
        self,
        _name: &'static str,
        _len: usize,
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_seq(visitor)
    }
    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Map(v) => visit_content_map(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Structs accept either map or sequence representations.
    fn deserialize_struct<V>(
        self,
        _name: &'static str,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Seq(v) => visit_content_seq(v, visitor),
            Content::Map(v) => visit_content_map(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Enums are replayed from either a single-entry map {variant: value}
    // or a bare string naming a unit variant.
    fn deserialize_enum<V>(
        self,
        _name: &str,
        _variants: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        let (variant, value) = match self.content {
            Content::Map(value) => {
                let mut iter = value.into_iter();
                let (variant, value) = match iter.next() {
                    Some(v) => v,
                    None => {
                        return Err(de::Error::invalid_value(
                            de::Unexpected::Map,
                            &"map with a single key",
                        ));
                    }
                };
                // enums are encoded in json as maps with a single key:value pair
                if iter.next().is_some() {
                    return Err(de::Error::invalid_value(
                        de::Unexpected::Map,
                        &"map with a single key",
                    ));
                }
                (variant, Some(value))
            }
            s @ Content::String(_) | s @ Content::Str(_) => (s, None),
            other => {
                return Err(de::Error::invalid_type(
                    other.unexpected(),
                    &"string or map",
                ));
            }
        };
        visitor.visit_enum(EnumDeserializer::new(variant, value))
    }
    // Identifiers replay string/bytes content; only the u8 and u64 integer
    // variants are accepted here.
    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::String(v) => visitor.visit_string(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(v) => visitor.visit_byte_buf(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U64(v) => visitor.visit_u64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // The buffered content is simply discarded without being walked.
    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        drop(self);
        visitor.visit_unit()
    }
}
impl<'de, E> ContentDeserializer<'de, E> {
/// private API, don't use
pub fn new(content: Content<'de>) -> Self {
ContentDeserializer {
content,
err: PhantomData,
}
}
}
// Newtype wrapper that routes buffered map keys through
// `ContentMapKeyDeserializer` instead of the plain `ContentDeserializer`.
struct MapKeyContent<'de>(Content<'de>);
// Lets `MapDeserializer` turn a wrapped key into its key-specific
// deserializer.
impl<'de, E> de::IntoDeserializer<'de, E> for MapKeyContent<'de>
where
    E: de::Error,
{
    type Deserializer = ContentMapKeyDeserializer<'de, E>;
    fn into_deserializer(self) -> Self::Deserializer {
        ContentMapKeyDeserializer {
            content: self.0,
            err: PhantomData,
        }
    }
}
// Deserializer for buffered map keys; integer requests go through the
// `map_key_integer_method!` implementations below.
struct ContentMapKeyDeserializer<'de, E> {
    // The buffered key to replay.
    content: Content<'de>,
    // Marker for the caller's error type.
    err: PhantomData<E>,
}
impl<'de, E> Deserializer<'de> for ContentMapKeyDeserializer<'de, E>
where
    E: de::Error,
{
    type Error = E;
    // Anything not handled specially below defers to the plain content
    // deserializer.
    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        ContentDeserializer::new(self.content).deserialize_any(visitor)
    }
    // Integer widths are generated by the shared map-key macro, which owns
    // the key content (`owned`).
    map_key_integer_method!(owned deserialize_i8, visit_i8, i8);
    map_key_integer_method!(owned deserialize_i16, visit_i16, i16);
    map_key_integer_method!(owned deserialize_i32, visit_i32, i32);
    map_key_integer_method!(owned deserialize_i64, visit_i64, i64);
    map_key_integer_method!(owned deserialize_u8, visit_u8, u8);
    map_key_integer_method!(owned deserialize_u16, visit_u16, u16);
    map_key_integer_method!(owned deserialize_u32, visit_u32, u32);
    map_key_integer_method!(owned deserialize_u64, visit_u64, u64);
    // Unwraps a buffered newtype wrapper around the key, if present.
    fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match self.content {
            Content::Newtype(v) => visitor.visit_newtype_struct(ContentMapKeyDeserializer {
                content: *v,
                err: PhantomData,
            }),
            _ => visitor.visit_newtype_struct(self),
        }
    }
    serde::forward_to_deserialize_any! {
        bool f32 f64 char str string bytes byte_buf option unit unit_struct
        seq tuple tuple_struct map struct enum identifier
        ignored_any
    }
}
/// Replays a buffered enum: a variant identifier plus its optional payload.
pub struct EnumDeserializer<'de, E>
where
    E: de::Error,
{
    // The buffered variant identifier.
    variant: Content<'de>,
    // The buffered variant payload; `None` for unit-style input.
    value: Option<Content<'de>>,
    // Marker for the caller's error type.
    err: PhantomData<E>,
}
impl<'de, E> EnumDeserializer<'de, E>
where
    E: de::Error,
{
    /// Pairs a buffered variant identifier with its optional payload.
    pub fn new(variant: Content<'de>, value: Option<Content<'de>>) -> EnumDeserializer<'de, E> {
        Self {
            err: PhantomData,
            variant,
            value,
        }
    }
}
// Deserializes the variant identifier through the seed, handing the payload
// over to a `VariantDeserializer`.
impl<'de, E> de::EnumAccess<'de> for EnumDeserializer<'de, E>
where
    E: de::Error,
{
    type Error = E;
    type Variant = VariantDeserializer<'de, Self::Error>;
    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), E>
    where
        V: de::DeserializeSeed<'de>,
    {
        let visitor = VariantDeserializer {
            value: self.value,
            err: PhantomData,
        };
        seed.deserialize(ContentDeserializer::new(self.variant))
            .map(|v| (v, visitor))
    }
}
/// Replays the payload of one buffered enum variant.
pub struct VariantDeserializer<'de, E>
where
    E: de::Error,
{
    // The buffered payload; `None` means unit-variant input.
    value: Option<Content<'de>>,
    // Marker for the caller's error type.
    err: PhantomData<E>,
}
impl<'de, E> de::VariantAccess<'de> for VariantDeserializer<'de, E>
where
    E: de::Error,
{
    type Error = E;
    // A present payload is still deserialized (as `()`), so formats that
    // wrote an explicit value for a unit variant remain accepted.
    fn unit_variant(self) -> Result<(), E> {
        match self.value {
            Some(value) => de::Deserialize::deserialize(ContentDeserializer::new(value)),
            None => Ok(()),
        }
    }
    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, E>
    where
        T: de::DeserializeSeed<'de>,
    {
        match self.value {
            Some(value) => seed.deserialize(ContentDeserializer::new(value)),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"newtype variant",
            )),
        }
    }
    // Tuple variants require buffered sequence input.
    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: de::Visitor<'de>,
    {
        match self.value {
            Some(Content::Seq(v)) => {
                de::Deserializer::deserialize_any(SeqDeserializer::new(v.into_iter()), visitor)
            }
            Some(other) => Err(de::Error::invalid_type(
                other.unexpected(),
                &"tuple variant",
            )),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"tuple variant",
            )),
        }
    }
    // Struct variants accept either map or sequence input.
    fn struct_variant<V>(
        self,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: de::Visitor<'de>,
    {
        match self.value {
            Some(Content::Map(v)) => {
                de::Deserializer::deserialize_any(MapDeserializer::new(v.into_iter()), visitor)
            }
            Some(Content::Seq(v)) => {
                de::Deserializer::deserialize_any(SeqDeserializer::new(v.into_iter()), visitor)
            }
            Some(other) => Err(de::Error::invalid_type(
                other.unexpected(),
                &"struct variant",
            )),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"struct variant",
            )),
        }
    }
}
/// Not public API.
///
/// Like `ContentDeserializer` but borrows the buffered tree, so the same
/// content can be replayed multiple times (used for untagged enums).
pub struct ContentRefDeserializer<'a, 'de: 'a, E> {
    // Borrowed buffered input to replay.
    content: &'a Content<'de>,
    // Marker for the caller's error type.
    err: PhantomData<E>,
}
impl<'a, 'de, E> ContentRefDeserializer<'a, 'de, E>
where
    E: de::Error,
{
    // Builds an invalid-type error describing the buffered content.
    // Cold: only reached on type mismatch.
    #[cold]
    fn invalid_type(self, exp: &dyn Expected) -> E {
        de::Error::invalid_type(self.content.unexpected(), exp)
    }
    // Shared dispatcher for all integer widths (copies the scalar out of
    // the borrowed content).
    fn deserialize_integer<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Like `deserialize_integer` but also accepts the float variants.
    fn deserialize_float<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::F32(v) => visitor.visit_f32(v),
            Content::F64(v) => visitor.visit_f64(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
}
// Replays a borrowed buffered sequence through the given visitor, checking
// at the end that every element was consumed.
fn visit_content_seq_ref<'a, 'de, V, E>(
    content: &'a [Content<'de>],
    visitor: V,
) -> Result<V::Value, E>
where
    V: Visitor<'de>,
    E: de::Error,
{
    let mut seq_visitor = SeqDeserializer::new(content.iter());
    let value = tri!(visitor.visit_seq(&mut seq_visitor));
    tri!(seq_visitor.end());
    Ok(value)
}
// Replays a borrowed buffered map through the given visitor. Keys are
// wrapped in `MapKeyContentRef` for map-key deserialization semantics.
fn visit_content_map_ref<'a, 'de, V, E>(
    content: &'a [(Content<'de>, Content<'de>)],
    visitor: V,
) -> Result<V::Value, E>
where
    V: Visitor<'de>,
    E: de::Error,
{
    let map = content.iter().map(|(k, v)| (MapKeyContentRef(k), &*v));
    let mut map_visitor = MapDeserializer::new(map);
    let value = tri!(visitor.visit_map(&mut map_visitor));
    tri!(map_visitor.end());
    Ok(value)
}
/// Used when deserializing an untagged enum because the content may need
/// to be used more than once.
impl<'de, 'a, E> Deserializer<'de> for ContentRefDeserializer<'a, 'de, E>
where
E: de::Error,
{
type Error = E;
    // Replays the borrowed content; owned strings/bytes are visited as
    // transient references (`visit_str`/`visit_bytes`), borrowed ones keep
    // their `'de` borrow.
    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::Bool(v) => visitor.visit_bool(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U16(v) => visitor.visit_u16(v),
            Content::U32(v) => visitor.visit_u32(v),
            Content::U64(v) => visitor.visit_u64(v),
            Content::I8(v) => visitor.visit_i8(v),
            Content::I16(v) => visitor.visit_i16(v),
            Content::I32(v) => visitor.visit_i32(v),
            Content::I64(v) => visitor.visit_i64(v),
            Content::F32(v) => visitor.visit_f32(v),
            Content::F64(v) => visitor.visit_f64(v),
            Content::Char(v) => visitor.visit_char(v),
            Content::String(ref v) => visitor.visit_str(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(ref v) => visitor.visit_bytes(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::Unit => visitor.visit_unit(),
            Content::None => visitor.visit_none(),
            Content::Some(ref v) => visitor.visit_some(ContentRefDeserializer::new(v)),
            Content::Newtype(ref v) => visitor.visit_newtype_struct(ContentRefDeserializer::new(v)),
            Content::Seq(ref v) => visit_content_seq_ref(v, visitor),
            Content::Map(ref v) => visit_content_map_ref(v, visitor),
        }
    }
    fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::Bool(v) => visitor.visit_bool(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // All integer widths are handled by the shared integer dispatcher.
    fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_i64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_integer(visitor)
    }
    // Floats accept both float- and integer-buffered content.
    fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_float(visitor)
    }
    fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_float(visitor)
    }
    // Chars also accept string-buffered content; the visitor decides
    // whether the string is a valid single character.
    fn deserialize_char<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::Char(v) => visitor.visit_char(v),
            Content::String(ref v) => visitor.visit_str(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Strings accept byte-buffered content as well.
    fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::String(ref v) => visitor.visit_str(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(ref v) => visitor.visit_bytes(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Owned strings cannot be produced from a borrow, so delegate to str.
    fn deserialize_string<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_str(visitor)
    }
    // Bytes additionally accept string content and a buffered sequence
    // (e.g. a list of u8s).
    fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::String(ref v) => visitor.visit_str(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(ref v) => visitor.visit_bytes(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::Seq(ref v) => visit_content_seq_ref(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Owned buffers cannot be produced from a borrow, so delegate to bytes.
    fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_bytes(visitor)
    }
    fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        // Covered by tests/test_enum_untagged.rs
        //      with_optional_field::*
        match *self.content {
            Content::None => visitor.visit_none(),
            Content::Some(ref v) => visitor.visit_some(ContentRefDeserializer::new(v)),
            Content::Unit => visitor.visit_unit(),
            // This case is to support data formats which do not encode an
            // indication whether a value is optional. An example of such a
            // format is JSON, and a counterexample is RON. When requesting
            // `deserialize_any` in JSON, the data format never performs
            // `Visitor::visit_some` but we still must be able to
            // deserialize the resulting Content into data structures with
            // optional fields.
            _ => visitor.visit_some(self),
        }
    }
    fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            Content::Unit => visitor.visit_unit(),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    // Unit structs replay exactly like unit; the name is not checked.
    fn deserialize_unit_struct<V>(
        self,
        _name: &'static str,
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        self.deserialize_unit(visitor)
    }
    fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value, E>
    where
        V: Visitor<'de>,
    {
        // Covered by tests/test_enum_untagged.rs
        // newtype_struct
        match *self.content {
            // Unwrap one buffered newtype layer and hand the inner value on.
            Content::Newtype(ref v) => visitor.visit_newtype_struct(ContentRefDeserializer::new(v)),
            // This case is to support data formats that encode newtype
            // structs and their underlying data the same, with no
            // indication whether a newtype wrapper was present. For example
            // JSON does this, while RON does not. In RON a newtype's name
            // is included in the serialized representation and it knows to
            // call `Visitor::visit_newtype_struct` from `deserialize_any`.
            // JSON's `deserialize_any` never calls `visit_newtype_struct`
            // but in this code we still must be able to deserialize the
            // resulting Content into newtypes.
            _ => visitor.visit_newtype_struct(self),
        }
    }
    fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            // Replay the buffered elements through a SeqAccess adapter.
            Content::Seq(ref v) => visit_content_seq_ref(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
fn deserialize_tuple<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_seq(visitor)
}
fn deserialize_tuple_struct<V>(
self,
_name: &'static str,
_len: usize,
visitor: V,
) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
self.deserialize_seq(visitor)
}
    fn deserialize_map<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            // Replay the buffered key/value pairs through a MapAccess adapter.
            Content::Map(ref v) => visit_content_map_ref(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_struct<V>(
        self,
        _name: &'static str,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        match *self.content {
            // Structs may be buffered either as a sequence of field values
            // or as a map of field name to value; both are accepted.
            Content::Seq(ref v) => visit_content_seq_ref(v, visitor),
            Content::Map(ref v) => visit_content_map_ref(v, visitor),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_enum<V>(
        self,
        _name: &str,
        _variants: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Externally tagged representation: either a bare string naming a
        // unit variant, or a single-entry map `{variant: value}`.
        let (variant, value) = match *self.content {
            Content::Map(ref value) => {
                let mut iter = value.iter();
                let (variant, value) = match iter.next() {
                    Some(v) => v,
                    None => {
                        return Err(de::Error::invalid_value(
                            de::Unexpected::Map,
                            &"map with a single key",
                        ));
                    }
                };
                // enums are encoded in json as maps with a single key:value pair
                if iter.next().is_some() {
                    return Err(de::Error::invalid_value(
                        de::Unexpected::Map,
                        &"map with a single key",
                    ));
                }
                (variant, Some(value))
            }
            // A bare string is a unit variant: variant name, no payload.
            ref s @ Content::String(_) | ref s @ Content::Str(_) => (s, None),
            ref other => {
                return Err(de::Error::invalid_type(
                    other.unexpected(),
                    &"string or map",
                ));
            }
        };
        visitor.visit_enum(EnumRefDeserializer {
            variant,
            value,
            err: PhantomData,
        })
    }
    fn deserialize_identifier<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Identifiers (field/variant names) may be buffered as strings,
        // bytes, or unsigned integers (e.g. field indices).
        match *self.content {
            Content::String(ref v) => visitor.visit_str(v),
            Content::Str(v) => visitor.visit_borrowed_str(v),
            Content::ByteBuf(ref v) => visitor.visit_bytes(v),
            Content::Bytes(v) => visitor.visit_borrowed_bytes(v),
            Content::U8(v) => visitor.visit_u8(v),
            Content::U64(v) => visitor.visit_u64(v),
            _ => Err(self.invalid_type(&visitor)),
        }
    }
    fn deserialize_ignored_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // The value is already fully buffered in `Content`; nothing needs
        // to be consumed, so just report a unit to the visitor.
        visitor.visit_unit()
    }
}
impl<'a, 'de, E> ContentRefDeserializer<'a, 'de, E> {
    /// private API, don't use
    ///
    /// Wrap a borrowed, already-buffered `Content` so it can be replayed
    /// through the `Deserializer` trait without taking ownership.
    pub fn new(content: &'a Content<'de>) -> Self {
        ContentRefDeserializer {
            content,
            err: PhantomData,
        }
    }
}
// Manual impls instead of `#[derive(Copy, Clone)]`: the derive would add
// `E: Copy`/`E: Clone` bounds, but only a shared reference and a
// `PhantomData<E>` are stored, so no bound on `E` is needed.
impl<'a, 'de: 'a, E> Copy for ContentRefDeserializer<'a, 'de, E> {}
impl<'a, 'de: 'a, E> Clone for ContentRefDeserializer<'a, 'de, E> {
    fn clone(&self) -> Self {
        *self
    }
}
struct MapKeyContentRef<'a, 'de: 'a>(&'a Content<'de>);
// Entry point used when a buffered map key must be deserialized: produces
// the key-specific deserializer rather than the general content one.
impl<'a, 'de, E> de::IntoDeserializer<'de, E> for MapKeyContentRef<'a, 'de>
where
    E: de::Error,
{
    type Deserializer = ContentRefMapKeyDeserializer<'a, 'de, E>;
    fn into_deserializer(self) -> Self::Deserializer {
        ContentRefMapKeyDeserializer {
            content: self.0,
            err: PhantomData,
        }
    }
}
/// Deserializer for a borrowed `Content` that appeared as a map key.
struct ContentRefMapKeyDeserializer<'a, 'de: 'a, E> {
    // The buffered key being replayed.
    content: &'a Content<'de>,
    // Pins the caller's error type without storing a value of it.
    err: PhantomData<E>,
}
// Manual impls to avoid the `E: Copy`/`E: Clone` bounds a derive would add;
// only a reference and PhantomData are held.
impl<'a, 'de: 'a, E> Copy for ContentRefMapKeyDeserializer<'a, 'de, E> {}
impl<'a, 'de: 'a, E> Clone for ContentRefMapKeyDeserializer<'a, 'de, E> {
    fn clone(&self) -> Self {
        *self
    }
}
// Map-key deserializer: self-describing text formats (e.g. JSON) can only
// encode string keys, so the integer methods are generated by
// `map_key_integer_method!`, which presumably coerces string-keyed content
// into the requested integer type (macro defined elsewhere in this file —
// TODO confirm; see tests/dummy.rs `test_string_key_map_to_integer_key`).
impl<'de, 'a, E> Deserializer<'de> for ContentRefMapKeyDeserializer<'a, 'de, E>
where
    E: de::Error,
{
    type Error = E;
    fn deserialize_any<V>(self, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Everything not handled below behaves like the plain content
        // deserializer.
        ContentRefDeserializer::new(self.content).deserialize_any(visitor)
    }
    map_key_integer_method!(ref deserialize_i8, visit_i8, i8);
    map_key_integer_method!(ref deserialize_i16, visit_i16, i16);
    map_key_integer_method!(ref deserialize_i32, visit_i32, i32);
    map_key_integer_method!(ref deserialize_i64, visit_i64, i64);
    map_key_integer_method!(ref deserialize_u8, visit_u8, u8);
    map_key_integer_method!(ref deserialize_u16, visit_u16, u16);
    map_key_integer_method!(ref deserialize_u32, visit_u32, u32);
    map_key_integer_method!(ref deserialize_u64, visit_u64, u64);
    fn deserialize_newtype_struct<V>(self, _name: &str, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: Visitor<'de>,
    {
        // Unwrap a buffered newtype layer, staying in map-key mode so the
        // inner key still gets string-to-integer coercion; otherwise let
        // the visitor treat self as the newtype's content (supports
        // newtype map keys like `Id(u64)`).
        match *self.content {
            Content::Newtype(ref v) => visitor.visit_newtype_struct(ContentRefMapKeyDeserializer {
                content: v,
                err: PhantomData,
            }),
            _ => visitor.visit_newtype_struct(self),
        }
    }
    serde::forward_to_deserialize_any! {
        bool f32 f64 char str string bytes byte_buf option unit unit_struct
        seq tuple tuple_struct map struct enum identifier
        ignored_any
    }
}
/// `EnumAccess` implementation over borrowed buffered content.
struct EnumRefDeserializer<'a, 'de: 'a, E>
where
    E: de::Error,
{
    // Variant name (string) or key content identifying the variant.
    variant: &'a Content<'de>,
    // Variant payload; `None` for unit variants encoded as a bare string.
    value: Option<&'a Content<'de>>,
    err: PhantomData<E>,
}
impl<'de, 'a, E> de::EnumAccess<'de> for EnumRefDeserializer<'a, 'de, E>
where
    E: de::Error,
{
    type Error = E;
    type Variant = VariantRefDeserializer<'a, 'de, Self::Error>;
    fn variant_seed<V>(self, seed: V) -> Result<(V::Value, Self::Variant), Self::Error>
    where
        V: de::DeserializeSeed<'de>,
    {
        // Hand the payload to the variant deserializer, then resolve the
        // variant identifier from the buffered variant content.
        let visitor = VariantRefDeserializer {
            value: self.value,
            err: PhantomData,
        };
        seed.deserialize(ContentRefDeserializer::new(self.variant))
            .map(|v| (v, visitor))
    }
}
/// `VariantAccess` implementation driving the payload of a buffered enum.
struct VariantRefDeserializer<'a, 'de: 'a, E>
where
    E: de::Error,
{
    // Payload content; `None` means the variant had no associated value.
    value: Option<&'a Content<'de>>,
    err: PhantomData<E>,
}
impl<'de, 'a, E> de::VariantAccess<'de> for VariantRefDeserializer<'a, 'de, E>
where
    E: de::Error,
{
    type Error = E;
    // A unit variant with a payload still deserializes the payload as `()`
    // to validate it; no payload is trivially a unit.
    fn unit_variant(self) -> Result<(), E> {
        match self.value {
            Some(value) => de::Deserialize::deserialize(ContentRefDeserializer::new(value)),
            // Covered by tests/test_annotations.rs
            // test_partially_untagged_adjacently_tagged_enum
            // Covered by tests/test_enum_untagged.rs
            // newtype_enum::unit
            None => Ok(()),
        }
    }
    // Newtype variants require a payload; a bare variant name is an error.
    fn newtype_variant_seed<T>(self, seed: T) -> Result<T::Value, E>
    where
        T: de::DeserializeSeed<'de>,
    {
        match self.value {
            // Covered by tests/test_annotations.rs
            // test_partially_untagged_enum_desugared
            // test_partially_untagged_enum_generic
            // Covered by tests/test_enum_untagged.rs
            // newtype_enum::newtype
            Some(value) => seed.deserialize(ContentRefDeserializer::new(value)),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"newtype variant",
            )),
        }
    }
    // Tuple variants only accept a buffered sequence payload.
    fn tuple_variant<V>(self, _len: usize, visitor: V) -> Result<V::Value, Self::Error>
    where
        V: de::Visitor<'de>,
    {
        match self.value {
            // Covered by tests/test_annotations.rs
            // test_partially_untagged_enum
            // test_partially_untagged_enum_desugared
            // Covered by tests/test_enum_untagged.rs
            // newtype_enum::tuple0
            // newtype_enum::tuple2
            Some(Content::Seq(v)) => visit_content_seq_ref(v, visitor),
            Some(other) => Err(de::Error::invalid_type(
                other.unexpected(),
                &"tuple variant",
            )),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"tuple variant",
            )),
        }
    }
    // Struct variants accept a map payload or (positionally) a sequence.
    fn struct_variant<V>(
        self,
        _fields: &'static [&'static str],
        visitor: V,
    ) -> Result<V::Value, Self::Error>
    where
        V: de::Visitor<'de>,
    {
        match self.value {
            // Covered by tests/test_enum_untagged.rs
            // newtype_enum::struct_from_map
            Some(Content::Map(v)) => visit_content_map_ref(v, visitor),
            // Covered by tests/test_enum_untagged.rs
            // newtype_enum::struct_from_seq
            // newtype_enum::empty_struct_from_seq
            Some(Content::Seq(v)) => visit_content_seq_ref(v, visitor),
            Some(other) => Err(de::Error::invalid_type(
                other.unexpected(),
                &"struct variant",
            )),
            None => Err(de::Error::invalid_type(
                de::Unexpected::UnitVariant,
                &"struct variant",
            )),
        }
    }
}
// A content deserializer is already a deserializer; the conversion is the
// identity.
impl<'de, E> de::IntoDeserializer<'de, E> for ContentDeserializer<'de, E>
where
    E: de::Error,
{
    type Deserializer = Self;
    fn into_deserializer(self) -> Self {
        self
    }
}
// Same identity conversion for the borrowing variant.
impl<'de, 'a, E> de::IntoDeserializer<'de, E> for ContentRefDeserializer<'a, 'de, E>
where
    E: de::Error,
{
    type Deserializer = Self;
    fn into_deserializer(self) -> Self {
        self
    }
}
/// Visitor for deserializing an internally tagged unit variant.
///
/// Not public API.
pub struct InternallyTaggedUnitVisitor<'a> {
    // Enum and variant names, used only in the `expecting` message.
    type_name: &'a str,
    variant_name: &'a str,
}
impl<'a> InternallyTaggedUnitVisitor<'a> {
    /// Not public API.
    ///
    /// The names are only used for error reporting.
    pub fn new(type_name: &'a str, variant_name: &'a str) -> Self {
        InternallyTaggedUnitVisitor {
            type_name,
            variant_name,
        }
    }
}
impl<'de, 'a> Visitor<'de> for InternallyTaggedUnitVisitor<'a> {
    type Value = ();
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(
            formatter,
            "unit variant {}::{}",
            self.type_name, self.variant_name
        )
    }
    // Any sequence is accepted as a unit; its elements are not examined.
    fn visit_seq<S>(self, _: S) -> Result<(), S::Error>
    where
        S: SeqAccess<'de>,
    {
        Ok(())
    }
    // A map is accepted as a unit after draining (and discarding) all of
    // its remaining entries, e.g. the tag field itself.
    fn visit_map<M>(self, mut access: M) -> Result<(), M::Error>
    where
        M: MapAccess<'de>,
    {
        while tri!(access.next_entry::<IgnoredAny, IgnoredAny>()).is_some() {}
        Ok(())
    }
}
/// Visitor for deserializing an untagged unit variant.
///
/// Not public API.
pub struct UntaggedUnitVisitor<'a> {
    // Enum and variant names, used only in the `expecting` message.
    type_name: &'a str,
    variant_name: &'a str,
}
impl<'a> UntaggedUnitVisitor<'a> {
    /// Not public API.
    ///
    /// The names are only used for error reporting.
    pub fn new(type_name: &'a str, variant_name: &'a str) -> Self {
        UntaggedUnitVisitor {
            type_name,
            variant_name,
        }
    }
}
impl<'de, 'a> Visitor<'de> for UntaggedUnitVisitor<'a> {
    type Value = ();
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        write!(
            formatter,
            "unit variant {}::{}",
            self.type_name, self.variant_name
        )
    }
    // Both an explicit unit and a format-level "null"/absent value count
    // as the unit variant.
    fn visit_unit<E>(self) -> Result<(), E>
    where
        E: de::Error,
    {
        Ok(())
    }
    fn visit_none<E>(self) -> Result<(), E>
    where
        E: de::Error,
    {
        Ok(())
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit/src/lib.rs | Rust | pub use serde_implicit_proc::Deserialize;
#[doc(hidden)]
#[path = "private.rs"]
pub mod __private;
pub mod content;
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit/src/private.rs | Rust | use std::fmt;
use std::marker::PhantomData;
use serde::de::{self, IntoDeserializer, MapAccess, Unexpected};
use serde::forward_to_deserialize_any;
use serde::{Deserialize, de::Visitor};
pub use crate::content::{Content, ContentDeserializer, ContentRefDeserializer};
/// Visitor that buffers the input as `Content` while extracting the
/// implicit tag `T` from it (see `visit_map` for the selection rules).
pub struct TaggedContentVisitor<T> {
    // Text for serde's "expected ..." error messages.
    expecting: &'static str,
    // Default tag used when the input carries no recognizable tag
    // (scalars, or maps without a tag field).
    fallthrough: Option<T>,
}
impl<T> TaggedContentVisitor<T> {
    /// Visitor for the content of an internally tagged enum with the given tag name.
    ///
    /// `fallthrough`, when given, is the tag to assume for inputs that do
    /// not carry one themselves.
    pub fn new(expecting: &'static str, fallthrough: Option<T>) -> Self {
        TaggedContentVisitor {
            expecting,
            fallthrough,
        }
    }
}
impl<'de, T: Deserialize<'de>> Visitor<'de> for TaggedContentVisitor<T>
where
T: Deserialize<'de>,
{
type Value = (T, Content<'de>);
fn expecting(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str(self.expecting)
}
// todo: add support for sequences?
// fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
// where
// S: SeqAccess<'de>,
// {
// let tag = match seq.next_element()? {
// Some(tag) => tag,
// None => {
// return Err(de::Error::missing_field("blerhg".into()));
// }
// };
// let rest = de::value::SeqAccessDeserializer::new(seq);
// Ok((tag, Content::deserialize(rest)?))
// }
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
match self.fallthrough {
Some(default) => Ok((default, Content::String(v.into()))),
None => Err(de::Error::invalid_type(Unexpected::Str(v), &self.expecting)),
}
}
fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
where
E: de::Error,
{
match self.fallthrough {
Some(default) => Ok((default, Content::U64(v))),
None => Err(de::Error::invalid_type(
Unexpected::Unsigned(v),
&self.expecting,
)),
}
}
fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
where
E: de::Error,
{
match self.fallthrough {
Some(default) => Ok((default, Content::I64(v))),
None => Err(de::Error::invalid_type(
Unexpected::Signed(v),
&self.expecting,
)),
}
}
fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
where
M: MapAccess<'de>,
{
let mut tag: Option<(T, String)> = None;
let mut vec = Vec::<(Content, Content)>::with_capacity(0); // todo
while let Some(k) = map.next_key()? {
match k {
Content::String(_) | Content::Str(_) | Content::Bytes(_) | Content::ByteBuf(_) => {
match T::deserialize::<ContentDeserializer<'_, M::Error>>(
k.clone().into_deserializer(),
) {
// failed to parse a key, must be a non-tag field
Err(_) => {
let v = map.next_value()?;
vec.push((k, v));
}
Ok(t) => {
let v: Content = map.next_value()?;
let key_name = k.as_str().unwrap_or("unknown");
// Skip null values — they can't be a real tag
if matches!(v, Content::None | Content::Unit) {
vec.push((k, v));
} else if let Some((_, prev_key)) = &tag {
return Err(de::Error::custom(format_args!(
"found multiple implicit tag fields: `{prev_key}` and `{key_name}`",
)));
} else {
tag = Some((t, key_name.to_owned()));
vec.push((k, v));
}
}
}
}
_ => {
let v = map.next_value()?;
vec.push((k, v));
}
};
}
match (tag, self.fallthrough) {
(None, None) => Err(de::Error::missing_field("tag was not found".into())),
(None, Some(default)) => Ok((default, Content::Map(vec))),
(Some((tag, _)), _) => Ok((tag, Content::Map(vec))),
}
}
}
/// Deserialize a missing field. If the field is `Option<T>` this returns
/// `None`. For all other types, returns a "missing field" error. This
/// replicates the same mechanism used by serde's own derive macro.
pub fn missing_field<'de, V, E>(field: &'static str) -> Result<V, E>
where
    V: Deserialize<'de>,
    E: de::Error,
{
    // Deserializer that answers every request with "missing field", except
    // `deserialize_option`, which reports the absent value as `None`.
    struct MissingFieldDeserializer<E>(&'static str, PhantomData<E>);
    impl<'de, E> serde::Deserializer<'de> for MissingFieldDeserializer<E>
    where
        E: de::Error,
    {
        type Error = E;
        fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value, E>
        where
            V: Visitor<'de>,
        {
            Err(de::Error::missing_field(self.0))
        }
        fn deserialize_option<V>(self, visitor: V) -> Result<V::Value, E>
        where
            V: Visitor<'de>,
        {
            visitor.visit_none()
        }
        forward_to_deserialize_any! {
            bool i8 i16 i32 i64 i128 u8 u16 u32 u64 u128 f32 f64 char str string
            bytes byte_buf unit unit_struct newtype_struct seq tuple
            tuple_struct map struct enum identifier ignored_any
        }
    }
    let deserializer = MissingFieldDeserializer(field, PhantomData);
    Deserialize::deserialize(deserializer)
}
/// Split the tag out of buffered `Content`.
///
/// For a sequence, removes and returns the element at `index` together
/// with the remaining elements (re-wrapped as `Content::Seq`). For any
/// other content, `index` must be 0 and the value itself is the tag, with
/// no remainder (`None`).
///
/// Errors (all reported via `missing_field`, messages unchanged):
/// empty sequence, `index` past the end, or nonzero `index` on a
/// non-sequence.
pub fn extract_at_index<'de, E: serde::de::Error>(
    c: Content<'de>,
    index: usize,
) -> ::std::result::Result<(Content<'de>, Option<Content<'de>>), E> {
    match c {
        Content::Seq(mut elems) => {
            // The empty case gets its own, clearer diagnostic.
            if elems.is_empty() {
                Err(E::missing_field("missing tag: sequence is empty"))
            } else if index >= elems.len() {
                Err(E::missing_field("tag index out of bounds"))
            } else {
                // O(n) removal is fine: this runs once per value.
                Ok((elems.remove(index), Some(Content::Seq(elems))))
            }
        }
        other => {
            if index == 0 {
                Ok((other, None))
            } else {
                Err(E::missing_field("tag index out of bounds for non-sequence"))
            }
        }
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit/tests/dummy.rs | Rust | use serde_json::json;
/// Struct-variant tagging basics: the tag field's name/type selects the
/// variant; unknown extra fields are tolerated; wrong field types and
/// missing fields produce the expected serde error strings.
#[test]
fn test_basic() {
    #[allow(dead_code)]
    #[derive(serde_implicit_proc::Deserialize, Debug)]
    // #[serde(untagged)]
    enum MultiTypeTag {
        StringVariant {
            #[serde_implicit(tag)]
            string_tag: String,
            value: u32,
        },
        NumberVariant {
            #[serde_implicit(tag)]
            number_tag: u64,
            value: String,
            unique_field: String,
        },
        BoolVariant {
            #[serde_implicit(tag)]
            bool_tag: bool,
            value: Vec<String>,
        },
    }
    let res: Result<MultiTypeTag, _> =
        serde_json::from_value(json!({ "string_tag": "", "value": 0 }));
    assert!(res.is_ok());
    // Unknown fields are allowed alongside the tag.
    let res: Result<MultiTypeTag, _> =
        serde_json::from_value(json!({ "string_tag": "", "value": 0, "extra_field": "1234" }));
    assert!(res.is_ok());
    // Tag matched but a field has the wrong type: error mentions the field type.
    let res: Result<MultiTypeTag, _> =
        serde_json::from_value(json!({ "string_tag": "", "value": "straing" }));
    let err = res.unwrap_err();
    assert!(
        matches!(
            &*err.to_string(),
            r#"invalid type: string "straing", expected u32"#
        ),
        "{err}",
    );
    // Tag matched but a required field is absent.
    let res: Result<MultiTypeTag, _> = serde_json::from_value(json!({ "string_tag": "" }));
    let err = res.unwrap_err();
    assert!(
        matches!(&*err.to_string(), r#"missing field `value`"#),
        "{err}",
    );
    // output specific error message about `unique_field` (if constructor is `deny_unknown_fields`)
    // let res: Result<MultiTypeTag, _> =
    // serde_json::from_value(json!({ "string_tag": "", "unique_field": "" }));
    // let err = res.unwrap_err();
    // assert!(
    // matches!(&*err.to_string(), r#"missing field `value`"#),
    // "{err}",
    // );
}
/// Tuple variants are selected by shape: `[bool, u32]` → Case1,
/// a single-element `[u32]` → Case2.
#[test]
fn tuple_basic() {
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(bool, u32),
        Case2(u32),
    }
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([true, 0]));
    assert!(res.is_ok());
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([0]));
    assert_eq!(res.unwrap(), TupleEnum::Case2(0));
}
/// Documents commit semantics: once a variant's tag matches, later
/// variants are never tried, so overlapping tuple variants can be
/// unreachable.
#[test]
fn tuple_overlap() {
    // Because `serde-implicit` commits to the first variant which parses a tag
    // with tuple enums, this can lead to variants being impossible to deserialize
    // like `Case2` is here.
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(bool, u32),
        Case2(bool, bool),
    }
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([true, true]));
    let err = res.unwrap_err();
    assert!(
        matches!(
            &*err.to_string(),
            r#"TupleEnum::Case1: invalid type: boolean `true`, expected u32"#
        ),
        "{err}",
    );
}
/// A variant without a `#[serde_implicit(tag)]` field acts as a
/// fallthrough: scalars deserialize into it, while tagged maps still
/// select the tagged variant.
#[test]
fn fallthrough_basic() {
    #[allow(dead_code)]
    #[derive(serde_implicit_proc::Deserialize, Debug)]
    enum EnumWithFallThrough<T> {
        Multiple {
            #[serde_implicit(tag)]
            variants: Vec<u32>,
        },
        Single {
            one: T,
        },
    }
    // #[derive(serde::Deserialize)]
    // struct Other {
    // field: u32,
    // }
    let res: Result<EnumWithFallThrough<u32>, _> = serde_json::from_value(json!(42));
    res.unwrap();
    // A float does not fit the fallthrough's `u32` payload.
    let res: Result<EnumWithFallThrough<u32>, _> = serde_json::from_value(json!(42.5));
    let err = res.unwrap_err();
    assert!(
        matches!(
            &*err.to_string(),
            // todo provide more specific diagnostics
            r#"invalid type: floating point `42.5`, expected EnumWithFallThrough"#
        ),
        "{err}",
    );
    let res: Result<EnumWithFallThrough<u32>, _> =
        serde_json::from_value(json!({"variants": [32]}));
    res.unwrap();
}
/// `#[serde_implicit(tag)]` may sit in the middle of a tuple variant; the
/// element at that position decides the variant.
#[test]
fn tuple_custom_tag_position_middle() {
    // Test tag at position 1 (middle position)
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(bool, #[serde_implicit(tag)] String, u32),
        Case2(u32, #[serde_implicit(tag)] bool),
    }
    // Case1: [false, "hello", 42] with tag "hello" at position 1
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([false, "hello", 42]));
    assert!(res.is_ok());
    assert_eq!(
        res.unwrap(),
        TupleEnum::Case1(false, "hello".to_string(), 42)
    );
    // Case2: [99, true] with tag true at position 1
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([99, true]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), TupleEnum::Case2(99, true));
}
/// `#[serde_implicit(tag)]` on the last tuple element also works, as does
/// an explicit tag on a single-element variant.
#[test]
fn tuple_custom_tag_position_last() {
    // Test tag at last position
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(u32, bool, #[serde_implicit(tag)] String),
        Case2(#[serde_implicit(tag)] u64),
    }
    // Case1: [42, true, "tag"] with tag "tag" at position 2 (last)
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([42, true, "tag"]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), TupleEnum::Case1(42, true, "tag".to_string()));
    // Case2: [999] with tag 999 at position 0
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([999]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), TupleEnum::Case2(999));
}
/// Different variants of one enum may place their tag at different
/// positions (default first, explicit middle, explicit last).
#[test]
fn tuple_mixed_tag_positions() {
    // Test different tag positions across variants
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum MixedEnum {
        // Tag at position 0 (default, no attribute)
        First(String, u32),
        // Tag at position 1
        Second(bool, #[serde_implicit(tag)] u32, String),
        // Tag at position 2 (last)
        Third(u32, bool, #[serde_implicit(tag)] String),
    }
    // First variant: ["hello", 42] with tag "hello" at position 0
    let res: Result<MixedEnum, _> = serde_json::from_value(json!(["hello", 42]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), MixedEnum::First("hello".to_string(), 42));
    // Second variant: [true, 123, "world"] with tag 123 at position 1
    let res: Result<MixedEnum, _> = serde_json::from_value(json!([true, 123, "world"]));
    assert!(res.is_ok());
    assert_eq!(
        res.unwrap(),
        MixedEnum::Second(true, 123, "world".to_string())
    );
    // Third variant: [99, false, "tag"] with tag "tag" at position 2
    let res: Result<MixedEnum, _> = serde_json::from_value(json!([99, false, "tag"]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), MixedEnum::Third(99, false, "tag".to_string()));
}
/// Input matching no variant's tag position/type is rejected.
#[test]
fn tuple_custom_tag_no_match() {
    #[derive(serde_implicit::Deserialize, Debug)]
    enum TupleEnum {
        Case1(#[serde_implicit(tag)] String, u32),
        Case2(bool, #[serde_implicit(tag)] u32),
    }
    // [42, "hello"] doesn't match Case1 (expects String at pos 0) or Case2 (expects bool at pos 0)
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([42, "hello"]));
    assert!(res.is_err());
}
/// Placing tags at distinct positions disambiguates variants that would
/// otherwise overlap (cf. `tuple_overlap`).
#[test]
fn tuple_custom_tag_overlapping_resolved() {
    // With custom tag positions, the overlapping issue from tuple_overlap test is resolved
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(bool, #[serde_implicit(tag)] u32), // Tag at position 1
        Case2(#[serde_implicit(tag)] bool, bool), // Tag at position 0
    }
    // [true, true] should now match Case2 (tag at position 0)
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([true, true]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), TupleEnum::Case2(true, true));
    // [false, 42] should match Case1 (tag at position 1)
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([false, 42]));
    assert!(res.is_ok());
    assert_eq!(res.unwrap(), TupleEnum::Case1(false, 42));
}
/// Once a tag matches, the variant is committed: later variants that
/// would have succeeded are NOT tried, and the committed variant's error
/// is surfaced.
#[test]
fn tuple_commit_semantics_verification() {
    // Verify that tag match commits to the variant
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TupleEnum {
        Case1(bool, u32),
        Case2(bool, bool),
    }
    // [false, false] - tag matches Case1 (bool at pos 0), commits, then fails on u32
    // Should NOT fall through to Case2, even though Case2 would succeed
    let res: Result<TupleEnum, _> = serde_json::from_value(json!([false, false]));
    let err = res.unwrap_err();
    assert!(
        err.to_string().contains("expected u32") || err.to_string().contains("invalid type"),
        "Expected error about u32, got: {}",
        err
    );
}
/// `#[serde_implicit(flatten)]` variants act as a fallback after tagged
/// and untagged tuple variants fail.
#[test]
fn tuple_flatten_basic() {
    // Test basic flatten functionality
    #[derive(serde::Deserialize, Debug, PartialEq)]
    struct A(String, bool);
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum Implicit {
        Normal(bool),
        Tagged(u64, #[serde_implicit(tag)] u32),
        Nested(#[serde_implicit(flatten)] A),
    }
    // Should match Normal variant first
    let res: Result<Implicit, _> = serde_json::from_value(json!([false]));
    assert_eq!(res.unwrap(), Implicit::Normal(false));
    // Should match Tagged variant (tag at position 1)
    let res: Result<Implicit, _> = serde_json::from_value(json!([42, 99]));
    assert_eq!(res.unwrap(), Implicit::Tagged(42, 99));
    // Should match Nested (flatten) variant as fallback
    let res: Result<Implicit, _> = serde_json::from_value(json!(["hello", true]));
    assert_eq!(res.unwrap(), Implicit::Nested(A("hello".to_string(), true)));
}
/// Multiple flatten variants are attempted in declaration order until one
/// succeeds.
#[test]
fn tuple_flatten_multiple() {
    // Test multiple flatten variants tried sequentially
    #[derive(serde::Deserialize, Debug, PartialEq)]
    struct StringBool(String, bool);
    #[derive(serde::Deserialize, Debug, PartialEq)]
    struct StringU64(String, u64);
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum Multi {
        Normal(bool),
        Flatten1(#[serde_implicit(flatten)] StringBool),
        Flatten2(#[serde_implicit(flatten)] StringU64),
    }
    // Should match Normal variant
    let res: Result<Multi, _> = serde_json::from_value(json!([true]));
    assert_eq!(res.unwrap(), Multi::Normal(true));
    // Should match Flatten1 variant (String, bool)
    let res: Result<Multi, _> = serde_json::from_value(json!(["test", false]));
    assert_eq!(
        res.unwrap(),
        Multi::Flatten1(StringBool("test".to_string(), false))
    );
    // Should match Flatten2 variant (String, u64)
    let res: Result<Multi, _> = serde_json::from_value(json!(["test", 42]));
    assert_eq!(
        res.unwrap(),
        Multi::Flatten2(StringU64("test".to_string(), 42))
    );
}
/// Flatten variants are consulted only after every regular variant fails.
#[test]
fn tuple_flatten_fallback_only() {
    // Test that flatten variants are only tried after all regular variants fail
    #[derive(serde::Deserialize, Debug, PartialEq)]
    struct Fallback(String, String);
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TestEnum {
        First(bool),
        Second(u64),
        Fall(#[serde_implicit(flatten)] Fallback),
    }
    // Regular variants should be tried first
    let res: Result<TestEnum, _> = serde_json::from_value(json!([true]));
    assert_eq!(res.unwrap(), TestEnum::First(true));
    let res: Result<TestEnum, _> = serde_json::from_value(json!([123]));
    assert_eq!(res.unwrap(), TestEnum::Second(123));
    // Flatten variant is only tried when others fail
    let res: Result<TestEnum, _> = serde_json::from_value(json!(["foo", "bar"]));
    assert_eq!(
        res.unwrap(),
        TestEnum::Fall(Fallback("foo".to_string(), "bar".to_string()))
    );
}
/// When neither regular nor flatten variants match, the error is the
/// generic "data did not match any variant".
#[test]
fn tuple_flatten_no_match() {
    // Test error when no variant matches
    #[derive(serde::Deserialize, Debug, PartialEq)]
    struct OnlyBools(bool, bool);
    #[derive(serde_implicit::Deserialize, Debug, PartialEq)]
    enum TestEnum {
        First(u64),
        Second(#[serde_implicit(flatten)] OnlyBools),
    }
    // Should fail because it's not a u64 and not two bools
    let res: Result<TestEnum, _> = serde_json::from_value(json!(["string", "string"]));
    assert!(res.is_err());
    let err = res.unwrap_err();
    assert!(err.to_string().contains("data did not match any variant"));
}
#[test]
fn ui() {
let t = trybuild::TestCases::new();
t.compile_fail("tests/ui/*.rs");
}
/// JSON string map keys ("0", "1") are coerced into integer-keyed maps,
/// exercising the buffered map-key deserializer.
#[test]
fn test_string_key_map_to_integer_key() {
    use std::collections::HashMap;
    #[derive(serde_implicit_proc::Deserialize, Debug)]
    enum WithMap {
        Variant {
            #[serde_implicit(tag)]
            tag: String,
            data: HashMap<u32, String>,
        },
    }
    let res: Result<WithMap, _> =
        serde_json::from_value(json!({"tag": "hello", "data": {"0": "zero", "1": "one"}}));
    let val = res.unwrap();
    match val {
        WithMap::Variant { data, .. } => {
            assert_eq!(data.len(), 2);
            assert_eq!(data[&0], "zero");
            assert_eq!(data[&1], "one");
        }
    }
}
/// Newtype wrappers around integers also work as map keys: the key-mode
/// deserializer unwraps the newtype and still coerces string keys.
#[test]
fn test_newtype_struct_map_key() {
    use std::collections::HashMap;
    #[derive(Debug, Clone, Copy, Hash, Eq, PartialEq, serde::Serialize, serde::Deserialize)]
    struct Id(u64);
    #[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug)]
    enum WithNewtypeKey {
        Variant {
            #[serde_implicit(tag)]
            tag: String,
            data: HashMap<Id, String>,
        },
    }
    let res: Result<WithNewtypeKey, _> =
        serde_json::from_value(json!({"tag": "hello", "data": {"0": "zero", "42": "forty-two"}}));
    let val = res.unwrap();
    match val {
        WithNewtypeKey::Variant { data, .. } => {
            assert_eq!(data.len(), 2);
            assert_eq!(data[&Id(0)], "zero");
            assert_eq!(data[&Id(42)], "forty-two");
        }
    }
}
/// Null tag values should be ignored: if a tag key is present but its value is
/// `null`, the visitor should skip it and keep looking for a non-null tag.
/// This covers evolving schemas where an older variant's data may contain a
/// newer variant's tag key set to `null`.
#[test]
fn test_null_tag_value_skipped() {
    #[allow(dead_code)]
    #[derive(serde_implicit_proc::Deserialize, Debug)]
    enum Schema {
        Old {
            #[serde_implicit(tag)]
            config: String,
            value: u32,
        },
        New {
            #[serde_implicit(tag)]
            entries: Vec<String>,
            value: u32,
        },
    }
    // "entries" is present but null — should be skipped, "config" wins.
    let res: Result<Schema, _> = serde_json::from_value(json!({
        "entries": null,
        "config": "default",
        "value": 42,
    }));
    assert!(res.is_ok(), "expected Ok but got: {res:?}");
    match res.unwrap() {
        Schema::Old { config, value, .. } => {
            assert_eq!(config, "default");
            assert_eq!(value, 42);
        }
        other => panic!("expected Old, got {other:?}"),
    }
    // Symmetric: "config" is null, "entries" should win.
    let res: Result<Schema, _> = serde_json::from_value(json!({
        "config": null,
        "entries": ["a"],
        "value": 7,
    }));
    assert!(res.is_ok(), "expected Ok but got: {res:?}");
    match res.unwrap() {
        Schema::New { entries, value, .. } => {
            assert_eq!(entries, vec!["a"]);
            assert_eq!(value, 7);
        }
        other => panic!("expected New, got {other:?}"),
    }
    // Both tags are null — should fail with "tag was not found".
    let res: Result<Schema, _> = serde_json::from_value(json!({
        "config": null,
        "entries": null,
        "value": 0,
    }));
    assert!(res.is_err());
    // Both tags are non-null — should fail with duplicate tag error.
    let res: Result<Schema, _> = serde_json::from_value(json!({
        "config": "default",
        "entries": ["a"],
        "value": 0,
    }));
    let err = res.unwrap_err();
    assert!(
        err.to_string()
            .contains("found multiple implicit tag fields"),
        "expected 'found multiple implicit tag fields', got: {err}",
    );
}
// musings on test coverage
// properties: parsing with implicit tagged <-> untagged
// properties: de(ser(x)) = x
// deserialize random combinations of valid and invalid fields for types
// check that crash free and that it fails to deserialize
// edge cases
// - empty enum
// - duplicate tags (add check)
// - extra fields
// - missing fields
// - recursive type
/// README example: committing to `BigOp` on the string tag means the
/// second element must be a sequence, so `["Sum", 1]` errors with the
/// variant-qualified message.
#[test]
fn test_readme_tuples() {
    #[derive(serde_implicit::Deserialize, Debug)]
    enum Message {
        Literal(u64),
        BigOp(Op, Vec<Message>),
    }
    #[derive(serde::Deserialize, Debug)]
    enum Op {
        Sum,
    }
    // Sanity check: `Op` alone deserializes from its variant name.
    let res: Result<Op, _> = serde_json::from_value(json!("Sum"));
    res.unwrap();
    let res: Result<Message, _> = serde_json::from_value(json!(["Sum", 1]));
    assert!(res.is_err());
    let err = res.unwrap_err();
    println!("{err:?}");
    assert!(
        err.to_string()
            .contains("Message::BigOp: invalid type: integer `1`, expected a sequence")
    );
}
/// Regression test: missing `Option<T>` fields in struct variants should
/// deserialize as `None`, matching standard serde behaviour.
#[test]
fn test_missing_option_field_defaults_to_none() {
    #[allow(dead_code)]
    #[derive(serde_implicit_proc::Deserialize, Debug)]
    enum Config {
        V0 {
            #[serde_implicit(tag)]
            settings: String,
            dist_metric: String,
            dimensions: Option<u32>,
        },
        V1 {
            #[serde_implicit(tag)]
            indexes: Vec<String>,
            kv_store: String,
        },
    }
    // JSON is missing the `dimensions` field entirely — should deserialize as None
    let res: Result<Config, _> = serde_json::from_value(json!({
        "settings": "default",
        "dist_metric": "euclidean"
    }));
    assert!(res.is_ok(), "expected Ok but got: {res:?}");
    match res.unwrap() {
        Config::V0 { dimensions, .. } => assert_eq!(dimensions, None),
        other => panic!("expected V0, got {other:?}"),
    }
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
serde-implicit/tests/proptest.rs | Rust | use arbitrary_json::ArbitraryValue;
use proptest::prelude::*;
use proptest::proptest;
use proptest_arbitrary_interop::arb;
use proptest_derive::Arbitrary;
/// Round-trip model: serialized as `#[serde(untagged)]`, deserialized via
/// `serde_implicit`; variants are distinguished by the type of their tag
/// field (string / u64 / bool).
#[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum MultiTypeTag {
    StringVariant {
        #[serde_implicit(tag)]
        string_tag: String,
        value: u32,
    },
    NumberVariant {
        #[serde_implicit(tag)]
        number_tag: u64,
        value: String,
    },
    BoolVariant {
        #[serde_implicit(tag)]
        bool_tag: bool,
        value: Vec<String>,
    },
}
/// Variants share `common_field` but carry distinct tag keys, exercising
/// tag selection among overlapping field sets.
#[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum OverlappingFields {
    Variant1 {
        #[serde_implicit(tag)]
        type_tag: String,
        common_field: u32,
        variant1_specific: bool,
    },
    Variant2 {
        #[serde_implicit(tag)]
        version: u32,
        common_field: u32,
        variant2_specific: String,
    },
}
/// Plain-serde helper embedded in `NestedEnum::Complex`; not itself
/// implicitly tagged.
#[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
struct NestedData {
    field1: String,
    field2: u32,
}
/// Mixes a flat variant with one carrying a nested struct and an `Option`,
/// checking that non-tag fields may be arbitrarily structured.
#[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum NestedEnum {
    Simple {
        #[serde_implicit(tag)]
        tag: String,
        value: u32,
    },
    Complex {
        #[serde_implicit(tag)]
        complex_tag: bool,
        nested: NestedData,
        optional: Option<String>, // may be absent in the JSON
    },
}
#[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq)]
enum RecursiveEnum {
Leaf {
#[serde_implicit(tag)]
is_leaf: bool,
value: String,
},
Node {
#[serde_implicit(tag)]
has_children: bool,
children: Vec<RecursiveEnum>,
metadata: String,
},
}
/// Degenerate shapes; no visible test constructs these, so they appear to be
/// compile-time checks that the derive accepts them — confirm before removing.
mod edge_cases {
    // Zero-variant enum must still derive cleanly.
    #[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq)]
    enum EmptyEnum {}
    // Single-variant enum: the tag still has to be declared explicitly.
    #[derive(serde_implicit_proc::Deserialize, serde::Serialize, Debug, PartialEq)]
    enum SingleVariant {
        OnlyVariant {
            #[serde_implicit(tag)]
            this_is_it: bool,
            data: String,
        },
    }
}
/// Basic tuple enum - discriminated by first field type
/// (no explicit `#[serde_implicit(tag)]` on any element).
#[derive(serde_implicit::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum TupleEnum {
    BoolU32(bool, u32),
    StringOnly(String), // single-element tuple variant
    U64Vec(u64, Vec<u32>),
}
/// Tuple enum with custom tag positions via #[serde_implicit(tag)],
/// covering first, middle, and last positions.
#[derive(serde_implicit::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum TupleCustomTag {
    /// Tag at position 1
    MiddleTag(bool, #[serde_implicit(tag)] String, u32),
    /// Tag at position 0 (explicit)
    FirstTag(#[serde_implicit(tag)] u64, bool),
    /// Tag at last position
    LastTag(u32, bool, #[serde_implicit(tag)] String),
}
/// Helper struct for flatten tests; uses plain serde derives only.
#[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
struct FlattenInner(String, bool);
/// Tuple enum with flatten for fallback variants
/// (presumably `flatten` defers to `FlattenInner`'s own Deserialize —
/// confirm against the serde_implicit attribute docs).
#[derive(serde_implicit::Deserialize, serde::Serialize, Debug, PartialEq, Arbitrary)]
#[serde(untagged)]
enum TupleFlatten {
    Normal(bool),
    Tagged(u64, #[serde_implicit(tag)] u32),
    Fallback(#[serde_implicit(flatten)] FlattenInner),
}
proptest! {
#[test]
fn test_tags_different_types(tag in any::<MultiTypeTag>()) {
    // Round-trip through a JSON string and require exact equality.
    let json = serde_json::to_string(&tag).unwrap();
    let restored: MultiTypeTag = serde_json::from_str(&json).unwrap();
    assert_eq!(tag, restored);
}
#[test]
fn test_tags_overlapping_fields(tag in any::<OverlappingFields>()) {
    // Shared `common_field` must not confuse variant selection on round-trip.
    let json = serde_json::to_string(&tag).unwrap();
    let restored: OverlappingFields = serde_json::from_str(&json).unwrap();
    assert_eq!(tag, restored);
}
#[test]
fn test_tags_nested_enum(tag in any::<NestedEnum>()) {
    // Nested structs and Option fields must survive the round-trip intact.
    let json = serde_json::to_string(&tag).unwrap();
    let restored: NestedEnum = serde_json::from_str(&json).unwrap();
    assert_eq!(tag, restored);
}
// Verifies that serde-implicit and serde(untagged) parse the same types
// the only difference should be in their behavior in error messages
#[test]
fn test_agrees_with_serde(rand in any::<MultiTypeTag>()) {
    /// Mirror of `MultiTypeTag` with plain `serde(untagged)` deserialization.
    #[derive(serde::Deserialize, serde::Serialize, Debug, PartialEq)]
    #[serde(untagged)]
    enum SerdeMultiTypeTag {
        StringVariant { string_tag: String, value: u32 },
        NumberVariant { number_tag: u64, value: String },
        BoolVariant { bool_tag: bool, value: Vec<String> },
    }
    let json = serde_json::to_string(&rand).unwrap();
    let via_implicit: MultiTypeTag = serde_json::from_str(&json).unwrap();
    let via_serde: SerdeMultiTypeTag = serde_json::from_str(&json).unwrap();
    // The two results are distinct Rust types, so compare them through
    // their re-serialized JSON.
    assert_eq!(
        serde_json::to_string(&via_implicit).unwrap(),
        serde_json::to_string(&via_serde).unwrap()
    );
}
/// Fuzz test: arbitrary JSON should never cause a panic during deserialization.
/// We don't care if it returns Ok or Err, just that it terminates cleanly.
#[test]
fn fuzz_multi_type_tag_no_panic(json in arb::<ArbitraryValue>()) {
    let _outcome: Result<MultiTypeTag, _> = serde_json::from_value(json.into());
}
#[test]
fn fuzz_overlapping_fields_no_panic(json in arb::<ArbitraryValue>()) {
    // Only checks graceful termination; the Result itself is discarded.
    let _outcome: Result<OverlappingFields, _> = serde_json::from_value(json.into());
}
#[test]
fn fuzz_nested_enum_no_panic(json in arb::<ArbitraryValue>()) {
    // Only checks graceful termination; the Result itself is discarded.
    let _outcome: Result<NestedEnum, _> = serde_json::from_value(json.into());
}
#[test]
fn fuzz_tuple_enum_no_panic(json in arb::<ArbitraryValue>()) {
    // Only checks graceful termination; the Result itself is discarded.
    let _outcome: Result<TupleEnum, _> = serde_json::from_value(json.into());
}
#[test]
fn fuzz_tuple_custom_tag_no_panic(json in arb::<ArbitraryValue>()) {
    // Only checks graceful termination; the Result itself is discarded.
    let _outcome: Result<TupleCustomTag, _> = serde_json::from_value(json.into());
}
#[test]
fn fuzz_tuple_flatten_no_panic(json in arb::<ArbitraryValue>()) {
    // Only checks graceful termination; the Result itself is discarded.
    let _outcome: Result<TupleFlatten, _> = serde_json::from_value(json.into());
}
#[test]
fn test_tuple_enum_roundtrip(value in any::<TupleEnum>()) {
    // serialize -> deserialize must reproduce the original value exactly
    let as_json = serde_json::to_value(&value).unwrap();
    let restored = serde_json::from_value::<TupleEnum>(as_json).unwrap();
    assert_eq!(value, restored);
}
#[test]
fn test_tuple_custom_tag_roundtrip(value in any::<TupleCustomTag>()) {
    // serialize -> deserialize must reproduce the original value exactly
    let as_json = serde_json::to_value(&value).unwrap();
    let restored = serde_json::from_value::<TupleCustomTag>(as_json).unwrap();
    assert_eq!(value, restored);
}
#[test]
fn test_tuple_flatten_roundtrip(value in any::<TupleFlatten>()) {
    // serialize -> deserialize must reproduce the original value exactly
    let as_json = serde_json::to_value(&value).unwrap();
    let restored = serde_json::from_value::<TupleFlatten>(as_json).unwrap();
    assert_eq!(value, restored);
}
}
| xldenis/serde-implicit | 2 | implicitly tagged enum representation for serde | Rust | xldenis | Xavier Denis | turbopuffer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.