yamildiego committed on
Commit bf360bf · 1 Parent(s): f82b1d8

rollback default code

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. handler.py +0 -4
  2. insightface/__init__.py +0 -21
  3. insightface/app/__init__.py +0 -2
  4. insightface/app/common.py +0 -49
  5. insightface/app/face_analysis.py +0 -109
  6. insightface/app/mask_renderer.py +0 -232
  7. insightface/commands/__init__.py +0 -13
  8. insightface/commands/insightface_cli.py +0 -29
  9. insightface/commands/model_download.py +0 -36
  10. insightface/commands/rec_add_mask_param.py +0 -94
  11. insightface/data/__init__.py +0 -2
  12. insightface/data/image.py +0 -28
  13. insightface/data/images/Tom_Hanks_54745.png +0 -3
  14. insightface/data/images/mask_black.jpg +0 -0
  15. insightface/data/images/mask_blue.jpg +0 -0
  16. insightface/data/images/mask_green.jpg +0 -0
  17. insightface/data/images/mask_white.jpg +0 -0
  18. insightface/data/images/t1.jpg +0 -0
  19. insightface/data/objects/meanshape_68.pkl +0 -3
  20. insightface/data/pickle_object.py +0 -17
  21. insightface/data/rec_builder.py +0 -71
  22. insightface/model_zoo/__init__.py +0 -6
  23. insightface/model_zoo/arcface_onnx.py +0 -92
  24. insightface/model_zoo/attribute.py +0 -94
  25. insightface/model_zoo/inswapper.py +0 -105
  26. insightface/model_zoo/landmark.py +0 -114
  27. insightface/model_zoo/model_store.py +0 -103
  28. insightface/model_zoo/model_zoo.py +0 -98
  29. insightface/model_zoo/retinaface.py +0 -301
  30. insightface/model_zoo/scrfd.py +0 -348
  31. insightface/thirdparty/__init__.py +0 -0
  32. insightface/thirdparty/face3d/__init__.py +0 -4
  33. insightface/thirdparty/face3d/mesh/__init__.cpp +0 -0
  34. insightface/thirdparty/face3d/mesh/__init__.py +0 -15
  35. insightface/thirdparty/face3d/mesh/cython/mesh_core.cpp +0 -375
  36. insightface/thirdparty/face3d/mesh/cython/mesh_core.h +0 -83
  37. insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.c +0 -0
  38. insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.cpp +0 -0
  39. insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.pyx +0 -109
  40. insightface/thirdparty/face3d/mesh/cython/setup.py +0 -20
  41. insightface/thirdparty/face3d/mesh/io.cpp +0 -1
  42. insightface/thirdparty/face3d/mesh/io.py +0 -142
  43. insightface/thirdparty/face3d/mesh/light.py +0 -213
  44. insightface/thirdparty/face3d/mesh/render.py +0 -135
  45. insightface/thirdparty/face3d/mesh/transform.py +0 -383
  46. insightface/thirdparty/face3d/mesh/vis.py +0 -24
  47. insightface/thirdparty/face3d/mesh_numpy/__init__.py +0 -10
  48. insightface/thirdparty/face3d/mesh_numpy/io.py +0 -170
  49. insightface/thirdparty/face3d/mesh_numpy/light.py +0 -215
  50. insightface/thirdparty/face3d/mesh_numpy/render.py +0 -287
handler.py CHANGED
@@ -14,10 +14,6 @@ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 
 from huggingface_hub import hf_hub_download
 
- import sys
- root_local = './'
- sys.path.insert(0, root_local)
-
 from insightface.app import FaceAnalysis
 
 from style_template import styles
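
Note: the four removed lines were a path shim that forced Python to resolve `import insightface` to the vendored copy at the repository root. A minimal sketch of what the shim did, assuming the handler runs with the repo root as its working directory (not part of the final handler):

    import sys

    # Prepending the repo root makes the vendored ./insightface package shadow
    # any installed package of the same name (this is what the rollback removes).
    sys.path.insert(0, './')
    from insightface.app import FaceAnalysis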
insightface/__init__.py DELETED
@@ -1,21 +0,0 @@
- # coding: utf-8
- # pylint: disable=wrong-import-position
- """InsightFace: A Face Analysis Toolkit."""
- from __future__ import absolute_import
-
- try:
-     #import mxnet as mx
-     import onnxruntime
- except ImportError:
-     raise ImportError(
-         "Unable to import dependency onnxruntime. "
-     )
-
- __version__ = '0.7.3'
-
- from . import model_zoo
- from . import utils
- from . import app
- from . import data
- from . import thirdparty
-
 
insightface/app/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .face_analysis import *
- from .mask_renderer import *
 
insightface/app/common.py DELETED
@@ -1,49 +0,0 @@
- import numpy as np
- from numpy.linalg import norm as l2norm
- #from easydict import EasyDict
-
- class Face(dict):
-
-     def __init__(self, d=None, **kwargs):
-         if d is None:
-             d = {}
-         if kwargs:
-             d.update(**kwargs)
-         for k, v in d.items():
-             setattr(self, k, v)
-         # Class attributes
-         #for k in self.__class__.__dict__.keys():
-         #    if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
-         #        setattr(self, k, getattr(self, k))
-
-     def __setattr__(self, name, value):
-         if isinstance(value, (list, tuple)):
-             value = [self.__class__(x)
-                      if isinstance(x, dict) else x for x in value]
-         elif isinstance(value, dict) and not isinstance(value, self.__class__):
-             value = self.__class__(value)
-         super(Face, self).__setattr__(name, value)
-         super(Face, self).__setitem__(name, value)
-
-     __setitem__ = __setattr__
-
-     def __getattr__(self, name):
-         return None
-
-     @property
-     def embedding_norm(self):
-         if self.embedding is None:
-             return None
-         return l2norm(self.embedding)
-
-     @property
-     def normed_embedding(self):
-         if self.embedding is None:
-             return None
-         return self.embedding / self.embedding_norm
-
-     @property
-     def sex(self):
-         if self.gender is None:
-             return None
-         return 'M' if self.gender==1 else 'F'
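
Note: Face is a dict subclass whose keys double as attributes, and missing keys read as None rather than raising. A short illustration of that contract (the values are made up):

    import numpy as np
    from insightface.app.common import Face

    face = Face(bbox=np.array([10, 20, 110, 140]), gender=1)
    assert face['gender'] == face.gender == 1  # item and attribute access stay in sync
    assert face.embedding is None              # absent keys return None instead of raising
    assert face.sex == 'M'                     # derived property: gender 1 -> 'M', 0 -> 'F'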
 
insightface/app/face_analysis.py DELETED
@@ -1,109 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-05-04
- # @Function :
-
-
- from __future__ import division
-
- import glob
- import os.path as osp
-
- import numpy as np
- import onnxruntime
- from numpy.linalg import norm
-
- from ..model_zoo import model_zoo
- from ..utils import DEFAULT_MP_NAME, ensure_available
- from .common import Face
-
- __all__ = ['FaceAnalysis']
-
- class FaceAnalysis:
-     def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', allowed_modules=None, **kwargs):
-         onnxruntime.set_default_logger_severity(3)
-         self.models = {}
-         self.model_dir = ensure_available('models', name, root=root)
-         onnx_files = glob.glob(osp.join(self.model_dir, '*.onnx'))
-         onnx_files = sorted(onnx_files)
-         for onnx_file in onnx_files:
-             model = model_zoo.get_model(onnx_file, **kwargs)
-             if model is None:
-                 print('model not recognized:', onnx_file)
-             elif allowed_modules is not None and model.taskname not in allowed_modules:
-                 print('model ignore:', onnx_file, model.taskname)
-                 del model
-             elif model.taskname not in self.models and (allowed_modules is None or model.taskname in allowed_modules):
-                 print('find model:', onnx_file, model.taskname, model.input_shape, model.input_mean, model.input_std)
-                 self.models[model.taskname] = model
-             else:
-                 print('duplicated model task type, ignore:', onnx_file, model.taskname)
-                 del model
-         assert 'detection' in self.models
-         self.det_model = self.models['detection']
-
-
-     def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
-         self.det_thresh = det_thresh
-         assert det_size is not None
-         print('set det-size:', det_size)
-         self.det_size = det_size
-         for taskname, model in self.models.items():
-             if taskname=='detection':
-                 model.prepare(ctx_id, input_size=det_size, det_thresh=det_thresh)
-             else:
-                 model.prepare(ctx_id)
-
-     def get(self, img, max_num=0):
-         bboxes, kpss = self.det_model.detect(img,
-                                              max_num=max_num,
-                                              metric='default')
-         if bboxes.shape[0] == 0:
-             return []
-         ret = []
-         for i in range(bboxes.shape[0]):
-             bbox = bboxes[i, 0:4]
-             det_score = bboxes[i, 4]
-             kps = None
-             if kpss is not None:
-                 kps = kpss[i]
-             face = Face(bbox=bbox, kps=kps, det_score=det_score)
-             for taskname, model in self.models.items():
-                 if taskname=='detection':
-                     continue
-                 model.get(img, face)
-             ret.append(face)
-         return ret
-
-     def draw_on(self, img, faces):
-         import cv2
-         dimg = img.copy()
-         for i in range(len(faces)):
-             face = faces[i]
-             box = face.bbox.astype(int)
-             color = (0, 0, 255)
-             cv2.rectangle(dimg, (box[0], box[1]), (box[2], box[3]), color, 2)
-             if face.kps is not None:
-                 kps = face.kps.astype(int)
-                 #print(landmark.shape)
-                 for l in range(kps.shape[0]):
-                     color = (0, 0, 255)
-                     if l == 0 or l == 3:
-                         color = (0, 255, 0)
-                     cv2.circle(dimg, (kps[l][0], kps[l][1]), 1, color, 2)
-             if face.gender is not None and face.age is not None:
-                 cv2.putText(dimg,'%s,%d'%(face.sex,face.age), (box[0]-1, box[1]-4),cv2.FONT_HERSHEY_COMPLEX,0.7,(0,255,0),1)
-
-             #for key, value in face.items():
-             #    if key.startswith('landmark_3d'):
-             #        print(key, value.shape)
-             #        print(value[0:10,:])
-             #        lmk = np.round(value).astype(int)
-             #        for l in range(lmk.shape[0]):
-             #            color = (255, 0, 0)
-             #            cv2.circle(dimg, (lmk[l][0], lmk[l][1]), 1, color, 2)
-         return dimg
-
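
Note: this class is the entry point handler.py imports. A minimal usage sketch, where the pack name and image path are placeholders and the pack would be fetched by ensure_available on first use:

    import cv2
    from insightface.app import FaceAnalysis

    app = FaceAnalysis(name='buffalo_l')        # placeholder pack name; the default comes from DEFAULT_MP_NAME
    app.prepare(ctx_id=0, det_size=(640, 640))  # ctx_id < 0 would pin every model to CPU
    img = cv2.imread('photo.jpg')               # placeholder path; BGR, as OpenCV loads it
    faces = app.get(img)                        # list of Face dicts: bbox, kps, det_score, ...
    cv2.imwrite('out.jpg', app.draw_on(img, faces))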
 
insightface/app/mask_renderer.py DELETED
@@ -1,232 +0,0 @@
- import os, sys, datetime
- import numpy as np
- import os.path as osp
- import albumentations as A
- from albumentations.core.transforms_interface import ImageOnlyTransform
- from .face_analysis import FaceAnalysis
- from ..utils import get_model_dir
- from ..thirdparty import face3d
- from ..data import get_image as ins_get_image
- from ..utils import DEFAULT_MP_NAME
- import cv2
-
- class MaskRenderer:
-     def __init__(self, name=DEFAULT_MP_NAME, root='~/.insightface', insfa=None):
-         #if insfa is None, enter render_only mode
-         self.mp_name = name
-         self.root = root
-         self.insfa = insfa
-         model_dir = get_model_dir(name, root)
-         bfm_file = osp.join(model_dir, 'BFM.mat')
-         assert osp.exists(bfm_file), 'should contains BFM.mat in your model directory'
-         self.bfm = face3d.morphable_model.MorphabelModel(bfm_file)
-         self.index_ind = self.bfm.kpt_ind
-         bfm_uv_file = osp.join(model_dir, 'BFM_UV.mat')
-         assert osp.exists(bfm_uv_file), 'should contains BFM_UV.mat in your model directory'
-         uv_coords = face3d.morphable_model.load.load_uv_coords(bfm_uv_file)
-         self.uv_size = (224,224)
-         self.mask_stxr = 0.1
-         self.mask_styr = 0.33
-         self.mask_etxr = 0.9
-         self.mask_etyr = 0.7
-         self.tex_h , self.tex_w, self.tex_c = self.uv_size[1] , self.uv_size[0],3
-         texcoord = np.zeros_like(uv_coords)
-         texcoord[:, 0] = uv_coords[:, 0] * (self.tex_h - 1)
-         texcoord[:, 1] = uv_coords[:, 1] * (self.tex_w - 1)
-         texcoord[:, 1] = self.tex_w - texcoord[:, 1] - 1
-         self.texcoord = np.hstack((texcoord, np.zeros((texcoord.shape[0], 1))))
-         self.X_ind = self.bfm.kpt_ind
-         self.mask_image_names = ['mask_white', 'mask_blue', 'mask_black', 'mask_green']
-         self.mask_aug_probs = [0.4, 0.4, 0.1, 0.1]
-         #self.mask_images = []
-         #self.mask_images_rgb = []
-         #for image_name in mask_image_names:
-         #    mask_image = ins_get_image(image_name)
-         #    self.mask_images.append(mask_image)
-         #    mask_image_rgb = mask_image[:,:,::-1]
-         #    self.mask_images_rgb.append(mask_image_rgb)
-
-
-     def prepare(self, ctx_id=0, det_thresh=0.5, det_size=(128, 128)):
-         self.pre_ctx_id = ctx_id
-         self.pre_det_thresh = det_thresh
-         self.pre_det_size = det_size
-
-     def transform(self, shape3D, R):
-         s = 1.0
-         shape3D[:2, :] = shape3D[:2, :]
-         shape3D = s * np.dot(R, shape3D)
-         return shape3D
-
-     def preprocess(self, vertices, w, h):
-         R1 = face3d.mesh.transform.angle2matrix([0, 180, 180])
-         t = np.array([-w // 2, -h // 2, 0])
-         vertices = vertices.T
-         vertices += t
-         vertices = self.transform(vertices.T, R1).T
-         return vertices
-
-     def project_to_2d(self,vertices,s,angles,t):
-         transformed_vertices = self.bfm.transform(vertices, s, angles, t)
-         projected_vertices = transformed_vertices.copy()  # using stantard camera & orth projection
-         return projected_vertices[self.bfm.kpt_ind, :2]
-
-     def params_to_vertices(self,params , H , W):
-         fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = params
-         fitted_vertices = self.bfm.generate_vertices(fitted_sp, fitted_ep)
-         transformed_vertices = self.bfm.transform(fitted_vertices, fitted_s, fitted_angles,
-                                                   fitted_t)
-         transformed_vertices = self.preprocess(transformed_vertices.T, W, H)
-         image_vertices = face3d.mesh.transform.to_image(transformed_vertices, H, W)
-         return image_vertices
-
-     def draw_lmk(self, face_image):
-         faces = self.insfa.get(face_image, max_num=1)
-         if len(faces)==0:
-             return face_image
-         return self.insfa.draw_on(face_image, faces)
-
-     def build_params(self, face_image):
-         #landmark = self.if3d68_handler.get(face_image)
-         #if landmark is None:
-         #    return None #face not found
-         if self.insfa is None:
-             self.insfa = FaceAnalysis(name=self.mp_name, root=self.root, allowed_modules=['detection', 'landmark_3d_68'])
-             self.insfa.prepare(ctx_id=self.pre_ctx_id, det_thresh=self.pre_det_thresh, det_size=self.pre_det_size)
-
-         faces = self.insfa.get(face_image, max_num=1)
-         if len(faces)==0:
-             return None
-         landmark = faces[0].landmark_3d_68[:,:2]
-         fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t = self.bfm.fit(landmark, self.X_ind, max_iter = 3)
-         return [fitted_sp, fitted_ep, fitted_s, fitted_angles, fitted_t]
-
-     def generate_mask_uv(self,mask, positions):
-         uv_size = (self.uv_size[1], self.uv_size[0], 3)
-         h, w, c = uv_size
-         uv = np.zeros(shape=(self.uv_size[1],self.uv_size[0], 3), dtype=np.uint8)
-         stxr, styr = positions[0], positions[1]
-         etxr, etyr = positions[2], positions[3]
-         stx, sty = int(w * stxr), int(h * styr)
-         etx, ety = int(w * etxr), int(h * etyr)
-         height = ety - sty
-         width = etx - stx
-         mask = cv2.resize(mask, (width, height))
-         uv[sty:ety, stx:etx] = mask
-         return uv
-
-     def render_mask(self,face_image, mask_image, params, input_is_rgb=False, auto_blend = True, positions=[0.1, 0.33, 0.9, 0.7]):
-         if isinstance(mask_image, str):
-             to_rgb = True if input_is_rgb else False
-             mask_image = ins_get_image(mask_image, to_rgb=to_rgb)
-         uv_mask_image = self.generate_mask_uv(mask_image, positions)
-         h,w,c = face_image.shape
-         image_vertices = self.params_to_vertices(params ,h,w)
-         output = (1-face3d.mesh.render.render_texture(image_vertices, self.bfm.full_triangles , uv_mask_image, self.texcoord, self.bfm.full_triangles, h , w ))*255
-         output = output.astype(np.uint8)
-         if auto_blend:
-             mask_bd = (output==255).astype(np.uint8)
-             final = face_image*mask_bd + (1-mask_bd)*output
-             return final
-         return output
-
-     #def mask_augmentation(self, face_image, label, input_is_rgb=False, p=0.1):
-     #    if np.random.random()<p:
-     #        assert isinstance(label, (list, np.ndarray)), 'make sure the rec dataset includes mask params'
-     #        assert len(label)==237 or len(lable)==235, 'make sure the rec dataset includes mask params'
-     #        if len(label)==237:
-     #            if label[1]<0.0: #invalid label for mask aug
-     #                return face_image
-     #            label = label[2:]
-     #        params = self.decode_params(label)
-     #        mask_image_name = np.random.choice(self.mask_image_names, p=self.mask_aug_probs)
-     #        pos = np.random.uniform(0.33, 0.5)
-     #        face_image = self.render_mask(face_image, mask_image_name, params, input_is_rgb=input_is_rgb, positions=[0.1, pos, 0.9, 0.7])
-     #    return face_image
-
-     @staticmethod
-     def encode_params(params):
-         p0 = list(params[0])
-         p1 = list(params[1])
-         p2 = [float(params[2])]
-         p3 = list(params[3])
-         p4 = list(params[4])
-         return p0+p1+p2+p3+p4
-
-     @staticmethod
-     def decode_params(params):
-         p0 = params[0:199]
-         p0 = np.array(p0, dtype=np.float32).reshape( (-1, 1))
-         p1 = params[199:228]
-         p1 = np.array(p1, dtype=np.float32).reshape( (-1, 1))
-         p2 = params[228]
-         p3 = tuple(params[229:232])
-         p4 = params[232:235]
-         p4 = np.array(p4, dtype=np.float32).reshape( (-1, 1))
-         return p0, p1, p2, p3, p4
-
- class MaskAugmentation(ImageOnlyTransform):
-
-     def __init__(
-         self,
-         mask_names=['mask_white', 'mask_blue', 'mask_black', 'mask_green'],
-         mask_probs=[0.4,0.4,0.1,0.1],
-         h_low = 0.33,
-         h_high = 0.35,
-         always_apply=False,
-         p=1.0,
-     ):
-         super(MaskAugmentation, self).__init__(always_apply, p)
-         self.renderer = MaskRenderer()
-         assert len(mask_names)>0
-         assert len(mask_names)==len(mask_probs)
-         self.mask_names = mask_names
-         self.mask_probs = mask_probs
-         self.h_low = h_low
-         self.h_high = h_high
-         #self.hlabel = None
-
-
-     def apply(self, image, hlabel, mask_name, h_pos, **params):
-         #print(params.keys())
-         #hlabel = params.get('hlabel')
-         assert len(hlabel)==237 or len(hlabel)==235, 'make sure the rec dataset includes mask params'
-         if len(hlabel)==237:
-             if hlabel[1]<0.0:
-                 return image
-             hlabel = hlabel[2:]
-         #print(len(hlabel))
-         mask_params = self.renderer.decode_params(hlabel)
-         image = self.renderer.render_mask(image, mask_name, mask_params, input_is_rgb=True, positions=[0.1, h_pos, 0.9, 0.7])
-         return image
-
-     @property
-     def targets_as_params(self):
-         return ["image", "hlabel"]
-
-     def get_params_dependent_on_targets(self, params):
-         hlabel = params['hlabel']
-         mask_name = np.random.choice(self.mask_names, p=self.mask_probs)
-         h_pos = np.random.uniform(self.h_low, self.h_high)
-         return {'hlabel': hlabel, 'mask_name': mask_name, 'h_pos': h_pos}
-
-     def get_transform_init_args_names(self):
-         #return ("hlabel", 'mask_names', 'mask_probs', 'h_low', 'h_high')
-         return ('mask_names', 'mask_probs', 'h_low', 'h_high')
-
-
- if __name__ == "__main__":
-     tool = MaskRenderer('antelope')
-     tool.prepare(det_size=(128,128))
-     image = cv2.imread("Tom_Hanks_54745.png")
-     params = tool.build_params(image)
-     #out = tool.draw_lmk(image)
-     #cv2.imwrite('output_lmk.jpg', out)
-     #mask_image = cv2.imread("masks/mask1.jpg")
-     #mask_image = cv2.imread("masks/black-mask.png")
-     #mask_image = cv2.imread("masks/mask2.jpg")
-     mask_out = tool.render_mask(image, 'mask_blue', params)  # use single thread to test the time cost
-
-     cv2.imwrite('output_mask.jpg', mask_out)
-
-
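
Note: encode_params flattens the five BFM fit outputs into 199 + 29 + 1 + 3 + 3 = 235 floats and decode_params inverts that layout. A round-trip sketch with zero-filled stand-ins (importing this module pulls in albumentations and face3d, which must be installed):

    import numpy as np
    from insightface.app.mask_renderer import MaskRenderer

    params = [np.zeros((199, 1), np.float32),  # shape coefficients
              np.zeros((29, 1), np.float32),   # expression coefficients
              1.0,                             # scale
              (0.0, 0.0, 0.0),                 # rotation angles
              np.zeros((3, 1), np.float32)]    # translation
    flat = MaskRenderer.encode_params(params)
    p0, p1, p2, p3, p4 = MaskRenderer.decode_params(flat)
    assert len(flat) == 235 and p0.shape == (199, 1) and p1.shape == (29, 1)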
 
insightface/commands/__init__.py DELETED
@@ -1,13 +0,0 @@
- from abc import ABC, abstractmethod
- from argparse import ArgumentParser
-
-
- class BaseInsightFaceCLICommand(ABC):
-     @staticmethod
-     @abstractmethod
-     def register_subcommand(parser: ArgumentParser):
-         raise NotImplementedError()
-
-     @abstractmethod
-     def run(self):
-         raise NotImplementedError()
 
insightface/commands/insightface_cli.py DELETED
@@ -1,29 +0,0 @@
- #!/usr/bin/env python
-
- from argparse import ArgumentParser
-
- from .model_download import ModelDownloadCommand
- from .rec_add_mask_param import RecAddMaskParamCommand
-
- def main():
-     parser = ArgumentParser("InsightFace CLI tool", usage="insightface-cli <command> [<args>]")
-     commands_parser = parser.add_subparsers(help="insightface-cli command-line helpers")
-
-     # Register commands
-     ModelDownloadCommand.register_subcommand(commands_parser)
-     RecAddMaskParamCommand.register_subcommand(commands_parser)
-
-     args = parser.parse_args()
-
-     if not hasattr(args, "func"):
-         parser.print_help()
-         exit(1)
-
-     # Run
-     service = args.func(args)
-     service.run()
-
-
- if __name__ == "__main__":
-     main()
-
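
Note: with the two subcommands registered above, the deleted CLI could be driven as follows; the pack name is illustrative, and the invocation is shown through main() so the sketch stays self-contained:

    import sys
    from insightface.commands.insightface_cli import main

    # equivalent to running: insightface-cli model.download antelope
    sys.argv = ['insightface-cli', 'model.download', 'antelope']
    main()  # would download the pack into ~/.insightface/models/ via the download helper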
 
insightface/commands/model_download.py DELETED
@@ -1,36 +0,0 @@
- from argparse import ArgumentParser
-
- from . import BaseInsightFaceCLICommand
- import os
- import os.path as osp
- import zipfile
- import glob
- from ..utils import download
-
-
- def model_download_command_factory(args):
-     return ModelDownloadCommand(args.model, args.root, args.force)
-
-
- class ModelDownloadCommand(BaseInsightFaceCLICommand):
-     #_url_format = '{repo_url}models/{file_name}.zip'
-     @staticmethod
-     def register_subcommand(parser: ArgumentParser):
-         download_parser = parser.add_parser("model.download")
-         download_parser.add_argument(
-             "--root", type=str, default='~/.insightface', help="Path to location to store the models"
-         )
-         download_parser.add_argument(
-             "--force", action="store_true", help="Force the model to be download even if already in root-dir"
-         )
-         download_parser.add_argument("model", type=str, help="Name of the model to download")
-         download_parser.set_defaults(func=model_download_command_factory)
-
-     def __init__(self, model: str, root: str, force: bool):
-         self._model = model
-         self._root = root
-         self._force = force
-
-     def run(self):
-         download('models', self._model, force=self._force, root=self._root)
-
 
insightface/commands/rec_add_mask_param.py DELETED
@@ -1,94 +0,0 @@
-
- import numbers
- import os
- from argparse import ArgumentParser, Namespace
-
- import mxnet as mx
- import numpy as np
-
- from ..app import MaskRenderer
- from ..data.rec_builder import RecBuilder
- from . import BaseInsightFaceCLICommand
-
-
- def rec_add_mask_param_command_factory(args: Namespace):
-
-     return RecAddMaskParamCommand(
-         args.input, args.output
-     )
-
-
- class RecAddMaskParamCommand(BaseInsightFaceCLICommand):
-     @staticmethod
-     def register_subcommand(parser: ArgumentParser):
-         _parser = parser.add_parser("rec.addmaskparam")
-         _parser.add_argument("input", type=str, help="input rec")
-         _parser.add_argument("output", type=str, help="output rec, with mask param")
-         _parser.set_defaults(func=rec_add_mask_param_command_factory)
-
-     def __init__(
-         self,
-         input: str,
-         output: str,
-     ):
-         self._input = input
-         self._output = output
-
-
-     def run(self):
-         tool = MaskRenderer()
-         tool.prepare(ctx_id=0, det_size=(128,128))
-         root_dir = self._input
-         path_imgrec = os.path.join(root_dir, 'train.rec')
-         path_imgidx = os.path.join(root_dir, 'train.idx')
-         imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
-         save_path = self._output
-         wrec=RecBuilder(path=save_path)
-         s = imgrec.read_idx(0)
-         header, _ = mx.recordio.unpack(s)
-         if header.flag > 0:
-             if len(header.label)==2:
-                 imgidx = np.array(range(1, int(header.label[0])))
-             else:
-                 imgidx = np.array(list(self.imgrec.keys))
-         else:
-             imgidx = np.array(list(self.imgrec.keys))
-         stat = [0, 0]
-         print('total:', len(imgidx))
-         for iid, idx in enumerate(imgidx):
-             #if iid==500000:
-             #    break
-             if iid%1000==0:
-                 print('processing:', iid)
-             s = imgrec.read_idx(idx)
-             header, img = mx.recordio.unpack(s)
-             label = header.label
-             if not isinstance(label, numbers.Number):
-                 label = label[0]
-             sample = mx.image.imdecode(img).asnumpy()
-             bgr = sample[:,:,::-1]
-             params = tool.build_params(bgr)
-             #if iid<10:
-             #    mask_out = tool.render_mask(bgr, 'mask_blue', params)
-             #    cv2.imwrite('maskout_%d.jpg'%iid, mask_out)
-             stat[1] += 1
-             if params is None:
-                 wlabel = [label] + [-1.0]*236
-                 stat[0] += 1
-             else:
-                 #print(0, params[0].shape, params[0].dtype)
-                 #print(1, params[1].shape, params[1].dtype)
-                 #print(2, params[2])
-                 #print(3, len(params[3]), params[3][0].__class__)
-                 #print(4, params[4].shape, params[4].dtype)
-                 mask_label = tool.encode_params(params)
-                 wlabel = [label, 0.0]+mask_label  # 237 including idlabel, total mask params size is 235
-                 if iid==0:
-                     print('param size:', len(mask_label), len(wlabel), label)
-             assert len(wlabel)==237
-             wrec.add_image(img, wlabel)
-             #print(len(params))
-
-         wrec.close()
-         print('finished on', self._output, ', failed:', stat[0])
-
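
Note: the labels written by run() are always 237 floats: the identity label, a validity flag, then the 235 encoded mask params, with -1.0 sentinels when the fit failed. Illustrative layout only:

    ok_label  = [42.0, 0.0] + [0.1] * 235  # fit succeeded: flag 0.0, then 235 encoded params
    bad_label = [42.0] + [-1.0] * 236      # fit failed: flag and params are all -1.0
    assert len(ok_label) == len(bad_label) == 237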
 
insightface/data/__init__.py DELETED
@@ -1,2 +0,0 @@
- from .image import get_image
- from .pickle_object import get_object
 
insightface/data/image.py DELETED
@@ -1,28 +0,0 @@
- import cv2
- import os
- import os.path as osp
- from pathlib import Path
-
- class ImageCache:
-     data = {}
-
- def get_image(name, to_rgb=False, use_cache=True):
-     key = (name, to_rgb)
-     if key in ImageCache.data:
-         return ImageCache.data[key]
-     images_dir = osp.join(Path(__file__).parent.absolute(), 'images')
-     ext_names = ['.jpg', '.png', '.jpeg']
-     image_file = None
-     for ext_name in ext_names:
-         _image_file = osp.join(images_dir, "%s%s"%(name, ext_name))
-         if osp.exists(_image_file):
-             image_file = _image_file
-             break
-     assert image_file is not None, '%s not found'%name
-     img = cv2.imread(image_file)
-     if to_rgb:
-         img = img[:,:,::-1]
-     if use_cache:
-         ImageCache.data[key] = img
-     return img
-
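
Note: get_image resolves the bundled sample images (also deleted in this commit) by stem name. For instance:

    from insightface.data import get_image

    img = get_image('t1')                      # BGR ndarray from insightface/data/images/t1.jpg
    rgb = get_image('mask_blue', to_rgb=True)  # channel-reversed copy, cached per (name, to_rgb) key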
 
insightface/data/images/Tom_Hanks_54745.png DELETED

Git LFS Details

  • SHA256: 8545da294e8c7c79911169c3915fed8528f1960cd0ed99b92453788ca4275083
  • Pointer size: 130 Bytes
  • Size of remote file: 12.1 kB
insightface/data/images/mask_black.jpg DELETED
Binary file (21.3 kB)
 
insightface/data/images/mask_blue.jpg DELETED
Binary file (44.7 kB)
 
insightface/data/images/mask_green.jpg DELETED
Binary file (6.12 kB)
 
insightface/data/images/mask_white.jpg DELETED
Binary file (78.9 kB)
 
insightface/data/images/t1.jpg DELETED
Binary file (129 kB)
 
insightface/data/objects/meanshape_68.pkl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:39ffecf84ba73f0d0d7e49380833ba88713c9fcdec51df4f7ac45a48b8f4cc51
- size 974
 
insightface/data/pickle_object.py DELETED
@@ -1,17 +0,0 @@
- import cv2
- import os
- import os.path as osp
- from pathlib import Path
- import pickle
-
- def get_object(name):
-     objects_dir = osp.join(Path(__file__).parent.absolute(), 'objects')
-     if not name.endswith('.pkl'):
-         name = name+".pkl"
-     filepath = osp.join(objects_dir, name)
-     if not osp.exists(filepath):
-         return None
-     with open(filepath, 'rb') as f:
-         obj = pickle.load(f)
-     return obj
-
 
insightface/data/rec_builder.py DELETED
@@ -1,71 +0,0 @@
- import pickle
- import numpy as np
- import os
- import os.path as osp
- import sys
- import mxnet as mx
-
-
- class RecBuilder():
-     def __init__(self, path, image_size=(112, 112)):
-         self.path = path
-         self.image_size = image_size
-         self.widx = 0
-         self.wlabel = 0
-         self.max_label = -1
-         assert not osp.exists(path), '%s exists' % path
-         os.makedirs(path)
-         self.writer = mx.recordio.MXIndexedRecordIO(os.path.join(path, 'train.idx'),
-                                                     os.path.join(path, 'train.rec'),
-                                                     'w')
-         self.meta = []
-
-     def add(self, imgs):
-         #!!! img should be BGR!!!!
-         #assert label >= 0
-         #assert label > self.last_label
-         assert len(imgs) > 0
-         label = self.wlabel
-         for img in imgs:
-             idx = self.widx
-             image_meta = {'image_index': idx, 'image_classes': [label]}
-             header = mx.recordio.IRHeader(0, label, idx, 0)
-             if isinstance(img, np.ndarray):
-                 s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
-             else:
-                 s = mx.recordio.pack(header, img)
-             self.writer.write_idx(idx, s)
-             self.meta.append(image_meta)
-             self.widx += 1
-         self.max_label = label
-         self.wlabel += 1
-
-
-     def add_image(self, img, label):
-         #!!! img should be BGR!!!!
-         #assert label >= 0
-         #assert label > self.last_label
-         idx = self.widx
-         header = mx.recordio.IRHeader(0, label, idx, 0)
-         if isinstance(label, list):
-             idlabel = label[0]
-         else:
-             idlabel = label
-         image_meta = {'image_index': idx, 'image_classes': [idlabel]}
-         if isinstance(img, np.ndarray):
-             s = mx.recordio.pack_img(header,img,quality=95,img_fmt='.jpg')
-         else:
-             s = mx.recordio.pack(header, img)
-         self.writer.write_idx(idx, s)
-         self.meta.append(image_meta)
-         self.widx += 1
-         self.max_label = max(self.max_label, idlabel)
-
-     def close(self):
-         with open(osp.join(self.path, 'train.meta'), 'wb') as pfile:
-             pickle.dump(self.meta, pfile, protocol=pickle.HIGHEST_PROTOCOL)
-         print('stat:', self.widx, self.wlabel)
-         with open(os.path.join(self.path, 'property'), 'w') as f:
-             f.write("%d,%d,%d\n" % (self.max_label+1, self.image_size[0], self.image_size[1]))
-             f.write("%d\n" % (self.widx))
-
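
Note: a sketch of how RecBuilder is typically driven; it requires mxnet, the path is a placeholder, and the target directory must not exist yet:

    import numpy as np
    from insightface.data.rec_builder import RecBuilder

    wrec = RecBuilder(path='/tmp/new_dataset')      # placeholder output directory
    face = np.zeros((112, 112, 3), dtype=np.uint8)  # stand-in BGR image
    wrec.add([face, face])                          # one identity, two images, auto-assigned label 0
    wrec.close()                                    # writes train.idx/.rec, train.meta and the property file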
 
insightface/model_zoo/__init__.py DELETED
@@ -1,6 +0,0 @@
- from .model_zoo import get_model
- from .arcface_onnx import ArcFaceONNX
- from .retinaface import RetinaFace
- from .scrfd import SCRFD
- from .landmark import Landmark
- from .attribute import Attribute
 
insightface/model_zoo/arcface_onnx.py DELETED
@@ -1,92 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-05-04
- # @Function :
-
- from __future__ import division
- import numpy as np
- import cv2
- import onnx
- import onnxruntime
- from ..utils import face_align
-
- __all__ = [
-     'ArcFaceONNX',
- ]
-
-
- class ArcFaceONNX:
-     def __init__(self, model_file=None, session=None):
-         assert model_file is not None
-         self.model_file = model_file
-         self.session = session
-         self.taskname = 'recognition'
-         find_sub = False
-         find_mul = False
-         model = onnx.load(self.model_file)
-         graph = model.graph
-         for nid, node in enumerate(graph.node[:8]):
-             #print(nid, node.name)
-             if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                 find_sub = True
-             if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                 find_mul = True
-         if find_sub and find_mul:
-             #mxnet arcface model
-             input_mean = 0.0
-             input_std = 1.0
-         else:
-             input_mean = 127.5
-             input_std = 127.5
-         self.input_mean = input_mean
-         self.input_std = input_std
-         #print('input mean and std:', self.input_mean, self.input_std)
-         if self.session is None:
-             self.session = onnxruntime.InferenceSession(self.model_file, None)
-         input_cfg = self.session.get_inputs()[0]
-         input_shape = input_cfg.shape
-         input_name = input_cfg.name
-         self.input_size = tuple(input_shape[2:4][::-1])
-         self.input_shape = input_shape
-         outputs = self.session.get_outputs()
-         output_names = []
-         for out in outputs:
-             output_names.append(out.name)
-         self.input_name = input_name
-         self.output_names = output_names
-         assert len(self.output_names)==1
-         self.output_shape = outputs[0].shape
-
-     def prepare(self, ctx_id, **kwargs):
-         if ctx_id<0:
-             self.session.set_providers(['CPUExecutionProvider'])
-
-     def get(self, img, face):
-         aimg = face_align.norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
-         face.embedding = self.get_feat(aimg).flatten()
-         return face.embedding
-
-     def compute_sim(self, feat1, feat2):
-         from numpy.linalg import norm
-         feat1 = feat1.ravel()
-         feat2 = feat2.ravel()
-         sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
-         return sim
-
-     def get_feat(self, imgs):
-         if not isinstance(imgs, list):
-             imgs = [imgs]
-         input_size = self.input_size
-
-         blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
-                                       (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-         return net_out
-
-     def forward(self, batch_data):
-         blob = (batch_data - self.input_mean) / self.input_std
-         net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
-         return net_out
-
-
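
Note: compute_sim is plain cosine similarity over flattened embeddings; the standalone equivalent, using random 512-d vectors as stand-ins for typical ArcFace embeddings:

    import numpy as np
    from numpy.linalg import norm

    rng = np.random.default_rng(0)
    feat1, feat2 = rng.normal(size=512), rng.normal(size=512)
    sim = float(np.dot(feat1, feat2) / (norm(feat1) * norm(feat2)))  # in [-1, 1]; higher means more similar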
 
insightface/model_zoo/attribute.py DELETED
@@ -1,94 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-06-19
- # @Function :
-
- from __future__ import division
- import numpy as np
- import cv2
- import onnx
- import onnxruntime
- from ..utils import face_align
-
- __all__ = [
-     'Attribute',
- ]
-
-
- class Attribute:
-     def __init__(self, model_file=None, session=None):
-         assert model_file is not None
-         self.model_file = model_file
-         self.session = session
-         find_sub = False
-         find_mul = False
-         model = onnx.load(self.model_file)
-         graph = model.graph
-         for nid, node in enumerate(graph.node[:8]):
-             #print(nid, node.name)
-             if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                 find_sub = True
-             if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                 find_mul = True
-             if nid<3 and node.name=='bn_data':
-                 find_sub = True
-                 find_mul = True
-         if find_sub and find_mul:
-             #mxnet arcface model
-             input_mean = 0.0
-             input_std = 1.0
-         else:
-             input_mean = 127.5
-             input_std = 128.0
-         self.input_mean = input_mean
-         self.input_std = input_std
-         #print('input mean and std:', model_file, self.input_mean, self.input_std)
-         if self.session is None:
-             self.session = onnxruntime.InferenceSession(self.model_file, None)
-         input_cfg = self.session.get_inputs()[0]
-         input_shape = input_cfg.shape
-         input_name = input_cfg.name
-         self.input_size = tuple(input_shape[2:4][::-1])
-         self.input_shape = input_shape
-         outputs = self.session.get_outputs()
-         output_names = []
-         for out in outputs:
-             output_names.append(out.name)
-         self.input_name = input_name
-         self.output_names = output_names
-         assert len(self.output_names)==1
-         output_shape = outputs[0].shape
-         #print('init output_shape:', output_shape)
-         if output_shape[1]==3:
-             self.taskname = 'genderage'
-         else:
-             self.taskname = 'attribute_%d'%output_shape[1]
-
-     def prepare(self, ctx_id, **kwargs):
-         if ctx_id<0:
-             self.session.set_providers(['CPUExecutionProvider'])
-
-     def get(self, img, face):
-         bbox = face.bbox
-         w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
-         center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
-         rotate = 0
-         _scale = self.input_size[0] / (max(w, h)*1.5)
-         #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
-         aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
-         input_size = tuple(aimg.shape[0:2][::-1])
-         #assert input_size==self.input_size
-         blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
-         if self.taskname=='genderage':
-             assert len(pred)==3
-             gender = np.argmax(pred[:2])
-             age = int(np.round(pred[2]*100))
-             face['gender'] = gender
-             face['age'] = age
-             return gender, age
-         else:
-             return pred
-
-
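
Note: the genderage head emits three numbers, two gender scores and a normalized age; the decode in get() reduces to the following (the output vector is made up):

    import numpy as np

    pred = np.array([0.1, 0.9, 0.27])   # [female_score, male_score, age/100]
    gender = int(np.argmax(pred[:2]))   # 1, rendered as 'M' by Face.sex
    age = int(np.round(pred[2] * 100))  # 27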
 
insightface/model_zoo/inswapper.py DELETED
@@ -1,105 +0,0 @@
- import time
- import numpy as np
- import onnxruntime
- import cv2
- import onnx
- from onnx import numpy_helper
- from ..utils import face_align
-
-
-
-
- class INSwapper():
-     def __init__(self, model_file=None, session=None):
-         self.model_file = model_file
-         self.session = session
-         model = onnx.load(self.model_file)
-         graph = model.graph
-         self.emap = numpy_helper.to_array(graph.initializer[-1])
-         self.input_mean = 0.0
-         self.input_std = 255.0
-         #print('input mean and std:', model_file, self.input_mean, self.input_std)
-         if self.session is None:
-             self.session = onnxruntime.InferenceSession(self.model_file, None)
-         inputs = self.session.get_inputs()
-         self.input_names = []
-         for inp in inputs:
-             self.input_names.append(inp.name)
-         outputs = self.session.get_outputs()
-         output_names = []
-         for out in outputs:
-             output_names.append(out.name)
-         self.output_names = output_names
-         assert len(self.output_names)==1
-         output_shape = outputs[0].shape
-         input_cfg = inputs[0]
-         input_shape = input_cfg.shape
-         self.input_shape = input_shape
-         print('inswapper-shape:', self.input_shape)
-         self.input_size = tuple(input_shape[2:4][::-1])
-
-     def forward(self, img, latent):
-         img = (img - self.input_mean) / self.input_std
-         pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
-         return pred
-
-     def get(self, img, target_face, source_face, paste_back=True):
-         aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
-         blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
-                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         latent = source_face.normed_embedding.reshape((1,-1))
-         latent = np.dot(latent, self.emap)
-         latent /= np.linalg.norm(latent)
-         pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
-         #print(latent.shape, latent.dtype, pred.shape)
-         img_fake = pred.transpose((0,2,3,1))[0]
-         bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:,:,::-1]
-         if not paste_back:
-             return bgr_fake, M
-         else:
-             target_img = img
-             fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
-             fake_diff = np.abs(fake_diff).mean(axis=2)
-             fake_diff[:2,:] = 0
-             fake_diff[-2:,:] = 0
-             fake_diff[:,:2] = 0
-             fake_diff[:,-2:] = 0
-             IM = cv2.invertAffineTransform(M)
-             img_white = np.full((aimg.shape[0],aimg.shape[1]), 255, dtype=np.float32)
-             bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-             img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-             fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
-             img_white[img_white>20] = 255
-             fthresh = 10
-             fake_diff[fake_diff<fthresh] = 0
-             fake_diff[fake_diff>=fthresh] = 255
-             img_mask = img_white
-             mask_h_inds, mask_w_inds = np.where(img_mask==255)
-             mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
-             mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
-             mask_size = int(np.sqrt(mask_h*mask_w))
-             k = max(mask_size//10, 10)
-             #k = max(mask_size//20, 6)
-             #k = 6
-             kernel = np.ones((k,k),np.uint8)
-             img_mask = cv2.erode(img_mask,kernel,iterations = 1)
-             kernel = np.ones((2,2),np.uint8)
-             fake_diff = cv2.dilate(fake_diff,kernel,iterations = 1)
-             k = max(mask_size//20, 5)
-             #k = 3
-             #k = 3
-             kernel_size = (k, k)
-             blur_size = tuple(2*i+1 for i in kernel_size)
-             img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
-             k = 5
-             kernel_size = (k, k)
-             blur_size = tuple(2*i+1 for i in kernel_size)
-             fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
-             img_mask /= 255
-             fake_diff /= 255
-             #img_mask = fake_diff
-             img_mask = np.reshape(img_mask, [img_mask.shape[0],img_mask.shape[1],1])
-             fake_merged = img_mask * bgr_fake + (1-img_mask) * target_img.astype(np.float32)
-             fake_merged = fake_merged.astype(np.uint8)
-             return fake_merged
-
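
Note: the identity fed to the swapper is the source face's unit embedding projected through the model's emap initializer and re-normalized; the same computation in isolation, with dummy tensors:

    import numpy as np

    emb = np.random.randn(512).astype(np.float32)
    emb /= np.linalg.norm(emb)                           # plays the role of source_face.normed_embedding
    emap = np.random.randn(512, 512).astype(np.float32)  # stand-in for the last graph initializer
    latent = emb.reshape(1, -1) @ emap
    latent /= np.linalg.norm(latent)                     # unit-norm latent, the second ONNX input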
 
insightface/model_zoo/landmark.py DELETED
@@ -1,114 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-05-04
- # @Function :
-
- from __future__ import division
- import numpy as np
- import cv2
- import onnx
- import onnxruntime
- from ..utils import face_align
- from ..utils import transform
- from ..data import get_object
-
- __all__ = [
-     'Landmark',
- ]
-
-
- class Landmark:
-     def __init__(self, model_file=None, session=None):
-         assert model_file is not None
-         self.model_file = model_file
-         self.session = session
-         find_sub = False
-         find_mul = False
-         model = onnx.load(self.model_file)
-         graph = model.graph
-         for nid, node in enumerate(graph.node[:8]):
-             #print(nid, node.name)
-             if node.name.startswith('Sub') or node.name.startswith('_minus'):
-                 find_sub = True
-             if node.name.startswith('Mul') or node.name.startswith('_mul'):
-                 find_mul = True
-             if nid<3 and node.name=='bn_data':
-                 find_sub = True
-                 find_mul = True
-         if find_sub and find_mul:
-             #mxnet arcface model
-             input_mean = 0.0
-             input_std = 1.0
-         else:
-             input_mean = 127.5
-             input_std = 128.0
-         self.input_mean = input_mean
-         self.input_std = input_std
-         #print('input mean and std:', model_file, self.input_mean, self.input_std)
-         if self.session is None:
-             self.session = onnxruntime.InferenceSession(self.model_file, None)
-         input_cfg = self.session.get_inputs()[0]
-         input_shape = input_cfg.shape
-         input_name = input_cfg.name
-         self.input_size = tuple(input_shape[2:4][::-1])
-         self.input_shape = input_shape
-         outputs = self.session.get_outputs()
-         output_names = []
-         for out in outputs:
-             output_names.append(out.name)
-         self.input_name = input_name
-         self.output_names = output_names
-         assert len(self.output_names)==1
-         output_shape = outputs[0].shape
-         self.require_pose = False
-         #print('init output_shape:', output_shape)
-         if output_shape[1]==3309:
-             self.lmk_dim = 3
-             self.lmk_num = 68
-             self.mean_lmk = get_object('meanshape_68.pkl')
-             self.require_pose = True
-         else:
-             self.lmk_dim = 2
-             self.lmk_num = output_shape[1]//self.lmk_dim
-         self.taskname = 'landmark_%dd_%d'%(self.lmk_dim, self.lmk_num)
-
-     def prepare(self, ctx_id, **kwargs):
-         if ctx_id<0:
-             self.session.set_providers(['CPUExecutionProvider'])
-
-     def get(self, img, face):
-         bbox = face.bbox
-         w, h = (bbox[2] - bbox[0]), (bbox[3] - bbox[1])
-         center = (bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2
-         rotate = 0
-         _scale = self.input_size[0] / (max(w, h)*1.5)
-         #print('param:', img.shape, bbox, center, self.input_size, _scale, rotate)
-         aimg, M = face_align.transform(img, center, self.input_size[0], _scale, rotate)
-         input_size = tuple(aimg.shape[0:2][::-1])
-         #assert input_size==self.input_size
-         blob = cv2.dnn.blobFromImage(aimg, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         pred = self.session.run(self.output_names, {self.input_name : blob})[0][0]
-         if pred.shape[0] >= 3000:
-             pred = pred.reshape((-1, 3))
-         else:
-             pred = pred.reshape((-1, 2))
-         if self.lmk_num < pred.shape[0]:
-             pred = pred[self.lmk_num*-1:,:]
-         pred[:, 0:2] += 1
-         pred[:, 0:2] *= (self.input_size[0] // 2)
-         if pred.shape[1] == 3:
-             pred[:, 2] *= (self.input_size[0] // 2)
-
-         IM = cv2.invertAffineTransform(M)
-         pred = face_align.trans_points(pred, IM)
-         face[self.taskname] = pred
-         if self.require_pose:
-             P = transform.estimate_affine_matrix_3d23d(self.mean_lmk, pred)
-             s, R, t = transform.P2sRt(P)
-             rx, ry, rz = transform.matrix2angle(R)
-             pose = np.array( [rx, ry, rz], dtype=np.float32 )
-             face['pose'] = pose  #pitch, yaw, roll
-         return pred
-
-
 
insightface/model_zoo/model_store.py DELETED
@@ -1,103 +0,0 @@
- """
- This code file mainly comes from https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/model_store.py
- """
- from __future__ import print_function
-
- __all__ = ['get_model_file']
- import os
- import zipfile
- import glob
-
- from ..utils import download, check_sha1
-
- _model_sha1 = {
-     name: checksum
-     for checksum, name in [
-         ('95be21b58e29e9c1237f229dae534bd854009ce0', 'arcface_r100_v1'),
-         ('', 'arcface_mfn_v1'),
-         ('39fd1e087a2a2ed70a154ac01fecaa86c315d01b', 'retinaface_r50_v1'),
-         ('2c9de8116d1f448fd1d4661f90308faae34c990a', 'retinaface_mnet025_v1'),
-         ('0db1d07921d005e6c9a5b38e059452fc5645e5a4', 'retinaface_mnet025_v2'),
-         ('7dd8111652b7aac2490c5dcddeb268e53ac643e6', 'genderage_v1'),
-     ]
- }
-
- base_repo_url = 'https://insightface.ai/files/'
- _url_format = '{repo_url}models/{file_name}.zip'
-
-
- def short_hash(name):
-     if name not in _model_sha1:
-         raise ValueError(
-             'Pretrained model for {name} is not available.'.format(name=name))
-     return _model_sha1[name][:8]
-
-
- def find_params_file(dir_path):
-     if not os.path.exists(dir_path):
-         return None
-     paths = glob.glob("%s/*.params" % dir_path)
-     if len(paths) == 0:
-         return None
-     paths = sorted(paths)
-     return paths[-1]
-
-
- def get_model_file(name, root=os.path.join('~', '.insightface', 'models')):
-     r"""Return location for the pretrained on local file system.
-
-     This function will download from online model zoo when model cannot be found or has mismatch.
-     The root directory will be created if it doesn't exist.
-
-     Parameters
-     ----------
-     name : str
-         Name of the model.
-     root : str, default '~/.mxnet/models'
-         Location for keeping the model parameters.
-
-     Returns
-     -------
-     file_path
-         Path to the requested pretrained model file.
-     """
-
-     file_name = name
-     root = os.path.expanduser(root)
-     dir_path = os.path.join(root, name)
-     file_path = find_params_file(dir_path)
-     #file_path = os.path.join(root, file_name + '.params')
-     sha1_hash = _model_sha1[name]
-     if file_path is not None:
-         if check_sha1(file_path, sha1_hash):
-             return file_path
-         else:
-             print(
-                 'Mismatch in the content of model file detected. Downloading again.'
-             )
-     else:
-         print('Model file is not found. Downloading.')
-
-     if not os.path.exists(root):
-         os.makedirs(root)
-     if not os.path.exists(dir_path):
-         os.makedirs(dir_path)
-
-     zip_file_path = os.path.join(root, file_name + '.zip')
-     repo_url = base_repo_url
-     if repo_url[-1] != '/':
-         repo_url = repo_url + '/'
-     download(_url_format.format(repo_url=repo_url, file_name=file_name),
-              path=zip_file_path,
-              overwrite=True)
-     with zipfile.ZipFile(zip_file_path) as zf:
-         zf.extractall(dir_path)
-     os.remove(zip_file_path)
-     file_path = find_params_file(dir_path)
-
-     if check_sha1(file_path, sha1_hash):
-         return file_path
-     else:
-         raise ValueError(
-             'Downloaded file has different hash. Please try again.')
-
 
insightface/model_zoo/model_zoo.py DELETED
@@ -1,98 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-05-04
- # @Function :
-
- import os
- import os.path as osp
- import glob
- import onnxruntime
- from .arcface_onnx import *
- from .retinaface import *
- #from .scrfd import *
- from .landmark import *
- from .attribute import Attribute
- from .inswapper import INSwapper
- from ..utils import download_onnx
-
- __all__ = ['get_model']
-
-
- class PickableInferenceSession(onnxruntime.InferenceSession):
-     # This is a wrapper to make the current InferenceSession class pickable.
-     def __init__(self, model_path, **kwargs):
-         super().__init__(model_path, **kwargs)
-         self.model_path = model_path
-
-     def __getstate__(self):
-         return {'model_path': self.model_path}
-
-     def __setstate__(self, values):
-         model_path = values['model_path']
-         self.__init__(model_path)
-
- class ModelRouter:
-     def __init__(self, onnx_file):
-         self.onnx_file = onnx_file
-
-     def get_model(self, **kwargs):
-         session = PickableInferenceSession(self.onnx_file, **kwargs)
-         print(f'Applied providers: {session._providers}, with options: {session._provider_options}')
-         inputs = session.get_inputs()
-         input_cfg = inputs[0]
-         input_shape = input_cfg.shape
-         outputs = session.get_outputs()
-
-         if len(outputs)>=5:
-             return RetinaFace(model_file=self.onnx_file, session=session)
-         elif input_shape[2]==192 and input_shape[3]==192:
-             return Landmark(model_file=self.onnx_file, session=session)
-         elif input_shape[2]==96 and input_shape[3]==96:
-             return Attribute(model_file=self.onnx_file, session=session)
-         elif len(inputs)==2 and input_shape[2]==128 and input_shape[3]==128:
-             return INSwapper(model_file=self.onnx_file, session=session)
-         elif input_shape[2]==input_shape[3] and input_shape[2]>=112 and input_shape[2]%16==0:
-             return ArcFaceONNX(model_file=self.onnx_file, session=session)
-         else:
-             #raise RuntimeError('error on model routing')
-             return None
-
- def find_onnx_file(dir_path):
-     if not os.path.exists(dir_path):
-         return None
-     paths = glob.glob("%s/*.onnx" % dir_path)
-     if len(paths) == 0:
-         return None
-     paths = sorted(paths)
-     return paths[-1]
-
- def get_default_providers():
-     return ['CUDAExecutionProvider', 'CPUExecutionProvider']
-
- def get_default_provider_options():
-     return None
-
- def get_model(name, **kwargs):
-     root = kwargs.get('root', '~/.insightface')
-     root = os.path.expanduser(root)
-     model_root = osp.join(root, 'models')
-     allow_download = kwargs.get('download', False)
-     download_zip = kwargs.get('download_zip', False)
-     if not name.endswith('.onnx'):
-         model_dir = os.path.join(model_root, name)
-         model_file = find_onnx_file(model_dir)
-         if model_file is None:
-             return None
-     else:
-         model_file = name
-     if not osp.exists(model_file) and allow_download:
-         model_file = download_onnx('models', model_file, root=root, download_zip=download_zip)
-     assert osp.exists(model_file), 'model_file %s should exist'%model_file
-     assert osp.isfile(model_file), 'model_file %s should be a file'%model_file
-     router = ModelRouter(model_file)
-     providers = kwargs.get('providers', get_default_providers())
-     provider_options = kwargs.get('provider_options', get_default_provider_options())
-     model = router.get_model(providers=providers, provider_options=provider_options)
-     return model
-
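
Note: ModelRouter dispatches purely on the ONNX graph's input/output shapes, so one get_model call covers every recognized model type. A sketch, where the path is a placeholder and must point at a real .onnx file:

    from insightface.model_zoo import get_model

    det = get_model('/path/to/detector.onnx', providers=['CPUExecutionProvider'])
    det.prepare(ctx_id=-1, input_size=(640, 640))  # a 9- or 15-output graph routes to RetinaFace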
 
insightface/model_zoo/retinaface.py DELETED
@@ -1,301 +0,0 @@
- # -*- coding: utf-8 -*-
- # @Organization : insightface.ai
- # @Author : Jia Guo
- # @Time : 2021-09-18
- # @Function :
-
- from __future__ import division
- import datetime
- import numpy as np
- import onnx
- import onnxruntime
- import os
- import os.path as osp
- import cv2
- import sys
-
- def softmax(z):
-     assert len(z.shape) == 2
-     s = np.max(z, axis=1)
-     s = s[:, np.newaxis]  # necessary step to do broadcasting
-     e_x = np.exp(z - s)
-     div = np.sum(e_x, axis=1)
-     div = div[:, np.newaxis]  # dito
-     return e_x / div
-
- def distance2bbox(points, distance, max_shape=None):
-     """Decode distance prediction to bounding box.
-
-     Args:
-         points (Tensor): Shape (n, 2), [x, y].
-         distance (Tensor): Distance from the given point to 4
-             boundaries (left, top, right, bottom).
-         max_shape (tuple): Shape of the image.
-
-     Returns:
-         Tensor: Decoded bboxes.
-     """
-     x1 = points[:, 0] - distance[:, 0]
-     y1 = points[:, 1] - distance[:, 1]
-     x2 = points[:, 0] + distance[:, 2]
-     y2 = points[:, 1] + distance[:, 3]
-     if max_shape is not None:
-         x1 = x1.clamp(min=0, max=max_shape[1])
-         y1 = y1.clamp(min=0, max=max_shape[0])
-         x2 = x2.clamp(min=0, max=max_shape[1])
-         y2 = y2.clamp(min=0, max=max_shape[0])
-     return np.stack([x1, y1, x2, y2], axis=-1)
-
- def distance2kps(points, distance, max_shape=None):
-     """Decode distance prediction to bounding box.
-
-     Args:
-         points (Tensor): Shape (n, 2), [x, y].
-         distance (Tensor): Distance from the given point to 4
-             boundaries (left, top, right, bottom).
-         max_shape (tuple): Shape of the image.
-
-     Returns:
-         Tensor: Decoded bboxes.
-     """
-     preds = []
-     for i in range(0, distance.shape[1], 2):
-         px = points[:, i%2] + distance[:, i]
-         py = points[:, i%2+1] + distance[:, i+1]
-         if max_shape is not None:
-             px = px.clamp(min=0, max=max_shape[1])
-             py = py.clamp(min=0, max=max_shape[0])
-         preds.append(px)
-         preds.append(py)
-     return np.stack(preds, axis=-1)
-
- class RetinaFace:
-     def __init__(self, model_file=None, session=None):
-         import onnxruntime
-         self.model_file = model_file
-         self.session = session
-         self.taskname = 'detection'
-         if self.session is None:
-             assert self.model_file is not None
-             assert osp.exists(self.model_file)
-             self.session = onnxruntime.InferenceSession(self.model_file, None)
-         self.center_cache = {}
-         self.nms_thresh = 0.4
-         self.det_thresh = 0.5
-         self._init_vars()
-
-     def _init_vars(self):
-         input_cfg = self.session.get_inputs()[0]
-         input_shape = input_cfg.shape
-         #print(input_shape)
-         if isinstance(input_shape[2], str):
-             self.input_size = None
-         else:
-             self.input_size = tuple(input_shape[2:4][::-1])
-         #print('image_size:', self.image_size)
-         input_name = input_cfg.name
-         self.input_shape = input_shape
-         outputs = self.session.get_outputs()
-         output_names = []
-         for o in outputs:
-             output_names.append(o.name)
-         self.input_name = input_name
-         self.output_names = output_names
-         self.input_mean = 127.5
-         self.input_std = 128.0
-         #print(self.output_names)
-         #assert len(outputs)==10 or len(outputs)==15
-         self.use_kps = False
-         self._anchor_ratio = 1.0
-         self._num_anchors = 1
-         if len(outputs)==6:
-             self.fmc = 3
-             self._feat_stride_fpn = [8, 16, 32]
-             self._num_anchors = 2
-         elif len(outputs)==9:
-             self.fmc = 3
-             self._feat_stride_fpn = [8, 16, 32]
-             self._num_anchors = 2
-             self.use_kps = True
-         elif len(outputs)==10:
-             self.fmc = 5
-             self._feat_stride_fpn = [8, 16, 32, 64, 128]
-             self._num_anchors = 1
-         elif len(outputs)==15:
-             self.fmc = 5
-             self._feat_stride_fpn = [8, 16, 32, 64, 128]
-             self._num_anchors = 1
-             self.use_kps = True
-
-     def prepare(self, ctx_id, **kwargs):
-         if ctx_id<0:
-             self.session.set_providers(['CPUExecutionProvider'])
-         nms_thresh = kwargs.get('nms_thresh', None)
-         if nms_thresh is not None:
-             self.nms_thresh = nms_thresh
-         det_thresh = kwargs.get('det_thresh', None)
-         if det_thresh is not None:
-             self.det_thresh = det_thresh
-         input_size = kwargs.get('input_size', None)
-         if input_size is not None:
-             if self.input_size is not None:
-                 print('warning: det_size is already set in detection model, ignore')
-             else:
-                 self.input_size = input_size
-
-     def forward(self, img, threshold):
-         scores_list = []
-         bboxes_list = []
-         kpss_list = []
-         input_size = tuple(img.shape[0:2][::-1])
-         blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
-         net_outs = self.session.run(self.output_names, {self.input_name : blob})
-
-         input_height = blob.shape[2]
-         input_width = blob.shape[3]
-         fmc = self.fmc
-         for idx, stride in enumerate(self._feat_stride_fpn):
-             scores = net_outs[idx]
-             bbox_preds = net_outs[idx+fmc]
-             bbox_preds = bbox_preds * stride
-             if self.use_kps:
-                 kps_preds = net_outs[idx+fmc*2] * stride
-             height = input_height // stride
-             width = input_width // stride
-             K = height * width
-             key = (height, width, stride)
-             if key in self.center_cache:
-                 anchor_centers = self.center_cache[key]
-             else:
-                 #solution-1, c style:
-                 #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
-                 #for i in range(height):
-                 #    anchor_centers[i, :, 1] = i
-                 #for i in range(width):
-                 #    anchor_centers[:, i, 0] = i
-
-                 #solution-2:
-                 #ax = np.arange(width, dtype=np.float32)
-                 #ay = np.arange(height, dtype=np.float32)
-                 #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
-                 #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
-
-                 #solution-3:
-                 anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
-                 #print(anchor_centers.shape)
-
-                 anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
-                 if self._num_anchors>1:
-                     anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
-                 if len(self.center_cache)<100:
-                     self.center_cache[key] = anchor_centers
-
-             pos_inds = np.where(scores>=threshold)[0]
-             bboxes = distance2bbox(anchor_centers, bbox_preds)
-             pos_scores = scores[pos_inds]
-             pos_bboxes = bboxes[pos_inds]
-             scores_list.append(pos_scores)
-             bboxes_list.append(pos_bboxes)
-             if self.use_kps:
-                 kpss = distance2kps(anchor_centers, kps_preds)
-                 #kpss = kps_preds
-                 kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
-                 pos_kpss = kpss[pos_inds]
-                 kpss_list.append(pos_kpss)
-         return scores_list, bboxes_list, kpss_list
-
-     def detect(self, img, input_size = None, max_num=0, metric='default'):
-         assert input_size is not None or self.input_size is not None
-         input_size = self.input_size if input_size is None else input_size
-
-         im_ratio = float(img.shape[0]) / img.shape[1]
-         model_ratio = float(input_size[1]) / input_size[0]
-         if im_ratio>model_ratio:
-             new_height = input_size[1]
215
- new_width = int(new_height / im_ratio)
216
- else:
217
- new_width = input_size[0]
218
- new_height = int(new_width * im_ratio)
219
- det_scale = float(new_height) / img.shape[0]
220
- resized_img = cv2.resize(img, (new_width, new_height))
221
- det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
222
- det_img[:new_height, :new_width, :] = resized_img
223
-
224
- scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
225
-
226
- scores = np.vstack(scores_list)
227
- scores_ravel = scores.ravel()
228
- order = scores_ravel.argsort()[::-1]
229
- bboxes = np.vstack(bboxes_list) / det_scale
230
- if self.use_kps:
231
- kpss = np.vstack(kpss_list) / det_scale
232
- pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
233
- pre_det = pre_det[order, :]
234
- keep = self.nms(pre_det)
235
- det = pre_det[keep, :]
236
- if self.use_kps:
237
- kpss = kpss[order,:,:]
238
- kpss = kpss[keep,:,:]
239
- else:
240
- kpss = None
241
- if max_num > 0 and det.shape[0] > max_num:
242
- area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
243
- det[:, 1])
244
- img_center = img.shape[0] // 2, img.shape[1] // 2
245
- offsets = np.vstack([
246
- (det[:, 0] + det[:, 2]) / 2 - img_center[1],
247
- (det[:, 1] + det[:, 3]) / 2 - img_center[0]
248
- ])
249
- offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
250
- if metric=='max':
251
- values = area
252
- else:
253
- values = area - offset_dist_squared * 2.0 # some extra weight on the centering
254
- bindex = np.argsort(
255
- values)[::-1]
256
- bindex = bindex[0:max_num]
257
- det = det[bindex, :]
258
- if kpss is not None:
259
- kpss = kpss[bindex, :]
260
- return det, kpss
261
-
262
- def nms(self, dets):
263
- thresh = self.nms_thresh
264
- x1 = dets[:, 0]
265
- y1 = dets[:, 1]
266
- x2 = dets[:, 2]
267
- y2 = dets[:, 3]
268
- scores = dets[:, 4]
269
-
270
- areas = (x2 - x1 + 1) * (y2 - y1 + 1)
271
- order = scores.argsort()[::-1]
272
-
273
- keep = []
274
- while order.size > 0:
275
- i = order[0]
276
- keep.append(i)
277
- xx1 = np.maximum(x1[i], x1[order[1:]])
278
- yy1 = np.maximum(y1[i], y1[order[1:]])
279
- xx2 = np.minimum(x2[i], x2[order[1:]])
280
- yy2 = np.minimum(y2[i], y2[order[1:]])
281
-
282
- w = np.maximum(0.0, xx2 - xx1 + 1)
283
- h = np.maximum(0.0, yy2 - yy1 + 1)
284
- inter = w * h
285
- ovr = inter / (areas[i] + areas[order[1:]] - inter)
286
-
287
- inds = np.where(ovr <= thresh)[0]
288
- order = order[inds + 1]
289
-
290
- return keep
291
-
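The nms method above is greedy IoU suppression: keep the highest-scoring box, drop everything overlapping it beyond nms_thresh, repeat. A worked mini-example (made-up boxes; the +1 terms follow the code's inclusive-pixel convention):

import numpy as np

dets = np.array([[0., 0., 10., 10., 0.9],    # kept first (highest score)
                 [1., 1., 10., 10., 0.8],    # IoU with box 0: 100/121 ~ 0.83 > 0.4 -> suppressed
                 [20., 20., 30., 30., 0.7]]) # disjoint -> kept
# areas (with +1): 121, 100, 121; intersection of boxes 0 and 1: 10*10 = 100
# so keep would come back as [0, 2] for nms_thresh = 0.4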
292
- def get_retinaface(name, download=False, root='~/.insightface/models', **kwargs):
293
- if not download:
294
- assert os.path.exists(name)
295
- return RetinaFace(name)
296
- else:
297
- from .model_store import get_model_file
298
- _file = get_model_file("retinaface_%s" % name, root=root)
299
- return RetinaFace(_file)
300
-
301
-
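For context, the deleted factory would be driven roughly as below; './det.onnx' is a placeholder path, not a file shipped with this repo, and ctx_id=-1 forces CPU execution:

# detector = get_retinaface('./det.onnx')                # download=False: expects a local file
# detector.prepare(ctx_id=-1, nms_thresh=0.4, det_thresh=0.5, input_size=(640, 640))
# dets, kpss = detector.detect(img)                      # img: HxWx3 BGR uint8 array
# dets: [n, 5] boxes+score; kpss: [n, 5, 2] landmarks (when the model outputs them)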
 
insightface/model_zoo/scrfd.py DELETED
@@ -1,348 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # @Organization : insightface.ai
3
- # @Author : Jia Guo
4
- # @Time : 2021-05-04
5
- # @Function :
6
-
7
- from __future__ import division
8
- import datetime
9
- import numpy as np
10
- import onnx
11
- import onnxruntime
12
- import os
13
- import os.path as osp
14
- import cv2
15
- import sys
16
-
17
- def softmax(z):
18
- assert len(z.shape) == 2
19
- s = np.max(z, axis=1)
20
- s = s[:, np.newaxis] # necessary step to do broadcasting
21
- e_x = np.exp(z - s)
22
- div = np.sum(e_x, axis=1)
23
- div = div[:, np.newaxis] # ditto
24
- return e_x / div
25
-
26
- def distance2bbox(points, distance, max_shape=None):
27
- """Decode distance prediction to bounding box.
28
-
29
- Args:
30
- points (Tensor): Shape (n, 2), [x, y].
31
- distance (Tensor): Distance from the given point to 4
32
- boundaries (left, top, right, bottom).
33
- max_shape (tuple): Shape of the image.
34
-
35
- Returns:
36
- Tensor: Decoded bboxes.
37
- """
38
- x1 = points[:, 0] - distance[:, 0]
39
- y1 = points[:, 1] - distance[:, 1]
40
- x2 = points[:, 0] + distance[:, 2]
41
- y2 = points[:, 1] + distance[:, 3]
42
- if max_shape is not None:
43
- x1 = np.clip(x1, 0, max_shape[1]) # ndarray has no .clamp; that is a torch method
44
- y1 = np.clip(y1, 0, max_shape[0])
45
- x2 = np.clip(x2, 0, max_shape[1])
46
- y2 = np.clip(y2, 0, max_shape[0])
47
- return np.stack([x1, y1, x2, y2], axis=-1)
48
-
49
- def distance2kps(points, distance, max_shape=None):
50
- """Decode distance prediction to bounding box.
51
-
52
- Args:
53
- points (Tensor): Shape (n, 2), [x, y].
54
- distance (Tensor): Distance from the given point to 4
55
- boundaries (left, top, right, bottom).
56
- max_shape (tuple): Shape of the image.
57
-
58
- Returns:
59
- Tensor: Decoded bboxes.
60
- """
61
- preds = []
62
- for i in range(0, distance.shape[1], 2):
63
- px = points[:, i%2] + distance[:, i]
64
- py = points[:, i%2+1] + distance[:, i+1]
65
- if max_shape is not None:
66
- px = np.clip(px, 0, max_shape[1]) # ndarray has no .clamp; that is a torch method
67
- py = np.clip(py, 0, max_shape[0])
68
- preds.append(px)
69
- preds.append(py)
70
- return np.stack(preds, axis=-1)
71
-
72
- class SCRFD:
73
- def __init__(self, model_file=None, session=None):
74
- import onnxruntime
75
- self.model_file = model_file
76
- self.session = session
77
- self.taskname = 'detection'
78
- self.batched = False
79
- if self.session is None:
80
- assert self.model_file is not None
81
- assert osp.exists(self.model_file)
82
- self.session = onnxruntime.InferenceSession(self.model_file, None)
83
- self.center_cache = {}
84
- self.nms_thresh = 0.4
85
- self.det_thresh = 0.5
86
- self._init_vars()
87
-
88
- def _init_vars(self):
89
- input_cfg = self.session.get_inputs()[0]
90
- input_shape = input_cfg.shape
91
- #print(input_shape)
92
- if isinstance(input_shape[2], str):
93
- self.input_size = None
94
- else:
95
- self.input_size = tuple(input_shape[2:4][::-1])
96
- #print('image_size:', self.image_size)
97
- input_name = input_cfg.name
98
- self.input_shape = input_shape
99
- outputs = self.session.get_outputs()
100
- if len(outputs[0].shape) == 3:
101
- self.batched = True
102
- output_names = []
103
- for o in outputs:
104
- output_names.append(o.name)
105
- self.input_name = input_name
106
- self.output_names = output_names
107
- self.input_mean = 127.5
108
- self.input_std = 128.0
109
- #print(self.output_names)
110
- #assert len(outputs)==10 or len(outputs)==15
111
- self.use_kps = False
112
- self._anchor_ratio = 1.0
113
- self._num_anchors = 1
114
- if len(outputs)==6:
115
- self.fmc = 3
116
- self._feat_stride_fpn = [8, 16, 32]
117
- self._num_anchors = 2
118
- elif len(outputs)==9:
119
- self.fmc = 3
120
- self._feat_stride_fpn = [8, 16, 32]
121
- self._num_anchors = 2
122
- self.use_kps = True
123
- elif len(outputs)==10:
124
- self.fmc = 5
125
- self._feat_stride_fpn = [8, 16, 32, 64, 128]
126
- self._num_anchors = 1
127
- elif len(outputs)==15:
128
- self.fmc = 5
129
- self._feat_stride_fpn = [8, 16, 32, 64, 128]
130
- self._num_anchors = 1
131
- self.use_kps = True
132
-
133
- def prepare(self, ctx_id, **kwargs):
134
- if ctx_id<0:
135
- self.session.set_providers(['CPUExecutionProvider'])
136
- nms_thresh = kwargs.get('nms_thresh', None)
137
- if nms_thresh is not None:
138
- self.nms_thresh = nms_thresh
139
- det_thresh = kwargs.get('det_thresh', None)
140
- if det_thresh is not None:
141
- self.det_thresh = det_thresh
142
- input_size = kwargs.get('input_size', None)
143
- if input_size is not None:
144
- if self.input_size is not None:
145
- print('warning: det_size is already set in scrfd model, ignore')
146
- else:
147
- self.input_size = input_size
148
-
149
- def forward(self, img, threshold):
150
- scores_list = []
151
- bboxes_list = []
152
- kpss_list = []
153
- input_size = tuple(img.shape[0:2][::-1])
154
- blob = cv2.dnn.blobFromImage(img, 1.0/self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
155
- net_outs = self.session.run(self.output_names, {self.input_name : blob})
156
-
157
- input_height = blob.shape[2]
158
- input_width = blob.shape[3]
159
- fmc = self.fmc
160
- for idx, stride in enumerate(self._feat_stride_fpn):
161
- # If the model supports a batch dim, take the first output
162
- if self.batched:
163
- scores = net_outs[idx][0]
164
- bbox_preds = net_outs[idx + fmc][0]
165
- bbox_preds = bbox_preds * stride
166
- if self.use_kps:
167
- kps_preds = net_outs[idx + fmc * 2][0] * stride
168
- # If the model doesn't support batching, take the output as is
169
- else:
170
- scores = net_outs[idx]
171
- bbox_preds = net_outs[idx + fmc]
172
- bbox_preds = bbox_preds * stride
173
- if self.use_kps:
174
- kps_preds = net_outs[idx + fmc * 2] * stride
175
-
176
- height = input_height // stride
177
- width = input_width // stride
178
- K = height * width
179
- key = (height, width, stride)
180
- if key in self.center_cache:
181
- anchor_centers = self.center_cache[key]
182
- else:
183
- #solution-1, c style:
184
- #anchor_centers = np.zeros( (height, width, 2), dtype=np.float32 )
185
- #for i in range(height):
186
- # anchor_centers[i, :, 1] = i
187
- #for i in range(width):
188
- # anchor_centers[:, i, 0] = i
189
-
190
- #solution-2:
191
- #ax = np.arange(width, dtype=np.float32)
192
- #ay = np.arange(height, dtype=np.float32)
193
- #xv, yv = np.meshgrid(np.arange(width), np.arange(height))
194
- #anchor_centers = np.stack([xv, yv], axis=-1).astype(np.float32)
195
-
196
- #solution-3:
197
- anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
198
- #print(anchor_centers.shape)
199
-
200
- anchor_centers = (anchor_centers * stride).reshape( (-1, 2) )
201
- if self._num_anchors>1:
202
- anchor_centers = np.stack([anchor_centers]*self._num_anchors, axis=1).reshape( (-1,2) )
203
- if len(self.center_cache)<100:
204
- self.center_cache[key] = anchor_centers
205
-
206
- pos_inds = np.where(scores>=threshold)[0]
207
- bboxes = distance2bbox(anchor_centers, bbox_preds)
208
- pos_scores = scores[pos_inds]
209
- pos_bboxes = bboxes[pos_inds]
210
- scores_list.append(pos_scores)
211
- bboxes_list.append(pos_bboxes)
212
- if self.use_kps:
213
- kpss = distance2kps(anchor_centers, kps_preds)
214
- #kpss = kps_preds
215
- kpss = kpss.reshape( (kpss.shape[0], -1, 2) )
216
- pos_kpss = kpss[pos_inds]
217
- kpss_list.append(pos_kpss)
218
- return scores_list, bboxes_list, kpss_list
219
-
220
- def detect(self, img, input_size = None, max_num=0, metric='default'):
221
- assert input_size is not None or self.input_size is not None
222
- input_size = self.input_size if input_size is None else input_size
223
-
224
- im_ratio = float(img.shape[0]) / img.shape[1]
225
- model_ratio = float(input_size[1]) / input_size[0]
226
- if im_ratio>model_ratio:
227
- new_height = input_size[1]
228
- new_width = int(new_height / im_ratio)
229
- else:
230
- new_width = input_size[0]
231
- new_height = int(new_width * im_ratio)
232
- det_scale = float(new_height) / img.shape[0]
233
- resized_img = cv2.resize(img, (new_width, new_height))
234
- det_img = np.zeros( (input_size[1], input_size[0], 3), dtype=np.uint8 )
235
- det_img[:new_height, :new_width, :] = resized_img
236
-
237
- scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)
238
-
239
- scores = np.vstack(scores_list)
240
- scores_ravel = scores.ravel()
241
- order = scores_ravel.argsort()[::-1]
242
- bboxes = np.vstack(bboxes_list) / det_scale
243
- if self.use_kps:
244
- kpss = np.vstack(kpss_list) / det_scale
245
- pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
246
- pre_det = pre_det[order, :]
247
- keep = self.nms(pre_det)
248
- det = pre_det[keep, :]
249
- if self.use_kps:
250
- kpss = kpss[order,:,:]
251
- kpss = kpss[keep,:,:]
252
- else:
253
- kpss = None
254
- if max_num > 0 and det.shape[0] > max_num:
255
- area = (det[:, 2] - det[:, 0]) * (det[:, 3] -
256
- det[:, 1])
257
- img_center = img.shape[0] // 2, img.shape[1] // 2
258
- offsets = np.vstack([
259
- (det[:, 0] + det[:, 2]) / 2 - img_center[1],
260
- (det[:, 1] + det[:, 3]) / 2 - img_center[0]
261
- ])
262
- offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
263
- if metric=='max':
264
- values = area
265
- else:
266
- values = area - offset_dist_squared * 2.0 # some extra weight on the centering
267
- bindex = np.argsort(
268
- values)[::-1]
269
- bindex = bindex[0:max_num]
270
- det = det[bindex, :]
271
- if kpss is not None:
272
- kpss = kpss[bindex, :]
273
- return det, kpss
274
-
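The resize logic at the top of detect() letterboxes the input: it scales the image to fit the model size while preserving aspect ratio, pads the remainder with zeros, and records det_scale so detections can be mapped back. A numeric sketch with made-up shapes:

# a 960x1280 (h x w) image into a 640x640 model input:
# im_ratio = 960/1280 = 0.75, model_ratio = 1.0, so width is the limiting side:
# new_width = 640, new_height = int(640 * 0.75) = 480, det_scale = 480/960 = 0.5
# boxes and keypoints are later divided by det_scale (0.5) to return to original coords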
275
- def nms(self, dets):
276
- thresh = self.nms_thresh
277
- x1 = dets[:, 0]
278
- y1 = dets[:, 1]
279
- x2 = dets[:, 2]
280
- y2 = dets[:, 3]
281
- scores = dets[:, 4]
282
-
283
- areas = (x2 - x1 + 1) * (y2 - y1 + 1)
284
- order = scores.argsort()[::-1]
285
-
286
- keep = []
287
- while order.size > 0:
288
- i = order[0]
289
- keep.append(i)
290
- xx1 = np.maximum(x1[i], x1[order[1:]])
291
- yy1 = np.maximum(y1[i], y1[order[1:]])
292
- xx2 = np.minimum(x2[i], x2[order[1:]])
293
- yy2 = np.minimum(y2[i], y2[order[1:]])
294
-
295
- w = np.maximum(0.0, xx2 - xx1 + 1)
296
- h = np.maximum(0.0, yy2 - yy1 + 1)
297
- inter = w * h
298
- ovr = inter / (areas[i] + areas[order[1:]] - inter)
299
-
300
- inds = np.where(ovr <= thresh)[0]
301
- order = order[inds + 1]
302
-
303
- return keep
304
-
305
- def get_scrfd(name, download=False, root='~/.insightface/models', **kwargs):
306
- if not download:
307
- assert os.path.exists(name)
308
- return SCRFD(name)
309
- else:
310
- from .model_store import get_model_file
311
- _file = get_model_file("scrfd_%s" % name, root=root)
312
- return SCRFD(_file)
313
-
314
-
315
- def scrfd_2p5gkps(**kwargs):
316
- return get_scrfd("2p5gkps", download=True, **kwargs)
317
-
318
-
319
- if __name__ == '__main__':
320
- import glob
321
- detector = SCRFD(model_file='./det.onnx')
322
- detector.prepare(-1)
323
- img_paths = ['tests/data/t1.jpg']
324
- for img_path in img_paths:
325
- img = cv2.imread(img_path)
326
-
327
- for _ in range(1):
328
- ta = datetime.datetime.now()
329
- #bboxes, kpss = detector.detect(img, 0.5, input_size = (640, 640))
330
- bboxes, kpss = detector.detect(img, input_size = (640, 640)) # 0.5 was being passed as input_size by position
331
- tb = datetime.datetime.now()
332
- print('all cost:', (tb-ta).total_seconds()*1000)
333
- print(img_path, bboxes.shape)
334
- if kpss is not None:
335
- print(kpss.shape)
336
- for i in range(bboxes.shape[0]):
337
- bbox = bboxes[i]
338
- x1,y1,x2,y2,score = bbox.astype(int) # np.int is removed in modern NumPy
339
- cv2.rectangle(img, (x1,y1) , (x2,y2) , (255,0,0) , 2)
340
- if kpss is not None:
341
- kps = kpss[i]
342
- for kp in kps:
343
- kp = kp.astype(int)
344
- cv2.circle(img, tuple(kp) , 1, (0,0,255) , 2)
345
- filename = img_path.split('/')[-1]
346
- print('output:', filename)
347
- cv2.imwrite('./outputs/%s'%filename, img)
348
-
 
insightface/thirdparty/__init__.py DELETED
File without changes
insightface/thirdparty/face3d/__init__.py DELETED
@@ -1,4 +0,0 @@
1
- #import mesh
2
- #import morphable_model
3
- from . import mesh
4
- from . import morphable_model
 
insightface/thirdparty/face3d/mesh/__init__.cpp DELETED
The diff for this file is too large to render. See raw diff
 
insightface/thirdparty/face3d/mesh/__init__.py DELETED
@@ -1,15 +0,0 @@
1
- #from __future__ import absolute_import
2
- #from cython import mesh_core_cython
3
- #import io
4
- #import vis
5
- #import transform
6
- #import light
7
- #import render
8
-
9
- from .cython import mesh_core_cython
10
- from . import io
11
- from . import vis
12
- from . import transform
13
- from . import light
14
- from . import render
15
-
 
insightface/thirdparty/face3d/mesh/cython/mesh_core.cpp DELETED
@@ -1,375 +0,0 @@
1
- /*
2
- functions that cannot be optimized by vectorization in python.
3
- 1. rasterization. (needs to process each triangle)
4
- 2. normal of each vertex. (uses the one-ring, needs to process each vertex)
5
- 3. write obj (it seems this could be vectorized? anyway, writing it in c++ is simple, so the function is also added here. --> however, why is writing it in c++ still slow?)
6
-
7
- Author: Yao Feng
8
- Mail: yaofeng1995@gmail.com
9
- */
10
-
11
- #include "mesh_core.h"
12
-
13
-
14
- /* Judge whether the point is in the triangle
15
- Method:
16
- http://blackpawn.com/texts/pointinpoly/
17
- Args:
18
- point: [x, y]
19
- tri_points: three vertices(2d points) of a triangle. 2 coords x 3 vertices
20
- Returns:
21
- bool: true for in triangle
22
- */
23
- bool isPointInTri(point p, point p0, point p1, point p2)
24
- {
25
- // vectors
26
- point v0, v1, v2;
27
- v0 = p2 - p0;
28
- v1 = p1 - p0;
29
- v2 = p - p0;
30
-
31
- // dot products
32
- float dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0)
33
- float dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1)
34
- float dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2)
35
- float dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1)
36
- float dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2)
37
-
38
- // barycentric coordinates
39
- float inverDeno;
40
- if(dot00*dot11 - dot01*dot01 == 0)
41
- inverDeno = 0;
42
- else
43
- inverDeno = 1/(dot00*dot11 - dot01*dot01);
44
-
45
- float u = (dot11*dot02 - dot01*dot12)*inverDeno;
46
- float v = (dot00*dot12 - dot01*dot02)*inverDeno;
47
-
48
- // check if point in triangle
49
- return (u >= 0) && (v >= 0) && (u + v < 1);
50
- }
51
-
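For readers following the barycentric test above, here is the same dot-product formulation in NumPy, a sketch mirroring the C++ logic (including its guard that treats a degenerate triangle as u = v = 0); it is not part of the build:

import numpy as np

def is_point_in_tri(p, p0, p1, p2):
    v0, v1, v2 = p2 - p0, p1 - p0, p - p0       # same edge vectors as the C++ version
    d00, d01, d02 = v0 @ v0, v0 @ v1, v0 @ v2
    d11, d12 = v1 @ v1, v1 @ v2
    denom = d00 * d11 - d01 * d01
    inv = 0.0 if denom == 0 else 1.0 / denom    # mirrors the inverDeno == 0 guard
    u = (d11 * d02 - d01 * d12) * inv
    v = (d00 * d12 - d01 * d02) * inv
    return u >= 0 and v >= 0 and u + v < 1

# (0.2, 0.2) lies inside the unit right triangle: u = v = 0.2, u + v = 0.4 < 1
print(is_point_in_tri(np.array([.2, .2]), np.zeros(2), np.array([1., 0.]), np.array([0., 1.])))  # True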
52
-
53
- void get_point_weight(float* weight, point p, point p0, point p1, point p2)
54
- {
55
- // vectors
56
- point v0, v1, v2;
57
- v0 = p2 - p0;
58
- v1 = p1 - p0;
59
- v2 = p - p0;
60
-
61
- // dot products
62
- float dot00 = v0.dot(v0); //v0.x * v0.x + v0.y * v0.y //np.dot(v0.T, v0)
63
- float dot01 = v0.dot(v1); //v0.x * v1.x + v0.y * v1.y //np.dot(v0.T, v1)
64
- float dot02 = v0.dot(v2); //v0.x * v2.x + v0.y * v2.y //np.dot(v0.T, v2)
65
- float dot11 = v1.dot(v1); //v1.x * v1.x + v1.y * v1.y //np.dot(v1.T, v1)
66
- float dot12 = v1.dot(v2); //v1.x * v2.x + v1.y * v2.y//np.dot(v1.T, v2)
67
-
68
- // barycentric coordinates
69
- float inverDeno;
70
- if(dot00*dot11 - dot01*dot01 == 0)
71
- inverDeno = 0;
72
- else
73
- inverDeno = 1/(dot00*dot11 - dot01*dot01);
74
-
75
- float u = (dot11*dot02 - dot01*dot12)*inverDeno;
76
- float v = (dot00*dot12 - dot01*dot02)*inverDeno;
77
-
78
- // weight
79
- weight[0] = 1 - u - v;
80
- weight[1] = v;
81
- weight[2] = u;
82
- }
83
-
84
-
85
- void _get_normal_core(
86
- float* normal, float* tri_normal, int* triangles,
87
- int ntri)
88
- {
89
- int i, j;
90
- int tri_p0_ind, tri_p1_ind, tri_p2_ind;
91
-
92
- for(i = 0; i < ntri; i++)
93
- {
94
- tri_p0_ind = triangles[3*i];
95
- tri_p1_ind = triangles[3*i + 1];
96
- tri_p2_ind = triangles[3*i + 2];
97
-
98
- for(j = 0; j < 3; j++)
99
- {
100
- normal[3*tri_p0_ind + j] = normal[3*tri_p0_ind + j] + tri_normal[3*i + j];
101
- normal[3*tri_p1_ind + j] = normal[3*tri_p1_ind + j] + tri_normal[3*i + j];
102
- normal[3*tri_p2_ind + j] = normal[3*tri_p2_ind + j] + tri_normal[3*i + j];
103
- }
104
- }
105
- }
106
-
107
-
108
- void _rasterize_triangles_core(
109
- float* vertices, int* triangles,
110
- float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
111
- int nver, int ntri,
112
- int h, int w)
113
- {
114
- int i;
115
- int x, y, k;
116
- int tri_p0_ind, tri_p1_ind, tri_p2_ind;
117
- point p0, p1, p2, p;
118
- int x_min, x_max, y_min, y_max;
119
- float p_depth, p0_depth, p1_depth, p2_depth;
120
- float weight[3];
121
-
122
- for(i = 0; i < ntri; i++)
123
- {
124
- tri_p0_ind = triangles[3*i];
125
- tri_p1_ind = triangles[3*i + 1];
126
- tri_p2_ind = triangles[3*i + 2];
127
-
128
- p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
129
- p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
130
- p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
131
-
132
- x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
133
- x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
134
-
135
- y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
136
- y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
137
-
138
- if(x_max < x_min || y_max < y_min)
139
- {
140
- continue;
141
- }
142
-
143
- for(y = y_min; y <= y_max; y++) //h
144
- {
145
- for(x = x_min; x <= x_max; x++) //w
146
- {
147
- p.x = x; p.y = y;
148
- if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
149
- {
150
- get_point_weight(weight, p, p0, p1, p2);
151
- p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
152
-
153
- if((p_depth > depth_buffer[y*w + x]))
154
- {
155
- depth_buffer[y*w + x] = p_depth;
156
- triangle_buffer[y*w + x] = i;
157
- for(k = 0; k < 3; k++)
158
- {
159
- barycentric_weight[y*w*3 + x*3 + k] = weight[k];
160
- }
161
- }
162
- }
163
- }
164
- }
165
- }
166
- }
167
-
168
-
169
- void _render_colors_core(
170
- float* image, float* vertices, int* triangles,
171
- float* colors,
172
- float* depth_buffer,
173
- int nver, int ntri,
174
- int h, int w, int c)
175
- {
176
- int i;
177
- int x, y, k;
178
- int tri_p0_ind, tri_p1_ind, tri_p2_ind;
179
- point p0, p1, p2, p;
180
- int x_min, x_max, y_min, y_max;
181
- float p_depth, p0_depth, p1_depth, p2_depth;
182
- float p_color, p0_color, p1_color, p2_color;
183
- float weight[3];
184
-
185
- for(i = 0; i < ntri; i++)
186
- {
187
- tri_p0_ind = triangles[3*i];
188
- tri_p1_ind = triangles[3*i + 1];
189
- tri_p2_ind = triangles[3*i + 2];
190
-
191
- p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
192
- p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
193
- p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
194
-
195
- x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
196
- x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
197
-
198
- y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
199
- y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
200
-
201
- if(x_max < x_min || y_max < y_min)
202
- {
203
- continue;
204
- }
205
-
206
- for(y = y_min; y <= y_max; y++) //h
207
- {
208
- for(x = x_min; x <= x_max; x++) //w
209
- {
210
- p.x = x; p.y = y;
211
- if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
212
- {
213
- get_point_weight(weight, p, p0, p1, p2);
214
- p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
215
-
216
- if((p_depth > depth_buffer[y*w + x]))
217
- {
218
- for(k = 0; k < c; k++) // c
219
- {
220
- p0_color = colors[c*tri_p0_ind + k];
221
- p1_color = colors[c*tri_p1_ind + k];
222
- p2_color = colors[c*tri_p2_ind + k];
223
-
224
- p_color = weight[0]*p0_color + weight[1]*p1_color + weight[2]*p2_color;
225
- image[y*w*c + x*c + k] = p_color;
226
- }
227
-
228
- depth_buffer[y*w + x] = p_depth;
229
- }
230
- }
231
- }
232
- }
233
- }
234
- }
235
-
236
-
237
- void _render_texture_core(
238
- float* image, float* vertices, int* triangles,
239
- float* texture, float* tex_coords, int* tex_triangles,
240
- float* depth_buffer,
241
- int nver, int tex_nver, int ntri,
242
- int h, int w, int c,
243
- int tex_h, int tex_w, int tex_c,
244
- int mapping_type)
245
- {
246
- int i;
247
- int x, y, k;
248
- int tri_p0_ind, tri_p1_ind, tri_p2_ind;
249
- int tex_tri_p0_ind, tex_tri_p1_ind, tex_tri_p2_ind;
250
- point p0, p1, p2, p;
251
- point tex_p0, tex_p1, tex_p2, tex_p;
252
- int x_min, x_max, y_min, y_max;
253
- float weight[3];
254
- float p_depth, p0_depth, p1_depth, p2_depth;
255
- float xd, yd;
256
- float ul, ur, dl, dr;
257
- for(i = 0; i < ntri; i++)
258
- {
259
- // mesh
260
- tri_p0_ind = triangles[3*i];
261
- tri_p1_ind = triangles[3*i + 1];
262
- tri_p2_ind = triangles[3*i + 2];
263
-
264
- p0.x = vertices[3*tri_p0_ind]; p0.y = vertices[3*tri_p0_ind + 1]; p0_depth = vertices[3*tri_p0_ind + 2];
265
- p1.x = vertices[3*tri_p1_ind]; p1.y = vertices[3*tri_p1_ind + 1]; p1_depth = vertices[3*tri_p1_ind + 2];
266
- p2.x = vertices[3*tri_p2_ind]; p2.y = vertices[3*tri_p2_ind + 1]; p2_depth = vertices[3*tri_p2_ind + 2];
267
-
268
- // texture
269
- tex_tri_p0_ind = tex_triangles[3*i];
270
- tex_tri_p1_ind = tex_triangles[3*i + 1];
271
- tex_tri_p2_ind = tex_triangles[3*i + 2];
272
-
273
- tex_p0.x = tex_coords[3*tex_tri_p0_ind]; tex_p0.y = tex_coords[3*tex_tri_p0_ind + 1];
274
- tex_p1.x = tex_coords[3*tex_tri_p1_ind]; tex_p1.y = tex_coords[3*tex_tri_p1_ind + 1];
275
- tex_p2.x = tex_coords[3*tex_tri_p2_ind]; tex_p2.y = tex_coords[3*tex_tri_p2_ind + 1];
276
-
277
-
278
- x_min = max((int)ceil(min(p0.x, min(p1.x, p2.x))), 0);
279
- x_max = min((int)floor(max(p0.x, max(p1.x, p2.x))), w - 1);
280
-
281
- y_min = max((int)ceil(min(p0.y, min(p1.y, p2.y))), 0);
282
- y_max = min((int)floor(max(p0.y, max(p1.y, p2.y))), h - 1);
283
-
284
-
285
- if(x_max < x_min || y_max < y_min)
286
- {
287
- continue;
288
- }
289
-
290
- for(y = y_min; y <= y_max; y++) //h
291
- {
292
- for(x = x_min; x <= x_max; x++) //w
293
- {
294
- p.x = x; p.y = y;
295
- if(p.x < 2 || p.x > w - 3 || p.y < 2 || p.y > h - 3 || isPointInTri(p, p0, p1, p2))
296
- {
297
- get_point_weight(weight, p, p0, p1, p2);
298
- p_depth = weight[0]*p0_depth + weight[1]*p1_depth + weight[2]*p2_depth;
299
-
300
- if((p_depth > depth_buffer[y*w + x]))
301
- {
302
- // -- color from texture
303
- // cal weight in mesh tri
304
- get_point_weight(weight, p, p0, p1, p2);
305
- // cal coord in texture
306
- tex_p = tex_p0*weight[0] + tex_p1*weight[1] + tex_p2*weight[2];
307
- tex_p.x = max(min(tex_p.x, float(tex_w - 1)), float(0));
308
- tex_p.y = max(min(tex_p.y, float(tex_h - 1)), float(0));
309
-
310
- yd = tex_p.y - floor(tex_p.y);
311
- xd = tex_p.x - floor(tex_p.x);
312
- for(k = 0; k < c; k++)
313
- {
314
- if(mapping_type==0)// nearest
315
- {
316
- image[y*w*c + x*c + k] = texture[int(round(tex_p.y))*tex_w*tex_c + int(round(tex_p.x))*tex_c + k];
317
- }
318
- else//bilinear interp
319
- {
320
- ul = texture[(int)floor(tex_p.y)*tex_w*tex_c + (int)floor(tex_p.x)*tex_c + k];
321
- ur = texture[(int)floor(tex_p.y)*tex_w*tex_c + (int)ceil(tex_p.x)*tex_c + k];
322
- dl = texture[(int)ceil(tex_p.y)*tex_w*tex_c + (int)floor(tex_p.x)*tex_c + k];
323
- dr = texture[(int)ceil(tex_p.y)*tex_w*tex_c + (int)ceil(tex_p.x)*tex_c + k];
324
-
325
- image[y*w*c + x*c + k] = ul*(1-xd)*(1-yd) + ur*xd*(1-yd) + dl*(1-xd)*yd + dr*xd*yd;
326
- }
327
-
328
- }
329
-
330
- depth_buffer[y*w + x] = p_depth;
331
- }
332
- }
333
- }
334
- }
335
- }
336
- }
337
-
338
-
339
-
340
- // ------------------------------------------------- write
341
- // obj write
342
- // Ref: https://github.com/patrikhuber/eos/blob/master/include/eos/core/Mesh.hpp
343
- void _write_obj_with_colors_texture(string filename, string mtl_name,
344
- float* vertices, int* triangles, float* colors, float* uv_coords,
345
- int nver, int ntri, int ntexver)
346
- {
347
- int i;
348
-
349
- ofstream obj_file(filename.c_str());
350
-
351
- // first line of the obj file: the mtl name
352
- obj_file << "mtllib " << mtl_name << endl;
353
-
354
- // write vertices
355
- for (i = 0; i < nver; ++i)
356
- {
357
- obj_file << "v " << vertices[3*i] << " " << vertices[3*i + 1] << " " << vertices[3*i + 2] << colors[3*i] << " " << colors[3*i + 1] << " " << colors[3*i + 2] << endl;
358
- }
359
-
360
- // write uv coordinates
361
- for (i = 0; i < ntexver; ++i)
362
- {
363
- //obj_file << "vt " << uv_coords[2*i] << " " << (1 - uv_coords[2*i + 1]) << endl;
364
- obj_file << "vt " << uv_coords[2*i] << " " << uv_coords[2*i + 1] << endl;
365
- }
366
-
367
- obj_file << "usemtl FaceTexture" << endl;
368
- // write triangles
369
- for (i = 0; i < ntri; ++i)
370
- {
371
- // obj_file << "f " << triangles[3*i] << "/" << triangles[3*i] << " " << triangles[3*i + 1] << "/" << triangles[3*i + 1] << " " << triangles[3*i + 2] << "/" << triangles[3*i + 2] << endl;
372
- obj_file << "f " << triangles[3*i + 2] << "/" << triangles[3*i + 2] << " " << triangles[3*i + 1] << "/" << triangles[3*i + 1] << " " << triangles[3*i] << "/" << triangles[3*i] << endl;
373
- }
374
-
375
- }
 
insightface/thirdparty/face3d/mesh/cython/mesh_core.h DELETED
@@ -1,83 +0,0 @@
1
- #ifndef MESH_CORE_HPP_
2
- #define MESH_CORE_HPP_
3
-
4
- #include <stdio.h>
5
- #include <cmath>
6
- #include <algorithm>
7
- #include <string>
8
- #include <iostream>
9
- #include <fstream>
10
-
11
- using namespace std;
12
-
13
- class point
14
- {
15
- public:
16
- float x;
17
- float y;
18
-
19
- float dot(point p)
20
- {
21
- return this->x * p.x + this->y * p.y;
22
- }
23
-
24
- point operator-(const point& p)
25
- {
26
- point np;
27
- np.x = this->x - p.x;
28
- np.y = this->y - p.y;
29
- return np;
30
- }
31
-
32
- point operator+(const point& p)
33
- {
34
- point np;
35
- np.x = this->x + p.x;
36
- np.y = this->y + p.y;
37
- return np;
38
- }
39
-
40
- point operator*(float s)
41
- {
42
- point np;
43
- np.x = s * this->x;
44
- np.y = s * this->y;
45
- return np;
46
- }
47
- };
48
-
49
-
50
- bool isPointInTri(point p, point p0, point p1, point p2); // matches the definition in mesh_core.cpp
51
- void get_point_weight(float* weight, point p, point p0, point p1, point p2);
52
-
53
- void _get_normal_core(
54
- float* normal, float* tri_normal, int* triangles,
55
- int ntri);
56
-
57
- void _rasterize_triangles_core(
58
- float* vertices, int* triangles,
59
- float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
60
- int nver, int ntri,
61
- int h, int w);
62
-
63
- void _render_colors_core(
64
- float* image, float* vertices, int* triangles,
65
- float* colors,
66
- float* depth_buffer,
67
- int nver, int ntri,
68
- int h, int w, int c);
69
-
70
- void _render_texture_core(
71
- float* image, float* vertices, int* triangles,
72
- float* texture, float* tex_coords, int* tex_triangles,
73
- float* depth_buffer,
74
- int nver, int tex_nver, int ntri,
75
- int h, int w, int c,
76
- int tex_h, int tex_w, int tex_c,
77
- int mapping_type);
78
-
79
- void _write_obj_with_colors_texture(string filename, string mtl_name,
80
- float* vertices, int* triangles, float* colors, float* uv_coords,
81
- int nver, int ntri, int ntexver);
82
-
83
- #endif
 
insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.c DELETED
The diff for this file is too large to render. See raw diff
 
insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.cpp DELETED
The diff for this file is too large to render. See raw diff
 
insightface/thirdparty/face3d/mesh/cython/mesh_core_cython.pyx DELETED
@@ -1,109 +0,0 @@
1
- import numpy as np
2
- cimport numpy as np
3
- from libcpp.string cimport string
4
-
5
- # use the Numpy-C-API from Cython
6
- np.import_array()
7
-
8
- # cdefine the signature of our c function
9
- cdef extern from "mesh_core.h":
10
- void _rasterize_triangles_core(
11
- float* vertices, int* triangles,
12
- float* depth_buffer, int* triangle_buffer, float* barycentric_weight,
13
- int nver, int ntri,
14
- int h, int w)
15
-
16
- void _render_colors_core(
17
- float* image, float* vertices, int* triangles,
18
- float* colors,
19
- float* depth_buffer,
20
- int nver, int ntri,
21
- int h, int w, int c)
22
-
23
- void _render_texture_core(
24
- float* image, float* vertices, int* triangles,
25
- float* texture, float* tex_coords, int* tex_triangles,
26
- float* depth_buffer,
27
- int nver, int tex_nver, int ntri,
28
- int h, int w, int c,
29
- int tex_h, int tex_w, int tex_c,
30
- int mapping_type)
31
-
32
- void _get_normal_core(
33
- float* normal, float* tri_normal, int* triangles,
34
- int ntri)
35
-
36
- void _write_obj_with_colors_texture(string filename, string mtl_name,
37
- float* vertices, int* triangles, float* colors, float* uv_coords,
38
- int nver, int ntri, int ntexver)
39
-
40
- def get_normal_core(np.ndarray[float, ndim=2, mode = "c"] normal not None,
41
- np.ndarray[float, ndim=2, mode = "c"] tri_normal not None,
42
- np.ndarray[int, ndim=2, mode="c"] triangles not None,
43
- int ntri
44
- ):
45
- _get_normal_core(
46
- <float*> np.PyArray_DATA(normal), <float*> np.PyArray_DATA(tri_normal), <int*> np.PyArray_DATA(triangles),
47
- ntri)
48
-
49
- def rasterize_triangles_core(
50
- np.ndarray[float, ndim=2, mode = "c"] vertices not None,
51
- np.ndarray[int, ndim=2, mode="c"] triangles not None,
52
- np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
53
- np.ndarray[int, ndim=2, mode = "c"] triangle_buffer not None,
54
- np.ndarray[float, ndim=2, mode = "c"] barycentric_weight not None,
55
- int nver, int ntri,
56
- int h, int w
57
- ):
58
- _rasterize_triangles_core(
59
- <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
60
- <float*> np.PyArray_DATA(depth_buffer), <int*> np.PyArray_DATA(triangle_buffer), <float*> np.PyArray_DATA(barycentric_weight),
61
- nver, ntri,
62
- h, w)
63
-
64
- def render_colors_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
65
- np.ndarray[float, ndim=2, mode = "c"] vertices not None,
66
- np.ndarray[int, ndim=2, mode="c"] triangles not None,
67
- np.ndarray[float, ndim=2, mode = "c"] colors not None,
68
- np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
69
- int nver, int ntri,
70
- int h, int w, int c
71
- ):
72
- _render_colors_core(
73
- <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
74
- <float*> np.PyArray_DATA(colors),
75
- <float*> np.PyArray_DATA(depth_buffer),
76
- nver, ntri,
77
- h, w, c)
78
-
79
- def render_texture_core(np.ndarray[float, ndim=3, mode = "c"] image not None,
80
- np.ndarray[float, ndim=2, mode = "c"] vertices not None,
81
- np.ndarray[int, ndim=2, mode="c"] triangles not None,
82
- np.ndarray[float, ndim=3, mode = "c"] texture not None,
83
- np.ndarray[float, ndim=2, mode = "c"] tex_coords not None,
84
- np.ndarray[int, ndim=2, mode="c"] tex_triangles not None,
85
- np.ndarray[float, ndim=2, mode = "c"] depth_buffer not None,
86
- int nver, int tex_nver, int ntri,
87
- int h, int w, int c,
88
- int tex_h, int tex_w, int tex_c,
89
- int mapping_type
90
- ):
91
- _render_texture_core(
92
- <float*> np.PyArray_DATA(image), <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles),
93
- <float*> np.PyArray_DATA(texture), <float*> np.PyArray_DATA(tex_coords), <int*> np.PyArray_DATA(tex_triangles),
94
- <float*> np.PyArray_DATA(depth_buffer),
95
- nver, tex_nver, ntri,
96
- h, w, c,
97
- tex_h, tex_w, tex_c,
98
- mapping_type)
99
-
100
- def write_obj_with_colors_texture_core(string filename, string mtl_name,
101
- np.ndarray[float, ndim=2, mode = "c"] vertices not None,
102
- np.ndarray[int, ndim=2, mode="c"] triangles not None,
103
- np.ndarray[float, ndim=2, mode = "c"] colors not None,
104
- np.ndarray[float, ndim=2, mode = "c"] uv_coords not None,
105
- int nver, int ntri, int ntexver
106
- ):
107
- _write_obj_with_colors_texture(filename, mtl_name,
108
- <float*> np.PyArray_DATA(vertices), <int*> np.PyArray_DATA(triangles), <float*> np.PyArray_DATA(colors), <float*> np.PyArray_DATA(uv_coords),
109
- nver, ntri, ntexver)
 
insightface/thirdparty/face3d/mesh/cython/setup.py DELETED
@@ -1,20 +0,0 @@
1
- '''
2
- python setup.py build_ext -i
3
- to compile
4
- '''
5
-
6
- # setup.py
7
- from distutils.core import setup, Extension
8
- from Cython.Build import cythonize
9
- from Cython.Distutils import build_ext
10
- import numpy
11
-
12
- setup(
13
- name = 'mesh_core_cython',
14
- cmdclass={'build_ext': build_ext},
15
- ext_modules=[Extension("mesh_core_cython",
16
- sources=["mesh_core_cython.pyx", "mesh_core.cpp"],
17
- language='c++',
18
- include_dirs=[numpy.get_include()])],
19
- )
20
-
 
insightface/thirdparty/face3d/mesh/io.cpp DELETED
@@ -1 +0,0 @@
1
- #error Do not use this file, it is the result of a failed Cython compilation.
 
 
insightface/thirdparty/face3d/mesh/io.py DELETED
@@ -1,142 +0,0 @@
1
- from __future__ import absolute_import
2
- from __future__ import division
3
- from __future__ import print_function
4
-
5
- import numpy as np
6
- import os
7
- from skimage import io
8
- from time import time
9
-
10
- from .cython import mesh_core_cython
11
-
12
- ## TODO
13
- ## TODO: c++ version
14
- def read_obj(obj_name):
15
- ''' read mesh
16
- '''
17
- return 0
18
-
19
- # ------------------------- write
20
- def write_asc(path, vertices):
21
- '''
22
- Args:
23
- vertices: shape = (nver, 3)
24
- '''
25
- if path.split('.')[-1] == 'asc':
26
- np.savetxt(path, vertices)
27
- else:
28
- np.savetxt(path + '.asc', vertices)
29
-
30
- def write_obj_with_colors(obj_name, vertices, triangles, colors):
31
- ''' Save 3D face model with texture represented by colors.
32
- Args:
33
- obj_name: str
34
- vertices: shape = (nver, 3)
35
- triangles: shape = (ntri, 3)
36
- colors: shape = (nver, 3)
37
- '''
38
- triangles = triangles.copy()
39
- triangles += 1 # meshlab start with 1
40
-
41
- if obj_name.split('.')[-1] != 'obj':
42
- obj_name = obj_name + '.obj'
43
-
44
- # write obj
45
- with open(obj_name, 'w') as f:
46
-
47
- # write vertices & colors
48
- for i in range(vertices.shape[0]):
49
- # s = 'v {} {} {} \n'.format(vertices[0,i], vertices[1,i], vertices[2,i])
50
- s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
51
- f.write(s)
52
-
53
- # write f: ver ind/ uv ind
54
- [ntri, _] = triangles.shape # shape is (ntri, 3); the original unpacked the names in swapped order
55
- for i in range(triangles.shape[0]):
56
- # s = 'f {} {} {}\n'.format(triangles[i, 0], triangles[i, 1], triangles[i, 2])
57
- s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
58
- f.write(s)
59
-
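For context, the loop above emits plain Wavefront OBJ text: each vertex line carries xyz plus appended RGB (an unofficial but widely read extension, e.g. by MeshLab), and faces use 1-based indices with reversed winding. Illustrative output lines (made-up values):

# v 0.1 0.2 0.3 0.8 0.7 0.6   <- x y z r g b
# f 3 2 1                     <- face; 1-based vertex indices, winding reversed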
60
- ## TODO: c++ version
61
- def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
62
- ''' Save 3D face model with texture represented by texture map.
63
- Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
64
- Args:
65
- obj_name: str
66
- vertices: shape = (nver, 3)
67
- triangles: shape = (ntri, 3)
68
- texture: shape = (256,256,3)
69
- uv_coords: shape = (nver, 3) max value<=1
70
- '''
71
- if obj_name.split('.')[-1] != 'obj':
72
- obj_name = obj_name + '.obj'
73
- mtl_name = obj_name.replace('.obj', '.mtl')
74
- texture_name = obj_name.replace('.obj', '_texture.png')
75
-
76
- triangles = triangles.copy()
77
- triangles += 1 # mesh lab start with 1
78
-
79
- # write obj
80
- with open(obj_name, 'w') as f:
81
- # first line: write mtlib(material library)
82
- s = "mtllib {}\n".format(os.path.abspath(mtl_name))
83
- f.write(s)
84
-
85
- # write vertices
86
- for i in range(vertices.shape[0]):
87
- s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
88
- f.write(s)
89
-
90
- # write uv coords
91
- for i in range(uv_coords.shape[0]):
92
- s = 'vt {} {}\n'.format(uv_coords[i,0], 1 - uv_coords[i,1])
93
- f.write(s)
94
-
95
- f.write("usemtl FaceTexture\n")
96
-
97
- # write f: ver ind/ uv ind
98
- for i in range(triangles.shape[0]):
99
- s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
100
- f.write(s)
101
-
102
- # write mtl
103
- with open(mtl_name, 'w') as f:
104
- f.write("newmtl FaceTexture\n")
105
- s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
106
- f.write(s)
107
-
108
- # write texture as png
109
- io.imsave(texture_name, texture)
110
-
111
- # c++ version
112
- def write_obj_with_colors_texture(obj_name, vertices, triangles, colors, texture, uv_coords):
113
- ''' Save 3D face model with texture.
114
- Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
115
- Args:
116
- obj_name: str
117
- vertices: shape = (nver, 3)
118
- triangles: shape = (ntri, 3)
119
- colors: shape = (nver, 3)
120
- texture: shape = (256,256,3)
121
- uv_coords: shape = (nver, 3) max value<=1
122
- '''
123
- if obj_name.split('.')[-1] != 'obj':
124
- obj_name = obj_name + '.obj'
125
- mtl_name = obj_name.replace('.obj', '.mtl')
126
- texture_name = obj_name.replace('.obj', '_texture.png')
127
-
128
- triangles = triangles.copy()
129
- triangles += 1 # mesh lab start with 1
130
-
131
- # write obj
132
- vertices, colors, uv_coords = vertices.astype(np.float32).copy(), colors.astype(np.float32).copy(), uv_coords.astype(np.float32).copy()
133
- mesh_core_cython.write_obj_with_colors_texture_core(str.encode(obj_name), str.encode(os.path.abspath(mtl_name)), vertices, triangles, colors, uv_coords, vertices.shape[0], triangles.shape[0], uv_coords.shape[0])
134
-
135
- # write mtl
136
- with open(mtl_name, 'w') as f:
137
- f.write("newmtl FaceTexture\n")
138
- s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
139
- f.write(s)
140
-
141
- # write texture as png
142
- io.imsave(texture_name, texture)
 
insightface/thirdparty/face3d/mesh/light.py DELETED
@@ -1,213 +0,0 @@
1
- '''
2
- Functions about lighting mesh(changing colors/texture of mesh).
3
- 1. add light to colors/texture (shade each vertex)
4
- 2. fit light according to colors/texture & image.
5
- '''
6
-
7
- from __future__ import absolute_import
8
- from __future__ import division
9
- from __future__ import print_function
10
-
11
- import numpy as np
12
- from .cython import mesh_core_cython
13
-
14
- def get_normal(vertices, triangles):
15
- ''' calculate normal direction in each vertex
16
- Args:
17
- vertices: [nver, 3]
18
- triangles: [ntri, 3]
19
- Returns:
20
- normal: [nver, 3]
21
- '''
22
- pt0 = vertices[triangles[:, 0], :] # [ntri, 3]
23
- pt1 = vertices[triangles[:, 1], :] # [ntri, 3]
24
- pt2 = vertices[triangles[:, 2], :] # [ntri, 3]
25
- tri_normal = np.cross(pt0 - pt1, pt0 - pt2) # [ntri, 3]. normal of each triangle
26
-
27
- normal = np.zeros_like(vertices, dtype = np.float32).copy() # [nver, 3]
28
- # for i in range(triangles.shape[0]):
29
- # normal[triangles[i, 0], :] = normal[triangles[i, 0], :] + tri_normal[i, :]
30
- # normal[triangles[i, 1], :] = normal[triangles[i, 1], :] + tri_normal[i, :]
31
- # normal[triangles[i, 2], :] = normal[triangles[i, 2], :] + tri_normal[i, :]
32
- mesh_core_cython.get_normal_core(normal, tri_normal.astype(np.float32).copy(), triangles.copy(), triangles.shape[0])
33
-
34
- # normalize to unit length
35
- mag = np.sum(normal**2, 1) # [nver]
36
- zero_ind = (mag == 0)
37
- mag[zero_ind] = 1;
38
- normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))
39
-
40
- normal = normal/np.sqrt(mag[:,np.newaxis])
41
-
42
- return normal
43
-
44
- # TODO: test
45
- def add_light_sh(vertices, triangles, colors, sh_coeff):
46
- '''
47
- In 3d face, usually assume:
48
- 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
49
- 2. Lighting can be an arbitrary combination of point sources
50
- --> can be expressed in terms of spherical harmonics(omit the lighting coefficients)
51
- I = albedo * (sh(n) x sh_coeff)
52
-
53
- albedo: n x 1
54
- sh_coeff: 9 x 1
55
- Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9
56
- # Y(n) = (1, n_x, n_y, n_z)': n x 4
57
-
58
- Args:
59
- vertices: [nver, 3]
60
- triangles: [ntri, 3]
61
- colors: [nver, 3] albedo
62
- sh_coeff: [9, 1] spherical harmonics coefficients
63
-
64
- Returns:
65
- lit_colors: [nver, 3]
66
- '''
67
- assert vertices.shape[0] == colors.shape[0]
68
- nver = vertices.shape[0]
69
- normal = get_normal(vertices, triangles) # [nver, 3]
70
- sh = np.array((np.ones(nver), normal[:,0], normal[:,1], normal[:,2], normal[:,0]*normal[:,1], normal[:,0]*normal[:,2], normal[:,1]*normal[:,2], normal[:,0]**2 - normal[:,1]**2, 3*(normal[:,2]**2) - 1)).T # [nver, 9]; 'n' was undefined and the array needed transposing
71
- ref = sh.dot(sh_coeff) #[nver, 1]
72
- lit_colors = colors*ref
73
- return lit_colors
74
-
75
-
76
- def add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):
77
- ''' Gouraud shading. add point lights.
78
- In 3d face, usually assume:
79
- 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
80
- 2. Lighting can be an arbitrary combination of point sources
81
- 3. No specular (unless skin is oil, 23333)
82
-
83
- Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline
84
- Args:
85
- vertices: [nver, 3]
86
- triangles: [ntri, 3]
87
- light_positions: [nlight, 3]
88
- light_intensities: [nlight, 3]
89
- Returns:
90
- lit_colors: [nver, 3]
91
- '''
92
- nver = vertices.shape[0]
93
- normals = get_normal(vertices, triangles) # [nver, 3]
94
-
95
- # ambient
96
- # La = ka*Ia
97
-
98
- # diffuse
99
- # Ld = kd*(I/r^2)max(0, nxl)
100
- direction_to_lights = vertices[np.newaxis, :, :] - light_positions[:, np.newaxis, :] # [nlight, nver, 3]
101
- direction_to_lights_n = np.sqrt(np.sum(direction_to_lights**2, axis = 2)) # [nlight, nver]
102
- direction_to_lights = direction_to_lights/direction_to_lights_n[:, :, np.newaxis]
103
- normals_dot_lights = normals[np.newaxis, :, :]*direction_to_lights # [nlight, nver, 3]
104
- normals_dot_lights = np.sum(normals_dot_lights, axis = 2) # [nlight, nver]
105
- diffuse_output = colors[np.newaxis, :, :]*normals_dot_lights[:, :, np.newaxis]*light_intensities[:, np.newaxis, :]
106
- diffuse_output = np.sum(diffuse_output, axis = 0) # [nver, 3]
107
-
108
- # specular
109
- # h = (v + l)/(|v + l|) bisector
110
- # Ls = ks*(I/r^2)max(0, nxh)^p
111
- # increasing p narrows the reflection lobe
112
-
113
- lit_colors = diffuse_output # only diffuse part here.
114
- lit_colors = np.minimum(np.maximum(lit_colors, 0), 1)
115
- return lit_colors
116
-
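A hedged usage sketch for add_light above (shapes invented for illustration; the 0 defaults in the signature are placeholders, real arrays are required):

# one white point light on the +z side of the face:
# light_positions = np.array([[0.0, 0.0, 300.0]])   # [nlight, 3]
# light_intensities = np.array([[1.0, 1.0, 1.0]])   # [nlight, 3]
# lit = add_light(vertices, triangles, colors, light_positions, light_intensities)
# lit has the same [nver, 3] shape as colors, with values clipped to [0, 1]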
117
-
118
-
119
- ## TODO. estimate light(sh coeff)
120
- ## -------------------------------- estimate. cannot be used yet.
121
- def fit_light(image, vertices, texture, triangles, vis_ind, lamb = 10, max_iter = 3): # body uses 'texture'; the parameter was misnamed 'colors'
122
- [h, w, c] = image.shape
123
-
124
- # surface normal
125
- norm = get_normal(vertices, triangles)
126
-
127
- nver = vertices.shape[1]
128
-
129
- # vertices --> corresponding image pixel
130
- pt2d = vertices[:2, :]
131
-
132
- pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)
133
- pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)
134
- pt2d = np.round(pt2d).astype(np.int32) # 2 x nver
135
-
136
- image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3
137
- image_pixel = image_pixel.T # 3 x nver
138
-
139
- # vertices --> corresponding mean texture pixel with illumination
140
- # Spherical Harmonic Basis
141
- harmonic_dim = 9
142
- nx = norm[0,:];
143
- ny = norm[1,:];
144
- nz = norm[2,:];
145
- harmonic = np.zeros((nver, harmonic_dim))
146
-
147
- pi = np.pi
148
- harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,));
149
- harmonic[:,1] = np.sqrt(3/(4*pi)) * nx;
150
- harmonic[:,2] = np.sqrt(3/(4*pi)) * ny;
151
- harmonic[:,3] = np.sqrt(3/(4*pi)) * nz;
152
- harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2);
153
- harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz);
154
- harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz);
155
- harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny);
156
- harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny);
157
-
158
- '''
159
- I' = sum(albedo * lj * hj) j = 0:9 (albedo = tex)
160
- set A = albedo*h (n x 9)
161
- alpha = lj (9 x 1)
162
- Y = I (n x 1)
163
- Y' = A.dot(alpha)
164
-
165
- opt function:
166
- ||Y - A*alpha|| + lambda*(alpha'*alpha)
167
- result:
168
- A'*(Y - A*alpha) + lambda*alpha = 0
169
- ==>
170
- (A'*A*alpha - lambda)*alpha = A'*Y
171
- left: 9 x 9
172
- right: 9 x 1
173
- '''
174
- n_vis_ind = len(vis_ind)
175
- n = n_vis_ind*c
176
-
177
- Y = np.zeros((n, 1))
178
- A = np.zeros((n, 9))
179
- light = np.zeros((3, 1))
180
-
181
- for k in range(c):
182
- Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]
183
- A[k*n_vis_ind:(k+1)*n_vis_ind, :] = texture[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]
184
- Ac = texture[k, vis_ind][:, np.newaxis]
185
- Yc = image_pixel[k, vis_ind][:, np.newaxis]
186
- light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
187
-
188
- for i in range(max_iter):
189
-
190
- Yc = Y.copy()
191
- for k in range(c):
192
- Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]
193
-
194
- # update alpha
195
- equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim); # why + ?
196
- equation_right = np.dot(A.T, Yc)
197
- alpha = np.dot(np.linalg.inv(equation_left), equation_right)
198
-
199
- # update light
200
- for k in range(c):
201
- Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)
202
- Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]
203
- light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
204
-
205
- appearance = np.zeros_like(texture)
206
- for k in range(c):
207
- tmp = np.dot(harmonic*texture[k, :][:, np.newaxis], alpha*light[k])
208
- appearance[k,:] = tmp.T
209
-
210
- appearance = np.minimum(np.maximum(appearance, 0), 1)
211
-
212
- return appearance
213
-
 
insightface/thirdparty/face3d/mesh/render.py DELETED
@@ -1,135 +0,0 @@
1
- '''
2
- functions about rendering mesh(from 3d obj to 2d image).
3
- only use rasterization render here.
4
- Note that:
5
- 1. Generally, render func includes camera, light, rasterize. Here no camera and light(I write these in other files)
6
- 2. Generally, the input vertices are normalized to [-1,1] and centered on [0, 0]. (in world space)
7
- Here, the vertices are using image coords, which centers on [w/2, h/2] with the y-axis pointing in the opposite direction.
8
- Means: the render here only performs interpolation. (I just want to make the input flexible.)
9
-
10
- Author: Yao Feng
11
- Mail: yaofeng1995@gmail.com
12
- '''
13
- from __future__ import absolute_import
14
- from __future__ import division
15
- from __future__ import print_function
16
-
17
- import numpy as np
18
- from time import time
19
-
20
- from .cython import mesh_core_cython
21
-
22
- def rasterize_triangles(vertices, triangles, h, w):
23
- '''
24
- Args:
25
- vertices: [nver, 3]
26
- triangles: [ntri, 3]
27
- h: height
28
- w: width
29
- Returns:
30
- depth_buffer: [h, w] saves the depth, here, the bigger the z, the fronter the point.
31
- triangle_buffer: [h, w] saves the tri id(-1 for no triangle).
32
- barycentric_weight: [h, w, 3] saves corresponding barycentric weight.
33
-
34
- # Each triangle has 3 vertices & Each vertex has 3 coordinates x, y, z.
35
- # h, w is the size of rendering
36
- '''
37
-
38
- # initial
39
- depth_buffer = np.zeros([h, w]) - 999999. # set the initial z to the farthest position
40
- triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1 # if tri id = -1, the pixel has no triangle correspondence
41
- barycentric_weight = np.zeros([h, w, 3], dtype = np.float32) #
42
-
43
- vertices = vertices.astype(np.float32).copy()
44
- triangles = triangles.astype(np.int32).copy()
45
-
46
- mesh_core_cython.rasterize_triangles_core(
47
- vertices, triangles,
48
- depth_buffer, triangle_buffer, barycentric_weight,
49
- vertices.shape[0], triangles.shape[0],
50
- h, w)
51
- return depth_buffer, triangle_buffer, barycentric_weight
-
52
- def render_colors(vertices, triangles, colors, h, w, c = 3, BG = None):
53
- ''' render mesh with colors
54
- Args:
55
- vertices: [nver, 3]
56
- triangles: [ntri, 3]
57
- colors: [nver, 3]
58
- h: height
59
- w: width
60
- c: channel
61
- BG: background image
62
- Returns:
63
- image: [h, w, c]. the rendered image.
64
- '''
65
-
66
- # initial
67
- if BG is None:
68
- image = np.zeros((h, w, c), dtype = np.float32)
69
- else:
70
- assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
71
- image = BG
72
- depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
73
-
74
- # change memory order --> C-contiguous (row major)
75
- vertices = vertices.astype(np.float32).copy()
76
- triangles = triangles.astype(np.int32).copy()
77
- colors = colors.astype(np.float32).copy()
78
- ###
79
- st = time()
80
- mesh_core_cython.render_colors_core(
81
- image, vertices, triangles,
82
- colors,
83
- depth_buffer,
84
- vertices.shape[0], triangles.shape[0],
85
- h, w, c)
86
- return image
87
-
88
-
89
- def render_texture(vertices, triangles, texture, tex_coords, tex_triangles, h, w, c = 3, mapping_type = 'nearest', BG = None):
90
- ''' render mesh with texture map
91
- Args:
92
- vertices: [nver, 3]
93
- triangles: [ntri, 3]
94
- texture: [tex_h, tex_w, 3]
95
- tex_coords: [ntexcoords, 3]
96
- tex_triangles: [ntri, 3]
97
- h: height of rendering
98
- w: width of rendering
99
- c: channel
100
- mapping_type: 'bilinear' or 'nearest'
101
- '''
102
- # initial
103
- if BG is None:
104
- image = np.zeros((h, w, c), dtype = np.float32)
105
- else:
106
- assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
107
- image = BG
108
-
109
- depth_buffer = np.zeros([h, w], dtype = np.float32, order = 'C') - 999999.
110
-
111
- tex_h, tex_w, tex_c = texture.shape
112
- if mapping_type == 'nearest':
113
- mt = int(0)
114
- elif mapping_type == 'bilinear':
115
- mt = int(1)
116
- else:
117
- mt = int(0)
118
-
119
- # -> C order
120
- vertices = vertices.astype(np.float32).copy()
121
- triangles = triangles.astype(np.int32).copy()
122
- texture = texture.astype(np.float32).copy()
123
- tex_coords = tex_coords.astype(np.float32).copy()
124
- tex_triangles = tex_triangles.astype(np.int32).copy()
125
-
126
- mesh_core_cython.render_texture_core(
127
- image, vertices, triangles,
128
- texture, tex_coords, tex_triangles,
129
- depth_buffer,
130
- vertices.shape[0], tex_coords.shape[0], triangles.shape[0],
131
- h, w, c,
132
- tex_h, tex_w, tex_c,
133
- mt)
134
- return image
135
-
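
A minimal usage sketch for render_colors above; it assumes the cython extension (mesh_core_cython) has been built and that the package exposes this module as mesh.render:

    import numpy as np
    from insightface.thirdparty.face3d import mesh

    # one triangle given directly in image coordinates
    vertices = np.array([[10, 10, 0], [100, 20, 0], [50, 90, 0]], dtype=np.float32)
    triangles = np.array([[0, 1, 2]], dtype=np.int32)
    colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)  # per-vertex RGB
    image = mesh.render.render_colors(vertices, triangles, colors, h=128, w=128)
    print(image.shape)  # (128, 128, 3); colors are interpolated barycentrically
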
insightface/thirdparty/face3d/mesh/transform.py DELETED
@@ -1,383 +0,0 @@
1
- '''
2
- Functions about transforming mesh(changing the position: modify vertices).
3
- 1. forward: transform(transform, camera, project).
4
- 2. backward: estimate transform matrix from correspondences.
5
-
6
- Author: Yao Feng
7
- Mail: yaofeng1995@gmail.com
8
- '''
9
-
10
- from __future__ import absolute_import
11
- from __future__ import division
12
- from __future__ import print_function
13
-
14
- import numpy as np
15
- import math
16
- from math import cos, sin
17
-
18
- def angle2matrix(angles):
19
- ''' get rotation matrix from three rotation angles(degree). right-handed.
20
- Args:
21
- angles: [3,]. x, y, z angles
22
- x: pitch. positive for looking down.
23
- y: yaw. positive for looking left.
24
- z: roll. positive for tilting head right.
25
- Returns:
26
- R: [3, 3]. rotation matrix.
27
- '''
28
- x, y, z = np.deg2rad(angles[0]), np.deg2rad(angles[1]), np.deg2rad(angles[2])
29
- # x
30
- Rx=np.array([[1, 0, 0],
31
- [0, cos(x), -sin(x)],
32
- [0, sin(x), cos(x)]])
33
- # y
34
- Ry=np.array([[ cos(y), 0, sin(y)],
35
- [ 0, 1, 0],
36
- [-sin(y), 0, cos(y)]])
37
- # z
38
- Rz=np.array([[cos(z), -sin(z), 0],
39
- [sin(z), cos(z), 0],
40
- [ 0, 0, 1]])
41
-
42
- R=Rz.dot(Ry.dot(Rx))
43
- return R.astype(np.float32)
44
-
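
A quick sanity check for angle2matrix (assuming the function above): a valid rotation matrix is orthogonal with determinant +1.

    import numpy as np

    R = angle2matrix([30, 45, 10])  # pitch, yaw, roll in degrees
    assert np.allclose(R.T.dot(R), np.eye(3), atol=1e-5)  # orthogonal
    assert np.isclose(np.linalg.det(R), 1.0, atol=1e-5)   # proper rotation
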
45
- def angle2matrix_3ddfa(angles):
46
- ''' get rotation matrix from three rotation angles(radian). The same as in 3DDFA.
47
- Args:
48
- angles: [3,]. x, y, z angles
49
- x: pitch.
50
- y: yaw.
51
- z: roll.
52
- Returns:
53
- R: 3x3. rotation matrix.
54
- '''
55
- # x, y, z = np.deg2rad(angles[0]), np.deg2rad(angles[1]), np.deg2rad(angles[2])
56
- x, y, z = angles[0], angles[1], angles[2]
57
-
58
- # x
59
- Rx=np.array([[1, 0, 0],
60
- [0, cos(x), sin(x)],
61
- [0, -sin(x), cos(x)]])
62
- # y
63
- Ry=np.array([[ cos(y), 0, -sin(y)],
64
- [ 0, 1, 0],
65
- [sin(y), 0, cos(y)]])
66
- # z
67
- Rz=np.array([[cos(z), sin(z), 0],
68
- [-sin(z), cos(z), 0],
69
- [ 0, 0, 1]])
70
- R = Rx.dot(Ry).dot(Rz)
71
- return R.astype(np.float32)
72
-
73
-
74
- ## ------------------------------------------ 1. transform(transform, project, camera).
75
- ## ---------- 3d-3d transform. Transform obj in world space
76
- def rotate(vertices, angles):
77
- ''' rotate vertices.
78
- X_new = R.dot(X). X: 3 x 1
79
- Args:
80
- vertices: [nver, 3].
81
- rx, ry, rz: degree angles
82
- rx: pitch. positive for looking down
83
- ry: yaw. positive for looking left
84
- rz: roll. positive for tilting head right
85
- Returns:
86
- rotated vertices: [nver, 3]
87
- '''
88
- R = angle2matrix(angles)
89
- rotated_vertices = vertices.dot(R.T)
90
-
91
- return rotated_vertices
92
-
93
- def similarity_transform(vertices, s, R, t3d):
94
- ''' similarity transform. dof = 7.
95
- 3D: s*R.dot(X) + t
96
- Homo: M = [[sR, t],[0^T, 1]]. M.dot(X)
97
- Args:(float32)
98
- vertices: [nver, 3].
99
- s: [1,]. scale factor.
100
- R: [3,3]. rotation matrix.
101
- t3d: [3,]. 3d translation vector.
102
- Returns:
103
- transformed vertices: [nver, 3]
104
- '''
105
- t3d = np.squeeze(np.array(t3d, dtype = np.float32))
106
- transformed_vertices = s * vertices.dot(R.T) + t3d[np.newaxis, :]
107
-
108
- return transformed_vertices
109
-
110
-
111
- ## -------------- Camera. from world space to camera space
112
- # Ref: https://cs184.eecs.berkeley.edu/lecture/transforms-2
113
- def normalize(x):
114
- epsilon = 1e-12
115
- norm = np.sqrt(np.sum(x**2, axis = 0))
116
- norm = np.maximum(norm, epsilon)
117
- return x/norm
118
-
119
- def lookat_camera(vertices, eye, at = None, up = None):
120
- """ 'look at' transformation: from world space to camera space
121
- standard camera space:
122
- camera located at the origin.
123
- looking down negative z-axis.
124
- vertical vector is y-axis.
125
- Xcam = R(X - C)
126
- Homo: [[R, -RC], [0, 1]]
127
- Args:
128
- vertices: [nver, 3]
129
- eye: [3,] the XYZ world space position of the camera.
130
- at: [3,] a position along the center of the camera's gaze.
131
- up: [3,] up direction
132
- Returns:
133
- transformed_vertices: [nver, 3]
134
- """
135
- if at is None:
136
- at = np.array([0, 0, 0], np.float32)
137
- if up is None:
138
- up = np.array([0, 1, 0], np.float32)
139
-
140
- eye = np.array(eye).astype(np.float32)
141
- at = np.array(at).astype(np.float32)
142
- z_axis = -normalize(at - eye) # look forward
143
- x_axis = normalize(np.cross(up, z_axis)) # look right
144
- y_axis = np.cross(z_axis, x_axis) # look up
145
-
146
- R = np.stack((x_axis, y_axis, z_axis)) # 3 x 3; rows are the camera axes
147
- transformed_vertices = vertices - eye # translation
148
- transformed_vertices = transformed_vertices.dot(R.T) # rotation
149
- return transformed_vertices
150
-
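
A small check of the 'look at' convention above: the eye maps to the origin and the gaze target lands on the negative z-axis.

    import numpy as np

    eye = np.array([0, 0, 5], dtype=np.float32)
    verts = np.array([[0, 0, 5], [0, 0, 0]], dtype=np.float32)  # the eye itself and the default 'at'
    cam = lookat_camera(verts, eye)
    print(cam[0])  # ~ [0, 0, 0]
    print(cam[1])  # ~ [0, 0, -5]
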
151
- ## --------- 3d-2d project. from camera space to image plane
152
- # generally, image plane only keeps x,y channels, here reserve z channel for calculating z-buffer.
153
- def orthographic_project(vertices):
154
- ''' scaled orthographic projection(just delete z)
155
- assumes: variations in depth over the object is small relative to the mean distance from camera to object
156
- x -> x*f/z, y -> y*f/z, z -> f.
157
- for point i,j. zi~=zj. so just delete z
158
- ** often used in face
159
- Homo: P = [[1,0,0,0], [0,1,0,0], [0,0,1,0]]
160
- Args:
161
- vertices: [nver, 3]
162
- Returns:
163
- projected_vertices: [nver, 3]. the z channel is kept for later z-buffering.
164
- '''
165
- return vertices.copy()
166
-
167
- def perspective_project(vertices, fovy, aspect_ratio = 1., near = 0.1, far = 1000.):
168
- ''' perspective projection.
169
- Args:
170
- vertices: [nver, 3]
171
- fovy: vertical angular field of view. degree.
172
- aspect_ratio : width / height of field of view
173
- near : depth of near clipping plane
174
- far : depth of far clipping plane
175
- Returns:
176
- projected_vertices: [nver, 3]
177
- '''
178
- fovy = np.deg2rad(fovy)
179
- top = near*np.tan(fovy/2) # fovy is the full vertical field of view
180
- bottom = -top
181
- right = top*aspect_ratio
182
- left = -right
183
-
184
- #-- homo
185
- P = np.array([[near/right, 0, 0, 0],
186
- [0, near/top, 0, 0],
187
- [0, 0, -(far+near)/(far-near), -2*far*near/(far-near)],
188
- [0, 0, -1, 0]])
189
- vertices_homo = np.hstack((vertices, np.ones((vertices.shape[0], 1)))) # [nver, 4]
190
- projected_vertices = vertices_homo.dot(P.T)
191
- projected_vertices = projected_vertices/projected_vertices[:,3:]
192
- projected_vertices = projected_vertices[:,:3]
193
- projected_vertices[:,2] = -projected_vertices[:,2]
194
-
195
- #-- non homo. only fovy
196
- # projected_vertices = vertices.copy()
197
- # projected_vertices[:,0] = -(near/right)*vertices[:,0]/vertices[:,2]
198
- # projected_vertices[:,1] = -(near/top)*vertices[:,1]/vertices[:,2]
199
- return projected_vertices
200
-
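
A worked example of the foreshortening that perspective_project produces (assuming the function above): the farther of two identical offsets projects closer to the image center.

    import numpy as np

    verts = np.array([[1.0, 1.0, -5.0], [1.0, 1.0, -50.0]])
    ndc = perspective_project(verts, fovy=60)
    print(np.abs(ndc[0, :2]) > np.abs(ndc[1, :2]))  # [ True  True]
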
201
-
202
- def to_image(vertices, h, w, is_perspective = False):
203
- ''' change vertices to image coord system
204
- 3d system: XYZ, center(0, 0, 0)
205
- 2d image: x(u), y(v). center(w/2, h/2), flip y-axis.
206
- Args:
207
- vertices: [nver, 3]
208
- h: height of the rendering
209
- w : width of the rendering
210
- Returns:
211
- projected_vertices: [nver, 3]
212
- '''
213
- image_vertices = vertices.copy()
214
- if is_perspective:
215
- # if perspective, the projected vertices are normalized to [-1, 1]. so change it to image size first.
216
- image_vertices[:,0] = image_vertices[:,0]*w/2
217
- image_vertices[:,1] = image_vertices[:,1]*h/2
218
- # move to center of image
219
- image_vertices[:,0] = image_vertices[:,0] + w/2
220
- image_vertices[:,1] = image_vertices[:,1] + h/2
221
- # flip vertices along y-axis.
222
- image_vertices[:,1] = h - image_vertices[:,1] - 1
223
- return image_vertices
224
-
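
A one-line check of the image-coordinate convention above: the 3d origin lands at the image center, with the y-axis flipped.

    import numpy as np

    v = np.array([[0.0, 0.0, 0.0]])
    print(to_image(v, h=256, w=256, is_perspective=True)[0])  # ~ [128., 127., 0.]
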
225
-
226
- #### -------------------------------------------2. estimate transform matrix from correspondences.
227
- def estimate_affine_matrix_3d23d(X, Y):
228
- ''' Using least-squares solution
229
- Args:
230
- X: [n, 3]. 3d points(fixed)
231
- Y: [n, 3]. corresponding 3d points(moving). Y = PX
232
- Returns:
233
- P_Affine: (3, 4). Affine camera matrix (the third row is [0, 0, 0, 1]).
234
- '''
235
- X_homo = np.hstack((X, np.ones([X.shape[0], 1]))) # n x 4
236
- P = np.linalg.lstsq(X_homo, Y, rcond=None)[0].T # Affine matrix. 3 x 4
237
- return P
238
-
239
- def estimate_affine_matrix_3d22d(X, x):
240
- ''' Using Golden Standard Algorithm for estimating an affine camera
241
- matrix P from world to image correspondences.
242
- See Alg.7.2. in MVGCV
243
- Code Ref: https://github.com/patrikhuber/eos/blob/master/include/eos/fitting/affine_camera_estimation.hpp
244
- x_homo = X_homo.dot(P_Affine)
245
- Args:
246
- X: [n, 3]. corresponding 3d points(fixed)
247
- x: [n, 2]. n>=4. 2d points(moving). x = PX
248
- Returns:
249
- P_Affine: [3, 4]. Affine camera matrix
250
- '''
251
- X = X.T; x = x.T
252
- assert(x.shape[1] == X.shape[1])
253
- n = x.shape[1]
254
- assert(n >= 4)
255
-
256
- #--- 1. normalization
257
- # 2d points
258
- mean = np.mean(x, 1) # (2,)
259
- x = x - np.tile(mean[:, np.newaxis], [1, n])
260
- average_norm = np.mean(np.sqrt(np.sum(x**2, 0)))
261
- scale = np.sqrt(2) / average_norm
262
- x = scale * x
263
-
264
- T = np.zeros((3,3), dtype = np.float32)
265
- T[0, 0] = T[1, 1] = scale
266
- T[:2, 2] = -mean*scale
267
- T[2, 2] = 1
268
-
269
- # 3d points
270
- X_homo = np.vstack((X, np.ones((1, n))))
271
- mean = np.mean(X, 1) # (3,)
272
- X = X - np.tile(mean[:, np.newaxis], [1, n])
273
- m = X_homo[:3,:] - X
274
- average_norm = np.mean(np.sqrt(np.sum(X**2, 0)))
275
- scale = np.sqrt(3) / average_norm
276
- X = scale * X
277
-
278
- U = np.zeros((4,4), dtype = np.float32)
279
- U[0, 0] = U[1, 1] = U[2, 2] = scale
280
- U[:3, 3] = -mean*scale
281
- U[3, 3] = 1
282
-
283
- # --- 2. equations
284
- A = np.zeros((n*2, 8), dtype = np.float32);
285
- X_homo = np.vstack((X, np.ones((1, n)))).T
286
- A[:n, :4] = X_homo
287
- A[n:, 4:] = X_homo
288
- b = np.reshape(x, [-1, 1])
289
-
290
- # --- 3. solution
291
- p_8 = np.linalg.pinv(A).dot(b)
292
- P = np.zeros((3, 4), dtype = np.float32)
293
- P[0, :] = p_8[:4, 0]
294
- P[1, :] = p_8[4:, 0]
295
- P[-1, -1] = 1
296
-
297
- # --- 4. denormalization
298
- P_Affine = np.linalg.inv(T).dot(P.dot(U))
299
- return P_Affine
300
-
301
- def P2sRt(P):
302
- ''' decompose camera matrix P into scale, rotation and translation
303
- Args:
304
- P: (3, 4). Affine Camera Matrix.
305
- Returns:
306
- s: scale factor.
307
- R: (3, 3). rotation matrix.
308
- t: (3,). translation.
309
- '''
310
- t = P[:, 3]
311
- R1 = P[0:1, :3]
312
- R2 = P[1:2, :3]
313
- s = (np.linalg.norm(R1) + np.linalg.norm(R2))/2.0
314
- r1 = R1/np.linalg.norm(R1)
315
- r2 = R2/np.linalg.norm(R2)
316
- r3 = np.cross(r1, r2)
317
-
318
- R = np.concatenate((r1, r2, r3), 0)
319
- return s, R, t
320
-
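
A round-trip sketch for P2sRt (assuming angle2matrix from above): build an affine camera matrix from a known scale, rotation and translation, then recover them.

    import numpy as np

    s, t = 2.0, np.array([1.0, 2.0, 3.0])
    R = angle2matrix([10, 20, 30])
    P = np.concatenate((s * R, t[:, np.newaxis]), axis=1)  # 3 x 4
    s2, R2, t2 = P2sRt(P)
    assert np.isclose(s2, s) and np.allclose(R2, R, atol=1e-5) and np.allclose(t2, t)
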
321
- #Ref: https://www.learnopencv.com/rotation-matrix-to-euler-angles/
322
- def isRotationMatrix(R):
323
- ''' checks whether a matrix is a valid rotation matrix (i.e. whether R'R is the identity)
324
- '''
325
- Rt = np.transpose(R)
326
- shouldBeIdentity = np.dot(Rt, R)
327
- I = np.identity(3, dtype = R.dtype)
328
- n = np.linalg.norm(I - shouldBeIdentity)
329
- return n < 1e-6
330
-
331
- def matrix2angle(R):
332
- ''' get three Euler angles from Rotation Matrix
333
- Args:
334
- R: (3,3). rotation matrix
335
- Returns:
336
- x: pitch
337
- y: yaw
338
- z: roll
339
- '''
340
- assert(isRotationMatrix(R))
341
- sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
342
-
343
- singular = sy < 1e-6
344
-
345
- if not singular :
346
- x = math.atan2(R[2,1] , R[2,2])
347
- y = math.atan2(-R[2,0], sy)
348
- z = math.atan2(R[1,0], R[0,0])
349
- else :
350
- x = math.atan2(-R[1,2], R[1,1])
351
- y = math.atan2(-R[2,0], sy)
352
- z = 0
353
-
354
- # rx, ry, rz = np.rad2deg(x), np.rad2deg(y), np.rad2deg(z)
355
- rx, ry, rz = x*180/np.pi, y*180/np.pi, z*180/np.pi
356
- return rx, ry, rz
357
-
358
- # def matrix2angle(R):
359
- # ''' compute three Euler angles from a Rotation Matrix. Ref: http://www.gregslabaugh.net/publications/euler.pdf
360
- # Args:
361
- # R: (3,3). rotation matrix
362
- # Returns:
363
- # x: yaw
364
- # y: pitch
365
- # z: roll
366
- # '''
367
- # # assert(isRotationMatrix(R))
368
-
369
- # if R[2,0] !=1 or R[2,0] != -1:
370
- # x = math.asin(R[2,0])
371
- # y = math.atan2(R[2,1]/cos(x), R[2,2]/cos(x))
372
- # z = math.atan2(R[1,0]/cos(x), R[0,0]/cos(x))
373
-
374
- # else:# Gimbal lock
375
- # z = 0 #can be anything
376
- # if R[2,0] == -1:
377
- # x = np.pi/2
378
- # y = z + math.atan2(R[0,1], R[0,2])
379
- # else:
380
- # x = -np.pi/2
381
- # y = -z + math.atan2(-R[0,1], -R[0,2])
382
-
383
- # return x, y, z
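
A round trip between angle2matrix and matrix2angle (valid away from gimbal lock, i.e. |yaw| < 90 degrees):

    import numpy as np

    angles = [20, 35, -15]  # pitch, yaw, roll in degrees
    rx, ry, rz = matrix2angle(angle2matrix(angles))
    print(np.allclose([rx, ry, rz], angles, atol=1e-4))  # True
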
insightface/thirdparty/face3d/mesh/vis.py DELETED
@@ -1,24 +0,0 @@
1
- from __future__ import absolute_import
2
- from __future__ import division
3
- from __future__ import print_function
4
-
5
- import numpy as np
6
- import matplotlib.pyplot as plt
7
- from skimage import measure
8
- from mpl_toolkits.mplot3d import Axes3D
9
-
10
- def plot_mesh(vertices, triangles, subplot = [1,1,1], title = 'mesh', el = 90, az = -90, lwdt=.1, dist = 6, color = "grey"):
11
- '''
12
- plot the mesh
13
- Args:
14
- vertices: [nver, 3]
15
- triangles: [ntri, 3]
16
- '''
17
- ax = plt.subplot(subplot[0], subplot[1], subplot[2], projection = '3d')
18
- ax.plot_trisurf(vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles = triangles, lw = lwdt, color = color, alpha = 1)
19
- ax.axis("off")
20
- ax.view_init(elev = el, azim = az)
21
- ax.dist = dist
22
- plt.title(title)
23
-
24
- ### -------------- Todo: use vtk to visualize mesh? or visvis? or VisPy?
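
A minimal usage sketch for plot_mesh above, on a single triangle:

    import numpy as np
    import matplotlib.pyplot as plt

    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
    triangles = np.array([[0, 1, 2]], dtype=np.int32)
    plot_mesh(vertices, triangles, title='one triangle')
    plt.show()
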
insightface/thirdparty/face3d/mesh_numpy/__init__.py DELETED
@@ -1,10 +0,0 @@
1
- from __future__ import absolute_import
2
- from __future__ import division
3
- from __future__ import print_function
4
-
5
- from . import io
6
- from . import vis
7
- from . import transform
8
- from . import light
9
- from . import render
10
-
insightface/thirdparty/face3d/mesh_numpy/io.py DELETED
@@ -1,170 +0,0 @@
1
- ''' io: read&write mesh
2
- 1. read obj as array(TODO)
3
- 2. write arrays to obj
4
-
5
- Preparation knowledge:
6
- representations of 3d face: mesh, point cloud...
7
- storage format: obj, ply, bin, asc, mat...
8
- '''
9
-
10
- from __future__ import absolute_import
11
- from __future__ import division
12
- from __future__ import print_function
13
-
14
- import numpy as np
15
- import os
16
- from skimage import io
17
-
18
- ## TODO: c++ version
20
- def read_obj(obj_name):
21
- ''' read mesh
22
- '''
23
- return 0
24
-
25
- # ------------------------- write
26
- def write_asc(path, vertices):
27
- '''
28
- Args:
29
- vertices: shape = (nver, 3)
30
- '''
31
- if path.split('.')[-1] == 'asc':
32
- np.savetxt(path, vertices)
33
- else:
34
- np.savetxt(path + '.asc', vertices)
35
-
36
- def write_obj_with_colors(obj_name, vertices, triangles, colors):
37
- ''' Save 3D face model with texture represented by colors.
38
- Args:
39
- obj_name: str
40
- vertices: shape = (nver, 3)
41
- triangles: shape = (ntri, 3)
42
- colors: shape = (nver, 3)
43
- '''
44
- triangles = triangles.copy()
45
- triangles += 1 # obj indices are 1-based (e.g. MeshLab starts counting at 1)
46
-
47
- if obj_name.split('.')[-1] != 'obj':
48
- obj_name = obj_name + '.obj'
49
-
50
- # write obj
51
- with open(obj_name, 'w') as f:
52
-
53
- # write vertices & colors
54
- for i in range(vertices.shape[0]):
55
- # s = 'v {} {} {} \n'.format(vertices[0,i], vertices[1,i], vertices[2,i])
56
- s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
57
- f.write(s)
58
-
59
- # write f: ver ind/ uv ind
60
- [k, ntri] = triangles.shape
61
- for i in range(triangles.shape[0]):
62
- # s = 'f {} {} {}\n'.format(triangles[i, 0], triangles[i, 1], triangles[i, 2])
63
- s = 'f {} {} {}\n'.format(triangles[i, 2], triangles[i, 1], triangles[i, 0])
64
- f.write(s)
65
-
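
For reference, a call like the sketch below makes write_obj_with_colors emit 'v x y z r g b' lines (position plus RGB, a common non-standard OBJ extension) and 1-based 'f' lines with the winding reversed by the writer:

    import numpy as np

    vertices = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float32)
    triangles = np.array([[0, 1, 2]], dtype=np.int32)
    colors = np.tile([1.0, 0.0, 0.0], (3, 1))  # one red triangle
    write_obj_with_colors('tri.obj', vertices, triangles, colors)
    # tri.obj now contains lines such as 'v 0.0 0.0 0.0 1.0 0.0 0.0' and 'f 3 2 1'
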
66
- ## TODO: c++ version
67
- def write_obj_with_texture(obj_name, vertices, triangles, texture, uv_coords):
68
- ''' Save 3D face model with texture represented by texture map.
69
- Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
70
- Args:
71
- obj_name: str
72
- vertices: shape = (nver, 3)
73
- triangles: shape = (ntri, 3)
74
- texture: shape = (256,256,3)
75
- uv_coords: shape = (nver, 2). values in [0, 1] (only u and v are written)
76
- '''
77
- if obj_name.split('.')[-1] != 'obj':
78
- obj_name = obj_name + '.obj'
79
- mtl_name = obj_name.replace('.obj', '.mtl')
80
- texture_name = obj_name.replace('.obj', '_texture.png')
81
-
82
- triangles = triangles.copy()
83
- triangles += 1 # obj indices are 1-based (e.g. MeshLab starts counting at 1)
84
-
85
- # write obj
86
- with open(obj_name, 'w') as f:
87
- # first line: write mtlib(material library)
88
- s = "mtllib {}\n".format(os.path.abspath(mtl_name))
89
- f.write(s)
90
-
91
- # write vertices
92
- for i in range(vertices.shape[0]):
93
- s = 'v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2])
94
- f.write(s)
95
-
96
- # write uv coords
97
- for i in range(uv_coords.shape[0]):
98
- # s = 'vt {} {}\n'.format(uv_coords[i,0], 1 - uv_coords[i,1])
99
- s = 'vt {} {}\n'.format(uv_coords[i,0], uv_coords[i,1])
100
- f.write(s)
101
-
102
- f.write("usemtl FaceTexture\n")
103
-
104
- # write f: ver ind/ uv ind
105
- for i in range(triangles.shape[0]):
106
- s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
107
- f.write(s)
108
-
109
- # write mtl
110
- with open(mtl_name, 'w') as f:
111
- f.write("newmtl FaceTexture\n")
112
- s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
113
- f.write(s)
114
-
115
- # write texture as png
116
- io.imsave(texture_name, texture)
117
-
118
-
119
- def write_obj_with_colors_texture(obj_name, vertices, triangles, colors, texture, uv_coords):
120
- ''' Save 3D face model with texture.
121
- Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
122
- Args:
123
- obj_name: str
124
- vertices: shape = (nver, 3)
125
- triangles: shape = (ntri, 3)
126
- colors: shape = (nver, 3)
127
- texture: shape = (256,256,3)
128
- uv_coords: shape = (nver, 2). values in [0, 1] (only u and v are written)
129
- '''
130
- if obj_name.split('.')[-1] != 'obj':
131
- obj_name = obj_name + '.obj'
132
- mtl_name = obj_name.replace('.obj', '.mtl')
133
- texture_name = obj_name.replace('.obj', '_texture.png')
134
-
135
- triangles = triangles.copy()
136
- triangles += 1 # obj indices are 1-based (e.g. MeshLab starts counting at 1)
137
-
138
- # write obj
139
- with open(obj_name, 'w') as f:
140
- # first line: write mtlib(material library)
141
- s = "mtllib {}\n".format(os.path.abspath(mtl_name))
142
- f.write(s)
143
-
144
- # write vertices
145
- for i in range(vertices.shape[0]):
146
- s = 'v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2])
147
- f.write(s)
148
-
149
- # write uv coords
150
- for i in range(uv_coords.shape[0]):
151
- # s = 'vt {} {}\n'.format(uv_coords[i,0], 1 - uv_coords[i,1])
152
- s = 'vt {} {}\n'.format(uv_coords[i,0], uv_coords[i,1])
153
- f.write(s)
154
-
155
- f.write("usemtl FaceTexture\n")
156
-
157
- # write f: ver ind/ uv ind
158
- for i in range(triangles.shape[0]):
159
- # s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,0], triangles[i,0], triangles[i,1], triangles[i,1], triangles[i,2], triangles[i,2])
160
- s = 'f {}/{} {}/{} {}/{}\n'.format(triangles[i,2], triangles[i,2], triangles[i,1], triangles[i,1], triangles[i,0], triangles[i,0])
161
- f.write(s)
162
-
163
- # write mtl
164
- with open(mtl_name, 'w') as f:
165
- f.write("newmtl FaceTexture\n")
166
- s = 'map_Kd {}\n'.format(os.path.abspath(texture_name)) # map to image
167
- f.write(s)
168
-
169
- # write texture as png
170
- io.imsave(texture_name, texture)
insightface/thirdparty/face3d/mesh_numpy/light.py DELETED
@@ -1,215 +0,0 @@
1
- '''
2
- Functions about lighting mesh(changing colors/texture of mesh).
3
- 1. add light to colors/texture (shade each vertex)
4
- 2. fit light according to colors/texture & image.
5
-
6
- Preparation knowledge:
7
- lighting: https://cs184.eecs.berkeley.edu/lecture/pipeline
8
- spherical harmonics in human face: '3D Face Reconstruction from a Single Image Using a Single Reference Face Shape'
9
- '''
10
-
11
- from __future__ import absolute_import
12
- from __future__ import division
13
- from __future__ import print_function
14
-
15
- import numpy as np
16
-
17
- def get_normal(vertices, triangles):
18
- ''' calculate normal direction in each vertex
19
- Args:
20
- vertices: [nver, 3]
21
- triangles: [ntri, 3]
22
- Returns:
23
- normal: [nver, 3]
24
- '''
25
- pt0 = vertices[triangles[:, 0], :] # [ntri, 3]
26
- pt1 = vertices[triangles[:, 1], :] # [ntri, 3]
27
- pt2 = vertices[triangles[:, 2], :] # [ntri, 3]
28
- tri_normal = np.cross(pt0 - pt1, pt0 - pt2) # [ntri, 3]. normal of each triangle
29
-
30
- normal = np.zeros_like(vertices) # [nver, 3]
31
- for i in range(triangles.shape[0]):
32
- normal[triangles[i, 0], :] = normal[triangles[i, 0], :] + tri_normal[i, :]
33
- normal[triangles[i, 1], :] = normal[triangles[i, 1], :] + tri_normal[i, :]
34
- normal[triangles[i, 2], :] = normal[triangles[i, 2], :] + tri_normal[i, :]
35
-
36
- # normalize to unit length
37
- mag = np.sum(normal**2, 1) # [nver]
38
- zero_ind = (mag == 0)
39
- mag[zero_ind] = 1;
40
- normal[zero_ind, 0] = np.ones((np.sum(zero_ind)))
41
-
42
- normal = normal/np.sqrt(mag[:,np.newaxis])
43
-
44
- return normal
45
-
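
The per-triangle Python loop in get_normal can be replaced by a scatter-add; a vectorized sketch (np.add.at accumulates repeated indices correctly, unlike plain fancy-index assignment):

    import numpy as np

    def get_normal_vectorized(vertices, triangles):
        pt0, pt1, pt2 = (vertices[triangles[:, i], :] for i in range(3))
        tri_normal = np.cross(pt0 - pt1, pt0 - pt2)          # [ntri, 3]
        normal = np.zeros_like(vertices)
        for i in range(3):
            np.add.at(normal, triangles[:, i], tri_normal)   # scatter-add per corner
        mag = np.linalg.norm(normal, axis=1, keepdims=True)
        mag[mag == 0] = 1.0                                  # guard vertices in no triangle
        return normal / mag
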
46
- # TODO: test
47
- def add_light_sh(vertices, triangles, colors, sh_coeff):
48
- '''
49
- In 3d face, usually assume:
50
- 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
51
- 2. Lighting can be an arbitrary combination of point sources
52
- --> can be expressed in terms of spherical harmonics(omit the lighting coefficients)
53
- I = albedo * (sh(n) x sh_coeff)
54
-
55
- albedo: n x 1
56
- sh_coeff: 9 x 1
57
- Y(n) = (1, n_x, n_y, n_z, n_xn_y, n_xn_z, n_yn_z, n_x^2 - n_y^2, 3n_z^2 - 1)': n x 9
58
- # Y(n) = (1, n_x, n_y, n_z)': n x 4
59
-
60
- Args:
61
- vertices: [nver, 3]
62
- triangles: [ntri, 3]
63
- colors: [nver, 3] albedo
64
- sh_coeff: [9, 1] spherical harmonics coefficients
65
-
66
- Returns:
67
- lit_colors: [nver, 3]
68
- '''
69
- assert vertices.shape[0] == colors.shape[0]
70
- nver = vertices.shape[0]
71
- n = get_normal(vertices, triangles) # [nver, 3]
72
- sh = np.array((np.ones(nver), n[:,0], n[:,1], n[:,2], n[:,0]*n[:,1], n[:,0]*n[:,2], n[:,1]*n[:,2], n[:,0]**2 - n[:,1]**2, 3*(n[:,2]**2) - 1)).T # [nver, 9]
73
- ref = sh.dot(sh_coeff) #[nver, 1]
74
- lit_colors = colors*ref
75
- return lit_colors
76
-
77
-
78
- def add_light(vertices, triangles, colors, light_positions = 0, light_intensities = 0):
79
- ''' Gouraud shading. add point lights.
80
- In 3d face, usually assume:
81
- 1. The surface of face is Lambertian(reflect only the low frequencies of lighting)
82
- 2. Lighting can be an arbitrary combination of point sources
83
- 3. No specular term (skin is assumed not to be shiny)
84
-
85
- Ref: https://cs184.eecs.berkeley.edu/lecture/pipeline
86
- Args:
87
- vertices: [nver, 3]
88
- triangles: [ntri, 3]
89
- light_positions: [nlight, 3]
90
- light_intensities: [nlight, 3]
91
- Returns:
92
- lit_colors: [nver, 3]
93
- '''
94
- nver = vertices.shape[0]
95
- normals = get_normal(vertices, triangles) # [nver, 3]
96
-
97
- # ambient
98
- # La = ka*Ia
99
-
100
- # diffuse
101
- # Ld = kd*(I/r^2)max(0, nxl)
102
- direction_to_lights = light_positions[:, np.newaxis, :] - vertices[np.newaxis, :, :] # [nlight, nver, 3]; points from each vertex towards each light
103
- direction_to_lights_n = np.sqrt(np.sum(direction_to_lights**2, axis = 2)) # [nlight, nver]
104
- direction_to_lights = direction_to_lights/direction_to_lights_n[:, :, np.newaxis]
105
- normals_dot_lights = normals[np.newaxis, :, :]*direction_to_lights # [nlight, nver, 3]
106
- normals_dot_lights = np.sum(normals_dot_lights, axis = 2) # [nlight, nver]
107
- diffuse_output = colors[np.newaxis, :, :]*normals_dot_lights[:, :, np.newaxis]*light_intensities[:, np.newaxis, :]
108
- diffuse_output = np.sum(diffuse_output, axis = 0) # [nver, 3]
109
-
110
- # specular
111
- # h = (v + l)/(|v + l|) bisector
112
- # Ls = ks*(I/r^2)max(0, nxh)^p
113
- # increasing p narrows the reflection lobe
114
-
115
- lit_colors = diffuse_output # only diffuse part here.
116
- lit_colors = np.minimum(np.maximum(lit_colors, 0), 1)
117
- return lit_colors
118
-
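
The diffuse term above is plain Lambertian shading; a worked single-vertex example, with l pointing from the vertex towards the light:

    import numpy as np

    n = np.array([0.0, 0.0, 1.0])       # surface normal
    l = np.array([0.0, 0.0, 1.0])       # vertex-to-light direction
    albedo = np.array([0.8, 0.6, 0.4])
    print(albedo * max(0.0, n.dot(l)))  # [0.8 0.6 0.4]; falls to zero as the light grazes
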
119
-
120
-
121
- ## TODO. estimate light(sh coeff)
122
- ## -------------------------------- estimation. not fully working yet.
123
- def fit_light(image, vertices, colors, triangles, vis_ind, lamb = 10, max_iter = 3):
124
- [h, w, c] = image.shape
125
-
126
- # surface normal
127
- norm = get_normal(vertices, triangles)
128
-
129
- nver = vertices.shape[1]
130
-
131
- # vertices --> corresponding image pixel
132
- pt2d = vertices[:2, :]
133
-
134
- pt2d[0,:] = np.minimum(np.maximum(pt2d[0,:], 0), w - 1)
135
- pt2d[1,:] = np.minimum(np.maximum(pt2d[1,:], 0), h - 1)
136
- pt2d = np.round(pt2d).astype(np.int32) # 2 x nver
137
-
138
- image_pixel = image[pt2d[1,:], pt2d[0,:], :] # nver x 3
139
- image_pixel = image_pixel.T # 3 x nver
140
-
141
- # vertices --> corresponding mean texture pixel with illumination
142
- # Spherical Harmonic Basis
143
- harmonic_dim = 9
144
- nx = norm[0,:];
145
- ny = norm[1,:];
146
- nz = norm[2,:];
147
- harmonic = np.zeros((nver, harmonic_dim))
148
-
149
- pi = np.pi
150
- harmonic[:,0] = np.sqrt(1/(4*pi)) * np.ones((nver,));
151
- harmonic[:,1] = np.sqrt(3/(4*pi)) * nx;
152
- harmonic[:,2] = np.sqrt(3/(4*pi)) * ny;
153
- harmonic[:,3] = np.sqrt(3/(4*pi)) * nz;
154
- harmonic[:,4] = 1/2. * np.sqrt(3/(4*pi)) * (2*nz**2 - nx**2 - ny**2);
155
- harmonic[:,5] = 3 * np.sqrt(5/(12*pi)) * (ny*nz);
156
- harmonic[:,6] = 3 * np.sqrt(5/(12*pi)) * (nx*nz);
157
- harmonic[:,7] = 3 * np.sqrt(5/(12*pi)) * (nx*ny);
158
- harmonic[:,8] = 3/2. * np.sqrt(5/(12*pi)) * (nx*nx - ny*ny);
159
-
160
- '''
161
- I' = sum(albedo * lj * hj), j = 0..8 (albedo = the per-vertex colors)
162
- set A = albedo*h (n x 9)
163
- alpha = lj (9 x 1)
164
- Y = I (n x 1)
165
- Y' = A.dot(alpha)
166
-
167
- opt function:
168
- ||Y - A*alpha||^2 + lambda*(alpha'*alpha)
169
- result:
170
- A'*(Y - A*alpha) - lambda*alpha = 0
171
- ==>
172
- (A'*A + lambda*I)*alpha = A'*Y
173
- left: 9 x 9
174
- right: 9 x 1
175
- '''
176
- n_vis_ind = len(vis_ind)
177
- n = n_vis_ind*c
178
-
179
- Y = np.zeros((n, 1))
180
- A = np.zeros((n, 9))
181
- light = np.zeros((3, 1))
182
-
183
- for k in range(c):
184
- Y[k*n_vis_ind:(k+1)*n_vis_ind, :] = image_pixel[k, vis_ind][:, np.newaxis]
185
- A[k*n_vis_ind:(k+1)*n_vis_ind, :] = colors[k, vis_ind][:, np.newaxis] * harmonic[vis_ind, :]
186
- Ac = colors[k, vis_ind][:, np.newaxis]
187
- Yc = image_pixel[k, vis_ind][:, np.newaxis]
188
- light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
189
-
190
- for i in range(max_iter):
191
-
192
- Yc = Y.copy()
193
- for k in range(c):
194
- Yc[k*n_vis_ind:(k+1)*n_vis_ind, :] /= light[k]
195
-
196
- # update alpha
197
- equation_left = np.dot(A.T, A) + lamb*np.eye(harmonic_dim) # ridge term: adding lambda*I keeps the 9x9 system well-conditioned
198
- equation_right = np.dot(A.T, Yc)
199
- alpha = np.dot(np.linalg.inv(equation_left), equation_right)
200
-
201
- # update light
202
- for k in range(c):
203
- Ac = A[k*n_vis_ind:(k+1)*n_vis_ind, :].dot(alpha)
204
- Yc = Y[k*n_vis_ind:(k+1)*n_vis_ind, :]
205
- light[k] = (Ac.T.dot(Yc))/(Ac.T.dot(Ac))
206
-
207
- appearance = np.zeros_like(colors)
208
- for k in range(c):
209
- tmp = np.dot(harmonic*colors[k, :][:, np.newaxis], alpha*light[k])
210
- appearance[k,:] = tmp.T
211
-
212
- appearance = np.minimum(np.maximum(appearance, 0), 1)
213
-
214
- return appearance
215
-
insightface/thirdparty/face3d/mesh_numpy/render.py DELETED
@@ -1,287 +0,0 @@
1
- '''
2
- functions about rendering a mesh (from 3d obj to 2d image).
3
- only rasterization rendering is used here.
4
- Note that:
5
- 1. Generally, a render function includes camera, light and rasterization. There is no camera or light here (those are written in other files).
6
- 2. Generally, the input vertices are normalized to [-1, 1] and centered on [0, 0] (in world space).
7
- Here, the vertices use image coords, which center on [w/2, h/2] with the y-axis pointing in the opposite direction.
8
- This means the render step here only conducts interpolation (the intent is to keep the input flexible).
9
-
10
- Preparation knowledge:
11
- z-buffer: https://cs184.eecs.berkeley.edu/lecture/pipeline
12
-
13
- Author: Yao Feng
14
- Mail: yaofeng1995@gmail.com
15
- '''
16
- from __future__ import absolute_import
17
- from __future__ import division
18
- from __future__ import print_function
19
-
20
- import numpy as np
21
- from time import time
22
-
23
- def isPointInTri(point, tri_points):
24
- ''' Judge whether the point is in the triangle
25
- Method:
26
- http://blackpawn.com/texts/pointinpoly/
27
- Args:
28
- point: (2,). [u, v] or [x, y]
29
- tri_points: (3 vertices, 2 coords). three vertices(2d points) of a triangle.
30
- Returns:
31
- bool: true for in triangle
32
- '''
33
- tp = tri_points
34
-
35
- # vectors
36
- v0 = tp[2,:] - tp[0,:]
37
- v1 = tp[1,:] - tp[0,:]
38
- v2 = point - tp[0,:]
39
-
40
- # dot products
41
- dot00 = np.dot(v0.T, v0)
42
- dot01 = np.dot(v0.T, v1)
43
- dot02 = np.dot(v0.T, v2)
44
- dot11 = np.dot(v1.T, v1)
45
- dot12 = np.dot(v1.T, v2)
46
-
47
- # barycentric coordinates
48
- if dot00*dot11 - dot01*dot01 == 0:
49
- inverDeno = 0
50
- else:
51
- inverDeno = 1/(dot00*dot11 - dot01*dot01)
52
-
53
- u = (dot11*dot02 - dot01*dot12)*inverDeno
54
- v = (dot00*dot12 - dot01*dot02)*inverDeno
55
-
56
- # check if point in triangle
57
- return (u >= 0) & (v >= 0) & (u + v < 1)
58
-
59
- def get_point_weight(point, tri_points):
60
- ''' Get the weights of the position
61
- Methods: https://gamedev.stackexchange.com/questions/23743/whats-the-most-efficient-way-to-find-barycentric-coordinates
62
- -m1.compute the area of the triangles formed by embedding the point P inside the triangle
63
- -m2.Christer Ericson's book "Real-Time Collision Detection". faster.(used)
64
- Args:
65
- point: (2,). [u, v] or [x, y]
66
- tri_points: (3 vertices, 2 coords). three vertices(2d points) of a triangle.
67
- Returns:
68
- w0: weight of v0
69
- w1: weight of v1
70
- w2: weight of v3
71
- '''
72
- tp = tri_points
73
- # vectors
74
- v0 = tp[2,:] - tp[0,:]
75
- v1 = tp[1,:] - tp[0,:]
76
- v2 = point - tp[0,:]
77
-
78
- # dot products
79
- dot00 = np.dot(v0.T, v0)
80
- dot01 = np.dot(v0.T, v1)
81
- dot02 = np.dot(v0.T, v2)
82
- dot11 = np.dot(v1.T, v1)
83
- dot12 = np.dot(v1.T, v2)
84
-
85
- # barycentric coordinates
86
- if dot00*dot11 - dot01*dot01 == 0:
87
- inverDeno = 0
88
- else:
89
- inverDeno = 1/(dot00*dot11 - dot01*dot01)
90
-
91
- u = (dot11*dot02 - dot01*dot12)*inverDeno
92
- v = (dot00*dot12 - dot01*dot02)*inverDeno
93
-
94
- w0 = 1 - u - v
95
- w1 = v
96
- w2 = u
97
-
98
- return w0, w1, w2
99
-
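
A worked example for get_point_weight (assuming the function above): barycentric weights sum to 1 and reproduce the query point as a weighted combination of the triangle corners.

    import numpy as np

    tri = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0]])
    w0, w1, w2 = get_point_weight(np.array([2.5, 2.5]), tri)
    print(w0, w1, w2)  # 0.5 0.25 0.25
    print(np.allclose(w0 * tri[0] + w1 * tri[1] + w2 * tri[2], [2.5, 2.5]))  # True
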
100
- def rasterize_triangles(vertices, triangles, h, w):
101
- '''
102
- Args:
103
- vertices: [nver, 3]
104
- triangles: [ntri, 3]
105
- h: height
106
- w: width
107
- Returns:
108
- depth_buffer: [h, w] saves the depth; here, the bigger the z, the closer the point is to the camera.
109
- triangle_buffer: [h, w] saves the tri id(-1 for no triangle).
110
- barycentric_weight: [h, w, 3] saves corresponding barycentric weight.
111
-
112
- # Each triangle has 3 vertices & Each vertex has 3 coordinates x, y, z.
113
- # h, w is the size of rendering
114
- '''
115
- # initial
116
- depth_buffer = np.zeros([h, w]) - 999999. # set the initial z to the farthest position (alternatively init with np.min of the vertex z values)
117
- triangle_buffer = np.zeros([h, w], dtype = np.int32) - 1 # if tri id = -1, the pixel has no triangle correspondence
118
- barycentric_weight = np.zeros([h, w, 3], dtype = np.float32) #
119
-
120
- for i in range(triangles.shape[0]):
121
- tri = triangles[i, :] # 3 vertex indices
122
-
123
- # the inner bounding box
124
- umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
125
- umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
126
-
127
- vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
128
- vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
129
-
130
- if umax<umin or vmax<vmin:
131
- continue
132
-
133
- for u in range(umin, umax+1):
134
- for v in range(vmin, vmax+1):
135
- if not isPointInTri([u,v], vertices[tri, :2]):
136
- continue
137
- w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2]) # barycentric weight
138
- point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
139
- if point_depth > depth_buffer[v, u]:
140
- depth_buffer[v, u] = point_depth
141
- triangle_buffer[v, u] = i
142
- barycentric_weight[v, u, :] = np.array([w0, w1, w2])
143
-
144
- return depth_buffer, triangle_buffer, barycentric_weight
145
-
146
-
147
- def render_colors_ras(vertices, triangles, colors, h, w, c = 3):
148
- ''' render mesh with colors(rasterize triangle first)
149
- Args:
150
- vertices: [nver, 3]
151
- triangles: [ntri, 3]
152
- colors: [nver, 3]
153
- h: height
154
- w: width
155
- c: channel
156
- Returns:
157
- image: [h, w, c]. rendering.
158
- '''
159
- assert vertices.shape[0] == colors.shape[0]
160
-
161
- depth_buffer, triangle_buffer, barycentric_weight = rasterize_triangles(vertices, triangles, h, w)
162
-
163
- triangle_buffer_flat = np.reshape(triangle_buffer, [-1]) # [h*w]
164
- barycentric_weight_flat = np.reshape(barycentric_weight, [-1, c]) #[h*w, c]
165
- weight = barycentric_weight_flat[:, :, np.newaxis] # [h*w, 3(ver in tri), 1]
166
-
167
- colors_flat = colors[triangles[triangle_buffer_flat, :], :] # [h*w(tri id in pixel), 3(ver in tri), c(color in ver)]
168
- colors_flat = weight*colors_flat # [h*w, 3, 3]
169
- colors_flat = np.sum(colors_flat, 1) #[h*w, 3]. add tri.
170
-
171
- image = np.reshape(colors_flat, [h, w, c])
172
- # mask = (triangle_buffer[:,:] > -1).astype(np.float32)
173
- # image = image*mask[:,:,np.newaxis]
174
- return image
175
-
176
-
177
- def render_colors(vertices, triangles, colors, h, w, c = 3):
178
- ''' render mesh with colors
179
- Args:
180
- vertices: [nver, 3]
181
- triangles: [ntri, 3]
182
- colors: [nver, 3]
183
- h: height
184
- w: width
185
- Returns:
186
- image: [h, w, c].
187
- '''
188
- assert vertices.shape[0] == colors.shape[0]
189
-
190
- # initial
191
- image = np.zeros((h, w, c))
192
- depth_buffer = np.zeros([h, w]) - 999999.
193
-
194
- for i in range(triangles.shape[0]):
195
- tri = triangles[i, :] # 3 vertex indices
196
-
197
- # the inner bounding box
198
- umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
199
- umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
200
-
201
- vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
202
- vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
203
-
204
- if umax<umin or vmax<vmin:
205
- continue
206
-
207
- for u in range(umin, umax+1):
208
- for v in range(vmin, vmax+1):
209
- if not isPointInTri([u,v], vertices[tri, :2]):
210
- continue
211
- w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2])
212
- point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
213
-
214
- if point_depth > depth_buffer[v, u]:
215
- depth_buffer[v, u] = point_depth
216
- image[v, u, :] = w0*colors[tri[0], :] + w1*colors[tri[1], :] + w2*colors[tri[2], :]
217
- return image
218
-
219
-
220
- def render_texture(vertices, triangles, texture, tex_coords, tex_triangles, h, w, c = 3, mapping_type = 'nearest'):
221
- ''' render mesh with texture map
222
- Args:
223
- vertices: [nver, 3]
224
- triangles: [ntri, 3]
225
- texture: [tex_h, tex_w, 3]
226
- tex_coords: [ntexcoords, 3]
227
- tex_triangles: [ntri, 3]
228
- h: height of rendering
229
- w: width of rendering
230
- c: channel
231
- mapping_type: 'bilinear' or 'nearest'
232
- '''
233
- assert triangles.shape[0] == tex_triangles.shape[0]
234
- tex_h, tex_w, _ = texture.shape
235
-
236
- # initial
237
- image = np.zeros((h, w, c))
238
- depth_buffer = np.zeros([h, w]) - 999999.
239
-
240
- for i in range(triangles.shape[0]):
241
- tri = triangles[i, :] # 3 vertex indices
242
- tex_tri = tex_triangles[i, :] # 3 texture indices
243
-
244
- # the inner bounding box
245
- umin = max(int(np.ceil(np.min(vertices[tri, 0]))), 0)
246
- umax = min(int(np.floor(np.max(vertices[tri, 0]))), w-1)
247
-
248
- vmin = max(int(np.ceil(np.min(vertices[tri, 1]))), 0)
249
- vmax = min(int(np.floor(np.max(vertices[tri, 1]))), h-1)
250
-
251
- if umax<umin or vmax<vmin:
252
- continue
253
-
254
- for u in range(umin, umax+1):
255
- for v in range(vmin, vmax+1):
256
- if not isPointInTri([u,v], vertices[tri, :2]):
257
- continue
258
- w0, w1, w2 = get_point_weight([u, v], vertices[tri, :2])
259
- point_depth = w0*vertices[tri[0], 2] + w1*vertices[tri[1], 2] + w2*vertices[tri[2], 2]
260
- if point_depth > depth_buffer[v, u]:
261
- # update depth
262
- depth_buffer[v, u] = point_depth
263
-
264
- # tex coord
265
- tex_xy = w0*tex_coords[tex_tri[0], :] + w1*tex_coords[tex_tri[1], :] + w2*tex_coords[tex_tri[2], :]
266
- tex_xy[0] = max(min(tex_xy[0], float(tex_w - 1)), 0.0)
267
- tex_xy[1] = max(min(tex_xy[1], float(tex_h - 1)), 0.0)
268
-
269
- # nearest
270
- if mapping_type == 'nearest':
271
- tex_xy = np.round(tex_xy).astype(np.int32)
272
- tex_value = texture[tex_xy[1], tex_xy[0], :]
273
-
274
- # bilinear
275
- elif mapping_type == 'bilinear':
276
- # next 4 pixels
277
- ul = texture[int(np.floor(tex_xy[1])), int(np.floor(tex_xy[0])), :]
278
- ur = texture[int(np.floor(tex_xy[1])), int(np.ceil(tex_xy[0])), :]
279
- dl = texture[int(np.ceil(tex_xy[1])), int(np.floor(tex_xy[0])), :]
280
- dr = texture[int(np.ceil(tex_xy[1])), int(np.ceil(tex_xy[0])), :]
281
-
282
- yd = tex_xy[1] - np.floor(tex_xy[1])
283
- xd = tex_xy[0] - np.floor(tex_xy[0])
284
- tex_value = ul*(1-xd)*(1-yd) + ur*xd*(1-yd) + dl*(1-xd)*yd + dr*xd*yd
285
-
286
- image[v, u, :] = tex_value
287
- return image
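
A worked number for the bilinear branch above: the sample is a distance-weighted blend of the four surrounding texels.

    import numpy as np

    texture = np.zeros((2, 2, 3))
    texture[0, 1] = 1.0  # one white texel at (x=1, y=0)
    x, y = 0.25, 0.75
    xd, yd = x - np.floor(x), y - np.floor(y)
    ul, ur = texture[0, 0], texture[0, 1]
    dl, dr = texture[1, 0], texture[1, 1]
    value = ul*(1-xd)*(1-yd) + ur*xd*(1-yd) + dl*(1-xd)*yd + dr*xd*yd
    print(value[0])  # 0.0625 = 0.25 * 0.25, the x- and y-distance weights for that texel
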