hassan526 committed on
Commit
25b4c0f
·
verified ·
1 Parent(s): 707f90a

Upload 40 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ dependency/lib/libonnxruntime.so filter=lfs diff=lfs merge=lfs -text
37
+ dependency/lib/libopyfaceenv.so filter=lfs diff=lfs merge=lfs -text
38
+ engine/libopyfacerecog.so filter=lfs diff=lfs merge=lfs -text
39
+ examples/5.webp filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1 +1,26 @@
1
- aaa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.8-slim

# Keep the container clock consistent regardless of host settings.
ENV CONTAINER_TIMEZONE=UTC
RUN ln -snf /usr/share/zoneinfo/$CONTAINER_TIMEZONE /etc/localtime && echo $CONTAINER_TIMEZONE > /etc/timezone

# Create directory for the application
RUN mkdir -p /home/recognito_fr

# Set the working directory
WORKDIR /home/recognito_fr

# Copy the application files into the container
COPY . .

# Make the scripts executable
RUN chmod +x install.sh run_demo.sh

# Install OS dependencies in a single layer and clean the apt cache so the
# package lists are not baked into the image (smaller image, reproducible).
RUN apt-get update \
    && apt-get install -y --no-install-recommends sudo \
    && rm -rf /var/lib/apt/lists/*
# Run the install.sh script to perform any installation tasks
RUN ./install.sh

# Expose port 8000(flask), 7860(gradio)
EXPOSE 8000 7860

# Set the default command to run the application
ENTRYPOINT ["./run_demo.sh"]
app.py ADDED
@@ -0,0 +1,258 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
sys.path.append('.')

import os
import numpy as np
import base64
import json
import io
from PIL import Image, ExifTags
from flask import Flask, request, jsonify
from engine.header import *

# Maximum number of faces processed per image: 5 for the compare endpoints,
# 10 for the standalone detect endpoint.
maxFaceCount1 = 5
maxFaceCount2 = 10

file_path = os.path.abspath(__file__)
root_path = os.path.dirname(file_path)

app = Flask(__name__)
app.config['SITE'] = "http://0.0.0.0:8000/"
app.config['DEBUG'] = False

device_id = getHWID().decode('utf-8')
print_info('\t <Hardware ID> \t\t {}'.format(device_id))

# License key comes from the FR_LICENSE_KEY env var, falling back to a
# license.txt file next to this script.
license = os.environ.get("FR_LICENSE_KEY")
dict_path = os.path.join(root_path, "engine")

ret = -1
if license is None:
    try:
        licenseKeyPath = os.path.join(root_path, "license.txt")
        with open(licenseKeyPath, 'r') as file:
            license = file.read().strip()
    except IOError as exc:
        print_error(f"failed to open license.txt: {exc.errno}")

if license is None:
    # BUG FIX: previously license.encode() raised AttributeError when neither
    # the env var nor license.txt provided a key; fail with a clear message.
    print_error("No license key available (FR_LICENSE_KEY unset and license.txt unreadable)")
    sys.exit(-1)

print_log(f"License Key: \n{license}")
ret = setLicenseKey(license.encode('utf-8'))
print_log(f"Set License: {ret}")

ret = initSDK(dict_path.encode('utf-8'))
if ret == 0:
    print_log("Successfully init SDK!")
else:
    print_error(f"Failed to init SDK, Error code {ret}")
    sys.exit(-1)
48
+
49
def apply_exif_rotation(image):
    """Rotate *image* upright according to its EXIF Orientation tag.

    Only orientations 3/6/8 (180/270/90 degree rotations) are handled;
    mirrored orientations (2/4/5/7) are left untouched, matching the
    original behavior.  Returns the (possibly rotated) image.
    """
    # 274 (0x0112) is the standard EXIF Orientation tag id.  Using the numeric
    # id fixes the original lookup, which scanned ExifTags.TAGS with a `for`
    # loop: when no 'Orientation' entry matched, the loop variable was left at
    # the *last* tag id and an arbitrary EXIF value was consulted.
    EXIF_ORIENTATION_TAG = 274
    try:
        exif = image._getexif()
        if exif is not None:
            orientation = exif.get(EXIF_ORIENTATION_TAG, None)

            # Apply the appropriate rotation based on the orientation
            if orientation == 3:
                image = image.rotate(180, expand=True)
            elif orientation == 6:
                image = image.rotate(270, expand=True)
            elif orientation == 8:
                image = image.rotate(90, expand=True)
    except AttributeError:
        # Image types without _getexif() (e.g. PNG) land here.
        print_error("No EXIF data found")

    return image
72
+
73
@app.route('/compare_face', methods=['POST'])
def compare_face():
    """Compare every face found in `image1` against every face in `image2`.

    Expects a multipart/form-data POST with file fields `image1` and `image2`.
    Returns JSON holding the detected rectangles of both images and a pairwise
    similarity score for each (face1, face2) combination.  All outcomes use
    HTTP 200 with a `resultCode` field, matching the other endpoints.
    """
    def _json_response(payload):
        # Shared response boilerplate (status + explicit JSON content type).
        response = jsonify(payload)
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    file1 = request.files['image1']
    file2 = request.files['image2']

    try:
        image1 = apply_exif_rotation(Image.open(file1)).convert('RGB')
    except Exception:
        return _json_response({"resultCode": "Failed to open file1"})

    try:
        image2 = apply_exif_rotation(Image.open(file2)).convert('RGB')
    except Exception:
        return _json_response({"resultCode": "Failed to open file2"})

    image_np1 = np.asarray(image1)
    faces1 = (Face * maxFaceCount1)()
    faceNum1 = processImage(image_np1, image_np1.shape[1], image_np1.shape[0], faces1, maxFaceCount1)

    faces1_result = [{"x": faces1[i].x, "y": faces1[i].y,
                      "width": faces1[i].width, "height": faces1[i].height}
                     for i in range(faceNum1)]

    image_np2 = np.asarray(image2)
    faces2 = (Face * maxFaceCount1)()
    faceNum2 = processImage(image_np2, image_np2.shape[1], image_np2.shape[0], faces2, maxFaceCount1)

    faces2_result = [{"x": faces2[i].x, "y": faces2[i].y,
                      "width": faces2[i].width, "height": faces2[i].height}
                     for i in range(faceNum2)]

    if faceNum1 > 0 and faceNum2 > 0:
        results = []
        for i in range(faceNum1):
            for j in range(faceNum2):
                score = verifyFeat(faces1[i].featSize, faces1[i].featData, faces2[j].featSize, faces2[j].featData)
                results.append({"face1": i, "face2": j, "score": score})

        return _json_response({"resultCode": "Ok", "faces1": faces1_result,
                               "faces2": faces2_result, "results": results})
    elif faceNum1 == 0:
        # BUG FIX: the no-face branches previously passed the raw ctypes
        # arrays (faces1/faces2) to jsonify(), which is not JSON-serializable
        # and raised a TypeError; return the plain-dict rectangle lists.
        return _json_response({"resultCode": "No face1",
                               "faces1": faces1_result, "faces2": faces2_result})
    else:
        return _json_response({"resultCode": "No face2",
                               "faces1": faces1_result, "faces2": faces2_result})
141
+
142
@app.route('/compare_face_base64', methods=['POST'])
def compare_face_base64():
    """Same contract as /compare_face, but takes a JSON body with Base64 images.

    Expects JSON keys `image1_base64` and `image2_base64`.  Returns the same
    payload shape as /compare_face.
    """
    def _json_response(payload):
        # Shared response boilerplate (status + explicit JSON content type).
        response = jsonify(payload)
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    try:
        # Parse the JSON body once; the original parsed it twice.
        content = request.get_json()
        image_data1 = base64.b64decode(content['image1_base64'])
        image1 = apply_exif_rotation(Image.open(io.BytesIO(image_data1))).convert("RGB")
    except Exception:
        return _json_response({"resultCode": "Failed to parse image1 base64"})

    try:
        image_data2 = base64.b64decode(content['image2_base64'])
        image2 = apply_exif_rotation(Image.open(io.BytesIO(image_data2))).convert("RGB")
    except Exception:
        return _json_response({"resultCode": "Failed to parse image2 base64"})

    image_np1 = np.asarray(image1)
    faces1 = (Face * maxFaceCount1)()
    faceNum1 = processImage(image_np1, image_np1.shape[1], image_np1.shape[0], faces1, maxFaceCount1)

    faces1_result = [{"x": faces1[i].x, "y": faces1[i].y,
                      "width": faces1[i].width, "height": faces1[i].height}
                     for i in range(faceNum1)]

    image_np2 = np.asarray(image2)
    faces2 = (Face * maxFaceCount1)()
    faceNum2 = processImage(image_np2, image_np2.shape[1], image_np2.shape[0], faces2, maxFaceCount1)

    faces2_result = [{"x": faces2[i].x, "y": faces2[i].y,
                      "width": faces2[i].width, "height": faces2[i].height}
                     for i in range(faceNum2)]

    if faceNum1 > 0 and faceNum2 > 0:
        results = []
        for i in range(faceNum1):
            for j in range(faceNum2):
                score = verifyFeat(faces1[i].featSize, faces1[i].featData, faces2[j].featSize, faces2[j].featData)
                results.append({"face1": i, "face2": j, "score": score})

        return _json_response({"resultCode": "Ok", "faces1": faces1_result,
                               "faces2": faces2_result, "results": results})
    elif faceNum1 == 0:
        # BUG FIX: previously jsonify() received the raw ctypes arrays here,
        # which is not JSON-serializable; return the plain-dict lists instead.
        return _json_response({"resultCode": "No face1",
                               "faces1": faces1_result, "faces2": faces2_result})
    else:
        return _json_response({"resultCode": "No face2",
                               "faces1": faces1_result, "faces2": faces2_result})
218
+
219
@app.route('/detect_face', methods=['POST'])
def detect_face():
    """Detect faces in one uploaded image and report rect, pose, demographics,
    emotion, and attribute scores per face.

    Expects a multipart/form-data POST with file field `image`.  Always
    returns HTTP 200 with a `resultCode` field ("Ok" / "No face" / error).
    """
    file = request.files['image']

    try:
        image = apply_exif_rotation(Image.open(file)).convert('RGB')
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate instead of being reported as a file error.
        response = jsonify({"resultCode": "Failed to open file"})
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    image_np = np.asarray(image)
    faces = (Face * maxFaceCount2)()
    faceNum = processImage(image_np, image_np.shape[1], image_np.shape[0], faces, maxFaceCount2)

    if faceNum > 0:
        faces_result = []
        for i in range(faceNum):
            faces_result.append({
                "rect": {"x": faces[i].x, "y": faces[i].y, "width": faces[i].width, "height": faces[i].height},
                "angles": {"yaw": faces[i].yaw, "roll": faces[i].roll, "pitch": faces[i].pitch},
                "age_gender": {"age": faces[i].age, "gender": faces[i].gender},
                "emotion": {"neutral": faces[i].neutral, "happy": faces[i].happy, "angry": faces[i].angry,
                            "surprised": faces[i].surprised, "disgusted": faces[i].disgusted, "sad": faces[i].sad, "scared": faces[i].scared},
                "attribute": {"masked": faces[i].masked, "left_eye_opened": faces[i].left_eye_opened, "right_eye_opened": faces[i].right_eye_opened},
            })

        response = jsonify({"resultCode": "Ok", "result": faces_result})
    else:
        response = jsonify({"resultCode": "No face"})

    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
255
+
256
if __name__ == '__main__':
    # Bind on all interfaces; the port is overridable via the PORT env var.
    listen_port = int(os.environ.get("PORT", 8000))
    app.run(host='0.0.0.0', port=listen_port)
demo.py ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Standard library
import os
import io
import json
import base64

# Third-party
import requests
import gradio as gr
from gradio.components import Image  # immediately shadowed below; kept for compatibility
from PIL import Image as PILImage, ImageDraw, ImageFont  # This import may be needed if you're processing images
# NOTE(review): the original file imported PIL's Image twice after the gradio
# one; deduplicated here while preserving the effective binding (PIL Image).
from PIL import Image
14
+
15
def face_crop(image, face_rect):
    """Cut `face_rect` out of `image` and scale it to a 150px-tall thumbnail.

    `face_rect` is a dict with keys 'x', 'y', 'width', 'height'; the box is
    clamped to the image bounds before cropping.
    """
    left = face_rect.get('x')
    top = face_rect.get('y')
    box_w = face_rect.get('width')
    box_h = face_rect.get('height')

    # Clamp the rectangle so it lies fully inside the image.
    if left < 0:
        left = 0
    if top < 0:
        top = 0
    if left + box_w >= image.width:
        box_w = image.width - left
    if top + box_h >= image.height:
        box_h = image.height - top

    thumb = image.crop((left, top, left + box_w - 1, top + box_h - 1))
    # Preserve aspect ratio at a fixed 150px output height.
    aspect = thumb.width / float(thumb.height)
    return thumb.resize((int(aspect * 150), int(150)))
38
+
39
def pil_image_to_base64(image, format="PNG"):
    """
    Converts a PIL.Image object to a Base64-encoded string.

    :param image: PIL.Image object
    :param format: Format to save the image, e.g., "PNG", "JPEG"
    :return: Base64-encoded string
    """
    raw = io.BytesIO()
    image.save(raw, format=format)
    # getvalue() returns the whole buffer regardless of stream position,
    # so no rewind is required before encoding.
    encoded = base64.b64encode(raw.getvalue())
    return encoded.decode('utf-8')
55
+
56
def compare_face(image1, image2, verifyThreshold):
    """Send two PIL images to the local /compare_face API and render results.

    Returns a [html, json] pair: `html` shows every cross-image face pair with
    its similarity score (green icon when score > verifyThreshold), `json` is
    the raw API response.
    """
    try:
        img_bytes1 = io.BytesIO()
        image1.save(img_bytes1, format="JPEG")
        img_bytes1.seek(0)
    except Exception:
        return ["Failed to open image1", {"resultCode": "Failed to open image1"}]

    try:
        img_bytes2 = io.BytesIO()
        image2.save(img_bytes2, format="JPEG")
        img_bytes2.seek(0)
    except Exception:
        return ["Failed to open image2", {"resultCode": "Failed to open image2"}]

    url = "http://127.0.0.1:8000/compare_face"
    files = {'image1': img_bytes1, 'image2': img_bytes2}
    result = requests.post(url=url, files=files)
    if not result.ok:
        return [result.text, {"resultCode": result.text}]

    json_result = result.json()
    if json_result.get("resultCode") != "Ok":
        return [json_result.get("resultCode"), json_result]

    html = ""
    # These keys hold JSON arrays, so default to lists (the original
    # defaulted to {} which would break indexing below).
    faces1 = json_result.get("faces1", [])
    faces2 = json_result.get("faces2", [])
    results = json_result.get("results", [])

    # Loop variable renamed from `result`, which shadowed the HTTP response.
    for pair in results:
        score = pair.get('score')
        face1_idx = pair.get('face1')
        face2_idx = pair.get('face2')

        face_image1 = face_crop(image1, faces1[face1_idx])
        face_value1 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image1, format="PNG"))

        face_image2 = face_crop(image2, faces2[face2_idx])
        face_value2 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image2, format="PNG"))

        match_icon = '<svg fill="red" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.92V10.2H19V13.92H0ZM0 21.64V17.92H19V21.64H0Z"></path><path d="M14.08 0H18.08L5.08 32H1.08L14.08 0Z"></path></svg>'
        if score > verifyThreshold:
            match_icon = '<svg fill="green" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.9202V10.2002H19V13.9202H0ZM0 21.6402V17.9202H19V21.6402H0Z"></path></svg>'

        item_value = ('<div style="align-items: center; gap: 10px; display: flex; flex-direction: column;">'
                      '<div style="display: flex; align-items: center; gap: 20px;">'
                      '{face_value1}'
                      '{match_icon}'
                      '{face_value2}'
                      '</div>'
                      '<div style="text-align: center; margin-top: 10px;">'
                      'Score: {score}'
                      '</div>'
                      '</div>'
                      ).format(face_value1=face_value1, face_value2=face_value2, match_icon=match_icon, score=f"{score:.2f}")
        html += item_value
        html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'

    return [html, json_result]
116
+
117
+
118
def detect_face(image):
    """Send one PIL image to the local /detect_face API and render the result.

    Returns a [html, json] pair: `html` shows each face thumbnail with its
    attribute/emotion tables, `json` is the raw API response.
    """
    try:
        img_bytes = io.BytesIO()
        image.save(img_bytes, format="JPEG")
        img_bytes.seek(0)
    except Exception:
        return ["Failed to open image", {"resultCode": "Failed to open image"}]

    url = "http://127.0.0.1:8000/detect_face"
    files = {'image': img_bytes}
    result = requests.post(url=url, files=files)
    if not result.ok:
        return [result.text, {"resultCode": result.text}]

    json_result = result.json()

    html = ""
    resultCode = json_result.get("resultCode")
    if resultCode == "Ok":
        # "result" is a JSON array of per-face dicts; default to a list
        # (the original defaulted to {}).
        faces = json_result.get("result", [])

        for face in faces:
            face_rect = face.get("rect", {})
            angles = face.get("angles", {})
            age_gender = face.get("age_gender", {})
            emotion = face.get("emotion", {})
            attribute = face.get("attribute", {})

            face_image = face_crop(image, face_rect)
            face_value = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image, format="PNG"))

            item_value = ('<div style="display: flex; justify-content: center; align-items: flex-start; margin: 10px;">'
                          '<div style="display: flex; align-items: flex-start; gap: 40px; ">'
                          '{face_value}'
                          '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                          '<div>'
                          '<p><b>Age</b></p>'
                          '<p><b>Gender</b></p>'
                          '<p><b>Mask</b></p>'
                          '<p><b>Left Eye</b></p>'
                          '<p><b>Right Eye</b></p>'
                          '<p><b>Yaw</b></p>'
                          '<p><b>Roll</b></p>'
                          '<p><b>Pitch</b></p>'
                          '</div>'
                          '<div>'
                          '<p>{age}</p>'
                          '<p>{gender}</p>'
                          '<p>{masked}</p>'
                          '<p>{left_eye}</p>'
                          '<p>{right_eye}</p>'
                          '<p>{yaw}</p>'
                          '<p>{roll}</p>'
                          '<p>{pitch}</p>'
                          '</div>'
                          '</div>'
                          '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                          '<div>'
                          '<p><b>Neutral</b></p>'
                          '<p><b>Happy</b></p>'
                          '<p><b>Angry</b></p>'
                          '<p><b>Surprised</b></p>'
                          '<p><b>Disgusted</b></p>'
                          '<p><b>Sad</b></p>'
                          '<p><b>Scared</b></p>'
                          '</div>'
                          '<div>'
                          '<p>{neutral}</p>'
                          '<p>{happy}</p>'
                          '<p>{angry}</p>'
                          '<p>{surprised}</p>'
                          '<p>{disgusted}</p>'
                          '<p>{sad}</p>'
                          '<p>{scared}</p>'
                          '</div>'
                          '</div>'
                          '</div></div>').format(face_value=face_value,
                                                 age=age_gender.get('age'),
                                                 gender="Female" if age_gender.get('gender') == 0 else "Male",
                                                 neutral=f"{emotion.get('neutral'):.2f}",
                                                 happy=f"{emotion.get('happy'):.2f}",
                                                 angry=f"{emotion.get('angry'):.2f}",
                                                 surprised=f"{emotion.get('surprised'):.2f}",
                                                 disgusted=f"{emotion.get('disgusted'):.2f}",
                                                 sad=f"{emotion.get('sad'):.2f}",
                                                 scared=f"{emotion.get('scared'):.2f}",
                                                 masked="Yes" if attribute.get('masked') == 1 else "No",
                                                 left_eye="Open" if attribute.get('left_eye_opened') == 1 else "Close",
                                                 right_eye="Open" if attribute.get('right_eye_opened') == 1 else "Close",
                                                 yaw=f"{angles.get('yaw'):.2f}",
                                                 roll=f"{angles.get('roll'):.2f}",
                                                 pitch=f"{angles.get('pitch'):.2f}",
                                                 )

            html += item_value
            html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'
    else:
        html = "No face!"

    return [html, json_result]
218
+
219
with gr.Blocks() as demo:
    # Page banner.
    gr.Markdown(
        """
        <div style="display: flex; align-items: center;justify-content: center;">
        <p style="font-size: 36px; font-weight: bold;">Face Recognition, Face Attribute</p>
        </div>
        """
    )

    # --- Tab 1: pairwise face recognition -------------------------------
    with gr.TabItem("Face Recognition"):
        with gr.Row():
            with gr.Column(scale=7):
                with gr.Row():
                    with gr.Column():
                        image_input1 = gr.Image(type='pil')
                        gr.Examples(['examples/1.webp', 'examples/2.webp', 'examples/3.webp', 'examples/4.webp'],
                                    inputs=image_input1)
                    with gr.Column():
                        image_input2 = gr.Image(type='pil')
                        gr.Examples(['examples/5.webp', 'examples/6.webp', 'examples/7.webp', 'examples/8.webp'],
                                    inputs=image_input2)
                verifyThreshold = gr.Slider(minimum=0, maximum=1, value=0.67, label="Verify Threshold")
                face_recog_button = gr.Button("Face Recognition")
            with gr.Column(scale=3):
                with gr.TabItem("Output"):
                    recog_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    recog_json_output = gr.JSON()

    # --- Tab 2: single-image face attributes ----------------------------
    with gr.TabItem("Face Attribute"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type='pil')
                gr.Examples(['examples/11.webp', 'examples/12.webp', 'examples/13.webp', 'examples/14.webp'],
                            inputs=image_input)
                face_attr_button = gr.Button("Face Attribute")
            with gr.Column():
                with gr.TabItem("Output"):
                    detect_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    detect_json_output = gr.JSON()

    # Wire the buttons to the client functions defined above.
    face_recog_button.click(compare_face, inputs=[image_input1, image_input2, verifyThreshold], outputs=[recog_html_output, recog_json_output])
    face_attr_button.click(detect_face, inputs=[image_input], outputs=[detect_html_output, detect_json_output])

demo.launch(server_name="0.0.0.0", server_port=7860)
dependency/lib/libonnxruntime.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78a4cf2aa1ae121f55307baff4ea4423742aa97800282798ca3fc166eb76cc33
3
+ size 13967344
dependency/lib/libopyfaceenv.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9e7da1c562656ba0dc15e78912d755e049b15d7e04f9fcb529224824bf69e88
3
+ size 20793360
engine/bin/age.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c30be02b190ff35489f77eed3def79245eac5f8cc6187b5f375d3db9c3464728
3
+ size 2733265
engine/bin/detect1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b31fc12be022d0e32becd8cb6767aabeb8e082647adb2646dd3418f5c68a6d31
3
+ size 1723908
engine/bin/detect2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fd3a4259604c4533f39b6d7f57b3c16d00a1721ba5b60501074c90d06e934af
3
+ size 7959332
engine/bin/em.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d47354be00d7125aba538c9bc09cb37c9a246cfd50b42cffc45be4a865e2198c
3
+ size 16017560
engine/bin/eyes.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:158a3ffa5c482462a58e533952f6aa0a4a62aefa0875ada21d19ee4d076d8602
3
+ size 334300
engine/bin/gender.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71a2ffa0a93d97271546bc40bcc065958945858d7d1ccc0fcff986000b46b15
3
+ size 2733307
engine/bin/mask.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a295d3e5336cb33ed1fa956ea36ee6d9e64d17de03a3860e16e069599cba8f5f
3
+ size 950768
engine/bin/recog1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c89a00205ee1559cbd6818a7f6c15476b79fc24dcf1193cd75122dad1b2fd81c
3
+ size 424155759
engine/bin/recog2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:423721ee4d28c22239087f28c922963d7b4948f4365140f55735aed61c11e360
3
+ size 51391196
engine/bin/recog3.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18c9f8ea119cff8757a2640b2c101b3dc2840e1c4de5d7452585112de2d1b5e5
3
+ size 6424533
engine/conf/facerec/ag.xml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <age_gender_impl_name>version_3.0</age_gender_impl_name>
5
+
6
+ <analysisa>bin/age.bin</analysisa>
7
+ <age_use_cuda>false</age_use_cuda>
8
+ <age_use_legacy>false</age_use_legacy>
9
+ <analysisg>bin/gender.bin</analysisg>
10
+ <gender_use_cuda>false</gender_use_cuda>
11
+ <gender_use_legacy>false</gender_use_legacy>
12
+
13
+ </opencv_storage>
engine/conf/facerec/config.xml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ <conf>
2
+ </conf>
engine/conf/facerec/detect.xml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <capturer_impl_name>fld_capturer</capturer_impl_name>
5
+
6
+ <capturer_type>frontal</capturer_type>
7
+
8
+
9
+ <detector_type>refa</detector_type>
10
+ <min_size>0.045</min_size>
11
+ <max_size>3</max_size>
12
+ <num_threads>1</num_threads>
13
+ <score_threshold>0.89</score_threshold>
14
+ <nms_iou_threshold>0.4</nms_iou_threshold>
15
+ <coarse_score_threshold>0.5</coarse_score_threshold>
16
+
17
+ <refa_id>refa-a</refa_id>
18
+ <refa_filepath>bin/detect1.bin</refa_filepath>
19
+
20
+ <use_cuda>0</use_cuda>
21
+ <gpu_index>0</gpu_index>
22
+ <use_advanced_multithreading>0</use_advanced_multithreading>
23
+
24
+ <fitter_type>fda</fitter_type>
25
+ <fda_file>bin/detect2.bin</fda_file>
26
+
27
+ <iris_enabled>0</iris_enabled>
28
+ <iris_config_filepath>iris_fitter.xml</iris_config_filepath>
29
+
30
+
31
+ <downscale_rawsamples_to_preferred_size>1</downscale_rawsamples_to_preferred_size>
32
+
33
+ </opencv_storage>
engine/conf/facerec/em.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <emotion_impl_name>version_2.0</emotion_impl_name>
5
+
6
+ <analysis>bin/em.bin</analysis>
7
+ <use_cuda>false</use_cuda>
8
+ <use_legacy>false</use_legacy>
9
+
10
+ </opencv_storage>
engine/conf/facerec/eyes.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <face_attribute_impl>the_one</face_attribute_impl>
5
+
6
+ <face_attribute_name>eyes_openness_v2</face_attribute_name>
7
+ <face_attribute_data>bin/eyes.bin</face_attribute_data>
8
+ <score_threshold>0.5</score_threshold>
9
+
10
+ </opencv_storage>
engine/conf/facerec/mask.xml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <face_attribute_impl>version_2.0</face_attribute_impl>
5
+
6
+ <face_attribute_name>masked_face</face_attribute_name>
7
+ <face_attribute_data>bin/mask.bin</face_attribute_data>
8
+ <score_threshold>0.5</score_threshold>
9
+
10
+ <use_mobile_gpu>0</use_mobile_gpu>
11
+
12
+ </opencv_storage>
engine/conf/facerec/recog.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <recognizer_impl_name>method_link</recognizer_impl_name>
5
+ <recognizer_config>conf/facerec/recog_param.xml</recognizer_config>
6
+
7
+ </opencv_storage>
engine/conf/facerec/recog_param.xml ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <recognizer_impl_name>method12v1000</recognizer_impl_name>
5
+
6
+
7
+
8
+ <part0>bin/detect2.bin</part0>
9
+ <part1>bin/recog1.bin</part1>
10
+ <part2>bin/recog2.bin</part2>
11
+ <part3>bin/recog3.bin</part3>
12
+
13
+ <use_mobile_gpu>0</use_mobile_gpu>
14
+ <use_cuda>0</use_cuda>
15
+ <gpu_index>0</gpu_index>
16
+
17
+
18
+ <roc>
19
+ 1.512680053711
20
+ 0.000000000000
21
+ 0.017418799922
22
+ 0.000000000000
23
+
24
+ 244.525177001953
25
+ 0.000000000000
26
+ 0.036921270192
27
+ 0.000145137310
28
+
29
+ 487.537658691406
30
+ 0.000000000000
31
+ 0.070433743298
32
+ 0.000585138798
33
+
34
+ 730.550170898438
35
+ 0.000000000000
36
+ 0.091564692557
37
+ 0.001321136951
38
+
39
+ 973.562683105469
40
+ 0.000000000000
41
+ 0.102614454925
42
+ 0.002369165421
43
+
44
+ 1216.575195312500
45
+ 0.000000000000
46
+ 0.112338289618
47
+ 0.003732264042
48
+
49
+ 1459.587646484375
50
+ 0.000000000000
51
+ 0.131876721978
52
+ 0.005405783653
53
+
54
+ 1702.600219726562
55
+ 0.000000000000
56
+ 0.163213133812
57
+ 0.007417380810
58
+
59
+ 1945.612670898438
60
+ 0.000000000000
61
+ 0.215010792017
62
+ 0.009764373302
63
+
64
+ 2188.625244140625
65
+ 0.000000000000
66
+ 0.278898477554
67
+ 0.012435853481
68
+
69
+ 2431.637695312500
70
+ 0.000000000000
71
+ 0.353262901306
72
+ 0.015471816063
73
+
74
+ 2674.650146484375
75
+ 0.000000000000
76
+ 0.435616225004
77
+ 0.018863618374
78
+
79
+ 2917.662597656250
80
+ 0.000000000000
81
+ 0.517174601555
82
+ 0.022593140602
83
+
84
+ 3160.675048828125
85
+ 0.000000000000
86
+ 0.594927132130
87
+ 0.026714444160
88
+
89
+ 3403.687744140625
90
+ 0.000000000000
91
+ 0.665740191936
92
+ 0.031211495399
93
+
94
+ 3646.700195312500
95
+ 0.000000000000
96
+ 0.728241384029
97
+ 0.036059856415
98
+
99
+ 3889.712646484375
100
+ 0.000000000000
101
+ 0.782364964485
102
+ 0.041326820850
103
+
104
+ 4132.725097656250
105
+ 0.000000000000
106
+ 0.828701794147
107
+ 0.046990036964
108
+
109
+ 4375.737792968750
110
+ 0.000000000000
111
+ 0.869253814220
112
+ 0.053017497063
113
+
114
+ 4618.750000000000
115
+ 0.000000000000
116
+ 0.900053620338
117
+ 0.059491038322
118
+
119
+ 4861.762695312500
120
+ 0.000000000000
121
+ 0.925056338310
122
+ 0.066381156445
123
+
124
+ 5104.774902343750
125
+ 0.000000005995
126
+ 0.944028019905
127
+ 0.073648035526
128
+
129
+ 5347.787597656250
130
+ 0.000000007104
131
+ 0.958393573761
132
+ 0.081388950348
133
+
134
+ 5590.800292968750
135
+ 0.000000008051
136
+ 0.968550026417
137
+ 0.089514672756
138
+
139
+ 5833.812500000000
140
+ 0.000000009453
141
+ 0.976274311543
142
+ 0.098133325577
143
+
144
+ 6076.825195312500
145
+ 0.000000016849
146
+ 0.982362270355
147
+ 0.107202410698
148
+
149
+ 6319.837402343750
150
+ 0.000000037714
151
+ 0.986156880856
152
+ 0.116668164730
153
+
154
+ 6562.850097656250
155
+ 0.000000125729
156
+ 0.989229261875
157
+ 0.126655399799
158
+
159
+ 6805.862792968750
160
+ 0.000000322775
161
+ 0.990983843803
162
+ 0.137113332748
163
+
164
+ 7048.875000000000
165
+ 0.000000844537
166
+ 0.992396354675
167
+ 0.147979438305
168
+
169
+ 7291.887695312500
170
+ 0.000002360034
171
+ 0.993351519108
172
+ 0.168762803078
173
+
174
+ 7534.899902343750
175
+ 0.000006154218
176
+ 0.994017183781
177
+ 0.214203178883
178
+
179
+ 7777.912597656250
180
+ 0.000016113172
181
+ 0.994441568851
182
+ 0.278451800346
183
+
184
+ 8020.925292968750
185
+ 0.000040494200
186
+ 0.994999945164
187
+ 0.356456458569
188
+
189
+ 8263.937500000000
190
+ 0.000099464734
191
+ 0.995369017124
192
+ 0.441985607147
193
+
194
+ 8506.950195312500
195
+ 0.000238555382
196
+ 0.995669007301
197
+ 0.528621554375
198
+
199
+ 8749.962890625000
200
+ 0.000559987093
201
+ 0.995895981789
202
+ 0.611591219902
203
+
204
+ 8992.974609375000
205
+ 0.001302033430
206
+ 0.996132075787
207
+ 0.684468984604
208
+
209
+ 9235.987304687500
210
+ 0.003006055485
211
+ 0.996436715126
212
+ 0.741078972816
213
+
214
+ 9479.000000000000
215
+ 0.006861325353
216
+ 0.996610999107
217
+ 0.782298445702
218
+
219
+ 9722.012695312500
220
+ 0.015361220576
221
+ 0.996944129467
222
+ 0.817885696888
223
+
224
+ 9965.025390625000
225
+ 0.033221695572
226
+ 0.997113645077
227
+ 0.848900675774
228
+
229
+ 10208.037109375000
230
+ 0.067775554955
231
+ 0.997449994087
232
+ 0.876659750938
233
+
234
+ 10451.049804687500
235
+ 0.128415152431
236
+ 0.997725605965
237
+ 0.904105246067
238
+
239
+ 10694.062500000000
240
+ 0.222135275602
241
+ 0.998053014278
242
+ 0.929574251175
243
+
244
+ 10937.075195312500
245
+ 0.349268019199
246
+ 0.998477220535
247
+ 0.946929633617
248
+
249
+ 11180.087890625000
250
+ 0.499025940895
251
+ 0.998991847038
252
+ 0.953711867332
253
+
254
+ 11423.099609375000
255
+ 0.650276362896
256
+ 0.999405980110
257
+ 0.959191501141
258
+
259
+ 11666.112304687500
260
+ 0.781673550606
261
+ 0.999659240246
262
+ 0.964288055897
263
+
264
+ 11909.125000000000
265
+ 0.879517912865
266
+ 0.999745011330
267
+ 0.968977451324
268
+
269
+ 12152.137695312500
270
+ 0.941511988640
271
+ 0.999958992004
272
+ 0.973327934742
273
+
274
+ 12395.150390625000
275
+ 0.975172519684
276
+ 0.999983191490
277
+ 0.977318882942
278
+
279
+ 12638.162109375000
280
+ 0.990831315517
281
+ 0.999997973442
282
+ 0.980933845043
283
+
284
+ 12881.174804687500
285
+ 0.997046291828
286
+ 1.000000000000
287
+ 0.984228491783
288
+
289
+ 13124.187500000000
290
+ 0.999171257019
291
+ 1.000000000000
292
+ 0.987184703350
293
+
294
+ 13367.200195312500
295
+ 0.999799430370
296
+ 1.000000000000
297
+ 0.989796817303
298
+
299
+ 13610.212890625000
300
+ 0.999957025051
301
+ 1.000000000000
302
+ 0.992106020451
303
+
304
+ 13853.224609375000
305
+ 0.999992012978
306
+ 1.000000000000
307
+ 0.994101285934
308
+
309
+ 14096.237304687500
310
+ 0.999998986721
311
+ 1.000000000000
312
+ 0.995782136917
313
+
314
+ 14339.250000000000
315
+ 1.000000000000
316
+ 1.000000000000
317
+ 0.997178018093
318
+
319
+ 14582.262695312500
320
+ 1.000000000000
321
+ 1.000000000000
322
+ 0.998284399509
323
+
324
+ 14825.275390625000
325
+ 1.000000000000
326
+ 1.000000000000
327
+ 0.999105751514
328
+
329
+ 15068.287109375000
330
+ 1.000000000000
331
+ 1.000000000000
332
+ 0.999660372734
333
+
334
+ 15311.299804687500
335
+ 1.000000000000
336
+ 1.000000000000
337
+ 0.999948918819
338
+
339
+ </roc>
340
+
341
+ </opencv_storage>
engine/header.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import numpy as np
4
+ import ctypes, ctypes.util
5
+ from enum import Enum
6
+ from ctypes import *
7
+ from numpy.ctypeslib import ndpointer
8
+
9
+ def print_log(fmt): print("[LOG] \033[98m{}\033[00m" .format(fmt))
10
+ def print_info(fmt): print("[INFO] \033[92m{}\033[00m" .format(fmt))
11
+ def print_error(fmt): print("[ERR] \033[91m{}\033[00m" .format(fmt))
12
+ def print_warning(fmt): print("[WARNING] \033[93m{}\033[00m" .format(fmt))
13
+
14
+ class ENGINE_CODE(Enum):
15
+ E_NO_FACE = 0
16
+ E_ACTIVATION_ERROR = -1
17
+ E_ENGINE_INIT_ERROR = -2
18
+
19
+ class Face(Structure):
20
+ _fields_ = [("x", c_int32), ("y", c_int32), ("width", c_int32), ("height", c_int32),
21
+ ("yaw", c_float), ("roll", c_float), ("pitch", c_float),
22
+ ("age", c_int32), ("gender", c_int32),
23
+ ("neutral", c_float), ("happy", c_float), ("angry", c_float),
24
+ ("surprised", c_float), ("disgusted", c_float), ("sad", c_float), ("scared", c_float),
25
+ ("masked", c_int32), ("left_eye_opened", c_int32), ("right_eye_opened", c_int32),
26
+ ("featSize", c_int32),
27
+ ("featData", c_ubyte * 512)
28
+ ]
29
+
30
+ libPath = os.path.abspath(os.path.dirname(__file__)) + '/libopyfacerecog.so'
31
+ lib = cdll.LoadLibrary(libPath)
32
+
33
+ getHWID = lib.getHWID
34
+ getHWID.argtypes = []
35
+ getHWID.restype = c_char_p
36
+
37
+ setLicenseKey = lib.setLicenseKey
38
+ setLicenseKey.argtypes = [c_char_p]
39
+ setLicenseKey.restype = c_int32
40
+
41
+ initSDK = lib.initSDK
42
+ initSDK.argtypes = [c_char_p]
43
+ initSDK.restype = c_int32
44
+
45
+ processImage = lib.processImage
46
+ processImage.argtypes = [ndpointer(c_ubyte, flags='C_CONTIGUOUS'), c_int32, c_int32, POINTER(Face), c_int32]
47
+ processImage.restype = c_int32
48
+
49
+ verifyFeat = lib.verifyFeat
50
+ verifyFeat.argtypes = [c_int32, c_ubyte * 512, c_int32, c_ubyte * 512]
51
+ verifyFeat.restype = c_float
engine/libopyfacerecog.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef27df68830fd6d06ae547f4955cd2c570e4d888eda31838301a6db819f7dfd3
3
+ size 2925659
examples/1.webp ADDED
examples/11.webp ADDED
examples/12.webp ADDED
examples/13.webp ADDED
examples/14.webp ADDED
examples/2.webp ADDED
examples/3.webp ADDED
examples/4.webp ADDED
examples/5.webp ADDED

Git LFS Details

  • SHA256: bf3ae5274c066fe13ebc73c83b66768c524b4481ce252dabebc18111338a14c1
  • Pointer size: 131 Bytes
  • Size of remote file: 102 kB
examples/6.webp ADDED
examples/7.webp ADDED
examples/8.webp ADDED
install.sh ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/sh
2
+
3
+ echo "Install environment..."
4
+
5
+ # Install packages:
6
+ sudo apt-get update -y && sudo apt-get install -y python3 python3-pip python3-opencv libusb-0.1-4
7
+
8
+ # Install requirements:
9
+ python3 -m pip install --upgrade pip && python3 -m pip install opencv-python flask flask-cors gradio datadog_api_client
10
+
11
+ # Copy OpenVino library
12
+ sudo cp -rf dependency/lib/* /usr/lib
13
+
14
+ echo "Installed successfully!"
license.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ f8I6pxZJPlJVJCz+AaEZdm5pva6Fu3hTADFzPLLHEP1SHkzolYyErq/nGREb3hB9aSP1WiqySDbv
2
+ OWKJCJ5OhYI+r5POImX4ZzmQTBnU431MBLHZ5eH3lcgOdy1HNF1rIyF+rrU5Ncp/kHeRKTkg0D3y
run_demo.sh ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ exec python3 demo.py &
4
+ exec python3 app.py