cadyderwin committed on
Commit
5e65c0c
·
verified ·
1 Parent(s): 60d1623

Upload 40 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ lib/libonnxruntime.so filter=lfs diff=lfs merge=lfs -text
37
+ lib/libopyfaceenv.so filter=lfs diff=lfs merge=lfs -text
38
+ lib/libopyfacerecog.so filter=lfs diff=lfs merge=lfs -text
Dockerfile ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.8-slim
WORKDIR /home/opyfacerecog

# Install Python dependencies first so this expensive layer is cached and
# reused across source-only changes (the original copied all sources before
# running pip, invalidating the cache on every edit).
COPY ./requirements.txt .
RUN pip3 install -r requirements.txt

# Application sources and assets.
COPY ./opyfacerecog.py .
COPY ./app.py .
COPY ./demo.py .
COPY ./license.txt .
COPY ./run.sh .
COPY ./examples ./examples
COPY ./model ./model

# Native SDK libraries: the whole lib/ dir goes onto the system loader path,
# and libopyfacerecog.so is also placed under ./lib because opyfacerecog.py
# loads it from a path relative to the module file.
COPY ./lib /usr/lib
COPY ./lib/libopyfacerecog.so ./lib/libopyfacerecog.so

RUN chmod a+x run.sh

# 9000: Flask API, 7860: Gradio demo. EXPOSE is metadata only; moved before
# CMD to follow convention.
EXPOSE 9000 7860
CMD ["./run.sh"]
app.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
sys.path.append('.')  # make the sibling modules (opyfacerecog, face) importable

import os
import numpy as np
import base64
import json
import io

from PIL import Image, ExifTags
from flask import Flask, request, jsonify
from opyfacerecog import getHWID
from opyfacerecog import setLicenseKey
from opyfacerecog import initSDK
from opyfacerecog import processImage
from opyfacerecog import verifyFeat
from face import Face

# Path of the license file bundled next to this script.
licenseKeyPath = "license.txt"
# NOTE(review): `license` shadows the builtin of the same name.
license = ""

# Max faces extracted per image by the compare endpoints ...
maxFaceCount1 = 5
# ... and by the standalone detect endpoint.
maxFaceCount2 = 10

# Best-effort license load: on failure we continue with an empty key so the
# SDK calls below still run (and report their own status).
try:
    with open(licenseKeyPath, 'r') as file:
        license = file.read().strip()
except IOError as exc:
    print("failed to open license.txt: ", exc.errno)
print("License Key: ", license)

# Hardware id (bytes) that the license key is bound to.
hwid = getHWID()
print("HWID: ", hwid.decode('utf-8'))

ret = setLicenseKey(license.encode('utf-8'))
print("Set License: ", ret)

# "model" is the directory containing the SDK's model binaries/configs.
ret = initSDK("model".encode('utf-8'))
print("Init: ", ret)

app = Flask(__name__)
42
+
43
def apply_exif_rotation(image):
    """Return *image* rotated upright according to its EXIF Orientation tag.

    Only the pure-rotation orientations (3, 6, 8) are handled; images with
    no EXIF data (or no usable ``_getexif``) are returned unchanged.
    """
    try:
        exif = image._getexif()
        if exif is not None:
            # Locate the numeric tag id for 'Orientation'.
            orientation_tag = None
            for tag_id, tag_name in ExifTags.TAGS.items():
                if tag_name == 'Orientation':
                    orientation_tag = tag_id
                    break

            # Map orientation value -> counter-clockwise rotation in degrees.
            degrees = {3: 180, 6: 270, 8: 90}.get(exif.get(orientation_tag, None))
            if degrees is not None:
                image = image.rotate(degrees, expand=True)

    except AttributeError:
        # Non-JPEG PIL images (and arbitrary objects) lack _getexif.
        print("No EXIF data found")

    return image
66
+
67
+
68
@app.route('/compare_face', methods=['POST'])
def compare_face():
    """Detect faces in two uploaded images and score every cross-image pair.

    Expects multipart form fields 'image1' and 'image2'. Every outcome is
    returned as HTTP 200 JSON; callers inspect "resultCode".
    """

    def _respond(payload):
        # All responses share the same status/headers.
        response = jsonify(payload)
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    def _detect(image):
        # Run the SDK detector; returns (count, ctypes Face array,
        # JSON-serializable rectangle list).
        pixels = np.asarray(image)
        faces = (Face * maxFaceCount1)()
        count = processImage(pixels, pixels.shape[1], pixels.shape[0], faces, maxFaceCount1)
        rects = [{"x": faces[i].x, "y": faces[i].y,
                  "width": faces[i].width, "height": faces[i].height}
                 for i in range(max(count, 0))]
        return count, faces, rects

    file1 = request.files['image1']
    file2 = request.files['image2']

    try:
        image1 = apply_exif_rotation(Image.open(file1)).convert('RGB')
    except Exception:  # unreadable / unsupported image data
        return _respond({"resultCode": "Failed to open file1"})

    try:
        image2 = apply_exif_rotation(Image.open(file2)).convert('RGB')
    except Exception:
        return _respond({"resultCode": "Failed to open file2"})

    faceNum1, faces1, faces1_result = _detect(image1)
    faceNum2, faces2, faces2_result = _detect(image2)

    if faceNum1 > 0 and faceNum2 > 0:
        results = [{"face1": i, "face2": j,
                    "score": verifyFeat(faces1[i].featSize, faces1[i].featData,
                                        faces2[j].featSize, faces2[j].featData)}
                   for i in range(faceNum1) for j in range(faceNum2)]
        return _respond({"resultCode": "Ok", "faces1": faces1_result,
                         "faces2": faces2_result, "results": results})
    if faceNum1 == 0:
        # BUG FIX: the original passed the raw ctypes arrays to jsonify()
        # here, which raises TypeError; return the serializable rect lists.
        return _respond({"resultCode": "No face1",
                         "faces1": faces1_result, "faces2": faces2_result})
    if faceNum2 == 0:
        return _respond({"resultCode": "No face2",
                         "faces1": faces1_result, "faces2": faces2_result})
    # Defensive fallback for a negative SDK error code (the original
    # returned None here, producing an HTTP 500).
    return _respond({"resultCode": "Error"})
136
+
137
@app.route('/compare_face_base64', methods=['POST'])
def compare_face_base64():
    """Compare exactly one face per image, images supplied as Base64 JSON.

    Expects a JSON body with 'image1_base64' and 'image2_base64'. Returns a
    single similarity score; every outcome uses HTTP 200 with "resultCode".
    """

    def _respond(payload):
        # All responses share the same status/headers.
        response = jsonify(payload)
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    try:
        content = request.get_json()
        image_data1 = base64.b64decode(content['image1_base64'])
        image1 = apply_exif_rotation(Image.open(io.BytesIO(image_data1))).convert("RGB")
    except Exception:  # missing key, invalid Base64, or unreadable image
        return _respond({"resultCode": "Failed to parse image1 base64"})

    try:
        content = request.get_json()
        image_data2 = base64.b64decode(content['image2_base64'])
        image2 = apply_exif_rotation(Image.open(io.BytesIO(image_data2))).convert("RGB")
    except Exception:
        return _respond({"resultCode": "Failed to parse image2 base64"})

    image_np1 = np.asarray(image1)
    faces1 = (Face * maxFaceCount1)()
    faceNum1 = processImage(image_np1, image_np1.shape[1], image_np1.shape[0], faces1, maxFaceCount1)

    image_np2 = np.asarray(image2)
    faces2 = (Face * maxFaceCount1)()
    faceNum2 = processImage(image_np2, image_np2.shape[1], image_np2.shape[0], faces2, maxFaceCount1)

    if faceNum1 == 1 and faceNum2 == 1:
        score = verifyFeat(faces1[0].featSize, faces1[0].featData,
                           faces2[0].featSize, faces2[0].featData)
        return _respond({"resultCode": "Ok", "score": score})
    if faceNum1 == 0:
        return _respond({"resultCode": "No face1"})
    if faceNum2 == 0:
        return _respond({"resultCode": "No face2"})
    # BUG FIX: the original fell off the end (returned None -> HTTP 500)
    # whenever either image contained more than one face.
    return _respond({"resultCode": "Multiple faces"})
197
+
198
@app.route('/detect_face', methods=['POST'])
def detect_face():
    """Detect faces in the uploaded 'image' file and return each face's
    rectangle, pose angles, age/gender, emotion scores and attributes as
    JSON (always HTTP 200; "resultCode" carries the outcome)."""
    upload = request.files['image']

    try:
        image = apply_exif_rotation(Image.open(upload)).convert('RGB')
    except:
        response = jsonify({"resultCode": "Failed to open file"})
        response.status_code = 200
        response.headers["Content-Type"] = "application/json; charset=utf-8"
        return response

    pixels = np.asarray(image)
    detected = (Face * maxFaceCount2)()
    count = processImage(pixels, pixels.shape[1], pixels.shape[0], detected, maxFaceCount2)

    if count > 0:
        payload = []
        for idx in range(count):
            f = detected[idx]
            payload.append({
                "rect": {"x": f.x, "y": f.y, "width": f.width, "height": f.height},
                "angles": {"yaw": f.yaw, "roll": f.roll, "pitch": f.pitch},
                "age_gender": {"age": f.age, "gender": f.gender},
                "emotion": {"neutral": f.neutral, "happy": f.happy, "angry": f.angry,
                            "surprised": f.surprised, "disgusted": f.disgusted,
                            "sad": f.sad, "scared": f.scared},
                "attribute": {"masked": f.masked, "left_eye_opened": f.left_eye_opened,
                              "right_eye_opened": f.right_eye_opened},
            })
        response = jsonify({"resultCode": "Ok", "result": payload})
    else:
        response = jsonify({"resultCode": "No face"})

    response.status_code = 200
    response.headers["Content-Type"] = "application/json; charset=utf-8"
    return response
234
+
235
if __name__ == '__main__':
    # Serve on all interfaces; the port defaults to 9000 but can be
    # overridden through the PORT environment variable.
    app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 9000)))
demo.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import requests
4
+ import json
5
+ import io
6
+ from gradio.components import Image
7
+ from PIL import Image as PILImage, ImageDraw, ImageFont # This import may be needed if you're processing images
8
+
9
+ from PIL import Image
10
+
11
+ from PIL import Image
12
+ import io
13
+ import base64
14
+
15
def face_crop(image, face_rect):
    """Crop the region described by *face_rect* (keys x/y/width/height) out
    of *image*, clamped to the image bounds, and scale it to 150px tall
    while keeping the aspect ratio."""
    left = max(face_rect.get('x'), 0)
    top = max(face_rect.get('y'), 0)
    box_w = face_rect.get('width')
    box_h = face_rect.get('height')

    # Clamp the box so it never extends past the right/bottom edges.
    if left + box_w >= image.width:
        box_w = image.width - left
    if top + box_h >= image.height:
        box_h = image.height - top

    face_image = image.crop((left, top, left + box_w - 1, top + box_h - 1))

    # Fixed 150px height; width follows the crop's aspect ratio.
    aspect = face_image.width / float(face_image.height)
    face_image = face_image.resize((int(aspect * 150), int(150)))
    return face_image
38
+
39
def pil_image_to_base64(image, format="PNG"):
    """Serialize a PIL.Image to a Base64-encoded string.

    :param image: PIL.Image object (anything with a compatible ``save``)
    :param format: encoder name passed to ``save``, e.g. "PNG", "JPEG"
    :return: Base64-encoded string of the encoded bytes
    """
    buffer = io.BytesIO()
    image.save(buffer, format=format)
    # getvalue() reads the whole buffer regardless of position.
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
55
+
56
def compare_face(image1, image2, verifyThreshold):
    """Send two PIL images to the local /compare_face endpoint and render
    every cross-image face pair as HTML.

    :param verifyThreshold: score above which a pair is shown as a match
    :return: [html_or_error_message, json_payload] for the Gradio outputs
    """
    try:
        img_bytes1 = io.BytesIO()
        image1.save(img_bytes1, format="JPEG")
        img_bytes1.seek(0)
    except:
        return ["Failed to open image1", {"resultCode": "Failed to open image1"}]

    try:
        img_bytes2 = io.BytesIO()
        image2.save(img_bytes2, format="JPEG")
        img_bytes2.seek(0)
    except:
        return ["Failed to open image2", {"resultCode": "Failed to open image2"}]

    url = "http://127.0.0.1:9000/compare_face"
    files = {'image1': img_bytes1, 'image2': img_bytes2}
    reply = requests.post(url=url, files=files)
    if not reply.ok:
        return [reply.text, {"resultCode": reply.text}]

    json_result = reply.json()
    if json_result.get("resultCode") != "Ok":
        return [json_result.get("resultCode"), json_result]

    html = ""
    # BUG FIX: these fields are lists in the API response; the defaults
    # used to be {}.
    faces1 = json_result.get("faces1", [])
    faces2 = json_result.get("faces2", [])
    results = json_result.get("results", [])

    # BUG FIX: the loop variable used to be named `result`, shadowing the
    # HTTP response object above.
    for match in results:
        score = match.get('score')
        face1_idx = match.get('face1')
        face2_idx = match.get('face2')

        face_image1 = face_crop(image1, faces1[face1_idx])
        face_value1 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image1, format="PNG"))

        face_image2 = face_crop(image2, faces2[face2_idx])
        face_value2 = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image2, format="PNG"))

        # Red "not equal" icon by default, green "equal" icon on a match.
        match_icon = '<svg fill="red" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.92V10.2H19V13.92H0ZM0 21.64V17.92H19V21.64H0Z"></path><path d="M14.08 0H18.08L5.08 32H1.08L14.08 0Z"></path></svg>'
        if score > verifyThreshold:
            match_icon = '<svg fill="green" width="19" height="32" viewBox="0 0 19 32"><path d="M0 13.9202V10.2002H19V13.9202H0ZM0 21.6402V17.9202H19V21.6402H0Z"></path></svg>'

        item_value = ('<div style="align-items: center; gap: 10px; display: flex; flex-direction: column;">'
                      '<div style="display: flex; align-items: center; gap: 20px;">'
                      '{face_value1}'
                      '{match_icon}'
                      '{face_value2}'
                      '</div>'
                      '<div style="text-align: center; margin-top: 10px;">'
                      'Score: {score}'
                      '</div>'
                      '</div>'
                      ).format(face_value1=face_value1, face_value2=face_value2, match_icon=match_icon, score=f"{score:.2f}")
        html += item_value
        html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'

    return [html, json_result]
116
+
117
+
118
def detect_face(image):
    """Send a PIL image to the local /detect_face endpoint and render each
    detected face (crop, age/gender, eye/mask attributes, pose angles and
    emotion scores) as an HTML card.

    Returns [html_or_error_message, json_payload] for the Gradio outputs.
    """
    try:
        img_bytes = io.BytesIO()
        image.save(img_bytes, format="JPEG")
        img_bytes.seek(0)
    except:
        return ["Failed to open image", {"resultCode": "Failed to open image"}]

    url = "http://127.0.0.1:9000/detect_face"
    files = {'image': img_bytes}
    result = requests.post(url=url, files=files)
    if result.ok:
        json_result = result.json()

        html = ""
        resultCode = json_result.get("resultCode")
        if resultCode == "Ok":
            faces = json_result.get("result", {})

            # Build one card per detected face.
            for face in faces:
                face_rect = face.get("rect", {})
                angles = face.get("angles", {})
                age_gender = face.get("age_gender", {})
                emotion = face.get("emotion", {})
                attribute = face.get("attribute", {})

                # Inline the cropped face as a data-URI <img>.
                face_image = face_crop(image, face_rect)
                face_value = ('<img src="data:image/png;base64,{base64_image}" style="width: 100px; height: auto; object-fit: contain;"/>').format(base64_image=pil_image_to_base64(face_image, format="PNG"))

                item_value = ('<div style="display: flex; justify-content: center; align-items: flex-start; margin: 10px;">'
                              '<div style="display: flex; align-items: flex-start; gap: 40px; ">'
                              '{face_value}'
                              '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                              '<div>'
                              '<p><b>Age</b></p>'
                              '<p><b>Gender</b></p>'
                              '<p><b>Mask</b></p>'
                              '<p><b>Left Eye</b></p>'
                              '<p><b>Right Eye</b></p>'
                              '<p><b>Yaw</b></p>'
                              '<p><b>Roll</b></p>'
                              '<p><b>Pitch</b></p>'
                              '</div>'
                              '<div>'
                              '<p>{age}</p>'
                              '<p>{gender}</p>'
                              '<p>{masked}</p>'
                              '<p>{left_eye}</p>'
                              '<p>{right_eye}</p>'
                              '<p>{yaw}</p>'
                              '<p>{roll}</p>'
                              '<p>{pitch}</p>'
                              '</div>'
                              '</div>'
                              '<div style="display: flex; gap: 20px; border-left: 1px solid #C0C0C0; padding-left: 20px;">'
                              '<div>'
                              '<p><b>Neutral</b></p>'
                              '<p><b>Happy</b></p>'
                              '<p><b>Angry</b></p>'
                              '<p><b>Surprised</b></p>'
                              '<p><b>Disgusted</b></p>'
                              '<p><b>Sad</b></p>'
                              '<p><b>Scared</b></p>'
                              '</div>'
                              '<div>'
                              '<p>{neutral}</p>'
                              '<p>{happy}</p>'
                              '<p>{angry}</p>'
                              '<p>{surprised}</p>'
                              '<p>{disgusted}</p>'
                              '<p>{sad}</p>'
                              '<p>{scared}</p>'
                              '</div>'
                              '</div>'
                              '</div></div>').format(face_value=face_value,
                                                     age=age_gender.get('age'),
                                                     # gender 0 is rendered as Female (SDK int code)
                                                     gender="Female" if age_gender.get('gender') == 0 else "Male",
                                                     neutral=f"{emotion.get('neutral'):.2f}",
                                                     happy=f"{emotion.get('happy'):.2f}",
                                                     angry=f"{emotion.get('angry'):.2f}",
                                                     surprised=f"{emotion.get('surprised'):.2f}",
                                                     disgusted=f"{emotion.get('disgusted'):.2f}",
                                                     sad=f"{emotion.get('sad'):.2f}",
                                                     scared=f"{emotion.get('scared'):.2f}",
                                                     masked="Yes" if attribute.get('masked') == 1 else "No",
                                                     left_eye="Open" if attribute.get('left_eye_opened') == 1 else "Close",
                                                     right_eye="Open" if attribute.get('right_eye_opened') == 1 else "Close",
                                                     yaw=f"{angles.get('yaw'):.2f}",
                                                     roll=f"{angles.get('roll'):.2f}",
                                                     pitch=f"{angles.get('pitch'):.2f}",
                                                     )

                html += item_value
                html += '<hr style="border: 1px solid #C0C0C0; margin: 10px 0;"/>'
        else:
            html = "No face!"

        return [html, json_result]
    else:
        return [result.text, {"resultCode": result.text}]
218
+
219
# Gradio front-end: two tabs (pairwise recognition, single-image attributes)
# backed by the Flask API that app.py serves on port 9000.
with gr.Blocks() as demo:
    # Header: branding, product blurb and contact links.
    gr.Markdown(
        """
    <div style="display: flex;align-items: center;">
        <img alt="Opulentyn Logo" src="https://github.com/user-attachments/assets/5fc78032-bff2-4f7e-a174-7d64b22f506d" width="350"/>
        <div>
            <h1>Face Recognition/Face Attribute</h1>
            <p>We offer <b>on-premises</b> OCR and liveness check solutions available with a <b>perpetual license</b>.</p>
        </div>
    </div>

    ## 🤝 Talk to us

    <div style="display: flex; align-items: center;">
        <a href="https://opulentyn.com" target="_blank">
            <img src="https://img.shields.io/badge/Website-https%3A%2F%2Fopulentyn.com-blue?style=flat&logo=google-chrome&logoColor=white" alt="Website">
        </a>
        &nbsp;&nbsp;&nbsp;&nbsp;
        <a href="mailto:support@opulentyn.com">
            <img src="https://img.shields.io/badge/Email-support%40opulentyn.com-blue?style=flat&logo=gmail&logoColor=white" alt="Email">
        </a>
        &nbsp;&nbsp;&nbsp;&nbsp;
        <a href="https://wa.me/13435013587" target="_blank">
            <img src="https://img.shields.io/badge/WhatsApp-%2B13435013587-blue?logo=whatsapp&logoColor=green" alt="WhatsApp">
        </a>
        &nbsp;&nbsp;&nbsp;&nbsp;
        <a href="https://join.slack.com/t/opulentyn/shared_invite/zt-2s230jtbq-dWBs8XUZcrYim~nUqiimSA" target="_blank">
            <img src="https://img.shields.io/badge/Slack-support--sdk-blueviolet?style=flat&logo=slack&logoColor=white" alt="Slack">
        </a>
    </div>
    """
    )

    # Tab 1: upload two images, compare every face pair.
    with gr.TabItem("Face Recognition"):
        with gr.Row():
            with gr.Column(scale=7):
                with gr.Row():
                    with gr.Column():
                        image_input1 = gr.Image(type='pil')
                        gr.Examples(['examples/1.webp', 'examples/2.webp', 'examples/3.webp', 'examples/4.webp'],
                                    inputs=image_input1)
                    with gr.Column():
                        image_input2 = gr.Image(type='pil')
                        gr.Examples(['examples/5.webp', 'examples/6.webp', 'examples/7.webp', 'examples/8.webp'],
                                    inputs=image_input2)
                # Score cutoff used only for the red/green match icon.
                verifyThreshold = gr.Slider(minimum=0, maximum=1, value=0.67, label="Verify Threshold")
                face_recog_button = gr.Button("Face Recognition")
            with gr.Column(scale=3):
                with gr.TabItem("Output"):
                    recog_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    recog_json_output = gr.JSON()

    # Tab 2: single image, per-face attribute report.
    with gr.TabItem("Face Attribute"):
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type='pil')
                gr.Examples(['examples/11.webp', 'examples/12.webp', 'examples/13.webp', 'examples/14.webp'],
                            inputs=image_input)
                face_attr_button = gr.Button("Face Attribute")
            with gr.Column():
                with gr.TabItem("Output"):
                    detect_html_output = gr.HTML()
                with gr.TabItem("JSON"):
                    detect_json_output = gr.JSON()


    face_recog_button.click(compare_face, inputs=[image_input1, image_input2, verifyThreshold], outputs=[recog_html_output, recog_json_output])
    face_attr_button.click(detect_face, inputs=[image_input], outputs=[detect_html_output, detect_json_output])

    # Visitor counter badge.
    gr.HTML('<a href="https://visitorbadge.io/status?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fopulentyn%2FCardOCR"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fopulentyn%2FCardOCR&countColor=%23263759" /></a>')

demo.launch(server_name="0.0.0.0", server_port=7860)
examples/1.webp ADDED
examples/11.webp ADDED
examples/12.webp ADDED
examples/13.webp ADDED
examples/14.webp ADDED
examples/2.webp ADDED
examples/3.webp ADDED
examples/4.webp ADDED
examples/5.webp ADDED
examples/6.webp ADDED
examples/7.webp ADDED
examples/8.webp ADDED
face.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from ctypes import *

class Face(Structure):
    # Result record filled in by the native processImage() call (see
    # opyfacerecog.py). Field order and types must match the SDK's C struct
    # layout exactly — do not reorder.
    _fields_ = [("x", c_int32), ("y", c_int32), ("width", c_int32), ("height", c_int32),  # face rectangle
                ("yaw", c_float), ("roll", c_float), ("pitch", c_float),  # head pose angles
                ("age", c_int32), ("gender", c_int32),  # demographic estimates (gender is an int code)
                ("neutral", c_float), ("happy", c_float), ("angry", c_float),  # emotion scores
                ("surprised", c_float), ("disgusted", c_float), ("sad", c_float), ("scared", c_float),
                ("masked", c_int32), ("left_eye_opened", c_int32), ("right_eye_opened", c_int32),  # attribute flags
                ("featSize", c_int32),  # number of valid bytes in featData
                ("featData", c_ubyte * 512)  # face embedding consumed by verifyFeat()
                ]
lib/libonnxruntime.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78a4cf2aa1ae121f55307baff4ea4423742aa97800282798ca3fc166eb76cc33
3
+ size 13967344
lib/libopyfaceenv.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9e7da1c562656ba0dc15e78912d755e049b15d7e04f9fcb529224824bf69e88
3
+ size 20793360
lib/libopyfacerecog.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef27df68830fd6d06ae547f4955cd2c570e4d888eda31838301a6db819f7dfd3
3
+ size 2925659
model/bin/age.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c30be02b190ff35489f77eed3def79245eac5f8cc6187b5f375d3db9c3464728
3
+ size 2733265
model/bin/detect1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b31fc12be022d0e32becd8cb6767aabeb8e082647adb2646dd3418f5c68a6d31
3
+ size 1723908
model/bin/detect2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fd3a4259604c4533f39b6d7f57b3c16d00a1721ba5b60501074c90d06e934af
3
+ size 7959332
model/bin/em.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d47354be00d7125aba538c9bc09cb37c9a246cfd50b42cffc45be4a865e2198c
3
+ size 16017560
model/bin/eyes.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:158a3ffa5c482462a58e533952f6aa0a4a62aefa0875ada21d19ee4d076d8602
3
+ size 334300
model/bin/gender.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71a2ffa0a93d97271546bc40bcc065958945858d7d1ccc0fcff986000b46b15
3
+ size 2733307
model/bin/mask.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a295d3e5336cb33ed1fa956ea36ee6d9e64d17de03a3860e16e069599cba8f5f
3
+ size 950768
model/bin/recog1.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c89a00205ee1559cbd6818a7f6c15476b79fc24dcf1193cd75122dad1b2fd81c
3
+ size 424155759
model/bin/recog2.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:423721ee4d28c22239087f28c922963d7b4948f4365140f55735aed61c11e360
3
+ size 51391196
model/bin/recog3.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18c9f8ea119cff8757a2640b2c101b3dc2840e1c4de5d7452585112de2d1b5e5
3
+ size 6424533
model/conf/facerec/ag.xml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <age_gender_impl_name>version_3.0</age_gender_impl_name>
5
+
6
+ <analysisa>bin/age.bin</analysisa>
7
+ <age_use_cuda>false</age_use_cuda>
8
+ <age_use_legacy>false</age_use_legacy>
9
+ <analysisg>bin/gender.bin</analysisg>
10
+ <gender_use_cuda>false</gender_use_cuda>
11
+ <gender_use_legacy>false</gender_use_legacy>
12
+
13
+ </opencv_storage>
model/conf/facerec/config.xml ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ <conf>
2
+ </conf>
model/conf/facerec/detect.xml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <capturer_impl_name>fld_capturer</capturer_impl_name>
5
+
6
+ <capturer_type>frontal</capturer_type>
7
+
8
+
9
+ <detector_type>refa</detector_type>
10
+ <min_size>0.045</min_size>
11
+ <max_size>3</max_size>
12
+ <num_threads>1</num_threads>
13
+ <score_threshold>0.89</score_threshold>
14
+ <nms_iou_threshold>0.4</nms_iou_threshold>
15
+ <coarse_score_threshold>0.5</coarse_score_threshold>
16
+
17
+ <refa_id>refa-a</refa_id>
18
+ <refa_filepath>bin/detect1.bin</refa_filepath>
19
+
20
+ <use_cuda>0</use_cuda>
21
+ <gpu_index>0</gpu_index>
22
+ <use_advanced_multithreading>0</use_advanced_multithreading>
23
+
24
+ <fitter_type>fda</fitter_type>
25
+ <fda_file>bin/detect2.bin</fda_file>
26
+
27
+ <iris_enabled>0</iris_enabled>
28
+ <iris_config_filepath>iris_fitter.xml</iris_config_filepath>
29
+
30
+
31
+ <downscale_rawsamples_to_preferred_size>1</downscale_rawsamples_to_preferred_size>
32
+
33
+ </opencv_storage>
model/conf/facerec/em.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <emotion_impl_name>version_2.0</emotion_impl_name>
5
+
6
+ <analysis>bin/em.bin</analysis>
7
+ <use_cuda>false</use_cuda>
8
+ <use_legacy>false</use_legacy>
9
+
10
+ </opencv_storage>
model/conf/facerec/eyes.xml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <face_attribute_impl>the_one</face_attribute_impl>
5
+
6
+ <face_attribute_name>eyes_openness_v2</face_attribute_name>
7
+ <face_attribute_data>bin/eyes.bin</face_attribute_data>
8
+ <score_threshold>0.5</score_threshold>
9
+
10
+ </opencv_storage>
model/conf/facerec/mask.xml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <face_attribute_impl>version_2.0</face_attribute_impl>
5
+
6
+ <face_attribute_name>masked_face</face_attribute_name>
7
+ <face_attribute_data>bin/mask.bin</face_attribute_data>
8
+ <score_threshold>0.5</score_threshold>
9
+
10
+ <use_mobile_gpu>0</use_mobile_gpu>
11
+
12
+ </opencv_storage>
model/conf/facerec/recog.xml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <recognizer_impl_name>method_link</recognizer_impl_name>
5
+ <recognizer_config>conf/facerec/recog_param.xml</recognizer_config>
6
+
7
+ </opencv_storage>
model/conf/facerec/recog_param.xml ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0"?>
2
+ <opencv_storage>
3
+
4
+ <recognizer_impl_name>method12v1000</recognizer_impl_name>
5
+
6
+
7
+
8
+ <part0>bin/detect2.bin</part0>
9
+ <part1>bin/recog1.bin</part1>
10
+ <part2>bin/recog2.bin</part2>
11
+ <part3>bin/recog3.bin</part3>
12
+
13
+ <use_mobile_gpu>0</use_mobile_gpu>
14
+ <use_cuda>0</use_cuda>
15
+ <gpu_index>0</gpu_index>
16
+
17
+
18
+ <roc>
19
+ 1.512680053711
20
+ 0.000000000000
21
+ 0.017418799922
22
+ 0.000000000000
23
+
24
+ 244.525177001953
25
+ 0.000000000000
26
+ 0.036921270192
27
+ 0.000145137310
28
+
29
+ 487.537658691406
30
+ 0.000000000000
31
+ 0.070433743298
32
+ 0.000585138798
33
+
34
+ 730.550170898438
35
+ 0.000000000000
36
+ 0.091564692557
37
+ 0.001321136951
38
+
39
+ 973.562683105469
40
+ 0.000000000000
41
+ 0.102614454925
42
+ 0.002369165421
43
+
44
+ 1216.575195312500
45
+ 0.000000000000
46
+ 0.112338289618
47
+ 0.003732264042
48
+
49
+ 1459.587646484375
50
+ 0.000000000000
51
+ 0.131876721978
52
+ 0.005405783653
53
+
54
+ 1702.600219726562
55
+ 0.000000000000
56
+ 0.163213133812
57
+ 0.007417380810
58
+
59
+ 1945.612670898438
60
+ 0.000000000000
61
+ 0.215010792017
62
+ 0.009764373302
63
+
64
+ 2188.625244140625
65
+ 0.000000000000
66
+ 0.278898477554
67
+ 0.012435853481
68
+
69
+ 2431.637695312500
70
+ 0.000000000000
71
+ 0.353262901306
72
+ 0.015471816063
73
+
74
+ 2674.650146484375
75
+ 0.000000000000
76
+ 0.435616225004
77
+ 0.018863618374
78
+
79
+ 2917.662597656250
80
+ 0.000000000000
81
+ 0.517174601555
82
+ 0.022593140602
83
+
84
+ 3160.675048828125
85
+ 0.000000000000
86
+ 0.594927132130
87
+ 0.026714444160
88
+
89
+ 3403.687744140625
90
+ 0.000000000000
91
+ 0.665740191936
92
+ 0.031211495399
93
+
94
+ 3646.700195312500
95
+ 0.000000000000
96
+ 0.728241384029
97
+ 0.036059856415
98
+
99
+ 3889.712646484375
100
+ 0.000000000000
101
+ 0.782364964485
102
+ 0.041326820850
103
+
104
+ 4132.725097656250
105
+ 0.000000000000
106
+ 0.828701794147
107
+ 0.046990036964
108
+
109
+ 4375.737792968750
110
+ 0.000000000000
111
+ 0.869253814220
112
+ 0.053017497063
113
+
114
+ 4618.750000000000
115
+ 0.000000000000
116
+ 0.900053620338
117
+ 0.059491038322
118
+
119
+ 4861.762695312500
120
+ 0.000000000000
121
+ 0.925056338310
122
+ 0.066381156445
123
+
124
+ 5104.774902343750
125
+ 0.000000005995
126
+ 0.944028019905
127
+ 0.073648035526
128
+
129
+ 5347.787597656250
130
+ 0.000000007104
131
+ 0.958393573761
132
+ 0.081388950348
133
+
134
+ 5590.800292968750
135
+ 0.000000008051
136
+ 0.968550026417
137
+ 0.089514672756
138
+
139
+ 5833.812500000000
140
+ 0.000000009453
141
+ 0.976274311543
142
+ 0.098133325577
143
+
144
+ 6076.825195312500
145
+ 0.000000016849
146
+ 0.982362270355
147
+ 0.107202410698
148
+
149
+ 6319.837402343750
150
+ 0.000000037714
151
+ 0.986156880856
152
+ 0.116668164730
153
+
154
+ 6562.850097656250
155
+ 0.000000125729
156
+ 0.989229261875
157
+ 0.126655399799
158
+
159
+ 6805.862792968750
160
+ 0.000000322775
161
+ 0.990983843803
162
+ 0.137113332748
163
+
164
+ 7048.875000000000
165
+ 0.000000844537
166
+ 0.992396354675
167
+ 0.147979438305
168
+
169
+ 7291.887695312500
170
+ 0.000002360034
171
+ 0.993351519108
172
+ 0.168762803078
173
+
174
+ 7534.899902343750
175
+ 0.000006154218
176
+ 0.994017183781
177
+ 0.214203178883
178
+
179
+ 7777.912597656250
180
+ 0.000016113172
181
+ 0.994441568851
182
+ 0.278451800346
183
+
184
+ 8020.925292968750
185
+ 0.000040494200
186
+ 0.994999945164
187
+ 0.356456458569
188
+
189
+ 8263.937500000000
190
+ 0.000099464734
191
+ 0.995369017124
192
+ 0.441985607147
193
+
194
+ 8506.950195312500
195
+ 0.000238555382
196
+ 0.995669007301
197
+ 0.528621554375
198
+
199
+ 8749.962890625000
200
+ 0.000559987093
201
+ 0.995895981789
202
+ 0.611591219902
203
+
204
+ 8992.974609375000
205
+ 0.001302033430
206
+ 0.996132075787
207
+ 0.684468984604
208
+
209
+ 9235.987304687500
210
+ 0.003006055485
211
+ 0.996436715126
212
+ 0.741078972816
213
+
214
+ 9479.000000000000
215
+ 0.006861325353
216
+ 0.996610999107
217
+ 0.782298445702
218
+
219
+ 9722.012695312500
220
+ 0.015361220576
221
+ 0.996944129467
222
+ 0.817885696888
223
+
224
+ 9965.025390625000
225
+ 0.033221695572
226
+ 0.997113645077
227
+ 0.848900675774
228
+
229
+ 10208.037109375000
230
+ 0.067775554955
231
+ 0.997449994087
232
+ 0.876659750938
233
+
234
+ 10451.049804687500
235
+ 0.128415152431
236
+ 0.997725605965
237
+ 0.904105246067
238
+
239
+ 10694.062500000000
240
+ 0.222135275602
241
+ 0.998053014278
242
+ 0.929574251175
243
+
244
+ 10937.075195312500
245
+ 0.349268019199
246
+ 0.998477220535
247
+ 0.946929633617
248
+
249
+ 11180.087890625000
250
+ 0.499025940895
251
+ 0.998991847038
252
+ 0.953711867332
253
+
254
+ 11423.099609375000
255
+ 0.650276362896
256
+ 0.999405980110
257
+ 0.959191501141
258
+
259
+ 11666.112304687500
260
+ 0.781673550606
261
+ 0.999659240246
262
+ 0.964288055897
263
+
264
+ 11909.125000000000
265
+ 0.879517912865
266
+ 0.999745011330
267
+ 0.968977451324
268
+
269
+ 12152.137695312500
270
+ 0.941511988640
271
+ 0.999958992004
272
+ 0.973327934742
273
+
274
+ 12395.150390625000
275
+ 0.975172519684
276
+ 0.999983191490
277
+ 0.977318882942
278
+
279
+ 12638.162109375000
280
+ 0.990831315517
281
+ 0.999997973442
282
+ 0.980933845043
283
+
284
+ 12881.174804687500
285
+ 0.997046291828
286
+ 1.000000000000
287
+ 0.984228491783
288
+
289
+ 13124.187500000000
290
+ 0.999171257019
291
+ 1.000000000000
292
+ 0.987184703350
293
+
294
+ 13367.200195312500
295
+ 0.999799430370
296
+ 1.000000000000
297
+ 0.989796817303
298
+
299
+ 13610.212890625000
300
+ 0.999957025051
301
+ 1.000000000000
302
+ 0.992106020451
303
+
304
+ 13853.224609375000
305
+ 0.999992012978
306
+ 1.000000000000
307
+ 0.994101285934
308
+
309
+ 14096.237304687500
310
+ 0.999998986721
311
+ 1.000000000000
312
+ 0.995782136917
313
+
314
+ 14339.250000000000
315
+ 1.000000000000
316
+ 1.000000000000
317
+ 0.997178018093
318
+
319
+ 14582.262695312500
320
+ 1.000000000000
321
+ 1.000000000000
322
+ 0.998284399509
323
+
324
+ 14825.275390625000
325
+ 1.000000000000
326
+ 1.000000000000
327
+ 0.999105751514
328
+
329
+ 15068.287109375000
330
+ 1.000000000000
331
+ 1.000000000000
332
+ 0.999660372734
333
+
334
+ 15311.299804687500
335
+ 1.000000000000
336
+ 1.000000000000
337
+ 0.999948918819
338
+
339
+ </roc>
340
+
341
+ </opencv_storage>
opyfacerecog.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

from ctypes import *
from numpy.ctypeslib import ndpointer
from face import Face

# ctypes bindings for the native face-recognition SDK shipped in lib/,
# resolved relative to this module's location.
libPath = os.path.abspath(os.path.dirname(__file__)) + '/lib/libopyfacerecog.so'
opyfacerecog = cdll.LoadLibrary(libPath)

# getHWID() -> bytes: hardware identifier the license key is bound to.
getHWID = opyfacerecog.getHWID
getHWID.argtypes = []
getHWID.restype = c_char_p

# setLicenseKey(key: bytes) -> int status code.
setLicenseKey = opyfacerecog.setLicenseKey
setLicenseKey.argtypes = [c_char_p]
setLicenseKey.restype = c_int32

# initSDK(model_dir: bytes) -> int status code.
initSDK = opyfacerecog.initSDK
initSDK.argtypes = [c_char_p]
initSDK.restype = c_int32

# processImage(rgb_pixels, width, height, out_faces, max_faces) -> int:
# fills out_faces (a Face array) and returns the number of faces found.
processImage = opyfacerecog.processImage
processImage.argtypes = [ndpointer(c_ubyte, flags='C_CONTIGUOUS'), c_int32, c_int32, POINTER(Face), c_int32]
processImage.restype = c_int32

# verifyFeat(size1, feat1, size2, feat2) -> float similarity score between
# two 512-byte feature buffers.
verifyFeat = opyfacerecog.verifyFeat
verifyFeat.argtypes = [c_int32, c_ubyte * 512, c_int32, c_ubyte * 512]
verifyFeat.restype = c_float
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
# Web service + demo UI.
flask
flask-cors
gradio
datadog_api_client
# Imported directly by app.py / demo.py but previously missing (they were
# only available as transitive dependencies):
numpy
Pillow
requests
run.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
#!/bin/bash
# Launch the Gradio demo (port 7860) in the background, then run the Flask
# API (port 9000) as the container's main process.

# BUG FIX: the original did `cd /home/opycard`, a directory that does not
# exist in this image (the Dockerfile's WORKDIR is /home/opyfacerecog); it
# only worked because the failed cd left the shell in the workdir.
cd /home/opyfacerecog

# BUG FIX: `exec cmd &` does not replace the shell — exec belongs on the
# final foreground command only.
python3 demo.py &
exec python3 app.py