Dev-mohamed committed on
Commit 3888132 · 1 Parent(s): 8eed6b7
This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .github/FUNDING.yml +2 -0
  2. .github/ISSUE_TEMPLATE/01-report-bug.yaml +86 -0
  3. .github/ISSUE_TEMPLATE/02-request-feature.yaml +18 -0
  4. .github/ISSUE_TEMPLATE/03-documentation.yaml +18 -0
  5. .github/ISSUE_TEMPLATE/config.yml +5 -0
  6. .github/pull_request_template.md +13 -0
  7. .github/workflows/tests.yml +69 -0
  8. .gitignore +16 -0
  9. .pylintrc +641 -0
  10. .vscode/settings.json +18 -0
  11. CITATION.md +41 -0
  12. Dockerfile +56 -0
  13. LICENSE +21 -0
  14. Makefile +8 -0
  15. README.md +377 -0
  16. Train.py +55 -0
  17. deepface/DeepFace.py +585 -0
  18. deepface/__init__.py +1 -0
  19. deepface/api/__init__.py +0 -0
  20. deepface/api/postman/deepface-api.postman_collection.json +102 -0
  21. deepface/api/src/__init__.py +0 -0
  22. deepface/api/src/api.py +10 -0
  23. deepface/api/src/app.py +11 -0
  24. deepface/api/src/modules/__init__.py +0 -0
  25. deepface/api/src/modules/core/__init__.py +0 -0
  26. deepface/api/src/modules/core/routes.py +207 -0
  27. deepface/api/src/modules/core/service.py +84 -0
  28. deepface/basemodels/ArcFace.py +179 -0
  29. deepface/basemodels/DeepID.py +99 -0
  30. deepface/basemodels/Dlib.py +89 -0
  31. deepface/basemodels/Facenet.py +1715 -0
  32. deepface/basemodels/FbDeepFace.py +105 -0
  33. deepface/basemodels/GhostFaceNet.py +312 -0
  34. deepface/basemodels/OpenFace.py +397 -0
  35. deepface/basemodels/SFace.py +87 -0
  36. deepface/basemodels/VGGFace.py +160 -0
  37. deepface/basemodels/__init__.py +0 -0
  38. deepface/commons/__init__.py +0 -0
  39. deepface/commons/constant.py +4 -0
  40. deepface/commons/folder_utils.py +35 -0
  41. deepface/commons/image_utils.py +149 -0
  42. deepface/commons/logger.py +54 -0
  43. deepface/commons/os_path.py +10 -0
  44. deepface/commons/package_utils.py +46 -0
  45. deepface/commons/path.py +9 -0
  46. deepface/detectors/CenterFace.py +217 -0
  47. deepface/detectors/DetectorWrapper.py +204 -0
  48. deepface/detectors/Dlib.py +114 -0
  49. deepface/detectors/FastMtCnn.py +89 -0
  50. deepface/detectors/MediaPipe.py +76 -0
.github/FUNDING.yml ADDED
@@ -0,0 +1,2 @@
+ github: serengil
+ patreon: serengil?repo=deepface
.github/ISSUE_TEMPLATE/01-report-bug.yaml ADDED
@@ -0,0 +1,86 @@
+ name: '🐛 Report a bug'
+ description: 'Use this template to report DeepFace related issues'
+ title: '[BUG]: <short description of the issue>'
+ labels:
+   - bug
+ body:
+   - type: checkboxes
+     id: preliminary-checks
+     attributes:
+       label: Before You Report a Bug, Please Confirm You Have Done The Following...
+       description: If any of these required steps are not taken, we may not be able to review your issue. Help us to help you!
+       options:
+         - label: I have updated to the latest version of the packages.
+           required: true
+         - label: I have searched for both [existing issues](https://github.com/serengil/deepface/issues) and [closed issues](https://github.com/serengil/deepface/issues?q=is%3Aissue+is%3Aclosed) and found none that matched my issue.
+           required: true
+   - type: input
+     id: deepface-version
+     attributes:
+       label: DeepFace's version
+       description: |
+         Please provide your deepface version by running `python -c "import deepface; print(deepface.__version__)"` in your terminal
+       placeholder: e.g. v0.0.90
+     validations:
+       required: true
+   - type: input
+     id: python-version
+     attributes:
+       label: Python version
+       description: |
+         Please provide your Python version by running `python --version` in your terminal
+       placeholder: e.g. 3.8.5
+     validations:
+       required: true
+   - type: input
+     id: os
+     attributes:
+       label: Operating System
+       description: |
+         Please provide your operating system's details
+       placeholder: e.g. Windows 10 or Ubuntu 20.04
+     validations:
+       required: false
+   - type: textarea
+     id: dependencies
+     attributes:
+       label: Dependencies
+       description: |
+         Please provide your Python dependencies by running `pip freeze` in your terminal, in particular tensorflow's and keras' versions
+     validations:
+       required: true
+   - type: textarea
+     id: repro-code
+     attributes:
+       label: Reproducible example
+       description: A ***minimal*** code sample which reproduces the issue
+       render: Python
+     validations:
+       required: true
+   - type: textarea
+     id: exception-message
+     attributes:
+       label: Relevant Log Output
+       description: Please share the exception message from your terminal if your program is failing
+     validations:
+       required: false
+   - type: textarea
+     id: expected
+     attributes:
+       label: Expected Result
+       description: What did you expect to happen?
+     validations:
+       required: false
+   - type: textarea
+     id: actual
+     attributes:
+       label: What happened instead?
+       description: What actually happened?
+     validations:
+       required: false
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: |
+         Any additional info you'd like to provide.
.github/ISSUE_TEMPLATE/02-request-feature.yaml ADDED
@@ -0,0 +1,18 @@
+ name: '✨ Request a New Feature'
+ description: 'Use this template to propose a new feature'
+ title: '[FEATURE]: <a short description of my proposal>'
+ labels:
+   - 'enhancement'
+ body:
+   - type: textarea
+     id: description
+     attributes:
+       label: Description
+       description: Explain what your proposed feature would do and why this is useful.
+     validations:
+       required: true
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: Any additional info you'd like to provide.
.github/ISSUE_TEMPLATE/03-documentation.yaml ADDED
@@ -0,0 +1,18 @@
+ name: '📝 Documentation'
+ description: 'Use this template to add or improve docs'
+ title: '[DOC]: <a short description of my proposal>'
+ labels:
+   - documentation
+ body:
+   - type: textarea
+     attributes:
+       label: Suggested Changes
+       description: What would you like to see happen in the docs?
+     validations:
+       required: true
+   - type: textarea
+     id: additional
+     attributes:
+       label: Additional Info
+       description: |
+         Any additional info you'd like to provide.
.github/ISSUE_TEMPLATE/config.yml ADDED
@@ -0,0 +1,5 @@
+ blank_issues_enabled: false
+ contact_links:
+   - name: Ask a question on StackOverflow
+     about: If you just want to ask a question, consider asking it on StackOverflow!
+     url: https://stackoverflow.com/questions/tagged/deepface
.github/pull_request_template.md ADDED
@@ -0,0 +1,13 @@
+ ## Tickets
+ 
+ https://github.com/serengil/deepface/issues/XXX
+ 
+ ### What has been done
+ 
+ With this PR, ...
+ 
+ ## How to test
+ 
+ ```shell
+ make lint && make test
+ ```
.github/workflows/tests.yml ADDED
@@ -0,0 +1,69 @@
+ name: Tests and Linting
+ 
+ on:
+   push:
+     paths:
+       - '.github/workflows/tests.yml'
+       - 'deepface/**'
+       - 'tests/**'
+       - 'api/**'
+       - 'requirements.txt'
+       - '.gitignore'
+       - 'setup.py'
+   pull_request:
+     paths:
+       - '.github/workflows/tests.yml'
+       - 'deepface/**'
+       - 'tests/**'
+       - 'api/**'
+       - 'requirements.txt'
+       - '.gitignore'
+       - 'setup.py'
+ 
+ jobs:
+   unit-tests:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [3.8]
+ 
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install pytest
+           pip install .
+ 
+       - name: Test with pytest
+         run: |
+           cd tests
+           python -m pytest . -s --disable-warnings
+   linting:
+     needs: unit-tests
+ 
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: [3.8]
+ 
+     steps:
+       - uses: actions/checkout@v4
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v5
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install pylint
+           pip install black
+           pip install .
+ 
+       - name: Lint with pylint
+         run: |
+           pylint --fail-under=10 deepface/
.gitignore ADDED
@@ -0,0 +1,16 @@
+ **/__pycache__
+ **/.DS_Store
+ build/
+ dist/
+ Pipfile
+ Pipfile.lock
+ .mypy_cache/
+ .idea/
+ deepface.egg-info/
+ tests/dataset/*.pkl
+ tests/*.ipynb
+ tests/*.csv
+ *.pyc
+ **/.coverage
+ **/.coverage.*
+ .env
.pylintrc ADDED
@@ -0,0 +1,641 @@
+ [MAIN]
+ 
+ # Analyse import fallback blocks. This can be used to support both Python 2 and
+ # 3 compatible code, which means that the block might have code that exists
+ # only in one or another interpreter, leading to false positives when analysed.
+ analyse-fallback-blocks=no
+ 
+ # Load and enable all available extensions. Use --list-extensions to see a list
+ # all available extensions.
+ #enable-all-extensions=
+ 
+ # In error mode, messages with a category besides ERROR or FATAL are
+ # suppressed, and no reports are done by default. Error mode is compatible with
+ # disabling specific errors.
+ #errors-only=
+ 
+ # Always return a 0 (non-error) status code, even if lint errors are found.
+ # This is primarily useful in continuous integration scripts.
+ #exit-zero=
+ 
+ # A comma-separated list of package or module names from where C extensions may
+ # be loaded. Extensions are loading into the active Python interpreter and may
+ # run arbitrary code.
+ extension-pkg-allow-list=
+ 
+ # A comma-separated list of package or module names from where C extensions may
+ # be loaded. Extensions are loading into the active Python interpreter and may
+ # run arbitrary code. (This is an alternative name to extension-pkg-allow-list
+ # for backward compatibility.)
+ extension-pkg-whitelist=
+ 
+ # Return non-zero exit code if any of these messages/categories are detected,
+ # even if score is above --fail-under value. Syntax same as enable. Messages
+ # specified are enabled, while categories only check already-enabled messages.
+ fail-on=
+ 
+ # Specify a score threshold under which the program will exit with error.
+ fail-under=10
+ 
+ # Interpret the stdin as a python script, whose filename needs to be passed as
+ # the module_or_package argument.
+ #from-stdin=
+ 
+ # Files or directories to be skipped. They should be base names, not paths.
+ ignore=CVS
+ 
+ # Add files or directories matching the regular expressions patterns to the
+ # ignore-list. The regex matches against paths and can be in Posix or Windows
+ # format. Because '\' represents the directory delimiter on Windows systems, it
+ # can't be used as an escape character.
+ ignore-paths=
+ 
+ # Files or directories matching the regular expression patterns are skipped.
+ # The regex matches against base names, not paths. The default value ignores
+ # Emacs file locks
+ ignore-patterns=^\.#
+ 
+ # List of module names for which member attributes should not be checked
+ # (useful for modules/projects where namespaces are manipulated during runtime
+ # and thus existing member attributes cannot be deduced by static analysis). It
+ # supports qualified module names, as well as Unix pattern matching.
+ ignored-modules=
+ 
+ # Python code to execute, usually for sys.path manipulation such as
+ # pygtk.require().
+ #init-hook=
+ 
+ # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+ # number of processors available to use, and will cap the count on Windows to
+ # avoid hangs.
+ jobs=1
+ 
+ # Control the amount of potential inferred values when inferring a single
+ # object. This can help the performance when dealing with large functions or
+ # complex, nested conditions.
+ limit-inference-results=100
+ 
+ # List of plugins (as comma separated values of python module names) to load,
+ # usually to register additional checkers.
+ load-plugins=
+ 
+ # Pickle collected data for later comparisons.
+ persistent=yes
+ 
+ # Minimum Python version to use for version dependent checks. Will default to
+ # the version used to run pylint.
+ py-version=3.9
+ 
+ # Discover python modules and packages in the file system subtree.
+ recursive=no
+ 
+ # When enabled, pylint would attempt to guess common misconfiguration and emit
+ # user-friendly hints instead of false-positive error messages.
+ suggestion-mode=yes
+ 
+ # Allow loading of arbitrary C extensions. Extensions are imported into the
+ # active Python interpreter and may run arbitrary code.
+ unsafe-load-any-extension=no
+ 
+ # In verbose mode, extra non-checker-related info will be displayed.
+ #verbose=
+ 
+ 
+ [BASIC]
+ 
+ # Naming style matching correct argument names.
+ argument-naming-style=snake_case
+ 
+ # Regular expression matching correct argument names. Overrides argument-
+ # naming-style. If left empty, argument names will be checked with the set
+ # naming style.
+ #argument-rgx=
+ 
+ # Naming style matching correct attribute names.
+ attr-naming-style=snake_case
+ 
+ # Regular expression matching correct attribute names. Overrides attr-naming-
+ # style. If left empty, attribute names will be checked with the set naming
+ # style.
+ #attr-rgx=
+ 
+ # Bad variable names which should always be refused, separated by a comma.
+ bad-names=foo,
+           bar,
+           baz,
+           toto,
+           tutu,
+           tata
+ 
+ # Bad variable names regexes, separated by a comma. If names match any regex,
+ # they will always be refused
+ bad-names-rgxs=
+ 
+ # Naming style matching correct class attribute names.
+ class-attribute-naming-style=any
+ 
+ # Regular expression matching correct class attribute names. Overrides class-
+ # attribute-naming-style. If left empty, class attribute names will be checked
+ # with the set naming style.
+ #class-attribute-rgx=
+ 
+ # Naming style matching correct class constant names.
+ class-const-naming-style=UPPER_CASE
+ 
+ # Regular expression matching correct class constant names. Overrides class-
+ # const-naming-style. If left empty, class constant names will be checked with
+ # the set naming style.
+ #class-const-rgx=
+ 
+ # Naming style matching correct class names.
+ class-naming-style=PascalCase
+ 
+ # Regular expression matching correct class names. Overrides class-naming-
+ # style. If left empty, class names will be checked with the set naming style.
+ #class-rgx=
+ 
+ # Naming style matching correct constant names.
+ const-naming-style=UPPER_CASE
+ 
+ # Regular expression matching correct constant names. Overrides const-naming-
+ # style. If left empty, constant names will be checked with the set naming
+ # style.
+ #const-rgx=
+ 
+ # Minimum line length for functions/classes that require docstrings, shorter
+ # ones are exempt.
+ docstring-min-length=-1
+ 
+ # Naming style matching correct function names.
+ function-naming-style=snake_case
+ 
+ # Regular expression matching correct function names. Overrides function-
+ # naming-style. If left empty, function names will be checked with the set
+ # naming style.
+ #function-rgx=
+ 
+ # Good variable names which should always be accepted, separated by a comma.
+ good-names=i,
+            j,
+            k,
+            ex,
+            Run,
+            _
+ 
+ # Good variable names regexes, separated by a comma. If names match any regex,
+ # they will always be accepted
+ good-names-rgxs=
+ 
+ # Include a hint for the correct naming format with invalid-name.
+ include-naming-hint=no
+ 
+ # Naming style matching correct inline iteration names.
+ inlinevar-naming-style=any
+ 
+ # Regular expression matching correct inline iteration names. Overrides
+ # inlinevar-naming-style. If left empty, inline iteration names will be checked
+ # with the set naming style.
+ #inlinevar-rgx=
+ 
+ # Naming style matching correct method names.
+ method-naming-style=snake_case
+ 
+ # Regular expression matching correct method names. Overrides method-naming-
+ # style. If left empty, method names will be checked with the set naming style.
+ #method-rgx=
+ 
+ # Naming style matching correct module names.
+ module-naming-style=snake_case
+ 
+ # Regular expression matching correct module names. Overrides module-naming-
+ # style. If left empty, module names will be checked with the set naming style.
+ #module-rgx=
+ 
+ # Colon-delimited sets of names that determine each other's naming style when
+ # the name regexes allow several styles.
+ name-group=
+ 
+ # Regular expression which should only match function or class names that do
+ # not require a docstring.
+ no-docstring-rgx=^_
+ 
+ # List of decorators that produce properties, such as abc.abstractproperty. Add
+ # to this list to register other decorators that produce valid properties.
+ # These decorators are taken in consideration only for invalid-name.
+ property-classes=abc.abstractproperty
+ 
+ # Regular expression matching correct type variable names. If left empty, type
+ # variable names will be checked with the set naming style.
+ #typevar-rgx=
+ 
+ # Naming style matching correct variable names.
+ variable-naming-style=snake_case
+ 
+ # Regular expression matching correct variable names. Overrides variable-
+ # naming-style. If left empty, variable names will be checked with the set
+ # naming style.
+ #variable-rgx=
+ 
+ 
+ [CLASSES]
+ 
+ # Warn about protected attribute access inside special methods
+ check-protected-access-in-special-methods=no
+ 
+ # List of method names used to declare (i.e. assign) instance attributes.
+ defining-attr-methods=__init__,
+                       __new__,
+                       setUp,
+                       __post_init__
+ 
+ # List of member names, which should be excluded from the protected access
+ # warning.
+ exclude-protected=_asdict,
+                   _fields,
+                   _replace,
+                   _source,
+                   _make
+ 
+ # List of valid names for the first argument in a class method.
+ valid-classmethod-first-arg=cls
+ 
+ # List of valid names for the first argument in a metaclass class method.
+ valid-metaclass-classmethod-first-arg=cls
+ 
+ 
+ [DESIGN]
+ 
+ # List of regular expressions of class ancestor names to ignore when counting
+ # public methods (see R0903)
+ exclude-too-few-public-methods=
+ 
+ # List of qualified class names to ignore when counting class parents (see
+ # R0901)
+ ignored-parents=
+ 
+ # Maximum number of arguments for function / method.
+ max-args=5
+ 
+ # Maximum number of attributes for a class (see R0902).
+ max-attributes=7
+ 
+ # Maximum number of boolean expressions in an if statement (see R0916).
+ max-bool-expr=5
+ 
+ # Maximum number of branch for function / method body.
+ max-branches=12
+ 
+ # Maximum number of locals for function / method body.
+ max-locals=15
+ 
+ # Maximum number of parents for a class (see R0901).
+ max-parents=7
+ 
+ # Maximum number of public methods for a class (see R0904).
+ max-public-methods=20
+ 
+ # Maximum number of return / yield for function / method body.
+ max-returns=6
+ 
+ # Maximum number of statements in function / method body.
+ max-statements=50
+ 
+ # Minimum number of public methods for a class (see R0903).
+ min-public-methods=2
+ 
+ 
+ [EXCEPTIONS]
+ 
+ # Exceptions that will emit a warning when caught.
+ overgeneral-exceptions=BaseException,
+                        Exception
+ 
+ 
+ [FORMAT]
+ 
+ # Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+ expected-line-ending-format=
+ 
+ # Regexp for a line that is allowed to be longer than the limit.
+ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+ 
+ # Number of spaces of indent required inside a hanging or continued line.
+ indent-after-paren=4
+ 
+ # String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+ # tab).
+ indent-string='    '
+ 
+ # Maximum number of characters on a single line.
+ max-line-length=100
+ 
+ # Maximum number of lines in a module.
+ max-module-lines=1000
+ 
+ # Allow the body of a class to be on the same line as the declaration if body
+ # contains single statement.
+ single-line-class-stmt=no
+ 
+ # Allow the body of an if to be on the same line as the test if there is no
+ # else.
+ single-line-if-stmt=no
+ 
+ 
+ [IMPORTS]
+ 
+ # List of modules that can be imported at any level, not just the top level
+ # one.
+ allow-any-import-level=
+ 
+ # Allow wildcard imports from modules that define __all__.
+ allow-wildcard-with-all=no
+ 
+ # Deprecated modules which should not be used, separated by a comma.
+ deprecated-modules=
+ 
+ # Output a graph (.gv or any supported image format) of external dependencies
+ # to the given file (report RP0402 must not be disabled).
+ ext-import-graph=
+ 
+ # Output a graph (.gv or any supported image format) of all (i.e. internal and
+ # external) dependencies to the given file (report RP0402 must not be
+ # disabled).
+ import-graph=
+ 
+ # Output a graph (.gv or any supported image format) of internal dependencies
+ # to the given file (report RP0402 must not be disabled).
+ int-import-graph=
+ 
+ # Force import order to recognize a module as part of the standard
+ # compatibility libraries.
+ known-standard-library=
+ 
+ # Force import order to recognize a module as part of a third party library.
+ known-third-party=enchant
+ 
+ # Couples of modules and preferred modules, separated by a comma.
+ preferred-modules=
+ 
+ 
+ [LOGGING]
+ 
+ # The type of string formatting that logging methods do. `old` means using %
+ # formatting, `new` is for `{}` formatting.
+ logging-format-style=old
+ 
+ # Logging modules to check that the string format arguments are in logging
+ # function parameter format.
+ logging-modules=logging
+ 
+ 
+ [MESSAGES CONTROL]
+ 
+ # Only show warnings with the listed confidence levels. Leave empty to show
+ # all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE,
+ # UNDEFINED.
+ confidence=HIGH,
+            CONTROL_FLOW,
+            INFERENCE,
+            INFERENCE_FAILURE,
+            UNDEFINED
+ 
+ # Disable the message, report, category or checker with the given id(s). You
+ # can either give multiple identifiers separated by comma (,) or put this
+ # option multiple times (only on the command line, not in the configuration
+ # file where it should appear only once). You can also use "--disable=all" to
+ # disable everything first and then re-enable specific checks. For example, if
+ # you want to run only the similarities checker, you can use "--disable=all
+ # --enable=similarities". If you want to run only the classes checker, but have
+ # no Warning level messages displayed, use "--disable=all --enable=classes
+ # --disable=W".
+ disable=raw-checker-failed,
+         bad-inline-option,
+         locally-disabled,
+         file-ignored,
+         suppressed-message,
+         useless-suppression,
+         deprecated-pragma,
+         use-symbolic-message-instead,
+         import-error,
+         invalid-name,
+         missing-module-docstring,
+         missing-function-docstring,
+         missing-class-docstring,
+         too-many-arguments,
+         too-many-locals,
+         too-many-branches,
+         too-many-statements,
+         global-variable-undefined,
+         import-outside-toplevel,
+         singleton-comparison,
+         too-many-lines,
+         duplicate-code,
+         bare-except,
+         cyclic-import,
+         global-statement,
+         no-member,
+         no-name-in-module,
+         unrecognized-option,
+         consider-using-dict-items,
+         consider-iterating-dictionary,
+         unexpected-keyword-arg
+ 
+ # Enable the message, report, category or checker with the given id(s). You can
+ # either give multiple identifier separated by comma (,) or put this option
+ # multiple time (only on the command line, not in the configuration file where
+ # it should appear only once). See also the "--disable" option for examples.
+ enable=c-extension-no-member
+ 
+ 
+ [METHOD_ARGS]
+ 
+ # List of qualified names (i.e., library.method) which require a timeout
+ # parameter e.g. 'requests.api.get,requests.api.post'
+ timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request
+ 
+ 
+ [MISCELLANEOUS]
+ 
+ # List of note tags to take in consideration, separated by a comma.
+ notes=FIXME,
+       XXX,
+       TODO
+ 
+ # Regular expression of note tags to take in consideration.
+ notes-rgx=
+ 
+ 
+ [REFACTORING]
+ 
+ # Maximum number of nested blocks for function / method body
+ max-nested-blocks=5
+ 
+ # Complete name of functions that never returns. When checking for
+ # inconsistent-return-statements if a never returning function is called then
+ # it will be considered as an explicit return statement and no message will be
+ # printed.
+ never-returning-functions=sys.exit,argparse.parse_error
+ 
+ 
+ [REPORTS]
+ 
+ # Python expression which should return a score less than or equal to 10. You
+ # have access to the variables 'fatal', 'error', 'warning', 'refactor',
+ # 'convention', and 'info' which contain the number of messages in each
+ # category, as well as 'statement' which is the total number of statements
+ # analyzed. This score is used by the global evaluation report (RP0004).
+ evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10))
+ 
+ # Template used to display messages. This is a python new-style format string
+ # used to format the message information. See doc for all details.
+ msg-template=
+ 
+ # Set the output format. Available formats are text, parseable, colorized, json
+ # and msvs (visual studio). You can also give a reporter class, e.g.
+ # mypackage.mymodule.MyReporterClass.
+ #output-format=
+ 
+ # Tells whether to display a full report or only the messages.
+ reports=no
+ 
+ # Activate the evaluation score.
+ score=yes
+ 
+ 
+ [SIMILARITIES]
+ 
+ # Comments are removed from the similarity computation
+ ignore-comments=yes
+ 
+ # Docstrings are removed from the similarity computation
+ ignore-docstrings=yes
+ 
+ # Imports are removed from the similarity computation
+ ignore-imports=yes
+ 
+ # Signatures are removed from the similarity computation
+ ignore-signatures=yes
+ 
+ # Minimum lines number of a similarity.
+ min-similarity-lines=4
+ 
+ 
+ [SPELLING]
+ 
+ # Limits count of emitted suggestions for spelling mistakes.
+ max-spelling-suggestions=4
+ 
+ # Spelling dictionary name. Available dictionaries: none. To make it work,
+ # install the 'python-enchant' package.
+ spelling-dict=
+ 
+ # List of comma separated words that should be considered directives if they
+ # appear at the beginning of a comment and should not be checked.
+ spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy:
+ 
+ # List of comma separated words that should not be checked.
+ spelling-ignore-words=
+ 
+ # A path to a file that contains the private dictionary; one word per line.
+ spelling-private-dict-file=
+ 
+ # Tells whether to store unknown words to the private dictionary (see the
+ # --spelling-private-dict-file option) instead of raising a message.
+ spelling-store-unknown-words=no
+ 
+ 
+ [STRING]
+ 
+ # This flag controls whether inconsistent-quotes generates a warning when the
+ # character used as a quote delimiter is used inconsistently within a module.
+ check-quote-consistency=no
+ 
+ # This flag controls whether the implicit-str-concat should generate a warning
+ # on implicit string concatenation in sequences defined over several lines.
+ check-str-concat-over-line-jumps=no
+ 
+ 
+ [TYPECHECK]
+ 
+ # List of decorators that produce context managers, such as
+ # contextlib.contextmanager. Add to this list to register other decorators that
+ # produce valid context managers.
+ contextmanager-decorators=contextlib.contextmanager
+ 
+ # List of members which are set dynamically and missed by pylint inference
+ # system, and so shouldn't trigger E1101 when accessed. Python regular
+ # expressions are accepted.
+ generated-members=
+ 
+ # Tells whether to warn about missing members when the owner of the attribute
+ # is inferred to be None.
+ ignore-none=yes
+ 
+ # This flag controls whether pylint should warn about no-member and similar
+ # checks whenever an opaque object is returned when inferring. The inference
+ # can return multiple potential results while evaluating a Python object, but
+ # some branches might not be evaluated, which results in partial inference. In
+ # that case, it might be useful to still emit no-member and other checks for
+ # the rest of the inferred objects.
+ ignore-on-opaque-inference=yes
+ 
+ # List of symbolic message names to ignore for Mixin members.
+ ignored-checks-for-mixins=no-member,
+                           not-async-context-manager,
+                           not-context-manager,
+                           attribute-defined-outside-init
+ 
+ # List of class names for which member attributes should not be checked (useful
+ # for classes with dynamically set attributes). This supports the use of
+ # qualified names.
+ ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace
+ 
+ # Show a hint with possible names when a member name was not found. The aspect
+ # of finding the hint is based on edit distance.
+ missing-member-hint=yes
+ 
+ # The minimum edit distance a name should have in order to be considered a
+ # similar match for a missing member name.
+ missing-member-hint-distance=1
+ 
+ # The total number of similar names that should be taken in consideration when
+ # showing a hint for a missing member.
+ missing-member-max-choices=1
+ 
+ # Regex pattern to define which classes are considered mixins.
+ mixin-class-rgx=.*[Mm]ixin
+ 
+ # List of decorators that change the signature of a decorated function.
+ signature-mutators=
+ 
+ 
+ [VARIABLES]
+ 
+ # List of additional names supposed to be defined in builtins. Remember that
+ # you should avoid defining new builtins when possible.
+ additional-builtins=
+ 
+ # Tells whether unused global variables should be treated as a violation.
+ allow-global-unused-variables=yes
+ 
+ # List of names allowed to shadow builtins
+ allowed-redefined-builtins=
+ 
+ # List of strings which can identify a callback function by name. A callback
+ # name must start or end with one of those strings.
+ callbacks=cb_,
+           _cb
+ 
+ # A regular expression matching the name of dummy variables (i.e. expected to
+ # not be used).
+ dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+ 
+ # Argument names that match this expression will be ignored.
+ ignored-argument-names=_.*|^ignored_|^unused_
+ 
+ # Tells whether we should check for unused import in __init__ files.
+ init-import=no
+ 
+ # List of qualified module names which can have objects that can redefine
+ # builtins.
+ redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
.vscode/settings.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "python.linting.pylintEnabled": true,
+     "python.linting.enabled": true,
+     "python.linting.pylintUseMinimalCheckers": false,
+     "editor.formatOnSave": true,
+     "editor.renderWhitespace": "all",
+     "files.autoSave": "afterDelay",
+     "python.analysis.typeCheckingMode": "basic",
+     "python.formatting.provider": "black",
+     "python.formatting.blackArgs": ["--line-length=100"],
+     "editor.fontWeight": "normal",
+     "python.analysis.extraPaths": [
+         "./deepface"
+     ],
+     "black-formatter.args": [
+         "--line-length=100"
+     ]
+ }
CITATION.md ADDED
@@ -0,0 +1,41 @@
+ ## Cite DeepFace Papers
+ 
+ Please cite deepface in your publications if it helps your research. Here are its BibTeX entries:
+ 
+ ### Facial Recognition
+ 
+ If you use deepface in your research for facial recognition purposes, please cite this publication.
+ 
+ ```BibTeX
+ @inproceedings{serengil2020lightface,
+   title        = {LightFace: A Hybrid Deep Face Recognition Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
+   pages        = {23-27},
+   year         = {2020},
+   doi          = {10.1109/ASYU50717.2020.9259802},
+   url          = {https://doi.org/10.1109/ASYU50717.2020.9259802},
+   organization = {IEEE}
+ }
+ ```
+ 
+ ### Facial Attribute Analysis
+ 
+ If you use deepface in your research for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction, or for face detection purposes, please cite this publication.
+ 
+ ```BibTeX
+ @inproceedings{serengil2021lightface,
+   title        = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
+   author       = {Serengil, Sefik Ilkin and Ozpinar, Alper},
+   booktitle    = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
+   pages        = {1-4},
+   year         = {2021},
+   doi          = {10.1109/ICEET53442.2021.9659697},
+   url          = {https://doi.org/10.1109/ICEET53442.2021.9659697},
+   organization = {IEEE}
+ }
+ ```
+ 
+ ### Repositories
+ 
+ Also, if you use deepface in your GitHub projects, please add `deepface` to your `requirements.txt`. Thereafter, your project will be listed in its [dependency graph](https://github.com/serengil/deepface/network/dependents).
Dockerfile ADDED
@@ -0,0 +1,56 @@
+ # base image
+ FROM python:3.8.12
+ LABEL org.opencontainers.image.source https://github.com/serengil/deepface
+ 
+ # -----------------------------------
+ # create required folders
+ RUN mkdir /app
+ RUN mkdir /app/deepface
+ 
+ # -----------------------------------
+ # switch to application directory
+ WORKDIR /app
+ 
+ # -----------------------------------
+ # update image os
+ RUN apt-get update
+ RUN apt-get install ffmpeg libsm6 libxext6 -y
+ 
+ # -----------------------------------
+ # copy required files from repo into image
+ COPY ./deepface /app/deepface
+ # even though we will use local requirements, this one is required to install deepface from source code
+ COPY ./requirements.txt /app/requirements.txt
+ COPY ./requirements_local /app/requirements_local.txt
+ COPY ./package_info.json /app/
+ COPY ./setup.py /app/
+ COPY ./README.md /app/
+ 
+ # -----------------------------------
+ # if you plan to use a GPU, you should install the 'tensorflow-gpu' package
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org tensorflow-gpu
+ 
+ # -----------------------------------
+ # install deepface from pypi release (might be out-of-date)
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org deepface
+ # -----------------------------------
+ # install dependencies - deepface is known to work with these dependency versions
+ RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -r /app/requirements_local.txt
+ # install deepface from source code (always up-to-date)
+ RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org -e .
+ 
+ # -----------------------------------
+ # some packages are optional in deepface. activate them if your task depends on one.
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org cmake==3.24.1.1
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org dlib==19.20.0
+ # RUN pip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org lightgbm==2.3.1
+ 
+ # -----------------------------------
+ # environment variables
+ ENV PYTHONUNBUFFERED=1
+ 
+ # -----------------------------------
+ # run the app (re-configure port if necessary)
+ WORKDIR /app/deepface/api/src
+ EXPOSE 15000
+ CMD ["gunicorn", "--workers=1", "--timeout=3600", "--bind=0.0.0.0:15000", "app:create_app()"]
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+ 
+ Copyright (c) 2019 Sefik Ilkin Serengil
+ 
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ 
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
Makefile ADDED
@@ -0,0 +1,8 @@
+ test:
+ 	cd tests && python -m pytest . -s --disable-warnings
+ 
+ lint:
+ 	python -m pylint deepface/ --fail-under=10
+ 
+ coverage:
+ 	pip install pytest-cov && cd tests && python -m pytest --cov=deepface
README.md ADDED
@@ -0,0 +1,377 @@
+ # deepface
+ 
+ <div align="center">
+ 
+ [![PyPI Downloads](https://static.pepy.tech/personalized-badge/deepface?period=total&units=international_system&left_color=grey&right_color=blue&left_text=pypi%20downloads)](https://pepy.tech/project/deepface)
+ [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/deepface?color=green&label=conda%20downloads)](https://anaconda.org/conda-forge/deepface)
+ [![Stars](https://img.shields.io/github/stars/serengil/deepface?color=yellow&style=flat&label=%E2%AD%90%20stars)](https://github.com/serengil/deepface/stargazers)
+ [![License](http://img.shields.io/:license-MIT-green.svg?style=flat)](https://github.com/serengil/deepface/blob/master/LICENSE)
+ [![Tests](https://github.com/serengil/deepface/actions/workflows/tests.yml/badge.svg)](https://github.com/serengil/deepface/actions/workflows/tests.yml)
+ 
+ [![Blog](https://img.shields.io/:blog-sefiks.com-blue.svg?style=flat&logo=wordpress)](https://sefiks.com)
+ [![YouTube](https://img.shields.io/:youtube-@sefiks-red.svg?style=flat&logo=youtube)](https://www.youtube.com/@sefiks?sub_confirmation=1)
+ [![Twitter](https://img.shields.io/:follow-@serengil-blue.svg?style=flat&logo=twitter)](https://twitter.com/intent/user?screen_name=serengil)
+ [![Support me on Patreon](https://img.shields.io/endpoint.svg?url=https%3A%2F%2Fshieldsio-patreon.vercel.app%2Fapi%3Fusername%3Dserengil%26type%3Dpatrons&style=flat)](https://www.patreon.com/serengil?repo=deepface)
+ [![GitHub Sponsors](https://img.shields.io/github/sponsors/serengil?logo=GitHub&color=lightgray)](https://github.com/sponsors/serengil)
+ 
+ [![DOI](http://img.shields.io/:DOI-10.1109/ASYU50717.2020.9259802-blue.svg?style=flat)](https://doi.org/10.1109/ASYU50717.2020.9259802)
+ [![DOI](http://img.shields.io/:DOI-10.1109/ICEET53442.2021.9659697-blue.svg?style=flat)](https://doi.org/10.1109/ICEET53442.2021.9659697)
+ 
+ </div>
+ 
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-icon-labeled.png" width="200" height="240"></p>
+ 
+ Deepface is a lightweight [face recognition](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/) and facial attribute analysis ([age](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [gender](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [emotion](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) and [race](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/)) framework for Python. It is a hybrid face recognition framework wrapping **state-of-the-art** models: [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`FaceNet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/), [`DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/), [`DeepID`](https://sefiks.com/2020/06/16/face-recognition-with-deepid-in-keras/), [`ArcFace`](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), `SFace` and `GhostFaceNet`.
+ 
+ Experiments show that human beings reach 97.53% accuracy on facial recognition tasks, while these models have already reached and surpassed that level.
+ 
+ ## Installation [![PyPI](https://img.shields.io/pypi/v/deepface.svg)](https://pypi.org/project/deepface/) [![Conda](https://img.shields.io/conda/vn/conda-forge/deepface.svg)](https://anaconda.org/conda-forge/deepface)
+ 
+ The easiest way to install deepface is to download it from [`PyPI`](https://pypi.org/project/deepface/). This installs the library itself along with its prerequisites.
+ 
+ ```shell
+ $ pip install deepface
+ ```
+ 
+ Secondly, DeepFace is also available at [`Conda`](https://anaconda.org/conda-forge/deepface), so you can alternatively install the package via conda.
+ 
+ ```shell
+ $ conda install -c conda-forge deepface
+ ```
+ 
+ Thirdly, you can install deepface from its source code.
+ 
+ ```shell
+ $ git clone https://github.com/serengil/deepface.git
+ $ cd deepface
+ $ pip install -e .
+ ```
+ 
+ Then you will be able to import the library and use its functionalities.
+ 
+ ```python
+ from deepface import DeepFace
+ ```
+ 
+ **Facial Recognition** - [`Demo`](https://youtu.be/WnUVYQP4h44)
+ 
+ A modern [**face recognition pipeline**](https://sefiks.com/2020/05/01/a-gentle-introduction-to-face-recognition-in-deep-learning/) consists of 5 common stages: [detect](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [align](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [normalize](https://sefiks.com/2020/11/20/facial-landmarks-for-face-recognition-with-dlib/), [represent](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/) and [verify](https://sefiks.com/2020/05/22/fine-tuning-the-threshold-in-face-recognition/). Deepface handles all of these common stages in the background, so you don't need in-depth knowledge of the processes behind them. You can just call its verification, find or analysis function with a single line of code.
+ 
+ **Face Verification** - [`Demo`](https://youtu.be/KRCvkNCOphE)
+ 
+ This function verifies whether a face pair belongs to the same person or to different persons. It expects exact image paths as inputs, though passing numpy or base64 encoded images is also welcome. It returns a dictionary, and you only need to check its verified key.
+ 
+ ```python
+ result = DeepFace.verify(img1_path = "img1.jpg", img2_path = "img2.jpg")
+ ```
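+ 
+ As a minimal sketch of consuming that dictionary, checking the verified key could look as follows (illustrative only; the printed messages are not part of the API):
+ 
+ ```python
+ result = DeepFace.verify(img1_path = "img1.jpg", img2_path = "img2.jpg")
+ 
+ # the verified key states whether both images belong to the same person
+ if result["verified"]:
+     print("these images are of the same person")
+ else:
+     print("these images are of different persons")
+ ```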
+ 
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-1.jpg" width="95%" height="95%"></p>
+ 
+ **Face recognition** - [`Demo`](https://youtu.be/Hrjp-EStM_s)
+ 
+ [Face recognition](https://sefiks.com/2020/05/25/large-scale-face-recognition-for-deep-learning/) requires applying face verification many times. Herein, deepface has an out-of-the-box find function to handle this action. It looks for the identity of the input image in the database path and returns a list of pandas data frames as output. Meanwhile, facial embeddings of the facial database are stored in a pickle file so that they can be searched faster next time. The result list has one entry per face appearing in the source image. Besides, target images in the database can contain many faces as well.
+ 
+ ```python
+ dfs = DeepFace.find(img_path = "img1.jpg", db_path = "C:/workspace/my_db")
+ ```
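+ 
+ A minimal sketch of consuming that output could look as follows; the identity column is assumed here, and other column names may differ between versions:
+ 
+ ```python
+ dfs = DeepFace.find(img_path = "img1.jpg", db_path = "C:/workspace/my_db")
+ 
+ for df in dfs:  # one data frame per face detected in the source image
+     # each row is a candidate identity from the database path
+     print(df["identity"].head())
+ ```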
+ 
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-6-v2.jpg" width="95%" height="95%"></p>
+ 
+ **Embeddings**
+ 
+ Face recognition models basically represent facial images as multi-dimensional vectors. Sometimes, you need those embedding vectors directly. DeepFace comes with a dedicated representation function for this: represent returns a list of embeddings, with one entry per face appearing in the image path.
+ 
+ ```python
+ embedding_objs = DeepFace.represent(img_path = "img.jpg")
+ ```
+ 
+ Each entry of the result carries an embedding array. The size of the embedding array differs based on the model name. For instance, VGG-Face is the default model and it represents facial images as 4096-dimensional vectors.
+ 
+ ```python
+ embedding = embedding_objs[0]["embedding"]
+ assert isinstance(embedding, list)
+ # VGG-Face is the default model and produces 4096-dimensional embeddings
+ assert len(embedding) == 4096
+ ```
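+ 
+ As a sketch of how the dimensionality follows the model choice, you can pass a different model name to the same function; the 128 dimensions below rely on the assumption that FaceNet's original 128-dimensional design is preserved here:
+ 
+ ```python
+ embedding_objs = DeepFace.represent(img_path = "img.jpg", model_name = "Facenet")
+ embedding = embedding_objs[0]["embedding"]
+ # Facenet is assumed to represent faces as 128-dimensional vectors
+ assert len(embedding) == 128
+ ```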
+ 
+ Here, the embedding is also [plotted](https://sefiks.com/2020/05/01/a-gentle-introduction-to-face-recognition-in-deep-learning/) with 4096 slots horizontally. Each slot corresponds to a dimension value in the embedding vector, and the dimension value is explained in the colorbar on the right. Similar to 2D barcodes, the vertical dimension stores no information in the illustration.
+ 
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/embedding.jpg" width="95%" height="95%"></p>
+ 
+ **Face recognition models** - [`Demo`](https://youtu.be/i_MOwvhbLdI)
+ 
+ Deepface is a **hybrid** face recognition package. It currently wraps many **state-of-the-art** face recognition models: [`VGG-Face`](https://sefiks.com/2018/08/06/deep-face-recognition-with-keras/), [`FaceNet`](https://sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/), [`OpenFace`](https://sefiks.com/2019/07/21/face-recognition-with-openface-in-keras/), [`DeepFace`](https://sefiks.com/2020/02/17/face-recognition-with-facebook-deepface-in-keras/), [`DeepID`](https://sefiks.com/2020/06/16/face-recognition-with-deepid-in-keras/), [`ArcFace`](https://sefiks.com/2020/12/14/deep-face-recognition-with-arcface-in-keras-and-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), `SFace` and `GhostFaceNet`. The default configuration uses the VGG-Face model.
+ 
+ ```python
+ models = [
+     "VGG-Face",
+     "Facenet",
+     "Facenet512",
+     "OpenFace",
+     "DeepFace",
+     "DeepID",
+     "ArcFace",
+     "Dlib",
+     "SFace",
+     "GhostFaceNet",
+ ]
+ 
+ # face verification
+ result = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+     model_name = models[0],
+ )
+ 
+ # face recognition
+ dfs = DeepFace.find(
+     img_path = "img1.jpg",
+     db_path = "C:/workspace/my_db",
+     model_name = models[1],
+ )
+ 
+ # embeddings
+ embedding_objs = DeepFace.represent(
+     img_path = "img.jpg",
+     model_name = models[2],
+ )
+ ```
+ 
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/model-portfolio-20240316.jpg" width="95%" height="95%"></p>
+ 
+ FaceNet, VGG-Face, ArcFace and Dlib are the [top-performing](https://youtu.be/i_MOwvhbLdI) ones based on experiments. You can find the scores of those models below on the [Labeled Faces in the Wild](https://sefiks.com/2020/08/27/labeled-faces-in-the-wild-for-face-recognition/) set, as declared by their creators.
+ 
+ | Model          | Declared LFW Score |
+ | -------------- | ------------------ |
+ | VGG-Face       | 98.9%              |
+ | Facenet        | 99.2%              |
+ | Facenet512     | 99.6%              |
+ | OpenFace       | 92.9%              |
+ | DeepID         | 97.4%              |
+ | Dlib           | 99.3%              |
+ | SFace          | 99.5%              |
+ | ArcFace        | 99.5%              |
+ | GhostFaceNet   | 99.7%              |
+ | *Human-beings* | *97.5%*            |
+ 
+ Conducting experiments with those models within DeepFace may reveal disparities compared to the original studies, owing to the adoption of distinct detection or normalization techniques. Furthermore, some models have been released solely with their backbones, lacking pre-trained weights. Thus, we are utilizing their re-implementations instead of the original pre-trained weights.
+ 
+ **Similarity**
+ 
+ Face recognition models are regular [convolutional neural networks](https://sefiks.com/2018/03/23/convolutional-autoencoder-clustering-images-with-neural-networks/) responsible for representing faces as vectors. We expect a face pair of the same person to be [more similar](https://sefiks.com/2020/05/22/fine-tuning-the-threshold-in-face-recognition/) than a face pair of different persons.
+ 
+ Similarity can be calculated with different metrics such as [cosine similarity](https://sefiks.com/2018/08/13/cosine-similarity-in-machine-learning/), Euclidean distance or L2-normalised Euclidean distance. The default configuration uses cosine similarity.
+ 
+ ```python
+ metrics = ["cosine", "euclidean", "euclidean_l2"]
+ 
+ # face verification
+ result = DeepFace.verify(
+     img1_path = "img1.jpg",
+     img2_path = "img2.jpg",
+     distance_metric = metrics[1],
+ )
+ 
+ # face recognition
+ dfs = DeepFace.find(
+     img_path = "img1.jpg",
+     db_path = "C:/workspace/my_db",
+     distance_metric = metrics[2],
+ )
+ ```
+ 
+ The Euclidean L2 form [seems](https://youtu.be/i_MOwvhbLdI) to be more stable than cosine and regular Euclidean distance based on experiments.
+ 
+ **Facial Attribute Analysis** - [`Demo`](https://youtu.be/GT2UeN85BdA)
+ 
+ Deepface also comes with a strong facial attribute analysis module covering [`age`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`gender`](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/), [`facial expression`](https://sefiks.com/2018/01/01/facial-expression-recognition-with-keras/) (including angry, fear, neutral, sad, disgust, happy and surprise) and [`race`](https://sefiks.com/2019/11/11/race-and-ethnicity-prediction-in-keras/) (including asian, white, middle eastern, indian, latino and black) predictions. The result has one entry per face appearing in the source image.
+ 
+ ```python
+ objs = DeepFace.analyze(
+     img_path = "img4.jpg",
+     actions = ['age', 'gender', 'race', 'emotion'],
+ )
+ ```
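+ 
+ Each entry of the result is a dictionary per detected face. A minimal sketch of reading it could look as follows; the key names are assumptions about the analyze payload, shown for illustration:
+ 
+ ```python
+ objs = DeepFace.analyze(img_path = "img4.jpg", actions = ['age', 'gender', 'race', 'emotion'])
+ 
+ for obj in objs:  # one dictionary per detected face; key names assumed
+     print(obj["age"])               # apparent age, e.g. 31
+     print(obj["dominant_gender"])   # e.g. "Woman"
+     print(obj["dominant_race"])     # e.g. "latino hispanic"
+     print(obj["dominant_emotion"])  # e.g. "happy"
+ ```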
189
+
190
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-2.jpg" width="95%" height="95%"></p>
191
+
192
+ Age model got ± 4.65 MAE; gender model got 97.44% accuracy, 96.29% precision and 95.05% recall as mentioned in its [tutorial](https://sefiks.com/2019/02/13/apparent-age-and-gender-prediction-in-keras/).
193
+
194
+
195
+ **Face Detectors** - [`Demo`](https://youtu.be/GZ2p2hj2H5k)
196
+
197
+ Face detection and alignment are important early stages of a modern face recognition pipeline. Experiments show that just alignment increases the face recognition accuracy almost 1%. [`OpenCV`](https://sefiks.com/2020/02/23/face-alignment-for-face-recognition-in-python-within-opencv/), [`Ssd`](https://sefiks.com/2020/08/25/deep-face-detection-with-opencv-in-python/), [`Dlib`](https://sefiks.com/2020/07/11/face-recognition-with-dlib-in-python/), [`MtCnn`](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/), `Faster MTCNN`, [`RetinaFace`](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/), [`MediaPipe`](https://sefiks.com/2022/01/14/deep-face-detection-with-mediapipe/), `Yolo`, `YuNet` and `CenterFace` detectors are wrapped in deepface.
198
+
199
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/detector-portfolio-v6.jpg" width="95%" height="95%"></p>
200
+
201
+ All deepface functions accept an optional `detector_backend` input argument, which lets you switch among these detectors. OpenCV is the default detector.
202
+
203
+ ```python
204
+ backends = [
205
+ 'opencv',
206
+ 'ssd',
207
+ 'dlib',
208
+ 'mtcnn',
209
+ 'fastmtcnn',
210
+ 'retinaface',
211
+ 'mediapipe',
212
+ 'yolov8',
213
+ 'yunet',
214
+ 'centerface',
215
+ ]
216
+
217
+ #face verification
218
+ obj = DeepFace.verify(img1_path = "img1.jpg",
219
+ img2_path = "img2.jpg",
220
+ detector_backend = backends[0]
221
+ )
222
+
223
+ #face recognition
224
+ dfs = DeepFace.find(img_path = "img.jpg",
225
+ db_path = "my_db",
226
+ detector_backend = backends[1]
227
+ )
228
+
229
+ #embeddings
230
+ embedding_objs = DeepFace.represent(img_path = "img.jpg",
231
+ detector_backend = backends[2]
232
+ )
233
+
234
+ #facial analysis
235
+ demographies = DeepFace.analyze(img_path = "img4.jpg",
236
+ detector_backend = backends[3]
237
+ )
238
+
239
+ #face detection and alignment
240
+ face_objs = DeepFace.extract_faces(img_path = "img.jpg",
241
+ detector_backend = backends[4]
242
+ )
243
+ ```
244
+
245
+ Face recognition models are CNNs and expect inputs of a fixed size, so resizing is required before representation. To avoid deformation, deepface adds black padding pixels according to the target size argument after detection and alignment, as sketched below.
246
+
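+ A minimal sketch of this idea, assuming OpenCV and NumPy (the helper below is illustrative, not deepface's internal function):
+
+ ```python
+ import cv2
+ import numpy as np
+
+ def resize_with_padding(face, target_size = (224, 224)):
+     # scale the face so it fits inside the target without distortion
+     factor = min(target_size[0] / face.shape[0], target_size[1] / face.shape[1])
+     resized = cv2.resize(face, (int(face.shape[1] * factor), int(face.shape[0] * factor)))
+     # fill the remaining area with black pixels
+     pad_h = target_size[0] - resized.shape[0]
+     pad_w = target_size[1] - resized.shape[1]
+     return np.pad(resized, ((0, pad_h), (0, pad_w), (0, 0)), mode = "constant")
+ ```
+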
247
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/detector-outputs-20240414.jpg" width="90%" height="90%"></p>
248
+
249
+ [RetinaFace](https://sefiks.com/2021/04/27/deep-face-detection-with-retinaface-in-python/) and [MTCNN](https://sefiks.com/2020/09/09/deep-face-detection-with-mtcnn-in-python/) seem to outperform the others in the detection and alignment stages, but they are much slower. If the speed of your pipeline matters more, you should use opencv or ssd; if accuracy matters more, you should use retinaface or mtcnn.
250
+
251
+ The performance of RetinaFace is very satisfactory even in crowds, as seen in the following illustration. It also comes with excellent facial landmark detection performance: the highlighted red points show facial landmarks such as the eyes, nose and mouth. That is why the alignment score of RetinaFace is high as well.
252
+
253
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/retinaface-results.jpeg" width="90%" height="90%">
254
+ <br><em>The Yellow Angels - Fenerbahce Women's Volleyball Team</em>
255
+ </p>
256
+
257
+ You can find out more about RetinaFace on this [repo](https://github.com/serengil/retinaface).
258
+
259
+ **Real Time Analysis** - [`Demo`](https://youtu.be/-c9sSJcx6wI)
260
+
261
+ You can run deepface on real-time video as well. The stream function accesses your webcam and applies both face recognition and facial attribute analysis. It starts analyzing a frame once it can focus on a face for 5 consecutive frames, and then it shows the results for 5 seconds.
262
+
263
+ ```python
264
+ DeepFace.stream(db_path = "C:/User/Sefik/Desktop/database")
265
+ ```
266
+
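+ The focus and display durations above correspond to the `frame_threshold` and `time_threshold` arguments of `stream` (both default to 5), so you can tune them. A small sketch:
+
+ ```python
+ DeepFace.stream(
+     db_path = "C:/User/Sefik/Desktop/database",
+     frame_threshold = 3, # require a face in 3 consecutive frames
+     time_threshold = 10, # show results for 10 seconds
+ )
+ ```
+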
267
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/stock-3.jpg" width="90%" height="90%"></p>
268
+
269
+ Even though face recognition is based on one-shot learning, you can use multiple face pictures of a person as well. You should rearrange your directory structure as illustrated below.
270
+
271
+ ```bash
272
+ user
273
+ ├── database
274
+ │ ├── Alice
275
+ │ │ ├── Alice1.jpg
276
+ │ │ ├── Alice2.jpg
277
+ │ ├── Bob
278
+ │ │ ├── Bob.jpg
279
+ ```
280
+
281
+ **API** - [`Demo`](https://youtu.be/HeKCQ6U9XmI)
282
+
283
+ DeepFace serves an API as well - see [`api folder`](https://github.com/serengil/deepface/tree/master/deepface/api/src) for more details. You can clone the deepface source code and run the API with the following command. It uses a gunicorn server to bring up a REST service, so you can call deepface from an external system such as a mobile app or website.
284
+
285
+ ```shell
286
+ cd scripts
287
+ ./service.sh
288
+ ```
289
+
290
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-api.jpg" width="90%" height="90%"></p>
291
+
292
+ Face recognition, facial attribute analysis and vector representation functions are covered in the API. These functions are expected to be called as HTTP POST requests. Default service endpoints will be `http://localhost:5000/verify` for face recognition, `http://localhost:5000/analyze` for facial attribute analysis, and `http://localhost:5000/represent` for vector representation. You can pass input images as exact image paths on your environment, base64 encoded strings or images on the web. [Here](https://github.com/serengil/deepface/tree/master/deepface/api/postman), you can find a postman project showing how these methods should be called.
293
+
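+ For instance, a verification call could look like the following. This is a minimal sketch that assumes the service is already running locally on port 5000 and that the `requests` package is installed:
+
+ ```python
+ import requests
+
+ resp = requests.post(
+     "http://localhost:5000/verify",
+     json = {"img1_path": "img1.jpg", "img2_path": "img2.jpg"},
+ )
+ print(resp.json())
+ ```
+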
294
+ **Dockerized Service**
295
+
296
+ You can deploy the deepface api on a kubernetes cluster with docker. The following [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh) will serve deepface on `localhost:5000`. You need to re-configure the [Dockerfile](https://github.com/serengil/deepface/blob/master/Dockerfile) if you want to change the port. Then, even if you do not have a development environment, you will be able to consume deepface services such as verify and analyze. You can also access the inside of the docker image to run deepface related commands. Please follow the instructions in the [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh).
297
+
298
+ ```shell
299
+ cd scripts
300
+ ./dockerize.sh
301
+ ```
302
+
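+ Under the hood this is standard Docker usage. A rough equivalent of what the script does is shown below; the image tag is illustrative, and the exact commands live in the shell script:
+
+ ```shell
+ # build the image from the repo's Dockerfile and expose the service on port 5000
+ docker build -t deepface .
+ docker run -p 5000:5000 deepface
+ ```
+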
303
+ <p align="center"><img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/deepface-dockerized-v2.jpg" width="50%" height="50%"></p>
304
+
305
+ **Command Line Interface** - [`Demo`](https://youtu.be/PKKTAr3ts2s)
306
+
307
+ DeepFace comes with a command line interface as well. You can access its functions from the command line as shown below. The `deepface` command expects the function name as the first argument and the function's arguments thereafter.
308
+
309
+ ```shell
310
+ #face verification
311
+ $ deepface verify -img1_path tests/dataset/img1.jpg -img2_path tests/dataset/img2.jpg
312
+
313
+ #facial analysis
314
+ $ deepface analyze -img_path tests/dataset/img1.jpg
315
+ ```
316
+
317
+ You can also run these commands if you are running deepface with docker. Please follow the instructions in the [shell script](https://github.com/serengil/deepface/blob/master/scripts/dockerize.sh#L17).
318
+
319
+ ## Contribution
320
+
321
+ Pull requests are more than welcome! If you are planning to contribute a large patch, please create an issue first to get any upfront questions or design decisions out of the way.
322
+
323
+ Before creating a PR, you should run the unit tests and linting locally with the `make test && make lint` command. Once a PR is opened, the GitHub test workflow runs automatically, and the unit test and linting jobs will be visible in [GitHub actions](https://github.com/serengil/deepface/actions) before approval.
324
+
325
+ ## Support
326
+
327
+ There are many ways to support a project - starring⭐️ the GitHub repo is just one 🙏
328
+
329
+ You can also support this work on [Patreon](https://www.patreon.com/serengil?repo=deepface) or [GitHub Sponsors](https://github.com/sponsors/serengil).
330
+
331
+ <a href="https://www.patreon.com/serengil?repo=deepface">
332
+ <img src="https://raw.githubusercontent.com/serengil/deepface/master/icon/patreon.png" width="30%" height="30%">
333
+ </a>
334
+
335
+ ## Citation
336
+
337
+ Please cite deepface in your publications if it helps your research - see [`CITATIONS`](https://github.com/serengil/deepface/blob/master/CITATION.md) for more details. Here are its BibTeX entries:
338
+
339
+ If you use deepface in your research for facial recognition purposes, please cite this publication.
340
+
341
+ ```BibTeX
342
+ @inproceedings{serengil2020lightface,
343
+ title = {LightFace: A Hybrid Deep Face Recognition Framework},
344
+ author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
345
+ booktitle = {2020 Innovations in Intelligent Systems and Applications Conference (ASYU)},
346
+ pages = {23-27},
347
+ year = {2020},
348
+ doi = {10.1109/ASYU50717.2020.9259802},
349
+ url = {https://ieeexplore.ieee.org/document/9259802},
350
+ organization = {IEEE}
351
+ }
352
+ ```
353
+
354
+ If you use deepface in your research for facial attribute analysis purposes such as age, gender, emotion or ethnicity prediction, please cite this publication.
355
+
356
+ ```BibTeX
357
+ @inproceedings{serengil2021lightface,
358
+ title = {HyperExtended LightFace: A Facial Attribute Analysis Framework},
359
+ author = {Serengil, Sefik Ilkin and Ozpinar, Alper},
360
+ booktitle = {2021 International Conference on Engineering and Emerging Technologies (ICEET)},
361
+ pages = {1-4},
362
+ year = {2021},
363
+ doi = {10.1109/ICEET53442.2021.9659697},
364
+ url = {https://ieeexplore.ieee.org/document/9659697},
365
+ organization = {IEEE}
366
+ }
367
+ ```
368
+
369
+ Also, if you use deepface in your GitHub projects, please add `deepface` to your `requirements.txt`.
370
+
371
+ ## Licence
372
+
373
+ DeepFace is licensed under the MIT License - see [`LICENSE`](https://github.com/serengil/deepface/blob/master/LICENSE) for more details.
374
+
375
+ DeepFace wraps some external face recognition models: [VGG-Face](http://www.robots.ox.ac.uk/~vgg/software/vgg_face/), [Facenet](https://github.com/davidsandberg/facenet/blob/master/LICENSE.md), [OpenFace](https://github.com/iwantooxxoox/Keras-OpenFace/blob/master/LICENSE), [DeepFace](https://github.com/swghosh/DeepFace), [DeepID](https://github.com/Ruoyiran/DeepID/blob/master/LICENSE.md), [ArcFace](https://github.com/leondgarse/Keras_insightface/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/dlib/LICENSE.txt), [SFace](https://github.com/opencv/opencv_zoo/blob/master/models/face_recognition_sface/LICENSE) and [GhostFaceNet](https://github.com/HamadYA/GhostFaceNets/blob/main/LICENSE). Besides, age, gender and race / ethnicity models were trained on the backbone of VGG-Face with transfer learning. Similarly, DeepFace wraps many face detectors: [OpenCv](https://github.com/opencv/opencv/blob/4.x/LICENSE), [Ssd](https://github.com/opencv/opencv/blob/master/LICENSE), [Dlib](https://github.com/davisking/dlib/blob/master/LICENSE.txt), [MtCnn](https://github.com/ipazc/mtcnn/blob/master/LICENSE), [Fast MtCnn](https://github.com/timesler/facenet-pytorch/blob/master/LICENSE.md), [RetinaFace](https://github.com/serengil/retinaface/blob/master/LICENSE), [MediaPipe](https://github.com/google/mediapipe/blob/master/LICENSE), [YuNet](https://github.com/ShiqiYu/libfacedetection/blob/master/LICENSE), [Yolo](https://github.com/derronqi/yolov8-face/blob/main/LICENSE) and [CenterFace](https://github.com/Star-Clouds/CenterFace/blob/master/LICENSE). You inherit those models' license types when you use them, so please check their licenses before using deepface for production purposes.
376
+
377
+ DeepFace [logo](https://thenounproject.com/term/face-recognition/2965879/) is created by [Adrien Coquet](https://thenounproject.com/coquet_adrien/) and it is licensed under [Creative Commons: By Attribution 3.0 License](https://creativecommons.org/licenses/by/3.0/).
Train.py ADDED
@@ -0,0 +1,55 @@
1
+
2
+ # from deepface import DeepFace
3
+ # import os
4
+ # models = [
5
+ # "VGG-Face",
6
+ # "Facenet",
7
+ # "Facenet512",
8
+ # "OpenFace",
9
+ # "DeepFace",
10
+ # "DeepID",
11
+ # "ArcFace",
12
+ # "Dlib",
13
+ # "SFace",
14
+ # ]
15
+
16
+ # metrics = ["cosine", "euclidean", "euclidean_l2"]
17
+
18
+ # backends = [
19
+ # 'opencv',
20
+ # 'ssd',
21
+ # 'dlib',
22
+ # 'mtcnn',
23
+ # 'retinaface',
24
+ # 'mediapipe',
25
+ # 'yolov8',
26
+ # 'yunet',
27
+ # 'fastmtcnn',
28
+ # ]
29
+
30
+ # # df = DeepFace.find(img_path='F:/projects/python/mafqoud/dataset/missing_people/m0.jpg'
31
+ # # , db_path='F:/projects/python/mafqoud/dataset/founded_people'
32
+ # # , enforce_detection = True
33
+ # # , model_name = models[2]
34
+ # # , distance_metric = metrics[2]
35
+ # # , detector_backend = backends[3])
36
+
37
+ # DeepFace.stream(db_path = "F:/deepface")
38
+
39
+ # base_dir = os.path.abspath(os.path.dirname(__file__))
40
+ # # base_dir = "f:\\"
41
+ # founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
42
+ # def get_main_directory():
43
+ # path = os.path.abspath(__file__)
44
+ # drive, _ = os.path.splitdrive(path)
45
+ # if not drive.endswith(os.path.sep):
46
+ # drive += os.path.sep
47
+ # return drive
48
+
49
+ # base_dir = get_main_directory()
50
+ # missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
51
+ # print(missing_dir)
52
+
53
+ # print(base_dir)
54
+ # print(missing_dir)
55
+ # print(founded_dir)
deepface/DeepFace.py ADDED
@@ -0,0 +1,585 @@
1
+ # common dependencies
2
+ import os
3
+ import warnings
4
+ import logging
5
+ from typing import Any, Dict, List, Union, Optional
6
+ from deepface.commons.os_path import os_path
7
+
8
+ # this has to be set before importing tensorflow
9
+ os.environ["TF_USE_LEGACY_KERAS"] = "1"
10
+
11
+ # pylint: disable=wrong-import-position
12
+
13
+ # 3rd party dependencies
14
+ import numpy as np
15
+ import pandas as pd
16
+ import tensorflow as tf
17
+
18
+ # package dependencies
19
+ from deepface.commons import package_utils, folder_utils
20
+ from deepface.commons import logger as log
21
+ from deepface.modules import (
22
+ modeling,
23
+ representation,
24
+ verification,
25
+ recognition,
26
+ demography,
27
+ detection,
28
+ streaming,
29
+ preprocessing,
30
+ cloudservice,
31
+ )
32
+ from deepface import __version__
33
+
34
+ logger = log.get_singletonish_logger()
35
+
36
+ # -----------------------------------
37
+ # configurations for dependencies
38
+
39
+ # users should install tf_keras package if they are using tf 2.16 or later versions
40
+ package_utils.validate_for_keras3()
41
+
42
+ warnings.filterwarnings("ignore")
43
+ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
44
+ tf_version = package_utils.get_tf_major_version()
45
+ if tf_version == 2:
46
+ tf.get_logger().setLevel(logging.ERROR)
47
+ # -----------------------------------
48
+
49
+ # create required folders if necessary to store model weights
50
+ folder_utils.initialize_folder()
51
+
52
+
53
+ def build_model(model_name: str) -> Any:
54
+ """
55
+ This function builds a deepface model
56
+ Args:
57
+ model_name (string): face recognition or facial attribute model
58
+ VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition
59
+ Age, Gender, Emotion, Race for facial attributes
60
+ Returns:
61
+ built_model
62
+ """
63
+ return modeling.build_model(model_name=model_name)
64
+
65
+
66
+ def verify(
67
+ img1_path: Union[str, np.ndarray, List[float]],
68
+ img2_path: Union[str, np.ndarray, List[float]],
69
+ model_name: str = "VGG-Face",
70
+ detector_backend: str = "opencv",
71
+ distance_metric: str = "cosine",
72
+ enforce_detection: bool = True,
73
+ align: bool = True,
74
+ expand_percentage: int = 0,
75
+ normalization: str = "base",
76
+ silent: bool = False,
77
+ ) -> Dict[str, Any]:
78
+ """
79
+ Verify if an image pair represents the same person or different persons.
80
+ Args:
81
+ img1_path (str or np.ndarray or List[float]): Path to the first image.
82
+ Accepts exact image path as a string, numpy array (BGR), base64 encoded images
83
+ or pre-calculated embeddings.
84
+
85
+ img2_path (str or np.ndarray or List[float]): Path to the second image.
86
+ Accepts exact image path as a string, numpy array (BGR), base64 encoded images
87
+ or pre-calculated embeddings.
88
+
89
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
90
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
91
+
92
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
93
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
94
+ (default is opencv).
95
+
96
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
97
+ 'euclidean', 'euclidean_l2' (default is cosine).
98
+
99
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
100
+ Set to False to avoid the exception for low-resolution images (default is True).
101
+
102
+ align (bool): Flag to enable face alignment (default is True).
103
+
104
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
105
+
106
+ normalization (string): Normalize the input image before feeding it to the model.
107
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base)
108
+
109
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
110
+ (default is False).
111
+
112
+ Returns:
113
+ result (dict): A dictionary containing verification results with following keys.
114
+
115
+ - 'verified' (bool): Indicates whether the images represent the same person (True)
116
+ or different persons (False).
117
+
118
+ - 'distance' (float): The distance measure between the face vectors.
119
+ A lower distance indicates higher similarity.
120
+
121
+ - 'max_threshold_to_verify' (float): The maximum threshold used for verification.
122
+ If the distance is below this threshold, the images are considered a match.
123
+
124
+ - 'model' (str): The chosen face recognition model.
125
+
126
+ - 'distance_metric' (str): The chosen similarity metric for measuring distances.
127
+
128
+ - 'facial_areas' (dict): Rectangular regions of interest for faces in both images.
129
+ - 'img1': {'x': int, 'y': int, 'w': int, 'h': int}
130
+ Region of interest for the first image.
131
+ - 'img2': {'x': int, 'y': int, 'w': int, 'h': int}
132
+ Region of interest for the second image.
133
+
134
+ - 'time' (float): Time taken for the verification process in seconds.
135
+ """
136
+
137
+ return verification.verify(
138
+ img1_path=img1_path,
139
+ img2_path=img2_path,
140
+ model_name=model_name,
141
+ detector_backend=detector_backend,
142
+ distance_metric=distance_metric,
143
+ enforce_detection=enforce_detection,
144
+ align=align,
145
+ expand_percentage=expand_percentage,
146
+ normalization=normalization,
147
+ silent=silent,
148
+ )
149
+
150
+
151
+ def analyze(
152
+ img_path: Union[str, np.ndarray],
153
+ actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
154
+ enforce_detection: bool = True,
155
+ detector_backend: str = "opencv",
156
+ align: bool = True,
157
+ expand_percentage: int = 0,
158
+ silent: bool = False,
159
+ ) -> List[Dict[str, Any]]:
160
+ """
161
+ Analyze facial attributes such as age, gender, emotion, and race in the provided image.
162
+ Args:
163
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
164
+ or a base64 encoded image. If the source image contains multiple faces, the result will
165
+ include information for each detected face.
166
+
167
+ actions (tuple): Attributes to analyze. The default is ('emotion', 'age', 'gender', 'race').
168
+ You can exclude some of these attributes from the analysis if needed.
169
+
170
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
171
+ Set to False to avoid the exception for low-resolution images (default is True).
172
+
173
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
174
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
175
+ (default is opencv).
176
+
180
+ align (boolean): Perform alignment based on the eye positions (default is True).
181
+
182
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
183
+
184
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
185
+ (default is False).
186
+
187
+ Returns:
188
+ results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary represents
189
+ the analysis results for a detected face. Each dictionary in the list contains the
190
+ following keys:
191
+
192
+ - 'region' (dict): Represents the rectangular region of the detected face in the image.
193
+ - 'x': x-coordinate of the top-left corner of the face.
194
+ - 'y': y-coordinate of the top-left corner of the face.
195
+ - 'w': Width of the detected face region.
196
+ - 'h': Height of the detected face region.
197
+
198
+ - 'age' (float): Estimated age of the detected face.
199
+
200
+ - 'face_confidence' (float): Confidence score for the detected face.
201
+ Indicates the reliability of the face detection.
202
+
203
+ - 'dominant_gender' (str): The dominant gender in the detected face.
204
+ Either "Man" or "Woman".
205
+
206
+ - 'gender' (dict): Confidence scores for each gender category.
207
+ - 'Man': Confidence score for the male gender.
208
+ - 'Woman': Confidence score for the female gender.
209
+
210
+ - 'dominant_emotion' (str): The dominant emotion in the detected face.
211
+ Possible values include "sad," "angry," "surprise," "fear," "happy,"
212
+ "disgust," and "neutral"
213
+
214
+ - 'emotion' (dict): Confidence scores for each emotion category.
215
+ - 'sad': Confidence score for sadness.
216
+ - 'angry': Confidence score for anger.
217
+ - 'surprise': Confidence score for surprise.
218
+ - 'fear': Confidence score for fear.
219
+ - 'happy': Confidence score for happiness.
220
+ - 'disgust': Confidence score for disgust.
221
+ - 'neutral': Confidence score for neutrality.
222
+
223
+ - 'dominant_race' (str): The dominant race in the detected face.
224
+ Possible values include "indian," "asian," "latino hispanic,"
225
+ "black," "middle eastern," and "white."
226
+
227
+ - 'race' (dict): Confidence scores for each race category.
228
+ - 'indian': Confidence score for Indian ethnicity.
229
+ - 'asian': Confidence score for Asian ethnicity.
230
+ - 'latino hispanic': Confidence score for Latino/Hispanic ethnicity.
231
+ - 'black': Confidence score for Black ethnicity.
232
+ - 'middle eastern': Confidence score for Middle Eastern ethnicity.
233
+ - 'white': Confidence score for White ethnicity.
234
+ """
235
+ return demography.analyze(
236
+ img_path=img_path,
237
+ actions=actions,
238
+ enforce_detection=enforce_detection,
239
+ detector_backend=detector_backend,
240
+ align=align,
241
+ expand_percentage=expand_percentage,
242
+ silent=silent,
243
+ )
244
+
245
+
246
+ def find(
247
+ img_path: Union[str, np.ndarray],
248
+ db_path: str,
249
+ model_name: str = "VGG-Face",
250
+ distance_metric: str = "cosine",
251
+ enforce_detection: bool = True,
252
+ detector_backend: str = "opencv",
253
+ align: bool = True,
254
+ expand_percentage: int = 0,
255
+ threshold: Optional[float] = None,
256
+ normalization: str = "base",
257
+ silent: bool = False,
258
+ ) -> List[pd.DataFrame]:
259
+ """
260
+ Identify individuals in a database
261
+ Args:
262
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
263
+ or a base64 encoded image. If the source image contains multiple faces, the result will
264
+ include information for each detected face.
265
+
266
+ db_path (string): Path to the folder containing image files. All detected faces
267
+ in the database will be considered in the decision-making process.
268
+
269
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
270
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
271
+
272
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
273
+ 'euclidean', 'euclidean_l2' (default is cosine).
274
+
275
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
276
+ Set to False to avoid the exception for low-resolution images (default is True).
277
+
278
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
279
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
280
+ (default is opencv).
281
+
282
+ align (boolean): Perform alignment based on the eye positions (default is True).
283
+
284
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
285
+
286
+ threshold (float): Specify a threshold to determine whether a pair represents the same
287
+ person or different individuals. This threshold is used for comparing distances.
288
+ If left unset, default pre-tuned threshold values will be applied based on the specified
289
+ model name and distance metric (default is None).
290
+
291
+ normalization (string): Normalize the input image before feeding it to the model.
292
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace (default is base).
293
+
294
+ silent (boolean): Suppress or allow some log messages for a quieter analysis process
295
+ (default is False).
296
+
297
+ Returns:
298
+ results (List[pd.DataFrame]): A list of pandas dataframes. Each dataframe corresponds
299
+ to the identity information for an individual detected in the source image.
300
+ The DataFrame columns include:
301
+
302
+ - 'identity': Identity label of the detected individual.
303
+
304
+ - 'target_x', 'target_y', 'target_w', 'target_h': Bounding box coordinates of the
305
+ target face in the database.
306
+
307
+ - 'source_x', 'source_y', 'source_w', 'source_h': Bounding box coordinates of the
308
+ detected face in the source image.
309
+
310
+ - 'threshold': the threshold used to determine whether a pair represents the same person or different persons
311
+
312
+ - 'distance': Distance between the face embeddings based on the
313
+ specified model and distance metric; lower values indicate higher similarity
314
+ """
315
+ return recognition.find(
316
+ img_path=img_path,
317
+ db_path=db_path,
318
+ model_name=model_name,
319
+ distance_metric=distance_metric,
320
+ enforce_detection=enforce_detection,
321
+ detector_backend=detector_backend,
322
+ align=align,
323
+ expand_percentage=expand_percentage,
324
+ threshold=threshold,
325
+ normalization=normalization,
326
+ silent=silent,
327
+ )
328
+
329
+
330
+ def represent(
331
+ img_path: Union[str, np.ndarray],
332
+ model_name: str = "VGG-Face",
333
+ enforce_detection: bool = True,
334
+ detector_backend: str = "opencv",
335
+ align: bool = True,
336
+ expand_percentage: int = 0,
337
+ normalization: str = "base",
338
+ ) -> List[Dict[str, Any]]:
339
+ """
340
+ Represent facial images as multi-dimensional vector embeddings.
341
+
342
+ Args:
343
+ img_path (str or np.ndarray): The exact path to the image, a numpy array in BGR format,
344
+ or a base64 encoded image. If the source image contains multiple faces, the result will
345
+ include information for each detected face.
346
+
347
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
348
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet
349
+ (default is VGG-Face).
350
+
351
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
352
+ Set to False to avoid the exception for low-resolution images
353
+ (default is True).
354
+
355
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
356
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
357
+ (default is opencv).
358
+
359
+ align (boolean): Perform alignment based on the eye positions (default is True).
360
+
361
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
362
+
363
+ normalization (string): Normalize the input image before feeding it to the model.
364
+ Options: base, raw, Facenet, Facenet2018, VGGFace, VGGFace2, ArcFace
365
+ (default is base).
366
+
367
+ Returns:
368
+ results (List[Dict[str, Any]]): A list of dictionaries, each containing the
369
+ following fields:
370
+
371
+ - embedding (List[float]): Multidimensional vector representing facial features.
372
+ The number of dimensions varies based on the reference model
373
+ (e.g., FaceNet returns 128 dimensions, VGG-Face returns 4096 dimensions).
374
+
375
+ - facial_area (dict): Detected facial area by face detection in dictionary format.
376
+ Contains 'x' and 'y' as the left-corner point, and 'w' and 'h'
377
+ as the width and height. If `detector_backend` is set to 'skip', it represents
378
+ the full image area and is nonsensical.
379
+
380
+ - face_confidence (float): Confidence score of face detection. If `detector_backend` is set
381
+ to 'skip', the confidence will be 0 and is nonsensical.
382
+ """
383
+ return representation.represent(
384
+ img_path=img_path,
385
+ model_name=model_name,
386
+ enforce_detection=enforce_detection,
387
+ detector_backend=detector_backend,
388
+ align=align,
389
+ expand_percentage=expand_percentage,
390
+ normalization=normalization,
391
+ )
392
+
393
+
394
+ def stream(
395
+ db_path: str = "",
396
+ model_name: str = "VGG-Face",
397
+ detector_backend: str = "opencv",
398
+ distance_metric: str = "cosine",
399
+ enable_face_analysis: bool = True,
400
+ source: Any = 0,
401
+ time_threshold: int = 5,
402
+ frame_threshold: int = 5,
403
+ ) -> None:
404
+ """
405
+ Run real time face recognition and facial attribute analysis
406
+
407
+ Args:
408
+ db_path (string): Path to the folder containing image files. All detected faces
409
+ in the database will be considered in the decision-making process.
410
+
411
+ model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512,
412
+ OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).
413
+
414
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
415
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
416
+ (default is opencv).
417
+
418
+ distance_metric (string): Metric for measuring similarity. Options: 'cosine',
419
+ 'euclidean', 'euclidean_l2' (default is cosine).
420
+
421
+ enable_face_analysis (bool): Flag to enable face analysis (default is True).
422
+
423
+ source (Any): The source for the video stream (default is 0, which represents the
424
+ default camera).
425
+
426
+ time_threshold (int): The time threshold (in seconds) for face recognition (default is 5).
427
+
428
+ frame_threshold (int): The frame threshold for face recognition (default is 5).
429
+ Returns:
430
+ None
431
+ """
432
+
433
+ time_threshold = max(time_threshold, 1)
434
+ frame_threshold = max(frame_threshold, 1)
435
+
436
+ streaming.analysis(
437
+ db_path=db_path,
438
+ model_name=model_name,
439
+ detector_backend=detector_backend,
440
+ distance_metric=distance_metric,
441
+ enable_face_analysis=enable_face_analysis,
442
+ source=source,
443
+ time_threshold=time_threshold,
444
+ frame_threshold=frame_threshold,
445
+ )
446
+
447
+
448
+ def extract_faces(
449
+ img_path: Union[str, np.ndarray],
450
+ detector_backend: str = "opencv",
451
+ enforce_detection: bool = True,
452
+ align: bool = True,
453
+ expand_percentage: int = 0,
454
+ grayscale: bool = False,
455
+ ) -> List[Dict[str, Any]]:
456
+ """
457
+ Extract faces from a given image
458
+
459
+ Args:
460
+ img_path (str or np.ndarray): Path to the first image. Accepts exact image path
461
+ as a string, numpy array (BGR), or base64 encoded images.
462
+
463
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
464
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
465
+ (default is opencv).
466
+
467
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
468
+ Set to False to avoid the exception for low-resolution images (default is True).
469
+
470
+ align (bool): Flag to enable face alignment (default is True).
471
+
472
+ expand_percentage (int): expand detected facial area with a percentage (default is 0).
473
+
474
+ grayscale (boolean): Flag to convert the image to grayscale before
475
+ processing (default is False).
476
+
477
+ Returns:
478
+ results (List[Dict[str, Any]]): A list of dictionaries, where each dictionary contains:
479
+
480
+ - "face" (np.ndarray): The detected face as a NumPy array.
481
+
482
+ - "facial_area" (Dict[str, Any]): The detected face's regions as a dictionary containing:
483
+ - keys 'x', 'y', 'w', 'h' with int values
484
+ - keys 'left_eye', 'right_eye' with a tuple of 2 ints as values. left and right eyes
485
+ are eyes on the left and right respectively with respect to the person itself
486
+ instead of observer.
487
+
488
+ - "confidence" (float): The confidence score associated with the detected face.
489
+ """
490
+
491
+ return detection.extract_faces(
492
+ img_path=img_path,
493
+ detector_backend=detector_backend,
494
+ enforce_detection=enforce_detection,
495
+ align=align,
496
+ expand_percentage=expand_percentage,
497
+ grayscale=grayscale,
498
+ )
499
+
500
+
501
+ def cli() -> None:
502
+ """
503
+ command line interface function will be offered in this block
504
+ """
505
+ import fire
506
+
507
+ fire.Fire()
508
+
509
+
510
+ # deprecated function(s)
511
+
512
+
513
+ def detectFace(
514
+ img_path: Union[str, np.ndarray],
515
+ target_size: tuple = (224, 224),
516
+ detector_backend: str = "opencv",
517
+ enforce_detection: bool = True,
518
+ align: bool = True,
519
+ ) -> Union[np.ndarray, None]:
520
+ """
521
+ Deprecated face detection function. Use extract_faces for the same functionality.
522
+
523
+ Args:
524
+ img_path (str or np.ndarray): Path to the first image. Accepts exact image path
525
+ as a string, numpy array (BGR), or base64 encoded images.
526
+
527
+ target_size (tuple): final shape of facial image. black pixels will be
528
+ added to resize the image (default is (224, 224)).
529
+
530
+ detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
531
+ 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
532
+ (default is opencv).
533
+
534
+ enforce_detection (boolean): If no face is detected in an image, raise an exception.
535
+ Set to False to avoid the exception for low-resolution images (default is True).
536
+
537
+ align (bool): Flag to enable face alignment (default is True).
538
+
539
+ Returns:
540
+ img (np.ndarray): detected (and aligned) facial area image as numpy array
541
+ """
542
+ logger.warn("Function detectFace is deprecated. Use extract_faces instead.")
543
+ face_objs = extract_faces(
544
+ img_path=img_path,
545
+ detector_backend=detector_backend,
546
+ enforce_detection=enforce_detection,
547
+ align=align,
548
+ grayscale=False,
549
+ )
550
+ extracted_face = None
551
+ if len(face_objs) > 0:
552
+ extracted_face = face_objs[0]["face"]
553
+ extracted_face = preprocessing.resize_image(img=extracted_face, target_size=target_size)
554
+ return extracted_face
555
+
556
+
557
+ def sync_datasets():
558
+ # Set the local directories
559
+ base_dir = os_path.get_main_directory()
560
+
561
+ missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
562
+ founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
563
+
564
+ # Ensure the directories exist
565
+ os.makedirs(missing_dir, exist_ok=True)
566
+ os.makedirs(founded_dir, exist_ok=True)
567
+
568
+ cloudservice.sync_folder('missing_people', missing_dir)
569
+
570
+ cloudservice.sync_folder('founded_people', founded_dir)
571
+
572
+ def delete_pkls():
573
+ # Set the local directories
574
+ base_dir = os_path.get_main_directory()
575
+
576
+ missing_dir = os.path.join(base_dir, 'mafqoud', 'images', 'missing_people')
577
+ founded_dir = os.path.join(base_dir, 'mafqoud', 'images', 'founded_people')
578
+
579
+ # Ensure the directories exist
580
+ os.makedirs(missing_dir, exist_ok=True)
581
+ os.makedirs(founded_dir, exist_ok=True)
582
+
583
+ cloudservice.delete_pkl_files(missing_dir)
584
+ cloudservice.delete_pkl_files(founded_dir)
585
+
deepface/__init__.py ADDED
@@ -0,0 +1 @@
1
+ __version__ = "0.0.90"
deepface/api/__init__.py ADDED
File without changes
deepface/api/postman/deepface-api.postman_collection.json ADDED
@@ -0,0 +1,102 @@
1
+ {
2
+ "info": {
3
+ "_postman_id": "4c0b144e-4294-4bdd-8072-bcb326b1fed2",
4
+ "name": "deepface-api",
5
+ "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
6
+ },
7
+ "item": [
8
+ {
9
+ "name": "Represent",
10
+ "request": {
11
+ "method": "POST",
12
+ "header": [],
13
+ "body": {
14
+ "mode": "raw",
15
+ "raw": "{\n \"model_name\": \"Facenet\",\n \"img\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\"\n}",
16
+ "options": {
17
+ "raw": {
18
+ "language": "json"
19
+ }
20
+ }
21
+ },
22
+ "url": {
23
+ "raw": "http://127.0.0.1:5000/represent",
24
+ "protocol": "http",
25
+ "host": [
26
+ "127",
27
+ "0",
28
+ "0",
29
+ "1"
30
+ ],
31
+ "port": "5000",
32
+ "path": [
33
+ "represent"
34
+ ]
35
+ }
36
+ },
37
+ "response": []
38
+ },
39
+ {
40
+ "name": "Face verification",
41
+ "request": {
42
+ "method": "POST",
43
+ "header": [],
44
+ "body": {
45
+ "mode": "raw",
46
+ "raw": " {\n \t\"img1_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img1.jpg\",\n \"img2_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/img2.jpg\",\n \"model_name\": \"Facenet\",\n \"detector_backend\": \"mtcnn\",\n \"distance_metric\": \"euclidean\"\n }",
47
+ "options": {
48
+ "raw": {
49
+ "language": "json"
50
+ }
51
+ }
52
+ },
53
+ "url": {
54
+ "raw": "http://127.0.0.1:5000/verify",
55
+ "protocol": "http",
56
+ "host": [
57
+ "127",
58
+ "0",
59
+ "0",
60
+ "1"
61
+ ],
62
+ "port": "5000",
63
+ "path": [
64
+ "verify"
65
+ ]
66
+ }
67
+ },
68
+ "response": []
69
+ },
70
+ {
71
+ "name": "Face analysis",
72
+ "request": {
73
+ "method": "POST",
74
+ "header": [],
75
+ "body": {
76
+ "mode": "raw",
77
+ "raw": "{\n \"img_path\": \"/Users/sefik/Desktop/deepface/tests/dataset/couple.jpg\",\n \"actions\": [\"age\", \"gender\", \"emotion\", \"race\"]\n}",
78
+ "options": {
79
+ "raw": {
80
+ "language": "json"
81
+ }
82
+ }
83
+ },
84
+ "url": {
85
+ "raw": "http://127.0.0.1:5000/analyze",
86
+ "protocol": "http",
87
+ "host": [
88
+ "127",
89
+ "0",
90
+ "0",
91
+ "1"
92
+ ],
93
+ "port": "5000",
94
+ "path": [
95
+ "analyze"
96
+ ]
97
+ }
98
+ },
99
+ "response": []
100
+ }
101
+ ]
102
+ }
deepface/api/src/__init__.py ADDED
File without changes
deepface/api/src/api.py ADDED
@@ -0,0 +1,10 @@
1
+ import argparse
2
+ import app
3
+ import os
4
+
5
+ if __name__ == "__main__":
6
+ deepface_app = app.create_app()
7
+ parser = argparse.ArgumentParser()
8
+ parser.add_argument("-p", "--port", type=int, default=int(os.getenv('DEFAULT_PORT')), help="Port of serving api")
9
+ args = parser.parse_args()
10
+ deepface_app.run(host="0.0.0.0", port=args.port)
deepface/api/src/app.py ADDED
@@ -0,0 +1,11 @@
1
+ # 3rd party dependencies
2
+ from flask import Flask
3
+ from deepface.api.src.modules.core.routes import blueprint
4
+
5
+
6
+ def create_app():
7
+ app = Flask(__name__)
8
+ app.register_blueprint(blueprint)
9
+ print(app.url_map)
10
+ return app
11
+
deepface/api/src/modules/__init__.py ADDED
File without changes
deepface/api/src/modules/core/__init__.py ADDED
File without changes
deepface/api/src/modules/core/routes.py ADDED
@@ -0,0 +1,207 @@
1
+ from flask import Blueprint, request, jsonify
2
+ from deepface.api.src.modules.core import service
3
+ from deepface.commons.logger import Logger
4
+ from deepface.commons.os_path import os_path
5
+ import json
6
+ import os
7
+
8
+ logger = Logger(module="api/src/routes.py")
9
+
10
+ blueprint = Blueprint("routes", __name__)
11
+
12
+
13
+ @blueprint.route("/")
14
+ def home():
15
+ return "<h1>Welcome to DeepFace API!</h1>"
16
+
17
+
18
+ @blueprint.route("/represent", methods=["POST"])
19
+ def represent():
20
+ input_args = request.get_json()
21
+
22
+ if input_args is None:
23
+ return {"message": "empty input set passed"}
24
+
25
+ img_path = input_args.get("img") or input_args.get("img_path")
26
+ if img_path is None:
27
+ return {"message": "you must pass img_path input"}
28
+
29
+ model_name = input_args.get("model_name", "VGG-Face")
30
+ detector_backend = input_args.get("detector_backend", "opencv")
31
+ enforce_detection = input_args.get("enforce_detection", True)
32
+ align = input_args.get("align", True)
33
+
34
+ obj = service.represent(
35
+ img_path=img_path,
36
+ model_name=model_name,
37
+ detector_backend=detector_backend,
38
+ enforce_detection=enforce_detection,
39
+ align=align,
40
+ )
41
+
42
+ logger.debug(obj)
43
+
44
+ return obj
45
+
46
+
47
+ @blueprint.route("/verify", methods=["POST"])
48
+ def verify():
49
+ input_args = request.get_json()
50
+
51
+ if input_args is None:
52
+ return {"message": "empty input set passed"}
53
+
54
+ img1_path = input_args.get("img1") or input_args.get("img1_path")
55
+ img2_path = input_args.get("img2") or input_args.get("img2_path")
56
+
57
+ if img1_path is None:
58
+ return {"message": "you must pass img1_path input"}
59
+
60
+ if img2_path is None:
61
+ return {"message": "you must pass img2_path input"}
62
+
63
+ model_name = input_args.get("model_name", "VGG-Face")
64
+ detector_backend = input_args.get("detector_backend", "opencv")
65
+ enforce_detection = input_args.get("enforce_detection", True)
66
+ distance_metric = input_args.get("distance_metric", "cosine")
67
+ align = input_args.get("align", True)
68
+
69
+ verification = service.verify(
70
+ img1_path=img1_path,
71
+ img2_path=img2_path,
72
+ model_name=model_name,
73
+ detector_backend=detector_backend,
74
+ distance_metric=distance_metric,
75
+ align=align,
76
+ enforce_detection=enforce_detection,
77
+ )
78
+
79
+ logger.debug(verification)
80
+
81
+ return verification
82
+
83
+
84
+ @blueprint.route("/analyze", methods=["POST"])
85
+ def analyze():
86
+ input_args = request.get_json()
87
+
88
+ if input_args is None:
89
+ return {"message": "empty input set passed"}
90
+
91
+ img_path = input_args.get("img") or input_args.get("img_path")
92
+ if img_path is None:
93
+ return {"message": "you must pass img_path input"}
94
+
95
+ detector_backend = input_args.get("detector_backend", "opencv")
96
+ enforce_detection = input_args.get("enforce_detection", True)
97
+ align = input_args.get("align", True)
98
+ actions = input_args.get("actions", ["age", "gender", "emotion", "race"])
99
+
100
+ demographies = service.analyze(
101
+ img_path=img_path,
102
+ actions=actions,
103
+ detector_backend=detector_backend,
104
+ enforce_detection=enforce_detection,
105
+ align=align,
106
+ )
107
+
108
+ logger.debug(demographies)
109
+
110
+ return demographies
111
+
112
+ @blueprint.route("/find", methods=["POST"])
113
+ def find():
114
+ input_args = request.get_json()
115
+
116
+ if input_args is None:
117
+ response = jsonify({'error': 'empty input set passed'})
118
+ response.status_code = 500
119
+ return response
120
+
121
+ img_name = input_args.get("img") or input_args.get("img_name")
122
+ img_type = input_args.get("img_type")
123
+
124
+ if img_name is None:
125
+ response = jsonify({'error': 'you must pass img_name input'})
126
+ response.status_code = 404
127
+ return response
128
+
129
+ if img_type == "missing" or img_type == "missing_person" or img_type == "missing_people" or img_type == "missing person" or img_type == "missing people" :
130
+
131
+ img_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'missing_people', img_name)
132
+ db_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'founded_people')
133
+
134
+ elif img_type == "founded" or img_type == "founded_person" or img_type == "founded_people" or img_type == "founded person" or img_type == "founded people" :
135
+
136
+ img_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'founded_people', img_name)
137
+ db_path = os.path.join(os_path.get_main_directory(), 'mafqoud', 'images', 'missing_people')
138
+
139
+ else:
140
+
141
+ response = jsonify({'error': 'img_type is not valid; it should be one of: missing, missing_person, missing_people, "missing person", "missing people", founded, founded_person, founded_people, "founded person" or "founded people"'})
142
+ response.status_code = 400
143
+ return response
144
+
145
+ logger.debug(img_path)
146
+ if not os.path.exists(img_path) or not os.path.isfile(img_path):
147
+ # If the image does not exist, return a JSON response with status code 404
148
+ response = jsonify({'error': 'Image not found'})
149
+ response.status_code = 404
150
+ return response
151
+
152
+
153
+ model_name = input_args.get("model_name", "Facenet512")
154
+ detector_backend = input_args.get("detector_backend", "mtcnn")
155
+ enforce_detection = input_args.get("enforce_detection", True)
156
+ distance_metric = input_args.get("distance_metric", "euclidean_l2")
157
+ align = input_args.get("align", True)
158
+
171
+ results = service.find(
172
+ img_path=img_path,
173
+ db_path=db_path,
174
+ model_name=model_name,
175
+ detector_backend=detector_backend,
176
+ distance_metric=distance_metric,
177
+ align=align,
178
+ enforce_detection=enforce_detection,
179
+ )
180
+
181
+ # Calculate similarity_percentage for each row
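+ # a distance of 0 maps to 100% similarity and a distance equal to the model's threshold maps to 0%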
182
+ results[0]['similarity_percentage'] = 100 - ((results[0]['distance'] / results[0]['threshold']) * 100)
183
+
184
+ data = []
185
+ for _, row in results[0].iterrows():
186
+ data.append({
187
+ "identity": row['identity'],
188
+ "similarity_percentage": row['similarity_percentage']
189
+ })
190
+
191
+ json_data = json.dumps(data, indent=4)
192
+
193
+
194
+ logger.debug(json_data)
195
+ return json_data
196
+
197
+
198
+ @blueprint.route("/dataset/sync", methods=["GET"])
199
+ def sync_datasets():
200
+ result = service.sync_datasets()
201
+ return jsonify(result)
202
+
203
+
204
+ @blueprint.route("/delete/pkls", methods=["GET"])
205
+ def delete_pkls():
206
+ result = service.delete_pkls()
207
+ return jsonify(result)
deepface/api/src/modules/core/service.py ADDED
@@ -0,0 +1,84 @@
1
+ from deepface import DeepFace
2
+
3
+ # pylint: disable=broad-except
4
+
5
+
6
+ def represent(img_path, model_name, detector_backend, enforce_detection, align):
7
+ try:
8
+ result = {}
9
+ embedding_objs = DeepFace.represent(
10
+ img_path=img_path,
11
+ model_name=model_name,
12
+ detector_backend=detector_backend,
13
+ enforce_detection=enforce_detection,
14
+ align=align,
15
+ )
16
+ result["results"] = embedding_objs
17
+ return result
18
+ except Exception as err:
19
+ return {"error": f"Exception while representing: {str(err)}"}, 400
20
+
21
+
22
+ def verify(
23
+ img1_path, img2_path, model_name, detector_backend, distance_metric, enforce_detection, align
24
+ ):
25
+ try:
26
+ obj = DeepFace.verify(
27
+ img1_path=img1_path,
28
+ img2_path=img2_path,
29
+ model_name=model_name,
30
+ detector_backend=detector_backend,
31
+ distance_metric=distance_metric,
32
+ align=align,
33
+ enforce_detection=enforce_detection,
34
+ )
35
+ return obj
36
+ except Exception as err:
37
+ return {"error": f"Exception while verifying: {str(err)}"}, 400
38
+
39
+
40
+ def analyze(img_path, actions, detector_backend, enforce_detection, align):
41
+ try:
42
+ result = {}
43
+ demographies = DeepFace.analyze(
44
+ img_path=img_path,
45
+ actions=actions,
46
+ detector_backend=detector_backend,
47
+ enforce_detection=enforce_detection,
48
+ align=align,
49
+ silent=True,
50
+ )
51
+ result["results"] = demographies
52
+ return result
53
+ except Exception as err:
54
+ return {"error": f"Exception while analyzing: {str(err)}"}, 400
55
+
56
+ def find(img_path, db_path, model_name, detector_backend, distance_metric, enforce_detection, align):
57
+ try:
58
+ obj = DeepFace.find(
59
+ img_path=img_path,
60
+ db_path=db_path,
61
+ model_name=model_name,
62
+ detector_backend=detector_backend,
63
+ distance_metric=distance_metric,
64
+ align=align,
65
+ enforce_detection=enforce_detection,
66
+ )
67
+ return obj
68
+ except Exception as err:
69
+ return {"error": f"Exception while Findind: {str(err)}"}, 400
70
+
71
+
72
+ def sync_datasets():
73
+ try:
74
+ DeepFace.sync_datasets()
75
+ return {'data': 'synced successfully'}, 200
76
+ except Exception as e:
77
+ return {'error': str(e)}, 400
78
+
79
+ def delete_pkls():
80
+ try:
81
+ DeepFace.delete_pkls()
82
+ return {'data': 'pkl files deleted successfully'}, 200
83
+ except Exception as e:
84
+ return {'error': str(e)}, 400
deepface/basemodels/ArcFace.py ADDED
@@ -0,0 +1,179 @@
1
+ import os
2
+ import gdown
3
+ from deepface.commons import package_utils, folder_utils
4
+ from deepface.models.FacialRecognition import FacialRecognition
5
+
6
+ from deepface.commons import logger as log
7
+
8
+ logger = log.get_singletonish_logger()
9
+
10
+ # pylint: disable=unsubscriptable-object
11
+
12
+ # --------------------------------
13
+ # dependency configuration
14
+
15
+ tf_version = package_utils.get_tf_major_version()
16
+
17
+ if tf_version == 1:
18
+ from keras.models import Model
19
+ from keras.engine import training
20
+ from keras.layers import (
21
+ ZeroPadding2D,
22
+ Input,
23
+ Conv2D,
24
+ BatchNormalization,
25
+ PReLU,
26
+ Add,
27
+ Dropout,
28
+ Flatten,
29
+ Dense,
30
+ )
31
+ else:
32
+ from tensorflow.keras.models import Model
33
+ from tensorflow.python.keras.engine import training
34
+ from tensorflow.keras.layers import (
35
+ ZeroPadding2D,
36
+ Input,
37
+ Conv2D,
38
+ BatchNormalization,
39
+ PReLU,
40
+ Add,
41
+ Dropout,
42
+ Flatten,
43
+ Dense,
44
+ )
45
+
46
+ # pylint: disable=too-few-public-methods
47
+ class ArcFaceClient(FacialRecognition):
48
+ """
49
+ ArcFace model class
50
+ """
51
+
52
+ def __init__(self):
53
+ self.model = load_model()
54
+ self.model_name = "ArcFace"
55
+ self.input_shape = (112, 112)
56
+ self.output_shape = 512
57
+
58
+
59
+ def load_model(
60
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
61
+ ) -> Model:
62
+ """
63
+ Construct ArcFace model, download its weights and load
64
+ Returns:
65
+ model (Model)
66
+ """
67
+ base_model = ResNet34()
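+ # the embedding head added on top of ResNet34: batch norm -> dropout -> flatten -> 512-d dense -> batch norm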
68
+ inputs = base_model.inputs[0]
69
+ arcface_model = base_model.outputs[0]
70
+ arcface_model = BatchNormalization(momentum=0.9, epsilon=2e-5)(arcface_model)
71
+ arcface_model = Dropout(0.4)(arcface_model)
72
+ arcface_model = Flatten()(arcface_model)
73
+ arcface_model = Dense(512, activation=None, use_bias=True, kernel_initializer="glorot_normal")(
74
+ arcface_model
75
+ )
76
+ embedding = BatchNormalization(momentum=0.9, epsilon=2e-5, name="embedding", scale=True)(
77
+ arcface_model
78
+ )
79
+ model = Model(inputs, embedding, name=base_model.name)
80
+
81
+ # ---------------------------------------
82
+ # check the availability of pre-trained weights
83
+
84
+ home = folder_utils.get_deepface_home()
85
+
86
+ file_name = "arcface_weights.h5"
87
+ output = home + "/.deepface/weights/" + file_name
88
+
89
+ if not os.path.isfile(output):
90
+
91
+ logger.info(f"{file_name} will be downloaded to {output}")
92
+ gdown.download(url, output, quiet=False)
93
+
94
+ # ---------------------------------------
95
+
96
+ model.load_weights(output)
97
+
98
+ return model
99
+
100
+
101
+ def ResNet34() -> Model:
102
+ """
103
+ ResNet34 model
104
+ Returns:
105
+ model (Model)
106
+ """
107
+ img_input = Input(shape=(112, 112, 3))
108
+
109
+ x = ZeroPadding2D(padding=1, name="conv1_pad")(img_input)
110
+ x = Conv2D(
111
+ 64, 3, strides=1, use_bias=False, kernel_initializer="glorot_normal", name="conv1_conv"
112
+ )(x)
113
+ x = BatchNormalization(axis=3, epsilon=2e-5, momentum=0.9, name="conv1_bn")(x)
114
+ x = PReLU(shared_axes=[1, 2], name="conv1_prelu")(x)
115
+ x = stack_fn(x)
116
+
117
+ model = training.Model(img_input, x, name="ResNet34")
118
+
119
+ return model
120
+
121
+
122
+ def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
123
+ bn_axis = 3
124
+
125
+ if conv_shortcut:
126
+ shortcut = Conv2D(
127
+ filters,
128
+ 1,
129
+ strides=stride,
130
+ use_bias=False,
131
+ kernel_initializer="glorot_normal",
132
+ name=name + "_0_conv",
133
+ )(x)
134
+ shortcut = BatchNormalization(
135
+ axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_0_bn"
136
+ )(shortcut)
137
+ else:
138
+ shortcut = x
139
+
140
+ x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_1_bn")(x)
141
+ x = ZeroPadding2D(padding=1, name=name + "_1_pad")(x)
142
+ x = Conv2D(
143
+ filters,
144
+ 3,
145
+ strides=1,
146
+ kernel_initializer="glorot_normal",
147
+ use_bias=False,
148
+ name=name + "_1_conv",
149
+ )(x)
150
+ x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_2_bn")(x)
151
+ x = PReLU(shared_axes=[1, 2], name=name + "_1_prelu")(x)
152
+
153
+ x = ZeroPadding2D(padding=1, name=name + "_2_pad")(x)
154
+ x = Conv2D(
155
+ filters,
156
+ kernel_size,
157
+ strides=stride,
158
+ kernel_initializer="glorot_normal",
159
+ use_bias=False,
160
+ name=name + "_2_conv",
161
+ )(x)
162
+ x = BatchNormalization(axis=bn_axis, epsilon=2e-5, momentum=0.9, name=name + "_3_bn")(x)
163
+
164
+ x = Add(name=name + "_add")([shortcut, x])
165
+ return x
166
+
167
+
168
+ def stack1(x, filters, blocks, stride1=2, name=None):
169
+ x = block1(x, filters, stride=stride1, name=name + "_block1")
170
+ for i in range(2, blocks + 1):
171
+ x = block1(x, filters, conv_shortcut=False, name=name + "_block" + str(i))
172
+ return x
173
+
174
+
175
+ def stack_fn(x):
176
+ x = stack1(x, 64, 3, name="conv2")
177
+ x = stack1(x, 128, 4, name="conv3")
178
+ x = stack1(x, 256, 6, name="conv4")
179
+ return stack1(x, 512, 3, name="conv5")
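Editor's note: the function above attaches a BatchNorm / Dropout / Flatten / Dense(512) embedding head to the ResNet34 backbone and loads the released weights. A minimal usage sketch follows, not part of the diff; it assumes the module is importable as `deepface.basemodels.ArcFace` and uses a random array as a stand-in for an aligned 112x112 face crop.

```python
# Minimal sketch: build the ArcFace model via this module's load_model()
# and compute one embedding. The random input is a placeholder for a
# properly detected, aligned and normalized face crop.
import numpy as np
from deepface.basemodels import ArcFace

model = ArcFace.load_model()  # downloads arcface_weights.h5 on first call
face = np.random.rand(1, 112, 112, 3).astype(np.float32)  # stand-in crop
embedding = model.predict(face)[0]  # output of the "embedding" layer
print(embedding.shape)  # (512,)
```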
deepface/basemodels/DeepID.py ADDED
@@ -0,0 +1,99 @@
+import os
+import gdown
+from deepface.commons import package_utils, folder_utils
+from deepface.models.FacialRecognition import FacialRecognition
+from deepface.commons import logger as log
+
+logger = log.get_singletonish_logger()
+
+tf_version = package_utils.get_tf_major_version()
+
+if tf_version == 1:
+    from keras.models import Model
+    from keras.layers import (
+        Conv2D,
+        Activation,
+        Input,
+        Add,
+        MaxPooling2D,
+        Flatten,
+        Dense,
+        Dropout,
+    )
+else:
+    from tensorflow.keras.models import Model
+    from tensorflow.keras.layers import (
+        Conv2D,
+        Activation,
+        Input,
+        Add,
+        MaxPooling2D,
+        Flatten,
+        Dense,
+        Dropout,
+    )
+
+# pylint: disable=line-too-long
+
+
+# -------------------------------------
+
+# pylint: disable=too-few-public-methods
+class DeepIdClient(FacialRecognition):
+    """
+    DeepId model class
+    """
+
+    def __init__(self):
+        self.model = load_model()
+        self.model_name = "DeepId"
+        self.input_shape = (47, 55)
+        self.output_shape = 160
+
+
+def load_model(
+    url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
+) -> Model:
+    """
+    Construct DeepId model, download its weights and load
+    """
+
+    myInput = Input(shape=(55, 47, 3))
+
+    x = Conv2D(20, (4, 4), name="Conv1", activation="relu", input_shape=(55, 47, 3))(myInput)
+    x = MaxPooling2D(pool_size=2, strides=2, name="Pool1")(x)
+    x = Dropout(rate=0.99, name="D1")(x)
+
+    x = Conv2D(40, (3, 3), name="Conv2", activation="relu")(x)
+    x = MaxPooling2D(pool_size=2, strides=2, name="Pool2")(x)
+    x = Dropout(rate=0.99, name="D2")(x)
+
+    x = Conv2D(60, (3, 3), name="Conv3", activation="relu")(x)
+    x = MaxPooling2D(pool_size=2, strides=2, name="Pool3")(x)
+    x = Dropout(rate=0.99, name="D3")(x)
+
+    x1 = Flatten()(x)
+    fc11 = Dense(160, name="fc11")(x1)
+
+    x2 = Conv2D(80, (2, 2), name="Conv4", activation="relu")(x)
+    x2 = Flatten()(x2)
+    fc12 = Dense(160, name="fc12")(x2)
+
+    y = Add()([fc11, fc12])
+    y = Activation("relu", name="deepid")(y)
+
+    model = Model(inputs=[myInput], outputs=y)
+
+    # ---------------------------------
+
+    home = folder_utils.get_deepface_home()
+
+    if not os.path.isfile(home + "/.deepface/weights/deepid_keras_weights.h5"):
+        logger.info("deepid_keras_weights.h5 will be downloaded...")
+
+        output = home + "/.deepface/weights/deepid_keras_weights.h5"
+        gdown.download(url, output, quiet=False)
+
+    model.load_weights(home + "/.deepface/weights/deepid_keras_weights.h5")
+
+    return model
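Editor's note: DeepID fuses two feature paths, a flattened Conv3 output through `fc11` and a Conv4 output through `fc12`, by element-wise addition into a single 160-d embedding. A minimal sketch, not part of the diff, assuming the module imports as `deepface.basemodels.DeepID`; the random array stands in for a 55x47 face crop.

```python
# Minimal sketch: build the DeepID graph and confirm the fused
# two-branch head yields one 160-dimensional embedding.
import numpy as np
from deepface.basemodels import DeepID

model = DeepID.load_model()  # downloads deepid_keras_weights.h5 on first call
face = np.random.rand(1, 55, 47, 3).astype(np.float32)  # height 55, width 47
print(model.predict(face).shape)  # (1, 160)
```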
deepface/basemodels/Dlib.py ADDED
@@ -0,0 +1,89 @@
+from typing import List
+import os
+import bz2
+import gdown
+import numpy as np
+from deepface.commons import folder_utils
+from deepface.models.FacialRecognition import FacialRecognition
+from deepface.commons import logger as log
+
+logger = log.get_singletonish_logger()
+
+# pylint: disable=too-few-public-methods
+
+
+class DlibClient(FacialRecognition):
+    """
+    Dlib model class
+    """
+
+    def __init__(self):
+        self.model = DlibResNet()
+        self.model_name = "Dlib"
+        self.input_shape = (150, 150)
+        self.output_shape = 128
+
+    def forward(self, img: np.ndarray) -> List[float]:
+        """
+        Find embeddings with Dlib model.
+        This model necessitates the override of the forward method
+        because it is not a keras model.
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # return self.model.predict(img)[0].tolist()
+
+        # extract_faces returns 4 dimensional images
+        if len(img.shape) == 4:
+            img = img[0]
+
+        # bgr to rgb
+        img = img[:, :, ::-1]
+
+        # img is in scale of [0, 1] but expected [0, 255]
+        if img.max() <= 1:
+            img = img * 255
+
+        img = img.astype(np.uint8)
+
+        img_representation = self.model.model.compute_face_descriptor(img)
+        img_representation = np.array(img_representation)
+        img_representation = np.expand_dims(img_representation, axis=0)
+        return img_representation[0].tolist()
+
+
+class DlibResNet:
+    def __init__(self):
+
+        # dlib is an optional dependency; do not import it at the global level
+        try:
+            import dlib
+        except ModuleNotFoundError as e:
+            raise ImportError(
+                "Dlib is an optional dependency, ensure the library is installed. "
+                "Please install using 'pip install dlib'"
+            ) from e
+
+        home = folder_utils.get_deepface_home()
+        weight_file = home + "/.deepface/weights/dlib_face_recognition_resnet_model_v1.dat"
+
+        # download pre-trained model if it does not exist
+        if not os.path.isfile(weight_file):
+            logger.info("dlib_face_recognition_resnet_model_v1.dat is going to be downloaded")
+
+            file_name = "dlib_face_recognition_resnet_model_v1.dat.bz2"
+            url = f"http://dlib.net/files/{file_name}"
+            output = f"{home}/.deepface/weights/{file_name}"
+            gdown.download(url, output, quiet=False)
+
+            zipfile = bz2.BZ2File(output)
+            data = zipfile.read()
+            newfilepath = output[:-4]  # discard .bz2 extension
+            with open(newfilepath, "wb") as f:
+                f.write(data)
+
+        self.model = dlib.face_recognition_model_v1(weight_file)
+
+        # return None  # __init__ must return None
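Editor's note: unlike the Keras-backed models, this client overrides `forward` to hand a BGR crop in [0, 1] to dlib's `compute_face_descriptor` as an RGB uint8 image. A minimal sketch, not part of the diff, assuming `dlib` is installed and using a random 150x150 array as a stand-in for an aligned face chip.

```python
# Minimal sketch: first construction downloads and unpacks the .dat.bz2
# weight file, then forward() returns a 128-float embedding.
import numpy as np
from deepface.basemodels.Dlib import DlibClient

client = DlibClient()
face = np.random.rand(150, 150, 3).astype(np.float32)  # stand-in crop
embedding = client.forward(face)
print(len(embedding))  # 128
```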
deepface/basemodels/Facenet.py ADDED
@@ -0,0 +1,1715 @@
+import os
+import gdown
+from deepface.commons import package_utils, folder_utils
+from deepface.models.FacialRecognition import FacialRecognition
+from deepface.commons import logger as log
+
+logger = log.get_singletonish_logger()
+
+# --------------------------------
+# dependency configuration
+
+tf_version = package_utils.get_tf_major_version()
+
+if tf_version == 1:
+    from keras.models import Model
+    from keras.layers import Activation
+    from keras.layers import BatchNormalization
+    from keras.layers import Concatenate
+    from keras.layers import Conv2D
+    from keras.layers import Dense
+    from keras.layers import Dropout
+    from keras.layers import GlobalAveragePooling2D
+    from keras.layers import Input
+    from keras.layers import Lambda
+    from keras.layers import MaxPooling2D
+    from keras.layers import add
+    from keras import backend as K
+else:
+    from tensorflow.keras.models import Model
+    from tensorflow.keras.layers import Activation
+    from tensorflow.keras.layers import BatchNormalization
+    from tensorflow.keras.layers import Concatenate
+    from tensorflow.keras.layers import Conv2D
+    from tensorflow.keras.layers import Dense
+    from tensorflow.keras.layers import Dropout
+    from tensorflow.keras.layers import GlobalAveragePooling2D
+    from tensorflow.keras.layers import Input
+    from tensorflow.keras.layers import Lambda
+    from tensorflow.keras.layers import MaxPooling2D
+    from tensorflow.keras.layers import add
+    from tensorflow.keras import backend as K
+
+# --------------------------------
+
+# pylint: disable=too-few-public-methods
+class FaceNet128dClient(FacialRecognition):
+    """
+    FaceNet-128d model class
+    """
+
+    def __init__(self):
+        self.model = load_facenet128d_model()
+        self.model_name = "FaceNet-128d"
+        self.input_shape = (160, 160)
+        self.output_shape = 128
+
+
+class FaceNet512dClient(FacialRecognition):
+    """
+    FaceNet-512d model class
+    """
+
+    def __init__(self):
+        self.model = load_facenet512d_model()
+        self.model_name = "FaceNet-512d"
+        self.input_shape = (160, 160)
+        self.output_shape = 512
+
+
+def scaling(x, scale):
+    return x * scale
+
+
+def InceptionResNetV1(dimension: int = 128) -> Model:
+    """
+    InceptionResNetV1 model heavily inspired by
+    github.com/davidsandberg/facenet/blob/master/src/models/inception_resnet_v1.py
+    As mentioned in the readme of Sandberg's repo, the pre-trained models use Inception ResNet v1
+    Besides, the training process is documented at
+    sefiks.com/2018/09/03/face-recognition-with-facenet-in-keras/
+
+    Args:
+        dimension (int): number of dimensions in the embedding layer
+    Returns:
+        model (Model)
+    """
+
+    inputs = Input(shape=(160, 160, 3))
+    x = Conv2D(32, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_1a_3x3")(inputs)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_1a_3x3_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_1a_3x3_Activation")(x)
+    x = Conv2D(32, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_2a_3x3")(x)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2a_3x3_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_2a_3x3_Activation")(x)
+    x = Conv2D(64, 3, strides=1, padding="same", use_bias=False, name="Conv2d_2b_3x3")(x)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_2b_3x3_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_2b_3x3_Activation")(x)
+    x = MaxPooling2D(3, strides=2, name="MaxPool_3a_3x3")(x)
+    x = Conv2D(80, 1, strides=1, padding="valid", use_bias=False, name="Conv2d_3b_1x1")(x)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_3b_1x1_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_3b_1x1_Activation")(x)
+    x = Conv2D(192, 3, strides=1, padding="valid", use_bias=False, name="Conv2d_4a_3x3")(x)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4a_3x3_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_4a_3x3_Activation")(x)
+    x = Conv2D(256, 3, strides=2, padding="valid", use_bias=False, name="Conv2d_4b_3x3")(x)
+    x = BatchNormalization(
+        axis=3, momentum=0.995, epsilon=0.001, scale=False, name="Conv2d_4b_3x3_BatchNorm"
+    )(x)
+    x = Activation("relu", name="Conv2d_4b_3x3_Activation")(x)
+
+    # 5x Block35 (Inception-ResNet-A block):
+    branch_0 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block35_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_1_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_1_Branch_2_Conv2d_0c_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_1_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
+    branches = [branch_0, branch_1, branch_2]
+    mixed = Concatenate(axis=3, name="Block35_1_Concatenate")(branches)
+    up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_1_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block35_1_Activation")(x)
+
+    branch_0 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block35_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_2_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_2_Branch_2_Conv2d_0c_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_2_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
+    branches = [branch_0, branch_1, branch_2]
+    mixed = Concatenate(axis=3, name="Block35_2_Concatenate")(branches)
+    up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_2_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block35_2_Activation")(x)
+
+    branch_0 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block35_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_3_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_3_Branch_2_Conv2d_0c_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_3_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
+    branches = [branch_0, branch_1, branch_2]
+    mixed = Concatenate(axis=3, name="Block35_3_Concatenate")(branches)
+    up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_3_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block35_3_Activation")(x)
+
+    branch_0 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block35_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_4_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_4_Branch_2_Conv2d_0c_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_4_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
+    branches = [branch_0, branch_1, branch_2]
+    mixed = Concatenate(axis=3, name="Block35_4_Concatenate")(branches)
+    up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_4_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block35_4_Activation")(x)
+
+    branch_0 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block35_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block35_5_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        32, 1, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        32, 3, strides=1, padding="same", use_bias=False, name="Block35_5_Branch_2_Conv2d_0c_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Block35_5_Branch_2_Conv2d_0c_3x3_Activation")(branch_2)
+    branches = [branch_0, branch_1, branch_2]
+    mixed = Concatenate(axis=3, name="Block35_5_Concatenate")(branches)
+    up = Conv2D(256, 1, strides=1, padding="same", use_bias=True, name="Block35_5_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.17})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block35_5_Activation")(x)
+
+    # Mixed 6a (Reduction-A block):
+    branch_0 = Conv2D(
+        384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_0_Conv2d_1a_3x3"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
+    branch_1 = Conv2D(
+        192, 1, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        192, 3, strides=1, padding="same", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_0b_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation")(branch_1)
+    branch_1 = Conv2D(
+        256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_6a_Branch_1_Conv2d_1a_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
+    branch_pool = MaxPooling2D(
+        3, strides=2, padding="valid", name="Mixed_6a_Branch_2_MaxPool_1a_3x3"
+    )(x)
+    branches = [branch_0, branch_1, branch_pool]
+    x = Concatenate(axis=3, name="Mixed_6a")(branches)
+
+    # 10x Block17 (Inception-ResNet-B block):
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_1_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_1_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_1_Branch_1_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_1_Branch_1_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_1_Branch_1_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_1_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_1_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_1_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_2_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_2_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_2_Branch_2_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_2_Branch_2_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_2_Branch_2_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_2_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_2_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_2_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_3_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_3_Branch_3_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_3_Branch_3_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_3_Branch_3_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_3_Branch_3_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_3_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_3_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_3_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_4_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_4_Branch_4_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_4_Branch_4_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_4_Branch_4_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_4_Branch_4_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_4_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_4_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_4_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_5_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_5_Branch_5_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_5_Branch_5_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_5_Branch_5_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_5_Branch_5_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_5_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_5_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_5_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_6_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_6_Branch_6_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_6_Branch_6_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_6_Branch_6_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_6_Branch_6_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_6_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_6_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_6_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_7_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_7_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_7_Branch_7_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_7_Branch_7_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_7_Branch_7_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_7_Branch_7_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_7_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_7_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_7_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_8_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_8_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_8_Branch_8_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_8_Branch_8_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_8_Branch_8_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_8_Branch_8_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_8_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_8_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_8_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_9_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_9_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_9_Branch_9_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_9_Branch_9_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_9_Branch_9_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_9_Branch_9_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_9_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_9_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_9_Activation")(x)
+
+    branch_0 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_10_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block17_10_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        128, 1, strides=1, padding="same", use_bias=False, name="Block17_10_Branch_10_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [1, 7],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_10_Branch_10_Conv2d_0b_1x7",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0b_1x7_Activation")(branch_1)
+    branch_1 = Conv2D(
+        128,
+        [7, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block17_10_Branch_10_Conv2d_0c_7x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block17_10_Branch_10_Conv2d_0c_7x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block17_10_Concatenate")(branches)
+    up = Conv2D(896, 1, strides=1, padding="same", use_bias=True, name="Block17_10_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.1})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block17_10_Activation")(x)
+
+    # Mixed 7a (Reduction-B block): 8 x 8 x 2080
+    branch_0 = Conv2D(
+        256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_0a_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation")(branch_0)
+    branch_0 = Conv2D(
+        384, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_0_Conv2d_1a_3x3"
+    )(branch_0)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation")(branch_0)
+    branch_1 = Conv2D(
+        256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_1_Conv2d_1a_3x3"
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation")(branch_1)
+    branch_2 = Conv2D(
+        256, 1, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation")(branch_2)
+    branch_2 = Conv2D(
+        256, 3, strides=1, padding="same", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_0b_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation")(branch_2)
+    branch_2 = Conv2D(
+        256, 3, strides=2, padding="valid", use_bias=False, name="Mixed_7a_Branch_2_Conv2d_1a_3x3"
+    )(branch_2)
+    branch_2 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm",
+    )(branch_2)
+    branch_2 = Activation("relu", name="Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation")(branch_2)
+    branch_pool = MaxPooling2D(
+        3, strides=2, padding="valid", name="Mixed_7a_Branch_3_MaxPool_1a_3x3"
+    )(x)
+    branches = [branch_0, branch_1, branch_2, branch_pool]
+    x = Concatenate(axis=3, name="Mixed_7a")(branches)
+
+    # 5x Block8 (Inception-ResNet-C block):
+
+    branch_0 = Conv2D(
+        192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_1_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block8_1_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        192, 1, strides=1, padding="same", use_bias=False, name="Block8_1_Branch_1_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        192,
+        [1, 3],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block8_1_Branch_1_Conv2d_0b_1x3",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
+    branch_1 = Conv2D(
+        192,
+        [3, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block8_1_Branch_1_Conv2d_0c_3x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_1_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block8_1_Concatenate")(branches)
+    up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_1_Conv2d_1x1")(
+        mixed
+    )
+    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
+    x = add([x, up])
+    x = Activation("relu", name="Block8_1_Activation")(x)
+
+    branch_0 = Conv2D(
+        192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_0_Conv2d_1x1"
+    )(x)
+    branch_0 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_2_Branch_0_Conv2d_1x1_BatchNorm",
+    )(branch_0)
+    branch_0 = Activation("relu", name="Block8_2_Branch_0_Conv2d_1x1_Activation")(branch_0)
+    branch_1 = Conv2D(
+        192, 1, strides=1, padding="same", use_bias=False, name="Block8_2_Branch_2_Conv2d_0a_1x1"
+    )(x)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0a_1x1_Activation")(branch_1)
+    branch_1 = Conv2D(
+        192,
+        [1, 3],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block8_2_Branch_2_Conv2d_0b_1x3",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0b_1x3_Activation")(branch_1)
+    branch_1 = Conv2D(
+        192,
+        [3, 1],
+        strides=1,
+        padding="same",
+        use_bias=False,
+        name="Block8_2_Branch_2_Conv2d_0c_3x1",
+    )(branch_1)
+    branch_1 = BatchNormalization(
+        axis=3,
+        momentum=0.995,
+        epsilon=0.001,
+        scale=False,
+        name="Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm",
+    )(branch_1)
+    branch_1 = Activation("relu", name="Block8_2_Branch_2_Conv2d_0c_3x1_Activation")(branch_1)
+    branches = [branch_0, branch_1]
+    mixed = Concatenate(axis=3, name="Block8_2_Concatenate")(branches)
1384
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_2_Conv2d_1x1")(
1385
+ mixed
1386
+ )
1387
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1388
+ x = add([x, up])
1389
+ x = Activation("relu", name="Block8_2_Activation")(x)
1390
+
1391
+ branch_0 = Conv2D(
1392
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_0_Conv2d_1x1"
1393
+ )(x)
1394
+ branch_0 = BatchNormalization(
1395
+ axis=3,
1396
+ momentum=0.995,
1397
+ epsilon=0.001,
1398
+ scale=False,
1399
+ name="Block8_3_Branch_0_Conv2d_1x1_BatchNorm",
1400
+ )(branch_0)
1401
+ branch_0 = Activation("relu", name="Block8_3_Branch_0_Conv2d_1x1_Activation")(branch_0)
1402
+ branch_1 = Conv2D(
1403
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_3_Branch_3_Conv2d_0a_1x1"
1404
+ )(x)
1405
+ branch_1 = BatchNormalization(
1406
+ axis=3,
1407
+ momentum=0.995,
1408
+ epsilon=0.001,
1409
+ scale=False,
1410
+ name="Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm",
1411
+ )(branch_1)
1412
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0a_1x1_Activation")(branch_1)
1413
+ branch_1 = Conv2D(
1414
+ 192,
1415
+ [1, 3],
1416
+ strides=1,
1417
+ padding="same",
1418
+ use_bias=False,
1419
+ name="Block8_3_Branch_3_Conv2d_0b_1x3",
1420
+ )(branch_1)
1421
+ branch_1 = BatchNormalization(
1422
+ axis=3,
1423
+ momentum=0.995,
1424
+ epsilon=0.001,
1425
+ scale=False,
1426
+ name="Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm",
1427
+ )(branch_1)
1428
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0b_1x3_Activation")(branch_1)
1429
+ branch_1 = Conv2D(
1430
+ 192,
1431
+ [3, 1],
1432
+ strides=1,
1433
+ padding="same",
1434
+ use_bias=False,
1435
+ name="Block8_3_Branch_3_Conv2d_0c_3x1",
1436
+ )(branch_1)
1437
+ branch_1 = BatchNormalization(
1438
+ axis=3,
1439
+ momentum=0.995,
1440
+ epsilon=0.001,
1441
+ scale=False,
1442
+ name="Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm",
1443
+ )(branch_1)
1444
+ branch_1 = Activation("relu", name="Block8_3_Branch_3_Conv2d_0c_3x1_Activation")(branch_1)
1445
+ branches = [branch_0, branch_1]
1446
+ mixed = Concatenate(axis=3, name="Block8_3_Concatenate")(branches)
1447
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_3_Conv2d_1x1")(
1448
+ mixed
1449
+ )
1450
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1451
+ x = add([x, up])
1452
+ x = Activation("relu", name="Block8_3_Activation")(x)
1453
+
1454
+ branch_0 = Conv2D(
1455
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_0_Conv2d_1x1"
1456
+ )(x)
1457
+ branch_0 = BatchNormalization(
1458
+ axis=3,
1459
+ momentum=0.995,
1460
+ epsilon=0.001,
1461
+ scale=False,
1462
+ name="Block8_4_Branch_0_Conv2d_1x1_BatchNorm",
1463
+ )(branch_0)
1464
+ branch_0 = Activation("relu", name="Block8_4_Branch_0_Conv2d_1x1_Activation")(branch_0)
1465
+ branch_1 = Conv2D(
1466
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_4_Branch_4_Conv2d_0a_1x1"
1467
+ )(x)
1468
+ branch_1 = BatchNormalization(
1469
+ axis=3,
1470
+ momentum=0.995,
1471
+ epsilon=0.001,
1472
+ scale=False,
1473
+ name="Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm",
1474
+ )(branch_1)
1475
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0a_1x1_Activation")(branch_1)
1476
+ branch_1 = Conv2D(
1477
+ 192,
1478
+ [1, 3],
1479
+ strides=1,
1480
+ padding="same",
1481
+ use_bias=False,
1482
+ name="Block8_4_Branch_4_Conv2d_0b_1x3",
1483
+ )(branch_1)
1484
+ branch_1 = BatchNormalization(
1485
+ axis=3,
1486
+ momentum=0.995,
1487
+ epsilon=0.001,
1488
+ scale=False,
1489
+ name="Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm",
1490
+ )(branch_1)
1491
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0b_1x3_Activation")(branch_1)
1492
+ branch_1 = Conv2D(
1493
+ 192,
1494
+ [3, 1],
1495
+ strides=1,
1496
+ padding="same",
1497
+ use_bias=False,
1498
+ name="Block8_4_Branch_4_Conv2d_0c_3x1",
1499
+ )(branch_1)
1500
+ branch_1 = BatchNormalization(
1501
+ axis=3,
1502
+ momentum=0.995,
1503
+ epsilon=0.001,
1504
+ scale=False,
1505
+ name="Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm",
1506
+ )(branch_1)
1507
+ branch_1 = Activation("relu", name="Block8_4_Branch_4_Conv2d_0c_3x1_Activation")(branch_1)
1508
+ branches = [branch_0, branch_1]
1509
+ mixed = Concatenate(axis=3, name="Block8_4_Concatenate")(branches)
1510
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_4_Conv2d_1x1")(
1511
+ mixed
1512
+ )
1513
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1514
+ x = add([x, up])
1515
+ x = Activation("relu", name="Block8_4_Activation")(x)
1516
+
1517
+ branch_0 = Conv2D(
1518
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_0_Conv2d_1x1"
1519
+ )(x)
1520
+ branch_0 = BatchNormalization(
1521
+ axis=3,
1522
+ momentum=0.995,
1523
+ epsilon=0.001,
1524
+ scale=False,
1525
+ name="Block8_5_Branch_0_Conv2d_1x1_BatchNorm",
1526
+ )(branch_0)
1527
+ branch_0 = Activation("relu", name="Block8_5_Branch_0_Conv2d_1x1_Activation")(branch_0)
1528
+ branch_1 = Conv2D(
1529
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_5_Branch_5_Conv2d_0a_1x1"
1530
+ )(x)
1531
+ branch_1 = BatchNormalization(
1532
+ axis=3,
1533
+ momentum=0.995,
1534
+ epsilon=0.001,
1535
+ scale=False,
1536
+ name="Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm",
1537
+ )(branch_1)
1538
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0a_1x1_Activation")(branch_1)
1539
+ branch_1 = Conv2D(
1540
+ 192,
1541
+ [1, 3],
1542
+ strides=1,
1543
+ padding="same",
1544
+ use_bias=False,
1545
+ name="Block8_5_Branch_5_Conv2d_0b_1x3",
1546
+ )(branch_1)
1547
+ branch_1 = BatchNormalization(
1548
+ axis=3,
1549
+ momentum=0.995,
1550
+ epsilon=0.001,
1551
+ scale=False,
1552
+ name="Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm",
1553
+ )(branch_1)
1554
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0b_1x3_Activation")(branch_1)
1555
+ branch_1 = Conv2D(
1556
+ 192,
1557
+ [3, 1],
1558
+ strides=1,
1559
+ padding="same",
1560
+ use_bias=False,
1561
+ name="Block8_5_Branch_5_Conv2d_0c_3x1",
1562
+ )(branch_1)
1563
+ branch_1 = BatchNormalization(
1564
+ axis=3,
1565
+ momentum=0.995,
1566
+ epsilon=0.001,
1567
+ scale=False,
1568
+ name="Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm",
1569
+ )(branch_1)
1570
+ branch_1 = Activation("relu", name="Block8_5_Branch_5_Conv2d_0c_3x1_Activation")(branch_1)
1571
+ branches = [branch_0, branch_1]
1572
+ mixed = Concatenate(axis=3, name="Block8_5_Concatenate")(branches)
1573
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_5_Conv2d_1x1")(
1574
+ mixed
1575
+ )
1576
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 0.2})(up)
1577
+ x = add([x, up])
1578
+ x = Activation("relu", name="Block8_5_Activation")(x)
1579
+
1580
+ branch_0 = Conv2D(
1581
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_0_Conv2d_1x1"
1582
+ )(x)
1583
+ branch_0 = BatchNormalization(
1584
+ axis=3,
1585
+ momentum=0.995,
1586
+ epsilon=0.001,
1587
+ scale=False,
1588
+ name="Block8_6_Branch_0_Conv2d_1x1_BatchNorm",
1589
+ )(branch_0)
1590
+ branch_0 = Activation("relu", name="Block8_6_Branch_0_Conv2d_1x1_Activation")(branch_0)
1591
+ branch_1 = Conv2D(
1592
+ 192, 1, strides=1, padding="same", use_bias=False, name="Block8_6_Branch_1_Conv2d_0a_1x1"
1593
+ )(x)
1594
+ branch_1 = BatchNormalization(
1595
+ axis=3,
1596
+ momentum=0.995,
1597
+ epsilon=0.001,
1598
+ scale=False,
1599
+ name="Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm",
1600
+ )(branch_1)
1601
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0a_1x1_Activation")(branch_1)
1602
+ branch_1 = Conv2D(
1603
+ 192,
1604
+ [1, 3],
1605
+ strides=1,
1606
+ padding="same",
1607
+ use_bias=False,
1608
+ name="Block8_6_Branch_1_Conv2d_0b_1x3",
1609
+ )(branch_1)
1610
+ branch_1 = BatchNormalization(
1611
+ axis=3,
1612
+ momentum=0.995,
1613
+ epsilon=0.001,
1614
+ scale=False,
1615
+ name="Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm",
1616
+ )(branch_1)
1617
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0b_1x3_Activation")(branch_1)
1618
+ branch_1 = Conv2D(
1619
+ 192,
1620
+ [3, 1],
1621
+ strides=1,
1622
+ padding="same",
1623
+ use_bias=False,
1624
+ name="Block8_6_Branch_1_Conv2d_0c_3x1",
1625
+ )(branch_1)
1626
+ branch_1 = BatchNormalization(
1627
+ axis=3,
1628
+ momentum=0.995,
1629
+ epsilon=0.001,
1630
+ scale=False,
1631
+ name="Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm",
1632
+ )(branch_1)
1633
+ branch_1 = Activation("relu", name="Block8_6_Branch_1_Conv2d_0c_3x1_Activation")(branch_1)
1634
+ branches = [branch_0, branch_1]
1635
+ mixed = Concatenate(axis=3, name="Block8_6_Concatenate")(branches)
1636
+ up = Conv2D(1792, 1, strides=1, padding="same", use_bias=True, name="Block8_6_Conv2d_1x1")(
1637
+ mixed
1638
+ )
1639
+ up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={"scale": 1})(up)
1640
+ x = add([x, up])
1641
+
1642
+ # Classification block
1643
+ x = GlobalAveragePooling2D(name="AvgPool")(x)
1644
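+ # rate written as 1 - keep_prob, i.e. a 0.2 dropout rate (keep_prob 0.8 in the reference implementation)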
+ x = Dropout(1.0 - 0.8, name="Dropout")(x)
1645
+ # Bottleneck
1646
+ x = Dense(dimension, use_bias=False, name="Bottleneck")(x)
1647
+ x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name="Bottleneck_BatchNorm")(
1648
+ x
1649
+ )
1650
+
1651
+ # Create model
1652
+ model = Model(inputs, x, name="inception_resnet_v1")
1653
+
1654
+ return model
1655
+
1656
+
1657
+ def load_facenet128d_model(
1658
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet_weights.h5",
1659
+ ) -> Model:
1660
+ """
1661
+ Construct FaceNet-128d model, download its weights and load them
1662
+ Args:
1663
+ url (str): location of the pre-trained weight file to download
1664
+ Returns:
1665
+ model (Model)
1666
+ """
1667
+ model = InceptionResNetV1()
1668
+
1669
+ # -----------------------------------
1670
+
1671
+ home = folder_utils.get_deepface_home()
1672
+
1673
+ if not os.path.isfile(home + "/.deepface/weights/facenet_weights.h5"):
1674
+ logger.info("facenet_weights.h5 will be downloaded...")
1675
+
1676
+ output = home + "/.deepface/weights/facenet_weights.h5"
1677
+ gdown.download(url, output, quiet=False)
1678
+
1679
+ # -----------------------------------
1680
+
1681
+ model.load_weights(home + "/.deepface/weights/facenet_weights.h5")
1682
+
1683
+ # -----------------------------------
1684
+
1685
+ return model
1686
+
1687
+
1688
+ def load_facenet512d_model(
1689
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/facenet512_weights.h5",
1690
+ ) -> Model:
1691
+ """
1692
+ Construct FaceNet-512d model, download its weights and load
1693
+ Returns:
1694
+ model (Model)
1695
+ """
1696
+
1697
+ model = InceptionResNetV1(dimension=512)
1698
+
1699
+ # -------------------------
1700
+
1701
+ home = folder_utils.get_deepface_home()
1702
+
1703
+ if not os.path.isfile(home + "/.deepface/weights/facenet512_weights.h5"):
1704
+ logger.info("facenet512_weights.h5 will be downloaded...")
1705
+
1706
+ output = home + "/.deepface/weights/facenet512_weights.h5"
1707
+ gdown.download(url, output, quiet=False)
1708
+
1709
+ # -------------------------
1710
+
1711
+ model.load_weights(home + "/.deepface/weights/facenet512_weights.h5")
1712
+
1713
+ # -------------------------
1714
+
1715
+ return model
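A minimal usage sketch, not part of this diff: both loaders return plain Keras models, so an embedding can be computed directly from a preprocessed face crop (assuming the 160x160 RGB input InceptionResNetV1 is built for, scaled to [0, 1]):

import numpy as np

model = load_facenet512d_model()
face = np.random.rand(1, 160, 160, 3).astype("float32")  # stand-in for a real aligned crop
embedding = model.predict(face, verbose=0)[0]
print(embedding.shape)  # (512,); load_facenet128d_model yields (128,)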
deepface/basemodels/FbDeepFace.py ADDED
@@ -0,0 +1,105 @@
1
+ import os
2
+ import zipfile
3
+ import gdown
4
+ from deepface.commons import package_utils, folder_utils
5
+ from deepface.models.FacialRecognition import FacialRecognition
6
+ from deepface.commons import logger as log
7
+
8
+ logger = log.get_singletonish_logger()
9
+
10
+ # --------------------------------
11
+ # dependency configuration
12
+
13
+ tf_major = package_utils.get_tf_major_version()
14
+ tf_minor = package_utils.get_tf_minor_version()
15
+
16
+ if tf_major == 1:
17
+ from keras.models import Model, Sequential
18
+ from keras.layers import (
19
+ Convolution2D,
20
+ MaxPooling2D,
21
+ Flatten,
22
+ Dense,
23
+ Dropout,
24
+ )
25
+ else:
26
+ from tensorflow.keras.models import Model, Sequential
27
+ from tensorflow.keras.layers import (
28
+ Convolution2D,
29
+ MaxPooling2D,
30
+ Flatten,
31
+ Dense,
32
+ Dropout,
33
+ )
34
+
35
+
36
+ # -------------------------------------
37
+ # pylint: disable=line-too-long, too-few-public-methods
38
+ class DeepFaceClient(FacialRecognition):
39
+ """
40
+ Fb's DeepFace model class
41
+ """
42
+
43
+ def __init__(self):
44
+ # DeepFace requires tf 2.12 or less
45
+ if tf_major == 2 and tf_minor > 12:
46
+ # Ref: https://github.com/serengil/deepface/pull/1079
47
+ raise ValueError(
48
+ "DeepFace model requires LocallyConnected2D but it is no longer supported"
49
+ f" after tf 2.12 but you have {tf_major}.{tf_minor}. You need to downgrade your tf."
50
+ )
51
+
52
+ self.model = load_model()
53
+ self.model_name = "DeepFace"
54
+ self.input_shape = (152, 152)
55
+ self.output_shape = 4096
56
+
57
+
58
+ def load_model(
59
+ url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
60
+ ) -> Model:
61
+ """
62
+ Construct DeepFace model, download its weights and load
63
+ """
64
+ # we have some checks for this dependency in the init of client
65
+ # putting this in global causes library initialization
66
+ if tf_major == 1:
67
+ from keras.layers import LocallyConnected2D
68
+ else:
69
+ from tensorflow.keras.layers import LocallyConnected2D
70
+
71
+ base_model = Sequential()
72
+ base_model.add(
73
+ Convolution2D(32, (11, 11), activation="relu", name="C1", input_shape=(152, 152, 3))
74
+ )
75
+ base_model.add(MaxPooling2D(pool_size=3, strides=2, padding="same", name="M2"))
76
+ base_model.add(Convolution2D(16, (9, 9), activation="relu", name="C3"))
77
+ base_model.add(LocallyConnected2D(16, (9, 9), activation="relu", name="L4"))
78
+ base_model.add(LocallyConnected2D(16, (7, 7), strides=2, activation="relu", name="L5"))
79
+ base_model.add(LocallyConnected2D(16, (5, 5), activation="relu", name="L6"))
80
+ base_model.add(Flatten(name="F0"))
81
+ base_model.add(Dense(4096, activation="relu", name="F7"))
82
+ base_model.add(Dropout(rate=0.5, name="D0"))
83
+ base_model.add(Dense(8631, activation="softmax", name="F8"))
84
+
85
+ # ---------------------------------
86
+
87
+ home = folder_utils.get_deepface_home()
88
+
89
+ if not os.path.isfile(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5"):
90
+ logger.info("VGGFace2_DeepFace_weights_val-0.9034.h5 will be downloaded...")
91
+
92
+ output = home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5.zip"
93
+
94
+ gdown.download(url, output, quiet=False)
95
+
96
+ # unzip VGGFace2_DeepFace_weights_val-0.9034.h5.zip
97
+ with zipfile.ZipFile(output, "r") as zip_ref:
98
+ zip_ref.extractall(home + "/.deepface/weights/")
99
+
100
+ base_model.load_weights(home + "/.deepface/weights/VGGFace2_DeepFace_weights_val-0.9034.h5")
101
+
102
+ # drop F8 and D0. F7 is the representation layer.
103
+ deepface_model = Model(inputs=base_model.layers[0].input, outputs=base_model.layers[-3].output)
104
+
105
+ return deepface_model
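A hedged usage sketch, not part of this diff (requires tf <= 2.12 as enforced in the client's __init__; a preprocessed 152x152 RGB crop is assumed):

import numpy as np

client = DeepFaceClient()
face = np.random.rand(1, 152, 152, 3).astype("float32")  # placeholder input
representation = client.model.predict(face, verbose=0)[0]
print(representation.shape)  # (4096,), the F7 layer output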
deepface/basemodels/GhostFaceNet.py ADDED
@@ -0,0 +1,312 @@
1
+ # built-in dependencies
2
+ import os
3
+
4
+ # 3rd party dependencies
5
+ import gdown
6
+ import tensorflow as tf
7
+
8
+ # project dependencies
9
+ from deepface.commons import package_utils, folder_utils
10
+ from deepface.models.FacialRecognition import FacialRecognition
11
+ from deepface.commons import logger as log
12
+
13
+ logger = log.get_singletonish_logger()
14
+
15
+ tf_major = package_utils.get_tf_major_version()
16
+ if tf_major == 1:
17
+ import keras
18
+ from keras import backend as K
19
+ from keras.models import Model
20
+ from keras.layers import (
21
+ Activation,
22
+ Add,
23
+ BatchNormalization,
24
+ Concatenate,
25
+ Conv2D,
26
+ DepthwiseConv2D,
27
+ GlobalAveragePooling2D,
28
+ Input,
29
+ Reshape,
30
+ Multiply,
31
+ ReLU,
32
+ PReLU,
33
+ )
34
+ else:
35
+ from tensorflow import keras
36
+ from tensorflow.keras import backend as K
37
+ from tensorflow.keras.models import Model
38
+ from tensorflow.keras.layers import (
39
+ Activation,
40
+ Add,
41
+ BatchNormalization,
42
+ Concatenate,
43
+ Conv2D,
44
+ DepthwiseConv2D,
45
+ GlobalAveragePooling2D,
46
+ Input,
47
+ Reshape,
48
+ Multiply,
49
+ ReLU,
50
+ PReLU,
51
+ )
52
+
53
+
54
+ # pylint: disable=line-too-long, too-few-public-methods, no-else-return, unsubscriptable-object, comparison-with-callable
55
+ PRETRAINED_WEIGHTS = "https://github.com/HamadYA/GhostFaceNets/releases/download/v1.2/GhostFaceNet_W1.3_S1_ArcFace.h5"
56
+
57
+
58
+ class GhostFaceNetClient(FacialRecognition):
59
+ """
60
+ GhostFaceNet model (GhostFaceNetV1 backbone)
61
+ Repo: https://github.com/HamadYA/GhostFaceNets
62
+ Pre-trained weights: https://github.com/HamadYA/GhostFaceNets/releases/tag/v1.2
63
+ GhostFaceNet_W1.3_S1_ArcFace.h5 ~ 16.5MB
64
+ The author reports 99.7667% accuracy on LFW for this backbone and these pre-trained weights
65
+ """
66
+
67
+ def __init__(self):
68
+ self.model_name = "GhostFaceNet"
69
+ self.input_shape = (112, 112)
70
+ self.output_shape = 512
71
+ self.model = load_model()
72
+
73
+
74
+ def load_model():
75
+ model = GhostFaceNetV1()
76
+
77
+ home = folder_utils.get_deepface_home()
78
+ output = home + "/.deepface/weights/ghostfacenet_v1.h5"
79
+
80
+ if not os.path.isfile(output):
81
+ logger.info(f"Pre-trained weights is downloaded from {PRETRAINED_WEIGHTS} to {output}")
82
+ gdown.download(PRETRAINED_WEIGHTS, output, quiet=False)
83
+ logger.info(f"Pre-trained weights is just downloaded to {output}")
84
+
85
+ model.load_weights(output)
86
+
87
+ return model
88
+
89
+
90
+ def GhostFaceNetV1() -> Model:
91
+ """
92
+ Build GhostFaceNetV1 model. Refactored from
93
+ github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
94
+ Returns:
95
+ model (Model)
96
+ """
97
+ inputs = Input(shape=(112, 112, 3))
98
+
99
+ out_channel = 20
100
+
101
+ nn = Conv2D(
102
+ out_channel,
103
+ (3, 3),
104
+ strides=1,
105
+ padding="same",
106
+ use_bias=False,
107
+ kernel_initializer=keras.initializers.VarianceScaling(
108
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
109
+ ),
110
+ )(inputs)
111
+
112
+ nn = BatchNormalization(axis=-1)(nn)
113
+ nn = Activation("relu")(nn)
114
+
115
+ dwkernels = [3, 3, 3, 5, 5, 3, 3, 3, 3, 3, 3, 5, 5, 5, 5, 5]
116
+ exps = [20, 64, 92, 92, 156, 312, 260, 240, 240, 624, 872, 872, 1248, 1248, 1248, 664]
117
+ outs = [20, 32, 32, 52, 52, 104, 104, 104, 104, 144, 144, 208, 208, 208, 208, 208]
118
+ strides_set = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1]
119
+ reductions = [0, 0, 0, 24, 40, 0, 0, 0, 0, 156, 220, 220, 0, 312, 0, 168]
120
+
121
+ pre_out = out_channel
122
+ for dwk, stride, exp, out, reduction in zip(dwkernels, strides_set, exps, outs, reductions):
123
+ shortcut = not (out == pre_out and stride == 1)
124
+ nn = ghost_bottleneck(nn, dwk, stride, exp, out, reduction, shortcut)
125
+ pre_out = out
126
+
127
+ nn = Conv2D(
128
+ 664,
129
+ (1, 1),
130
+ strides=(1, 1),
131
+ padding="valid",
132
+ use_bias=False,
133
+ kernel_initializer=keras.initializers.VarianceScaling(
134
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
135
+ ),
136
+ )(nn)
137
+ nn = BatchNormalization(axis=-1)(nn)
138
+ nn = Activation("relu")(nn)
139
+
140
+ xx = Model(inputs=inputs, outputs=nn, name="GhostFaceNetV1")
141
+
142
+ # post modelling
143
+ inputs = xx.inputs[0]
144
+ nn = xx.outputs[0]
145
+
146
+ nn = keras.layers.DepthwiseConv2D(nn.shape[1], use_bias=False, name="GDC_dw")(nn)
147
+ nn = keras.layers.BatchNormalization(momentum=0.99, epsilon=0.001, name="GDC_batchnorm")(nn)
148
+ nn = keras.layers.Conv2D(
149
+ 512, 1, use_bias=True, kernel_initializer="glorot_normal", name="GDC_conv"
150
+ )(nn)
151
+ nn = keras.layers.Flatten(name="GDC_flatten")(nn)
152
+
153
+ embedding = keras.layers.BatchNormalization(
154
+ momentum=0.99, epsilon=0.001, scale=True, name="pre_embedding"
155
+ )(nn)
156
+ embedding_fp32 = keras.layers.Activation("linear", dtype="float32", name="embedding")(embedding)
157
+
158
+ model = keras.models.Model(inputs, embedding_fp32, name=xx.name)
159
+ model = replace_relu_with_prelu(model=model)
160
+ return model
161
+
162
+
163
+ def se_module(inputs, reduction):
164
+ """
165
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
166
+ """
167
+ # get the channel axis
168
+ channel_axis = 1 if K.image_data_format() == "channels_first" else -1
169
+ # filters = channel axis shape
170
+ filters = inputs.shape[channel_axis]
171
+
172
+ # from None x H x W x C to None x C
173
+ se = GlobalAveragePooling2D()(inputs)
174
+
175
+ # Reshape None x C to None 1 x 1 x C
176
+ se = Reshape((1, 1, filters))(se)
177
+
178
+ # Squeeze by using C*se_ratio. The size will be 1 x 1 x C*se_ratio
179
+ se = Conv2D(
180
+ reduction,
181
+ kernel_size=1,
182
+ use_bias=True,
183
+ kernel_initializer=keras.initializers.VarianceScaling(
184
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
185
+ ),
186
+ )(se)
187
+ se = Activation("relu")(se)
188
+
189
+ # Excitation using C filters. The size will be 1 x 1 x C
190
+ se = Conv2D(
191
+ filters,
192
+ kernel_size=1,
193
+ use_bias=True,
194
+ kernel_initializer=keras.initializers.VarianceScaling(
195
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
196
+ ),
197
+ )(se)
198
+ se = Activation("hard_sigmoid")(se)
199
+
200
+ return Multiply()([inputs, se])
201
+
202
+
203
+ def ghost_module(inputs, out, convkernel=1, dwkernel=3, add_activation=True):
204
+ """
205
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
206
+ """
207
+ conv_out_channel = out // 2
208
+ cc = Conv2D(
209
+ conv_out_channel,
210
+ convkernel,
211
+ use_bias=False,
212
+ strides=(1, 1),
213
+ padding="same",
214
+ kernel_initializer=keras.initializers.VarianceScaling(
215
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
216
+ ),
217
+ )(inputs)
218
+ cc = BatchNormalization(axis=-1)(cc)
219
+ if add_activation:
220
+ cc = Activation("relu")(cc)
221
+
222
+ nn = DepthwiseConv2D(
223
+ dwkernel,
224
+ 1,
225
+ padding="same",
226
+ use_bias=False,
227
+ depthwise_initializer=keras.initializers.VarianceScaling(
228
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
229
+ ),
230
+ )(cc)
231
+ nn = BatchNormalization(axis=-1)(nn)
232
+ if add_activation:
233
+ nn = Activation("relu")(nn)
234
+ return Concatenate()([cc, nn])
235
+
236
+
237
+ def ghost_bottleneck(inputs, dwkernel, strides, exp, out, reduction, shortcut=True):
238
+ """
239
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
240
+ """
241
+ nn = ghost_module(inputs, exp, add_activation=True)
242
+ if strides > 1:
243
+ # Extra depth conv if strides higher than 1
244
+ nn = DepthwiseConv2D(
245
+ dwkernel,
246
+ strides,
247
+ padding="same",
248
+ use_bias=False,
249
+ depthwise_initializer=keras.initializers.VarianceScaling(
250
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
251
+ ),
252
+ )(nn)
253
+ nn = BatchNormalization(axis=-1)(nn)
254
+
255
+ if reduction > 0:
256
+ # Squeeze and excite
257
+ nn = se_module(nn, reduction)
258
+
259
+ # Point-wise linear projection
260
+ nn = ghost_module(nn, out, add_activation=False) # ghost2 = GhostModule(exp, out, relu=False)
261
+
262
+ if shortcut:
263
+ xx = DepthwiseConv2D(
264
+ dwkernel,
265
+ strides,
266
+ padding="same",
267
+ use_bias=False,
268
+ depthwise_initializer=keras.initializers.VarianceScaling(
269
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
270
+ ),
271
+ )(inputs)
272
+ xx = BatchNormalization(axis=-1)(xx)
273
+ xx = Conv2D(
274
+ out,
275
+ (1, 1),
276
+ strides=(1, 1),
277
+ padding="valid",
278
+ use_bias=False,
279
+ kernel_initializer=keras.initializers.VarianceScaling(
280
+ scale=2.0, mode="fan_out", distribution="truncated_normal"
281
+ ),
282
+ )(xx)
283
+ xx = BatchNormalization(axis=-1)(xx)
284
+ else:
285
+ xx = inputs
286
+ return Add()([xx, nn])
287
+
288
+
289
+ def replace_relu_with_prelu(model) -> Model:
290
+ """
291
+ Replaces relu activation function in the built model with prelu.
292
+ Refactored from github.com/HamadYA/GhostFaceNets/blob/main/backbones/ghost_model.py
293
+ Args:
294
+ model (Model): built model with relu activation functions
295
+ Returns
296
+ model (Model): built model with prelu activation functions
297
+ """
298
+
299
+ def convert_relu(layer):
300
+ if isinstance(layer, ReLU) or (
301
+ isinstance(layer, Activation) and layer.activation == keras.activations.relu
302
+ ):
303
+ layer_name = layer.name.replace("_relu", "_prelu")
304
+ return PReLU(
305
+ shared_axes=[1, 2],
306
+ alpha_initializer=tf.initializers.Constant(0.25),
307
+ name=layer_name,
308
+ )
309
+ return layer
310
+
311
+ input_tensors = keras.layers.Input(model.input_shape[1:])
312
+ return keras.models.clone_model(model, input_tensors=input_tensors, clone_function=convert_relu)
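An illustrative sketch, not part of this diff (assuming a 112x112 RGB crop scaled to [0, 1]):

import numpy as np

client = GhostFaceNetClient()
face = np.random.rand(1, 112, 112, 3).astype("float32")  # placeholder aligned crop
embedding = client.model.predict(face, verbose=0)[0]
print(embedding.shape)  # (512,)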
deepface/basemodels/OpenFace.py ADDED
@@ -0,0 +1,397 @@
1
+ import os
2
+ import gdown
3
+ import tensorflow as tf
4
+ from deepface.commons import package_utils, folder_utils
5
+ from deepface.models.FacialRecognition import FacialRecognition
6
+ from deepface.commons import logger as log
7
+
8
+ logger = log.get_singletonish_logger()
9
+
10
+ tf_version = package_utils.get_tf_major_version()
11
+ if tf_version == 1:
12
+ from keras.models import Model
13
+ from keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
14
+ from keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
15
+ from keras.layers import MaxPooling2D, AveragePooling2D
16
+ from keras import backend as K
17
+ else:
18
+ from tensorflow.keras.models import Model
19
+ from tensorflow.keras.layers import Conv2D, ZeroPadding2D, Input, concatenate
20
+ from tensorflow.keras.layers import Dense, Activation, Lambda, Flatten, BatchNormalization
21
+ from tensorflow.keras.layers import MaxPooling2D, AveragePooling2D
22
+ from tensorflow.keras import backend as K
23
+
24
+ # pylint: disable=unnecessary-lambda
25
+
26
+ # ---------------------------------------
27
+
28
+ # pylint: disable=too-few-public-methods
29
+ class OpenFaceClient(FacialRecognition):
30
+ """
31
+ OpenFace model class
32
+ """
33
+
34
+ def __init__(self):
35
+ self.model = load_model()
36
+ self.model_name = "OpenFace"
37
+ self.input_shape = (96, 96)
38
+ self.output_shape = 128
39
+
40
+
41
+ def load_model(
42
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
43
+ ) -> Model:
44
+ """
45
+ Consturct OpenFace model, download its weights and load
46
+ Returns:
47
+ model (Model)
48
+ """
49
+ myInput = Input(shape=(96, 96, 3))
50
+
51
+ x = ZeroPadding2D(padding=(3, 3), input_shape=(96, 96, 3))(myInput)
52
+ x = Conv2D(64, (7, 7), strides=(2, 2), name="conv1")(x)
53
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn1")(x)
54
+ x = Activation("relu")(x)
55
+ x = ZeroPadding2D(padding=(1, 1))(x)
56
+ x = MaxPooling2D(pool_size=3, strides=2)(x)
57
+ x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_1")(x)
58
+ x = Conv2D(64, (1, 1), name="conv2")(x)
59
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn2")(x)
60
+ x = Activation("relu")(x)
61
+ x = ZeroPadding2D(padding=(1, 1))(x)
62
+ x = Conv2D(192, (3, 3), name="conv3")(x)
63
+ x = BatchNormalization(axis=3, epsilon=0.00001, name="bn3")(x)
64
+ x = Activation("relu")(x)
65
+ x = Lambda(lambda x: tf.nn.lrn(x, alpha=1e-4, beta=0.75), name="lrn_2")(x)
66
+ x = ZeroPadding2D(padding=(1, 1))(x)
67
+ x = MaxPooling2D(pool_size=3, strides=2)(x)
68
+
69
+ # Inception3a
70
+ inception_3a_3x3 = Conv2D(96, (1, 1), name="inception_3a_3x3_conv1")(x)
71
+ inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn1")(
72
+ inception_3a_3x3
73
+ )
74
+ inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
75
+ inception_3a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3a_3x3)
76
+ inception_3a_3x3 = Conv2D(128, (3, 3), name="inception_3a_3x3_conv2")(inception_3a_3x3)
77
+ inception_3a_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_3x3_bn2")(
78
+ inception_3a_3x3
79
+ )
80
+ inception_3a_3x3 = Activation("relu")(inception_3a_3x3)
81
+
82
+ inception_3a_5x5 = Conv2D(16, (1, 1), name="inception_3a_5x5_conv1")(x)
83
+ inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn1")(
84
+ inception_3a_5x5
85
+ )
86
+ inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
87
+ inception_3a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3a_5x5)
88
+ inception_3a_5x5 = Conv2D(32, (5, 5), name="inception_3a_5x5_conv2")(inception_3a_5x5)
89
+ inception_3a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_5x5_bn2")(
90
+ inception_3a_5x5
91
+ )
92
+ inception_3a_5x5 = Activation("relu")(inception_3a_5x5)
93
+
94
+ inception_3a_pool = MaxPooling2D(pool_size=3, strides=2)(x)
95
+ inception_3a_pool = Conv2D(32, (1, 1), name="inception_3a_pool_conv")(inception_3a_pool)
96
+ inception_3a_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_pool_bn")(
97
+ inception_3a_pool
98
+ )
99
+ inception_3a_pool = Activation("relu")(inception_3a_pool)
100
+ inception_3a_pool = ZeroPadding2D(padding=((3, 4), (3, 4)))(inception_3a_pool)
101
+
102
+ inception_3a_1x1 = Conv2D(64, (1, 1), name="inception_3a_1x1_conv")(x)
103
+ inception_3a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3a_1x1_bn")(
104
+ inception_3a_1x1
105
+ )
106
+ inception_3a_1x1 = Activation("relu")(inception_3a_1x1)
107
+
108
+ inception_3a = concatenate(
109
+ [inception_3a_3x3, inception_3a_5x5, inception_3a_pool, inception_3a_1x1], axis=3
110
+ )
111
+
112
+ # Inception3b
113
+ inception_3b_3x3 = Conv2D(96, (1, 1), name="inception_3b_3x3_conv1")(inception_3a)
114
+ inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn1")(
115
+ inception_3b_3x3
116
+ )
117
+ inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
118
+ inception_3b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3b_3x3)
119
+ inception_3b_3x3 = Conv2D(128, (3, 3), name="inception_3b_3x3_conv2")(inception_3b_3x3)
120
+ inception_3b_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_3x3_bn2")(
121
+ inception_3b_3x3
122
+ )
123
+ inception_3b_3x3 = Activation("relu")(inception_3b_3x3)
124
+
125
+ inception_3b_5x5 = Conv2D(32, (1, 1), name="inception_3b_5x5_conv1")(inception_3a)
126
+ inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn1")(
127
+ inception_3b_5x5
128
+ )
129
+ inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
130
+ inception_3b_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3b_5x5)
131
+ inception_3b_5x5 = Conv2D(64, (5, 5), name="inception_3b_5x5_conv2")(inception_3b_5x5)
132
+ inception_3b_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_5x5_bn2")(
133
+ inception_3b_5x5
134
+ )
135
+ inception_3b_5x5 = Activation("relu")(inception_3b_5x5)
136
+
137
+ inception_3b_pool = Lambda(lambda x: x**2, name="power2_3b")(inception_3a)
138
+ inception_3b_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_3b_pool)
139
+ inception_3b_pool = Lambda(lambda x: x * 9, name="mult9_3b")(inception_3b_pool)
140
+ inception_3b_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_3b")(inception_3b_pool)
141
+ inception_3b_pool = Conv2D(64, (1, 1), name="inception_3b_pool_conv")(inception_3b_pool)
142
+ inception_3b_pool = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_pool_bn")(
143
+ inception_3b_pool
144
+ )
145
+ inception_3b_pool = Activation("relu")(inception_3b_pool)
146
+ inception_3b_pool = ZeroPadding2D(padding=(4, 4))(inception_3b_pool)
147
+
148
+ inception_3b_1x1 = Conv2D(64, (1, 1), name="inception_3b_1x1_conv")(inception_3a)
149
+ inception_3b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3b_1x1_bn")(
150
+ inception_3b_1x1
151
+ )
152
+ inception_3b_1x1 = Activation("relu")(inception_3b_1x1)
153
+
154
+ inception_3b = concatenate(
155
+ [inception_3b_3x3, inception_3b_5x5, inception_3b_pool, inception_3b_1x1], axis=3
156
+ )
157
+
158
+ # Inception3c
159
+ inception_3c_3x3 = Conv2D(128, (1, 1), strides=(1, 1), name="inception_3c_3x3_conv1")(
160
+ inception_3b
161
+ )
162
+ inception_3c_3x3 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_3x3_bn1")(
163
+ inception_3c_3x3
164
+ )
165
+ inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
166
+ inception_3c_3x3 = ZeroPadding2D(padding=(1, 1))(inception_3c_3x3)
167
+ inception_3c_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_3c_3x3_conv" + "2")(
168
+ inception_3c_3x3
169
+ )
170
+ inception_3c_3x3 = BatchNormalization(
171
+ axis=3, epsilon=0.00001, name="inception_3c_3x3_bn" + "2"
172
+ )(inception_3c_3x3)
173
+ inception_3c_3x3 = Activation("relu")(inception_3c_3x3)
174
+
175
+ inception_3c_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_3c_5x5_conv1")(
176
+ inception_3b
177
+ )
178
+ inception_3c_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_3c_5x5_bn1")(
179
+ inception_3c_5x5
180
+ )
181
+ inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
182
+ inception_3c_5x5 = ZeroPadding2D(padding=(2, 2))(inception_3c_5x5)
183
+ inception_3c_5x5 = Conv2D(64, (5, 5), strides=(2, 2), name="inception_3c_5x5_conv" + "2")(
184
+ inception_3c_5x5
185
+ )
186
+ inception_3c_5x5 = BatchNormalization(
187
+ axis=3, epsilon=0.00001, name="inception_3c_5x5_bn" + "2"
188
+ )(inception_3c_5x5)
189
+ inception_3c_5x5 = Activation("relu")(inception_3c_5x5)
190
+
191
+ inception_3c_pool = MaxPooling2D(pool_size=3, strides=2)(inception_3b)
192
+ inception_3c_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_3c_pool)
193
+
194
+ inception_3c = concatenate([inception_3c_3x3, inception_3c_5x5, inception_3c_pool], axis=3)
195
+
196
+ # inception 4a
197
+ inception_4a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_4a_3x3_conv" + "1")(
198
+ inception_3c
199
+ )
200
+ inception_4a_3x3 = BatchNormalization(
201
+ axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "1"
202
+ )(inception_4a_3x3)
203
+ inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
204
+ inception_4a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4a_3x3)
205
+ inception_4a_3x3 = Conv2D(192, (3, 3), strides=(1, 1), name="inception_4a_3x3_conv" + "2")(
206
+ inception_4a_3x3
207
+ )
208
+ inception_4a_3x3 = BatchNormalization(
209
+ axis=3, epsilon=0.00001, name="inception_4a_3x3_bn" + "2"
210
+ )(inception_4a_3x3)
211
+ inception_4a_3x3 = Activation("relu")(inception_4a_3x3)
212
+
213
+ inception_4a_5x5 = Conv2D(32, (1, 1), strides=(1, 1), name="inception_4a_5x5_conv1")(
214
+ inception_3c
215
+ )
216
+ inception_4a_5x5 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_5x5_bn1")(
217
+ inception_4a_5x5
218
+ )
219
+ inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
220
+ inception_4a_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4a_5x5)
221
+ inception_4a_5x5 = Conv2D(64, (5, 5), strides=(1, 1), name="inception_4a_5x5_conv" + "2")(
222
+ inception_4a_5x5
223
+ )
224
+ inception_4a_5x5 = BatchNormalization(
225
+ axis=3, epsilon=0.00001, name="inception_4a_5x5_bn" + "2"
226
+ )(inception_4a_5x5)
227
+ inception_4a_5x5 = Activation("relu")(inception_4a_5x5)
228
+
229
+ inception_4a_pool = Lambda(lambda x: x**2, name="power2_4a")(inception_3c)
230
+ inception_4a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_4a_pool)
231
+ inception_4a_pool = Lambda(lambda x: x * 9, name="mult9_4a")(inception_4a_pool)
232
+ inception_4a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_4a")(inception_4a_pool)
233
+
234
+ inception_4a_pool = Conv2D(128, (1, 1), strides=(1, 1), name="inception_4a_pool_conv" + "")(
235
+ inception_4a_pool
236
+ )
237
+ inception_4a_pool = BatchNormalization(
238
+ axis=3, epsilon=0.00001, name="inception_4a_pool_bn" + ""
239
+ )(inception_4a_pool)
240
+ inception_4a_pool = Activation("relu")(inception_4a_pool)
241
+ inception_4a_pool = ZeroPadding2D(padding=(2, 2))(inception_4a_pool)
242
+
243
+ inception_4a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_4a_1x1_conv" + "")(
244
+ inception_3c
245
+ )
246
+ inception_4a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_4a_1x1_bn" + "")(
247
+ inception_4a_1x1
248
+ )
249
+ inception_4a_1x1 = Activation("relu")(inception_4a_1x1)
250
+
251
+ inception_4a = concatenate(
252
+ [inception_4a_3x3, inception_4a_5x5, inception_4a_pool, inception_4a_1x1], axis=3
253
+ )
254
+
255
+ # inception4e
256
+ inception_4e_3x3 = Conv2D(160, (1, 1), strides=(1, 1), name="inception_4e_3x3_conv" + "1")(
257
+ inception_4a
258
+ )
259
+ inception_4e_3x3 = BatchNormalization(
260
+ axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "1"
261
+ )(inception_4e_3x3)
262
+ inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
263
+ inception_4e_3x3 = ZeroPadding2D(padding=(1, 1))(inception_4e_3x3)
264
+ inception_4e_3x3 = Conv2D(256, (3, 3), strides=(2, 2), name="inception_4e_3x3_conv" + "2")(
265
+ inception_4e_3x3
266
+ )
267
+ inception_4e_3x3 = BatchNormalization(
268
+ axis=3, epsilon=0.00001, name="inception_4e_3x3_bn" + "2"
269
+ )(inception_4e_3x3)
270
+ inception_4e_3x3 = Activation("relu")(inception_4e_3x3)
271
+
272
+ inception_4e_5x5 = Conv2D(64, (1, 1), strides=(1, 1), name="inception_4e_5x5_conv" + "1")(
273
+ inception_4a
274
+ )
275
+ inception_4e_5x5 = BatchNormalization(
276
+ axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "1"
277
+ )(inception_4e_5x5)
278
+ inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
279
+ inception_4e_5x5 = ZeroPadding2D(padding=(2, 2))(inception_4e_5x5)
280
+ inception_4e_5x5 = Conv2D(128, (5, 5), strides=(2, 2), name="inception_4e_5x5_conv" + "2")(
281
+ inception_4e_5x5
282
+ )
283
+ inception_4e_5x5 = BatchNormalization(
284
+ axis=3, epsilon=0.00001, name="inception_4e_5x5_bn" + "2"
285
+ )(inception_4e_5x5)
286
+ inception_4e_5x5 = Activation("relu")(inception_4e_5x5)
287
+
288
+ inception_4e_pool = MaxPooling2D(pool_size=3, strides=2)(inception_4a)
289
+ inception_4e_pool = ZeroPadding2D(padding=((0, 1), (0, 1)))(inception_4e_pool)
290
+
291
+ inception_4e = concatenate([inception_4e_3x3, inception_4e_5x5, inception_4e_pool], axis=3)
292
+
293
+ # inception5a
294
+ inception_5a_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_3x3_conv" + "1")(
295
+ inception_4e
296
+ )
297
+ inception_5a_3x3 = BatchNormalization(
298
+ axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "1"
299
+ )(inception_5a_3x3)
300
+ inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
301
+ inception_5a_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5a_3x3)
302
+ inception_5a_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5a_3x3_conv" + "2")(
303
+ inception_5a_3x3
304
+ )
305
+ inception_5a_3x3 = BatchNormalization(
306
+ axis=3, epsilon=0.00001, name="inception_5a_3x3_bn" + "2"
307
+ )(inception_5a_3x3)
308
+ inception_5a_3x3 = Activation("relu")(inception_5a_3x3)
309
+
310
+ inception_5a_pool = Lambda(lambda x: x**2, name="power2_5a")(inception_4e)
311
+ inception_5a_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3))(inception_5a_pool)
312
+ inception_5a_pool = Lambda(lambda x: x * 9, name="mult9_5a")(inception_5a_pool)
313
+ inception_5a_pool = Lambda(lambda x: K.sqrt(x), name="sqrt_5a")(inception_5a_pool)
314
+
315
+ inception_5a_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5a_pool_conv" + "")(
316
+ inception_5a_pool
317
+ )
318
+ inception_5a_pool = BatchNormalization(
319
+ axis=3, epsilon=0.00001, name="inception_5a_pool_bn" + ""
320
+ )(inception_5a_pool)
321
+ inception_5a_pool = Activation("relu")(inception_5a_pool)
322
+ inception_5a_pool = ZeroPadding2D(padding=(1, 1))(inception_5a_pool)
323
+
324
+ inception_5a_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5a_1x1_conv" + "")(
325
+ inception_4e
326
+ )
327
+ inception_5a_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5a_1x1_bn" + "")(
328
+ inception_5a_1x1
329
+ )
330
+ inception_5a_1x1 = Activation("relu")(inception_5a_1x1)
331
+
332
+ inception_5a = concatenate([inception_5a_3x3, inception_5a_pool, inception_5a_1x1], axis=3)
333
+
334
+ # inception_5b
335
+ inception_5b_3x3 = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_3x3_conv" + "1")(
336
+ inception_5a
337
+ )
338
+ inception_5b_3x3 = BatchNormalization(
339
+ axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "1"
340
+ )(inception_5b_3x3)
341
+ inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
342
+ inception_5b_3x3 = ZeroPadding2D(padding=(1, 1))(inception_5b_3x3)
343
+ inception_5b_3x3 = Conv2D(384, (3, 3), strides=(1, 1), name="inception_5b_3x3_conv" + "2")(
344
+ inception_5b_3x3
345
+ )
346
+ inception_5b_3x3 = BatchNormalization(
347
+ axis=3, epsilon=0.00001, name="inception_5b_3x3_bn" + "2"
348
+ )(inception_5b_3x3)
349
+ inception_5b_3x3 = Activation("relu")(inception_5b_3x3)
350
+
351
+ inception_5b_pool = MaxPooling2D(pool_size=3, strides=2)(inception_5a)
352
+
353
+ inception_5b_pool = Conv2D(96, (1, 1), strides=(1, 1), name="inception_5b_pool_conv" + "")(
354
+ inception_5b_pool
355
+ )
356
+ inception_5b_pool = BatchNormalization(
357
+ axis=3, epsilon=0.00001, name="inception_5b_pool_bn" + ""
358
+ )(inception_5b_pool)
359
+ inception_5b_pool = Activation("relu")(inception_5b_pool)
360
+
361
+ inception_5b_pool = ZeroPadding2D(padding=(1, 1))(inception_5b_pool)
362
+
363
+ inception_5b_1x1 = Conv2D(256, (1, 1), strides=(1, 1), name="inception_5b_1x1_conv" + "")(
364
+ inception_5a
365
+ )
366
+ inception_5b_1x1 = BatchNormalization(axis=3, epsilon=0.00001, name="inception_5b_1x1_bn" + "")(
367
+ inception_5b_1x1
368
+ )
369
+ inception_5b_1x1 = Activation("relu")(inception_5b_1x1)
370
+
371
+ inception_5b = concatenate([inception_5b_3x3, inception_5b_pool, inception_5b_1x1], axis=3)
372
+
373
+ av_pool = AveragePooling2D(pool_size=(3, 3), strides=(1, 1))(inception_5b)
374
+ reshape_layer = Flatten()(av_pool)
375
+ dense_layer = Dense(128, name="dense_layer")(reshape_layer)
376
+ norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(dense_layer)
377
+
378
+ # Final Model
379
+ model = Model(inputs=[myInput], outputs=norm_layer)
380
+
381
+ # -----------------------------------
382
+
383
+ home = folder_utils.get_deepface_home()
384
+
385
+ if not os.path.isfile(home + "/.deepface/weights/openface_weights.h5"):
386
+ logger.info("openface_weights.h5 will be downloaded...")
387
+
388
+ output = home + "/.deepface/weights/openface_weights.h5"
389
+ gdown.download(url, output, quiet=False)
390
+
391
+ # -----------------------------------
392
+
393
+ model.load_weights(home + "/.deepface/weights/openface_weights.h5")
394
+
395
+ # -----------------------------------
396
+
397
+ return model
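Because norm_layer L2-normalizes the 128-d output, embeddings come out unit length; a quick sketch to confirm (not part of this diff, placeholder input):

import numpy as np

model = load_model()
face = np.random.rand(1, 96, 96, 3).astype("float32")  # placeholder 96x96 crop
embedding = model.predict(face, verbose=0)[0]
print(embedding.shape, np.linalg.norm(embedding))  # (128,) and ~1.0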
deepface/basemodels/SFace.py ADDED
@@ -0,0 +1,87 @@
1
+ # built-in dependencies
2
+ import os
3
+ from typing import Any, List
4
+
5
+ # 3rd party dependencies
6
+ import numpy as np
7
+ import cv2 as cv
8
+ import gdown
9
+
10
+ # project dependencies
11
+ from deepface.commons import folder_utils
12
+ from deepface.models.FacialRecognition import FacialRecognition
13
+ from deepface.commons import logger as log
14
+
15
+ logger = log.get_singletonish_logger()
16
+
17
+ # pylint: disable=line-too-long, too-few-public-methods
18
+
19
+
20
+ class SFaceClient(FacialRecognition):
21
+ """
22
+ SFace model class
23
+ """
24
+
25
+ def __init__(self):
26
+ self.model = load_model()
27
+ self.model_name = "SFace"
28
+ self.input_shape = (112, 112)
29
+ self.output_shape = 128
30
+
31
+ def forward(self, img: np.ndarray) -> List[float]:
32
+ """
33
+ Find embeddings with SFace model
34
+ This model necessitates the override of the forward method
35
+ because it is not a keras model.
36
+ Args:
37
+ img (np.ndarray): pre-loaded image in BGR
38
+ Returns
39
+ embeddings (list): multi-dimensional vector
40
+ """
41
+ # return self.model.predict(img)[0].tolist()
42
+
43
+ # revert the image to original format and preprocess using the model
44
+ input_blob = (img[0] * 255).astype(np.uint8)
45
+
46
+ embeddings = self.model.model.feature(input_blob)
47
+
48
+ return embeddings[0].tolist()
49
+
50
+
51
+ def load_model(
52
+ url="https://github.com/opencv/opencv_zoo/raw/main/models/face_recognition_sface/face_recognition_sface_2021dec.onnx",
53
+ ) -> Any:
54
+ """
55
+ Construct SFace model, download its weights and load
56
+ """
57
+
58
+ home = folder_utils.get_deepface_home()
59
+
60
+ file_name = home + "/.deepface/weights/face_recognition_sface_2021dec.onnx"
61
+
62
+ if not os.path.isfile(file_name):
63
+
64
+ logger.info("sface weights will be downloaded...")
65
+
66
+ gdown.download(url, file_name, quiet=False)
67
+
68
+ model = SFaceWrapper(model_path=file_name)
69
+
70
+ return model
71
+
72
+
73
+ class SFaceWrapper:
74
+ def __init__(self, model_path):
75
+ """
76
+ SFace wrapper covering model construction, layer info and prediction
77
+ """
78
+ try:
79
+ self.model = cv.FaceRecognizerSF.create(
80
+ model=model_path, config="", backend_id=0, target_id=0
81
+ )
82
+ except Exception as err:
83
+ raise ValueError(
84
+ "Exception while calling opencv.FaceRecognizerSF module."
85
+ + "This is an optional dependency."
86
+ + "You can install it as pip install opencv-contrib-python."
87
+ ) from err
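A usage sketch, not part of this diff: forward rescales img[0] by 255, so the caller is assumed to pass a [0, 1]-scaled 112x112 BGR batch:

import numpy as np

client = SFaceClient()
face = np.random.rand(1, 112, 112, 3).astype(np.float32)  # placeholder, scaled to [0, 1]
embedding = client.forward(face)
print(len(embedding))  # 128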
deepface/basemodels/VGGFace.py ADDED
@@ -0,0 +1,160 @@
1
+ from typing import List
2
+ import os
3
+ import gdown
4
+ import numpy as np
5
+ from deepface.commons import package_utils, folder_utils
6
+ from deepface.modules import verification
7
+ from deepface.models.FacialRecognition import FacialRecognition
8
+ from deepface.commons import logger as log
9
+
10
+ logger = log.get_singletonish_logger()
11
+
12
+ # ---------------------------------------
13
+
14
+ tf_version = package_utils.get_tf_major_version()
15
+ if tf_version == 1:
16
+ from keras.models import Model, Sequential
17
+ from keras.layers import (
18
+ Convolution2D,
19
+ ZeroPadding2D,
20
+ MaxPooling2D,
21
+ Flatten,
22
+ Dropout,
23
+ Activation,
24
+ )
25
+ else:
26
+ from tensorflow.keras.models import Model, Sequential
27
+ from tensorflow.keras.layers import (
28
+ Convolution2D,
29
+ ZeroPadding2D,
30
+ MaxPooling2D,
31
+ Flatten,
32
+ Dropout,
33
+ Activation,
34
+ )
35
+
36
+ # ---------------------------------------
37
+
38
+ # pylint: disable=too-few-public-methods
39
+ class VggFaceClient(FacialRecognition):
40
+ """
41
+ VGG-Face model class
42
+ """
43
+
44
+ def __init__(self):
45
+ self.model = load_model()
46
+ self.model_name = "VGG-Face"
47
+ self.input_shape = (224, 224)
48
+ self.output_shape = 4096
49
+
50
+ def forward(self, img: np.ndarray) -> List[float]:
51
+ """
52
+ Generates embeddings using the VGG-Face model.
53
+ This method incorporates an additional normalization layer,
54
+ necessitating the override of the forward method.
55
+
56
+ Args:
57
+ img (np.ndarray): pre-loaded image in BGR
58
+ Returns
59
+ embeddings (list): multi-dimensional vector
60
+ """
61
+ # model.predict causes a memory issue when called in a for loop
62
+ # embedding = model.predict(img, verbose=0)[0].tolist()
63
+
64
+ # the normalization layer in the descriptor causes trouble for some gpu users (e.g. issues 957, 966)
65
+ # instead, we now calculate the normalization the traditional way rather than with the keras backend
66
+ embedding = self.model(img, training=False).numpy()[0].tolist()
67
+ embedding = verification.l2_normalize(embedding)
68
+ return embedding.tolist()
69
+
70
+
71
+ def base_model() -> Sequential:
72
+ """
73
+ Base model of VGG-Face being used for classification - not to find embeddings
74
+ Returns:
75
+ model (Sequential): model was trained to classify 2622 identities
76
+ """
77
+ model = Sequential()
78
+ model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
79
+ model.add(Convolution2D(64, (3, 3), activation="relu"))
80
+ model.add(ZeroPadding2D((1, 1)))
81
+ model.add(Convolution2D(64, (3, 3), activation="relu"))
82
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
83
+
84
+ model.add(ZeroPadding2D((1, 1)))
85
+ model.add(Convolution2D(128, (3, 3), activation="relu"))
86
+ model.add(ZeroPadding2D((1, 1)))
87
+ model.add(Convolution2D(128, (3, 3), activation="relu"))
88
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
89
+
90
+ model.add(ZeroPadding2D((1, 1)))
91
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
92
+ model.add(ZeroPadding2D((1, 1)))
93
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
94
+ model.add(ZeroPadding2D((1, 1)))
95
+ model.add(Convolution2D(256, (3, 3), activation="relu"))
96
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
97
+
98
+ model.add(ZeroPadding2D((1, 1)))
99
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
100
+ model.add(ZeroPadding2D((1, 1)))
101
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
102
+ model.add(ZeroPadding2D((1, 1)))
103
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
104
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
105
+
106
+ model.add(ZeroPadding2D((1, 1)))
107
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
108
+ model.add(ZeroPadding2D((1, 1)))
109
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
110
+ model.add(ZeroPadding2D((1, 1)))
111
+ model.add(Convolution2D(512, (3, 3), activation="relu"))
112
+ model.add(MaxPooling2D((2, 2), strides=(2, 2)))
113
+
114
+ model.add(Convolution2D(4096, (7, 7), activation="relu"))
115
+ model.add(Dropout(0.5))
116
+ model.add(Convolution2D(4096, (1, 1), activation="relu"))
117
+ model.add(Dropout(0.5))
118
+ model.add(Convolution2D(2622, (1, 1)))
119
+ model.add(Flatten())
120
+ model.add(Activation("softmax"))
121
+
122
+ return model
123
+
124
+
125
+ def load_model(
126
+ url="https://github.com/serengil/deepface_models/releases/download/v1.0/vgg_face_weights.h5",
127
+ ) -> Model:
128
+ """
129
+ Final VGG-Face model being used for finding embeddings
130
+ Returns:
131
+ model (Model): returning 4096 dimensional vectors
132
+ """
133
+
134
+ model = base_model()
135
+
136
+ home = folder_utils.get_deepface_home()
137
+ output = home + "/.deepface/weights/vgg_face_weights.h5"
138
+
139
+ if not os.path.isfile(output):
140
+ logger.info("vgg_face_weights.h5 will be downloaded...")
141
+ gdown.download(url, output, quiet=False)
142
+
143
+ model.load_weights(output)
144
+
145
+ # 2622d dimensional model
146
+ # vgg_face_descriptor = Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
147
+
148
+ # the 4096 dimensional model offers a 6% to 14% increase in accuracy!
149
+ # - softmax causes underfitting
150
+ # - added normalization layer to avoid underfitting with euclidean
151
+ # as described here: https://github.com/serengil/deepface/issues/944
152
+ base_model_output = Sequential()
153
+ base_model_output = Flatten()(model.layers[-5].output)
154
+ # keras backend's l2 normalization layer troubles some gpu users (e.g. issue 957, 966)
155
+ # base_model_output = Lambda(lambda x: K.l2_normalize(x, axis=1), name="norm_layer")(
156
+ # base_model_output
157
+ # )
158
+ vgg_face_descriptor = Model(inputs=model.input, outputs=base_model_output)
159
+
160
+ return vgg_face_descriptor
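For quick reference, a minimal sketch of exercising the descriptor above. The random input is only a stand-in for a preprocessed 224x224 BGR face crop, so treat it as an illustrative assumption rather than real preprocessing:

import numpy as np
from deepface.commons import folder_utils
from deepface.basemodels import VGGFace

folder_utils.initialize_folder()  # make sure ~/.deepface/weights exists
model = VGGFace.load_model()  # downloads vgg_face_weights.h5 on first call

# dummy batch standing in for a preprocessed face crop
img = np.random.rand(1, 224, 224, 3).astype(np.float32)
embedding = model(img, training=False).numpy()[0]
print(embedding.shape)  # (4096,)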
deepface/basemodels/__init__.py ADDED
File without changes
deepface/commons/__init__.py ADDED
File without changes
deepface/commons/constant.py ADDED
@@ -0,0 +1,4 @@
+ import os
+
+ SRC_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+ ROOT_DIR = os.path.dirname(SRC_DIR)
deepface/commons/folder_utils.py ADDED
@@ -0,0 +1,35 @@
+ import os
+ from pathlib import Path
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+
+ def initialize_folder() -> None:
+     """
+     Initialize the folder for storing model weights.
+
+     Raises:
+         OSError: if the folder cannot be created.
+     """
+     home = get_deepface_home()
+     deepface_home_path = home + "/.deepface"
+     weights_path = deepface_home_path + "/weights"
+
+     if not os.path.exists(deepface_home_path):
+         os.makedirs(deepface_home_path, exist_ok=True)
+         logger.info(f"Directory {home}/.deepface created")
+
+     if not os.path.exists(weights_path):
+         os.makedirs(weights_path, exist_ok=True)
+         logger.info(f"Directory {home}/.deepface/weights created")
+
+
+ def get_deepface_home() -> str:
+     """
+     Get the home directory for storing model weights
+
+     Returns:
+         str: the home directory.
+     """
+     return str(os.getenv("DEEPFACE_HOME", default=str(Path.home())))
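Because the home directory is resolved from `DEEPFACE_HOME`, weight storage can be redirected before initialization; a short sketch with an illustrative path:

import os
from deepface.commons import folder_utils

# redirect weight storage before anything downloads; path is illustrative
os.environ["DEEPFACE_HOME"] = "/tmp/deepface-demo"
folder_utils.initialize_folder()
print(folder_utils.get_deepface_home())  # /tmp/deepface-demo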
deepface/commons/image_utils.py ADDED
@@ -0,0 +1,149 @@
+ # built-in dependencies
+ import os
+ import io
+ from typing import List, Union, Tuple
+ import hashlib
+ import base64
+ from pathlib import Path
+
+ # 3rd party dependencies
+ import requests
+ import numpy as np
+ import cv2
+ from PIL import Image
+
+
+ def list_images(path: str) -> List[str]:
+     """
+     List images in a given path
+     Args:
+         path (str): path's location
+     Returns:
+         images (list): list of exact image paths
+     """
+     images = []
+     for r, _, f in os.walk(path):
+         for file in f:
+             exact_path = os.path.join(r, file)
+
+             _, ext = os.path.splitext(exact_path)
+             ext_lower = ext.lower()
+
+             if ext_lower not in {".jpg", ".jpeg", ".png"}:
+                 continue
+
+             with Image.open(exact_path) as img:  # lazy
+                 if img.format.lower() in ["jpeg", "png"]:
+                     images.append(exact_path)
+     return images
+
+
+ def find_image_hash(file_path: str) -> str:
+     """
+     Find the hash of the given image file from its properties;
+     hashing the image content itself would be a costly operation
+     Args:
+         file_path (str): exact image path
+     Returns:
+         hash (str): digest with the sha1 algorithm
+     """
+     file_stats = os.stat(file_path)
+
+     # some properties
+     file_size = file_stats.st_size
+     creation_time = file_stats.st_ctime
+     modification_time = file_stats.st_mtime
+
+     properties = f"{file_size}-{creation_time}-{modification_time}"
+
+     hasher = hashlib.sha1()
+     hasher.update(properties.encode("utf-8"))
+     return hasher.hexdigest()
+
+
+ def load_image(img: Union[str, Path, np.ndarray]) -> Tuple[np.ndarray, str]:
+     """
+     Load image from path, url, base64 or numpy array.
+     Args:
+         img: a path, url, base64 or numpy array.
+     Returns:
+         image (numpy array): the loaded image in BGR format
+         image name (str): image name itself
+     """
+
+     # The image is already a numpy array
+     if isinstance(img, np.ndarray):
+         return img, "numpy array"
+
+     if isinstance(img, Path):
+         img = str(img)
+
+     if not isinstance(img, str):
+         raise ValueError(f"img must be numpy array or str but it is {type(img)}")
+
+     # The image is a base64 string
+     if img.startswith("data:image/"):
+         return load_image_from_base64(img), "base64 encoded string"
+
+     # The image is a url
+     if img.lower().startswith("http://") or img.lower().startswith("https://"):
+         return load_image_from_web(url=img), img
+
+     # The image is a path
+     if not os.path.isfile(img):
+         raise ValueError(f"Confirm that {img} exists")
+
+     # the image must then be a file on the system
+
+     # image name must contain only english (ascii) characters
+     if not img.isascii():
+         raise ValueError(f"Input image must not have non-english characters - {img}")
+
+     img_obj_bgr = cv2.imread(img)
+     # img_obj_rgb = cv2.cvtColor(img_obj_bgr, cv2.COLOR_BGR2RGB)
+     return img_obj_bgr, img
+
+
+ def load_image_from_base64(uri: str) -> np.ndarray:
+     """
+     Load image from base64 string.
+     Args:
+         uri: a base64 string.
+     Returns:
+         numpy array: the loaded image.
+     """
+
+     encoded_data_parts = uri.split(",")
+
+     if len(encoded_data_parts) < 2:
+         raise ValueError("format error in base64 encoded string")
+
+     encoded_data = encoded_data_parts[1]
+     decoded_bytes = base64.b64decode(encoded_data)
+
+     # similar to the find functionality, we are just considering these extensions
+     # content type is a safer option than file extension
+     with Image.open(io.BytesIO(decoded_bytes)) as img:
+         file_type = img.format.lower()
+         if file_type not in ["jpeg", "png"]:
+             raise ValueError(f"input image can be jpg or png, but it is {file_type}")
+
+     nparr = np.frombuffer(decoded_bytes, np.uint8)  # np.fromstring is deprecated
+     img_bgr = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+     # img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+     return img_bgr
+
+
+ def load_image_from_web(url: str) -> np.ndarray:
+     """
+     Loading an image from web
+     Args:
+         url: link for the image
+     Returns:
+         img (np.ndarray): equivalent to pre-loaded image from opencv (BGR format)
+     """
+     response = requests.get(url, stream=True, timeout=60)
+     response.raise_for_status()
+     image_array = np.asarray(bytearray(response.raw.read()), dtype=np.uint8)
+     img = cv2.imdecode(image_array, cv2.IMREAD_COLOR)
+     return img
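A minimal sketch of the three string-based entry points of `load_image`; the file path, url, and base64 payload below are placeholders, not values from the commit:

from deepface.commons import image_utils

# each call resolves to a BGR numpy array plus a name tag
img, name = image_utils.load_image("tests/dataset/img1.jpg")  # placeholder path
print(img.shape, name)

# img, name = image_utils.load_image("https://example.com/face.png")  # placeholder url
# img, name = image_utils.load_image("data:image/png;base64,....")    # placeholder payload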
deepface/commons/logger.py ADDED
@@ -0,0 +1,54 @@
+ import os
+ import logging
+ from datetime import datetime
+
+ # pylint: disable=broad-except
+ class Logger:
+     def __init__(self, module=None):
+         self.module = module
+         log_level = os.environ.get("DEEPFACE_LOG_LEVEL", str(logging.INFO))
+         try:
+             self.log_level = int(log_level)
+         except Exception as err:
+             self.dump_log(
+                 "Exception while parsing $DEEPFACE_LOG_LEVEL. "
+                 f"Expected int but it is {log_level} ({str(err)}). "
+                 "Setting app log level to info."
+             )
+             self.log_level = logging.INFO
+
+     def info(self, message):
+         if self.log_level <= logging.INFO:
+             self.dump_log(f"{message}")
+
+     def debug(self, message):
+         if self.log_level <= logging.DEBUG:
+             self.dump_log(f"🕷️ {message}")
+
+     def warn(self, message):
+         if self.log_level <= logging.WARNING:
+             self.dump_log(f"⚠️ {message}")
+
+     def error(self, message):
+         if self.log_level <= logging.ERROR:
+             self.dump_log(f"🔴 {message}")
+
+     def critical(self, message):
+         if self.log_level <= logging.CRITICAL:
+             self.dump_log(f"💥 {message}")
+
+     def dump_log(self, message):
+         print(f"{str(datetime.now())[2:-7]} - {message}")
+
+
+ def get_singletonish_logger():
+     # singleton design pattern
+     global model_obj
+
+     if "model_obj" not in globals():
+         model_obj = {}
+
+     if "logger" not in model_obj:
+         model_obj["logger"] = Logger(module="Singleton")
+
+     return model_obj["logger"]
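Since the level is read from the environment when the singleton is first constructed, the variable has to be set before the first logger is built; a short sketch:

import logging
import os

os.environ["DEEPFACE_LOG_LEVEL"] = str(logging.DEBUG)  # before first use

from deepface.commons import logger as log

logger = log.get_singletonish_logger()
logger.debug("visible because the threshold is DEBUG")
logger.info("plain info line")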
deepface/commons/os_path.py ADDED
@@ -0,0 +1,10 @@
+ import os
+
+ class os_path:
+     @staticmethod
+     def get_main_directory():
+         path = os.path.abspath(__file__)
+         drive, _ = os.path.splitdrive(path)
+         if not drive.endswith(os.path.sep):
+             drive += os.path.sep
+         return drive
deepface/commons/package_utils.py ADDED
@@ -0,0 +1,46 @@
+ # 3rd party dependencies
+ import tensorflow as tf
+
+ # package dependencies
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+
+ def get_tf_major_version() -> int:
+     """
+     Find tensorflow's major version
+     Returns
+         major_version (int)
+     """
+     return int(tf.__version__.split(".", maxsplit=1)[0])
+
+
+ def get_tf_minor_version() -> int:
+     """
+     Find tensorflow's minor version
+     Returns
+         minor_version (int)
+     """
+     return int(tf.__version__.split(".", maxsplit=-1)[1])
+
+
+ def validate_for_keras3():
+     tf_major = get_tf_major_version()
+     tf_minor = get_tf_minor_version()
+
+     # tf_keras is a must dependency after tf 2.16
+     if tf_major == 1 or (tf_major == 2 and tf_minor < 16):
+         return
+
+     try:
+         import tf_keras
+
+         logger.debug(f"tf_keras is already available - {tf_keras.__version__}")
+     except ImportError as err:
+         # the package could be installed here automatically instead
+         raise ValueError(
+             f"You have tensorflow {tf.__version__} and this requires "
+             "tf-keras package. Please run `pip install tf-keras` "
+             "or downgrade your tensorflow."
+         ) from err
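The same split-and-cast logic, applied to a literal version string to show where the tf-keras requirement kicks in:

version = "2.16.1"  # illustrative value, not read from an installed tensorflow
major = int(version.split(".", maxsplit=1)[0])  # 2
minor = int(version.split(".")[1])              # 16
needs_tf_keras = major > 2 or (major == 2 and minor >= 16)
print(needs_tf_keras)  # True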
deepface/commons/path.py ADDED
@@ -0,0 +1,9 @@
+ import os
+
+ class path:
+     @staticmethod
+     def get_parent_path(path, levels=1):
+         for _ in range(levels):
+             path = os.path.dirname(path)
+         return path
+
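A quick worked call of the helper above, with an illustrative posix path:

from deepface.commons.path import path

p = "/home/user/project/src/module.py"
print(path.get_parent_path(p))            # /home/user/project/src
print(path.get_parent_path(p, levels=3))  # /home/user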
deepface/detectors/CenterFace.py ADDED
@@ -0,0 +1,217 @@
+ # built-in dependencies
+ import os
+ from typing import List
+
+ # 3rd party dependencies
+ import numpy as np
+ import cv2
+ import gdown
+
+ # project dependencies
+ from deepface.commons import folder_utils
+ from deepface.models.Detector import Detector, FacialAreaRegion
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+ # pylint: disable=c-extension-no-member
+
+ WEIGHTS_URL = "https://github.com/Star-Clouds/CenterFace/raw/master/models/onnx/centerface.onnx"
+
+
+ class CenterFaceClient(Detector):
+     def __init__(self):
+         # BUG: model must be flushed for each call
+         # self.model = self.build_model()
+         pass
+
+     def build_model(self):
+         """
+         Download pre-trained weights of the CenterFace model if necessary and load the built model
+         """
+         weights_path = f"{folder_utils.get_deepface_home()}/.deepface/weights/centerface.onnx"
+         if not os.path.isfile(weights_path):
+             logger.info(f"Downloading CenterFace weights from {WEIGHTS_URL} to {weights_path}...")
+             try:
+                 gdown.download(WEIGHTS_URL, weights_path, quiet=False)
+             except Exception as err:
+                 raise ValueError(
+                     f"Exception while downloading CenterFace weights from {WEIGHTS_URL}. "
+                     f"You may consider downloading it to {weights_path} manually."
+                 ) from err
+             logger.info(f"CenterFace model is downloaded to {os.path.basename(weights_path)}")
+
+         return CenterFace(weight_path=weights_path)
+
+     def detect_faces(self, img: np.ndarray) -> List["FacialAreaRegion"]:
+         """
+         Detect and align face with CenterFace
+
+         Args:
+             img (np.ndarray): pre-loaded image as numpy array
+
+         Returns:
+             results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
+         """
+         resp = []
+
+         threshold = float(os.getenv("CENTERFACE_THRESHOLD", "0.80"))
+
+         # BUG: model causes problematic results from 2nd call if it is not flushed
+         # detections, landmarks = self.model.forward(
+         #     img, img.shape[0], img.shape[1], threshold=threshold
+         # )
+         detections, landmarks = self.build_model().forward(
+             img, img.shape[0], img.shape[1], threshold=threshold
+         )
+
+         for i, detection in enumerate(detections):
+             boxes, confidence = detection[:4], detection[4]
+
+             x = boxes[0]
+             y = boxes[1]
+             w = boxes[2] - x
+             h = boxes[3] - y
+
+             landmark = landmarks[i]
+
+             right_eye = (int(landmark[0]), int(landmark[1]))
+             left_eye = (int(landmark[2]), int(landmark[3]))
+             # nose = (int(landmark[4]), int(landmark[5]))
+             # mouth_right = (int(landmark[6]), int(landmark[7]))
+             # mouth_left = (int(landmark[8]), int(landmark[9]))
+
+             facial_area = FacialAreaRegion(
+                 x=int(x),
+                 y=int(y),
+                 w=int(w),
+                 h=int(h),
+                 left_eye=left_eye,
+                 right_eye=right_eye,
+                 confidence=min(max(0, float(confidence)), 1.0),
+             )
+             resp.append(facial_area)
+
+         return resp
+
+
+ class CenterFace:
+     """
+     This class is heavily inspired by
+     github.com/Star-Clouds/CenterFace/blob/master/prj-python/centerface.py
+     """
+
+     def __init__(self, weight_path: str):
+         self.net = cv2.dnn.readNetFromONNX(weight_path)
+         self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = 0, 0, 0, 0
+
+     def forward(self, img, height, width, threshold=0.5):
+         self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = self.transform(height, width)
+         return self.inference_opencv(img, threshold)
+
+     def inference_opencv(self, img, threshold):
+         blob = cv2.dnn.blobFromImage(
+             img,
+             scalefactor=1.0,
+             size=(self.img_w_new, self.img_h_new),
+             mean=(0, 0, 0),
+             swapRB=True,
+             crop=False,
+         )
+         self.net.setInput(blob)
+         heatmap, scale, offset, lms = self.net.forward(["537", "538", "539", "540"])
+         return self.postprocess(heatmap, lms, offset, scale, threshold)
+
+     def transform(self, h, w):
+         img_h_new, img_w_new = int(np.ceil(h / 32) * 32), int(np.ceil(w / 32) * 32)  # multiples of 32
+         scale_h, scale_w = img_h_new / h, img_w_new / w
+         return img_h_new, img_w_new, scale_h, scale_w
+
+     def postprocess(self, heatmap, lms, offset, scale, threshold):
+         dets, lms = self.decode(
+             heatmap, scale, offset, lms, (self.img_h_new, self.img_w_new), threshold=threshold
+         )
+         if len(dets) > 0:
+             dets[:, 0:4:2], dets[:, 1:4:2] = (
+                 dets[:, 0:4:2] / self.scale_w,
+                 dets[:, 1:4:2] / self.scale_h,
+             )
+             lms[:, 0:10:2], lms[:, 1:10:2] = (
+                 lms[:, 0:10:2] / self.scale_w,
+                 lms[:, 1:10:2] / self.scale_h,
+             )
+         else:
+             dets = np.empty(shape=[0, 5], dtype=np.float32)
+             lms = np.empty(shape=[0, 10], dtype=np.float32)
+         return dets, lms
+
+     def decode(self, heatmap, scale, offset, landmark, size, threshold=0.1):
+         heatmap = np.squeeze(heatmap)
+         scale0, scale1 = scale[0, 0, :, :], scale[0, 1, :, :]
+         offset0, offset1 = offset[0, 0, :, :], offset[0, 1, :, :]
+         c0, c1 = np.where(heatmap > threshold)
+         boxes, lms = [], []
+         if len(c0) > 0:
+             # pylint:disable=consider-using-enumerate
+             for i in range(len(c0)):
+                 s0, s1 = np.exp(scale0[c0[i], c1[i]]) * 4, np.exp(scale1[c0[i], c1[i]]) * 4
+                 o0, o1 = offset0[c0[i], c1[i]], offset1[c0[i], c1[i]]
+                 s = heatmap[c0[i], c1[i]]
+                 x1, y1 = max(0, (c1[i] + o1 + 0.5) * 4 - s1 / 2), max(
+                     0, (c0[i] + o0 + 0.5) * 4 - s0 / 2
+                 )
+                 x1, y1 = min(x1, size[1]), min(y1, size[0])
+                 boxes.append([x1, y1, min(x1 + s1, size[1]), min(y1 + s0, size[0]), s])
+                 lm = []
+                 for j in range(5):
+                     lm.append(landmark[0, j * 2 + 1, c0[i], c1[i]] * s1 + x1)
+                     lm.append(landmark[0, j * 2, c0[i], c1[i]] * s0 + y1)
+                 lms.append(lm)
+             boxes = np.asarray(boxes, dtype=np.float32)
+             keep = self.nms(boxes[:, :4], boxes[:, 4], 0.3)
+             boxes = boxes[keep, :]
+             lms = np.asarray(lms, dtype=np.float32)
+             lms = lms[keep, :]
+         return boxes, lms
+
+     def nms(self, boxes, scores, nms_thresh):
+         x1 = boxes[:, 0]
+         y1 = boxes[:, 1]
+         x2 = boxes[:, 2]
+         y2 = boxes[:, 3]
+         areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+         order = np.argsort(scores)[::-1]
+         num_detections = boxes.shape[0]
+         suppressed = np.zeros((num_detections,), dtype=bool)
+
+         keep = []
+         for _i in range(num_detections):
+             i = order[_i]
+             if suppressed[i]:
+                 continue
+             keep.append(i)
+
+             ix1 = x1[i]
+             iy1 = y1[i]
+             ix2 = x2[i]
+             iy2 = y2[i]
+             iarea = areas[i]
+
+             for _j in range(_i + 1, num_detections):
+                 j = order[_j]
+                 if suppressed[j]:
+                     continue
+
+                 xx1 = max(ix1, x1[j])
+                 yy1 = max(iy1, y1[j])
+                 xx2 = min(ix2, x2[j])
+                 yy2 = min(iy2, y2[j])
+                 w = max(0, xx2 - xx1 + 1)
+                 h = max(0, yy2 - yy1 + 1)
+
+                 inter = w * h
+                 ovr = inter / (iarea + areas[j] - inter)
+                 if ovr >= nms_thresh:
+                     suppressed[j] = True
+
+         return keep
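A minimal usage sketch for the detector above; note the `CENTERFACE_THRESHOLD` environment hook that detect_faces reads, and that the image path is a placeholder:

import os
import cv2
from deepface.commons import folder_utils
from deepface.detectors.CenterFace import CenterFaceClient

folder_utils.initialize_folder()  # ensure the weights folder exists
os.environ["CENTERFACE_THRESHOLD"] = "0.35"  # loosen the default 0.80 cut-off

img = cv2.imread("tests/dataset/img1.jpg")  # placeholder path, BGR as the detector expects
for face in CenterFaceClient().detect_faces(img):
    print(face.x, face.y, face.w, face.h, face.confidence)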
deepface/detectors/DetectorWrapper.py ADDED
@@ -0,0 +1,204 @@
+ from typing import Any, List, Tuple
+ import numpy as np
+ from deepface.modules import detection
+ from deepface.models.Detector import Detector, DetectedFace, FacialAreaRegion
+ from deepface.detectors import (
+     FastMtCnn,
+     MediaPipe,
+     MtCnn,
+     OpenCv,
+     Dlib,
+     RetinaFace,
+     Ssd,
+     Yolo,
+     YuNet,
+     CenterFace,
+ )
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+
+ def build_model(detector_backend: str) -> Any:
+     """
+     Build a face detector model
+     Args:
+         detector_backend (str): backend detector name
+     Returns:
+         built detector (Any)
+     """
+     global face_detector_obj  # singleton design pattern
+
+     backends = {
+         "opencv": OpenCv.OpenCvClient,
+         "mtcnn": MtCnn.MtCnnClient,
+         "ssd": Ssd.SsdClient,
+         "dlib": Dlib.DlibClient,
+         "retinaface": RetinaFace.RetinaFaceClient,
+         "mediapipe": MediaPipe.MediaPipeClient,
+         "yolov8": Yolo.YoloClient,
+         "yunet": YuNet.YuNetClient,
+         "fastmtcnn": FastMtCnn.FastMtCnnClient,
+         "centerface": CenterFace.CenterFaceClient,
+     }
+
+     if "face_detector_obj" not in globals():
+         face_detector_obj = {}
+
+     built_models = list(face_detector_obj.keys())
+     if detector_backend not in built_models:
+         face_detector = backends.get(detector_backend)
+
+         if face_detector:
+             face_detector = face_detector()
+             face_detector_obj[detector_backend] = face_detector
+         else:
+             raise ValueError("invalid detector_backend passed - " + detector_backend)
+
+     return face_detector_obj[detector_backend]
+
+
+ def detect_faces(
+     detector_backend: str, img: np.ndarray, align: bool = True, expand_percentage: int = 0
+ ) -> List[DetectedFace]:
+     """
+     Detect face(s) from a given image
+     Args:
+         detector_backend (str): detector name
+
+         img (np.ndarray): pre-loaded image
+
+         align (bool): enable or disable alignment after detection
+
+         expand_percentage (int): expand detected facial area with a percentage (default is 0).
+
+     Returns:
+         results (List[DetectedFace]): A list of DetectedFace objects
+             where each object contains:
+
+         - img (np.ndarray): The detected face as a NumPy array.
+
+         - facial_area (FacialAreaRegion): The facial area region represented as x, y, w, h,
+             left_eye and right_eye. The left and right eyes are defined with respect to
+             the person in the image rather than the observer.
+
+         - confidence (float): The confidence score associated with the detected face.
+     """
+     face_detector: Detector = build_model(detector_backend)
+
+     # validate expand percentage score
+     if expand_percentage < 0:
+         logger.warn(
+             f"Expand percentage cannot be negative but you set it to {expand_percentage}. "
+             "Overwriting it to 0."
+         )
+         expand_percentage = 0
+
+     # find facial areas of given image
+     facial_areas = face_detector.detect_faces(img)
+
+     results = []
+     for facial_area in facial_areas:
+         x = facial_area.x
+         y = facial_area.y
+         w = facial_area.w
+         h = facial_area.h
+         left_eye = facial_area.left_eye
+         right_eye = facial_area.right_eye
+         confidence = facial_area.confidence
+
+         if expand_percentage > 0:
+             # Expand the facial region height and width by the provided percentage
+             # ensuring that the expanded region stays within img.shape limits
+             expanded_w = w + int(w * expand_percentage / 100)
+             expanded_h = h + int(h * expand_percentage / 100)
+
+             x = max(0, x - int((expanded_w - w) / 2))
+             y = max(0, y - int((expanded_h - h) / 2))
+             w = min(img.shape[1] - x, expanded_w)
+             h = min(img.shape[0] - y, expanded_h)
+
+         # extract detected face unaligned
+         detected_face = img[int(y) : int(y + h), int(x) : int(x + w)]
+
+         # align original image, then find projection of detected face area after alignment
+         if align is True:  # and left_eye is not None and right_eye is not None:
+             aligned_img, angle = detection.align_face(
+                 img=img, left_eye=left_eye, right_eye=right_eye
+             )
+             rotated_x1, rotated_y1, rotated_x2, rotated_y2 = rotate_facial_area(
+                 facial_area=(x, y, x + w, y + h), angle=angle, size=(img.shape[0], img.shape[1])
+             )
+             detected_face = aligned_img[
+                 int(rotated_y1) : int(rotated_y2), int(rotated_x1) : int(rotated_x2)
+             ]
+
+         result = DetectedFace(
+             img=detected_face,
+             facial_area=FacialAreaRegion(
+                 x=x, y=y, h=h, w=w, confidence=confidence, left_eye=left_eye, right_eye=right_eye
+             ),
+             confidence=confidence,
+         )
+         results.append(result)
+     return results
+
+
+ def rotate_facial_area(
+     facial_area: Tuple[int, int, int, int], angle: float, size: Tuple[int, int]
+ ) -> Tuple[int, int, int, int]:
+     """
+     Rotate the facial area around its center.
+     Inspired by the work of @UmutDeniz26 - github.com/serengil/retinaface/pull/80
+
+     Args:
+         facial_area (tuple of int): Representing the (x1, y1, x2, y2) of the facial area.
+             x2 is equal to x1 + w1, and y2 is equal to y1 + h1
+         angle (float): Angle of rotation in degrees. Its sign determines the direction of rotation.
+             Note that angles > 360 degrees are normalized to the range [0, 360).
+         size (tuple of int): Tuple representing the size of the image (height, width).
+
+     Returns:
+         rotated_coordinates (tuple of int): Representing the new coordinates
+             (x1, y1, x2, y2) or (x1, y1, x1+w1, y1+h1) of the rotated facial area.
+     """
+
+     # Normalize the angle so we don't have to
+     # worry about rotations greater than 360 degrees.
+     # We work around the quirky behavior of the modulo operator
+     # for negative angle values.
+     direction = 1 if angle >= 0 else -1
+     angle = abs(angle) % 360
+     if angle == 0:
+         return facial_area
+
+     # Angle in radians
+     angle = angle * np.pi / 180
+
+     height, width = size
+
+     # Translate the facial area to the center of the image
+     x = (facial_area[0] + facial_area[2]) / 2 - width / 2
+     y = (facial_area[1] + facial_area[3]) / 2 - height / 2
+
+     # Rotate the facial area
+     x_new = x * np.cos(angle) + y * direction * np.sin(angle)
+     y_new = -x * direction * np.sin(angle) + y * np.cos(angle)
+
+     # Translate the facial area back to the original position
+     x_new = x_new + width / 2
+     y_new = y_new + height / 2
+
+     # Calculate projected coordinates after alignment
+     x1 = x_new - (facial_area[2] - facial_area[0]) / 2
+     y1 = y_new - (facial_area[3] - facial_area[1]) / 2
+     x2 = x_new + (facial_area[2] - facial_area[0]) / 2
+     y2 = y_new + (facial_area[3] - facial_area[1]) / 2
+
+     # validate projected coordinates are in image's boundaries
+     x1 = max(int(x1), 0)
+     y1 = max(int(y1), 0)
+     x2 = min(int(x2), width)
+     y2 = min(int(y2), height)
+
+     return (x1, y1, x2, y2)
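Since the projection math above is easy to misread, here is a small worked call with illustrative values; remember that `size` is (height, width):

from deepface.detectors import DetectorWrapper

# a 100x100 box at the top-left corner of a 480x640 image,
# projected after a 90-degree alignment rotation: the box lands
# near the bottom-left and is clipped at the image boundary
box = (0, 0, 100, 100)
print(DetectorWrapper.rotate_facial_area(box, angle=90.0, size=(480, 640)))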
deepface/detectors/Dlib.py ADDED
@@ -0,0 +1,114 @@
+ from typing import List
+ import os
+ import bz2
+ import gdown
+ import numpy as np
+ from deepface.commons import folder_utils
+ from deepface.models.Detector import Detector, FacialAreaRegion
+ from deepface.commons import logger as log
+
+ logger = log.get_singletonish_logger()
+
+
+ class DlibClient(Detector):
+     def __init__(self):
+         self.model = self.build_model()
+
+     def build_model(self) -> dict:
+         """
+         Build a dlib hog face detector model
+         Returns:
+             model (Any)
+         """
+         home = folder_utils.get_deepface_home()
+
+         # this is not a must dependency. do not import it in the global level.
+         try:
+             import dlib
+         except ModuleNotFoundError as e:
+             raise ImportError(
+                 "Dlib is an optional detector, ensure the library is installed. "
+                 "Please install using 'pip install dlib'"
+             ) from e
+
+         # check required file exists in the home/.deepface/weights folder
+         if not os.path.isfile(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat"):
+
+             file_name = "shape_predictor_5_face_landmarks.dat.bz2"
+             logger.info(f"{file_name} is going to be downloaded")
+
+             url = f"http://dlib.net/files/{file_name}"
+             output = f"{home}/.deepface/weights/{file_name}"
+
+             gdown.download(url, output, quiet=False)
+
+             zipfile = bz2.BZ2File(output)
+             data = zipfile.read()
+             newfilepath = output[:-4]  # discard .bz2 extension
+             with open(newfilepath, "wb") as f:
+                 f.write(data)
+
+         face_detector = dlib.get_frontal_face_detector()
+         sp = dlib.shape_predictor(home + "/.deepface/weights/shape_predictor_5_face_landmarks.dat")
+
+         detector = {}
+         detector["face_detector"] = face_detector
+         detector["sp"] = sp
+         return detector
+
+     def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
+         """
+         Detect and align face with dlib
+
+         Args:
+             img (np.ndarray): pre-loaded image as numpy array
+
+         Returns:
+             results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
+         """
+         resp = []
+
+         face_detector = self.model["face_detector"]
+
+         # note that, by design, dlib's fhog face detector scores are >0 but not capped at 1
+         detections, scores, _ = face_detector.run(img, 1)
+
+         if len(detections) > 0:
+
+             for idx, detection in enumerate(detections):
+                 left = detection.left()
+                 right = detection.right()
+                 top = detection.top()
+                 bottom = detection.bottom()
+
+                 y = int(max(0, top))
+                 h = int(min(bottom, img.shape[0]) - y)
+                 x = int(max(0, left))
+                 w = int(min(right, img.shape[1]) - x)
+
+                 shape = self.model["sp"](img, detection)
+
+                 right_eye = (
+                     int((shape.part(2).x + shape.part(3).x) // 2),
+                     int((shape.part(2).y + shape.part(3).y) // 2),
+                 )
+                 left_eye = (
+                     int((shape.part(0).x + shape.part(1).x) // 2),
+                     int((shape.part(0).y + shape.part(1).y) // 2),
+                 )
+
+                 # never saw confidence higher than +3.5 github.com/davisking/dlib/issues/761
+                 confidence = scores[idx]
+
+                 facial_area = FacialAreaRegion(
+                     x=x,
+                     y=y,
+                     w=w,
+                     h=h,
+                     left_eye=left_eye,
+                     right_eye=right_eye,
+                     confidence=min(max(0, confidence), 1.0),
+                 )
+                 resp.append(facial_area)
+
+         return resp
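A minimal usage sketch for the optional dlib backend, assuming `dlib` is installed; the image path is a placeholder:

import cv2
from deepface.commons import folder_utils
from deepface.detectors.Dlib import DlibClient

folder_utils.initialize_folder()  # ensure the weights folder exists
detector = DlibClient()  # downloads the 5-point shape predictor on first use

img = cv2.imread("tests/dataset/img1.jpg")  # placeholder path
for face in detector.detect_faces(img):
    print(face.x, face.y, face.w, face.h, face.left_eye, face.right_eye)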
deepface/detectors/FastMtCnn.py ADDED
@@ -0,0 +1,89 @@
+ from typing import Any, Union, List
+ import cv2
+ import numpy as np
+ from deepface.models.Detector import Detector, FacialAreaRegion
+
+ # Link -> https://github.com/timesler/facenet-pytorch
+ # Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch
+
+
+ class FastMtCnnClient(Detector):
+     def __init__(self):
+         self.model = self.build_model()
+
+     def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
+         """
+         Detect and align face with mtcnn
+
+         Args:
+             img (np.ndarray): pre-loaded image as numpy array
+
+         Returns:
+             results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
+         """
+         resp = []
+
+         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mtcnn expects RGB but OpenCV reads BGR
+         detections = self.model.detect(
+             img_rgb, landmarks=True
+         )  # returns bounding box, prob, landmark
+         if (
+             detections is not None
+             and len(detections) > 0
+             and not any(detection is None for detection in detections)  # issue 1043
+         ):
+             for regions, confidence, eyes in zip(*detections):
+                 x, y, w, h = xyxy_to_xywh(regions)
+                 right_eye = eyes[0]
+                 left_eye = eyes[1]
+
+                 left_eye = tuple(int(i) for i in left_eye)
+                 right_eye = tuple(int(i) for i in right_eye)
+
+                 facial_area = FacialAreaRegion(
+                     x=x,
+                     y=y,
+                     w=w,
+                     h=h,
+                     left_eye=left_eye,
+                     right_eye=right_eye,
+                     confidence=confidence,
+                 )
+                 resp.append(facial_area)
+
+         return resp
+
+     def build_model(self) -> Any:
+         """
+         Build a fast mtcnn face detector model
+         Returns:
+             model (Any)
+         """
+         # this is not a must dependency. do not import it in the global level.
+         try:
+             from facenet_pytorch import MTCNN as fast_mtcnn
+             import torch
+         except ModuleNotFoundError as e:
+             raise ImportError(
+                 "FastMtcnn is an optional detector, ensure the library is installed. "
+                 "Please install using 'pip install facenet-pytorch'"
+             ) from e
+
+         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+         face_detector = fast_mtcnn(device=device)
+
+         return face_detector
+
+
+ def xyxy_to_xywh(regions: Union[list, tuple]) -> tuple:
+     """
+     Convert (x1, y1, x2, y2) format to (x, y, w, h) format.
+     Args:
+         regions (list or tuple): facial area coordinates as x, y, x+w, y+h
+     Returns:
+         regions (tuple): facial area coordinates as x, y, w, h
+     """
+     x, y, x_plus_w, y_plus_h = regions[0], regions[1], regions[2], regions[3]
+     w = x_plus_w - x
+     h = y_plus_h - y
+     return (x, y, w, h)
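A quick worked call of the coordinate conversion above, with illustrative corner values:

from deepface.detectors.FastMtCnn import xyxy_to_xywh

# a box whose corners are (10, 20) and (60, 100)
print(xyxy_to_xywh((10, 20, 60, 100)))  # (10, 20, 50, 80)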
deepface/detectors/MediaPipe.py ADDED
@@ -0,0 +1,76 @@
+ from typing import Any, List
+ import numpy as np
+ from deepface.models.Detector import Detector, FacialAreaRegion
+
+ # Link - https://google.github.io/mediapipe/solutions/face_detection
+
+
+ class MediaPipeClient(Detector):
+     def __init__(self):
+         self.model = self.build_model()
+
+     def build_model(self) -> Any:
+         """
+         Build a mediapipe face detector model
+         Returns:
+             model (Any)
+         """
+         # this is not a must dependency. do not import it in the global level.
+         try:
+             import mediapipe as mp
+         except ModuleNotFoundError as e:
+             raise ImportError(
+                 "MediaPipe is an optional detector, ensure the library is installed. "
+                 "Please install using 'pip install mediapipe'"
+             ) from e
+
+         mp_face_detection = mp.solutions.face_detection
+         face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.7)
+         return face_detection
+
+     def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]:
+         """
+         Detect and align face with mediapipe
+
+         Args:
+             img (np.ndarray): pre-loaded image as numpy array
+
+         Returns:
+             results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
+         """
+         resp = []
+
+         img_width = img.shape[1]
+         img_height = img.shape[0]
+
+         results = self.model.process(img)
+
+         # If no face has been detected, return an empty list
+         if results.detections is None:
+             return resp
+
+         # Extract the bounding box, the landmarks and the confidence score
+         for current_detection in results.detections:
+             (confidence,) = current_detection.score
+
+             bounding_box = current_detection.location_data.relative_bounding_box
+             landmarks = current_detection.location_data.relative_keypoints
+
+             x = int(bounding_box.xmin * img_width)
+             w = int(bounding_box.width * img_width)
+             y = int(bounding_box.ymin * img_height)
+             h = int(bounding_box.height * img_height)
+
+             right_eye = (int(landmarks[0].x * img_width), int(landmarks[0].y * img_height))
+             left_eye = (int(landmarks[1].x * img_width), int(landmarks[1].y * img_height))
+             # nose = (int(landmarks[2].x * img_width), int(landmarks[2].y * img_height))
+             # mouth = (int(landmarks[3].x * img_width), int(landmarks[3].y * img_height))
+             # right_ear = (int(landmarks[4].x * img_width), int(landmarks[4].y * img_height))
+             # left_ear = (int(landmarks[5].x * img_width), int(landmarks[5].y * img_height))
+
+             facial_area = FacialAreaRegion(
+                 x=x, y=y, w=w, h=h, left_eye=left_eye, right_eye=right_eye, confidence=confidence
+             )
+             resp.append(facial_area)
+
+         return resp
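A minimal usage sketch for the optional mediapipe backend, assuming `mediapipe` is installed; the image path is a placeholder:

import cv2
from deepface.detectors.MediaPipe import MediaPipeClient

detector = MediaPipeClient()  # requires `pip install mediapipe`
img = cv2.imread("tests/dataset/img1.jpg")  # placeholder path
for face in detector.detect_faces(img):
    print(face.x, face.y, face.w, face.h, face.confidence)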