Upload 53 files
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +2 -0
- .gitignore +162 -3
- LICENSE +201 -0
- README.md +186 -16
- assets/logo.png +0 -0
- assets/mark_chris_elon.gif +3 -0
- assets/screenshot_home_page.png +0 -0
- assets/video_face_swap_result.mp4 +3 -0
- backend/config.py +18 -0
- backend/face_swap/__init__.py +0 -0
- backend/face_swap/api.py +122 -0
- backend/face_swap/arcface_onnx.py +96 -0
- backend/face_swap/face_align.py +99 -0
- backend/face_swap/face_enhancer.py +77 -0
- backend/face_swap/face_swap.py +216 -0
- backend/face_swap/inswapper.py +100 -0
- backend/face_swap/retinaface.py +267 -0
- backend/face_swap/utils/common.py +49 -0
- backend/face_swap/weights/readme.txt +1 -0
- backend/main.py +24 -0
- backend/requirements.txt +8 -0
- frontend/vfs/.gitignore +23 -0
- frontend/vfs/README.md +70 -0
- frontend/vfs/package-lock.json +0 -0
- frontend/vfs/package.json +42 -0
- frontend/vfs/postcss.config.js +6 -0
- frontend/vfs/public/apple-touch-icon.png +0 -0
- frontend/vfs/public/browserconfig.xml +9 -0
- frontend/vfs/public/favicon-16x16.png +0 -0
- frontend/vfs/public/favicon-32x32.png +0 -0
- frontend/vfs/public/favicon.ico +0 -0
- frontend/vfs/public/index.html +43 -0
- frontend/vfs/public/logo192.png +0 -0
- frontend/vfs/public/logo512.png +0 -0
- frontend/vfs/public/manifest.json +25 -0
- frontend/vfs/public/mstile-150x150.png +0 -0
- frontend/vfs/public/robots.txt +3 -0
- frontend/vfs/public/safari-pinned-tab.svg +72 -0
- frontend/vfs/public/site.webmanifest +19 -0
- frontend/vfs/src/App.css +40 -0
- frontend/vfs/src/App.js +21 -0
- frontend/vfs/src/App.test.js +8 -0
- frontend/vfs/src/assets/logo.png +0 -0
- frontend/vfs/src/assets/screenshot_home_page.png +0 -0
- frontend/vfs/src/components/Footer.js +20 -0
- frontend/vfs/src/components/Header.js +19 -0
- frontend/vfs/src/components/ImageDisplay.js +195 -0
- frontend/vfs/src/components/MainContent.js +90 -0
- frontend/vfs/src/index.css +18 -0
- frontend/vfs/src/index.js +17 -0
.gitattributes
CHANGED
@@ -32,3 +32,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/mark_chris_elon.gif filter=lfs diff=lfs merge=lfs -text
+assets/video_face_swap_result.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1,4 +1,163 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
#   For a library or package, you might want to ignore these files since the code is
#   intended to run in multiple environments; otherwise, check them in:
#   .python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# poetry
#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
#   This is especially recommended for binary packages to ensure reproducibility, and is more
#   commonly ignored for libraries.
#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
#   in version control.
#   https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can
#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
#  and can be added to the global gitignore or merged into this file. For a more nuclear
#  option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# model weight files
*.onnx
LICENSE
ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!)  The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
README.md
CHANGED
@@ -1,21 +1,191 @@
-title: VideoSwap
-emoji: 🌍
-colorFrom: yellow
-colorTo: indigo
-sdk: docker
-pinned: false
-license: mit
-short_description: face video swapping
----

<div align="center">

  <img src="assets/logo.png" alt="logo" width="200" height="auto" />
  <h1>Video Face Swap (VFS)</h1>

  <p>
    Swap any face in a video with a few clicks!!
  </p>


<!-- Badges -->
<p>
  <a href="https://github.com/karanjakhar/Video-Face-Swap/graphs/contributors">
    <img src="https://img.shields.io/github/contributors/karanjakhar/Video-Face-Swap" alt="contributors" />
  </a>
  <a href="">
    <img src="https://img.shields.io/github/last-commit/karanjakhar/Video-Face-Swap" alt="last update" />
  </a>
  <a href="https://github.com/karanjakhar/Video-Face-Swap/network/members">
    <img src="https://img.shields.io/github/forks/karanjakhar/Video-Face-Swap" alt="forks" />
  </a>
  <a href="https://github.com/karanjakhar/Video-Face-Swap/stargazers">
    <img src="https://img.shields.io/github/stars/karanjakhar/Video-Face-Swap" alt="stars" />
  </a>
  <a href="https://github.com/karanjakhar/Video-Face-Swap/issues/">
    <img src="https://img.shields.io/github/issues/karanjakhar/Video-Face-Swap" alt="open issues" />
  </a>
  <a href="https://github.com/karanjakhar/Video-Face-Swap/blob/master/LICENSE">
    <img src="https://img.shields.io/github/license/karanjakhar/Video-Face-Swap.svg" alt="license" />
  </a>
</p>

<h4>
    <a href="https://github.com/karanjakhar/Video-Face-Swap/">View Demo</a>
  <span> · </span>
    <a href="https://github.com/karanjakhar/Video-Face-Swap">Documentation</a>
  <span> · </span>
    <a href="https://github.com/karanjakhar/Video-Face-Swap/issues/">Report Bug</a>
  <span> · </span>
    <a href="https://github.com/karanjakhar/Video-Face-Swap/issues/">Request Feature</a>
</h4>
</div>

<br />

<!-- Table of Contents -->
# :notebook_with_decorative_cover: Table of Contents

- [:notebook\_with\_decorative\_cover: Table of Contents](#notebook_with_decorative_cover-table-of-contents)
- [:star2: About the Project](#star2-about-the-project)
  - [:camera: Screenshots](#camera-screenshots)
  - [:space\_invader: Tech Stack](#space_invader-tech-stack)
- [:toolbox: Getting Started](#toolbox-getting-started)
  - [:bangbang: Prerequisites](#bangbang-prerequisites)
  - [:running: Run Locally](#running-run-locally)
- [:eyes: Usage](#eyes-usage)
- [:wave: Contributing](#wave-contributing)
  - [:scroll: Code of Conduct](#scroll-code-of-conduct)
- [:warning: License](#warning-license)
- [:handshake: Contact](#handshake-contact)
- [:gem: Acknowledgements](#gem-acknowledgements)


<!-- About the Project -->
## :star2: About the Project
It detects the faces in a video and groups them by identity, so you can upload a new face for a group and have it swapped throughout the whole video.


<!-- Screenshots -->
### :camera: Screenshots

<div align="center">
  <img src="assets/screenshot_home_page.png" />
</div>


<!-- TechStack -->
### :space_invader: Tech Stack

<ul>
  <li><a href="https://reactjs.org/">React.js</a></li>
  <li><a href="https://tailwindcss.com/">TailwindCSS</a></li>
  <li><a href="https://fastapi.tiangolo.com/">FastAPI</a></li>
</ul>


<!-- Getting Started -->
## :toolbox: Getting Started

<!-- Prerequisites -->
### :bangbang: Prerequisites

<ul>
  <li><a href="https://www.python.org/downloads/">Python3</a></li>
  <li><a href="https://nodejs.org/en/download/package-manager">npm</a></li>
</ul>


<!-- Run Locally -->
### :running: Run Locally

Clone the project

```bash
git clone https://github.com/karanjakhar/Video-Face-Swap.git
```

Go to the project directory

```bash
cd Video-Face-Swap
```

Install dependencies

```bash
cd Video-Face-Swap/frontend/vfs
npm install

cd Video-Face-Swap/backend
pip install -r requirements.txt
```

Run Backend and Frontend

```bash
cd Video-Face-Swap/frontend/vfs
npm start

cd Video-Face-Swap/backend
python3 main.py
```

Model Weights

```
Download the model weight files from the release and place them in the weights/ folder.
```


<!-- Usage -->
## :eyes: Usage

The project has two parts: a frontend (ReactJS) and a backend (Python, FastAPI).
To run it locally you need to run both.


<!-- Contributing -->
## :wave: Contributing

<a href="https://github.com/karanjakhar/Video-Face-Swap/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=karanjakhar/Video-Face-Swap" />
</a>


Contributions are always welcome!

See `contributing.md` for ways to get started.


<!-- Code of Conduct -->
### :scroll: Code of Conduct

Please read the [Code of Conduct](https://github.com/karanjakhar/Video-Face-Swap/blob/master/CODE_OF_CONDUCT.md)


<!-- License -->
## :warning: License

Distributed under the Apache 2.0 License. See LICENSE for more information.


<!-- Contact -->
## :handshake: Contact

Karan Jakhar - [@karan-jakhar](https://www.linkedin.com/in/karan-jakhar/) - karanjakhar49@gmail.com

Project Link: [https://github.com/karanjakhar/Video-Face-Swap](https://github.com/karanjakhar/Video-Face-Swap)


<!-- Acknowledgments -->
## :gem: Acknowledgements

- [InsightFace](https://github.com/deepinsight/insightface)
- [FaceFusion](https://github.com/facefusion/facefusion)
- [Readme Template](https://github.com/Louis3797/awesome-readme-template)
assets/logo.png
ADDED
assets/mark_chris_elon.gif
ADDED
assets/screenshot_home_page.png
ADDED
assets/video_face_swap_result.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ff6e1da9fa021d0b2c17dbf89819a05ee79a2823e2d0c57d35bfa53b485794d
size 16038001
backend/config.py
ADDED
@@ -0,0 +1,18 @@
import os


UPLOAD_FOLDER = "./uploaded_videos"
if not os.path.exists(UPLOAD_FOLDER):
    os.makedirs(UPLOAD_FOLDER)


BASE_DIR = os.path.abspath(UPLOAD_FOLDER)


RETINAFACE_MODEL_PATH = "./face_swap/weights/det_10g.onnx"
ARCFACE_MODEL_PATH = "./face_swap/weights/w600k_r50.onnx"
FACE_SWAPPER_MODEL_PATH = "./face_swap/weights/inswapper_128.onnx"
FACE_ENHANCER_MODEL_PATH = './face_swap/weights/gfpgan_1.4.onnx'


PROVIDERS = ['CUDAExecutionProvider', 'CPUExecutionProvider']
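The four model paths above point into `backend/face_swap/weights/`, and `*.onnx` files are git-ignored (see the `.gitignore` change above), so the weights have to be downloaded separately per the README's Model Weights note. A minimal sanity check, not part of this upload, that the files are in place before starting the backend (run from the `backend/` directory, since the paths are relative):

```python
# Hypothetical helper, not part of the repository: verify the ONNX weights
# referenced in config.py exist before launching the API. Run from backend/.
import os
from config import (RETINAFACE_MODEL_PATH, ARCFACE_MODEL_PATH,
                    FACE_SWAPPER_MODEL_PATH, FACE_ENHANCER_MODEL_PATH)

for path in (RETINAFACE_MODEL_PATH, ARCFACE_MODEL_PATH,
             FACE_SWAPPER_MODEL_PATH, FACE_ENHANCER_MODEL_PATH):
    status = "ok" if os.path.exists(path) else "MISSING"
    print(f"{status:7s} {path}")
```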
backend/face_swap/__init__.py
ADDED
File without changes
backend/face_swap/api.py
ADDED
@@ -0,0 +1,122 @@
from fastapi import APIRouter, UploadFile, HTTPException
from fastapi.responses import JSONResponse, FileResponse
from fastapi.middleware.cors import CORSMiddleware
import os
import uuid

import numpy as np
import json
from pydantic import BaseModel
from typing import List

from config import UPLOAD_FOLDER, BASE_DIR
from face_swap.face_swap import run_face_swap, get_images_from_group, crop_faces


router = APIRouter()


@router.post('/uploadnewfaces/{uid}/{group_id}')
def upload_new_faces(uid: str, group_id: str, file: UploadFile = None):
    if not file:
        raise HTTPException(status_code=400, detail="File not found")

    if not os.path.exists(os.path.join(UPLOAD_FOLDER, uid, 'new_faces')):
        os.mkdir(os.path.join(UPLOAD_FOLDER, uid, 'new_faces'))

    file_location = os.path.join(UPLOAD_FOLDER, uid, 'new_faces', str(group_id) + '.jpg')
    with open(file_location, "wb+") as file_object:
        file_object.write(file.file.read())

    return JSONResponse(content={"message": "File " + uid + " uploaded successfully!", 'uid': uid}, status_code=200)


class FaceSwapRequest(BaseModel):
    group_ids: List[int]


@router.post('/faceswap/{uid}')
def face_swap(uid: str, request: FaceSwapRequest):
    group_ids = request.group_ids
    print("Received group_ids:", group_ids)

    with open(os.path.join(UPLOAD_FOLDER, uid, 'all_info.json'), 'r') as file:
        loaded_dict = json.load(file)

    for group_id in group_ids:
        if int(group_id) >= int(loaded_dict['max_groups']):
            raise HTTPException(status_code=400, detail="Group ID not found")

    # Paths to the files
    embeddings_file = os.path.join(UPLOAD_FOLDER, uid, 'face_embeddings.npy')
    bboxes_file = os.path.join(UPLOAD_FOLDER, uid, 'face_bboxes.npy')
    kps_file = os.path.join(UPLOAD_FOLDER, uid, 'face_kps.npy')

    # Load the data from each file
    all_embeddings = np.load(embeddings_file)
    all_bboxes = np.load(bboxes_file)
    all_kps = np.load(kps_file)
    all_face_info = loaded_dict['all_face_info']

    result_file_path = os.path.join(UPLOAD_FOLDER, uid, 'result.mp4')
    input_file_path = os.path.join(UPLOAD_FOLDER, uid, 'input.mp4')

    run_face_swap(uid, all_face_info, group_ids, all_embeddings, all_bboxes, all_kps, input_file_path, result_file_path)

    return JSONResponse(content={"message": "Face swapped successfully!", 'uid': uid}, status_code=200)


@router.post("/uploadvideo/")
async def upload_video(file: UploadFile = None):
    if not file:
        raise HTTPException(status_code=400, detail="File not found")
    uid = str(uuid.uuid4())
    if not os.path.exists(os.path.join(UPLOAD_FOLDER, uid)):
        os.mkdir(os.path.join(UPLOAD_FOLDER, uid))

    file_location = os.path.join(UPLOAD_FOLDER, uid, 'input.mp4')
    with open(file_location, "wb+") as file_object:
        file_object.write(file.file.read())
    crop_faces(file_location, uid)
    return JSONResponse(content={"message": "File " + uid + " uploaded successfully!", 'uid': uid}, status_code=200)


@router.get("/get_images/{uid}")
async def read_images(uid: str):
    images = get_images_from_group(uid)
    return images


@router.get("/images/{uid}/{cropped}/{group}/{filename}")
async def serve_image(uid: str, cropped: str, group: str, filename: str):
    image_path = os.path.join(BASE_DIR, uid, "cropped_faces", group, filename)
    if not os.path.isfile(image_path):
        raise HTTPException(status_code=404, detail="Image not found")
    return FileResponse(image_path)


@router.get("/download_result_video/{uid}")
async def download_video(uid: str):
    video_path = os.path.join(UPLOAD_FOLDER, uid, 'result.mp4')

    if not os.path.exists(video_path):
        raise HTTPException(status_code=400, detail="File not found")

    return FileResponse(video_path, media_type='video/mp4', filename=f"result_{uid}.mp4")
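Taken together, these routes define the full upload → group → swap → download flow. A rough client-side sketch of that flow using `requests` follows; the host, port, and the assumption that the router is mounted at the application root are guesses, since `main.py` is not shown in this view:

```python
# Hypothetical client sketch for the endpoints defined above.
# The base URL is an assumption; adjust to wherever uvicorn serves the app.
import requests

BASE = "http://localhost:8000"

# 1. Upload the source video; the response carries the job uid.
with open("input.mp4", "rb") as f:
    uid = requests.post(f"{BASE}/uploadvideo/", files={"file": f}).json()["uid"]

# 2. Inspect the detected face groups (cropped faces per group id).
groups = requests.get(f"{BASE}/get_images/{uid}").json()

# 3. Upload a replacement face for group 0.
with open("new_face.jpg", "rb") as f:
    requests.post(f"{BASE}/uploadnewfaces/{uid}/0", files={"file": f})

# 4. Run the swap for the chosen groups, then download the result.
requests.post(f"{BASE}/faceswap/{uid}", json={"group_ids": [0]})
resp = requests.get(f"{BASE}/download_result_video/{uid}")
with open("result.mp4", "wb") as f:
    f.write(resp.content)
```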
backend/face_swap/arcface_onnx.py
ADDED
@@ -0,0 +1,96 @@

from __future__ import division
import numpy as np
import cv2
import onnx
import onnxruntime
import os
from face_swap.face_align import norm_crop

__all__ = [
    'ArcFaceONNX',
]


class ArcFaceONNX:
    def __init__(self, model_file=None, session=None, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']):
        assert model_file is not None
        self.model_file = model_file
        self.session = session
        self.taskname = 'recognition'
        find_sub = False
        find_mul = False
        model = onnx.load(self.model_file)
        graph = model.graph
        for nid, node in enumerate(graph.node[:8]):
            if node.name.startswith('Sub') or node.name.startswith('_minus'):
                find_sub = True
            if node.name.startswith('Mul') or node.name.startswith('_mul'):
                find_mul = True
        if find_sub and find_mul:
            # mxnet arcface model
            input_mean = 0.0
            input_std = 1.0
        else:
            input_mean = 127.5
            input_std = 127.5
        self.input_mean = input_mean
        self.input_std = input_std
        if self.session is None:
            assert self.model_file is not None, "Model file path is None."
            assert os.path.exists(self.model_file), "ArcFace weights not found."
            self.session = onnxruntime.InferenceSession(self.model_file, providers=providers)
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        input_name = input_cfg.name
        self.input_size = tuple(input_shape[2:4][::-1])
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.input_name = input_name
        self.output_names = output_names
        assert len(self.output_names) == 1
        self.output_shape = outputs[0].shape

    def prepare(self, ctx_id, **kwargs):
        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])

    def get(self, img, face):
        aimg = norm_crop(img, landmark=face.kps, image_size=self.input_size[0])
        face.embedding = self.get_feat(aimg).flatten()
        return face.embedding

    def compute_sim(self, feat1, feat2):
        from numpy.linalg import norm
        feat1 = feat1.ravel()
        feat2 = feat2.ravel()
        sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))
        return sim

    def get_feat(self, imgs):
        if not isinstance(imgs, list):
            imgs = [imgs]
        input_size = self.input_size

        blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,
                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out

    def forward(self, batch_data):
        blob = (batch_data - self.input_mean) / self.input_std
        net_out = self.session.run(self.output_names, {self.input_name: blob})[0]
        return net_out

    def batch_get(self, img, faces):
        all_aimg = []
        for face in faces:
            all_aimg.append(norm_crop(img, landmark=face.kps, image_size=self.input_size[0]))

        blob = cv2.dnn.blobFromImages(all_aimg, 1.0 / self.input_std, self.input_size,
                                      (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_out = self.session.run(self.output_names, {self.input_name: blob})
        return net_out
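`ArcFaceONNX.get()` attaches an identity embedding to a detected face and `compute_sim()` gives the cosine similarity between two embeddings. A minimal sketch of comparing two photos, mirroring the detector/embedder setup used in `face_swap.py` below; the image paths are placeholders:

```python
# Sketch only: identity similarity between two face photos.
import cv2
from face_swap.retinaface import RetinaFace
from face_swap.arcface_onnx import ArcFaceONNX
from face_swap.utils.common import Face
from config import RETINAFACE_MODEL_PATH, ARCFACE_MODEL_PATH, PROVIDERS

detector = RetinaFace(RETINAFACE_MODEL_PATH, providers=PROVIDERS)
detector.prepare(ctx_id=1, input_size=(640, 640), det_thresh=0.5)
embedder = ArcFaceONNX(ARCFACE_MODEL_PATH, providers=PROVIDERS)

def embed(path):
    img = cv2.imread(path)
    bboxes, kpss = detector.detect(img, max_num=1, metric='default')
    face = Face(bbox=bboxes[0, 0:4], kps=kpss[0], det_score=bboxes[0, 4])
    return embedder.get(img, face)  # aligns via norm_crop, then runs the ONNX model

sim = embedder.compute_sim(embed("a.jpg"), embed("b.jpg"))
print(f"cosine similarity: {sim:.3f}")  # close to 1.0 for the same identity
```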
backend/face_swap/face_align.py
ADDED
@@ -0,0 +1,99 @@
import cv2
import numpy as np
from skimage import transform as trans


arcface_dst = np.array(
    [[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366],
     [41.5493, 92.3655], [70.7299, 92.2041]],
    dtype=np.float32)


def estimate_norm(lmk, image_size=112, mode='arcface'):
    assert lmk.shape == (5, 2)
    assert image_size % 112 == 0 or image_size % 128 == 0
    if image_size % 112 == 0:
        ratio = float(image_size) / 112.0
        diff_x = 0
    else:
        ratio = float(image_size) / 128.0
        diff_x = 8.0 * ratio
    dst = arcface_dst * ratio
    dst[:, 0] += diff_x
    tform = trans.SimilarityTransform()
    tform.estimate(lmk, dst)
    M = tform.params[0:2, :]
    return M


def norm_crop(img, landmark, image_size=112, mode='arcface'):
    M = estimate_norm(landmark, image_size, mode)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
    return warped


def norm_crop2(img, landmark, image_size=112, mode='arcface'):
    M = estimate_norm(landmark, image_size, mode)
    warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)
    return warped, M


def square_crop(im, S):
    if im.shape[0] > im.shape[1]:
        height = S
        width = int(float(im.shape[1]) / im.shape[0] * S)
        scale = float(S) / im.shape[0]
    else:
        width = S
        height = int(float(im.shape[0]) / im.shape[1] * S)
        scale = float(S) / im.shape[1]
    resized_im = cv2.resize(im, (width, height))
    det_im = np.zeros((S, S, 3), dtype=np.uint8)
    det_im[:resized_im.shape[0], :resized_im.shape[1], :] = resized_im
    return det_im, scale


def transform(data, center, output_size, scale, rotation):
    scale_ratio = scale
    rot = float(rotation) * np.pi / 180.0
    t1 = trans.SimilarityTransform(scale=scale_ratio)
    cx = center[0] * scale_ratio
    cy = center[1] * scale_ratio
    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))
    t3 = trans.SimilarityTransform(rotation=rot)
    t4 = trans.SimilarityTransform(translation=(output_size / 2,
                                                output_size / 2))
    t = t1 + t2 + t3 + t4
    M = t.params[0:2]
    cropped = cv2.warpAffine(data,
                             M, (output_size, output_size),
                             borderValue=0.0)
    return cropped, M


def trans_points2d(pts, M):
    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
    for i in range(pts.shape[0]):
        pt = pts[i]
        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
        new_pt = np.dot(M, new_pt)
        new_pts[i] = new_pt[0:2]

    return new_pts


def trans_points3d(pts, M):
    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])
    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)
    for i in range(pts.shape[0]):
        pt = pts[i]
        new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32)
        new_pt = np.dot(M, new_pt)
        new_pts[i][0:2] = new_pt[0:2]
        new_pts[i][2] = pts[i][2] * scale

    return new_pts


def trans_points(pts, M):
    if pts.shape[1] == 2:
        return trans_points2d(pts, M)
    else:
        return trans_points3d(pts, M)
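`estimate_norm()` fits a similarity transform from the five detected keypoints to the fixed `arcface_dst` template, and `norm_crop()` applies it to produce the aligned crop the recognizer expects. A small sketch with placeholder keypoints (in the real pipeline they come from RetinaFace):

```python
# Sketch only: align a face to the 112x112 ArcFace template.
import numpy as np
import cv2
from face_swap.face_align import estimate_norm, norm_crop

img = cv2.imread("face.jpg")  # placeholder image path
kps = np.array([[210., 250.], [330., 248.], [270., 320.],
                [225., 380.], [320., 378.]], dtype=np.float32)  # eyes, nose, mouth corners

M = estimate_norm(kps, image_size=112)             # 2x3 similarity transform
aligned = norm_crop(img, landmark=kps, image_size=112)
print(M.shape, aligned.shape)                      # (2, 3) (112, 112, 3)
```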
backend/face_swap/face_enhancer.py
ADDED
@@ -0,0 +1,77 @@
import numpy as np
import cv2

from face_swap.utils.common import Face


def blend_frame(temp_frame, paste_frame):
    face_enhancer_blend = 0.5
    temp_frame = cv2.addWeighted(temp_frame, face_enhancer_blend, paste_frame, 1 - face_enhancer_blend, 0)
    return temp_frame


def paste_back(temp_frame, crop_frame, affine_matrix):
    inverse_affine_matrix = cv2.invertAffineTransform(affine_matrix)
    temp_frame_height, temp_frame_width = temp_frame.shape[0:2]
    crop_frame_height, crop_frame_width = crop_frame.shape[0:2]
    inverse_crop_frame = cv2.warpAffine(crop_frame, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
    inverse_mask = np.ones((crop_frame_height, crop_frame_width, 3), dtype=np.float32)
    inverse_mask_frame = cv2.warpAffine(inverse_mask, inverse_affine_matrix, (temp_frame_width, temp_frame_height))
    inverse_mask_frame = cv2.erode(inverse_mask_frame, np.ones((2, 2)))
    inverse_mask_border = inverse_mask_frame * inverse_crop_frame
    inverse_mask_area = np.sum(inverse_mask_frame) // 3
    inverse_mask_edge = int(inverse_mask_area ** 0.5) // 20
    inverse_mask_radius = inverse_mask_edge * 2
    inverse_mask_center = cv2.erode(inverse_mask_frame, np.ones((inverse_mask_radius, inverse_mask_radius)))
    inverse_mask_blur_size = inverse_mask_edge * 2 + 1
    inverse_mask_blur_area = cv2.GaussianBlur(inverse_mask_center, (inverse_mask_blur_size, inverse_mask_blur_size), 0)
    temp_frame = inverse_mask_blur_area * inverse_mask_border + (1 - inverse_mask_blur_area) * temp_frame
    temp_frame = temp_frame.clip(0, 255).astype(np.uint8)
    return temp_frame


def normalize_crop_frame(crop_frame):
    crop_frame = np.clip(crop_frame, -1, 1)
    crop_frame = (crop_frame + 1) / 2
    crop_frame = crop_frame.transpose(1, 2, 0)
    crop_frame = (crop_frame * 255.0).round()
    crop_frame = crop_frame.astype(np.uint8)[:, :, ::-1]
    return crop_frame


def prepare_crop_frame(crop_frame):
    crop_frame = crop_frame[:, :, ::-1] / 255.0
    crop_frame = (crop_frame - 0.5) / 0.5
    crop_frame = np.expand_dims(crop_frame.transpose(2, 0, 1), axis=0).astype(np.float32)
    return crop_frame


def warp_face(target_face: Face, temp_frame):
    template = np.array(
        [
            [192.98138, 239.94708],
            [318.90277, 240.1936],
            [256.63416, 314.01935],
            [201.26117, 371.41043],
            [313.08905, 371.15118]
        ])
    affine_matrix = cv2.estimateAffinePartial2D(target_face['kps'], template, method=cv2.LMEDS)[0]
    crop_frame = cv2.warpAffine(temp_frame, affine_matrix, (512, 512))
    return crop_frame, affine_matrix


def enhance_face(target_face: Face, temp_frame, face_enhancer_model):
    frame_processor = face_enhancer_model
    crop_frame, affine_matrix = warp_face(target_face, temp_frame)
    crop_frame = prepare_crop_frame(crop_frame)
    frame_processor_inputs = {}
    for frame_processor_input in frame_processor.get_inputs():
        if frame_processor_input.name == 'input':
            frame_processor_inputs[frame_processor_input.name] = crop_frame
        if frame_processor_input.name == 'weight':
            frame_processor_inputs[frame_processor_input.name] = np.array([1], dtype=np.double)

    crop_frame = frame_processor.run(None, frame_processor_inputs)[0][0]
    crop_frame = normalize_crop_frame(crop_frame)
    paste_frame = paste_back(temp_frame, crop_frame, affine_matrix)
    temp_frame = blend_frame(temp_frame, paste_frame)
    return temp_frame
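`enhance_face()` warps the detected face to a 512×512 crop, runs it through the GFPGAN ONNX session, pastes the result back under a feathered mask, and blends it 50/50 with the original frame. A single-image sketch, mirroring how `face_swap.py` below constructs the session and the `Face` object; the image path is a placeholder:

```python
# Sketch only: enhance one detected face in a still image.
import cv2
import onnxruntime
from face_swap.retinaface import RetinaFace
from face_swap.utils.common import Face
from face_swap.face_enhancer import enhance_face
from config import RETINAFACE_MODEL_PATH, FACE_ENHANCER_MODEL_PATH, PROVIDERS

detector = RetinaFace(RETINAFACE_MODEL_PATH, providers=PROVIDERS)
detector.prepare(ctx_id=1, input_size=(640, 640), det_thresh=0.5)
enhancer = onnxruntime.InferenceSession(FACE_ENHANCER_MODEL_PATH, providers=PROVIDERS)

frame = cv2.imread("photo.jpg")  # placeholder input
bboxes, kpss = detector.detect(frame, max_num=1, metric='default')
face = Face(bbox=bboxes[0, 0:4], kps=kpss[0], det_score=bboxes[0, 4])

result = enhance_face(face, frame, enhancer)  # warp -> GFPGAN -> paste back -> blend
cv2.imwrite("photo_enhanced.jpg", result)
```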
backend/face_swap/face_swap.py
ADDED
@@ -0,0 +1,216 @@

import cv2
import onnxruntime
import numpy as np
import os
import json

from face_swap.utils.common import Face
from face_swap.retinaface import RetinaFace
from face_swap.arcface_onnx import ArcFaceONNX
from face_swap.inswapper import INSwapper
from face_swap.face_enhancer import enhance_face
from config import UPLOAD_FOLDER, BASE_DIR
from config import RETINAFACE_MODEL_PATH, ARCFACE_MODEL_PATH, FACE_SWAPPER_MODEL_PATH, FACE_ENHANCER_MODEL_PATH
from config import PROVIDERS


retinaface_det_model = RetinaFace(RETINAFACE_MODEL_PATH, providers=PROVIDERS)
retinaface_det_model.prepare(ctx_id=1, input_size=(640, 640), det_thresh=0.5)
arcface_emedding_model = ArcFaceONNX(ARCFACE_MODEL_PATH, providers=PROVIDERS)
face_swapper_model = INSwapper(FACE_SWAPPER_MODEL_PATH, providers=PROVIDERS)
face_enhancer_model = onnxruntime.InferenceSession(FACE_ENHANCER_MODEL_PATH, providers=PROVIDERS)


def crop_faces(video_path: str, uid: str):
    cap = cv2.VideoCapture(video_path)
    face_distance = 1.5
    embeddings = []
    frame_number = -1
    all_bboxes = []
    all_kps = []
    all_face_info = {}
    filename_counter = 1
    directory = os.path.join(UPLOAD_FOLDER, uid, "cropped_faces")
    if not os.path.exists(directory):
        os.mkdir(directory)

    all_faces = {0: []}  # key id and value is Faces
    unique_id = 0

    while True:
        # Read a frame from the video
        ret, frame = cap.read()
        frame_number += 1

        # Break the loop if we are at the end of the video
        if not ret:
            break

        bboxes, kpss = retinaface_det_model.detect(frame, max_num=0, metric='default')

        print(bboxes.shape)

        for i in range(bboxes.shape[0]):
            # if bboxes.shape[0] > 0:
            similarity_ids = {}
            bbox = bboxes[i, 0:4]
            det_score = bboxes[i, 4]
            x1, y1, x2, y2 = bbox.astype(int)
            if (x2 - x1) > 80 and (y2 - y1) > 80:

                kps = None
                if kpss is not None:
                    kps = kpss[i]
                face = Face(bbox=bbox, kps=kps, det_score=det_score)
                face['frame_number'] = frame_number
                face['embedding'] = arcface_emedding_model.get(frame, face)

                if unique_id == 0:
                    max_sim = 1
                    max_index = 0
                    unique_id += 1
                else:
                    max_sim = 0
                    max_index = unique_id
                    for i in range(unique_id):
                        similarity_ids[i] = 0
                        for known_face in all_faces[i]:
                            current_face_distance = np.sum(np.square(face.normed_embedding - known_face.normed_embedding))
                            if current_face_distance < face_distance:
                                similarity_ids[i] += 1

                        similarity_ids[i] /= len(all_faces[i])
                        if similarity_ids[i] > max_sim:
                            max_sim = similarity_ids[i]
                            max_index = i
                if max_sim > 0.25:
                    sim_id = max_index

                else:
                    sim_id = unique_id
                    unique_id += 1

                face['group_id'] = sim_id

                if sim_id in all_faces.keys():
                    all_faces[sim_id].append(face)
                else:
                    all_faces[sim_id] = [face]

                face_crop = frame[y1:y2, x1:x2]

                sim_id_directory = os.path.join(directory, str(sim_id))
                if not os.path.exists(sim_id_directory):
                    os.mkdir(sim_id_directory)
                filename = f"images_{filename_counter}.jpg"
                filename_counter += 1

                cv2.imwrite(os.path.join(sim_id_directory, filename), face_crop)

                if frame_number in all_face_info:
                    if face['group_id'] in all_face_info[frame_number]:
                        all_face_info[frame_number][face['group_id']].append(len(embeddings))
                    else:
                        all_face_info[frame_number][face['group_id']] = [len(embeddings)]
                else:
                    all_face_info[frame_number] = {face['group_id']: [len(embeddings)]}

                embeddings.append(face['embedding'])
                all_bboxes.append(face['bbox'])
                all_kps.append(face['kps'])

    np.save(os.path.join(UPLOAD_FOLDER, uid, 'face_embeddings.npy'), embeddings)
    np.save(os.path.join(UPLOAD_FOLDER, uid, "face_bboxes.npy"), all_bboxes)
    np.save(os.path.join(UPLOAD_FOLDER, uid, "face_kps.npy"), all_kps)

    # Write the dictionary to a file
    info = {'max_groups': unique_id, 'all_face_info': all_face_info}
    with open(os.path.join(UPLOAD_FOLDER, uid, 'all_info.json'), 'w') as file:
        json.dump(info, file, indent=4)

    # Release the video capture object
    cap.release()


def get_processed_face(img_path):
    image = cv2.imread(img_path)
    bboxes, kpss = retinaface_det_model.detect(image, max_num=1, metric='default')
    print(bboxes)
    bbox = bboxes[0, 0:4]
    det_score = bboxes[0, 4]
    kps = kpss[0]
    face = Face(bbox=bbox, kps=kps, det_score=det_score)
    face['embedding'] = arcface_emedding_model.get(image, face)
    return face


def run_face_swap(uid, all_face_info, group_ids, all_embeddings, all_bboxes, all_kps, input_file_path, result_file_path):
    group_new_faces = {}
    for gi in group_ids:
        new_face = get_processed_face(os.path.join(UPLOAD_FOLDER, uid, 'new_faces', f'{gi}.jpg'))
        group_new_faces[gi] = new_face

    # Create a VideoCapture object
    cap = cv2.VideoCapture(input_file_path)

    # Check if camera opened successfully
    if (cap.isOpened() == False):
        print("Unable to read camera feed")

    # Default resolutions of the frame are obtained. The default resolutions are system dependent.
    # We convert the resolutions from float to integer.
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    fps = cap.get(cv2.CAP_PROP_FPS)

    # Define the codec and create VideoWriter object. The output is stored in 'outpy.avi' file.
    out = cv2.VideoWriter(result_file_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (frame_width, frame_height))
    frame_number = 0

    while(True):
        ret, frame = cap.read()

        if ret == True:
            if str(frame_number) in all_face_info:
                for group_id in all_face_info[str(frame_number)]:
                    if int(group_id) in group_ids:
                        for gi in all_face_info[str(frame_number)][str(group_id)]:
                            old_face = Face(bbox=all_bboxes[gi], kps=all_kps[gi])
                            old_face['embedding'] = all_embeddings[gi]
                            frame = face_swapper_model.get(frame, old_face, group_new_faces[int(group_id)], paste_back=True)
                            frame = enhance_face(old_face, frame, face_enhancer_model)
            out.write(frame)
+
frame_number += 1
|
| 197 |
+
else:
|
| 198 |
+
break
|
| 199 |
+
|
| 200 |
+
# When everything done, release the video capture and video write objects
|
| 201 |
+
cap.release()
|
| 202 |
+
out.release()
|
| 203 |
+
|
| 204 |
+
def get_images_from_group(uid: str, num_images: int = 5) -> dict:
|
| 205 |
+
base_path = os.path.join(BASE_DIR, uid, "cropped_faces")
|
| 206 |
+
groups = os.listdir(base_path)
|
| 207 |
+
group_images = {}
|
| 208 |
+
|
| 209 |
+
for group in groups:
|
| 210 |
+
group_path = os.path.join(base_path, group)
|
| 211 |
+
images = os.listdir(group_path)[:num_images]
|
| 212 |
+
# Store relative paths
|
| 213 |
+
images = [f"{uid}/cropped_faces/{group}/{img}" for img in images]
|
| 214 |
+
group_images[group] = images
|
| 215 |
+
|
| 216 |
+
return group_images
|
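
A minimal end-to-end sketch of how these pieces fit together (not part of the commit; the module paths, the UPLOAD_FOLDER import and the file locations are assumptions, and the real flow is driven through the FastAPI routes in face_swap/api.py):

import json
import os

import numpy as np

from config import UPLOAD_FOLDER                             # assumed to be defined in config.py
from face_swap.face_swap import crop_faces, run_face_swap    # module path assumed

uid = "demo"
input_path = os.path.join(UPLOAD_FOLDER, uid, "input.mp4")    # assumed upload location
result_path = os.path.join(UPLOAD_FOLDER, uid, "result.mp4")  # assumed output location

# Pass 1: detect, group and crop every face; writes the npy/json metadata used below.
crop_faces(input_path, uid)

with open(os.path.join(UPLOAD_FOLDER, uid, "all_info.json")) as f:
    info = json.load(f)   # frame_number keys become strings here, which is what run_face_swap expects

embeddings = np.load(os.path.join(UPLOAD_FOLDER, uid, "face_embeddings.npy"))
bboxes = np.load(os.path.join(UPLOAD_FOLDER, uid, "face_bboxes.npy"))
kps = np.load(os.path.join(UPLOAD_FOLDER, uid, "face_kps.npy"))

# Pass 2: swap group 0 with the face stored at <uid>/new_faces/0.jpg.
run_face_swap(uid, info["all_face_info"], [0], embeddings, bboxes, kps,
              input_path, result_path)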
backend/face_swap/inswapper.py
ADDED
|
@@ -0,0 +1,100 @@
import numpy as np
import onnxruntime
import cv2
import onnx
import os
from onnx import numpy_helper
from face_swap import face_align


class INSwapper():
    def __init__(self, model_file=None, session=None, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']):
        self.model_file = model_file
        self.session = session
        model = onnx.load(self.model_file)
        graph = model.graph
        self.emap = numpy_helper.to_array(graph.initializer[-1])
        self.input_mean = 0.0
        self.input_std = 255.0
        if self.session is None:
            assert self.model_file is not None, "Model file path is None."
            assert os.path.exists(self.model_file), "INSwapper weights not found."
            self.session = onnxruntime.InferenceSession(self.model_file, providers=providers)
        inputs = self.session.get_inputs()
        self.input_names = []
        for inp in inputs:
            self.input_names.append(inp.name)
        outputs = self.session.get_outputs()
        output_names = []
        for out in outputs:
            output_names.append(out.name)
        self.output_names = output_names
        assert len(self.output_names) == 1
        output_shape = outputs[0].shape
        input_cfg = inputs[0]
        input_shape = input_cfg.shape
        self.input_shape = input_shape
        self.input_size = tuple(input_shape[2:4][::-1])

    def forward(self, img, latent):
        img = (img - self.input_mean) / self.input_std
        pred = self.session.run(self.output_names, {self.input_names[0]: img, self.input_names[1]: latent})[0]
        return pred

    def get(self, img, target_face, source_face, paste_back=True):
        aimg, M = face_align.norm_crop2(img, target_face.kps, self.input_size[0])
        blob = cv2.dnn.blobFromImage(aimg, 1.0 / self.input_std, self.input_size,
                                     (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        latent = source_face.normed_embedding.reshape((1, -1))
        latent = np.dot(latent, self.emap)
        latent /= np.linalg.norm(latent)
        pred = self.session.run(self.output_names, {self.input_names[0]: blob, self.input_names[1]: latent})[0]
        print(type(pred), pred.shape)
        img_fake = pred.transpose((0, 2, 3, 1))[0]
        bgr_fake = np.clip(255 * img_fake, 0, 255).astype(np.uint8)[:, :, ::-1]
        if not paste_back:
            return bgr_fake, M
        else:
            target_img = img
            fake_diff = bgr_fake.astype(np.float32) - aimg.astype(np.float32)
            fake_diff = np.abs(fake_diff).mean(axis=2)
            fake_diff[:2, :] = 0
            fake_diff[-2:, :] = 0
            fake_diff[:, :2] = 0
            fake_diff[:, -2:] = 0
            IM = cv2.invertAffineTransform(M)
            img_white = np.full((aimg.shape[0], aimg.shape[1]), 255, dtype=np.float32)
            bgr_fake = cv2.warpAffine(bgr_fake, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            img_white = cv2.warpAffine(img_white, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            fake_diff = cv2.warpAffine(fake_diff, IM, (target_img.shape[1], target_img.shape[0]), borderValue=0.0)
            img_white[img_white > 20] = 255
            fthresh = 10
            fake_diff[fake_diff < fthresh] = 0
            fake_diff[fake_diff >= fthresh] = 255
            img_mask = img_white
            mask_h_inds, mask_w_inds = np.where(img_mask == 255)
            mask_h = np.max(mask_h_inds) - np.min(mask_h_inds)
            mask_w = np.max(mask_w_inds) - np.min(mask_w_inds)
            mask_size = int(np.sqrt(mask_h * mask_w))
            k = max(mask_size // 10, 10)
            kernel = np.ones((k, k), np.uint8)
            img_mask = cv2.erode(img_mask, kernel, iterations=1)
            kernel = np.ones((2, 2), np.uint8)
            fake_diff = cv2.dilate(fake_diff, kernel, iterations=1)
            k = max(mask_size // 20, 5)
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
            k = 5
            kernel_size = (k, k)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            fake_diff = cv2.GaussianBlur(fake_diff, blur_size, 0)
            img_mask /= 255
            fake_diff /= 255
            img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])
            fake_merged = img_mask * bgr_fake + (1 - img_mask) * target_img.astype(np.float32)
            fake_merged = fake_merged.astype(np.uint8)
            return fake_merged
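
The identity that conditions the swap is not the raw ArcFace vector: get() reshapes the normalised source embedding, projects it through the emap matrix stored as the last initializer of the ONNX graph, and re-normalises it before running the session. A small sketch of just that step (shapes inferred from the code above, illustrative only):

import numpy as np

def build_identity_latent(normed_embedding: np.ndarray, emap: np.ndarray) -> np.ndarray:
    # Mirrors the latent construction inside INSwapper.get (sketch, not library code).
    latent = normed_embedding.reshape((1, -1))  # unit-norm ArcFace embedding, e.g. shape (1, 512)
    latent = np.dot(latent, emap)               # project into the swapper's identity space
    latent /= np.linalg.norm(latent)            # re-normalise before feeding the ONNX session
    return latent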
backend/face_swap/retinaface.py
ADDED
|
@@ -0,0 +1,267 @@
from __future__ import division
import numpy as np
import onnxruntime
import os
import cv2


def softmax(z):
    assert len(z.shape) == 2
    s = np.max(z, axis=1)
    s = s[:, np.newaxis]  # necessary step to do broadcasting
    e_x = np.exp(z - s)
    div = np.sum(e_x, axis=1)
    div = div[:, np.newaxis]  # ditto
    return e_x / div


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom).
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded bboxes.
    """
    x1 = points[:, 0] - distance[:, 0]
    y1 = points[:, 1] - distance[:, 1]
    x2 = points[:, 0] + distance[:, 2]
    y2 = points[:, 1] + distance[:, 3]
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1])
        y1 = y1.clamp(min=0, max=max_shape[0])
        x2 = x2.clamp(min=0, max=max_shape[1])
        y2 = y2.clamp(min=0, max=max_shape[0])
    return np.stack([x1, y1, x2, y2], axis=-1)


def distance2kps(points, distance, max_shape=None):
    """Decode distance prediction to keypoints.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        distance (Tensor): Predicted offsets from the given point to
            each keypoint.
        max_shape (tuple): Shape of the image.

    Returns:
        Tensor: Decoded keypoints.
    """
    preds = []
    for i in range(0, distance.shape[1], 2):
        px = points[:, i % 2] + distance[:, i]
        py = points[:, i % 2 + 1] + distance[:, i + 1]
        if max_shape is not None:
            px = px.clamp(min=0, max=max_shape[1])
            py = py.clamp(min=0, max=max_shape[0])
        preds.append(px)
        preds.append(py)
    return np.stack(preds, axis=-1)


class RetinaFace:
    def __init__(self, model_file=None, session=None, providers=['CUDAExecutionProvider', 'CPUExecutionProvider']):
        self.model_file = model_file
        self.session = session
        self.taskname = 'detection'
        if self.session is None:
            assert self.model_file is not None, "Model file path is None."
            assert os.path.exists(self.model_file), "RetinaFace weights not found."
            self.session = onnxruntime.InferenceSession(self.model_file, providers=providers)
        self.center_cache = {}
        self.nms_thresh = 0.4
        self.det_thresh = 0.5
        self._init_vars()

    def _init_vars(self):
        input_cfg = self.session.get_inputs()[0]
        input_shape = input_cfg.shape
        if isinstance(input_shape[2], str):
            self.input_size = None
        else:
            self.input_size = tuple(input_shape[2:4][::-1])
        input_name = input_cfg.name
        self.input_shape = input_shape
        outputs = self.session.get_outputs()
        output_names = []
        for o in outputs:
            output_names.append(o.name)
        self.input_name = input_name
        self.output_names = output_names
        self.input_mean = 127.5
        self.input_std = 128.0

        self.use_kps = False
        self._anchor_ratio = 1.0
        self._num_anchors = 1
        if len(outputs) == 6:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
        elif len(outputs) == 9:
            self.fmc = 3
            self._feat_stride_fpn = [8, 16, 32]
            self._num_anchors = 2
            self.use_kps = True
        elif len(outputs) == 10:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
        elif len(outputs) == 15:
            self.fmc = 5
            self._feat_stride_fpn = [8, 16, 32, 64, 128]
            self._num_anchors = 1
            self.use_kps = True

    def prepare(self, ctx_id, **kwargs):
        if ctx_id < 0:
            self.session.set_providers(['CPUExecutionProvider'])
        nms_thresh = kwargs.get('nms_thresh', None)
        if nms_thresh is not None:
            self.nms_thresh = nms_thresh
        det_thresh = kwargs.get('det_thresh', None)
        if det_thresh is not None:
            self.det_thresh = det_thresh
        input_size = kwargs.get('input_size', None)
        if input_size is not None:
            if self.input_size is not None:
                print('warning: det_size is already set in detection model, ignore')
            else:
                self.input_size = input_size

    def forward(self, img, threshold):
        scores_list = []
        bboxes_list = []
        kpss_list = []
        input_size = tuple(img.shape[0:2][::-1])
        blob = cv2.dnn.blobFromImage(img, 1.0 / self.input_std, input_size, (self.input_mean, self.input_mean, self.input_mean), swapRB=True)
        net_outs = self.session.run(self.output_names, {self.input_name: blob})

        input_height = blob.shape[2]
        input_width = blob.shape[3]
        fmc = self.fmc
        for idx, stride in enumerate(self._feat_stride_fpn):
            scores = net_outs[idx]
            bbox_preds = net_outs[idx + fmc]
            bbox_preds = bbox_preds * stride
            if self.use_kps:
                kps_preds = net_outs[idx + fmc * 2] * stride
            height = input_height // stride
            width = input_width // stride
            K = height * width
            key = (height, width, stride)
            if key in self.center_cache:
                anchor_centers = self.center_cache[key]
            else:
                anchor_centers = np.stack(np.mgrid[:height, :width][::-1], axis=-1).astype(np.float32)
                anchor_centers = (anchor_centers * stride).reshape((-1, 2))
                if self._num_anchors > 1:
                    anchor_centers = np.stack([anchor_centers] * self._num_anchors, axis=1).reshape((-1, 2))
                if len(self.center_cache) < 100:
                    self.center_cache[key] = anchor_centers

            pos_inds = np.where(scores >= threshold)[0]
            bboxes = distance2bbox(anchor_centers, bbox_preds)
            pos_scores = scores[pos_inds]
            pos_bboxes = bboxes[pos_inds]
            scores_list.append(pos_scores)
            bboxes_list.append(pos_bboxes)
            if self.use_kps:
                kpss = distance2kps(anchor_centers, kps_preds)
                kpss = kpss.reshape((kpss.shape[0], -1, 2))
                pos_kpss = kpss[pos_inds]
                kpss_list.append(pos_kpss)
        return scores_list, bboxes_list, kpss_list

    def detect(self, img, input_size=None, max_num=0, metric='default'):
        assert input_size is not None or self.input_size is not None
        input_size = self.input_size if input_size is None else input_size

        im_ratio = float(img.shape[0]) / img.shape[1]
        model_ratio = float(input_size[1]) / input_size[0]
        if im_ratio > model_ratio:
            new_height = input_size[1]
            new_width = int(new_height / im_ratio)
        else:
            new_width = input_size[0]
            new_height = int(new_width * im_ratio)
        det_scale = float(new_height) / img.shape[0]
        resized_img = cv2.resize(img, (new_width, new_height))
        det_img = np.zeros((input_size[1], input_size[0], 3), dtype=np.uint8)
        det_img[:new_height, :new_width, :] = resized_img

        scores_list, bboxes_list, kpss_list = self.forward(det_img, self.det_thresh)

        scores = np.vstack(scores_list)
        scores_ravel = scores.ravel()
        order = scores_ravel.argsort()[::-1]
        bboxes = np.vstack(bboxes_list) / det_scale
        if self.use_kps:
            kpss = np.vstack(kpss_list) / det_scale
        pre_det = np.hstack((bboxes, scores)).astype(np.float32, copy=False)
        pre_det = pre_det[order, :]
        keep = self.nms(pre_det)
        det = pre_det[keep, :]
        if self.use_kps:
            kpss = kpss[order, :, :]
            kpss = kpss[keep, :, :]
        else:
            kpss = None
        if max_num > 0 and det.shape[0] > max_num:
            area = (det[:, 2] - det[:, 0]) * (det[:, 3] - det[:, 1])
            img_center = img.shape[0] // 2, img.shape[1] // 2
            offsets = np.vstack([
                (det[:, 0] + det[:, 2]) / 2 - img_center[1],
                (det[:, 1] + det[:, 3]) / 2 - img_center[0]
            ])
            offset_dist_squared = np.sum(np.power(offsets, 2.0), 0)
            if metric == 'max':
                values = area
            else:
                values = area - offset_dist_squared * 2.0  # some extra weight on the centering
            bindex = np.argsort(values)[::-1]
            bindex = bindex[0:max_num]
            det = det[bindex, :]
            if kpss is not None:
                kpss = kpss[bindex, :]
        return det, kpss

    def nms(self, dets):
        thresh = self.nms_thresh
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]

        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]

        keep = []
        while order.size > 0:
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])

            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)

            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]

        return keep
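
The decoding helpers turn per-anchor distance regressions into absolute coordinates. A tiny worked example for distance2bbox (the import path is assumed from the repo layout): an anchor centre at (32, 32) with predicted distances (left, top, right, bottom) = (10, 12, 14, 16) decodes to the box [22, 20, 46, 48].

import numpy as np
from face_swap.retinaface import distance2bbox  # assumed import path

points = np.array([[32.0, 32.0]])
distance = np.array([[10.0, 12.0, 14.0, 16.0]])
print(distance2bbox(points, distance))  # -> [[22. 20. 46. 48.]]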
backend/face_swap/utils/common.py
ADDED
|
@@ -0,0 +1,49 @@
import numpy as np
from numpy.linalg import norm as l2norm
#from easydict import EasyDict

class Face(dict):

    def __init__(self, d=None, **kwargs):
        if d is None:
            d = {}
        if kwargs:
            d.update(**kwargs)
        for k, v in d.items():
            setattr(self, k, v)
        # Class attributes
        #for k in self.__class__.__dict__.keys():
        #    if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
        #        setattr(self, k, getattr(self, k))

    def __setattr__(self, name, value):
        if isinstance(value, (list, tuple)):
            value = [self.__class__(x)
                     if isinstance(x, dict) else x for x in value]
        elif isinstance(value, dict) and not isinstance(value, self.__class__):
            value = self.__class__(value)
        super(Face, self).__setattr__(name, value)
        super(Face, self).__setitem__(name, value)

    __setitem__ = __setattr__

    def __getattr__(self, name):
        return None

    @property
    def embedding_norm(self):
        if self.embedding is None:
            return None
        return l2norm(self.embedding)

    @property
    def normed_embedding(self):
        if self.embedding is None:
            return None
        return self.embedding / self.embedding_norm

    @property
    def sex(self):
        if self.gender is None:
            return None
        return 'M' if self.gender == 1 else 'F'
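
Face is a dict subclass whose keys double as attributes, which is why the rest of the backend can mix face['embedding'] assignments with face.normed_embedding reads. A quick illustration (the import path is assumed from the repo layout):

import numpy as np
from face_swap.utils.common import Face  # assumed import path

face = Face(bbox=np.array([0, 0, 100, 100]), det_score=0.99)
face['embedding'] = np.ones(4, dtype=np.float32)  # item assignment is also attribute assignment
print(face.embedding_norm)    # 2.0
print(face.normed_embedding)  # [0.5 0.5 0.5 0.5]
print(face.kps)               # missing keys resolve to None instead of raising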
backend/face_swap/weights/readme.txt
ADDED
|
@@ -0,0 +1 @@
Download the model weight files from the release of the repo and place them here.
backend/main.py
ADDED
|
@@ -0,0 +1,24 @@
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from face_swap.api import router

app = FastAPI()

# Add CORS middleware
origins = [
    "http://localhost:3000",  # React frontend
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(router)

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
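
The React client below drives this API in a fixed order: upload the video, fetch the grouped face crops, upload one replacement face per group, trigger the swap, then download the result. A hedged Python sketch of the same flow (endpoint paths are taken from the frontend code; the exact request and response fields live in face_swap/api.py and may differ):

import requests

BASE = "http://localhost:8000"

# 1. Upload the source video; the response carries the job uid.
with open("input.mp4", "rb") as f:
    uid = requests.post(f"{BASE}/uploadvideo/", files={"file": f}).json()["uid"]

# 2. List the cropped faces, grouped by detected identity.
groups = requests.get(f"{BASE}/get_images/{uid}").json()

# 3. Upload a replacement face for group 0.
with open("new_face.jpg", "rb") as f:
    requests.post(f"{BASE}/uploadnewfaces/{uid}/0", files={"file": f})

# 4. Run the swap for the chosen groups, then 5. download the result video.
requests.post(f"{BASE}/faceswap/{uid}", json={"group_ids": [0]})
result = requests.get(f"{BASE}/download_result_video/{uid}")
with open(f"{uid}.mp4", "wb") as f:
    f.write(result.content)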
backend/requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
fastapi
uvicorn
opencv-python
pydantic
numpy
onnx
onnxruntime-gpu
scikit-image
frontend/vfs/.gitignore
ADDED
|
@@ -0,0 +1,23 @@
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.

# dependencies
/node_modules
/.pnp
.pnp.js

# testing
/coverage

# production
/build

# misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local

npm-debug.log*
yarn-debug.log*
yarn-error.log*
frontend/vfs/README.md
ADDED
|
@@ -0,0 +1,70 @@
# Getting Started with Create React App

This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).

## Available Scripts

In the project directory, you can run:

### `npm start`

Runs the app in the development mode.\
Open [http://localhost:3000](http://localhost:3000) to view it in your browser.

The page will reload when you make changes.\
You may also see any lint errors in the console.

### `npm test`

Launches the test runner in the interactive watch mode.\
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.

### `npm run build`

Builds the app for production to the `build` folder.\
It correctly bundles React in production mode and optimizes the build for the best performance.

The build is minified and the filenames include the hashes.\
Your app is ready to be deployed!

See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.

### `npm run eject`

**Note: this is a one-way operation. Once you `eject`, you can't go back!**

If you aren't satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.

Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you're on your own.

You don't have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn't feel obligated to use this feature. However we understand that this tool wouldn't be useful if you couldn't customize it when you are ready for it.

## Learn More

You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).

To learn React, check out the [React documentation](https://reactjs.org/).

### Code Splitting

This section has moved here: [https://facebook.github.io/create-react-app/docs/code-splitting](https://facebook.github.io/create-react-app/docs/code-splitting)

### Analyzing the Bundle Size

This section has moved here: [https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size](https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size)

### Making a Progressive Web App

This section has moved here: [https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app](https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app)

### Advanced Configuration

This section has moved here: [https://facebook.github.io/create-react-app/docs/advanced-configuration](https://facebook.github.io/create-react-app/docs/advanced-configuration)

### Deployment

This section has moved here: [https://facebook.github.io/create-react-app/docs/deployment](https://facebook.github.io/create-react-app/docs/deployment)

### `npm run build` fails to minify

This section has moved here: [https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify](https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify)
frontend/vfs/package-lock.json
ADDED
|
The diff for this file is too large to render.
frontend/vfs/package.json
ADDED
|
@@ -0,0 +1,42 @@
{
  "name": "vfs",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@testing-library/jest-dom": "^5.17.0",
    "@testing-library/react": "^13.4.0",
    "@testing-library/user-event": "^13.5.0",
    "autoprefixer": "^10.4.16",
    "axios": "^1.6.3",
    "postcss": "^8.4.32",
    "react": "^18.2.0",
    "react-dom": "^18.2.0",
    "react-scripts": "5.0.1",
    "tailwindcss": "^3.4.0",
    "web-vitals": "^2.1.4"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}
frontend/vfs/postcss.config.js
ADDED
|
@@ -0,0 +1,6 @@
module.exports = {
  plugins: {
    tailwindcss: {},
    autoprefixer: {},
  },
}
frontend/vfs/public/apple-touch-icon.png
ADDED
|
|
frontend/vfs/public/browserconfig.xml
ADDED
|
@@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<browserconfig>
    <msapplication>
        <tile>
            <square150x150logo src="/mstile-150x150.png"/>
            <TileColor>#da532c</TileColor>
        </tile>
    </msapplication>
</browserconfig>
frontend/vfs/public/favicon-16x16.png
ADDED
|
|
frontend/vfs/public/favicon-32x32.png
ADDED
|
|
frontend/vfs/public/favicon.ico
ADDED
|
|
frontend/vfs/public/index.html
ADDED
|
@@ -0,0 +1,43 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8" />
    <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
    <meta name="viewport" content="width=device-width, initial-scale=1" />
    <meta name="theme-color" content="#000000" />
    <meta
      name="description"
      content="Face Swap in a video."
    />
    <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
    <!--
      manifest.json provides metadata used when your web app is installed on a
      user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
    -->
    <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
    <!--
      Notice the use of %PUBLIC_URL% in the tags above.
      It will be replaced with the URL of the `public` folder during the build.
      Only files inside the `public` folder can be referenced from the HTML.

      Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
      work correctly both with client-side routing and a non-root public URL.
      Learn how to configure a non-root public URL by running `npm run build`.
    -->
    <title>Video Face Swap</title>
  </head>
  <body>
    <noscript>You need to enable JavaScript to run this app.</noscript>
    <div id="root"></div>
    <!--
      This HTML file is a template.
      If you open it directly in the browser, you will see an empty page.

      You can add webfonts, meta tags, or analytics to this file.
      The build step will place the bundled scripts into the <body> tag.

      To begin the development, run `npm start` or `yarn start`.
      To create a production bundle, use `npm run build` or `yarn build`.
    -->
  </body>
</html>
frontend/vfs/public/logo192.png
ADDED
|
frontend/vfs/public/logo512.png
ADDED
|
frontend/vfs/public/manifest.json
ADDED
|
@@ -0,0 +1,25 @@
{
  "short_name": "VFS",
  "name": "Video Face Swap",
  "icons": [
    {
      "src": "favicon.ico",
      "sizes": "64x64 32x32 24x24 16x16",
      "type": "image/x-icon"
    },
    {
      "src": "logo192.png",
      "type": "image/png",
      "sizes": "192x192"
    },
    {
      "src": "logo512.png",
      "type": "image/png",
      "sizes": "512x512"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}
frontend/vfs/public/mstile-150x150.png
ADDED
|
frontend/vfs/public/robots.txt
ADDED
|
@@ -0,0 +1,3 @@
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow:
frontend/vfs/public/safari-pinned-tab.svg
ADDED
|
|
frontend/vfs/public/site.webmanifest
ADDED
|
@@ -0,0 +1,19 @@
{
    "name": "",
    "short_name": "",
    "icons": [
        {
            "src": "/android-chrome-192x192.png",
            "sizes": "192x192",
            "type": "image/png"
        },
        {
            "src": "/android-chrome-512x512.png",
            "sizes": "512x512",
            "type": "image/png"
        }
    ],
    "theme_color": "#ffffff",
    "background_color": "#ffffff",
    "display": "standalone"
}
frontend/vfs/src/App.css
ADDED
|
@@ -0,0 +1,40 @@
.App {
  text-align: center;
  background-color: #9CA3AF; /* Adjust the background to match your design */
}

.App-main {
  padding: 20px;
  max-width: 800px; /* Adjust as needed */
  margin: auto;
}

button {
  background-color: #61dafb;
  border: none;
  border-radius: 4px;
  padding: 10px 20px;
  margin-top: 20px;
  cursor: pointer;
  font-size: 16px;
  transition: background-color 0.3s;
}

button:hover {
  background-color: #21a1f1;
}

button:disabled {
  background-color: #ccc;
}

input[type="file"] {
  margin-top: 20px;
}

video {
  margin-top: 20px;
  max-width: 90%;
  border: 2px solid #61dafb;
  border-radius: 4px;
}
frontend/vfs/src/App.js
ADDED
|
@@ -0,0 +1,21 @@
import React from 'react';
import './App.css';
import Header from './components/Header';
import MainContent from './components/MainContent';
import Footer from './components/Footer';


function App() {
  return (
    <div className="App">
      <Header />
      <MainContent />
      <Footer />
    </div>
  );
}

export default App;
frontend/vfs/src/App.test.js
ADDED
|
@@ -0,0 +1,8 @@
// Stock Create React App test: App no longer renders a "learn react" link,
// so this assertion will fail against the current App component.
import { render, screen } from '@testing-library/react';
import App from './App';

test('renders learn react link', () => {
  render(<App />);
  const linkElement = screen.getByText(/learn react/i);
  expect(linkElement).toBeInTheDocument();
});
frontend/vfs/src/assets/logo.png
ADDED
|
frontend/vfs/src/assets/screenshot_home_page.png
ADDED
|
frontend/vfs/src/components/Footer.js
ADDED
|
@@ -0,0 +1,20 @@
import React from "react";

const Footer = () => {
  const footerStyle = {
    color: "white",
    backgroundColor: "#000",
    padding: "10px 20px",
    textAlign: "center",
  };

  return (
    <footer style={footerStyle}>
      <p>
        © {new Date().getFullYear()} Video Face Swap. All rights reserved.
      </p>
    </footer>
  );
};

export default Footer;
frontend/vfs/src/components/Header.js
ADDED
|
@@ -0,0 +1,19 @@
import React from 'react';
import logo from '../assets/logo.png';

const Header = () => {
  return (
    <div className="bg-black text-white flex justify-between items-center p-8 rounded-3xl shadow-xl max-w-4xl mx-auto w-full">
      <img src={logo} alt='VFS Logo' className="w-[100px] h-auto" />
      <ul className="list-none flex gap-5 text-sm">
        <li><a href="#" className="text-white no-underline mx-2">Home</a></li>
        <li><a href="#" className="text-gray-400 no-underline mx-2">Pricing</a></li>
        <li><a href="#" className="text-gray-400 no-underline mx-2">Contact Us</a></li>
      </ul>
    </div>
  );
};

export default Header;
frontend/vfs/src/components/ImageDisplay.js
ADDED
|
@@ -0,0 +1,195 @@
import React, { useState } from "react";

const ImageDisplay = ({ uid }) => {
  const [imageGroups, setImageGroups] = useState({});
  const [uploadedImages, setUploadedImages] = useState({});
  const [uploadedGroups, setUploadedGroups] = useState(new Set());
  const [isLoading, setIsLoading] = useState(false);
  const [isDownload, setIsDownload] = useState(false);

  const [currentGroupIndex, setCurrentGroupIndex] = useState(0);

  const groupKeys = Object.keys(imageGroups);
  const currentGroup = groupKeys[currentGroupIndex];

  const nextGroup = () => {
    if (currentGroupIndex < groupKeys.length - 1) {
      setCurrentGroupIndex(currentGroupIndex + 1);
    }
  };

  const previousGroup = () => {
    if (currentGroupIndex > 0) {
      setCurrentGroupIndex(currentGroupIndex - 1);
    }
  };

  const fetchImages = async () => {
    setIsLoading(true);
    try {
      const response = await fetch(`http://localhost:8000/get_images/${uid}`);
      if (!response.ok) {
        throw new Error(`HTTP error! Status: ${response.status}`);
      }
      const data = await response.json();
      setImageGroups(data);
    } catch (error) {
      console.error("Could not fetch images:", error);
    } finally {
      setIsLoading(false);
    }
  };

  const handleFileChange = (event, group) => {
    setUploadedImages({
      ...uploadedImages,
      [group]: event.target.files[0],
    });
  };

  const uploadImage = async (group) => {
    const formData = new FormData();
    formData.append("file", uploadedImages[group]);

    try {
      const response = await fetch(
        `http://localhost:8000/uploadnewfaces/${uid}/${group}`,
        {
          method: "POST",
          body: formData,
        }
      );
      if (!response.ok) {
        throw new Error(`HTTP error! Status: ${response.status}`);
      }
      setUploadedGroups(new Set(uploadedGroups.add(group)));
      // fetchImages();
    } catch (error) {
      console.error("Could not upload image:", error);
    }
  };

  const downloadVideo = async () => {
    const response = await fetch(
      `http://localhost:8000/download_result_video/${uid}`
    );
    const blob = await response.blob();
    const downloadUrl = window.URL.createObjectURL(blob);
    const link = document.createElement("a");
    link.href = downloadUrl;
    link.download = uid + ".mp4";
    document.body.appendChild(link);
    link.click();
    link.remove();
    window.URL.revokeObjectURL(downloadUrl);
  };

  const faceSwap = async () => {
    try {
      const groupIdsArray = Array.from(uploadedGroups).map(Number);
      console.log(groupIdsArray);
      const response = await fetch(`http://localhost:8000/faceswap/${uid}`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ group_ids: groupIdsArray })
      });

      if (!response.ok) {
        const errorResponse = await response.json();
        console.error('Error Response:', errorResponse);
        throw new Error(`HTTP error! Status: ${response.status}`);
      }

      const data = await response.json();
      console.log('Face swap response:', data);
      setIsDownload(true);
      // fetchImages();
    } catch (error) {
      console.error("Could not perform face swap:", error);
    }
  };

  return (
    <div className="space-y-4">
      <button
        className="px-4 py-2 bg-blue-500 text-white rounded hover:bg-blue-600 transition duration-300"
        onClick={fetchImages}
        disabled={isLoading}
      >
        {isLoading ? "Loading..." : "Load Images"}
      </button>

      {groupKeys.length > 0 && (
        <div className="space-y-2">
          <h3 className="font-semibold">Group {currentGroup}</h3>
          <label
            htmlFor="new-face"
            className="block w-full px-4 py-2 bg-blue-600 text-white rounded cursor-pointer mb-2 transition-colors duration-300 hover:bg-blue-700"
          >
            Choose New Face
          </label>
          <input
            id="new-face"
            className="hidden"
            type="file"
            onChange={(event) => handleFileChange(event, currentGroup)}
          />
          <button
            className="ml-2 px-4 py-2 bg-green-500 text-white rounded hover:bg-green-600 transition duration-300"
            onClick={() => uploadImage(currentGroup)}
          >
            Upload Image
          </button>
          <div className="flex flex-wrap gap-2">
            {imageGroups[currentGroup].map((image, idx) => (
              <img
                key={idx}
                src={`http://localhost:8000/images/${image}`}
                alt={`Group ${currentGroup} ${idx}`}
                className="w-14 h-14 object-cover"
              />
            ))}
          </div>
        </div>
      )}

      <div className="flex justify-between mt-4">
        <button
          className="px-4 py-2 bg-gray-500 text-white rounded hover:bg-gray-600 transition duration-300"
          onClick={previousGroup}
          disabled={currentGroupIndex === 0}
        >
          Previous
        </button>
        <button
          className="px-4 py-2 bg-gray-500 text-white rounded hover:bg-gray-600 transition duration-300"
          onClick={nextGroup}
          disabled={currentGroupIndex === groupKeys.length - 1}
        >
          Next
        </button>
      </div>

      {uploadedGroups.size > 0 && (
        <button
          className="px-4 py-2 bg-red-500 text-white rounded hover:bg-red-600 transition duration-300"
          onClick={faceSwap}
        >
          Face Swap for Group {Array.from(uploadedGroups).join(", ")}
        </button>
      )}

      {isDownload && (
        <button
          className="px-4 py-2 bg-yellow-500 text-white rounded hover:bg-yellow-600 transition duration-300"
          onClick={downloadVideo}
        >
          Download
        </button>
      )}
    </div>
  );
};

export default ImageDisplay;
frontend/vfs/src/components/MainContent.js
ADDED
|
@@ -0,0 +1,90 @@
import React, { useRef, useState } from "react";
import axios from "axios";
import ImageDisplay from "./ImageDisplay";

const MainContent = () => {
  const fileInput = useRef(null);
  const [isSuccessUpload, setIsSuccessUpload] = useState(false);
  const [uid, setUid] = useState("");
  const [videoPreview, setVideoPreview] = useState(null);
  const [isLoading, setIsLoading] = useState(false);

  const handleFileSelect = () => {
    const file = fileInput.current.files[0];
    if (file) {
      setVideoPreview(URL.createObjectURL(file));
    }
  };

  const handleUpload = async () => {
    setIsLoading(true);

    const formData = new FormData();
    formData.append('file', fileInput.current.files[0]);

    try {
      const response = await axios.post('http://localhost:8000/uploadvideo/', formData, {
        headers: {
          'Content-Type': 'multipart/form-data',
        },
      });
      alert(response.data.message);
      setUid(response.data.uid);
      setIsSuccessUpload(true);
    } catch (error) {
      alert('Failed to upload video. ' + error.message);
    } finally {
      setIsLoading(false);
    }
  };

  return (
    <div className="text-black bg-gray-400 p-5 min-h-[calc(100vh-100px)]">
      <div className=" text-black font-bold text-4xl">Video Face Swap</div>
      <div className="bg-gray-400 border-2 border-dashed border-gray-300 rounded p-5 my-5">
        <div className="flex justify-between items-start gap-4">
          {videoPreview && (
            <video
              className="w-1/2 md:w-1/4"
              controls
              src={videoPreview}
            ></video>
          )}
          {isSuccessUpload && (
            <div className="w-1/2 md:w-1/2">
              <ImageDisplay uid={uid} />
            </div>
          )}
        </div>

        <div className="text-center mt-4 flex flex-col items-center">
          <div className="w-3/4 md:w-1/2">
            <label
              htmlFor="video-file"
              className="block w-full px-4 py-2 bg-blue-600 text-white rounded cursor-pointer mb-2 transition-colors duration-300 hover:bg-blue-700"
            >
              Choose Video File
            </label>
            <input
              id="video-file"
              type="file"
              ref={fileInput}
              className="hidden"
              accept="video/*"
              onChange={handleFileSelect}
            />
          </div>
          <button
            className="px-5 py-2.5 bg-green-500 text-white rounded cursor-pointer transition-colors duration-300 hover:bg-green-600"
            onClick={handleUpload}
            disabled={isLoading}
          >
            {isLoading ? "Uploading..." : "Upload"}
          </button>
        </div>
      </div>
    </div>
  );
};

export default MainContent;
frontend/vfs/src/index.css
ADDED
|
@@ -0,0 +1,18 @@
@tailwind base;
@tailwind components;
@tailwind utilities;


body {
  margin: 0;
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
    'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
    sans-serif;
  -webkit-font-smoothing: antialiased;
  -moz-osx-font-smoothing: grayscale;
}

code {
  font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
    monospace;
}
frontend/vfs/src/index.js
ADDED
|
@@ -0,0 +1,17 @@
import React from 'react';
import ReactDOM from 'react-dom/client';
import './index.css';
import App from './App';
import reportWebVitals from './reportWebVitals';

const root = ReactDOM.createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);

// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
reportWebVitals();