Guillaume Massol
committed
Commit: f95d332
Parent(s): 18aa057

Delete all
Files changed:
- .pre-commit-config.yaml +0 -47
- .style.yapf +0 -5
- LICENSE +0 -21
- LICENSE.ControlNet +0 -201
- README.md +0 -14
- app.py +0 -150
- app_canny.py +0 -91
- app_depth.py +0 -86
- app_fake_scribble.py +0 -83
- app_hed.py +0 -83
- app_hough.py +0 -97
- app_normal.py +0 -93
- app_pose.py +0 -89
- app_scribble.py +0 -77
- app_scribble_interactive.py +0 -103
- app_seg.py +0 -87
- model.py +0 -644
- notebooks/notebook.ipynb +0 -80
- patch +0 -128
- requirements.txt +0 -22
- style.css +0 -3
.pre-commit-config.yaml
DELETED
@@ -1,47 +0,0 @@
-exclude: patch
-repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.2.0
-  hooks:
-  - id: check-executables-have-shebangs
-  - id: check-json
-  - id: check-merge-conflict
-  - id: check-shebang-scripts-are-executable
-  - id: check-toml
-  - id: check-yaml
-  - id: double-quote-string-fixer
-  - id: end-of-file-fixer
-  - id: mixed-line-ending
-    args: ['--fix=lf']
-  - id: requirements-txt-fixer
-  - id: trailing-whitespace
-- repo: https://github.com/myint/docformatter
-  rev: v1.4
-  hooks:
-  - id: docformatter
-    args: ['--in-place']
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-- repo: https://github.com/pre-commit/mirrors-mypy
-  rev: v0.991
-  hooks:
-  - id: mypy
-    args: ['--ignore-missing-imports']
-    additional_dependencies: ['types-python-slugify']
-- repo: https://github.com/google/yapf
-  rev: v0.32.0
-  hooks:
-  - id: yapf
-    args: ['--parallel', '--in-place']
-- repo: https://github.com/kynan/nbstripout
-  rev: 0.6.0
-  hooks:
-  - id: nbstripout
-    args: ['--extra-keys', 'metadata.interpreter metadata.kernelspec cell.metadata.pycharm']
-- repo: https://github.com/nbQA-dev/nbQA
-  rev: 1.6.4
-  hooks:
-  - id: nbqa-isort
-  - id: nbqa-yapf
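The deleted config wired a standard lint/format stack (pre-commit-hooks, docformatter, isort, mypy, yapf, nbstripout, nbQA) into git's commit hook. A minimal sketch of how such a config is exercised, assuming `pre-commit` is installed in the environment (this snippet is not part of the deleted repo):

```python
# Sketch, not from the deleted repo: drive the pre-commit CLI from Python.
# `pre-commit install` registers the git hook; `run --all-files` applies
# every hook declared in .pre-commit-config.yaml to the whole tree.
import subprocess

subprocess.run(['pre-commit', 'install'], check=True)
subprocess.run(['pre-commit', 'run', '--all-files'], check=True)
```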
.style.yapf
DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
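These yapf knobs (pep8 base, no blank line before nested defs, two spaces before inline comments, breaks placed before logical operators) are what gave the deleted Python files their layout. A minimal sketch of applying the same style programmatically, assuming `yapf` is installed and this `.style.yapf` is in the working directory (not part of the deleted repo):

```python
# Sketch, not from the deleted repo: format a snippet with yapf's Python API
# using the style file above.
from yapf.yapflib.yapf_api import FormatCode

source = 'x = {  "a":1}\nif a and b:\n    pass\n'
formatted, changed = FormatCode(source, style_config='.style.yapf')
print(formatted)  # reformatted source; `changed` is True if anything moved
```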
LICENSE
DELETED
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2023 hysts
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
LICENSE.ControlNet
DELETED
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
README.md
DELETED
@@ -1,14 +0,0 @@
----
-title: ControlNet
-emoji: 🌖
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.22.1
-python_version: 3.10.9
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
DELETED
@@ -1,150 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import os
-import pathlib
-import shlex
-import subprocess
-
-import gradio as gr
-
-if os.getenv('SYSTEM') == 'spaces':
-    with open('patch') as f:
-        subprocess.run(shlex.split('patch -p1'), stdin=f, cwd='ControlNet')
-
-base_url = 'https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/'
-names = [
-    'body_pose_model.pth',
-    'dpt_hybrid-midas-501f0c75.pt',
-    'hand_pose_model.pth',
-    'mlsd_large_512_fp32.pth',
-    'mlsd_tiny_512_fp32.pth',
-    'network-bsds500.pth',
-    'upernet_global_small.pth',
-]
-for name in names:
-    command = f'wget https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/{name} -O {name}'
-    out_path = pathlib.Path(f'ControlNet/annotator/ckpts/{name}')
-    if out_path.exists():
-        continue
-    subprocess.run(shlex.split(command), cwd='ControlNet/annotator/ckpts/')
-
-from app_canny import create_demo as create_demo_canny
-from app_depth import create_demo as create_demo_depth
-from app_fake_scribble import create_demo as create_demo_fake_scribble
-from app_hed import create_demo as create_demo_hed
-from app_hough import create_demo as create_demo_hough
-from app_normal import create_demo as create_demo_normal
-from app_pose import create_demo as create_demo_pose
-from app_scribble import create_demo as create_demo_scribble
-from app_scribble_interactive import \
-    create_demo as create_demo_scribble_interactive
-from app_seg import create_demo as create_demo_seg
-from model import Model, download_all_controlnet_weights
-
-DESCRIPTION = '# [ControlNet](https://github.com/lllyasviel/ControlNet)'
-
-SPACE_ID = os.getenv('SPACE_ID')
-ALLOW_CHANGING_BASE_MODEL = SPACE_ID != 'hysts/ControlNet'
-
-if SPACE_ID is not None:
-    DESCRIPTION += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'
-
-MAX_IMAGES = int(os.getenv('MAX_IMAGES', '3'))
-DEFAULT_NUM_IMAGES = min(MAX_IMAGES, int(os.getenv('DEFAULT_NUM_IMAGES', '1')))
-
-if os.getenv('SYSTEM') == 'spaces':
-    download_all_controlnet_weights()
-
-DEFAULT_MODEL_ID = os.getenv('DEFAULT_MODEL_ID',
-                             'runwayml/stable-diffusion-v1-5')
-model = Model(base_model_id=DEFAULT_MODEL_ID, task_name='canny')
-
-with gr.Blocks(css='style.css') as demo:
-    gr.Markdown(DESCRIPTION)
-    with gr.Tabs():
-        with gr.TabItem('Canny'):
-            create_demo_canny(model.process_canny,
-                              max_images=MAX_IMAGES,
-                              default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Hough'):
-            create_demo_hough(model.process_hough,
-                              max_images=MAX_IMAGES,
-                              default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('HED'):
-            create_demo_hed(model.process_hed,
-                            max_images=MAX_IMAGES,
-                            default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Scribble'):
-            create_demo_scribble(model.process_scribble,
-                                 max_images=MAX_IMAGES,
-                                 default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Scribble Interactive'):
-            create_demo_scribble_interactive(
-                model.process_scribble_interactive,
-                max_images=MAX_IMAGES,
-                default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Fake Scribble'):
-            create_demo_fake_scribble(model.process_fake_scribble,
-                                      max_images=MAX_IMAGES,
-                                      default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Pose'):
-            create_demo_pose(model.process_pose,
-                             max_images=MAX_IMAGES,
-                             default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Segmentation'):
-            create_demo_seg(model.process_seg,
-                            max_images=MAX_IMAGES,
-                            default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Depth'):
-            create_demo_depth(model.process_depth,
-                              max_images=MAX_IMAGES,
-                              default_num_images=DEFAULT_NUM_IMAGES)
-        with gr.TabItem('Normal map'):
-            create_demo_normal(model.process_normal,
-                               max_images=MAX_IMAGES,
-                               default_num_images=DEFAULT_NUM_IMAGES)
-
-    with gr.Accordion(label='Base model', open=False):
-        with gr.Row():
-            with gr.Column():
-                current_base_model = gr.Text(label='Current base model')
-            with gr.Column(scale=0.3):
-                check_base_model_button = gr.Button('Check current base model')
-        with gr.Row():
-            with gr.Column():
-                new_base_model_id = gr.Text(
-                    label='New base model',
-                    max_lines=1,
-                    placeholder='runwayml/stable-diffusion-v1-5',
-                    info='The base model must be compatible with Stable Diffusion v1.5.',
-                    interactive=ALLOW_CHANGING_BASE_MODEL)
-            with gr.Column(scale=0.3):
-                change_base_model_button = gr.Button(
-                    'Change base model', interactive=ALLOW_CHANGING_BASE_MODEL)
-        if not ALLOW_CHANGING_BASE_MODEL:
-            gr.Markdown(
-                '''The base model is not allowed to be changed in this Space so as not to slow down the demo, but it can be changed if you duplicate the Space. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a>'''
-            )
-
-    gr.Markdown('''### Related Spaces
-
-- [Space using Anything-v4.0 as base model](https://huggingface.co/spaces/hysts/ControlNet-with-Anything-v4)
-- https://huggingface.co/spaces/jonigata/PoseMaker2
-- https://huggingface.co/spaces/diffusers/controlnet-openpose
-- https://huggingface.co/spaces/diffusers/controlnet-canny
-''')
-
-    check_base_model_button.click(fn=lambda: model.base_model_id,
-                                  outputs=current_base_model,
-                                  queue=False)
-    new_base_model_id.submit(fn=model.set_base_model,
-                             inputs=new_base_model_id,
-                             outputs=current_base_model)
-    change_base_model_button.click(fn=model.set_base_model,
-                                   inputs=new_base_model_id,
-                                   outputs=current_base_model)
-
-demo.queue(api_open=False).launch(file_directories=['/tmp'])
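The deleted app.py fetched the annotator checkpoints by shelling out to wget. A minimal alternative sketch using `huggingface_hub` (my assumption, not what the deleted app did), which reuses the local Hub cache and skips files already in place:

```python
# Sketch, not from the deleted repo: fetch the same annotator checkpoints
# via huggingface_hub instead of wget.
import pathlib
import shutil

from huggingface_hub import hf_hub_download

names = ['body_pose_model.pth', 'hand_pose_model.pth']  # subset of the list in app.py
for name in names:
    out_path = pathlib.Path('ControlNet/annotator/ckpts') / name
    if out_path.exists():
        continue
    # Downloads into (or reuses) the HF cache, then copies into place.
    cached = hf_hub_download(repo_id='lllyasviel/ControlNet',
                             filename=f'annotator/ckpts/{name}')
    out_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(cached, out_path)
```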
app_canny.py
DELETED
@@ -1,91 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_canny2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Canny Edge Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    canny_low_threshold = gr.Slider(
-                        label='Canny low threshold',
-                        minimum=1,
-                        maximum=255,
-                        value=100,
-                        step=1)
-                    canny_high_threshold = gr.Slider(
-                        label='Canny high threshold',
-                        minimum=1,
-                        maximum=255,
-                        value=200,
-                        step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            canny_low_threshold,
-            canny_high_threshold,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='canny')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_canny)
-    demo.queue().launch()
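Because `run_button.click(..., api_name='canny')` names the event, Gradio 3.x exposes it over HTTP at `/run/canny` (the other tabs expose analogous endpoints such as `/run/depth` and `/run/pose`). A minimal sketch of calling it, under several assumptions: the app is running locally on the default port, a test image exists at `input.png`, and the endpoint accepts direct POSTs (queued events in Gradio 3 are routed over a websocket protocol instead, so this may require launching without `.queue()`; the combined app.py also sets `api_open=False`):

```python
# Sketch, not from the deleted repo: call the named Gradio 3.x endpoint.
# The payload list mirrors the `inputs` order in create_demo above.
import base64

import requests

with open('input.png', 'rb') as f:  # hypothetical test image
    image = 'data:image/png;base64,' + base64.b64encode(f.read()).decode()

payload = {
    'data': [
        image,  # input_image
        'a house in the woods',  # prompt
        'best quality, extremely detailed',  # a_prompt
        'lowres, bad anatomy, worst quality',  # n_prompt
        1,  # num_samples
        512,  # image_resolution
        20,  # num_steps
        9.0,  # guidance_scale
        42,  # seed
        100,  # canny_low_threshold
        200,  # canny_high_threshold
    ]
}
resp = requests.post('http://127.0.0.1:7860/run/canny', json=payload)
images = resp.json()['data'][0]  # gallery output: list of generated images
```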
app_depth.py
DELETED
@@ -1,86 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_depth2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Depth Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    is_depth_image = gr.Checkbox(label='Is depth image',
-                                                 value=False)
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='Depth Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=384,
-                                                  step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            is_depth_image,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='depth')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_depth)
-    demo.queue().launch()
app_fake_scribble.py
DELETED
@@ -1,83 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_fake_scribble2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Fake Scribble Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='HED Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='fake_scribble')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_fake_scribble)
-    demo.queue().launch()
app_hed.py
DELETED
@@ -1,83 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_hed2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with HED Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='HED Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='hed')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_hed)
-    demo.queue().launch()
app_hough.py
DELETED
@@ -1,97 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_hough2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Hough Line Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='Hough Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=1)
-                    mlsd_value_threshold = gr.Slider(
-                        label='Hough value threshold (MLSD)',
-                        minimum=0.01,
-                        maximum=2.0,
-                        value=0.1,
-                        step=0.01)
-                    mlsd_distance_threshold = gr.Slider(
-                        label='Hough distance threshold (MLSD)',
-                        minimum=0.01,
-                        maximum=20.0,
-                        value=0.1,
-                        step=0.01)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            mlsd_value_threshold,
-            mlsd_distance_threshold,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='hough')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_hough)
-    demo.queue().launch()
app_normal.py
DELETED
@@ -1,93 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_normal2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Normal Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    is_normal_image = gr.Checkbox(label='Is normal image',
-                                                  value=False)
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='Normal Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=384,
-                                                  step=1)
-                    bg_threshold = gr.Slider(
-                        label='Normal background threshold',
-                        minimum=0.0,
-                        maximum=1.0,
-                        value=0.4,
-                        step=0.01)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            bg_threshold,
-            is_normal_image,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='normal')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_normal)
-    demo.queue().launch()
app_pose.py
DELETED
@@ -1,89 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_pose2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Human Pose')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    is_pose_image = gr.Checkbox(label='Is pose image',
-                                                value=False)
-                    gr.Markdown(
-                        'You can use [PoseMaker2](https://huggingface.co/spaces/jonigata/PoseMaker2) to create pose images.'
-                    )
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(label='OpenPose Resolution',
-                                                  minimum=128,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            is_pose_image,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='pose')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_pose)
-    demo.queue().launch()
app_scribble.py
DELETED
@@ -1,77 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_scribble2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Scribble Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='scribble')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_scribble)
-    demo.queue().launch()
app_scribble_interactive.py
DELETED
@@ -1,103 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_scribble2image_interactive.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-import numpy as np
-
-
-def create_canvas(w, h):
-    return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown(
-                '## Control Stable Diffusion with Interactive Scribbles')
-        with gr.Row():
-            with gr.Column():
-                canvas_width = gr.Slider(label='Canvas Width',
-                                         minimum=256,
-                                         maximum=512,
-                                         value=512,
-                                         step=1)
-                canvas_height = gr.Slider(label='Canvas Height',
-                                          minimum=256,
-                                          maximum=512,
-                                          value=512,
-                                          step=1)
-                create_button = gr.Button(label='Start',
-                                          value='Open drawing canvas!')
-                input_image = gr.Image(source='upload',
-                                       type='numpy',
-                                       tool='sketch')
-                gr.Markdown(
-                    value='Do not forget to change your brush width to make it thinner. (Gradio do not allow developers to set brush width so you need to do it manually.) '
-                    'Just click on the small pencil icon in the upper right corner of the above block.'
-                )
-                create_button.click(fn=create_canvas,
-                                    inputs=[canvas_width, canvas_height],
-                                    outputs=input_image,
-                                    queue=False)
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process, inputs=inputs, outputs=result)
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_scribble_interactive)
-    demo.queue().launch()
|
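The interactive variant above differs from app_scribble.py mainly in create_canvas, which seeds Gradio's sketch tool with an all-white image so that dark brush strokes become the scribble map. A quick sanity check of that helper (dimensions chosen only for illustration):

import numpy as np

def create_canvas(w, h):
    # All-white uint8 canvas; note the (h, w, 3) shape, height first.
    return np.zeros(shape=(h, w, 3), dtype=np.uint8) + 255

canvas = create_canvas(512, 256)
assert canvas.shape == (256, 512, 3)
assert canvas.min() == canvas.max() == 255  # uniformly white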
app_seg.py
DELETED
@@ -1,87 +0,0 @@
-# This file is adapted from https://github.com/lllyasviel/ControlNet/blob/f4748e3630d8141d7765e2bd9b1e348f47847707/gradio_seg2image.py
-# The original license file is LICENSE.ControlNet in this repo.
-import gradio as gr
-
-
-def create_demo(process, max_images=12, default_num_images=3):
-    with gr.Blocks() as demo:
-        with gr.Row():
-            gr.Markdown('## Control Stable Diffusion with Segmentation Maps')
-        with gr.Row():
-            with gr.Column():
-                input_image = gr.Image(source='upload', type='numpy')
-                prompt = gr.Textbox(label='Prompt')
-                run_button = gr.Button(label='Run')
-                with gr.Accordion('Advanced options', open=False):
-                    is_segmentation_map = gr.Checkbox(
-                        label='Is segmentation map', value=False)
-                    num_samples = gr.Slider(label='Images',
-                                            minimum=1,
-                                            maximum=max_images,
-                                            value=default_num_images,
-                                            step=1)
-                    image_resolution = gr.Slider(label='Image Resolution',
-                                                 minimum=256,
-                                                 maximum=512,
-                                                 value=512,
-                                                 step=256)
-                    detect_resolution = gr.Slider(
-                        label='Segmentation Resolution',
-                        minimum=128,
-                        maximum=512,
-                        value=512,
-                        step=1)
-                    num_steps = gr.Slider(label='Steps',
-                                          minimum=1,
-                                          maximum=100,
-                                          value=20,
-                                          step=1)
-                    guidance_scale = gr.Slider(label='Guidance Scale',
-                                               minimum=0.1,
-                                               maximum=30.0,
-                                               value=9.0,
-                                               step=0.1)
-                    seed = gr.Slider(label='Seed',
-                                     minimum=-1,
-                                     maximum=2147483647,
-                                     step=1,
-                                     randomize=True)
-                    a_prompt = gr.Textbox(
-                        label='Added Prompt',
-                        value='best quality, extremely detailed')
-                    n_prompt = gr.Textbox(
-                        label='Negative Prompt',
-                        value=
-                        'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                    )
-            with gr.Column():
-                result = gr.Gallery(label='Output',
-                                    show_label=False,
-                                    elem_id='gallery').style(grid=2,
-                                                             height='auto')
-        inputs = [
-            input_image,
-            prompt,
-            a_prompt,
-            n_prompt,
-            num_samples,
-            image_resolution,
-            detect_resolution,
-            num_steps,
-            guidance_scale,
-            seed,
-            is_segmentation_map,
-        ]
-        prompt.submit(fn=process, inputs=inputs, outputs=result)
-        run_button.click(fn=process,
-                         inputs=inputs,
-                         outputs=result,
-                         api_name='seg')
-    return demo
-
-
-if __name__ == '__main__':
-    from model import Model
-    model = Model()
-    demo = create_demo(model.process_seg)
-    demo.queue().launch()
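Unlike the scribble demos, this one appends two extra values, detect_resolution and is_segmentation_map, to the inputs list, and model.process_seg branches on the flag: an unchecked box means the upload is a photo that must first go through the UniFormer annotator, a checked box means it is already a segmentation map and only needs resizing. A condensed sketch of that branch (the helpers come from the ControlNet submodule; compare preprocess_seg in model.py below):

import cv2

# Condensed sketch of the is_segmentation_map branch in preprocess_seg.
# HWC3, resize_image, and apply_uniformer come from the ControlNet submodule.
def make_seg_control_image(input_image, image_resolution, detect_resolution,
                           is_segmentation_map):
    input_image = HWC3(input_image)
    if not is_segmentation_map:
        # Photo: predict a segmentation map, then resize it to match the
        # generation resolution.
        control_image = apply_uniformer(
            resize_image(input_image, detect_resolution))
        H, W = resize_image(input_image, image_resolution).shape[:2]
        control_image = cv2.resize(control_image, (W, H),
                                   interpolation=cv2.INTER_NEAREST)
    else:
        # Already a segmentation map: resizing is enough.
        control_image = resize_image(input_image, image_resolution)
    return control_image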
model.py
DELETED
@@ -1,644 +0,0 @@
-# This file is adapted from gradio_*.py in https://github.com/lllyasviel/ControlNet/tree/f4748e3630d8141d7765e2bd9b1e348f47847707
-# The original license file is LICENSE.ControlNet in this repo.
-from __future__ import annotations
-
-import gc
-import pathlib
-import sys
-
-import cv2
-import numpy as np
-import PIL.Image
-import torch
-from diffusers import (ControlNetModel, DiffusionPipeline,
-                       StableDiffusionControlNetPipeline,
-                       UniPCMultistepScheduler)
-
-repo_dir = pathlib.Path(__file__).parent
-submodule_dir = repo_dir / 'ControlNet'
-sys.path.append(submodule_dir.as_posix())
-
-from annotator.canny import apply_canny
-from annotator.hed import apply_hed, nms
-from annotator.midas import apply_midas
-from annotator.mlsd import apply_mlsd
-from annotator.openpose import apply_openpose
-from annotator.uniformer import apply_uniformer
-from annotator.util import HWC3, resize_image
-
-CONTROLNET_MODEL_IDS = {
-    'canny': 'lllyasviel/sd-controlnet-canny',
-    'hough': 'lllyasviel/sd-controlnet-mlsd',
-    'hed': 'lllyasviel/sd-controlnet-hed',
-    'scribble': 'lllyasviel/sd-controlnet-scribble',
-    'pose': 'lllyasviel/sd-controlnet-openpose',
-    'seg': 'lllyasviel/sd-controlnet-seg',
-    'depth': 'lllyasviel/sd-controlnet-depth',
-    'normal': 'lllyasviel/sd-controlnet-normal',
-}
-
-
-def download_all_controlnet_weights() -> None:
-    for model_id in CONTROLNET_MODEL_IDS.values():
-        ControlNetModel.from_pretrained(model_id)
-
-
-class Model:
-    def __init__(self,
-                 base_model_id: str = 'runwayml/stable-diffusion-v1-5',
-                 task_name: str = 'canny'):
-        self.device = torch.device(
-            'cuda:0' if torch.cuda.is_available() else 'cpu')
-        self.base_model_id = ''
-        self.task_name = ''
-        self.pipe = self.load_pipe(base_model_id, task_name)
-
-    def load_pipe(self, base_model_id: str, task_name) -> DiffusionPipeline:
-        if base_model_id == self.base_model_id and task_name == self.task_name and hasattr(
-                self, 'pipe'):
-            return self.pipe
-        model_id = CONTROLNET_MODEL_IDS[task_name]
-        controlnet = ControlNetModel.from_pretrained(model_id,
-                                                     torch_dtype=torch.float16)
-        pipe = StableDiffusionControlNetPipeline.from_pretrained(
-            base_model_id,
-            safety_checker=None,
-            controlnet=controlnet,
-            torch_dtype=torch.float16)
-        pipe.scheduler = UniPCMultistepScheduler.from_config(
-            pipe.scheduler.config)
-        pipe.enable_xformers_memory_efficient_attention()
-        pipe.to(self.device)
-        torch.cuda.empty_cache()
-        gc.collect()
-        self.base_model_id = base_model_id
-        self.task_name = task_name
-        return pipe
-
-    def set_base_model(self, base_model_id: str) -> str:
-        if not base_model_id or base_model_id == self.base_model_id:
-            return self.base_model_id
-        del self.pipe
-        torch.cuda.empty_cache()
-        gc.collect()
-        try:
-            self.pipe = self.load_pipe(base_model_id, self.task_name)
-        except Exception:
-            self.pipe = self.load_pipe(self.base_model_id, self.task_name)
-        return self.base_model_id
-
-    def load_controlnet_weight(self, task_name: str) -> None:
-        if task_name == self.task_name:
-            return
-        if 'controlnet' in self.pipe.__dict__:
-            del self.pipe.controlnet
-        torch.cuda.empty_cache()
-        gc.collect()
-        model_id = CONTROLNET_MODEL_IDS[task_name]
-        controlnet = ControlNetModel.from_pretrained(model_id,
-                                                     torch_dtype=torch.float16)
-        controlnet.to(self.device)
-        torch.cuda.empty_cache()
-        gc.collect()
-        self.pipe.controlnet = controlnet
-        self.task_name = task_name
-
-    def get_prompt(self, prompt: str, additional_prompt: str) -> str:
-        if not prompt:
-            prompt = additional_prompt
-        else:
-            prompt = f'{prompt}, {additional_prompt}'
-        return prompt
-
-    @torch.autocast('cuda')
-    def run_pipe(
-        self,
-        prompt: str,
-        negative_prompt: str,
-        control_image: PIL.Image.Image,
-        num_images: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-    ) -> list[PIL.Image.Image]:
-        if seed == -1:
-            seed = np.random.randint(0, np.iinfo(np.int64).max)
-        generator = torch.Generator().manual_seed(seed)
-        return self.pipe(prompt=prompt,
-                         negative_prompt=negative_prompt,
-                         guidance_scale=guidance_scale,
-                         num_images_per_prompt=num_images,
-                         num_inference_steps=num_steps,
-                         generator=generator,
-                         image=control_image).images
-
-    @staticmethod
-    def preprocess_canny(
-        input_image: np.ndarray,
-        image_resolution: int,
-        low_threshold: int,
-        high_threshold: int,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        image = resize_image(HWC3(input_image), image_resolution)
-        control_image = apply_canny(image, low_threshold, high_threshold)
-        control_image = HWC3(control_image)
-        vis_control_image = 255 - control_image
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            vis_control_image)
-
-    @torch.inference_mode()
-    def process_canny(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        low_threshold: int,
-        high_threshold: int,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_canny(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            low_threshold=low_threshold,
-            high_threshold=high_threshold,
-        )
-        self.load_controlnet_weight('canny')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_hough(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-        value_threshold: float,
-        distance_threshold: float,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        control_image = apply_mlsd(
-            resize_image(input_image, detect_resolution), value_threshold,
-            distance_threshold)
-        control_image = HWC3(control_image)
-        image = resize_image(input_image, image_resolution)
-        H, W = image.shape[:2]
-        control_image = cv2.resize(control_image, (W, H),
-                                   interpolation=cv2.INTER_NEAREST)
-
-        vis_control_image = 255 - cv2.dilate(
-            control_image, np.ones(shape=(3, 3), dtype=np.uint8), iterations=1)
-
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            vis_control_image)
-
-    @torch.inference_mode()
-    def process_hough(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        value_threshold: float,
-        distance_threshold: float,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_hough(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-            value_threshold=value_threshold,
-            distance_threshold=distance_threshold,
-        )
-        self.load_controlnet_weight('hough')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_hed(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        control_image = apply_hed(resize_image(input_image, detect_resolution))
-        control_image = HWC3(control_image)
-        image = resize_image(input_image, image_resolution)
-        H, W = image.shape[:2]
-        control_image = cv2.resize(control_image, (W, H),
-                                   interpolation=cv2.INTER_LINEAR)
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            control_image)
-
-    @torch.inference_mode()
-    def process_hed(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_hed(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-        )
-        self.load_controlnet_weight('hed')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_scribble(
-        input_image: np.ndarray,
-        image_resolution: int,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        image = resize_image(HWC3(input_image), image_resolution)
-        control_image = np.zeros_like(image, dtype=np.uint8)
-        control_image[np.min(image, axis=2) < 127] = 255
-        vis_control_image = 255 - control_image
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            vis_control_image)
-
-    @torch.inference_mode()
-    def process_scribble(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_scribble(
-            input_image=input_image,
-            image_resolution=image_resolution,
-        )
-        self.load_controlnet_weight('scribble')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_scribble_interactive(
-        input_image: np.ndarray,
-        image_resolution: int,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        image = resize_image(HWC3(input_image['mask'][:, :, 0]),
-                             image_resolution)
-        control_image = np.zeros_like(image, dtype=np.uint8)
-        control_image[np.min(image, axis=2) > 127] = 255
-        vis_control_image = 255 - control_image
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            vis_control_image)
-
-    @torch.inference_mode()
-    def process_scribble_interactive(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_scribble_interactive(
-            input_image=input_image,
-            image_resolution=image_resolution,
-        )
-        self.load_controlnet_weight('scribble')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_fake_scribble(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        control_image = apply_hed(resize_image(input_image, detect_resolution))
-        control_image = HWC3(control_image)
-        image = resize_image(input_image, image_resolution)
-        H, W = image.shape[:2]
-
-        control_image = cv2.resize(control_image, (W, H),
-                                   interpolation=cv2.INTER_LINEAR)
-        control_image = nms(control_image, 127, 3.0)
-        control_image = cv2.GaussianBlur(control_image, (0, 0), 3.0)
-        control_image[control_image > 4] = 255
-        control_image[control_image < 255] = 0
-
-        vis_control_image = 255 - control_image
-
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            vis_control_image)
-
-    @torch.inference_mode()
-    def process_fake_scribble(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_fake_scribble(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-        )
-        self.load_controlnet_weight('scribble')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_pose(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-        is_pose_image: bool,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        if not is_pose_image:
-            control_image, _ = apply_openpose(
-                resize_image(input_image, detect_resolution))
-            control_image = HWC3(control_image)
-            image = resize_image(input_image, image_resolution)
-            H, W = image.shape[:2]
-            control_image = cv2.resize(control_image, (W, H),
-                                       interpolation=cv2.INTER_NEAREST)
-        else:
-            control_image = resize_image(input_image, image_resolution)
-
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            control_image)
-
-    @torch.inference_mode()
-    def process_pose(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        is_pose_image: bool,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_pose(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-            is_pose_image=is_pose_image,
-        )
-        self.load_controlnet_weight('pose')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_seg(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-        is_segmentation_map: bool,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        if not is_segmentation_map:
-            control_image = apply_uniformer(
-                resize_image(input_image, detect_resolution))
-            image = resize_image(input_image, image_resolution)
-            H, W = image.shape[:2]
-            control_image = cv2.resize(control_image, (W, H),
-                                       interpolation=cv2.INTER_NEAREST)
-        else:
-            control_image = resize_image(input_image, image_resolution)
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            control_image)
-
-    @torch.inference_mode()
-    def process_seg(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        is_segmentation_map: bool,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_seg(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-            is_segmentation_map=is_segmentation_map,
-        )
-        self.load_controlnet_weight('seg')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_depth(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-        is_depth_image: bool,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        if not is_depth_image:
-            control_image, _ = apply_midas(
-                resize_image(input_image, detect_resolution))
-            control_image = HWC3(control_image)
-            image = resize_image(input_image, image_resolution)
-            H, W = image.shape[:2]
-            control_image = cv2.resize(control_image, (W, H),
-                                       interpolation=cv2.INTER_LINEAR)
-        else:
-            control_image = resize_image(input_image, image_resolution)
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            control_image)
-
-    @torch.inference_mode()
-    def process_depth(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        is_depth_image: bool,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_depth(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-            is_depth_image=is_depth_image,
-        )
-        self.load_controlnet_weight('depth')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
-
-    @staticmethod
-    def preprocess_normal(
-        input_image: np.ndarray,
-        image_resolution: int,
-        detect_resolution: int,
-        bg_threshold: float,
-        is_normal_image: bool,
-    ) -> tuple[PIL.Image.Image, PIL.Image.Image]:
-        input_image = HWC3(input_image)
-        if not is_normal_image:
-            _, control_image = apply_midas(resize_image(
-                input_image, detect_resolution),
-                                           bg_th=bg_threshold)
-            control_image = HWC3(control_image)
-            image = resize_image(input_image, image_resolution)
-            H, W = image.shape[:2]
-            control_image = cv2.resize(control_image, (W, H),
-                                       interpolation=cv2.INTER_LINEAR)
-        else:
-            control_image = resize_image(input_image, image_resolution)
-        return PIL.Image.fromarray(control_image), PIL.Image.fromarray(
-            control_image)
-
-    @torch.inference_mode()
-    def process_normal(
-        self,
-        input_image: np.ndarray,
-        prompt: str,
-        additional_prompt: str,
-        negative_prompt: str,
-        num_images: int,
-        image_resolution: int,
-        detect_resolution: int,
-        num_steps: int,
-        guidance_scale: float,
-        seed: int,
-        bg_threshold: float,
-        is_normal_image: bool,
-    ) -> list[PIL.Image.Image]:
-        control_image, vis_control_image = self.preprocess_normal(
-            input_image=input_image,
-            image_resolution=image_resolution,
-            detect_resolution=detect_resolution,
-            bg_threshold=bg_threshold,
-            is_normal_image=is_normal_image,
-        )
-        self.load_controlnet_weight('normal')
-        results = self.run_pipe(
-            prompt=self.get_prompt(prompt, additional_prompt),
-            negative_prompt=negative_prompt,
-            control_image=control_image,
-            num_images=num_images,
-            num_steps=num_steps,
-            guidance_scale=guidance_scale,
-            seed=seed,
-        )
-        return [vis_control_image] + results
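Taken together, model.py was the single backend all of the app_*.py files shared: each create_demo received one bound method of a Model instance. A minimal sketch of driving it without the Gradio UI (the image path and prompts are placeholders, and it assumes a CUDA machine with the annotator checkpoints in place):

import numpy as np
import PIL.Image

from model import Model

model = Model()  # loads the 'canny' ControlNet by default
input_image = np.asarray(PIL.Image.open('photo.png').convert('RGB'))

# Returns [visualized control image] + generated samples.
images = model.process_canny(
    input_image=input_image,
    prompt='a photo of a house',                           # placeholder
    additional_prompt='best quality, extremely detailed',
    negative_prompt='lowres, bad anatomy',                 # abbreviated
    num_images=1,
    image_resolution=512,
    num_steps=20,
    guidance_scale=9.0,
    seed=-1,             # -1 makes run_pipe draw a random seed
    low_threshold=100,   # Canny thresholds, illustrative values
    high_threshold=200,
)
images[1].save('result.png')  # images[0] is the edge visualization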
notebooks/notebook.ipynb
DELETED
@@ -1,80 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "8CnkIPtjn8Dc"
-   },
-   "outputs": [],
-   "source": [
-    "!git clone --recursive https://huggingface.co/spaces/hysts/ControlNet"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "IZlaYNTWoFPK"
-   },
-   "outputs": [],
-   "source": [
-    "%cd ControlNet"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "0zhLFnZUoWdp"
-   },
-   "outputs": [],
-   "source": [
-    "!cd ControlNet && git apply ../patch && cd .."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "P_fzYrLvoIcI"
-   },
-   "outputs": [],
-   "source": [
-    "!pip install -q -r requirements.txt"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "GOfGng5Woktd"
-   },
-   "outputs": [],
-   "source": [
-    "import app"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "id": "7Cued230ol7T"
-   },
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "accelerator": "GPU",
-  "colab": {
-   "provenance": []
-  },
-  "gpuClass": "standard",
-  "language_info": {
-   "name": "python"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 0
-}
patch
DELETED
@@ -1,128 +0,0 @@
-diff --git a/annotator/hed/__init__.py b/annotator/hed/__init__.py
-index 42d8dc6..1587035 100644
---- a/annotator/hed/__init__.py
-+++ b/annotator/hed/__init__.py
-@@ -1,8 +1,12 @@
-+import pathlib
-+
- import numpy as np
- import cv2
- import torch
- from einops import rearrange
-
-+root_dir = pathlib.Path(__file__).parents[2]
-+
-
- class Network(torch.nn.Module):
-     def __init__(self):
-@@ -64,7 +68,7 @@ class Network(torch.nn.Module):
-             torch.nn.Sigmoid()
-         )
-
--        self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load('./annotator/ckpts/network-bsds500.pth').items()})
-+        self.load_state_dict({strKey.replace('module', 'net'): tenWeight for strKey, tenWeight in torch.load(f'{root_dir}/annotator/ckpts/network-bsds500.pth').items()})
-     # end
-
-     def forward(self, tenInput):
-diff --git a/annotator/midas/api.py b/annotator/midas/api.py
-index 9fa305e..d8594ea 100644
---- a/annotator/midas/api.py
-+++ b/annotator/midas/api.py
-@@ -1,5 +1,7 @@
- # based on https://github.com/isl-org/MiDaS
-
-+import pathlib
-+
- import cv2
- import torch
- import torch.nn as nn
-@@ -10,10 +12,11 @@ from .midas.midas_net import MidasNet
- from .midas.midas_net_custom import MidasNet_small
- from .midas.transforms import Resize, NormalizeImage, PrepareForNet
-
-+root_dir = pathlib.Path(__file__).parents[2]
-
- ISL_PATHS = {
--    "dpt_large": "annotator/ckpts/dpt_large-midas-2f21e586.pt",
--    "dpt_hybrid": "annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
-+    "dpt_large": f"{root_dir}/annotator/ckpts/dpt_large-midas-2f21e586.pt",
-+    "dpt_hybrid": f"{root_dir}/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt",
-     "midas_v21": "",
-     "midas_v21_small": "",
- }
-diff --git a/annotator/mlsd/__init__.py b/annotator/mlsd/__init__.py
-index 75db717..f310fe6 100644
---- a/annotator/mlsd/__init__.py
-+++ b/annotator/mlsd/__init__.py
-@@ -1,3 +1,5 @@
-+import pathlib
-+
- import cv2
- import numpy as np
- import torch
-@@ -8,8 +10,9 @@ from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
- from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
- from .utils import pred_lines
-
-+root_dir = pathlib.Path(__file__).parents[2]
-
--model_path = './annotator/ckpts/mlsd_large_512_fp32.pth'
-+model_path = f'{root_dir}/annotator/ckpts/mlsd_large_512_fp32.pth'
- model = MobileV2_MLSD_Large()
- model.load_state_dict(torch.load(model_path), strict=True)
- model = model.cuda().eval()
-diff --git a/annotator/openpose/__init__.py b/annotator/openpose/__init__.py
-index 47d50a5..2369eed 100644
---- a/annotator/openpose/__init__.py
-+++ b/annotator/openpose/__init__.py
-@@ -1,4 +1,5 @@
- import os
-+import pathlib
- os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
-
- import torch
-@@ -7,8 +8,10 @@ from . import util
- from .body import Body
- from .hand import Hand
-
--body_estimation = Body('./annotator/ckpts/body_pose_model.pth')
--hand_estimation = Hand('./annotator/ckpts/hand_pose_model.pth')
-+root_dir = pathlib.Path(__file__).parents[2]
-+
-+body_estimation = Body(f'{root_dir}/annotator/ckpts/body_pose_model.pth')
-+hand_estimation = Hand(f'{root_dir}/annotator/ckpts/hand_pose_model.pth')
-
-
- def apply_openpose(oriImg, hand=False):
-diff --git a/annotator/uniformer/__init__.py b/annotator/uniformer/__init__.py
-index 500e53c..4061dbe 100644
---- a/annotator/uniformer/__init__.py
-+++ b/annotator/uniformer/__init__.py
-@@ -1,9 +1,12 @@
-+import pathlib
-+
- from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
- from annotator.uniformer.mmseg.core.evaluation import get_palette
-
-+root_dir = pathlib.Path(__file__).parents[2]
-
--checkpoint_file = "annotator/ckpts/upernet_global_small.pth"
--config_file = 'annotator/uniformer/exp/upernet_global_small/config.py'
-+checkpoint_file = f"{root_dir}/annotator/ckpts/upernet_global_small.pth"
-+config_file = f'{root_dir}/annotator/uniformer/exp/upernet_global_small/config.py'
- model = init_segmentor(config_file, checkpoint_file).cuda()
-
-
-diff --git a/annotator/util.py b/annotator/util.py
-index 7cde937..10a6d58 100644
---- a/annotator/util.py
-+++ b/annotator/util.py
-@@ -25,7 +25,7 @@ def resize_image(input_image, resolution):
-     H, W, C = input_image.shape
-     H = float(H)
-     W = float(W)
--    k = float(resolution) / min(H, W)
-+    k = float(resolution) / max(H, W)
-     H *= k
-     W *= k
-     H = int(np.round(H / 64.0)) * 64
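The final hunk of this patch is the only behavioral change rather than a checkpoint-path fix: resize_image now divides by max(H, W) instead of min(H, W), so the requested resolution caps the image's longer side (bounding memory use) rather than its shorter side. A small worked example of the difference (dimensions chosen for illustration; both variants round to multiples of 64 as in the original function):

import numpy as np

def scaled_size(h, w, resolution, use_max):
    # Mirrors resize_image: scale so one side hits `resolution`,
    # then round both sides to multiples of 64.
    k = float(resolution) / (max(h, w) if use_max else min(h, w))
    return (int(np.round(h * k / 64.0)) * 64,
            int(np.round(w * k / 64.0)) * 64)

print(scaled_size(768, 1024, 512, use_max=False))  # (512, 704): shorter side pinned
print(scaled_size(768, 1024, 512, use_max=True))   # (384, 512): longer side pinned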
requirements.txt
DELETED
@@ -1,22 +0,0 @@
-addict==2.4.0
-albumentations==1.3.0
-einops==0.6.0
-git+https://github.com/huggingface/accelerate@78151f8
-git+https://github.com/huggingface/diffusers@fa6d52d
-gradio==3.22.1
-imageio==2.25.0
-imageio-ffmpeg==0.4.8
-kornia==0.6.9
-omegaconf==2.3.0
-open-clip-torch==2.13.0
-opencv-contrib-python==4.7.0.68
-opencv-python-headless==4.7.0.68
-prettytable==3.6.0
-pytorch-lightning==1.9.0
-safetensors==0.2.8
-timm==0.6.12
-torch==1.13.1
-torchvision==0.14.1
-transformers==4.26.1
-xformers==0.0.16
-yapf==0.32.0
style.css
DELETED
@@ -1,3 +0,0 @@
-h1 {
-  text-align: center;
-}