ddoc committed on
Commit
d8c73df
·
1 Parent(s): b30dc4f

Upload 35 files

Browse files
.github/ISSUE_TEMPLATE/bug_report.yaml ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Bug report
2
+ description: Create a report
3
+ title: "[Bug]: "
4
+
5
+ body:
6
+ - type: textarea
7
+ attributes:
8
+ label: Describe the bug
9
+ description: A clear and concise description of what the bug is.
10
+ placeholder: |
11
+ Any language accepted
12
+ 아무 언어 사용가능
13
+ すべての言語に対応
14
+ 接受所有语言
15
+ Se aceptan todos los idiomas
16
+ Alle Sprachen werden akzeptiert
17
+ Toutes les langues sont acceptées
18
+ Принимаются все языки
19
+
20
+ - type: textarea
21
+ attributes:
22
+ label: The entire console log, including python version information, webui version, commit hash, commandline arguments and error traces.
23
+ description: |
24
+ The full console log of your terminal.
25
+ From `Python ..., Version: ..., Commit hash: ...` to the end.
26
+ placeholder: |
27
+ Python ...
28
+ Version: ...
29
+ Commit hash: ...
30
+ Installing requirements
31
+ ...
32
+
33
+ Launching Web UI with arguments: ...
34
+ [-] ADetailer initialized. version: ...
35
+ ...
36
+ render: Shell
37
+ validations:
38
+ required: true
39
+
40
+ - type: textarea
41
+ attributes:
42
+ label: List of installed extensions
.github/ISSUE_TEMPLATE/feature_request.yaml ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Feature request
2
+ description: Suggest an idea for this project
3
+ title: "[Feature Request]: "
4
+
5
+ body:
6
+ - type: textarea
7
+ attributes:
8
+ label: Is your feature request related to a problem? Please describe.
9
+ description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
10
+
11
+ - type: textarea
12
+ attributes:
13
+ label: Describe the solution you'd like
14
+ description: A clear and concise description of what you want to happen.
15
+
16
+ - type: textarea
17
+ attributes:
18
+ label: Describe alternatives you've considered
19
+ description: A clear and concise description of any alternative solutions or features you've considered.
20
+
21
+ - type: textarea
22
+ attributes:
23
+ label: Additional context
24
+ description: Add any other context or screenshots about the feature request here.
.github/workflows/stale.yml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: 'Close stale issues and PRs'
2
+ on:
3
+ schedule:
4
+ - cron: '30 1 * * *'
5
+
6
+ jobs:
7
+ stale:
8
+ runs-on: ubuntu-latest
9
+ steps:
10
+ - uses: actions/stale@v8
11
+ with:
12
+ days-before-stale: 30
13
+ days-before-close: 5
.gitignore ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
2
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python,visualstudiocode
3
+
4
+ ### Python ###
5
+ # Byte-compiled / optimized / DLL files
6
+ __pycache__/
7
+ *.py[cod]
8
+ *$py.class
9
+
10
+ # C extensions
11
+ *.so
12
+
13
+ # Distribution / packaging
14
+ .Python
15
+ build/
16
+ develop-eggs/
17
+ dist/
18
+ downloads/
19
+ eggs/
20
+ .eggs/
21
+ lib/
22
+ lib64/
23
+ parts/
24
+ sdist/
25
+ var/
26
+ wheels/
27
+ share/python-wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ # Usually these files are written by a python script from a template
35
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
36
+ *.manifest
37
+ *.spec
38
+
39
+ # Installer logs
40
+ pip-log.txt
41
+ pip-delete-this-directory.txt
42
+
43
+ # Unit test / coverage reports
44
+ htmlcov/
45
+ .tox/
46
+ .nox/
47
+ .coverage
48
+ .coverage.*
49
+ .cache
50
+ nosetests.xml
51
+ coverage.xml
52
+ *.cover
53
+ *.py,cover
54
+ .hypothesis/
55
+ .pytest_cache/
56
+ cover/
57
+
58
+ # Translations
59
+ *.mo
60
+ *.pot
61
+
62
+ # Django stuff:
63
+ *.log
64
+ local_settings.py
65
+ db.sqlite3
66
+ db.sqlite3-journal
67
+
68
+ # Flask stuff:
69
+ instance/
70
+ .webassets-cache
71
+
72
+ # Scrapy stuff:
73
+ .scrapy
74
+
75
+ # Sphinx documentation
76
+ docs/_build/
77
+
78
+ # PyBuilder
79
+ .pybuilder/
80
+ target/
81
+
82
+ # Jupyter Notebook
83
+ .ipynb_checkpoints
84
+
85
+ # IPython
86
+ profile_default/
87
+ ipython_config.py
88
+
89
+ # pyenv
90
+ # For a library or package, you might want to ignore these files since the code is
91
+ # intended to run in multiple environments; otherwise, check them in:
92
+ # .python-version
93
+
94
+ # pipenv
95
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
96
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
97
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
98
+ # install all needed dependencies.
99
+ #Pipfile.lock
100
+
101
+ # poetry
102
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
103
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
104
+ # commonly ignored for libraries.
105
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
106
+ #poetry.lock
107
+
108
+ # pdm
109
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
110
+ #pdm.lock
111
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
112
+ # in version control.
113
+ # https://pdm.fming.dev/#use-with-ide
114
+ .pdm.toml
115
+
116
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
117
+ __pypackages__/
118
+
119
+ # Celery stuff
120
+ celerybeat-schedule
121
+ celerybeat.pid
122
+
123
+ # SageMath parsed files
124
+ *.sage.py
125
+
126
+ # Environments
127
+ .env
128
+ .venv
129
+ env/
130
+ venv/
131
+ ENV/
132
+ env.bak/
133
+ venv.bak/
134
+
135
+ # Spyder project settings
136
+ .spyderproject
137
+ .spyproject
138
+
139
+ # Rope project settings
140
+ .ropeproject
141
+
142
+ # mkdocs documentation
143
+ /site
144
+
145
+ # mypy
146
+ .mypy_cache/
147
+ .dmypy.json
148
+ dmypy.json
149
+
150
+ # Pyre type checker
151
+ .pyre/
152
+
153
+ # pytype static type analyzer
154
+ .pytype/
155
+
156
+ # Cython debug symbols
157
+ cython_debug/
158
+
159
+ # PyCharm
160
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
161
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
162
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
163
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
164
+ #.idea/
165
+
166
+ ### Python Patch ###
167
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
168
+ poetry.toml
169
+
170
+ # ruff
171
+ .ruff_cache/
172
+
173
+ # LSP config files
174
+ pyrightconfig.json
175
+
176
+ ### VisualStudioCode ###
177
+ .vscode/*
178
+ !.vscode/settings.json
179
+ !.vscode/tasks.json
180
+ !.vscode/launch.json
181
+ !.vscode/extensions.json
182
+ !.vscode/*.code-snippets
183
+
184
+ # Local History for Visual Studio Code
185
+ .history/
186
+
187
+ # Built Visual Studio Code Extensions
188
+ *.vsix
189
+
190
+ ### VisualStudioCode Patch ###
191
+ # Ignore all local history of files
192
+ .history
193
+ .ionide
194
+
195
+ # End of https://www.toptal.com/developers/gitignore/api/python,visualstudiocode
196
+ *.ipynb
.pre-commit-config.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.4.0
4
+ hooks:
5
+ - id: trailing-whitespace
6
+ args: [--markdown-linebreak-ext=md]
7
+ - id: end-of-file-fixer
8
+ - id: mixed-line-ending
9
+
10
+ - repo: https://github.com/astral-sh/ruff-pre-commit
11
+ rev: "v0.0.277"
12
+ hooks:
13
+ - id: ruff
14
+ args: [--fix, --exit-non-zero-on-fix]
15
+
16
+ - repo: https://github.com/psf/black
17
+ rev: 23.3.0
18
+ hooks:
19
+ - id: black
CHANGELOG.md ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Changelog
2
+
3
+ ## 2023-07-20
4
+
5
+ - v23.7.8
6
+ - `paste_field_names` 추가했던 것을 되돌림
7
+
8
+ ## 2023-07-19
9
+
10
+ - v23.7.7
11
+ - 인페인팅 단계에서 별도의 샘플러를 선택할 수 있게 옵션을 추가함 (xyz그리드에도 추가)
12
+ - webui 1.0.0-pre 이하 버전에서 batch index 문제 수정
13
+ - 스크립트에 `paste_field_names`을 추가함. 사용되는지는 모르겠음
14
+
15
+ ## 2023-07-16
16
+
17
+ - v23.7.6
18
+ - `ultralytics 8.0.135`에 추가된 cpuinfo 기능을 위해 `py-cpuinfo`를 미리 설치하게 함. (미리 설치 안하면 cpu나 mps사용할 때 재시작해야함)
19
+ - init_image가 RGB 모드가 아닐 때 RGB로 변경.
20
+
21
+ ## 2023-07-07
22
+
23
+ - v23.7.4
24
+ - batch count > 1일때 프롬프트의 인덱스 문제 수정
25
+
26
+ - v23.7.5
27
+ - i2i의 `cached_uc`와 `cached_c`가 p의 `cached_uc`와 `cached_c`가 다른 인스턴스가 되도록 수정
28
+
29
+ ## 2023-07-05
30
+
31
+ - v23.7.3
32
+ - 버그 수정
33
+ - `object()`가 json 직렬화 안되는 문제
34
+ - `process`를 호출함에 따라 배치 카운트가 2이상일 때, all_prompts가 고정되는 문제
35
+ - `ad-before`와 `ad-preview` 이미지 파일명이 실제 파일명과 다른 문제
36
+ - pydantic 2.0 호환성 문제
37
+
38
+ ## 2023-07-04
39
+
40
+ - v23.7.2
41
+ - `mediapipe_face_mesh_eyes_only` 모델 추가: `mediapipe_face_mesh`로 감지한 뒤 눈만 사용함.
42
+ - 매 배치 시작 전에 `scripts.postprocess`를, 후에 `scripts.process`를 호출함.
43
+ - 컨트롤넷을 사용하면 소요 시간이 조금 늘어나지만 몇몇 문제 해결에 도움이 됨.
44
+ - `lora_block_weight`를 스크립트 화이트리스트에 추가함.
45
+ - 한번이라도 ADetailer를 사용한 사람은 수동으로 추가해야함.
46
+
47
+ ## 2023-07-03
48
+
49
+ - v23.7.1
50
+ - `process_images`를 진행한 뒤 `StableDiffusionProcessing` 오브젝트의 close를 호출함
51
+ - api 호출로 사용했는지 확인하는 속성 추가
52
+ - `NansException`이 발생했을 때 중지하지 않고 남은 과정 계속 진행함
53
+
54
+ ## 2023-07-02
55
+
56
+ - v23.7.0
57
+ - `NansException`이 발생하면 로그에 표시하고 원본 이미지를 반환하게 설정
58
+ - `rich`를 사용한 에러 트레이싱
59
+ - install.py에 `rich` 추가
60
+ - 생성 중에 컴포넌트의 값을 변경하면 args의 값도 함께 변경되는 문제 수정 (issue #180)
61
+ - 터미널 로그로 ad_prompt와 ad_negative_prompt에 적용된 실제 프롬프트 확인할 수 있음 (입력과 다를 경우에만)
62
+
63
+ ## 2023-06-28
64
+
65
+ - v23.6.4
66
+ - 최대 모델 수 5 -> 10개
67
+ - ad_prompt와 ad_negative_prompt에 빈칸으로 놔두면 입력 프롬프트가 사용된다는 문구 추가
68
+ - huggingface 모델 다운로드 실패시 로깅
69
+ - 1st 모델이 `None`일 경우 나머지 입력을 무시하던 문제 수정
70
+ - `--use-cpu` 에 `adetailer` 입력 시 cpu로 yolo모델을 사용함
71
+
72
+ ## 2023-06-20
73
+
74
+ - v23.6.3
75
+ - 컨트롤넷 inpaint 모델에 대해, 3가지 모듈을 사용할 수 있도록 함
76
+ - Noise Multiplier 옵션 추가 (PR #149)
77
+ - pydantic 최소 버전 1.10.8로 설정 (Issue #146)
78
+
79
+ ## 2023-06-05
80
+
81
+ - v23.6.2
82
+ - xyz_grid에서 ADetailer를 사용할 수 있게함.
83
+ - 8가지 옵션만 1st 탭에 적용되도록 함.
84
+
85
+ ## 2023-06-01
86
+
87
+ - v23.6.1
88
+ - `inpaint, scribble, lineart, openpose, tile` 5가지 컨트롤넷 모델 지원 (PR #107)
89
+ - controlnet guidance start, end 인자 추가 (PR #107)
90
+ `modules.extensions`를 사용하여 컨트롤넷 확장을 불러오고 경로를 알아내도록 변경
91
+ - ui에서 컨트롤넷을 별도 함수로 분리
92
+
93
+ ## 2023-05-30
94
+
95
+ - v23.6.0
96
+ - 스크립트의 이름을 `After Detailer`에서 `ADetailer`로 변경
97
+ - API 사용자는 변경 필요함
98
+ - 몇몇 설정 변경
99
+ - `ad_conf` → `ad_confidence`. 0~100 사이의 int → 0.0~1.0 사이의 float
100
+ - `ad_inpaint_full_res` → `ad_inpaint_only_masked`
101
+ - `ad_inpaint_full_res_padding` → `ad_inpaint_only_masked_padding`
102
+ - mediapipe face mesh 모델 추가
103
+ - mediapipe 최소 버전 `0.10.0`
104
+
105
+ - rich traceback 제거함
106
+ - huggingface 다운로드 실패할 때 에러가 나지 않게 하고 해당 모델을 제거함
107
+
108
+ ## 2023-05-26
109
+
110
+ - v23.5.19
111
+ - 1번째 탭에도 `None` 옵션을 추가함
112
+ - api로 ad controlnet model에 inpaint가 아닌 다른 컨트롤넷 모델을 사용하지 못하도록 막음
113
+ - adetailer 진행중에 total tqdm 진행바 업데이트를 멈춤
114
+ state.interrupted 상태에서 adetailer 과정을 중지함
115
+ - 컨트롤넷 process를 각 batch가 끝난 순간에만 호출하도록 변경
116
+
117
+ ### 2023-05-25
118
+
119
+ - v23.5.18
120
+ - 컨트롤넷 관련 수정
121
+ - unit의 `input_mode`를 `SIMPLE`로 모두 변경
122
+ - 컨트롤넷 유넷 훅과 하이잭 함수들을 adetailer를 실행할 때에만 되돌리는 기능 추가
123
+ - adetailer 처리가 끝난 뒤 컨트롤넷 스크립트의 process를 다시 진행함. (batch count 2 이상일때의 문제 해결)
124
+ - 기본 활성 스크립트 목록에서 컨트롤넷을 뺌
125
+
126
+ ### 2023-05-22
127
+
128
+ - v23.5.17
129
+ 컨트롤넷 확장이 있으면 컨트롤넷 스크립트를 활성화함. (컨트롤넷 관련 문제 해결)
130
+ - 모든 컴포넌트에 elem_id 설정
131
+ - ui에 버전을 표시함
132
+
133
+
134
+ ### 2023-05-19
135
+
136
+ - v23.5.16
137
+ - 추가한 옵션
138
+ - Mask min/max ratio
139
+ - Mask merge mode
140
+ - Restore faces after ADetailer
141
+ - 옵션들을 Accordion으로 묶음
142
+
143
+ ### 2023-05-18
144
+
145
+ - v23.5.15
146
+ - 필요한 것만 임포트하도록 변경 (vae 로딩 오류 없어짐. 로딩 속도 빨라짐)
147
+
148
+ ### 2023-05-17
149
+
150
+ - v23.5.14
151
+ - `[SKIP]`으로 ad prompt 일부를 건너뛰는 기능 추가
152
+ - bbox 정렬 옵션 추가
153
+ - sd_webui 타입힌트를 만들어냄
154
+ - enable checker와 관련된 api 오류 수정?
155
+
156
+ ### 2023-05-15
157
+
158
+ - v23.5.13
159
+ - `[SEP]`으로 ad prompt를 분리하여 적용하는 기능 추가
160
+ - enable checker를 다시 pydantic으로 변경함
161
+ - ui 관련 함수를 adetailer.ui 폴더로 분리함
162
+ - controlnet을 사용할 때 모든 controlnet unit 비활성화
163
+ - adetailer 폴더가 없으면 만들게 함
164
+
165
+ ### 2023-05-13
166
+
167
+ - v23.5.12
168
+ - `ad_enable`을 제외한 입력이 dict타입으로 들어오도록 변경
169
+ - web api로 사용할 때에 특히 사용하기 쉬움
170
+ - web api breaking change
171
+ - `mask_preprocess` 인자를 넣지 않았던 오류 수정 (PR #47)
172
+ - huggingface에서 모델을 다운로드하지 않는 옵션 추가 `--ad-no-huggingface`
173
+
174
+ ### 2023-05-12
175
+
176
+ - v23.5.11
177
+ - `ultralytics` 알람 제거
178
+ - 필요없는 exif 인자 더 제거함
179
+ - `use separate steps` 옵션 추가
180
+ - ui 배치를 조정함
181
+
182
+ ### 2023-05-09
183
+
184
+ - v23.5.10
185
+ - 선택한 스크립트만 ADetailer에 적용하는 옵션 추가, 기본값 `True`. 설정 탭에서 지정가능.
186
+ - 기본값: `dynamic_prompting,dynamic_thresholding,wildcards,wildcard_recursive`
187
+ - `person_yolov8s-seg.pt` 모델 추가
188
+ - `ultralytics`의 최소 버전을 `8.0.97`로 설정 (C:\\ 문제 해결된 버전)
189
+
190
+ ### 2023-05-08
191
+
192
+ - v23.5.9
193
+ - 2가지 이상의 모델을 사용할 수 있음. 기본값: 2, 최대: 5
194
+ - segment 모델을 사용할 수 있게 함. `person_yolov8n-seg.pt` 추가
195
+
196
+ ### 2023-05-07
197
+
198
+ - v23.5.8
199
+ - 프롬프트와 네거티브 프롬프트에 방향키 지원 (PR #24)
200
+ - `mask_preprocess`를 추가함. 이전 버전과 시드값이 달라질 가능성 있음!
201
+ - 이미지 처리가 일어났을 때에만 before이미지를 저장함
202
+ - 설정창의 레이블을 ADetailer 대신 더 적절하게 수정함
203
+
204
+ ### 2023-05-06
205
+
206
+ - v23.5.7
207
+ - `ad_use_cfg_scale` 옵션 추가. cfg 스케일을 따로 사용할지 말지 결정함.
208
+ - `ad_enable` 기본값을 `True`에서 `False`로 변경
209
+ - `ad_model`의 기본값을 `None`에서 첫번째 모델로 변경
210
+ - 최소 2개의 입력(ad_enable, ad_model)만 들어오면 작동하게 변경.
211
+
212
+ - v23.5.7.post0
213
+ - `init_controlnet_ext`을 controlnet_exists == True일때에만 실행
214
+ - webui를 C드라이브 바로 밑에 설치한 사람들에게 `ultralytics` 경고 표시
215
+
216
+ ### 2023-05-05 (어린이날)
217
+
218
+ - v23.5.5
219
+ - `Save images before ADetailer` 옵션 추가
220
+ - 입력으로 들어온 인자와 ALL_ARGS의 길이가 다르면 에러메세지
221
+ - README.md에 설치방법 추가
222
+
223
+ - v23.5.6
224
+ - get_args에서 IndexError가 발생하면 자세한 에러메세지를 볼 수 있음
225
+ - AdetailerArgs에 extra_params 내장
226
+ - scripts_args를 딥카피함
227
+ - postprocess_image를 약간 분리함
228
+
229
+ - v23.5.6.post0
230
+ - `init_controlnet_ext`에서 에러메세지를 자세히 볼 수 있음
231
+
232
+ ### 2023-05-04
233
+
234
+ - v23.5.4
235
+ - use pydantic for arguments validation
236
+ - revert: ad_model to `None` as default
237
+ - revert: `__future__` imports
238
+ - lazily import yolo and mediapipe
239
+
240
+ ### 2023-05-03
241
+
242
+ - v23.5.3.post0
243
+ - remove `__future__` imports
244
+ - change to copy scripts and scripts args
245
+
246
+ - v23.5.3.post1
247
+ - change default ad_model from `None`
248
+
249
+ ### 2023-05-02
250
+
251
+ - v23.5.3
252
+ - Remove `None` from model list and add `Enable ADetailer` checkbox.
253
+ - install.py `skip_install` fix.
LICENSE.md ADDED
@@ -0,0 +1,662 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ GNU AFFERO GENERAL PUBLIC LICENSE
3
+ Version 3, 19 November 2007
4
+
5
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
6
+ Everyone is permitted to copy and distribute verbatim copies
7
+ of this license document, but changing it is not allowed.
8
+
9
+ Preamble
10
+
11
+ The GNU Affero General Public License is a free, copyleft license for
12
+ software and other kinds of works, specifically designed to ensure
13
+ cooperation with the community in the case of network server software.
14
+
15
+ The licenses for most software and other practical works are designed
16
+ to take away your freedom to share and change the works. By contrast,
17
+ our General Public Licenses are intended to guarantee your freedom to
18
+ share and change all versions of a program--to make sure it remains free
19
+ software for all its users.
20
+
21
+ When we speak of free software, we are referring to freedom, not
22
+ price. Our General Public Licenses are designed to make sure that you
23
+ have the freedom to distribute copies of free software (and charge for
24
+ them if you wish), that you receive source code or can get it if you
25
+ want it, that you can change the software or use pieces of it in new
26
+ free programs, and that you know you can do these things.
27
+
28
+ Developers that use our General Public Licenses protect your rights
29
+ with two steps: (1) assert copyright on the software, and (2) offer
30
+ you this License which gives you legal permission to copy, distribute
31
+ and/or modify the software.
32
+
33
+ A secondary benefit of defending all users' freedom is that
34
+ improvements made in alternate versions of the program, if they
35
+ receive widespread use, become available for other developers to
36
+ incorporate. Many developers of free software are heartened and
37
+ encouraged by the resulting cooperation. However, in the case of
38
+ software used on network servers, this result may fail to come about.
39
+ The GNU General Public License permits making a modified version and
40
+ letting the public access it on a server without ever releasing its
41
+ source code to the public.
42
+
43
+ The GNU Affero General Public License is designed specifically to
44
+ ensure that, in such cases, the modified source code becomes available
45
+ to the community. It requires the operator of a network server to
46
+ provide the source code of the modified version running there to the
47
+ users of that server. Therefore, public use of a modified version, on
48
+ a publicly accessible server, gives the public access to the source
49
+ code of the modified version.
50
+
51
+ An older license, called the Affero General Public License and
52
+ published by Affero, was designed to accomplish similar goals. This is
53
+ a different license, not a version of the Affero GPL, but Affero has
54
+ released a new version of the Affero GPL which permits relicensing under
55
+ this license.
56
+
57
+ The precise terms and conditions for copying, distribution and
58
+ modification follow.
59
+
60
+ TERMS AND CONDITIONS
61
+
62
+ 0. Definitions.
63
+
64
+ "This License" refers to version 3 of the GNU Affero General Public License.
65
+
66
+ "Copyright" also means copyright-like laws that apply to other kinds of
67
+ works, such as semiconductor masks.
68
+
69
+ "The Program" refers to any copyrightable work licensed under this
70
+ License. Each licensee is addressed as "you". "Licensees" and
71
+ "recipients" may be individuals or organizations.
72
+
73
+ To "modify" a work means to copy from or adapt all or part of the work
74
+ in a fashion requiring copyright permission, other than the making of an
75
+ exact copy. The resulting work is called a "modified version" of the
76
+ earlier work or a work "based on" the earlier work.
77
+
78
+ A "covered work" means either the unmodified Program or a work based
79
+ on the Program.
80
+
81
+ To "propagate" a work means to do anything with it that, without
82
+ permission, would make you directly or secondarily liable for
83
+ infringement under applicable copyright law, except executing it on a
84
+ computer or modifying a private copy. Propagation includes copying,
85
+ distribution (with or without modification), making available to the
86
+ public, and in some countries other activities as well.
87
+
88
+ To "convey" a work means any kind of propagation that enables other
89
+ parties to make or receive copies. Mere interaction with a user through
90
+ a computer network, with no transfer of a copy, is not conveying.
91
+
92
+ An interactive user interface displays "Appropriate Legal Notices"
93
+ to the extent that it includes a convenient and prominently visible
94
+ feature that (1) displays an appropriate copyright notice, and (2)
95
+ tells the user that there is no warranty for the work (except to the
96
+ extent that warranties are provided), that licensees may convey the
97
+ work under this License, and how to view a copy of this License. If
98
+ the interface presents a list of user commands or options, such as a
99
+ menu, a prominent item in the list meets this criterion.
100
+
101
+ 1. Source Code.
102
+
103
+ The "source code" for a work means the preferred form of the work
104
+ for making modifications to it. "Object code" means any non-source
105
+ form of a work.
106
+
107
+ A "Standard Interface" means an interface that either is an official
108
+ standard defined by a recognized standards body, or, in the case of
109
+ interfaces specified for a particular programming language, one that
110
+ is widely used among developers working in that language.
111
+
112
+ The "System Libraries" of an executable work include anything, other
113
+ than the work as a whole, that (a) is included in the normal form of
114
+ packaging a Major Component, but which is not part of that Major
115
+ Component, and (b) serves only to enable use of the work with that
116
+ Major Component, or to implement a Standard Interface for which an
117
+ implementation is available to the public in source code form. A
118
+ "Major Component", in this context, means a major essential component
119
+ (kernel, window system, and so on) of the specific operating system
120
+ (if any) on which the executable work runs, or a compiler used to
121
+ produce the work, or an object code interpreter used to run it.
122
+
123
+ The "Corresponding Source" for a work in object code form means all
124
+ the source code needed to generate, install, and (for an executable
125
+ work) run the object code and to modify the work, including scripts to
126
+ control those activities. However, it does not include the work's
127
+ System Libraries, or general-purpose tools or generally available free
128
+ programs which are used unmodified in performing those activities but
129
+ which are not part of the work. For example, Corresponding Source
130
+ includes interface definition files associated with source files for
131
+ the work, and the source code for shared libraries and dynamically
132
+ linked subprograms that the work is specifically designed to require,
133
+ such as by intimate data communication or control flow between those
134
+ subprograms and other parts of the work.
135
+
136
+ The Corresponding Source need not include anything that users
137
+ can regenerate automatically from other parts of the Corresponding
138
+ Source.
139
+
140
+ The Corresponding Source for a work in source code form is that
141
+ same work.
142
+
143
+ 2. Basic Permissions.
144
+
145
+ All rights granted under this License are granted for the term of
146
+ copyright on the Program, and are irrevocable provided the stated
147
+ conditions are met. This License explicitly affirms your unlimited
148
+ permission to run the unmodified Program. The output from running a
149
+ covered work is covered by this License only if the output, given its
150
+ content, constitutes a covered work. This License acknowledges your
151
+ rights of fair use or other equivalent, as provided by copyright law.
152
+
153
+ You may make, run and propagate covered works that you do not
154
+ convey, without conditions so long as your license otherwise remains
155
+ in force. You may convey covered works to others for the sole purpose
156
+ of having them make modifications exclusively for you, or provide you
157
+ with facilities for running those works, provided that you comply with
158
+ the terms of this License in conveying all material for which you do
159
+ not control copyright. Those thus making or running the covered works
160
+ for you must do so exclusively on your behalf, under your direction
161
+ and control, on terms that prohibit them from making any copies of
162
+ your copyrighted material outside their relationship with you.
163
+
164
+ Conveying under any other circumstances is permitted solely under
165
+ the conditions stated below. Sublicensing is not allowed; section 10
166
+ makes it unnecessary.
167
+
168
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
169
+
170
+ No covered work shall be deemed part of an effective technological
171
+ measure under any applicable law fulfilling obligations under article
172
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
173
+ similar laws prohibiting or restricting circumvention of such
174
+ measures.
175
+
176
+ When you convey a covered work, you waive any legal power to forbid
177
+ circumvention of technological measures to the extent such circumvention
178
+ is effected by exercising rights under this License with respect to
179
+ the covered work, and you disclaim any intention to limit operation or
180
+ modification of the work as a means of enforcing, against the work's
181
+ users, your or third parties' legal rights to forbid circumvention of
182
+ technological measures.
183
+
184
+ 4. Conveying Verbatim Copies.
185
+
186
+ You may convey verbatim copies of the Program's source code as you
187
+ receive it, in any medium, provided that you conspicuously and
188
+ appropriately publish on each copy an appropriate copyright notice;
189
+ keep intact all notices stating that this License and any
190
+ non-permissive terms added in accord with section 7 apply to the code;
191
+ keep intact all notices of the absence of any warranty; and give all
192
+ recipients a copy of this License along with the Program.
193
+
194
+ You may charge any price or no price for each copy that you convey,
195
+ and you may offer support or warranty protection for a fee.
196
+
197
+ 5. Conveying Modified Source Versions.
198
+
199
+ You may convey a work based on the Program, or the modifications to
200
+ produce it from the Program, in the form of source code under the
201
+ terms of section 4, provided that you also meet all of these conditions:
202
+
203
+ a) The work must carry prominent notices stating that you modified
204
+ it, and giving a relevant date.
205
+
206
+ b) The work must carry prominent notices stating that it is
207
+ released under this License and any conditions added under section
208
+ 7. This requirement modifies the requirement in section 4 to
209
+ "keep intact all notices".
210
+
211
+ c) You must license the entire work, as a whole, under this
212
+ License to anyone who comes into possession of a copy. This
213
+ License will therefore apply, along with any applicable section 7
214
+ additional terms, to the whole of the work, and all its parts,
215
+ regardless of how they are packaged. This License gives no
216
+ permission to license the work in any other way, but it does not
217
+ invalidate such permission if you have separately received it.
218
+
219
+ d) If the work has interactive user interfaces, each must display
220
+ Appropriate Legal Notices; however, if the Program has interactive
221
+ interfaces that do not display Appropriate Legal Notices, your
222
+ work need not make them do so.
223
+
224
+ A compilation of a covered work with other separate and independent
225
+ works, which are not by their nature extensions of the covered work,
226
+ and which are not combined with it such as to form a larger program,
227
+ in or on a volume of a storage or distribution medium, is called an
228
+ "aggregate" if the compilation and its resulting copyright are not
229
+ used to limit the access or legal rights of the compilation's users
230
+ beyond what the individual works permit. Inclusion of a covered work
231
+ in an aggregate does not cause this License to apply to the other
232
+ parts of the aggregate.
233
+
234
+ 6. Conveying Non-Source Forms.
235
+
236
+ You may convey a covered work in object code form under the terms
237
+ of sections 4 and 5, provided that you also convey the
238
+ machine-readable Corresponding Source under the terms of this License,
239
+ in one of these ways:
240
+
241
+ a) Convey the object code in, or embodied in, a physical product
242
+ (including a physical distribution medium), accompanied by the
243
+ Corresponding Source fixed on a durable physical medium
244
+ customarily used for software interchange.
245
+
246
+ b) Convey the object code in, or embodied in, a physical product
247
+ (including a physical distribution medium), accompanied by a
248
+ written offer, valid for at least three years and valid for as
249
+ long as you offer spare parts or customer support for that product
250
+ model, to give anyone who possesses the object code either (1) a
251
+ copy of the Corresponding Source for all the software in the
252
+ product that is covered by this License, on a durable physical
253
+ medium customarily used for software interchange, for a price no
254
+ more than your reasonable cost of physically performing this
255
+ conveying of source, or (2) access to copy the
256
+ Corresponding Source from a network server at no charge.
257
+
258
+ c) Convey individual copies of the object code with a copy of the
259
+ written offer to provide the Corresponding Source. This
260
+ alternative is allowed only occasionally and noncommercially, and
261
+ only if you received the object code with such an offer, in accord
262
+ with subsection 6b.
263
+
264
+ d) Convey the object code by offering access from a designated
265
+ place (gratis or for a charge), and offer equivalent access to the
266
+ Corresponding Source in the same way through the same place at no
267
+ further charge. You need not require recipients to copy the
268
+ Corresponding Source along with the object code. If the place to
269
+ copy the object code is a network server, the Corresponding Source
270
+ may be on a different server (operated by you or a third party)
271
+ that supports equivalent copying facilities, provided you maintain
272
+ clear directions next to the object code saying where to find the
273
+ Corresponding Source. Regardless of what server hosts the
274
+ Corresponding Source, you remain obligated to ensure that it is
275
+ available for as long as needed to satisfy these requirements.
276
+
277
+ e) Convey the object code using peer-to-peer transmission, provided
278
+ you inform other peers where the object code and Corresponding
279
+ Source of the work are being offered to the general public at no
280
+ charge under subsection 6d.
281
+
282
+ A separable portion of the object code, whose source code is excluded
283
+ from the Corresponding Source as a System Library, need not be
284
+ included in conveying the object code work.
285
+
286
+ A "User Product" is either (1) a "consumer product", which means any
287
+ tangible personal property which is normally used for personal, family,
288
+ or household purposes, or (2) anything designed or sold for incorporation
289
+ into a dwelling. In determining whether a product is a consumer product,
290
+ doubtful cases shall be resolved in favor of coverage. For a particular
291
+ product received by a particular user, "normally used" refers to a
292
+ typical or common use of that class of product, regardless of the status
293
+ of the particular user or of the way in which the particular user
294
+ actually uses, or expects or is expected to use, the product. A product
295
+ is a consumer product regardless of whether the product has substantial
296
+ commercial, industrial or non-consumer uses, unless such uses represent
297
+ the only significant mode of use of the product.
298
+
299
+ "Installation Information" for a User Product means any methods,
300
+ procedures, authorization keys, or other information required to install
301
+ and execute modified versions of a covered work in that User Product from
302
+ a modified version of its Corresponding Source. The information must
303
+ suffice to ensure that the continued functioning of the modified object
304
+ code is in no case prevented or interfered with solely because
305
+ modification has been made.
306
+
307
+ If you convey an object code work under this section in, or with, or
308
+ specifically for use in, a User Product, and the conveying occurs as
309
+ part of a transaction in which the right of possession and use of the
310
+ User Product is transferred to the recipient in perpetuity or for a
311
+ fixed term (regardless of how the transaction is characterized), the
312
+ Corresponding Source conveyed under this section must be accompanied
313
+ by the Installation Information. But this requirement does not apply
314
+ if neither you nor any third party retains the ability to install
315
+ modified object code on the User Product (for example, the work has
316
+ been installed in ROM).
317
+
318
+ The requirement to provide Installation Information does not include a
319
+ requirement to continue to provide support service, warranty, or updates
320
+ for a work that has been modified or installed by the recipient, or for
321
+ the User Product in which it has been modified or installed. Access to a
322
+ network may be denied when the modification itself materially and
323
+ adversely affects the operation of the network or violates the rules and
324
+ protocols for communication across the network.
325
+
326
+ Corresponding Source conveyed, and Installation Information provided,
327
+ in accord with this section must be in a format that is publicly
328
+ documented (and with an implementation available to the public in
329
+ source code form), and must require no special password or key for
330
+ unpacking, reading or copying.
331
+
332
+ 7. Additional Terms.
333
+
334
+ "Additional permissions" are terms that supplement the terms of this
335
+ License by making exceptions from one or more of its conditions.
336
+ Additional permissions that are applicable to the entire Program shall
337
+ be treated as though they were included in this License, to the extent
338
+ that they are valid under applicable law. If additional permissions
339
+ apply only to part of the Program, that part may be used separately
340
+ under those permissions, but the entire Program remains governed by
341
+ this License without regard to the additional permissions.
342
+
343
+ When you convey a copy of a covered work, you may at your option
344
+ remove any additional permissions from that copy, or from any part of
345
+ it. (Additional permissions may be written to require their own
346
+ removal in certain cases when you modify the work.) You may place
347
+ additional permissions on material, added by you to a covered work,
348
+ for which you have or can give appropriate copyright permission.
349
+
350
+ Notwithstanding any other provision of this License, for material you
351
+ add to a covered work, you may (if authorized by the copyright holders of
352
+ that material) supplement the terms of this License with terms:
353
+
354
+ a) Disclaiming warranty or limiting liability differently from the
355
+ terms of sections 15 and 16 of this License; or
356
+
357
+ b) Requiring preservation of specified reasonable legal notices or
358
+ author attributions in that material or in the Appropriate Legal
359
+ Notices displayed by works containing it; or
360
+
361
+ c) Prohibiting misrepresentation of the origin of that material, or
362
+ requiring that modified versions of such material be marked in
363
+ reasonable ways as different from the original version; or
364
+
365
+ d) Limiting the use for publicity purposes of names of licensors or
366
+ authors of the material; or
367
+
368
+ e) Declining to grant rights under trademark law for use of some
369
+ trade names, trademarks, or service marks; or
370
+
371
+ f) Requiring indemnification of licensors and authors of that
372
+ material by anyone who conveys the material (or modified versions of
373
+ it) with contractual assumptions of liability to the recipient, for
374
+ any liability that these contractual assumptions directly impose on
375
+ those licensors and authors.
376
+
377
+ All other non-permissive additional terms are considered "further
378
+ restrictions" within the meaning of section 10. If the Program as you
379
+ received it, or any part of it, contains a notice stating that it is
380
+ governed by this License along with a term that is a further
381
+ restriction, you may remove that term. If a license document contains
382
+ a further restriction but permits relicensing or conveying under this
383
+ License, you may add to a covered work material governed by the terms
384
+ of that license document, provided that the further restriction does
385
+ not survive such relicensing or conveying.
386
+
387
+ If you add terms to a covered work in accord with this section, you
388
+ must place, in the relevant source files, a statement of the
389
+ additional terms that apply to those files, or a notice indicating
390
+ where to find the applicable terms.
391
+
392
+ Additional terms, permissive or non-permissive, may be stated in the
393
+ form of a separately written license, or stated as exceptions;
394
+ the above requirements apply either way.
395
+
396
+ 8. Termination.
397
+
398
+ You may not propagate or modify a covered work except as expressly
399
+ provided under this License. Any attempt otherwise to propagate or
400
+ modify it is void, and will automatically terminate your rights under
401
+ this License (including any patent licenses granted under the third
402
+ paragraph of section 11).
403
+
404
+ However, if you cease all violation of this License, then your
405
+ license from a particular copyright holder is reinstated (a)
406
+ provisionally, unless and until the copyright holder explicitly and
407
+ finally terminates your license, and (b) permanently, if the copyright
408
+ holder fails to notify you of the violation by some reasonable means
409
+ prior to 60 days after the cessation.
410
+
411
+ Moreover, your license from a particular copyright holder is
412
+ reinstated permanently if the copyright holder notifies you of the
413
+ violation by some reasonable means, this is the first time you have
414
+ received notice of violation of this License (for any work) from that
415
+ copyright holder, and you cure the violation prior to 30 days after
416
+ your receipt of the notice.
417
+
418
+ Termination of your rights under this section does not terminate the
419
+ licenses of parties who have received copies or rights from you under
420
+ this License. If your rights have been terminated and not permanently
421
+ reinstated, you do not qualify to receive new licenses for the same
422
+ material under section 10.
423
+
424
+ 9. Acceptance Not Required for Having Copies.
425
+
426
+ You are not required to accept this License in order to receive or
427
+ run a copy of the Program. Ancillary propagation of a covered work
428
+ occurring solely as a consequence of using peer-to-peer transmission
429
+ to receive a copy likewise does not require acceptance. However,
430
+ nothing other than this License grants you permission to propagate or
431
+ modify any covered work. These actions infringe copyright if you do
432
+ not accept this License. Therefore, by modifying or propagating a
433
+ covered work, you indicate your acceptance of this License to do so.
434
+
435
+ 10. Automatic Licensing of Downstream Recipients.
436
+
437
+ Each time you convey a covered work, the recipient automatically
438
+ receives a license from the original licensors, to run, modify and
439
+ propagate that work, subject to this License. You are not responsible
440
+ for enforcing compliance by third parties with this License.
441
+
442
+ An "entity transaction" is a transaction transferring control of an
443
+ organization, or substantially all assets of one, or subdividing an
444
+ organization, or merging organizations. If propagation of a covered
445
+ work results from an entity transaction, each party to that
446
+ transaction who receives a copy of the work also receives whatever
447
+ licenses to the work the party's predecessor in interest had or could
448
+ give under the previous paragraph, plus a right to possession of the
449
+ Corresponding Source of the work from the predecessor in interest, if
450
+ the predecessor has it or can get it with reasonable efforts.
451
+
452
+ You may not impose any further restrictions on the exercise of the
453
+ rights granted or affirmed under this License. For example, you may
454
+ not impose a license fee, royalty, or other charge for exercise of
455
+ rights granted under this License, and you may not initiate litigation
456
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
457
+ any patent claim is infringed by making, using, selling, offering for
458
+ sale, or importing the Program or any portion of it.
459
+
460
+ 11. Patents.
461
+
462
+ A "contributor" is a copyright holder who authorizes use under this
463
+ License of the Program or a work on which the Program is based. The
464
+ work thus licensed is called the contributor's "contributor version".
465
+
466
+ A contributor's "essential patent claims" are all patent claims
467
+ owned or controlled by the contributor, whether already acquired or
468
+ hereafter acquired, that would be infringed by some manner, permitted
469
+ by this License, of making, using, or selling its contributor version,
470
+ but do not include claims that would be infringed only as a
471
+ consequence of further modification of the contributor version. For
472
+ purposes of this definition, "control" includes the right to grant
473
+ patent sublicenses in a manner consistent with the requirements of
474
+ this License.
475
+
476
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
477
+ patent license under the contributor's essential patent claims, to
478
+ make, use, sell, offer for sale, import and otherwise run, modify and
479
+ propagate the contents of its contributor version.
480
+
481
+ In the following three paragraphs, a "patent license" is any express
482
+ agreement or commitment, however denominated, not to enforce a patent
483
+ (such as an express permission to practice a patent or covenant not to
484
+ sue for patent infringement). To "grant" such a patent license to a
485
+ party means to make such an agreement or commitment not to enforce a
486
+ patent against the party.
487
+
488
+ If you convey a covered work, knowingly relying on a patent license,
489
+ and the Corresponding Source of the work is not available for anyone
490
+ to copy, free of charge and under the terms of this License, through a
491
+ publicly available network server or other readily accessible means,
492
+ then you must either (1) cause the Corresponding Source to be so
493
+ available, or (2) arrange to deprive yourself of the benefit of the
494
+ patent license for this particular work, or (3) arrange, in a manner
495
+ consistent with the requirements of this License, to extend the patent
496
+ license to downstream recipients. "Knowingly relying" means you have
497
+ actual knowledge that, but for the patent license, your conveying the
498
+ covered work in a country, or your recipient's use of the covered work
499
+ in a country, would infringe one or more identifiable patents in that
500
+ country that you have reason to believe are valid.
501
+
502
+ If, pursuant to or in connection with a single transaction or
503
+ arrangement, you convey, or propagate by procuring conveyance of, a
504
+ covered work, and grant a patent license to some of the parties
505
+ receiving the covered work authorizing them to use, propagate, modify
506
+ or convey a specific copy of the covered work, then the patent license
507
+ you grant is automatically extended to all recipients of the covered
508
+ work and works based on it.
509
+
510
+ A patent license is "discriminatory" if it does not include within
511
+ the scope of its coverage, prohibits the exercise of, or is
512
+ conditioned on the non-exercise of one or more of the rights that are
513
+ specifically granted under this License. You may not convey a covered
514
+ work if you are a party to an arrangement with a third party that is
515
+ in the business of distributing software, under which you make payment
516
+ to the third party based on the extent of your activity of conveying
517
+ the work, and under which the third party grants, to any of the
518
+ parties who would receive the covered work from you, a discriminatory
519
+ patent license (a) in connection with copies of the covered work
520
+ conveyed by you (or copies made from those copies), or (b) primarily
521
+ for and in connection with specific products or compilations that
522
+ contain the covered work, unless you entered into that arrangement,
523
+ or that patent license was granted, prior to 28 March 2007.
524
+
525
+ Nothing in this License shall be construed as excluding or limiting
526
+ any implied license or other defenses to infringement that may
527
+ otherwise be available to you under applicable patent law.
528
+
529
+ 12. No Surrender of Others' Freedom.
530
+
531
+ If conditions are imposed on you (whether by court order, agreement or
532
+ otherwise) that contradict the conditions of this License, they do not
533
+ excuse you from the conditions of this License. If you cannot convey a
534
+ covered work so as to satisfy simultaneously your obligations under this
535
+ License and any other pertinent obligations, then as a consequence you may
536
+ not convey it at all. For example, if you agree to terms that obligate you
537
+ to collect a royalty for further conveying from those to whom you convey
538
+ the Program, the only way you could satisfy both those terms and this
539
+ License would be to refrain entirely from conveying the Program.
540
+
541
+ 13. Remote Network Interaction; Use with the GNU General Public License.
542
+
543
+ Notwithstanding any other provision of this License, if you modify the
544
+ Program, your modified version must prominently offer all users
545
+ interacting with it remotely through a computer network (if your version
546
+ supports such interaction) an opportunity to receive the Corresponding
547
+ Source of your version by providing access to the Corresponding Source
548
+ from a network server at no charge, through some standard or customary
549
+ means of facilitating copying of software. This Corresponding Source
550
+ shall include the Corresponding Source for any work covered by version 3
551
+ of the GNU General Public License that is incorporated pursuant to the
552
+ following paragraph.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the work with which it is combined will remain governed by version
560
+ 3 of the GNU General Public License.
561
+
562
+ 14. Revised Versions of this License.
563
+
564
+ The Free Software Foundation may publish revised and/or new versions of
565
+ the GNU Affero General Public License from time to time. Such new versions
566
+ will be similar in spirit to the present version, but may differ in detail to
567
+ address new problems or concerns.
568
+
569
+ Each version is given a distinguishing version number. If the
570
+ Program specifies that a certain numbered version of the GNU Affero General
571
+ Public License "or any later version" applies to it, you have the
572
+ option of following the terms and conditions either of that numbered
573
+ version or of any later version published by the Free Software
574
+ Foundation. If the Program does not specify a version number of the
575
+ GNU Affero General Public License, you may choose any version ever published
576
+ by the Free Software Foundation.
577
+
578
+ If the Program specifies that a proxy can decide which future
579
+ versions of the GNU Affero General Public License can be used, that proxy's
580
+ public statement of acceptance of a version permanently authorizes you
581
+ to choose that version for the Program.
582
+
583
+ Later license versions may give you additional or different
584
+ permissions. However, no additional obligations are imposed on any
585
+ author or copyright holder as a result of your choosing to follow a
586
+ later version.
587
+
588
+ 15. Disclaimer of Warranty.
589
+
590
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
591
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
592
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
593
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
594
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
595
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
596
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
597
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
598
+
599
+ 16. Limitation of Liability.
600
+
601
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
602
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
603
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
604
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
605
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
606
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
607
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
608
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
609
+ SUCH DAMAGES.
610
+
611
+ 17. Interpretation of Sections 15 and 16.
612
+
613
+ If the disclaimer of warranty and limitation of liability provided
614
+ above cannot be given local legal effect according to their terms,
615
+ reviewing courts shall apply local law that most closely approximates
616
+ an absolute waiver of all civil liability in connection with the
617
+ Program, unless a warranty or assumption of liability accompanies a
618
+ copy of the Program in return for a fee.
619
+
620
+ END OF TERMS AND CONDITIONS
621
+
622
+ How to Apply These Terms to Your New Programs
623
+
624
+ If you develop a new program, and you want it to be of the greatest
625
+ possible use to the public, the best way to achieve this is to make it
626
+ free software which everyone can redistribute and change under these terms.
627
+
628
+ To do so, attach the following notices to the program. It is safest
629
+ to attach them to the start of each source file to most effectively
630
+ state the exclusion of warranty; and each file should have at least
631
+ the "copyright" line and a pointer to where the full notice is found.
632
+
633
+ <one line to give the program's name and a brief idea of what it does.>
634
+ Copyright (C) <year> <name of author>
635
+
636
+ This program is free software: you can redistribute it and/or modify
637
+ it under the terms of the GNU Affero General Public License as published
638
+ by the Free Software Foundation, either version 3 of the License, or
639
+ (at your option) any later version.
640
+
641
+ This program is distributed in the hope that it will be useful,
642
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
643
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
644
+ GNU Affero General Public License for more details.
645
+
646
+ You should have received a copy of the GNU Affero General Public License
647
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
648
+
649
+ Also add information on how to contact you by electronic and paper mail.
650
+
651
+ If your software can interact with users remotely through a computer
652
+ network, you should also make sure that it provides a way for users to
653
+ get its source. For example, if your program is a web application, its
654
+ interface could display a "Source" link that leads users to an archive
655
+ of the code. There are many ways you could offer source, and different
656
+ solutions will be better for different programs; see section 13 for the
657
+ specific requirements.
658
+
659
+ You should also get your employer (if you work as a programmer) or school,
660
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
661
+ For more information on this, and how to apply and follow the GNU AGPL, see
662
+ <http://www.gnu.org/licenses/>.
README.md ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !After Detailer
2
+
3
+ !After Detailer is an extension for the stable diffusion webui, similar to Detection Detailer, except it uses ultralytics instead of mmdet.
4
+
5
+ ## Install
6
+
7
+ (from Mikubill/sd-webui-controlnet)
8
+
9
+ 1. Open "Extensions" tab.
10
+ 2. Open "Install from URL" tab in the tab.
11
+ 3. Enter `https://github.com/Bing-su/adetailer.git` to "URL for extension's git repository".
12
+ 4. Press "Install" button.
13
+ 5. Wait 5 seconds, and you will see the message "Installed into stable-diffusion-webui\extensions\adetailer. Use Installed tab to restart".
14
+ 6. Go to "Installed" tab, click "Check for updates", and then click "Apply and restart UI". (The next time you can also use this method to update extensions.)
15
+ 7. Completely restart A1111 webui including your terminal. (If you do not know what a "terminal" is, you can reboot your computer: turn your computer off and turn it on again.)
16
+
17
+ You can now install it directly from the Extensions tab.
18
+
19
+ ![image](https://i.imgur.com/g6GdRBT.png)
20
+
21
+ You **DON'T** need to download any model from huggingface.
22
+
23
+ ## Options
24
+
25
+ | Model, Prompts | | |
26
+ | --------------------------------- | ------------------------------------- | ------------------------------------------------- |
27
+ | ADetailer model | Determine what to detect. | `None` = disable |
28
+ | ADetailer prompt, negative prompt | Prompts and negative prompts to apply | If left blank, it will use the same as the input. |
29
+
30
+ | Detection | | |
31
+ | ------------------------------------ | -------------------------------------------------------------------------------------------- | --- |
32
+ | Detection model confidence threshold | Only objects with a detection model confidence above this threshold are used for inpainting. | |
33
+ | Mask min/max ratio | Only use masks whose area is between those ratios for the area of the entire image. | |
34
+
35
+ If you want to exclude objects in the background, try setting the min ratio to around `0.01`.
36
+
37
+ | Mask Preprocessing | | |
38
+ | ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- |
39
+ | Mask x, y offset | Moves the mask horizontally and vertically by the given number of pixels. | |
40
+ | Mask erosion (-) / dilation (+) | Enlarge or reduce the detected mask. | [opencv example](https://docs.opencv.org/4.7.0/db/df6/tutorial_erosion_dilatation.html) |
41
+ | Mask merge mode | `None`: Inpaint each mask<br/>`Merge`: Merge all masks and inpaint<br/>`Merge and Invert`: Merge all masks and Invert, then inpaint | |
42
+
43
+ Applied in this order: x, y offset → erosion/dilation → merge/invert.
44
+
45
+ #### Inpainting
46
+
47
+ ![image](https://i.imgur.com/wyWlT1n.png)
48
+
49
+ Each option corresponds to a corresponding option on the inpaint tab.
50
+
51
+ ## ControlNet Inpainting
52
+
53
+ You can use the ControlNet extension if you have ControlNet installed and ControlNet models.
54
+
55
+ Support `inpaint, scribble, lineart, openpose, tile` controlnet models. Once you choose a model, the preprocessor is set automatically.
56
+
57
+ ## Model
58
+
59
+ | Model | Target | mAP 50 | mAP 50-95 |
60
+ | --------------------- | --------------------- | ----------------------------- | ----------------------------- |
61
+ | face_yolov8n.pt | 2D / realistic face | 0.660 | 0.366 |
62
+ | face_yolov8s.pt | 2D / realistic face | 0.713 | 0.404 |
63
+ | hand_yolov8n.pt | 2D / realistic hand | 0.767 | 0.505 |
64
+ | person_yolov8n-seg.pt | 2D / realistic person | 0.782 (bbox)<br/>0.761 (mask) | 0.555 (bbox)<br/>0.460 (mask) |
65
+ | person_yolov8s-seg.pt | 2D / realistic person | 0.824 (bbox)<br/>0.809 (mask) | 0.605 (bbox)<br/>0.508 (mask) |
66
+ | mediapipe_face_full | realistic face | - | - |
67
+ | mediapipe_face_short | realistic face | - | - |
68
+ | mediapipe_face_mesh | realistic face | - | - |
69
+
70
+ The yolo models can be found on huggingface [Bingsu/adetailer](https://huggingface.co/Bingsu/adetailer).
71
+
72
+ ### User Model
73
+
74
+ Put your [ultralytics](https://github.com/ultralytics/ultralytics) model in `webui/models/adetailer`. The model name should end with `.pt` or `.pth`.
75
+
76
+ It must be a bbox detection or segmentation model and use all labels.
77
+
78
+ ### Dataset
79
+
80
+ Datasets used for training the yolo models are:
81
+
82
+ #### Face
83
+
84
+ - [Anime Face CreateML](https://universe.roboflow.com/my-workspace-mph8o/anime-face-createml)
85
+ - [xml2txt](https://universe.roboflow.com/0oooooo0/xml2txt-njqx1)
86
+ - [AN](https://universe.roboflow.com/sed-b8vkf/an-lfg5i)
87
+ - [wider face](http://shuoyang1213.me/WIDERFACE/index.html)
88
+
89
+ #### Hand
90
+
91
+ - [AnHDet](https://universe.roboflow.com/1-yshhi/anhdet)
92
+ - [hand-detection-fuao9](https://universe.roboflow.com/catwithawand/hand-detection-fuao9)
93
+
94
+ #### Person
95
+
96
+ - [coco2017](https://cocodataset.org/#home) (only person)
97
+ - [AniSeg](https://github.com/jerryli27/AniSeg)
98
+ - [skytnt/anime-segmentation](https://huggingface.co/datasets/skytnt/anime-segmentation)
99
+
100
+ ## Example
101
+
102
+ ![image](https://i.imgur.com/38RSxSO.png)
103
+ ![image](https://i.imgur.com/2CYgjLx.png)
104
+
105
+ [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/F1F1L7V2N)
Taskfile.yml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # https://taskfile.dev
2
+
3
+ version: "3"
4
+
5
+ dotenv:
6
+ - .env
7
+
8
+ vars:
9
+ SHELL: '{{if eq .OS "Windows_NT"}}powershell{{end}}'
10
+
11
+ tasks:
12
+ default:
13
+ cmds:
14
+ - echo "$PYTHON"
15
+ - echo "$WEBUI"
16
+ silent: true
17
+
18
+ launch:
19
+ dir: "{{.WEBUI}}"
20
+ cmds:
21
+ - "{{.PYTHON}} launch.py --xformers --api --autolaunch"
22
+
23
+ lint:
24
+ cmds:
25
+ - pre-commit run -a
adetailer/__init__.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Public API of the adetailer package: re-exports the version string, the
+ argument schema helpers, model discovery, and the two predictor backends."""
+ from .__version__ import __version__
2
+ from .args import AD_ENABLE, ALL_ARGS, ADetailerArgs, EnableChecker
3
+ from .common import PredictOutput, get_models
4
+ from .mediapipe import mediapipe_predict
5
+ from .ultralytics import ultralytics_predict
6
+
7
+ # Display name used for the extension in the webui.
+ AFTER_DETAILER = "ADetailer"
8
+
9
+ __all__ = [
10
+ "__version__",
11
+ "AD_ENABLE",
12
+ "ADetailerArgs",
13
+ "AFTER_DETAILER",
14
+ "ALL_ARGS",
15
+ "EnableChecker",
16
+ "PredictOutput",
17
+ "get_models",
18
+ "mediapipe_predict",
19
+ "ultralytics_predict",
20
+ ]
adetailer/__version__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __version__ = "23.7.8"
adetailer/args.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import UserList
4
+ from functools import cached_property, partial
5
+ from typing import Any, Literal, NamedTuple, Optional, Union
6
+
7
+ import pydantic
8
+ from pydantic import (
9
+ BaseModel,
10
+ Extra,
11
+ NonNegativeFloat,
12
+ NonNegativeInt,
13
+ PositiveInt,
14
+ confloat,
15
+ constr,
16
+ root_validator,
17
+ validator,
18
+ )
19
+
20
+ cn_model_regex = r".*(inpaint|tile|scribble|lineart|openpose).*|^None$"
21
+
22
+
23
class Arg(NamedTuple):
    """A (pydantic attribute name, human-readable infotext label) pair."""

    attr: str
    name: str
26
+
27
+
28
class ArgsList(UserList):
    """A list of (attr, name) pairs with cached per-column views."""

    @cached_property
    def attrs(self) -> tuple[str]:
        """All attribute identifiers, in list order."""
        return tuple(pair[0] for pair in self)

    @cached_property
    def names(self) -> tuple[str]:
        """All display labels, in list order."""
        return tuple(pair[1] for pair in self)
36
+
37
+
38
class ADetailerArgs(BaseModel, extra=Extra.forbid):
    """Settings for one ADetailer unit.

    Pydantic v1 model; extra keys are rejected so API typos fail loudly.
    Field names mirror the UI widgets (see ALL_ARGS for infotext labels).
    """

    ad_model: str = "None"  # "None" disables this unit
    ad_prompt: str = ""
    ad_negative_prompt: str = ""
    ad_confidence: confloat(ge=0.0, le=1.0) = 0.3
    ad_mask_min_ratio: confloat(ge=0.0, le=1.0) = 0.0
    ad_mask_max_ratio: confloat(ge=0.0, le=1.0) = 1.0
    ad_dilate_erode: int = 4  # >0 dilate, <0 erode, 0 no-op
    ad_x_offset: int = 0
    ad_y_offset: int = 0
    ad_mask_merge_invert: Literal["None", "Merge", "Merge and Invert"] = "None"
    ad_mask_blur: NonNegativeInt = 4
    ad_denoising_strength: confloat(ge=0.0, le=1.0) = 0.4
    ad_inpaint_only_masked: bool = True
    ad_inpaint_only_masked_padding: NonNegativeInt = 32
    ad_use_inpaint_width_height: bool = False
    ad_inpaint_width: PositiveInt = 512
    ad_inpaint_height: PositiveInt = 512
    ad_use_steps: bool = False
    ad_steps: PositiveInt = 28
    ad_use_cfg_scale: bool = False
    ad_cfg_scale: NonNegativeFloat = 7.0
    ad_use_sampler: bool = False
    ad_sampler: str = "DPM++ 2M Karras"
    ad_use_noise_multiplier: bool = False
    ad_noise_multiplier: confloat(ge=0.5, le=1.5) = 1.0
    ad_restore_face: bool = False
    ad_controlnet_model: constr(regex=cn_model_regex) = "None"
    ad_controlnet_module: Optional[constr(regex=r".*inpaint.*|^None$")] = None
    ad_controlnet_weight: confloat(ge=0.0, le=1.0) = 1.0
    ad_controlnet_guidance_start: confloat(ge=0.0, le=1.0) = 0.0
    ad_controlnet_guidance_end: confloat(ge=0.0, le=1.0) = 1.0
    is_api: bool = True  # False when invoked from the webui (see is_api_validator)

    @root_validator(skip_on_failure=True)
    def ad_controlnt_module_validator(cls, values):  # noqa: N805
        # A CN module only makes sense with an inpaint ControlNet model;
        # normalize "None" / mismatched modules to None.
        cn_model = values.get("ad_controlnet_model", "None")
        cn_module = values.get("ad_controlnet_module", None)
        if "inpaint" not in cn_model or cn_module == "None":
            values["ad_controlnet_module"] = None
        return values

    @validator("is_api", pre=True)
    def is_api_validator(cls, v: Any):  # noqa: N805
        "tuple is json serializable but cannot be made with json deserialize."
        # UI runs send a tuple sentinel; JSON (API) input can never be a tuple.
        return type(v) is not tuple

    @staticmethod
    def ppop(
        p: dict[str, Any],
        key: str,
        pops: list[str] | None = None,
        cond: Any = None,
    ) -> None:
        # Conditionally prune infotext entries: with cond=None, drop `pops`
        # (default: the key itself) when p[key] is falsy; otherwise drop them
        # when p[key] equals `cond` (i.e. the value is a default).
        if pops is None:
            pops = [key]
        if key not in p:
            return
        value = p[key]
        cond = (not bool(value)) if cond is None else value == cond

        if cond:
            for k in pops:
                p.pop(k, None)

    def extra_params(self, suffix: str = "") -> dict[str, Any]:
        """Infotext dict for this unit, with default-valued entries removed.

        *suffix* distinguishes multiple units (e.g. " 2nd").
        """
        if self.ad_model == "None":
            return {}

        p = {name: getattr(self, attr) for attr, name in ALL_ARGS}
        ppop = partial(self.ppop, p)

        ppop("ADetailer prompt")
        ppop("ADetailer negative prompt")
        ppop("ADetailer mask min ratio", cond=0.0)
        ppop("ADetailer mask max ratio", cond=1.0)
        ppop("ADetailer x offset", cond=0)
        ppop("ADetailer y offset", cond=0)
        ppop("ADetailer mask merge/invert", cond="None")
        ppop("ADetailer inpaint only masked", ["ADetailer inpaint padding"])
        ppop(
            "ADetailer use inpaint width/height",
            [
                "ADetailer use inpaint width/height",
                "ADetailer inpaint width",
                "ADetailer inpaint height",
            ],
        )
        ppop(
            "ADetailer use separate steps",
            ["ADetailer use separate steps", "ADetailer steps"],
        )
        ppop(
            "ADetailer use separate CFG scale",
            ["ADetailer use separate CFG scale", "ADetailer CFG scale"],
        )
        ppop(
            "ADetailer use separate sampler",
            ["ADetailer use separate sampler", "ADetailer sampler"],
        )
        ppop(
            "ADetailer use separate noise multiplier",
            ["ADetailer use separate noise multiplier", "ADetailer noise multiplier"],
        )

        ppop("ADetailer restore face")
        ppop(
            "ADetailer ControlNet model",
            [
                "ADetailer ControlNet model",
                "ADetailer ControlNet module",
                "ADetailer ControlNet weight",
                "ADetailer ControlNet guidance start",
                "ADetailer ControlNet guidance end",
            ],
            cond="None",
        )
        ppop("ADetailer ControlNet module")
        ppop("ADetailer ControlNet weight", cond=1.0)
        ppop("ADetailer ControlNet guidance start", cond=0.0)
        ppop("ADetailer ControlNet guidance end", cond=1.0)

        if suffix:
            p = {k + suffix: v for k, v in p.items()}

        return p
164
+
165
+
166
class EnableChecker(BaseModel):
    """Decides whether ADetailer should run for a given set of unit args."""

    enable: bool
    arg_list: list

    def is_enabled(self) -> bool:
        """True when the extension is enabled and any unit has a real model."""
        if not self.enable:
            return False
        model_attr = ALL_ARGS[0].attr
        for unit in self.arg_list:
            if unit.get(model_attr, "None") != "None":
                return True
        return False
175
+
176
+
177
# (pydantic attribute, infotext label) pairs; this order defines the gradio
# component order that the script expects on every generate call.
_all_args = [
    ("ad_enable", "ADetailer enable"),
    ("ad_model", "ADetailer model"),
    ("ad_prompt", "ADetailer prompt"),
    ("ad_negative_prompt", "ADetailer negative prompt"),
    ("ad_confidence", "ADetailer confidence"),
    ("ad_mask_min_ratio", "ADetailer mask min ratio"),
    ("ad_mask_max_ratio", "ADetailer mask max ratio"),
    ("ad_x_offset", "ADetailer x offset"),
    ("ad_y_offset", "ADetailer y offset"),
    ("ad_dilate_erode", "ADetailer dilate/erode"),
    ("ad_mask_merge_invert", "ADetailer mask merge/invert"),
    ("ad_mask_blur", "ADetailer mask blur"),
    ("ad_denoising_strength", "ADetailer denoising strength"),
    ("ad_inpaint_only_masked", "ADetailer inpaint only masked"),
    ("ad_inpaint_only_masked_padding", "ADetailer inpaint padding"),
    ("ad_use_inpaint_width_height", "ADetailer use inpaint width/height"),
    ("ad_inpaint_width", "ADetailer inpaint width"),
    ("ad_inpaint_height", "ADetailer inpaint height"),
    ("ad_use_steps", "ADetailer use separate steps"),
    ("ad_steps", "ADetailer steps"),
    ("ad_use_cfg_scale", "ADetailer use separate CFG scale"),
    ("ad_cfg_scale", "ADetailer CFG scale"),
    ("ad_use_sampler", "ADetailer use separate sampler"),
    ("ad_sampler", "ADetailer sampler"),
    ("ad_use_noise_multiplier", "ADetailer use separate noise multiplier"),
    ("ad_noise_multiplier", "ADetailer noise multiplier"),
    ("ad_restore_face", "ADetailer restore face"),
    ("ad_controlnet_model", "ADetailer ControlNet model"),
    ("ad_controlnet_module", "ADetailer ControlNet module"),
    ("ad_controlnet_weight", "ADetailer ControlNet weight"),
    ("ad_controlnet_guidance_start", "ADetailer ControlNet guidance start"),
    ("ad_controlnet_guidance_end", "ADetailer ControlNet guidance end"),
]

# The enable checkbox is handled separately from the per-unit argument list.
AD_ENABLE = Arg(*_all_args[0])
_args = [Arg(*args) for args in _all_args[1:]]
ALL_ARGS = ArgsList(_args)

# UI choice lists; indices match the SortBy / MergeInvert IntEnums in mask.py.
BBOX_SORTBY = [
    "None",
    "Position (left to right)",
    "Position (center to edge)",
    "Area (large to small)",
]
MASK_MERGE_INVERT = ["None", "Merge", "Merge and Invert"]
adetailer/common.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from collections import OrderedDict
4
+ from dataclasses import dataclass, field
5
+ from pathlib import Path
6
+ from typing import Optional, Union
7
+
8
+ from huggingface_hub import hf_hub_download
9
+ from PIL import Image, ImageDraw
10
+ from rich import print
11
+
12
+ repo_id = "Bingsu/adetailer"
13
+
14
+
15
@dataclass
class PredictOutput:
    """Result of one detection run.

    bboxes are [x1, y1, x2, y2]; masks are "L"-mode images, one per bbox;
    preview is an annotated preview image (None when nothing was detected).
    """

    bboxes: list[list[int | float]] = field(default_factory=list)
    masks: list[Image.Image] = field(default_factory=list)
    preview: Optional[Image.Image] = None
20
+
21
+
22
def hf_download(file: str):
    """Fetch *file* from the adetailer HF repo; return "INVALID" on failure."""
    try:
        return hf_hub_download(repo_id, file)
    except Exception:
        print(f"[-] ADetailer: Failed to load model {file!r} from huggingface")
        return "INVALID"
30
+
31
+
32
def get_models(
    model_dir: Union[str, Path], huggingface: bool = True
) -> OrderedDict[str, Optional[str]]:
    """Build the model-name -> path mapping shown in the UI.

    Bundled huggingface models come first, then the path-less mediapipe
    entries, then local ``.pt``/``.pth`` files found under *model_dir*
    (skipped when their file name is already taken). Entries whose
    huggingface download failed are dropped.
    """
    model_dir = Path(model_dir)
    local_files = []
    if model_dir.is_dir():
        local_files = [
            p
            for p in model_dir.rglob("*")
            if p.is_file() and p.suffix in (".pt", ".pth")
        ]

    models = OrderedDict()
    if huggingface:
        for name in (
            "face_yolov8n.pt",
            "face_yolov8s.pt",
            "hand_yolov8n.pt",
            "person_yolov8n-seg.pt",
            "person_yolov8s-seg.pt",
        ):
            models[name] = hf_download(name)

    for name in (
        "mediapipe_face_full",
        "mediapipe_face_short",
        "mediapipe_face_mesh",
        "mediapipe_face_mesh_eyes_only",
    ):
        models[name] = None

    # Drop models whose download failed so they never reach the dropdown.
    for key in [k for k, v in models.items() if v == "INVALID"]:
        models.pop(key)

    for path in local_files:
        models.setdefault(path.name, str(path))

    return models
75
+
76
+
77
def create_mask_from_bbox(
    bboxes: list[list[float]], shape: tuple[int, int]
) -> list[Image.Image]:
    """Render each [x1, y1, x2, y2] box as a white rectangle on a black
    "L"-mode mask of size *shape* (width, height); one mask per box."""
    result = []
    for box in bboxes:
        canvas = Image.new("L", shape, 0)
        ImageDraw.Draw(canvas).rectangle(box, fill=255)
        result.append(canvas)
    return result
102
+
103
+
104
def create_bbox_from_mask(
    masks: list[Image.Image], shape: tuple[int, int]
) -> list[list[int]]:
    """Resize each mask to *shape* (width, height) and take its bounding box.

    Masks that are entirely black (no bbox) are silently skipped.
    """
    boxes = []
    for mask in masks:
        box = mask.resize(shape).getbbox()
        if box is not None:
            boxes.append(list(box))
    return boxes
adetailer/mask.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from enum import IntEnum
4
+ from functools import partial, reduce
5
+ from math import dist
6
+
7
+ import cv2
8
+ import numpy as np
9
+ from PIL import Image, ImageChops
10
+
11
+ from adetailer.args import MASK_MERGE_INVERT
12
+ from adetailer.common import PredictOutput
13
+
14
+
15
class SortBy(IntEnum):
    """Bbox ordering strategies; values index the BBOX_SORTBY UI choices."""

    NONE = 0
    LEFT_TO_RIGHT = 1
    CENTER_TO_EDGE = 2
    AREA = 3
20
+
21
+
22
class MergeInvert(IntEnum):
    """Mask combine modes; values index the MASK_MERGE_INVERT UI choices."""

    NONE = 0
    MERGE = 1
    MERGE_INVERT = 2
26
+
27
+
28
def _dilate(arr: np.ndarray, value: int) -> np.ndarray:
    """Grow white regions of *arr* with a value x value rectangular kernel."""
    return cv2.dilate(
        arr, cv2.getStructuringElement(cv2.MORPH_RECT, (value, value)), iterations=1
    )
31
+
32
+
33
def _erode(arr: np.ndarray, value: int) -> np.ndarray:
    """Shrink white regions of *arr* with a value x value rectangular kernel."""
    return cv2.erode(
        arr, cv2.getStructuringElement(cv2.MORPH_RECT, (value, value)), iterations=1
    )
36
+
37
+
38
def dilate_erode(img: Image.Image, value: int) -> Image.Image:
    """Dilate (value > 0) or erode (value < 0) *img* by a |value| kernel.

    A value of 0 returns the input image unchanged.
    """
    if value == 0:
        return img

    arr = np.array(img)
    if value > 0:
        arr = _dilate(arr, value)
    else:
        arr = _erode(arr, -value)
    return Image.fromarray(arr)
63
+
64
+
65
def offset(img: Image.Image, x: int = 0, y: int = 0) -> Image.Image:
    """
    Shift *img* by x pixels right (→) and y pixels up (↑), wrapping around.

    Parameters
    ----------
    img: Image.Image
        the mask image to shift
    x: int
        horizontal offset; positive moves the content right
    y: int
        vertical offset; positive moves the content up (the sign is flipped
        below because ImageChops.offset treats positive y as down)

    Returns
    -------
    PIL.Image.Image
        A new image that is offset by x and y
    """
    return ImageChops.offset(img, x, -y)
84
+
85
+
86
def is_all_black(img: Image.Image) -> bool:
    """True when *img* contains no nonzero (white) pixels."""
    return cv2.countNonZero(np.asarray(img)) == 0
89
+
90
+
91
def bbox_area(bbox: list[float]):
    """Area of an [x1, y1, x2, y2] box."""
    width = bbox[2] - bbox[0]
    height = bbox[3] - bbox[1]
    return width * height
93
+
94
+
95
def mask_preprocess(
    masks: list[Image.Image],
    kernel: int = 0,
    x_offset: int = 0,
    y_offset: int = 0,
    merge_invert: int | MergeInvert | str = MergeInvert.NONE,
) -> list[Image.Image]:
    """Offset, dilate/erode, drop empty masks, then apply merge/invert.

    Parameters
    ----------
    masks: list[Image.Image]
        detection masks to process
    kernel: int
        dilation (>0) / erosion (<0) kernel size; 0 skips the step
    x_offset, y_offset: int
        pixel shift applied to every mask (→ / ↑)
    merge_invert: int | MergeInvert | str
        mask combine mode, forwarded to mask_merge_invert

    Returns
    -------
    list[Image.Image]
        the processed masks (possibly a single merged mask)
    """
    if not masks:
        return []

    processed = masks
    if x_offset != 0 or y_offset != 0:
        processed = [offset(m, x_offset, y_offset) for m in processed]

    if kernel != 0:
        processed = [dilate_erode(m, kernel) for m in processed]
        # Erosion can wipe a mask out entirely; drop those.
        processed = [m for m in processed if not is_all_black(m)]

    return mask_merge_invert(processed, mode=merge_invert)
133
+
134
+
135
+ # Bbox sorting
136
+ def _key_left_to_right(bbox: list[float]) -> float:
137
+ """
138
+ Left to right
139
+
140
+ Parameters
141
+ ----------
142
+ bbox: list[float]
143
+ list of [x1, y1, x2, y2]
144
+ """
145
+ return bbox[0]
146
+
147
+
148
+ def _key_center_to_edge(bbox: list[float], *, center: tuple[float, float]) -> float:
149
+ """
150
+ Center to edge
151
+
152
+ Parameters
153
+ ----------
154
+ bbox: list[float]
155
+ list of [x1, y1, x2, y2]
156
+ image: Image.Image
157
+ the image
158
+ """
159
+ bbox_center = ((bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2)
160
+ return dist(center, bbox_center)
161
+
162
+
163
+ def _key_area(bbox: list[float]) -> float:
164
+ """
165
+ Large to small
166
+
167
+ Parameters
168
+ ----------
169
+ bbox: list[float]
170
+ list of [x1, y1, x2, y2]
171
+ """
172
+ return -bbox_area(bbox)
173
+
174
+
175
def sort_bboxes(
    pred: PredictOutput, order: int | SortBy = SortBy.NONE
) -> PredictOutput:
    """Reorder pred.bboxes and pred.masks together according to *order*."""
    if order == SortBy.NONE or len(pred.bboxes) <= 1:
        return pred

    if order == SortBy.LEFT_TO_RIGHT:
        key = _key_left_to_right
    elif order == SortBy.CENTER_TO_EDGE:
        # Distances are measured from the preview image's center.
        width, height = pred.preview.size
        key = partial(_key_center_to_edge, center=(width / 2, height / 2))
    elif order == SortBy.AREA:
        key = _key_area
    else:
        raise RuntimeError  # unknown sort order

    order_idx = sorted(range(len(pred.bboxes)), key=lambda i: key(pred.bboxes[i]))
    pred.bboxes = [pred.bboxes[i] for i in order_idx]
    pred.masks = [pred.masks[i] for i in order_idx]
    return pred
197
+
198
+
199
+ # Filter by ratio
200
def is_in_ratio(bbox: list[float], low: float, high: float, orig_area: int) -> bool:
    """True when bbox's area relative to *orig_area* lies in [low, high]."""
    area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
    return low <= area / orig_area <= high
203
+
204
+
205
def filter_by_ratio(pred: PredictOutput, low: float, high: float) -> PredictOutput:
    """Drop detections whose bbox-to-image area ratio is outside [low, high]."""
    if not pred.bboxes:
        return pred

    w, h = pred.preview.size
    orig_area = w * h
    keep = [
        i
        for i, box in enumerate(pred.bboxes)
        if is_in_ratio(box, low, high, orig_area)
    ]
    pred.bboxes = [pred.bboxes[i] for i in keep]
    pred.masks = [pred.masks[i] for i in keep]
    return pred
216
+
217
+
218
+ # Merge / Invert
219
def mask_merge(masks: list[Image.Image]) -> list[Image.Image]:
    """OR all masks together, returning a single-element list."""
    merged = np.array(masks[0])
    for m in masks[1:]:
        merged = cv2.bitwise_or(merged, np.array(m))
    return [Image.fromarray(merged)]
223
+
224
+
225
def mask_invert(masks: list[Image.Image]) -> list[Image.Image]:
    """Invert each mask (white ↔ black)."""
    return [ImageChops.invert(m) for m in masks]
227
+
228
+
229
def mask_merge_invert(
    masks: list[Image.Image], mode: int | MergeInvert | str
) -> list[Image.Image]:
    """Apply the "None" / "Merge" / "Merge and Invert" mask option."""
    if isinstance(mode, str):
        # UI sends the label; map it to its MergeInvert index.
        mode = MASK_MERGE_INVERT.index(mode)

    if mode == MergeInvert.NONE or not masks:
        return masks
    if mode == MergeInvert.MERGE:
        return mask_merge(masks)
    if mode == MergeInvert.MERGE_INVERT:
        return mask_invert(mask_merge(masks))
    raise RuntimeError  # unreachable: unknown mode
adetailer/mediapipe.py ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from functools import partial
4
+
5
+ import numpy as np
6
+ from PIL import Image, ImageDraw
7
+
8
+ from adetailer import PredictOutput
9
+ from adetailer.common import create_bbox_from_mask, create_mask_from_bbox
10
+
11
+
12
def mediapipe_predict(
    model_type: str, image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
    """Dispatch to the mediapipe detector named by *model_type*."""
    mapping = {
        "mediapipe_face_short": partial(mediapipe_face_detection, 0),
        "mediapipe_face_full": partial(mediapipe_face_detection, 1),
        "mediapipe_face_mesh": mediapipe_face_mesh,
        "mediapipe_face_mesh_eyes_only": mediapipe_face_mesh_eyes_only,
    }
    if model_type not in mapping:
        msg = f"[-] ADetailer: Invalid mediapipe model type: {model_type}, Available: {list(mapping.keys())!r}"
        raise RuntimeError(msg)
    return mapping[model_type](image, confidence)
26
+
27
+
28
def mediapipe_face_detection(
    model_type: int, image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
    """Detect faces with mediapipe FaceDetection.

    model_type selects the detector: 0 = short-range, 1 = full-range
    (mediapipe's model_selection parameter). Relative bounding boxes are
    scaled to absolute pixel coordinates; masks are filled rectangles.
    Returns an empty PredictOutput when nothing is detected.
    """
    import mediapipe as mp

    img_width, img_height = image.size

    mp_face_detection = mp.solutions.face_detection
    draw_util = mp.solutions.drawing_utils

    img_array = np.array(image)

    with mp_face_detection.FaceDetection(
        model_selection=model_type, min_detection_confidence=confidence
    ) as face_detector:
        pred = face_detector.process(img_array)

    if pred.detections is None:
        return PredictOutput()

    preview_array = img_array.copy()

    bboxes = []
    for detection in pred.detections:
        # Annotate the preview in place while collecting boxes.
        draw_util.draw_detection(preview_array, detection)

        # Mediapipe boxes are normalized (xmin, ymin, width, height).
        bbox = detection.location_data.relative_bounding_box
        x1 = bbox.xmin * img_width
        y1 = bbox.ymin * img_height
        w = bbox.width * img_width
        h = bbox.height * img_height
        x2 = x1 + w
        y2 = y1 + h

        bboxes.append([x1, y1, x2, y2])

    masks = create_mask_from_bbox(bboxes, image.size)
    preview = Image.fromarray(preview_array)

    return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
68
+
69
+
70
def get_convexhull(points: np.ndarray) -> list[tuple[int, int]]:
    """Convex-hull outline of 2-D *points* as (x, y) vertex tuples.

    The result is suitable as input for ImageDraw.polygon.
    """
    from scipy.spatial import ConvexHull

    vertex_idx = ConvexHull(points).vertices
    return [(points[i, 0], points[i, 1]) for i in vertex_idx]
85
+
86
+
87
def mediapipe_face_mesh(image: Image.Image, confidence: float = 0.3) -> PredictOutput:
    """Detect faces with mediapipe FaceMesh (up to 20 faces).

    Each face's mask is the convex hull of its 468 landmarks; the preview
    shows the tesselated mesh. Returns an empty PredictOutput when no face
    is found.
    """
    import mediapipe as mp

    mp_face_mesh = mp.solutions.face_mesh
    draw_util = mp.solutions.drawing_utils
    drawing_styles = mp.solutions.drawing_styles

    w, h = image.size

    with mp_face_mesh.FaceMesh(
        static_image_mode=True, max_num_faces=20, min_detection_confidence=confidence
    ) as face_mesh:
        arr = np.array(image)
        pred = face_mesh.process(arr)

        if pred.multi_face_landmarks is None:
            return PredictOutput()

        preview = arr.copy()
        masks = []

        for landmarks in pred.multi_face_landmarks:
            # Draw the mesh on the preview array in place.
            draw_util.draw_landmarks(
                image=preview,
                landmark_list=landmarks,
                connections=mp_face_mesh.FACEMESH_TESSELATION,
                landmark_drawing_spec=None,
                connection_drawing_spec=drawing_styles.get_default_face_mesh_tesselation_style(),
            )

            # Landmarks are normalized; scale to pixel coordinates.
            points = np.array([(land.x * w, land.y * h) for land in landmarks.landmark])
            outline = get_convexhull(points)

            mask = Image.new("L", image.size, "black")
            draw = ImageDraw.Draw(mask)
            draw.polygon(outline, fill="white")
            masks.append(mask)

        bboxes = create_bbox_from_mask(masks, image.size)
        preview = Image.fromarray(preview)
        return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
128
+
129
+
130
def mediapipe_face_mesh_eyes_only(
    image: Image.Image, confidence: float = 0.3
) -> PredictOutput:
    """Detect only the eye regions via mediapipe FaceMesh landmarks.

    For each detected face, the mask is the union of the convex hulls of
    the left- and right-eye landmark sets. Returns an empty PredictOutput
    when no face is found.
    """
    import mediapipe as mp

    mp_face_mesh = mp.solutions.face_mesh

    # Landmark indices belonging to each eye's connection set.
    left_idx = np.array(list(mp_face_mesh.FACEMESH_LEFT_EYE)).flatten()
    right_idx = np.array(list(mp_face_mesh.FACEMESH_RIGHT_EYE)).flatten()

    w, h = image.size

    with mp_face_mesh.FaceMesh(
        static_image_mode=True, max_num_faces=20, min_detection_confidence=confidence
    ) as face_mesh:
        arr = np.array(image)
        pred = face_mesh.process(arr)

        if pred.multi_face_landmarks is None:
            return PredictOutput()

        preview = image.copy()
        masks = []

        for landmarks in pred.multi_face_landmarks:
            # Landmarks are normalized; scale to pixel coordinates.
            points = np.array([(land.x * w, land.y * h) for land in landmarks.landmark])
            left_eyes = points[left_idx]
            right_eyes = points[right_idx]
            left_outline = get_convexhull(left_eyes)
            right_outline = get_convexhull(right_eyes)

            # Both eyes of one face share a single mask.
            mask = Image.new("L", image.size, "black")
            draw = ImageDraw.Draw(mask)
            for outline in (left_outline, right_outline):
                draw.polygon(outline, fill="white")
            masks.append(mask)

        bboxes = create_bbox_from_mask(masks, image.size)
        preview = draw_preview(preview, bboxes, masks)
        return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
170
+
171
+
172
def draw_preview(
    preview: Image.Image, bboxes: list[list[int]], masks: list[Image.Image]
) -> Image.Image:
    """Tint masked regions red (25% blend) and outline each bbox in red."""
    overlay = Image.new("RGB", preview.size, "red")
    for mask in masks:
        tinted = Image.composite(overlay, preview, mask)
        preview = Image.blend(preview, tinted, 0.25)

    pen = ImageDraw.Draw(preview)
    for box in bboxes:
        pen.rectangle(box, outline="red", width=2)

    return preview
adetailer/traceback.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import platform
5
+ import sys
6
+ from typing import Any, Callable
7
+
8
+ from rich.console import Console, Group
9
+ from rich.panel import Panel
10
+ from rich.table import Table
11
+ from rich.traceback import Traceback
12
+
13
+ from adetailer.__version__ import __version__
14
+
15
+
16
def processing(*args: Any) -> dict[str, Any]:
    """Extract generation parameters from the first Processing object in *args*.

    Returns {} when the webui modules are unavailable or no Processing
    argument is present.
    """
    try:
        from modules.processing import (
            StableDiffusionProcessingImg2Img,
            StableDiffusionProcessingTxt2Img,
        )
    except ImportError:
        return {}

    targets = (StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img)
    p = next((arg for arg in args if isinstance(arg, targets)), None)
    if p is None:
        return {}

    info = {
        "prompt": p.prompt,
        "negative_prompt": p.negative_prompt,
        "n_iter": p.n_iter,
        "batch_size": p.batch_size,
        "width": p.width,
        "height": p.height,
        "sampler_name": p.sampler_name,
        # hires-fix attrs only exist on txt2img processing objects
        "enable_hr": getattr(p, "enable_hr", False),
        "hr_upscaler": getattr(p, "hr_upscaler", ""),
    }
    info.update(sd_models())
    return info
50
+
51
+
52
def sd_models() -> dict[str, str]:
    """Current checkpoint/VAE/UNet names from webui settings; {} when absent."""
    try:
        from modules import shared

        opts = shared.opts
    except Exception:
        return {}

    attr_map = {
        "checkpoint": "sd_model_checkpoint",
        "vae": "sd_vae",
        "unet": "sd_unet",
    }
    return {label: getattr(opts, attr, "------") for label, attr in attr_map.items()}
65
+
66
+
67
def ad_args(*args: Any) -> dict[str, Any]:
    """Summarize the first active ADetailer unit found in *args*.

    A unit is active when it is a dict whose ad_model is not "None".
    Returns {} when no such unit exists.
    """
    active = [
        arg
        for arg in args
        if isinstance(arg, dict) and arg.get("ad_model", "None") != "None"
    ]
    if not active:
        return {}

    first = active[0]
    is_api = first.get("is_api", True)
    return {
        "version": __version__,
        "ad_model": first["ad_model"],
        "ad_prompt": first.get("ad_prompt", ""),
        "ad_negative_prompt": first.get("ad_negative_prompt", ""),
        "ad_controlnet_model": first.get("ad_controlnet_model", "None"),
        # UI runs replace is_api with a tuple sentinel; API input never can.
        "is_api": type(is_api) is not tuple,
    }
86
+
87
+
88
def sys_info() -> dict[str, Any]:
    """Environment summary (platform, python, webui version) for error reports."""
    try:
        import launch

        version = launch.git_tag()
        commit = launch.commit_hash()
    except Exception:
        # Old webui forks (or non-webui environments) lack these helpers.
        version = "Unknown (too old or vladmandic)"
        commit = "-------"

    info = {
        "Platform": platform.platform(),
        "Python": sys.version,
        "Version": version,
        "Commit": commit,
        "Commandline": sys.argv,
    }
    return info
105
+
106
+
107
def get_table(title: str, data: dict[str, Any]) -> Table:
    """Render *data* as a two-column rich table titled *title*."""
    table = Table(title=title, highlight=True)
    table.add_column(" ", justify="right", style="dim")
    table.add_column("Value")
    for key, value in data.items():
        text = value if isinstance(value, str) else repr(value)
        table.add_row(key, text)
    return table
117
+
118
+
119
def force_terminal_value():
    """Force rich color output when running under webui; None (auto) otherwise."""
    try:
        from modules.shared import cmd_opts
    except Exception:
        return None
    return True if hasattr(cmd_opts, "skip_torch_cuda_test") else None
126
+
127
+
128
def rich_traceback(func: Callable) -> Callable:
    """Decorator that re-raises exceptions with a rich-rendered report.

    On failure, system info, generation inputs, ADetailer args and the
    traceback are rendered into a string and used as the message of a new
    exception of the same type as the original; falls back to RuntimeError
    when the original type cannot be constructed from a single string.
    """
    from functools import wraps

    # Resolved once at decoration time: force color output under webui.
    force_terminal = force_terminal_value()

    @wraps(func)  # preserve the wrapped function's name/signature metadata
    def wrapper(*args, **kwargs):
        string = io.StringIO()
        # Render slightly narrower than the terminal so the panel border fits.
        width = Console().width
        width = width - 4 if width > 4 else None
        console = Console(file=string, force_terminal=force_terminal, width=width)
        try:
            return func(*args, **kwargs)
        except Exception as e:
            tables = [
                get_table(title, data)
                for title, data in [
                    ("System info", sys_info()),
                    ("Inputs", processing(*args)),
                    ("ADetailer", ad_args(*args)),
                ]
                if data  # skip sections that could not be collected
            ]
            tables.append(Traceback())

            console.print(Panel(Group(*tables)))
            output = "\n" + string.getvalue()

            try:
                error = e.__class__(output)
            except Exception:
                # Some exception types don't accept a single string argument.
                error = RuntimeError(output)
            raise error from None

    return wrapper
adetailer/ui.py ADDED
@@ -0,0 +1,532 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from functools import partial
4
+ from types import SimpleNamespace
5
+ from typing import Any
6
+
7
+ import gradio as gr
8
+
9
+ from adetailer import AFTER_DETAILER, __version__
10
+ from adetailer.args import AD_ENABLE, ALL_ARGS, MASK_MERGE_INVERT
11
+ from controlnet_ext import controlnet_exists, get_cn_models
12
+
13
# ControlNet inpaint preprocessors offered when an inpaint CN model is chosen.
cn_module_choices = [
    "inpaint_global_harmonious",
    "inpaint_only",
    "inpaint_only+lama",
]
18
+
19
+
20
class Widgets(SimpleNamespace):
    """Namespace holding one gradio component per ADetailer argument."""

    def tolist(self):
        # Components in ALL_ARGS order, for gradio input/output lists.
        return [getattr(self, attr) for attr in ALL_ARGS.attrs]
23
+
24
+
25
def gr_interactive(value: bool = True):
    """Gradio update that toggles a component's interactivity."""
    return gr.update(interactive=value)
27
+
28
+
29
def ordinal(n: int) -> str:
    """English ordinal of *n*: 1 -> "1st", 2 -> "2nd", 11 -> "11th"."""
    if 11 <= n % 100 <= 13:
        # 11th, 12th, 13th are exceptions to the last-digit rule.
        tail = "th"
    else:
        tail = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return f"{n}{tail}"
32
+
33
+
34
def suffix(n: int, c: str = " ") -> str:
    """Label suffix for unit *n*: "" for the first, else c + ordinal ("2nd")."""
    if n == 0:
        return ""
    return c + ordinal(n + 1)
36
+
37
+
38
def on_widget_change(state: dict, value: Any, *, attr: str):
    """Mirror a single widget's new value into the per-tab state dict."""
    state[attr] = value
    return state
41
+
42
+
43
def on_generate_click(state: dict, *values: Any):
    """Snapshot every widget value into *state* just before generation.

    is_api is set to a tuple sentinel so the backend can tell a UI run
    apart from an API call (tuples don't survive a JSON round-trip).
    """
    state.update(zip(ALL_ARGS.attrs, values))
    state["is_api"] = ()
    return state
48
+
49
+
50
def on_cn_model_update(cn_model: str):
    """Show the CN module dropdown only for inpaint ControlNet models."""
    if "inpaint" not in cn_model:
        return gr.update(visible=False, choices=["None"], value="None")
    return gr.update(
        visible=True, choices=cn_module_choices, value=cn_module_choices[0]
    )
56
+
57
+
58
def elem_id(item_id: str, n: int, is_img2img: bool) -> str:
    """Build a unique gradio elem_id for tab *n* on the t2i or i2i page."""
    page = "img2img" if is_img2img else "txt2img"
    return f"script_{page}_adetailer_{item_id}{suffix(n, '_')}"
62
+
63
+
64
def adui(
    num_models: int,
    is_img2img: bool,
    model_list: list[str],
    samplers: list[str],
    t2i_button: gr.Button,
    i2i_button: gr.Button,
):
    """Build the top-level ADetailer accordion with one tab per model slot.

    Returns (components, infotext_fields): the components handed back to the
    script (enable checkbox followed by one gr.State dict per tab), and the
    (component, infotext-name) pairs used for infotext paste support.
    """
    states = []
    infotext_fields = []
    eid = partial(elem_id, n=0, is_img2img=is_img2img)

    with gr.Accordion(AFTER_DETAILER, open=False, elem_id=eid("ad_main_accordion")):
        with gr.Row():
            with gr.Column(scale=6):
                ad_enable = gr.Checkbox(
                    label="Enable ADetailer",
                    value=False,
                    visible=True,
                    elem_id=eid("ad_enable"),
                )

            with gr.Column(scale=1, min_width=180):
                gr.Markdown(
                    f"v{__version__}",
                    elem_id=eid("ad_version"),
                )

        infotext_fields.append((ad_enable, AD_ENABLE.name))

        with gr.Group(), gr.Tabs():
            # One independent widget group (and gr.State dict) per tab.
            for n in range(num_models):
                with gr.Tab(ordinal(n + 1)):
                    state, infofields = one_ui_group(
                        n=n,
                        is_img2img=is_img2img,
                        model_list=model_list,
                        samplers=samplers,
                        t2i_button=t2i_button,
                        i2i_button=i2i_button,
                    )

                    states.append(state)
                    infotext_fields.extend(infofields)

    # components: [bool, dict, dict, ...]
    components = [ad_enable, *states]
    return components, infotext_fields
112
+
113
+
114
def one_ui_group(
    n: int,
    is_img2img: bool,
    model_list: list[str],
    samplers: list[str],
    t2i_button: gr.Button,
    i2i_button: gr.Button,
):
    """Build all widgets for the n-th ADetailer tab.

    Returns (state, infotext_fields); *state* is a gr.State dict refreshed
    with every widget value when the generate button is clicked.
    """
    w = Widgets()
    state = gr.State({})
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Row():
        # The first tab defaults to a real model; later tabs default to "None".
        model_choices = [*model_list, "None"] if n == 0 else ["None", *model_list]

        w.ad_model = gr.Dropdown(
            label="ADetailer model" + suffix(n),
            choices=model_choices,
            value=model_choices[0],
            visible=True,
            type="value",
            elem_id=eid("ad_model"),
        )

    with gr.Group():
        with gr.Row(elem_id=eid("ad_toprow_prompt")):
            w.ad_prompt = gr.Textbox(
                label="ad_prompt" + suffix(n),
                show_label=False,
                lines=3,
                placeholder="ADetailer prompt"
                + suffix(n)
                + "\nIf blank, the main prompt is used.",
                elem_id=eid("ad_prompt"),
            )

        with gr.Row(elem_id=eid("ad_toprow_negative_prompt")):
            w.ad_negative_prompt = gr.Textbox(
                label="ad_negative_prompt" + suffix(n),
                show_label=False,
                lines=2,
                placeholder="ADetailer negative prompt"
                + suffix(n)
                + "\nIf blank, the main negative prompt is used.",
                elem_id=eid("ad_negative_prompt"),
            )

    with gr.Group():
        with gr.Accordion(
            "Detection", open=False, elem_id=eid("ad_detection_accordion")
        ):
            detection(w, n, is_img2img)

        with gr.Accordion(
            "Mask Preprocessing",
            open=False,
            elem_id=eid("ad_mask_preprocessing_accordion"),
        ):
            mask_preprocessing(w, n, is_img2img)

        with gr.Accordion(
            "Inpainting", open=False, elem_id=eid("ad_inpainting_accordion")
        ):
            inpainting(w, n, is_img2img, samplers)

    with gr.Group():
        controlnet(w, n, is_img2img)

    # Snapshot every widget into the state dict right before generation.
    all_inputs = [state, *w.tolist()]
    target_button = i2i_button if is_img2img else t2i_button
    target_button.click(
        fn=on_generate_click, inputs=all_inputs, outputs=state, queue=False
    )

    infotext_fields = [(getattr(w, attr), name + suffix(n)) for attr, name in ALL_ARGS]

    return state, infotext_fields
191
+
192
+
193
def detection(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the Detection accordion: confidence and mask-area limits."""
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Row():
        with gr.Column():
            w.ad_confidence = gr.Slider(
                label="Detection model confidence threshold" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.3,
                visible=True,
                elem_id=eid("ad_confidence"),
            )

        with gr.Column(variant="compact"):
            # Area ratios are fractions of the whole image (0.0 - 1.0).
            w.ad_mask_min_ratio = gr.Slider(
                label="Mask min area ratio" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.001,
                value=0.0,
                visible=True,
                elem_id=eid("ad_mask_min_ratio"),
            )
            w.ad_mask_max_ratio = gr.Slider(
                label="Mask max area ratio" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.001,
                value=1.0,
                visible=True,
                elem_id=eid("ad_mask_max_ratio"),
            )
227
+
228
+
229
def mask_preprocessing(w: Widgets, n: int, is_img2img: bool):
    """Widgets for mask offset, erosion/dilation and the merge-mode radio."""
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Group():
        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_x_offset = gr.Slider(
                    label="Mask x(→) offset" + suffix(n),
                    minimum=-200,
                    maximum=200,
                    step=1,
                    value=0,
                    visible=True,
                    elem_id=eid("ad_x_offset"),
                )
                w.ad_y_offset = gr.Slider(
                    label="Mask y(↑) offset" + suffix(n),
                    minimum=-200,
                    maximum=200,
                    step=1,
                    value=0,
                    visible=True,
                    elem_id=eid("ad_y_offset"),
                )

            with gr.Column(variant="compact"):
                # Negative values erode, positive values dilate the mask.
                w.ad_dilate_erode = gr.Slider(
                    label="Mask erosion (-) / dilation (+)" + suffix(n),
                    minimum=-128,
                    maximum=128,
                    step=4,
                    value=4,
                    visible=True,
                    elem_id=eid("ad_dilate_erode"),
                )

        with gr.Row():
            w.ad_mask_merge_invert = gr.Radio(
                label="Mask merge mode" + suffix(n),
                choices=MASK_MERGE_INVERT,
                value="None",
                elem_id=eid("ad_mask_merge_invert"),
            )
272
+
273
+
274
def inpainting(w: Widgets, n: int, is_img2img: bool, samplers: list[str]):
    """Widgets for the Inpainting accordion.

    Each "Use separate ..." checkbox gates the interactivity of its paired
    control via a `.change` handler, so overrides are opt-in per tab.
    """
    eid = partial(elem_id, n=n, is_img2img=is_img2img)

    with gr.Group():
        with gr.Row():
            w.ad_mask_blur = gr.Slider(
                label="Inpaint mask blur" + suffix(n),
                minimum=0,
                maximum=64,
                step=1,
                value=4,
                visible=True,
                elem_id=eid("ad_mask_blur"),
            )

            w.ad_denoising_strength = gr.Slider(
                label="Inpaint denoising strength" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.4,
                visible=True,
                elem_id=eid("ad_denoising_strength"),
            )

        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_inpaint_only_masked = gr.Checkbox(
                    label="Inpaint only masked" + suffix(n),
                    value=True,
                    visible=True,
                    elem_id=eid("ad_inpaint_only_masked"),
                )
                w.ad_inpaint_only_masked_padding = gr.Slider(
                    label="Inpaint only masked padding, pixels" + suffix(n),
                    minimum=0,
                    maximum=256,
                    step=4,
                    value=32,
                    visible=True,
                    elem_id=eid("ad_inpaint_only_masked_padding"),
                )

                # Padding only applies when only-masked inpainting is on.
                w.ad_inpaint_only_masked.change(
                    gr_interactive,
                    inputs=w.ad_inpaint_only_masked,
                    outputs=w.ad_inpaint_only_masked_padding,
                    queue=False,
                )

            with gr.Column(variant="compact"):
                w.ad_use_inpaint_width_height = gr.Checkbox(
                    label="Use separate width/height" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_inpaint_width_height"),
                )

                w.ad_inpaint_width = gr.Slider(
                    label="inpaint width" + suffix(n),
                    minimum=64,
                    maximum=2048,
                    step=4,
                    value=512,
                    visible=True,
                    elem_id=eid("ad_inpaint_width"),
                )

                w.ad_inpaint_height = gr.Slider(
                    label="inpaint height" + suffix(n),
                    minimum=64,
                    maximum=2048,
                    step=4,
                    value=512,
                    visible=True,
                    elem_id=eid("ad_inpaint_height"),
                )

                # One checkbox drives two sliders, hence the tuple-returning lambda.
                w.ad_use_inpaint_width_height.change(
                    lambda value: (gr_interactive(value), gr_interactive(value)),
                    inputs=w.ad_use_inpaint_width_height,
                    outputs=[w.ad_inpaint_width, w.ad_inpaint_height],
                    queue=False,
                )

        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_use_steps = gr.Checkbox(
                    label="Use separate steps" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_steps"),
                )

                w.ad_steps = gr.Slider(
                    label="ADetailer steps" + suffix(n),
                    minimum=1,
                    maximum=150,
                    step=1,
                    value=28,
                    visible=True,
                    elem_id=eid("ad_steps"),
                )

                w.ad_use_steps.change(
                    gr_interactive,
                    inputs=w.ad_use_steps,
                    outputs=w.ad_steps,
                    queue=False,
                )

            with gr.Column(variant="compact"):
                w.ad_use_cfg_scale = gr.Checkbox(
                    label="Use separate CFG scale" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_cfg_scale"),
                )

                w.ad_cfg_scale = gr.Slider(
                    label="ADetailer CFG scale" + suffix(n),
                    minimum=0.0,
                    maximum=30.0,
                    step=0.5,
                    value=7.0,
                    visible=True,
                    elem_id=eid("ad_cfg_scale"),
                )

                w.ad_use_cfg_scale.change(
                    gr_interactive,
                    inputs=w.ad_use_cfg_scale,
                    outputs=w.ad_cfg_scale,
                    queue=False,
                )

        with gr.Row():
            with gr.Column(variant="compact"):
                w.ad_use_sampler = gr.Checkbox(
                    label="Use separate sampler" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_sampler"),
                )

                w.ad_sampler = gr.Dropdown(
                    label="ADetailer sampler" + suffix(n),
                    choices=samplers,
                    value=samplers[0],
                    visible=True,
                    elem_id=eid("ad_sampler"),
                )

                w.ad_use_sampler.change(
                    gr_interactive,
                    inputs=w.ad_use_sampler,
                    outputs=w.ad_sampler,
                    queue=False,
                )

            with gr.Column(variant="compact"):
                w.ad_use_noise_multiplier = gr.Checkbox(
                    label="Use separate noise multiplier" + suffix(n),
                    value=False,
                    visible=True,
                    elem_id=eid("ad_use_noise_multiplier"),
                )

                w.ad_noise_multiplier = gr.Slider(
                    label="Noise multiplier for img2img" + suffix(n),
                    minimum=0.5,
                    maximum=1.5,
                    step=0.01,
                    value=1.0,
                    visible=True,
                    elem_id=eid("ad_noise_multiplier"),
                )

                w.ad_use_noise_multiplier.change(
                    gr_interactive,
                    inputs=w.ad_use_noise_multiplier,
                    outputs=w.ad_noise_multiplier,
                    queue=False,
                )

        with gr.Row():
            w.ad_restore_face = gr.Checkbox(
                label="Restore faces after ADetailer" + suffix(n),
                value=False,
                elem_id=eid("ad_restore_face"),
            )
465
+
466
+
467
def controlnet(w: Widgets, n: int, is_img2img: bool):
    """Widgets for the ControlNet panel of one tab.

    All controls are disabled (non-interactive) when the ControlNet
    extension is not installed.
    """
    eid = partial(elem_id, n=n, is_img2img=is_img2img)
    cn_models = ["None", *get_cn_models()]

    with gr.Row(variant="panel"):
        with gr.Column(variant="compact"):
            w.ad_controlnet_model = gr.Dropdown(
                label="ControlNet model" + suffix(n),
                choices=cn_models,
                value="None",
                visible=True,
                type="value",
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_model"),
            )

            # Hidden until an inpaint-capable model is selected
            # (see on_cn_model_update below).
            w.ad_controlnet_module = gr.Dropdown(
                label="ControlNet module" + suffix(n),
                choices=cn_module_choices,
                value="inpaint_global_harmonious",
                visible=False,
                type="value",
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_module"),
            )

            w.ad_controlnet_weight = gr.Slider(
                label="ControlNet weight" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=1.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_weight"),
            )

            w.ad_controlnet_model.change(
                on_cn_model_update,
                inputs=w.ad_controlnet_model,
                outputs=w.ad_controlnet_module,
                queue=False,
            )

        with gr.Column(variant="compact"):
            # Guidance start/end are fractions of the sampling schedule.
            w.ad_controlnet_guidance_start = gr.Slider(
                label="ControlNet guidance start" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=0.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_guidance_start"),
            )

            w.ad_controlnet_guidance_end = gr.Slider(
                label="ControlNet guidance end" + suffix(n),
                minimum=0.0,
                maximum=1.0,
                step=0.01,
                value=1.0,
                visible=True,
                interactive=controlnet_exists,
                elem_id=eid("ad_controlnet_guidance_end"),
            )
adetailer/ultralytics.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+
5
+ import cv2
6
+ from PIL import Image
7
+
8
+ from adetailer import PredictOutput
9
+ from adetailer.common import create_mask_from_bbox
10
+
11
+
12
def ultralytics_predict(
    model_path: str | Path,
    image: Image.Image,
    confidence: float = 0.3,
    device: str = "",
) -> PredictOutput:
    """Run a YOLO model on *image* and package the detections.

    Returns an empty PredictOutput when nothing is detected. Otherwise the
    result holds xyxy bounding boxes, per-object masks (segmentation output
    when the model provides it, else masks drawn from the boxes), and an
    annotated preview image converted to RGB.
    """
    from ultralytics import YOLO

    model = YOLO(str(model_path))
    result = model(image, conf=confidence, device=device)[0]

    boxes = result.boxes.xyxy.cpu().numpy()
    if boxes.size == 0:
        return PredictOutput()
    bboxes = boxes.tolist()

    if result.masks is None:
        masks = create_mask_from_bbox(bboxes, image.size)
    else:
        masks = mask_to_pil(result.masks.data, image.size)

    # result.plot() returns a BGR array; convert for PIL.
    preview = Image.fromarray(cv2.cvtColor(result.plot(), cv2.COLOR_BGR2RGB))
    return PredictOutput(bboxes=bboxes, masks=masks, preview=preview)
39
+
40
+
41
def mask_to_pil(masks, shape: tuple[int, int]) -> list[Image.Image]:
    """Convert a stack of float masks into resized 'L'-mode PIL images.

    Parameters
    ----------
    masks:
        torch.Tensor, dtype=torch.float32, shape=(N, H, W). The device can
        be CUDA, but `to_pil_image` takes care of that.
    shape:
        (width, height) of the original image, used to resize each mask.
    """
    from torchvision.transforms.functional import to_pil_image

    # Iterating the tensor yields one (H, W) slice per detected object.
    return [to_pil_image(mask, mode="L").resize(shape) for mask in masks]
controlnet_ext/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from .controlnet_ext import ControlNetExt, controlnet_exists, get_cn_models
2
+
3
+ __all__ = [
4
+ "ControlNetExt",
5
+ "controlnet_exists",
6
+ "get_cn_models",
7
+ ]
controlnet_ext/controlnet_ext.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import re
5
+ from functools import lru_cache
6
+ from pathlib import Path
7
+
8
+ from modules import extensions, sd_models, shared
9
+ from modules.paths import data_path, models_path, script_path
10
+
11
+ ext_path = Path(data_path, "extensions")
12
+ ext_builtin_path = Path(script_path, "extensions-builtin")
13
+ controlnet_exists = False
14
+ controlnet_path = None
15
+ cn_base_path = ""
16
+
17
+ for extension in extensions.active():
18
+ if not extension.enabled:
19
+ continue
20
+ # For cases like sd-webui-controlnet-master
21
+ if "sd-webui-controlnet" in extension.name:
22
+ controlnet_exists = True
23
+ controlnet_path = Path(extension.path)
24
+ cn_base_path = ".".join(controlnet_path.parts[-2:])
25
+ break
26
+
27
+ cn_model_module = {
28
+ "inpaint": "inpaint_global_harmonious",
29
+ "scribble": "t2ia_sketch_pidi",
30
+ "lineart": "lineart_coarse",
31
+ "openpose": "openpose_full",
32
+ "tile": None,
33
+ }
34
+ cn_model_regex = re.compile("|".join(cn_model_module.keys()))
35
+
36
+
37
class ControlNetExt:
    """Thin bridge to the sd-webui-controlnet extension's external_code API."""

    def __init__(self):
        # "None" is always offered; real models are appended by init_controlnet.
        self.cn_models = ["None"]
        self.cn_available = False
        self.external_cn = None

    def init_controlnet(self):
        """Import controlnet's external_code module and collect usable models.

        Raises ImportError when the extension's module cannot be imported.
        """
        import_path = cn_base_path + ".scripts.external_code"

        self.external_cn = importlib.import_module(import_path, "external_code")
        self.cn_available = True
        models = self.external_cn.get_models()
        # Keep only models whose name matches a supported family (inpaint,
        # scribble, lineart, openpose, tile).
        self.cn_models.extend(m for m in models if cn_model_regex.search(m))

    def update_scripts_args(
        self,
        p,
        model: str,
        module: str | None,
        weight: float,
        guidance_start: float,
        guidance_end: float,
    ):
        """Attach a single ControlNet unit to the processing object *p*.

        No-op when ControlNet is unavailable or *model* is "None". When
        *module* is None, the default module for the model family is used.
        """
        if (not self.cn_available) or model == "None":
            return

        if module is None:
            for m, v in cn_model_module.items():
                if m in model:
                    module = v
                    break

        cn_units = [
            self.external_cn.ControlNetUnit(
                model=model,
                weight=weight,
                control_mode=self.external_cn.ControlMode.BALANCED,
                module=module,
                guidance_start=guidance_start,
                guidance_end=guidance_end,
                pixel_perfect=True,
            )
        ]

        self.external_cn.update_cn_script_in_processing(p, cn_units)
82
+
83
+
84
def get_cn_model_dirs() -> list[Path]:
    """Collect every directory that may contain ControlNet model files.

    Always includes the webui's models/ControlNet directory; optionally the
    extension's legacy models folder and any user-configured directories.
    """
    dirs = [Path(models_path, "ControlNet")]

    if controlnet_path is not None:
        legacy_dir = controlnet_path.joinpath("models")
    else:
        legacy_dir = None

    candidates = [
        legacy_dir,
        shared.opts.data.get("control_net_models_path", ""),
        shared.opts.data.get("controlnet_dir", ""),
    ]
    dirs.extend(Path(candidate) for candidate in candidates if candidate)

    return dirs
99
+
100
+
101
@lru_cache
def _get_cn_models() -> list[str]:
    """
    Since we can't import ControlNet, we use a function that does something like
    controlnet's `list(global_state.cn_models_names.values())`.

    Scans every candidate model directory for checkpoints whose names match
    a supported family and returns display names of the form "stem [hash]".
    Cached for the lifetime of the process.
    """
    cn_model_exts = (".pt", ".pth", ".ckpt", ".safetensors")
    dirs = get_cn_model_dirs()
    # Honor controlnet's own name-filter option, if set.
    name_filter = shared.opts.data.get("control_net_models_name_filter", "")
    name_filter = name_filter.strip(" ").lower()

    model_paths = []

    for base in dirs:
        if not base.exists():
            continue

        for p in base.rglob("*"):
            if (
                p.is_file()
                and p.suffix in cn_model_exts
                and cn_model_regex.search(p.name)
            ):
                if name_filter and name_filter not in p.name.lower():
                    continue
                model_paths.append(p)
    model_paths.sort(key=lambda p: p.name)

    models = []
    for p in model_paths:
        model_hash = sd_models.model_hash(p)
        name = f"{p.stem} [{model_hash}]"
        models.append(name)
    return models
135
+
136
+
137
def get_cn_models() -> list[str]:
    """Public accessor: cached model list when ControlNet is installed, else []."""
    return _get_cn_models() if controlnet_exists else []
controlnet_ext/restore.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from contextlib import contextmanager
4
+
5
+ from modules import img2img, processing, shared
6
+
7
+
8
def cn_restore_unet_hook(p, cn_latest_network):
    """Undo ControlNet's UNet hijack on *p*'s model, if a network is active."""
    if cn_latest_network is None:
        return
    unet = p.sd_model.model.diffusion_model
    cn_latest_network.restore(unet)
12
+
13
+
14
class CNHijackRestore:
    """Context manager that temporarily undoes ControlNet's monkeypatches.

    ControlNet stashes the originals it replaces as
    ``__controlnet_original_*`` attributes on the webui modules; while this
    context is active, the original functions are swapped back in, and the
    hijacked versions are reinstated on exit.
    """

    def __init__(self):
        # Detect which functions ControlNet has hijacked at construction time.
        self.process = hasattr(processing, "__controlnet_original_process_images_inner")
        self.img2img = hasattr(img2img, "__controlnet_original_process_batch")

    def __enter__(self):
        if self.process:
            self.orig_process = processing.process_images_inner
            processing.process_images_inner = getattr(
                processing, "__controlnet_original_process_images_inner"
            )
        if self.img2img:
            self.orig_img2img = img2img.process_batch
            img2img.process_batch = getattr(
                img2img, "__controlnet_original_process_batch"
            )

    def __exit__(self, *args, **kwargs):
        if self.process:
            processing.process_images_inner = self.orig_process
        if self.img2img:
            img2img.process_batch = self.orig_img2img
36
+
37
+
38
@contextmanager
def cn_allow_script_control():
    """Temporarily force ControlNet's 'allow script control' option on.

    Plain passthrough when the option is not registered (i.e. when the
    ControlNet extension is absent); otherwise the previous value is
    restored on exit even if the body raises.
    """
    key = "control_net_allow_script_control"
    if key not in shared.opts.data:
        yield
        return

    previous = shared.opts.data[key]
    try:
        shared.opts.data[key] = True
        yield
    finally:
        shared.opts.data[key] = previous
install.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib.util
4
+ import subprocess
5
+ import sys
6
+ from importlib.metadata import version # python >= 3.8
7
+
8
+ from packaging.version import parse
9
+
10
# PyPI distribution names whose importable module name differs.
import_name = {"py-cpuinfo": "cpuinfo"}


def is_installed(
    package: str, min_version: str | None = None, max_version: str | None = None
):
    """Report whether *package* is importable and within a version range.

    Bounds are inclusive; a missing/empty bound is treated as unbounded.
    Returns False instead of raising when metadata lookup or version
    parsing fails.
    """
    module = import_name.get(package, package)
    try:
        spec = importlib.util.find_spec(module)
    except ModuleNotFoundError:
        return False

    if spec is None:
        return False

    # No bounds at all: importability alone is enough.
    if not min_version and not max_version:
        return True

    try:
        low = parse(min_version or "0.0.0")
        high = parse(max_version or "99999999.99999999.99999999")
        return low <= parse(version(package)) <= high
    except Exception:
        return False
38
+
39
+
40
def run_pip(*args):
    """Invoke the current interpreter's pip to install *args* (best effort).

    NOTE(review): the return code is deliberately not checked, matching the
    original behavior — installation failures do not abort webui startup.
    """
    cmd = [sys.executable, "-m", "pip", "install", *args]
    subprocess.run(cmd)
42
+
43
+
44
def install():
    """Ensure ADetailer's dependencies are present, upgrading as needed.

    Each entry is (package, minimum version, maximum version); any package
    that is missing or out of range is (re)installed with a matching pip
    version specifier.
    """
    deps = [
        # requirements
        ("ultralytics", "8.0.97", None),
        ("mediapipe", "0.10.0", None),
        ("huggingface_hub", None, None),
        ("pydantic", "1.10.8", None),
        ("rich", "13.4.2", None),
        # ultralytics
        ("py-cpuinfo", None, None),
    ]

    for pkg, low, high in deps:
        if is_installed(pkg, low, high):
            continue

        # Build a pip requirement specifier from the available bounds.
        bounds = []
        if low:
            bounds.append(f">={low}")
        if high:
            bounds.append(f"<={high}")
        run_pip("-U", pkg + ",".join(bounds))
68
+
69
+
70
# Respect the webui's --skip-install flag when launched from the webui; when
# the launch module is unavailable (e.g. running standalone), always install.
try:
    import launch

    skip_install = launch.args.skip_install
except Exception:
    skip_install = False

if not skip_install:
    install()
+ install()
preload.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+
3
+
4
def preload(parser: argparse.ArgumentParser):
    """Register ADetailer's command-line flags on the webui argument parser."""
    flag_kwargs = {
        "action": "store_true",
        "help": "Don't use adetailer models from huggingface",
    }
    parser.add_argument("--ad-no-huggingface", **flag_kwargs)
pyproject.toml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "adetailer"
3
+ description = "An object detection and auto-mask extension for stable diffusion webui."
4
+ authors = [
5
+ {name = "dowon", email = "ks2515@naver.com"},
6
+ ]
7
+ requires-python = ">=3.8,<3.12"
8
+ readme = "README.md"
9
+ license = {text = "AGPL-3.0"}
10
+
11
+ [project.urls]
12
+ repository = "https://github.com/Bing-su/adetailer"
13
+
14
+ [tool.isort]
15
+ profile = "black"
16
+ known_first_party = ["launch", "modules"]
17
+
18
+ [tool.ruff]
19
+ select = ["A", "B", "C4", "C90", "E", "EM", "F", "FA", "I001", "ISC", "N", "PIE", "PT", "RET", "RUF", "SIM", "UP", "W"]
20
+ ignore = ["B008", "B905", "E501", "F401", "UP007"]
21
+
22
+ [tool.ruff.isort]
23
+ known-first-party = ["launch", "modules"]
24
+
25
+ [tool.ruff.per-file-ignores]
26
+ "sd_webui/*.py" = ["B027", "F403"]
scripts/!adetailer.py ADDED
@@ -0,0 +1,805 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import platform
5
+ import re
6
+ import sys
7
+ import traceback
8
+ from contextlib import contextmanager
9
+ from copy import copy, deepcopy
10
+ from functools import partial
11
+ from pathlib import Path
12
+ from textwrap import dedent
13
+ from typing import Any
14
+
15
+ import gradio as gr
16
+ import torch
17
+ from rich import print
18
+
19
+ import modules
20
+ from adetailer import (
21
+ AFTER_DETAILER,
22
+ __version__,
23
+ get_models,
24
+ mediapipe_predict,
25
+ ultralytics_predict,
26
+ )
27
+ from adetailer.args import ALL_ARGS, BBOX_SORTBY, ADetailerArgs, EnableChecker
28
+ from adetailer.common import PredictOutput
29
+ from adetailer.mask import filter_by_ratio, mask_preprocess, sort_bboxes
30
+ from adetailer.traceback import rich_traceback
31
+ from adetailer.ui import adui, ordinal, suffix
32
+ from controlnet_ext import ControlNetExt, controlnet_exists, get_cn_models
33
+ from controlnet_ext.restore import (
34
+ CNHijackRestore,
35
+ cn_allow_script_control,
36
+ )
37
+ from sd_webui import images, safe, script_callbacks, scripts, shared
38
+ from sd_webui.devices import NansException
39
+ from sd_webui.paths import data_path, models_path
40
+ from sd_webui.processing import (
41
+ Processed,
42
+ StableDiffusionProcessingImg2Img,
43
+ create_infotext,
44
+ process_images,
45
+ )
46
+ from sd_webui.sd_samplers import all_samplers
47
+ from sd_webui.shared import cmd_opts, opts, state
48
+
49
# --ad-no-huggingface (registered in preload.py) disables model downloads.
no_huggingface = getattr(cmd_opts, "ad_no_huggingface", False)
adetailer_dir = Path(models_path, "adetailer")
model_mapping = get_models(adetailer_dir, huggingface=not no_huggingface)
# Filled in elsewhere via script callbacks — presumably the webui generate
# buttons; verify against the callback registration (not visible here).
txt2img_submit_button = img2img_submit_button = None
SCRIPT_DEFAULT = "dynamic_prompting,dynamic_thresholding,wildcard_recursive,wildcards"

# Create the model directory only when its parent exists and is writable.
if (
    not adetailer_dir.exists()
    and adetailer_dir.parent.exists()
    and os.access(adetailer_dir.parent, os.W_OK)
):
    adetailer_dir.mkdir()

print(
    f"[-] ADetailer initialized. version: {__version__}, num models: {len(model_mapping)}"
)
65
+
66
+
67
@contextmanager
def change_torch_load():
    """Swap torch.load for the webui's unsafe loader while the context is open.

    The original loader is always restored, even if the body raises.
    """
    original_load = torch.load
    try:
        torch.load = safe.unsafe_torch_load
        yield
    finally:
        torch.load = original_load
75
+
76
+
77
@contextmanager
def pause_total_tqdm():
    """Disable the webui's nested tqdm option for the duration of the context."""
    previous = opts.data.get("multiple_tqdm", True)
    try:
        opts.data["multiple_tqdm"] = False
        yield
    finally:
        opts.data["multiple_tqdm"] = previous
85
+
86
+
87
@contextmanager
def preseve_prompts(p):
    """Restore p's prompt lists after the block, undoing in-place edits.

    (Function name kept as-is — including the 'preseve' typo — because
    callers import it by this name.)
    """
    saved_prompts = copy(p.all_prompts)
    saved_negatives = copy(p.all_negative_prompts)
    try:
        yield
    finally:
        p.all_prompts = saved_prompts
        p.all_negative_prompts = saved_negatives
96
+
97
+
98
+ class AfterDetailerScript(scripts.Script):
99
    def __init__(self):
        """Set up per-session state; ControlNet wiring is deferred until needed."""
        super().__init__()
        self.ultralytics_device = self.get_ultralytics_device()

        # Lazily initialized by init_controlnet_ext / the processing hooks.
        self.controlnet_ext = None
        self.cn_script = None
        self.cn_latest_network = None
107
    def __repr__(self):
        """Debug representation including the extension version."""
        return f"{self.__class__.__name__}(version={__version__})"
109
+
110
    def title(self):
        """Script title shown by the webui (also used as the accordion label)."""
        return AFTER_DETAILER
112
+
113
    def show(self, is_img2img):
        """Render this script's UI on both txt2img and img2img pages."""
        return scripts.AlwaysVisible
115
+
116
    def ui(self, is_img2img):
        """Build the gradio UI for this script (called per t2i/i2i page)."""
        num_models = opts.data.get("ad_max_models", 2)
        model_list = list(model_mapping.keys())
        samplers = [sampler.name for sampler in all_samplers]

        components, infotext_fields = adui(
            num_models,
            is_img2img,
            model_list,
            samplers,
            txt2img_submit_button,
            img2img_submit_button,
        )

        self.infotext_fields = infotext_fields
        return components
132
+
133
    def init_controlnet_ext(self) -> None:
        """Lazily create ControlNetExt; log (not raise) on import failure."""
        if self.controlnet_ext is not None:
            return
        self.controlnet_ext = ControlNetExt()

        if controlnet_exists:
            try:
                self.controlnet_ext.init_controlnet()
            except ImportError:
                error = traceback.format_exc()
                print(
                    f"[-] ADetailer: ControlNetExt init failed:\n{error}",
                    file=sys.stderr,
                )
147
+
148
    def update_controlnet_args(self, p, args: ADetailerArgs) -> None:
        """Forward the tab's ControlNet settings onto the processing object *p*."""
        if self.controlnet_ext is None:
            self.init_controlnet_ext()

        if (
            self.controlnet_ext is not None
            and self.controlnet_ext.cn_available
            and args.ad_controlnet_model != "None"
        ):
            self.controlnet_ext.update_scripts_args(
                p,
                model=args.ad_controlnet_model,
                module=args.ad_controlnet_module,
                weight=args.ad_controlnet_weight,
                guidance_start=args.ad_controlnet_guidance_start,
                guidance_end=args.ad_controlnet_guidance_end,
            )
165
+
166
+ def is_ad_enabled(self, *args_) -> bool:
167
+ arg_list = [arg for arg in args_ if isinstance(arg, dict)]
168
+ if not args_ or not arg_list or not isinstance(args_[0], (bool, dict)):
169
+ message = f"""
170
+ [-] ADetailer: Invalid arguments passed to ADetailer.
171
+ input: {args_!r}
172
+ """
173
+ raise ValueError(dedent(message))
174
+ enable = args_[0] if isinstance(args_[0], bool) else True
175
+ checker = EnableChecker(enable=enable, arg_list=arg_list)
176
+ return checker.is_enabled()
177
+
178
    def get_args(self, p, *args_) -> list[ADetailerArgs]:
        """
        `args_` is at least 1 in length by `is_ad_enabled` immediately above

        Validates each per-tab dict into an ADetailerArgs model; raises
        ValueError with a per-attribute dump when validation fails.
        """
        args = [arg for arg in args_ if isinstance(arg, dict)]

        if not args:
            message = f"[-] ADetailer: Invalid arguments passed to ADetailer: {args_!r}"
            raise ValueError(message)

        if hasattr(p, "adetailer_xyz"):
            # XYZ-grid support: override the first tab's values in place.
            args[0].update(p.adetailer_xyz)

        all_inputs = []

        for n, arg_dict in enumerate(args, 1):
            try:
                inp = ADetailerArgs(**arg_dict)
            except ValueError as e:
                msgs = [
                    f"[-] ADetailer: ValidationError when validating {ordinal(n)} arguments: {e}\n"
                ]
                # Dump every known attribute to make the bad input obvious.
                for attr in ALL_ARGS.attrs:
                    arg = arg_dict.get(attr)
                    dtype = type(arg)
                    arg = "DEFAULT" if arg is None else repr(arg)
                    msgs.append(f"    {attr}: {arg} ({dtype})")
                raise ValueError("\n".join(msgs)) from e

            all_inputs.append(inp)

        return all_inputs
210
+
211
+ def extra_params(self, arg_list: list[ADetailerArgs]) -> dict:
212
+ params = {}
213
+ for n, args in enumerate(arg_list):
214
+ params.update(args.extra_params(suffix=suffix(n)))
215
+ params["ADetailer version"] = __version__
216
+ return params
217
+
218
+ @staticmethod
219
+ def get_ultralytics_device() -> str:
220
+ if "adetailer" in shared.cmd_opts.use_cpu:
221
+ return "cpu"
222
+
223
+ if platform.system() == "Darwin":
224
+ return ""
225
+
226
+ if any(getattr(cmd_opts, vram, False) for vram in ["lowvram", "medvram"]):
227
+ return "cpu"
228
+
229
+ return ""
230
+
231
+ def prompt_blank_replacement(
232
+ self, all_prompts: list[str], i: int, default: str
233
+ ) -> str:
234
+ if not all_prompts:
235
+ return default
236
+ if i < len(all_prompts):
237
+ return all_prompts[i]
238
+ j = i % len(all_prompts)
239
+ return all_prompts[j]
240
+
241
+ def _get_prompt(
242
+ self, ad_prompt: str, all_prompts: list[str], i: int, default: str
243
+ ) -> list[str]:
244
+ prompts = re.split(r"\s*\[SEP\]\s*", ad_prompt)
245
+ blank_replacement = self.prompt_blank_replacement(all_prompts, i, default)
246
+ for n in range(len(prompts)):
247
+ if not prompts[n]:
248
+ prompts[n] = blank_replacement
249
+ return prompts
250
+
251
+ def get_prompt(self, p, args: ADetailerArgs) -> tuple[list[str], list[str]]:
252
+ i = p._ad_idx
253
+
254
+ prompt = self._get_prompt(args.ad_prompt, p.all_prompts, i, p.prompt)
255
+ negative_prompt = self._get_prompt(
256
+ args.ad_negative_prompt, p.all_negative_prompts, i, p.negative_prompt
257
+ )
258
+
259
+ return prompt, negative_prompt
260
+
261
+ def get_seed(self, p) -> tuple[int, int]:
262
+ i = p._ad_idx
263
+
264
+ if not p.all_seeds:
265
+ seed = p.seed
266
+ elif i < len(p.all_seeds):
267
+ seed = p.all_seeds[i]
268
+ else:
269
+ j = i % len(p.all_seeds)
270
+ seed = p.all_seeds[j]
271
+
272
+ if not p.all_subseeds:
273
+ subseed = p.subseed
274
+ elif i < len(p.all_subseeds):
275
+ subseed = p.all_subseeds[i]
276
+ else:
277
+ j = i % len(p.all_subseeds)
278
+ subseed = p.all_subseeds[j]
279
+
280
+ return seed, subseed
281
+
282
+ def get_width_height(self, p, args: ADetailerArgs) -> tuple[int, int]:
283
+ if args.ad_use_inpaint_width_height:
284
+ width = args.ad_inpaint_width
285
+ height = args.ad_inpaint_height
286
+ else:
287
+ width = p.width
288
+ height = p.height
289
+
290
+ return width, height
291
+
292
+ def get_steps(self, p, args: ADetailerArgs) -> int:
293
+ if args.ad_use_steps:
294
+ return args.ad_steps
295
+ return p.steps
296
+
297
+ def get_cfg_scale(self, p, args: ADetailerArgs) -> float:
298
+ if args.ad_use_cfg_scale:
299
+ return args.ad_cfg_scale
300
+ return p.cfg_scale
301
+
302
+ def get_sampler(self, p, args: ADetailerArgs) -> str:
303
+ sampler_name = args.ad_sampler if args.ad_use_sampler else p.sampler_name
304
+
305
+ if sampler_name in ["PLMS", "UniPC"]:
306
+ sampler_name = "Euler"
307
+ return sampler_name
308
+
309
+ def get_initial_noise_multiplier(self, p, args: ADetailerArgs) -> float | None:
310
+ if args.ad_use_noise_multiplier:
311
+ return args.ad_noise_multiplier
312
+ return None
313
+
314
+ @staticmethod
315
+ def infotext(p) -> str:
316
+ return create_infotext(
317
+ p, p.all_prompts, p.all_seeds, p.all_subseeds, None, 0, 0
318
+ )
319
+
320
+ def write_params_txt(self, p) -> None:
321
+ infotext = self.infotext(p)
322
+ params_txt = Path(data_path, "params.txt")
323
+ params_txt.write_text(infotext, encoding="utf-8")
324
+
325
    def script_filter(self, p, args: ADetailerArgs):
        """Return a (script runner copy, script_args copy) pair restricted to
        the scripts that should run during the nested inpaint pass.

        ControlNet units inside the copied args are always disabled first.
        When the `ad_only_seleted_scripts` option is on, only scripts whose
        filename stem appears in the `ad_script_names` option survive
        (plus "controlnet" itself when a CN model is selected on this tab).
        """
        # Shallow-copy the runner, deep-copy the args: the runner's script
        # objects stay shared, but arg mutations must not leak back into p.
        script_runner = copy(p.scripts)
        script_args = deepcopy(p.script_args)
        self.disable_controlnet_units(script_args)

        ad_only_seleted_scripts = opts.data.get("ad_only_seleted_scripts", True)
        if not ad_only_seleted_scripts:
            return script_runner, script_args

        ad_script_names = opts.data.get("ad_script_names", SCRIPT_DEFAULT)
        # Accept both the raw and whitespace-stripped form of each name.
        script_names_set = {
            name
            for script_name in ad_script_names.split(",")
            for name in (script_name, script_name.strip())
        }

        if args.ad_controlnet_model != "None":
            script_names_set.add("controlnet")

        filtered_alwayson = []
        for script_object in script_runner.alwayson_scripts:
            filepath = script_object.filename
            filename = Path(filepath).stem
            if filename in script_names_set:
                filtered_alwayson.append(script_object)
                # Keep a handle on the live ControlNet script object and its
                # network — presumably needed to restore CN state later;
                # confirm against the hijack/restore helpers.
                if filename == "controlnet":
                    self.cn_script = script_object
                    self.cn_latest_network = script_object.latest_network

        script_runner.alwayson_scripts = filtered_alwayson
        return script_runner, script_args
356
+
357
+ def disable_controlnet_units(self, script_args: list[Any]) -> None:
358
+ for obj in script_args:
359
+ if "controlnet" in obj.__class__.__name__.lower():
360
+ if hasattr(obj, "enabled"):
361
+ obj.enabled = False
362
+ if hasattr(obj, "input_mode"):
363
+ obj.input_mode = getattr(obj.input_mode, "SIMPLE", "simple")
364
+
365
+ elif isinstance(obj, dict) and "module" in obj:
366
+ obj["enabled"] = False
367
+
368
    def get_i2i_p(self, p, args: ADetailerArgs, image):
        """Build the img2img (inpaint) processing object used to redraw one
        detected region of `image`.

        Everything not overridden by the tab settings (`args`) is inherited
        from the parent processing object `p`. Prompts and the mask are
        placeholders here — they are assigned per-detection later.
        """
        seed, subseed = self.get_seed(p)
        width, height = self.get_width_height(p, args)
        steps = self.get_steps(p, args)
        cfg_scale = self.get_cfg_scale(p, args)
        initial_noise_multiplier = self.get_initial_noise_multiplier(p, args)
        sampler_name = self.get_sampler(p, args)

        i2i = StableDiffusionProcessingImg2Img(
            init_images=[image],
            resize_mode=0,
            denoising_strength=args.ad_denoising_strength,
            mask=None,  # the detection mask is set per region later
            mask_blur=args.ad_mask_blur,
            inpainting_fill=1,
            inpaint_full_res=args.ad_inpaint_only_masked,
            inpaint_full_res_padding=args.ad_inpaint_only_masked_padding,
            inpainting_mask_invert=0,
            initial_noise_multiplier=initial_noise_multiplier,
            sd_model=p.sd_model,
            outpath_samples=p.outpath_samples,
            outpath_grids=p.outpath_grids,
            prompt="",  # replace later
            negative_prompt="",
            styles=p.styles,
            seed=seed,
            subseed=subseed,
            subseed_strength=p.subseed_strength,
            seed_resize_from_h=p.seed_resize_from_h,
            seed_resize_from_w=p.seed_resize_from_w,
            sampler_name=sampler_name,
            batch_size=1,
            n_iter=1,
            steps=steps,
            cfg_scale=cfg_scale,
            width=width,
            height=height,
            restore_faces=args.ad_restore_face,
            tiling=p.tiling,
            extra_generation_params=p.extra_generation_params,
            do_not_save_samples=True,
            do_not_save_grid=True,
        )

        # Presumably prompt-conditioning caches; reset so the nested run
        # starts clean — confirm against modules.processing.
        i2i.cached_c = [None, None]
        i2i.cached_uc = [None, None]
        i2i.scripts, i2i.script_args = self.script_filter(p, args)
        i2i._disable_adetailer = True  # prevent recursive ADetailer runs

        if args.ad_controlnet_model != "None":
            self.update_controlnet_args(i2i, args)
        else:
            i2i.control_net_enabled = False

        return i2i
423
+
424
    def save_image(self, p, image, *, condition: str, suffix: str) -> None:
        """Save `image` through webui's images.save_image, but only when the
        boolean option named by `condition` is enabled.

        `suffix` is appended to the generated file name. The prompt stored
        with the image is the per-image entry of `p.all_prompts` (index
        wraps around), falling back to `p.prompt`.
        """
        i = p._ad_idx
        if p.all_prompts:
            i %= len(p.all_prompts)
            save_prompt = p.all_prompts[i]
        else:
            save_prompt = p.prompt
        seed, _ = self.get_seed(p)

        if opts.data.get(condition, False):
            images.save_image(
                image=image,
                path=p.outpath_samples,
                basename="",
                seed=seed,
                prompt=save_prompt,
                extension=opts.samples_format,
                info=self.infotext(p),
                p=p,
                suffix=suffix,
            )
445
+
446
+ def get_ad_model(self, name: str):
447
+ if name not in model_mapping:
448
+ msg = f"[-] ADetailer: Model {name!r} not found. Available models: {list(model_mapping.keys())}"
449
+ raise ValueError(msg)
450
+ return model_mapping[name]
451
+
452
+ def sort_bboxes(self, pred: PredictOutput) -> PredictOutput:
453
+ sortby = opts.data.get("ad_bbox_sortby", BBOX_SORTBY[0])
454
+ sortby_idx = BBOX_SORTBY.index(sortby)
455
+ return sort_bboxes(pred, sortby_idx)
456
+
457
+ def pred_preprocessing(self, pred: PredictOutput, args: ADetailerArgs):
458
+ pred = filter_by_ratio(
459
+ pred, low=args.ad_mask_min_ratio, high=args.ad_mask_max_ratio
460
+ )
461
+ pred = self.sort_bboxes(pred)
462
+ return mask_preprocess(
463
+ pred.masks,
464
+ kernel=args.ad_dilate_erode,
465
+ x_offset=args.ad_x_offset,
466
+ y_offset=args.ad_y_offset,
467
+ merge_invert=args.ad_mask_merge_invert,
468
+ )
469
+
470
+ @staticmethod
471
+ def ensure_rgb_image(image: Any):
472
+ if hasattr(image, "mode") and image.mode != "RGB":
473
+ image = image.convert("RGB")
474
+ return image
475
+
476
+ @staticmethod
477
+ def i2i_prompts_replace(
478
+ i2i, prompts: list[str], negative_prompts: list[str], j: int
479
+ ) -> None:
480
+ i1 = min(j, len(prompts) - 1)
481
+ i2 = min(j, len(negative_prompts) - 1)
482
+ prompt = prompts[i1]
483
+ negative_prompt = negative_prompts[i2]
484
+ i2i.prompt = prompt
485
+ i2i.negative_prompt = negative_prompt
486
+
487
+ @staticmethod
488
+ def compare_prompt(p, processed, n: int = 0):
489
+ if p.prompt != processed.all_prompts[0]:
490
+ print(
491
+ f"[-] ADetailer: applied {ordinal(n + 1)} ad_prompt: {processed.all_prompts[0]!r}"
492
+ )
493
+
494
+ if p.negative_prompt != processed.all_negative_prompts[0]:
495
+ print(
496
+ f"[-] ADetailer: applied {ordinal(n + 1)} ad_negative_prompt: {processed.all_negative_prompts[0]!r}"
497
+ )
498
+
499
+ @staticmethod
500
+ def need_call_process(p) -> bool:
501
+ i = p._ad_idx
502
+ bs = p.batch_size
503
+ return i % bs == bs - 1
504
+
505
+ @staticmethod
506
+ def need_call_postprocess(p) -> bool:
507
+ i = p._ad_idx
508
+ bs = p.batch_size
509
+ return i % bs == 0
510
+
511
+ @rich_traceback
512
+ def process(self, p, *args_):
513
+ if getattr(p, "_disable_adetailer", False):
514
+ return
515
+
516
+ if self.is_ad_enabled(*args_):
517
+ arg_list = self.get_args(p, *args_)
518
+ extra_params = self.extra_params(arg_list)
519
+ p.extra_generation_params.update(extra_params)
520
+
521
    def _postprocess_image(self, p, pp, args: ADetailerArgs, *, n: int = 0) -> bool:
        """
        Run one ADetailer tab (`args`, the `n`-th) on the image in `pp`:
        detect regions, then inpaint each detection in turn, feeding the
        output of one detection into the next.

        Returns
        -------
        bool

            `True` if image was processed, `False` otherwise.
        """
        if state.interrupted:
            return False

        i = p._ad_idx

        i2i = self.get_i2i_p(p, args, pp.image)
        seed, subseed = self.get_seed(p)
        ad_prompts, ad_negatives = self.get_prompt(p, args)

        # "mediapipe_*" models use their own predictor and need no device arg.
        is_mediapipe = args.ad_model.lower().startswith("mediapipe")

        kwargs = {}
        if is_mediapipe:
            predictor = mediapipe_predict
            ad_model = args.ad_model
        else:
            predictor = ultralytics_predict
            ad_model = self.get_ad_model(args.ad_model)
            kwargs["device"] = self.ultralytics_device

        # change_torch_load presumably swaps torch.load while the detector
        # weights are loaded — confirm against adetailer.common.
        with change_torch_load():
            pred = predictor(ad_model, pp.image, args.ad_confidence, **kwargs)

        masks = self.pred_preprocessing(pred, args)

        if not masks:
            print(
                f"[-] ADetailer: nothing detected on image {i + 1} with {ordinal(n + 1)} settings."
            )
            return False

        self.save_image(
            p,
            pred.preview,
            condition="ad_save_previews",
            suffix="-ad-preview" + suffix(n, "-"),
        )

        steps = len(masks)
        processed = None
        state.job_count += steps

        if is_mediapipe:
            print(f"mediapipe: {steps} detected.")

        p2 = copy(i2i)
        for j in range(steps):
            p2.image_mask = masks[j]
            p2.init_images[0] = self.ensure_rgb_image(p2.init_images[0])
            self.i2i_prompts_replace(p2, ad_prompts, ad_negatives, j)

            # A prompt segment of exactly "[SKIP]" leaves this detection untouched.
            if re.match(r"^\s*\[SKIP\]\s*$", p2.prompt):
                continue

            # Vary the seed per detection so regions don't repeat.
            p2.seed = seed + j
            p2.subseed = subseed + j

            try:
                processed = process_images(p2)
            except NansException as e:
                msg = f"[-] ADetailer: 'NansException' occurred with {ordinal(n + 1)} settings.\n{e}"
                print(msg, file=sys.stderr)
                continue
            finally:
                p2.close()

            self.compare_prompt(p2, processed, n=n)
            # Chain detections: the next one inpaints this one's output.
            p2 = copy(i2i)
            p2.init_images = [processed.images[0]]

        # `processed` stays None when every detection was skipped or failed.
        if processed is not None:
            pp.image = processed.images[0]
            return True

        return False
604
+
605
    @rich_traceback
    def postprocess_image(self, p, pp, *args_):
        """Main entry point: run every enabled ADetailer tab on the finished
        image in `pp`, replaying the other scripts' process/postprocess
        hooks at batch boundaries so they see a consistent lifecycle.
        """
        if getattr(p, "_disable_adetailer", False):
            return

        if not self.is_ad_enabled(*args_):
            return

        # Per-image counter across the whole generation (first image -> 0).
        p._ad_idx = getattr(p, "_ad_idx", -1) + 1
        init_image = copy(pp.image)
        arg_list = self.get_args(p, *args_)

        # First image of a batch: give other scripts their postprocess call
        # on a throwaway copy before ADetailer mutates anything.
        if p.scripts is not None and self.need_call_postprocess(p):
            dummy = Processed(p, [], p.seed, "")
            with preseve_prompts(p):
                p.scripts.postprocess(copy(p), dummy)

        is_processed = False
        with CNHijackRestore(), pause_total_tqdm(), cn_allow_script_control():
            for n, args in enumerate(arg_list):
                if args.ad_model == "None":
                    continue
                is_processed |= self._postprocess_image(p, pp, args, n=n)

        if is_processed:
            self.save_image(
                p, init_image, condition="ad_save_images_before", suffix="-ad-before"
            )

        # Last image of a batch: replay the other scripts' process hook.
        if p.scripts is not None and self.need_call_process(p):
            with preseve_prompts(p):
                p.scripts.process(copy(p))

        try:
            # NOTE(review): rewrites params.txt after the last image of each
            # pass over all_prompts — presumably to undo writes made by the
            # nested i2i runs; confirm. Any failure here is deliberately
            # ignored: params.txt bookkeeping must never break generation.
            ia = p._ad_idx
            lenp = len(p.all_prompts)
            if ia % lenp == lenp - 1:
                self.write_params_txt(p)
        except Exception:
            pass
645
+
646
+
647
def on_after_component(component, **_kwargs):
    """Capture the txt2img/img2img Generate buttons as Gradio creates them."""
    global txt2img_submit_button, img2img_submit_button
    elem_id = getattr(component, "elem_id", None)
    if elem_id == "txt2img_generate":
        txt2img_submit_button = component
    elif elem_id == "img2img_generate":
        img2img_submit_button = component
655
+
656
+
657
def on_ui_settings():
    """Register ADetailer's options in webui's Settings page.

    All options live under one "ADetailer" section; values are later read
    back via `opts.data.get(...)` throughout this file.
    """
    section = ("ADetailer", AFTER_DETAILER)
    shared.opts.add_option(
        "ad_max_models",
        shared.OptionInfo(
            default=2,
            label="Max models",
            component=gr.Slider,
            component_args={"minimum": 1, "maximum": 10, "step": 1},
            section=section,
        ),
    )

    shared.opts.add_option(
        "ad_save_previews",
        shared.OptionInfo(False, "Save mask previews", section=section),
    )

    shared.opts.add_option(
        "ad_save_images_before",
        shared.OptionInfo(False, "Save images before ADetailer", section=section),
    )

    # NOTE: the option key is spelled "seleted" (sic) — it must match the
    # lookup in script_filter, so do not "fix" the spelling here alone.
    shared.opts.add_option(
        "ad_only_seleted_scripts",
        shared.OptionInfo(
            True, "Apply only selected scripts to ADetailer", section=section
        ),
    )

    textbox_args = {
        "placeholder": "comma-separated list of script names",
        "interactive": True,
    }

    shared.opts.add_option(
        "ad_script_names",
        shared.OptionInfo(
            default=SCRIPT_DEFAULT,
            label="Script names to apply to ADetailer (separated by comma)",
            component=gr.Textbox,
            component_args=textbox_args,
            section=section,
        ),
    )

    shared.opts.add_option(
        "ad_bbox_sortby",
        shared.OptionInfo(
            default="None",
            label="Sort bounding boxes by",
            component=gr.Radio,
            component_args={"choices": BBOX_SORTBY},
            section=section,
        ),
    )
713
+
714
+
715
+ # xyz_grid
716
+
717
+
718
def make_axis_on_xyz_grid():
    """Add ADetailer axes to the built-in X/Y/Z grid script, if present.

    Looks up the xyz_grid script module among all loaded scripts, then
    appends one AxisOption per overridable first-tab setting. Each axis
    writes its value into ``p.adetailer_xyz``, which `get_args` merges
    into the first tab's dict. Idempotent: does nothing when ADetailer
    axes are already registered.
    """
    xyz_grid = None
    for script in scripts.scripts_data:
        # webui registers the grid script with its filename as __module__.
        if script.script_class.__module__ == "xyz_grid.py":
            xyz_grid = script.module
            break

    if xyz_grid is None:
        return

    model_list = ["None", *model_mapping.keys()]
    samplers = [sampler.name for sampler in all_samplers]

    def set_value(p, x, xs, *, field: str):
        if not hasattr(p, "adetailer_xyz"):
            p.adetailer_xyz = {}
        p.adetailer_xyz[field] = x

    axis = [
        xyz_grid.AxisOption(
            "[ADetailer] ADetailer model 1st",
            str,
            partial(set_value, field="ad_model"),
            choices=lambda: model_list,
        ),
        xyz_grid.AxisOption(
            "[ADetailer] ADetailer prompt 1st",
            str,
            partial(set_value, field="ad_prompt"),
        ),
        xyz_grid.AxisOption(
            "[ADetailer] ADetailer negative prompt 1st",
            str,
            partial(set_value, field="ad_negative_prompt"),
        ),
        xyz_grid.AxisOption(
            "[ADetailer] Mask erosion / dilation 1st",
            int,
            partial(set_value, field="ad_dilate_erode"),
        ),
        xyz_grid.AxisOption(
            "[ADetailer] Inpaint denoising strength 1st",
            float,
            partial(set_value, field="ad_denoising_strength"),
        ),
        xyz_grid.AxisOption(
            "[ADetailer] Inpaint only masked 1st",
            str,
            partial(set_value, field="ad_inpaint_only_masked"),
            choices=lambda: ["True", "False"],
        ),
        xyz_grid.AxisOption(
            "[ADetailer] Inpaint only masked padding 1st",
            int,
            partial(set_value, field="ad_inpaint_only_masked_padding"),
        ),
        xyz_grid.AxisOption(
            "[ADetailer] ADetailer sampler 1st",
            str,
            partial(set_value, field="ad_sampler"),
            choices=lambda: samplers,
        ),
        xyz_grid.AxisOption(
            "[ADetailer] ControlNet model 1st",
            str,
            partial(set_value, field="ad_controlnet_model"),
            choices=lambda: ["None", *get_cn_models()],
        ),
    ]

    # Only register once, even if this hook runs again (e.g. UI reload).
    if not any(x.label.startswith("[ADetailer]") for x in xyz_grid.axis_options):
        xyz_grid.axis_options.extend(axis)
790
+
791
+
792
def on_before_ui():
    """Register the X/Y/Z grid axes; a failure must never block UI startup."""
    try:
        make_axis_on_xyz_grid()
    except Exception:
        print(
            f"[-] ADetailer: xyz_grid error:\n{traceback.format_exc()}",
            file=sys.stderr,
        )
801
+
802
+
803
+ script_callbacks.on_ui_settings(on_ui_settings)
804
+ script_callbacks.on_after_component(on_after_component)
805
+ script_callbacks.on_before_ui(on_before_ui)
sd_webui/__init__.py ADDED
File without changes
sd_webui/devices.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-in for webui's ``modules.devices``.

Under TYPE_CHECKING a local stub is declared so this package can be
analyzed without a webui checkout; at runtime the real name is imported
from the webui.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:

    # Stub only: mirrors the name/shape of webui's NansException.
    class NansException(Exception):  # noqa: N818
        pass

else:
    from modules.devices import NansException
sd_webui/images.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-in for webui's ``modules.images``.

The TYPE_CHECKING branch declares an annotated signature (with the real
function's docstring) for static analysis; at runtime the real function
is imported from the webui.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from PIL import Image, PngImagePlugin

    from sd_webui.processing import StableDiffusionProcessing

    def save_image(
        image: Image.Image,
        path: str,
        basename: str,
        seed: int | None = None,
        prompt: str = "",
        extension: str = "png",
        info: str | PngImagePlugin.iTXt = "",
        short_filename: bool = False,
        no_prompt: bool = False,
        grid: bool = False,
        pnginfo_section_name: str = "parameters",
        p: StableDiffusionProcessing | None = None,
        existing_info: dict | None = None,
        forced_filename: str | None = None,
        suffix: str = "",
        save_to_dirs: bool = False,
    ) -> tuple[str, str | None]:
        """Save an image.

        Args:
            image (`PIL.Image`):
                The image to be saved.
            path (`str`):
                The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory.
            basename (`str`):
                The base filename which will be applied to `filename pattern`.
            seed, prompt, short_filename,
            extension (`str`):
                Image file extension, default is `png`.
            pnginfo_section_name (`str`):
                Specify the name of the section which `info` will be saved in.
            info (`str` or `PngImagePlugin.iTXt`):
                PNG info chunks.
            existing_info (`dict`):
                Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
            no_prompt:
                TODO I don't know its meaning.
            p (`StableDiffusionProcessing`)
            forced_filename (`str`):
                If specified, `basename` and filename pattern will be ignored.
            save_to_dirs (bool):
                If true, the image will be saved into a subdirectory of `path`.

        Returns: (fullfn, txt_fullfn)
            fullfn (`str`):
                The full path of the saved image.
            txt_fullfn (`str` or None):
                If a text file is saved for this image, this will be its full path. Otherwise None.
        """

else:
    from modules.images import save_image
sd_webui/paths.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-in for webui's ``modules.paths``."""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import os

    # Dummy values; only the names (and their str type) matter to the
    # type checker.
    models_path = os.path.join(os.path.dirname(__file__), "1")
    script_path = os.path.join(os.path.dirname(__file__), "2")
    data_path = os.path.join(os.path.dirname(__file__), "3")
    extensions_dir = os.path.join(os.path.dirname(__file__), "4")
    extensions_builtin_dir = os.path.join(os.path.dirname(__file__), "5")
else:
    # NOTE(review): the runtime import omits extensions_dir /
    # extensions_builtin_dir even though the stub declares them — confirm
    # nothing imports those two names at runtime.
    from modules.paths import data_path, models_path, script_path
sd_webui/processing.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-ins for webui's ``modules.processing``.

Under TYPE_CHECKING, lightweight dataclasses mirror the attributes of the
real processing classes so this package type-checks without a webui
checkout; at runtime the real classes and functions are imported.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from dataclasses import dataclass, field
    from typing import Any, Callable

    import numpy as np
    import torch
    from PIL import Image

    def _image():
        # Default factory for image-valued fields in the stubs below.
        return Image.new("L", (512, 512))

    @dataclass
    class StableDiffusionProcessing:
        # Attribute mirror of webui's StableDiffusionProcessing; defaults
        # are placeholders for type-checking only.
        sd_model: torch.nn.Module = field(default_factory=lambda: torch.nn.Linear(1, 1))
        outpath_samples: str = ""
        outpath_grids: str = ""
        prompt: str = ""
        prompt_for_display: str = ""
        negative_prompt: str = ""
        styles: list[str] = field(default_factory=list)
        seed: int = -1
        subseed: int = -1
        subseed_strength: float = 0.0
        seed_resize_from_h: int = -1
        seed_resize_from_w: int = -1
        sampler_name: str | None = None
        batch_size: int = 1
        n_iter: int = 1
        steps: int = 50
        cfg_scale: float = 7.0
        width: int = 512
        height: int = 512
        restore_faces: bool = False
        tiling: bool = False
        do_not_save_samples: bool = False
        do_not_save_grid: bool = False
        extra_generation_params: dict[str, Any] = field(default_factory=dict)
        overlay_images: list[Image.Image] = field(default_factory=list)
        eta: float = 0.0
        do_not_reload_embeddings: bool = False
        paste_to: tuple[int | float, ...] = (0, 0, 0, 0)
        color_corrections: list[np.ndarray] = field(default_factory=list)
        denoising_strength: float = 0.0
        sampler_noise_scheduler_override: Callable | None = None
        ddim_discretize: str = ""
        s_min_uncond: float = 0.0
        s_churn: float = 0.0
        s_tmin: float = 0.0
        s_tmax: float = 0.0
        s_noise: float = 0.0
        override_settings: dict[str, Any] = field(default_factory=dict)
        override_settings_restore_afterwards: bool = False
        is_using_inpainting_conditioning: bool = False
        disable_extra_networks: bool = False
        scripts: Any = None
        script_args: list[Any] = field(default_factory=list)
        all_prompts: list[str] = field(default_factory=list)
        all_negative_prompts: list[str] = field(default_factory=list)
        all_seeds: list[int] = field(default_factory=list)
        all_subseeds: list[int] = field(default_factory=list)
        iteration: int = 1
        is_hr_pass: bool = False

        def close(self) -> None:
            pass

    @dataclass
    class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
        sampler: Callable | None = None
        enable_hr: bool = False
        denoising_strength: float = 0.75
        hr_scale: float = 2.0
        hr_upscaler: str = ""
        hr_second_pass_steps: int = 0
        hr_resize_x: int = 0
        hr_resize_y: int = 0
        hr_upscale_to_x: int = 0
        hr_upscale_to_y: int = 0
        width: int = 512
        height: int = 512
        truncate_x: int = 512
        truncate_y: int = 512
        applied_old_hires_behavior_to: tuple[int, int] = (512, 512)

    @dataclass
    class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
        sampler: Callable | None = None
        init_images: list[Image.Image] = field(default_factory=list)
        resize_mode: int = 0
        denoising_strength: float = 0.75
        image_cfg_scale: float | None = None
        init_latent: torch.Tensor | None = None
        image_mask: Image.Image = field(default_factory=_image)
        latent_mask: Image.Image = field(default_factory=_image)
        mask_for_overlay: Image.Image = field(default_factory=_image)
        mask_blur: int = 4
        inpainting_fill: int = 0
        inpaint_full_res: bool = True
        inpaint_full_res_padding: int = 0
        inpainting_mask_invert: int | bool = 0
        initial_noise_multiplier: float = 1.0
        mask: torch.Tensor | None = None
        nmask: torch.Tensor | None = None
        image_conditioning: torch.Tensor | None = None

    @dataclass
    class Processed:
        # Result object returned by process_images.
        images: list[Image.Image] = field(default_factory=list)
        prompt: list[str] = field(default_factory=list)
        negative_prompt: list[str] = field(default_factory=list)
        seed: list[int] = field(default_factory=list)
        subseed: list[int] = field(default_factory=list)
        subseed_strength: float = 0.0
        info: str = ""
        comments: str = ""
        width: int = 512
        height: int = 512
        sampler_name: str = ""
        cfg_scale: float = 7.0
        image_cfg_scale: float | None = None
        steps: int = 50
        batch_size: int = 1
        restore_faces: bool = False
        face_restoration_model: str | None = None
        sd_model_hash: str = ""
        seed_resize_from_w: int = -1
        seed_resize_from_h: int = -1
        denoising_strength: float = 0.0
        extra_generation_params: dict[str, Any] = field(default_factory=dict)
        index_of_first_image: int = 0
        styles: list[str] = field(default_factory=list)
        job_timestamp: str = ""
        clip_skip: int = 1
        eta: float = 0.0
        ddim_discretize: str = ""
        s_churn: float = 0.0
        s_tmin: float = 0.0
        s_tmax: float = 0.0
        s_noise: float = 0.0
        sampler_noise_scheduler_override: Callable | None = None
        is_using_inpainting_conditioning: bool = False
        all_prompts: list[str] = field(default_factory=list)
        all_negative_prompts: list[str] = field(default_factory=list)
        all_seeds: list[int] = field(default_factory=list)
        all_subseeds: list[int] = field(default_factory=list)
        infotexts: list[str] = field(default_factory=list)

    # Signature-only stubs; bodies are never executed.
    def create_infotext(
        p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
        all_prompts: list[str],
        all_seeds: list[int],
        all_subseeds: list[int],
        comments: Any,
        iteration: int = 0,
        position_in_batch: int = 0,
    ) -> str:
        pass

    def process_images(
        p: StableDiffusionProcessingTxt2Img | StableDiffusionProcessingImg2Img,
    ) -> Processed:
        pass

else:
    from modules.processing import (
        Processed,
        StableDiffusionProcessing,
        StableDiffusionProcessingImg2Img,
        StableDiffusionProcessingTxt2Img,
        create_infotext,
        process_images,
    )
sd_webui/safe.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-in for webui's ``modules.safe``."""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import torch

    # Stub: the real name is webui's unrestricted torch.load.
    unsafe_torch_load = torch.load
else:
    from modules.safe import unsafe_torch_load
sd_webui/script_callbacks.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-ins for webui's ``modules.script_callbacks`` hooks."""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Callable

    # Signature-only stubs; the real functions register lifecycle callbacks.
    def on_app_started(callback: Callable):
        pass

    def on_ui_settings(callback: Callable):
        pass

    def on_after_component(callback: Callable):
        pass

    def on_before_ui(callback: Callable):
        pass

else:
    from modules.script_callbacks import (
        on_after_component,
        on_app_started,
        on_before_ui,
        on_ui_settings,
    )
sd_webui/scripts.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-ins for webui's ``modules.scripts``.

Mirrors the Script base class, PostprocessImageArgs, and the
scripts_data registry for static analysis only.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from abc import ABC, abstractmethod
    from collections import namedtuple
    from dataclasses import dataclass
    from typing import Any

    import gradio as gr
    from PIL import Image

    from sd_webui.processing import (
        Processed,
        StableDiffusionProcessingImg2Img,
        StableDiffusionProcessingTxt2Img,
    )

    SDPType = StableDiffusionProcessingImg2Img | StableDiffusionProcessingTxt2Img
    # Sentinel: a script returning this from show() is always visible.
    AlwaysVisible = object()

    @dataclass
    class PostprocessImageArgs:
        image: Image.Image

    class Script(ABC):
        filename: str
        args_from: int
        args_to: int
        alwayson: bool

        is_txt2img: bool
        is_img2img: bool

        group: gr.Group
        infotext_fields: list[tuple[str, str]]
        paste_field_names: list[str]

        @abstractmethod
        def title(self):
            raise NotImplementedError

        def ui(self, is_img2img: bool):
            pass

        def show(self, is_img2img: bool):
            return True

        def run(self, p: SDPType, *args):
            pass

        def process(self, p: SDPType, *args):
            pass

        def before_process_batch(self, p: SDPType, *args, **kwargs):
            pass

        def process_batch(self, p: SDPType, *args, **kwargs):
            pass

        def postprocess_batch(self, p: SDPType, *args, **kwargs):
            pass

        def postprocess_image(self, p: SDPType, pp: PostprocessImageArgs, *args):
            pass

        def postprocess(self, p: SDPType, processed: Processed, *args):
            pass

        def before_component(self, component, **kwargs):
            pass

        def after_component(self, component, **kwargs):
            pass

        def describe(self):
            return ""

        def elem_id(self, item_id: Any) -> str:
            pass

    ScriptClassData = namedtuple(
        "ScriptClassData", ["script_class", "path", "basedir", "module"]
    )
    # Registry of every loaded script (populated by webui at runtime).
    scripts_data: list[ScriptClassData] = []

else:
    from modules.scripts import (
        AlwaysVisible,
        PostprocessImageArgs,
        Script,
        scripts_data,
    )
sd_webui/sd_samplers.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-in for webui's ``modules.sd_samplers``."""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Any, Callable, NamedTuple

    class SamplerData(NamedTuple):
        name: str
        constructor: Callable
        aliases: list[str]
        options: dict[str, Any]

    # Populated by webui at runtime with every available sampler.
    all_samplers: list[SamplerData] = []

else:
    from modules.sd_samplers import all_samplers
sd_webui/shared.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Typed stand-ins for webui's ``modules.shared``.

Mirrors State, OptionInfo, the opts container, cmd_opts, and state for
static analysis; at runtime the real objects are imported.
"""
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import argparse
    from dataclasses import dataclass
    from typing import Any, Callable

    import torch
    from PIL import Image

    @dataclass
    class State:
        skipped: bool = False
        interrupted: bool = False
        job: str = ""
        job_no: int = 0
        job_count: int = 0
        processing_has_refined_job_count: bool = False
        job_timestamp: str = "0"
        sampling_step: int = 0
        sampling_steps: int = 0
        current_latent: torch.Tensor | None = None
        current_image: Image.Image | None = None
        current_image_sampling_step: int = 0
        id_live_preview: int = 0
        textinfo: str | None = None
        time_start: float | None = None
        need_restart: bool = False
        server_start: float | None = None

    @dataclass
    class OptionInfo:
        default: Any = None
        label: str = ""
        component: Any = None
        component_args: Callable[[], dict] | dict[str, Any] | None = None
        onchange: Callable[[], None] | None = None
        section: tuple[str, str] | None = None
        refresh: Callable[[], None] | None = None

    class Option:
        data_labels: dict[str, OptionInfo]

        def __init__(self):
            self.data: dict[str, Any] = {}

        def add_option(self, key: str, info: OptionInfo):
            pass

        def __getattr__(self, item: str):
            # Lookup order: stored value, then the option's declared
            # default, then normal attribute resolution.
            if self.data is not None and item in self.data:
                return self.data[item]

            if item in self.data_labels:
                return self.data_labels[item].default

            return super().__getattribute__(item)

    opts = Option()
    cmd_opts = argparse.Namespace()
    state = State()

else:
    from modules.shared import OptionInfo, cmd_opts, opts, state