dikdimon committed on
Commit f2f3b8d · verified · 1 Parent(s): fabd6c3

Upload sd-forge-extra-samplers using SD-Hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. sd-forge-extra-samplers/.github/workflows/ruff.yaml +8 -0
  2. sd-forge-extra-samplers/.gitignore +163 -0
  3. sd-forge-extra-samplers/LICENSE +674 -0
  4. sd-forge-extra-samplers/README.md +97 -0
  5. sd-forge-extra-samplers/lib_es/__init__.py +0 -0
  6. sd-forge-extra-samplers/lib_es/__pycache__/__init__.cpython-310.pyc +0 -0
  7. sd-forge-extra-samplers/lib_es/__pycache__/const.cpython-310.pyc +0 -0
  8. sd-forge-extra-samplers/lib_es/__pycache__/samplers.cpython-310.pyc +0 -0
  9. sd-forge-extra-samplers/lib_es/__pycache__/schedulers.cpython-310.pyc +0 -0
  10. sd-forge-extra-samplers/lib_es/__pycache__/utils.cpython-310.pyc +0 -0
  11. sd-forge-extra-samplers/lib_es/__pycache__/xyz.cpython-310.pyc +0 -0
  12. sd-forge-extra-samplers/lib_es/const.py +25 -0
  13. sd-forge-extra-samplers/lib_es/extra_samplers/__init__.py +49 -0
  14. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/__init__.cpython-310.pyc +0 -0
  15. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/adaptive_progressive.cpython-310.pyc +0 -0
  16. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_dy.cpython-310.pyc +0 -0
  17. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_dy_negative.cpython-310.pyc +0 -0
  18. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_max.cpython-310.pyc +0 -0
  19. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_multipass.cpython-310.pyc +0 -0
  20. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_negative.cpython-310.pyc +0 -0
  21. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea.cpython-310.pyc +0 -0
  22. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea_dy.cpython-310.pyc +0 -0
  23. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea_dy_negative.cpython-310.pyc +0 -0
  24. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/extended_reverse_time.cpython-310.pyc +0 -0
  25. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/gradient_estimation.cpython-310.pyc +0 -0
  26. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/heun_ancestral.cpython-310.pyc +0 -0
  27. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/kohaku_lonyu_yog.cpython-310.pyc +0 -0
  28. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/langevin_euler.cpython-310.pyc +0 -0
  29. sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/res_multistep.cpython-310.pyc +0 -0
  30. sd-forge-extra-samplers/lib_es/extra_samplers/adaptive_progressive.py +227 -0
  31. sd-forge-extra-samplers/lib_es/extra_samplers/euler_dy.py +50 -0
  32. sd-forge-extra-samplers/lib_es/extra_samplers/euler_dy_negative.py +50 -0
  33. sd-forge-extra-samplers/lib_es/extra_samplers/euler_max.py +45 -0
  34. sd-forge-extra-samplers/lib_es/extra_samplers/euler_multipass.py +290 -0
  35. sd-forge-extra-samplers/lib_es/extra_samplers/euler_negative.py +48 -0
  36. sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea.py +49 -0
  37. sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea_dy.py +53 -0
  38. sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea_dy_negative.py +55 -0
  39. sd-forge-extra-samplers/lib_es/extra_samplers/extended_reverse_time.py +83 -0
  40. sd-forge-extra-samplers/lib_es/extra_samplers/gradient_estimation.py +180 -0
  41. sd-forge-extra-samplers/lib_es/extra_samplers/heun_ancestral.py +81 -0
  42. sd-forge-extra-samplers/lib_es/extra_samplers/kohaku_lonyu_yog.py +58 -0
  43. sd-forge-extra-samplers/lib_es/extra_samplers/langevin_euler.py +89 -0
  44. sd-forge-extra-samplers/lib_es/extra_samplers/res_multistep.py +235 -0
  45. sd-forge-extra-samplers/lib_es/extra_schedulers/__init__.py +6 -0
  46. sd-forge-extra-samplers/lib_es/extra_schedulers/__pycache__/__init__.cpython-310.pyc +0 -0
  47. sd-forge-extra-samplers/lib_es/extra_schedulers/__pycache__/linear_log.cpython-310.pyc +0 -0
  48. sd-forge-extra-samplers/lib_es/extra_schedulers/linear_log.py +63 -0
  49. sd-forge-extra-samplers/lib_es/samplers.py +57 -0
  50. sd-forge-extra-samplers/lib_es/schedulers.py +18 -0
sd-forge-extra-samplers/.github/workflows/ruff.yaml ADDED
@@ -0,0 +1,8 @@
name: Ruff
on: [pull_request]
jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/ruff-action@v1
sd-forge-extra-samplers/.gitignore ADDED
@@ -0,0 +1,163 @@
1
+
2
+ # Byte-compiled / optimized / DLL files
3
+ __pycache__/
4
+ *.py[cod]
5
+ *$py.class
6
+
7
+ # C extensions
8
+ *.so
9
+
10
+ # Distribution / packaging
11
+ .Python
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ wheels/
24
+ share/python-wheels/
25
+ *.egg-info/
26
+ .installed.cfg
27
+ *.egg
28
+ MANIFEST
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ *.py,cover
51
+ .hypothesis/
52
+ .pytest_cache/
53
+ cover/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ .pybuilder/
77
+ target/
78
+
79
+ # Jupyter Notebook
80
+ .ipynb_checkpoints
81
+
82
+ # IPython
83
+ profile_default/
84
+ ipython_config.py
85
+
86
+ # pyenv
87
+ # For a library or package, you might want to ignore these files since the code is
88
+ # intended to run in multiple environments; otherwise, check them in:
89
+ # .python-version
90
+
91
+ # pipenv
92
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
93
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
94
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
95
+ # install all needed dependencies.
96
+ #Pipfile.lock
97
+
98
+ # poetry
99
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
100
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
101
+ # commonly ignored for libraries.
102
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
103
+ #poetry.lock
104
+
105
+ # pdm
106
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
107
+ #pdm.lock
108
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
109
+ # in version control.
110
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
111
+ .pdm.toml
112
+ .pdm-python
113
+ .pdm-build/
114
+
115
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
116
+ __pypackages__/
117
+
118
+ # Celery stuff
119
+ celerybeat-schedule
120
+ celerybeat.pid
121
+
122
+ # SageMath parsed files
123
+ *.sage.py
124
+
125
+ # Environments
126
+ .env
127
+ .venv
128
+ env/
129
+ venv/
130
+ ENV/
131
+ env.bak/
132
+ venv.bak/
133
+
134
+ # Spyder project settings
135
+ .spyderproject
136
+ .spyproject
137
+
138
+ # Rope project settings
139
+ .ropeproject
140
+
141
+ # mkdocs documentation
142
+ /site
143
+
144
+ # mypy
145
+ .mypy_cache/
146
+ .dmypy.json
147
+ dmypy.json
148
+
149
+ # Pyre type checker
150
+ .pyre/
151
+
152
+ # pytype static type analyzer
153
+ .pytype/
154
+
155
+ # Cython debug symbols
156
+ cython_debug/
157
+
158
+ # PyCharm
159
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
160
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
161
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
162
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
163
+ #.idea/
sd-forge-extra-samplers/LICENSE ADDED
@@ -0,0 +1,674 @@
1
+ GNU GENERAL PUBLIC LICENSE
2
+ Version 3, 29 June 2007
3
+
4
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
5
+ Everyone is permitted to copy and distribute verbatim copies
6
+ of this license document, but changing it is not allowed.
7
+
8
+ Preamble
9
+
10
+ The GNU General Public License is a free, copyleft license for
11
+ software and other kinds of works.
12
+
13
+ The licenses for most software and other practical works are designed
14
+ to take away your freedom to share and change the works. By contrast,
15
+ the GNU General Public License is intended to guarantee your freedom to
16
+ share and change all versions of a program--to make sure it remains free
17
+ software for all its users. We, the Free Software Foundation, use the
18
+ GNU General Public License for most of our software; it applies also to
19
+ any other work released this way by its authors. You can apply it to
20
+ your programs, too.
21
+
22
+ When we speak of free software, we are referring to freedom, not
23
+ price. Our General Public Licenses are designed to make sure that you
24
+ have the freedom to distribute copies of free software (and charge for
25
+ them if you wish), that you receive source code or can get it if you
26
+ want it, that you can change the software or use pieces of it in new
27
+ free programs, and that you know you can do these things.
28
+
29
+ To protect your rights, we need to prevent others from denying you
30
+ these rights or asking you to surrender the rights. Therefore, you have
31
+ certain responsibilities if you distribute copies of the software, or if
32
+ you modify it: responsibilities to respect the freedom of others.
33
+
34
+ For example, if you distribute copies of such a program, whether
35
+ gratis or for a fee, you must pass on to the recipients the same
36
+ freedoms that you received. You must make sure that they, too, receive
37
+ or can get the source code. And you must show them these terms so they
38
+ know their rights.
39
+
40
+ Developers that use the GNU GPL protect your rights with two steps:
41
+ (1) assert copyright on the software, and (2) offer you this License
42
+ giving you legal permission to copy, distribute and/or modify it.
43
+
44
+ For the developers' and authors' protection, the GPL clearly explains
45
+ that there is no warranty for this free software. For both users' and
46
+ authors' sake, the GPL requires that modified versions be marked as
47
+ changed, so that their problems will not be attributed erroneously to
48
+ authors of previous versions.
49
+
50
+ Some devices are designed to deny users access to install or run
51
+ modified versions of the software inside them, although the manufacturer
52
+ can do so. This is fundamentally incompatible with the aim of
53
+ protecting users' freedom to change the software. The systematic
54
+ pattern of such abuse occurs in the area of products for individuals to
55
+ use, which is precisely where it is most unacceptable. Therefore, we
56
+ have designed this version of the GPL to prohibit the practice for those
57
+ products. If such problems arise substantially in other domains, we
58
+ stand ready to extend this provision to those domains in future versions
59
+ of the GPL, as needed to protect the freedom of users.
60
+
61
+ Finally, every program is threatened constantly by software patents.
62
+ States should not allow patents to restrict development and use of
63
+ software on general-purpose computers, but in those that do, we wish to
64
+ avoid the special danger that patents applied to a free program could
65
+ make it effectively proprietary. To prevent this, the GPL assures that
66
+ patents cannot be used to render the program non-free.
67
+
68
+ The precise terms and conditions for copying, distribution and
69
+ modification follow.
70
+
71
+ TERMS AND CONDITIONS
72
+
73
+ 0. Definitions.
74
+
75
+ "This License" refers to version 3 of the GNU General Public License.
76
+
77
+ "Copyright" also means copyright-like laws that apply to other kinds of
78
+ works, such as semiconductor masks.
79
+
80
+ "The Program" refers to any copyrightable work licensed under this
81
+ License. Each licensee is addressed as "you". "Licensees" and
82
+ "recipients" may be individuals or organizations.
83
+
84
+ To "modify" a work means to copy from or adapt all or part of the work
85
+ in a fashion requiring copyright permission, other than the making of an
86
+ exact copy. The resulting work is called a "modified version" of the
87
+ earlier work or a work "based on" the earlier work.
88
+
89
+ A "covered work" means either the unmodified Program or a work based
90
+ on the Program.
91
+
92
+ To "propagate" a work means to do anything with it that, without
93
+ permission, would make you directly or secondarily liable for
94
+ infringement under applicable copyright law, except executing it on a
95
+ computer or modifying a private copy. Propagation includes copying,
96
+ distribution (with or without modification), making available to the
97
+ public, and in some countries other activities as well.
98
+
99
+ To "convey" a work means any kind of propagation that enables other
100
+ parties to make or receive copies. Mere interaction with a user through
101
+ a computer network, with no transfer of a copy, is not conveying.
102
+
103
+ An interactive user interface displays "Appropriate Legal Notices"
104
+ to the extent that it includes a convenient and prominently visible
105
+ feature that (1) displays an appropriate copyright notice, and (2)
106
+ tells the user that there is no warranty for the work (except to the
107
+ extent that warranties are provided), that licensees may convey the
108
+ work under this License, and how to view a copy of this License. If
109
+ the interface presents a list of user commands or options, such as a
110
+ menu, a prominent item in the list meets this criterion.
111
+
112
+ 1. Source Code.
113
+
114
+ The "source code" for a work means the preferred form of the work
115
+ for making modifications to it. "Object code" means any non-source
116
+ form of a work.
117
+
118
+ A "Standard Interface" means an interface that either is an official
119
+ standard defined by a recognized standards body, or, in the case of
120
+ interfaces specified for a particular programming language, one that
121
+ is widely used among developers working in that language.
122
+
123
+ The "System Libraries" of an executable work include anything, other
124
+ than the work as a whole, that (a) is included in the normal form of
125
+ packaging a Major Component, but which is not part of that Major
126
+ Component, and (b) serves only to enable use of the work with that
127
+ Major Component, or to implement a Standard Interface for which an
128
+ implementation is available to the public in source code form. A
129
+ "Major Component", in this context, means a major essential component
130
+ (kernel, window system, and so on) of the specific operating system
131
+ (if any) on which the executable work runs, or a compiler used to
132
+ produce the work, or an object code interpreter used to run it.
133
+
134
+ The "Corresponding Source" for a work in object code form means all
135
+ the source code needed to generate, install, and (for an executable
136
+ work) run the object code and to modify the work, including scripts to
137
+ control those activities. However, it does not include the work's
138
+ System Libraries, or general-purpose tools or generally available free
139
+ programs which are used unmodified in performing those activities but
140
+ which are not part of the work. For example, Corresponding Source
141
+ includes interface definition files associated with source files for
142
+ the work, and the source code for shared libraries and dynamically
143
+ linked subprograms that the work is specifically designed to require,
144
+ such as by intimate data communication or control flow between those
145
+ subprograms and other parts of the work.
146
+
147
+ The Corresponding Source need not include anything that users
148
+ can regenerate automatically from other parts of the Corresponding
149
+ Source.
150
+
151
+ The Corresponding Source for a work in source code form is that
152
+ same work.
153
+
154
+ 2. Basic Permissions.
155
+
156
+ All rights granted under this License are granted for the term of
157
+ copyright on the Program, and are irrevocable provided the stated
158
+ conditions are met. This License explicitly affirms your unlimited
159
+ permission to run the unmodified Program. The output from running a
160
+ covered work is covered by this License only if the output, given its
161
+ content, constitutes a covered work. This License acknowledges your
162
+ rights of fair use or other equivalent, as provided by copyright law.
163
+
164
+ You may make, run and propagate covered works that you do not
165
+ convey, without conditions so long as your license otherwise remains
166
+ in force. You may convey covered works to others for the sole purpose
167
+ of having them make modifications exclusively for you, or provide you
168
+ with facilities for running those works, provided that you comply with
169
+ the terms of this License in conveying all material for which you do
170
+ not control copyright. Those thus making or running the covered works
171
+ for you must do so exclusively on your behalf, under your direction
172
+ and control, on terms that prohibit them from making any copies of
173
+ your copyrighted material outside their relationship with you.
174
+
175
+ Conveying under any other circumstances is permitted solely under
176
+ the conditions stated below. Sublicensing is not allowed; section 10
177
+ makes it unnecessary.
178
+
179
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180
+
181
+ No covered work shall be deemed part of an effective technological
182
+ measure under any applicable law fulfilling obligations under article
183
+ 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184
+ similar laws prohibiting or restricting circumvention of such
185
+ measures.
186
+
187
+ When you convey a covered work, you waive any legal power to forbid
188
+ circumvention of technological measures to the extent such circumvention
189
+ is effected by exercising rights under this License with respect to
190
+ the covered work, and you disclaim any intention to limit operation or
191
+ modification of the work as a means of enforcing, against the work's
192
+ users, your or third parties' legal rights to forbid circumvention of
193
+ technological measures.
194
+
195
+ 4. Conveying Verbatim Copies.
196
+
197
+ You may convey verbatim copies of the Program's source code as you
198
+ receive it, in any medium, provided that you conspicuously and
199
+ appropriately publish on each copy an appropriate copyright notice;
200
+ keep intact all notices stating that this License and any
201
+ non-permissive terms added in accord with section 7 apply to the code;
202
+ keep intact all notices of the absence of any warranty; and give all
203
+ recipients a copy of this License along with the Program.
204
+
205
+ You may charge any price or no price for each copy that you convey,
206
+ and you may offer support or warranty protection for a fee.
207
+
208
+ 5. Conveying Modified Source Versions.
209
+
210
+ You may convey a work based on the Program, or the modifications to
211
+ produce it from the Program, in the form of source code under the
212
+ terms of section 4, provided that you also meet all of these conditions:
213
+
214
+ a) The work must carry prominent notices stating that you modified
215
+ it, and giving a relevant date.
216
+
217
+ b) The work must carry prominent notices stating that it is
218
+ released under this License and any conditions added under section
219
+ 7. This requirement modifies the requirement in section 4 to
220
+ "keep intact all notices".
221
+
222
+ c) You must license the entire work, as a whole, under this
223
+ License to anyone who comes into possession of a copy. This
224
+ License will therefore apply, along with any applicable section 7
225
+ additional terms, to the whole of the work, and all its parts,
226
+ regardless of how they are packaged. This License gives no
227
+ permission to license the work in any other way, but it does not
228
+ invalidate such permission if you have separately received it.
229
+
230
+ d) If the work has interactive user interfaces, each must display
231
+ Appropriate Legal Notices; however, if the Program has interactive
232
+ interfaces that do not display Appropriate Legal Notices, your
233
+ work need not make them do so.
234
+
235
+ A compilation of a covered work with other separate and independent
236
+ works, which are not by their nature extensions of the covered work,
237
+ and which are not combined with it such as to form a larger program,
238
+ in or on a volume of a storage or distribution medium, is called an
239
+ "aggregate" if the compilation and its resulting copyright are not
240
+ used to limit the access or legal rights of the compilation's users
241
+ beyond what the individual works permit. Inclusion of a covered work
242
+ in an aggregate does not cause this License to apply to the other
243
+ parts of the aggregate.
244
+
245
+ 6. Conveying Non-Source Forms.
246
+
247
+ You may convey a covered work in object code form under the terms
248
+ of sections 4 and 5, provided that you also convey the
249
+ machine-readable Corresponding Source under the terms of this License,
250
+ in one of these ways:
251
+
252
+ a) Convey the object code in, or embodied in, a physical product
253
+ (including a physical distribution medium), accompanied by the
254
+ Corresponding Source fixed on a durable physical medium
255
+ customarily used for software interchange.
256
+
257
+ b) Convey the object code in, or embodied in, a physical product
258
+ (including a physical distribution medium), accompanied by a
259
+ written offer, valid for at least three years and valid for as
260
+ long as you offer spare parts or customer support for that product
261
+ model, to give anyone who possesses the object code either (1) a
262
+ copy of the Corresponding Source for all the software in the
263
+ product that is covered by this License, on a durable physical
264
+ medium customarily used for software interchange, for a price no
265
+ more than your reasonable cost of physically performing this
266
+ conveying of source, or (2) access to copy the
267
+ Corresponding Source from a network server at no charge.
268
+
269
+ c) Convey individual copies of the object code with a copy of the
270
+ written offer to provide the Corresponding Source. This
271
+ alternative is allowed only occasionally and noncommercially, and
272
+ only if you received the object code with such an offer, in accord
273
+ with subsection 6b.
274
+
275
+ d) Convey the object code by offering access from a designated
276
+ place (gratis or for a charge), and offer equivalent access to the
277
+ Corresponding Source in the same way through the same place at no
278
+ further charge. You need not require recipients to copy the
279
+ Corresponding Source along with the object code. If the place to
280
+ copy the object code is a network server, the Corresponding Source
281
+ may be on a different server (operated by you or a third party)
282
+ that supports equivalent copying facilities, provided you maintain
283
+ clear directions next to the object code saying where to find the
284
+ Corresponding Source. Regardless of what server hosts the
285
+ Corresponding Source, you remain obligated to ensure that it is
286
+ available for as long as needed to satisfy these requirements.
287
+
288
+ e) Convey the object code using peer-to-peer transmission, provided
289
+ you inform other peers where the object code and Corresponding
290
+ Source of the work are being offered to the general public at no
291
+ charge under subsection 6d.
292
+
293
+ A separable portion of the object code, whose source code is excluded
294
+ from the Corresponding Source as a System Library, need not be
295
+ included in conveying the object code work.
296
+
297
+ A "User Product" is either (1) a "consumer product", which means any
298
+ tangible personal property which is normally used for personal, family,
299
+ or household purposes, or (2) anything designed or sold for incorporation
300
+ into a dwelling. In determining whether a product is a consumer product,
301
+ doubtful cases shall be resolved in favor of coverage. For a particular
302
+ product received by a particular user, "normally used" refers to a
303
+ typical or common use of that class of product, regardless of the status
304
+ of the particular user or of the way in which the particular user
305
+ actually uses, or expects or is expected to use, the product. A product
306
+ is a consumer product regardless of whether the product has substantial
307
+ commercial, industrial or non-consumer uses, unless such uses represent
308
+ the only significant mode of use of the product.
309
+
310
+ "Installation Information" for a User Product means any methods,
311
+ procedures, authorization keys, or other information required to install
312
+ and execute modified versions of a covered work in that User Product from
313
+ a modified version of its Corresponding Source. The information must
314
+ suffice to ensure that the continued functioning of the modified object
315
+ code is in no case prevented or interfered with solely because
316
+ modification has been made.
317
+
318
+ If you convey an object code work under this section in, or with, or
319
+ specifically for use in, a User Product, and the conveying occurs as
320
+ part of a transaction in which the right of possession and use of the
321
+ User Product is transferred to the recipient in perpetuity or for a
322
+ fixed term (regardless of how the transaction is characterized), the
323
+ Corresponding Source conveyed under this section must be accompanied
324
+ by the Installation Information. But this requirement does not apply
325
+ if neither you nor any third party retains the ability to install
326
+ modified object code on the User Product (for example, the work has
327
+ been installed in ROM).
328
+
329
+ The requirement to provide Installation Information does not include a
330
+ requirement to continue to provide support service, warranty, or updates
331
+ for a work that has been modified or installed by the recipient, or for
332
+ the User Product in which it has been modified or installed. Access to a
333
+ network may be denied when the modification itself materially and
334
+ adversely affects the operation of the network or violates the rules and
335
+ protocols for communication across the network.
336
+
337
+ Corresponding Source conveyed, and Installation Information provided,
338
+ in accord with this section must be in a format that is publicly
339
+ documented (and with an implementation available to the public in
340
+ source code form), and must require no special password or key for
341
+ unpacking, reading or copying.
342
+
343
+ 7. Additional Terms.
344
+
345
+ "Additional permissions" are terms that supplement the terms of this
346
+ License by making exceptions from one or more of its conditions.
347
+ Additional permissions that are applicable to the entire Program shall
348
+ be treated as though they were included in this License, to the extent
349
+ that they are valid under applicable law. If additional permissions
350
+ apply only to part of the Program, that part may be used separately
351
+ under those permissions, but the entire Program remains governed by
352
+ this License without regard to the additional permissions.
353
+
354
+ When you convey a copy of a covered work, you may at your option
355
+ remove any additional permissions from that copy, or from any part of
356
+ it. (Additional permissions may be written to require their own
357
+ removal in certain cases when you modify the work.) You may place
358
+ additional permissions on material, added by you to a covered work,
359
+ for which you have or can give appropriate copyright permission.
360
+
361
+ Notwithstanding any other provision of this License, for material you
362
+ add to a covered work, you may (if authorized by the copyright holders of
363
+ that material) supplement the terms of this License with terms:
364
+
365
+ a) Disclaiming warranty or limiting liability differently from the
366
+ terms of sections 15 and 16 of this License; or
367
+
368
+ b) Requiring preservation of specified reasonable legal notices or
369
+ author attributions in that material or in the Appropriate Legal
370
+ Notices displayed by works containing it; or
371
+
372
+ c) Prohibiting misrepresentation of the origin of that material, or
373
+ requiring that modified versions of such material be marked in
374
+ reasonable ways as different from the original version; or
375
+
376
+ d) Limiting the use for publicity purposes of names of licensors or
377
+ authors of the material; or
378
+
379
+ e) Declining to grant rights under trademark law for use of some
380
+ trade names, trademarks, or service marks; or
381
+
382
+ f) Requiring indemnification of licensors and authors of that
383
+ material by anyone who conveys the material (or modified versions of
384
+ it) with contractual assumptions of liability to the recipient, for
385
+ any liability that these contractual assumptions directly impose on
386
+ those licensors and authors.
387
+
388
+ All other non-permissive additional terms are considered "further
389
+ restrictions" within the meaning of section 10. If the Program as you
390
+ received it, or any part of it, contains a notice stating that it is
391
+ governed by this License along with a term that is a further
392
+ restriction, you may remove that term. If a license document contains
393
+ a further restriction but permits relicensing or conveying under this
394
+ License, you may add to a covered work material governed by the terms
395
+ of that license document, provided that the further restriction does
396
+ not survive such relicensing or conveying.
397
+
398
+ If you add terms to a covered work in accord with this section, you
399
+ must place, in the relevant source files, a statement of the
400
+ additional terms that apply to those files, or a notice indicating
401
+ where to find the applicable terms.
402
+
403
+ Additional terms, permissive or non-permissive, may be stated in the
404
+ form of a separately written license, or stated as exceptions;
405
+ the above requirements apply either way.
406
+
407
+ 8. Termination.
408
+
409
+ You may not propagate or modify a covered work except as expressly
410
+ provided under this License. Any attempt otherwise to propagate or
411
+ modify it is void, and will automatically terminate your rights under
412
+ this License (including any patent licenses granted under the third
413
+ paragraph of section 11).
414
+
415
+ However, if you cease all violation of this License, then your
416
+ license from a particular copyright holder is reinstated (a)
417
+ provisionally, unless and until the copyright holder explicitly and
418
+ finally terminates your license, and (b) permanently, if the copyright
419
+ holder fails to notify you of the violation by some reasonable means
420
+ prior to 60 days after the cessation.
421
+
422
+ Moreover, your license from a particular copyright holder is
423
+ reinstated permanently if the copyright holder notifies you of the
424
+ violation by some reasonable means, this is the first time you have
425
+ received notice of violation of this License (for any work) from that
426
+ copyright holder, and you cure the violation prior to 30 days after
427
+ your receipt of the notice.
428
+
429
+ Termination of your rights under this section does not terminate the
430
+ licenses of parties who have received copies or rights from you under
431
+ this License. If your rights have been terminated and not permanently
432
+ reinstated, you do not qualify to receive new licenses for the same
433
+ material under section 10.
434
+
435
+ 9. Acceptance Not Required for Having Copies.
436
+
437
+ You are not required to accept this License in order to receive or
438
+ run a copy of the Program. Ancillary propagation of a covered work
439
+ occurring solely as a consequence of using peer-to-peer transmission
440
+ to receive a copy likewise does not require acceptance. However,
441
+ nothing other than this License grants you permission to propagate or
442
+ modify any covered work. These actions infringe copyright if you do
443
+ not accept this License. Therefore, by modifying or propagating a
444
+ covered work, you indicate your acceptance of this License to do so.
445
+
446
+ 10. Automatic Licensing of Downstream Recipients.
447
+
448
+ Each time you convey a covered work, the recipient automatically
449
+ receives a license from the original licensors, to run, modify and
450
+ propagate that work, subject to this License. You are not responsible
451
+ for enforcing compliance by third parties with this License.
452
+
453
+ An "entity transaction" is a transaction transferring control of an
454
+ organization, or substantially all assets of one, or subdividing an
455
+ organization, or merging organizations. If propagation of a covered
456
+ work results from an entity transaction, each party to that
457
+ transaction who receives a copy of the work also receives whatever
458
+ licenses to the work the party's predecessor in interest had or could
459
+ give under the previous paragraph, plus a right to possession of the
460
+ Corresponding Source of the work from the predecessor in interest, if
461
+ the predecessor has it or can get it with reasonable efforts.
462
+
463
+ You may not impose any further restrictions on the exercise of the
464
+ rights granted or affirmed under this License. For example, you may
465
+ not impose a license fee, royalty, or other charge for exercise of
466
+ rights granted under this License, and you may not initiate litigation
467
+ (including a cross-claim or counterclaim in a lawsuit) alleging that
468
+ any patent claim is infringed by making, using, selling, offering for
469
+ sale, or importing the Program or any portion of it.
470
+
471
+ 11. Patents.
472
+
473
+ A "contributor" is a copyright holder who authorizes use under this
474
+ License of the Program or a work on which the Program is based. The
475
+ work thus licensed is called the contributor's "contributor version".
476
+
477
+ A contributor's "essential patent claims" are all patent claims
478
+ owned or controlled by the contributor, whether already acquired or
479
+ hereafter acquired, that would be infringed by some manner, permitted
480
+ by this License, of making, using, or selling its contributor version,
481
+ but do not include claims that would be infringed only as a
482
+ consequence of further modification of the contributor version. For
483
+ purposes of this definition, "control" includes the right to grant
484
+ patent sublicenses in a manner consistent with the requirements of
485
+ this License.
486
+
487
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
488
+ patent license under the contributor's essential patent claims, to
489
+ make, use, sell, offer for sale, import and otherwise run, modify and
490
+ propagate the contents of its contributor version.
491
+
492
+ In the following three paragraphs, a "patent license" is any express
493
+ agreement or commitment, however denominated, not to enforce a patent
494
+ (such as an express permission to practice a patent or covenant not to
495
+ sue for patent infringement). To "grant" such a patent license to a
496
+ party means to make such an agreement or commitment not to enforce a
497
+ patent against the party.
498
+
499
+ If you convey a covered work, knowingly relying on a patent license,
500
+ and the Corresponding Source of the work is not available for anyone
501
+ to copy, free of charge and under the terms of this License, through a
502
+ publicly available network server or other readily accessible means,
503
+ then you must either (1) cause the Corresponding Source to be so
504
+ available, or (2) arrange to deprive yourself of the benefit of the
505
+ patent license for this particular work, or (3) arrange, in a manner
506
+ consistent with the requirements of this License, to extend the patent
507
+ license to downstream recipients. "Knowingly relying" means you have
508
+ actual knowledge that, but for the patent license, your conveying the
509
+ covered work in a country, or your recipient's use of the covered work
510
+ in a country, would infringe one or more identifiable patents in that
511
+ country that you have reason to believe are valid.
512
+
513
+ If, pursuant to or in connection with a single transaction or
514
+ arrangement, you convey, or propagate by procuring conveyance of, a
515
+ covered work, and grant a patent license to some of the parties
516
+ receiving the covered work authorizing them to use, propagate, modify
517
+ or convey a specific copy of the covered work, then the patent license
518
+ you grant is automatically extended to all recipients of the covered
519
+ work and works based on it.
520
+
521
+ A patent license is "discriminatory" if it does not include within
522
+ the scope of its coverage, prohibits the exercise of, or is
523
+ conditioned on the non-exercise of one or more of the rights that are
524
+ specifically granted under this License. You may not convey a covered
525
+ work if you are a party to an arrangement with a third party that is
526
+ in the business of distributing software, under which you make payment
527
+ to the third party based on the extent of your activity of conveying
528
+ the work, and under which the third party grants, to any of the
529
+ parties who would receive the covered work from you, a discriminatory
530
+ patent license (a) in connection with copies of the covered work
531
+ conveyed by you (or copies made from those copies), or (b) primarily
532
+ for and in connection with specific products or compilations that
533
+ contain the covered work, unless you entered into that arrangement,
534
+ or that patent license was granted, prior to 28 March 2007.
535
+
536
+ Nothing in this License shall be construed as excluding or limiting
537
+ any implied license or other defenses to infringement that may
538
+ otherwise be available to you under applicable patent law.
539
+
540
+ 12. No Surrender of Others' Freedom.
541
+
542
+ If conditions are imposed on you (whether by court order, agreement or
543
+ otherwise) that contradict the conditions of this License, they do not
544
+ excuse you from the conditions of this License. If you cannot convey a
545
+ covered work so as to satisfy simultaneously your obligations under this
546
+ License and any other pertinent obligations, then as a consequence you may
547
+ not convey it at all. For example, if you agree to terms that obligate you
548
+ to collect a royalty for further conveying from those to whom you convey
549
+ the Program, the only way you could satisfy both those terms and this
550
+ License would be to refrain entirely from conveying the Program.
551
+
552
+ 13. Use with the GNU Affero General Public License.
553
+
554
+ Notwithstanding any other provision of this License, you have
555
+ permission to link or combine any covered work with a work licensed
556
+ under version 3 of the GNU Affero General Public License into a single
557
+ combined work, and to convey the resulting work. The terms of this
558
+ License will continue to apply to the part which is the covered work,
559
+ but the special requirements of the GNU Affero General Public License,
560
+ section 13, concerning interaction through a network will apply to the
561
+ combination as such.
562
+
563
+ 14. Revised Versions of this License.
564
+
565
+ The Free Software Foundation may publish revised and/or new versions of
566
+ the GNU General Public License from time to time. Such new versions will
567
+ be similar in spirit to the present version, but may differ in detail to
568
+ address new problems or concerns.
569
+
570
+ Each version is given a distinguishing version number. If the
571
+ Program specifies that a certain numbered version of the GNU General
572
+ Public License "or any later version" applies to it, you have the
573
+ option of following the terms and conditions either of that numbered
574
+ version or of any later version published by the Free Software
575
+ Foundation. If the Program does not specify a version number of the
576
+ GNU General Public License, you may choose any version ever published
577
+ by the Free Software Foundation.
578
+
579
+ If the Program specifies that a proxy can decide which future
580
+ versions of the GNU General Public License can be used, that proxy's
581
+ public statement of acceptance of a version permanently authorizes you
582
+ to choose that version for the Program.
583
+
584
+ Later license versions may give you additional or different
585
+ permissions. However, no additional obligations are imposed on any
586
+ author or copyright holder as a result of your choosing to follow a
587
+ later version.
588
+
589
+ 15. Disclaimer of Warranty.
590
+
591
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592
+ APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593
+ HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594
+ OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596
+ PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597
+ IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598
+ ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599
+
600
+ 16. Limitation of Liability.
601
+
602
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603
+ WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604
+ THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605
+ GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606
+ USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607
+ DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608
+ PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609
+ EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610
+ SUCH DAMAGES.
611
+
612
+ 17. Interpretation of Sections 15 and 16.
613
+
614
+ If the disclaimer of warranty and limitation of liability provided
615
+ above cannot be given local legal effect according to their terms,
616
+ reviewing courts shall apply local law that most closely approximates
617
+ an absolute waiver of all civil liability in connection with the
618
+ Program, unless a warranty or assumption of liability accompanies a
619
+ copy of the Program in return for a fee.
620
+
621
+ END OF TERMS AND CONDITIONS
622
+
623
+ How to Apply These Terms to Your New Programs
624
+
625
+ If you develop a new program, and you want it to be of the greatest
626
+ possible use to the public, the best way to achieve this is to make it
627
+ free software which everyone can redistribute and change under these terms.
628
+
629
+ To do so, attach the following notices to the program. It is safest
630
+ to attach them to the start of each source file to most effectively
631
+ state the exclusion of warranty; and each file should have at least
632
+ the "copyright" line and a pointer to where the full notice is found.
633
+
634
+ <one line to give the program's name and a brief idea of what it does.>
635
+ Copyright (C) <year> <name of author>
636
+
637
+ This program is free software: you can redistribute it and/or modify
638
+ it under the terms of the GNU General Public License as published by
639
+ the Free Software Foundation, either version 3 of the License, or
640
+ (at your option) any later version.
641
+
642
+ This program is distributed in the hope that it will be useful,
643
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
644
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645
+ GNU General Public License for more details.
646
+
647
+ You should have received a copy of the GNU General Public License
648
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
649
+
650
+ Also add information on how to contact you by electronic and paper mail.
651
+
652
+ If the program does terminal interaction, make it output a short
653
+ notice like this when it starts in an interactive mode:
654
+
655
+ <program> Copyright (C) <year> <name of author>
656
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657
+ This is free software, and you are welcome to redistribute it
658
+ under certain conditions; type `show c' for details.
659
+
660
+ The hypothetical commands `show w' and `show c' should show the appropriate
661
+ parts of the General Public License. Of course, your program's commands
662
+ might be different; for a GUI interface, you would use an "about box".
663
+
664
+ You should also get your employer (if you work as a programmer) or school,
665
+ if any, to sign a "copyright disclaimer" for the program, if necessary.
666
+ For more information on this, and how to apply and follow the GNU GPL, see
667
+ <https://www.gnu.org/licenses/>.
668
+
669
+ The GNU General Public License does not permit incorporating your program
670
+ into proprietary programs. If your program is a subroutine library, you
671
+ may consider it more useful to permit linking proprietary applications with
672
+ the library. If this is what you want to do, use the GNU Lesser General
673
+ Public License instead of this License. But first, please read
674
+ <https://www.gnu.org/licenses/why-not-lgpl.html>.
sd-forge-extra-samplers/README.md ADDED
@@ -0,0 +1,97 @@
# Overview

This repository provides additional samplers for the Forge WebUI.

## Features

- Additional samplers integrated into the Forge WebUI:
  - Adaptive Progressive (Experimental)
  - Euler Max
  - Euler Negative
  - Euler Dy
  - Euler Dy Negative
  - Euler SMEA
  - Euler SMEA Dy
  - Euler SMEA Dy Negative
  - Euler Multipass
  - Euler Multipass CFG++
  - Euler a Multipass
  - Euler a Multipass CFG++
  - Extended Reverse Time SDE
  - Gradient Estimation
  - Heun Ancestral
  - Kohaku LoNyu Yog
  - Langevin Euler (Experimental)
  - Res Multistep
  - Res Multistep CFG++
  - Res Multistep Ancestral
  - Res Multistep Ancestral CFG++

- Additional schedulers:
  - Linear Log

Adds a new extension accordion titled "Extra Samplers" that allows adjusting certain samplers.

## Installation

### Clone from Git

1. Navigate to the extension directory of your WebUI installation.
2. Clone the repository:
   ```sh
   git clone https://github.com/MisterChief95/sd-forge-extra-samplers.git
   ```
3. Start the WebUI.

### Install from URL

1. Open the Extensions tab in the WebUI.
2. Go to the "Install from URL" section.
3. Enter `https://github.com/MisterChief95/sd-forge-extra-samplers.git` in the "URL for extension's git repository" box.
4. Click "Install".
5. Restart the WebUI.

## Usage

1. Open the WebUI.
2. Navigate to the sampler settings.
3. Select one of the newly added samplers from the list.
4. Generate images as usual.

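The samplers can also be selected when generating through the WebUI's HTTP API (when the server is launched with `--api`). Below is a minimal sketch, assuming the standard `/sdapi/v1/txt2img` endpoint on a local server and that the sampler's registered display name matches the feature list above (e.g. "Euler Max"); neither assumption is confirmed by this diff.

```python
# Sketch: request an image using one of the extra samplers via the WebUI API.
import requests

payload = {
    "prompt": "a lighthouse at sunset",
    "steps": 28,
    "sampler_name": "Euler Max",   # assumed display name of a sampler added by this extension
    "scheduler": "Linear Log",     # optional; the "scheduler" field exists in recent WebUI API versions
    "width": 512,
    "height": 512,
}

response = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
response.raise_for_status()
images_base64 = response.json()["images"]  # list of base64-encoded PNGs
```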
### Important

- Not all samplers work well in every situation; some produce poor results when used for img2img or hires fix.
- Mix and match samplers and schedulers to find the best combinations. A sampler might look bad with one scheduler but good with another!

## Contributing

Contributions are welcome! Please open an issue or submit a pull request for any improvements or bug fixes.

## Acknowledgements

If any of these attributions are incorrect, please let me know!

- Thanks to the developers of Automatic1111 and Forge.
- [Koishi-Star](https://github.com/Koishi-Star/Euler-Smea-Dyn-Sampler) for the following sampler contributions:
  - Euler Negative
  - Euler Dy
  - Euler Dy Negative
  - Euler SMEA Dy (Euler SMEA Dy Negative is based on this)
  - Kohaku LoNyu Yog
- [licyk](https://github.com/licyk/advanced_euler_sampler_extension/tree/main) for the following sampler contributions:
  - Euler Max
  - Euler SMEA
- [Panchovix](https://github.com/Panchovix/stable-diffusion-webui-reForge) for the following sampler contributions:
  - Res Multistep
  - Res Multistep CFG++
- [comfyanonymous](https://github.com/comfyanonymous/ComfyUI) for the following sampler contributions:
  - Gradient Estimation
  - Extended Reverse Time SDE
  - Res Multistep
  - Res Multistep CFG++
  - Res Multistep Ancestral
  - Res Multistep Ancestral CFG++
  - Euler Multipass
    - Original implementation: [aria1th](https://github.com/aria1th)
    - CFG++ implementation: [LaVie024](https://github.com/LaVie024)
    - Final ComfyUI implementation: [catboxanon](https://github.com/catboxanon)
- Special thanks to the contributors of this repository.
sd-forge-extra-samplers/lib_es/__init__.py ADDED
File without changes
sd-forge-extra-samplers/lib_es/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes). View file
 
sd-forge-extra-samplers/lib_es/__pycache__/const.cpython-310.pyc ADDED
Binary file (858 Bytes). View file
 
sd-forge-extra-samplers/lib_es/__pycache__/samplers.cpython-310.pyc ADDED
Binary file (2.3 kB). View file
 
sd-forge-extra-samplers/lib_es/__pycache__/schedulers.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
sd-forge-extra-samplers/lib_es/__pycache__/utils.cpython-310.pyc ADDED
Binary file (7.22 kB). View file
 
sd-forge-extra-samplers/lib_es/__pycache__/xyz.cpython-310.pyc ADDED
Binary file (1.98 kB). View file
 
sd-forge-extra-samplers/lib_es/const.py ADDED
@@ -0,0 +1,25 @@
# Adaptive Progressive
AP_EULER_A_END = "exs_ap_euler_a_end"
AP_DPM_2M_END = "exs_ap_dpm_2m_end"
AP_ANCESTRAL_ETA = "exs_ap_ancestral_eta"
AP_DETAIL_STRENGTH = "exs_ap_detail_strength"

# Langevin Euler
LANGEVIN_STRENGTH = "exs_langevin_strength"

# Extended Reverse-Time
ER_MAX_STAGE = "er_max_stage"

# Gradient Estimation
GE_GAMMA = "ge_gamma"
GE_GAMMA_OFFSET = "ge_gamma_offset"
GE_USE_ADAPTIVE_STEPS = "ge_use_adaptive_steps"
GE_USE_TIMESTEP_ADAPTIVE_GAMMA = "ge_use_timestep_adaptive_gamma"
GE_VALIDATE_SCHEDULE = "ge_validate_schedule"

GE_DEFAULT_GAMMA = 2.0
GE_MIN_GAMMA = 1.0
GE_MAX_GAMMA = 3.0
GE_DEFAULT_GAMMA_OFFSET = 0.0
GE_MIN_GAMMA_OFFSET = -1.0
GE_MAX_GAMMA_OFFSET = 1.0
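These constants act as stable string keys and value ranges for the extension's per-sampler options. A minimal, hypothetical sketch of how such keys might be looked up and clamped follows; the `ui_values` dict is illustrative only and not part of the repository.

```python
import lib_es.const as consts

# Hypothetical dict of values collected from the "Extra Samplers" accordion.
ui_values = {consts.GE_GAMMA: 2.4, consts.GE_USE_ADAPTIVE_STEPS: True}

gamma = ui_values.get(consts.GE_GAMMA, consts.GE_DEFAULT_GAMMA)
# Clamp to the range defined above before handing the value to the sampler.
gamma = max(consts.GE_MIN_GAMMA, min(consts.GE_MAX_GAMMA, gamma))
```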
sd-forge-extra-samplers/lib_es/extra_samplers/__init__.py ADDED
@@ -0,0 +1,49 @@
# from lib_es.extra_samplers.adaptive_progressive import sample_adaptive_progressive
from lib_es.extra_samplers.euler_dy import sample_euler_dy
from lib_es.extra_samplers.euler_dy_negative import sample_euler_dy_negative
from lib_es.extra_samplers.euler_smea import sample_euler_smea
from lib_es.extra_samplers.euler_smea_dy import sample_euler_smea_dy
from lib_es.extra_samplers.euler_smea_dy_negative import sample_euler_smea_dy_negative
from lib_es.extra_samplers.euler_max import sample_euler_max
from lib_es.extra_samplers.euler_multipass import (
    sample_euler_multipass,
    sample_euler_multipass_cfg_pp,
    sample_euler_ancestral_multipass,
    sample_euler_ancestral_multipass_cfg_pp,
)
from lib_es.extra_samplers.euler_negative import sample_euler_negative
from lib_es.extra_samplers.extended_reverse_time import sample_er_sde
from lib_es.extra_samplers.gradient_estimation import sample_gradient_estimation
from lib_es.extra_samplers.heun_ancestral import sample_heun_ancestral
from lib_es.extra_samplers.kohaku_lonyu_yog import sample_kohaku_lonyu_yog
from lib_es.extra_samplers.langevin_euler import sample_langevin_euler
# from lib_es.extra_samplers.res_multistep import (
#     sample_res_multistep,
#     sample_res_multistep_cfg_pp,
#     sample_res_multistep_ancestral,
#     sample_res_multistep_ancestral_cfg_pp,
# )

__sampler_funcs__ = [
    # sample_adaptive_progressive,
    sample_euler_max,
    sample_euler_negative,
    sample_euler_dy,
    sample_euler_dy_negative,
    sample_euler_smea,
    sample_euler_smea_dy,
    sample_euler_smea_dy_negative,
    sample_euler_multipass,
    sample_euler_multipass_cfg_pp,
    sample_euler_ancestral_multipass,
    sample_euler_ancestral_multipass_cfg_pp,
    sample_er_sde,
    sample_gradient_estimation,
    sample_heun_ancestral,
    sample_kohaku_lonyu_yog,
    sample_langevin_euler,
    # sample_res_multistep_ancestral_cfg_pp,
    # sample_res_multistep_ancestral,
    # sample_res_multistep_cfg_pp,
    # sample_res_multistep,
]
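The `__sampler_funcs__` list appears to be the single place new sampler functions are collected for registration (presumably consumed by `lib_es/samplers.py`). A hypothetical sketch of turning it into `(display name, function)` pairs is shown below; the `sampler_name` attribute is an assumption about what the `sampler_metadata` decorator attaches and is not confirmed by this diff.

```python
import lib_es.extra_samplers as extra_samplers

def collect_samplers():
    """Build (display name, sampler function) pairs from the exported list."""
    pairs = []
    for func in extra_samplers.__sampler_funcs__:
        # Fall back to a name derived from the function name,
        # e.g. sample_euler_max -> "Euler Max", if no metadata is attached.
        fallback = func.__name__.removeprefix("sample_").replace("_", " ").title()
        pairs.append((getattr(func, "sampler_name", fallback), func))
    return pairs
```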
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.48 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/adaptive_progressive.cpython-310.pyc ADDED
Binary file (4.57 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_dy.cpython-310.pyc ADDED
Binary file (1.22 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_dy_negative.cpython-310.pyc ADDED
Binary file (1.26 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_max.cpython-310.pyc ADDED
Binary file (1.18 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_multipass.cpython-310.pyc ADDED
Binary file (4.32 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_negative.cpython-310.pyc ADDED
Binary file (1.19 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea.cpython-310.pyc ADDED
Binary file (1.21 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea_dy.cpython-310.pyc ADDED
Binary file (1.31 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/euler_smea_dy_negative.cpython-310.pyc ADDED
Binary file (1.37 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/extended_reverse_time.cpython-310.pyc ADDED
Binary file (2.4 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/gradient_estimation.cpython-310.pyc ADDED
Binary file (4.81 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/heun_ancestral.cpython-310.pyc ADDED
Binary file (1.75 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/kohaku_lonyu_yog.cpython-310.pyc ADDED
Binary file (1.58 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/langevin_euler.cpython-310.pyc ADDED
Binary file (2.01 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/__pycache__/res_multistep.cpython-310.pyc ADDED
Binary file (5.43 kB)
sd-forge-extra-samplers/lib_es/extra_samplers/adaptive_progressive.py ADDED
@@ -0,0 +1,227 @@
1
+ import math
2
+ import torch
3
+ from tqdm.auto import trange
4
+ from k_diffusion.sampling import to_d, get_ancestral_step, default_noise_sampler
6
+
7
+ import lib_es.const as consts
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata(
12
+ "Adaptive Progressive",
13
+ {"scheduler": "sgm_uniform", "uses_ensd": True},
14
+ )
15
+ @torch.no_grad()
16
+ def sample_adaptive_progressive(
17
+ model,
18
+ x,
19
+ sigmas,
20
+ extra_args=None,
21
+ callback=None,
22
+ disable=None,
23
+ s_churn=0.0,
24
+ s_tmin=0.0,
25
+ s_tmax=float("inf"),
26
+ s_noise=1.0,
27
+ noise_sampler=None,
28
+ ):
29
+ """
30
+ Adaptive progressive sampler that automatically adjusts to different step counts.
31
+ Combines Euler ancestral, DPM++ 2M, and detail enhancement with phase-based transitions.
32
+
33
+ This sampler is optimized for both high and very low step counts (4+),
34
+ dynamically adjusting phase durations based on total step count.
35
+
36
+ Args:
37
+ model: The denoising model
38
+ x: Input noise tensor
39
+ sigmas: Noise schedule
40
+ extra_args: Additional arguments for the model
41
+ callback: Optional callback function
42
+ disable: Whether to disable the progress bar
43
+ s_churn: Amount of stochasticity
44
+ s_tmin: Minimum sigma for stochasticity
45
+ s_tmax: Maximum sigma for stochasticity
46
+ s_noise: Noise scale
+ noise_sampler: Custom noise sampler function
+
+ Note: the phase end points, ancestral eta, and detail strength are read from
+ the generation parameters (model.p) rather than passed as function arguments.
50
+
51
+ Returns:
52
+ Denoised tensor
53
+ """
54
+ extra_args = {} if extra_args is None else extra_args
55
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
56
+ s_in = x.new_ones([x.shape[0]])
57
+ steps = len(sigmas) - 1
58
+
59
+ euler_a_end = getattr(model.p, consts.AP_EULER_A_END, 0.35)
60
+ dpm_2m_end = getattr(model.p, consts.AP_DPM_2M_END, 0.75)
61
+ ancestral_eta = getattr(model.p, consts.AP_ANCESTRAL_ETA, 0.4)
62
+ detail_strength = getattr(model.p, consts.AP_DETAIL_STRENGTH, 1.5)
63
+
64
+ # Store previous steps' information
65
+ prev_d = None
66
+ prev_denoised = None
67
+
68
+ euler_end, dpm_end = calc_phase_bounds(steps, euler_a_end, dpm_2m_end)
69
+
70
+ for i in trange(steps, disable=disable):
71
+ progress = i / steps
72
+
73
+ # Calculate weights based on phase
74
+ if progress < euler_end:
75
+ # Euler ancestral phase
76
+ w_euler = 1.0
77
+ w_multi = 0.0
78
+ w_detail = 0.0
79
+ elif progress < dpm_end:
80
+ # DPM++ phase - smooth transition from Euler
81
+ phase_progress = (progress - euler_end) / (dpm_end - euler_end)
82
+ w_euler = max(0.0, 1.0 - phase_progress * 2.5) # Faster transition out of Euler
83
+ w_multi = 1.0 - w_euler
84
+ w_detail = 0.0
85
+ else:
86
+ # Detail refinement phase - gradual transition
87
+ phase_progress = (progress - dpm_end) / (1.0 - dpm_end)
88
+ w_euler = 0.0
89
+ w_multi = max(0.0, 1.0 - phase_progress * 1.5) # Gradual reduction in DPM++
90
+ w_detail = 1.0 - w_multi
91
+
92
+ # Apply adaptive stochasticity (only in early steps)
93
+ if s_churn > 0 and s_tmin <= sigmas[i] <= s_tmax and progress < 0.4:
94
+ # Scale down stochasticity as we progress
95
+ gamma = min(s_churn / steps, 2**0.5 - 1) * (1.0 - progress / 0.4)
96
+ sigma_hat = sigmas[i] * (gamma + 1)
97
+ eps = torch.randn_like(x) * s_noise
98
+ x = x + eps * (sigma_hat**2 - sigmas[i] ** 2).sqrt()
99
+ else:
100
+ sigma_hat = sigmas[i]
101
+
102
+ # Get denoised prediction
103
+ denoised = model(x, sigma_hat * s_in, **extra_args)
104
+
105
+ if callback is not None:
106
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
107
+
108
+ # Calculate sigma for step
109
+ # Reduce eta as we progress to lower noise in later steps
110
+ step_eta = ancestral_eta if progress < 0.5 else ancestral_eta * (1.0 - min(1.0, (progress - 0.5) * 2.0))
111
+ sigma_down, sigma_up = get_ancestral_step(sigma_hat, sigmas[i + 1], eta=step_eta)
112
+
113
+ # Calculate current score
114
+ d = to_d(x, sigma_hat, denoised)
115
+ dt = sigma_down - sigma_hat
116
+
117
+ # Special case for final step
118
+ if sigmas[i + 1] == 0:
119
+ x = denoised
120
+ break
121
+
122
+ # Calculate step direction based on phase
123
+ if prev_d is None:
124
+ # First step is pure Euler ancestral
125
+ direction = d
126
+ else:
127
+ # Initialize direction
128
+ direction = torch.zeros_like(d)
129
+
130
+ # Add Euler component if needed
131
+ if w_euler > 0:
132
+ direction += w_euler * d
133
+
134
+ # Add DPM++ component if needed
135
+ if w_multi > 0:
136
+ # Adjust coefficients based on noise level
137
+ if sigma_hat > 2.0:
138
+ # Higher noise: favor current direction
139
+ c1, c2 = 0.7, 0.3
140
+ else:
141
+ # Lower noise: more balanced
142
+ c1, c2 = 0.6, 0.4
143
+
144
+ multi_direction = c1 * d + c2 * prev_d
145
+ direction += w_multi * multi_direction
146
+
147
+ # Add detail enhancement if needed
148
+ if w_detail > 0 and prev_denoised is not None:
149
+ # Only apply significant enhancement at lower noise levels
150
+ if sigma_hat < 1.0:
151
+ # Calculate detail vector (high frequency components)
152
+ detail_vector = denoised - prev_denoised
153
+
154
+ # Scale based on noise level - stronger at very low noise
155
+ detail_scale = detail_strength * min(1.0, 0.2 / (sigma_hat + 0.2))
156
+
157
+ # Apply detail enhancement with adaptive scaling
158
+ detail_direction = d + detail_vector * detail_scale / dt
159
+ direction += w_detail * detail_direction
160
+ else:
161
+ # At higher noise levels, use standard direction
162
+ direction += w_detail * d
163
+
164
+ # Ensure numerical stability
165
+ direction = torch.clamp(direction, -1e2, 1e2)
166
+
167
+ # Apply the step
168
+ x = x + direction * dt
169
+
170
+ # Apply ancestral noise with progressive reduction
171
+ if sigma_up > 0:
172
+ # Only add significant noise in earlier steps
173
+ noise_scale = s_noise
174
+ if progress > 0.3:
175
+ # Exponential reduction in noise after Euler phase
176
+ noise_scale *= math.exp(-4.0 * (progress - 0.3))
177
+
178
+ # Add the scaled noise
179
+ x = x + noise_sampler(sigma_hat, sigmas[i + 1]) * sigma_up * noise_scale
180
+
181
+ # Store values for next step
182
+ prev_d = d
183
+ prev_denoised = denoised
184
+
185
+ return x
186
+
187
+
188
+ def calc_phase_bounds(steps: int, custom_euler_end: float = 0.25, custom_dpm_end: float = 0.7) -> tuple[float, float]:
189
+ """
190
+ Calculate phase boundaries for the adaptive progressive sampler.
191
+
192
+ Args:
193
+ steps: Total number of steps
194
+ custom_euler_end: End point for Euler phase (0.0-1.0)
195
+ custom_dpm_end: End point for DPM++ phase (0.0-1.0)
196
+
197
+ Returns:
198
+ Tuple of phase boundaries (Euler end, DPM++ end)
199
+ """
200
+ # Ensure values are within valid range
201
+ euler_end = max(0.0, min(1.0, custom_euler_end))
202
+ dpm_end = max(0.0, min(1.0, custom_dpm_end))
203
+
204
+ # Ensure euler_end < dpm_end
205
+ if euler_end >= dpm_end:
206
+ euler_end = max(0.0, dpm_end - 0.2) # Ensure at least 20% for DPM++ phase
207
+
208
+ # Adaptive phase boundaries based on step count
209
+ if steps < 10:
210
+ # For very low step counts, shorten Euler phase and extend detail phase
211
+ euler_end = min(euler_end, 0.15 + (steps - 4) * 0.01)
212
+ dpm_end = min(dpm_end, 0.5 + (steps - 4) * 0.02)
213
+ elif steps < 20:
214
+ # For low step counts, slightly adjust phases
215
+ euler_end = min(euler_end, 0.2)
216
+ dpm_end = min(dpm_end, 0.65)
217
+ elif steps > 50:
218
+ # For high step counts, extend the Euler phase slightly
219
+ euler_end = min(0.3, euler_end + (steps - 50) * 0.0005)
220
+ # And allow for a longer DPM++ phase
221
+ dpm_end = min(0.8, dpm_end + (steps - 50) * 0.0005)
222
+
223
+ # Ensure minimum phase lengths
224
+ if dpm_end - euler_end < 0.1:
225
+ dpm_end = min(1.0, euler_end + 0.1)
226
+
227
+ return euler_end, dpm_end
sd-forge-extra-samplers/lib_es/extra_samplers/euler_dy.py ADDED
@@ -0,0 +1,50 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import dy_sampling_step
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler Dy")
12
+ @torch.no_grad()
13
+ def sample_euler_dy(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+
28
+ for i in trange(len(sigmas) - 1, disable=disable):
29
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
30
+ eps = torch.randn_like(x) * s_noise
31
+ sigma_hat = sigmas[i] * (gamma + 1)
32
+ dt = sigmas[i + 1] - sigma_hat
33
+
34
+ if gamma > 0:
35
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
36
+
37
+ denoised = model(x, sigma_hat * s_in, **extra_args)
38
+ d = to_d(x, sigma_hat, denoised)
39
+
40
+ if sigmas[i + 1] > 0:
41
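+ # `i // 2 == 1` only holds for i == 2 and i == 3, so the Dy step is applied on those two early iterations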
+ if i // 2 == 1:
42
+ x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
43
+
44
+ if callback is not None:
45
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
46
+
47
+ # Euler method
48
+ x = x + d * dt
49
+
50
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_dy_negative.py ADDED
@@ -0,0 +1,50 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import dy_sampling_step
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler Dy Negative")
12
+ @torch.no_grad()
13
+ def sample_euler_dy_negative(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+
28
+ for i in trange(len(sigmas) - 1, disable=disable):
29
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
30
+ eps = torch.randn_like(x) * s_noise
31
+ sigma_hat = sigmas[i] * (gamma + 1)
32
+ dt = sigmas[i + 1] - sigma_hat
33
+
34
+ if gamma > 0:
35
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
36
+
37
+ denoised = model(x, sigma_hat * s_in, **extra_args)
38
+ d = to_d(x, sigma_hat, denoised)
39
+
40
+ if callback is not None:
41
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
42
+
43
+ # Euler method
44
+ if sigmas[i + 1] > 0 and i // 2 == 1:
45
+ x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
46
+ x = -x - d * dt
47
+ else:
48
+ x = x + d * dt
49
+
50
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_max.py ADDED
@@ -0,0 +1,45 @@
1
+ import math
2
+ import torch
3
+
4
+ from k_diffusion.sampling import to_d
5
+
6
+ from tqdm.auto import trange
7
+
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler Max")
12
+ @torch.no_grad()
13
+ def sample_euler_max(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+ for i in trange(len(sigmas) - 1, disable=disable):
28
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
29
+ eps = torch.randn_like(x) * s_noise
30
+ sigma_hat = sigmas[i] * (gamma + 1)
31
+
32
+ if gamma > 0:
33
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
34
+
35
+ denoised = model(x, sigma_hat * s_in, **extra_args)
36
+ d = to_d(x, sigma_hat, denoised)
37
+
38
+ if callback is not None:
39
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
40
+
41
+ dt = sigmas[i + 1] - sigma_hat
42
+
43
+ # Euler method
44
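+ # The Euler step is scaled by 1 + cos(i + 1) / (i + 1); the oscillating factor shrinks as i grows, so later steps approach a plain Euler update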
+ x = x + (math.cos(i + 1) / (i + 1) + 1) * d * dt
45
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_multipass.py ADDED
@@ -0,0 +1,290 @@
1
+ import torch
2
+ from tqdm import trange
3
+
4
+ from k_diffusion.sampling import get_ancestral_step, to_d
5
+
6
+ from lib_es.utils import default_noise_sampler, extend_sigmas, sampler_metadata
7
+
8
+
9
+ # ==============================================================================================================
10
+ # - Originally written by aria1th: https://github.com/aria1th
11
+ # - CFG++ support written by LaVie024: https://github.com/LaVie024
12
+ # - Standard Euler support written by catboxanon: https://github.com/catboxanon
13
+ # ==============================================================================================================
14
+
15
+
16
+ def apply_churn(x, sub_sigma, s_churn, s_tmin, s_tmax, s_noise, pass_step):
17
+ if s_churn > 0:
18
+ # use max(1, ...) so that pass_steps == 1 does not divide by zero
+ gamma = min(s_churn / max(1, pass_step - 1), 2**0.5 - 1) if s_tmin <= sub_sigma < s_tmax else 0
19
+ sigma_hat = sub_sigma * (gamma + 1)
20
+ else:
21
+ gamma = 0
22
+ sigma_hat = sub_sigma
23
+
24
+ if gamma > 0:
25
+ eps = torch.randn_like(x) * s_noise
26
+ x = x + eps * (sigma_hat**2 - sub_sigma**2) ** 0.5
27
+
28
+ return x, sigma_hat
29
+
30
+
31
+ @torch.no_grad()
32
+ def euler_ancestral_multipass(
33
+ model,
34
+ x,
35
+ sigmas,
36
+ extra_args=None,
37
+ callback=None,
38
+ disable=None,
39
+ eta=1.0,
40
+ s_noise=1.0,
41
+ noise_sampler=None,
42
+ pass_steps=2,
43
+ pass_sigma_max=float("inf"),
44
+ pass_sigma_min=12.0,
45
+ cfg_pp=False,
46
+ ):
47
+ """
48
+ A multipass variant of Euler-Ancestral sampling.
49
+ - For each i in [0, len(sigmas)-2], we check if sigma_i is in [pass_sigma_min, pass_sigma_max].
50
+ If so, subdivide the step from sigma_i -> sigma_{i+1} into 'pass_steps' sub-steps.
51
+ Otherwise, do a single standard step.
52
+ - Each sub-step calls 'get_ancestral_step(...)' with the sub-interval's start & end sigmas,
53
+ then applies the usual Euler-Ancestral update:
54
+ x = x + d*dt + (noise * sigma_up)
55
+ """
56
+ extra_args = {} if extra_args is None else extra_args
57
+ seed = extra_args.get("seed", None)
58
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
59
+ s_in = x.new_ones([x.shape[0]])
60
+
61
+ if cfg_pp:
62
+ model.need_last_noise_uncond = True
63
+ model.inner_model.inner_model.forge_objects.unet.model_options["disable_cfg1_optimization"] = True
64
+
65
+ sub_sigmas = extend_sigmas(sigmas, pass_steps, pass_sigma_max, pass_sigma_min)
66
+
67
+ for i in trange(len(sub_sigmas) - 1, disable=disable):
68
+ # Current sub-step range:
69
+ sub_sigma_curr = sub_sigmas[i]
70
+ sub_sigma_next = sub_sigmas[i + 1]
71
+
72
+ # Denoise at the current sub-sigma
73
+ denoised = model(x, sub_sigma_curr * s_in, **extra_args)
74
+
75
+ if callback is not None:
76
+ callback({"x": x, "i": i, "sub_step": i, "sigma": sub_sigma_curr, "denoised": denoised})
77
+
78
+ # Compute the ancestral step parameters for this sub-interval
79
+ sigma_down, sigma_up = get_ancestral_step(sub_sigma_curr, sub_sigma_next, eta=eta)
80
+
81
+ d = model.last_noise_uncond if cfg_pp else to_d(x, sub_sigma_curr, denoised)
82
+
83
+ if cfg_pp:
84
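+ # CFG++-style update: rebuild x from the denoised estimate plus the last unconditional noise prediction scaled to sigma_down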
+ x = denoised + d * sigma_down
85
+ elif sigma_down == 0.0:
86
+ x = denoised
87
+ else:
88
+ x = x + d * (sigma_down - sub_sigma_curr)
89
+
90
+ if sigma_up != 0.0:
91
+ # Add noise for the "ancestral" part
92
+ x = x + noise_sampler(sub_sigma_curr, sub_sigma_next) * (s_noise * sigma_up)
93
+
94
+ return x
95
+
96
+
97
+ @sampler_metadata(name="Euler a Multipass", extra_params={"uses_ensd": True})
98
+ def sample_euler_ancestral_multipass(
99
+ model,
100
+ x,
101
+ sigmas,
102
+ extra_args=None,
103
+ callback=None,
104
+ disable=None,
105
+ eta=1.0,
106
+ s_noise=1.0,
107
+ noise_sampler=None,
108
+ pass_steps=2,
109
+ pass_sigma_max=float("inf"),
110
+ pass_sigma_min=12.0,
111
+ ):
112
+ return euler_ancestral_multipass(
113
+ model,
114
+ x,
115
+ sigmas,
116
+ extra_args,
117
+ callback,
118
+ disable,
119
+ eta,
120
+ s_noise,
121
+ noise_sampler,
122
+ pass_steps,
123
+ pass_sigma_max,
124
+ pass_sigma_min,
125
+ False,
126
+ )
127
+
128
+
129
+ @sampler_metadata(name="Euler a Multipass CFG++", extra_params={"uses_ensd": True})
130
+ def sample_euler_ancestral_multipass_cfg_pp(
131
+ model,
132
+ x,
133
+ sigmas,
134
+ extra_args=None,
135
+ callback=None,
136
+ disable=None,
137
+ eta=1.0,
138
+ s_noise=1.0,
139
+ noise_sampler=None,
140
+ pass_steps=2,
141
+ pass_sigma_max=float("inf"),
142
+ pass_sigma_min=12.0,
143
+ ):
144
+ return euler_ancestral_multipass(
145
+ model,
146
+ x,
147
+ sigmas,
148
+ extra_args,
149
+ callback,
150
+ disable,
151
+ eta,
152
+ s_noise,
153
+ noise_sampler,
154
+ pass_steps,
155
+ pass_sigma_max,
156
+ pass_sigma_min,
157
+ True,
158
+ )
159
+
160
+
161
+ @torch.no_grad()
162
+ def euler_multipass(
163
+ model,
164
+ x,
165
+ sigmas,
166
+ extra_args=None,
167
+ callback=None,
168
+ disable=None,
169
+ noise_sampler=None,
170
+ s_churn=0.0,
171
+ s_tmin=0.0,
172
+ s_tmax=float("inf"),
173
+ s_noise=1.0,
174
+ pass_steps=2,
175
+ pass_sigma_max=float("inf"),
176
+ pass_sigma_min=12.0,
177
+ cfg_pp=False,
178
+ ):
179
+ """
180
+ A multipass variant of Euler sampling.
181
+ """
182
+ extra_args = {} if extra_args is None else extra_args
183
+ seed = extra_args.get("seed", None)
184
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
185
+
186
+ if cfg_pp:
187
+ model.need_last_noise_uncond = True
188
+ model.inner_model.inner_model.forge_objects.unet.model_options["disable_cfg1_optimization"] = True
189
+
190
+ s_in = x.new_ones([x.shape[0]])
191
+ sub_sigmas = extend_sigmas(sigmas, pass_steps, pass_sigma_max, pass_sigma_min)
192
+
193
+ for i in trange(len(sub_sigmas) - 1, disable=disable):
194
+ # Current sub-step range:
195
+ sub_sigma_curr = sub_sigmas[i]
196
+ sub_sigma_next = sub_sigmas[i + 1]
197
+
198
+ x, sigma_hat = apply_churn(x, sub_sigma_curr, s_churn, s_tmin, s_tmax, s_noise, pass_steps)
199
+
200
+ # Denoise at the current sub-sigma
201
+ denoised = model(x, sub_sigma_curr * s_in, **extra_args)
202
+
203
+ if callback is not None:
204
+ callback(
205
+ {
206
+ "x": x,
207
+ "i": i,
208
+ "sub_step": i,
209
+ "sigma": sub_sigma_curr,
210
+ "sigma_hat": sigma_hat,
211
+ "denoised": denoised,
212
+ }
213
+ )
214
+
215
+ d = model.last_noise_uncond if cfg_pp else to_d(x, sigma_hat, denoised)
216
+ x = denoised + d * sub_sigma_next if cfg_pp else x + d * (sub_sigma_next - sigma_hat)
217
+
218
+ return x
219
+
220
+
221
+ @sampler_metadata(name="Euler Multipass")
222
+ def sample_euler_multipass(
223
+ model,
224
+ x,
225
+ sigmas,
226
+ extra_args=None,
227
+ callback=None,
228
+ disable=None,
229
+ s_noise=1.0,
230
+ s_churn=0.0,
231
+ s_tmin=0.0,
232
+ s_tmax=float("inf"),
233
+ noise_sampler=None,
234
+ pass_steps=2,
235
+ pass_sigma_max=float("inf"),
236
+ pass_sigma_min=12.0,
237
+ ):
238
+ return euler_multipass(
239
+ model,
240
+ x,
241
+ sigmas,
242
+ extra_args,
243
+ callback,
244
+ disable,
245
+ noise_sampler,
246
+ s_churn,
247
+ s_tmin,
248
+ s_tmax,
249
+ s_noise,
250
+ pass_steps,
251
+ pass_sigma_max,
252
+ pass_sigma_min,
253
+ False,
254
+ )
255
+
256
+
257
+ @sampler_metadata(name="Euler Multipass CFG++")
258
+ def sample_euler_multipass_cfg_pp(
259
+ model,
260
+ x,
261
+ sigmas,
262
+ extra_args=None,
263
+ callback=None,
264
+ disable=None,
265
+ s_noise=1.0,
266
+ s_churn=0.0,
267
+ s_tmin=0.0,
268
+ s_tmax=float("inf"),
269
+ noise_sampler=None,
270
+ pass_steps=2,
271
+ pass_sigma_max=float("inf"),
272
+ pass_sigma_min=12.0,
273
+ ):
274
+ return euler_multipass(
275
+ model,
276
+ x,
277
+ sigmas,
278
+ extra_args,
279
+ callback,
280
+ disable,
281
+ noise_sampler,
282
+ s_churn,
283
+ s_tmin,
284
+ s_tmax,
285
+ s_noise,
286
+ pass_steps,
287
+ pass_sigma_max,
288
+ pass_sigma_min,
289
+ True,
290
+ )
sd-forge-extra-samplers/lib_es/extra_samplers/euler_negative.py ADDED
@@ -0,0 +1,48 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import sampler_metadata
8
+
9
+
10
+ @sampler_metadata("Euler Negative")
11
+ @torch.no_grad()
12
+ def sample_euler_negative(
13
+ model,
14
+ x,
15
+ sigmas,
16
+ extra_args=None,
17
+ callback=None,
18
+ disable=None,
19
+ s_churn=0.0,
20
+ s_tmin=0.0,
21
+ s_tmax=float("inf"),
22
+ s_noise=1.0,
23
+ ):
24
+ extra_args = {} if extra_args is None else extra_args
25
+ s_in = x.new_ones([x.shape[0]])
26
+
27
+ for i in trange(len(sigmas) - 1, disable=disable):
28
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
29
+ eps = torch.randn_like(x) * s_noise
30
+ sigma_hat = sigmas[i] * (gamma + 1)
31
+ dt = sigmas[i + 1] - sigma_hat
32
+
33
+ if gamma > 0:
34
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
35
+
36
+ denoised = model(x, sigma_hat * s_in, **extra_args)
37
+ d = to_d(x, sigma_hat, denoised)
38
+
39
+ if callback is not None:
40
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
41
+
42
+ # Euler method
43
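+ # On steps where i // 2 == 1 (i.e. i == 2 and i == 3) the latent is negated before stepping, which is what distinguishes the Negative variant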
+ if sigmas[i + 1] > 0 and i // 2 == 1:
44
+ x = -x - d * dt
45
+ else:
46
+ x = x + d * dt
47
+
48
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea.py ADDED
@@ -0,0 +1,49 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import overall_sampling_step
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler SMEA")
12
+ @torch.no_grad()
13
+ def sample_euler_smea(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+
28
+ for i in trange(len(sigmas) - 1, disable=disable):
29
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
30
+ eps = torch.randn_like(x) * s_noise
31
+ sigma_hat = sigmas[i] * (gamma + 1)
32
+ dt = sigmas[i + 1] - sigma_hat
33
+
34
+ if i // 2 == 1:
35
+ x = overall_sampling_step(x, model, dt, sigma_hat, **extra_args)
36
+
37
+ if gamma > 0:
38
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
39
+
40
+ denoised = model(x, sigma_hat * s_in, **extra_args)
41
+ d = to_d(x, sigma_hat, denoised)
42
+
43
+ if callback is not None:
44
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
45
+
46
+ # Euler method
47
+ x = x + d * dt
48
+
49
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea_dy.py ADDED
@@ -0,0 +1,53 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import dy_sampling_step, smea_sampling_step
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler SMEA Dy")
12
+ @torch.no_grad()
13
+ def sample_euler_smea_dy(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+
28
+ for i in trange(len(sigmas) - 1, disable=disable):
29
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
30
+ eps = torch.randn_like(x) * s_noise
31
+ sigma_hat = sigmas[i] * (gamma + 1)
32
+ dt = sigmas[i + 1] - sigma_hat
33
+
34
+ if gamma > 0:
35
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
36
+
37
+ denoised = model(x, sigma_hat * s_in, **extra_args)
38
+ d = to_d(x, sigma_hat, denoised)
39
+
40
+ # Euler method
41
+ x = x + d * dt
42
+
43
+ if sigmas[i + 1] > 0:
44
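+ # Note: `i + 1 // 2` parses as `i + (1 // 2)`, i.e. just `i`, so the Dy step below runs only at i == 1 and the SMEA step only at i == 0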
+ if i + 1 // 2 == 1:
45
+ x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
46
+
47
+ if i + 1 // 2 == 0:
48
+ x = smea_sampling_step(x, model, dt, sigma_hat, **extra_args)
49
+
50
+ if callback is not None:
51
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
52
+
53
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/euler_smea_dy_negative.py ADDED
@@ -0,0 +1,55 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import dy_sampling_step, smea_sampling_step
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ @sampler_metadata("Euler SMEA Dy Negative")
12
+ @torch.no_grad()
13
+ def sample_euler_smea_dy_negative(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ s_churn=0.0,
21
+ s_tmin=0.0,
22
+ s_tmax=float("inf"),
23
+ s_noise=1.0,
24
+ ):
25
+ extra_args = {} if extra_args is None else extra_args
26
+ s_in = x.new_ones([x.shape[0]])
27
+
28
+ for i in trange(len(sigmas) - 1, disable=disable):
29
+ gamma = max(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
30
+ eps = torch.randn_like(x) * s_noise
31
+ sigma_hat = sigmas[i] * (gamma + 1)
32
+ dt = sigmas[i + 1] - sigma_hat
33
+
34
+ if gamma > 0:
35
+ x = x - eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
36
+
37
+ denoised = model(x, sigma_hat * s_in, **extra_args)
38
+ d = to_d(x, sigma_hat, denoised)
39
+
40
+ # Euler method
41
+ x = x + d * dt
42
+
43
+ if sigmas[i + 1] > 0:
44
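+ # As in Euler SMEA Dy, `i + 1 // 2` evaluates to `i`, so these branches fire only at i == 1 and i == 0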
+ if i + 1 // 2 == 1:
45
+ x = dy_sampling_step(x, model, dt, sigma_hat, **extra_args)
46
+ x = -x - d * dt
47
+
48
+ if i + 1 // 2 == 0:
49
+ x = smea_sampling_step(x, model, dt, sigma_hat, **extra_args)
50
+ x = -x - d * dt
51
+
52
+ if callback is not None:
53
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
54
+
55
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/extended_reverse_time.py ADDED
@@ -0,0 +1,83 @@
1
+ import torch
2
+ from tqdm import trange
3
+
4
+ import lib_es.const as consts
5
+ from lib_es.utils import default_noise_sampler, sampler_metadata
6
+
7
+
8
+ # From ComfyUI
9
+ @sampler_metadata(
10
+ "Extended Reverse-Time SDE",
11
+ {"uses_ensd": True, "scheduler": "sgm_uniform"},
12
+ ["sample_er_sde, extended_reverse_sde"],
13
+ )
14
+ @torch.no_grad()
15
+ def sample_er_sde(
16
+ model,
17
+ x,
18
+ sigmas,
19
+ extra_args=None,
20
+ callback=None,
21
+ disable=None,
22
+ s_noise=1.0,
23
+ noise_sampler=None,
24
+ noise_scaler=None,
25
+ ):
26
+ """
27
+ Extended Reverse-Time SDE solver (VE ER-SDE-Solver-3). Arxiv: https://arxiv.org/abs/2309.06169.
28
+ Code reference: https://github.com/QinpengCui/ER-SDE-Solver/blob/main/er_sde_solver.py.
29
+ """
30
+ extra_args = {} if extra_args is None else extra_args
31
+ seed = extra_args.get("seed", None)
32
+ noise_sampler = default_noise_sampler(x, seed=seed) if noise_sampler is None else noise_sampler
33
+ s_in = x.new_ones([x.shape[0]])
34
+
35
+ max_stage: int = getattr(model.p, consts.ER_MAX_STAGE, 3)
36
+
37
+ def default_noise_scaler(sigma):
38
+ return sigma * ((sigma**0.3).exp() + 10.0)
39
+
40
+ noise_scaler = default_noise_scaler if noise_scaler is None else noise_scaler
41
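+ # 200-point grid used below for a Riemann-sum approximation of the ER-SDE integral terms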
+ num_integration_points = 200.0
42
+ point_indice = torch.arange(0, num_integration_points, dtype=torch.float32, device=x.device)
43
+
44
+ old_denoised = None
45
+ old_denoised_d = None
46
+
47
+ for i in trange(len(sigmas) - 1, disable=disable):
48
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
49
+ if callback is not None:
50
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
51
+ stage_used = min(max_stage, i + 1)
52
+ if sigmas[i + 1] == 0:
53
+ x = denoised
54
+ elif stage_used == 1:
55
+ r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i])
56
+ x = r * x + (1 - r) * denoised
57
+ else:
58
+ r = noise_scaler(sigmas[i + 1]) / noise_scaler(sigmas[i])
59
+ x = r * x + (1 - r) * denoised
60
+
61
+ dt = sigmas[i + 1] - sigmas[i]
62
+ sigma_step_size = -dt / num_integration_points
63
+ sigma_pos = sigmas[i + 1] + point_indice * sigma_step_size
64
+ scaled_pos = noise_scaler(sigma_pos)
65
+
66
+ # Stage 2
67
+ s = torch.sum(1 / scaled_pos) * sigma_step_size
68
+ denoised_d = (denoised - old_denoised) / (sigmas[i] - sigmas[i - 1])
69
+ x = x + (dt + s * noise_scaler(sigmas[i + 1])) * denoised_d
70
+
71
+ if stage_used >= 3:
72
+ # Stage 3
73
+ s_u = torch.sum((sigma_pos - sigmas[i]) / scaled_pos) * sigma_step_size
74
+ denoised_u = (denoised_d - old_denoised_d) / ((sigmas[i] - sigmas[i - 2]) / 2)
75
+ x = x + ((dt**2) / 2 + s_u * noise_scaler(sigmas[i + 1])) * denoised_u
76
+ old_denoised_d = denoised_d
77
+
78
+ if s_noise != 0 and sigmas[i + 1] > 0:
79
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * (
80
+ sigmas[i + 1] ** 2 - sigmas[i] ** 2 * r**2
81
+ ).sqrt().nan_to_num(nan=0.0)
82
+ old_denoised = denoised
83
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/gradient_estimation.py ADDED
@@ -0,0 +1,180 @@
1
+ from collections.abc import Callable
2
+ from typing import Any, Optional
3
+ import torch
4
+ from tqdm import trange
5
+
6
+ from k_diffusion.sampling import to_d
7
+ from modules import errors
8
+
9
+ import lib_es.const as consts
10
+ from lib_es.utils import sampler_metadata
11
+
12
+
13
+ def compute_optimal_gamma(steps: int, adaptive: bool = True) -> float:
14
+ """
15
+ Compute the optimal gamma parameter for gradient estimation based on step count.
16
+
17
+ Args:
18
+ steps: Number of sampling steps
19
+ adaptive: Whether to use adaptive gamma based on step count
20
+
21
+ Returns:
22
+ Optimal gamma value
23
+ """
24
+ if not adaptive:
25
+ return consts.GE_DEFAULT_GAMMA
26
+
27
+ # Define min and max values
28
+ min_steps, max_steps = 10, 100
29
+ min_gamma, max_gamma = 1.5, 2.6
30
+
31
+ # Handle edge cases
32
+ if steps <= min_steps:
33
+ return min_gamma
34
+ elif steps >= max_steps:
35
+ return max_gamma
36
+
37
+ # Apply logarithmic scaling
38
+ # log(steps/min_steps) / log(max_steps/min_steps) gives a value from 0 to 1
39
+ # that increases logarithmically with steps
40
+ log_factor = torch.log(torch.tensor(steps / min_steps)) / torch.log(torch.tensor(max_steps / min_steps))
41
+
42
+ # Convert the logarithmic factor to gamma value
43
+ gamma = min_gamma + log_factor * (max_gamma - min_gamma)
44
+
45
+ return float(gamma)
46
+
47
+
48
+ def validate_schedule(sigmas: torch.Tensor, eta: float = 0.1, nu: float = 2.0) -> bool:
49
+ """
50
+ Validate whether a noise schedule satisfies the admissibility criteria from the paper.
51
+
52
+ Args:
53
+ sigmas: Tensor of noise levels in descending order
54
+ eta: Error parameter
55
+ nu: Accuracy parameter for distance estimates
56
+
57
+ Returns:
58
+ True if schedule is admissible, False otherwise
59
+ """
60
+ n = len(sigmas) - 1
61
+ is_admissible = True
62
+ issues = []
63
+
64
+ # Check if sigmas are strictly decreasing
65
+ if not torch.all(sigmas[:-1] > sigmas[1:]):
66
+ is_admissible = False
67
+ issues.append("Sigmas must be strictly decreasing")
68
+
69
+ # Calculate the maximum allowable beta
70
+ c = 1 - nu ** (-1 / n)
71
+ beta_max = c / (eta + c)
72
+
73
+ # Check that step sizes respect the admissibility criteria
74
+ for i in range(n - 1):
75
+ ratio = sigmas[i + 1] / sigmas[i]
76
+ beta = 1 - ratio
77
+ if beta > beta_max:
78
+ is_admissible = False
79
+ issues.append(f"Step {i} has beta {beta:.4f} > beta_max {beta_max:.4f}")
80
+
81
+ if not is_admissible:
82
+ errors.display(ValueError(f"Noise schedule is not admissible: {', '.join(issues)}"))
83
+ errors.print_error_explanation("Noise schedule validation failed.\n\tIssues:" + ",\n\t\t".join(issues))
84
+
85
+ return is_admissible
86
+
87
+
88
+ @torch.no_grad()
89
+ @sampler_metadata("Gradient Estimation", {"scheduler": "sgm_uniform"})
90
+ def sample_gradient_estimation(
91
+ model,
92
+ x: torch.Tensor,
93
+ sigmas: torch.Tensor,
94
+ extra_args: Optional[dict[str, Any]] = None,
95
+ callback: Optional[Callable] = None,
96
+ disable: Optional[bool] = None,
97
+ validate_sigmas: bool = False,
98
+ eta: float = 0.1,
99
+ nu: float = 2.0,
100
+ ) -> torch.Tensor:
101
+ """
102
+ Gradient-estimation sampler as described in "Interpreting and Improving Diffusion Models from an Optimization Perspective".
103
+
104
+ This sampler implements a second-order method that improves upon DDIM by using a combination of current and previous
105
+ gradients to reduce gradient estimation error. It is based on the insight that denoising is approximately equivalent to
106
+ projection onto the data manifold, and diffusion sampling is gradient descent on the squared Euclidean distance function.
107
+
108
+ Args:
109
+ model: The diffusion model
110
+ x: Input tensor
111
+ sigmas: Noise schedule (should be in descending order)
112
+ extra_args: Extra arguments to pass to the model
113
+ callback: Callback function
114
+ disable: Whether to disable the progress bar
115
+ validate_sigmas: Whether to validate the noise schedule
116
+ eta: Error parameter for schedule validation (default 0.1)
117
+ nu: Accuracy parameter for schedule validation (default 2.0)
118
+
119
+ Returns:
120
+ Denoised tensor
121
+
122
+ References:
123
+ Paper: https://openreview.net/pdf?id=o2ND9v0CeK
124
+ """
125
+ # Parameter validation and initialization
126
+ if sigmas.shape[0] < 2:
127
+ raise ValueError("Need at least 2 timesteps for gradient estimation")
128
+
129
+ extra_args = {} if extra_args is None else extra_args
130
+ s_in = x.new_ones([x.shape[0]])
131
+ old_d = None
132
+ steps = len(sigmas) - 1
133
+
134
+ # Schedule validation
135
+ if validate_sigmas:
136
+ validate_schedule(sigmas, eta, nu)
137
+
138
+ # Get gamma from model properties or compute optimal value
139
+ use_adaptive_steps: bool = getattr(model.p, consts.GE_USE_ADAPTIVE_STEPS, True)
140
+ if use_adaptive_steps:
141
+ # Compute optimal gamma based on the number of steps
142
+ # and add the offset if specified
143
+ ge_gamma = compute_optimal_gamma(steps, use_adaptive_steps) + getattr(
144
+ model.p, consts.GE_GAMMA_OFFSET, consts.GE_DEFAULT_GAMMA_OFFSET
145
+ )
146
+ else:
147
+ ge_gamma = getattr(model.p, consts.GE_GAMMA, consts.GE_DEFAULT_GAMMA)
148
+
149
+ # Initialize timestep-adaptive gamma values if needed
150
+ timestep_adaptive_gamma = getattr(model.p, consts.GE_USE_TIMESTEP_ADAPTIVE_GAMMA, False)
151
+
152
+ if timestep_adaptive_gamma:
153
+ # Higher gamma at the beginning, lower toward the end
154
+ # This is a heuristic based on the observation that early steps benefit more
155
+ # from aggressive gradient correction
156
+ gammas = torch.linspace(ge_gamma * 1.2, ge_gamma * 0.8, steps)
157
+
158
+ # Main sampling loop
159
+ for i in trange(len(sigmas) - 1, disable=disable):
160
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
161
+ d = to_d(x, sigmas[i], denoised)
162
+
163
+ if callback is not None:
164
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
165
+
166
+ dt = sigmas[i + 1] - sigmas[i]
167
+
168
+ if i == 0:
169
+ # Euler method for first step
170
+ x = x + d * dt
171
+ else:
172
+ # Gradient estimation
173
+ current_gamma = gammas[i].item() if timestep_adaptive_gamma else ge_gamma
174
+
175
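+ # Combine the current and previous derivatives; with gamma > 1 this extrapolates past a plain Euler step to reduce gradient-estimation error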
+ d_bar = current_gamma * d + (1 - current_gamma) * old_d
176
+ x = x + d_bar * dt
177
+
178
+ old_d = d
179
+
180
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/heun_ancestral.py ADDED
@@ -0,0 +1,81 @@
1
+ import torch
2
+ from tqdm.auto import trange
3
+ from k_diffusion.sampling import default_noise_sampler, get_ancestral_step, to_d
4
+
5
+ from lib_es.utils import sampler_metadata
6
+
7
+
8
+ @sampler_metadata(
9
+ "Heun Ancestral",
10
+ {"uses_ensd": True},
11
+ )
12
+ @torch.no_grad()
13
+ def sample_heun_ancestral(
14
+ model,
15
+ x,
16
+ sigmas,
17
+ extra_args=None,
18
+ callback=None,
19
+ disable=None,
20
+ eta=1.0,
21
+ s_noise=1.0,
22
+ noise_sampler=None,
23
+ ):
24
+ """
25
+ Ancestral sampling with Heun's method steps.
26
+
27
+ Args:
28
+ model: The model to sample from.
29
+ x: The initial noise.
30
+ sigmas: The noise levels to sample at.
31
+ extra_args: Extra arguments to the model.
32
+ callback: A function that's called after each step.
33
+ disable: Disable tqdm progress bar.
34
+ eta: Ancestral sampling strength parameter.
35
+ s_noise: Noise scale.
36
+ noise_sampler: A function that returns noise.
37
+ """
38
+ extra_args = {} if extra_args is None else extra_args
39
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
40
+ s_in = x.new_ones([x.shape[0]])
41
+
42
+ for i in trange(len(sigmas) - 1, disable=disable):
43
+ # Get current and next sigma
44
+ sigma = sigmas[i]
45
+
46
+ # Run denoising model
47
+ denoised = model(x, sigma * s_in, **extra_args)
48
+
49
+ # Calculate ancestral step parameters
50
+ sigma_down, sigma_up = get_ancestral_step(sigma, sigmas[i + 1], eta=eta)
51
+
52
+ if callback is not None:
53
+ callback({"x": x, "i": i, "sigma": sigma, "sigma_hat": sigma, "denoised": denoised})
54
+
55
+ # Calculate the derivative
56
+ d = to_d(x, sigma, denoised)
57
+
58
+ # Determine step size
59
+ dt = sigma_down - sigma
60
+
61
+ if sigma_down == 0:
62
+ # For the last step, use Euler method for stability
63
+ x = x + d * dt
64
+ else:
65
+ # Heun's method (predictor-corrector)
66
+ # 1. Predictor step (Euler)
67
+ x_2 = x + d * dt
68
+
69
+ # 2. Evaluate at the predicted point
70
+ denoised_2 = model(x_2, sigma_down * s_in, **extra_args)
71
+ d_2 = to_d(x_2, sigma_down, denoised_2)
72
+
73
+ # 3. Corrector step (average of gradients)
74
+ d_prime = (d + d_2) / 2
75
+ x = x + d_prime * dt
76
+
77
+ # Add noise according to ancestral sampling formula
78
+ if sigma_up > 0:
79
+ x = x + noise_sampler(sigma, sigmas[i + 1]) * s_noise * sigma_up
80
+
81
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/kohaku_lonyu_yog.py ADDED
@@ -0,0 +1,58 @@
1
+ import torch
2
+
3
+ from k_diffusion.sampling import default_noise_sampler, get_ancestral_step, to_d
4
+
5
+ from tqdm.auto import trange
6
+
7
+ from lib_es.utils import sampler_metadata
8
+
9
+
10
+ @sampler_metadata("Kohaku LoNyu Yog")
11
+ @torch.no_grad()
12
+ def sample_kohaku_lonyu_yog(
13
+ model,
14
+ x,
15
+ sigmas,
16
+ extra_args=None,
17
+ callback=None,
18
+ disable=None,
19
+ s_churn=0.0,
20
+ s_tmin=0.0,
21
+ s_tmax=float("inf"),
22
+ s_noise=1.0,
23
+ noise_sampler=None,
24
+ eta=1.0,
25
+ ):
26
+ """Kohaku_LoNyu_Yog"""
27
+ extra_args = {} if extra_args is None else extra_args
28
+ s_in = x.new_ones([x.shape[0]])
29
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
30
+ for i in trange(len(sigmas) - 1, disable=disable):
31
+ gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
32
+ eps = torch.randn_like(x) * s_noise
33
+ sigma_hat = sigmas[i] * (gamma + 1)
34
+ if gamma > 0:
35
+ x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
36
+ denoised = model(x, sigma_hat * s_in, **extra_args)
37
+ d = to_d(x, sigma_hat, denoised)
38
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
39
+ if callback is not None:
40
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
41
+ dt = sigma_down - sigmas[i]
42
+
43
+ if i <= (len(sigmas) - 1) / 2:
44
+ x2 = -x
45
+ denoised2 = model(x2, sigma_hat * s_in, **extra_args)
46
+ d2 = to_d(x2, sigma_hat, denoised2)
47
+
48
+ x3 = x + ((d + d2) / 2) * dt
49
+ denoised3 = model(x3, sigma_hat * s_in, **extra_args)
50
+ d3 = to_d(x3, sigma_hat, denoised3)
51
+
52
+ real_d = (d + d3) / 2
53
+ x = x + real_d * dt
54
+
55
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
56
+ else:
57
+ x = x + d * dt
58
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/langevin_euler.py ADDED
@@ -0,0 +1,89 @@
1
+ import torch
2
+ from tqdm.auto import trange
3
+ from k_diffusion.sampling import default_noise_sampler, to_d
4
+
5
+ import lib_es.const as consts
6
+ from lib_es.utils import sampler_metadata
7
+
8
+
9
+ @sampler_metadata(
10
+ "Langevin Euler",
11
+ {"scheduler": "sgm_uniform"},
12
+ )
13
+ @torch.no_grad()
14
+ def sample_langevin_euler(
15
+ model,
16
+ x,
17
+ sigmas,
18
+ extra_args=None,
19
+ callback=None,
20
+ disable=None,
21
+ s_churn=0.0,
22
+ s_tmin=0.0,
23
+ s_tmax=float("inf"),
24
+ s_noise=1.0,
25
+ noise_sampler=None,
26
+ ):
27
+ """
28
+ Langevin dynamics sampler - the adaptive CFG is now handled by the CFG function.
29
+ This is the original implementation with the adaptive CFG logic removed.
30
+ """
31
+ extra_args = {} if extra_args is None else extra_args
32
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
33
+ s_in = x.new_ones([x.shape[0]])
34
+
35
+ # Store original shape for aspect ratio calculations
36
+ height, width = x.shape[2:4]
37
+ aspect_ratio = width / height
38
+ sigma_max = sigmas[0]
39
+
40
+ langevin_strength = getattr(model.p, consts.LANGEVIN_STRENGTH, 0.1)
41
+
42
+ for i in trange(len(sigmas) - 1, disable=disable):
43
+ # Apply s_churn noise if requested
44
+ gamma = min(s_churn / (len(sigmas) - 1), 2**0.5 - 1) if s_tmin <= sigmas[i] <= s_tmax else 0.0
45
+ eps = torch.randn_like(x) * s_noise
46
+ sigma_hat = sigmas[i] * (gamma + 1)
47
+ if gamma > 0:
48
+ x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
49
+
50
+ # Perform model prediction - CFG is now handled by our function
51
+ denoised = model(x, sigma_hat * s_in, **extra_args)
52
+
53
+ # Call the callback
54
+ if callback is not None:
55
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigma_hat, "denoised": denoised})
56
+
57
+ # Calculate the derivative
58
+ d = to_d(x, sigma_hat, denoised)
59
+
60
+ # Langevin step: Deterministic part + Noise part
61
+ dt = sigmas[i + 1] - sigma_hat
62
+
63
+ # Deterministic Euler step
64
+ x = x + d * dt
65
+
66
+ # Apply Langevin noise if not the final step
67
+ if sigmas[i + 1] > 0:
68
+ # Simpler Langevin noise logic with less aggressive scaling
69
+ # Use a constant base noise level with a gentle decay
70
+ base_noise_level = langevin_strength # Base level from parameter
71
+
72
+ # Gentle decay curve - more consistent noise across steps
73
+ # Sqrt provides a more gradual decrease than linear scaling
74
+ decay_factor = torch.sqrt(sigmas[i + 1] / sigma_max)
75
+ noise_scale = base_noise_level * (0.1 + 0.9 * decay_factor)
76
+
77
+ # Higher safety clamp to allow more noise influence
78
+ noise_scale = max(langevin_strength * 0.05, min(noise_scale, 0.8))
79
+
80
+ # Generate balanced noise
81
+ noise = torch.randn_like(x) * noise_scale
82
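+ # Note: reshape(1, -1, 1, 1) places these four factors along the channel axis of the 4-channel latent, so the scaling is applied per channel rather than over height/width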
+ height_scale = torch.sqrt(torch.tensor(aspect_ratio))
83
+ width_scale = 1.0 / height_scale
84
+ scaling = torch.tensor([1.0, 1.0, height_scale, width_scale]).reshape(1, -1, 1, 1).to(x.device)
85
+ balanced_noise = noise * scaling
86
+
87
+ x = x + balanced_noise
88
+
89
+ return x
sd-forge-extra-samplers/lib_es/extra_samplers/res_multistep.py ADDED
@@ -0,0 +1,235 @@
1
+ import torch
2
+ from tqdm.auto import trange
3
+
4
+ #from backend.patcher.unet import UnetPatcher  # only referenced in a type annotation below
+ from k_diffusion.sampling import default_noise_sampler, get_ancestral_step, to_d
7
+
8
+ from lib_es.utils import sampler_metadata
9
+
10
+
11
+ def sigma_fn(t):
12
+ """
13
+ Computes the sigma function for a given tensor `t`.
14
+ The sigma function is defined as the exponential of the negation of `t`.
15
+ Args:
16
+ t (torch.Tensor): Input tensor.
17
+ Returns:
18
+ torch.Tensor: The result of applying the sigma function to `t`.
19
+ """
20
+
21
+ return t.neg().exp()
22
+
23
+
24
+ def t_fn(sigma):
25
+ """
26
+ Computes the negative logarithm of the input tensor.
27
+ Args:
28
+ sigma (torch.Tensor): A tensor for which the negative logarithm is to be computed.
29
+ Returns:
30
+ torch.Tensor: A tensor containing the negative logarithm of the input tensor.
31
+ """
32
+
33
+ return sigma.log().neg()
34
+
35
+
36
+ def phi1_fn(t):
37
+ """
38
+ Computes the function phi1(t) = (exp(t) - 1) / t using PyTorch's expm1 function.
39
+ Args:
40
+ t (torch.Tensor): Input tensor.
41
+ Returns:
42
+ torch.Tensor: The result of (exp(t) - 1) / t.
43
+ """
44
+
45
+ return torch.expm1(t) / t
46
+
47
+
48
+ def phi2_fn(t):
49
+ """
50
+ Compute the value of the phi2 function.
51
+ The phi2 function is defined as (phi1_fn(t) - 1.0) / t, where phi1_fn is
52
+ another function that takes a single argument t.
53
+ Parameters:
54
+ t (float): The input value for the function.
55
+ Returns:
56
+ float: The computed value of the phi2 function.
57
+ """
58
+
59
+ return (phi1_fn(t) - 1.0) / t
60
+
61
+
62
+ @torch.no_grad()
63
+ def res_multistep(
64
+ model,
65
+ x,
66
+ sigmas,
67
+ extra_args=None,
68
+ callback=None,
69
+ disable=None,
70
+ s_noise=1.0,
71
+ noise_sampler=None,
72
+ eta=1.0,
73
+ cfg_pp=False,
74
+ ):
75
+ """
76
+ Perform multi-step denoising using a conditional denoising model.
77
+ Args:
78
+ model (CFGDenoiserKDiffusion): The denoising model to use.
79
+ x (torch.Tensor): The input tensor to be denoised.
80
+ sigmas (list or torch.Tensor): A list or tensor of sigma values for each step.
81
+ extra_args (dict, optional): Additional arguments to pass to the model. Defaults to None.
82
+ callback (callable, optional): A callback function to be called after each step. Defaults to None.
83
+ disable (bool, optional): If True, disables the progress bar. Defaults to None.
84
+ s_noise (float, optional): Noise scale for stochasticity. Defaults to 1.0.
85
+ noise_sampler (callable, optional): Function to sample noise. Defaults to None.
86
+ cfg_pp (bool, optional): If True, enables post-processing for classifier-free guidance. Defaults to False.
87
+ Returns:
88
+ torch.Tensor: The denoised output tensor.
89
+ """
90
+ extra_args = {} if extra_args is None else extra_args
91
+ noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
92
+ s_in = x.new_ones([x.shape[0]])
93
+
94
+ old_denoised = None
95
+ uncond_denoised = None
96
+
97
+ # unconditional denoised is used for the second order multistep method
98
+ def post_cfg_function(args):
99
+ nonlocal uncond_denoised
100
+ uncond_denoised = args["uncond_denoised"]
101
+ return args["denoised"]
102
+
103
+ if cfg_pp:
104
+ model.need_last_noise_uncond = True
105
+ unet_patcher: UnetPatcher = model.inner_model.inner_model.forge_objects.unet
106
+ unet_patcher.model_options["disable_cfg1_optimization"] = True # not sure if this really works
107
+ unet_patcher.set_model_sampler_post_cfg_function(post_cfg_function)
108
+
109
+ for i in trange(len(sigmas) - 1, disable=disable):
110
+ denoised = model(x, sigmas[i] * s_in, **extra_args)
111
+ sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1], eta=eta)
112
+ if callback is not None:
113
+ callback({"x": x, "i": i, "sigma": sigmas[i], "sigma_hat": sigmas[i], "denoised": denoised})
114
+ if sigma_down == 0 or old_denoised is None:
115
+ # Euler method
116
+ if cfg_pp:
117
+ d = to_d(x, sigmas[i], uncond_denoised)
118
+ x = denoised + d * sigma_down
119
+ else:
120
+ d = to_d(x, sigmas[i], denoised)
121
+ dt = sigma_down - sigmas[i]
122
+ x = x + d * dt
123
+ else:
124
+ # Second order multistep method in https://arxiv.org/pdf/2308.02157
125
+ t, t_next, t_prev = t_fn(sigmas[i]), t_fn(sigma_down), t_fn(sigmas[i - 1])
126
+ h = t_next - t
127
+ c2 = (t_prev - t) / h
128
+
129
+ phi1_val, phi2_val = phi1_fn(-h), phi2_fn(-h)
130
+ b1 = torch.nan_to_num(phi1_val - phi2_val / c2, nan=0.0)
131
+ b2 = torch.nan_to_num(phi2_val / c2, nan=0.0)
132
+
133
+ if cfg_pp:
134
+ x = x + (denoised - uncond_denoised)
135
+ x = sigma_fn(h) * x + h * (b1 * uncond_denoised + b2 * old_denoised)
136
+ else:
137
+ x = sigma_fn(h) * x + h * (b1 * denoised + b2 * old_denoised)
138
+
139
+ # Noise addition
140
+ if sigmas[i + 1] > 0:
141
+ x = x + noise_sampler(sigmas[i], sigmas[i + 1]) * s_noise * sigma_up
142
+
143
+ if cfg_pp:
144
+ old_denoised = uncond_denoised
145
+ else:
146
+ old_denoised = denoised
147
+ return x
148
+
149
+
150
+ @sampler_metadata(
151
+ "Res Multistep",
152
+ {"scheduler": "sgm_uniform"},
153
+ )
154
+ @torch.no_grad()
155
+ def sample_res_multistep(
156
+ model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.0, noise_sampler=None
157
+ ):
158
+ return res_multistep(
159
+ model,
160
+ x,
161
+ sigmas,
162
+ extra_args=extra_args,
163
+ callback=callback,
164
+ disable=disable,
165
+ s_noise=s_noise,
166
+ noise_sampler=noise_sampler,
167
+ eta=0.0,
168
+ cfg_pp=False,
169
+ )
170
+
171
+
172
+ @sampler_metadata(
173
+ "Res Multistep CFG++",
174
+ {"scheduler": "sgm_uniform"},
175
+ )
176
+ @torch.no_grad()
177
+ def sample_res_multistep_cfg_pp(
178
+ model, x, sigmas, extra_args=None, callback=None, disable=None, s_noise=1.0, noise_sampler=None
179
+ ):
180
+ return res_multistep(
181
+ model,
182
+ x,
183
+ sigmas,
184
+ extra_args=extra_args,
185
+ callback=callback,
186
+ disable=disable,
187
+ s_noise=s_noise,
188
+ noise_sampler=noise_sampler,
189
+ eta=0.0,
190
+ cfg_pp=True,
191
+ )
192
+
193
+
194
+ @sampler_metadata(
195
+ "Res Multistep Ancestral",
196
+ {"uses_ensd": True, "scheduler": "sgm_uniform"},
197
+ )
198
+ @torch.no_grad()
199
+ def sample_res_multistep_ancestral(
200
+ model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None
201
+ ):
202
+ return res_multistep(
203
+ model,
204
+ x,
205
+ sigmas,
206
+ extra_args=extra_args,
207
+ callback=callback,
208
+ disable=disable,
209
+ s_noise=s_noise,
210
+ noise_sampler=noise_sampler,
211
+ eta=eta,
212
+ cfg_pp=False,
213
+ )
214
+
215
+
216
+ @sampler_metadata(
217
+ "Res Multistep Ancestral CFG++",
218
+ {"uses_ensd": True, "scheduler": "sgm_uniform"},
219
+ )
220
+ @torch.no_grad()
221
+ def sample_res_multistep_ancestral_cfg_pp(
222
+ model, x, sigmas, extra_args=None, callback=None, disable=None, eta=1.0, s_noise=1.0, noise_sampler=None
223
+ ):
224
+ return res_multistep(
225
+ model,
226
+ x,
227
+ sigmas,
228
+ extra_args=extra_args,
229
+ callback=callback,
230
+ disable=disable,
231
+ s_noise=s_noise,
232
+ noise_sampler=noise_sampler,
233
+ eta=eta,
234
+ cfg_pp=True,
235
+ )
sd-forge-extra-samplers/lib_es/extra_schedulers/__init__.py ADDED
@@ -0,0 +1,6 @@
+ from lib_es.extra_schedulers.linear_log import linear_log
+
+
+ __all_schedulers__ = [
+     linear_log,
+ ]
sd-forge-extra-samplers/lib_es/extra_schedulers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (290 Bytes)
sd-forge-extra-samplers/lib_es/extra_schedulers/__pycache__/linear_log.cpython-310.pyc ADDED
Binary file (1.55 kB)
sd-forge-extra-samplers/lib_es/extra_schedulers/linear_log.py ADDED
@@ -0,0 +1,63 @@
+ import torch
+
+ from lib_es.utils import scheduler_metadata
+
+
+ @scheduler_metadata(name="linear_log", alias="Linear Log", need_inner_model=True)
+ def linear_log(
+     n: int,
+     sigma_min: float,
+     sigma_max: float,
+     inner_model,
+     device: torch.device,
+     eta: float = 0.1,
+     nu: float = 2.0,
+     sgm: bool = False,
+     floor=False,
+     final_step_full: bool = True,
+ ) -> torch.Tensor:
+     """
+     Creates a log-linear (geometric) noise schedule as recommended in the paper.
+
+     Args:
+         n: Number of sampling steps
+         sigma_min: Minimum noise level
+         sigma_max: Maximum noise level
+         eta: Error parameter (default 0.1, as estimated in the paper for CIFAR-10)
+         nu: Accuracy parameter for distance estimates (default 2.0)
+         final_step_full: Whether to take a full step (β=1) for the final iteration
+
+     Returns:
+         A tensor of sigma values in descending order with a geometric progression.
+     """
+
+     # TODO: Add adjustable eta/nu parameters for more flexibility
+
+     # Calculate the maximum allowable beta based on the admissibility criteria
+     # β*,N = c/(η+c) where c = 1 - ν^(-1/N)
+     c = 1 - nu ** (-1 / n)
+     beta_max = c / (eta + c)
+
+     # Calculate the ratio that would give us exactly sigma_min from sigma_max in n steps
+     exact_ratio = (sigma_min / sigma_max) ** (1 / (n - 1))
+
+     # Use the smaller of the two to ensure admissibility
+     ratio = max(1 - beta_max, exact_ratio)
+
+     # Generate the geometric sequence
+     sigs = [sigma_max]
+     for i in range(1, n):
+         next_sigma = sigs[-1] * ratio
+
+         # For the final step, optionally set beta=1 (as recommended in the paper)
+         if final_step_full and i == n - 1:
+             next_sigma = sigma_min
+
+         sigs.append(next_sigma)
+
+     if not sgm:
+         # Add final value of 0.0
+         sigs.append(0.0)
+
+     # Convert to tensor
+     return torch.tensor(sigs)
sd-forge-extra-samplers/lib_es/samplers.py ADDED
@@ -0,0 +1,57 @@
+ from modules.sd_samplers import all_samplers
+ from modules.sd_samplers_common import SamplerData
+ from modules.sd_samplers_kdiffusion import KDiffusionSampler
+
+ from lib_es.extra_samplers import __sampler_funcs__
+
+
+ # See modules_forge/alter_samplers.py for the basis of this class and build_constructor function
+ class ExtraSampler(KDiffusionSampler):
+     """
+     Overloads KDiffusionSampler to add extra parameters to the constructor
+     Based off lllyasviel's AlterSampler
+     """
+
+     def __init__(self, sd_model, sampler_name, sampler_func, options=None):
+         self.sampler_name = sampler_name
+         self.unet = sd_model.model.diffusion_model
+         sampler_function = sampler_func
+         super().__init__(sampler_function, sd_model, options)
+         self.extra_params = ["s_churn", "s_tmin", "s_tmax", "s_noise"]
+
+
+ def build_constructor(sampler_name, sampler_func):
+     def constructor(m):
+         return ExtraSampler(m, sampler_name, sampler_func)
+
+     return constructor
+
+
+ extra_sampler_list = [
+     (
+         fn.sampler_name,
+         fn,
+         fn.sampler_k_names,
+         fn.sampler_extra_params,
+     )
+     for fn in __sampler_funcs__
+ ]
+
+ samplers_data_k_diffusion: list[SamplerData] = [
+     SamplerData(name, build_constructor(sampler_name=name, sampler_func=funcname), aliases, options)
+     for name, funcname, aliases, options in extra_sampler_list
+ ]
+
+
+ def add_extra_samplers():
+     import modules.sd_samplers as sd_samplers
+
+     for sampler in samplers_data_k_diffusion:
+         if sampler.name not in sd_samplers.all_samplers_map:
+             sd_samplers.all_samplers.append(sampler)
+
+     sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
+     sd_samplers.set_samplers()
+
+
+
sd-forge-extra-samplers/lib_es/schedulers.py ADDED
@@ -0,0 +1,18 @@
+ from lib_es.extra_schedulers import __all_schedulers__
+
+ import modules.sd_schedulers as sched
+
+
+ extra_scheduler_list = [
+     sched.Scheduler(fn.name, fn.alias, fn, need_inner_model=fn.need_inner_model) for fn in __all_schedulers__
+ ]
+
+
+ def add_schedulers():
+     """
+     Add extra schedulers to the list of schedulers in the webui.
+     """
+     for scheduler in extra_scheduler_list:
+         if scheduler.name not in sched.schedulers_map:
+             sched.schedulers.append(scheduler)
+     sched.schedulers_map = {**{x.name: x for x in sched.schedulers}, **{x.label: x for x in sched.schedulers}}