Commit
·
a8704d0
1
Parent(s):
6f3015e
updated code to include lora adapters
Browse files- .gitignore +213 -0
- README.md +1 -0
- requirements.txt +2 -1
- src/sdgen/main.py +2 -2
- src/sdgen/presets/styles.py +53 -36
- src/sdgen/sd/__init__.py +4 -0
- src/sdgen/sd/lora_loader.py +69 -0
- src/sdgen/sd/models.py +4 -1
- src/sdgen/ui/layout.py +306 -58
- src/sdgen/ui/tabs/img2img_tab.py +12 -21
- src/sdgen/ui/tabs/presets_tab.py +58 -20
- src/sdgen/ui/tabs/txt2img_tab.py +10 -11
- src/sdgen/upscaler/__init__.py +0 -1
.gitignore
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[codz]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py.cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# UV
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
#uv.lock
|
| 102 |
+
|
| 103 |
+
# poetry
|
| 104 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 105 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 106 |
+
# commonly ignored for libraries.
|
| 107 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 108 |
+
#poetry.lock
|
| 109 |
+
#poetry.toml
|
| 110 |
+
|
| 111 |
+
# pdm
|
| 112 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 113 |
+
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
|
| 114 |
+
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
|
| 115 |
+
#pdm.lock
|
| 116 |
+
#pdm.toml
|
| 117 |
+
.pdm-python
|
| 118 |
+
.pdm-build/
|
| 119 |
+
|
| 120 |
+
# pixi
|
| 121 |
+
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
|
| 122 |
+
#pixi.lock
|
| 123 |
+
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
|
| 124 |
+
# in the .venv directory. It is recommended not to include this directory in version control.
|
| 125 |
+
.pixi
|
| 126 |
+
|
| 127 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 128 |
+
__pypackages__/
|
| 129 |
+
|
| 130 |
+
# Celery stuff
|
| 131 |
+
celerybeat-schedule
|
| 132 |
+
celerybeat.pid
|
| 133 |
+
|
| 134 |
+
# SageMath parsed files
|
| 135 |
+
*.sage.py
|
| 136 |
+
|
| 137 |
+
# Environments
|
| 138 |
+
.env
|
| 139 |
+
.envrc
|
| 140 |
+
.venv
|
| 141 |
+
env/
|
| 142 |
+
venv/
|
| 143 |
+
ENV/
|
| 144 |
+
env.bak/
|
| 145 |
+
venv.bak/
|
| 146 |
+
|
| 147 |
+
# Spyder project settings
|
| 148 |
+
.spyderproject
|
| 149 |
+
.spyproject
|
| 150 |
+
|
| 151 |
+
# Rope project settings
|
| 152 |
+
.ropeproject
|
| 153 |
+
|
| 154 |
+
# mkdocs documentation
|
| 155 |
+
/site
|
| 156 |
+
|
| 157 |
+
# mypy
|
| 158 |
+
.mypy_cache/
|
| 159 |
+
.dmypy.json
|
| 160 |
+
dmypy.json
|
| 161 |
+
|
| 162 |
+
# Pyre type checker
|
| 163 |
+
.pyre/
|
| 164 |
+
|
| 165 |
+
# pytype static type analyzer
|
| 166 |
+
.pytype/
|
| 167 |
+
|
| 168 |
+
# Cython debug symbols
|
| 169 |
+
cython_debug/
|
| 170 |
+
|
| 171 |
+
# PyCharm
|
| 172 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 173 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
| 174 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
| 175 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
| 176 |
+
#.idea/
|
| 177 |
+
|
| 178 |
+
# Abstra
|
| 179 |
+
# Abstra is an AI-powered process automation framework.
|
| 180 |
+
# Ignore directories containing user credentials, local state, and settings.
|
| 181 |
+
# Learn more at https://abstra.io/docs
|
| 182 |
+
.abstra/
|
| 183 |
+
|
| 184 |
+
# Visual Studio Code
|
| 185 |
+
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
|
| 186 |
+
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
|
| 187 |
+
# and can be added to the global gitignore or merged into this file. However, if you prefer,
|
| 188 |
+
# you could uncomment the following to ignore the entire vscode folder
|
| 189 |
+
# .vscode/
|
| 190 |
+
|
| 191 |
+
# Ruff stuff:
|
| 192 |
+
.ruff_cache/
|
| 193 |
+
|
| 194 |
+
# PyPI configuration file
|
| 195 |
+
.pypirc
|
| 196 |
+
|
| 197 |
+
# Cursor
|
| 198 |
+
# Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
|
| 199 |
+
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
|
| 200 |
+
# refer to https://docs.cursor.com/context/ignore-files
|
| 201 |
+
.cursorignore
|
| 202 |
+
.cursorindexingignore
|
| 203 |
+
|
| 204 |
+
# Marimo
|
| 205 |
+
marimo/_static/
|
| 206 |
+
marimo/_lsp/
|
| 207 |
+
__marimo__/
|
| 208 |
+
|
| 209 |
+
src/assets/history
|
| 210 |
+
*.ckpt
|
| 211 |
+
*.pt
|
| 212 |
+
*.bin
|
| 213 |
+
*.pth
|
README.md
CHANGED
|
@@ -142,6 +142,7 @@ Install core libs:
|
|
| 142 |
|
| 143 |
```bash
|
| 144 |
pip install -r requirements.txt
|
|
|
|
| 145 |
```
|
| 146 |
|
| 147 |
### 4. HuggingFace Login (optional)
|
|
|
|
| 142 |
|
| 143 |
```bash
|
| 144 |
pip install -r requirements.txt
|
| 145 |
+
pip install -e .
|
| 146 |
```
|
| 147 |
|
| 148 |
### 4. HuggingFace Login (optional)
|
requirements.txt
CHANGED
|
@@ -11,8 +11,9 @@ torchaudio==2.5.1
|
|
| 11 |
diffusers==0.35.2
|
| 12 |
transformers==4.57.3
|
| 13 |
huggingface_hub==0.36.0
|
| 14 |
-
accelerate==0.
|
| 15 |
safetensors==0.7.0
|
|
|
|
| 16 |
|
| 17 |
|
| 18 |
# UI FRAMEWORK
|
|
|
|
| 11 |
diffusers==0.35.2
|
| 12 |
transformers==4.57.3
|
| 13 |
huggingface_hub==0.36.0
|
| 14 |
+
accelerate==0.33.0
|
| 15 |
safetensors==0.7.0
|
| 16 |
+
peft==0.17.1
|
| 17 |
|
| 18 |
|
| 19 |
# UI FRAMEWORK
|
src/sdgen/main.py
CHANGED
|
@@ -6,8 +6,8 @@ sets up the UI, and launches the Gradio interface.
|
|
| 6 |
|
| 7 |
from __future__ import annotations
|
| 8 |
|
| 9 |
-
import sys
|
| 10 |
import os
|
|
|
|
| 11 |
|
| 12 |
# for HF spaces
|
| 13 |
sys.path.append(os.path.abspath("src"))
|
|
@@ -45,7 +45,7 @@ def main() -> None:
|
|
| 45 |
model_id1 = settings.model_id1
|
| 46 |
model_id2 = settings.model_id2
|
| 47 |
|
| 48 |
-
device =
|
| 49 |
|
| 50 |
logger.info("Loading pipeline %s", model_id1)
|
| 51 |
pipes = {
|
|
|
|
| 6 |
|
| 7 |
from __future__ import annotations
|
| 8 |
|
|
|
|
| 9 |
import os
|
| 10 |
+
import sys
|
| 11 |
|
| 12 |
# for HF spaces
|
| 13 |
sys.path.append(os.path.abspath("src"))
|
|
|
|
| 45 |
model_id1 = settings.model_id1
|
| 46 |
model_id2 = settings.model_id2
|
| 47 |
|
| 48 |
+
device = "cpu"
|
| 49 |
|
| 50 |
logger.info("Loading pipeline %s", model_id1)
|
| 51 |
pipes = {
|
src/sdgen/presets/styles.py
CHANGED
|
@@ -1,65 +1,82 @@
|
|
| 1 |
-
"""Preset configurations
|
| 2 |
-
|
| 3 |
-
This module defines a collection of named presets including prompt,
|
| 4 |
-
negative prompt, sampler parameters, and recommended resolutions.
|
| 5 |
-
"""
|
| 6 |
|
| 7 |
from __future__ import annotations
|
| 8 |
|
| 9 |
from typing import Any, Dict, List
|
| 10 |
|
| 11 |
-
# Global preset registry: {preset_name: parameters}
|
| 12 |
PRESETS: Dict[str, Dict[str, Any]] = {
|
| 13 |
-
"
|
| 14 |
"prompt": (
|
| 15 |
-
"ultra realistic, 35mm
|
| 16 |
-
|
| 17 |
),
|
| 18 |
-
"negative_prompt":
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
"Anime": {
|
| 22 |
-
"prompt": (
|
| 23 |
-
"high quality anime, clean lines, vibrant colors, \
|
| 24 |
-
soft rim lighting, studio lighting"
|
| 25 |
),
|
| 26 |
-
"
|
| 27 |
-
"
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
"
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
"negative_prompt": "oversaturated, low detail, flat lighting",
|
| 35 |
-
"tags": ["cinematic", "moody"],
|
| 36 |
},
|
| 37 |
"Oil Painting / Classic Art": {
|
| 38 |
"prompt": (
|
| 39 |
-
"oil painting, impasto brush strokes, classical \
|
| 40 |
lighting, Rembrandt style"
|
| 41 |
),
|
| 42 |
"negative_prompt": "blurry, cartoonish, digital artifacts",
|
| 43 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
},
|
| 45 |
-
"
|
| 46 |
"prompt": (
|
| 47 |
-
"
|
| 48 |
-
|
| 49 |
),
|
| 50 |
-
"negative_prompt": "
|
| 51 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
},
|
| 53 |
}
|
| 54 |
|
| 55 |
|
| 56 |
def get_preset(name: str) -> Dict[str, Any] | None:
|
| 57 |
-
"""Return
|
| 58 |
data = PRESETS.get(name)
|
| 59 |
return dict(data) if data else None
|
| 60 |
|
| 61 |
|
| 62 |
def list_presets() -> List[str]:
|
| 63 |
-
"""
|
| 64 |
-
# Avoid unexpected reordering: use insertion order
|
| 65 |
return list(PRESETS.keys())
|
|
|
|
| 1 |
+
"""Preset configurations."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
from __future__ import annotations
|
| 4 |
|
| 5 |
from typing import Any, Dict, List
|
| 6 |
|
|
|
|
| 7 |
PRESETS: Dict[str, Dict[str, Any]] = {
|
| 8 |
+
"Cinematic Realism": {
|
| 9 |
"prompt": (
|
| 10 |
+
"ultra realistic, cinematic lighting, 35mm film look, depth "
|
| 11 |
+
"of field, sharp focus, natural skin texture"
|
| 12 |
),
|
| 13 |
+
"negative_prompt": (
|
| 14 |
+
"lowres, blurry, deformed anatomy, \
|
| 15 |
+
extra limbs, oversaturated, jpeg artifacts"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
),
|
| 17 |
+
"steps": 24,
|
| 18 |
+
"cfg": 6.5,
|
| 19 |
+
"width": 768,
|
| 20 |
+
"height": 512,
|
| 21 |
+
"lora_A": "DetailTweak.safetensors",
|
| 22 |
+
"alpha_A": 0.8,
|
| 23 |
+
"lora_B": None,
|
| 24 |
+
"alpha_B": 0.0,
|
|
|
|
|
|
|
| 25 |
},
|
| 26 |
"Oil Painting / Classic Art": {
|
| 27 |
"prompt": (
|
| 28 |
+
"oil painting, impasto brush strokes, classical \ \
|
| 29 |
lighting, Rembrandt style"
|
| 30 |
),
|
| 31 |
"negative_prompt": "blurry, cartoonish, digital artifacts",
|
| 32 |
+
"steps": 20,
|
| 33 |
+
"cfg": 7.5,
|
| 34 |
+
"width": 512,
|
| 35 |
+
"height": 512,
|
| 36 |
+
"lora_A": "DetailTweak.safetensors",
|
| 37 |
+
"alpha_A": 0.4,
|
| 38 |
+
"lora_B": None,
|
| 39 |
+
"alpha_B": 0.0,
|
| 40 |
+
},
|
| 41 |
+
"Manga Illustration": {
|
| 42 |
+
"prompt": (
|
| 43 |
+
"manga illustration, clean line art, expressive pose, full "
|
| 44 |
+
"background, detailed composition"
|
| 45 |
+
),
|
| 46 |
+
"negative_prompt": "badhandsv4, easyn, blurry line art",
|
| 47 |
+
"steps": 20,
|
| 48 |
+
"cfg": 7.0,
|
| 49 |
+
"width": 512,
|
| 50 |
+
"height": 704,
|
| 51 |
+
"lora_A": "MangaPanels.safetensors",
|
| 52 |
+
"alpha_A": 1.0,
|
| 53 |
+
"lora_B": None,
|
| 54 |
+
"alpha_B": 0.0,
|
| 55 |
},
|
| 56 |
+
"Anime Tarot": {
|
| 57 |
"prompt": (
|
| 58 |
+
"anime tarot card, ornate composition, symbolic character pose, "
|
| 59 |
+
"intricate patterns, layered design"
|
| 60 |
),
|
| 61 |
+
"negative_prompt": "badhandsv4, flat background, simple layout",
|
| 62 |
+
"steps": 20,
|
| 63 |
+
"cfg": 6.0,
|
| 64 |
+
"width": 512,
|
| 65 |
+
"height": 704,
|
| 66 |
+
"lora_A": "AnimeTarotCards.safetensors",
|
| 67 |
+
"alpha_A": 1.0,
|
| 68 |
+
"lora_B": "DetailTweak.safetensors",
|
| 69 |
+
"alpha_B": -0.4,
|
| 70 |
},
|
| 71 |
}
|
| 72 |
|
| 73 |
|
| 74 |
def get_preset(name: str) -> Dict[str, Any] | None:
|
| 75 |
+
"""Return shallow copy of a preset config."""
|
| 76 |
data = PRESETS.get(name)
|
| 77 |
return dict(data) if data else None
|
| 78 |
|
| 79 |
|
| 80 |
def list_presets() -> List[str]:
|
| 81 |
+
"""Stable UI order."""
|
|
|
|
| 82 |
return list(PRESETS.keys())
|
src/sdgen/sd/__init__.py
CHANGED
|
@@ -2,6 +2,7 @@ from __future__ import annotations
|
|
| 2 |
|
| 3 |
from .generator import generate_image
|
| 4 |
from .img2img import generate_img2img, prepare_img2img_pipeline
|
|
|
|
| 5 |
from .models import GenerationMetadata, HistorySummary, Img2ImgConfig, Txt2ImgConfig
|
| 6 |
from .pipeline import load_pipeline, warmup_pipeline
|
| 7 |
|
|
@@ -15,4 +16,7 @@ __all__ = [
|
|
| 15 |
"prepare_img2img_pipeline",
|
| 16 |
"load_pipeline",
|
| 17 |
"warmup_pipeline",
|
|
|
|
|
|
|
|
|
|
| 18 |
]
|
|
|
|
| 2 |
|
| 3 |
from .generator import generate_image
|
| 4 |
from .img2img import generate_img2img, prepare_img2img_pipeline
|
| 5 |
+
from .lora_loader import apply_loras, get_lora_path, list_loras
|
| 6 |
from .models import GenerationMetadata, HistorySummary, Img2ImgConfig, Txt2ImgConfig
|
| 7 |
from .pipeline import load_pipeline, warmup_pipeline
|
| 8 |
|
|
|
|
| 16 |
"prepare_img2img_pipeline",
|
| 17 |
"load_pipeline",
|
| 18 |
"warmup_pipeline",
|
| 19 |
+
"list_loras",
|
| 20 |
+
"get_lora_path",
|
| 21 |
+
"apply_loras",
|
| 22 |
]
|
src/sdgen/sd/lora_loader.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""LORA Adapter loader module."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from typing import List, Optional
|
| 7 |
+
|
| 8 |
+
from sdgen.config import ASSETS_ROOT
|
| 9 |
+
|
| 10 |
+
# Assets/loras lives under src/assets/loras
|
| 11 |
+
LORA_DIR: Path = ASSETS_ROOT / "loras"
|
| 12 |
+
LORA_DIR.mkdir(parents=True, exist_ok=True)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def list_loras() -> List[str]:
|
| 16 |
+
"""Return a sorted list of available LoRA checkpoint filenames."""
|
| 17 |
+
if not LORA_DIR.exists():
|
| 18 |
+
return []
|
| 19 |
+
return sorted([p.name for p in LORA_DIR.glob("*.safetensors")])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_lora_path(name: str) -> str:
|
| 23 |
+
"""Return the absolute path for a given LoRA filename."""
|
| 24 |
+
return str(LORA_DIR / name)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def apply_loras(
|
| 28 |
+
pipe,
|
| 29 |
+
lora_a_name: Optional[str],
|
| 30 |
+
alpha_a: float,
|
| 31 |
+
lora_b_name: Optional[str],
|
| 32 |
+
alpha_b: float,
|
| 33 |
+
) -> None:
|
| 34 |
+
"""Apply up to two LoRA adapters to the given pipeline.
|
| 35 |
+
|
| 36 |
+
Uses diffusers' load_lora_weights / set_adapters API.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
pipe: A Stable Diffusion pipeline instance.
|
| 40 |
+
lora_a_name: Filename of first LoRA (or None).
|
| 41 |
+
alpha_a: Weight for first LoRA.
|
| 42 |
+
lora_b_name: Filename of second LoRA (or None).
|
| 43 |
+
alpha_b: Weight for second LoRA.
|
| 44 |
+
"""
|
| 45 |
+
# If the pipeline supports unloading adapters, clear previous ones
|
| 46 |
+
if hasattr(pipe, "unload_lora_weights"):
|
| 47 |
+
pipe.unload_lora_weights()
|
| 48 |
+
|
| 49 |
+
adapters = []
|
| 50 |
+
weights = []
|
| 51 |
+
|
| 52 |
+
if lora_a_name:
|
| 53 |
+
pipe.load_lora_weights(
|
| 54 |
+
get_lora_path(lora_a_name),
|
| 55 |
+
adapter_name=Path(lora_a_name).stem,
|
| 56 |
+
)
|
| 57 |
+
adapters.append(Path(lora_a_name).stem)
|
| 58 |
+
weights.append(float(alpha_a))
|
| 59 |
+
|
| 60 |
+
if lora_b_name:
|
| 61 |
+
pipe.load_lora_weights(
|
| 62 |
+
get_lora_path(lora_b_name),
|
| 63 |
+
adapter_name=Path(lora_b_name).stem,
|
| 64 |
+
)
|
| 65 |
+
adapters.append(Path(lora_b_name).stem)
|
| 66 |
+
weights.append(float(alpha_b))
|
| 67 |
+
|
| 68 |
+
if adapters and hasattr(pipe, "set_adapters"):
|
| 69 |
+
pipe.set_adapters(adapters, weights)
|
src/sdgen/sd/models.py
CHANGED
|
@@ -4,7 +4,7 @@ from __future__ import annotations
|
|
| 4 |
|
| 5 |
from dataclasses import asdict, dataclass, field
|
| 6 |
from datetime import datetime
|
| 7 |
-
from typing import Any, Dict, Optional
|
| 8 |
|
| 9 |
|
| 10 |
@dataclass
|
|
@@ -79,6 +79,7 @@ class GenerationMetadata:
|
|
| 79 |
id: Optional[str] = None
|
| 80 |
thumbnail: Optional[str] = None
|
| 81 |
full_image: Optional[str] = None
|
|
|
|
| 82 |
|
| 83 |
# Txt2Img / Img2Img
|
| 84 |
prompt: Optional[str] = None
|
|
@@ -86,6 +87,8 @@ class GenerationMetadata:
|
|
| 86 |
steps: Optional[int] = None
|
| 87 |
guidance_scale: Optional[float] = None
|
| 88 |
seed: Optional[int] = None
|
|
|
|
|
|
|
| 89 |
|
| 90 |
# Img2Img only
|
| 91 |
strength: Optional[float] = None
|
|
|
|
| 4 |
|
| 5 |
from dataclasses import asdict, dataclass, field
|
| 6 |
from datetime import datetime
|
| 7 |
+
from typing import Any, Dict, List, Optional
|
| 8 |
|
| 9 |
|
| 10 |
@dataclass
|
|
|
|
| 79 |
id: Optional[str] = None
|
| 80 |
thumbnail: Optional[str] = None
|
| 81 |
full_image: Optional[str] = None
|
| 82 |
+
model_id: Optional[str] = None
|
| 83 |
|
| 84 |
# Txt2Img / Img2Img
|
| 85 |
prompt: Optional[str] = None
|
|
|
|
| 87 |
steps: Optional[int] = None
|
| 88 |
guidance_scale: Optional[float] = None
|
| 89 |
seed: Optional[int] = None
|
| 90 |
+
lora_names: Optional[List[str]] = None
|
| 91 |
+
lora_alphas: Optional[List[float]] = None
|
| 92 |
|
| 93 |
# Img2Img only
|
| 94 |
strength: Optional[float] = None
|
src/sdgen/ui/layout.py
CHANGED
|
@@ -6,9 +6,14 @@ from typing import Any, Tuple
|
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
|
| 9 |
-
from sdgen.sd
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
from sdgen.ui.tabs import (
|
| 13 |
build_history_tab,
|
| 14 |
build_img2img_tab,
|
|
@@ -24,38 +29,154 @@ from sdgen.utils.logger import get_logger
|
|
| 24 |
logger = get_logger(__name__)
|
| 25 |
|
| 26 |
|
|
|
|
|
|
|
|
|
|
| 27 |
def _resolve_seed(value: Any) -> int | None:
|
| 28 |
"""Return integer seed if valid, otherwise None."""
|
| 29 |
if value is None:
|
| 30 |
return None
|
| 31 |
if isinstance(value, int):
|
| 32 |
return value
|
|
|
|
| 33 |
text = str(value).strip()
|
| 34 |
if not text:
|
| 35 |
return None
|
|
|
|
| 36 |
try:
|
| 37 |
return int(text)
|
| 38 |
except ValueError:
|
| 39 |
-
logger.warning("Invalid seed input: %s", value)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
return None
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
|
| 43 |
-
def
|
| 44 |
-
"""
|
| 45 |
if model == "Turbo":
|
|
|
|
| 46 |
return (
|
| 47 |
gr.update(minimum=1, maximum=5, value=2, step=1),
|
| 48 |
gr.update(minimum=0, maximum=0, value=0, step=0),
|
| 49 |
)
|
|
|
|
|
|
|
| 50 |
return (
|
| 51 |
gr.update(minimum=10, maximum=30, value=20, step=1),
|
| 52 |
gr.update(minimum=1, maximum=10, value=5, step=1),
|
| 53 |
)
|
| 54 |
|
| 55 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 56 |
def _txt2img_handler(
|
| 57 |
-
|
| 58 |
-
pipes: dict,
|
| 59 |
prompt: str,
|
| 60 |
negative: str,
|
| 61 |
steps: int,
|
|
@@ -63,11 +184,28 @@ def _txt2img_handler(
|
|
| 63 |
width: int,
|
| 64 |
height: int,
|
| 65 |
seed: Any,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
) -> Tuple[Any, str]:
|
| 67 |
"""Run text-to-image generation."""
|
| 68 |
-
model = model_choice
|
| 69 |
pipe = pipes[model]
|
| 70 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
cfg = Txt2ImgConfig(
|
| 72 |
prompt=prompt or "",
|
| 73 |
negative_prompt=negative or "",
|
|
@@ -80,6 +218,9 @@ def _txt2img_handler(
|
|
| 80 |
)
|
| 81 |
|
| 82 |
image, meta = generate_image(pipe, cfg)
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
try:
|
| 85 |
save_history_entry(meta, image)
|
|
@@ -90,8 +231,8 @@ def _txt2img_handler(
|
|
| 90 |
|
| 91 |
|
| 92 |
def _img2img_handler(
|
| 93 |
-
|
| 94 |
-
pipes: dict,
|
| 95 |
input_image: Any,
|
| 96 |
prompt: str,
|
| 97 |
negative: str,
|
|
@@ -99,9 +240,12 @@ def _img2img_handler(
|
|
| 99 |
steps: int,
|
| 100 |
guidance: float,
|
| 101 |
seed: Any,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
) -> Tuple[Any, str]:
|
| 103 |
"""Run image-to-image generation."""
|
| 104 |
-
model = model_choice
|
| 105 |
pipe = pipes[model]
|
| 106 |
|
| 107 |
if input_image is None:
|
|
@@ -109,6 +253,20 @@ def _img2img_handler(
|
|
| 109 |
|
| 110 |
pil_image = to_pil(input_image)
|
| 111 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
cfg = Img2ImgConfig(
|
| 113 |
prompt=prompt or "",
|
| 114 |
negative_prompt=negative or "",
|
|
@@ -122,6 +280,9 @@ def _img2img_handler(
|
|
| 122 |
)
|
| 123 |
|
| 124 |
image, meta = generate_img2img(pipe, cfg, pil_image)
|
|
|
|
|
|
|
|
|
|
| 125 |
|
| 126 |
try:
|
| 127 |
save_history_entry(meta, image)
|
|
@@ -141,7 +302,6 @@ def _upscale_handler(
|
|
| 141 |
|
| 142 |
pil_image = to_pil(input_image)
|
| 143 |
|
| 144 |
-
# scale is str → convert to int
|
| 145 |
try:
|
| 146 |
scale_int = int(float(scale))
|
| 147 |
except Exception as exc: # noqa: BLE001
|
|
@@ -149,6 +309,7 @@ def _upscale_handler(
|
|
| 149 |
|
| 150 |
upscaler = Upscaler(scale=scale_int, prefer="ncnn")
|
| 151 |
out_image, meta = upscaler.upscale(pil_image)
|
|
|
|
| 152 |
|
| 153 |
try:
|
| 154 |
save_history_entry(meta, out_image)
|
|
@@ -158,104 +319,191 @@ def _upscale_handler(
|
|
| 158 |
return out_image, pretty_json(meta.to_dict())
|
| 159 |
|
| 160 |
|
| 161 |
-
|
| 162 |
-
"""Return handler for img2img generation."""
|
| 163 |
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
pipes,
|
| 168 |
-
input_image,
|
| 169 |
prompt,
|
| 170 |
negative,
|
| 171 |
-
strength,
|
| 172 |
steps,
|
| 173 |
guidance,
|
|
|
|
|
|
|
| 174 |
seed,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 175 |
)
|
| 176 |
|
| 177 |
return handler
|
| 178 |
|
| 179 |
|
| 180 |
-
def
|
| 181 |
-
"""
|
| 182 |
-
|
| 183 |
-
def handler(
|
| 184 |
-
|
| 185 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 186 |
pipes,
|
|
|
|
| 187 |
prompt,
|
| 188 |
negative,
|
|
|
|
| 189 |
steps,
|
| 190 |
guidance,
|
| 191 |
-
width,
|
| 192 |
-
height,
|
| 193 |
seed,
|
|
|
|
|
|
|
|
|
|
|
|
|
| 194 |
)
|
| 195 |
|
| 196 |
return handler
|
| 197 |
|
| 198 |
|
|
|
|
|
|
|
|
|
|
| 199 |
def build_ui(txt2img_pipes: dict, img2img_pipes: dict) -> gr.Blocks:
|
| 200 |
"""Build the entire Gradio UI."""
|
| 201 |
with gr.Blocks() as demo:
|
| 202 |
gr.Markdown(
|
| 203 |
"# Stable Diffusion Generator\n"
|
| 204 |
-
"
|
| 205 |
-
on the Model selceted and applied settings."
|
| 206 |
)
|
| 207 |
|
| 208 |
model_choice = gr.Dropdown(
|
| 209 |
-
choices=[
|
| 210 |
-
"SD1.5",
|
| 211 |
-
"Turbo",
|
| 212 |
-
],
|
| 213 |
value="SD1.5",
|
| 214 |
label="Model",
|
| 215 |
)
|
| 216 |
-
gr.Markdown(
|
| 217 |
-
"Use Turbo model if you prefer speed over quality. \
|
| 218 |
-
SD1.5 produces much better results but takes 10 minutes on average \
|
| 219 |
-
for generation on HF Spaces."
|
| 220 |
-
)
|
| 221 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 222 |
txt_controls = build_txt2img_tab(
|
| 223 |
-
make_txt2img_handler(
|
|
|
|
| 224 |
)
|
| 225 |
|
| 226 |
img_controls = build_img2img_tab(
|
| 227 |
-
make_img2img_handler(
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
build_upscaler_tab(
|
| 231 |
-
handler=_upscale_handler,
|
| 232 |
)
|
| 233 |
|
|
|
|
| 234 |
build_presets_tab(
|
| 235 |
txt_controls=txt_controls,
|
| 236 |
img_controls=img_controls,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
)
|
| 238 |
-
|
| 239 |
build_history_tab()
|
| 240 |
|
| 241 |
model_choice.change(
|
| 242 |
-
fn=
|
| 243 |
inputs=[model_choice],
|
| 244 |
-
outputs=[
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
|
|
|
| 250 |
)
|
| 251 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 252 |
gr.Markdown(
|
| 253 |
"### Notes\n"
|
| 254 |
-
"-
|
| 255 |
-
"- Presets apply to both **Text → Image** and
|
| 256 |
-
|
| 257 |
-
|
| 258 |
-
take some time, please be patient.\n"
|
| 259 |
)
|
| 260 |
|
| 261 |
return demo
|
|
|
|
| 6 |
|
| 7 |
import gradio as gr
|
| 8 |
|
| 9 |
+
from sdgen.sd import (
|
| 10 |
+
Img2ImgConfig,
|
| 11 |
+
Txt2ImgConfig,
|
| 12 |
+
apply_loras,
|
| 13 |
+
generate_image,
|
| 14 |
+
generate_img2img,
|
| 15 |
+
list_loras,
|
| 16 |
+
)
|
| 17 |
from sdgen.ui.tabs import (
|
| 18 |
build_history_tab,
|
| 19 |
build_img2img_tab,
|
|
|
|
| 29 |
logger = get_logger(__name__)
|
| 30 |
|
| 31 |
|
| 32 |
+
# Small helpers
|
| 33 |
+
|
| 34 |
+
|
| 35 |
def _resolve_seed(value: Any) -> int | None:
|
| 36 |
"""Return integer seed if valid, otherwise None."""
|
| 37 |
if value is None:
|
| 38 |
return None
|
| 39 |
if isinstance(value, int):
|
| 40 |
return value
|
| 41 |
+
|
| 42 |
text = str(value).strip()
|
| 43 |
if not text:
|
| 44 |
return None
|
| 45 |
+
|
| 46 |
try:
|
| 47 |
return int(text)
|
| 48 |
except ValueError:
|
| 49 |
+
logger.warning("Invalid seed input for seed: %s", value)
|
| 50 |
+
return None
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
def _normalize_lora_name(raw: Any) -> str | None:
|
| 54 |
+
"""Normalize dropdown value to a LoRA filename or None."""
|
| 55 |
+
if raw is None:
|
| 56 |
return None
|
| 57 |
+
name = str(raw).strip()
|
| 58 |
+
if not name or name == "(none)":
|
| 59 |
+
return None
|
| 60 |
+
return name
|
| 61 |
|
| 62 |
|
| 63 |
+
def _steps_cfg_for_model(model: str) -> Tuple[gr.update, gr.update]:
|
| 64 |
+
"""Return UI updates for steps & CFG when the model changes."""
|
| 65 |
if model == "Turbo":
|
| 66 |
+
# Turbo: super low steps, CFG has no meaningful effect.
|
| 67 |
return (
|
| 68 |
gr.update(minimum=1, maximum=5, value=2, step=1),
|
| 69 |
gr.update(minimum=0, maximum=0, value=0, step=0),
|
| 70 |
)
|
| 71 |
+
|
| 72 |
+
# SD1.5 defaults
|
| 73 |
return (
|
| 74 |
gr.update(minimum=10, maximum=30, value=20, step=1),
|
| 75 |
gr.update(minimum=1, maximum=10, value=5, step=1),
|
| 76 |
)
|
| 77 |
|
| 78 |
|
| 79 |
+
def _validate_turbo_strength(
|
| 80 |
+
model: str,
|
| 81 |
+
steps: float,
|
| 82 |
+
strength: float,
|
| 83 |
+
) -> Tuple[gr.update, gr.update, str]:
|
| 84 |
+
"""UI constraint: Turbo requires steps * strength >= 1."""
|
| 85 |
+
if model != "Turbo":
|
| 86 |
+
return gr.update(), gr.update(), ""
|
| 87 |
+
|
| 88 |
+
product = float(steps) * float(strength)
|
| 89 |
+
if product >= 1.0:
|
| 90 |
+
return gr.update(), gr.update(), f"Turbo OK: {product:.2f} ≥ 1"
|
| 91 |
+
|
| 92 |
+
required = 1.0 / float(steps)
|
| 93 |
+
required = min(required, 1.0)
|
| 94 |
+
return (
|
| 95 |
+
gr.update(),
|
| 96 |
+
gr.update(value=required),
|
| 97 |
+
"Adjusted for Turbo: steps×strength must be ≥ 1",
|
| 98 |
+
)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _update_model_dependents(model: str):
|
| 102 |
+
"""update.
|
| 103 |
+
|
| 104 |
+
- steps & cfg sliders for txt2img and img2img
|
| 105 |
+
- LoRA panel visibility
|
| 106 |
+
"""
|
| 107 |
+
steps_t2i, cfg_t2i = _steps_cfg_for_model(model)
|
| 108 |
+
steps_i2i, cfg_i2i = _steps_cfg_for_model(model)
|
| 109 |
+
lora_visibility = gr.update(visible=(model == "SD1.5"))
|
| 110 |
+
|
| 111 |
+
return (
|
| 112 |
+
steps_t2i,
|
| 113 |
+
cfg_t2i,
|
| 114 |
+
steps_i2i,
|
| 115 |
+
cfg_i2i,
|
| 116 |
+
lora_visibility,
|
| 117 |
+
)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _unload_lora_if_possible(pipe: Any) -> None:
    """Best-effort removal of any LoRA weights already loaded on *pipe*."""
    if hasattr(pipe, "unload_lora_weights"):
        try:
            pipe.unload_lora_weights()
        except Exception as exc:  # noqa: BLE001
            logger.warning("Failed to unload LoRA weights: %s", exc)


def _apply_lora_if_allowed(
    model: str,
    pipe: Any,
    lora_a: str | None,
    alpha_a: float,
    lora_b: str | None,
    alpha_b: float,
) -> Tuple[list[str], list[float]]:
    """Apply up to two LoRA adapters to the given pipeline.

    - Only applied for SD1.5.
    - Turbo completely ignores LoRA (and unloads any existing ones).
    - Adapters with no name or zero weight are skipped.

    Args:
        model: Currently selected model name ("SD1.5" or "Turbo").
        pipe: Diffusers pipeline, mutated in place.
        lora_a: Normalized name of the primary adapter, or None.
        alpha_a: Weight of the primary adapter; 0 disables it.
        lora_b: Normalized name of the secondary adapter, or None.
        alpha_b: Weight of the secondary adapter; 0 disables it.

    Returns:
        (active_lora_names, active_lora_alphas) for metadata.
    """
    if model != "SD1.5":
        _unload_lora_if_possible(pipe)
        return [], []

    names: list[str] = []
    alphas: list[float] = []

    # Filter out zero-weight adapters to keep metadata clean.
    if lora_a and alpha_a != 0:
        names.append(lora_a)
        alphas.append(float(alpha_a))

    if lora_b and alpha_b != 0:
        names.append(lora_b)
        alphas.append(float(alpha_b))

    if not names:
        # Nothing to apply; drop any stale adapters from a previous run.
        _unload_lora_if_possible(pipe)
        return [], []

    # names/alphas are non-empty and parallel from here on.
    apply_loras(
        pipe,
        names[0],
        alphas[0],
        names[1] if len(names) > 1 else None,
        alphas[1] if len(alphas) > 1 else 0.0,
    )

    return names, alphas
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
# Core handlers
|
| 175 |
+
|
| 176 |
+
|
| 177 |
def _txt2img_handler(
|
| 178 |
+
model: str,
|
| 179 |
+
pipes: dict[str, Any],
|
| 180 |
prompt: str,
|
| 181 |
negative: str,
|
| 182 |
steps: int,
|
|
|
|
| 184 |
width: int,
|
| 185 |
height: int,
|
| 186 |
seed: Any,
|
| 187 |
+
lora_a_value: Any,
|
| 188 |
+
lora_a_alpha_value: Any,
|
| 189 |
+
lora_b_value: Any,
|
| 190 |
+
lora_b_alpha_value: Any,
|
| 191 |
) -> Tuple[Any, str]:
|
| 192 |
"""Run text-to-image generation."""
|
|
|
|
| 193 |
pipe = pipes[model]
|
| 194 |
|
| 195 |
+
lora_a = _normalize_lora_name(lora_a_value)
|
| 196 |
+
lora_b = _normalize_lora_name(lora_b_value)
|
| 197 |
+
alpha_a = float(lora_a_alpha_value or 0.0)
|
| 198 |
+
alpha_b = float(lora_b_alpha_value or 0.0)
|
| 199 |
+
|
| 200 |
+
active_lora_names, active_lora_alphas = _apply_lora_if_allowed(
|
| 201 |
+
model,
|
| 202 |
+
pipe,
|
| 203 |
+
lora_a,
|
| 204 |
+
alpha_a,
|
| 205 |
+
lora_b,
|
| 206 |
+
alpha_b,
|
| 207 |
+
)
|
| 208 |
+
|
| 209 |
cfg = Txt2ImgConfig(
|
| 210 |
prompt=prompt or "",
|
| 211 |
negative_prompt=negative or "",
|
|
|
|
| 218 |
)
|
| 219 |
|
| 220 |
image, meta = generate_image(pipe, cfg)
|
| 221 |
+
meta.model_id = model
|
| 222 |
+
meta.lora_names = active_lora_names
|
| 223 |
+
meta.lora_alphas = active_lora_alphas
|
| 224 |
|
| 225 |
try:
|
| 226 |
save_history_entry(meta, image)
|
|
|
|
| 231 |
|
| 232 |
|
| 233 |
def _img2img_handler(
|
| 234 |
+
model: str,
|
| 235 |
+
pipes: dict[str, Any],
|
| 236 |
input_image: Any,
|
| 237 |
prompt: str,
|
| 238 |
negative: str,
|
|
|
|
| 240 |
steps: int,
|
| 241 |
guidance: float,
|
| 242 |
seed: Any,
|
| 243 |
+
lora_a_value: Any,
|
| 244 |
+
lora_a_alpha_value: Any,
|
| 245 |
+
lora_b_value: Any,
|
| 246 |
+
lora_b_alpha_value: Any,
|
| 247 |
) -> Tuple[Any, str]:
|
| 248 |
"""Run image-to-image generation."""
|
|
|
|
| 249 |
pipe = pipes[model]
|
| 250 |
|
| 251 |
if input_image is None:
|
|
|
|
| 253 |
|
| 254 |
pil_image = to_pil(input_image)
|
| 255 |
|
| 256 |
+
lora_a = _normalize_lora_name(lora_a_value)
|
| 257 |
+
lora_b = _normalize_lora_name(lora_b_value)
|
| 258 |
+
alpha_a = float(lora_a_alpha_value or 0.0)
|
| 259 |
+
alpha_b = float(lora_b_alpha_value or 0.0)
|
| 260 |
+
|
| 261 |
+
active_lora_names, active_lora_alphas = _apply_lora_if_allowed(
|
| 262 |
+
model,
|
| 263 |
+
pipe,
|
| 264 |
+
lora_a,
|
| 265 |
+
alpha_a,
|
| 266 |
+
lora_b,
|
| 267 |
+
alpha_b,
|
| 268 |
+
)
|
| 269 |
+
|
| 270 |
cfg = Img2ImgConfig(
|
| 271 |
prompt=prompt or "",
|
| 272 |
negative_prompt=negative or "",
|
|
|
|
| 280 |
)
|
| 281 |
|
| 282 |
image, meta = generate_img2img(pipe, cfg, pil_image)
|
| 283 |
+
meta.model_id = model
|
| 284 |
+
meta.lora_names = active_lora_names
|
| 285 |
+
meta.lora_alphas = active_lora_alphas
|
| 286 |
|
| 287 |
try:
|
| 288 |
save_history_entry(meta, image)
|
|
|
|
| 302 |
|
| 303 |
pil_image = to_pil(input_image)
|
| 304 |
|
|
|
|
| 305 |
try:
|
| 306 |
scale_int = int(float(scale))
|
| 307 |
except Exception as exc: # noqa: BLE001
|
|
|
|
| 309 |
|
| 310 |
upscaler = Upscaler(scale=scale_int, prefer="ncnn")
|
| 311 |
out_image, meta = upscaler.upscale(pil_image)
|
| 312 |
+
meta.model_id = "RealESRGAN"
|
| 313 |
|
| 314 |
try:
|
| 315 |
save_history_entry(meta, out_image)
|
|
|
|
| 319 |
return out_image, pretty_json(meta.to_dict())
|
| 320 |
|
| 321 |
|
| 322 |
+
# Handler factories wired to Gradio
|
|
|
|
| 323 |
|
| 324 |
+
|
| 325 |
+
def make_txt2img_handler(pipes: dict[str, Any]):
    """Factory to build the txt2img handler with extra UI inputs.

    The returned closure matches the positional input order wired up by the
    Text → Image tab (core controls first, then model choice and LoRA extras)
    and forwards everything to :func:`_txt2img_handler`.
    """

    def handler(
        prompt: str,
        negative: str,
        steps: int,
        guidance: float,
        width: int,
        height: int,
        seed: Any,
        model_choice_value: Any,
        lora_a_value: Any,
        lora_a_alpha_value: Any,
        lora_b_value: Any,
        lora_b_alpha_value: Any,
    ):
        # The dropdown may deliver a non-str value; coerce before lookup.
        return _txt2img_handler(
            str(model_choice_value),
            pipes,
            prompt,
            negative,
            steps,
            guidance,
            width,
            height,
            seed,
            lora_a_value,
            lora_a_alpha_value,
            lora_b_value,
            lora_b_alpha_value,
        )

    return handler
|
| 360 |
|
| 361 |
|
| 362 |
+
def make_img2img_handler(pipes: dict[str, Any]):
    """Factory to build the img2img handler with extra UI inputs.

    The returned closure matches the positional input order wired up by the
    Image → Image tab (core controls first, then model choice and LoRA
    extras) and forwards everything to :func:`_img2img_handler`.
    """

    def handler(
        input_image: Any,
        prompt: str,
        negative: str,
        strength: float,
        steps: int,
        guidance: float,
        seed: Any,
        model_choice_value: Any,
        lora_a_value: Any,
        lora_a_alpha_value: Any,
        lora_b_value: Any,
        lora_b_alpha_value: Any,
    ):
        # The dropdown may deliver a non-str value; coerce before lookup.
        return _img2img_handler(
            str(model_choice_value),
            pipes,
            input_image,
            prompt,
            negative,
            strength,
            steps,
            guidance,
            seed,
            lora_a_value,
            lora_a_alpha_value,
            lora_b_value,
            lora_b_alpha_value,
        )

    return handler
|
| 397 |
|
| 398 |
|
| 399 |
+
# Top-level UI composition
|
| 400 |
+
|
| 401 |
+
|
| 402 |
def build_ui(txt2img_pipes: dict, img2img_pipes: dict) -> gr.Blocks:
    """Build the entire Gradio UI.

    Args:
        txt2img_pipes: Mapping of model name ("SD1.5"/"Turbo") to a
            text-to-image pipeline.
        img2img_pipes: Mapping of model name to an image-to-image pipeline.

    Returns:
        The assembled gr.Blocks app, ready to launch.
    """
    with gr.Blocks() as demo:
        gr.Markdown(
            "# Stable Diffusion Generator\n"
            "SD1.5 - slower, higher quality | Turbo - faster, lower quality"
        )

        # Shared model selector; passed into both generation tabs as an
        # extra input so handlers know which pipeline to use.
        model_choice = gr.Dropdown(
            choices=["SD1.5", "Turbo"],
            value="SD1.5",
            label="Model",
        )

        # LoRA controls (SD1.5 only). Hidden when Turbo is selected.
        with gr.Accordion("LoRA", open=False) as lora_group:
            # NOTE(review): list_loras() presumably scans a local adapters
            # directory — confirm against sdgen.sd.lora_loader.
            lora_files = list_loras()
            lora_choices = ["(none)"] + lora_files

            lora_a = gr.Dropdown(
                lora_choices,
                value="(none)",
                label="LoRA A",
                info=(
                    "Primary LoRA. Use this for style/character/detail control. "
                    "Pick '(none)' to disable."
                ),
            )
            alpha_a = gr.Slider(
                minimum=-2.0,
                maximum=2.0,
                value=1.0,
                step=0.1,
                label="LoRA A weight",
                info=(
                    "Positive → apply effect. Negative → \
dampen or invert. 0 → disable."
                ),
            )

            lora_b = gr.Dropdown(
                lora_choices,
                value="(none)",
                label="LoRA B (optional)",
                info="Optional second LoRA. Can be mixed with LoRA A.",
            )
            alpha_b = gr.Slider(
                minimum=-2.0,
                maximum=2.0,
                value=0.8,
                step=0.1,
                label="LoRA B weight",
                info=("Same convention as LoRA A. Use lighter weights when mixing."),
            )

        # Core tabs
        txt_controls = build_txt2img_tab(
            make_txt2img_handler(txt2img_pipes),
            extra_inputs=[model_choice, lora_a, alpha_a, lora_b, alpha_b],
        )

        img_controls = build_img2img_tab(
            make_img2img_handler(img2img_pipes),
            extra_inputs=[model_choice, lora_a, alpha_a, lora_b, alpha_b],
        )

        build_upscaler_tab(handler=_upscale_handler)
        # Presets need direct references to the controls (and LoRA widgets)
        # so "Apply Preset" can fill them in.
        build_presets_tab(
            txt_controls=txt_controls,
            img_controls=img_controls,
            model_choice=model_choice,
            lora_a=lora_a,
            alpha_a=alpha_a,
            lora_b=lora_b,
            alpha_b=alpha_b,
        )
        build_history_tab()

        # Switching models re-ranges the sampler sliders on both tabs and
        # toggles the LoRA accordion visibility.
        model_choice.change(
            fn=_update_model_dependents,
            inputs=[model_choice],
            outputs=[
                txt_controls.steps,
                txt_controls.guidance,
                img_controls.steps,
                img_controls.guidance,
                lora_group,
            ],
        )

        # turbo constraints
        # Re-validate steps×strength ≥ 1 whenever any contributing input
        # changes; the hidden markdown carries the adjustment message.
        msg = gr.Markdown("", visible=False)
        for inp in [model_choice, img_controls.steps, img_controls.strength]:
            inp.change(
                fn=_validate_turbo_strength,
                inputs=[model_choice, img_controls.steps, img_controls.strength],
                outputs=[img_controls.steps, img_controls.strength, msg],
            )

        gr.Markdown(
            "### Notes\n"
            "- **History → Refresh** if new entries do not appear.\n"
            "- Presets apply to both **Text → Image** and **Image → Image** tabs.\n"
            "- Deployed on CPU-only HF Spaces, so performance would be a little slower \
(~10 mins for SD1.5 and ~1.5 min for Turbo on default settings)."
        )

    return demo
|
src/sdgen/ui/tabs/img2img_tab.py
CHANGED
|
@@ -3,7 +3,7 @@
|
|
| 3 |
from __future__ import annotations
|
| 4 |
|
| 5 |
from dataclasses import dataclass
|
| 6 |
-
from typing import Any, Callable, Tuple
|
| 7 |
|
| 8 |
import gradio as gr
|
| 9 |
|
|
@@ -21,16 +21,11 @@ class Img2ImgControls:
|
|
| 21 |
seed: gr.Textbox
|
| 22 |
|
| 23 |
|
| 24 |
-
def build_img2img_tab(
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
(output_image, metadata_dict)
|
| 30 |
-
|
| 31 |
-
Returns:
|
| 32 |
-
Img2ImgControls: A container with references to UI components.
|
| 33 |
-
"""
|
| 34 |
with gr.Tab("Image → Image"):
|
| 35 |
with gr.Row():
|
| 36 |
with gr.Column():
|
|
@@ -88,7 +83,7 @@ the prompt more strictly. "
|
|
| 88 |
value="",
|
| 89 |
placeholder="Leave empty for random",
|
| 90 |
)
|
| 91 |
-
|
| 92 |
generate_button = gr.Button("Generate")
|
| 93 |
|
| 94 |
with gr.Column():
|
|
@@ -100,17 +95,13 @@ the prompt more strictly. "
|
|
| 100 |
label="Metadata",
|
| 101 |
)
|
| 102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
generate_button.click(
|
| 104 |
fn=handler,
|
| 105 |
-
inputs=
|
| 106 |
-
input_image,
|
| 107 |
-
prompt,
|
| 108 |
-
negative,
|
| 109 |
-
strength,
|
| 110 |
-
steps,
|
| 111 |
-
guidance,
|
| 112 |
-
seed,
|
| 113 |
-
],
|
| 114 |
outputs=[out_image, out_metadata],
|
| 115 |
)
|
| 116 |
|
|
|
|
| 3 |
from __future__ import annotations
|
| 4 |
|
| 5 |
from dataclasses import dataclass
|
| 6 |
+
from typing import Any, Callable, List, Optional, Tuple
|
| 7 |
|
| 8 |
import gradio as gr
|
| 9 |
|
|
|
|
| 21 |
seed: gr.Textbox
|
| 22 |
|
| 23 |
|
| 24 |
+
def build_img2img_tab(
|
| 25 |
+
handler: Callable[..., Tuple[Any, dict]],
|
| 26 |
+
extra_inputs: Optional[List[gr.components.Component]] = None,
|
| 27 |
+
) -> Img2ImgControls:
|
| 28 |
+
"""Build the Image → Image tab and connect it to the provided handler."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
with gr.Tab("Image → Image"):
|
| 30 |
with gr.Row():
|
| 31 |
with gr.Column():
|
|
|
|
| 83 |
value="",
|
| 84 |
placeholder="Leave empty for random",
|
| 85 |
)
|
| 86 |
+
|
| 87 |
generate_button = gr.Button("Generate")
|
| 88 |
|
| 89 |
with gr.Column():
|
|
|
|
| 95 |
label="Metadata",
|
| 96 |
)
|
| 97 |
|
| 98 |
+
inputs = [input_image, prompt, negative, strength, steps, guidance, seed]
|
| 99 |
+
if extra_inputs:
|
| 100 |
+
inputs.extend(extra_inputs)
|
| 101 |
+
|
| 102 |
generate_button.click(
|
| 103 |
fn=handler,
|
| 104 |
+
inputs=inputs,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
outputs=[out_image, out_metadata],
|
| 106 |
)
|
| 107 |
|
src/sdgen/ui/tabs/presets_tab.py
CHANGED
|
@@ -11,16 +11,19 @@ from sdgen.ui.tabs.img2img_tab import Img2ImgControls
|
|
| 11 |
from sdgen.ui.tabs.txt2img_tab import Txt2ImgControls
|
| 12 |
|
| 13 |
|
| 14 |
-
def apply_preset(
|
| 15 |
-
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
Args:
|
| 18 |
preset_name: A string or a one-element list representing the preset key.
|
|
|
|
| 19 |
|
| 20 |
Returns:
|
| 21 |
-
|
| 22 |
"""
|
| 23 |
-
# unwrap dropdown list behavior
|
| 24 |
if isinstance(preset_name, (list, tuple)):
|
| 25 |
preset_name = preset_name[0] if preset_name else None
|
| 26 |
|
|
@@ -31,19 +34,51 @@ def apply_preset(preset_name: Any) -> Tuple[Any, ...]:
|
|
| 31 |
if preset is None:
|
| 32 |
raise gr.Error("Invalid preset selected.")
|
| 33 |
|
|
|
|
|
|
|
| 34 |
prompt = preset.get("prompt", "")
|
| 35 |
negative = preset.get("negative_prompt", "")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
|
| 37 |
-
# only return data; UI wiring chooses what to set
|
| 38 |
status_msg = f"Applied preset: {preset_name}"
|
| 39 |
|
| 40 |
return (
|
| 41 |
# txt2img
|
| 42 |
prompt,
|
| 43 |
negative,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
# img2img
|
| 45 |
prompt,
|
| 46 |
negative,
|
|
|
|
|
|
|
| 47 |
# status
|
| 48 |
status_msg,
|
| 49 |
)
|
|
@@ -52,13 +87,13 @@ def apply_preset(preset_name: Any) -> Tuple[Any, ...]:
|
|
| 52 |
def build_presets_tab(
|
| 53 |
txt_controls: Txt2ImgControls,
|
| 54 |
img_controls: Img2ImgControls,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 55 |
) -> None:
|
| 56 |
-
"""
|
| 57 |
-
|
| 58 |
-
Args:
|
| 59 |
-
txt_controls: References to Text→Image input controls.
|
| 60 |
-
img_controls: References to Image→Image input controls.
|
| 61 |
-
"""
|
| 62 |
with gr.Tab("Presets"):
|
| 63 |
with gr.Row():
|
| 64 |
with gr.Column():
|
|
@@ -69,24 +104,27 @@ def build_presets_tab(
|
|
| 69 |
apply_button = gr.Button("Apply Preset")
|
| 70 |
status_box = gr.Markdown("")
|
| 71 |
|
| 72 |
-
with gr.Column():
|
| 73 |
-
gr.Markdown(
|
| 74 |
-
"Applying a preset fills prompt, negative prompt, steps, "
|
| 75 |
-
"guidance, and resolution for both **Text → Image** "
|
| 76 |
-
"and **Image → Image** tabs.",
|
| 77 |
-
)
|
| 78 |
-
|
| 79 |
apply_button.click(
|
| 80 |
fn=apply_preset,
|
| 81 |
-
inputs=[preset_name],
|
| 82 |
outputs=[
|
| 83 |
# txt2img
|
| 84 |
txt_controls.prompt,
|
| 85 |
txt_controls.negative,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
# img2img
|
| 87 |
img_controls.prompt,
|
| 88 |
img_controls.negative,
|
| 89 |
-
|
|
|
|
|
|
|
| 90 |
status_box,
|
| 91 |
],
|
| 92 |
)
|
|
|
|
| 11 |
from sdgen.ui.tabs.txt2img_tab import Txt2ImgControls
|
| 12 |
|
| 13 |
|
| 14 |
+
def apply_preset(
|
| 15 |
+
preset_name: Any,
|
| 16 |
+
model_name: Any,
|
| 17 |
+
) -> Tuple[Any, ...]:
|
| 18 |
+
"""Return values for txt2img and img2img controls based on the chosen preset.
|
| 19 |
|
| 20 |
Args:
|
| 21 |
preset_name: A string or a one-element list representing the preset key.
|
| 22 |
+
model_name: The current model choice, used to disable LoRA when Turbo.
|
| 23 |
|
| 24 |
Returns:
|
| 25 |
+
Tuple of UI values in a fixed field order.
|
| 26 |
"""
|
|
|
|
| 27 |
if isinstance(preset_name, (list, tuple)):
|
| 28 |
preset_name = preset_name[0] if preset_name else None
|
| 29 |
|
|
|
|
| 34 |
if preset is None:
|
| 35 |
raise gr.Error("Invalid preset selected.")
|
| 36 |
|
| 37 |
+
model = str(model_name).strip()
|
| 38 |
+
|
| 39 |
prompt = preset.get("prompt", "")
|
| 40 |
negative = preset.get("negative_prompt", "")
|
| 41 |
+
steps = int(preset.get("steps", 20))
|
| 42 |
+
cfg = float(preset.get("cfg", 5.0))
|
| 43 |
+
width = int(preset.get("width", 512))
|
| 44 |
+
height = int(preset.get("height", 512))
|
| 45 |
+
loraA = preset.get("lora_A")
|
| 46 |
+
alphaA = preset.get("alpha_A", 0.0)
|
| 47 |
+
loraB = preset.get("lora_B")
|
| 48 |
+
alphaB = preset.get("alpha_B", 0.0)
|
| 49 |
+
|
| 50 |
+
# Turbo mode → ignore LoRA and override sampler settings
|
| 51 |
+
if model == "Turbo":
|
| 52 |
+
steps = 2
|
| 53 |
+
cfg = 0.0
|
| 54 |
+
loraA = "(none)"
|
| 55 |
+
alphaA = 0.0
|
| 56 |
+
loraB = "(none)"
|
| 57 |
+
alphaB = 0.0
|
| 58 |
+
else:
|
| 59 |
+
# normalize empty
|
| 60 |
+
loraA = loraA or "(none)"
|
| 61 |
+
loraB = loraB or "(none)"
|
| 62 |
|
|
|
|
| 63 |
status_msg = f"Applied preset: {preset_name}"
|
| 64 |
|
| 65 |
return (
|
| 66 |
# txt2img
|
| 67 |
prompt,
|
| 68 |
negative,
|
| 69 |
+
steps,
|
| 70 |
+
cfg,
|
| 71 |
+
width,
|
| 72 |
+
height,
|
| 73 |
+
loraA,
|
| 74 |
+
alphaA,
|
| 75 |
+
loraB,
|
| 76 |
+
alphaB,
|
| 77 |
# img2img
|
| 78 |
prompt,
|
| 79 |
negative,
|
| 80 |
+
steps,
|
| 81 |
+
cfg,
|
| 82 |
# status
|
| 83 |
status_msg,
|
| 84 |
)
|
|
|
|
| 87 |
def build_presets_tab(
|
| 88 |
txt_controls: Txt2ImgControls,
|
| 89 |
img_controls: Img2ImgControls,
|
| 90 |
+
model_choice: gr.Dropdown,
|
| 91 |
+
lora_a: gr.Dropdown,
|
| 92 |
+
alpha_a: gr.Slider,
|
| 93 |
+
lora_b: gr.Dropdown,
|
| 94 |
+
alpha_b: gr.Slider,
|
| 95 |
) -> None:
|
| 96 |
+
"""Create the Presets tab and connect values to UI fields."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 97 |
with gr.Tab("Presets"):
|
| 98 |
with gr.Row():
|
| 99 |
with gr.Column():
|
|
|
|
| 104 |
apply_button = gr.Button("Apply Preset")
|
| 105 |
status_box = gr.Markdown("")
|
| 106 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
apply_button.click(
|
| 108 |
fn=apply_preset,
|
| 109 |
+
inputs=[preset_name, model_choice],
|
| 110 |
outputs=[
|
| 111 |
# txt2img
|
| 112 |
txt_controls.prompt,
|
| 113 |
txt_controls.negative,
|
| 114 |
+
txt_controls.steps,
|
| 115 |
+
txt_controls.guidance,
|
| 116 |
+
txt_controls.width,
|
| 117 |
+
txt_controls.height,
|
| 118 |
+
lora_a,
|
| 119 |
+
alpha_a,
|
| 120 |
+
lora_b,
|
| 121 |
+
alpha_b,
|
| 122 |
# img2img
|
| 123 |
img_controls.prompt,
|
| 124 |
img_controls.negative,
|
| 125 |
+
img_controls.steps,
|
| 126 |
+
img_controls.guidance,
|
| 127 |
+
# status box
|
| 128 |
status_box,
|
| 129 |
],
|
| 130 |
)
|
src/sdgen/ui/tabs/txt2img_tab.py
CHANGED
|
@@ -3,7 +3,7 @@
|
|
| 3 |
from __future__ import annotations
|
| 4 |
|
| 5 |
from dataclasses import dataclass
|
| 6 |
-
from typing import Callable, Tuple
|
| 7 |
|
| 8 |
import gradio as gr
|
| 9 |
|
|
@@ -24,15 +24,11 @@ class Txt2ImgControls:
|
|
| 24 |
seed: gr.components.Textbox
|
| 25 |
|
| 26 |
|
| 27 |
-
def build_txt2img_tab(
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
Returns:
|
| 34 |
-
A Txt2ImgControls instance containing references to all UI controls.
|
| 35 |
-
"""
|
| 36 |
with gr.Tab("Text → Image"):
|
| 37 |
with gr.Row():
|
| 38 |
with gr.Column():
|
|
@@ -95,9 +91,12 @@ the prompt more strictly. "
|
|
| 95 |
out_image = gr.Image(label="Output")
|
| 96 |
out_meta = gr.JSON(label="Metadata (JSON)")
|
| 97 |
|
|
|
|
|
|
|
|
|
|
| 98 |
generate_button.click(
|
| 99 |
fn=handler,
|
| 100 |
-
inputs=
|
| 101 |
outputs=[out_image, out_meta],
|
| 102 |
)
|
| 103 |
|
|
|
|
| 3 |
from __future__ import annotations
|
| 4 |
|
| 5 |
from dataclasses import dataclass
|
| 6 |
+
from typing import Callable, List, Optional, Tuple
|
| 7 |
|
| 8 |
import gradio as gr
|
| 9 |
|
|
|
|
| 24 |
seed: gr.components.Textbox
|
| 25 |
|
| 26 |
|
| 27 |
+
def build_txt2img_tab(
|
| 28 |
+
handler: Callable[..., Tuple],
|
| 29 |
+
extra_inputs: Optional[List[gr.components.Component]] = None,
|
| 30 |
+
) -> Txt2ImgControls:
|
| 31 |
+
"""Construct the Text → Image tab and bind the Generate button."""
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
with gr.Tab("Text → Image"):
|
| 33 |
with gr.Row():
|
| 34 |
with gr.Column():
|
|
|
|
| 91 |
out_image = gr.Image(label="Output")
|
| 92 |
out_meta = gr.JSON(label="Metadata (JSON)")
|
| 93 |
|
| 94 |
+
inputs = [prompt, negative, steps, guidance, width, height, seed]
|
| 95 |
+
if extra_inputs:
|
| 96 |
+
inputs.extend(extra_inputs)
|
| 97 |
generate_button.click(
|
| 98 |
fn=handler,
|
| 99 |
+
inputs=inputs,
|
| 100 |
outputs=[out_image, out_meta],
|
| 101 |
)
|
| 102 |
|
src/sdgen/upscaler/__init__.py
CHANGED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
"""Package initialization file for Stable Diffusion Image Generator."""
|
|
|
|
|
|