Spaces:
Runtime error
Runtime error
Upload 11 files
Browse files- src/frontend/webui/controlnet_ui.py +194 -0
- src/frontend/webui/errors.py +10 -0
- src/frontend/webui/generation_settings_ui.py +183 -0
- src/frontend/webui/image_to_image_ui.py +132 -0
- src/frontend/webui/image_variations_ui.py +118 -0
- src/frontend/webui/lora_models_ui.py +185 -0
- src/frontend/webui/models_ui.py +180 -0
- src/frontend/webui/realtime_ui.py +147 -0
- src/frontend/webui/text_to_image_ui.py +112 -0
- src/frontend/webui/ui.py +105 -0
- src/frontend/webui/upscaler_ui.py +83 -0
src/frontend/webui/controlnet_ui.py
ADDED
|
@@ -0,0 +1,194 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from PIL import Image
|
| 3 |
+
from backend.lora import get_lora_models
|
| 4 |
+
from state import get_settings
|
| 5 |
+
from backend.models.lcmdiffusion_setting import ControlNetSetting
|
| 6 |
+
from backend.annotators.image_control_factory import ImageControlFactory
|
| 7 |
+
|
| 8 |
+
_controlnet_models_map = None
|
| 9 |
+
_controlnet_enabled = False
|
| 10 |
+
_adapter_path = None
|
| 11 |
+
|
| 12 |
+
app_settings = get_settings()
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def on_user_input(
    enable: bool,
    adapter_name: str,
    conditioning_scale: float,
    control_image: Image,
    preprocessor: str,
):
    """Shared Gradio handler for every ControlNet widget.

    Validates the widget state, writes it into the shared app settings and
    flags a pipeline rebuild when the enabled state or the adapter changed.

    Args:
        enable: State of the "Enable ControlNet" checkbox.
        adapter_name: Key of the selected model in ``_controlnet_models_map``.
        conditioning_scale: Slider value (0.0-1.0).
        control_image: PIL control image, or ``None`` if not provided yet.
        preprocessor: Selected preprocessor name, or "None" for the raw image.

    Returns:
        A ``gr.Checkbox`` update: forced off when validation fails, echoing
        the requested state otherwise.
    """
    # Gradio can deliver a non-string dropdown value (e.g. nothing selected);
    # refuse to proceed in that case.
    if not isinstance(adapter_name, str):
        gr.Warning("Please select a valid ControlNet model")
        return gr.Checkbox(value=False)

    settings = app_settings.settings.lcm_diffusion_setting
    # Lazily create the ControlNet settings object on first use.
    if settings.controlnet is None:
        settings.controlnet = ControlNetSetting()

    if enable and (adapter_name is None or adapter_name == ""):
        gr.Warning("Please select a valid ControlNet adapter")
        return gr.Checkbox(value=False)
    elif enable and not control_image:
        gr.Warning("Please provide a ControlNet control image")
        return gr.Checkbox(value=False)

    # No image yet: nothing to preprocess or store, just echo the checkbox.
    if control_image is None:
        return gr.Checkbox(value=enable)

    # Run the chosen annotator over the control image; "None" passes it raw.
    if preprocessor == "None":
        processed_control_image = control_image
    else:
        image_control_factory = ImageControlFactory()
        control = image_control_factory.create_control(preprocessor)
        processed_control_image = control.get_control_image(control_image)

    if not enable:
        settings.controlnet.enabled = False
    else:
        settings.controlnet.enabled = True
        # NOTE(review): assumes adapter_name is a key of _controlnet_models_map
        # (guaranteed when the value comes from the dropdown built below).
        settings.controlnet.adapter_path = _controlnet_models_map[adapter_name]
        settings.controlnet.conditioning_scale = float(conditioning_scale)
        settings.controlnet._control_image = processed_control_image

    # This code can be improved; currently, if the user clicks the
    # "Enable ControlNet" checkbox or changes the currently selected
    # ControlNet model, it will trigger a pipeline rebuild even if, in
    # the end, the user leaves the same ControlNet settings
    global _controlnet_enabled
    global _adapter_path
    if settings.controlnet.enabled != _controlnet_enabled or (
        settings.controlnet.enabled
        and settings.controlnet.adapter_path != _adapter_path
    ):
        settings.rebuild_pipeline = True
    # Remember the applied state so the next call can detect real changes.
    _controlnet_enabled = settings.controlnet.enabled
    _adapter_path = settings.controlnet.adapter_path
    return gr.Checkbox(value=enable)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def on_change_conditioning_scale(cond_scale):
    """Persist the ControlNet conditioning-scale slider value.

    Args:
        cond_scale: New conditioning scale (0.0-1.0) from the Gradio slider.
    """
    settings = app_settings.settings.lcm_diffusion_setting
    # Guard: the slider can fire before "Enable ControlNet" was ever toggled,
    # in which case no ControlNetSetting exists yet and the original code
    # would raise AttributeError on None.
    if settings.controlnet is None:
        settings.controlnet = ControlNetSetting()
    settings.controlnet.conditioning_scale = cond_scale
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def get_controlnet_ui() -> None:
    """Build the ControlNet tab of the web UI.

    Creates the enable checkbox, model dropdown, conditioning-scale slider,
    control-image input and preprocessor radio, and wires every widget to the
    single shared ``on_user_input`` validation handler.
    """
    with gr.Blocks() as ui:
        gr.HTML(
            'Download ControlNet v1.1 model from <a href="https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/tree/main">ControlNet v1.1 </a> (723 MB files) and place it in <b>controlnet_models</b> folder,restart the app'
        )
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    # Scan the controlnet_models directory once at UI build
                    # time (same directory-scanning helper as for LoRA files).
                    global _controlnet_models_map
                    _controlnet_models_map = get_lora_models(
                        app_settings.settings.lcm_diffusion_setting.dirs["controlnet"]
                    )
                    controlnet_models = list(_controlnet_models_map.keys())
                    default_model = (
                        controlnet_models[0] if len(controlnet_models) else None
                    )

                    enabled_checkbox = gr.Checkbox(
                        label="Enable ControlNet",
                        info="Enable ControlNet",
                        show_label=True,
                    )
                    model_dropdown = gr.Dropdown(
                        _controlnet_models_map.keys(),
                        label="ControlNet model",
                        info="ControlNet model to load (.safetensors format)",
                        value=default_model,
                        interactive=True,
                    )
                    conditioning_scale_slider = gr.Slider(
                        0.0,
                        1.0,
                        value=0.5,
                        step=0.05,
                        label="ControlNet conditioning scale",
                        interactive=True,
                    )
                    control_image = gr.Image(
                        label="Control image",
                        type="pil",
                    )
                    preprocessor_radio = gr.Radio(
                        [
                            "Canny",
                            "Depth",
                            "LineArt",
                            "MLSD",
                            "NormalBAE",
                            "Pose",
                            "SoftEdge",
                            "Shuffle",
                            "None",
                        ],
                        label="Preprocessor",
                        info="Select the preprocessor for the control image",
                        value="Canny",
                        interactive=True,
                    )

                    # Every widget feeds the same validation handler with the
                    # same inputs/outputs; wire them in one loop instead of
                    # repeating the identical binding five times.
                    controlnet_inputs = [
                        enabled_checkbox,
                        model_dropdown,
                        conditioning_scale_slider,
                        control_image,
                        preprocessor_radio,
                    ]
                    for register_event in (
                        enabled_checkbox.input,
                        model_dropdown.input,
                        conditioning_scale_slider.input,
                        control_image.change,
                        preprocessor_radio.change,
                    ):
                        register_event(
                            fn=on_user_input,
                            inputs=controlnet_inputs,
                            outputs=[enabled_checkbox],
                        )
                    # Separate lightweight handler: scale changes alone do not
                    # need the full validation/rebuild logic.
                    conditioning_scale_slider.change(
                        on_change_conditioning_scale, conditioning_scale_slider
                    )
|
src/frontend/webui/errors.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def show_error(message: str) -> None:
    """Surface a backend error message as a Gradio warning toast.

    Known low-level error texts are mapped to a friendlier hint; anything
    else is shown verbatim.
    """
    friendly = message
    if "num_inference_steps != 2" in message:
        friendly = (
            "Please set the generation setting inference steps to 2 for SANA sprint model"
        )
    gr.Warning(friendly)
|
src/frontend/webui/generation_settings_ui.py
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from state import get_settings
|
| 3 |
+
from backend.models.gen_images import ImageFormat
|
| 4 |
+
|
| 5 |
+
app_settings = get_settings()
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# Gradio change-handlers: each one copies a single widget value into the
# shared application settings object returned by ``get_settings()``.
# Only ``on_change_image_format`` persists the settings to disk.


def on_change_inference_steps(steps):
    """Persist the inference-steps slider value."""
    app_settings.settings.lcm_diffusion_setting.inference_steps = steps


def on_change_image_width(img_width):
    """Persist the image-width slider value."""
    app_settings.settings.lcm_diffusion_setting.image_width = img_width


def on_change_image_height(img_height):
    """Persist the image-height slider value."""
    app_settings.settings.lcm_diffusion_setting.image_height = img_height


def on_change_num_images(num_images):
    """Persist the number-of-images slider value."""
    app_settings.settings.lcm_diffusion_setting.number_of_images = num_images


def on_change_guidance_scale(guidance_scale):
    """Persist the guidance-scale slider value."""
    app_settings.settings.lcm_diffusion_setting.guidance_scale = guidance_scale


def on_change_clip_skip(clip_skip):
    """Persist the CLIP-skip slider value."""
    app_settings.settings.lcm_diffusion_setting.clip_skip = clip_skip


def on_change_token_merging(token_merging):
    """Persist the token-merging ratio slider value."""
    app_settings.settings.lcm_diffusion_setting.token_merging = token_merging


def on_change_seed_value(seed):
    """Persist the seed slider value."""
    app_settings.settings.lcm_diffusion_setting.seed = seed


def on_change_seed_checkbox(seed_checkbox):
    """Persist whether the fixed seed should be used."""
    app_settings.settings.lcm_diffusion_setting.use_seed = seed_checkbox


def on_change_safety_checker_checkbox(safety_checker_checkbox):
    """Persist whether the safety checker is enabled."""
    app_settings.settings.lcm_diffusion_setting.use_safety_checker = (
        safety_checker_checkbox
    )


def on_change_tiny_auto_encoder_checkbox(tiny_auto_encoder_checkbox):
    """Persist whether the tiny autoencoder (TAESD) is enabled."""
    app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder = (
        tiny_auto_encoder_checkbox
    )


def on_offline_checkbox(offline_checkbox):
    """Persist whether only locally cached/offline models may be used."""
    app_settings.settings.lcm_diffusion_setting.use_offline_model = offline_checkbox


def on_change_image_format(image_format):
    """Persist the output format radio ("PNG"/"JPEG") and save settings to disk."""
    if image_format == "PNG":
        app_settings.settings.generated_images.format = ImageFormat.PNG.value.upper()
    else:
        app_settings.settings.generated_images.format = ImageFormat.JPEG.value.upper()

    # Unlike the other handlers, a format change is written to disk at once.
    app_settings.save()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def get_generation_settings_ui() -> None:
    """Build the "Generation Settings" tab.

    Each widget is initialized from the current app settings and wired to the
    matching ``on_change_*`` handler above, so edits take effect immediately
    (in memory; only the image-format change is saved to disk).
    """
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                num_inference_steps = gr.Slider(
                    1,
                    25,
                    value=app_settings.settings.lcm_diffusion_setting.inference_steps,
                    step=1,
                    label="Inference Steps",
                    interactive=True,
                )

                # Width/height step by 256 to stay on sizes the pipelines accept.
                image_height = gr.Slider(
                    256,
                    1024,
                    value=app_settings.settings.lcm_diffusion_setting.image_height,
                    step=256,
                    label="Image Height",
                    interactive=True,
                )
                image_width = gr.Slider(
                    256,
                    1024,
                    value=app_settings.settings.lcm_diffusion_setting.image_width,
                    step=256,
                    label="Image Width",
                    interactive=True,
                )
                num_images = gr.Slider(
                    1,
                    50,
                    value=app_settings.settings.lcm_diffusion_setting.number_of_images,
                    step=1,
                    label="Number of images to generate",
                    interactive=True,
                )
                guidance_scale = gr.Slider(
                    1.0,
                    10.0,
                    value=app_settings.settings.lcm_diffusion_setting.guidance_scale,
                    step=0.1,
                    label="Guidance Scale",
                    interactive=True,
                )
                clip_skip = gr.Slider(
                    1,
                    12,
                    value=app_settings.settings.lcm_diffusion_setting.clip_skip,
                    step=1,
                    label="CLIP Skip",
                    interactive=True,
                )
                token_merging = gr.Slider(
                    0.0,
                    1.0,
                    value=app_settings.settings.lcm_diffusion_setting.token_merging,
                    step=0.01,
                    label="Token Merging",
                    interactive=True,
                )

                seed = gr.Slider(
                    value=app_settings.settings.lcm_diffusion_setting.seed,
                    minimum=0,
                    maximum=999999999,
                    label="Seed",
                    step=1,
                    interactive=True,
                )
                seed_checkbox = gr.Checkbox(
                    label="Use seed",
                    value=app_settings.settings.lcm_diffusion_setting.use_seed,
                    interactive=True,
                )

                safety_checker_checkbox = gr.Checkbox(
                    label="Use Safety Checker",
                    value=app_settings.settings.lcm_diffusion_setting.use_safety_checker,
                    interactive=True,
                )
                tiny_auto_encoder_checkbox = gr.Checkbox(
                    label="Use Tiny AutoEncoder (Fast, moderate quality)",
                    value=app_settings.settings.lcm_diffusion_setting.use_tiny_auto_encoder,
                    interactive=True,
                )
                offline_checkbox = gr.Checkbox(
                    label="Use locally cached model or downloaded model folder(offline)",
                    value=app_settings.settings.lcm_diffusion_setting.use_offline_model,
                    interactive=True,
                )
                img_format = gr.Radio(
                    label="Output image format",
                    choices=["PNG", "JPEG"],
                    value=app_settings.settings.generated_images.format,
                    interactive=True,
                )

        # Wire each widget to its settings-persisting handler.
        num_inference_steps.change(on_change_inference_steps, num_inference_steps)
        image_height.change(on_change_image_height, image_height)
        image_width.change(on_change_image_width, image_width)
        num_images.change(on_change_num_images, num_images)
        guidance_scale.change(on_change_guidance_scale, guidance_scale)
        clip_skip.change(on_change_clip_skip, clip_skip)
        token_merging.change(on_change_token_merging, token_merging)
        seed.change(on_change_seed_value, seed)
        seed_checkbox.change(on_change_seed_checkbox, seed_checkbox)
        safety_checker_checkbox.change(
            on_change_safety_checker_checkbox, safety_checker_checkbox
        )
        tiny_auto_encoder_checkbox.change(
            on_change_tiny_auto_encoder_checkbox, tiny_auto_encoder_checkbox
        )
        offline_checkbox.change(on_offline_checkbox, offline_checkbox)
        img_format.change(on_change_image_format, img_format)
|
src/frontend/webui/image_to_image_ui.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from backend.models.lcmdiffusion_setting import DiffusionTask
|
| 4 |
+
from models.interface_types import InterfaceType
|
| 5 |
+
from frontend.utils import is_reshape_required
|
| 6 |
+
from constants import DEVICE
|
| 7 |
+
from state import get_settings, get_context
|
| 8 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 9 |
+
from frontend.webui.errors import show_error
|
| 10 |
+
|
| 11 |
+
app_settings = get_settings()
|
| 12 |
+
|
| 13 |
+
previous_width = 0
|
| 14 |
+
previous_height = 0
|
| 15 |
+
previous_model_id = ""
|
| 16 |
+
previous_num_of_images = 0
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def generate_image_to_image(
    prompt,
    negative_prompt,
    init_image,
    strength,
) -> Any:
    """Gradio click-handler: run image-to-image generation.

    Args:
        prompt: Positive prompt text.
        negative_prompt: Negative prompt text (effective in LCM-LoRA mode).
        init_image: PIL init image from the image widget.
        strength: Denoising strength (0.1-1.0).

    Returns:
        The list of generated PIL images (shown in the output gallery), or a
        falsy result when generation failed (an error toast is shown instead).
    """
    context = get_context(InterfaceType.WEBUI)
    # The previous_* globals track the last OpenVINO run so we can decide
    # whether the static-shape model must be reshaped.
    global \
        previous_height, \
        previous_width, \
        previous_model_id, \
        previous_num_of_images, \
        app_settings

    app_settings.settings.lcm_diffusion_setting.prompt = prompt
    app_settings.settings.lcm_diffusion_setting.negative_prompt = negative_prompt
    app_settings.settings.lcm_diffusion_setting.init_image = init_image
    app_settings.settings.lcm_diffusion_setting.strength = strength

    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
        DiffusionTask.image_to_image.value
    )
    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
    reshape = False
    image_width = app_settings.settings.lcm_diffusion_setting.image_width
    image_height = app_settings.settings.lcm_diffusion_setting.image_height
    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
    # Only OpenVINO pipelines use static shapes and may need a reshape.
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        reshape = is_reshape_required(
            previous_width,
            image_width,
            previous_height,
            image_height,
            previous_model_id,
            model_id,
            previous_num_of_images,
            num_images,
        )

    # Run generation on a worker thread and block for the result
    # (presumably to keep heavy work off the Gradio event thread — verify).
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            context.generate_text_to_image,
            app_settings.settings,
            reshape,
            DEVICE,
        )
        images = future.result()
    if images:
        context.save_images(
            images,
            app_settings.settings,
        )
    else:
        show_error(context.error)

    # Remember this run's parameters for the next reshape decision.
    previous_width = image_width
    previous_height = image_height
    previous_model_id = model_id
    previous_num_of_images = num_images
    return images
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def get_image_to_image_ui() -> None:
    """Build the image-to-image tab: init image, prompts and strength on the
    left, generated-image gallery on the right."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Init image", type="pil")
                with gr.Row():
                    prompt = gr.Textbox(
                        show_label=False,
                        lines=3,
                        placeholder="A fantasy landscape",
                        container=False,
                    )

                    generate_btn = gr.Button(
                        "Generate",
                        elem_id="generate_button",
                        scale=0,
                    )
                negative_prompt = gr.Textbox(
                    label="Negative prompt (Works in LCM-LoRA mode, set guidance > 1.0):",
                    lines=1,
                    placeholder="",
                )
                strength = gr.Slider(
                    0.1,
                    1,
                    value=app_settings.settings.lcm_diffusion_setting.strength,
                    step=0.01,
                    label="Strength",
                )

                # Order must match generate_image_to_image's parameters.
                input_params = [
                    prompt,
                    negative_prompt,
                    input_image,
                    strength,
                ]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )

        generate_btn.click(
            fn=generate_image_to_image,
            inputs=input_params,
            outputs=output,
        )
|
src/frontend/webui/image_variations_ui.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from backend.models.lcmdiffusion_setting import DiffusionTask
|
| 4 |
+
from models.interface_types import InterfaceType
|
| 5 |
+
from frontend.utils import is_reshape_required
|
| 6 |
+
from constants import DEVICE
|
| 7 |
+
from state import get_settings, get_context
|
| 8 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 9 |
+
from frontend.webui.errors import show_error
|
| 10 |
+
|
| 11 |
+
app_settings = get_settings()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
previous_width = 0
|
| 15 |
+
previous_height = 0
|
| 16 |
+
previous_model_id = ""
|
| 17 |
+
previous_num_of_images = 0
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def generate_image_variations(
    init_image,
    variation_strength,
) -> Any:
    """Gradio click-handler: generate variations of an input image.

    Runs the image-to-image task with empty prompts, so only the init image
    and the strength drive the result.

    Args:
        init_image: PIL source image.
        variation_strength: Denoising strength (0.1-1.0); higher means the
            variation departs further from the source.

    Returns:
        The list of generated PIL images, or a falsy result when generation
        failed (an error toast is shown instead).
    """
    context = get_context(InterfaceType.WEBUI)
    # The previous_* globals track the last OpenVINO run so we can decide
    # whether the static-shape model must be reshaped.
    global \
        previous_height, \
        previous_width, \
        previous_model_id, \
        previous_num_of_images, \
        app_settings

    app_settings.settings.lcm_diffusion_setting.init_image = init_image
    app_settings.settings.lcm_diffusion_setting.strength = variation_strength
    # Variations are prompt-free image-to-image runs.
    app_settings.settings.lcm_diffusion_setting.prompt = ""
    app_settings.settings.lcm_diffusion_setting.negative_prompt = ""

    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
        DiffusionTask.image_to_image.value
    )
    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
    reshape = False
    image_width = app_settings.settings.lcm_diffusion_setting.image_width
    image_height = app_settings.settings.lcm_diffusion_setting.image_height
    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
    # Only OpenVINO pipelines use static shapes and may need a reshape.
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        reshape = is_reshape_required(
            previous_width,
            image_width,
            previous_height,
            image_height,
            previous_model_id,
            model_id,
            previous_num_of_images,
            num_images,
        )

    # Run generation on a worker thread and block for the result.
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            context.generate_text_to_image,
            app_settings.settings,
            reshape,
            DEVICE,
        )
        images = future.result()
    if images:
        context.save_images(
            images,
            app_settings.settings,
        )
    else:
        show_error(context.error)

    # Remember this run's parameters for the next reshape decision.
    previous_width = image_width
    previous_height = image_height
    previous_model_id = model_id
    previous_num_of_images = num_images
    return images
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def get_image_variations_ui() -> None:
    """Build the image-variations tab: source image and strength on the
    left, generated-variation gallery on the right."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Init image", type="pil")
                with gr.Row():
                    generate_btn = gr.Button(
                        "Generate",
                        elem_id="generate_button",
                        scale=0,
                    )

                variation_strength = gr.Slider(
                    0.1,
                    1,
                    value=0.4,
                    step=0.01,
                    label="Variations Strength",
                )

                # Order must match generate_image_variations's parameters.
                input_params = [
                    input_image,
                    variation_strength,
                ]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )

        generate_btn.click(
            fn=generate_image_variations,
            inputs=input_params,
            outputs=output,
        )
|
src/frontend/webui/lora_models_ui.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from os import path
|
| 3 |
+
from backend.lora import (
|
| 4 |
+
get_lora_models,
|
| 5 |
+
get_active_lora_weights,
|
| 6 |
+
update_lora_weights,
|
| 7 |
+
load_lora_weight,
|
| 8 |
+
)
|
| 9 |
+
from state import get_settings, get_context
|
| 10 |
+
from frontend.utils import get_valid_lora_model
|
| 11 |
+
from models.interface_types import InterfaceType
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
_MAX_LORA_WEIGHTS = 5
|
| 15 |
+
|
| 16 |
+
_custom_lora_sliders = []
|
| 17 |
+
_custom_lora_names = []
|
| 18 |
+
_custom_lora_columns = []
|
| 19 |
+
|
| 20 |
+
app_settings = get_settings()
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def on_click_update_weight(*lora_weights):
    """Apply the slider values to the currently loaded LoRA adapters.

    Args:
        *lora_weights: One weight per slider slot; the first
            ``len(active_weights)`` values are matched positionally to the
            active adapters.
    """
    active_weights = get_active_lora_weights()
    if not active_weights:
        gr.Warning("No active LoRAs, first you need to load LoRA model")
        return
    # Pair each active adapter name with its slider value, positionally.
    pending = [
        (active_weights[i][0], lora_weights[i])
        for i in range(len(active_weights))
    ]
    if pending:
        update_lora_weights(
            get_context(InterfaceType.WEBUI).lcm_text_to_image.pipeline,
            app_settings.settings.lcm_diffusion_setting,
            pending,
        )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def on_click_load_lora(lora_name, lora_weight):
    """Load the selected LoRA weight file into the active pipeline and
    refresh the "Loaded LoRA models" slider rows.

    Args:
        lora_name: Key of the model chosen in the LoRA dropdown.
        lora_weight: Initial weight for the newly loaded adapter.

    Returns:
        A flat list of ``_MAX_LORA_WEIGHTS`` slider labels, followed by as
        many slider values, followed by as many row-visibility updates
        (rows for active adapters are visible, unused slots hidden), or
        ``None`` when loading was aborted (a warning toast is shown instead).
    """
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        gr.Warning("Currently LoRA is not supported in OpenVINO.")
        return
    lora_models_map = get_lora_models(
        app_settings.settings.lcm_diffusion_setting.lora.models_dir
    )
    # Guard against an empty/unknown dropdown selection; indexing the map
    # with a missing key would raise KeyError inside the Gradio callback.
    if not lora_name or lora_name not in lora_models_map:
        gr.Warning("Please select a valid LoRA model")
        return

    # Load a new LoRA
    settings = app_settings.settings.lcm_diffusion_setting
    settings.lora.fuse = False
    settings.lora.enabled = False
    print(f"Selected Lora Model :{lora_name}")
    print(f"Lora weight :{lora_weight}")
    settings.lora.path = lora_models_map[lora_name]
    settings.lora.weight = lora_weight
    if not path.exists(settings.lora.path):
        gr.Warning("Invalid LoRA model path!")
        return
    pipeline = get_context(InterfaceType.WEBUI).lcm_text_to_image.pipeline
    if not pipeline:
        gr.Warning("Pipeline not initialized. Please generate an image first.")
        return
    settings.lora.enabled = True
    load_lora_weight(
        pipeline,
        settings,
    )

    # Rebuild the Gradio LoRA slider state: one (label, value, visibility)
    # triple per slot, active adapters first, unused slots hidden.
    # Note: _MAX_LORA_WEIGHTS is only read, so no `global` declaration needed.
    values = []
    labels = []
    rows = []
    active_weights = get_active_lora_weights()
    for lora in active_weights:
        labels.append(f"{lora[0]}: ")
        values.append(lora[1])
        rows.append(gr.Row.update(visible=True))
    for _ in range(len(active_weights), _MAX_LORA_WEIGHTS):
        labels.append("Update weight")
        values.append(0.0)
        rows.append(gr.Row.update(visible=False))
    return labels + values + rows
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_lora_models_ui() -> None:
    """Build the "Lora Models" tab.

    Presents a dropdown of LoRA weight files found in the configured
    models directory, a load button, and ``_MAX_LORA_WEIGHTS`` hidden
    slots (label + slider) that ``on_click_load_lora`` reveals for each
    currently loaded LoRA.
    """
    with gr.Blocks() as ui:
        gr.HTML(
            "Download and place your LoRA model weights in <b>lora_models</b> folders and restart App"
        )
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    # Discover LoRA files and validate the previously saved path
                    # against what is actually on disk.
                    lora_models_map = get_lora_models(
                        app_settings.settings.lcm_diffusion_setting.lora.models_dir
                    )
                    valid_model = get_valid_lora_model(
                        list(lora_models_map.values()),
                        app_settings.settings.lcm_diffusion_setting.lora.path,
                        app_settings.settings.lcm_diffusion_setting.lora.models_dir,
                    )
                    if valid_model != "":
                        valid_model_path = lora_models_map[valid_model]
                        app_settings.settings.lcm_diffusion_setting.lora.path = (
                            valid_model_path
                        )
                    else:
                        # Saved path is stale/missing; clear it.
                        app_settings.settings.lcm_diffusion_setting.lora.path = ""

                    lora_model = gr.Dropdown(
                        lora_models_map.keys(),
                        label="LoRA model",
                        info="LoRA model weight to load (You can use Lora models from Civitai or Hugging Face .safetensors format)",
                        value=valid_model,
                        interactive=True,
                    )

                    lora_weight = gr.Slider(
                        0.0,
                        1.0,
                        value=app_settings.settings.lcm_diffusion_setting.lora.weight,
                        step=0.05,
                        label="Initial Lora weight",
                        interactive=True,
                    )
                load_lora_btn = gr.Button(
                    "Load selected LoRA",
                    elem_id="load_lora_button",
                    scale=0,
                )

        with gr.Row():
            gr.Markdown(
                "## Loaded LoRA models",
                show_label=False,
            )
            # NOTE(review): this elem_id duplicates load_lora_btn's above,
            # producing two elements with the same HTML id — presumably to
            # share styling; confirm before changing.
            update_lora_weights_btn = gr.Button(
                "Update LoRA weights",
                elem_id="load_lora_button",
                scale=0,
            )

        # Pre-create the maximum number of LoRA slots, hidden by default;
        # on_click_load_lora toggles their visibility and contents.
        global _MAX_LORA_WEIGHTS
        global _custom_lora_sliders
        global _custom_lora_names
        global _custom_lora_columns
        for i in range(0, _MAX_LORA_WEIGHTS):
            new_row = gr.Column(visible=False)
            _custom_lora_columns.append(new_row)
            with new_row:
                lora_name = gr.Markdown(
                    "Lora Name",
                    show_label=True,
                )
                lora_slider = gr.Slider(
                    0.0,
                    1.0,
                    step=0.05,
                    label="LoRA weight",
                    interactive=True,
                    visible=True,
                )

            _custom_lora_names.append(lora_name)
            _custom_lora_sliders.append(lora_slider)

        # Outputs must stay in (names, sliders, columns) order — it matches
        # the flattened return value of on_click_load_lora.
        load_lora_btn.click(
            fn=on_click_load_lora,
            inputs=[lora_model, lora_weight],
            outputs=[
                *_custom_lora_names,
                *_custom_lora_sliders,
                *_custom_lora_columns,
            ],
        )

        update_lora_weights_btn.click(
            fn=on_click_update_weight,
            inputs=[*_custom_lora_sliders],
            outputs=None,
        )
|
src/frontend/webui/models_ui.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from constants import LCM_DEFAULT_MODEL
|
| 3 |
+
from state import get_settings
|
| 4 |
+
from frontend.utils import get_valid_model_id
|
| 5 |
+
|
| 6 |
+
# Shared application settings singleton.
app_settings = get_settings()
# Normalize the persisted OpenVINO model id: if the saved id is no longer in
# the available model list, replace it with a valid entry.
app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = get_valid_model_id(
    app_settings.openvino_lcm_models,
    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def change_lcm_model_id(model_id):
    """Persist the selected Diffusers LCM model id."""
    app_settings.settings.lcm_diffusion_setting.lcm_model_id = model_id


def change_lcm_lora_model_id(model_id):
    """Persist the selected LCM-LoRA adapter model id."""
    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id = model_id


def change_lcm_lora_base_model_id(model_id):
    """Persist the base model id used with the LCM-LoRA adapter."""
    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id = model_id


def change_openvino_lcm_model_id(model_id):
    """Persist the selected OpenVINO LCM model id."""
    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id = model_id
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def change_gguf_diffusion_model(model_path):
    """Persist the chosen GGUF diffusion model path ("None" clears it)."""
    gguf = app_settings.settings.lcm_diffusion_setting.gguf_model
    gguf.diffusion_path = "" if model_path == "None" else model_path


def change_gguf_clip_model(model_path):
    """Persist the chosen GGUF CLIP model path ("None" clears it)."""
    gguf = app_settings.settings.lcm_diffusion_setting.gguf_model
    gguf.clip_path = "" if model_path == "None" else model_path


def change_gguf_t5xxl_model(model_path):
    """Persist the chosen GGUF T5-XXL model path ("None" clears it)."""
    gguf = app_settings.settings.lcm_diffusion_setting.gguf_model
    gguf.t5xxl_path = "" if model_path == "None" else model_path


def change_gguf_vae_model(model_path):
    """Persist the chosen GGUF VAE model path ("None" clears it)."""
    gguf = app_settings.settings.lcm_diffusion_setting.gguf_model
    gguf.vae_path = "" if model_path == "None" else model_path
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def get_models_ui() -> None:
    """Build the "Models" settings tab.

    Every dropdown is pre-populated via ``get_valid_model_id`` (which falls
    back to a valid entry when the persisted id is stale) and wires its
    ``change`` event to a setter that writes the selection back into
    ``app_settings.settings.lcm_diffusion_setting``.
    """
    with gr.Blocks():
        with gr.Row():
            # Diffusers LCM model (plain LCM mode).
            lcm_model_id = gr.Dropdown(
                app_settings.lcm_models,
                label="LCM model",
                info="Diffusers LCM model ID",
                value=get_valid_model_id(
                    app_settings.lcm_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_model_id,
                    LCM_DEFAULT_MODEL,
                ),
                interactive=True,
            )
        with gr.Row():
            lcm_lora_model_id = gr.Dropdown(
                app_settings.lcm_lora_models,
                label="LCM LoRA model",
                info="Diffusers LCM LoRA model ID",
                value=get_valid_model_id(
                    app_settings.lcm_lora_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_lora.lcm_lora_id,
                ),
                interactive=True,
            )
            # NOTE(review): "stable_diffsuion_models" is a misspelled
            # attribute name coming from app_settings itself — do not
            # correct it here without renaming it at the source.
            lcm_lora_base_model_id = gr.Dropdown(
                app_settings.stable_diffsuion_models,
                label="LCM LoRA base model",
                info="Diffusers LCM LoRA base model ID",
                value=get_valid_model_id(
                    app_settings.stable_diffsuion_models,
                    app_settings.settings.lcm_diffusion_setting.lcm_lora.base_model_id,
                ),
                interactive=True,
            )
        with gr.Row():
            lcm_openvino_model_id = gr.Dropdown(
                app_settings.openvino_lcm_models,
                label="LCM OpenVINO model",
                info="OpenVINO LCM-LoRA fused model ID",
                value=get_valid_model_id(
                    app_settings.openvino_lcm_models,
                    app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id,
                ),
                interactive=True,
            )
        with gr.Row():
            # GGUF mode: diffusion model plus its companion encoder/VAE files.
            gguf_diffusion_model_id = gr.Dropdown(
                app_settings.gguf_diffusion_models,
                label="GGUF diffusion model",
                info="GGUF diffusion model ",
                value=get_valid_model_id(
                    app_settings.gguf_diffusion_models,
                    app_settings.settings.lcm_diffusion_setting.gguf_model.diffusion_path,
                ),
                interactive=True,
            )
        with gr.Row():
            gguf_clip_model_id = gr.Dropdown(
                app_settings.gguf_clip_models,
                label="GGUF CLIP model",
                info="GGUF CLIP model ",
                value=get_valid_model_id(
                    app_settings.gguf_clip_models,
                    app_settings.settings.lcm_diffusion_setting.gguf_model.clip_path,
                ),
                interactive=True,
            )
            gguf_t5xxl_model_id = gr.Dropdown(
                app_settings.gguf_t5xxl_models,
                label="GGUF T5-XXL model",
                info="GGUF T5-XXL model ",
                value=get_valid_model_id(
                    app_settings.gguf_t5xxl_models,
                    app_settings.settings.lcm_diffusion_setting.gguf_model.t5xxl_path,
                ),
                interactive=True,
            )
        with gr.Row():
            gguf_vae_model_id = gr.Dropdown(
                app_settings.gguf_vae_models,
                label="GGUF VAE model",
                info="GGUF VAE model ",
                value=get_valid_model_id(
                    app_settings.gguf_vae_models,
                    app_settings.settings.lcm_diffusion_setting.gguf_model.vae_path,
                ),
                interactive=True,
            )

        # Wire each dropdown to its persistence callback.
        lcm_model_id.change(
            change_lcm_model_id,
            lcm_model_id,
        )
        lcm_lora_model_id.change(
            change_lcm_lora_model_id,
            lcm_lora_model_id,
        )
        lcm_lora_base_model_id.change(
            change_lcm_lora_base_model_id,
            lcm_lora_base_model_id,
        )
        lcm_openvino_model_id.change(
            change_openvino_lcm_model_id,
            lcm_openvino_model_id,
        )
        gguf_diffusion_model_id.change(
            change_gguf_diffusion_model,
            gguf_diffusion_model_id,
        )
        gguf_clip_model_id.change(
            change_gguf_clip_model,
            gguf_clip_model_id,
        )
        gguf_t5xxl_model_id.change(
            change_gguf_t5xxl_model,
            gguf_t5xxl_model_id,
        )
        gguf_vae_model_id.change(
            change_gguf_vae_model,
            gguf_vae_model_id,
        )
|
src/frontend/webui/realtime_ui.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
from datetime import datetime
|
| 3 |
+
from time import perf_counter
|
| 4 |
+
|
| 5 |
+
import gradio as gr
|
| 6 |
+
import numpy as np
|
| 7 |
+
from backend.device import get_device_name, is_openvino_device
|
| 8 |
+
from backend.lcm_text_to_image import LCMTextToImage
|
| 9 |
+
from backend.models.lcmdiffusion_setting import LCMDiffusionSetting, LCMLora
|
| 10 |
+
from constants import APP_VERSION, DEVICE
|
| 11 |
+
from cv2 import imencode
|
| 12 |
+
|
| 13 |
+
# Module-level generator shared by all realtime requests; re-initialized by
# predict() on every call with the current settings.
lcm_text_to_image = LCMTextToImage()
# Default LCM-LoRA pairing. NOTE(review): predict() below configures an
# OpenVINO model id and never references this object — presumably kept for a
# non-OpenVINO fallback; confirm before removing.
lcm_lora = LCMLora(
    base_model_id="Lykon/dreamshaper-8",
    lcm_lora_id="latent-consistency/lcm-lora-sdv1-5",
)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# https://github.com/gradio-app/gradio/issues/2635#issuecomment-1423531319
def encode_pil_to_base64_new(pil_image):
    """Encode a PIL image as a ``data:image/png;base64,...`` URI via OpenCV."""
    # OpenCV expects BGR channel order, so reverse the last axis.
    bgr_pixels = np.asarray(pil_image)[:, :, ::-1]
    _, png_bytes = imencode(".png", bgr_pixels)
    encoded = base64.b64encode(png_bytes).decode("utf-8")
    return "data:image/png;base64," + encoded
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# Monkey-patch Gradio's PIL -> base64 encoder with the OpenCV-based version
# defined above (see the linked gradio issue for the motivation).
gr.processing_utils.encode_pil_to_base64 = encode_pil_to_base64_new
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def predict(
    prompt,
    steps,
    seed,
):
    """Run one realtime generation and return the first generated image.

    Builds a fresh LCMDiffusionSetting (fixed 512x512, guidance 1.0,
    OpenVINO when available), re-initializes the shared generator, and
    logs the wall-clock latency of the generate call.
    """
    config = LCMDiffusionSetting()
    config.openvino_lcm_model_id = "rupeshs/sdxs-512-0.9-openvino"
    config.prompt = prompt
    config.guidance_scale = 1.0
    config.inference_steps = steps
    config.seed = seed
    config.use_seed = True
    config.image_width = 512
    config.image_height = 512
    config.use_openvino = bool(is_openvino_device())
    config.use_tiny_auto_encoder = True
    lcm_text_to_image.init(
        DEVICE,
        config,
    )

    start = perf_counter()
    images = lcm_text_to_image.generate(config)
    latency = perf_counter() - start
    print(f"Latency: {latency:.2f} seconds")

    return images[0]
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
css = """
|
| 62 |
+
#container{
|
| 63 |
+
margin: 0 auto;
|
| 64 |
+
max-width: 40rem;
|
| 65 |
+
}
|
| 66 |
+
#intro{
|
| 67 |
+
max-width: 100%;
|
| 68 |
+
text-align: center;
|
| 69 |
+
margin: 0 auto;
|
| 70 |
+
}
|
| 71 |
+
#generate_button {
|
| 72 |
+
color: white;
|
| 73 |
+
border-color: #007bff;
|
| 74 |
+
background: #007bff;
|
| 75 |
+
width: 200px;
|
| 76 |
+
height: 50px;
|
| 77 |
+
}
|
| 78 |
+
footer {
|
| 79 |
+
visibility: hidden
|
| 80 |
+
}
|
| 81 |
+
"""
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def _get_footer_message() -> str:
    """Compose the HTML footer (app version + copyright years)."""
    current_year = datetime.now().year
    credit = (
        f' © 2023 - {current_year} <a href="https://github.com/rupeshs">'
        " Rupesh Sreeraman</a></p></center>"
    )
    return f"<center><p> {APP_VERSION} " + credit
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
# Top-level realtime demo. Generation is triggered on every prompt
# keystroke (prompt.input), on button click, and whenever steps/seed change.
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="container"):
        use_openvino = "- OpenVINO" if is_openvino_device() else ""
        gr.Markdown(
            f"""# Realtime FastSD CPU {use_openvino}
               **Device : {DEVICE} , {get_device_name()}**
            """,
            elem_id="intro",
        )

        with gr.Row():
            with gr.Row():
                prompt = gr.Textbox(
                    placeholder="Describe the image you'd like to see",
                    scale=5,
                    container=False,
                )
                generate_btn = gr.Button(
                    "Generate",
                    scale=1,
                    elem_id="generate_button",
                )

        image = gr.Image(type="filepath")

        # Steps slider is hidden from the user but still wired to re-generate
        # on change.
        steps = gr.Slider(
            label="Steps",
            value=1,
            minimum=1,
            maximum=6,
            step=1,
            visible=False,
        )
        seed = gr.Slider(
            randomize=True,
            minimum=0,
            maximum=999999999,
            label="Seed",
            step=1,
        )
        gr.HTML(_get_footer_message())

        inputs = [prompt, steps, seed]
        prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        generate_btn.click(
            fn=predict, inputs=inputs, outputs=image, show_progress=False
        )
        steps.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
        seed.change(fn=predict, inputs=inputs, outputs=image, show_progress=False)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def start_realtime_text_to_image(share=False):
    """Enable request queuing and launch the realtime demo.

    ``share=True`` asks Gradio to create a public share link.
    """
    demo.queue()
    demo.launch(share=share)
|
src/frontend/webui/text_to_image_ui.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
from typing import Any
|
| 3 |
+
from backend.models.lcmdiffusion_setting import DiffusionTask
|
| 4 |
+
from models.interface_types import InterfaceType
|
| 5 |
+
from constants import DEVICE
|
| 6 |
+
from state import get_settings, get_context
|
| 7 |
+
from frontend.utils import is_reshape_required
|
| 8 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 9 |
+
from frontend.webui.errors import show_error
|
| 10 |
+
|
| 11 |
+
# Shared application settings singleton.
app_settings = get_settings()

# Geometry/model/batch values from the previous generation call; compared by
# generate_text_to_image to decide whether the OpenVINO pipeline must be
# reshaped before the next run.
previous_width = 0
previous_height = 0
previous_model_id = ""
previous_num_of_images = 0
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def generate_text_to_image(
    prompt,
    neg_prompt,
) -> Any:
    """Generate images from *prompt*/*neg_prompt* using current settings.

    Writes the prompts into the shared settings, runs generation in a
    worker thread, saves the results on success, and surfaces the backend
    error via ``show_error`` on failure. Returns the image list (or the
    falsy result from the backend when generation failed).
    """
    context = get_context(InterfaceType.WEBUI)
    global \
        previous_height, \
        previous_width, \
        previous_model_id, \
        previous_num_of_images, \
        app_settings
    app_settings.settings.lcm_diffusion_setting.prompt = prompt
    app_settings.settings.lcm_diffusion_setting.negative_prompt = neg_prompt
    app_settings.settings.lcm_diffusion_setting.diffusion_task = (
        DiffusionTask.text_to_image.value
    )
    model_id = app_settings.settings.lcm_diffusion_setting.openvino_lcm_model_id
    reshape = False
    image_width = app_settings.settings.lcm_diffusion_setting.image_width
    image_height = app_settings.settings.lcm_diffusion_setting.image_height
    num_images = app_settings.settings.lcm_diffusion_setting.number_of_images
    if app_settings.settings.lcm_diffusion_setting.use_openvino:
        # OpenVINO pipelines are compiled for a fixed shape: only reshape when
        # geometry, model or batch size changed since the previous call.
        reshape = is_reshape_required(
            previous_width,
            image_width,
            previous_height,
            image_height,
            previous_model_id,
            model_id,
            previous_num_of_images,
            num_images,
        )

    # Run generation in a single worker thread; result() blocks until done.
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(
            context.generate_text_to_image,
            app_settings.settings,
            reshape,
            DEVICE,
        )
        images = future.result()
    if images:
        context.save_images(
            images,
            app_settings.settings,
        )
    else:
        show_error(context.error)

    # Remember this call's parameters for the next reshape decision.
    previous_width = image_width
    previous_height = image_height
    previous_model_id = model_id
    previous_num_of_images = num_images
    return images
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def get_text_to_image_ui() -> None:
    """Build the "Text to Image" tab: prompt inputs plus an output gallery."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                with gr.Row():
                    prompt = gr.Textbox(
                        show_label=False,
                        lines=3,
                        placeholder="A fantasy landscape",
                        container=False,
                    )

                    generate_btn = gr.Button(
                        "Generate",
                        elem_id="generate_button",
                        scale=0,
                    )
                negative_prompt = gr.Textbox(
                    label="Negative prompt (Works in LCM-LoRA mode, set guidance > 1.0) :",
                    lines=1,
                    placeholder="",
                )

                input_params = [prompt, negative_prompt]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )
        generate_btn.click(
            fn=generate_text_to_image,
            inputs=input_params,
            outputs=output,
        )
|
src/frontend/webui/ui.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
|
| 2 |
+
|
| 3 |
+
import gradio as gr
|
| 4 |
+
from backend.device import get_device_name
|
| 5 |
+
from constants import APP_VERSION
|
| 6 |
+
from frontend.webui.controlnet_ui import get_controlnet_ui
|
| 7 |
+
from frontend.webui.generation_settings_ui import get_generation_settings_ui
|
| 8 |
+
from frontend.webui.image_to_image_ui import get_image_to_image_ui
|
| 9 |
+
from frontend.webui.image_variations_ui import get_image_variations_ui
|
| 10 |
+
from frontend.webui.lora_models_ui import get_lora_models_ui
|
| 11 |
+
from frontend.webui.models_ui import get_models_ui
|
| 12 |
+
from frontend.webui.text_to_image_ui import get_text_to_image_ui
|
| 13 |
+
from frontend.webui.upscaler_ui import get_upscaler_ui
|
| 14 |
+
from state import get_settings
|
| 15 |
+
|
| 16 |
+
# Shared application settings singleton used by the tab builders below.
app_settings = get_settings()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _get_footer_message() -> str:
    """Compose the HTML footer (app version + copyright years)."""
    current_year = datetime.now().year
    credit = (
        f' © 2023 - {current_year} <a href="https://github.com/rupeshs">'
        " Rupesh Sreeraman</a></p></center>"
    )
    return f"<center><p> {APP_VERSION} " + credit
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def get_web_ui() -> gr.Blocks:
    """Assemble and return the full FastSD CPU Gradio interface."""

    def change_mode(mode):
        # Radio callback: map the chosen mode onto the three mutually
        # exclusive backend flags (all cleared first, then one set).
        global app_settings
        app_settings.settings.lcm_diffusion_setting.use_lcm_lora = False
        app_settings.settings.lcm_diffusion_setting.use_openvino = False
        app_settings.settings.lcm_diffusion_setting.use_gguf_model = False
        if mode == "LCM-LoRA":
            app_settings.settings.lcm_diffusion_setting.use_lcm_lora = True
        elif mode == "LCM-OpenVINO":
            app_settings.settings.lcm_diffusion_setting.use_openvino = True
        elif mode == "GGUF":
            app_settings.settings.lcm_diffusion_setting.use_gguf_model = True

    # Prevent saved LoRA and ControlNet settings from being used by
    # default; in WebUI mode, the user must explicitly enable those
    if app_settings.settings.lcm_diffusion_setting.lora:
        app_settings.settings.lcm_diffusion_setting.lora.enabled = False
    if app_settings.settings.lcm_diffusion_setting.controlnet:
        app_settings.settings.lcm_diffusion_setting.controlnet.enabled = False
    theme = gr.themes.Default(
        primary_hue="blue",
    )
    with gr.Blocks(
        title="FastSD CPU",
        theme=theme,
        css="footer {visibility: hidden}",
    ) as fastsd_web_ui:
        gr.HTML("<center><H1>FastSD CPU</H1></center>")
        gr.Markdown(
            f"**Processor : {get_device_name()}**",
            elem_id="processor",
        )
        # Derive the initial radio value from the persisted backend flags.
        current_mode = "LCM"
        if app_settings.settings.lcm_diffusion_setting.use_openvino:
            current_mode = "LCM-OpenVINO"
        elif app_settings.settings.lcm_diffusion_setting.use_lcm_lora:
            current_mode = "LCM-LoRA"
        elif app_settings.settings.lcm_diffusion_setting.use_gguf_model:
            current_mode = "GGUF"

        mode = gr.Radio(
            ["LCM", "LCM-LoRA", "LCM-OpenVINO", "GGUF"],
            label="Mode",
            info="Current working mode",
            value=current_mode,
        )
        mode.change(change_mode, inputs=mode)

        # One tab per feature; each builder attaches its own components.
        with gr.Tabs():
            with gr.TabItem("Text to Image"):
                get_text_to_image_ui()
            with gr.TabItem("Image to Image"):
                get_image_to_image_ui()
            with gr.TabItem("Image Variations"):
                get_image_variations_ui()
            with gr.TabItem("Upscaler"):
                get_upscaler_ui()
            with gr.TabItem("Generation Settings"):
                get_generation_settings_ui()
            with gr.TabItem("Models"):
                get_models_ui()
            with gr.TabItem("Lora Models"):
                get_lora_models_ui()
            with gr.TabItem("ControlNet"):
                get_controlnet_ui()

        gr.HTML(_get_footer_message())

    return fastsd_web_ui
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def start_webui(
    share: bool = False,
):
    """Build the web UI, enable request queuing, and launch the server.

    ``share=True`` asks Gradio to create a public share link.
    """
    webui = get_web_ui()
    webui.queue()
    webui.launch(share=share)
|
src/frontend/webui/upscaler_ui.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
import gradio as gr
|
| 3 |
+
from models.interface_types import InterfaceType
|
| 4 |
+
from state import get_settings, get_context
|
| 5 |
+
from backend.upscale.upscaler import upscale_image
|
| 6 |
+
from backend.models.upscale import UpscaleMode
|
| 7 |
+
from paths import FastStableDiffusionPaths
|
| 8 |
+
|
| 9 |
+
# Shared application settings singleton.
app_settings = get_settings()


# NOTE(review): these previous_* trackers appear unused in this module
# (create_upscaled_image never reads or writes them); likely copied from the
# text-to-image UI — confirm before removing.
previous_width = 0
previous_height = 0
previous_model_id = ""
previous_num_of_images = 0
+
|
| 17 |
+
|
| 18 |
+
def create_upscaled_image(
    source_image,
    upscale_mode,
) -> Any:
    """Upscale *source_image* with the selected method and return the result.

    "EDSR" (default) and "SD" upscale 2x; "AURA-SR" upscales 4x. The output
    path is derived from the configured generated-images format.
    """
    context = get_context(InterfaceType.WEBUI)
    if upscale_mode == "SD":
        mode, scale_factor = UpscaleMode.sd_upscale.value, 2
    elif upscale_mode == "AURA-SR":
        mode, scale_factor = UpscaleMode.aura_sr.value, 4
    else:
        mode, scale_factor = UpscaleMode.normal.value, 2

    destination = FastStableDiffusionPaths.get_upscale_filepath(
        None,
        scale_factor,
        app_settings.settings.generated_images.format,
    )
    return upscale_image(
        context=context,
        src_image_path=source_image,
        dst_image_path=destination,
        upscale_mode=mode,
    )
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def get_upscaler_ui() -> None:
    """Build the "Upscaler" tab: source image, mode selector, output gallery."""
    with gr.Blocks():
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Image", type="filepath")
                with gr.Row():
                    upscale_mode = gr.Radio(
                        ["EDSR", "SD", "AURA-SR"],
                        label="Upscale Mode (2x) | AURA-SR v2 (4x)",
                        info="Select upscale method(For SD Scale GGUF mode is not supported)",
                        value="EDSR",
                    )

                    generate_btn = gr.Button(
                        "Upscale",
                        elem_id="generate_button",
                        scale=0,
                    )

                input_params = [
                    input_image,
                    upscale_mode,
                ]

            with gr.Column():
                output = gr.Gallery(
                    label="Generated images",
                    show_label=True,
                    elem_id="gallery",
                    columns=2,
                    height=512,
                )

        generate_btn.click(
            fn=create_upscaled_image,
            inputs=input_params,
            outputs=output,
        )
|