import gradio as gr
import os
import asyncio
from pathlib import Path
from random import randint
from threading import RLock

from all_models import models
from externalmod import gr_Interface_load
from prompt_extend import extend_prompt

# Serializes writes to the shared image.png output file.
lock = RLock()
# os.environ.get() already returns None when the variable is unset.
HF_TOKEN = os.environ.get("HF_TOKEN")

inference_timeout = 300
MAX_SEED = 2**32 - 1
current_model = models[0]
# Short display names (repo part only) for the first six models.
new_models = ", ".join([m.split("/")[-1] for m in models[:6]])
text_gen1 = extend_prompt
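
# NOTE: gr_Interface_load comes from the local externalmod module; it
# presumably wraps each "models/<repo>" endpoint lazily, so nothing heavy
# is loaded until the first inference call.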
models2 = [gr_Interface_load(f"models/{m}", live=False, preprocess=True,
                             postprocess=False, hf_token=HF_TOKEN) for m in models]
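

# Expand a short idea into a full prompt using the prompt_extend helper.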
def text_it1(inputs, text_gen1=text_gen1):
    return text_gen1(inputs)


# Relabel the output image when a new model is chosen in the dropdown.
def set_model(model_index):
    # type="index" on the dropdown means model_index arrives as an int.
    return gr.update(label=f"{models[model_index]}")
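

# Thin wrapper so the click/submit events can call the synchronous pipeline.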
def send_it1(inputs, model_choice, neg_input, height, width, steps, cfg, seed):
    output1 = gen_fn(model_choice, inputs, neg_input, height, width, steps, cfg, seed)
    return output1
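

# Run one generation with a hard timeout; the blocking model call is pushed
# to a worker thread via asyncio.to_thread so wait_for can give up on it.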
async def infer(model_index, prompt, nprompt="", height=None, width=None,
                steps=None, cfg=None, seed=-1, timeout=inference_timeout):
    # Only forward parameters the user actually set (0 / -1 mean "default").
    kwargs = {}
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    if seed >= 0:
        kwargs["seed"] = seed
        noise = ""
    else:
        # Random-length whitespace appended to the prompt; this appears to act
        # as a cache-buster so repeated prompts still produce fresh images.
        noise = " " * randint(1, 500)
    task = asyncio.create_task(asyncio.to_thread(
        models2[model_index].fn,
        prompt=f"{prompt} {noise}", negative_prompt=nprompt,
        **kwargs, token=HF_TOKEN))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"Task timed out: {models2[model_index]}")
        if not task.done(): task.cancel()
        result = None
    except Exception as e:
        print(e)
        print(f"Task failed: {models2[model_index]}")
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            png_path = "image.png"
            result.save(png_path)
            return str(Path(png_path).resolve())
    return None
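

# Gradio callbacks run outside any asyncio event loop, so gen_fn spins up a
# private loop per call and tears it down afterwards.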
def gen_fn(model_index, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
    # Create the loop before the try block so `finally` can always close it.
    loop = asyncio.new_event_loop()
    try:
        result = loop.run_until_complete(infer(model_index, prompt, nprompt,
                                               height, width, steps, cfg, seed, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {models2[model_index]}")
        result = None
    finally:
        loop.close()
    return result
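

# Page-level CSS; the gradient and border colors match the inline <style>
# block injected in the HTML header below.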
css = """
.gradio-container {font-family: 'IBM Plex Sans', sans-serif !important;
    text-align: center; max-width: 1200px; margin: 0 auto !important;}
h1 {font-size: 6em; color: #ffc99f; margin-top: 30px; margin-bottom: 30px;
    text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;}
h3 {color: #ffc99f !important;}
h4 {display: inline-block; color: #ffffff !important;}
.wrapper img {font-size: 98% !important; white-space: nowrap !important;
    text-align: center !important; display: inline-block !important; color: #ffffff !important;}
.wrapper {color: #ffffff !important;}
.text-gray-500 {color: #ffc99f !important;}
.gr-box {background-image: linear-gradient(#182634, #1e2f40, #254150) !important; border-top-color: #000000 !important;
    border-right-color: #ffffff !important; border-bottom-color: #ffffff !important; border-left-color: #000000 !important;}
"""
with gr.Blocks(theme='John6666/YntecLight', fill_width=True, css=css) as myface:
    gr.HTML(f"""
    <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
      <div>
        <style>
        h1 {{
          font-size: 6em;
          color: #ffc99f;
          margin-top: 30px;
          margin-bottom: 30px;
          text-shadow: 3px 3px 0 rgba(0, 0, 0, 1) !important;
        }}
        h3 {{
          color: #ffc99f !important;
        }}
        h4 {{
          display: inline-block;
          color: #ffffff !important;
        }}
        .wrapper img {{
          font-size: 98% !important;
          white-space: nowrap !important;
          text-align: center !important;
          display: inline-block !important;
          color: #ffffff !important;
        }}
        .wrapper {{
          color: #ffffff !important;
        }}
        .gradio-container {{
          background-image: linear-gradient(#254150, #1e2f40, #182634) !important;
          color: #ffaa66 !important;
          font-family: 'IBM Plex Sans', sans-serif !important;
        }}
        .text-gray-500 {{
          color: #ffc99f !important;
        }}
        .gr-box {{
          background-image: linear-gradient(#182634, #1e2f40, #254150) !important;
          border-top-color: #000000 !important;
          border-right-color: #ffffff !important;
          border-bottom-color: #ffffff !important;
          border-left-color: #000000 !important;
        }}
        .gr-input {{
          color: #ffc99f !important;
          background-color: #254150 !important;
        }}
        :root {{
          --neutral-100: #000000 !important;
        }}
        </style>
        <div class="center"><h1>Toy World</h1></div>
      </div>
      <h3 style="margin-bottom: 1px; color: #ffaa66;">Blitz Diffusion - {len(models)} Stable Diffusion models, but why? For your enjoyment!</h3>
      <br><div class="wrapper">8.1 <img src="https://huggingface.co/Yntec/DucHaitenLofi/resolve/main/NEW.webp" alt="NEW!" style="width:32px;height:16px;">Toys to play with: {new_models} and 6 more!</div>
      <br><h4 style="margin-bottom: 1px; font-size: 98%;">If a model is already loaded, each new image takes less than <b>10</b> seconds to generate!</h4>
      <br><div class="wrapper" style="margin-bottom: 1px;">Generate 6 images from 1 prompt at the <u><a href="https://huggingface.co/spaces/Yntec/PrintingPress">PrintingPress</a></u>, and use 6 models simultaneously at <u><a href="https://huggingface.co/spaces/Yntec/diffusion80xx">Diffusion80XX</a></u>!</div>
    </div>
    """, elem_classes="gr-box")
    with gr.Row():
        with gr.Column(scale=100):
            model_name1 = gr.Dropdown(label="Select Model", choices=models, type="index",
                                      value=current_model, interactive=True, elem_classes="gr-box")
    with gr.Row():
        with gr.Column(scale=100):
            with gr.Group():
                magic1 = gr.Textbox(label="Your Prompt", lines=4, elem_classes="gr-box")
                with gr.Accordion("Advanced", open=False, visible=True):
                    neg_input = gr.Textbox(label='Negative prompt', lines=1, elem_classes="gr-box")
                    with gr.Row():
                        width = gr.Slider(label="Width", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes="gr-box")
                        height = gr.Slider(label="Height", info="If 0, the default value is used.", maximum=1216, step=32, value=0, elem_classes="gr-box")
                    with gr.Row():
                        steps = gr.Slider(label="Number of inference steps", info="If 0, the default value is used.", maximum=100, step=1, value=0, elem_classes="gr-box")
                        cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0, elem_classes="gr-box")
                        seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1, elem_classes="gr-box")
            run = gr.Button("Generate Image", elem_classes="gr-button")
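    # Output image; its label tracks the selected model via set_model().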
    with gr.Row():
        with gr.Column():
            output1 = gr.Image(label=f"{current_model}", format="png", elem_classes="gr-box")
    with gr.Row():
        with gr.Column(scale=50):
            input_text = gr.Textbox(label="Use this box to extend an idea automagically, by typing some words and clicking Extend Idea", lines=2, elem_classes="gr-box")
            see_prompts = gr.Button("Extend Idea -> overwrite the contents of the 'Your Prompt' box above", elem_classes="gr-button")
            use_short = gr.Button("Copy the contents of this box to the 'Your Prompt' box above", elem_classes="gr-button")

    def short_prompt(inputs):
        return inputs
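
    # Event wiring: the dropdown relabels the output, the button and Enter in
    # the prompt box trigger generation, and the helper buttons fill the prompt.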
    model_name1.change(set_model, inputs=model_name1, outputs=[output1])
    gr.on(
        triggers=[run.click, magic1.submit],
        fn=send_it1,
        inputs=[magic1, model_name1, neg_input, height, width, steps, cfg, seed],
        outputs=[output1],
    )
    use_short.click(short_prompt, inputs=[input_text], outputs=magic1)
    see_prompts.click(text_it1, inputs=[input_text], outputs=magic1)
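
# inline=True embeds the UI when running inside a notebook; show_api=False
# hides the auto-generated "Use via API" page.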
myface.launch(inline=True, show_api=False)