import asyncio
from pathlib import Path
from threading import RLock

import gradio as gr
from huggingface_hub import InferenceClient


server_timeout = 600     # seconds to wait for the Serverless Inference API to respond
inference_timeout = 300  # timeout for a single generation request


lock = RLock()
loaded_models = {}    # model_name -> InferenceClient or gr.Interface
model_info_dict = {}  # model_name -> metadata dict for the UI


def to_list(s: str) -> list[str]:
    """Split a comma-separated string into stripped, non-empty items."""
    return [x.strip() for x in s.split(",") if x.strip()]


def list_sub(a: list, b: list) -> list:
    """Return the items of a that do not appear in b."""
    return [e for e in a if e not in b]


def list_uniq(lst: list) -> list:
    """Deduplicate a list while preserving the original order."""
    return sorted(set(lst), key=lst.index)


def is_repo_name(s: str):
    import re
    return re.fullmatch(r'[^/]+/[^/]+', s)


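# Quick sanity check for the helpers above (hypothetical values, shown as
# comments rather than executed at import time):
#
#   to_list("a, b , a")            -> ["a", "b", "a"]
#   list_uniq(["a", "b", "a"])     -> ["a", "b"]
#   list_sub(["a", "b"], ["b"])    -> ["a"]
#   bool(is_repo_name("org/repo")) -> True

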
def find_model_list(author: str = "", tags: list[str] = [], not_tag="", sort: str = "last_modified", limit: int = 30):
    from huggingface_hub import HfApi
    api = HfApi()
    default_tags = ["diffusers"]
    if not sort: sort = "last_modified"
    models = []
    try:
        # Over-fetch so that private / gated / excluded repos can be filtered out below.
        model_infos = api.list_models(author=author, pipeline_tag="text-to-image",
                                      tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit * 5)
    except Exception as e:
        print("Error: Failed to list models.")
        print(e)
        return models
    for model in model_infos:
        if not model.private and not model.gated:
            if not_tag and not_tag in model.tags: continue
            models.append(model.id)
            if len(models) == limit: break
    return models


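# Example (hypothetical author and tags; requires network access):
#
#   models = find_model_list(author="someuser", tags=["anime"], not_tag="flux",
#                            sort="likes", limit=20)

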
def get_t2i_model_info_dict(repo_id: str):
    from huggingface_hub import HfApi
    api = HfApi()
    info = {"md": "None"}
    try:
        if not is_repo_name(repo_id) or not api.repo_exists(repo_id=repo_id): return info
        model = api.model_info(repo_id=repo_id)
    except Exception as e:
        print(f"Error: Failed to get {repo_id}'s info.")
        print(e)
        return info
    if model.private or model.gated: return info
    try:
        tags = model.tags
    except Exception as e:
        print(e)
        return info
    if 'diffusers' not in tags: return info
    if 'diffusers:StableDiffusionXLPipeline' in tags: info["ver"] = "SDXL"
    elif 'diffusers:StableDiffusionPipeline' in tags: info["ver"] = "SD1.5"
    elif 'diffusers:StableDiffusion3Pipeline' in tags: info["ver"] = "SD3"
    else: info["ver"] = "Other"
    info["url"] = f"https://huggingface.co/{repo_id}/"
    info["tags"] = model.card_data.tags if model.card_data and model.card_data.tags else []
    info["downloads"] = model.downloads
    info["likes"] = model.likes
    info["last_modified"] = model.last_modified.strftime("lastmod: %Y-%m-%d")
    un_tags = ['text-to-image', 'stable-diffusion', 'stable-diffusion-api', 'safetensors', 'stable-diffusion-xl']
    descs = [info["ver"]] + list_sub(info["tags"], un_tags) + [f'DLs: {info["downloads"]}'] + [f'❤: {info["likes"]}'] + [info["last_modified"]]
    info["md"] = f'Model Info: {", ".join(descs)} [Model Repo]({info["url"]})'
    return info


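# The returned dict feeds the model-info Markdown in the UI, e.g. (hypothetical values):
#
#   {"ver": "SDXL", "url": "https://huggingface.co/owner/name/", "tags": [...],
#    "downloads": 1234, "likes": 56, "last_modified": "lastmod: 2024-01-01",
#    "md": "Model Info: SDXL, ..., DLs: 1234, ❤: 56, lastmod: 2024-01-01 [Model Repo](...)"}

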
def rename_image(image_path: str | None, model_name: str, save_path: str | None = None):
    from PIL import Image
    from datetime import datetime, timezone, timedelta
    if image_path is None: return None
    dt_now = datetime.now(timezone(timedelta(hours=9)))  # JST timestamp for the filename
    filename = f"{model_name.split('/')[-1]}_{dt_now.strftime('%Y%m%d_%H%M%S')}.png"
    try:
        if Path(image_path).exists():
            png_path = "image.png"
            Image.open(image_path).convert('RGBA').save(png_path, "PNG")
            if save_path is not None:
                new_path = str(Path(png_path).resolve().rename(Path(save_path).resolve()))
            else:
                new_path = str(Path(png_path).resolve().rename(Path(filename).resolve()))
            return new_path
        else:
            return None
    except Exception as e:
        print(e)
        return None


def save_gallery(image_path: str | None, images: list[tuple] | None):
    if images is None: images = []
    files = [i[0] for i in images]
    if image_path is None: return images, files
    files.insert(0, str(image_path))
    images.insert(0, (str(image_path), Path(image_path).stem))
    return images, files


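# Gradio galleries accept (path, caption) tuples, so the newest image is prepended
# with its filename stem as the caption. A minimal sketch (hypothetical path):
#
#   images, files = save_gallery("/tmp/foo.png", [])
#   # -> [("/tmp/foo.png", "foo")], ["/tmp/foo.png"]

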
def load_from_model(model_name: str, hf_token: str | None = None):
    import httpx
    import huggingface_hub
    from gradio.exceptions import ModelNotFoundError
    model_url = f"https://huggingface.co/{model_name}"
    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
    print(f"Fetching model from: {model_url}")

    # Probe the Inference API first so a missing or private repo fails fast.
    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
    response = httpx.request("GET", api_url, headers=headers)
    if response.status_code != 200:
        raise ModelNotFoundError(
            f"Could not find model: {model_name}. If it is a private or gated model, please provide your Hugging Face access token (https://huggingface.co/settings/tokens) as the argument for the `hf_token` parameter."
        )
    headers["X-Wait-For-Model"] = "true"
    client = huggingface_hub.InferenceClient(model=model_name, headers=headers,
                                             token=hf_token, timeout=server_timeout)
    inputs = gr.components.Textbox(label="Input")
    outputs = gr.components.Image(label="Output")
    fn = client.text_to_image

    def query_huggingface_inference_endpoints(*data, **kwargs):
        return fn(*data, **kwargs)

    interface_info = {
        "fn": query_huggingface_inference_endpoints,
        "inputs": inputs,
        "outputs": outputs,
        "title": model_name,
    }
    return gr.Interface(**interface_info)


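# Note: the gr.Interface returned above is used as a lightweight container rather
# than a UI; infer_body() calls its .fn attribute directly when generating.

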
def load_model(model_name: str):
    global loaded_models
    global model_info_dict
    if model_name in loaded_models: return loaded_models[model_name]
    try:
        loaded_models[model_name] = load_from_model(model_name)
        print(f"Loaded: {model_name}")
    except Exception as e:
        if model_name in loaded_models: del loaded_models[model_name]
        print(f"Failed to load: {model_name}")
        print(e)
        return None
    try:
        model_info_dict[model_name] = get_t2i_model_info_dict(model_name)
        print(f"Assigned: {model_name}")
    except Exception as e:
        if model_name in model_info_dict: del model_info_dict[model_name]
        print(f"Failed to assign: {model_name}")
        print(e)
    return loaded_models[model_name]


def load_model_api(model_name: str):
    global loaded_models
    global model_info_dict
    if model_name in loaded_models: return loaded_models[model_name]
    try:
        # Check availability with a short timeout before committing to the long one.
        client = InferenceClient(timeout=5)
        status = client.get_model_status(model_name)
        if status is None or status.framework != "diffusers" or status.state not in ["Loadable", "Loaded"]:
            print(f"Failed to load by API: {model_name}")
            return None
        else:
            loaded_models[model_name] = InferenceClient(model_name, timeout=server_timeout)
            print(f"Loaded by API: {model_name}")
    except Exception as e:
        if model_name in loaded_models: del loaded_models[model_name]
        print(f"Failed to load by API: {model_name}")
        print(e)
        return None
    try:
        model_info_dict[model_name] = get_t2i_model_info_dict(model_name)
        print(f"Assigned by API: {model_name}")
    except Exception as e:
        if model_name in model_info_dict: del model_info_dict[model_name]
        print(f"Failed to assign by API: {model_name}")
        print(e)
    return loaded_models[model_name]


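# Usage sketch (hypothetical repo id; requires network access):
#
#   client = load_model_api("owner/some-sdxl-model")
#   if client is not None:
#       ...  # client is an InferenceClient ready for text_to_image()

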
def load_models(models: list):
    for model in models:
        load_model(model)


positive_prefix = {
    "Pony": to_list("score_9, score_8_up, score_7_up"),
    "Pony Anime": to_list("source_anime, anime, score_9, score_8_up, score_7_up"),
}
positive_suffix = {
    "Common": to_list("highly detailed, masterpiece, best quality, very aesthetic, absurdres"),
    "Anime": to_list("anime artwork, anime style, studio anime, highly detailed"),
}
negative_prefix = {
    "Pony": to_list("score_6, score_5, score_4"),
    "Pony Anime": to_list("score_6, score_5, score_4, source_pony, source_furry, source_cartoon"),
    "Pony Real": to_list("score_6, score_5, score_4, source_anime, source_pony, source_furry, source_cartoon"),
}
negative_suffix = {
    "Common": to_list("lowres, (bad), bad hands, bad feet, text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract]"),
    "Pony Anime": to_list("busty, ugly face, mutated hands, low res, blurry face, black and white, the simpsons, overwatch, apex legends"),
    "Pony Real": to_list("ugly, airbrushed, simple background, cgi, cartoon, anime"),
}
# Build flat lookup lists of every preset tag, in both "snake_case" and
# "spaced" form, so user-typed duplicates can be stripped in recom_prompt().
positive_all = []
negative_all = []
for v in (positive_prefix | positive_suffix).values():
    positive_all += v + [s.replace("_", " ") for s in v]
positive_all = list_uniq(positive_all)
for v in (negative_prefix | negative_suffix).values():
    negative_all += v + [s.replace("_", " ") for s in v]
negative_all = list_uniq(negative_all)


def recom_prompt(prompt: str = "", neg_prompt: str = "", pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    def flatten(src):
        return [item for row in src for item in row]
    prompts = to_list(prompt)
    neg_prompts = to_list(neg_prompt)
    # Strip preset tags the user already typed; they are re-added in preset order below.
    prompts = list_sub(prompts, positive_all)
    neg_prompts = list_sub(neg_prompts, negative_all)
    # Keep a trailing comma when the user part of the prompt is empty.
    last_empty_p = [""] if not prompts else []
    last_empty_np = [""] if not neg_prompts else []
    prefix_ps = flatten([positive_prefix.get(s, []) for s in pos_pre])
    suffix_ps = flatten([positive_suffix.get(s, []) for s in pos_suf])
    prefix_nps = flatten([negative_prefix.get(s, []) for s in neg_pre])
    suffix_nps = flatten([negative_suffix.get(s, []) for s in neg_suf])
    prompt = ", ".join(list_uniq(prefix_ps + prompts + suffix_ps) + last_empty_p)
    neg_prompt = ", ".join(list_uniq(prefix_nps + neg_prompts + suffix_nps) + last_empty_np)
    return prompt, neg_prompt


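# Worked example (values follow the presets defined above):
#
#   recom_prompt("1girl", "", pos_pre=["Pony"], pos_suf=["Common"],
#                neg_pre=["Pony"], neg_suf=["Common"])
#   # -> ("score_9, score_8_up, score_7_up, 1girl, highly detailed, masterpiece, ...",
#   #     "score_6, score_5, score_4, lowres, (bad), ..., [abstract], ")

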
recom_prompt_type = {
    "None": ([], [], [], []),
    "Auto": ([], [], [], []),
    "Common": ([], ["Common"], [], ["Common"]),
    "Animagine": ([], ["Common", "Anime"], [], ["Common"]),
    "Pony": (["Pony"], ["Common"], ["Pony"], ["Common"]),
    "Pony Anime": (["Pony", "Pony Anime"], ["Common", "Anime"], ["Pony", "Pony Anime"], ["Common", "Pony Anime"]),
    "Pony Real": (["Pony"], ["Common"], ["Pony", "Pony Real"], ["Common", "Pony Real"]),
}


enable_auto_recom_prompt = False


def insert_recom_prompt(prompt: str = "", neg_prompt: str = "", type: str = "None"):
    global enable_auto_recom_prompt
    enable_auto_recom_prompt = (type == "Auto")
    pos_pre, pos_suf, neg_pre, neg_suf = recom_prompt_type.get(type, ([], [], [], []))
    return recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)


def set_recom_prompt_preset(type: str = "None"):
    pos_pre, pos_suf, neg_pre, neg_suf = recom_prompt_type.get(type, ([], [], [], []))
    return pos_pre, pos_suf, neg_pre, neg_suf


def get_recom_prompt_type():
    types = list(recom_prompt_type.keys())
    types.remove("Auto")
    return types


def get_positive_prefix():
    return list(positive_prefix.keys())


def get_positive_suffix():
    return list(positive_suffix.keys())


def get_negative_prefix():
    return list(negative_prefix.keys())


def get_negative_suffix():
    return list(negative_suffix.keys())


def get_tag_type(pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = []):
    tag_type = "danbooru"
    words = pos_pre + pos_suf + neg_pre + neg_suf
    for word in words:
        if "Pony" in word:
            tag_type = "e621"
            break
    return tag_type


def get_model_info_md(model_name: str):
    return model_info_dict.get(model_name, {}).get("md", "")


def change_model(model_name: str):
    load_model_api(model_name)
    return get_model_info_md(model_name)


def warm_model(model_name: str):
    model = load_model_api(model_name)
    if model:
        try:
            print(f"Warming model: {model_name}")
            infer_body(model, " ")
        except Exception as e:
            print(e)


def infer_body(client: InferenceClient | gr.Interface, prompt: str, neg_prompt: str | None = None,
               height: int | None = None, width: int | None = None,
               steps: int | None = None, cfg: int | None = None):
    png_path = "image.png"
    kwargs = {}
    # Only forward parameters that are actually set and within sane bounds.
    if height is not None and height >= 256: kwargs["height"] = height
    if width is not None and width >= 256: kwargs["width"] = width
    if steps is not None and steps >= 1: kwargs["num_inference_steps"] = steps
    if cfg is not None and cfg > 0: kwargs["guidance_scale"] = cfg
    try:
        if isinstance(client, InferenceClient):
            image = client.text_to_image(prompt=prompt, negative_prompt=neg_prompt, **kwargs)
        elif isinstance(client, gr.Interface):
            image = client.fn(prompt=prompt, negative_prompt=neg_prompt, **kwargs)
        else: return None
        image.save(png_path)
        return str(Path(png_path).resolve())
    except Exception as e:
        print(e)
        return None


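# The kwargs above map one-to-one onto diffusers pipeline arguments
# (height / width / num_inference_steps / guidance_scale). A sketch with a
# hypothetical model id:
#
#   path = infer_body(InferenceClient("owner/model", timeout=server_timeout),
#                     "1girl, solo", "lowres", height=1024, width=1024, steps=28, cfg=7)

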
async def infer(model_name: str, prompt: str, neg_prompt: str | None = None,
                height: int | None = None, width: int | None = None,
                steps: int | None = None, cfg: int | None = None,
                save_path: str | None = None, timeout: float = inference_timeout):
    import random
    # Pad the prompt with a random amount of whitespace so that identical requests
    # are less likely to be served a cached result by the Inference API.
    noise = " " * random.randint(1, 500)
    model = load_model(model_name)
    if not model: return None
    task = asyncio.create_task(asyncio.to_thread(infer_body, model, f"{prompt} {noise}", neg_prompt,
                                                 height, width, steps, cfg))
    await asyncio.sleep(0)
    try:
        result = await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError as e:
        print(e)
        print(f"Task timed out: {model_name}")
        if not task.done(): task.cancel()
        result = None
    except Exception as e:
        print(e)
        if not task.done(): task.cancel()
        result = None
    if task.done() and result is not None:
        with lock:
            image = rename_image(result, model_name, save_path)
        return image
    return None


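# Usage sketch from async code (hypothetical model id):
#
#   image_path = await infer("owner/model", "1girl, solo", "lowres",
#                            height=1024, width=1024, steps=28, cfg=7)

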
def infer_fn(model_name: str, prompt: str, neg_prompt: str | None = None, height: int | None = None,
             width: int | None = None, steps: int | None = None, cfg: int | None = None,
             pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], save_path: str | None = None):
    if model_name == 'NA':
        return None
    # Create the loop before the try block so the finally clause can always close it.
    loop = asyncio.new_event_loop()
    try:
        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
        result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
                                               steps, cfg, save_path, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_name}")
        result = None
    finally:
        loop.close()
    return result


def infer_rand_fn(model_name_dummy: str, prompt: str, neg_prompt: str | None = None, height: int | None = None,
                  width: int | None = None, steps: int | None = None, cfg: int | None = None,
                  pos_pre: list = [], pos_suf: list = [], neg_pre: list = [], neg_suf: list = [], save_path: str | None = None):
    import random
    if model_name_dummy == 'NA':
        return None
    if not loaded_models:  # random.choice would raise on an empty dict
        return None
    random.seed()
    model_name = random.choice(list(loaded_models.keys()))
    loop = asyncio.new_event_loop()
    try:
        prompt, neg_prompt = recom_prompt(prompt, neg_prompt, pos_pre, pos_suf, neg_pre, neg_suf)
        result = loop.run_until_complete(infer(model_name, prompt, neg_prompt, height, width,
                                               steps, cfg, save_path, inference_timeout))
    except (Exception, asyncio.CancelledError) as e:
        print(e)
        print(f"Task aborted: {model_name}")
        result = None
    finally:
        loop.close()
    return result
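

# Minimal smoke test (hypothetical public model id; needs network access and may
# need an HF token for rate limits). Not used by the app itself.
if __name__ == "__main__":
    demo_model = "stabilityai/stable-diffusion-xl-base-1.0"  # assumption: any public diffusers t2i repo
    if load_model_api(demo_model):
        print(infer_fn(demo_model, "1girl, solo, looking at viewer"))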