John6666 commited on
Commit
90ff00e
·
verified ·
1 Parent(s): ecc15b8

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +204 -28
app.py CHANGED
@@ -1886,34 +1886,210 @@ with gr.Blocks(theme=args.theme, elem_id="main", fill_width=True, fill_height=Fa
1886
  copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt_gui], show_api=False)
1887
  copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt_gui], show_api=False)
1888
 
1889
- # Stable programmatic endpoint
1890
- # --- minimal sync wrapper + stable API ---
1891
- from typing import Any, List, Tuple, Optional
1892
-
1893
- def generate_minimal(
1894
- args: List[Any],
1895
- model_name: str=LOAD_DIFFUSERS_FORMAT_MODEL[0],
1896
- vae_model: Optional[str]=None,
1897
- task: str=TASK_MODEL_LIST[0],
1898
- controlnet_model: str=DIFFUSERS_CONTROLNET_MODEL[0]
1899
- ) -> Tuple[str, Optional[List[str]], Optional[str]]:
1900
- # Preload the requested model (drain loader yields so the model is ready)
1901
- for _ in sd_gen.load_new_model(model_name, vae_model, task, controlnet_model):
1902
- pass
1903
- # Run the existing streaming generator and collapse to the final triple
1904
- last_status, last_imgs, last_info = "START", None, None
1905
- for status, imgs, info in sd_gen_generate_pipeline(*args):
1906
- last_status, last_imgs, last_info = status, imgs, info
1907
- return last_status or "COMPLETE", last_imgs, last_info
1908
-
1909
- gr.api(
1910
- generate_minimal,
1911
- api_name="generate_image", # => POST /gradio_api/call/generate_image
1912
- api_description="Preload model then generate with full positional args. Returns final (status, images, info).",
1913
- show_api=True, # force listing on the API page
1914
- queue=True, # reuse app queue
1915
- concurrency_id="gpu" # share GPU queue with other heavy events
1916
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1917
 
1918
  gr.LoginButton()
1919
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")
 
1886
  copy_prompt_btn.click(gradio_copy_prompt, inputs=[output_text], outputs=[prompt_gui], show_api=False)
1887
  copy_prompt_btn_pony.click(gradio_copy_prompt, inputs=[output_text_pony], outputs=[prompt_gui], show_api=False)
1888
 
1889
+ # NOTE: "from __future__ import annotations" removed — a future statement is a
+ # SyntaxError anywhere except the very first statement of a module, and this
+ # code sits inside the `with gr.Blocks(...)` suite near line 1889.
1890
+ from typing import Any, Dict, List, Optional, Tuple, Generator
1891
+ import inspect
1892
+ # 1) Single source of truth: define the full explicit signature once.
1893
+ def _signature_src(
1894
+ # 0..6
1895
+ prompt: str,
1896
+ negative_prompt: str = "",
1897
+ num_images: int = 1,
1898
+ num_inference_steps: int = 28,
1899
+ guidance_scale: float = 7.0,
1900
+ clip_skip: int = 0,
1901
+ seed: int = -1,
1902
+ # 7..20 LoRA
1903
+ lora1: str = "", lora1_wt: float = 1.0,
1904
+ lora2: str = "", lora2_wt: float = 1.0,
1905
+ lora3: str = "", lora3_wt: float = 1.0,
1906
+ lora4: str = "", lora4_wt: float = 1.0,
1907
+ lora5: str = "", lora5_wt: float = 1.0,
1908
+ lora6: str = "", lora6_wt: float = 1.0,
1909
+ lora7: str = "", lora7_wt: float = 1.0,
1910
+ # 21..23 sched
1911
+ sampler: str = "Euler",
1912
+ schedule_type: str = "Automatic",
1913
+ schedule_prediction_type: str = "Automatic",
1914
+ # 24..28 canvas/model/task
1915
+ height: int = 1024,
1916
+ width: int = 1024,
1917
+ model_name: str = "votepurchase/animagine-xl-3.1",
1918
+ vae_model: str = "None",
1919
+ task: str = "txt2img",
1920
+ # 29..35 control/style/mask
1921
+ image_control_dict: Optional[dict] = None,
1922
+ preprocessor_name: str = "Canny",
1923
+ preprocess_resolution: int = 512,
1924
+ image_resolution: int = 1024,
1925
+ style_prompt: Optional[List[str]] = None,
1926
+ style_json: Optional[dict] = None,
1927
+ image_mask: Optional[Any] = None,
1928
+ # 36..45 thresholds
1929
+ strength: float = 0.55,
1930
+ low_threshold: int = 100,
1931
+ high_threshold: int = 200,
1932
+ value_threshold: float = 0.1,
1933
+ distance_threshold: float = 0.1,
1934
+ recolor_gamma_correction: float = 1.0,
1935
+ tile_blur_sigma: int = 9,
1936
+ control_net_output_scaling: float = 1.0,
1937
+ control_net_start_threshold: float = 0.0,
1938
+ control_net_stop_threshold: float = 1.0,
1939
+ # 46..51 TI/syntax/upscaler
1940
+ textual_inversion: bool = False,
1941
+ prompt_syntax: str = "Classic",
1942
+ upscaler_model_path: Optional[str] = None,
1943
+ upscaler_increases_size: float = 1.2,
1944
+ upscaler_tile_size: int = 0,
1945
+ upscaler_tile_overlap: int = 8,
1946
+ # 52..60 hires
1947
+ hires_steps: int = 30,
1948
+ hires_denoising_strength: float = 0.55,
1949
+ hires_sampler: str = "Use same sampler",
1950
+ hires_prompt: str = "",
1951
+ hires_negative_prompt: str = "",
1952
+ adetailer_inpaint_only: bool = True,
1953
+ adetailer_verbose: bool = False,
1954
+ hires_schedule_type: str = "Use same schedule type",
1955
+ hires_guidance_scale: float = -1.0,
1956
+ # 61 controlnet model (also used by the loader)
1957
+ controlnet_model: str = "Automatic",
1958
+ # 62..71 loop/ui/save/cache
1959
+ loop_generation: bool = False,
1960
+ leave_progress_bar: bool = False,
1961
+ disable_progress_bar: bool = False,
1962
+ image_previews: bool = True,
1963
+ display_images: bool = True,
1964
+ save_generated_images: bool = True,
1965
+ filename_pattern: str = "model,seed",
1966
+ image_storage_location: str = "./images/",
1967
+ retain_compel_previous_load: bool = True,
1968
+ retain_detailfix_model_previous_load: bool = True,
1969
+ retain_hires_model_previous_load: bool = True,
1970
+ # 72..78 t2i + perf
1971
+ t2i_adapter_preprocessor: Optional[str] = None,
1972
+ t2i_adapter_conditioning_scale: float = 0.55,
1973
+ t2i_adapter_conditioning_factor: float = 1.0,
1974
+ xformers_memory_efficient_attention: bool = True,
1975
+ free_u: bool = False,
1976
+ generator_in_cpu: bool = False,
1977
+ # 79..101 ADetailer
1978
+ adetailer_sampler: str = "Use same sampler",
1979
+ adetailer_active_a: bool = False,
1980
+ prompt_ad_a: str = "",
1981
+ negative_prompt_ad_a: str = "",
1982
+ strength_ad_a: float = 0.35,
1983
+ face_detector_ad_a: bool = False,
1984
+ person_detector_ad_a: bool = True,
1985
+ hand_detector_ad_a: bool = False,
1986
+ mask_dilation_a: int = 4,
1987
+ mask_blur_a: int = 4,
1988
+ mask_padding_a: int = 32,
1989
+ adetailer_active_b: bool = False,
1990
+ prompt_ad_b: str = "",
1991
+ negative_prompt_ad_b: str = "",
1992
+ strength_ad_b: float = 0.35,
1993
+ face_detector_ad_b: bool = False,
1994
+ person_detector_ad_b: bool = True,
1995
+ hand_detector_ad_b: bool = False,
1996
+ mask_dilation_b: int = 4,
1997
+ mask_blur_b: int = 4,
1998
+ mask_padding_b: int = 32,
1999
+ # 102..117 cache/guidance/ip_adapter/pag/face_rest
2000
+ cache_compel_texts: bool = True,
2001
+ guidance_rescale: float = 0.0,
2002
+ image_ip1_dict: Optional[dict] = None, mask_ip1: Optional[Any] = None,
2003
+ model_ip1: str = "plus_face", mode_ip1: str = "original", scale_ip1: float = 0.7,
2004
+ image_ip2_dict: Optional[dict] = None, mask_ip2: Optional[Any] = None,
2005
+ model_ip2: str = "base", mode_ip2: str = "style", scale_ip2: float = 0.7,
2006
+ pag_scale: float = 0.0,
2007
+ face_restoration_model: Optional[str] = None,
2008
+ face_restoration_visibility: float = 1.0,
2009
+ face_restoration_weight: float = 0.5,
2010
+ # 118..120 tail
2011
+ load_lora_cpu: bool = False,
2012
+ verbose_info_gui: int = 0,
2013
+ gpu_duration: int = 20,
2014
+ ) -> Tuple[str, Optional[List[str]], Optional[str]]:
2015
+ raise NotImplementedError
2016
+
2017
+ # 2) Helpers: loader + argv builder
2018
+ def _load_model(model_name: str, vae_model: str, task: str, controlnet_model: str) -> None:
2019
+ for _ in sd_gen.load_new_model(model_name, vae_model, task, controlnet_model):
2020
+ pass
2021
+
2022
+ # Order matches sd_gen_generate_pipeline(*argv)
2023
+ _GEN_ARG_ORDER = [
2024
+ # keep in sync with _signature_src and pipeline
2025
+ "prompt","negative_prompt","num_images","num_inference_steps","guidance_scale","clip_skip","seed",
2026
+ "lora1","lora1_wt","lora2","lora2_wt","lora3","lora3_wt","lora4","lora4_wt","lora5","lora5_wt","lora6","lora6_wt","lora7","lora7_wt",
2027
+ "sampler","schedule_type","schedule_prediction_type",
2028
+ "height","width","model_name","vae_model","task",
2029
+ "image_control_dict","preprocessor_name","preprocess_resolution","image_resolution",
2030
+ "style_prompt","style_json","image_mask",
2031
+ "strength","low_threshold","high_threshold","value_threshold","distance_threshold",
2032
+ "recolor_gamma_correction","tile_blur_sigma",
2033
+ "control_net_output_scaling","control_net_start_threshold","control_net_stop_threshold",
2034
+ "textual_inversion","prompt_syntax",
2035
+ "upscaler_model_path","upscaler_increases_size","upscaler_tile_size","upscaler_tile_overlap",
2036
+ "hires_steps","hires_denoising_strength","hires_sampler","hires_prompt","hires_negative_prompt",
2037
+ "adetailer_inpaint_only","adetailer_verbose","hires_schedule_type","hires_guidance_scale",
2038
+ "controlnet_model",
2039
+ "loop_generation","leave_progress_bar","disable_progress_bar","image_previews","display_images","save_generated_images",
2040
+ "filename_pattern","image_storage_location",
2041
+ "retain_compel_previous_load","retain_detailfix_model_previous_load","retain_hires_model_previous_load",
2042
+ "t2i_adapter_preprocessor","t2i_adapter_conditioning_scale","t2i_adapter_conditioning_factor",
2043
+ "xformers_memory_efficient_attention","free_u","generator_in_cpu",
2044
+ "adetailer_sampler",
2045
+ "adetailer_active_a","prompt_ad_a","negative_prompt_ad_a","strength_ad_a",
2046
+ "face_detector_ad_a","person_detector_ad_a","hand_detector_ad_a",
2047
+ "mask_dilation_a","mask_blur_a","mask_padding_a",
2048
+ "adetailer_active_b","prompt_ad_b","negative_prompt_ad_b","strength_ad_b",
2049
+ "face_detector_ad_b","person_detector_ad_b","hand_detector_ad_b",
2050
+ "mask_dilation_b","mask_blur_b","mask_padding_b",
2051
+ "cache_compel_texts","guidance_rescale",
2052
+ "image_ip1_dict","mask_ip1","model_ip1","mode_ip1","scale_ip1",
2053
+ "image_ip2_dict","mask_ip2","model_ip2","mode_ip2","scale_ip2",
2054
+ "pag_scale","face_restoration_model","face_restoration_visibility","face_restoration_weight",
2055
+ "load_lora_cpu","verbose_info_gui","gpu_duration",
2056
+ ]
2057
+
2058
+ # Defaults declared once in _signature_src, so omitted kwargs keep their
+ # declared defaults instead of silently becoming None.
+ _SIG_DEFAULTS = {k: (p.default if p.default is not inspect.Parameter.empty else None)
+ for k, p in inspect.signature(_signature_src).parameters.items()}
2059
+ def _argv_from_kwargs(kwargs: Dict[str, Any]) -> List[Any]:
2060
+ # Convert kwargs to the exact positional argv expected by generator.
+ return [kwargs.get(k, _SIG_DEFAULTS.get(k)) for k in _GEN_ARG_ORDER]
2061
+
2062
+ def _generate_image(argv: List[Any]) -> Generator[Tuple[str, Optional[List[str]], Optional[str]], None, None]:
2063
+ # Delegate to existing generator
2064
+ yield from sd_gen_generate_pipeline(*argv)
2065
+
2066
+ # 3) Signature-clone decorator: keep one signature across both endpoints
2067
+ def clone_signature(src):
2068
+ def deco(dst):
2069
+ dst.__signature__ = inspect.signature(src)
2070
+ dst.__annotations__ = src.__annotations__.copy()
2071
+ return dst
2072
+ return deco
2073
+
2074
+ # 4) Implementations: both share the same signature via clone_signature
2075
+ @clone_signature(_signature_src)
2076
+ def generate_image(*args, **kwargs) -> Tuple[str, Optional[List[str]], Optional[str]]:
+ # (was `def generate_image(*, **kwargs)` — a SyntaxError: named arguments must
+ # follow a bare `*`. Accept *args too, and bind against the cloned signature so
+ # positional calls work and declared defaults are applied.)
2077
+ bound = inspect.signature(_signature_src).bind_partial(*args, **kwargs)
+ bound.apply_defaults()
+ kwargs = dict(bound.arguments)
+ _load_model(kwargs["model_name"], kwargs["vae_model"], kwargs["task"], kwargs["controlnet_model"])
2078
+ argv = _argv_from_kwargs(kwargs)
2079
+ last = ("COMPLETE", None, None)
2080
+ for last in _generate_image(argv):
2081
+ pass
2082
+ return last
2083
+
2084
+ @clone_signature(_signature_src)
2085
+ def generate_image_stream(*args, **kwargs) -> Generator[Tuple[str, Optional[List[str]], Optional[str]], None, None]:
+ # (was `def generate_image_stream(*, **kwargs)` — a SyntaxError. Bind against
+ # the cloned signature so positional calls work and defaults are applied.)
2086
+ bound = inspect.signature(_signature_src).bind_partial(*args, **kwargs)
+ bound.apply_defaults()
+ kwargs = dict(bound.arguments)
+ _load_model(kwargs["model_name"], kwargs["vae_model"], kwargs["task"], kwargs["controlnet_model"])
2087
+ argv = _argv_from_kwargs(kwargs)
2088
+ yield from _generate_image(argv)
2089
+
2090
+ # 5) Register two APIs with identical, named kwargs
2091
+ gr.api(generate_image, api_name="generate_image", show_api=True, queue=True, concurrency_id="gpu")
2092
+ gr.api(generate_image_stream, api_name="generate_image_stream", show_api=True, queue=True, concurrency_id="gpu")
2093
 
2094
  gr.LoginButton()
2095
  gr.DuplicateButton(value="Duplicate Space for private use (This demo does not work on CPU. Requires GPU Space)")