Spaces: Running on Zero
import os
import subprocess
import sys
import importlib.util
import time

# Set environment variables
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "1"
os.environ["TRANSFORMERS_COMPILER_DISABLED"] = "1"
# Install required packages at startup, pulling the two GitHub-only
# dependencies from their source repositories.
def install_required_packages():
    required_packages = [
        "warmup_scheduler",
        "torchtools",
    ]
    github_repos = {
        "warmup_scheduler": "git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git",
        "torchtools": "git+https://github.com/pabloppp/pytorch-tools.git",
    }
    missing_packages = []

    # First check which packages need to be installed
    for package in required_packages:
        if importlib.util.find_spec(package) is None:
            missing_packages.append(package)
            print(f"{package} needs to be installed")

    # Install missing packages
    for package in missing_packages:
        print(f"Installing {package}...")
        try:
            if package in github_repos:
                subprocess.check_call([
                    sys.executable, "-m", "pip", "install", github_repos[package]
                ])
            else:
                subprocess.check_call([
                    sys.executable, "-m", "pip", "install", package
                ])
            print(f"{package} installed successfully")
            # Wait a moment to ensure the package is available for import
            time.sleep(1)
        except subprocess.CalledProcessError as e:
            print(f"Failed to install {package}: {e}")

    # If any packages were installed, refresh the import machinery so the
    # new installs become visible in this process.
    if missing_packages:
        print("Refreshing Python module cache...")
        importlib.invalidate_caches()
        for package in missing_packages:
            if package in sys.modules:
                del sys.modules[package]
# Create patches for missing modules if they can't be installed
def create_module_patches():
    # Create a patch for torchtools.transforms if it doesn't exist
    if importlib.util.find_spec("torchtools") is None or importlib.util.find_spec("torchtools.transforms") is None:
        print("Creating patch for torchtools.transforms...")
        # Create the directory structure
        os.makedirs("torchtools/transforms", exist_ok=True)
        # Create __init__.py files
        with open("torchtools/__init__.py", "w") as f:
            f.write("# Patch for torchtools\n")
        # Create a simplified SmartCrop class
        with open("torchtools/transforms/__init__.py", "w") as f:
            f.write("""# Patch for torchtools.transforms
import torch
import torch.nn.functional as F


class SmartCrop:
    def __init__(self, size=None, scale=None, preserve_aspect_ratio=True):
        self.size = size
        self.scale = scale
        self.preserve_aspect_ratio = preserve_aspect_ratio

    def __call__(self, image):
        # Basic placeholder implementation that resizes the image.
        # Actual smart cropping would need a more complex implementation.
        if self.size is not None:
            return F.interpolate(image.unsqueeze(0), size=self.size, mode='bilinear', align_corners=False).squeeze(0)
        elif self.scale is not None:
            h, w = image.shape[-2:]
            new_h, new_w = int(h * self.scale), int(w * self.scale)
            return F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False).squeeze(0)
        return image
""")
        # Add the patch directory to the system path
        sys.path.insert(0, os.path.abspath('./'))
        print("Torchtools patch created successfully")
# Install required packages
print("Checking and installing required packages...")
install_required_packages()

# Create patch modules for any missing dependencies
print("Creating patches for any missing modules...")
create_module_patches()

# Give the system a moment to register newly installed packages
time.sleep(2)

# Now continue with the imports
print("Importing the required modules...")
import yaml
import torch

sys.path.append(os.path.abspath('./'))

# Try importing the project modules
try:
    from inference.utils import *
    from train import WurstCoreB
    from gdf import DDPMSampler
    from train import WurstCore_t2i as WurstCoreC
    print("Successfully imported all required modules!")
except ImportError as e:
    print(f"Warning: Import error: {e}")
    print("Continuing with the application setup...")

import numpy as np
import random
import argparse
import gradio as gr
import spaces
from huggingface_hub import hf_hub_download
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--height', type=int, default=2560, help='image height')
    parser.add_argument('--width', type=int, default=5120, help='image width')
    parser.add_argument('--seed', type=int, default=123, help='random seed')
    parser.add_argument('--dtype', type=str, default='bf16', help="if bf16 does not work, change it to 'float32'")
    parser.add_argument('--config_c', type=str,
                        default='configs/training/t2i.yaml', help='config file for stage C (latent generation)')
    parser.add_argument('--config_b', type=str,
                        default='configs/inference/stage_b_1b.yaml', help='config file for stage B (latent decoding)')
    parser.add_argument('--prompt', type=str,
                        default='A photo-realistic image of a west highland white terrier in the garden, high quality, detail rich, 8K', help='text prompt')
    parser.add_argument('--num_image', type=int, default=1, help='number of images to generate')
    parser.add_argument('--output_dir', type=str, default='figures/output_results/', help='output directory for generated images')
    parser.add_argument('--stage_a_tiled', action='store_true', help='use tiled decoding for stage A to save memory')
    parser.add_argument('--pretrained_path', type=str, default='models/ultrapixel_t2i.safetensors', help='path to the pretrained UltraPixel parameters')
    args = parser.parse_args()
    return args
def clear_image():
    return None


def load_message(height, width, seed, prompt, args, stage_a_tiled):
    # Copy the UI settings into args and append quality tags to the prompt
    args.height = height
    args.width = width
    args.seed = seed
    args.prompt = prompt + ' rich detail, 4k, high quality'
    args.stage_a_tiled = stage_a_tiled
    return args
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call
def get_image(height, width, seed, prompt, cfg, timesteps, stage_a_tiled):
    global args
    args = load_message(height, width, seed, prompt, args, stage_a_tiled)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    dtype = torch.bfloat16 if args.dtype == 'bf16' else torch.float
    captions = [args.prompt] * args.num_image
    height, width = args.height, args.width
    batch_size = 1
    height_lr, width_lr = get_target_lr_size(height / width, std_size=32)
    stage_c_latent_shape, stage_b_latent_shape = calculate_latent_sizes(height, width, batch_size=batch_size)
    stage_c_latent_shape_lr, stage_b_latent_shape_lr = calculate_latent_sizes(height_lr, width_lr, batch_size=batch_size)

    # Stage C parameters (cfg and timesteps come from the UI controls)
    extras.sampling_configs['cfg'] = cfg
    extras.sampling_configs['shift'] = 1
    extras.sampling_configs['timesteps'] = timesteps
    extras.sampling_configs['t_start'] = 1.0
    extras.sampling_configs['sampler'] = DDPMSampler(extras.gdf)

    # Stage B parameters
    extras_b.sampling_configs['cfg'] = 1.1
    extras_b.sampling_configs['shift'] = 1
    extras_b.sampling_configs['timesteps'] = 10
    extras_b.sampling_configs['t_start'] = 1.0

    for _, caption in enumerate(captions):
        batch = {'captions': [caption] * batch_size}

        with torch.no_grad():
            models.generator.cuda()
            print('STAGE C GENERATION***************************')
            with torch.cuda.amp.autocast(dtype=dtype):
                sampled_c = generation_c(batch, models, extras, core, stage_c_latent_shape, stage_c_latent_shape_lr, device)
            models.generator.cpu()
            torch.cuda.empty_cache()

            conditions_b = core_b.get_conditions(batch, models_b, extras_b, is_eval=True, is_unconditional=False)
            unconditions_b = core_b.get_conditions(batch, models_b, extras_b, is_eval=True, is_unconditional=True)
            conditions_b['effnet'] = sampled_c
            unconditions_b['effnet'] = torch.zeros_like(sampled_c)
            print('STAGE B + A DECODING***************************')
            with torch.cuda.amp.autocast(dtype=dtype):
                sampled = decode_b(conditions_b, unconditions_b, models_b, stage_b_latent_shape, extras_b, device, stage_a_tiled=args.stage_a_tiled)

            torch.cuda.empty_cache()
            imgs = show_images(sampled)

    return imgs[0]
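# Direct (non-UI) invocation sketch, relying on the module-level models/extras
# set up under __main__ below (argument values here are illustrative):
#
#   img = get_image(height=2304, width=4096, seed=123,
#                   prompt="A snow-covered mountain at sunrise",
#                   cfg=4, timesteps=20, stage_a_tiled=True)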
| css = """ | |
| footer { | |
| visibility: hidden; | |
| } | |
| /* Main container styling */ | |
| #col-container { | |
| max-width: 1200px; | |
| margin: 0 auto; | |
| padding: 20px; | |
| background-color: #f8f9fa; | |
| border-radius: 15px; | |
| box-shadow: 0 4px 15px rgba(0, 0, 0, 0.1); | |
| } | |
| /* Header styling */ | |
| h1 { | |
| text-align: center; | |
| color: #ff6b00; | |
| font-size: 2.5rem; | |
| margin-bottom: 20px; | |
| font-weight: 700; | |
| text-shadow: 1px 1px 2px rgba(0,0,0,0.1); | |
| } | |
| /* Button styling */ | |
| button.primary { | |
| background-color: #ff6b00 !important; | |
| color: white !important; | |
| border: none !important; | |
| border-radius: 8px !important; | |
| padding: 10px 20px !important; | |
| font-weight: 600 !important; | |
| transition: all 0.3s ease !important; | |
| } | |
| button.primary:hover { | |
| background-color: #e55f00 !important; | |
| transform: translateY(-2px); | |
| box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2) !important; | |
| } | |
| /* Input field styling */ | |
| input[type="text"] { | |
| border-radius: 8px !important; | |
| border: 2px solid #ddd !important; | |
| padding: 12px !important; | |
| font-size: 1rem !important; | |
| transition: all 0.3s ease !important; | |
| } | |
| input[type="text"]:focus { | |
| border-color: #ff6b00 !important; | |
| box-shadow: 0 0 0 3px rgba(255, 107, 0, 0.2) !important; | |
| } | |
| /* Output image container */ | |
| .output-image { | |
| border-radius: 12px; | |
| overflow: hidden; | |
| box-shadow: 0 8px 20px rgba(0, 0, 0, 0.15); | |
| margin: 20px 0; | |
| transition: all 0.3s ease; | |
| } | |
| .output-image:hover { | |
| transform: scale(1.02); | |
| } | |
| /* Accordion styling */ | |
| .accordion { | |
| border-radius: 10px !important; | |
| overflow: hidden !important; | |
| margin: 15px 0 !important; | |
| border: 1px solid #eaeaea !important; | |
| } | |
| /* Example gallery */ | |
| .examples-gallery { | |
| display: grid; | |
| grid-template-columns: repeat(auto-fill, minmax(300px, 1fr)); | |
| gap: 15px; | |
| margin-top: 20px; | |
| } | |
| .example-item { | |
| background-color: white; | |
| border-radius: 10px; | |
| padding: 10px; | |
| box-shadow: 0 2px 8px rgba(0, 0, 0, 0.1); | |
| transition: all 0.3s ease; | |
| } | |
| .example-item:hover { | |
| transform: translateY(-5px); | |
| box-shadow: 0 5px 15px rgba(0, 0, 0, 0.2); | |
| } | |
| /* Loading animation */ | |
| @keyframes pulse { | |
| 0% { opacity: 0.6; } | |
| 50% { opacity: 1; } | |
| 100% { opacity: 0.6; } | |
| } | |
| .loading { | |
| animation: pulse 1.5s infinite; | |
| text-align: center; | |
| padding: 20px; | |
| color: #ff6b00; | |
| font-weight: bold; | |
| } | |
| """ | |
with gr.Blocks(theme="soft", css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("<h1>UHD Image Generator (5120×4096)</h1>")

        with gr.Group():
            with gr.Row():
                with gr.Column(scale=5):
                    prompt = gr.Textbox(
                        label="Text Prompt",
                        max_lines=2,
                        placeholder="Enter your prompt (e.g., 'A majestic mountain landscape with snow')",
                        elem_id="prompt-input"
                    )
                with gr.Column(scale=1):
                    generate_button = gr.Button("Generate Image", variant="primary", elem_id="generate-btn")
                    clear_button = gr.Button("Clear", elem_id="clear-btn")

        # Loading indicator
        with gr.Row(visible=False) as loading_indicator:
            gr.Markdown('<div class="loading">Generating your ultra high resolution image... This may take a minute...</div>')

        # Output image
        output_img = gr.Image(label="Generated Image", elem_classes="output-image")
| with gr.Accordion("Advanced Settings", open=False): | |
| with gr.Row(): | |
| with gr.Column(): | |
| seed = gr.Number( | |
| label="Random Seed", | |
| value=123, | |
| step=1, | |
| minimum=0, | |
| ) | |
| cfg = gr.Slider( | |
| label="CFG Scale (Creativity vs. Prompt Adherence)", | |
| minimum=3, | |
| maximum=10, | |
| step=0.1, | |
| value=4 | |
| ) | |
| with gr.Column(): | |
| with gr.Row(): | |
| width = gr.Slider( | |
| label="Width", | |
| minimum=1536, | |
| maximum=5120, | |
| step=32, | |
| value=4096 | |
| ) | |
| height = gr.Slider( | |
| label="Height", | |
| minimum=1536, | |
| maximum=4096, | |
| step=32, | |
| value=2304 | |
| ) | |
| timesteps = gr.Slider( | |
| label="Timesteps (Quality vs. Speed)", | |
| minimum=10, | |
| maximum=50, | |
| step=1, | |
| value=20 | |
| ) | |
| stage_a_tiled = gr.Checkbox( | |
| label="Use Tiled Decoding (Lower Memory Usage)", | |
| value=False | |
| ) | |
        # Aspect ratio presets
        with gr.Row():
            gr.Markdown("### Quick Aspect Ratio Presets")
        with gr.Row():
            preset_landscape = gr.Button("Landscape (16:9)", size="sm")
            preset_portrait = gr.Button("Portrait (9:16)", size="sm")
            preset_square = gr.Button("Square (1:1)", size="sm")
            preset_ultrawide = gr.Button("Ultrawide (21:9)", size="sm")

        # Example prompts, organized by category
        gr.Markdown("### Example Prompts")
        with gr.Tabs():
            with gr.TabItem("Nature"):
                gr.Examples(
                    examples=[
                        "A detailed view of a blooming magnolia tree, with large, white flowers and dark green leaves, set against a clear blue sky.",
                        "A majestic view of snow-covered mountains with a calm lake against a blue sky background",
                    ],
                    inputs=[prompt],
                )
            with gr.TabItem("Animals"):
                gr.Examples(
                    examples=[
                        "A crocodile wearing a sweater",
                        "A cute golden retriever puppy chasing a red ball on a green lawn",
                    ],
                    inputs=[prompt],
                )
            with gr.TabItem("Anime"):
                gr.Examples(
                    examples=[
                        "A vibrant anime scene of a young girl with long, flowing pink hair, big sparkling blue eyes, and a school uniform, standing under a cherry blossom tree with petals falling around her.",
                    ],
                    inputs=[prompt],
                )
            with gr.TabItem("Architecture"):
                gr.Examples(
                    examples=[
                        "A cozy, rustic log cabin nestled in a snow-covered forest, with smoke rising from the stone chimney, warm lights glowing from the windows, and a path of footprints leading to the front door.",
                    ],
                    inputs=[prompt],
                )
        # Functions to set aspect ratio presets (returned as width, height)
        def set_landscape():
            return 5120, 2880

        def set_portrait():
            return 2304, 4096

        def set_square():
            return 3584, 3584

        def set_ultrawide():
            return 5120, 2160

        # Connect preset buttons to functions
        preset_landscape.click(set_landscape, outputs=[width, height])
        preset_portrait.click(set_portrait, outputs=[width, height])
        preset_square.click(set_square, outputs=[width, height])
        preset_ultrawide.click(set_ultrawide, outputs=[width, height])

        # Connect events: show the loading indicator, generate, then hide it
        generate_button.click(
            lambda: gr.update(visible=True),
            outputs=[loading_indicator]
        ).then(
            get_image,
            inputs=[height, width, seed, prompt, cfg, timesteps, stage_a_tiled],
            outputs=[output_img]
        ).then(
            lambda: gr.update(visible=False),
            outputs=[loading_indicator]
        )

        clear_button.click(clear_image, inputs=[], outputs=[output_img])
def download_with_wget(url, save_path):
    # Fallback downloader (currently unused; download_model uses hf_hub_download)
    try:
        subprocess.run(['wget', url, '-O', save_path], check=True)
        print(f"Downloaded to {save_path}")
    except subprocess.CalledProcessError as e:
        print(f"Error downloading file: {e}")


def download_model():
    urls = [
        'https://huggingface.co/stabilityai/StableWurst/resolve/main/stage_a.safetensors',
        'https://huggingface.co/stabilityai/StableWurst/resolve/main/previewer.safetensors',
        'https://huggingface.co/stabilityai/StableWurst/resolve/main/effnet_encoder.safetensors',
        'https://huggingface.co/stabilityai/StableWurst/resolve/main/stage_b_lite_bf16.safetensors',
        'https://huggingface.co/stabilityai/StableWurst/resolve/main/stage_c_bf16.safetensors',
    ]
    # Only the basenames are used; fetch each file from the StableWurst repo
    # that the URLs above point at.
    for file_url in urls:
        hf_hub_download(repo_id="stabilityai/StableWurst", filename=file_url.split('/')[-1], local_dir='models')
    hf_hub_download(repo_id="roubaofeipi/UltraPixel", filename='ultrapixel_t2i.safetensors', local_dir='models')
| if __name__ == "__main__": | |
| args = parse_args() | |
| device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") | |
| download_model() | |
| config_file = args.config_c | |
| with open(config_file, "r", encoding="utf-8") as file: | |
| loaded_config = yaml.safe_load(file) | |
| core = WurstCoreC(config_dict=loaded_config, device=device, training=False) | |
| # SETUP STAGE B | |
| config_file_b = args.config_b | |
| with open(config_file_b, "r", encoding="utf-8") as file: | |
| config_file_b = yaml.safe_load(file) | |
| core_b = WurstCoreB(config_dict=config_file_b, device=device, training=False) | |
| extras = core.setup_extras_pre() | |
| models = core.setup_models(extras) | |
| models.generator.eval().requires_grad_(False) | |
| print("STAGE C READY") | |
| extras_b = core_b.setup_extras_pre() | |
| models_b = core_b.setup_models(extras_b, skip_clip=True) | |
| models_b = WurstCoreB.Models( | |
| **{**models_b.to_dict(), 'tokenizer': models.tokenizer, 'text_model': models.text_model} | |
| ) | |
| models_b.generator.bfloat16().eval().requires_grad_(False) | |
| print("STAGE B READY") | |
| pretrained_path = args.pretrained_path | |
| sdd = torch.load(pretrained_path, map_location='cpu') | |
| collect_sd = {} | |
| for k, v in sdd.items(): | |
| collect_sd[k[7:]] = v | |
| models.train_norm.load_state_dict(collect_sd) | |
| models.generator.eval() | |
| models.train_norm.eval() | |
| demo.launch(debug=True, share=True) |
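# On a Hugging Face Space this file runs as app.py. A local invocation sketch
# (assumes a CUDA GPU, the configs/ files above, and the repo's
# inference/train/gdf modules on the import path):
#
#   python app.py --config_c configs/training/t2i.yaml \
#                 --config_b configs/inference/stage_b_1b.yaml --stage_a_tiled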