Spaces:
Runtime error
Runtime error
| import gradio as gr | |
| import numpy as np | |
| import random | |
| from diffusers import DiffusionPipeline | |
| import torch | |
# --- 1. CONSTANTS AND MODEL CONFIGURATION ---

# Styles and logical prompts from Perchance code.
# Each entry supplies a positive-prompt suffix and a style-specific negative
# prompt; both are merged with the user's input in infer().
STYLES = {
    "Fotografia Realistyczna (Domyślny)": {
        "prompt": ", raw photo, realistic, candid shot, natural lighting, highly detailed face, dslr, sharp focus, 8k uhd, film grain, Fujifilm",
        "negative": "cartoon, anime, 3d render, painting, drawing, smooth skin, photoshop",
    },
    "Kinowy (Dramatyczny)": {
        "prompt": ", cinematic lighting, dramatic atmosphere, movie still, color graded, shallow depth of field, bokeh, volumetric fog, highly detailed, 8k, masterpiece",
        "negative": "bright, cheerful, flat lighting, amateur",
    },
    "Surowy (Raw)": {
        "prompt": ", high quality, detailed",
        "negative": "low quality",
    },
}

# Most comprehensive negative prompt (commonNegative); always prepended.
COMMON_NEGATIVE = "(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, watermark, text, signature, sketch, poorly drawn face, low quality, worst quality, bad composition, blurry face, horror, grainy"

# Random description examples (shortened version).
# Must be a sequence, not a set: random.choice() indexes its argument, so the
# original set literal raised TypeError at example creation.
RANDOM_DESCRIPTIONS = (
    "beautiful woman, tight dress, narrow waist, ethereal",
    "succubus, wind blowing hair, plunging neckline, bokeh",
    "catgirl, moonlit, high-angle shot, long exposure",
)
# Device detection for ZeroGPU compatibility.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_repo_id = "stabilityai/sdxl-turbo"

# Model initialization with ZeroGPU compatibility.
# Load unconditionally: the original only assigned `pipe` when CUDA was
# available, so on CPU-only hosts `pipe` was never defined and the
# `pipe is None` guard in infer() raised NameError instead.
try:
    pipe = DiffusionPipeline.from_pretrained(
        model_repo_id,
        # fp16 only makes sense on GPU; fall back to fp32 on CPU.
        torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    )
    pipe = pipe.to(device)
except Exception as e:
    # Best-effort startup: keep the app alive and surface the error in infer().
    print(f"Model loading failed: {e}")
    pipe = None

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
def infer(
    user_prompt,
    style_name,
    user_negative_prompt,
    seed,
    randomize_seed,
    width,
    height,
    guidance_scale,
    num_inference_steps,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate one image from the composed prompts.

    Combines the user's description with the selected style's prompt
    fragments, runs the diffusion pipeline, and returns (image, seed).
    Raises gr.Error when the model failed to load at startup.

    Fixes vs. original: removed the stray closing parens on the
    final_prompt and generator lines and closed the unbalanced
    final_negative_prompt parenthesis — all three were syntax errors.
    """
    if pipe is None:
        raise gr.Error("Model not loaded. Please check the logs for details.")

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # 1. Compose final prompts (Perchance logic).
    style = STYLES.get(style_name, {})

    # Final positive prompt = user description + style prompt.
    final_prompt = user_prompt + style.get("prompt", "")

    # Final negative prompt = COMMON_NEGATIVE + style negative + user negative.
    final_negative_prompt = (
        COMMON_NEGATIVE
        + ", "
        + style.get("negative", "")
        + (", " + user_negative_prompt if user_negative_prompt else "")
    )

    # 2. Call the model with a deterministic, seedable generator.
    generator = torch.Generator().manual_seed(seed)

    with torch.inference_mode():
        image = pipe(
            prompt=final_prompt,
            negative_prompt=final_negative_prompt,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            generator=generator,
        ).images[0]

    return image, seed
# Create examples shown below the UI (prompt, style, extra negative).
# list(...) makes random.choice safe even if RANDOM_DESCRIPTIONS is not a
# plain sequence (the original defined it as a set, which random.choice
# cannot index).
examples = [
    [random.choice(list(RANDOM_DESCRIPTIONS)), "Fotografia Realistyczna (Domyślny)", ""],
    ["An astronaut riding a green horse, detailed, sci-fi", "Kinowy (Dramatyczny)", ""],
    ["A delicious ceviche cheesecake slice, studio lighting", "Fotografia Realistyczna (Domyślny)", "blurry, dark"],
]
# Custom CSS for better styling.
# #col-container centers and narrows the main column; the gradient is applied
# to the whole Gradio container. Passed to gr.Blocks via its css= argument.
custom_css = """
#col-container {
    margin: 0 auto;
    max-width: 640px;
}
.gradio-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
"""
# Gradio 6 Application.
# theme= and css= are gr.Blocks constructor arguments (not launch() arguments,
# where the original passed them and crashed at startup).
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="indigo",
        secondary_hue="blue",
        neutral_hue="slate",
        font=gr.themes.GoogleFont("Inter"),
        text_size="lg",
        spacing_size="md",
        radius_size="lg",
    ),
    css=custom_css,
) as demo:
    gr.Markdown("# 🎨 AI Image Generator (High Quality) 🖼️")
    gr.Markdown(
        "Add a description, choose a style, and click **Generate**.<br>"
        "<span style='font-size:80%; color:grey;'>Engine: SDXL-Turbo. Implemented advanced prompts and common negative prompts.</span>"
    )

    with gr.Row():
        with gr.Column(scale=3):
            user_prompt = gr.Textbox(
                label="📝 Description (What do you want to see?)",
                lines=3,
                placeholder="high quality portrait photo. The more details, the better.",
            )
        with gr.Column(scale=1):
            style_name = gr.Dropdown(
                label="🎨 Style and Quality",
                # Original had a stray ')' here — syntax error.
                choices=list(STYLES.keys()),
                value="Fotografia Realistyczna (Domyślny)",
            )

    with gr.Row():
        run_button = gr.Button("Generate", variant="primary")

    with gr.Row():
        result = gr.Image(label="Generated Image", height=512)

    with gr.Row():
        with gr.Column():
            seed = gr.Slider(
                label="🌱 Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

    with gr.Accordion("🎛 Advanced Settings", open=False):
        with gr.Row():
            width = gr.Slider(
                label="📏 Width",
                minimum=256,
                maximum=MAX_IMAGE_SIZE,
                step=32,
                value=1024,
            )
        with gr.Row():
            with gr.Column():
                height = gr.Slider(
                    label="📐 Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                )
        with gr.Row():
            with gr.Column():
                user_negative_prompt = gr.Textbox(
                    label="🚫 Additional Anti-description",
                    lines=1,
                    placeholder="Enter additional words to eliminate, e.g. 'cartoon, painting, drawing'",
                )
        with gr.Row():
            with gr.Column():
                guidance_scale = gr.Slider(
                    label="🎛 Guidance Scale",
                    minimum=0.0,
                    maximum=10.0,
                    step=0.1,
                    # SDXL-Turbo is tuned for guidance_scale=0.0.
                    value=0.0,
                )
                num_inference_steps = gr.Slider(
                    label="⚡ Number of Inference Steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=2,
                )

    # Examples section.
    gr.Examples(
        examples=examples,
        inputs=[user_prompt, style_name, user_negative_prompt],
        label="💡 Examples - Click to load",
    )

    # Event listeners.
    run_button.click(
        fn=infer,
        inputs=[
            user_prompt,
            style_name,
            user_negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, seed],
        # NOTE(review): api_visibility looks like a Gradio 6 event parameter —
        # confirm against the installed Gradio version.
        api_visibility="public",
    )
if __name__ == "__main__":
    # theme= and css= are gr.Blocks() constructor arguments, not launch()
    # arguments — passing them here raised TypeError at startup (the reported
    # "Runtime error"); they have been removed from this call.
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        # NOTE(review): footer_links appears in Gradio 6 anycoder templates —
        # confirm the installed Gradio version supports it.
        footer_links=[
            {
                "label": "Built with anycoder",
                "url": "https://huggingface.co/spaces/akhaliq/anycoder",
            },
            {
                "label": "Gradio",
                "url": "https://gradio.app",
            },
        ],
    )