# HF Spaces page header (scrape residue, kept as a comment):
# Spaces: Running
import gradio as gr
import torch
from diffusers import StableDiffusionPipeline
import gc

# Checkpoint to load; the pipeline itself is created lazily by load_model().
MODEL_ID = "CompVis/stable-diffusion-v1-4"
pipe = None

# Prefer the GPU when one is present; everything downstream keys off this flag.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"π Device: {device}")
def load_model():
    """Lazily build the Stable Diffusion pipeline (idempotent).

    Returns a status string for the UI. Loads fp16 weights on GPU and
    fp32 on CPU, then applies memory optimisations for small machines.
    """
    global pipe

    # A previous call already built the pipeline — nothing to do.
    if pipe is not None:
        return "β Model sudah siap!"

    # Reclaim as much memory as possible before the big allocation.
    gc.collect()
    if device == "cuda":
        torch.cuda.empty_cache()

    print("π¦ Loading model...")
    weight_dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(
        MODEL_ID,
        torch_dtype=weight_dtype,
        safety_checker=None,
    )

    # Mandatory memory optimisation on the free tier.
    pipe.enable_attention_slicing()
    if device == "cuda":
        pipe.to("cuda")
    else:
        # On CPU, slice the VAE as well to keep peak memory down.
        pipe.enable_vae_slicing()

    print("β Model ready")
    return "β Model siap digunakan!"
def generate(prompt, negative_prompt, steps, guidance, width, height, seed):
    """Run one text-to-image generation and return ``(image, status)``.

    Args mirror the Gradio inputs; ``seed == -1`` means "random seed".
    Returns ``(PIL.Image | None, str)``. Never raises: failures are
    reported through the status string so the UI stays responsive.
    """
    global pipe
    if pipe is None:
        return None, "β οΈ Model belum siap"
    try:
        # Clamp resolution so the free tier does not OOM; cast defensively
        # in case the UI hands back numbers as floats/strings.
        width = min(int(width), 512)
        height = min(int(height), 512)

        # BUGFIX: torch.manual_seed() mutates the process-wide RNG state on
        # every request. Use a dedicated Generator instead, so seeding one
        # generation does not perturb global randomness.
        generator = None
        if seed != -1:
            generator = torch.Generator(device=device).manual_seed(int(seed))

        image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=int(steps),
            guidance_scale=float(guidance),
            width=width,
            height=height,
            generator=generator,
        ).images[0]

        # Release transient allocations between requests.
        gc.collect()
        if device == "cuda":
            torch.cuda.empty_cache()

        return image, "β Done"
    except Exception as e:
        # UI boundary: surface the error as a message rather than crash.
        return None, f"β Error: {str(e)}"
# ---- Gradio UI -----------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# π¨ AI Image Generator (HF Free Tier Safe)")
    status = gr.Markdown("β³ Loading model...")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            negative = gr.Textbox(label="Negative Prompt", value="blurry, low quality")
            steps = gr.Slider(10, 25, value=18)
            guidance = gr.Slider(1, 10, value=7)
            width = gr.Dropdown([256, 384, 512], value=512)
            height = gr.Dropdown([256, 384, 512], value=512)
            seed = gr.Number(value=-1)
            btn = gr.Button("Generate")
        with gr.Column():
            output = gr.Image()
            result = gr.Markdown()

    # Start loading the model as soon as the page opens; the status
    # markdown is replaced with the message load_model() returns.
    demo.load(load_model, outputs=status)
    btn.click(
        generate,
        inputs=[prompt, negative, steps, guidance, width, height, seed],
        outputs=[output, result],
    )

if __name__ == "__main__":
    demo.launch()