import functools

import torch
import gradio as gr
from diffusers import AutoPipelineForImage2Image
from PIL import Image


@functools.lru_cache(maxsize=1)
def load_model():
    """Build and cache the img2img diffusion pipeline.

    Cached with lru_cache so the multi-GB checkpoint is loaded exactly once
    per process instead of on every user request.

    Returns:
        A diffusers image-to-image pipeline moved to CUDA when available
        (fp16 on GPU, fp32 on CPU).
    """
    use_cuda = torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"
    # Fine-tuned SDXL-based model for anime/Ghibli-style images.
    # NOTE(review): animagine-xl is an SDXL checkpoint, so the plain
    # StableDiffusionImg2ImgPipeline class would mismatch it;
    # AutoPipelineForImage2Image dispatches to the correct pipeline class.
    model_id = "Linaqruf/animagine-xl"
    pipe = AutoPipelineForImage2Image.from_pretrained(
        model_id,
        torch_dtype=torch.float16 if use_cuda else torch.float32,
    )
    return pipe.to(device)


def ghibli_transform(image):
    """Convert an uploaded PIL image into a Ghibli-style artwork.

    Args:
        image: PIL.Image from the Gradio input component (None if the
            user submitted without uploading).

    Returns:
        The stylized PIL.Image.

    Raises:
        gr.Error: if no image was provided.
    """
    if image is None:
        raise gr.Error("Please upload an image first.")
    pipe = load_model()
    prompt = "Ghibli style painting, vibrant colors, highly detailed, soft lighting, Studio Ghibli artwork"
    # strength=0.5 keeps the input's composition; 30 steps / cfg 7.0 chosen
    # as a quality/speed trade-off.
    result = pipe(
        prompt=prompt,
        image=image,
        strength=0.5,
        num_inference_steps=30,
        guidance_scale=7.0,
    ).images[0]
    return result


def main():
    """Launch the Gradio web interface for the style transformer."""
    iface = gr.Interface(
        fn=ghibli_transform,
        inputs=gr.Image(type="pil"),
        outputs=gr.Image(type="pil"),
        title="Ghibli Style Image Transformer",
        description="Upload an image to convert it into a high-quality Studio Ghibli-style artwork!",
    )
    iface.launch()


if __name__ == "__main__":
    main()