# Hugging Face Space: Stable Diffusion text-to-image demo (Gradio app).
import gradio as gr
from diffusers import StableDiffusionPipeline
import torch
import os  # NOTE(review): unused in this file — confirm before removing

# Device selection: use the GPU when available, otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Model identifier; swap for any other text-to-image model on the HF Hub.
MODEL_ID = "runwayml/stable-diffusion-v1-5"
def load_model():
    """Load the Stable Diffusion pipeline onto the selected device.

    Returns:
        The ready-to-use ``StableDiffusionPipeline``, or ``None`` when
        loading fails, so the UI can show a friendly error instead of
        crashing at import time.
    """
    try:
        pipe = StableDiffusionPipeline.from_pretrained(
            MODEL_ID,
            # fp16 halves GPU memory; CPU needs fp32 for full op support.
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
            # Safety checker disabled; re-enable if you need content filtering.
            safety_checker=None,
        )
        pipe = pipe.to(device)
        return pipe
    except Exception as e:
        # Top-level boundary: log the failure and signal it with None.
        print(f"Error loading model: {e}")
        return None


# Load once at module import so every request reuses the same pipeline.
pipe = load_model()
def generate_image(prompt):
    """Generate one image from *prompt* using the module-level pipeline.

    Args:
        prompt: Free-form text description of the desired image.

    Returns:
        A PIL image produced by the pipeline.

    Raises:
        gr.Error: when the model failed to load or generation fails.
            (The original returned an error *string* here, but the output
            component is ``gr.Image``, which cannot render a string —
            ``gr.Error`` shows the message in the UI instead.)
    """
    if pipe is None:
        raise gr.Error("Model not loaded properly. Please try again later.")
    try:
        # guidance_scale=7.5 is the conventional trade-off between prompt
        # adherence and image quality for SD v1.5.
        return pipe(prompt, guidance_scale=7.5).images[0]
    except Exception as e:
        raise gr.Error(f"Error generating image: {e}")
# --- Gradio UI ------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Stable Diffusion Image Generator")
    prompt_input = gr.Textbox(label="Enter your prompt", lines=2)
    generate_btn = gr.Button("Generate Image")
    output_image = gr.Image()
    # Wire the button to the generator: textbox in, image out.
    generate_btn.click(fn=generate_image, inputs=prompt_input, outputs=output_image)

if __name__ == "__main__":
    demo.launch()