# Hugging Face Spaces page header (scrape residue, not code): Spaces — status: Sleeping
| import gradio as gr | |
| from PIL import Image | |
| import torch | |
| from diffusers import StableDiffusionPipeline | |
# Load the Stable Diffusion pipeline from the Hugging Face Hub.
# If loading fails (missing weights, no network, ...), `pipe` stays None
# and generate_image() reports the problem to the user instead of crashing
# at import time.
model_name = "Yaquv/rickthenpc"
device = "cuda" if torch.cuda.is_available() else "cpu"

pipe = None
try:
    pipe = StableDiffusionPipeline.from_pretrained(model_name).to(device)
except Exception as e:
    print(f"Erreur lors du chargement du modèle : {e}")
# Generation and post-processing function.
def generate_image(prompt):
    """Generate an image from ``prompt`` with the loaded diffusion pipeline.

    Parameters
    ----------
    prompt : str
        Text prompt forwarded to the Stable Diffusion pipeline.

    Returns
    -------
    PIL.Image.Image
        The first image produced by the pipeline, converted to a PIL
        image if the pipeline returned a raw array.

    Raises
    ------
    ValueError
        If the model failed to load, produced no image, or if generation
        raised any other error (chained as the cause).
    """
    if pipe is None:
        raise ValueError("The model couldn't be loaded.")
    try:
        # Run the diffusion pipeline on the prompt.
        result = pipe(prompt)
        # Make sure the result actually contains at least one image.
        if not hasattr(result, 'images') or len(result.images) == 0:
            raise ValueError("The model couldn't generate an image.")
        image = result.images[0]
        # Normalize to PIL.Image (pipelines may return numpy arrays).
        if not isinstance(image, Image.Image):
            image = Image.fromarray(image)
        return image
    except ValueError:
        # Our own sanity-check errors already carry a clear message:
        # re-raise them as-is instead of double-wrapping them below.
        raise
    except Exception as e:
        # Chain the original exception (`from e`) so the traceback keeps
        # its real cause, and let Gradio surface the message to the user.
        raise ValueError(f"Erreur lors de la génération : {str(e)}") from e
# --- Gradio user interface -------------------------------------------------
# A single textbox in, a single image out, wired to generate_image().
prompt_input = gr.Textbox(label="Prompt")
image_output = gr.Image(label="Generated Image")

iface = gr.Interface(
    fn=generate_image,
    inputs=prompt_input,
    outputs=image_output,
    title="Rick Generator",
    description="Enter a prompt to generate an image with the Rick Generator model.",
)

# Start the web app only when this file is executed as a script.
if __name__ == "__main__":
    iface.launch()