"""Gradio app that serves a private LoRA-fine-tuned MNIST diffusion model on CPU."""

import os

import gradio as gr
from diffusers import DiffusionPipeline

# Retrieve the Hugging Face token from the environment variable.
hf_token = os.getenv("HUGGINGFACE_TOKEN")

# Fail fast with a clear message if the token is missing.
if hf_token is None:
    raise EnvironmentError("Hugging Face token not found in environment variables. Make sure it's correctly set.")

# Load the private model once at startup.
try:
    pipe = DiffusionPipeline.from_pretrained(
        "Maryamm/Lora_finetune_mnist",  # Private model repo on the Hugging Face Hub
        token=hf_token,  # `use_auth_token` is deprecated in recent diffusers releases
        # NOTE(review): `force_download=True` was dropped — it re-downloaded the
        # full model on every start and defeated the local Hub cache.
    )
    pipe = pipe.to("cpu")  # Run on CPU; no GPU is assumed in this environment.
except Exception as e:
    # Chain the original exception so the root cause stays in the traceback.
    raise RuntimeError(f"Failed to load model: {e}") from e


def infer(prompt: str):
    """Generate a single image for *prompt* using the loaded diffusion pipeline.

    Returns the first PIL image produced by the pipeline.
    """
    return pipe(prompt).images[0]


# Guarded so importing this module does not start the web server.
if __name__ == "__main__":
    gr.Interface(fn=infer, inputs="text", outputs="image").launch()