import io
import logging
import os
from typing import Optional

from huggingface_hub import InferenceClient
from PIL import Image
# Default text-to-image model id; override per-deployment via the
# HF_IMAGE_MODEL environment variable.
TEXT_TO_IMAGE_MODEL = os.environ.get("HF_IMAGE_MODEL", "stabilityai/stable-diffusion-2-1")
def generate_image(prompt: str, guidance_scale: float = 7.5, num_inference_steps: int = 30) -> Image.Image:
    """Generate an image from a text prompt via the HF Inference API.

    Args:
        prompt: Text description of the desired image.
        guidance_scale: Classifier-free guidance strength forwarded to the model.
        num_inference_steps: Number of diffusion denoising steps.

    Returns:
        The generated PIL image, or a plain light-gray 512x512 placeholder
        image if the API call fails for any reason (best-effort fallback).
    """
    client = InferenceClient(token=os.environ.get("HF_TOKEN"))
    try:
        result = client.text_to_image(
            prompt=prompt,
            model=TEXT_TO_IMAGE_MODEL,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
        )
        # Recent huggingface_hub versions return a PIL Image directly; older
        # versions returned raw bytes. Handle both — unconditionally wrapping
        # the result in io.BytesIO raises TypeError on an Image object, which
        # would silently trigger the placeholder path on every call.
        if isinstance(result, Image.Image):
            return result
        return Image.open(io.BytesIO(result))
    except Exception:
        # Keep the deliberate best-effort placeholder fallback, but log the
        # failure instead of swallowing it silently so problems (bad token,
        # model unavailable, rate limits) are diagnosable.
        logging.getLogger(__name__).exception(
            "text-to-image generation failed; returning placeholder"
        )
        return Image.new("RGB", (512, 512), color=(240, 240, 240))