# sofia-ai-workspace / generation.py
# Created by GoGma (commit e7b3e27, verified)
import os
import random
from datetime import datetime
from typing import Optional
from huggingface_hub import InferenceClient
# Directory where generated images are stored; created eagerly at import time.
OUTPUT_DIR = "generated_images"
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Shared inference client (same setup as in Sofia Rivera). No token is passed,
# so it relies on ambient credentials (e.g. the HF_TOKEN env var) — TODO confirm.
client = InferenceClient()
def generate_image_from_prompt(
    prompt: str,
    negative_prompt: str = "",
    model_name: str = "black-forest-labs/FLUX.1-dev",
    seed: Optional[int] = None,
) -> tuple[Optional[str], str]:
    """Generate an image with ``InferenceClient.text_to_image`` and save it to OUTPUT_DIR.

    Parameters
    ----------
    prompt : str
        Text prompt for the image.
    negative_prompt : str
        Concepts to steer away from (empty string disables it).
    model_name : str
        Model repo id to run inference against.
    seed : Optional[int]
        Generation seed; when ``None`` a random 31-bit seed is chosen.

    Returns
    -------
    tuple[Optional[str], str]
        ``(image_path, status_message)``. On failure ``image_path`` is ``None``
        and ``status_message`` carries the error text.
    """
    try:
        if seed is None:
            seed = random.randint(0, 2_147_483_647)
        # FIX: forward the seed to the backend. Previously the seed was only
        # embedded in the filename/status, so the reported seed had no relation
        # to the image actually generated.
        image = client.text_to_image(
            prompt=prompt,
            negative_prompt=negative_prompt,
            model=model_name,
            guidance_scale=7.5,
            num_inference_steps=50,
            seed=seed,
        )
        # Timestamp + seed make the filename unique and reproducible to trace.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"sofia_{timestamp}_{seed}.png"
        file_path = os.path.join(OUTPUT_DIR, filename)
        image.save(file_path)
        # FIX: the status used to contain the literal "(unknown)" instead of
        # interpolating the saved path.
        status = f"✅ Imagen generada y guardada: {file_path}\nModelo: {model_name}\nSeed: {seed}"
        return file_path, status
    except Exception as e:
        # Boundary handler: callers expect (None, error_text) rather than a raise.
        error_msg = f"❌ Error al generar imagen: {str(e)}"
        return None, error_msg