# app.py — FastAPI image-to-image service (Hugging Face Space, author: tranduy2611)
from fastapi import FastAPI, File, UploadFile, Form
from fastapi.responses import FileResponse
from diffusers import AutoPipelineForImage2Image
from PIL import Image
from huggingface_hub import login
import torch
import os
# Point the Hugging Face cache at a writable location; must be set before
# the first model download so the pipeline weights land there.
os.environ["HF_HOME"] = "/app/huggingface_cache"
# Ensure the cache directory exists
os.makedirs("/app/huggingface_cache", exist_ok=True)

# Initialize FastAPI app
app = FastAPI()

# Authenticate with the Hugging Face Hub only when a token is configured.
# Calling login(token=None) raises at import time and would prevent the app
# from starting even for public models that need no authentication.
hf_token = os.environ.get("HUGGINGFACE_TOKEN")
if hf_token:
    login(token=hf_token)

# Load the image-to-image diffusion pipeline once at startup.
model_id = "kandinsky-community/kandinsky-2-2-decoder"
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AutoPipelineForImage2Image.from_pretrained(
    model_id,
    torch_dtype=torch.float32, use_safetensors=True
).to(device)

# Directory where generated images are written before being returned.
os.makedirs("output_images", exist_ok=True)
@app.get("/")
async def root():
    """Health-check endpoint: return a static welcome payload."""
    payload = {"message": "Welcome to the Image-to-Image API!"}
    return payload
@app.post("/image-to-image/")
async def image_to_image(
    prompt: str = Form(...),
    stren: float = Form(...),
    negative_prompt: str = Form(...),
    image: UploadFile = File(...)
):
    """
    Perform image-to-image transformation using a given prompt and input image.

    Args:
    - prompt (str): Text prompt describing the desired transformation.
    - stren (float): Diffusion strength; higher values deviate more from
      the input image (passed to the pipeline's `strength` argument).
    - negative_prompt (str): Features to steer the generation away from.
    - image (UploadFile): Input image file.

    Returns:
    - FileResponse: The transformed image as a PNG file, or a JSON
      ``{"error": ...}`` payload on failure.
    """
    try:
        # Open and preprocess the input image
        input_image = Image.open(image.file).convert("RGB")
        original_size = input_image.size  # remember size to restore after generation

        # Run the diffusion pipeline, then resize back to the input's dimensions.
        generated_image = pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=input_image,
            strength=stren,
        ).images[0]
        resized_image = generated_image.resize(original_size, Image.LANCZOS)

        # Sanitize the client-supplied filename (it may contain path
        # separators such as "../") and force a .png extension so the saved
        # format matches the declared media type below.
        safe_name = os.path.basename(image.filename or "image")
        stem = os.path.splitext(safe_name)[0] or "image"
        output_path = f"output_images/generated_{stem}.png"
        resized_image.save(output_path, format="PNG")

        # Return the generated image as a response
        return FileResponse(output_path, media_type="image/png")
    except Exception as e:
        # Boundary handler: surface the failure to the client as JSON
        # instead of an unhandled 500 traceback.
        return {"error": str(e)}